rpact/0000755000176200001440000000000013574450502011370 5ustar liggesusersrpact/NAMESPACE0000644000176200001440000000510613574441634012617 0ustar liggesusers# Generated by roxygen2: do not edit by hand S3method(as.data.frame,AnalysisResults) S3method(as.data.frame,ParameterSet) S3method(as.data.frame,PowerAndAverageSampleNumberResult) S3method(as.data.frame,StageResults) S3method(as.data.frame,TrialDesign) S3method(as.data.frame,TrialDesignCharacteristics) S3method(as.data.frame,TrialDesignPlan) S3method(as.data.frame,TrialDesignSet) S3method(as.matrix,FieldSet) S3method(length,TrialDesignSet) S3method(names,AnalysisResults) S3method(names,FieldSet) S3method(names,StageResults) S3method(names,TrialDesignSet) S3method(plot,AnalysisResults) S3method(plot,Dataset) S3method(plot,SimulationResults) S3method(plot,StageResults) S3method(plot,TrialDesign) S3method(plot,TrialDesignPlan) S3method(plot,TrialDesignSet) S3method(print,FieldSet) S3method(print,ParameterSet) S3method(summary,ParameterSet) S3method(summary,TrialDesignPlanSurvival) export(.getAnalysisResultsMeansParallelComputing) export(getAccrualTime) export(getAnalysisResults) export(getAvailablePlotTypes) export(getConditionalPower) export(getConditionalRejectionProbabilities) export(getData) export(getDataset) export(getDesignCharacteristics) export(getDesignFisher) export(getDesignGroupSequential) export(getDesignInverseNormal) export(getDesignSet) export(getEventProbabilities) export(getFinalConfidenceInterval) export(getFinalPValue) export(getHazardRatioByPi) export(getLambdaByMedian) export(getLambdaByPi) export(getLogLevel) export(getMedianByLambda) export(getMedianByPi) export(getNumberOfSubjects) export(getPiByLambda) export(getPiByMedian) export(getPiecewiseExponentialDistribution) export(getPiecewiseExponentialQuantile) export(getPiecewiseExponentialRandomNumbers) export(getPiecewiseSurvivalTime) export(getPowerAndAverageSampleNumber) export(getPowerMeans) export(getPowerRates) export(getPowerSurvival) export(getRawData) export(getRepeatedConfidenceIntervals) export(getRepeatedPValues) export(getSampleSizeMeans) export(getSampleSizeRates) export(getSampleSizeSurvival) export(getSimulationMeans) export(getSimulationRates) export(getSimulationSurvival) export(getStageResults) export(getTestActions) export(ppwexp) export(printCitation) export(qpwexp) export(readDataset) export(readDatasets) export(resetLogLevel) export(rpwexp) export(setLogLevel) export(testPackage) export(writeDataset) export(writeDatasets) exportMethods("[") import(graphics) import(methods) import(stats) import(tools) import(utils) importFrom(Rcpp,evalCpp) importFrom(methods,new) useDynLib(rpact, .registration = TRUE) rpact/README.md0000644000176200001440000000050513574432664012660 0ustar liggesusers# rpact Confirmatory Adaptive Clinical Trial Design and Analysis. # Installation Install the latest CRAN release via ``` r install.packages("rpact") ``` # Documentation The documentation is hosted at https://www.rpact.org # Vignettes The vignettes are hosted at https://www.rpact.org rpact/man/0000755000176200001440000000000013574442443012150 5ustar liggesusersrpact/man/TrialDesignSet_length.Rd0000644000176200001440000000070513574441634016664 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_set.R \name{TrialDesignSet_length} \alias{TrialDesignSet_length} \alias{length.TrialDesignSet} \title{Length of Trial Design Set} \usage{ \method{length}{TrialDesignSet}(x) } \description{ Returns the number of designs in a \code{TrialDesignSet}.
} \details{ This is helpful for iterating over all designs in a design set using the "[index]" syntax. } \keyword{internal} rpact/man/TrialDesignSet_as.data.frame.Rd0000644000176200001440000000121413574430660020000 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_set.R \name{TrialDesignSet_as.data.frame} \alias{TrialDesignSet_as.data.frame} \alias{as.data.frame.TrialDesignSet} \title{Coerce Trial Design Set to a Data Frame} \usage{ \method{as.data.frame}{TrialDesignSet}( x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, addPowerAndAverageSampleNumber = FALSE, theta = seq(-1, 1, 0.02), nMax = NA_integer_, ... ) } \description{ Returns the \code{TrialDesignSet} as a data frame. } \details{ Coerces the design set to a data frame. } \keyword{internal} rpact/man/getDesignGroupSequential.Rd0000644000176200001440000001063013574430660017416 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_group_sequential.R \name{getDesignGroupSequential} \alias{getDesignGroupSequential} \title{Get Design Group Sequential} \usage{ getDesignGroupSequential( ..., kMax = NA_integer_, alpha = NA_real_, beta = NA_real_, sided = 1, informationRates = NA_real_, futilityBounds = NA_real_, typeOfDesign = C_DEFAULT_TYPE_OF_DESIGN, deltaWT = 0, optimizationCriterion = C_OPTIMIZATION_CRITERION_DEFAULT, gammaA = 1, typeBetaSpending = C_TYPE_OF_DESIGN_BS_NONE, userAlphaSpending = NA_real_, userBetaSpending = NA_real_, gammaB = 1, bindingFutility = NA, constantBoundsHP = C_CONST_BOUND_HP_DEFAULT, twoSidedPower = NA, tolerance = C_DESIGN_TOLERANCE_DEFAULT ) } \arguments{ \item{...}{Ensures that all arguments are named and that a warning will be displayed if unknown arguments are passed.} \item{kMax}{The maximum number of stages K. K = 1, 2, 3,..., 10, default is \code{3}.} \item{alpha}{The significance level alpha, default is \code{0.025}.} \item{beta}{Type II error rate, necessary for providing sample size calculations \cr (e.g., \code{\link{getSampleSizeMeans}}), beta spending function designs, or optimum designs, default is \code{0.20}.} \item{sided}{One-sided or two-sided, default is \code{1}.} \item{informationRates}{The information rates, default is \code{(1 : kMax)/kMax}.} \item{futilityBounds}{The futility bounds, defined on the test statistic z scale (vector of length K - 1).} \item{typeOfDesign}{The type of design. Type of design is one of the following: O'Brien & Fleming ("OF"), Pocock ("P"), Wang & Tsiatis Delta class ("WT"), Haybittle & Peto ("HP"), Optimum design within Wang & Tsiatis class ("WToptimum"), O'Brien & Fleming type alpha spending ("asOF"), Pocock type alpha spending ("asP"), Kim & DeMets alpha spending ("asKD"), Hwang, Shi & DeCani alpha spending ("asHSD"), user defined alpha spending ("asUser"), default is \code{"OF"}.} \item{deltaWT}{Delta for Wang & Tsiatis Delta class.} \item{optimizationCriterion}{Optimization criterion for optimum design within Wang & Tsiatis class ("ASNH1", "ASNIFH1", "ASNsum"), default is \code{"ASNH1"}.} \item{gammaA}{Parameter for alpha spending function, default is \code{1}.} \item{typeBetaSpending}{Type of beta spending. Type of beta spending is one of the following: O'Brien & Fleming type beta spending, Pocock type beta spending, Kim & DeMets beta spending, Hwang, Shi & DeCani beta spending, user defined beta spending ("bsOF", "bsP",...).} \item{userAlphaSpending}{The user defined alpha spending.
Vector of length kMax containing the cumulative alpha-spending up to each interim stage.} \item{userBetaSpending}{The user defined beta spending. Vector of length kMax containing the cumulative beta-spending up to each interim stage.} \item{gammaB}{Parameter for beta spending function, default is \code{1}.} \item{bindingFutility}{If \code{bindingFutility = TRUE} is specified the calculation of the critical values is affected by the futility bounds (default is \code{FALSE}).} \item{constantBoundsHP}{The constant bounds up to stage K - 1 for the Haybittle & Peto design (default is \code{3}).} \item{twoSidedPower}{For two-sided testing, if \code{twoSidedPower = TRUE} is specified the sample size calculation is performed by considering both tails of the distribution. Default is \code{FALSE}, i.e., it is assumed that one tail probability is equal to 0 or that the power is directed towards one tail only.} \item{tolerance}{The tolerance, default is \code{1e-08}.} } \value{ Returns a \code{\link{TrialDesignGroupSequential}} object. } \description{ Provides adjusted boundaries and defines a group sequential design. } \details{ Depending on \code{typeOfDesign}, some parameters need to be specified and others do not. For example, \code{gammaA} needs to be specified only if \code{typeOfDesign = "asHSD"} is selected. If an alpha spending approach was specified ("asOF", "asP", "asKD", "asHSD", or "asUser"), a beta spending function can additionally be specified to produce futility bounds. } \examples{ # Run with default values getDesignGroupSequential() # Calculate the Pocock type alpha spending critical values if the second # interim analysis was performed after 70\% of information was observed getDesignGroupSequential(informationRates = c(0.4, 0.7), typeOfDesign = "asP") } \seealso{ \code{\link{getDesignSet}} for creating a set of designs to compare. } rpact/man/FrameSet_as.matrix.Rd0000644000176200001440000000063413574441634016137 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_parameter_set.R \name{FrameSet_as.matrix} \alias{FrameSet_as.matrix} \alias{as.matrix.FieldSet} \title{Coerce Frame Set to a Matrix} \usage{ \method{as.matrix}{FieldSet}(x, rownames.force = NA, ...) } \description{ Returns the \code{FrameSet} as a matrix. } \details{ Coerces the frame set to a matrix. } \keyword{internal} rpact/man/PowerAndAverageSampleNumberResult_as.data.frame.Rd0000644000176200001440000000125313574430660023646 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_power_and_asn.R \name{PowerAndAverageSampleNumberResult_as.data.frame} \alias{PowerAndAverageSampleNumberResult_as.data.frame} \alias{as.data.frame.PowerAndAverageSampleNumberResult} \title{Coerce Power And Average Sample Number Result to a Data Frame} \usage{ \method{as.data.frame}{PowerAndAverageSampleNumberResult}( x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, ... ) } \description{ Returns the \code{PowerAndAverageSampleNumberResult} as a data frame. } \details{ Coerces the object to a data frame.
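A minimal sketch (assuming the defaults for \code{theta} and \code{nMax} in \code{\link{getPowerAndAverageSampleNumber}}):
\preformatted{# compute power and ASN for a default group sequential design and coerce to a data frame
pasn <- getPowerAndAverageSampleNumber(getDesignGroupSequential())
head(as.data.frame(pasn))}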
} \keyword{internal} rpact/man/ParameterSet_as.data.frame.Rd0000644000176200001440000000105613574430660017517 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_parameter_set.R \name{ParameterSet_as.data.frame} \alias{ParameterSet_as.data.frame} \alias{as.data.frame.ParameterSet} \title{Coerce Parameter Set to a Data Frame} \usage{ \method{as.data.frame}{ParameterSet}( x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, ... ) } \description{ Returns the \code{ParameterSet} as a data frame. } \details{ Coerces the parameter set to a data frame. } \keyword{internal} rpact/man/getDesignCharacteristics.Rd0000644000176200001440000000144013574430660017401 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_group_sequential.R \name{getDesignCharacteristics} \alias{getDesignCharacteristics} \title{Get Design Characteristics} \usage{ getDesignCharacteristics(design) } \arguments{ \item{design}{The design.} } \value{ Returns a \code{\link{TrialDesignCharacteristics}} object. } \description{ Calculates and returns the characteristics of a design. } \details{ Calculates the inflation factor (IF), the expected reduction in sample size under H1, under H0, and under a value in between H0 and H1. Furthermore, absolute information values are calculated under the prototype case testing H0: mu = 0 against H1: mu = 1. } \examples{ # Run with default values getDesignCharacteristics(getDesignGroupSequential()) } rpact/man/getEventProbabilities.Rd0000644000176200001440000000623713574430660016735 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_sample_size_calculator.R \name{getEventProbabilities} \alias{getEventProbabilities} \title{Get Event Probabilities} \usage{ getEventProbabilities( time, ..., accrualTime = C_ACCRUAL_TIME_DEFAULT, accrualIntensity = C_ACCRUAL_INTENSITY_DEFAULT, kappa = 1, piecewiseSurvivalTime = NA_real_, lambda2 = NA_real_, lambda1 = NA_real_, allocationRatioPlanned = 1, hazardRatio = NA_real_, dropoutRate1 = C_DROP_OUT_RATE_1_DEFAULT, dropoutRate2 = C_DROP_OUT_RATE_2_DEFAULT, dropoutTime = C_DROP_OUT_TIME_DEFAULT, maxNumberOfSubjects = NA_real_ ) } \arguments{ \item{time}{A numeric vector with time values.} \item{...}{Ensures that all arguments are named and that a warning will be displayed if unknown arguments are passed.} \item{accrualTime}{The assumed accrual time intervals for the study, default is \code{c(0, 12)} (see details).} \item{accrualIntensity}{A vector of accrual intensities, default is the relative intensity \code{0.1} (see details).} \item{kappa}{The shape parameter of the Weibull distribution, default is \code{1}. The Weibull distribution cannot be used for the piecewise definition of the survival time distribution. Note that the parameters \code{shape} and \code{scale} in \code{\link[stats]{Weibull}} are equivalent to \code{kappa} and \code{1 / lambda}, respectively, in rpact.} \item{piecewiseSurvivalTime}{A vector that specifies the time intervals for the piecewise definition of the exponential survival time cumulative distribution function (see details).} \item{lambda2}{The assumed hazard rate in the reference group, there is no default. lambda2 can also be used to define piecewise exponentially distributed survival times (see details).} \item{lambda1}{The assumed hazard rate in the treatment group, there is no default.
lambda1 can also be used to define piecewise exponentially distributed survival times (see details).} \item{allocationRatioPlanned}{The planned allocation ratio, default is \code{1}. If \code{allocationRatioPlanned = 0} is entered, the optimal allocation ratio yielding the smallest number of subjects is determined.} \item{hazardRatio}{The vector of hazard ratios under consideration. If the event or hazard rates in both treatment groups are defined, the hazard ratio need not be specified, as it is calculated.} \item{dropoutRate1}{The assumed drop-out rate in the treatment group, default is \code{0}.} \item{dropoutRate2}{The assumed drop-out rate in the control group, default is \code{0}.} \item{dropoutTime}{The assumed time for drop-out rates in the control and the treatment group, default is \code{12}.} \item{maxNumberOfSubjects}{If \code{maxNumberOfSubjects > 0} is specified, the end of accrual at specified \code{accrualIntensity} for the specified number of subjects is determined or \code{accrualIntensity} is calculated at fixed end of accrual.} } \value{ Returns an \code{\link{EventProbabilities}} object. } \description{ Returns the event probabilities for specified parameters at a given time vector. } \details{ For details of the parameters see \code{\link{getSampleSizeSurvival}}. } \keyword{internal} rpact/man/getRepeatedConfidenceIntervals.Rd0000644000176200001440000000212613574430660020535 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_base.R \name{getRepeatedConfidenceIntervals} \alias{getRepeatedConfidenceIntervals} \title{Get Repeated Confidence Intervals} \usage{ getRepeatedConfidenceIntervals(design, dataInput, ...) } \arguments{ \item{design}{The trial design.} \item{dataInput}{The summary data used for calculating the test results. This is either an element of \code{DatasetMeans}, of \code{DatasetRates}, or of \code{DatasetSurvival}. See \code{\link{getDataset}}.} \item{stage}{The stage number (optional). Default: total number of existing stages in the data input.} } \description{ Calculates and returns the lower and upper limits of the repeated confidence intervals of the trial. } \details{ The repeated confidence interval at a given stage of the trial contains the parameter values that are not rejected using the specified sequential design. It can be calculated at each stage of the trial and can thus be used as a monitoring tool. The repeated confidence intervals are provided up to the specified stage.
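A minimal sketch (the data values are purely illustrative and the default null hypothesis is assumed):
\preformatted{# two-stage data for a one-sample means design; RCIs are returned per stage
design <- getDesignInverseNormal()
data <- getDataset(n = c(20, 30), means = c(0.5, 0.55), stDevs = c(1, 1.1))
getRepeatedConfidenceIntervals(design, dataInput = data)}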
} \keyword{internal} rpact/man/getDesignInverseNormal.Rd0000644000176200001440000001066313574430660017061 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_group_sequential.R \name{getDesignInverseNormal} \alias{getDesignInverseNormal} \title{Get Design Inverse Normal} \usage{ getDesignInverseNormal( ..., kMax = NA_integer_, alpha = NA_real_, beta = NA_real_, sided = 1, informationRates = NA_real_, futilityBounds = NA_real_, typeOfDesign = C_DEFAULT_TYPE_OF_DESIGN, deltaWT = 0, optimizationCriterion = C_OPTIMIZATION_CRITERION_DEFAULT, gammaA = 1, typeBetaSpending = C_TYPE_OF_DESIGN_BS_NONE, userAlphaSpending = NA_real_, userBetaSpending = NA_real_, gammaB = 1, bindingFutility = NA, constantBoundsHP = C_CONST_BOUND_HP_DEFAULT, twoSidedPower = C_TWO_SIDED_POWER_DEFAULT, tolerance = C_DESIGN_TOLERANCE_DEFAULT ) } \arguments{ \item{...}{Ensures that all arguments are named and that a warning will be displayed if unknown arguments are passed.} \item{kMax}{The maximum number of stages K. K = 1, 2, 3,..., 10, default is \code{3}.} \item{alpha}{The significance level alpha, default is \code{0.025}.} \item{beta}{Type II error rate, necessary for providing sample size calculations \cr (e.g., \code{\link{getSampleSizeMeans}}), beta spending function designs, or optimum designs, default is \code{0.20}.} \item{sided}{One-sided or two-sided, default is \code{1}.} \item{informationRates}{The information rates, default is \code{(1 : kMax)/kMax}.} \item{futilityBounds}{The futility bounds (vector of length K - 1).} \item{typeOfDesign}{The type of design. Type of design is one of the following: O'Brien & Fleming ("OF"), Pocock ("P"), Wang & Tsiatis Delta class ("WT"), Haybittle & Peto ("HP"), Optimum design within Wang & Tsiatis class ("WToptimum"), O'Brien & Fleming type alpha spending ("asOF"), Pocock type alpha spending ("asP"), Kim & DeMets alpha spending ("asKD"), Hwang, Shi & DeCani alpha spending ("asHSD"), user defined alpha spending ("asUser"), default is \code{"OF"}.} \item{deltaWT}{Delta for Wang & Tsiatis Delta class.} \item{optimizationCriterion}{Optimization criterion for optimum design within Wang & Tsiatis class ("ASNH1", "ASNIFH1", "ASNsum"), default is \code{"ASNH1"}.} \item{gammaA}{Parameter for alpha spending function, default is \code{1}.} \item{typeBetaSpending}{Type of beta spending. Type of beta spending is one of the following: O'Brien & Fleming type beta spending, Pocock type beta spending, Kim & DeMets beta spending, Hwang, Shi & DeCani beta spending, user defined beta spending ("bsOF", "bsP",...).} \item{userAlphaSpending}{The user defined alpha spending. Vector of length kMax containing the cumulative alpha-spending up to each interim stage.} \item{userBetaSpending}{The user defined beta spending. Vector of length kMax containing the cumulative beta-spending up to each interim stage.} \item{gammaB}{Parameter for beta spending function, default is \code{1}.} \item{bindingFutility}{If \code{bindingFutility = TRUE} is specified the calculation of the critical values is affected by the futility bounds (default is \code{FALSE}).} \item{constantBoundsHP}{The constant bounds up to stage K - 1 for the Haybittle & Peto design (default is \code{3}).} \item{twoSidedPower}{For two-sided testing, if \code{twoSidedPower = TRUE} is specified the sample size calculation is performed by considering both tails of the distribution.
Default is \code{FALSE}, i.e., it is assumed that one tail probability is equal to 0 or that the power is directed towards one tail only.} \item{tolerance}{The tolerance, default is \code{1e-08}.} } \value{ Returns a \code{\link{TrialDesignInverseNormal}} object. } \description{ Provides adjusted boundaries and defines a group sequential design for its use in the inverse normal combination test. } \details{ Depending on \code{typeOfDesign}, some parameters need to be specified and others do not. For example, \code{gammaA} needs to be specified only if \code{typeOfDesign = "asHSD"} is selected. If an alpha spending approach was specified ("asOF", "asP", "asKD", "asHSD", or "asUser"), a beta spending function can additionally be specified to produce futility bounds. } \examples{ # Run with default values getDesignInverseNormal() # Calculate the Pocock type alpha spending critical values if the second # interim analysis was performed after 70\% of information was observed getDesignInverseNormal(informationRates = c(0.4, 0.7), typeOfDesign = "asP") } \seealso{ \code{\link{getDesignSet}} for creating a set of designs to compare. } rpact/man/getStageResults.Rd0000644000176200001440000000536013574430660015566 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_base.R \name{getStageResults} \alias{getStageResults} \title{Get Stage Results} \usage{ getStageResults(design, dataInput, ...) } \arguments{ \item{design}{The trial design.} \item{dataInput}{The summary data used for calculating the test results. This is either an element of \code{DatasetMeans}, of \code{DatasetRates}, or of \code{DatasetSurvival}. See \code{\link{getDataset}}.} \item{...}{Further (optional) arguments to be passed: \describe{ \item{stage}{The stage number (optional). Default: total number of existing stages in the data input.} \item{thetaH0}{The null hypothesis value, default is 0 for the normal and binary cases and 1 for the survival case. For testing a rate in one sample, a value thetaH0 in (0, 1) has to be specified for defining the null hypothesis H0: pi = thetaH0. \cr For non-inferiority designs, this is the non-inferiority bound. } \item{thetaH1 and assumedStDev or pi1, pi2}{The assumed effect size or assumed rates to calculate the conditional power. Depending on the type of dataset, either thetaH1 (means and survival) or pi1, pi2 (rates) can be specified. Additionally, if testing means is specified, an assumed standard deviation can be specified, default is 1.} \item{normalApproximation}{The type of computation of the p-values. Default is FALSE for testing means (i.e., the t test is used) and TRUE for testing rates and the hazard ratio. For testing rates, if \cr \code{normalApproximation = FALSE} is specified, the binomial test (one sample) or the test of Fisher (two samples) is used for calculating the p-values. In the survival setting, \cr \code{normalApproximation = FALSE} has no effect.} \item{equalVariances}{The type of t test. For testing means in two treatment groups, either the t test assuming that the variances are equal or the t test without assuming this, i.e., the test of Welch-Satterthwaite is calculated, default is \code{equalVariances = TRUE}.} \item{directionUpper}{The direction of one-sided testing. Default is \code{directionUpper = TRUE} which means that larger values of the test statistics yield smaller p-values.} }} } \value{ Returns a \code{\link{StageResults}} object.
} \description{ Returns summary statistics and p-values for a given data set and a given design. } \details{ Calculates and returns the stage results of the specified design and data input at the specified stage. } \examples{ design <- getDesignInverseNormal() dataRates <- getDataset( n1 = c(10,10), n2 = c(20,20), events1 = c(8,10), events2 = c(10,16)) getStageResults(design, dataRates) } rpact/man/SimulationResultsSurvival.Rd0000644000176200001440000000061613574441634017705 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_simulation_results.R \docType{class} \name{SimulationResultsSurvival} \alias{SimulationResultsSurvival} \title{Class for Simulation Results Survival} \description{ A class for survival simulation results. } \details{ Use \code{\link{getSimulationSurvival}} to create an object of this type. } \keyword{internal} rpact/man/StageResultsRates.Rd0000644000176200001440000000172213574430660016063 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_stage_results.R \docType{class} \name{StageResultsRates} \alias{StageResultsRates} \title{Stage Results of Rates} \description{ Class for stage results of rates. } \details{ This object cannot be created directly; use \code{getStageResults} with suitable arguments to create the stage results of a dataset of rates. } \section{Fields}{ \describe{ \item{\code{testStatistics}}{The stage-wise test statistics.} \item{\code{pValues}}{The stage-wise p-values.} \item{\code{combInverseNormal}}{The inverse normal test.} \item{\code{combFisher}}{The Fisher's combination test.} \item{\code{effectSizes}}{The effect sizes for different designs.} \item{\code{testActions}}{The action drawn from test result.} \item{\code{weightsFisher}}{The weights for Fisher's combination test.} \item{\code{weightsInverseNormal}}{The weights for inverse normal statistic.} }} \keyword{internal} rpact/man/TrialDesignPlanSurvival_summary.Rd0000644000176200001440000000102113574430660020760 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_plan.R \name{TrialDesignPlanSurvival_summary} \alias{TrialDesignPlanSurvival_summary} \alias{summary.TrialDesignPlanSurvival} \title{Trial Design Plan Survival Set Summary} \usage{ \method{summary}{TrialDesignPlanSurvival}(object, ..., type = 1, digits = NA_integer_) } \description{ Displays a summary of a \code{TrialDesignPlanSurvival} object. } \details{ Summarizes the parameters and results of a survival design. } \keyword{internal} rpact/man/getSimulationRates.Rd0000644000176200001440000002421113574430660016260 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_simulation_rates.R \name{getSimulationRates} \alias{getSimulationRates} \title{Get Simulation Rates} \usage{ getSimulationRates( design = NULL, ..., groups = 2L, riskRatio = FALSE, thetaH0 = ifelse(riskRatio, 1, 0), pi1 = C_PI_1_DEFAULT, pi2 = NA_real_, plannedSubjects = NA_real_, directionUpper = C_DIRECTION_UPPER_DEFAULT, allocationRatioPlanned = NA_real_, minNumberOfSubjectsPerStage = NA_real_, maxNumberOfSubjectsPerStage = NA_real_, conditionalPower = NA_real_, pi1H1 = NA_real_, pi2H1 = 0.2, maxNumberOfIterations = C_MAX_SIMULATION_ITERATIONS_DEFAULT, seed = NA_real_, calcSubjectsFunction = NULL ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used.
In this case, \code{alpha}, \code{beta}, and \code{sided} can be directly entered as arguments.} \item{...}{Ensures that all arguments are named and that a warning will be displayed if unknown arguments are passed.} \item{groups}{The number of treatment groups (1 or 2), default is \code{2}.} \item{riskRatio}{If \code{riskRatio = TRUE} is specified, the design characteristics for one-sided testing of H0: pi1/pi2 = thetaH0 are simulated, default is \code{FALSE}.} \item{thetaH0}{The null hypothesis value. For one-sided testing, a value != 0 (or a value != 1 for testing the mean ratio) can be specified, default is \code{0} or \code{1} for difference and ratio testing, respectively.} \item{pi1}{The assumed probability in the active treatment group if two treatment groups are considered, or the alternative probability for a one treatment group design, default is \code{seq(0.2,0.5,0.1)}.} \item{pi2}{The assumed probability in the reference group if two treatment groups are considered, default is \code{0.2}.} \item{plannedSubjects}{\code{plannedSubjects} is a vector of length kMax (the number of stages of the design) that determines the number of cumulated (overall) subjects when the interim stages are planned.} \item{directionUpper}{Specifies the direction of the alternative, only applicable for one-sided testing, default is \code{TRUE}.} \item{allocationRatioPlanned}{The planned allocation ratio for a two treatment groups design, default is \code{1}.} \item{minNumberOfSubjectsPerStage}{When performing a data driven sample size recalculation, the vector \code{minNumberOfSubjectsPerStage} of length kMax determines the minimum number of subjects per stage (i.e., not cumulated); the first element is not taken into account.} \item{maxNumberOfSubjectsPerStage}{When performing a data driven sample size recalculation, the vector \code{maxNumberOfSubjectsPerStage} of length kMax determines the maximum number of subjects per stage (i.e., not cumulated); the first element is not taken into account.} \item{conditionalPower}{The conditional power for the subsequent stage under which the sample size recalculation is performed.} \item{pi1H1}{If specified, the assumed probability in the active treatment group if two treatment groups are considered, or the assumed probability for a one treatment group design, for which the conditional power was calculated.} \item{pi2H1}{If specified, the assumed probability in the reference group if two treatment groups are considered, for which the conditional power was calculated, default is \code{0.2}.} \item{maxNumberOfIterations}{The number of simulation iterations.} \item{seed}{The seed to reproduce the simulation, default is a random seed.} \item{calcSubjectsFunction}{Optionally, a function can be entered that defines the way of performing the sample size recalculation. By default, sample size recalculation is performed with conditional power and specified \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (see details and examples).} } \value{ Returns a \code{\link{SimulationResultsRates}} object. } \description{ Returns the simulated power, stopping probabilities, conditional power, and expected sample size for testing rates in a one or two treatment groups testing situation. } \details{ For a given design, the function simulates the power, stopping probabilities, conditional power, and expected sample size at a given number of subjects and parameter configuration.
Additionally, an allocation ratio = n1/n2 can be specified where n1 and n2 are the number of subjects in the two treatment groups. calcSubjectsFunction\cr This function returns the number of subjects at given conditional power and conditional Type I error rate for a specified testing situation. The function might depend on the variables \code{stage}, \code{riskRatio}, \code{thetaH0}, \code{groups}, \code{plannedSubjects}, \code{directionUpper}, \code{allocationRatioPlanned}, \code{minNumberOfSubjectsPerStage}, \code{maxNumberOfSubjectsPerStage}, \code{sampleSizesPerStage}, \code{conditionalPower}, \code{conditionalCriticalValue}, \code{overallRate}, \code{farringtonManningValue1}, and \code{farringtonManningValue2}. The function has to contain the three-dots argument '...' (see examples). } \section{Simulation Data}{ The summary statistics "Simulated data" contains the following parameters: median [range]; mean +/-sd\cr \code{$show(showStatistics = FALSE)} or \code{$setShowStatistics(FALSE)} can be used to disable the output of the aggregated simulated data.\cr Example 1: \cr \code{simulationResults <- getSimulationRates(plannedSubjects = 40)} \cr \code{simulationResults$show(showStatistics = FALSE)}\cr Example 2: \cr \code{simulationResults <- getSimulationRates(plannedSubjects = 40)} \cr \code{simulationResults$setShowStatistics(FALSE)}\cr \code{simulationResults}\cr \code{\link{getData}} can be used to get the aggregated simulated data from the object as \code{\link[base]{data.frame}}. The data frame contains the following columns: \enumerate{ \item \code{iterationNumber}: The number of the simulation iteration. \item \code{stageNumber}: The stage. \item \code{pi1}: The assumed or derived event rate in the treatment group (if available). \item \code{pi2}: The assumed or derived event rate in the control group (if available). \item \code{numberOfSubjects}: The number of subjects under consideration when the (interim) analysis takes place. \item \code{rejectPerStage}: 1 if null hypothesis can be rejected, 0 otherwise. \item \code{futilityPerStage}: 1 if study should be stopped for futility, 0 otherwise. \item \code{testStatistic}: The test statistic that is used for the test decision; it depends on which design was chosen (group sequential, inverse normal, or Fisher combination test). \item \code{testStatisticsPerStage}: The test statistic for each stage if only data from the considered stage is taken into account. \item \code{overallRates1}: The overall rate in treatment group 1. \item \code{overallRates2}: The overall rate in treatment group 2. \item \code{stagewiseRates1}: The stagewise rate in treatment group 1. \item \code{stagewiseRates2}: The stagewise rate in treatment group 2. \item \code{sampleSizesPerStage1}: The stagewise sample size in treatment group 1. \item \code{sampleSizesPerStage2}: The stagewise sample size in treatment group 2. \item \code{trialStop}: \code{TRUE} if study should be stopped for efficacy or futility or final stage, \code{FALSE} otherwise. \item \code{conditionalPowerAchieved}: The conditional power for the subsequent stage of the trial for selected sample size and effect. The effect is either estimated from the data or can be user defined with \code{pi1H1} and \code{pi2H1}.
} } \examples{ # Fixed sample size with minimum required definitions, pi1 = (0.3,0.4,0.5, 0.6) and pi2 = 0.3 getSimulationRates(pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, plannedSubjects = 120, maxNumberOfIterations = 50) \donttest{ # Increase number of simulation iterations and compare results with power calculator getSimulationRates(pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, plannedSubjects = 120, maxNumberOfIterations = 50) getPowerRates(pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, maxNumberOfSubjects = 120) # Do the same for a two-stage Pocock inverse normal group sequential # design with non-binding futility stops designIN <- getDesignInverseNormal(typeOfDesign = "P", futilityBounds = c(0)) getSimulationRates(designIN, pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, plannedSubjects = c(40, 80), maxNumberOfIterations = 50) getPowerRates(designIN, pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, maxNumberOfSubjects = 80) # Assess power and average sample size if a sample size reassessment is # foreseen at conditional power 80\% for the subsequent stage (decrease and increase) # based on observed overall rates and specified minNumberOfSubjectsPerStage # and maxNumberOfSubjectsPerStage # Do the same under the assumption that a sample size increase only takes place # if the rate difference exceeds the value 0.1 at interim. For this, the sample # size recalculation method needs to be redefined: mySampleSizeCalculationFunction <- function(..., stage, plannedSubjects, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, conditionalPower, conditionalCriticalValue, overallRate) { if (overallRate[1] - overallRate[2] < 0.1) { return(plannedSubjects[stage] - plannedSubjects[stage - 1]) } else { rateUnderH0 <- (overallRate[1] + overallRate[2]) / 2 stageSubjects <- 2 * (max(0, conditionalCriticalValue * sqrt(2 * rateUnderH0 * (1 - rateUnderH0)) + stats::qnorm(conditionalPower) * sqrt(overallRate[1] * (1 - overallRate[1]) + overallRate[2] * (1 - overallRate[2]))))^2 / (max(1e-12, (overallRate[1] - overallRate[2])))^2 stageSubjects <- ceiling(min(max( minNumberOfSubjectsPerStage[stage], stageSubjects), maxNumberOfSubjectsPerStage[stage])) return(stageSubjects) } } getSimulationRates(designIN, pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, plannedSubjects = c(40, 80), minNumberOfSubjectsPerStage = c(40, 20), maxNumberOfSubjectsPerStage = c(40, 160), conditionalPower = 0.8, calcSubjectsFunction = mySampleSizeCalculationFunction, maxNumberOfIterations = 50) } } rpact/man/Dataset.Rd0000644000176200001440000000116713574430660014027 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \docType{class} \name{Dataset} \alias{Dataset} \title{Dataset} \description{ Basic class for datasets. } \details{ \code{Dataset} is the basic class for \itemize{ \item \code{\link{DatasetMeans}}, \item \code{\link{DatasetRates}}, and \item \code{\link{DatasetSurvival}}. } This basic class contains the fields \code{stages} and \code{groups} and several commonly used functions. 
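A minimal sketch of creating a concrete dataset and accessing the common fields documented below (the data values are illustrative):
\preformatted{# a two-stage rates dataset; stages and groups are derived from the input vectors
datasetOfRates <- getDataset(n = c(8, 10), events = c(4, 5))
datasetOfRates$stages
datasetOfRates$groups}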
} \section{Fields}{ \describe{ \item{\code{stages}}{The stage numbers.} \item{\code{groups}}{The group numbers.} }} \keyword{internal} rpact/man/getConditionalRejectionProbabilities.Rd0000644000176200001440000000273613574430660021764 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_base.R \name{getConditionalRejectionProbabilities} \alias{getConditionalRejectionProbabilities} \title{Get Conditional Rejection Probabilities} \usage{ getConditionalRejectionProbabilities(design, stageResults, ...) } \arguments{ \item{design}{The trial design.} \item{stageResults}{The results at given stage, obtained from \code{\link{getStageResults}}.} \item{stage}{The stage number (optional). Default: total number of existing stages in the data input.} } \description{ Calculates the conditional rejection probabilities (CRP) for given test results. } \details{ The conditional rejection probability is the probability, under H0, of rejecting H0 in one of the subsequent (remaining) stages. The probability is calculated using the specified design. For testing rates and the survival design, the normal approximation is used, i.e., it is calculated with the use of the prototype case testing a mean for normally distributed data with known variance. The conditional rejection probabilities are provided up to the specified stage. For Fisher's combination test, you can check the validity of the CRP calculation via simulation. } \examples{ x <- getDesignFisher(kMax = 3, informationRates = c(0.1,0.8,1)) y <- getDataset(n = c(40,40), events = c(20,22)) getConditionalRejectionProbabilities(x, getStageResults(x, y, thetaH0 = 0.4)) # provides # [1] 0.0216417 0.1068607 NA } \keyword{internal} rpact/man/TrialDesignConditionalDunnett.Rd0000644000176200001440000000054413574441634020376 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design.R \docType{class} \name{TrialDesignConditionalDunnett} \alias{TrialDesignConditionalDunnett} \title{Conditional Dunnett Design} \description{ Trial design for conditional Dunnett tests. } \details{ This object should not be created directly. } \keyword{internal} rpact/man/plot.SimulationResults.Rd0000644000176200001440000000566613574430660017125 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_simulation_results.R \name{plot.SimulationResults} \alias{plot.SimulationResults} \title{Simulation Results Plotting} \usage{ \method{plot}{SimulationResults}( x, y, main = NA_character_, xlab = NA_character_, ylab = NA_character_, type = 1, palette = "Set1", theta = seq(-1, 1, 0.01), plotPointsEnabled = NA, legendPosition = NA_integer_, showSource = FALSE, ... ) } \arguments{ \item{x}{The simulation results, obtained from \cr \code{\link{getSimulationSurvival}}.} \item{y}{Not available for this kind of plot (is only defined to be compatible with the generic plot function).} \item{main}{The main title.} \item{xlab}{The x-axis label.} \item{ylab}{The y-axis label.} \item{type}{The plot type (default = \code{1}).
The following plot types are available: \itemize{ \item \code{1}: creates a 'Boundaries' plot \item \code{2}: creates a 'Boundaries Effect Scale' plot \item \code{3}: creates a 'Boundaries p Values Scale' plot \item \code{4}: creates a 'Type One Error Spending' plot \item \code{5}: creates a 'Sample Size' or 'Overall Power and Early Stopping' plot \item \code{6}: creates a 'Number of Events' or 'Sample Size' plot \item \code{7}: creates an 'Overall Power' plot \item \code{8}: creates an 'Overall Early Stopping' plot \item \code{9}: creates an 'Expected Number of Events' or 'Expected Sample Size' plot \item \code{10}: creates a 'Study Duration' plot \item \code{11}: creates an 'Expected Number of Subjects' plot \item \code{12}: creates an 'Analysis Times' plot \item \code{13}: creates a 'Cumulative Distribution Function' plot \item \code{14}: creates a 'Survival Function' plot }} \item{palette}{The palette, default is \code{"Set1"}.} \item{theta}{A vector of theta values.} \item{plotPointsEnabled}{If \code{TRUE}, additional points will be plotted.} \item{legendPosition}{The position of the legend. By default (\code{NA_integer_}) the algorithm tries to find a suitable position. Choose one of the following values to specify the position manually: \itemize{ \item \code{-1}: no legend will be shown \item \code{NA}: the algorithm tries to find a suitable position \item \code{0}: legend position outside plot \item \code{1}: legend position left top \item \code{2}: legend position left center \item \code{3}: legend position left bottom \item \code{4}: legend position right top \item \code{5}: legend position right center \item \code{6}: legend position right bottom }} \item{showSource}{If \code{TRUE}, the parameter names of the object will be printed which were used to create the plot; that may be useful, e.g., to check the values or to create your own plots with \code{\link[graphics]{plot}}.} \item{...}{Optional \code{ggplot2} arguments.} } \value{ A \code{ggplot2} object. } \description{ Plots simulation results. } \details{ Generic function to plot all kinds of simulation results. } rpact/man/getDataset.Rd0000644000176200001440000001174013574430660014525 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \name{getDataset} \alias{getDataset} \title{Get Dataset} \usage{ getDataset(..., floatingPointNumbersEnabled = FALSE) } \arguments{ \item{...}{A \code{data.frame} or some data vectors defining the dataset.} \item{floatingPointNumbersEnabled}{If \code{TRUE}, sample sizes can be specified as floating-point numbers (in general this only makes sense for simulation purposes); \cr by default \code{floatingPointNumbersEnabled = FALSE}, i.e., sample sizes defined as floating-point numbers will be truncated.} } \value{ Returns a \code{\link{Dataset}} object. } \description{ Creates a dataset object and returns it. } \details{ The different dataset types \code{DatasetMeans}, \code{DatasetRates}, and \code{DatasetSurvival} can be created as follows: \itemize{ \item An element of \code{\link{DatasetMeans}} for one sample is created by \cr \code{getDataset(sampleSizes =, means =, stDevs =)} where \cr \code{sampleSizes}, \code{means}, \code{stDevs} are vectors with stagewise sample sizes, means and standard deviations of length given by the number of available stages.
\item An element of \code{\link{DatasetMeans}} for two samples is created by \cr \code{getDataset(sampleSizes1 =, sampleSizes2 =, means1 =, means2 =, } \cr \code{stDevs1 =, stDevs2 =)} where \code{sampleSizes1}, \code{sampleSizes2}, \code{means1}, \code{means2}, \code{stDevs1}, \code{stDevs2} are vectors with stagewise sample sizes, means and standard deviations for the two treatment groups of length given by the number of available stages. \item An element of \code{\link{DatasetRates}} for one sample is created by \cr \code{getDataset(sampleSizes =, events =)} where \code{sampleSizes}, \code{events} are vectors with stagewise sample sizes and events of length given by the number of available stages. \item An element of \code{\link{DatasetRates}} for two samples is created by \cr \code{getDataset(sampleSizes1 =, sampleSizes2 =, events1 =, events2 =)} where \code{sampleSizes1}, \code{sampleSizes2}, \code{events1}, \code{events2} are vectors with stagewise sample sizes and events for the two treatment groups of length given by the number of available stages. \item An element of \code{\link{DatasetSurvival}} is created by \cr \code{getDataset(events =, logRanks =, allocationRatios =)} where \code{events}, \code{logRanks}, and \code{allocationRatios} are the stagewise events, (one-sided) logrank statistics, and allocation ratios. } Using the prefix \code{overall[capitalized first letter of the variable name]...} enables entering the overall (cumulative) results; the stagewise statistics are then calculated. Note that in survival designs usually the overall events and logrank test statistics are provided in the output, so \cr \code{getDataset(overallEvents =, overallLogRanks =, overallAllocationRatios =)} \cr is the usual command for entering survival data, as sketched below. Note also that the z scores from a Cox regression can be used for \code{overallLogRanks}. \code{n} can be used in place of \code{sampleSizes}.
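A minimal sketch of the overall survival data entry (two stages; values are illustrative):
\preformatted{# overall (cumulative) results are entered; stagewise statistics are calculated internally
getDataset(
    overallEvents = c(8, 15),
    overallLogRanks = c(1.52, 1.98),
    overallAllocationRatios = c(1, 1))}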
} \examples{ # Create a Dataset of Means (one group): datasetOfMeans <- getDataset( n = c(22, 11, 22, 11), means = c(1, 1.1, 1, 1), stDevs = c(1, 2, 2, 1.3) ) datasetOfMeans datasetOfMeans$show(showType = 2) datasetOfMeans <- getDataset( overallSampleSizes = c(22, 33, 55, 66), overallMeans = c(1.000, 1.033, 1.020, 1.017), overallStDevs = c(1.00, 1.38, 1.64, 1.58) ) datasetOfMeans datasetOfMeans$show(showType = 2) as.data.frame(datasetOfMeans) # Create a Dataset of Means (two groups): datasetOfMeans <- getDataset( n1 = c(22, 11, 22, 11), n2 = c(22, 13, 22, 13), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 3, 2.5), stDevs1 = c(1, 2, 2, 1.3), stDevs2 = c(1, 2, 2, 1.3) ) datasetOfMeans datasetOfMeans <- getDataset( overallSampleSizes1 = c(22, 33, 55, 66), overallSampleSizes2 = c(22, 35, 57, 70), overallMeans1 = c(1, 1.033, 1.020, 1.017), overallMeans2 = c(1.4, 1.437, 2.040, 2.126), overallStDevs1 = c(1, 1.38, 1.64, 1.58), overallStDevs2 = c(1, 1.43, 1.82, 1.74) ) datasetOfMeans df <- data.frame( stages = 1:4, n1 = c(22, 11, 22, 11), n2 = c(22, 13, 22, 13), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 3, 2.5), stDevs1 = c(1, 2, 2, 1.3), stDevs2 = c(1, 2, 2, 1.3) ) datasetOfMeans <- getDataset(df) datasetOfMeans ## Create a Dataset of Rates (one group): datasetOfRates <- getDataset( n = c(8, 10, 9, 11), events = c(4, 5, 5, 6) ) datasetOfRates ## Create a Dataset of Rates (two groups): datasetOfRates <- getDataset( n2 = c(8, 10, 9, 11), n1 = c(11, 13, 12, 13), events2 = c(3, 5, 5, 6), events1 = c(10, 10, 12, 12) ) datasetOfRates ## Create a Survival Dataset dataset <- getDataset( overallEvents = c(8, 15, 19, 31), overallAllocationRatios = c(1, 1, 1, 2), overallLogRanks = c(1.52, 1.98, 1.99, 2.11) ) dataset } rpact/man/TrialDesignGroupSequential.Rd0000644000176200001440000000103413574430660017710 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design.R \docType{class} \name{TrialDesignGroupSequential} \alias{TrialDesignGroupSequential} \title{Group Sequential Design} \description{ Trial design for group sequential design. } \details{ This object should not be created directly; use \code{\link{getDesignGroupSequential}} with suitable arguments to create a group sequential design. } \seealso{ \code{\link{getDesignGroupSequential}} for creating a group sequential design. } \keyword{internal} rpact/man/TrialDesignPlanMeans.Rd0000644000176200001440000000066013574441634016446 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_plan.R \docType{class} \name{TrialDesignPlanMeans} \alias{TrialDesignPlanMeans} \title{Trial Design Plan Means} \description{ Trial design plan for means. } \details{ This object cannot be created directly; use \code{\link{getSampleSizeMeans}} with suitable arguments to create a design plan for a dataset of means. } \keyword{internal} rpact/man/TrialDesignPlanSurvival.Rd0000644000176200001440000000071413574441634017216 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_plan.R \docType{class} \name{TrialDesignPlanSurvival} \alias{TrialDesignPlanSurvival} \title{Trial Design Plan Survival} \description{ Trial design plan for survival data. } \details{ This object cannot be created directly; use \code{\link{getSampleSizeSurvival}} with suitable arguments to create a design plan for a dataset of survival data.
} \keyword{internal} rpact/man/ParameterSet_summary.Rd0000644000176200001440000000223413574430660016607 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_parameter_set.R \name{ParameterSet_summary} \alias{ParameterSet_summary} \alias{summary.ParameterSet} \title{Parameter Set Summary} \usage{ \method{summary}{ParameterSet}(object, ..., type = 1, digits = NA_integer_) } \arguments{ \item{digits}{Defines how many digits are to be used for numeric values.} } \description{ Displays a summary of a \code{ParameterSet} object. } \details{ Summarizes the parameters and results of a parameter set. The following options can be set globally: \enumerate{ \item \code{rpact.summary.justify}: one of \code{c("right", "left", "centre")}; shall the values be right-justified (the default), left-justified or centred. \item \code{rpact.summary.digits}: defines how many digits are to be used for numeric values (default is 3). \item \code{rpact.summary.digits.fixed}: if \code{FALSE} (default), probabilities get one more digit than defined by \code{rpact.summary.digits}. \item \code{rpact.summary.trim.zeroes}: if \code{TRUE} (default), zeroes will always be displayed as "0", e.g., "0.000" will become "0". } } \keyword{internal} rpact/man/getDesignSet.Rd0000644000176200001440000000232313574430660015022 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_set.R \name{getDesignSet} \alias{getDesignSet} \title{Get Design Set} \usage{ getDesignSet(...) } \arguments{ \item{...}{'designs' OR 'design' and one or more design parameters, e.g., deltaWT = c(0.1, 0.3, 0.4). \itemize{ \item \code{design} The master design (optional, you need to specify an additional parameter that shall be varied). \item \code{designs} The designs to compare (optional). }} } \value{ Returns a \code{\link{TrialDesignSet}} object. } \description{ Creates a trial design set object and returns it. } \details{ Specify a master design and one or more design parameters or a list of designs. } \examples{ # Example 1 design <- getDesignGroupSequential(alpha = 0.05, kMax = 6, sided = 2, typeOfDesign = "WT", deltaWT = 0.1) designSet <- getDesignSet() designSet$add(design = design, deltaWT = c(0.3, 0.4)) if (require(ggplot2)) plot(designSet, type = 1) # Example 2 (shorter script) design <- getDesignGroupSequential(alpha = 0.05, kMax = 6, sided = 2, typeOfDesign = "WT", deltaWT = 0.1) designSet <- getDesignSet(design = design, deltaWT = c(0.3, 0.4)) if (require(ggplot2)) plot(designSet) } rpact/man/AccrualTime.Rd0000644000176200001440000000055013574441634014631 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_time.R \docType{class} \name{AccrualTime} \alias{AccrualTime} \title{Accrual Time} \description{ Class for definition of accrual time and accrual intensity. } \details{ \code{AccrualTime} is a class for definition of accrual time and accrual intensity. } \keyword{internal} rpact/man/StageResults.Rd0000644000176200001440000000166313574430660015070 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_stage_results.R \docType{class} \name{StageResults} \alias{StageResults} \title{Basic Stage Results} \description{ Basic class for stage results. } \details{ \code{StageResults} is the basic class for \code{StageResultsMeans}, \code{StageResultsRates}, and \code{StageResultsSurvival}.
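Objects of these classes are created by \code{\link{getStageResults}}, e.g. (a minimal sketch using the example data shown there):
\preformatted{design <- getDesignInverseNormal()
dataRates <- getDataset(n1 = c(10, 10), n2 = c(20, 20),
    events1 = c(8, 10), events2 = c(10, 16))
getStageResults(design, dataRates)}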
} \section{Fields}{ \describe{ \item{\code{testStatistics}}{The stage-wise test statistics.} \item{\code{pValues}}{The stage-wise p-values.} \item{\code{combInverseNormal}}{The inverse normal test.} \item{\code{combFisher}}{The Fisher's combination test.} \item{\code{effectSizes}}{The effect sizes for different designs.} \item{\code{testActions}}{The action drawn from test result.} \item{\code{weightsFisher}}{The weights for Fisher's combination test.} \item{\code{weightsInverseNormal}}{The weights for inverse normal statistic.} }} \keyword{internal} rpact/man/PiecewiseSurvivalTime.Rd0000644000176200001440000000061113574441634016726 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_time.R \docType{class} \name{PiecewiseSurvivalTime} \alias{PiecewiseSurvivalTime} \title{Piecewise Exponential Survival Time} \description{ Class for definition of piecewise survival times. } \details{ \code{PiecewiseSurvivalTime} is a class for definition of piecewise survival times. } \keyword{internal} rpact/man/ParameterSet.Rd0000644000176200001440000000051713574441634015037 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_parameter_set.R \docType{class} \name{ParameterSet} \alias{ParameterSet} \title{Parameter Set} \description{ Basic class for parameter sets. } \details{ The parameter set implements basic functions for a set of parameters. } \keyword{internal} rpact/man/AnalysisResults.Rd0000644000176200001440000000077313574430660015611 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{AnalysisResults} \alias{AnalysisResults} \title{Basic Class for Analysis Results} \description{ A basic class for analysis results. } \details{ \code{AnalysisResults} is the basic class for \itemize{ \item \code{\link{AnalysisResultsFisher}}, \item \code{\link{AnalysisResultsGroupSequential}}, and \item \code{\link{AnalysisResultsInverseNormal}}. } } \keyword{internal} rpact/man/rpact.Rd0000644000176200001440000000521113574430660013545 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pkgname.R \docType{package} \name{rpact} \alias{rpact} \alias{rpact-package} \title{rpact - Confirmatory Adaptive Clinical Trial Design and Analysis} \description{ rpact (R Package for Adaptive Clinical Trials) is a comprehensive package that enables the design and analysis of confirmatory adaptive group sequential designs. Particularly, the methods described in the recent \href{http://monograph.wassmer.brannath.rpact.com}{monograph by Wassmer and Brannath} (published by Springer, 2016) are implemented. It also comprises advanced methods for sample size calculations for fixed sample size designs, including, e.g., sample size calculation for survival trials with piecewise exponentially distributed survival times and staggered patient entry. } \details{ rpact includes the classical group sequential designs (including user spending function approaches) where the sample sizes per stage (or the time points of interim analysis) cannot be changed in a data-driven way. Confirmatory adaptive designs explicitly allow for this under control of the Type I error rate. They are based either on the combination testing principle or on the conditional rejection probability (CRP) principle. Both are available; for the former, the inverse normal combination test and Fisher's combination test can be used.
Specific techniques of the adaptive methodology are also available, e.g., overall confidence intervals, overall p-values, and conditional and predictive power assessments. Simulations can be performed to assess the design characteristics of a (user-defined) sample size recalculation strategy. Designs are available for trials with continuous, binary, and survival endpoints. For more information please visit \href{https://www.rpact.org}{www.rpact.org}. If you are interested in professional services related to the package or need comprehensive validation documentation to fulfill regulatory requirements please visit \href{https://www.rpact.com}{www.rpact.com}. rpact is developed by \itemize{ \item Gernot Wassmer (\href{mailto:gernot.wassmer@rpact.com}{gernot.wassmer@rpact.com}) and \item Friedrich Pahlke (\href{mailto:friedrich.pahlke@rpact.com}{friedrich.pahlke@rpact.com}). } } \references{ Wassmer, G., Brannath, W. (2016) Group Sequential and Confirmatory Adaptive Designs in Clinical Trials (Springer Series in Pharmaceutical Statistics) <\href{https://doi.org/10.1007/978-3-319-32562-0}{doi:10.1007/978-3-319-32562-0}> } \seealso{ Useful links: \itemize{ \item \url{https://www.rpact.org} \item Report bugs at \url{https://bugreport.rpact.org} } } \author{ Gernot Wassmer, Friedrich Pahlke } rpact/man/StageResults_as.data.frame.Rd0000644000176200001440000000107613574430660017552 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_stage_results.R \name{StageResults_as.data.frame} \alias{StageResults_as.data.frame} \alias{as.data.frame.StageResults} \title{Coerce Stage Results to a Data Frame} \usage{ \method{as.data.frame}{StageResults}( x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, type = 1, ... ) } \description{ Returns the \code{StageResults} as a data frame. } \details{ Coerces the stage results to a data frame. } \keyword{internal} rpact/man/getPiecewiseSurvivalTime.Rd0000644000176200001440000001033213574430660017424 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_time.R \name{getPiecewiseSurvivalTime} \alias{getPiecewiseSurvivalTime} \title{Get Piecewise Survival Time} \usage{ getPiecewiseSurvivalTime( piecewiseSurvivalTime = NA_real_, ..., lambda1 = NA_real_, lambda2 = NA_real_, hazardRatio = NA_real_, pi1 = NA_real_, pi2 = NA_real_, median1 = NA_real_, median2 = NA_real_, eventTime = C_EVENT_TIME_DEFAULT, kappa = 1, delayedResponseAllowed = FALSE ) } \arguments{ \item{piecewiseSurvivalTime}{A vector that specifies the time intervals for the piecewise definition of the exponential survival time cumulative distribution function (see details).} \item{...}{Ensures that all arguments after \code{piecewiseSurvivalTime} are named and that a warning will be displayed if unknown arguments are passed.} \item{lambda1}{The assumed hazard rate in the treatment group, there is no default. lambda1 can also be used to define piecewise exponentially distributed survival times (see details).} \item{lambda2}{The assumed hazard rate in the reference group, there is no default. lambda2 can also be used to define piecewise exponentially distributed survival times (see details).} \item{hazardRatio}{The vector of hazard ratios under consideration.
If the event or hazard rates in both treatment groups are defined, the hazard ratio need not be specified as it is calculated.} \item{pi1}{The assumed event rate in the treatment group, default is \code{seq(0.4, 0.6, 0.1)}.} \item{pi2}{The assumed event rate in the control group, default is \code{0.2}.} \item{median1}{The assumed median survival time in the treatment group, there is no default.} \item{median2}{The assumed median survival time in the reference group, there is no default.} \item{eventTime}{The assumed time under which the event rates are calculated, default is \code{12}.} \item{kappa}{The shape parameter of the Weibull distribution, default is \code{1}. The Weibull distribution cannot be used for the piecewise definition of the survival time distribution. Note that the parameters \code{shape} and \code{scale} in \code{\link[stats]{Weibull}} are equivalent to \code{kappa} and \code{1 / lambda}, respectively, in rpact.} \item{delayedResponseAllowed}{If \code{TRUE}, delayed response is allowed; otherwise it will be validated that the definition is not delayed, default is \code{FALSE}.} } \value{ Returns a \code{\link{PiecewiseSurvivalTime}} object. } \description{ Returns a \code{PiecewiseSurvivalTime} object that contains all relevant parameters of an exponential survival time cumulative distribution function. } \details{ \code{piecewiseSurvivalTime} The first element of this vector must be equal to \code{0}. \code{piecewiseSurvivalTime} can also be a list that combines the definition of the time intervals and hazard rates in the reference group. The definition of the survival time in the treatment group is obtained by the specification of the hazard ratio (see examples for details). } \examples{ pwst <- getPiecewiseSurvivalTime(lambda2 = 0.5, hazardRatio = 0.8) pwst pwst <- getPiecewiseSurvivalTime(lambda2 = 0.5, lambda1 = 0.4) pwst pwst <- getPiecewiseSurvivalTime(pi2 = 0.5, hazardRatio = 0.8) pwst pwst <- getPiecewiseSurvivalTime(pi2 = 0.5, pi1 = 0.4) pwst pwst <- getPiecewiseSurvivalTime(pi1 = 0.3) pwst pwst <- getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), lambda2 = 0.4) pwst pwst <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 6, 9), lambda2 = c(0.025, 0.04, 0.015), hazardRatio = 0.8) pwst pwst <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 6, 9), lambda2 = c(0.025, 0.04, 0.015), lambda1 = c(0.025, 0.04, 0.015) * 0.8) pwst pwst <- getPiecewiseSurvivalTime(list( "0 - <6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, "15 - <21" = 0.01, ">=21" = 0.007), hazardRatio = 0.75) pwst \donttest{ # The object created by getPiecewiseSurvivalTime() can be used directly in getSampleSizeSurvival(): getSampleSizeSurvival(piecewiseSurvivalTime = pwst) # The object created by getPiecewiseSurvivalTime() can be used directly in getPowerSurvival(): getPowerSurvival(piecewiseSurvivalTime = pwst, maxNumberOfEvents = 40, maxNumberOfSubjects = 100) } } rpact/man/getRepeatedPValues.Rd0000644000176200001440000000164213574430660016171 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_base.R \name{getRepeatedPValues} \alias{getRepeatedPValues} \title{Get Repeated P Values} \usage{ getRepeatedPValues(design, stageResults, ...) } \arguments{ \item{design}{The trial design.} \item{stageResults}{The results at the given stage, obtained from \code{\link{getStageResults}}.} \item{stage}{The stage number (optional). 
Default: total number of existing stages in the data input.} } \description{ Calculates the repeated p-values for given test results. } \details{ The repeated p-value at a given stage of the trial is defined as the smallest significance level under which, for the given test design, the test results lead to rejection of the null hypothesis. It can be calculated at each stage of the trial and can thus be used as a monitoring tool. The repeated p-values are provided up to the specified stage. } \keyword{internal} rpact/man/getSampleSizeSurvival.Rd0000644000176200001440000003107613574430660016754 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_sample_size_calculator.R \name{getSampleSizeSurvival} \alias{getSampleSizeSurvival} \title{Get Sample Size Survival} \usage{ getSampleSizeSurvival( design = NULL, ..., typeOfComputation = c("Schoenfeld", "Freedman", "HsiehFreedman"), thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, pi1 = NA_real_, pi2 = NA_real_, lambda1 = NA_real_, lambda2 = NA_real_, median1 = NA_real_, median2 = NA_real_, kappa = 1, hazardRatio = NA_real_, piecewiseSurvivalTime = NA_real_, allocationRatioPlanned = NA_real_, accountForObservationTimes = TRUE, eventTime = C_EVENT_TIME_DEFAULT, accrualTime = C_ACCRUAL_TIME_DEFAULT, accrualIntensity = C_ACCRUAL_INTENSITY_DEFAULT, followUpTime = NA_real_, maxNumberOfSubjects = NA_real_, dropoutRate1 = C_DROP_OUT_RATE_1_DEFAULT, dropoutRate2 = C_DROP_OUT_RATE_2_DEFAULT, dropoutTime = C_DROP_OUT_TIME_DEFAULT ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. In this case, \code{alpha}, \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as arguments.} \item{...}{Ensures that all arguments are named and that a warning will be displayed if unknown arguments are passed.} \item{typeOfComputation}{Three options are available: "Schoenfeld", "Freedman", "HsiehFreedman", the default is "Schoenfeld". For details, see Hsieh (Statistics in Medicine, 1992). For non-inferiority testing (i.e., thetaH0 != 1), only Schoenfeld's formula can be used.} \item{thetaH0}{The null hypothesis value. The default value is \code{1}. For one-sided testing, a bound for testing H0: hazard ratio = thetaH0 != 1 can be specified.} \item{pi1}{The assumed event rate in the active treatment group, default is \code{seq(0.4,0.6,0.1)}.} \item{pi2}{The assumed event rate in the control group, default is \code{0.2}.} \item{lambda1}{The assumed hazard rate in the treatment group, there is no default. lambda1 can also be used to define piecewise exponentially distributed survival times (see details).} \item{lambda2}{The assumed hazard rate in the reference group, there is no default. lambda2 can also be used to define piecewise exponentially distributed survival times (see details).} \item{median1}{The assumed median survival time in the treatment group, there is no default.} \item{median2}{The assumed median survival time in the reference group, there is no default.} \item{kappa}{The shape parameter of the Weibull distribution, default is \code{1}. The Weibull distribution cannot be used for the piecewise definition of the survival time distribution. Note that the parameters \code{shape} and \code{scale} in \code{\link[stats]{Weibull}} are equivalent to \code{kappa} and \code{1 / lambda}, respectively, in rpact.} \item{hazardRatio}{The vector of hazard ratios under consideration. 
If the event or hazard rates in both treatment groups are defined, the hazard ratio need not be specified as it is calculated.} \item{piecewiseSurvivalTime}{A vector that specifies the time intervals for the piecewise definition of the exponential survival time cumulative distribution function (see details).} \item{allocationRatioPlanned}{The planned allocation ratio, default is \code{1}. If \code{allocationRatioPlanned = 0} is entered, the optimal allocation ratio yielding the smallest number of subjects is determined.} \item{accountForObservationTimes}{If \code{accountForObservationTimes = TRUE}, the number of subjects is calculated assuming specific accrual and follow-up time, default is \code{TRUE} (see details).} \item{eventTime}{The assumed time under which the event rates are calculated, default is \code{12}.} \item{accrualTime}{The assumed accrual time intervals for the study, default is \code{c(0,12)} (see details).} \item{accrualIntensity}{A vector of accrual intensities, default is the relative intensity \code{0.1} (see details).} \item{followUpTime}{The assumed (additional) follow-up time for the study, default is \code{6}. The total study duration is \code{accrualTime + followUpTime}.} \item{maxNumberOfSubjects}{If \code{maxNumberOfSubjects > 0} is specified, the follow-up time for the required number of events is determined.} \item{dropoutRate1}{The assumed drop-out rate in the treatment group, default is \code{0}.} \item{dropoutRate2}{The assumed drop-out rate in the control group, default is \code{0}.} \item{dropoutTime}{The assumed time for drop-out rates in the control and the treatment group, default is \code{12}.} } \value{ Returns a \code{\link{TrialDesignPlanSurvival}} object. } \description{ Returns the sample size for testing the hazard ratio in a two treatment groups survival design. } \details{ At the given design, the function calculates the number of events and an estimate for the necessary number of subjects for testing the hazard ratio in a survival design. It also calculates the time when the required events are expected under the given assumptions (exponentially, piecewise exponentially, or Weibull distributed survival times and constant or non-constant piecewise accrual). Additionally, an allocation ratio = n1/n2 can be specified where n1 and n2 are the number of subjects in the two treatment groups. The formula of Kim & Tsiatis (Biometrics, 1990) is used to calculate the expected number of events under the alternative (see also Lakatos & Lan, Statistics in Medicine, 1992). These formulas are generalized to piecewise survival times and non-constant piecewise accrual over time.\cr If \code{accountForObservationTimes = FALSE}, only the event rates are used for the calculation of the maximum number of subjects. \code{piecewiseSurvivalTime} The first element of this vector must be equal to \code{0}. \code{piecewiseSurvivalTime} can also be a list that combines the definition of the time intervals and hazard rates in the reference group. The definition of the survival time in the treatment group is obtained by the specification of the hazard ratio (see examples for details). \code{accrualTime} can also be used to define a non-constant accrual over time. For this, \code{accrualTime} needs to be a vector that defines the accrual intervals and \code{accrualIntensity} needs to be specified. 
The first element of \code{accrualTime} must be equal to 0.\cr \code{accrualTime} can also be a list that combines the definition of the accrual time and accrual intensity \code{accrualIntensity} (see below and examples for details). \code{accrualIntensity} needs to be defined if a vector of \code{accrualTime} is specified.\cr If the length of \code{accrualTime} and the length of \code{accrualIntensity} are the same (i.e., the end of accrual is undefined), \code{maxNumberOfSubjects > 0} needs to be specified and the end of accrual is calculated. In that case, \code{accrualIntensity} is given by the number of subjects per time unit.\cr If the length of \code{accrualTime} equals the length of \code{accrualIntensity} + 1 (i.e., the end of accrual is defined), \code{maxNumberOfSubjects} is calculated. \cr If all elements in \code{accrualIntensity} are smaller than 1, \code{accrualIntensity} defines the *relative* intensity with which subjects enter the trial, and \code{maxNumberOfSubjects} must be given or can be calculated at the given follow-up time. For example, \code{accrualIntensity = c(0.1, 0.2)} specifies that in the second accrual interval the intensity is doubled as compared to the first accrual interval. The actual accrual intensity is calculated for the given (or calculated) \code{maxNumberOfSubjects}. Note that the default is \code{accrualIntensity = 0.1}, meaning that the *absolute* accrual intensity will be calculated. \code{accountForObservationTimes} can be set to \code{FALSE}. In this case, the number of subjects is calculated from the event probabilities only. This kind of computation does not account for the specific accrual pattern and survival distribution. } \examples{ # Fixed sample size trial with median survival 20 vs. 30 months in treatment and # reference group, respectively, alpha = 0.05 (two-sided), and power 1 - beta = 90\%. # 20 subjects will be recruited per month up to 400 subjects, i.e., accrual time is 20 months. 
getSampleSizeSurvival(alpha = 0.05, sided = 2, beta = 0.1, lambda1 = log(2) / 20, lambda2 = log(2) / 30, accrualTime = c(0,20), accrualIntensity = 20) \donttest{ # Fixed sample size with minimum required definitions, pi1 = c(0.4,0.5,0.6) and # pi2 = 0.2 at event time 12, accrual time 12 and follow-up time 6 as default, # only alpha = 0.01 is specified getSampleSizeSurvival(alpha = 0.01) # Four stage O'Brien & Fleming group sequential design with minimum required # definitions, pi1 = c(0.4,0.5,0.6) and pi2 = 0.2 at event time 12, # accrual time 12 and follow-up time 6 as default getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 4)) # For fixed sample design, determine necessary accrual time if 200 subjects and # 30 subjects per time unit can be recruited getSampleSizeSurvival(accrualTime = c(0), accrualIntensity = c(30), maxNumberOfSubjects = 200) # Determine necessary accrual time if 200 subjects and if the first 6 time units # 20 subjects per time unit can be recruited, then 30 subjects per time unit getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(20, 30), maxNumberOfSubjects = 200) # Determine maximum number of subjects if the first 6 time units 20 subjects # per time unit can be recruited, and after 10 time units 30 subjects per time unit getSampleSizeSurvival(accrualTime = c(0, 6, 10), accrualIntensity = c(20, 30)) # Specify accrual time as a list at <- list( "0 - <6" = 20, "6 - Inf" = 30) getSampleSizeSurvival(accrualTime = at, maxNumberOfSubjects = 200) # Specify accrual time as a list, if maximum number of subjects needs to be calculated at <- list( "0 - <6" = 20, "6 - <=10" = 30) getSampleSizeSurvival(accrualTime = at) # Specify effect size for a two-stage group design with O'Brien & Fleming boundaries # Effect size is based on event rates at specified event time getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), pi1 = 0.2, pi2 = 0.3, eventTime = 24) # Effect size is based on event rate at specified event # time for the reference group and hazard ratio getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, pi2 = 0.3, eventTime = 24) # Effect size is based on hazard rate for the reference group and hazard ratio getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, lambda2 = 0.02) # Specification of piecewise exponential survival time and hazard ratios getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), hazardRatio = c(1.5, 1.8, 2)) # Specification of piecewise exponential survival time as a list and hazard ratios pws <- list( "0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2)) # Specification of piecewise exponential survival time for both treatment arms getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), lambda1 = c(0.015, 0.03, 0.06)) # Specification of piecewise exponential survival time as a list pws <- list( "0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2)) # Specify effect size based on median survival times getSampleSizeSurvival(median1 = 5, median2 = 3) # Specify effect size based on 
# median survival times of the Weibull distribution with kappa = 2 getSampleSizeSurvival(median1 = 5, median2 = 3, kappa = 2) # Identify minimal and maximal required subjects to # reach the required events in spite of dropouts getSampleSizeSurvival(accrualTime = c(0, 18), accrualIntensity = c(20, 30), lambda2 = 0.4, lambda1 = 0.3, followUpTime = Inf, dropoutRate1 = 0.001, dropoutRate2 = 0.005) getSampleSizeSurvival(accrualTime = c(0, 18), accrualIntensity = c(20, 30), lambda2 = 0.4, lambda1 = 0.3, followUpTime = 0, dropoutRate1 = 0.001, dropoutRate2 = 0.005) } } rpact/man/TrialDesign_as.data.frame.Rd0000644000176200001440000000156013574430660017330 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design.R \name{TrialDesign_as.data.frame} \alias{TrialDesign_as.data.frame} \alias{as.data.frame.TrialDesign} \title{Coerce TrialDesign to a Data Frame} \usage{ \method{as.data.frame}{TrialDesign}( x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, ... ) } \arguments{ \item{niceColumnNamesEnabled}{logical. If \code{TRUE}, nice looking names will be used; syntactic names otherwise (see \code{\link[base]{make.names}}).} \item{includeAllParameters}{logical. If \code{TRUE}, all parameters will be included; a meaningful parameter selection otherwise.} } \description{ Returns the \code{TrialDesign} as data frame. } \details{ Each element of the \code{TrialDesign} is converted to a column in the data frame. } \keyword{internal} rpact/man/FieldSet.Rd0000644000176200001440000000046713574441634014146 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_parameter_set.R \docType{class} \name{FieldSet} \alias{FieldSet} \title{Field Set} \description{ Basic class for field sets. } \details{ The field set implements basic functions for a set of fields. } \keyword{internal} rpact/man/sub-TrialDesignSet-method.Rd0000644000176200001440000000073613574441610017366 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_set.R \name{[,TrialDesignSet-method} \alias{[,TrialDesignSet-method} \title{Access Trial Design by Index} \usage{ \S4method{[}{TrialDesignSet}(x, i, j = NA_character_) } \description{ Function to access the \code{TrialDesign} at position \code{i} in a \code{TrialDesignSet} object. } \details{ Can be used to iterate with "[index]"-syntax over all designs in a design set. } \keyword{internal} rpact/man/readDataset.Rd0000644000176200001440000000431613574430660014662 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \name{readDataset} \alias{readDataset} \title{Read Dataset} \usage{ readDataset( file, ..., header = TRUE, sep = ",", quote = "\\"", dec = ".", fill = TRUE, comment.char = "", fileEncoding = "UTF-8" ) } \arguments{ \item{file}{A CSV file (see \code{\link[utils]{read.table}}).} \item{...}{Further arguments to be passed to \code{\link[utils]{read.table}}.} \item{header}{A logical value indicating whether the file contains the names of the variables as its first line.} \item{sep}{The field separator character. Values on each line of the file are separated by this character. If sep = "," (the default for \code{readDataset}) the separator is a comma.} \item{quote}{The set of quoting characters. To disable quoting altogether, use quote = "". See scan for the behavior on quotes embedded in quotes. 
Quoting is only considered for columns read as character, which is all of them unless \code{colClasses} is specified.} \item{dec}{The character used in the file for decimal points.} \item{fill}{logical. If \code{TRUE}, then in case the rows have unequal length, blank fields are implicitly added.} \item{comment.char}{character: a character vector of length one containing a single character or an empty string. Use "" to turn off the interpretation of comments altogether.} \item{fileEncoding}{character string: if non-empty declares the encoding used on a file (not a connection) so the character data can be re-encoded. See the 'Encoding' section of the help for file, the 'R Data Import/Export Manual' and 'Note'.} } \value{ Returns a \code{\link{Dataset}} object. } \description{ Reads a data file and returns it as a dataset object. } \details{ \code{readDataset} is a wrapper function that uses \code{\link[utils]{read.table}} to read the CSV file into a data frame, transfers it from long to wide format with \code{\link[stats]{reshape}}, and passes the data to \code{\link{getDataset}}. } \seealso{ \itemize{ \item \code{\link{readDatasets}} for reading multiple datasets, \item \code{\link{writeDataset}} for writing a single dataset, \item \code{\link{writeDatasets}} for writing multiple datasets. } } rpact/man/DatasetRates.Rd0000644000176200001440000000111513574430660015017 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \docType{class} \name{DatasetRates} \alias{DatasetRates} \title{Dataset of Rates} \description{ Class for a dataset of rates. } \details{ This object cannot be created directly; use \code{\link{getDataset}} with suitable arguments to create a dataset of rates. } \section{Fields}{ \describe{ \item{\code{group}}{The group numbers.} \item{\code{stage}}{The stage numbers.} \item{\code{sampleSize}}{The sample sizes.} \item{\code{event}}{The events.} }} \keyword{internal} rpact/man/writeDataset.Rd0000644000176200001440000000525013574430660015077 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \name{writeDataset} \alias{writeDataset} \title{Write Dataset} \usage{ writeDataset( dataset, file, ..., append = FALSE, quote = TRUE, sep = ",", eol = "\\n", na = "NA", dec = ".", row.names = TRUE, col.names = NA, qmethod = "double", fileEncoding = "UTF-8" ) } \arguments{ \item{dataset}{A dataset.} \item{file}{The target CSV file.} \item{...}{Further arguments to be passed to \code{\link[utils]{write.table}}.} \item{append}{Logical. Only relevant if file is a character string. If \code{TRUE}, the output is appended to the file. If \code{FALSE}, any existing file of the name is destroyed.} \item{quote}{The set of quoting characters. To disable quoting altogether, use quote = "". See scan for the behavior on quotes embedded in quotes. Quoting is only considered for columns read as character, which is all of them unless \code{colClasses} is specified.} \item{sep}{The field separator character. Values on each line of the file are separated by this character. 
If sep = "," (the default for \code{writeDataset}) the separator is a comma.} \item{eol}{The character(s) to print at the end of each line (row).} \item{na}{The string to use for missing values in the data.} \item{dec}{The character used in the file for decimal points.} \item{row.names}{Either a logical value indicating whether the row names of \code{dataset} are to be written along with \code{dataset}, or a character vector of row names to be written.} \item{col.names}{Either a logical value indicating whether the column names of \code{dataset} are to be written along with \code{dataset}, or a character vector of column names to be written. See the section on 'CSV files' for the meaning of \code{col.names = NA}.} \item{qmethod}{A character string specifying how to deal with embedded double quote characters when quoting strings. Must be one of "double" (default in \code{writeDataset}) or "escape".} \item{fileEncoding}{Character string: if non-empty declares the encoding used on a file (not a connection) so the character data can be re-encoded. See the 'Encoding' section of the help for file, the 'R Data Import/Export Manual' and 'Note'.} } \description{ Writes a dataset to a CSV file. } \details{ \code{\link{writeDataset}} is a wrapper function that coerces the dataset to a data frame and uses \cr \code{\link[utils]{write.table}} to write it to a CSV file. } \seealso{ \itemize{ \item \code{\link{writeDatasets}} for writing multiple datasets, \item \code{\link{readDataset}} for reading a single dataset, \item \code{\link{readDatasets}} for reading multiple datasets. } } rpact/man/TrialDesignPlan_as.data.frame.Rd0000644000176200001440000000107013574430660020137 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_plan.R \name{TrialDesignPlan_as.data.frame} \alias{TrialDesignPlan_as.data.frame} \alias{as.data.frame.TrialDesignPlan} \title{Coerce Trial Design Plan to a Data Frame} \usage{ \method{as.data.frame}{TrialDesignPlan}( x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, ... ) } \description{ Returns the \code{TrialDesignPlan} as data frame. } \details{ Coerces the design plan to a data frame. } \keyword{internal} rpact/man/plot.Dataset.Rd0000644000176200001440000000334713574430660015006 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \name{plot.Dataset} \alias{plot.Dataset} \title{Dataset Plotting} \usage{ \method{plot}{Dataset}( x, y, ..., main = "Dataset", xlab = "Stage", ylab = NA_character_, legendTitle = "Group", palette = "Set1", showSource = FALSE ) } \arguments{ \item{x}{The \code{\link{Dataset}} object to plot.} \item{y}{Not available for this kind of plot (is only defined to be compatible to the generic plot function).} \item{...}{Optional \code{ggplot2} arguments.} \item{main}{The main title, default is \code{"Dataset"}.} \item{xlab}{The x-axis label, default is \code{"Stage"}.} \item{ylab}{The y-axis label.} \item{legendTitle}{The legend title, default is \code{"Group"}.} \item{palette}{The palette, default is \code{"Set1"}.} \item{showSource}{If \code{TRUE}, the parameter names of the object will be printed which were used to create the plot; that may be, e.g., useful to check the values or to create own plots with \code{\link[graphics]{plot}}.} } \value{ A \code{ggplot2} object. } \description{ Plots a dataset. } \details{ Generic function to plot all kinds of datasets. 
} \examples{ # Plot a dataset of means dataExample <- getDataset( n1 = c(22, 11, 22, 11), n2 = c(22, 13, 22, 13), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 3, 2.5), stDevs1 = c(1, 2, 2, 1.3), stDevs2 = c(1, 2, 2, 1.3)) if (require(ggplot2)) plot(dataExample, main = "Comparison of means") # Plot a dataset of rates dataExample <- getDataset( n1 = c(8, 10, 9, 11), n2 = c(11, 13, 12, 13), events1 = c(3, 5, 5, 6), events2 = c(8, 10, 12, 12) ) if (require(ggplot2)) plot(dataExample, main = "Comparison of rates") } rpact/man/StageResults_names.Rd0000644000176200001440000000070313574441634016250 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_stage_results.R \name{StageResults_names} \alias{StageResults_names} \alias{names.StageResults} \title{The Names of a Stage Results object} \usage{ \method{names}{StageResults}(x) } \description{ Function to get the names of a \code{StageResults} object. } \details{ Returns the names of stage results that can be accessed by the user. } \keyword{internal} rpact/man/utilitiesForPiecewiseExponentialDistribution.Rd0000644000176200001440000000725413574430660023574 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_utilities.R \name{utilitiesForPiecewiseExponentialDistribution} \alias{utilitiesForPiecewiseExponentialDistribution} \alias{getPiecewiseExponentialDistribution} \alias{ppwexp} \alias{getPiecewiseExponentialQuantile} \alias{qpwexp} \alias{getPiecewiseExponentialRandomNumbers} \alias{rpwexp} \title{The Piecewise Exponential Distribution} \usage{ getPiecewiseExponentialDistribution( time, ..., piecewiseSurvivalTime = NA_real_, piecewiseLambda = NA_real_, kappa = 1 ) ppwexp(t, ..., s = NA_real_, lambda = NA_real_, kappa = 1) getPiecewiseExponentialQuantile( quantile, ..., piecewiseSurvivalTime = NA_real_, piecewiseLambda = NA_real_, kappa = 1 ) qpwexp(q, ..., s = NA_real_, lambda = NA_real_, kappa = 1) getPiecewiseExponentialRandomNumbers( n, ..., piecewiseSurvivalTime = NA_real_, piecewiseLambda = NA_real_, kappa = 1 ) rpwexp(n, ..., s = NA_real_, lambda = NA_real_, kappa = 1) } \arguments{ \item{...}{Ensures that all arguments after \code{time} are named and that a warning will be displayed if unknown arguments are passed.} \item{kappa}{The kappa value. It is needed for the specification of the Weibull distribution. In this case, no piecewise definition is possible, i.e., only lambda and kappa need to be specified. This function is equivalent to pweibull(t, kappa, 1 / lambda) of the R core system, i.e., the scale parameter is 1 / 'hazard rate'. For example, getPiecewiseExponentialDistribution(time = 130, piecewiseLambda = 0.01, kappa = 4.2) and pweibull(q = 130, shape = 4.2, scale = 1 / 0.01) provide the same result.} \item{t, time}{Vector of time values.} \item{s, piecewiseSurvivalTime}{Vector of start times defining the "time pieces".} \item{lambda, piecewiseLambda}{Vector of lambda values (hazard rates) corresponding to the start times.} \item{q, quantile}{Vector of quantiles.} \item{n}{Number of observations.} } \description{ Distribution function, quantile function, and random number generation for the piecewise exponential distribution. 
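For example, under the (hypothetical) assumption of a hazard rate of 0.025 up to month 6 and 0.04 thereafter, the probability of an event by month 12 can be obtained with \code{ppwexp(12, s = c(0, 6), lambda = c(0.025, 0.04))}.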
} \details{ \code{getPiecewiseExponentialDistribution} (short: \code{ppwexp}), \code{getPiecewiseExponentialQuantile} (short: \code{qpwexp}), and \code{getPiecewiseExponentialRandomNumbers} (short: \code{rpwexp}) provide probabilities, quantiles, and random numbers according to a piecewise exponential or a Weibull distribution. The piecewise definition is performed through a vector of starting times (\code{piecewiseSurvivalTime}) and a vector of hazard rates (\code{piecewiseLambda}). You can also use a list that defines the starting times and piecewise lambdas together and define piecewiseSurvivalTime as this list. The list needs to have the form, for example, piecewiseSurvivalTime <- list( "0 - <6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, ">=15" = 0.007) For the Weibull case, you can also specify a shape parameter kappa in order to calculate probabilities, quantiles, or random numbers. In this case, no piecewise definition is possible, i.e., only piecewiseLambda and kappa need to be specified. } \examples{ # Calculate probabilities for a range of time values for a # piecewise exponential distribution with hazard rates # 0.025, 0.04, 0.015, and 0.01 in the intervals # [0, 6), [6, 9), [9, 15), [15,Inf), respectively, # and then recover the time values by applying the quantile function: piecewiseSurvivalTime <- list( "0 - <6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, ">=15" = 0.01) y <- getPiecewiseExponentialDistribution(seq(0, 150, 15), piecewiseSurvivalTime = piecewiseSurvivalTime) getPiecewiseExponentialQuantile(y, piecewiseSurvivalTime = piecewiseSurvivalTime) } rpact/man/writeDatasets.Rd0000644000176200001440000000515713574430660015270 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \name{writeDatasets} \alias{writeDatasets} \title{Write Multiple Datasets} \usage{ writeDatasets( datasets, file, ..., append = FALSE, quote = TRUE, sep = ",", eol = "\\n", na = "NA", dec = ".", row.names = TRUE, col.names = NA, qmethod = "double", fileEncoding = "UTF-8" ) } \arguments{ \item{datasets}{A list of datasets.} \item{file}{The target CSV file.} \item{...}{Further arguments to be passed to \code{\link[utils]{write.table}}.} \item{append}{Logical. Only relevant if file is a character string. If \code{TRUE}, the output is appended to the file. If \code{FALSE}, any existing file of the name is destroyed.} \item{quote}{The set of quoting characters. To disable quoting altogether, use quote = "". See scan for the behavior on quotes embedded in quotes. Quoting is only considered for columns read as character, which is all of them unless \code{colClasses} is specified.} \item{sep}{The field separator character. Values on each line of the file are separated by this character. If sep = "," (the default for \code{writeDatasets}) the separator is a comma.} \item{eol}{The character(s) to print at the end of each line (row).} \item{na}{The string to use for missing values in the data.} \item{dec}{The character used in the file for decimal points.} \item{row.names}{Either a logical value indicating whether the row names of \code{dataset} are to be written along with \code{dataset}, or a character vector of row names to be written.} \item{col.names}{Either a logical value indicating whether the column names of \code{dataset} are to be written along with \code{dataset}, or a character vector of column names to be written. 
See the section on 'CSV files' for the meaning of \code{col.names = NA}.} \item{qmethod}{A character string specifying how to deal with embedded double quote characters when quoting strings. Must be one of "double" (default in \code{writeDatasets}) or "escape".} \item{fileEncoding}{Character string: if non-empty declares the encoding used on a file (not a connection) so the character data can be re-encoded. See the 'Encoding' section of the help for file, the 'R Data Import/Export Manual' and 'Note'.} } \description{ Writes a list of datasets to a CSV file. } \details{ The format of the CSV file is optimized for usage of \code{\link{readDatasets}}. } \seealso{ \itemize{ \item \code{\link{writeDataset}} for writing a single dataset, \item \code{\link{readDatasets}} for reading multiple datasets, \item \code{\link{readDataset}} for reading a single dataset. } } rpact/man/TrialDesignPlanRates.Rd0000644000176200001440000000066013574441634016461 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_plan.R \docType{class} \name{TrialDesignPlanRates} \alias{TrialDesignPlanRates} \title{Trial Design Plan Rates} \description{ Trial design plan for rates. } \details{ This object cannot be created directly; use \code{\link{getSampleSizeRates}} with suitable arguments to create a design plan for rates. } \keyword{internal} rpact/man/DatasetMeans.Rd0000644000176200001440000000117713574430660015014 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \docType{class} \name{DatasetMeans} \alias{DatasetMeans} \title{Dataset of Means} \description{ Class for a dataset of means. } \details{ This object cannot be created directly; use \code{\link{getDataset}} with suitable arguments to create a dataset of means. } \section{Fields}{ \describe{ \item{\code{groups}}{The group numbers.} \item{\code{stages}}{The stage numbers.} \item{\code{sampleSizes}}{The sample sizes.} \item{\code{means}}{The means.} \item{\code{stDevs}}{The standard deviations.} }} \keyword{internal} rpact/man/AnalysisResults_as.data.frame.Rd0000644000176200001440000000103113574430660020261 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \name{AnalysisResults_as.data.frame} \alias{AnalysisResults_as.data.frame} \alias{as.data.frame.AnalysisResults} \title{Coerce AnalysisResults to a Data Frame} \usage{ \method{as.data.frame}{AnalysisResults}(x, row.names = NULL, optional = FALSE, ...) } \value{ A data frame } \description{ Returns the \code{\link{AnalysisResults}} object as data frame. } \details{ Coerces the analysis results to a data frame. } \keyword{internal} rpact/man/plot.TrialDesignSet.Rd0000644000176200001440000000577613574430660016310 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_set.R \name{plot.TrialDesignSet} \alias{plot.TrialDesignSet} \title{Trial Design Set Plotting} \usage{ \method{plot}{TrialDesignSet}( x, y, type = 1L, main = NA_character_, xlab = NA_character_, ylab = NA_character_, palette = "Set1", theta = seq(-1, 1, 0.02), nMax = NA_integer_, plotPointsEnabled = NA, legendPosition = NA_integer_, showSource = FALSE, ... 
) } \arguments{ \item{x}{The trial design set, obtained from \code{\link{getDesignSet}}.} \item{y}{Not available for this kind of plot (is only defined to be compatible with the generic plot function).} \item{type}{The plot type (default = \code{1}). The following plot types are available: \itemize{ \item \code{1}: creates a 'Boundaries' plot \item \code{3}: creates a 'Stage Levels' plot \item \code{4}: creates a 'Type One Error Spending' plot \item \code{5}: creates a 'Power and Early Stopping' plot \item \code{6}: creates an 'Average Sample Size and Power / Early Stop' plot \item \code{7}: creates a 'Power' plot \item \code{8}: creates an 'Early Stopping' plot \item \code{9}: creates an 'Average Sample Size' plot }} \item{main}{The main title.} \item{xlab}{The x-axis label.} \item{ylab}{The y-axis label.} \item{palette}{The palette, default is \code{"Set1"}.} \item{theta}{A vector of theta values.} \item{nMax}{The maximum sample size.} \item{plotPointsEnabled}{If \code{TRUE}, additional points will be plotted.} \item{legendPosition}{The position of the legend. By default (\code{NA_integer_}) the algorithm tries to find a suitable position. Choose one of the following values to specify the position manually: \itemize{ \item \code{-1}: no legend will be shown \item \code{NA}: the algorithm tries to find a suitable position \item \code{0}: legend position outside plot \item \code{1}: legend position left top \item \code{2}: legend position left center \item \code{3}: legend position left bottom \item \code{4}: legend position right top \item \code{5}: legend position right center \item \code{6}: legend position right bottom }} \item{showSource}{If \code{TRUE}, the parameter names of the object that were used to create the plot will be printed; this may be useful, e.g., to check the values or to create your own plots with \code{\link[graphics]{plot}}.} \item{...}{Optional \code{ggplot2} arguments.} } \value{ A \code{ggplot2} object. } \description{ Plots a trial design set. } \details{ Generic function to plot a trial design set. It is useful, e.g., to compare different designs or design parameters visually. } \examples{ design <- getDesignInverseNormal(kMax = 3, alpha = 0.025, typeOfDesign = "asKD", gammaA = 2, informationRates = c(0.2, 0.7, 1), typeBetaSpending = "bsOF") # Create a set of designs based on the master design defined above # and the varied parameter 'gammaA' designSet <- getDesignSet(design = design, gammaA = 4) if (require(ggplot2)) plot(designSet, type = 1, legendPosition = 6) } rpact/man/TrialDesignFisher.Rd0000644000176200001440000000073513574441634016013 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design.R \docType{class} \name{TrialDesignFisher} \alias{TrialDesignFisher} \title{Fisher Design} \description{ Trial design for Fisher's combination test. } \details{ This object should not be created directly; use \code{\link{getDesignFisher}} with suitable arguments to create a Fisher design. } \seealso{ \code{\link{getDesignFisher}} for creating a Fisher design. 
} \keyword{internal} rpact/man/getRawData.Rd0000644000176200001440000000132313574430660014457 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_simulation_results.R \name{getRawData} \alias{getRawData} \title{Get Simulation Raw Data} \usage{ getRawData(x, aggregate = FALSE) } \arguments{ \item{x}{A \code{SimulationResults} object created by \code{\link{getSimulationSurvival}}.} \item{aggregate}{If \code{TRUE}, the raw data will be aggregated similarly to the result of \code{\link{getData}}, default is \code{FALSE}.} } \description{ Returns the raw data that was randomly generated for the simulation. } \details{ This function works only if \code{\link{getSimulationSurvival}} was called with a \code{maxNumberOfRawDatasetsPerStage > 0} (default is \code{0}). } \keyword{internal} rpact/man/getPowerSurvival.Rd0000644000176200001440000002750313574430660015774 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_sample_size_calculator.R \name{getPowerSurvival} \alias{getPowerSurvival} \title{Get Power Survival} \usage{ getPowerSurvival( design = NULL, ..., typeOfComputation = c("Schoenfeld", "Freedman", "HsiehFreedman"), thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, directionUpper = NA, pi1 = NA_real_, pi2 = NA_real_, lambda1 = NA_real_, lambda2 = NA_real_, median1 = NA_real_, median2 = NA_real_, kappa = 1, hazardRatio = NA_real_, piecewiseSurvivalTime = NA_real_, allocationRatioPlanned = 1, eventTime = C_EVENT_TIME_DEFAULT, accrualTime = C_ACCRUAL_TIME_DEFAULT, accrualIntensity = C_ACCRUAL_INTENSITY_DEFAULT, maxNumberOfSubjects = NA_real_, maxNumberOfEvents = NA_real_, dropoutRate1 = C_DROP_OUT_RATE_1_DEFAULT, dropoutRate2 = C_DROP_OUT_RATE_2_DEFAULT, dropoutTime = C_DROP_OUT_TIME_DEFAULT ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. In this case, \code{alpha}, \code{beta}, and \code{sided} can be directly entered as arguments.} \item{...}{Ensures that all arguments are named and that a warning will be displayed if unknown arguments are passed.} \item{typeOfComputation}{Three options are available: "Schoenfeld", "Freedman", "HsiehFreedman", the default is "Schoenfeld". For details, see Hsieh (Statistics in Medicine, 1992). For non-inferiority testing (i.e., thetaH0 != 1), only Schoenfeld's formula can be used.} \item{thetaH0}{The null hypothesis value. The default value is \code{1}. For one-sided testing, a bound for testing H0: hazard ratio = thetaH0 != 1 can be specified.} \item{directionUpper}{Specifies the direction of the alternative, only applicable for one-sided testing, default is \code{TRUE}.} \item{pi1}{The assumed event rate in the treatment group, default is \code{seq(0.2,0.5,0.1)}.} \item{pi2}{The assumed event rate in the control group, default is 0.2.} \item{lambda1}{The assumed hazard rate in the treatment group, there is no default. lambda1 can also be used to define piecewise exponentially distributed survival times (see details).} \item{lambda2}{The assumed hazard rate in the reference group, there is no default. lambda2 can also be used to define piecewise exponentially distributed survival times (see details).} \item{median1}{The assumed median survival time in the treatment group, there is no default.} \item{median2}{The assumed median survival time in the reference group, there is no default.} \item{kappa}{The shape parameter of the Weibull distribution, default is \code{1}. 
The Weibull distribution cannot be used for the piecewise definition of the survival time distribution. Note that the parameters \code{shape} and \code{scale} in \code{\link[stats]{Weibull}} are equivalent to \code{kappa} and \code{1 / lambda}, respectively, in rpact.} \item{hazardRatio}{The vector of hazard ratios under consideration. If the event or hazard rates in both treatment groups are defined, the hazard ratio need not be specified as it is calculated.} \item{piecewiseSurvivalTime}{A vector that specifies the time intervals for the piecewise definition of the exponential survival time cumulative distribution function (see details).} \item{allocationRatioPlanned}{The planned allocation ratio, default is \code{1}.} \item{eventTime}{The assumed time under which the event rates are calculated, default is \code{12}.} \item{accrualTime}{The assumed accrual time intervals for the study, default is \code{c(0,12)} (see details).} \item{accrualIntensity}{A vector of accrual intensities, default is the relative intensity \code{0.1} (see details).} \item{maxNumberOfSubjects}{\code{maxNumberOfSubjects > 0} needs to be specified. If accrual time and accrual intensity are specified, this will be calculated.} \item{maxNumberOfEvents}{\code{maxNumberOfEvents > 0} is the maximum number of events, determines the power of the test and needs to be specified.} \item{dropoutRate1}{The assumed drop-out rate in the treatment group, default is \code{0}.} \item{dropoutRate2}{The assumed drop-out rate in the control group, default is \code{0}.} \item{dropoutTime}{The assumed time for drop-out rates in the control and the treatment group, default is \code{12}.} } \value{ Returns a \code{\link{TrialDesignPlanSurvival}} object. } \description{ Returns the power, stopping probabilities, and expected sample size for testing the hazard ratio in a two treatment groups survival design. } \details{ At the given design, the function calculates the power, stopping probabilities, and expected sample size at the given number of events and number of subjects. It also calculates the time when the required events are expected under the given assumptions (exponentially, piecewise exponentially, or Weibull distributed survival times and constant or non-constant piecewise accrual). Additionally, an allocation ratio = n1/n2 can be specified where n1 and n2 are the number of subjects in the two treatment groups. The formula of Kim & Tsiatis (Biometrics, 1990) is used to calculate the expected number of events under the alternative (see also Lakatos & Lan, Statistics in Medicine, 1992). These formulas are generalized to piecewise survival times and non-constant piecewise accrual over time.\cr \code{piecewiseSurvivalTime} The first element of this vector must be equal to \code{0}. \code{piecewiseSurvivalTime} can also be a list that combines the definition of the time intervals and hazard rates in the reference group. The definition of the survival time in the treatment group is obtained by the specification of the hazard ratio (see examples for details). \code{accrualTime} can also be used to define a non-constant accrual over time. For this, \code{accrualTime} needs to be a vector that defines the accrual intervals and \code{accrualIntensity} needs to be specified. The first element of \code{accrualTime} must be equal to 0.\cr \code{accrualTime} can also be a list that combines the definition of the accrual time and accrual intensity \code{accrualIntensity} (see below and examples for details). 
\code{accrualIntensity} needs to be defined if a vector of \code{accrualTime} is specified.\cr If the length of \code{accrualTime} and the length of \code{accrualIntensity} are the same (i.e., the end of accrual is undefined), \code{maxNumberOfSubjects > 0} needs to be specified and the end of accrual is calculated. In that case, \code{accrualIntensity} is given by the number of subjects per time unit.\cr If the length of \code{accrualTime} equals the length of \code{accrualIntensity} + 1 (i.e., the end of accrual is defined), \code{maxNumberOfSubjects} is calculated.\cr If all elements in \code{accrualIntensity} are smaller than 1, \code{accrualIntensity} defines the *relative* intensity with which subjects enter the trial. For example, \code{accrualIntensity = c(0.1, 0.2)} specifies that in the second accrual interval the intensity is doubled as compared to the first accrual interval. The actual accrual intensity is calculated for the given \code{maxNumberOfSubjects}. Note that the default is \code{accrualIntensity = 0.1}, meaning that the *absolute* accrual intensity will be calculated. } \examples{ # Fixed sample size with minimum required definitions, pi1 = seq(0.2, 0.5, 0.1) and # pi2 = 0.2 at event time 12, accrual time 12 and follow-up time 6 as default getPowerSurvival(maxNumberOfEvents = 40, maxNumberOfSubjects = 200) \donttest{ # Four stage O'Brien & Fleming group sequential design with minimum required # definitions, pi1 = seq(0.2, 0.5, 0.1) and pi2 = 0.2 at event time 12, # accrual time 12 and follow-up time 6 as default getPowerSurvival(design = getDesignGroupSequential(kMax = 4), maxNumberOfEvents = 40, maxNumberOfSubjects = 200) # For fixed sample design, determine necessary accrual time if 200 subjects and # 30 subjects per time unit can be recruited getPowerSurvival(maxNumberOfEvents = 40, accrualTime = c(0), accrualIntensity = 30, maxNumberOfSubjects = 200) # Determine necessary accrual time if 200 subjects and if the first 6 time units # 20 subjects per time unit can be recruited, then 30 subjects per time unit getPowerSurvival(maxNumberOfEvents = 40, accrualTime = c(0, 6), accrualIntensity = c(20, 30), maxNumberOfSubjects = 200) # Determine maximum number of subjects if the first 6 time units 20 subjects per # time unit can be recruited, and after 10 time units 30 subjects per time unit getPowerSurvival(maxNumberOfEvents = 40, accrualTime = c(0, 6, 10), accrualIntensity = c(20, 30)) # Specify accrual time as a list at <- list( "0 - <6" = 20, "6 - Inf" = 30) getPowerSurvival(maxNumberOfEvents = 40, accrualTime = at, maxNumberOfSubjects = 200) # Specify accrual time as a list, if maximum number of subjects needs to be calculated at <- list( "0 - <6" = 20, "6 - <=10" = 30) getPowerSurvival(maxNumberOfEvents = 40, accrualTime = at) # Specify effect size for a two-stage group design with O'Brien & Fleming boundaries # Effect size is based on event rates at specified event time, directionUpper = FALSE # needs to be specified because it should be shown that hazard ratio < 1 getPowerSurvival(design = getDesignGroupSequential(kMax = 2), pi1 = 0.2, pi2 = 0.3, eventTime = 24, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE) # Effect size is based on event rate at specified event time for the reference group # and hazard ratio, directionUpper = FALSE 
# needs to be specified because it should be shown that hazard ratio < 1 getPowerSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, pi2 = 0.3, eventTime = 24, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE) # Effect size is based on hazard rate for the reference group and hazard ratio, # directionUpper = FALSE needs to be specified because it should be shown that hazard ratio < 1 getPowerSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, lambda2 = 0.02, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE) # Specification of piecewise exponential survival time and hazard ratios getPowerSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01,0.02,0.04), hazardRatio = c(1.5, 1.8, 2), maxNumberOfEvents = 40, maxNumberOfSubjects = 200) # Specification of piecewise exponential survival time as list and hazard ratios pws <- list( "0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) getPowerSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2), maxNumberOfEvents = 40, maxNumberOfSubjects = 200) # Specification of piecewise exponential survival time for both treatment arms getPowerSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), lambda1 = c(0.015,0.03,0.06), maxNumberOfEvents = 40, maxNumberOfSubjects = 200) # Specification of piecewise exponential survival time as a list pws <- list( "0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) getPowerSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2), maxNumberOfEvents = 40, maxNumberOfSubjects = 200) # Specify effect size based on median survival times getPowerSurvival(median1 = 5, median2 = 3, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE) # Specify effect size based on median survival times of the Weibull distribution with kappa = 2 getPowerSurvival(median1 = 5, median2 = 3, kappa = 2, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE) } } rpact/man/testPackage.Rd0000644000176200001440000000261113574430660014670 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_core_utilities.R \name{testPackage} \alias{testPackage} \title{Test Package} \usage{ testPackage( outDir = ".", ..., completeUnitTestSetEnabled = TRUE, types = "tests", sourceDirectory = NULL ) } \arguments{ \item{outDir}{The output directory where all test results shall be saved. By default, the current working directory is used.} \item{completeUnitTestSetEnabled}{If \code{TRUE} (default), all existing unit tests will be executed; a subset of all unit tests will be used otherwise.} \item{types}{The type(s) of tests to be done. Can be one or more of \code{c("tests", "examples", "vignettes")}. Default is "tests" only.} \item{sourceDirectory}{An optional directory to look for \code{.save} files.} } \description{ This function allows the installed package \code{rpact} to be tested. } \details{ This function creates the subdirectory \code{rpact-tests} in the specified output directory and copies all unit test files of the package to this newly created directory. Then the function runs all tests (or a subset of all tests if \code{completeUnitTestSetEnabled} is \code{FALSE}) using \code{\link[tools]{testInstalledPackage}}. 
The test results will be saved to the text file \code{testthat.Rout} that can be found in the subdirectory \code{rpact-tests}. } \examples{ \dontrun{ testPackage() } } \keyword{internal} rpact/man/FieldSet_names.Rd0000644000176200001440000000064513574441634015327 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_parameter_set.R \name{FieldSet_names} \alias{FieldSet_names} \alias{names.FieldSet} \title{The Names of a Field Set object} \usage{ \method{names}{FieldSet}(x) } \description{ Function to get the names of a \code{FieldSet} object. } \details{ Returns the names of a field set that can be accessed by the user. } \keyword{internal} rpact/man/getSimulationMeans.Rd0000644000176200001440000002263613574430660016256 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_simulation_means.R \name{getSimulationMeans} \alias{getSimulationMeans} \title{Get Simulation Means} \usage{ getSimulationMeans( design = NULL, ..., groups = 2L, meanRatio = FALSE, thetaH0 = ifelse(meanRatio, 1, 0), alternative = C_ALTERNATIVE_POWER_SIMULATION_DEFAULT, stDev = C_STDEV_DEFAULT, plannedSubjects = NA_real_, directionUpper = C_DIRECTION_UPPER_DEFAULT, allocationRatioPlanned = NA_real_, minNumberOfSubjectsPerStage = NA_real_, maxNumberOfSubjectsPerStage = NA_real_, conditionalPower = NA_real_, thetaH1 = NA_real_, maxNumberOfIterations = C_MAX_SIMULATION_ITERATIONS_DEFAULT, seed = NA_real_, calcSubjectsFunction = NULL ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. In this case, \code{alpha}, \code{beta}, and \code{sided} can be directly entered as arguments.} \item{...}{Ensures that all arguments are named and that a warning will be displayed if unknown arguments are passed.} \item{groups}{The number of treatment groups (1 or 2), default is \code{2}.} \item{meanRatio}{If \code{meanRatio = TRUE} is specified, the design characteristics for one-sided testing of H0: mu1/mu2 = thetaH0 are simulated, default is \code{FALSE}.} \item{thetaH0}{The null hypothesis value. For one-sided testing, a value != 0 (or a value != 1 for testing the mean ratio) can be specified, default is \code{0} or \code{1} for difference and ratio testing, respectively.} \item{alternative}{The alternative hypothesis value. This can be a vector of assumed alternatives, default is \code{seq(0,1,0.2)}.} \item{stDev}{The standard deviation under which the conditional power calculation is performed, default is \code{1}. 
If \code{meanRatio = TRUE} is specified, stDev defines the coefficient of variation sigma/mu2.} \item{plannedSubjects}{\code{plannedSubjects} is a vector of length \code{kMax} (the number of stages of the design) that determines the number of cumulated (overall) subjects when the interim stages are planned.} \item{directionUpper}{Specifies the direction of the alternative, only applicable for one-sided testing, default is \code{TRUE}.} \item{allocationRatioPlanned}{The planned allocation ratio for a two treatment groups design, default is \code{1}.} \item{minNumberOfSubjectsPerStage}{When performing a data driven sample size recalculation, the vector \code{minNumberOfSubjectsPerStage} of length \code{kMax} determines the minimum number of subjects per stage (i.e., not cumulated); the first element is not taken into account.} \item{maxNumberOfSubjectsPerStage}{When performing a data driven sample size recalculation, the vector \code{maxNumberOfSubjectsPerStage} of length \code{kMax} determines the maximum number of subjects per stage (i.e., not cumulated); the first element is not taken into account.} \item{conditionalPower}{The conditional power for the subsequent stage under which the sample size recalculation is performed.} \item{thetaH1}{If specified, the value of the alternative under which the conditional power calculation is performed.} \item{maxNumberOfIterations}{The number of simulation iterations.} \item{seed}{The seed to reproduce the simulation, default is a random seed.} \item{calcSubjectsFunction}{Optionally, a function can be entered that defines the way of performing the sample size recalculation. By default, sample size recalculation is performed based on conditional power with the specified \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (see details and examples).} } \value{ Returns a \code{\link{SimulationResultsMeans}} object. } \description{ Returns the simulated power, stopping probabilities, conditional power, and expected sample size for testing means in a one or two treatment groups testing situation. } \details{ At the given design, the function simulates the power, stopping probabilities, conditional power, and expected sample size at the given number of subjects and parameter configuration. Additionally, an allocation ratio = n1/n2 can be specified where n1 and n2 are the number of subjects in the two treatment groups. calcSubjectsFunction\cr This function returns the number of subjects at a given conditional power and conditional Type I error rate for the specified testing situation. The function might depend on variables \code{stage}, \code{meanRatio}, \code{thetaH0}, \code{groups}, \code{plannedSubjects}, \code{sampleSizesPerStage}, \code{directionUpper}, \code{allocationRatioPlanned}, \code{minNumberOfSubjectsPerStage}, \code{maxNumberOfSubjectsPerStage}, \code{conditionalPower}, \code{conditionalCriticalValue}, \code{thetaStandardized}. The function must contain the three-dots argument '...' (see examples). 
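For orientation, a minimal sketch of such a function (the name \code{myCalcSubjectsFunction} is arbitrary) that simply carries forward the sample size of the previous stage: \cr \code{myCalcSubjectsFunction <- function(..., stage, sampleSizesPerStage) \{} \cr \code{    sampleSizesPerStage[stage - 1]} \cr \code{\}} \cr A complete conditional power based version is given in the examples below.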
} \section{Simulation Data}{ The summary statistics "Simulated data" contain the following parameters: median [range]; mean +/-sd\cr \code{$show(showStatistics = FALSE)} or \code{$setShowStatistics(FALSE)} can be used to disable the output of the aggregated simulated data.\cr Example 1: \cr \code{simulationResults <- getSimulationMeans(plannedSubjects = 40)} \cr \code{simulationResults$show(showStatistics = FALSE)}\cr Example 2: \cr \code{simulationResults <- getSimulationMeans(plannedSubjects = 40)} \cr \code{simulationResults$setShowStatistics(FALSE)}\cr \code{simulationResults}\cr \code{\link{getData}} can be used to get the aggregated simulated data from the object as \code{\link[base]{data.frame}}. The data frame contains the following columns: \enumerate{ \item \code{iterationNumber}: The number of the simulation iteration. \item \code{stageNumber}: The stage. \item \code{alternative}: The alternative hypothesis value. \item \code{numberOfSubjects}: The number of subjects under consideration when the (interim) analysis takes place. \item \code{rejectPerStage}: 1 if the null hypothesis can be rejected, 0 otherwise. \item \code{futilityPerStage}: 1 if the study should be stopped for futility, 0 otherwise. \item \code{testStatistic}: The test statistic that is used for the test decision, depends on which design was chosen (group sequential, inverse normal, or Fisher's combination test). \item \code{testStatisticsPerStage}: The test statistic for each stage if only data from the considered stage is taken into account. \item \code{effectEstimate}: Standardized overall simulated effect estimate. \item \code{trialStop}: \code{TRUE} if the study should be stopped for efficacy or futility or at the final stage, \code{FALSE} otherwise. \item \code{conditionalPowerAchieved}: The conditional power for the subsequent stage of the trial for the selected sample size and effect. The effect is either estimated from the data or can be user defined with \code{thetaH1}.
} } \examples{ # Simulate a three-stage group sequential design with alternative = 40, # standard deviation = 50, and assumed effect thetaH1 = 60 getSimulationMeans(getDesignGroupSequential(), alternative = 40, stDev = 50, plannedSubjects = c(20, 40, 60), thetaH1 = 60, maxNumberOfIterations = 50) \donttest{ # Increase number of simulation iterations and compare results # with power calculator using normal approximation getSimulationMeans(alternative = 0:4, stDev = 5, plannedSubjects = 40, maxNumberOfIterations = 50) getPowerMeans(alternative = 0:4, stDev = 5, maxNumberOfSubjects = 40, normalApproximation = TRUE) # Do the same for a three-stage O'Brien & Fleming inverse # normal group sequential design with non-binding futility stops designIN <- getDesignInverseNormal(typeOfDesign = "OF", futilityBounds = c(0, 0)) x <- getSimulationMeans(designIN, alternative = c(0:4), stDev = 5, plannedSubjects = c(20, 40, 60), maxNumberOfIterations = 1000) getPowerMeans(designIN, alternative = 0:4, stDev = 5, maxNumberOfSubjects = 60, normalApproximation = TRUE) # Assess power and average sample size if a sample size increase is foreseen # at conditional power 80\% for each subsequent stage based on observed overall # effect and specified minNumberOfSubjectsPerStage and # maxNumberOfSubjectsPerStage getSimulationMeans(designIN, alternative = 0:4, stDev = 5, plannedSubjects = c(20, 40, 60), minNumberOfSubjectsPerStage = c(20, 20, 20), maxNumberOfSubjectsPerStage = c(80, 80, 80), conditionalPower = 0.8, maxNumberOfIterations = 50) # Do the same under the assumption that a sample size increase only takes # place at the first interim. The sample size for the third stage is set equal # to the second stage sample size. mySampleSizeCalculationFunction <- function(..., stage, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, sampleSizesPerStage, conditionalPower, conditionalCriticalValue, thetaStandardized) { if (stage == 2) { stageSubjects <- 4 * (max(0, conditionalCriticalValue + stats::qnorm(conditionalPower)))^2 / (max(1e-12, thetaStandardized))^2 stageSubjects <- min(max(minNumberOfSubjectsPerStage[stage], stageSubjects), maxNumberOfSubjectsPerStage[stage]) } else { stageSubjects <- sampleSizesPerStage[stage - 1] } return(stageSubjects) } getSimulationMeans(designIN, alternative = 2:4, stDev = 5, plannedSubjects = c(20, 40, 60), minNumberOfSubjectsPerStage = c(20, 20, 20), maxNumberOfSubjectsPerStage = c(40, 160, 160), conditionalPower = 0.8, calcSubjectsFunction = mySampleSizeCalculationFunction, maxNumberOfIterations = 50) } } rpact/man/getDesignFisher.Rd0000644000176200001440000000645713574430660015517 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_fisher_combination_test.R \name{getDesignFisher} \alias{getDesignFisher} \title{Get Design Fisher} \usage{ getDesignFisher( ..., kMax = NA_integer_, alpha = NA_real_, method = C_FISHER_METHOD_DEFAULT, userAlphaSpending = NA_real_, alpha0Vec = NA_real_, informationRates = NA_real_, sided = 1, bindingFutility = NA, tolerance = C_ANALYSIS_TOLERANCE_FISHER_DEFAULT, iterations = 0, seed = NA_real_ ) } \arguments{ \item{...}{Ensures that all arguments are named and that a warning will be displayed if unknown arguments are passed.} \item{kMax}{The maximum number of stages K.
K = 1, 2, 3, ..., 6, default is 3.} \item{alpha}{The significance level alpha, default is 0.025.} \item{method}{"equalAlpha", "fullAlpha", "noInteraction", or "userDefinedAlpha", default is "equalAlpha".} \item{userAlphaSpending}{A vector of levels 0 < alpha_1 < ... < alpha_K < alpha specifying the cumulative Type I error rate.} \item{alpha0Vec}{Stopping for futility bounds for stage-wise p-values.} \item{informationRates}{Information rates that must be fixed prior to the trial, default is \code{(1 : kMax) / kMax}.} \item{sided}{Is the alternative one-sided (1) or two-sided (2), default is 1.} \item{bindingFutility}{If \code{bindingFutility = FALSE} is specified the calculation of the critical values is not affected by the futility bounds (default is \code{TRUE}).} \item{tolerance}{The tolerance, default is 1E-14.} \item{iterations}{The number of simulation iterations, e.g., getDesignFisher(iterations = 100000) checks the validity of the critical values for the default design. The default value of \code{iterations} is 0, i.e., no simulation will be executed.} \item{seed}{Seed for simulating the power for Fisher's combination test. See above, default is a random seed.} } \value{ Returns a \code{\link{TrialDesignFisher}} object } \description{ Performs Fisher's combination test and returns critical values for this design. } \details{ \code{getDesignFisher} calculates the critical values and stage levels for Fisher's combination test as described in Bauer (1989), Bauer and Koehne (1994), Bauer and Roehmel (1995), and Wassmer (1999) for equally and unequally sized stages. } \examples{ # Run with default values getDesignFisher() # The output is: # # Design parameters and output of Fisher design: # User defined parameters: not available # # Derived from user defined parameters: not available # # Default parameters: # Method : equalAlpha # Maximum number of stages : 3 # Stages : 1, 2, 3 # Information rates : 0.333, 0.667, 1.000 # Significance level : 0.0250 # Alpha_0 : 1.0000, 1.0000 # Binding futility : TRUE # Test : one-sided # Tolerance : 1e-14 # # Output: # Cumulative alpha spending : 0.01231, 0.01962, 0.02500 # Critical values : 0.0123085, 0.0016636, 0.0002911 # Stage levels : 0.01231, 0.01231, 0.01231 # Scale : 1, 1 # Non stochastic curtailment : FALSE } \seealso{ \code{\link{getDesignSet}} for creating a set of designs to compare. } rpact/man/getPowerMeans.Rd0000644000176200001440000000721213574430660015217 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_sample_size_calculator.R \name{getPowerMeans} \alias{getPowerMeans} \title{Get Power Means} \usage{ getPowerMeans( design = NULL, ..., groups = 2, normalApproximation = FALSE, meanRatio = FALSE, thetaH0 = ifelse(meanRatio, 1, 0), alternative = C_ALTERNATIVE_POWER_SIMULATION_DEFAULT, stDev = C_STDEV_DEFAULT, directionUpper = NA, maxNumberOfSubjects = NA_real_, allocationRatioPlanned = NA_real_ ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. 
In this case, \code{alpha}, \code{beta}, and \code{sided} can be directly entered as arguments.} \item{...}{Ensures that all arguments are named and that a warning will be displayed if unknown arguments are passed.} \item{groups}{The number of treatment groups (1 or 2), default is \code{2}.} \item{normalApproximation}{If \code{normalApproximation = TRUE} is specified, the variance is assumed to be known, default is \code{FALSE}, i.e., the calculations are performed with the t distribution.} \item{meanRatio}{If \code{meanRatio = TRUE} is specified, the power for one-sided testing of H0: mu1/mu2 = thetaH0 is calculated, default is \code{FALSE}.} \item{thetaH0}{The null hypothesis value. For one-sided testing, a value != 0 (or a value != 1 for testing the mean ratio) can be specified, default is \code{0} or \code{1} for difference and ratio testing, respectively.} \item{alternative}{The alternative hypothesis value. This can be a vector of assumed alternatives, default is \code{seq(0,1,0.2)}.} \item{stDev}{The standard deviation, default is 1. If \code{meanRatio = TRUE} is specified, stDev defines the coefficient of variation sigma/mu2.} \item{directionUpper}{Specifies the direction of the alternative, only applicable for one-sided testing, default is \code{TRUE}.} \item{maxNumberOfSubjects}{\code{maxNumberOfSubjects > 0} needs to be specified for power calculations.} \item{allocationRatioPlanned}{The planned allocation ratio for a two treatment groups design, default is \code{1}.} } \value{ Returns a \code{\link{TrialDesignPlanMeans}} object. } \description{ Returns the power, stopping probabilities, and expected sample size for testing means in one or two samples at given sample size. } \details{ At given design the function calculates the power, stopping probabilities, and expected sample size, for testing means at given sample size. In a two treatment groups design, additionally, an allocation ratio = n1/n2 can be specified. A null hypothesis value thetaH0 != 0 for testing the difference of two means or thetaH0 != 1 for testing the ratio of two means can be specified. For the specified sample size, critical bounds and stopping for futility bounds are provided at the effect scale (mean, mean difference, or mean ratio, respectively). } \examples{ # Calculate the power, stopping probabilities, and expected sample size for testing H0: # mu1 - mu2 = 0 in a two-armed design # against a range of alternatives H1: mu1 - mu2 = delta, delta = (0, 1, 2, 3, 4, 5), # standard deviation sigma = 8, maximum sample size N = 80 (both treatment arms), # and an allocation ratio n1/n2 = 2. The design is a three stage O'Brien & Fleming design # with non-binding futility bounds (-0.5, 0.5) for the two interims. # The computation takes into account that the t test is used (normalApproximation = FALSE). getPowerMeans(getDesignGroupSequential(alpha = 0.025, sided = 1, futilityBounds = c(-0.5, 0.5)), groups = 2, alternative = c(0:5), stDev = 8, normalApproximation = FALSE, maxNumberOfSubjects = 80, allocationRatioPlanned = 2) } rpact/man/TrialDesignCharacteristics.Rd0000644000176200001440000000115413574430660017677 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design.R \docType{class} \name{TrialDesignCharacteristics} \alias{TrialDesignCharacteristics} \title{Trial Design Characteristics} \description{ Class for trial design characteristics. } \details{ \code{TrialDesignCharacteristics} contains all fields required to collect the characteristics of a design.
This object should not be created directly; use \code{getDesignCharacteristics} with suitable arguments to create it. } \seealso{ \code{\link{getDesignCharacteristics}} for getting the design characteristics. } \keyword{internal} rpact/man/resetLogLevel.Rd0000644000176200001440000000056313574441634015220 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_core_utilities.R \name{resetLogLevel} \alias{resetLogLevel} \title{Reset Log Level} \usage{ resetLogLevel() } \description{ Resets the \code{rpact} log level. } \details{ This function is intended for debugging purposes only. } \examples{ \dontrun{ resetLogLevel() } } \keyword{internal} rpact/man/StageResultsSurvival.Rd0000644000176200001440000000175513574430660016626 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_stage_results.R \docType{class} \name{StageResultsSurvival} \alias{StageResultsSurvival} \title{Stage Results of Survival Data} \description{ Class for stage results of survival data. } \details{ This object can not be created directly; use \code{getStageResults} with suitable arguments to create the stage results of a dataset of survival data. } \section{Fields}{ \describe{ \item{\code{testStatistics}}{The stage-wise test statistics.} \item{\code{pValues}}{The stage-wise p-values.} \item{\code{combInverseNormal}}{The inverse normal test.} \item{\code{combFisher}}{The Fisher's combination test.} \item{\code{effectSizes}}{The effect sizes for different designs.} \item{\code{testActions}}{The action drawn from the test result.} \item{\code{weightsFisher}}{The weights for Fisher's combination test.} \item{\code{weightsInverseNormal}}{The weights for the inverse normal statistic.} }} \keyword{internal} rpact/man/TrialDesignPlan.Rd0000644000176200001440000000071113574441634015457 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_plan.R \docType{class} \name{TrialDesignPlan} \alias{TrialDesignPlan} \title{Basic Trial Design Plan} \description{ Basic class for trial design plans. } \details{ \code{TrialDesignPlan} is the basic class for \itemize{ \item \code{TrialDesignPlanMeans}, \item \code{TrialDesignPlanRates}, and \item \code{TrialDesignPlanSurvival}. } } \keyword{internal} rpact/man/getSampleSizeRates.Rd0000644000176200001440000000751413574430660016217 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_sample_size_calculator.R \name{getSampleSizeRates} \alias{getSampleSizeRates} \title{Get Sample Size Rates} \usage{ getSampleSizeRates( design = NULL, ..., groups = 2, normalApproximation = TRUE, riskRatio = FALSE, thetaH0 = ifelse(riskRatio, 1, 0), pi1 = seq(0.4, 0.6, 0.1), pi2 = 0.2, allocationRatioPlanned = NA_real_ ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used.
In this case, \code{alpha}, \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as arguments.} \item{...}{Ensures that all arguments are named and that a warning will be displayed if unknown arguments are passed.} \item{groups}{The number of treatment groups (1 or 2), default is \code{2}.} \item{normalApproximation}{If \code{normalApproximation = FALSE} is specified, the sample size for the case of one treatment group is calculated exactly using the binomial distribution, default is \code{TRUE}.} \item{riskRatio}{If \code{riskRatio = TRUE} is specified, the sample size for one-sided testing of H0: \code{pi1/pi2 = thetaH0} is calculated, default is \code{FALSE}.} \item{thetaH0}{The null hypothesis value. For one-sided testing, a value != 0 (or != 1 for testing the risk ratio \code{pi1/pi2}) can be specified, default is \code{0} or \code{1} for difference and ratio testing, respectively.} \item{pi1}{The assumed probability in the active treatment group if two treatment groups are considered, or the alternative probability for a one treatment group design, default is \code{seq(0.4,0.6,0.1)}.} \item{pi2}{The assumed probability in the reference group if two treatment groups are considered, default is \code{0.2}.} \item{allocationRatioPlanned}{The planned allocation ratio for a two treatment groups design. \cr If \code{allocationRatioPlanned = 0} is entered, the optimal allocation ratio yielding the smallest overall sample size is determined, default is \code{1}.} } \value{ Returns a \code{\link{TrialDesignPlanRates}} object. } \description{ Returns the sample size for testing rates in one or two samples. } \details{ At given design the function calculates the stage-wise (non-cumulated) and maximum sample size for testing rates. In a two treatment groups design, additionally, an allocation ratio = n1/n2 can be specified. If a null hypothesis value thetaH0 != 0 for testing the difference of two rates or thetaH0 != 1 for testing the risk ratio is specified, the sample size formula according to Farrington & Manning (Statistics in Medicine, 1990) is used. Critical bounds and stopping for futility bounds are provided at the effect scale (rate, rate difference, or rate ratio, respectively) for each sample size calculation separately. For the two-sample case, the calculation here is performed at fixed pi2 as given as an argument in the function.
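For example, the exact calculation for one treatment group mentioned above can be requested as follows (a minimal sketch with illustrative numbers; fixed sample size design): \preformatted{# exact sample size based on the binomial distribution (one group),
# testing H0: pi = 0.2 against the alternative pi1 = 0.4
getSampleSizeRates(groups = 1, normalApproximation = FALSE,
    thetaH0 = 0.2, pi1 = 0.4)}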
} \examples{ # Calculate the stage-wise sample sizes, maximum sample sizes, and the optimum # allocation ratios for a range of pi1 values when testing # H0: pi1 - pi2 = -0.1 within a two-stage O'Brien & Fleming design; # alpha = 0.05 one-sided, power 1 - beta = 90\%: getSampleSizeRates(design = getDesignGroupSequential(kMax = 2, alpha = 0.05, beta = 0.1, sided = 1), groups = 2, thetaH0 = -0.1, pi1 = seq(0.4, 0.55, 0.025), pi2 = 0.4, allocationRatioPlanned = 0) # Calculate the stage-wise sample sizes, maximum sample sizes, and the optimum # allocation ratios for a range of pi1 values when testing # H0: pi1 / pi2 = 0.80 within a three-stage O'Brien & Fleming design; # alpha = 0.025 one-sided, power 1 - beta = 90\%: getSampleSizeRates(getDesignGroupSequential(kMax = 3, alpha = 0.025, beta = 0.1, sided = 1), groups = 2, riskRatio = TRUE, thetaH0 = 0.80, pi1 = seq(0.3,0.5,0.025), pi2 = 0.3, allocationRatioPlanned = 0) } rpact/man/setLogLevel.Rd0000644000176200001440000000111213574430660014655 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_core_utilities.R \name{setLogLevel} \alias{setLogLevel} \title{Set Log Level} \usage{ setLogLevel( logLevel = c("PROGRESS", "ERROR", "WARN", "INFO", "DEBUG", "TRACE", "DISABLED") ) } \arguments{ \item{logLevel}{The new log level to set. Can be one of "PROGRESS", "ERROR", "WARN", "INFO", "DEBUG", "TRACE", "DISABLED".} } \description{ Sets the \code{rpact} log level. } \details{ This function is intended for debugging purposes only. } \examples{ \dontrun{ setLogLevel("DEBUG") } } \keyword{internal} rpact/man/TrialDesignInverseNormal.Rd0000644000176200001440000000101413574430660017343 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design.R \docType{class} \name{TrialDesignInverseNormal} \alias{TrialDesignInverseNormal} \title{Inverse Normal Design} \description{ Trial design for inverse normal method. } \details{ This object should not be created directly; use \code{\link{getDesignInverseNormal}} with suitable arguments to create an inverse normal design. } \seealso{ \code{\link{getDesignInverseNormal}} for creating an inverse normal design. } \keyword{internal} rpact/man/getPowerAndAverageSampleNumber.Rd0000644000176200001440000000216213574430660020463 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_group_sequential.R \name{getPowerAndAverageSampleNumber} \alias{getPowerAndAverageSampleNumber} \title{Get Power And Average Sample Number} \usage{ getPowerAndAverageSampleNumber(design, theta = seq(-1, 1, 0.02), nMax = 100) } \arguments{ \item{design}{The design.} \item{theta}{A vector of standardized effect sizes.} \item{nMax}{The maximum sample size.} } \value{ Returns a \code{\link{PowerAndAverageSampleNumberResult}} object. } \description{ Returns the power and average sample number of the specified design. } \details{ This function returns the power and average sample number (ASN) of the specified design for the prototype case, which is testing H0: mu = mu0 in a one-sample design. theta represents the standardized effect (mu - mu0)/sigma, and power and ASN are calculated for the maximum sample size nMax. For other designs than the one-sample test of a mean the standardized effect needs to be adjusted accordingly.
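For instance, for a balanced two-sample comparison of means with maximum overall sample size \code{nMax}, the standardized effect (mu1 - mu2)/sigma has to be divided by 2 (a sketch under the normal approximation, assuming a mean difference of 1 and standard deviation 4): \preformatted{# one-sample equivalent of the two-sample standardized effect
getPowerAndAverageSampleNumber(getDesignGroupSequential(),
    theta = 1 / (2 * 4), nMax = 100)}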
} \examples{ getPowerAndAverageSampleNumber( getDesignGroupSequential(), theta = seq(-1, 1, 0.5), nMax = 100) } rpact/man/EventProbabilities.Rd0000644000176200001440000000056513574441634016230 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_event_probabilities.R \docType{class} \name{EventProbabilities} \alias{EventProbabilities} \title{Event Probabilities} \description{ Class for definition of event probabilities. } \details{ \code{EventProbabilities} is a class for definition of event probabilities. } \keyword{internal} rpact/man/AnalysisResultsFisher.Rd0000644000176200001440000000072213574441634016747 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{AnalysisResultsFisher} \alias{AnalysisResultsFisher} \title{Analysis Results Fisher} \description{ Class for analysis results based on a Fisher design. } \details{ This object can not be created directly; use \code{\link{getAnalysisResults}} with suitable arguments to create the analysis results of a Fisher design. } \keyword{internal} rpact/man/getNumberOfSubjects.Rd0000644000176200001440000000246213574430660016361 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_sample_size_calculator.R \name{getNumberOfSubjects} \alias{getNumberOfSubjects} \title{Get Number Of Subjects} \usage{ getNumberOfSubjects( time, ..., accrualTime = C_ACCRUAL_TIME_DEFAULT, accrualIntensity = C_ACCRUAL_INTENSITY_DEFAULT, maxNumberOfSubjects = NA_real_ ) } \arguments{ \item{time}{A numeric vector with time values.} \item{...}{Ensures that all arguments are named and that a warning will be displayed if unknown arguments are passed.} \item{accrualTime}{The assumed accrual time intervals for the study, default is \code{c(0,12)} (see details).} \item{accrualIntensity}{A vector of accrual intensities, default is the relative intensity \code{0.1} (see details).} \item{maxNumberOfSubjects}{If \code{maxNumberOfSubjects > 0} is specified, the end of accrual at specified \code{accrualIntensity} for the specified number of subjects is determined or \code{accrualIntensity} is calculated at fixed end of accrual.} } \value{ Returns a \code{\link{NumberOfSubjects}} object. } \description{ Returns the number of recruited subjects at given time vector. } \details{ For details of the parameters \code{accrualTime} and \code{accrualIntensity} see \code{\link{getSampleSizeSurvival}}. } \keyword{internal} rpact/man/PowerAndAverageSampleNumberResult.Rd0000644000176200001440000000074113574441634021166 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_power_and_asn.R \docType{class} \name{PowerAndAverageSampleNumberResult} \alias{PowerAndAverageSampleNumberResult} \title{Power and Average Sample Number Result} \description{ Class for power and average sample number (ASN) results. } \details{ This object can not be created directly; use \code{getPowerAndAverageSampleNumber} with suitable arguments to create it.
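As with other result objects of the package, the object can be coerced to a data frame for further processing, e.g. (a sketch): \preformatted{paasn <- getPowerAndAverageSampleNumber(getDesignGroupSequential(),
    theta = seq(-1, 1, 0.5), nMax = 100)
head(as.data.frame(paasn))}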
} \keyword{internal} rpact/man/getAccrualTime.Rd0000644000176200001440000001722513574430660015335 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_time.R \name{getAccrualTime} \alias{getAccrualTime} \title{Get Accrual Time} \usage{ getAccrualTime( accrualTime = NA_real_, ..., accrualIntensity = NA_real_, maxNumberOfSubjects = NA_real_ ) } \arguments{ \item{accrualTime}{The assumed accrual time for the study, default is \code{c(0,12)} (see details).} \item{...}{Ensures that all arguments after \code{accrualTime} are named and that a warning will be displayed if unknown arguments are passed.} \item{accrualIntensity}{A vector of accrual intensities, default is the relative intensity \code{0.1} (see details).} \item{maxNumberOfSubjects}{The maximum number of subjects.} } \value{ Returns an \code{\link{AccrualTime}} object. } \description{ Returns an \code{AccrualTime} object that contains the accrual time and the accrual intensity. } \details{ \code{accrualTime} can also be used to define a non-constant accrual over time. For this, \code{accrualTime} needs to be a vector that defines the accrual intervals and \code{accrualIntensity} needs to be specified. The first element of \code{accrualTime} must be equal to 0.\cr \code{accrualTime} can also be a list that combines the definition of the accrual time and accrual intensity \code{accrualIntensity} (see below and examples for details). \code{accrualIntensity} needs to be defined if a vector of \code{accrualTime} is specified.\cr If the length of \code{accrualTime} and the length of \code{accrualIntensity} are the same (i.e., the end of accrual is undefined), \code{maxNumberOfSubjects > 0} needs to be specified and the end of accrual is calculated. In that case, \code{accrualIntensity} is given by the number of subjects per time unit.\cr If the length of \code{accrualTime} equals the length of \code{accrualIntensity} + 1 (i.e., the end of accrual is defined), \code{maxNumberOfSubjects} is calculated. In that case, \code{accrualIntensity} defines the relative intensity with which subjects enter the trial. For example, \code{accrualIntensity = c(1,2)} specifies that in the second accrual interval the intensity is doubled as compared to the first accrual interval. The actual accrual intensity is calculated for the calculated \code{maxNumberOfSubjects}. } \examples{ \donttest{ # Case 1 # > End of accrual, absolute accrual intensity and `maxNumberOfSubjects` are given, # > `followUpTime`** shall be calculated. ## Example: vector based definition accrualTime <- getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(22, 33), maxNumberOfSubjects = 924) accrualTime ## Example: list based definition accrualTime <- getAccrualTime(list( "0 - <6" = 22, "6 - <=30" = 33), maxNumberOfSubjects = 924) accrualTime ## Example: how to use accrual time object getSampleSizeSurvival(accrualTime = accrualTime, pi1 = 0.4, pi2 = 0.2) # Case 2 # > End of accrual, relative accrual intensity and `maxNumberOfSubjects` are given, # > absolute accrual intensity* and `followUpTime`** shall be calculated.
## Example: vector based definition accrualTime <- getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(0.22, 0.33), maxNumberOfSubjects = 1000) accrualTime ## Example: list based definition accrualTime <- getAccrualTime(list( "0 - <6" = 0.22, "6 - <=30" = 0.33), maxNumberOfSubjects = 1000) accrualTime ## Example: how to use accrual time object getSampleSizeSurvival(accrualTime = accrualTime, pi1 = 0.4, pi2 = 0.2) # Case 3 # > End of accrual and absolute accrual intensity are given, # > `maxNumberOfSubjects`* and `followUpTime`** shall be calculated. ## Example: vector based definition accrualTime <- getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(22, 33)) ## Example: list based definition accrualTime <- getAccrualTime(list( "0 - <6" = 22, "6 - <=30" = 33)) accrualTime ## Example: how to use accrual time object getSampleSizeSurvival(accrualTime = accrualTime, pi1 = 0.4, pi2 = 0.2) # Case 4 # > End of accrual, relative accrual intensity and `followUpTime` are given, # > absolute accrual intensity** and `maxNumberOfSubjects`** shall be calculated. ## Example: vector based definition accrualTime <- getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(0.22, 0.33)) accrualTime ## Example: list based definition accrualTime <- getAccrualTime(list( "0 - <6" = 0.22, "6 - <=30" = 0.33)) accrualTime ## Example: how to use accrual time object getSampleSizeSurvival(accrualTime = accrualTime, pi1 = 0.4, pi2 = 0.2) # Case 5 # > `maxNumberOfSubjects` and absolute accrual intensity are given, # > absolute accrual intensity*, end of accrual* and `followUpTime`** shall be calculated ## Example: vector based definition accrualTime <- getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(22, 33), maxNumberOfSubjects = 1000) accrualTime ## Example: list based definition accrualTime <- getAccrualTime(list( "0 - <6" = 22, "6" = 33), maxNumberOfSubjects = 1000) accrualTime ## Example: how to use accrual time object getSampleSizeSurvival(accrualTime = accrualTime, pi1 = 0.4, pi2 = 0.2) # Case 6 (not possible) # > `maxNumberOfSubjects` and relative accrual intensity are given, # > absolute accrual intensity[x], end of accrual* and `followUpTime`** shall be calculated ## Example: vector based definition accrualTime <- getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(0.22, 0.33), maxNumberOfSubjects = 1000) accrualTime ## Example: list based definition accrualTime <- getAccrualTime(list( "0 - <6" = 0.22, "6" = 0.33), maxNumberOfSubjects = 1000) accrualTime ## Example: how to use accrual time object # Case 6 is not allowed and therefore an error will be shown: tryCatch({ getSampleSizeSurvival(accrualTime = accrualTime, pi1 = 0.4, pi2 = 0.2) }, error = function(e) { print(e$message) }) # Case 7 # > `followUpTime` and absolute accrual intensity are given, # > end of accrual** and `maxNumberOfSubjects`** shall be calculated ## Example: vector based definition accrualTime <- getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(22, 33)) accrualTime ## Example: list based definition accrualTime <- getAccrualTime(list( "0 - <6" = 22, "6" = 33)) accrualTime ## Example: how to use accrual time object getSampleSizeSurvival(accrualTime = accrualTime, pi1 = 0.4, pi2 = 0.2, followUpTime = 6) # Case 8 (not possible) # > `followUpTime` and relative accrual intensity are given, # > absolute accrual intensity[x], end of accrual and `maxNumberOfSubjects` shall be calculated ## Example: vector based definition accrualTime <- getAccrualTime(accrualTime = c(0, 6), accrualIntensity = 
c(0.22, 0.33)) accrualTime ## Example: list based definition accrualTime <- getAccrualTime(list( "0 - <6" = 0.22, "6" = 0.33)) accrualTime ## Example: how to use accrual time object # Case 8 is not allowed and therefore an error will be shown: tryCatch({ getSampleSizeSurvival(accrualTime = accrualTime, pi1 = 0.4, pi2 = 0.2, followUpTime = 6) }, error = function(e) { print(e$message) }) # How to show accrual time details # You can use a sample size or power object as argument for function `getAccrualTime`: sampleSize <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.05, hazardRatio = 0.8, followUpTime = 6) sampleSize accrualTime <- getAccrualTime(sampleSize) accrualTime } } rpact/man/SimulationResultsRates.Rd0000644000176200001440000000057713574441634017156 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_simulation_results.R \docType{class} \name{SimulationResultsRates} \alias{SimulationResultsRates} \title{Class for Simulation Results Rates} \description{ A class for simulation results rates. } \details{ Use \code{\link{getSimulationRates}} to create an object of this type. } \keyword{internal} rpact/man/plot.TrialDesignPlan.Rd0000644000176200001440000000617413574430660016442 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_plan.R \name{plot.TrialDesignPlan} \alias{plot.TrialDesignPlan} \title{Trial Design Plan Plotting} \usage{ \method{plot}{TrialDesignPlan}( x, y, main = NA_character_, xlab = NA_character_, ylab = NA_character_, type = ifelse(x$.design$kMax == 1, 5, 1), palette = "Set1", theta = seq(-1, 1, 0.01), plotPointsEnabled = NA, legendPosition = NA_integer_, showSource = FALSE, ... ) } \arguments{ \item{x}{The trial design plan, obtained from \cr \code{\link{getSampleSizeMeans}}, \cr \code{\link{getSampleSizeRates}}, \cr \code{\link{getSampleSizeSurvival}}, \cr \code{\link{getPowerMeans}}, \cr \code{\link{getPowerRates}} or \cr \code{\link{getPowerSurvival}}.} \item{y}{Not available for this kind of plot (is only defined to be compatible to the generic plot function).} \item{main}{The main title.} \item{xlab}{The x-axis label.} \item{ylab}{The y-axis label.} \item{type}{The plot type (default = \code{1}). The following plot types are available: \itemize{ \item \code{1}: creates a 'Boundaries' plot \item \code{2}: creates a 'Boundaries Effect Scale' plot \item \code{3}: creates a 'Boundaries p Values Scale' plot \item \code{4}: creates a 'Type One Error Spending' plot \item \code{5}: creates a 'Sample Size' or 'Overall Power and Early Stopping' plot \item \code{6}: creates a 'Number of Events' or 'Sample Size' plot \item \code{7}: creates an 'Overall Power' plot \item \code{8}: creates an 'Overall Early Stopping' plot \item \code{9}: creates an 'Expected Number of Events' or 'Expected Sample Size' plot \item \code{10}: creates a 'Study Duration' plot \item \code{11}: creates an 'Expected Number of Subjects' plot \item \code{12}: creates an 'Analysis Times' plot \item \code{13}: creates a 'Cumulative Distribution Function' plot \item \code{14}: creates a 'Survival Function' plot }} \item{palette}{The palette, default is \code{"Set1"}.} \item{theta}{A vector of theta values.} \item{plotPointsEnabled}{If \code{TRUE}, additional points will be plotted.} \item{legendPosition}{The position of the legend. By default (\code{NA_integer_}) the algorithm tries to find a suitable position. 
Choose one of the following values to specify the position manually: \itemize{ \item \code{-1}: no legend will be shown \item \code{NA}: the algorithm tries to find a suitable position \item \code{0}: legend position outside plot \item \code{1}: legend position left top \item \code{2}: legend position left center \item \code{3}: legend position left bottom \item \code{4}: legend position right top \item \code{5}: legend position right center \item \code{6}: legend position right bottom }} \item{showSource}{If \code{TRUE}, the parameter names of the object will be printed which were used to create the plot; that may be, e.g., useful to check the values or to create own plots with \code{\link[graphics]{plot}}.} \item{...}{Optional \code{ggplot2} arguments.} } \value{ A \code{ggplot2} object. } \description{ Plots a trial design plan. } \details{ Generic function to plot all kinds of trial design plans. } rpact/man/SimulationResultsMeans.Rd0000644000176200001440000000057713574441634017137 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_simulation_results.R \docType{class} \name{SimulationResultsMeans} \alias{SimulationResultsMeans} \title{Class for Simulation Results Means} \description{ A class for simulation results means. } \details{ Use \code{\link{getSimulationMeans}} to create an object of this type. } \keyword{internal} rpact/man/AnalysisResultsInverseNormal.Rd0000644000176200001440000000100013574430660020300 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{AnalysisResultsInverseNormal} \alias{AnalysisResultsInverseNormal} \title{Analysis Results Inverse Normal} \description{ Class for analysis results based on an inverse normal design. } \details{ This object can not be created directly; use \code{\link{getAnalysisResults}} with suitable arguments to create the analysis results of an inverse normal design. } \keyword{internal} rpact/man/readDatasets.Rd0000644000176200001440000000410113574430660015035 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \name{readDatasets} \alias{readDatasets} \title{Read Multiple Datasets} \usage{ readDatasets( file, ..., header = TRUE, sep = ",", quote = "\\"", dec = ".", fill = TRUE, comment.char = "", fileEncoding = "UTF-8" ) } \arguments{ \item{file}{A CSV file (see \code{\link[utils]{read.table}}).} \item{...}{Further arguments to be passed to \code{\link[utils]{read.table}}.} \item{header}{A logical value indicating whether the file contains the names of the variables as its first line.} \item{sep}{The field separator character. Values on each line of the file are separated by this character. If sep = "," (the default for \code{readDatasets}) the separator is a comma.} \item{quote}{The set of quoting characters. To disable quoting altogether, use quote = "". See scan for the behavior on quotes embedded in quotes. Quoting is only considered for columns read as character, which is all of them unless \code{colClasses} is specified.} \item{dec}{The character used in the file for decimal points.} \item{fill}{logical. If \code{TRUE} then in case the rows have unequal length, blank fields are implicitly added.} \item{comment.char}{character: a character vector of length one containing a single character or an empty string.
Use "" to turn off the interpretation of comments altogether.} \item{fileEncoding}{character string: if non-empty declares the encoding used on a file (not a connection) so the character data can be re-encoded. See the 'Encoding' section of the help for file, the 'R Data Import/Export Manual' and 'Note'.} } \value{ Returns a list of \code{\link{Dataset}} objects. } \description{ Reads a data file and returns it as a list of dataset objects. } \details{ Reads a file that was written by \code{\link{writeDatasets}} before. } \seealso{ \itemize{ \item \code{\link{readDataset}} for reading a single dataset, \item \code{\link{writeDatasets}} for writing multiple datasets, \item \code{\link{writeDataset}} for writing a single dataset. } } rpact/man/getSampleSizeMeans.Rd0000644000176200001440000000667013574430660016206 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_sample_size_calculator.R \name{getSampleSizeMeans} \alias{getSampleSizeMeans} \title{Get Sample Size Means} \usage{ getSampleSizeMeans( design = NULL, ..., groups = 2, normalApproximation = FALSE, meanRatio = FALSE, thetaH0 = ifelse(meanRatio, 1, 0), alternative = C_ALTERNATIVE_DEFAULT, stDev = C_STDEV_DEFAULT, allocationRatioPlanned = NA_real_ ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. In this case, \code{alpha}, \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as argument.} \item{...}{Ensures that all arguments are be named and that a warning will be displayed if unknown arguments are passed.} \item{groups}{The number of treatment groups (1 or 2), default is \code{2}.} \item{normalApproximation}{If \code{normalApproximation = TRUE} is specified, the variance is assumed to be known, default is FALSE, i.e., the calculations are performed with the t distribution.} \item{meanRatio}{If \code{meanRatio = TRUE} is specified, the sample size for one-sided testing of H0: mu1/mu2 = thetaH0 is calculated, default is \code{FALSE}.} \item{thetaH0}{The null hypothesis value. For one-sided testing, a value != 0 (or a value != 1 for testing the mean ratio) can be specified, default is \code{0} or \code{1} for difference and ratio testing, respectively.} \item{alternative}{The alternative hypothesis value. This can be a vector of assumed alternatives, default is \code{seq(0.2,1,0.2)}.} \item{stDev}{The standard deviation, default is 1. If \code{meanRatio = TRUE} is specified, stDev defines the coefficient of variation sigma/mu2.} \item{allocationRatioPlanned}{The planned allocation ratio for a two treatment groups design, default is 1. If \code{allocationRatioPlanned = 0} is entered, the optimal allocation ratio yielding the smallest overall sample size is determined.} } \value{ Returns a \code{\link{TrialDesignPlanMeans}} object. } \description{ Returns the sample size for testing means in one or two samples. } \details{ At given design the function calculates the stage-wise (non-cumulated) and maximum sample size for testing means. In a two treatment groups design, additionally, an allocation ratio = n1/n2 can be specified. A null hypothesis value thetaH0 != 0 for testing the difference of two means or thetaH0 != 1 for testing the ratio of two means can be specified. Critical bounds and stopping for futility bounds are provided at the effect scale (mean, mean difference, or mean ratio, respectively) for each sample size calculation separately. 
} \examples{ # Calculate sample sizes in a fixed sample size parallel group design # with allocation ratio n1/n2 = 2 for a range of alternative values 1,...,5 # with assumed standard deviation = 3.5; two-sided alpha = 0.05, power 1 - beta = 90\%: getSampleSizeMeans(alpha = 0.05, beta = 0.1, sided = 2, groups = 2, alternative = seq(1, 5, 1), stDev = 3.5, allocationRatioPlanned = 2) # Calculate sample sizes in a three-stage Pocock paired comparison design testing # H0: mu = 2 for a range of alternative values 3,4,5 with assumed standard # deviation = 3.5; one-sided alpha = 0.05, power 1 - beta = 90\%: getSampleSizeMeans(getDesignGroupSequential(typeOfDesign = "P", alpha = 0.05, sided = 1, beta = 0.1), groups = 1, thetaH0 = 2, alternative = seq(3, 5, 1), stDev = 3.5) } rpact/man/getConditionalPower.Rd0000644000176200001440000000374713574430660016426 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_base.R \name{getConditionalPower} \alias{getConditionalPower} \title{Get Conditional Power} \usage{ getConditionalPower(design, stageResults, ..., nPlanned) } \arguments{ \item{design}{The trial design.} \item{stageResults}{The results at given stage, obtained from \code{\link{getStageResults}}.} \item{nPlanned}{The sample size planned for the subsequent stages. It should be a vector with length equal to the remaining stages and is the overall sample size in the two treatment groups if two groups are considered.} \item{stage}{The stage number (optional). Default: total number of existing stages in the data input.} \item{allocationRatioPlanned}{The allocation ratio for two treatment groups planned for the subsequent stages, the default value is 1.} \item{thetaH1}{Or \code{pi1}, \code{pi2}: assumed effect sizes or assumed rates to calculate the conditional power. Depending on the type of dataset, either \code{thetaH1} (means and survival) or \code{pi1}, \code{pi2} (rates) needs to be specified. Additionally, if testing means is specified, an assumed standard deviation (\code{assumedStDev}) can be specified, default is 1.} \item{iterations}{Iterations for simulating the power for Fisher's combination test. If the power for more than one remaining stages is to be determined for Fisher's combination test, it is estimated via simulation with specified \code{iterations}, the default value is 10000.} \item{seed}{Seed for simulating the power for Fisher's combination test. See above, default is a random seed.} } \description{ Calculates and returns the conditional power. } \details{ The conditional power is calculated only if effect size and sample size are specified. For Fisher's combination test, the conditional power for more than one remaining stage is estimated via simulation. } \seealso{ \code{\link{plot.StageResults}} or \code{\link{plot.AnalysisResults}} for plotting the conditional power.
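For example, the conditional power at the first stage of a two-stage design could be obtained as follows (a minimal sketch with illustrative numbers): \preformatted{design <- getDesignGroupSequential(kMax = 2)
data <- getDataset(n = c(20), means = c(50), stDevs = c(130))
stageResults <- getStageResults(design, data, thetaH0 = 20)
# conditional power for 30 additional subjects under thetaH1 = 60
getConditionalPower(design, stageResults, nPlanned = c(30), thetaH1 = 60)}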
} \keyword{internal} rpact/man/plot.AnalysisResults.Rd0000644000176200001440000001016313574430660016560 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \name{plot.AnalysisResults} \alias{plot.AnalysisResults} \title{Analysis Results Plotting} \usage{ \method{plot}{AnalysisResults}( x, y, ..., type = 1L, nPlanned = NA_real_, stage = x$getNumberOfStages(), allocationRatioPlanned = NA_real_, main = NA_character_, xlab = NA_character_, ylab = NA_character_, legendTitle = "", palette = "Set1", legendPosition = NA_integer_, showSource = FALSE ) } \arguments{ \item{x}{The analysis results at given stage, obtained from \code{\link{getAnalysisResults}}.} \item{y}{Not available for this kind of plot (is only defined to be compatible to the generic plot function).} \item{...}{Optional \code{ggplot2} arguments. Furthermore the following arguments can be defined: \itemize{ \item \code{thetaRange}: A range of assumed effect sizes if testing means or a survival design was specified. Additionally, if testing means was selected, an assumed standard deviation can be specified (default is 1). \item \code{piRange}: A range of assumed rates pi1 to calculate the conditional power. Additionally, if a two-sample comparison was selected, pi2 can be specified (default is the value from \code{getAnalysisResults}). \item \code{directionUpper}: The direction of one-sided testing. Default is \code{directionUpper = TRUE} which means that larger values of the test statistics yield smaller p-values. \item \code{thetaH0}: The null hypothesis value, default is 0 for the normal and the binary case, it is 1 for the survival case. For testing a rate in one sample, a value thetaH0 in (0,1) has to be specified for defining the null hypothesis H0: pi = thetaH0. }} \item{type}{The plot type (default = 1). Note that at the moment only one type (the conditional power plot) is available.} \item{nPlanned}{The additional (i.e. "new" and not cumulative) sample size planned for each of the subsequent stages. The argument should be a vector with length equal to the number of remaining stages and contain the combined sample size from both treatment groups if two groups are considered. For survival outcomes, it should contain the planned number of additional events.} \item{stage}{The stage number (optional). Default: total number of existing stages in the data input used to create the analysis results.} \item{allocationRatioPlanned}{The allocation ratio n1/n2 for two treatment groups planned for the subsequent stages, the default value is 1.} \item{main}{The main title, default is \code{"Dataset"}.} \item{xlab}{The x-axis label, default is \code{"Stage"}.} \item{ylab}{The y-axis label.} \item{legendTitle}{The legend title, default is \code{""}.} \item{palette}{The palette, default is \code{"Set1"}.} \item{legendPosition}{The position of the legend. By default (\code{NA_integer_}) the algorithm tries to find a suitable position. 
Choose one of the following values to specify the position manually: \itemize{ \item \code{0}: legend position outside plot \item \code{1}: legend position left top \item \code{2}: legend position left center \item \code{3}: legend position left bottom \item \code{4}: legend position right top \item \code{5}: legend position right center \item \code{6}: legend position right bottom }} \item{showSource}{If \code{TRUE}, the parameter names of the object will be printed which were used to create the plot; that may be, e.g., useful to check the values or to create own plots with \code{\link[graphics]{plot}}.} } \value{ A \code{ggplot2} object. } \description{ Plots the conditional power together with the likelihood function. } \details{ The conditional power is calculated only if effect size and sample size are specified. } \examples{ design <- getDesignGroupSequential(kMax = 2) dataExample <- getDataset( n = c(20, 30), means = c(50, 51), stDevs = c(130, 140) ) result <- getAnalysisResults(design = design, dataInput = dataExample, thetaH0 = 20, nPlanned = c(30), thetaH1 = 1.5, stage = 1) if (require(ggplot2)) plot(result, thetaRange = c(0, 100)) } rpact/man/getFinalPValue.Rd0000644000176200001440000000145013574430660015303 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_base.R \name{getFinalPValue} \alias{getFinalPValue} \title{Get Final P Value} \usage{ getFinalPValue(design, stageResults, ...) } \arguments{ \item{design}{The trial design.} \item{stageResults}{The results at given stage, obtained from \code{\link{getStageResults}}.} \item{stage}{The stage number (optional). Default: total number of existing stages in the data input.} } \description{ Returns the final p-value for given stage results. } \details{ The calculation of the final p-value is based on the stagewise ordering of the sample space. This enables the calculation for both the non-adaptive and the adaptive case. For Fisher's combination test, it is available for \code{kMax = 2} only. } \keyword{internal} rpact/man/getData.Rd0000644000176200001440000000104113574430660014002 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_simulation_results.R \name{getData} \alias{getData} \title{Get Simulation Data} \usage{ getData(x) } \arguments{ \item{x}{A \code{SimulationResults} object created by \code{\link{getSimulationMeans}}, \code{\link{getSimulationRates}}, or \code{\link{getSimulationSurvival}}.} } \description{ Returns the aggregated simulation data. } \details{ These data are the basis for the creation of the summary statistics in the simulation results output. } \keyword{internal} rpact/man/SimulationResults.Rd0000644000176200001440000000076213574441634016153 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_simulation_results.R \docType{class} \name{SimulationResults} \alias{SimulationResults} \title{Class for Simulation Results} \description{ A class for simulation results. } \details{ \code{SimulationResults} is the basic class for \itemize{ \item \code{\link{SimulationResultsMeans}}, \item \code{\link{SimulationResultsRates}}, and \item \code{\link{SimulationResultsSurvival}}.
} } \keyword{internal} rpact/man/StageResultsMeans.Rd0000644000176200001440000000172213574430660016050 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_stage_results.R \docType{class} \name{StageResultsMeans} \alias{StageResultsMeans} \title{Stage Results of Means} \description{ Class for stage results of means. } \details{ This object can not be created directly; use \code{getStageResults} with suitable arguments to create the stage results of a dataset of means. } \section{Fields}{ \describe{ \item{\code{testStatistics}}{The stage-wise test statistics.} \item{\code{pValues}}{The stage-wise p-values.} \item{\code{combInverseNormal}}{The inverse normal test.} \item{\code{combFisher}}{The Fisher's combination test.} \item{\code{effectSizes}}{The effect sizes for different designs.} \item{\code{testActions}}{The action drawn from test result.} \item{\code{weightsFisher}}{The weights for Fisher's combination test.} \item{\code{weightsInverseNormal}}{The weights for inverse normal statistic.} }} \keyword{internal} rpact/man/TrialDesignCharacteristics_as.data.frame.Rd0000644000176200001440000000173113574430660022364 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design.R \name{TrialDesignCharacteristics_as.data.frame} \alias{TrialDesignCharacteristics_as.data.frame} \alias{as.data.frame.TrialDesignCharacteristics} \title{Coerce TrialDesignCharacteristics to a Data Frame} \usage{ \method{as.data.frame}{TrialDesignCharacteristics}( x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, ... ) } \arguments{ \item{niceColumnNamesEnabled}{logical. If \code{TRUE}, nice looking names will be used; syntactic names otherwise (see \code{\link[base]{make.names}}).} \item{includeAllParameters}{logical. If \code{TRUE}, all parameters will be included; a meaningful parameter selection otherwise.} } \description{ Returns the \code{TrialDesignCharacteristics} as data frame. } \details{ Each element of the \code{TrialDesignCharacteristics} is converted to a column in the data frame. } \keyword{internal} rpact/man/TrialDesign.Rd0000644000176200001440000000070713574441634014651 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design.R \docType{class} \name{TrialDesign} \alias{TrialDesign} \title{Basic Trial Design} \description{ Basic class for trial designs. } \details{ \code{TrialDesign} is the basic class for \itemize{ \item \code{\link{TrialDesignFisher}}, \item \code{\link{TrialDesignGroupSequential}}, and \item \code{\link{TrialDesignInverseNormal}}. } } \keyword{internal} rpact/man/FieldSet_print.Rd0000644000176200001440000000064213574441634015355 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_parameter_set.R \name{FieldSet_print} \alias{FieldSet_print} \alias{print.FieldSet} \title{Print Field Set Values} \usage{ \method{print}{FieldSet}(x, ...) } \description{ \code{print} prints its \code{FieldSet} argument and returns it invisibly (via \code{invisible(x)}). } \details{ Prints the field set. 
} \keyword{internal} rpact/man/getFinalConfidenceInterval.Rd0000644000176200001440000000547613574430660017663 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_base.R \name{getFinalConfidenceInterval} \alias{getFinalConfidenceInterval} \title{Get Final Confidence Interval} \usage{ getFinalConfidenceInterval(design, dataInput, ...) } \arguments{ \item{design}{The trial design.} \item{dataInput}{The data input.} \item{stage}{The stage number.} \item{thetaH0}{The null hypothesis value, default is 0 for the normal and the binary case, it is 1 for the survival case. For testing a rate in one sample, a value \code{thetaH0} in (0,1) has to be specified for defining the null hypothesis H0: pi = thetaH0. \cr For non-inferiority designs, this is the non-inferiority bound.} \item{directionUpper}{The direction of one-sided testing. Default is \code{directionUpper = TRUE} which means that larger values of the test statistics yield smaller p-values.} \item{normalApproximation}{The type of computation of the p-values. Default is FALSE for testing means (i.e., the t test is used) and TRUE for testing rates and the hazard ratio. For testing rates, if \code{normalApproximation = FALSE} is specified, the binomial test (one sample) or the test of Fisher (two samples) is used for calculating the p-values. In the survival setting, \code{normalApproximation = FALSE} has no effect.} \item{equalVariances}{The type of t test. For testing means in two treatment groups, either the t test assuming that the variances are equal or the t test without assuming this, i.e., the test of Welch-Satterthwaite is calculated, default is \code{equalVariances = TRUE}.} } \value{ Returns a \code{list} containing \itemize{ \item \code{finalStage}, \item \code{medianUnbiased}, \item \code{finalConfidenceInterval}, \item \code{medianUnbiasedGeneral}, and \item \code{finalConfidenceIntervalGeneral}. } } \description{ Returns the final confidence interval for the parameter of interest. It is based on the prototype case, i.e., testing a mean for normally distributed variables. } \details{ Depending on \code{design} and \code{dataInput} the final confidence interval and median unbiased estimate that is based on the stagewise ordering of the sample space will be calculated and returned. Additionally, a non-standardized ("general") version is provided; use the standard deviation to obtain the confidence interval for the parameter of interest.
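For example, the standardized "general" results can be scaled back to the scale of the parameter of interest by multiplying with the standard deviation (a sketch that reuses the design and data of the example below; 135 is roughly the overall standard deviation of that dataset): \preformatted{result <- getFinalConfidenceInterval(design, dataInput = data)
# approximately reproduces result$finalConfidenceInterval:
result$finalConfidenceIntervalGeneral * 135}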
} \examples{ design <- getDesignInverseNormal(kMax = 2) data <- getDataset( n = c(20, 30), means = c(50, 51), stDevs = c(130, 140) ) getFinalConfidenceInterval(design, dataInput = data) # Results in: # # $finalStage # [1] 2 # # $medianUnbiasedGeneral # [1] 0.3546145 # # $finalConfidenceIntervalGeneral # [1] 0.06967801 0.63468553 # # $medianUnbiased # [1] 47.7787 # # $finalConfidenceInterval # [1] 9.388012 85.513851 } \keyword{internal} rpact/man/plot.StageResults.Rd0000644000176200001440000001026013574430660016036 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_stage_results.R \name{plot.StageResults} \alias{plot.StageResults} \title{Stage Results Plotting} \usage{ \method{plot}{StageResults}( x, y, ..., type = 1L, nPlanned, stage = x$getNumberOfStages(), allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, main = NA_character_, xlab = NA_character_, ylab = NA_character_, legendTitle = NA_character_, palette = "Set1", legendPosition = NA_integer_, showSource = FALSE ) } \arguments{ \item{x}{The stage results at given stage, obtained from \code{getStageResults} or \code{getAnalysisResults}.} \item{y}{Not available for this kind of plot (is only defined to be compatible to the generic plot function).} \item{...}{Optional \code{ggplot2} arguments. Furthermore the following arguments can be defined: \itemize{ \item \code{thetaRange}: A range of assumed effect sizes if testing means or a survival design was specified. Additionally, if testing means was selected, an assumed standard deviation can be specified (default is 1). \item \code{piRange}: A range of assumed rates pi1 to calculate the conditional power. Additionally, if a two-sample comparison was selected, pi2 can be specified (default is the value from \code{getAnalysisResults}). \item \code{directionUpper}: The direction of one-sided testing. Default is \code{directionUpper = TRUE} which means that larger values of the test statistics yield smaller p-values. \item \code{thetaH0}: The null hypothesis value, default is 0 for the normal and the binary case, it is 1 for the survival case. For testing a rate in one sample, a value thetaH0 in (0,1) has to be specified for defining the null hypothesis H0: pi = thetaH0. }} \item{type}{The plot type (default = 1). Note that at the moment only one type (the conditional power plot) is available.} \item{nPlanned}{The additional (i.e. "new" and not cumulative) sample size planned for each of the subsequent stages. The argument should be a vector with length equal to the number of remaining stages and contain the combined sample size from both treatment groups if two groups are considered. For survival outcomes, it should contain the planned number of additional events.} \item{stage}{The stage number (optional). Default: total number of existing stages in the data input used to create the stage results.} \item{allocationRatioPlanned}{The allocation ratio for two treatment groups planned for the subsequent stages, the default value is 1.} \item{main}{The main title.} \item{xlab}{The x-axis label.} \item{ylab}{The y-axis label.} \item{legendTitle}{The legend title.} \item{palette}{The palette, default is \code{"Set1"}.} \item{legendPosition}{The position of the legend. By default (\code{NA_integer_}) the algorithm tries to find a suitable position.
Choose one of the following values to specify the position manually: \itemize{ \item \code{0}: legend position outside plot \item \code{1}: legend position left top \item \code{2}: legend position left center \item \code{3}: legend position left bottom \item \code{4}: legend position right top \item \code{5}: legend position right center \item \code{6}: legend position right bottom }}
\item{showSource}{If \code{TRUE}, the parameter names of the object will be printed which were used to create the plot; that may be, e.g., useful to check the values or to create own plots with \code{\link[graphics]{plot}}.} }
\value{ A \code{ggplot2} object. }
\description{ Plots the conditional power together with the likelihood function. }
\details{ Generic function to plot all kinds of stage results. The conditional power is calculated only if effect size and sample size are specified. }
\examples{ design <- getDesignGroupSequential(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.5, 0.8, 1), typeOfDesign = "WT", deltaWT = 0.25) dataExample <- getDataset( n = c(20, 30, 30), means = c(50, 51, 55), stDevs = c(130, 140, 120) ) stageResults <- getStageResults(design, dataExample, thetaH0 = 20) if (require(ggplot2)) plot(stageResults, nPlanned = c(30), thetaRange = c(0, 100)) }
rpact/man/TrialDesignSet.Rd0000644000176200001440000000141613574430660015320 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_set.R
\docType{class} \name{TrialDesignSet} \alias{TrialDesignSet} \title{Class for trial design sets.}
\description{ \code{TrialDesignSet} is a class for creating a collection of different trial designs. }
\details{ This object can not be created directly; better use \code{\link{getDesignSet}} with suitable arguments to create a set of designs. }
\section{Fields}{ \describe{ \item{\code{designs}}{The designs (optional).} \item{\code{design}}{The master design (optional).} }}
\section{Methods}{ \describe{ \item{\code{add(...)}}{Adds 'designs' OR a 'design' and/or a design parameter, e.g., deltaWT = c(0.1, 0.3, 0.4)} }}
\seealso{ \code{\link{getDesignSet}} } \keyword{internal}
rpact/man/ParameterSet_print.Rd0000644000176200001440000000127513574430660016252 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_parameter_set.R
\name{ParameterSet_print} \alias{ParameterSet_print} \alias{print.ParameterSet} \title{Print Parameter Set Values}
\usage{ \method{print}{ParameterSet}(x, ..., markdown = FALSE) }
\arguments{ \item{x}{The object to print.} \item{markdown}{If \code{TRUE}, the object \code{x} will be printed using markdown syntax; normal representation will be used otherwise (default is \code{FALSE}).} }
\description{ \code{print} prints its \code{ParameterSet} argument and returns it invisibly (via \code{invisible(x)}). }
\details{ Prints the parameters and results of a parameter set. } \keyword{internal}
rpact/man/AnalysisResultsGroupSequential.Rd0000644000176200001440000000101113574430660020645 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R
\docType{class} \name{AnalysisResultsGroupSequential} \alias{AnalysisResultsGroupSequential} \title{Analysis Results Group Sequential}
\description{ Class for analysis results based on a group sequential design.
} \details{ This object can not be created directly; use \code{\link{getAnalysisResults}} with suitable arguments to create the analysis results of a group sequential design. } \keyword{internal}
rpact/man/utilitiesForSurvivalTrials.Rd0000644000176200001440000000267713574430660020042 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_utilities.R
\name{utilitiesForSurvivalTrials} \alias{utilitiesForSurvivalTrials} \alias{getLambdaByPi} \alias{getLambdaByMedian} \alias{getHazardRatioByPi} \alias{getPiByLambda} \alias{getPiByMedian} \alias{getMedianByLambda} \alias{getMedianByPi} \title{Survival Helper Functions for Conversion of Pi, Lambda, Median}
\usage{ getLambdaByPi(piValue, eventTime = C_EVENT_TIME_DEFAULT, kappa = 1)
getLambdaByMedian(median, kappa = 1)
getHazardRatioByPi(pi1, pi2, eventTime = C_EVENT_TIME_DEFAULT, kappa = 1)
getPiByLambda(lambda, eventTime = C_EVENT_TIME_DEFAULT, kappa = 1)
getPiByMedian(median, eventTime = C_EVENT_TIME_DEFAULT, kappa = 1)
getMedianByLambda(lambda, kappa = 1)
getMedianByPi(piValue, eventTime = C_EVENT_TIME_DEFAULT, kappa = 1) }
\arguments{ \item{piValue, pi1, pi2, lambda, median}{Value that shall be converted.}
\item{eventTime}{The assumed time under which the event rates are calculated, default is \code{12}.}
\item{kappa}{The scale parameter of the Weibull distribution, default is \code{1}. The Weibull distribution cannot be used for the piecewise definition of the survival time distribution.} }
\description{ Functions to convert pi, lambda and median values into each other. }
\details{ Can be used, e.g., to convert median values into pi or lambda values for usage in \code{\link{getSampleSizeSurvival}} or \code{\link{getPowerSurvival}}. }
rpact/man/getSimulationSurvival.Rd0000644000176200001440000005012313574430660017016 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_simulation_survival.R
\name{getSimulationSurvival} \alias{getSimulationSurvival} \title{Get Simulation Survival}
\usage{ getSimulationSurvival( design = NULL, ..., thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, pi1 = NA_real_, pi2 = NA_real_, lambda1 = NA_real_, lambda2 = NA_real_, median1 = NA_real_, median2 = NA_real_, hazardRatio = NA_real_, kappa = 1, piecewiseSurvivalTime = NA_real_, allocation1 = C_ALLOCATION_1_DEFAULT, allocation2 = C_ALLOCATION_2_DEFAULT, eventTime = C_EVENT_TIME_DEFAULT, accrualTime = C_ACCRUAL_TIME_DEFAULT, accrualIntensity = C_ACCRUAL_INTENSITY_DEFAULT, dropoutRate1 = C_DROP_OUT_RATE_1_DEFAULT, dropoutRate2 = C_DROP_OUT_RATE_2_DEFAULT, dropoutTime = C_DROP_OUT_TIME_DEFAULT, maxNumberOfSubjects = NA_real_, plannedEvents = NA_real_, minNumberOfEventsPerStage = NA_real_, maxNumberOfEventsPerStage = NA_real_, conditionalPower = NA_real_, thetaH1 = NA_real_, maxNumberOfIterations = C_MAX_SIMULATION_ITERATIONS_DEFAULT, maxNumberOfRawDatasetsPerStage = 0, longTimeSimulationAllowed = FALSE, seed = NA_real_ ) }
\arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. In this case, \code{alpha}, \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as arguments.}
\item{...}{Ensures that all arguments are named and that a warning will be displayed if unknown arguments are passed.}
\item{thetaH0}{The null hypothesis value. The default value is \code{1}.
For one-sided testing, a bound \code{thetaH0 != 1} for testing H0: hazard ratio = thetaH0 can be specified.}
\item{directionUpper}{Specifies the direction of the alternative, only applicable for one-sided testing, default is \code{TRUE}.}
\item{pi1}{The assumed event rate in the treatment group, default is \code{seq(0.2,0.5,0.1)}.}
\item{pi2}{The assumed event rate in the control group, default is \code{0.2}.}
\item{lambda1}{The assumed hazard rate in the treatment group, there is no default. lambda1 can also be used to define piecewise exponentially distributed survival times (see details).}
\item{lambda2}{The assumed hazard rate in the reference group, there is no default. lambda2 can also be used to define piecewise exponentially distributed survival times (see details).}
\item{median1}{The assumed median survival time in the treatment group, there is no default.}
\item{median2}{The assumed median survival time in the reference group, there is no default.}
\item{hazardRatio}{The vector of hazard ratios under consideration. If the event or hazard rates in both treatment groups are defined, the hazard ratio need not be specified as it is calculated.}
\item{kappa}{The scale parameter of the Weibull distribution, default is \code{1}. The Weibull distribution cannot be used for the piecewise definition of the survival time distribution. Note that the parameters \code{shape} and \code{scale} in \code{\link[stats]{Weibull}} are equivalent to \code{kappa} and \code{1 / lambda}, respectively, in rpact.}
\item{piecewiseSurvivalTime}{A vector that specifies the time intervals for the piecewise definition of the exponential survival time cumulative distribution function (see details).}
\item{allocation1}{The number of subjects that are assigned to treatment 1 in subsequent order, default is \code{1}.}
\item{allocation2}{The number of subjects that are assigned to treatment 2 in subsequent order, default is \code{1}.}
\item{eventTime}{The assumed time under which the event rates are calculated, default is \code{12}.}
\item{accrualTime}{The assumed accrual time for the study, default is \code{12} (see \code{\link{getAccrualTime}}).}
\item{accrualIntensity}{A vector of accrual intensities, default is the relative intensity \code{0.1} (see \code{\link{getAccrualTime}}).}
\item{dropoutRate1}{The assumed drop-out rate in the treatment group, default is \code{0}.}
\item{dropoutRate2}{The assumed drop-out rate in the control group, default is \code{0}.}
\item{dropoutTime}{The assumed time for drop-out rates in the control and the treatment group, default is \code{12}.}
\item{maxNumberOfSubjects}{\code{maxNumberOfSubjects > 0} needs to be specified.
If accrual time and accrual intensity are specified, this will be calculated.}
\item{plannedEvents}{\code{plannedEvents} is a vector of length kMax (the number of stages of the design) with increasing numbers that determines the number of cumulated (overall) events when the interim stages are planned.}
\item{minNumberOfEventsPerStage}{When performing a data driven sample size recalculation, the vector with length kMax \code{minNumberOfEventsPerStage} determines the minimum number of events per stage (i.e., not cumulated), the first element is not taken into account.}
\item{maxNumberOfEventsPerStage}{When performing a data driven sample size recalculation, the vector with length kMax \code{maxNumberOfEventsPerStage} determines the maximum number of events per stage (i.e., not cumulated), the first element is not taken into account.}
\item{conditionalPower}{The conditional power for the subsequent stage under which the sample size recalculation is performed.}
\item{thetaH1}{If specified, the value of the hazard ratio under which the conditional power calculation is performed.}
\item{maxNumberOfIterations}{The number of simulation iterations.}
\item{maxNumberOfRawDatasetsPerStage}{The number of raw datasets per stage that shall be extracted and saved as \code{\link[base]{data.frame}}, default is \code{0}. \code{\link{getRawData}} can be used to get the extracted raw data from the object.}
\item{longTimeSimulationAllowed}{Logical that indicates whether long time simulations that consume more than 30 seconds are allowed or not, default is \code{FALSE}.}
\item{seed}{The seed to reproduce the simulation, default is a random seed.} }
\value{ Returns a \code{\link{SimulationResultsSurvival}} object. }
\description{ Returns the analysis times, power, stopping probabilities, conditional power, and expected sample size for testing the hazard ratio in a two treatment groups survival design. }
\details{ At given design the function simulates the power, stopping probabilities, conditional power, and expected sample size at given number of events, number of subjects, and parameter configuration. It also simulates the time when the required events are expected under the given assumptions (exponentially, piecewise exponentially, or Weibull distributed survival times and constant or non-constant piecewise accrual). Additionally, integers \code{allocation1} and \code{allocation2} can be specified that determine the number allocated to treatment group 1 and treatment group 2, respectively. The formula of Kim & Tsiatis (Biometrics, 1990) is used to calculate the expected number of events under the alternative (see also Lakatos & Lan, Statistics in Medicine, 1992). These formulas are generalized to piecewise survival times and non-constant piecewise accrual over time.\cr
\code{piecewiseSurvivalTime}: The first element of this vector must be equal to \code{0}. \code{piecewiseSurvivalTime} can also be a list that combines the definition of the time intervals and hazard rates in the reference group. The definition of the survival time in the treatment group is obtained by the specification of the hazard ratio (see examples for details). Note that \code{numberOfSubjects}, \code{numberOfSubjects1}, and \code{numberOfSubjects2} in the output are the expected numbers of subjects.
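As a minimal illustration of the list form (a sketch only; the interval labels and rates below are arbitrary and mirror the examples section):\cr
\code{pws <- list("0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04)}\cr
\code{getSimulationSurvival(piecewiseSurvivalTime = pws, hazardRatio = 1.5,}\cr
\code{plannedEvents = 40, maxNumberOfSubjects = 200)}\cr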
} \section{Simulation Data}{ The summary statistics "Simulated data" contain the following parameters: median [range]; mean +/-sd\cr
\code{$show(showStatistics = FALSE)} or \code{$setShowStatistics(FALSE)} can be used to disable the output of the aggregated simulated data.\cr
Example 1: \cr \code{simulationResults <- getSimulationSurvival(maxNumberOfSubjects = 100, plannedEvents = 30)} \cr \code{simulationResults$show(showStatistics = FALSE)}\cr
Example 2: \cr \code{simulationResults <- getSimulationSurvival(maxNumberOfSubjects = 100, plannedEvents = 30)} \cr \code{simulationResults$setShowStatistics(FALSE)}\cr \code{simulationResults}\cr
\code{\link{getData}} can be used to get the aggregated simulated data from the object as \code{\link[base]{data.frame}}. The data frame contains the following columns: \enumerate{
\item \code{iterationNumber}: The number of the simulation iteration.
\item \code{stageNumber}: The stage.
\item \code{pi1}: The assumed or derived event rate in the treatment group.
\item \code{pi2}: The assumed or derived event rate in the control group.
\item \code{hazardRatio}: The hazard ratio under consideration (if available).
\item \code{analysisTime}: The analysis time.
\item \code{numberOfSubjects}: The number of subjects under consideration when the (interim) analysis takes place.
\item \code{eventsPerStage1}: The observed number of events per stage in treatment group 1.
\item \code{eventsPerStage2}: The observed number of events per stage in treatment group 2.
\item \code{eventsPerStage}: The observed number of events per stage in both treatment groups.
\item \code{rejectPerStage}: 1 if null hypothesis can be rejected, 0 otherwise.
\item \code{futilityPerStage}: 1 if study should be stopped for futility, 0 otherwise.
\item \code{eventsNotAchieved}: 1 if number of events could not be reached with observed number of subjects, 0 otherwise.
\item \code{testStatistic}: The test statistic that is used for the test decision, depends on which design was chosen (group sequential, inverse normal, or Fisher combination test).
\item \code{logRankStatistic}: Z-score statistic which corresponds to a one-sided log-rank test at considered stage.
\item \code{hazardRatioEstimateLR}: The estimated hazard ratio, derived from the log-rank statistic.
\item \code{trialStop}: \code{TRUE} if study should be stopped for efficacy or futility or final stage, \code{FALSE} otherwise.
\item \code{conditionalPowerAchieved}: The conditional power for the subsequent stage of the trial for selected sample size and effect. The effect is either estimated from the data or can be user defined with \code{thetaH1}. } }
\section{Raw Data}{ \code{\link{getRawData}} can be used to get the simulated raw data from the object as \code{\link[base]{data.frame}}. Note that \code{getSimulationSurvival} must be called before with \code{maxNumberOfRawDatasetsPerStage} > 0. The data frame contains the following columns: \enumerate{
\item \code{iterationNumber}: The number of the simulation iteration.
\item \code{stopStage}: The stage of stopping.
\item \code{subjectId}: The subject id (increasing number 1, 2, 3, ...).
\item \code{accrualTime}: The accrual time, i.e., the time when the subject entered the trial.
\item \code{treatmentGroup}: The treatment group number (1 or 2).
\item \code{survivalTime}: The survival time of the subject.
\item \code{dropoutTime}: The dropout time of the subject (may be \code{NA}).
\item \code{observationTime}: The specific observation time.
\item \code{timeUnderObservation}: The time under observation is defined as follows:\cr if (event == TRUE) {\cr timeUnderObservation <- survivalTime;\cr } else if (dropoutEvent == TRUE) {\cr timeUnderObservation <- dropoutTime;\cr } else {\cr timeUnderObservation <- observationTime - accrualTime;\cr }
\item \code{event}: \code{TRUE} if an event occurred; \code{FALSE} otherwise.
\item \code{dropoutEvent}: \code{TRUE} if a dropout event occurred; \code{FALSE} otherwise. } }
\examples{
# Fixed sample size with minimum required definitions, pi1 = (0.3,0.4,0.5,0.6) and
# pi2 = 0.3 at event time 12, and accrual time 24
getSimulationSurvival(pi1 = seq(0.3,0.6,0.1), pi2 = 0.3, eventTime = 12,
accrualTime = 24, plannedEvents = 40, maxNumberOfSubjects = 200,
maxNumberOfIterations = 50)
\donttest{
# Run the same simulation (increase maxNumberOfIterations for more precise results)
getSimulationSurvival(pi1 = seq(0.3,0.6,0.1), pi2 = 0.3, eventTime = 12,
accrualTime = 24, plannedEvents = 40, maxNumberOfSubjects = 200,
maxNumberOfIterations = 50)
# Determine necessary accrual time with default settings if 200 subjects and
# 30 subjects per time unit can be recruited
getSimulationSurvival(plannedEvents = 40, accrualTime = 0,
accrualIntensity = 30, maxNumberOfSubjects = 200, maxNumberOfIterations = 50)
# Determine necessary accrual time with default settings if 200 subjects and
# if in the first 6 time units 20 subjects per time unit can be recruited,
# then 30 subjects per time unit
getSimulationSurvival(plannedEvents = 40, accrualTime = c(0, 6),
accrualIntensity = c(20, 30), maxNumberOfSubjects = 200,
maxNumberOfIterations = 50)
# Determine maximum number of subjects with default settings if in the first
# 6 time units 20 subjects per time unit can be recruited, and after
# 10 time units 30 subjects per time unit
getSimulationSurvival(plannedEvents = 40, accrualTime = c(0, 6, 10),
accrualIntensity = c(20, 30), maxNumberOfIterations = 50)
# Specify accrual time as a list
at <- list( "0 - <6" = 20, "6 - Inf" = 30)
getSimulationSurvival(plannedEvents = 40, accrualTime = at,
maxNumberOfSubjects = 200, maxNumberOfIterations = 50)
# Specify accrual time as a list, if maximum number of subjects needs to be calculated
at <- list( "0 - <6" = 20, "6 - <=10" = 30)
getSimulationSurvival(plannedEvents = 40, accrualTime = at, maxNumberOfIterations = 50)
# Specify effect size for a two-stage group sequential design with O'Brien & Fleming boundaries.
# Effect size is based on event rates at specified event time, directionUpper = FALSE
# needs to be specified because it should be shown that hazard ratio < 1
getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), pi1 = 0.2,
pi2 = 0.3, eventTime = 24, plannedEvents = c(20, 40), maxNumberOfSubjects = 200,
directionUpper = FALSE, maxNumberOfIterations = 50)
# As above, but with a three-stage O'Brien & Fleming design with
# specified information rates, note that planned events consist of integer values
d3 <- getDesignGroupSequential(informationRates = c(0.4, 0.7, 1))
getSimulationSurvival(design = d3, pi1 = 0.2, pi2 = 0.3, eventTime = 24,
plannedEvents = round(d3$informationRates * 40), maxNumberOfSubjects = 200,
directionUpper = FALSE, maxNumberOfIterations = 50)
# Effect size is based on event rate at specified event time for the reference group and
# hazard ratio, directionUpper = FALSE needs to be specified because it should be shown
# that hazard ratio < 1
getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5,
pi2 = 0.3, eventTime = 24, plannedEvents = c(20, 40), maxNumberOfSubjects = 200,
directionUpper = FALSE, maxNumberOfIterations = 50)
# Effect size is based on hazard rate for the reference group and
# hazard ratio, directionUpper = FALSE needs to be specified because
# it should be shown that hazard ratio < 1
getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5,
lambda2 = 0.02, plannedEvents = c(20, 40), maxNumberOfSubjects = 200,
directionUpper = FALSE, maxNumberOfIterations = 50)
# Specification of piecewise exponential survival time and hazard ratios,
# note that in getSimulationSurvival only one hazard ratio is used
# in the case that the survival time is piecewise exponential
getSimulationSurvival(design = getDesignGroupSequential(kMax = 2),
piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04),
hazardRatio = 1.5, plannedEvents = c(20, 40), maxNumberOfSubjects = 200,
maxNumberOfIterations = 50)
pws <- list( "0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04)
getSimulationSurvival(design = getDesignGroupSequential(kMax = 2),
piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2),
plannedEvents = c(20, 40), maxNumberOfSubjects = 200, maxNumberOfIterations = 50)
# Specification of piecewise exponential survival time for both treatment arms
getSimulationSurvival(design = getDesignGroupSequential(kMax = 2),
piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04),
lambda1 = c(0.015, 0.03, 0.06), plannedEvents = c(20, 40),
maxNumberOfSubjects = 200, maxNumberOfIterations = 50)
# Specification of piecewise exponential survival time as a list,
# note that in getSimulationSurvival only one hazard ratio
# (not a vector) can be used
pws <- list( "0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04)
getSimulationSurvival(design = getDesignGroupSequential(kMax = 2),
piecewiseSurvivalTime = pws, hazardRatio = 1.5, plannedEvents = c(20, 40),
maxNumberOfSubjects = 200, maxNumberOfIterations = 50)
# Specification of piecewise exponential survival time and delayed effect
# (response after 5 time units)
getSimulationSurvival(design = getDesignGroupSequential(kMax = 2),
piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04),
lambda1 = c(0.01, 0.02, 0.06), plannedEvents = c(20, 40),
maxNumberOfSubjects = 200, maxNumberOfIterations = 50)
# Specify effect size based on median survival times
getSimulationSurvival(median1 = 5, median2 = 3, plannedEvents = 40, maxNumberOfSubjects = 200,
directionUpper = FALSE, maxNumberOfIterations = 50)
# Specify effect size based on median survival
# times of Weibull distribution with kappa = 2
getSimulationSurvival(median1 = 5, median2 = 3, kappa = 2,
plannedEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE,
maxNumberOfIterations = 50)
# Perform recalculation of number of events based on conditional power for a
# three-stage design with inverse normal combination test, where the conditional power
# is calculated under the specified effect size thetaH1 = 1.3 and up to a four-fold
# increase in originally planned sample size (number of events) is allowed
# Note that the first value in minNumberOfEventsPerStage and
# maxNumberOfEventsPerStage is arbitrary, i.e., it has no effect.
dIN <- getDesignInverseNormal(informationRates = c(0.4, 0.7, 1))
resultsWithSSR1 <- getSimulationSurvival(design = dIN,
hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3, conditionalPower = 0.8,
thetaH1 = 1.3, plannedEvents = c(58, 102, 146),
minNumberOfEventsPerStage = c(58, 44, 44),
maxNumberOfEventsPerStage = 4 * c(58, 44, 44),
maxNumberOfSubjects = 800, maxNumberOfIterations = 50)
resultsWithSSR1
# If thetaH1 is unspecified, the observed hazard ratio estimate
# (calculated from the log-rank statistic) is used for performing the
# recalculation of the number of events
resultsWithSSR2 <- getSimulationSurvival(design = dIN,
hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3, conditionalPower = 0.8,
plannedEvents = c(58, 102, 146), minNumberOfEventsPerStage = c(58, 44, 44),
maxNumberOfEventsPerStage = 4 * c(58, 44, 44), maxNumberOfSubjects = 800,
maxNumberOfIterations = 50)
resultsWithSSR2
# Compare it with design without event size recalculation
resultsWithoutSSR <- getSimulationSurvival(design = dIN,
hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3, plannedEvents = c(58, 102, 145),
maxNumberOfSubjects = 800, maxNumberOfIterations = 50)
resultsWithoutSSR$overallReject
resultsWithSSR1$overallReject
resultsWithSSR2$overallReject
# Confirm that event size recalculation increases the Type I error rate,
# i.e., you have to use the combination test
dGS <- getDesignGroupSequential(informationRates = c(0.4, 0.7, 1))
resultsWithSSRGS <- getSimulationSurvival(design = dGS, hazardRatio = seq(1),
pi2 = 0.3, conditionalPower = 0.8, plannedEvents = c(58, 102, 145),
minNumberOfEventsPerStage = c(58, 44, 44),
maxNumberOfEventsPerStage = 4 * c(58, 44, 44),
maxNumberOfSubjects = 800, maxNumberOfIterations = 50)
resultsWithSSRGS$overallReject
# Set seed to get reproducible results
identical(
getSimulationSurvival(plannedEvents = 40, maxNumberOfSubjects = 200,
seed = 99)$analysisTime,
getSimulationSurvival(plannedEvents = 40, maxNumberOfSubjects = 200,
seed = 99)$analysisTime
) } }
rpact/man/printCitation.Rd0000644000176200001440000000110313574430660015261 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_core_utilities.R
\name{printCitation} \alias{printCitation} \title{Print Citation}
\usage{ printCitation(inclusiveR = TRUE) }
\arguments{ \item{inclusiveR}{If \code{TRUE} (default) the information on how to cite the base R system in publications will be added.} }
\description{ How to cite \code{rpact} and \code{R} in publications. }
\details{ This function shows how to cite \code{rpact} and \code{R} (\code{inclusiveR = TRUE}) in publications.
} \examples{ printCitation() } \keyword{internal}
rpact/man/AnalysisResults_names.Rd0000644000176200001440000000107113574430660016764 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R
\name{AnalysisResults_names} \alias{AnalysisResults_names} \alias{names.AnalysisResults} \title{The Names of an Analysis Results object}
\usage{ \method{names}{AnalysisResults}(x) }
\value{ A character vector containing the names of the \code{\link{AnalysisResults}} object. }
\description{ Function to get the names of an \code{\link{AnalysisResults}} object. }
\details{ Returns the names of an analysis results object that can be accessed by the user. } \keyword{internal}
rpact/man/DatasetSurvival.Rd0000644000176200001440000000133013574430660015553 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R
\docType{class} \name{DatasetSurvival} \alias{DatasetSurvival} \title{Dataset of Survival Data}
\description{ Class for a dataset of survival data. }
\details{ This object can not be created directly; better use \code{\link{getDataset}} with suitable arguments to create a dataset of survival data. }
\section{Fields}{ \describe{ \item{\code{group}}{The group numbers.} \item{\code{stage}}{The stage numbers.} \item{\code{overallEvent}}{The overall events.} \item{\code{overallAllocationRatio}}{The overall allocation ratios.} \item{\code{overallLogRank}}{The overall logrank test statistics.} }} \keyword{internal}
rpact/man/getAnalysisResults.Rd0000644000176200001440000001216513574430660016307 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_base.R
\name{getAnalysisResults} \alias{getAnalysisResults} \title{Get Analysis Results}
\usage{ getAnalysisResults( design, dataInput, ..., directionUpper = C_DIRECTION_UPPER_DEFAULT, thetaH0 = NA_real_, nPlanned = NA_real_ ) }
\arguments{ \item{design}{The trial design.}
\item{dataInput}{The summary data used for calculating the test results. This is either an element of \code{DatasetMeans}, of \code{DatasetRates}, or of \code{DatasetSurvival}. For more information see \code{details} below.}
\item{...}{Further arguments to be passed to methods (cp. separate functions in See Also), e.g., \describe{ \item{stage}{The stage number (optional). Default: total number of existing stages in the data input.} \item{allocationRatioPlanned}{The allocation ratio n1/n2 for two treatment groups planned for the subsequent stages, the default value is 1.} \item{thetaH1 and assumedStDev or pi1, pi2}{The assumed effect size or assumed rates to calculate the conditional power. Depending on the type of dataset, either thetaH1 (means and survival) or pi1, pi2 (rates) can be specified. Additionally, if testing means is specified, an assumed standard deviation can be specified, default is 1.} \item{normalApproximation}{The type of computation of the p-values. Default is FALSE for testing means (i.e., the t test is used) and TRUE for testing rates and the hazard ratio. For testing rates, if \cr \code{normalApproximation = FALSE} is specified, the binomial test (one sample) or the test of Fisher (two samples) is used for calculating the p-values. In the survival setting, \cr \code{normalApproximation = FALSE} has no effect.} \item{equalVariances}{The type of t test.
For testing means in two treatment groups, either the t test assuming that the variances are equal or the t test without assuming this, i.e., the test of Welch-Satterthwaite is calculated, default is \code{equalVariances = TRUE}.} \item{iterations}{Iterations for simulating the power for Fisher's combination test. If the power for more than one remaining stage is to be determined for Fisher's combination test, it is estimated via simulation with specified \cr \code{iterations}, the default value is 10000.} \item{seed}{Seed for simulating the power for Fisher's combination test. See above, default is a random seed.} }}
\item{directionUpper}{The direction of one-sided testing. Default is \code{directionUpper = TRUE} which means that larger values of the test statistics yield smaller p-values.}
\item{thetaH0}{The null hypothesis value, default is 0 for the normal and the binary case, it is 1 for the survival case. For testing a rate in one sample, a value thetaH0 in (0, 1) has to be specified for defining the null hypothesis H0: pi = thetaH0.\cr For non-inferiority designs, this is the non-inferiority bound.}
\item{nPlanned}{The sample size planned for the subsequent stages. It should be a vector with length equal to the number of remaining stages and is the overall sample size in the two treatment groups if two groups are considered.} }
\value{ Returns an \code{\link{AnalysisResults}} object. }
\description{ Calculates and returns the analysis results for the specified design and data. }
\details{ Given a design and a dataset, at given stage the function calculates the test results (effect sizes, stage-wise test statistics and p-values, overall p-values and test statistics, conditional rejection probability (CRP), conditional power, Repeated Confidence Intervals (RCIs), repeated overall p-values, final stage p-values, median unbiased effect estimates, and final confidence intervals). \cr \code{dataInput} is either an element of \code{DatasetMeans}, of \code{DatasetRates}, or of \code{DatasetSurvival} and should be created with the function \code{\link{getDataset}}. }
\section{Note}{ The conditional power is calculated only if effect size and sample size are specified. Median unbiased effect estimates and confidence intervals are calculated if a group sequential design or an inverse normal combination test design was chosen, i.e., it is not applicable for Fisher's p-value combination test design. A final stage p-value for Fisher's combination test is calculated only if a two-stage design was chosen. For Fisher's combination test, the conditional power for more than one remaining stage is estimated via simulation. }
\examples{ \donttest{ design <- getDesignGroupSequential() dataMeans <- getDataset( n = c(10,10), means = c(1.96,1.76), stDevs = c(1.92,2.01)) getAnalysisResults(design, dataMeans) } }
\seealso{ Alternatively the analysis results can be calculated separately using one of the following functions: \itemize{ \item \code{\link{getTestActions}}, \item \code{\link{getConditionalPower}}, \item \code{\link{getConditionalRejectionProbabilities}}, \item \code{\link{getRepeatedConfidenceIntervals}}, \item \code{\link{getRepeatedPValues}}, \item \code{\link{getFinalConfidenceInterval}}, \item \code{\link{getFinalPValue}}.
} } rpact/man/getTestActions.Rd0000644000176200001440000000116013574430660015373 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_base.R
\name{getTestActions} \alias{getTestActions} \title{Get Test Actions}
\usage{ getTestActions(design, stageResults, ...) }
\arguments{ \item{design}{The trial design.} \item{stageResults}{The results at given stage, obtained from \code{\link{getStageResults}}.} \item{stage}{The stage number (optional). Default: total number of existing stages in the data input.} }
\description{ Returns test actions. }
\details{ Returns the test actions of the specified design and stage results at the specified stage. } \keyword{internal}
rpact/man/getPowerRates.Rd0000644000176200001440000001020713574430660015230 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_sample_size_calculator.R
\name{getPowerRates} \alias{getPowerRates} \title{Get Power Rates}
\usage{ getPowerRates( design = NULL, ..., groups = 2, riskRatio = FALSE, thetaH0 = ifelse(riskRatio, 1, 0), pi1 = C_PI_1_DEFAULT, pi2 = 0.2, directionUpper = NA, maxNumberOfSubjects = NA_real_, allocationRatioPlanned = NA_real_ ) }
\arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. In this case, \code{alpha}, \code{beta}, and \code{sided} can be directly entered as arguments.}
\item{...}{Ensures that all arguments are named and that a warning will be displayed if unknown arguments are passed.}
\item{groups}{The number of treatment groups (1 or 2), default is \code{2}.}
\item{riskRatio}{If \code{riskRatio = TRUE} is specified, the power for one-sided testing of H0: \code{pi1/pi2 = thetaH0} is calculated, default is \code{FALSE}.}
\item{thetaH0}{The null hypothesis value. For one-sided testing, a value != 0 (or != 1 for testing the risk ratio \code{pi1/pi2}) can be specified, default is \code{0} or \code{1} for difference and ratio testing, respectively.}
\item{pi1}{The assumed probability in the active treatment group if two treatment groups are considered, or the alternative probability for a one treatment group design, default is \code{seq(0.2,0.5,0.1)}.}
\item{pi2}{The assumed probability in the reference group if two treatment groups are considered, default is \code{0.2}.}
\item{directionUpper}{Specifies the direction of the alternative, only applicable for one-sided testing, default is \code{TRUE}.}
\item{maxNumberOfSubjects}{\code{maxNumberOfSubjects > 0} needs to be specified.}
\item{allocationRatioPlanned}{The planned allocation ratio for a two treatment groups design, default is \code{1}.} }
\value{ Returns a \code{\link{TrialDesignPlanRates}} object. }
\description{ Returns the power, stopping probabilities, and expected sample size for testing rates in one or two samples at given sample sizes. }
\details{ At given design the function calculates the power, stopping probabilities, and expected sample size for testing rates at given maximum sample size. The sample sizes over the stages are calculated according to the specified information rate in the design. In a two treatment groups design, additionally, an allocation ratio = n1/n2 can be specified. If a null hypothesis value thetaH0 != 0 for testing the difference of two rates or thetaH0 != 1 for testing the risk ratio is specified, the formulas according to Farrington & Manning (Statistics in Medicine, 1990) are used (only one-sided testing).
Critical bounds and stopping for futility bounds are provided at the effect scale (rate, rate difference, or rate ratio, respectively). For the two-sample case, the calculation here is performed at fixed pi2 as given by the argument in the function. Note that the power calculation for rates is always based on the normal approximation. }
\examples{
# Calculate the power, stopping probabilities, and expected sample size in a two-armed
# design at given maximum sample size N = 200
# in a three-stage O'Brien & Fleming design with information rate vector (0.2,0.5,1),
# non-binding futility boundaries (0,0), i.e.,
# the study stops for futility if the p-value exceeds 0.5 at interim, and
# allocation ratio = 2 for a range of pi1 values when testing H0: pi1 - pi2 = -0.1:
getPowerRates(getDesignGroupSequential(informationRates = c(0.2,0.5,1),
futilityBounds = c(0,0)), groups = 2, thetaH0 = -0.1,
pi1 = seq(0.3, 0.6, 0.1), directionUpper = FALSE,
pi2 = 0.7, allocationRatioPlanned = 2, maxNumberOfSubjects = 200)
# Calculate the power, stopping probabilities, and expected sample size in a single
# arm design at given maximum sample size N = 60 in a three-stage two-sided
# O'Brien & Fleming design with information rate vector (0.2,0.5,1)
# for a range of pi1 values when testing H0: pi = 0.3:
getPowerRates(getDesignGroupSequential(informationRates = c(0.2,0.5,1),
sided = 2), groups = 1, thetaH0 = 0.3, pi1 = seq(0.3, 0.5, 0.05),
maxNumberOfSubjects = 60) }
rpact/man/PlotSettings.Rd0000644000176200001440000000302613574430660015075 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_plot_settings.R
\docType{class} \name{PlotSettings} \alias{PlotSettings} \title{Plot Settings}
\description{ Class for plot settings. }
\details{ Collects typical plot settings in an object.
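As a usage sketch (an assumption for illustration: \code{settings} denotes the plot settings object attached to a design or result object, and \code{p} an existing \code{ggplot2} object):\cr
\code{settings$adjustLegendFontSize(-2)} # legend font 2 points smaller\cr
\code{p <- settings$hideGridLines(p)} # hide the grid lines\cr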
} \section{Fields}{ \describe{ \item{\code{lineSize}}{The line size.} \item{\code{pointSize}}{The point size.} \item{\code{mainTitleFontSize}}{The main title font size.} \item{\code{axesTextFontSize}}{The text font size.} \item{\code{legendFontSize}}{The legend font size.} }}
\section{Methods}{ \describe{ \item{\code{adjustLegendFontSize(adjustingValue)}}{Adjusts the legend font size, e.g., run \cr \code{adjustLegendFontSize(-2)} # makes the font size 2 points smaller} \item{\code{enlargeAxisTicks(p)}}{Enlarges the axis ticks} \item{\code{expandAxesRange(p, x = NA_real_, y = NA_real_)}}{Expands the axes range} \item{\code{hideGridLines(p)}}{Hides the grid lines} \item{\code{setAxesAppearance(p)}}{Sets the font size and face of the axes titles and texts} \item{\code{setColorPalette(p, palette, mode = c("colour", "fill", "all"))}}{Sets the color palette} \item{\code{setLegendBorder(p)}}{Sets the legend border} \item{\code{setMainTitle(p, mainTitle, subtitle = NA_character_)}}{Sets the main title} \item{\code{setMarginAroundPlot(p, margin = 0.2)}}{Sets the margin around the plot, e.g., run \cr \code{setMarginAroundPlot(p, .2)} or \cr \code{setMarginAroundPlot(p, c(.1, .2, .1, .2))}} \item{\code{setTheme(p)}}{Sets the theme} }} \keyword{internal}
rpact/man/getAvailablePlotTypes.Rd0000644000176200001440000000201613574430660016700 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_core_plot.R
\name{getAvailablePlotTypes} \alias{getAvailablePlotTypes} \title{Get Available Plot Types}
\usage{ getAvailablePlotTypes( obj, output = c("numeric", "caption", "numcap", "capnum"), numberInCaptionEnabled = FALSE ) }
\arguments{ \item{obj}{The object for which the plot types shall be identified, e.g., produced by \code{\link{getDesignGroupSequential}} or \code{\link{getSampleSizeMeans}}.} \item{output}{The output type. Can be one of \code{c("numeric", "caption", "numcap", "capnum")}.} \item{numberInCaptionEnabled}{If \code{TRUE}, the number will be added to the caption, default is \code{FALSE}.} }
\description{ Function to identify the available plot types of an object. }
\details{ \code{output}: \enumerate{ \item \code{numeric}: numeric output \item \code{caption}: caption as character output \item \code{numcap}: list with number and caption \item \code{capnum}: list with caption and number } } \keyword{internal}
rpact/man/getLogLevel.Rd0000644000176200001440000000056213574441634014654 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_core_utilities.R
\name{getLogLevel} \alias{getLogLevel} \title{Get Log Level}
\usage{ getLogLevel() }
\description{ Returns the current \code{rpact} log level. }
\details{ This function is intended for debugging purposes only. }
\examples{ \dontrun{ getLogLevel() } } \keyword{internal}
rpact/man/plot.TrialDesign.Rd0000644000176200001440000000574713574430660015630 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design.R
\name{plot.TrialDesign} \alias{plot.TrialDesign} \title{Trial Design Plotting}
\usage{ \method{plot}{TrialDesign}( x, y, main = NA_character_, xlab = NA_character_, ylab = NA_character_, type = 1, palette = "Set1", theta = seq(-1, 1, 0.01), nMax = NA_integer_, plotPointsEnabled = NA, legendPosition = NA_integer_, showSource = FALSE, ...
) } \arguments{ \item{x}{The trial design, obtained from \cr \code{\link{getDesignGroupSequential}}, \cr \code{\link{getDesignInverseNormal}} or \cr \code{\link{getDesignFisher}}.}
\item{y}{Not available for this kind of plot (is only defined to be compatible to the generic plot function).}
\item{main}{The main title.} \item{xlab}{The x-axis label.} \item{ylab}{The y-axis label.}
\item{type}{The plot type (default = \code{1}). The following plot types are available: \itemize{ \item \code{1}: creates a 'Boundaries' plot \item \code{3}: creates a 'Stage Levels' plot \item \code{4}: creates a 'Type One Error Spending' plot \item \code{5}: creates a 'Power and Early Stopping' plot \item \code{6}: creates an 'Average Sample Size and Power / Early Stop' plot \item \code{7}: creates a 'Power' plot \item \code{8}: creates an 'Early Stopping' plot \item \code{9}: creates an 'Average Sample Size' plot }}
\item{palette}{The palette, default is \code{"Set1"}.} \item{theta}{A vector of theta values.} \item{nMax}{The maximum sample size.} \item{plotPointsEnabled}{If \code{TRUE}, additional points will be plotted.}
\item{legendPosition}{The position of the legend. By default (\code{NA_integer_}) the algorithm tries to find a suitable position. Choose one of the following values to specify the position manually: \itemize{ \item \code{-1}: no legend will be shown \item \code{NA}: the algorithm tries to find a suitable position \item \code{0}: legend position outside plot \item \code{1}: legend position left top \item \code{2}: legend position left center \item \code{3}: legend position left bottom \item \code{4}: legend position right top \item \code{5}: legend position right center \item \code{6}: legend position right bottom }}
\item{showSource}{If \code{TRUE}, the parameter names of the object will be printed which were used to create the plot; that may be, e.g., useful to check the values or to create own plots with \code{\link[graphics]{plot}}.}
\item{...}{Optional \code{ggplot2} arguments.} }
\value{ A \code{ggplot2} object. }
\description{ Plots a trial design. }
\details{ Generic function to plot a trial design. }
\examples{ design <- getDesignInverseNormal(kMax = 3, alpha = 0.025, typeOfDesign = "asKD", gammaA = 2, informationRates = c(0.2, 0.7, 1), typeBetaSpending = "bsOF") if (require(ggplot2)) { plot(design) # default: type = 1 } }
\seealso{ \code{\link{plot.TrialDesignSet}} to compare different designs or design parameters visually. }
rpact/man/NumberOfSubjects.Rd0000644000176200001440000000057413574441634015666 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_event_probabilities.R
\docType{class} \name{NumberOfSubjects} \alias{NumberOfSubjects} \title{Number Of Subjects}
\description{ Class for definition of number of subjects results. }
\details{ \code{NumberOfSubjects} is a class for definition of number of subjects results. } \keyword{internal}
rpact/man/TrialDesignSet_names.Rd0000644000176200001440000000070313574441634016504 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_set.R
\name{TrialDesignSet_names} \alias{TrialDesignSet_names} \alias{names.TrialDesignSet} \title{The Names of a Trial Design Set object}
\usage{ \method{names}{TrialDesignSet}(x) }
\description{ Function to get the names of a \code{TrialDesignSet} object. }
\details{ Returns the names of a design set that can be accessed by the user.
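A minimal sketch (the design parameters are arbitrary and follow the \code{add} method description of \code{TrialDesignSet}):\cr
\code{designSet <- getDesignSet(design = getDesignGroupSequential(), deltaWT = c(0.1, 0.3, 0.4))}\cr
\code{names(designSet)} # the accessible field names\cr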
} \keyword{internal} rpact/DESCRIPTION0000644000176200001440000000410713574450502013100 0ustar liggesusersPackage: rpact
Title: Confirmatory Adaptive Clinical Trial Design and Analysis
Version: 2.0.6
Date: 2019-12-12
Authors@R: c( person("Gernot", "Wassmer", email = "gernot.wassmer@rpact.com", role = c("aut")), person("Friedrich", "Pahlke", email = "friedrich.pahlke@rpact.com", role = c("aut", "cre")))
Description: Design and analysis of confirmatory adaptive clinical trials with continuous, binary, and survival endpoints according to the methods described in the monograph by Wassmer and Brannath (2016). This includes classical group sequential as well as multi-stage adaptive hypothesis tests that are based on the combination testing principle.
License: GPL-3
Encoding: UTF-8
LazyData: true
URL: https://www.rpact.org
BugReports: https://bugreport.rpact.org
Language: en-US
Depends: R (>= 3.4.0)
Imports: methods, stats, utils, graphics, tools, Rcpp (>= 1.0.0)
LinkingTo: Rcpp
Suggests: parallel, ggplot2 (>= 2.2.0), testthat (>= 2.0.0)
RoxygenNote: 7.0.2
Collate: 'RcppExports.R' 'f_core_constants.R' 'class_core_parameter_set.R' 'class_core_plot_settings.R' 'class_analysis_dataset.R' 'f_core_plot.R' 'class_design.R' 'class_analysis_stage_results.R' 'class_analysis_results.R' 'f_core_utilities.R' 'class_time.R' 'class_design_set.R' 'f_core_assertions.R' 'f_design_utilities.R' 'class_design_plan.R' 'class_design_power_and_asn.R' 'class_event_probabilities.R' 'f_simulation_survival.R' 'class_simulation_results.R' 'class_summary.R' 'f_analysis_base.R' 'f_analysis_base_means.R' 'f_analysis_base_rates.R' 'f_analysis_base_survival.R' 'f_core_output_formats.R' 'f_design_fisher_combination_test.R' 'f_design_group_sequential.R' 'f_design_sample_size_calculator.R' 'f_simulation_means.R' 'f_simulation_rates.R' 'pkgname.R'
NeedsCompilation: yes
Packaged: 2019-12-12 13:48:22 UTC; fried
Author: Gernot Wassmer [aut], Friedrich Pahlke [aut, cre]
Maintainer: Friedrich Pahlke <friedrich.pahlke@rpact.com>
Repository: CRAN
Date/Publication: 2019-12-12 14:40:02 UTC
rpact/tests/0000755000176200001440000000000013574442443012537 5ustar liggesusersrpact/tests/testthat/0000755000176200001440000000000013574450502014372 5ustar liggesusersrpact/tests/testthat/test-class_summary.R0000644000176200001440000005576113574411563020374 0ustar liggesusers######################################################################################
# #
# -- Unit tests -- #
# #
# This file is part of the R package RPACT - R Package for Adaptive Clinical Trials.
# # # # File version: 1.0.0 # # Date: 06 November 2019, 17:08:57 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing class 'SummaryFactory'") test_that("Testing 'summary.ParameterSet': no errors occur", { ## test designs invisible(capture.output(expect_error(summary(getDesignGroupSequential(beta = 0.05, typeOfDesign = "asKD", gammaA = 1, typeBetaSpending = "bsOF")), NA))) invisible(capture.output(expect_error(summary(getDesignGroupSequential(kMax = 1)), NA))) invisible(capture.output(expect_error(summary(getDesignGroupSequential(kMax = 4, sided = 2)), NA))) invisible(capture.output(expect_error(summary(getDesignGroupSequential(kMax = 4, sided = 2), digits = 0), NA))) invisible(capture.output(expect_error(summary(getDesignGroupSequential(kMax = 1, sided = 2)), NA))) invisible(capture.output(expect_error(summary(getDesignGroupSequential(futilityBounds = c(-6, 0))), NA))) invisible(capture.output(expect_error(summary(getDesignGroupSequential(futilityBounds = c(-6, 0)), digits = 5), NA))) .skipTestifDisabled() invisible(capture.output(expect_error(summary(getDesignInverseNormal(kMax = 1)), NA))) invisible(capture.output(expect_error(summary(getDesignInverseNormal(futilityBounds = c(0, 1))), NA))) invisible(capture.output(expect_error(summary(getDesignInverseNormal(kMax = 1)), NA))) invisible(capture.output(expect_error(summary(getDesignInverseNormal(kMax = 4, sided = 2)), NA))) invisible(capture.output(expect_error(summary(getDesignInverseNormal(kMax = 4, sided = 2), digits = 0), NA))) invisible(capture.output(expect_error(summary(getDesignInverseNormal(kMax = 1, sided = 2)), NA))) invisible(capture.output(expect_error(summary(getDesignFisher()), NA))) invisible(capture.output(expect_error(summary(getDesignFisher(alpha0Vec = c(0.1, 0.2))), NA))) invisible(capture.output(expect_error(summary(getDesignFisher(kMax = 1)), NA))) invisible(capture.output(expect_error(summary(getDesignFisher(kMax = 4, sided = 2), digits = 5), NA))) invisible(capture.output(expect_error(summary(getDesignFisher(kMax = 4, sided = 2), digits = 0), NA))) invisible(capture.output(expect_error(summary(getDesignFisher(kMax = 1, sided = 2)), NA))) ## test design plans - means invisible(capture.output(expect_error(summary(getSampleSizeMeans(sided = 2, alternative = -0.5)), NA))) invisible(capture.output(expect_warning(summary(getSampleSizeMeans(sided = 2), alternative = -0.5)))) # warning expected invisible(capture.output(expect_error(summary(getPowerMeans(sided = 1, alternative = c(-0.5,-0.3), maxNumberOfSubjects = 100, directionUpper = FALSE)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 0.5, sided = 1, stDev = 2.5)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 0.5, sided = 1, stDev = 1, groups = 1)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(thetaH0 = 0, sided = 2, stDev = 1, groups = 1)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 1.2, sided = 2, stDev = 5)), NA))) 
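	# A hypothetical extra check (not part of the original suite): per the context above,
	# 'summary' is expected to return a 'SummaryFactory' object; testthat's 'expect_is'
	# (available for testthat >= 2.0.0, see DESCRIPTION) is assumed here.
	invisible(capture.output(expect_is(summary(getSampleSizeMeans()), "SummaryFactory")))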
invisible(capture.output(expect_error(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 1.2, sided = 2, stDev = 5, allocationRatioPlanned = 0)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 1.2, sided = 2, stDev = 5, groups = 1)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2)))), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = 0), NA))) invisible(capture.output(expect_error(summary(getPowerMeans(getDesignGroupSequential(futilityBounds = c(1, 2)), maxNumberOfSubjects = 100, alternative = 1)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(getDesignGroupSequential(kMax = 4, sided = 2))), NA))) invisible(capture.output(expect_error(summary(getPowerMeans(getDesignGroupSequential(kMax = 4, sided = 2), maxNumberOfSubjects = 100)), NA))) invisible(capture.output(expect_error(summary(getPowerMeans(getDesignGroupSequential(kMax = 1, sided = 2), maxNumberOfSubjects = 100)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2)))), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = 4), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = 3), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = 2), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = -1), NA))) ## test design plans - rates invisible(capture.output(expect_error(summary(getSampleSizeRates(pi2 = 0.3)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates(groups = 1, thetaH0 = 0.3)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates(groups = 1, thetaH0 = 0.45)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates(groups = 2, thetaH0 = 0.45, allocationRatioPlanned = 0)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates(getDesignGroupSequential(futilityBounds = c(1, 2)))), NA))) invisible(capture.output(expect_error(summary(getPowerRates(getDesignGroupSequential(futilityBounds = c(1, 2)), maxNumberOfSubjects = 100)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 4, sided = 2))), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 4, sided = 2), groups = 1, thetaH0 = 0.3)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 1, sided = 2), groups = 1, thetaH0 = 0.2, pi1 = c(0.4,0.5))), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 1, sided = 2), groups = 1, thetaH0 = 0.2, pi1 = 0.4)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 1, sided = 2), groups = 2, thetaH0 = 0, pi1 = 0.25)), NA))) invisible(capture.output(expect_error(summary(getPowerRates(getDesignGroupSequential(kMax = 4, sided = 2), maxNumberOfSubjects = 100)), NA))) ## test design plans - survival invisible(capture.output(expect_error(summary(getSampleSizeSurvival()), NA))) 
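	# Hypothetical additional survival check, mirroring the lambda2/hazardRatio
	# pattern used below; the parameter values are arbitrary.
	invisible(capture.output(expect_error(summary(getSampleSizeSurvival(lambda2 = 0.02, hazardRatio = 1.2)), NA)))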
invisible(capture.output(expect_error(summary(getSampleSizeSurvival(lambda2 = 0.3, hazardRatio = 1.2)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(lambda2 = 0.3, hazardRatio = c(1.2, 2))), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(pi2 = 0.3, hazardRatio = 1.2)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(pi1 = 0.1, pi2 = 0.3)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(lambda2 = 0.03, lambda1 = c(0.040))), NA))) piecewiseSurvivalTime <- list( "0 - <6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, "15 - <21" = 0.01, ">=21" = 0.007) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(piecewiseSurvivalTime = piecewiseSurvivalTime, hazardRatio = 1.2)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(getDesignGroupSequential(futilityBounds = c(1, 2)))), NA))) invisible(capture.output(expect_error(summary(getPowerSurvival(getDesignGroupSequential(futilityBounds = c(1, 2)), maxNumberOfSubjects = 100, maxNumberOfEvents = 60)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(getDesignGroupSequential(kMax = 4, sided = 2))), NA))) invisible(capture.output(expect_error(summary(getPowerSurvival(getDesignGroupSequential(kMax = 4, sided = 2), maxNumberOfSubjects = 100, maxNumberOfEvents = 60)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(sided = 2, lambda2 = log(2)/6, lambda1 = log(2)/8)), NA))) invisible(capture.output(expect_error(summary(getPowerSurvival(sided = 2, maxNumberOfSubjects = 200, maxNumberOfEvents = 40, lambda2 = log(2)/6, lambda1 = log(2)/8)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(getDesignGroupSequential(sided = 2), lambda2 = log(2)/6, hazardRatio = c(0.55), accrualTime = c(0,10), accrualIntensity = 60)), NA))) invisible(capture.output(expect_error(summary(getPowerSurvival(getDesignGroupSequential(kMax = 2), maxNumberOfEvents = 200, maxNumberOfSubjects = 400, lambda2 = log(2) / 60, lambda1 = log(2) / 50, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30)), NA))) invisible(capture.output(expect_error(summary(getPowerSurvival(getDesignGroupSequential(kMax = 3), maxNumberOfEvents = 200, maxNumberOfSubjects = 400, lambda2 = log(2) / 60, lambda1 = c(log(2) / 50, log(2) / 60), dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30)), NA))) invisible(capture.output(expect_error(summary(getPowerSurvival(getDesignGroupSequential(kMax = 3), maxNumberOfEvents = 200, maxNumberOfSubjects = 400, lambda2 = log(2) / 60, hazardRatio = c(0.7, 0.8),directionUpper = FALSE, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30)), NA))) design <- getDesignGroupSequential( sided = 2, alpha = 0.05, beta = 0.2, informationRates = c(0.6, 1), typeOfDesign = "asOF", twoSidedPower = FALSE) invisible(capture.output(expect_error(summary(getSampleSizeSurvival( design, lambda2 = log(2) / 60, hazardRatio = 0.74, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30, followUpTime = 12)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival( design, lambda2 = log(2) / 60, lambda1 = log(2) / 50, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30, followUpTime = 12)), NA))) 
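	# Hypothetical spot check: the 'digits' argument, already exercised for design
	# and sample size summaries above, is assumed to work for survival summaries too.
	invisible(capture.output(expect_error(summary(getSampleSizeSurvival(), digits = 3), NA)))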
    invisible(capture.output(expect_error(summary(getSampleSizeSurvival(getDesignGroupSequential(kMax = 4, sided = 2))), NA)))

    ## simulations
    design <- getDesignInverseNormal(alpha = 0.05, kMax = 4, futilityBounds = c(0, 0, 0), sided = 1, typeOfDesign = "WT", deltaWT = 0.1)
    invisible(capture.output(expect_error(summary(getSimulationSurvival(design, lambda2 = log(2) / 60, lambda1 = c(log(2) / 80), maxNumberOfSubjects = 1000, plannedEvents = c(50, 100, 150, 200), seed = 12345, directionUpper = FALSE)), NA)))
    invisible(capture.output(expect_error(summary(getSimulationSurvival(design, lambda2 = log(2) / 60, hazardRatio = c(1.2, 1.4), maxNumberOfSubjects = 1000, plannedEvents = c(50, 100, 150, 200), seed = 12345)), NA)))
    design <- getDesignGroupSequential(typeOfDesign = "P", futilityBounds = c(1, 1))
    invisible(capture.output(expect_error(summary(getSampleSizeMeans(design)), NA)))
    invisible(capture.output(expect_error(summary(getSimulationMeans(design, stDev = 4, plannedSubjects = (1:3) * 200, alternative = c(1, 2))), NA)))
    invisible(capture.output(expect_error(summary(getSimulationRates(design, plannedSubjects = (1:3) * 200, pi1 = c(0.3, 0.4), maxNumberOfIterations = 1000, minNumberOfSubjectsPerStage = c(NA, 40, 40), maxNumberOfSubjectsPerStage = c(NA, 40, 400), conditionalPower = 0.8)), NA)))
    invisible(capture.output(expect_error(summary(getSimulationMeans(getDesignGroupSequential(kMax = 1), stDev = 4, plannedSubjects = 200, alternative = c(1))), NA)))
})

test_that("Testing 'summary.ParameterSet': output will be produced", {
    ## test designs
    expect_output(summary(getDesignGroupSequential(beta = 0.05, typeOfDesign = "asKD", gammaA = 1, typeBetaSpending = "bsOF"))$show())
    expect_output(summary(getDesignGroupSequential(kMax = 1))$show())
    expect_output(summary(getDesignGroupSequential(kMax = 4, sided = 2))$show())
    expect_output(summary(getDesignGroupSequential(kMax = 4, sided = 2), digits = 0)$show())
    expect_output(summary(getDesignGroupSequential(kMax = 1, sided = 2))$show())
    expect_output(summary(getDesignGroupSequential(futilityBounds = c(-6, 0)))$show())
    expect_output(summary(getDesignGroupSequential(futilityBounds = c(-6, 0)), digits = 5)$show())

    .skipTestifDisabled()

    expect_output(summary(getDesignInverseNormal(kMax = 1))$show())
    expect_output(summary(getDesignInverseNormal(futilityBounds = c(0, 1)))$show())
    expect_output(summary(getDesignInverseNormal(kMax = 1))$show())
    expect_output(summary(getDesignInverseNormal(kMax = 4, sided = 2))$show())
    expect_output(summary(getDesignInverseNormal(kMax = 4, sided = 2), digits = 0)$show())
    expect_output(summary(getDesignInverseNormal(kMax = 1, sided = 2))$show())
    expect_output(summary(getDesignFisher())$show())
    expect_output(summary(getDesignFisher(alpha0Vec = c(0.1, 0.2)))$show())
    expect_output(summary(getDesignFisher(kMax = 1))$show())
    expect_output(summary(getDesignFisher(kMax = 4, sided = 2), digits = 5)$show())
    expect_output(summary(getDesignFisher(kMax = 4, sided = 2), digits = 0)$show())
    expect_output(summary(getDesignFisher(kMax = 1, sided = 2))$show())

    ## test design plans - means
    expect_output(summary(getSampleSizeMeans(sided = 2, alternative = -0.5))$show())
    expect_output(summary(getPowerMeans(sided = 1, alternative = c(-0.5, -0.3), maxNumberOfSubjects = 100, directionUpper = FALSE))$show())
    expect_output(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 0.5, sided = 1, stDev = 2.5))$show())
    expect_output(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 0.5, sided = 1, stDev = 1, groups = 1))$show())
    expect_output(summary(getSampleSizeMeans(thetaH0 = 0, sided = 2, stDev = 1, groups = 1))$show())
    expect_output(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 1.2, sided = 2, stDev = 5))$show())
    expect_output(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 1.2, sided = 2, stDev = 5, allocationRatioPlanned = 0))$show())
    expect_output(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 1.2, sided = 2, stDev = 5, groups = 1))$show())
    expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))))$show())
    expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = 0)$show())
    expect_output(summary(getPowerMeans(getDesignGroupSequential(futilityBounds = c(1, 2)), maxNumberOfSubjects = 100, alternative = 1))$show())
    expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(kMax = 4, sided = 2)))$show())
    expect_output(summary(getPowerMeans(getDesignGroupSequential(kMax = 4, sided = 2), maxNumberOfSubjects = 100))$show())
    expect_output(summary(getPowerMeans(getDesignGroupSequential(kMax = 1, sided = 2), maxNumberOfSubjects = 100))$show())
    expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))))$show())
    expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = 4)$show())
    expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = 3)$show())
    expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = 2)$show())
    expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = -1)$show())

    ## test design plans - rates
    expect_output(summary(getSampleSizeRates(pi2 = 0.3))$show())
    expect_output(summary(getSampleSizeRates(groups = 1, thetaH0 = 0.3))$show())
    expect_output(summary(getSampleSizeRates(groups = 1, thetaH0 = 0.45))$show())
    expect_output(summary(getSampleSizeRates(groups = 2, thetaH0 = 0.45, allocationRatioPlanned = 0))$show())
    expect_output(summary(getSampleSizeRates(getDesignGroupSequential(futilityBounds = c(1, 2))))$show())
    expect_output(summary(getPowerRates(getDesignGroupSequential(futilityBounds = c(1, 2)), maxNumberOfSubjects = 100))$show())
    expect_output(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 4, sided = 2)))$show())
    expect_output(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 4, sided = 2), groups = 1, thetaH0 = 0.3))$show())
    expect_output(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 1, sided = 2), groups = 1, thetaH0 = 0.2, pi1 = c(0.4, 0.5)))$show())
    expect_output(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 1, sided = 2), groups = 1, thetaH0 = 0.2, pi1 = 0.4))$show())
    expect_output(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 1, sided = 2), groups = 2, thetaH0 = 0, pi1 = 0.25))$show())
    expect_output(summary(getPowerRates(getDesignGroupSequential(kMax = 4, sided = 2), maxNumberOfSubjects = 100))$show())

    ## test design plans - survival
    expect_output(summary(getSampleSizeSurvival())$show())
    expect_output(summary(getSampleSizeSurvival(lambda2 = 0.3, hazardRatio = 1.2))$show())
    expect_output(summary(getSampleSizeSurvival(lambda2 = 0.3, hazardRatio = c(1.2, 2)))$show())
    expect_output(summary(getSampleSizeSurvival(pi2 = 0.3, hazardRatio = 1.2))$show())
    expect_output(summary(getSampleSizeSurvival(pi1 = 0.1, pi2 = 0.3))$show())
    expect_output(summary(getSampleSizeSurvival(lambda2 = 0.03, lambda1 = c(0.040)))$show())
    piecewiseSurvivalTime <- list(
        "0 - <6"   = 0.025,
        "6 - <9"   = 0.04,
        "9 - <15"  = 0.015,
        "15 - <21" = 0.01,
        ">=21"     = 0.007)
    expect_output(summary(getSampleSizeSurvival(piecewiseSurvivalTime = piecewiseSurvivalTime, hazardRatio = 1.2))$show())
    expect_output(summary(getSampleSizeSurvival(getDesignGroupSequential(futilityBounds = c(1, 2))))$show())
    expect_output(summary(getPowerSurvival(getDesignGroupSequential(futilityBounds = c(1, 2)), maxNumberOfSubjects = 100, maxNumberOfEvents = 60))$show())
    expect_output(summary(getSampleSizeSurvival(getDesignGroupSequential(kMax = 4, sided = 2)))$show())
    expect_output(summary(getPowerSurvival(getDesignGroupSequential(kMax = 4, sided = 2), maxNumberOfSubjects = 100, maxNumberOfEvents = 60))$show())
    expect_output(summary(getSampleSizeSurvival(sided = 2, lambda2 = log(2) / 6, lambda1 = log(2) / 8))$show())
    expect_output(summary(getPowerSurvival(sided = 2, maxNumberOfSubjects = 200, maxNumberOfEvents = 40, lambda2 = log(2) / 6, lambda1 = log(2) / 8))$show())
    expect_output(summary(getSampleSizeSurvival(getDesignGroupSequential(sided = 2), lambda2 = log(2) / 6, hazardRatio = c(0.55), accrualTime = c(0, 10), accrualIntensity = 60))$show())
    expect_output(summary(getPowerSurvival(getDesignGroupSequential(kMax = 2), maxNumberOfEvents = 200, maxNumberOfSubjects = 400, lambda2 = log(2) / 60, lambda1 = log(2) / 50, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30))$show())
    expect_output(summary(getPowerSurvival(getDesignGroupSequential(kMax = 3), maxNumberOfEvents = 200, maxNumberOfSubjects = 400, lambda2 = log(2) / 60, lambda1 = c(log(2) / 50, log(2) / 60), dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30))$show())
    expect_output(summary(getPowerSurvival(getDesignGroupSequential(kMax = 3), maxNumberOfEvents = 200, maxNumberOfSubjects = 400, lambda2 = log(2) / 60, hazardRatio = c(0.7, 0.8), directionUpper = FALSE, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30))$show())
    design <- getDesignGroupSequential(sided = 2, alpha = 0.05, beta = 0.2,
        informationRates = c(0.6, 1), typeOfDesign = "asOF", twoSidedPower = FALSE)
    expect_output(summary(getSampleSizeSurvival(design, lambda2 = log(2) / 60, hazardRatio = 0.74, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30, followUpTime = 12))$show())
    expect_output(summary(getSampleSizeSurvival(design, lambda2 = log(2) / 60, lambda1 = log(2) / 50, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30, followUpTime = 12))$show())
    expect_output(summary(getSampleSizeSurvival(getDesignGroupSequential(kMax = 4, sided = 2)))$show())

    ## simulations
    design <- getDesignInverseNormal(alpha = 0.05, kMax = 4, futilityBounds = c(0, 0, 0), sided = 1, typeOfDesign = "WT", deltaWT = 0.1)
    expect_output(summary(getSimulationSurvival(design, lambda2 = log(2) / 60, lambda1 = c(log(2) / 80), maxNumberOfSubjects = 1000, plannedEvents = c(50, 100, 150, 200), seed = 12345, directionUpper = FALSE))$show())
    expect_output(summary(getSimulationSurvival(design, lambda2 = log(2) / 60, hazardRatio = c(1.2, 1.4), maxNumberOfSubjects = 1000, plannedEvents = c(50, 100, 150, 200), seed = 12345))$show())
    design <- getDesignGroupSequential(typeOfDesign = "P", futilityBounds = c(1, 1))
    expect_output(summary(getSampleSizeMeans(design))$show())
    expect_output(summary(getSimulationMeans(design, stDev = 4, plannedSubjects = (1:3) * 200, alternative = c(1, 2)))$show())
    expect_output(summary(getSimulationRates(design, plannedSubjects = (1:3) * 200, pi1 = c(0.3, 0.4), maxNumberOfIterations = 1000, minNumberOfSubjectsPerStage = c(NA, 40, 40), maxNumberOfSubjectsPerStage = c(NA, 40, 400), conditionalPower = 0.8))$show())
    expect_output(summary(getSimulationMeans(getDesignGroupSequential(kMax = 1), stDev = 4, plannedSubjects = 200, alternative = 1))$show())
})
rpact/tests/testthat/test_generic_functions.R0000644000176200001440000001636113573723560021275 0ustar liggesusers######################################################################################
#                                                                                    #
# -- Unit tests --                                                                   #
#                                                                                    #
# This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. #
#                                                                                    #
# File version: 1.0.0                                                                #
# Date: 06 November 2019, 17:08:57                                                   #
# Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD                             #
# Licensed under "GNU Lesser General Public License" version 3                       #
# License text can be found here: https://www.r-project.org/Licenses/LGPL-3          #
#                                                                                    #
# RPACT company website: https://www.rpact.com                                       #
# RPACT package website: https://www.rpact.org                                       #
#                                                                                    #
# Contact us for information about our services: info@rpact.com                      #
#                                                                                    #
######################################################################################

context("Testing class 'SummaryFactory'")

test_that("Testing 'summary.ParameterSet': no errors occur", {
    design <- getDesignGroupSequential(alpha = 0.05, kMax = 4, sided = 1, typeOfDesign = "WT", deltaWT = 0.1)
    designFisher <- getDesignFisher(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.5, 0.8, 1), alpha0Vec = rep(0.4, 3))
    designCharacteristics <- getDesignCharacteristics(design)
    powerAndASN <- getPowerAndAverageSampleNumber(design, theta = 1, nMax = 100)
    designSet <- getDesignSet(design = design, deltaWT = c(0.3, 0.4))
    dataset <- getDataset(
        n1 = c(22, 11, 22, 11),
        n2 = c(22, 13, 22, 13),
        means1 = c(1, 1.1, 1, 1),
        means2 = c(1.4, 1.5, 3, 2.5),
        stDevs1 = c(1, 2, 2, 1.3),
        stDevs2 = c(1, 2, 2, 1.3)
    )
    stageResults <- getStageResults(design, dataset)
    analysisResults <- getAnalysisResults(design, dataset)
    designPlan <- getSampleSizeMeans(design)
    simulationResults <- getSimulationSurvival(design, maxNumberOfSubjects = 100, plannedEvents = c(50, 100, 150, 200), seed = 12345)
    piecewiseSurvivalTime <- getPiecewiseSurvivalTime(list(
        "0 - <6"   = 0.025,
        "6 - <9"   = 0.04,
        "9 - <15"  = 0.015,
        "15 - <21" = 0.01,
        ">=21"     = 0.007), hazardRatio = 0.8)
    accrualTime <- getAccrualTime(list(
        "0 - <12"  = 15,
        "12 - <13" = 21,
        "13 - <14" = 27,
        "14 - <15" = 33,
        "15 - <16" = 39,
        ">=16"     = 45), maxNumberOfSubjects = 1400)

    invisible(capture.output(names(design)))
    invisible(capture.output(names(designFisher)))
    invisible(capture.output(names(designCharacteristics)))
    invisible(capture.output(names(powerAndASN)))
    invisible(capture.output(names(designSet)))
    invisible(capture.output(names(dataset)))
    invisible(capture.output(names(stageResults)))
    invisible(capture.output(names(analysisResults)))
    invisible(capture.output(names(designPlan)))
    invisible(capture.output(names(simulationResults)))
    invisible(capture.output(names(piecewiseSurvivalTime)))
    invisible(capture.output(names(accrualTime)))

    invisible(capture.output(design$criticalValues))
    invisible(capture.output(design[["criticalValues"]]))

    invisible(capture.output(print(design)))
    invisible(capture.output(print(designFisher)))
    invisible(capture.output(print(designCharacteristics)))
    invisible(capture.output(print(powerAndASN)))
    invisible(capture.output(print(designSet)))
    invisible(capture.output(print(dataset)))
    invisible(capture.output(print(stageResults)))
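    # The blocks above and below exercise the exported S3 generics (names, print,
    # summary, as.data.frame, as.matrix) on one instance of every major rpact
    # result class; each call only has to complete without an error, and its
    # output is discarded via capture.output().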
    invisible(capture.output(print(analysisResults)))
    invisible(capture.output(print(designPlan)))
    invisible(capture.output(print(simulationResults)))
    invisible(capture.output(print(piecewiseSurvivalTime)))
    invisible(capture.output(print(accrualTime)))

    invisible(capture.output(summary(design)))
    invisible(capture.output(summary(designFisher)))
    invisible(capture.output(summary(designCharacteristics)))
    invisible(capture.output(summary(powerAndASN)))
    invisible(capture.output(summary(designSet)))
    invisible(capture.output(summary(dataset)))
    invisible(capture.output(summary(stageResults)))
    invisible(capture.output(summary(analysisResults)))
    invisible(capture.output(summary(designPlan)))
    invisible(capture.output(summary(simulationResults)))
    invisible(capture.output(summary(piecewiseSurvivalTime)))
    invisible(capture.output(summary(accrualTime)))

    invisible(capture.output(as.data.frame(design)))
    invisible(capture.output(as.data.frame(designFisher)))
    invisible(capture.output(as.data.frame(designCharacteristics)))
    invisible(capture.output(as.data.frame(powerAndASN)))
    invisible(capture.output(as.data.frame(designSet)))
    invisible(capture.output(as.data.frame(dataset)))
    invisible(capture.output(as.data.frame(stageResults)))
    invisible(capture.output(as.data.frame(analysisResults)))
    invisible(capture.output(as.data.frame(designPlan)))
    invisible(capture.output(as.data.frame(simulationResults)))
    invisible(capture.output(as.data.frame(piecewiseSurvivalTime)))
    invisible(capture.output(as.data.frame(accrualTime)))

    invisible(capture.output(as.data.frame(design, niceColumnNamesEnabled = FALSE)))
    invisible(capture.output(as.data.frame(designFisher, niceColumnNamesEnabled = FALSE)))
    invisible(capture.output(as.data.frame(designCharacteristics, niceColumnNamesEnabled = FALSE)))
    invisible(capture.output(as.data.frame(powerAndASN, niceColumnNamesEnabled = FALSE)))
    invisible(capture.output(as.data.frame(designSet, niceColumnNamesEnabled = FALSE)))
    invisible(capture.output(as.data.frame(dataset, niceColumnNamesEnabled = FALSE)))
    invisible(capture.output(as.data.frame(stageResults, niceColumnNamesEnabled = FALSE)))
    invisible(capture.output(as.data.frame(analysisResults, niceColumnNamesEnabled = FALSE)))
    invisible(capture.output(as.data.frame(designPlan, niceColumnNamesEnabled = FALSE)))
    invisible(capture.output(as.data.frame(simulationResults, niceColumnNamesEnabled = FALSE)))
    invisible(capture.output(as.data.frame(piecewiseSurvivalTime, niceColumnNamesEnabled = FALSE)))
    invisible(capture.output(as.data.frame(accrualTime, niceColumnNamesEnabled = FALSE)))

    invisible(capture.output(as.matrix(design)))
    invisible(capture.output(as.matrix(designFisher)))
    invisible(capture.output(as.matrix(designCharacteristics)))
    invisible(capture.output(as.matrix(powerAndASN)))
    invisible(capture.output(as.matrix(designSet)))
    invisible(capture.output(as.matrix(dataset)))
    invisible(capture.output(as.matrix(stageResults)))
    invisible(capture.output(as.matrix(analysisResults)))
    invisible(capture.output(as.matrix(designPlan)))
    invisible(capture.output(as.matrix(simulationResults)))
    invisible(capture.output(as.matrix(piecewiseSurvivalTime)))
    invisible(capture.output(as.matrix(accrualTime)))
})
rpact/tests/testthat/test-f_simulation_survival.R0000644000176200001440000026527013567165663022137 0ustar liggesusers######################################################################################
#                                                                                    #
# -- Unit tests --                                                                   #
#                                                                                    #
# This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. #
#                                                                                    #
# File version: 1.0.0                                                                #
# Date: 06 November 2019, 17:12:59                                                   #
# Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD                             #
# Licensed under "GNU Lesser General Public License" version 3                       #
# License text can be found here: https://www.r-project.org/Licenses/LGPL-3          #
#                                                                                    #
# RPACT company website: https://www.rpact.com                                       #
# RPACT package website: https://www.rpact.org                                       #
#                                                                                    #
# Contact us for information about our services: info@rpact.com                      #
#                                                                                    #
######################################################################################

context("Testing simulation survival function")

test_that("'getSimulationSurvival': configuration 1", {
    .skipTestifDisabled()

    # @refFS[Sec.]{fs:subsec:seed}
    simulationResults <- getSimulationSurvival(maxNumberOfSubjects = 200, plannedEvents = 50,
        accrualTime = c(0, 3, 6, 12), accrualIntensity = c(0.1, 0.2, 0.2),
        maxNumberOfIterations = 100, seed = 1234567890)

    ##
    ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results
    ##
    expect_equal(simulationResults$accrualIntensity, c(9.5238095, 19.047619, 19.047619), tolerance = 1e-07)
    expect_equal(simulationResults$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07)
    expect_equal(simulationResults$median2, 37.275405, tolerance = 1e-07)
    expect_equal(simulationResults$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07)
    expect_equal(simulationResults$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(simulationResults$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07)
    expect_equal(simulationResults$iterations[1, ], c(100, 100, 100, 100))
    expect_equal(simulationResults$analysisTime[1, ], c(22.223223, 18.818775, 16.321595, 14.790808), tolerance = 1e-07)
    expect_equal(simulationResults$studyDuration, c(22.223223, 18.818775, 16.321595, 14.790808), tolerance = 1e-07)
    expect_equal(simulationResults$expectedNumberOfEvents, c(50, 50, 50, 50))
    expect_equal(simulationResults$eventsNotAchieved[1, ], c(0, 0, 0, 0))
    expect_equal(simulationResults$expectedNumberOfSubjects, c(200, 200, 200, 200))
    expect_equal(simulationResults$overallReject, c(0.01, 0.41, 0.81, 1), tolerance = 1e-07)
    expect_equal(simulationResults$futilityStop, c(0, 0, 0, 0))
    expect_equal(simulationResults$earlyStop, c(0, 0, 0, 0))
})

test_that("'getSimulationSurvival': configuration 2", {
    .skipTestifDisabled()

    design <- getDesignFisher(kMax = 3, alpha0Vec = c(0.5, 0.5))
    simulationResults <- getSimulationSurvival(design = design, pi2 = 0.6, pi1 = seq(0.3, 0.45, 0.05),
        directionUpper = FALSE, maxNumberOfSubjects = 500, plannedEvents = (1:design$kMax) * 20,
        allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12), accrualIntensity = c(0.1, 0.2, 0.2),
        dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, conditionalPower = 0.8,
        minNumberOfEventsPerStage = c(NA_real_, 10, 10), maxNumberOfEventsPerStage = c(NA_real_, 100, 200),
        maxNumberOfIterations = 100, seed = 1234567890)

    ##
    ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results
    ##
    expect_equal(simulationResults$accrualIntensity, c(23.809524, 47.619048, 47.619048), tolerance = 1e-07)
    expect_equal(simulationResults$median1, c(23.320299, 19.308487, 16.282985, 13.9131), tolerance = 1e-07)
    expect_equal(simulationResults$median2, 9.0776496, tolerance = 1e-07)
    expect_equal(simulationResults$lambda1, c(0.029722912, 0.035898576, 0.042568802, 0.04981975), tolerance = 1e-07)
    expect_equal(simulationResults$lambda2, 0.076357561, tolerance = 1e-07)
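    # All reference values in this and the following configurations come from
    # simulation runs with the fixed seed 1234567890 set above; with a fixed seed
    # the Monte Carlo results are exactly reproducible, so they can be compared
    # against the stored values with a tolerance of 1e-07.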
    expect_equal(simulationResults$hazardRatio, c(0.38925958, 0.47013781, 0.55749295, 0.6524534), tolerance = 1e-07)
    expect_equal(simulationResults$iterations[1, ], c(100, 100, 100, 100))
    expect_equal(simulationResults$iterations[2, ], c(60, 73, 78, 65))
    expect_equal(simulationResults$iterations[3, ], c(5, 9, 28, 46))
    expect_equal(simulationResults$analysisTime[1, ], c(5.4183926, 5.2945044, 5.1495619, 5.0392001), tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[2, ], c(10.130549, 10.39649, 10.458778, 9.7641943), tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[3, ], c(13.506679, 14.455396, 18.382917, 18.866629), tolerance = 1e-07)
    expect_equal(simulationResults$studyDuration, c(8.500396, 9.4448778, 11.628285, 12.227203), tolerance = 1e-07)
    expect_equal(simulationResults$eventsPerStage[1, ], c(20, 20, 20, 20))
    expect_equal(simulationResults$eventsPerStage[2, ], c(84.483333, 93.054795, 98.884615, 92.015385), tolerance = 1e-07)
    expect_equal(simulationResults$eventsPerStage[3, ], c(125.8, 159.44444, 229.53571, 250.93478), tolerance = 1e-07)
    expect_equal(simulationResults$expectedNumberOfEvents, c(60.755833, 79.305068, 118.11231, 139.91292), tolerance = 1e-07)
    expect_equal(simulationResults$eventsNotAchieved[1, ], c(0, 0, 0, 0))
    expect_equal(simulationResults$eventsNotAchieved[2, ], c(0, 0, 0, 0))
    expect_equal(simulationResults$eventsNotAchieved[3, ], c(0, 0, 0, 0))
    expect_equal(simulationResults$numberOfSubjects[1, ], c(186.51, 180.63, 173.73, 168.48), tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[2, ], c(406.05, 420.67123, 424.60256, 393.44615), tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[3, ], c(428.4, 466.33333, 480.96429, 488.78261), tolerance = 1e-07)
    expect_equal(simulationResults$expectedNumberOfSubjects, c(319.3515, 359.96969, 385.19188, 358.56277), tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[1, ], c(0.4, 0.21, 0.2, 0.13), tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[2, ], c(0.55, 0.63, 0.5, 0.15), tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[3, ], c(0.05, 0.09, 0.26, 0.41), tolerance = 1e-07)
    expect_equal(simulationResults$overallReject, c(1, 0.93, 0.96, 0.69), tolerance = 1e-07)
    expect_equal(simulationResults$futilityPerStage[1, ], c(0, 0.06, 0.02, 0.22), tolerance = 1e-07)
    expect_equal(simulationResults$futilityPerStage[2, ], c(0, 0.01, 0, 0.04), tolerance = 1e-07)
    expect_equal(simulationResults$futilityStop, c(0, 0.07, 0.02, 0.26), tolerance = 1e-07)
    expect_equal(simulationResults$earlyStop, c(0.95, 0.91, 0.72, 0.54), tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(simulationResults$conditionalPowerAchieved[2, ], c(0.61612368, 0.57564124, 0.49458667, 0.52832804), tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[3, ], c(0.78816558, 0.77803263, 0.64572713, 0.66129837), tolerance = 1e-07)
})

test_that("'getSimulationSurvival': configuration 3", {
    .skipTestifDisabled()

    design <- getDesignFisher(kMax = 3, alpha0Vec = c(0.5, 0.5))
    simulationResults <- getSimulationSurvival(design = design, pi2 = 0.2, pi1 = seq(0.3, 0.45, 0.05),
        directionUpper = TRUE, maxNumberOfSubjects = 500, plannedEvents = (1:design$kMax) * 20,
        allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12), accrualIntensity = c(0.1, 0.2, 0.2),
        dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, conditionalPower = 0.8,
        minNumberOfEventsPerStage = c(NA_real_, 10, 10), maxNumberOfEventsPerStage = c(NA_real_, 100, 200),
        maxNumberOfIterations = 100, seed = 1234567890)

    ##
    ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results
    ##
    expect_equal(simulationResults$accrualIntensity, c(23.809524, 47.619048, 47.619048), tolerance = 1e-07)
    expect_equal(simulationResults$median1, c(23.320299, 19.308487, 16.282985, 13.9131), tolerance = 1e-07)
    expect_equal(simulationResults$median2, 37.275405, tolerance = 1e-07)
    expect_equal(simulationResults$lambda1, c(0.029722912, 0.035898576, 0.042568802, 0.04981975), tolerance = 1e-07)
    expect_equal(simulationResults$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(simulationResults$hazardRatio, c(1.5984103, 1.9305192, 2.2892242, 2.6791588), tolerance = 1e-07)
    expect_equal(simulationResults$iterations[1, ], c(100, 100, 100, 100))
    expect_equal(simulationResults$iterations[2, ], c(69, 72, 59, 50))
    expect_equal(simulationResults$iterations[3, ], c(37, 12, 7, 2))
    expect_equal(simulationResults$analysisTime[1, ], c(7.2763799, 7.0838561, 6.7193502, 6.3616317), tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[2, ], c(16.764021, 14.756285, 13.821816, 12.988284), tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[3, ], c(38.977945, 24.200748, 26.934721, 11.875967), tolerance = 1e-07)
    expect_equal(simulationResults$studyDuration, c(22.098154, 13.978342, 11.899449, 9.7796143), tolerance = 1e-07)
    expect_equal(simulationResults$eventsPerStage[1, ], c(20, 20, 20, 20))
    expect_equal(simulationResults$eventsPerStage[2, ], c(106.50725, 94.541667, 94.677966, 94.48), tolerance = 1e-07)
    expect_equal(simulationResults$eventsPerStage[3, ], c(259.13514, 175, 204, 84.5), tolerance = 1e-07)
    expect_equal(simulationResults$expectedNumberOfEvents, c(136.16232, 83.325, 71.712542, 57.0404), tolerance = 1e-07)
    expect_equal(simulationResults$eventsNotAchieved[1, ], c(0, 0, 0, 0))
    expect_equal(simulationResults$eventsNotAchieved[2, ], c(0, 0, 0, 0))
    expect_equal(simulationResults$eventsNotAchieved[3, ], c(0, 0, 0, 0))
    expect_equal(simulationResults$numberOfSubjects[1, ], c(275.04, 265.86, 248.46, 231.45), tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[2, ], c(496.07246, 481.84722, 476, 463.84), tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[3, ], c(500, 500, 500, 494))
    expect_equal(simulationResults$expectedNumberOfSubjects, c(429.00559, 423.54913, 384.3886, 348.2482), tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[1, ], c(0.18, 0.22, 0.39, 0.49), tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[2, ], c(0.31, 0.59, 0.52, 0.48), tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[3, ], c(0.35, 0.11, 0.07, 0.02), tolerance = 1e-07)
    expect_equal(simulationResults$overallReject, c(0.84, 0.92, 0.98, 0.99), tolerance = 1e-07)
    expect_equal(simulationResults$futilityPerStage[1, ], c(0.13, 0.06, 0.02, 0.01), tolerance = 1e-07)
    expect_equal(simulationResults$futilityPerStage[2, ], c(0.01, 0.01, 0, 0), tolerance = 1e-07)
    expect_equal(simulationResults$futilityStop, c(0.14, 0.07, 0.02, 0.01), tolerance = 1e-07)
    expect_equal(simulationResults$earlyStop, c(0.63, 0.88, 0.93, 0.98), tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(simulationResults$conditionalPowerAchieved[2, ], c(0.46273079, 0.58305775, 0.61313502, 0.59484117), tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[3, ], c(0.66165116, 0.75066235, 0.71981679, 0.8), tolerance = 1e-07)
})

test_that("'getSimulationSurvival': configuration 4", {
    .skipTestifDisabled()

    design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25)
    piecewiseSurvivalTime <- list(
        "<6"       = 0.025,
        "6 - <9"   = 0.04,
        "9 - <15"  = 0.015,
        "15 - <21" = 0.01,
        ">=21"     = 0.007)
    simulationResults <- getSimulationSurvival(design = design, directionUpper = TRUE,
        maxNumberOfSubjects = 500, plannedEvents = (1:design$kMax) * 20,
        allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12),
        piecewiseSurvivalTime = piecewiseSurvivalTime, hazardRatio = 1.7, accrualIntensity = c(0.1, 0.2, 0.2),
        dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, conditionalPower = 0.8,
        minNumberOfEventsPerStage = c(NA_real_, 10, 10), maxNumberOfEventsPerStage = c(NA_real_, 100, 200),
        maxNumberOfIterations = 100, seed = 1234567890)

    ##
    ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results
    ##
    expect_equal(simulationResults$accrualIntensity, c(23.809524, 47.619048, 47.619048), tolerance = 1e-07)
    expect_equal(simulationResults$lambda1, c(0.0425, 0.068, 0.0255, 0.017, 0.0119), tolerance = 1e-07)
    expect_equal(simulationResults$iterations[1, ], 100)
    expect_equal(simulationResults$iterations[2, ], 95)
    expect_equal(simulationResults$iterations[3, ], 30)
    expect_equal(simulationResults$analysisTime[1, ], 6.3619038, tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[2, ], 12.345684, tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[3, ], 36.687962, tolerance = 1e-07)
    expect_equal(simulationResults$studyDuration, 19.26207, tolerance = 1e-07)
    expect_equal(simulationResults$eventsPerStage[1, ], 20)
    expect_equal(simulationResults$eventsPerStage[2, ], 91.694737, tolerance = 1e-07)
    expect_equal(simulationResults$eventsPerStage[3, ], 207.83333, tolerance = 1e-07)
    expect_equal(simulationResults$expectedNumberOfEvents, 122.95158, tolerance = 1e-07)
    expect_equal(simulationResults$eventsNotAchieved[1, ], 0)
    expect_equal(simulationResults$eventsNotAchieved[2, ], 0)
    expect_equal(simulationResults$eventsNotAchieved[3, ], 0)
    expect_equal(simulationResults$numberOfSubjects[1, ], 231.41, tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[2, ], 448.23158, tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[3, ], 491.66667, tolerance = 1e-07)
    expect_equal(simulationResults$expectedNumberOfSubjects, 450.42103, tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[1, ], 0.05, tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[2, ], 0.65, tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[3, ], 0.29, tolerance = 1e-07)
    expect_equal(simulationResults$overallReject, 0.99, tolerance = 1e-07)
    expect_equal(simulationResults$futilityPerStage[1, ], 0)
    expect_equal(simulationResults$futilityPerStage[2, ], 0)
    expect_equal(simulationResults$futilityStop, 0)
    expect_equal(simulationResults$earlyStop, 0.7, tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[1, ], NA_real_)
    expect_equal(simulationResults$conditionalPowerAchieved[2, ], 0.49425129, tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[3, ], 0.73157546, tolerance = 1e-07)
})

test_that("'getSimulationSurvival': configuration 5", {
    .skipTestifDisabled()

    design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25)
    simulationResults <- getSimulationSurvival(design = design, pi2 = 0.6, pi1 = seq(0.3, 0.45, 0.05),
        directionUpper = FALSE, maxNumberOfSubjects = 200, plannedEvents = (1:design$kMax) * 20,
        allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12), accrualIntensity = c(0.1, 0.2, 0.2),
        dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, conditionalPower = 0.8,
        minNumberOfEventsPerStage = c(NA_real_, 10, 10), maxNumberOfEventsPerStage = c(NA_real_, 40, 40),
        maxNumberOfIterations = 100, seed = 1234567890)

    ##
    ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results
    ##
    expect_equal(simulationResults$accrualIntensity, c(9.5238095, 19.047619, 19.047619), tolerance = 1e-07)
    expect_equal(simulationResults$median1, c(23.320299, 19.308487, 16.282985, 13.9131), tolerance = 1e-07)
    expect_equal(simulationResults$median2, 9.0776496, tolerance = 1e-07)
    expect_equal(simulationResults$lambda1, c(0.029722912, 0.035898576, 0.042568802, 0.04981975), tolerance = 1e-07)
    expect_equal(simulationResults$lambda2, 0.076357561, tolerance = 1e-07)
    expect_equal(simulationResults$hazardRatio, c(0.38925958, 0.47013781, 0.55749295, 0.6524534), tolerance = 1e-07)
    expect_equal(simulationResults$iterations[1, ], c(100, 100, 100, 100))
    expect_equal(simulationResults$iterations[2, ], c(87, 89, 92, 100))
    expect_equal(simulationResults$iterations[3, ], c(18, 34, 58, 77))
    expect_equal(simulationResults$analysisTime[1, ], c(8.1674426, 7.9228743, 7.6045868, 7.4881493), tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[2, ], c(12.354338, 12.56529, 12.380125, 12.254955), tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[3, ], c(16.473595, 17.9949, 17.847597, 17.390492), tolerance = 1e-07)
    expect_equal(simulationResults$studyDuration, c(12.562909, 13.818364, 15.044701, 16.144285), tolerance = 1e-07)
    expect_equal(simulationResults$eventsPerStage[1, ], c(20, 20, 20, 20))
    expect_equal(simulationResults$eventsPerStage[2, ], c(48.54023, 51.561798, 55.130435, 55.79), tolerance = 1e-07)
    expect_equal(simulationResults$eventsPerStage[3, ], c(75.277778, 87.176471, 94.103448, 94.545455), tolerance = 1e-07)
    expect_equal(simulationResults$expectedNumberOfEvents, c(49.642759, 60.198989, 74.924348, 85.6317), tolerance = 1e-07)
    expect_equal(simulationResults$eventsNotAchieved[1, ], c(0, 0, 0, 0))
    expect_equal(simulationResults$eventsNotAchieved[2, ], c(0, 0, 0, 0))
    expect_equal(simulationResults$eventsNotAchieved[3, ], c(0, 0, 0, 0))
    expect_equal(simulationResults$numberOfSubjects[1, ], c(126.03, 121.42, 115.37, 113.16), tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[2, ], c(187.50575, 190.98876, 193.16304, 192.33), tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[3, ], c(199.11111, 200, 199.39655, 199.28571), tolerance = 1e-07)
    expect_equal(simulationResults$expectedNumberOfSubjects, c(181.60287, 186.40002, 190.55503, 197.6859), tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[1, ], c(0.13, 0.11, 0.08, 0), tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[2, ], c(0.69, 0.55, 0.34, 0.23), tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[3, ], c(0.17, 0.31, 0.26, 0.25), tolerance = 1e-07)
    expect_equal(simulationResults$overallReject, c(0.99, 0.97, 0.68, 0.48), tolerance = 1e-07)
    expect_equal(simulationResults$futilityPerStage[1, ], c(0, 0, 0, 0))
    expect_equal(simulationResults$futilityPerStage[2, ], c(0, 0, 0, 0))
    expect_equal(simulationResults$futilityStop, c(0, 0, 0, 0))
    expect_equal(simulationResults$earlyStop, c(0.82, 0.66, 0.42, 0.23), tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(simulationResults$conditionalPowerAchieved[2, ], c(0.56161185, 0.47418383, 0.31608317, 0.29578133), tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[3, ], c(0.71394365, 0.57778506, 0.37448609, 0.32265113), tolerance = 1e-07)
})

test_that("'getSimulationSurvival': configuration 6", {
    .skipTestifDisabled()

    design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25)
    piecewiseSurvivalTime <- list(
        "0 - <6"   = 0.025,
        "6 - <9"   = 0.04,
        "9 - <15"  = 0.015,
        "15 - <21" = 0.01,
        ">=21"     = 0.007)
    simulationResults <- getSimulationSurvival(design = design, directionUpper = FALSE,
        maxNumberOfSubjects = 200, plannedEvents = (1:design$kMax) * 20,
        allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12),
        piecewiseSurvivalTime = piecewiseSurvivalTime, hazardRatio = c(0.8, 0.9), accrualIntensity = c(0.1, 0.2, 0.2),
        dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, conditionalPower = 0.8,
        minNumberOfEventsPerStage = c(NA_real_, 10, 10), maxNumberOfEventsPerStage = c(NA_real_, 400, 200),
        maxNumberOfIterations = 100, seed = 1234567890)

    ##
    ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results
    ##
    expect_equal(simulationResults$accrualIntensity, c(9.5238095, 19.047619, 19.047619), tolerance = 1e-07)
    expect_equal(simulationResults$lambda1, c(0.02, 0.032, 0.012, 0.008, 0.0056), tolerance = 1e-07)
    expect_equal(simulationResults$iterations[1, ], 100)
    expect_equal(simulationResults$iterations[2, ], 29)
    expect_equal(simulationResults$iterations[3, ], 8)
    expect_equal(simulationResults$analysisTime[1, ], 11.099103, tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[2, ], 137.1048, tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[3, ], 83.267347, tolerance = 1e-07)
    expect_equal(simulationResults$studyDuration, 10.437225, tolerance = 1e-07)
    expect_equal(simulationResults$eventsPerStage[1, ], 20)
    expect_equal(simulationResults$eventsPerStage[2, ], 102.10345, tolerance = 1e-07)
    expect_equal(simulationResults$eventsPerStage[3, ], 96.375, tolerance = 1e-07)
    expect_equal(simulationResults$expectedNumberOfEvents, 96.661422, tolerance = 1e-07)
    expect_equal(simulationResults$eventsNotAchieved[1, ], 0)
    expect_equal(simulationResults$eventsNotAchieved[2, ], 0.71, tolerance = 1e-07)
    expect_equal(simulationResults$eventsNotAchieved[3, ], 0.16, tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[1, ], 179.93, tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[2, ], 200)
    expect_equal(simulationResults$numberOfSubjects[3, ], 200)
    expect_equal(simulationResults$expectedNumberOfSubjects, 200)
    expect_equal(simulationResults$rejectPerStage[1, ], 0)
    expect_equal(simulationResults$rejectPerStage[2, ], 0.05, tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[3, ], 0.03, tolerance = 1e-07)
    expect_equal(simulationResults$overallReject, 0.08, tolerance = 1e-07)
    expect_equal(simulationResults$futilityPerStage[1, ], 0)
    expect_equal(simulationResults$futilityPerStage[2, ], 0)
    expect_equal(simulationResults$futilityStop, 0)
    expect_equal(simulationResults$earlyStop, 0.05, tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[1, ], NA_real_)
    expect_equal(simulationResults$conditionalPowerAchieved[2, ], 0.80243482, tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[3, ], 0.8, tolerance = 1e-07)
})
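# A worked check of the relative accrual intensities asserted in these tests:
# 'accrualIntensity' values below 1 act as relative weights that are rescaled
# against 'maxNumberOfSubjects'. The following minimal sketch (an illustration
# only, not part of the original suite) reproduces the expected values for
# accrualTime = c(0, 3, 6, 12) and maxNumberOfSubjects = 200:
#
#     relWeights <- c(0.1, 0.2, 0.2)
#     widths <- diff(c(0, 3, 6, 12))        # interval widths 3, 3, 6
#     x <- 200 / sum(widths * relWeights)   # 200 / 2.1 = 95.2381
#     round(relWeights * x, 7)              # 9.5238095 19.0476190 19.0476190
#
# For maxNumberOfSubjects = 500 the same rescaling gives
# c(23.809524, 47.619048, 47.619048), matching configurations 2 to 4 above.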
test_that("'getSimulationSurvival': configuration 7", { .skipTestifDisabled() design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25) piecewiseSurvivalTime <- list("<6" = 0.025) simulationResults <- getSimulationSurvival(design = design, directionUpper = FALSE, maxNumberOfSubjects = 200, plannedEvents = (1:design$kMax) * 20, allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12), piecewiseSurvivalTime = piecewiseSurvivalTime, hazardRatio = 0.8, accrualIntensity = c(0.1, 0.2, 0.2) , dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA_real_, 10, 10), maxNumberOfEventsPerStage = c(NA_real_, 400, 200), maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results ## expect_equal(simulationResults$accrualIntensity, c(9.5238095, 19.047619, 19.047619), tolerance = 1e-07) expect_equal(simulationResults$pi1, 0.21337214, tolerance = 1e-07) expect_equal(simulationResults$pi2, 0.25918178, tolerance = 1e-07) expect_equal(simulationResults$median1, 34.657359, tolerance = 1e-07) expect_equal(simulationResults$median2, 27.725887, tolerance = 1e-07) expect_equal(simulationResults$lambda1, 0.02, tolerance = 1e-07) expect_equal(simulationResults$iterations[1, ], 100) expect_equal(simulationResults$iterations[2, ], 26) expect_equal(simulationResults$iterations[3, ], 12) expect_equal(simulationResults$analysisTime[1, ], 11.419107, tolerance = 1e-07) expect_equal(simulationResults$analysisTime[2, ], 43.00709, tolerance = 1e-07) expect_equal(simulationResults$analysisTime[3, ], 62.010907, tolerance = 1e-07) expect_equal(simulationResults$studyDuration, 8.0301114, tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[1, ], 20) expect_equal(simulationResults$eventsPerStage[2, ], 87.076923, tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[3, ], 122.5, tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfEvents, 121.79154, tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[1, ], 0) expect_equal(simulationResults$eventsNotAchieved[2, ], 0.74, tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[3, ], 0.12, tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[1, ], 183.49, tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[2, ], 200) expect_equal(simulationResults$numberOfSubjects[3, ], 200) expect_equal(simulationResults$expectedNumberOfSubjects, 200) expect_equal(simulationResults$rejectPerStage[1, ], 0) expect_equal(simulationResults$rejectPerStage[2, ], 0.02, tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[3, ], 0.04, tolerance = 1e-07) expect_equal(simulationResults$overallReject, 0.06, tolerance = 1e-07) expect_equal(simulationResults$futilityPerStage[1, ], 0) expect_equal(simulationResults$futilityPerStage[2, ], 0) expect_equal(simulationResults$futilityStop, 0) expect_equal(simulationResults$earlyStop, 0.02, tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResults$conditionalPowerAchieved[2, ], 0.80311744, tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[3, ], 0.8, tolerance = 1e-07) }) test_that("'getSimulationSurvival': configuration 8", { .skipTestifDisabled() design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25) piecewiseSurvivalTime <- list("<6" = 0.025) simulationResults <- 
getSimulationSurvival(design = design, directionUpper = FALSE, maxNumberOfSubjects = 200, plannedEvents = (1:design$kMax) * 20, allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12), piecewiseSurvivalTime = c(0, 6), lambda2 = c(0.01, 0.03), hazardRatio = 0.8, accrualIntensity = c(0.1, 0.2, 0.2) , dropoutRate1 = 0.04, dropoutRate2 = 0.08, dropoutTime = 12, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results ## expect_equal(simulationResults$accrualIntensity, c(9.5238095, 19.047619, 19.047619), tolerance = 1e-07) expect_equal(simulationResults$lambda1, c(0.008, 0.024), tolerance = 1e-07) expect_equal(simulationResults$iterations[1, ], 100) expect_equal(simulationResults$iterations[2, ], 99) expect_equal(simulationResults$iterations[3, ], 95) expect_equal(simulationResults$analysisTime[1, ], 14.155697, tolerance = 1e-07) expect_equal(simulationResults$analysisTime[2, ], 19.508242, tolerance = 1e-07) expect_equal(simulationResults$analysisTime[3, ], 25.008056, tolerance = 1e-07) expect_equal(simulationResults$studyDuration, 24.627971, tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[1, ], 20) expect_equal(simulationResults$eventsPerStage[2, ], 40) expect_equal(simulationResults$eventsPerStage[3, ], 60) expect_equal(simulationResults$expectedNumberOfEvents, 58.8, tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[1, ], 0) expect_equal(simulationResults$eventsNotAchieved[2, ], 0) expect_equal(simulationResults$eventsNotAchieved[3, ], 0) expect_equal(simulationResults$numberOfSubjects[1, ], 199.73, tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[2, ], 200) expect_equal(simulationResults$numberOfSubjects[3, ], 200) expect_equal(simulationResults$expectedNumberOfSubjects, 199.9973, tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[1, ], 0.01, tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[2, ], 0.04, tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[3, ], 0.06, tolerance = 1e-07) expect_equal(simulationResults$overallReject, 0.11, tolerance = 1e-07) expect_equal(simulationResults$futilityPerStage[1, ], 0) expect_equal(simulationResults$futilityPerStage[2, ], 0) expect_equal(simulationResults$futilityStop, 0) expect_equal(simulationResults$earlyStop, 0.05, tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResults$conditionalPowerAchieved[2, ], 0.13387917, tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[3, ], 0.12806393, tolerance = 1e-07) }) test_that("'getSimulationSurvival': configuration 9; ", { .skipTestifDisabled() design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25) piecewiseSurvivalTime <- list("<6" = 0.025) simulationResults <- getSimulationSurvival(design = design, directionUpper = FALSE, maxNumberOfSubjects = 200, plannedEvents = (1:design$kMax) * 20, allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12), piecewiseSurvivalTime = c(0, 6), lambda2 = c(0.01, 0.03), hazardRatio = c(0.75), accrualIntensity = c(0.1, 0.2, 0.2) , dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA_real_, 10, 10), maxNumberOfEventsPerStage = c(NA_real_, 400, 200), maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results 
## expect_equal(simulationResults$accrualIntensity, c(9.5238095, 19.047619, 19.047619), tolerance = 1e-07) expect_equal(simulationResults$lambda1, c(0.0075, 0.0225), tolerance = 1e-07) expect_equal(simulationResults$iterations[1, ], 100) expect_equal(simulationResults$iterations[2, ], 27) expect_equal(simulationResults$iterations[3, ], 2) expect_equal(simulationResults$analysisTime[1, ], 14.263292, tolerance = 1e-07) expect_equal(simulationResults$analysisTime[2, ], 43.719076, tolerance = 1e-07) expect_equal(simulationResults$analysisTime[3, ], 38.174522, tolerance = 1e-07) expect_equal(simulationResults$studyDuration, 6.7834542, tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[1, ], 20) expect_equal(simulationResults$eventsPerStage[2, ], 90.037037, tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[3, ], 84.5, tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfEvents, 84.519444, tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[1, ], 0) expect_equal(simulationResults$eventsNotAchieved[2, ], 0.72, tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[3, ], 0.13, tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[1, ], 199.79, tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[2, ], 200) expect_equal(simulationResults$numberOfSubjects[3, ], 200) expect_equal(simulationResults$expectedNumberOfSubjects, 199.9979, tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[1, ], 0.01, tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[2, ], 0.12, tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[3, ], 0.02, tolerance = 1e-07) expect_equal(simulationResults$overallReject, 0.15, tolerance = 1e-07) expect_equal(simulationResults$futilityPerStage[1, ], 0) expect_equal(simulationResults$futilityPerStage[2, ], 0) expect_equal(simulationResults$futilityStop, 0) expect_equal(simulationResults$earlyStop, 0.13, tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResults$conditionalPowerAchieved[2, ], 0.80261071, tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[3, ], 0.8, tolerance = 1e-07) }) test_that("'getSimulationSurvival': configuration 10; ", { .skipTestifDisabled() design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25) piecewiseSurvivalTime <- list("<6" = 0.025) simulationResults <- getSimulationSurvival(design = design, directionUpper = FALSE, maxNumberOfSubjects = 200, plannedEvents = (1:design$kMax) * 20, allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12), lambda2 = 0.03, hazardRatio = c(0.75, 0.8, 0.9), accrualIntensity = c(0.1, 0.2, 0.2) , dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA_real_, 10, 10), maxNumberOfEventsPerStage = c(NA_real_, 400, 200), maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results ## expect_equal(simulationResults$accrualIntensity, c(9.5238095, 19.047619, 19.047619), tolerance = 1e-07) expect_equal(simulationResults$pi1, c(0.23662051, 0.25023841, 0.27674976), tolerance = 1e-07) expect_equal(simulationResults$pi2, 0.30232367, tolerance = 1e-07) expect_equal(simulationResults$median1, c(30.806541, 28.881133, 25.672118), tolerance = 1e-07) expect_equal(simulationResults$median2, 23.104906, tolerance = 1e-07) 
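    # With the vector-valued hazardRatio = c(0.75, 0.8, 0.9) above, every result
    # field carries one entry per hazard ratio scenario; e.g. lambda1 below equals
    # lambda2 * hazardRatio = 0.03 * c(0.75, 0.8, 0.9) = c(0.0225, 0.024, 0.027).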
    expect_equal(simulationResults$lambda1, c(0.0225, 0.024, 0.027), tolerance = 1e-07)
    expect_equal(simulationResults$iterations[1, ], c(100, 100, 100))
    expect_equal(simulationResults$iterations[2, ], c(31, 24, 16))
    expect_equal(simulationResults$iterations[3, ], c(5, 5, 7))
    expect_equal(simulationResults$analysisTime[1, ], c(10.701574, 10.513732, 10.265089), tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[2, ], c(41.567704, 37.665074, 37.552932), tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[3, ], c(33.732435, 53.513221, 36.546609), tolerance = 1e-07)
    expect_equal(simulationResults$studyDuration, c(6.1071436, 6.3055402, 2.5582627), tolerance = 1e-07)
    expect_equal(simulationResults$eventsPerStage[1, ], c(20, 20, 20))
    expect_equal(simulationResults$eventsPerStage[2, ], c(95.290323, 89.541667, 93.9375), tolerance = 1e-07)
    expect_equal(simulationResults$eventsPerStage[3, ], c(93.8, 124, 109.71429), tolerance = 1e-07)
    expect_equal(simulationResults$expectedNumberOfEvents, c(93.949032, 120.19708, 109.71429), tolerance = 1e-07)
    expect_equal(simulationResults$eventsNotAchieved[1, ], c(0, 0, 0))
    expect_equal(simulationResults$eventsNotAchieved[2, ], c(0.69, 0.74, 0.84), tolerance = 1e-07)
    expect_equal(simulationResults$eventsNotAchieved[3, ], c(0.16, 0.14, 0.09), tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[1, ], c(173.61, 170.39, 165.9), tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[2, ], c(199.6129, 200, 200), tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[3, ], c(200, 200, 200))
    expect_equal(simulationResults$expectedNumberOfSubjects, c(199.96129, 199.4078, 200), tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[1, ], c(0, 0.02, 0), tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[2, ], c(0.1, 0.05, 0), tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[3, ], c(0.05, 0.01, 0.03), tolerance = 1e-07)
    expect_equal(simulationResults$overallReject, c(0.15, 0.08, 0.03), tolerance = 1e-07)
    expect_equal(simulationResults$futilityPerStage[1, ], c(0, 0, 0))
    expect_equal(simulationResults$futilityPerStage[2, ], c(0, 0, 0))
    expect_equal(simulationResults$futilityStop, c(0, 0, 0))
    expect_equal(simulationResults$earlyStop, c(0.1, 0.07, 0), tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_))
    expect_equal(simulationResults$conditionalPowerAchieved[2, ], c(0.80326129, 0.80161244, 0.8), tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[3, ], c(0.8, 0.8, 0.8), tolerance = 1e-07)
})

test_that("'getSimulationSurvival': test accrual time and intensity definition", {
    .skipTestifDisabled()

    maxNumberOfSubjects <- getSimulationSurvival(plannedEvents = 100, accrualTime = c(0, 6, 12),
        accrualIntensity = c(22, 33), maxNumberOfIterations = 100)$maxNumberOfSubjects
    expect_equal(maxNumberOfSubjects, 330)

    accrualIntensity <- getSimulationSurvival(plannedEvents = 100, accrualTime = c(0, 6, 12),
        accrualIntensity = c(0.2, 0.3), maxNumberOfSubjects = 330,
        maxNumberOfIterations = 100, seed = 1234567890)$accrualIntensity
    expect_equal(accrualIntensity, c(22, 33))
})

test_that("'getSimulationSurvival': test expected warnings and errors", {
    dIN <- getDesignInverseNormal(informationRates = c(0.4, 0.7, 1))

    expect_warning(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3,
        plannedEvents = c(58, 102, 146), minNumberOfEventsPerStage = c(NA_real_, 44, 44),
        maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44), maxNumberOfSubjects = 800,
        maxNumberOfIterations = 100, seed = 1234567890),
        "'minNumberOfEventsPerStage' (NA, 44, 44) will be ignored because no 'conditionalPower' is defined", fixed = TRUE)
    expect_warning(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3,
        plannedEvents = c(58, 102, 146), minNumberOfEventsPerStage = c(NA_real_, 44, 44),
        maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44), maxNumberOfSubjects = 800,
        maxNumberOfIterations = 100, seed = 1234567890),
        "'maxNumberOfEventsPerStage' (NA, 176, 176) will be ignored because no 'conditionalPower' is defined", fixed = TRUE)
    expect_warning(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3,
        plannedEvents = c(58, 102, 146), minNumberOfEventsPerStage = c(NA_real_, 44, 44),
        maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890),
        "'minNumberOfEventsPerStage' (NA, 44, 44) will be ignored because no 'conditionalPower' is defined", fixed = TRUE)
    expect_warning(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3,
        plannedEvents = c(58, 102, 146), maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44),
        maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890),
        "'maxNumberOfEventsPerStage' (NA, 176, 176) will be ignored because no 'conditionalPower' is defined", fixed = TRUE)

    expect_error(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3,
        conditionalPower = 0.8, plannedEvents = c(58, 102, 146), maxNumberOfSubjects = 800,
        maxNumberOfIterations = 100, seed = 1234567890),
        "Missing argument: 'minNumberOfEventsPerStage' must be defined because 'conditionalPower' is defined", fixed = TRUE)
    expect_error(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3,
        conditionalPower = 0.8, plannedEvents = c(58, 102, 146),
        minNumberOfEventsPerStage = c(NA_real_, 44, 44), maxNumberOfSubjects = 800,
        maxNumberOfIterations = 100, seed = 1234567890),
        "Missing argument: 'maxNumberOfEventsPerStage' must be defined because 'conditionalPower' is defined", fixed = TRUE)
    expect_error(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3,
        conditionalPower = 0.8, plannedEvents = c(58, 102, 146),
        maxNumberOfEventsPerStage = 4 * c(58, 44, 44), maxNumberOfSubjects = 800,
        maxNumberOfIterations = 100, seed = 1234567890),
        "Missing argument: 'minNumberOfEventsPerStage' must be defined because 'conditionalPower' is defined", fixed = TRUE)
    expect_error(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3,
        conditionalPower = -0.1, plannedEvents = c(58, 102, 146),
        minNumberOfEventsPerStage = c(NA_real_, 44, 44), maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44),
        maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890),
        "Argument out of bounds: 'conditionalPower' (-0.1) is out of bounds (0; 1)", fixed = TRUE)
    expect_error(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3,
        conditionalPower = 1.1, plannedEvents = c(58, 102, 146),
        minNumberOfEventsPerStage = c(NA_real_, 44, 44), maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44),
        maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890),
        "Argument out of bounds: 'conditionalPower' (1.1) is out of bounds (0; 1)", fixed = TRUE)
    expect_error(getSimulationSurvival(plannedEvents = -100, accrualTime = c(0, 6, 12),
        accrualIntensity = c(22, 33), maxNumberOfIterations = 100, seed = 1234567890),
        "Argument out of bounds: 'plannedEvents' (-100) must be >= 1", fixed = TRUE)
    expect_error(getSimulationSurvival(design = dIN, plannedEvents = c(100, 100, 150),
        accrualTime = c(0, 6, 12), accrualIntensity = c(22, 33),
        maxNumberOfIterations = 100, seed = 1234567890),
        "Illegal argument: 'plannedEvents' (100, 100, 150) must be strictly increasing: x_1 < .. < x_3", fixed = TRUE)
    expect_error(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3,
        conditionalPower = 0.8, plannedEvents = c(58, 102, 146),
        minNumberOfEventsPerStage = c(NA_real_, 44, -44), maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44),
        maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890),
        "Argument out of bounds: each value of 'minNumberOfEventsPerStage' (58, 44, -44) must be >= 1", fixed = TRUE)
    expect_error(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3,
        conditionalPower = 0.8, plannedEvents = c(58, 102, 146),
        minNumberOfEventsPerStage = c(NA_real_, 44, 44), maxNumberOfEventsPerStage = 4 * c(NA_real_, 10, 44),
        maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890),
        "Illegal argument: 'maxNumberOfEventsPerStage' (58, 40, 176) must be not smaller than minNumberOfEventsPerStage' (58, 44, 44)", fixed = TRUE)
    expect_error(getSimulationSurvival(plannedEvents = 100, maxNumberOfIterations = 100, seed = 1234567890),
        "Illegal argument: 'maxNumberOfSubjects' must be defined", fixed = TRUE)
    expect_error(getSimulationSurvival(plannedEvents = 100, accrualTime = c(0, 12), accrualIntensity = 20,
        thetaH1 = 0, maxNumberOfIterations = 100, seed = 1234567890),
        "Argument out of bounds: 'thetaH1' (0) must be > 0", fixed = TRUE)
    expect_error(getSimulationSurvival(plannedEvents = 100, accrualTime = c(0, 12), accrualIntensity = 20,
        conditionalPower = 0, maxNumberOfIterations = 100, seed = 1234567890),
        "Argument out of bounds: 'conditionalPower' (0) is out of bounds (0; 1)", fixed = TRUE)
    expect_error(getSimulationSurvival(plannedEvents = 100, accrualTime = c(0, 12), accrualIntensity = 20,
        conditionalPower = 1, maxNumberOfIterations = 100, seed = 1234567890),
        "Argument out of bounds: 'conditionalPower' (1) is out of bounds (0; 1)", fixed = TRUE)
    expect_error(getSimulationSurvival(plannedEvents = 100, accrualTime = c(0, 12), accrualIntensity = 20,
        conditionalPower = c(0.5, 0.8), maxNumberOfIterations = 100, seed = 1234567890),
        "Illegal argument: 'conditionalPower' c(0.5, 0.8) must be a single numerical value", fixed = TRUE)
})

context("Testing the simulation of survival data for different parameter variants")

test_that("'getSimulationSurvival': Fixed sample size with minimum required definitions, pi1 = c(0.4, 0.5, 0.6) and pi2 = 0.2 at event time 12, accrual time 12 and follow-up time 6 as default", {
    .skipTestifDisabled()

    simulationResult <- getSimulationSurvival(plannedEvents = 40, maxNumberOfSubjects = 200,
        maxNumberOfIterations = 100, seed = 1234567890)

    ##
    ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results
    ##
    expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(simulationResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07)
    expect_equal(simulationResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(simulationResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07)
    expect_equal(simulationResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(simulationResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07)
expect_equal(simulationResult$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResult$analysisTime[1, ], c(17.941133, 15.499503, 13.535749, 12.34), tolerance = 1e-07) expect_equal(simulationResult$studyDuration, c(17.941133, 15.499503, 13.535749, 12.34), tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfEvents, c(40, 40, 40, 40)) expect_equal(simulationResult$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResult$expectedNumberOfSubjects, c(200, 200, 199.71, 196.74), tolerance = 1e-07) expect_equal(simulationResult$overallReject, c(0.01, 0.3, 0.68, 0.95), tolerance = 1e-07) expect_equal(simulationResult$futilityStop, c(0, 0, 0, 0)) expect_equal(simulationResult$earlyStop, c(0, 0, 0, 0)) }) test_that("'getSimulationSurvival': Increase number of simulation iterations ", { .skipTestifDisabled() simulationResult <- getSimulationSurvival(plannedEvents = 40, maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(simulationResult$median2, 37.275405, tolerance = 1e-07) expect_equal(simulationResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(simulationResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(simulationResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResult$analysisTime[1, ], c(17.941133, 15.499503, 13.535749, 12.34), tolerance = 1e-07) expect_equal(simulationResult$studyDuration, c(17.941133, 15.499503, 13.535749, 12.34), tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfEvents, c(40, 40, 40, 40)) expect_equal(simulationResult$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResult$expectedNumberOfSubjects, c(200, 200, 199.71, 196.74), tolerance = 1e-07) expect_equal(simulationResult$overallReject, c(0.01, 0.3, 0.68, 0.95), tolerance = 1e-07) expect_equal(simulationResult$futilityStop, c(0, 0, 0, 0)) expect_equal(simulationResult$earlyStop, c(0, 0, 0, 0)) }) test_that("'getSimulationSurvival': Determine necessary accrual time if 200 subjects and 30 subjects per time unit can be recruited ", { .skipTestifDisabled() simulationResult <- getSimulationSurvival(plannedEvents = 40, accrualTime = 0, accrualIntensity = 30, maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualTime, 6.6666667, tolerance = 1e-07) expect_equal(simulationResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(simulationResult$median2, 37.275405, tolerance = 1e-07) expect_equal(simulationResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(simulationResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(simulationResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResult$analysisTime[1, ], c(15.255121, 12.685136, 10.656532, 9.4294312), tolerance = 1e-07) 
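    # Editorial note (illustrative sketch, not part of the original test suite):
    # the 'accrualTime' checked above follows from simple arithmetic, since
    # recruiting 200 subjects at a constant rate of 30 subjects per time unit takes
    #   200 / 30   # = 6.6666667 time units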
expect_equal(simulationResult$studyDuration, c(15.255121, 12.685136, 10.656532, 9.4294312), tolerance = 1e-07)
    expect_equal(simulationResult$expectedNumberOfEvents, c(40, 40, 40, 40))
    expect_equal(simulationResult$eventsNotAchieved[1, ], c(0, 0, 0, 0))
    expect_equal(simulationResult$expectedNumberOfSubjects, c(200, 200, 200, 200))
    expect_equal(simulationResult$overallReject, c(0.02, 0.28, 0.77, 0.96), tolerance = 1e-07)
    expect_equal(simulationResult$futilityStop, c(0, 0, 0, 0))
    expect_equal(simulationResult$earlyStop, c(0, 0, 0, 0))
})

test_that("'getSimulationSurvival': Determine necessary accrual time if 200 subjects are needed and 20 subjects per time unit can be recruited during the first 6 time units, then 30 subjects per time unit", {
    .skipTestifDisabled()
    simulationResult <- getSimulationSurvival(plannedEvents = 40, accrualTime = c(0, 6),
        accrualIntensity = c(20, 30), maxNumberOfSubjects = 200,
        maxNumberOfIterations = 100, seed = 1234567890)
    ##
    ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results
    ##
    expect_equal(simulationResult$accrualTime, c(6, 8.6666667), tolerance = 1e-07)
    expect_equal(simulationResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07)
    expect_equal(simulationResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(simulationResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07)
    expect_equal(simulationResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(simulationResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07)
    expect_equal(simulationResult$iterations[1, ], c(100, 100, 100, 100))
    expect_equal(simulationResult$analysisTime[1, ], c(16.683961, 14.141068, 12.109744, 10.96314), tolerance = 1e-07)
    expect_equal(simulationResult$studyDuration, c(16.683961, 14.141068, 12.109744, 10.96314), tolerance = 1e-07)
    expect_equal(simulationResult$expectedNumberOfEvents, c(40, 40, 40, 40))
    expect_equal(simulationResult$eventsNotAchieved[1, ], c(0, 0, 0, 0))
    expect_equal(simulationResult$expectedNumberOfSubjects, c(200, 200, 200, 200))
    expect_equal(simulationResult$overallReject, c(0.01, 0.28, 0.72, 0.96), tolerance = 1e-07)
    expect_equal(simulationResult$futilityStop, c(0, 0, 0, 0))
    expect_equal(simulationResult$earlyStop, c(0, 0, 0, 0))
})

test_that("'getSimulationSurvival': Determine maximum number of subjects if 20 subjects per time unit can be recruited during the first 6 time units and 30 subjects per time unit from time 6 to 10", {
    .skipTestifDisabled()
    simulationResult <- getSimulationSurvival(plannedEvents = 40, accrualTime = c(0, 6, 10),
        accrualIntensity = c(20, 30), maxNumberOfIterations = 100, seed = 1234567890)
    ##
    ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results
    ##
    expect_equal(simulationResult$maxNumberOfSubjects, 240)
    expect_equal(simulationResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07)
    expect_equal(simulationResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(simulationResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07)
    expect_equal(simulationResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(simulationResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07)
    expect_equal(simulationResult$iterations[1, ], c(100, 100, 100, 100))
    expect_equal(simulationResult$analysisTime[1, ], c(15.210978, 13.172199, 11.59631, 10.698373), tolerance = 1e-07)
    expect_equal(simulationResult$studyDuration, c(15.210978, 13.172199, 11.59631, 10.698373), tolerance = 1e-07)
    expect_equal(simulationResult$expectedNumberOfEvents, c(40, 40, 40, 40))
    expect_equal(simulationResult$eventsNotAchieved[1, ], c(0, 0, 0, 0))
    expect_equal(simulationResult$expectedNumberOfSubjects, c(240, 240, 239.63, 237.56), tolerance = 1e-07)
    expect_equal(simulationResult$overallReject, c(0.04, 0.33, 0.75, 0.95), tolerance = 1e-07)
    expect_equal(simulationResult$futilityStop, c(0, 0, 0, 0))
    expect_equal(simulationResult$earlyStop, c(0, 0, 0, 0))
})

test_that("'getSimulationSurvival': Specify accrual time as a list", {
    .skipTestifDisabled()
    at <- list("0 - <6" = 20, "6 - Inf" = 30)
    simulationResult <- getSimulationSurvival(plannedEvents = 40, accrualTime = at,
        maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890)
    ##
    ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results
    ##
    expect_equal(simulationResult$accrualTime, c(6, 8.6666667), tolerance = 1e-07)
    expect_equal(simulationResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07)
    expect_equal(simulationResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(simulationResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07)
    expect_equal(simulationResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(simulationResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07)
    expect_equal(simulationResult$iterations[1, ], c(100, 100, 100, 100))
    expect_equal(simulationResult$analysisTime[1, ], c(16.683961, 14.141068, 12.109744, 10.96314), tolerance = 1e-07)
    expect_equal(simulationResult$studyDuration, c(16.683961, 14.141068, 12.109744, 10.96314), tolerance = 1e-07)
    expect_equal(simulationResult$expectedNumberOfEvents, c(40, 40, 40, 40))
    expect_equal(simulationResult$eventsNotAchieved[1, ], c(0, 0, 0, 0))
    expect_equal(simulationResult$expectedNumberOfSubjects, c(200, 200, 200, 200))
    expect_equal(simulationResult$overallReject, c(0.01, 0.28, 0.72, 0.96), tolerance = 1e-07)
    expect_equal(simulationResult$futilityStop, c(0, 0, 0, 0))
    expect_equal(simulationResult$earlyStop, c(0, 0, 0, 0))
})

test_that("'getSimulationSurvival': Specify accrual time as a list if the maximum number of subjects needs to be calculated", {
    .skipTestifDisabled()
    at <- list("0 - <6" = 20, "6 - <=10" = 30)
    simulationResult <- getSimulationSurvival(plannedEvents = 40, accrualTime = at,
        maxNumberOfIterations = 100, seed = 1234567890)
    ##
    ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results
    ##
    expect_equal(simulationResult$maxNumberOfSubjects, 240)
    expect_equal(simulationResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07)
    expect_equal(simulationResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(simulationResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07)
    expect_equal(simulationResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(simulationResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07)
    expect_equal(simulationResult$iterations[1, ], c(100, 100, 100, 100))
    expect_equal(simulationResult$analysisTime[1, ], c(15.210978, 13.172199, 11.59631, 10.698373), tolerance = 1e-07)
    expect_equal(simulationResult$studyDuration, c(15.210978, 13.172199, 11.59631, 10.698373), tolerance = 1e-07)
    expect_equal(simulationResult$expectedNumberOfEvents, c(40, 40, 40, 40))
    expect_equal(simulationResult$eventsNotAchieved[1, ], c(0, 0, 0, 0))
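    # Editorial note (illustrative sketch, not part of the original test suite):
    # the derived 'maxNumberOfSubjects' checked above follows from the accrual
    # specification, 20 subjects per time unit for 6 time units plus 30 subjects
    # per time unit for the remaining 4 time units:
    #   6 * 20 + 4 * 30   # = 240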
expect_equal(simulationResult$expectedNumberOfSubjects, c(240, 240, 239.63, 237.56), tolerance = 1e-07)
    expect_equal(simulationResult$overallReject, c(0.04, 0.33, 0.75, 0.95), tolerance = 1e-07)
    expect_equal(simulationResult$futilityStop, c(0, 0, 0, 0))
    expect_equal(simulationResult$earlyStop, c(0, 0, 0, 0))
})

test_that("'getSimulationSurvival': Specify effect size for a two-stage group design with O'Brien & Fleming boundaries. Effect size is based on event rates at specified event time; directionUpper = FALSE needs to be specified because it should be shown that hazard ratio < 1", {
    .skipTestifDisabled()
    simulationResult <- getSimulationSurvival(design = getDesignGroupSequential(kMax = 2),
        pi1 = 0.2, pi2 = 0.3, eventTime = 24, plannedEvents = c(20, 40),
        maxNumberOfSubjects = 200, directionUpper = FALSE,
        maxNumberOfIterations = 100, seed = 1234567890)
    ##
    ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results
    ##
    expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(simulationResult$median1, 74.550809, tolerance = 1e-07)
    expect_equal(simulationResult$median2, 46.640597, tolerance = 1e-07)
    expect_equal(simulationResult$lambda1, 0.009297648, tolerance = 1e-07)
    expect_equal(simulationResult$lambda2, 0.014861456, tolerance = 1e-07)
    expect_equal(simulationResult$hazardRatio, 0.62562161, tolerance = 1e-07)
    expect_equal(simulationResult$iterations[1, ], 100)
    expect_equal(simulationResult$iterations[2, ], 97)
    expect_equal(simulationResult$analysisTime[1, ], 14.769473, tolerance = 1e-07)
    expect_equal(simulationResult$analysisTime[2, ], 24.499634, tolerance = 1e-07)
    expect_equal(simulationResult$studyDuration, 24.198958, tolerance = 1e-07)
    expect_equal(simulationResult$eventsPerStage[1, ], 20)
    expect_equal(simulationResult$eventsPerStage[2, ], 40)
    expect_equal(simulationResult$expectedNumberOfEvents, 39.4, tolerance = 1e-07)
    expect_equal(simulationResult$eventsNotAchieved[1, ], 0)
    expect_equal(simulationResult$eventsNotAchieved[2, ], 0)
    expect_equal(simulationResult$numberOfSubjects[1, ], 199.47, tolerance = 1e-07)
    expect_equal(simulationResult$numberOfSubjects[2, ], 200)
    expect_equal(simulationResult$expectedNumberOfSubjects, 199.9841, tolerance = 1e-07)
    expect_equal(simulationResult$rejectPerStage[1, ], 0.03, tolerance = 1e-07)
    expect_equal(simulationResult$rejectPerStage[2, ], 0.24, tolerance = 1e-07)
    expect_equal(simulationResult$overallReject, 0.27, tolerance = 1e-07)
    expect_equal(simulationResult$futilityPerStage[1, ], 0)
    expect_equal(simulationResult$futilityStop, 0)
    expect_equal(simulationResult$earlyStop, 0.03, tolerance = 1e-07)
    expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_)
    expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.29516222, tolerance = 1e-07)
})

test_that("'getSimulationSurvival': As above, but with a three-stage O'Brien and Fleming design with specified information rates; note that the planned events consist of integer values", {
    .skipTestifDisabled()
    d3 <- getDesignGroupSequential(informationRates = c(0.4, 0.7, 1))
    simulationResult <- getSimulationSurvival(design = d3, pi1 = 0.2, pi2 = 0.3,
        eventTime = 24, plannedEvents = round(d3$informationRates * 40),
        maxNumberOfSubjects = 200, directionUpper = FALSE, seed = 1234567890)
    ##
    ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results
    ##
    expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(simulationResult$median1, 74.550809,
tolerance = 1e-07) expect_equal(simulationResult$median2, 46.640597, tolerance = 1e-07) expect_equal(simulationResult$lambda1, 0.009297648, tolerance = 1e-07) expect_equal(simulationResult$lambda2, 0.014861456, tolerance = 1e-07) expect_equal(simulationResult$hazardRatio, 0.62562161, tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], 1000) expect_equal(simulationResult$iterations[2, ], 985) expect_equal(simulationResult$iterations[3, ], 861) expect_equal(simulationResult$analysisTime[1, ], 13.073331, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[2, ], 18.748105, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[3, ], 24.810251, tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 23.877826, tolerance = 1e-07) expect_equal(simulationResult$eventsPerStage[1, ], 16) expect_equal(simulationResult$eventsPerStage[2, ], 28) expect_equal(simulationResult$eventsPerStage[3, ], 40) expect_equal(simulationResult$expectedNumberOfEvents, 38.152, tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$eventsNotAchieved[2, ], 0) expect_equal(simulationResult$eventsNotAchieved[3, ], 0) expect_equal(simulationResult$numberOfSubjects[1, ], 195.313, tolerance = 1e-07) expect_equal(simulationResult$numberOfSubjects[2, ], 200) expect_equal(simulationResult$numberOfSubjects[3, ], 200) expect_equal(simulationResult$expectedNumberOfSubjects, 199.92969, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], 0.015, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[2, ], 0.124, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[3, ], 0.183, tolerance = 1e-07) expect_equal(simulationResult$overallReject, 0.322, tolerance = 1e-07) expect_equal(simulationResult$futilityPerStage[1, ], 0) expect_equal(simulationResult$futilityPerStage[2, ], 0) expect_equal(simulationResult$futilityStop, 0) expect_equal(simulationResult$earlyStop, 0.139, tolerance = 1e-07) expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.19637573, tolerance = 1e-07) expect_equal(simulationResult$conditionalPowerAchieved[3, ], 0.23542216, tolerance = 1e-07) }) test_that("'getSimulationSurvival': Effect size is based on event rate at specified event time for the reference group and hazard ratio, directionUpper = FALSE needs to be specified because it should be shown that hazard ratio < 1", { .skipTestifDisabled() simulationResult <- getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, pi2 = 0.3, eventTime = 24, plannedEvents = c(20, 40), maxNumberOfSubjects = 200, directionUpper = FALSE, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$pi1, 0.16333997, tolerance = 1e-07) expect_equal(simulationResult$median1, 93.281194, tolerance = 1e-07) expect_equal(simulationResult$median2, 46.640597, tolerance = 1e-07) expect_equal(simulationResult$lambda1, 0.007430728, tolerance = 1e-07) expect_equal(simulationResult$lambda2, 0.014861456, tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], 100) expect_equal(simulationResult$iterations[2, ], 92) expect_equal(simulationResult$analysisTime[1, ], 15.596955, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[2, ], 26.310745, 
tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 25.440402, tolerance = 1e-07) expect_equal(simulationResult$eventsPerStage[1, ], 20) expect_equal(simulationResult$eventsPerStage[2, ], 40) expect_equal(simulationResult$expectedNumberOfEvents, 38.4, tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$eventsNotAchieved[2, ], 0) expect_equal(simulationResult$numberOfSubjects[1, ], 199.69, tolerance = 1e-07) expect_equal(simulationResult$numberOfSubjects[2, ], 200) expect_equal(simulationResult$expectedNumberOfSubjects, 199.9752, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], 0.08, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[2, ], 0.44, tolerance = 1e-07) expect_equal(simulationResult$overallReject, 0.52, tolerance = 1e-07) expect_equal(simulationResult$futilityPerStage[1, ], 0) expect_equal(simulationResult$futilityStop, 0) expect_equal(simulationResult$earlyStop, 0.08, tolerance = 1e-07) expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.43087375, tolerance = 1e-07) }) test_that("'getSimulationSurvival': Effect size is based on hazard rate for the reference group and hazard ratio, directionUpper = FALSE needs to be specified because it should be shown that hazard ratio < 1", { .skipTestifDisabled() simulationResult <- getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, lambda2 = 0.02, plannedEvents = c(20, 40), maxNumberOfSubjects = 200, directionUpper = FALSE, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$pi1, 0.11307956, tolerance = 1e-07) expect_equal(simulationResult$pi2, 0.21337214, tolerance = 1e-07) expect_equal(simulationResult$median1, 69.314718, tolerance = 1e-07) expect_equal(simulationResult$median2, 34.657359, tolerance = 1e-07) expect_equal(simulationResult$lambda1, 0.01, tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], 100) expect_equal(simulationResult$iterations[2, ], 94) expect_equal(simulationResult$analysisTime[1, ], 13.132525, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[2, ], 21.186744, tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 20.690944, tolerance = 1e-07) expect_equal(simulationResult$eventsPerStage[1, ], 20) expect_equal(simulationResult$eventsPerStage[2, ], 40) expect_equal(simulationResult$expectedNumberOfEvents, 38.8, tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$eventsNotAchieved[2, ], 0) expect_equal(simulationResult$numberOfSubjects[1, ], 195.5, tolerance = 1e-07) expect_equal(simulationResult$numberOfSubjects[2, ], 200) expect_equal(simulationResult$expectedNumberOfSubjects, 199.73, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], 0.06, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[2, ], 0.43, tolerance = 1e-07) expect_equal(simulationResult$overallReject, 0.49, tolerance = 1e-07) expect_equal(simulationResult$futilityPerStage[1, ], 0) expect_equal(simulationResult$futilityStop, 0) expect_equal(simulationResult$earlyStop, 0.06, tolerance = 1e-07) expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_) 
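    # Editorial note (illustrative check, not part of the original test suite):
    # the median expectations above reflect the exponential relation
    # median = log(2) / lambda, here with lambda1 = 0.01 and lambda2 = 0.02:
    #   log(2) / 0.01   # = 69.314718 (median1)
    #   log(2) / 0.02   # = 34.657359 (median2)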
expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.48014443, tolerance = 1e-07) }) test_that("'getSimulationSurvival': Specification of piecewise exponential survival time and hazard ratios, note that in getSimulationSurvival only one hazard ratio is used in the case that the survival time is piecewise exponential", { .skipTestifDisabled() simulationResult <- getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), hazardRatio = 1.5, plannedEvents = c(20, 40), maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$lambda1, c(0.015, 0.03, 0.06), tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], 100) expect_equal(simulationResult$iterations[2, ], 96) expect_equal(simulationResult$analysisTime[1, ], 12.106711, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[2, ], 16.150578, tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 16.020702, tolerance = 1e-07) expect_equal(simulationResult$eventsPerStage[1, ], 20) expect_equal(simulationResult$eventsPerStage[2, ], 40) expect_equal(simulationResult$expectedNumberOfEvents, 39.2, tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$eventsNotAchieved[2, ], 0) expect_equal(simulationResult$numberOfSubjects[1, ], 193.51, tolerance = 1e-07) expect_equal(simulationResult$numberOfSubjects[2, ], 200) expect_equal(simulationResult$expectedNumberOfSubjects, 199.7404, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], 0.04, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[2, ], 0.28, tolerance = 1e-07) expect_equal(simulationResult$overallReject, 0.32, tolerance = 1e-07) expect_equal(simulationResult$futilityPerStage[1, ], 0) expect_equal(simulationResult$futilityStop, 0) expect_equal(simulationResult$earlyStop, 0.04, tolerance = 1e-07) expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.33404702, tolerance = 1e-07) pws <- list("0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) simulationResult <- getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2), plannedEvents = c(20, 40), maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$lambda1, c(0.015, 0.03, 0.06), tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], 100) expect_equal(simulationResult$iterations[2, ], 96) expect_equal(simulationResult$analysisTime[1, ], 12.106711, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[2, ], 16.150578, tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 16.020702, tolerance = 1e-07) expect_equal(simulationResult$eventsPerStage[1, ], 20) expect_equal(simulationResult$eventsPerStage[2, ], 40) expect_equal(simulationResult$expectedNumberOfEvents, 39.2, tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$eventsNotAchieved[2, ], 0) 
expect_equal(simulationResult$numberOfSubjects[1, ], 193.51, tolerance = 1e-07)
    expect_equal(simulationResult$numberOfSubjects[2, ], 200)
    expect_equal(simulationResult$expectedNumberOfSubjects, 199.7404, tolerance = 1e-07)
    expect_equal(simulationResult$rejectPerStage[1, ], 0.04, tolerance = 1e-07)
    expect_equal(simulationResult$rejectPerStage[2, ], 0.28, tolerance = 1e-07)
    expect_equal(simulationResult$overallReject, 0.32, tolerance = 1e-07)
    expect_equal(simulationResult$futilityPerStage[1, ], 0)
    expect_equal(simulationResult$futilityStop, 0)
    expect_equal(simulationResult$earlyStop, 0.04, tolerance = 1e-07)
    expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_)
    expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.33404702, tolerance = 1e-07)
})

test_that("'getSimulationSurvival': Specification of piecewise exponential survival time for both treatment arms", {
    .skipTestifDisabled()
    simulationResult <- getSimulationSurvival(design = getDesignGroupSequential(kMax = 2),
        piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04),
        lambda1 = c(0.015, 0.03, 0.06), plannedEvents = c(20, 40),
        maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890)
    ##
    ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results
    ##
    expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(simulationResult$lambda1, c(0.015, 0.03, 0.06), tolerance = 1e-07)
    expect_equal(simulationResult$iterations[1, ], 100)
    expect_equal(simulationResult$iterations[2, ], 96)
    expect_equal(simulationResult$analysisTime[1, ], 12.106711, tolerance = 1e-07)
    expect_equal(simulationResult$analysisTime[2, ], 16.150578, tolerance = 1e-07)
    expect_equal(simulationResult$studyDuration, 16.020702, tolerance = 1e-07)
    expect_equal(simulationResult$eventsPerStage[1, ], 20)
    expect_equal(simulationResult$eventsPerStage[2, ], 40)
    expect_equal(simulationResult$expectedNumberOfEvents, 39.2, tolerance = 1e-07)
    expect_equal(simulationResult$eventsNotAchieved[1, ], 0)
    expect_equal(simulationResult$eventsNotAchieved[2, ], 0)
    expect_equal(simulationResult$numberOfSubjects[1, ], 193.51, tolerance = 1e-07)
    expect_equal(simulationResult$numberOfSubjects[2, ], 200)
    expect_equal(simulationResult$expectedNumberOfSubjects, 199.7404, tolerance = 1e-07)
    expect_equal(simulationResult$rejectPerStage[1, ], 0.04, tolerance = 1e-07)
    expect_equal(simulationResult$rejectPerStage[2, ], 0.28, tolerance = 1e-07)
    expect_equal(simulationResult$overallReject, 0.32, tolerance = 1e-07)
    expect_equal(simulationResult$futilityPerStage[1, ], 0)
    expect_equal(simulationResult$futilityStop, 0)
    expect_equal(simulationResult$earlyStop, 0.04, tolerance = 1e-07)
    expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_)
    expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.33404702, tolerance = 1e-07)
})

test_that("'getSimulationSurvival': Specification of piecewise exponential survival time as a list; note that in getSimulationSurvival only one hazard ratio (not a vector) can be used", {
    .skipTestifDisabled()
    pws <- list("0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04)
    simulationResult <- getSimulationSurvival(design = getDesignGroupSequential(kMax = 2),
        piecewiseSurvivalTime = pws, hazardRatio = 1.5, plannedEvents = c(20, 40),
        maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890)
    ##
    ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results
    ##
expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$lambda1, c(0.015, 0.03, 0.06), tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], 100) expect_equal(simulationResult$iterations[2, ], 96) expect_equal(simulationResult$analysisTime[1, ], 12.106711, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[2, ], 16.150578, tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 16.020702, tolerance = 1e-07) expect_equal(simulationResult$eventsPerStage[1, ], 20) expect_equal(simulationResult$eventsPerStage[2, ], 40) expect_equal(simulationResult$expectedNumberOfEvents, 39.2, tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$eventsNotAchieved[2, ], 0) expect_equal(simulationResult$numberOfSubjects[1, ], 193.51, tolerance = 1e-07) expect_equal(simulationResult$numberOfSubjects[2, ], 200) expect_equal(simulationResult$expectedNumberOfSubjects, 199.7404, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], 0.04, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[2, ], 0.28, tolerance = 1e-07) expect_equal(simulationResult$overallReject, 0.32, tolerance = 1e-07) expect_equal(simulationResult$futilityPerStage[1, ], 0) expect_equal(simulationResult$futilityStop, 0) expect_equal(simulationResult$earlyStop, 0.04, tolerance = 1e-07) expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.33404702, tolerance = 1e-07) }) test_that("'getSimulationSurvival': Specification of piecewise exponential survival time and delayed effect (response after 5 time units) ", { .skipTestifDisabled() simulationResult <- getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), lambda1 = c(0.01, 0.02, 0.06), plannedEvents = c(20, 40), maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], 100) expect_equal(simulationResult$iterations[2, ], 100) expect_equal(simulationResult$analysisTime[1, ], 12.973056, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[2, ], 17.030809, tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 17.030809, tolerance = 1e-07) expect_equal(simulationResult$eventsPerStage[1, ], 20) expect_equal(simulationResult$eventsPerStage[2, ], 40) expect_equal(simulationResult$expectedNumberOfEvents, 40) expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$eventsNotAchieved[2, ], 0) expect_equal(simulationResult$numberOfSubjects[1, ], 197.81, tolerance = 1e-07) expect_equal(simulationResult$numberOfSubjects[2, ], 200) expect_equal(simulationResult$expectedNumberOfSubjects, 200) expect_equal(simulationResult$rejectPerStage[1, ], 0) expect_equal(simulationResult$rejectPerStage[2, ], 0.06, tolerance = 1e-07) expect_equal(simulationResult$overallReject, 0.06, tolerance = 1e-07) expect_equal(simulationResult$futilityPerStage[1, ], 0) expect_equal(simulationResult$futilityStop, 0) expect_equal(simulationResult$earlyStop, 0) expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.1789388, tolerance = 1e-07) }) 
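# Editorial note (illustrative sketch, not part of the original test suite):
# in the piecewise exponential setups above, a single hazard ratio acts
# interval-wise on the control hazards, so the treatment hazards checked in
# the 'lambda1' expectations can be reproduced as
#   lambda2 <- c(0.01, 0.02, 0.04)
#   hazardRatio <- 1.5
#   hazardRatio * lambda2   # = c(0.015, 0.03, 0.06)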
test_that("'getSimulationSurvival': Specify effect size based on median survival times (median1 = 5, median2 = 3)", { .skipTestifDisabled() simulationResult <- getSimulationSurvival(lambda1 = log(2) / 5, lambda2 = log(2) / 3, plannedEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$pi1, 0.81053543, tolerance = 1e-07) expect_equal(simulationResult$pi2, 0.9375, tolerance = 1e-07) expect_equal(simulationResult$median1, 5, tolerance = 1e-07) expect_equal(simulationResult$median2, 3) expect_equal(simulationResult$hazardRatio, 0.6, tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], 100) expect_equal(simulationResult$analysisTime[1, ], 6.1552733, tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 6.1552733, tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfEvents, 40) expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$expectedNumberOfSubjects, 102.09, tolerance = 1e-07) expect_equal(simulationResult$overallReject, 0.29, tolerance = 1e-07) expect_equal(simulationResult$futilityStop, 0) expect_equal(simulationResult$earlyStop, 0) }) test_that("'getSimulationSurvival': Specify effect size based on median survival times of Weibull distribtion with kappa = 2 (median1 = 5, median2 = 3)", { .skipTestifDisabled() simulationResult <- getSimulationSurvival(lambda1 = getLambdaByMedian(median = 5, kappa = 2), lambda2 = getLambdaByMedian(median = 3, kappa = 2), kappa = 2, plannedEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$pi1, 0.98154699, tolerance = 1e-07) expect_equal(simulationResult$pi2, 0.99998474, tolerance = 1e-07) expect_equal(simulationResult$median1, 5, tolerance = 1e-07) expect_equal(simulationResult$median2, 3) expect_equal(simulationResult$hazardRatio, 0.36, tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], 100) expect_equal(simulationResult$analysisTime[1, ], 6.3123397, tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 6.3123397, tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfEvents, 40) expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$expectedNumberOfSubjects, 104.7, tolerance = 1e-07) expect_equal(simulationResult$overallReject, 0.9, tolerance = 1e-07) expect_equal(simulationResult$futilityStop, 0) expect_equal(simulationResult$earlyStop, 0) }) test_that("'getSimulationSurvival': Perform recalculation of number of events based on conditional power", { .skipTestifDisabled() # Perform recalculation of number of events based on conditional power for a # three-stage design with inverse normal combination test, where the conditional power # is calculated under the specified effect size thetaH1 = 1.3 and up to a four-fold # increase in originally planned sample size (number of events) is allowed # Note that the first value in \code{minNumberOfEventsPerStage} and # \code{maxNumberOfEventsPerStage} is arbitrary, i.e., it has no effect. 
dIN <- getDesignInverseNormal(informationRates = c(0.4, 0.7, 1)) resultsWithSSR1 <- getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3, conditionalPower = 0.8, thetaH1 = 1.3, plannedEvents = c(58, 102, 146), minNumberOfEventsPerStage = c(NA_real_, 44, 44), maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44), maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'resultsWithSSR1' with expected results ## expect_equal(resultsWithSSR1$accrualIntensity, 66.666667, tolerance = 1e-07) expect_equal(resultsWithSSR1$pi1, c(0.3, 0.32452723, 0.34819506, 0.37103359, 0.39307188, 0.41433798, 0.43485894), tolerance = 1e-07) expect_equal(resultsWithSSR1$median1, c(23.320299, 21.200271, 19.433582, 17.938691, 16.657356, 15.546866, 14.575187), tolerance = 1e-07) expect_equal(resultsWithSSR1$median2, 23.320299, tolerance = 1e-07) expect_equal(resultsWithSSR1$lambda1, c(0.029722912, 0.032695203, 0.035667494, 0.038639786, 0.041612077, 0.044584368, 0.047556659), tolerance = 1e-07) expect_equal(resultsWithSSR1$lambda2, 0.029722912, tolerance = 1e-07) expect_equal(resultsWithSSR1$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100)) expect_equal(resultsWithSSR1$iterations[2, ], c(100, 100, 100, 98, 96, 99, 92)) expect_equal(resultsWithSSR1$iterations[3, ], c(96, 96, 88, 67, 50, 35, 11)) expect_equal(resultsWithSSR1$analysisTime[1, ], c(7.9761501, 7.8239889, 7.5191849, 7.4832292, 7.3291066, 7.1091953, 6.9737455), tolerance = 1e-07) expect_equal(resultsWithSSR1$analysisTime[2, ], c(17.76189, 17.229038, 16.567328, 16.175906, 15.668575, 15.328143, 14.604753), tolerance = 1e-07) expect_equal(resultsWithSSR1$analysisTime[3, ], c(30.192276, 28.615009, 26.463502, 25.657109, 23.821118, 23.34898, 22.534023), tolerance = 1e-07) expect_equal(resultsWithSSR1$studyDuration, c(29.683899, 28.160756, 25.20615, 22.190278, 19.319577, 18.030286, 14.789904), tolerance = 1e-07) expect_equal(resultsWithSSR1$eventsPerStage[1, ], c(58, 58, 58, 58, 58, 58, 58)) expect_equal(resultsWithSSR1$eventsPerStage[2, ], c(233.65, 231.27, 229.84, 229.43878, 228.57292, 227.67677, 219.44565), tolerance = 1e-07) expect_equal(resultsWithSSR1$eventsPerStage[3, ], c(409.28125, 401.01042, 385.875, 382.38806, 371.3, 374.14286, 367.72727), tolerance = 1e-07) expect_equal(resultsWithSSR1$expectedNumberOfEvents, c(402.256, 394.2208, 367.1508, 328.48602, 293.11354, 277.24313, 222.84098), tolerance = 1e-07) expect_equal(resultsWithSSR1$eventsNotAchieved[1, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR1$eventsNotAchieved[2, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR1$eventsNotAchieved[3, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR1$numberOfSubjects[1, ], c(531.25, 521.07, 500.8, 498.42, 488.13, 473.47, 464.37), tolerance = 1e-07) expect_equal(resultsWithSSR1$numberOfSubjects[2, ], c(800, 800, 799.45, 798.66327, 796.55208, 797.06061, 793.47826), tolerance = 1e-07) expect_equal(resultsWithSSR1$numberOfSubjects[3, ], c(800, 800, 800, 800, 800, 800, 800)) expect_equal(resultsWithSSR1$expectedNumberOfSubjects, c(800, 800, 799.934, 793.55401, 785.93916, 794.85349, 767.86699), tolerance = 1e-07) expect_equal(resultsWithSSR1$rejectPerStage[1, ], c(0, 0, 0, 0.02, 0.04, 0.01, 0.08), tolerance = 1e-07) expect_equal(resultsWithSSR1$rejectPerStage[2, ], c(0.04, 0.04, 0.12, 0.31, 0.46, 0.64, 0.81), tolerance = 1e-07) expect_equal(resultsWithSSR1$rejectPerStage[3, ], c(0, 0.12, 0.26, 0.42, 0.41, 0.3, 0.11), tolerance = 1e-07) 
expect_equal(resultsWithSSR1$overallReject, c(0.04, 0.16, 0.38, 0.75, 0.91, 0.95, 1), tolerance = 1e-07) expect_equal(resultsWithSSR1$futilityPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR1$futilityPerStage[2, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR1$futilityStop, c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR1$earlyStop, c(0.04, 0.04, 0.12, 0.33, 0.5, 0.65, 0.89), tolerance = 1e-07) expect_equal(resultsWithSSR1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(resultsWithSSR1$conditionalPowerAchieved[2, ], c(0.12165751, 0.15502837, 0.23497758, 0.29890789, 0.33886493, 0.41286728, 0.49916888), tolerance = 1e-07) expect_equal(resultsWithSSR1$conditionalPowerAchieved[3, ], c(0.14749827, 0.23857933, 0.44868993, 0.59763371, 0.65378645, 0.66059558, 0.69812096), tolerance = 1e-07) # If thetaH1 is unspecified, the observed hazard ratio estimate # (calculated from the log-rank statistic) is used for performing the # recalculation of the number of events resultsWithSSR2 <- getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3, conditionalPower = 0.8, plannedEvents = c(58, 102, 146), minNumberOfEventsPerStage = c(NA_real_, 44, 44), maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44), maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'resultsWithSSR2' with expected results ## expect_equal(resultsWithSSR2$accrualIntensity, 66.666667, tolerance = 1e-07) expect_equal(resultsWithSSR2$pi1, c(0.3, 0.32452723, 0.34819506, 0.37103359, 0.39307188, 0.41433798, 0.43485894), tolerance = 1e-07) expect_equal(resultsWithSSR2$median1, c(23.320299, 21.200271, 19.433582, 17.938691, 16.657356, 15.546866, 14.575187), tolerance = 1e-07) expect_equal(resultsWithSSR2$median2, 23.320299, tolerance = 1e-07) expect_equal(resultsWithSSR2$lambda1, c(0.029722912, 0.032695203, 0.035667494, 0.038639786, 0.041612077, 0.044584368, 0.047556659), tolerance = 1e-07) expect_equal(resultsWithSSR2$lambda2, 0.029722912, tolerance = 1e-07) expect_equal(resultsWithSSR2$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100)) expect_equal(resultsWithSSR2$iterations[2, ], c(100, 100, 100, 98, 96, 99, 92)) expect_equal(resultsWithSSR2$iterations[3, ], c(99, 95, 92, 71, 60, 45, 21)) expect_equal(resultsWithSSR2$analysisTime[1, ], c(7.9761501, 7.8239889, 7.5191849, 7.4832292, 7.3291066, 7.1091953, 6.9737455), tolerance = 1e-07) expect_equal(resultsWithSSR2$analysisTime[2, ], c(17.532866, 16.792737, 15.753436, 15.242772, 14.414526, 13.395253, 12.536642), tolerance = 1e-07) expect_equal(resultsWithSSR2$analysisTime[3, ], c(29.782185, 28.27297, 25.249508, 24.235039, 21.407797, 20.846814, 17.625231), tolerance = 1e-07) expect_equal(resultsWithSSR2$studyDuration, c(29.663096, 27.530562, 24.305604, 21.136576, 18.176787, 16.398878, 13.170673), tolerance = 1e-07) expect_equal(resultsWithSSR2$eventsPerStage[1, ], c(58, 58, 58, 58, 58, 58, 58)) expect_equal(resultsWithSSR2$eventsPerStage[2, ], c(229.71, 222.76, 213.91, 210.63265, 201.21875, 185.82828, 171.84783), tolerance = 1e-07) expect_equal(resultsWithSSR2$eventsPerStage[3, ], c(403.55556, 395.78947, 365.25, 358.80282, 327.35, 327.17778, 272.14286), tolerance = 1e-07) expect_equal(resultsWithSSR2$expectedNumberOfEvents, c(401.8171, 387.138, 353.1428, 312.78082, 271.16875, 248.15727, 183.80196), tolerance = 1e-07) expect_equal(resultsWithSSR2$eventsNotAchieved[1, ], c(0, 0, 0, 0, 0, 0, 
        0))
    expect_equal(resultsWithSSR2$eventsNotAchieved[2, ], c(0, 0, 0, 0, 0, 0, 0))
    expect_equal(resultsWithSSR2$eventsNotAchieved[3, ], c(0, 0, 0, 0, 0, 0, 0))
    expect_equal(resultsWithSSR2$numberOfSubjects[1, ], c(531.25, 521.07, 500.8, 498.42, 488.13, 473.47, 464.37), tolerance = 1e-07)
    expect_equal(resultsWithSSR2$numberOfSubjects[2, ], c(798.3, 792.67, 784.71, 785.72449, 774.40625, 754.47475, 731), tolerance = 1e-07)
    expect_equal(resultsWithSSR2$numberOfSubjects[3, ], c(800, 800, 800, 800, 799.08333, 797.51111, 794.95238), tolerance = 1e-07)
    expect_equal(resultsWithSSR2$expectedNumberOfSubjects, c(799.983, 799.6335, 798.7768, 790.11401, 777.76145, 771.03106, 723.0996), tolerance = 1e-07)
    expect_equal(resultsWithSSR2$rejectPerStage[1, ], c(0, 0, 0, 0.02, 0.04, 0.01, 0.08), tolerance = 1e-07)
    expect_equal(resultsWithSSR2$rejectPerStage[2, ], c(0.01, 0.05, 0.08, 0.27, 0.36, 0.54, 0.71), tolerance = 1e-07)
    expect_equal(resultsWithSSR2$rejectPerStage[3, ], c(0.03, 0.11, 0.29, 0.39, 0.48, 0.37, 0.19), tolerance = 1e-07)
    expect_equal(resultsWithSSR2$overallReject, c(0.04, 0.16, 0.37, 0.68, 0.88, 0.92, 0.98), tolerance = 1e-07)
    expect_equal(resultsWithSSR2$futilityPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0))
    expect_equal(resultsWithSSR2$futilityPerStage[2, ], c(0, 0, 0, 0, 0, 0, 0))
    expect_equal(resultsWithSSR2$futilityStop, c(0, 0, 0, 0, 0, 0, 0))
    expect_equal(resultsWithSSR2$earlyStop, c(0.01, 0.05, 0.08, 0.29, 0.4, 0.55, 0.79), tolerance = 1e-07)
    expect_equal(resultsWithSSR2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(resultsWithSSR2$conditionalPowerAchieved[2, ], c(0.13442705, 0.17515425, 0.27216274, 0.37121019, 0.42163288, 0.51345413, 0.62679958), tolerance = 1e-07)
    expect_equal(resultsWithSSR2$conditionalPowerAchieved[3, ], c(0.088787205, 0.13342075, 0.37806621, 0.51790868, 0.64116584, 0.64220287, 0.73456911), tolerance = 1e-07)

    # Compare it with a design without event size recalculation
    resultsWithoutSSR <- getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1),
        pi2 = 0.3, plannedEvents = c(58, 102, 145), maxNumberOfSubjects = 800,
        maxNumberOfIterations = 100, seed = 1234567890)
    ##
    ## Comparison of the results of numeric object 'resultsWithoutSSR$overallReject' with expected results
    ##
    expect_equal(resultsWithoutSSR$overallReject, c(0.06, 0.09, 0.26, 0.36, 0.5, 0.62, 0.8), tolerance = 1e-07)
    ##
    ## Comparison of the results of numeric object 'resultsWithSSR1$overallReject' with expected results
    ##
    expect_equal(resultsWithSSR1$overallReject, c(0.04, 0.16, 0.38, 0.75, 0.91, 0.95, 1), tolerance = 1e-07)
    ##
    ## Comparison of the results of numeric object 'resultsWithSSR2$overallReject' with expected results
    ##
    expect_equal(resultsWithSSR2$overallReject, c(0.04, 0.16, 0.37, 0.68, 0.88, 0.92, 0.98), tolerance = 1e-07)
})

test_that("'getSimulationSurvival': Confirm that event size recalculation increases the Type I error rate, i.e., the combination test has to be used", {
    .skipTestifDisabled()
    dGS <- getDesignGroupSequential(informationRates = c(0.4, 0.7, 1))
    resultsWithSSRGS <- getSimulationSurvival(design = dGS, hazardRatio = seq(1),
        pi2 = 0.3, conditionalPower = 0.8, plannedEvents = c(58, 102, 145),
        minNumberOfEventsPerStage = c(NA_real_, 44, 44),
        maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44),
        maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890)
    ##
    ## Comparison of the results of numeric object 'resultsWithSSRGS$overallReject' with expected results
    ##
expect_equal(resultsWithSSRGS$overallReject, 0.05, tolerance = 1e-07) }) rpact/tests/testthat/helper-class_analysis_dataset.R0000644000176200001440000000442413353343260022510 0ustar liggesusers###################################################################################### # # # -- Unit tests helper functions -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### getMultipleStageResultsForDataset <- function(dataset, thetaH0 = NA_real_) { stage <- dataset$getNumberOfStages() kMax <- stage + 1 design1 <- getDesignGroupSequential(kMax = kMax) design2 <- getDesignInverseNormal(kMax = kMax) design3 <- getDesignFisher(kMax = kMax) stageResults1 <- getStageResults(design = design1, dataInput = dataset, stage = stage, thetaH0 = thetaH0) stageResults2 <- getStageResults(design = design2, dataInput = dataset, stage = stage, thetaH0 = thetaH0) stageResults3 <- getStageResults(design = design3, dataInput = dataset, stage = stage, thetaH0 = thetaH0) return(list( stageResults1 = stageResults1, stageResults2 = stageResults2, stageResults3 = stageResults3 )) }rpact/tests/testthat/test-f_design_power_calculator.R0000644000176200001440000045761413574422572022724 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 12 December 2019, 12:31:23 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing the power calculation of means for different designs and arguments") test_that("'getPowerMeans': power calculation of means in one sample for one-sided group sequential design", { designGS1 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), sided = 1, alpha = 0.07, beta = 0.1, futilityBounds = c(-0.5,0.5), typeOfDesign = "WT", deltaWT = 0.22) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterOneSampleMean} powerResult <- getPowerMeans(designGS1, groups = 1, thetaH0 = 0.5, stDev = 2, normalApproximation = FALSE, alternative = c(-1,1.2,1.4), directionUpper = TRUE, maxNumberOfSubjects = 50) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$effect, c(-1.5, 0.7, 0.9), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(15.177049, 35.61826, 31.576281), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(1.2596734e-07, 0.17254516, 0.28730882), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(2.7189456e-10, 0.43368823, 0.5145435), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(1.9550874e-12, 0.19182608, 0.13120557), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(1.2624119e-07, 0.79805947, 0.93305789), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.99114779, 0.032857727, 0.013099441), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.008851635, 0.045821034, 0.01275185), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.99999942, 0.078678761, 0.02585129), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.99999955, 0.68491215, 0.82770361), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8259013, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.1288256, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.97002208, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.2359398, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.67059547, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterOneSampleMean} powerResult <- getPowerMeans(designGS1, groups = 1, thetaH0 = -0.5, stDev = 2, normalApproximation = FALSE, 
alternative = c(-1.2, -1), directionUpper = FALSE, maxNumberOfSubjects = 50) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$effect, c(-0.7, -0.5), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(35.61826, 38.108498), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.17254516, 0.092241599), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.43368823, 0.28692789), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.19182608, 0.18609918), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.79805947, 0.56526867), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.032857727, 0.072497778), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.045821034, 0.12144703), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.078678761, 0.19394481), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.68491215, 0.5731143), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], -1.8259013, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], -1.1288256, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], -0.97002208, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], -0.2359398, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], -0.67059547, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterOneSampleMean} powerResult <- getPowerMeans(designGS1, groups = 1, thetaH0 = 0.5, stDev = 2, normalApproximation = TRUE, alternative = 1.2, maxNumberOfSubjects = 50) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$effect, 0.7, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 35.476828, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.17645213), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.43857394), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.19041646), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.80544254, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.031759279), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.04381091), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.075570189, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.69059627, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.6797184, tolerance = 1e-07) 
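    # Editorial note (illustrative check, not part of the original test suite):
    # the 'effect' values in these tests equal alternative - thetaH0, e.g.
    #   1.2 - 0.5              # =  0.7
    #   c(-1.2, -1) - (-0.5)   # = -0.7 -0.5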
expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.1091952, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.96124634, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.24180111, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.66903085, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterOneSampleMean} powerResult <- getPowerMeans(designGS1, groups = 2, thetaH0 = -0.5, stDev = 2, normalApproximation = TRUE, alternative = -1.2, directionUpper = FALSE, maxNumberOfSubjects = 50) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$effect, -0.7, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 38.152327, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.0540554), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.17942496), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.13908306), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.37256342, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.11944374), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.20558857), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.32503231, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.55851267, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], -2.8594368, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], -1.7183904, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], -1.4224927, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.01639778, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], -0.8380617, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) getSampleSizeMeans(groups = 1, thetaH0 = -0.2, allocationRatioPlanned = 1) }) test_that("'getPowerMeans': power calculation of means in one sample for two-sided group sequential design", { designGS2 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), alpha = 0.2, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.22) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:AdjShiftParameterOneSampleMean} powerResult <- getPowerMeans(designGS2, groups = 1, thetaH0 = 0.5, stDev = 2, 
normalApproximation = FALSE, alternative = 1.2, maxNumberOfSubjects = 50) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$effect, 0.7, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 34.898263, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.24200246), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.44211004), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.17580597), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.85991847, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.6841125, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -0.6632814, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.060186214, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.080390401, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 1.6632814, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 1.0601862, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.9196096, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.040846565, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.10670956, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.14433417, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:AdjShiftParameterOneSampleMean} powerResult <- getPowerMeans(designGS2, groups = 1, thetaH0 = -0.5, stDev = 2, normalApproximation = FALSE, alternative = -1.2, maxNumberOfSubjects = 50) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$effect, -0.7, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 34.898263, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.24200246), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.44211004), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.17580597), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.85991847, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.6841125, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -1.6632814, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -1.0601862, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.9196096, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 0.6632814, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.060186214, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], -0.080390401, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.040846565, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.10670956, tolerance = 1e-07) 
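# -- Editorial note (added): the reported 'effect' is the shift against the
# null hypothesis, alternative - thetaH0 = -1.2 - (-0.5) = -0.7.
expect_equal(powerResult$effect, -1.2 - (-0.5), tolerance = 1e-07)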
expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.14433417, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterOneSampleMean} powerResult <- getPowerMeans(designGS2, groups = 1, thetaH0 = 0.5, stDev = 2, normalApproximation = TRUE, alternative = 1.2, maxNumberOfSubjects = 50) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$effect, 0.7, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 34.735156, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.24557792), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.44464112), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.17420245), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.8644215, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.69021905, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -0.5560769, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.045347909, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.087095017, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 1.5560769, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 1.0453479, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.91290498, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.040846565, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.10670956, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.14433417, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterOneSampleMean} powerResult <- getPowerMeans(designGS2, groups = 1, thetaH0 = -0.5, stDev = 2, normalApproximation = TRUE, alternative = -1.2, maxNumberOfSubjects = 50) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$effect, -0.7, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 34.735156, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.24557792), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.44464112), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.17420245), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.8644215, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.69021905, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -1.5560769, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -1.0453479, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.91290498, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 0.5560769, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.045347909, tolerance = 1e-07) 
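# -- Editorial symmetry check (added): the two-sided effect-scale bounds lie
# symmetrically around thetaH0 = -0.5 at each stage, e.g. at stage 1:
expect_equal((powerResult$criticalValuesEffectScaleLower[1, ] +
    powerResult$criticalValuesEffectScaleUpper[1, ]) / 2, -0.5, tolerance = 1e-07)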
expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], -0.087095017, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.040846565, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.10670956, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.14433417, tolerance = 1e-07) }) test_that("'getPowerMeans': power calculation of mean difference in two samples for one-sided group sequential design", { designGS1 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), sided = 1, alpha = 0.07, beta = 0.1, futilityBounds = c(-0.5,0.5), typeOfDesign = "WT", deltaWT = 0.22) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesMeanDiff} powerResult <- getPowerMeans(designGS1, groups = 2, thetaH0 = 0.5, stDev = 1.5, meanRatio = F, normalApproximation = FALSE, alternative = 1.8, directionUpper = TRUE, maxNumberOfSubjects = 50, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 11.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 26.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 37.5, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 3.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 8.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 12.5, tolerance = 1e-07) expect_equal(powerResult$effect, 1.3, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 34.682897, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.19830007), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.46269628), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.18105899), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.84205533, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.026384529), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.034179878), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.060564406, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.72156075, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 2.8183805, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.5902217, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.3144249, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.04183972, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.79556274, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesMeanDiff} powerResult <- getPowerMeans(designGS1, groups = 2, 
thetaH0 = -0.5, stDev = 1.5, meanRatio = F, normalApproximation = FALSE, alternative = -1.8, directionUpper = FALSE, maxNumberOfSubjects = 50, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 11.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 26.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 37.5, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 3.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 8.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 12.5, tolerance = 1e-07) expect_equal(powerResult$effect, -1.3, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 34.682897, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.19830007), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.46269628), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.18105899), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.84205533, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.026384529), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.034179878), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.060564406, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.72156075, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], -2.8183805, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], -1.5902217, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], -1.3144249, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], -0.04183972, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], -0.79556274, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterTwoSamplesMean} powerResult <- getPowerMeans(designGS1, groups = 2, thetaH0 = 0.5, stDev = 1.5, meanRatio = F, normalApproximation = TRUE, alternative = 1.8, maxNumberOfSubjects = 50, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 11.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 26.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 37.5, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 3.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 8.75, tolerance = 1e-07) 
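# -- Editorial note (added): with allocationRatioPlanned r = 3 the stage-wise
# totals split as n1 = n * r / (1 + r) and n2 = n / (1 + r); for the
# first-stage total of 15 this gives 11.25 and 3.75, as asserted above.
expect_equal(powerResult$numberOfSubjects1[1, ], 15 * 3 / (1 + 3), tolerance = 1e-07)
expect_equal(powerResult$numberOfSubjects2[1, ], 15 / (1 + 3), tolerance = 1e-07)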
expect_equal(powerResult$numberOfSubjects2[3, ], 12.5, tolerance = 1e-07) expect_equal(powerResult$effect, 1.3, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 34.513558, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.20296684), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.46718133), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.17879617), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.84894434, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.025383492), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.032430719), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.057814211, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.72796238, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 2.5433322, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.555157, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.2989021, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.0527864, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.79277002, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterTwoSamplesMean} powerResult <- getPowerMeans(designGS1, groups = 2, thetaH0 = -0.5, stDev = 1.5, meanRatio = F, normalApproximation = TRUE, alternative = -1.8, directionUpper = FALSE, maxNumberOfSubjects = 50, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 11.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 26.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 37.5, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 3.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 8.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 12.5, tolerance = 1e-07) expect_equal(powerResult$effect, -1.3, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 34.513558, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.20296684), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.46718133), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.17879617), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.84894434, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.025383492), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.032430719), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.057814211, tolerance = 
1e-07) expect_equal(powerResult$earlyStop, 0.72796238, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], -2.5433322, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], -1.555157, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], -1.2989021, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], -0.0527864, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], -0.79277002, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesMeanRatio} powerResult <- getPowerMeans(designGS1, groups = 2, thetaH0 = 0.8, stDev = 1.5, meanRatio = T, normalApproximation = FALSE, alternative = 1.8, directionUpper = TRUE, maxNumberOfSubjects = 50, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 11.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 26.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 37.5, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 3.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 8.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 12.5, tolerance = 1e-07) expect_equal(powerResult$effect, 1) expect_equal(powerResult$expectedNumberOfSubjects, 36.038015, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.16086364), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.41797637), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.19543795), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.77427796, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.036438496), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.052451014), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.08888951, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.66772952, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 2.7808252, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.7314858, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.495845, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.40854768, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 1.0525289, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 
0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterTwoSamplesMean} powerResult <- getPowerMeans(designGS1, groups = 2, thetaH0 = 0.8, stDev = 1.5, meanRatio = T, normalApproximation = TRUE, alternative = 1.8, maxNumberOfSubjects = 50, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 11.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 26.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 37.5, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 3.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 8.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 12.5, tolerance = 1e-07) expect_equal(powerResult$effect, 1) expect_equal(powerResult$expectedNumberOfSubjects, 35.906427, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.16454336), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.42310788), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.19440486), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.7820561, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.035259709), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.050256465), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.085516174, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.67316741, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 2.5458238, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.7015266, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.4825823, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.41790054, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 1.0501428, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) }) test_that("'getPowerMeans': power calculation of mean difference in two samples for two-sided group sequential design", { designGS2 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), alpha = 0.4, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.22) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesMeanDiff} powerResult <- getPowerMeans(designGS2, groups = 2, stDev = 2, normalApproximation = FALSE, alternative = 1.2, maxNumberOfSubjects = 50, allocationRatioPlanned = 0.7) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) 
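# -- Editorial cross-check (added): designGS2 has no futility bounds, so early
# stopping can only occur through rejection; hence earlyStop equals the sum of
# the stage 1 and stage 2 rejection probabilities.
expect_equal(powerResult$earlyStop,
    unname(powerResult$rejectPerStage[1, ] + powerResult$rejectPerStage[2, ]),
    tolerance = 1e-07)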
expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 6.1764706, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 14.411765, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 20.588235, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 8.8235294, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 20.588235, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 29.411765, tolerance = 1e-07) expect_equal(powerResult$effect, 1.2, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 31.808737, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.35754296), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.37848399), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.13839393), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.87442088, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.73602695, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -1.6972761, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.83631454, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.62866109, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 1.6972761, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.83631454, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.62866109, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesMeanDiff} powerResult <- getPowerMeans(designGS2, groups = 2, stDev = 2, normalApproximation = FALSE, alternative = -1.2, maxNumberOfSubjects = 50, allocationRatioPlanned = 0.7) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 6.1764706, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 14.411765, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 20.588235, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 8.8235294, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 20.588235, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 29.411765, tolerance = 1e-07) expect_equal(powerResult$effect, -1.2, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 31.808737, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.35754296), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.37848399), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.13839393), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.87442088, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.73602695, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -1.6972761, 
tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.83631454, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.62866109, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 1.6972761, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.83631454, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.62866109, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterTwoSamplesMean} powerResult <- getPowerMeans(designGS2, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = TRUE, alternative = 1.2, maxNumberOfSubjects = 50, allocationRatioPlanned = 0.7) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 6.1764706, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 14.411765, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 20.588235, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 8.8235294, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 20.588235, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 29.411765, tolerance = 1e-07) expect_equal(powerResult$effect, 1.2, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 31.74783, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.35907583), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.37896773), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.13788231), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.87592587, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.73804356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -1.5897396, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.82092617, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.62155644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 1.5897396, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.82092617, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.62155644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterTwoSamplesMean} powerResult <- getPowerMeans(designGS2, groups = 2, stDev = 2, normalApproximation = TRUE, alternative = -1.2, maxNumberOfSubjects = 50, allocationRatioPlanned = 0.7) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## 
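# -- Editorial cross-check (added): without futility stopping, the expected
# sample size is the stage-wise sample size weighted by the stopping
# probabilities, E[N] = n1 + (n2 - n1) * (1 - r1) + (n3 - n2) * (1 - r1 - r2)
# with n = (15, 35, 50) and r_k the stage-wise rejection probabilities.
rej1 <- powerResult$rejectPerStage[1, ]
rej2 <- powerResult$rejectPerStage[2, ]
expect_equal(powerResult$expectedNumberOfSubjects,
    unname(15 + (35 - 15) * (1 - rej1) + (50 - 35) * (1 - rej1 - rej2)),
    tolerance = 1e-07)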
expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 6.1764706, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 14.411765, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 20.588235, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 8.8235294, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 20.588235, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 29.411765, tolerance = 1e-07) expect_equal(powerResult$effect, -1.2, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 31.74783, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.35907583), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.37896773), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.13788231), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.87592587, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.73804356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -1.5897396, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.82092617, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.62155644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 1.5897396, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.82092617, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.62155644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07) }) context("Testing the power calculation of rates for different designs and arguments") test_that("'getPowerRates': power calculation of rate in one sample for one-sided group sequential design", { designGS1 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), sided = 1, alpha = 0.07, beta = 0.1, futilityBounds = c(-0.5,0.5), typeOfDesign = "WT", deltaWT = 0.22) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterOneSampleRate} powerResult <- getPowerRates(designGS1, groups = 1, thetaH0 = 0.4, pi1 = c(0.2,0.3,0.4), directionUpper = FALSE, maxNumberOfSubjects = 40) ## ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 12) expect_equal(powerResult$numberOfSubjects[2, ], 28) expect_equal(powerResult$numberOfSubjects[3, ], 40) expect_equal(powerResult$effect, c(-0.2, -0.1, 0), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(26.793099, 30.568926, 25.859698), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.23143452, 0.056551742, 0.011170644), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.48990786, 0.18729986, 0.030436001), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.16366541, 0.14357447, 0.025842077), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.8850078, 0.38742607, 0.067448723), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.020163481, 0.11504671, 
0.30853754), tolerance = 1e-07)
expect_equal(powerResult$futilityPerStage[2, ], c(0.023605223, 0.1982266, 0.40193671), tolerance = 1e-07)
expect_equal(powerResult$futilityStop, c(0.043768704, 0.31327331, 0.71047424), tolerance = 1e-07)
expect_equal(powerResult$earlyStop, c(0.76511109, 0.55712491, 0.75208089), tolerance = 1e-07)
expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.076920806, tolerance = 1e-07)
expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.23316503, tolerance = 1e-07)
expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.27368249, tolerance = 1e-07)
expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07)
expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07)
expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07)
expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.47071068, tolerance = 1e-07)
expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.353709, tolerance = 1e-07)
expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07)
expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07)

# @refFS[Formula]{fs:PowerGroupSequentialOneSided}
# @refFS[Formula]{fs:AdjShiftParameterOneSampleRate}
powerResult <- getPowerRates(designGS1, groups = 1, thetaH0 = 0.4,
    pi1 = c(0.4,0.5,0.6), directionUpper = TRUE, maxNumberOfSubjects = 40)

##
## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results
##
expect_equal(powerResult$numberOfSubjects[1, ], 12)
expect_equal(powerResult$numberOfSubjects[2, ], 28)
expect_equal(powerResult$numberOfSubjects[3, ], 40)
expect_equal(powerResult$effect, c(0, 0.1, 0.2), tolerance = 1e-07)
expect_equal(powerResult$expectedNumberOfSubjects, c(25.859698, 30.585503, 27.927522), tolerance = 1e-07)
expect_equal(powerResult$rejectPerStage[1, ], c(0.011170644, 0.057586328, 0.19206788), tolerance = 1e-07)
expect_equal(powerResult$rejectPerStage[2, ], c(0.030436001, 0.19052871, 0.45635017), tolerance = 1e-07)
expect_equal(powerResult$rejectPerStage[3, ], c(0.025842077, 0.14536961, 0.1839518), tolerance = 1e-07)
expect_equal(powerResult$overallReject, c(0.067448723, 0.39348465, 0.83236985), tolerance = 1e-07)
expect_equal(powerResult$futilityPerStage[1, ], c(0.30853754, 0.11330227, 0.027796437), tolerance = 1e-07)
expect_equal(powerResult$futilityPerStage[2, ], c(0.40193671, 0.19527267, 0.03667294), tolerance = 1e-07)
expect_equal(powerResult$futilityStop, c(0.71047424, 0.30857493, 0.064469377), tolerance = 1e-07)
expect_equal(powerResult$earlyStop, c(0.75208089, 0.55668998, 0.71288743), tolerance = 1e-07)
expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.72307919, tolerance = 1e-07)
expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.56683497, tolerance = 1e-07)
expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.52631751, tolerance = 1e-07)
expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07)
expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07)
expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07)
expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.32928932, tolerance = 1e-07)
expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.446291, tolerance = 1e-07)
expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07)
expect_equal(powerResult$futilityBoundsPValueScale[2, ],
0.30853754, tolerance = 1e-07) }) test_that("'getPowerRates': power calculation of rate in one sample for two-sided group sequential design", { designGS2 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), alpha = 0.4, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.22) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:AdjShiftParameterOneSampleRate} powerResult <- getPowerRates(designGS2, groups = 1, thetaH0 = 0.4, pi1 = seq(0.2,0.6,0.1), maxNumberOfSubjects = 40) ## ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 12) expect_equal(powerResult$numberOfSubjects[2, ], 28) expect_equal(powerResult$numberOfSubjects[3, ], 40) expect_equal(powerResult$effect, c(-0.2, -0.1, 0, 0.1, 0.2), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(20.319274, 30.129425, 34.422159, 30.357182, 22.419855), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.54595705, 0.22704321, 0.1297467, 0.22142183, 0.46151826), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.36616073, 0.29278043, 0.16207777, 0.28691724, 0.38813612), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.065351333, 0.15710154, 0.10817552, 0.15623302, 0.098356497), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.97746912, 0.67692518, 0.4, 0.66457209, 0.94801088), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.91211779, 0.51982364, 0.29182448, 0.50833906, 0.84965439), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.18573229, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.28935423, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.3162256, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 0.61426771, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.51064577, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.4837744, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07) }) test_that("'getPowerRates': power calculation of rate in two samples for one-sided group sequential design, riskRatio = FALSE ", { # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateDiff} # @refFS[Formula]{fs:EstimatesDiffFarringtonManning} designGS1 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), sided = 1, alpha = 0.07, beta = 0.1, futilityBounds = c(-0.5,0.5), typeOfDesign = "WT", deltaWT = 0.22) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateDiff} # @refFS[Formula]{fs:EstimatesDiffFarringtonManning} powerResult <- getPowerRates(designGS1, groups = 2, thetaH0 = 0.1, pi2 = 0.4, pi1 = c(0.1,0.2,0.3), directionUpper = FALSE, maxNumberOfSubjects = 40, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 12) expect_equal(powerResult$numberOfSubjects[2, ], 28) expect_equal(powerResult$numberOfSubjects[3, ], 40) expect_equal(powerResult$numberOfSubjects1[1, ], 9) 
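# -- Editorial note (added): with the shifted null thetaH0 = 0.1 the reported
# 'effect' is the distance from the margin, pi1 - pi2 - thetaH0.
expect_equal(powerResult$effect, c(0.1, 0.2, 0.3) - 0.4 - 0.1, tolerance = 1e-07)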
expect_equal(powerResult$numberOfSubjects1[2, ], 21) expect_equal(powerResult$numberOfSubjects1[3, ], 30) expect_equal(powerResult$numberOfSubjects2[1, ], 3) expect_equal(powerResult$numberOfSubjects2[2, ], 7) expect_equal(powerResult$numberOfSubjects2[3, ], 10) expect_equal(powerResult$effect, c(-0.4, -0.3, -0.2), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(27.333747, 30.142404, 30.525807), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.21254585, 0.11056737, 0.054245237), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.47569558, 0.32910884, 0.18002797), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.17392941, 0.19557908, 0.13943265), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.86217083, 0.63525529, 0.37370586), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.023466961, 0.059262043, 0.11909962), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.029128919, 0.096088854, 0.20501677), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.05259588, 0.1553509, 0.32411639), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.74083731, 0.59502711, 0.5583896), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], -0.3905544, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], -0.21681979, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], -0.15504053, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.26517501, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], -0.00361566, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateDiff} # @refFS[Formula]{fs:EstimatesDiffFarringtonManning} powerResult <- getPowerRates(designGS1, groups = 2, thetaH0 = -0.1, pi2 = 0.4, pi1 = c(0.2, 0.3, 0.4, 0.5), directionUpper = TRUE, maxNumberOfSubjects = 80, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 24) expect_equal(powerResult$numberOfSubjects[2, ], 56) expect_equal(powerResult$numberOfSubjects[3, ], 80) expect_equal(powerResult$numberOfSubjects1[1, ], 18) expect_equal(powerResult$numberOfSubjects1[2, ], 42) expect_equal(powerResult$numberOfSubjects1[3, ], 60) expect_equal(powerResult$numberOfSubjects2[1, ], 6) expect_equal(powerResult$numberOfSubjects2[2, ], 14) expect_equal(powerResult$numberOfSubjects2[3, ], 20) expect_equal(powerResult$effect, c(-0.1, -2.7755576e-17, 0.1, 0.2), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(42.4454, 51.719397, 58.823585, 61.315141), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.0028716829, 0.011170644, 0.031364648, 0.076178456), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.0049229598, 0.030436001, 0.1027412, 0.24505539), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], 
c(0.0033586921, 0.025842077, 0.087149125, 0.17152942), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.011153335, 0.067448723, 0.22125497, 0.49276327), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.49105221, 0.30853754, 0.17789692, 0.08798644), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.40736296, 0.40193671, 0.29133241, 0.150429), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.89841517, 0.71047424, 0.46922933, 0.23841544), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.90620981, 0.75208089, 0.60333518, 0.55964928), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.38186802, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.17360028, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.10931124, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], -0.20652185, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], -0.02383242, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) }) test_that("'getPowerRates': power calculation of rate in two samples for one-sided group sequential design, riskRatio = TRUE ", { designGS1 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), sided = 1, alpha = 0.07, beta = 0.1, futilityBounds = c(-0.5,0.5), typeOfDesign = "WT", deltaWT = 0.22) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateRatio} # @refFS[Formula]{fs:EstimatesRatioFarringtonManning} powerResult <- getPowerRates(designGS1, groups = 2, thetaH0 = 0.8, pi2 = 0.5, pi1 = c(0.1,0.2,0.3), riskRatio = T, directionUpper = FALSE, maxNumberOfSubjects = 40, allocationRatioPlanned = 5) ## ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 12) expect_equal(powerResult$numberOfSubjects[2, ], 28) expect_equal(powerResult$numberOfSubjects[3, ], 40) expect_equal(powerResult$numberOfSubjects1[1, ], 10) expect_equal(powerResult$numberOfSubjects1[2, ], 23.333333, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 33.333333, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 2) expect_equal(powerResult$numberOfSubjects2[2, ], 4.6666667, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 6.6666667, tolerance = 1e-07) expect_equal(powerResult$effect, c(-0.6, -0.4, -0.2), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(29.869153, 30.545915, 28.722194), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.12233203, 0.055263055, 0.02493902), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.35325438, 0.1832494, 0.079687483), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.19845995, 0.14128433, 0.068746287), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.67404635, 0.37979679, 0.17337279), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.052497346, 0.11728241, 0.20511002), tolerance = 
1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.083047698, 0.20198492, 0.32334859), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.13554504, 0.31926733, 0.52845861), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.61113145, 0.55777979, 0.63308512), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], NA_real_) expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.19789883, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.30397209, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 1.1132916, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.59448494, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateRatio} # @refFS[Formula]{fs:EstimatesRatioFarringtonManning} powerResult <- getPowerRates(designGS1, groups = 2, thetaH0 = 0.8, pi2 = 0.4, pi1 = c(0.4, 0.5, 0.6), riskRatio = T, directionUpper = TRUE, maxNumberOfSubjects = 80, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 24) expect_equal(powerResult$numberOfSubjects[2, ], 56) expect_equal(powerResult$numberOfSubjects[3, ], 80) expect_equal(powerResult$numberOfSubjects1[1, ], 18) expect_equal(powerResult$numberOfSubjects1[2, ], 42) expect_equal(powerResult$numberOfSubjects1[3, ], 60) expect_equal(powerResult$numberOfSubjects2[1, ], 6) expect_equal(powerResult$numberOfSubjects2[2, ], 14) expect_equal(powerResult$numberOfSubjects2[3, ], 20) expect_equal(powerResult$effect, c(0.2, 0.45, 0.7), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(58.50994, 61.208415, 55.770675), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.029681783, 0.083038809, 0.19351805), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.096741134, 0.26351903, 0.45786385), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.082477726, 0.17856321, 0.18329277), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.20890064, 0.52512104, 0.83467468), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.18431999, 0.080816996, 0.027459911), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.29934054, 0.13713348, 0.036076093), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.48366053, 0.21795048, 0.063536004), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.61008345, 0.56450831, 0.71491791), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8651141, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.3871263, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.2471692, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) 
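# -- Editorial note (added): the p-value scale bounds depend only on the
# design, not on pi1/pi2; assuming designGS1$criticalValues holds the z-scale
# rejection bounds, the stage 1 p-value bound is its upper-tail probability.
expect_equal(powerResult$criticalValuesPValueScale[1, ],
    1 - pnorm(designGS1$criticalValues[1]), tolerance = 1e-07)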
expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.57000905, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.96223105, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) }) test_that("'getPowerRates': power calculation of rate in two samples for two-sided group sequential design ", { designGS2 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), alpha = 0.4, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.22) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateDiff} # @refFS[Formula]{fs:EstimatesDiffFarringtonManning} powerResult <- getPowerRates(designGS2, groups = 2, pi2 = 0.5, pi1 = c(0.1,0.2,0.3), riskRatio = F, maxNumberOfSubjects = 40, allocationRatioPlanned = 0.5) ## ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 12) expect_equal(powerResult$numberOfSubjects[2, ], 28) expect_equal(powerResult$numberOfSubjects[3, ], 40) expect_equal(powerResult$numberOfSubjects1[1, ], 4) expect_equal(powerResult$numberOfSubjects1[2, ], 9.3333333, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 13.333333, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 8) expect_equal(powerResult$numberOfSubjects2[2, ], 18.666667, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 26.666667, tolerance = 1e-07) expect_equal(powerResult$effect, c(-0.4, -0.3, -0.2), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(20.586564, 26.282925, 30.696455), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.53456929, 0.33187612, 0.2131539), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.37045799, 0.36871195, 0.27793629), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.06955493, 0.14629915, 0.1545979), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.9745822, 0.84688722, 0.64568809), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.90502727, 0.70058807, 0.49109019), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -0.44319209, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.2365574, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.18006528, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 0.44319209, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.2365574, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.18006528, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateRatio} # @refFS[Formula]{fs:EstimatesRatioFarringtonManning} powerResult <- getPowerRates(designGS2, groups = 2, pi2 = 0.4, pi1 = c(0.4, 0.5, 0.6), riskRatio = T, maxNumberOfSubjects = 80, allocationRatioPlanned = 7) ## ## Comparison of the results of 
TrialDesignPlanRates object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 24) expect_equal(powerResult$numberOfSubjects[2, ], 56) expect_equal(powerResult$numberOfSubjects[3, ], 80) expect_equal(powerResult$numberOfSubjects1[1, ], 21) expect_equal(powerResult$numberOfSubjects1[2, ], 49) expect_equal(powerResult$numberOfSubjects1[3, ], 70) expect_equal(powerResult$numberOfSubjects2[1, ], 3) expect_equal(powerResult$numberOfSubjects2[2, ], 7) expect_equal(powerResult$numberOfSubjects2[3, ], 10) expect_equal(powerResult$effect, c(0, 0.25, 0.5), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(68.844318, 66.97762, 61.620959), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.1297467, 0.14947843, 0.21040306), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.16207777, 0.19381617, 0.27485292), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.10817552, 0.12487952, 0.15395566), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.4, 0.46817413, 0.63921164), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.29182448, 0.3432946, 0.48525598), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.22081341, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.49677588, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5992042, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 2.0083461, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 1.5897897, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.4538504, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07) }) context("Testing the power calculation for survival design for different designs and arguments") test_that("'getPowerSurvival': power calculation for survival design for one-sided group sequential design", { designGS1 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), sided = 1, alpha = 0.07, beta = 0.1, futilityBounds = c(-0.5,0.5), typeOfDesign = "WT", deltaWT = 0.22) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS1, pi2 = 0.4, pi1 = c(0.4, 0.5, 0.6), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 80, maxNumberOfEvents = 45, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(32.565971, 24, 18.155299), tolerance = 1e-07) expect_equal(powerResult$median2, 32.565971, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.021284401, 0.028881133, 0.03817878), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.021284401, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.3569154, 1.7937447), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 6.6666667, tolerance = 
1e-07) expect_equal(powerResult$followUpTime, c(41.628872, 30.417026, 22.638977), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(15.123713, 13.244904, 11.839868), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(32.118539, 26.401459, 22.217088), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(53.628872, 42.417026, 34.638977), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(32.017976, 30.394846, 25.872188), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(53.628872, 42.417026, 34.638977), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 13.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 31.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 45) expect_equal(powerResult$expectedNumberOfEvents, c(29.092161, 33.496718, 34.368969), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(80, 80, 78.932452), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(80, 80, 80)) expect_equal(powerResult$numberOfSubjects[3, ], c(80, 80, 80)) expect_equal(powerResult$expectedNumberOfSubjects, c(80, 80, 79.824774), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.011170644, 0.036015488, 0.087726198), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.030436001, 0.11913846, 0.27563412), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.025842077, 0.099477436, 0.1826593), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.067448723, 0.25463139, 0.54601962), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.30853754, 0.16216653, 0.076412449), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.40193671, 0.27053178, 0.12885945), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.71047424, 0.43269831, 0.2052719), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.75208089, 0.58785226, 0.56863222), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 4.203458, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 2.0990582, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.7531447, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.73032205, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 1.2284311, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS1, typeOfComputation = "Freedman", pi2 = 0.4, pi1 = c(0.4, 0.5, 0.6), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 80, maxNumberOfEvents = 45, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanSurvival 
object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(32.565971, 24, 18.155299), tolerance = 1e-07) expect_equal(powerResult$median2, 32.565971, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.021284401, 0.028881133, 0.03817878), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.021284401, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.3569154, 1.7937447), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 6.6666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(41.628872, 30.417026, 22.638977), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(15.123713, 13.244904, 11.839868), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(32.118539, 26.401459, 22.217088), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(53.628872, 42.417026, 34.638977), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(32.017976, 30.163653, 26.008714), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(53.628872, 42.417026, 34.638977), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 13.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 31.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 45) expect_equal(powerResult$expectedNumberOfEvents, c(29.092161, 33.256688, 34.504982), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(80, 80, 78.932452), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(80, 80, 80)) expect_equal(powerResult$numberOfSubjects[3, ], c(80, 80, 80)) expect_equal(powerResult$expectedNumberOfSubjects, c(80, 80, 79.822811), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.011170644, 0.033136424, 0.067729226), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.030436001, 0.10902189, 0.22109606), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.025842077, 0.091947627, 0.16101101), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.067448723, 0.23410594, 0.44983629), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.30853754, 0.1715797, 0.098248524), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.40193671, 0.28318207, 0.16903127), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.71047424, 0.45476178, 0.26727979), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.75208089, 0.59692009, 0.55610508), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 4.203458, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 2.0990582, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.7531447, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.73032205, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 1.2284311, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterSurvivalHsieh} # 
@refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS1, typeOfComputation = "Hsieh", pi2 = 0.4, pi1 = c(0.4, 0.5, 0.6), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 80, maxNumberOfEvents = 45, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(32.565971, 24, 18.155299), tolerance = 1e-07) expect_equal(powerResult$median2, 32.565971, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.021284401, 0.028881133, 0.03817878), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.021284401, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.3569154, 1.7937447), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 6.6666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(41.628872, 30.417026, 22.638977), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(15.123713, 13.244904, 11.839868), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(32.118539, 26.401459, 22.217088), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(53.628872, 42.417026, 34.638977), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(32.017976, 30.372933, 25.919163), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(53.628872, 42.417026, 34.638977), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 13.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 31.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 45) expect_equal(powerResult$expectedNumberOfEvents, c(29.092161, 33.473935, 34.421802), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(80, 80, 78.932452), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(80, 80, 80)) expect_equal(powerResult$numberOfSubjects[3, ], c(80, 80, 80)) expect_equal(powerResult$expectedNumberOfSubjects, c(80, 80, 79.825057), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.011170644, 0.03572104, 0.083721511), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.030436001, 0.11810922, 0.2653086), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.025842077, 0.098722701, 0.17919441), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.067448723, 0.25255296, 0.52822452), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.30853754, 0.16308496, 0.080152238), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.40193671, 0.27179271, 0.13588956), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.71047424, 0.43487767, 0.2160418), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.75208089, 0.58870793, 0.56507191), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 4.203458, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 2.0990582, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.7531447, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) 
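# Hedged side note (a sketch of the relationship, not rpact internals): under
# the Schoenfeld approximation the log-rank z statistic is
# log(HR) * sqrt(r * d) / (1 + r) for allocation ratio r and d events, so the
# one-sided boundaries map from the p-value scale to the hazard ratio scale as
# reproduced here for this design (r = 3, events per stage 13.5/31.5/45):
u <- qnorm(1 - c(0.011170644, 0.03577084, 0.05147132))  # p-value scale -> z scale
exp(u * (1 + 3) / sqrt(3 * c(13.5, 31.5, 45)))  # ~ c(4.203458, 2.0990582, 1.7531447), the criticalValuesEffectScale above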
expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.73032205, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 1.2284311, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS1, lambda2 = 0.04, thetaH0 = 1.25, hazardRatio = 0.8, directionUpper = FALSE, maxNumberOfSubjects = 200, maxNumberOfEvents = 65, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$pi1, 0.31886857, tolerance = 1e-07) expect_equal(powerResult$pi2, 0.38121661, tolerance = 1e-07) expect_equal(powerResult$median1, 21.660849, tolerance = 1e-07) expect_equal(powerResult$median2, 17.32868, tolerance = 1e-07) expect_equal(powerResult$lambda1, 0.032, tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, 5.7883102, tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], 8.7091306, tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], 13.807185, tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], 17.78831, tolerance = 1e-07) expect_equal(powerResult$studyDuration, 14.723329, tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, 17.78831, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 19.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 45.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 65) expect_equal(powerResult$expectedNumberOfEvents, 49.818428, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 145.15218, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], 200) expect_equal(powerResult$numberOfSubjects[3, ], 200) expect_equal(powerResult$expectedNumberOfSubjects, 190.996, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.076192913), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.24509523), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.17154561), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.49283375, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.087970326), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.15039938), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.2383697, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.55965784, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.37847558, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.67448058, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.78350426, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 
0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 1.623577, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 1.0533329, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS1, lambda2 = 0.04, thetaH0 = 0.8, hazardRatio = seq(0.8,1.4,0.2), directionUpper = TRUE, maxNumberOfSubjects = 200, maxNumberOfEvents = 65, allocationRatioPlanned = 1) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$pi1, c(0.31886857, 0.38121661, 0.43785755, 0.48931382), tolerance = 1e-07) expect_equal(powerResult$pi2, 0.38121661, tolerance = 1e-07) expect_equal(powerResult$median1, c(21.660849, 17.32868, 14.440566, 12.377628), tolerance = 1e-07) expect_equal(powerResult$median2, 17.32868, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.032, 0.04, 0.048, 0.056), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(5.1617391, 4.0656056, 3.2120436, 2.5256004), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(8.4767885, 8.0592408, 7.7076518, 7.4060255), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(13.399188, 12.692623, 12.137705, 11.68467), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(17.161739, 16.065606, 15.212044, 14.5256), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(12.758265, 13.175351, 12.752351, 11.880451), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(17.161739, 16.065606, 15.212044, 14.5256), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 19.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 45.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 65) expect_equal(powerResult$expectedNumberOfEvents, c(42.02201, 48.445748, 49.742518, 47.47852), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(141.27981, 134.32068, 128.46086, 123.43376), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(200, 200, 200, 194.7445), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[3, ], c(200, 200, 200, 200)) expect_equal(powerResult$expectedNumberOfSubjects, c(181.22667, 187.06042, 188.27858, 183.16132), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.011170644, 0.03658032, 0.082375002, 0.14710823), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.030436001, 0.12110923, 0.26177073, 0.39724295), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.025842077, 0.10091538, 0.17793787, 0.19830932), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.067448723, 0.25860493, 0.52208361, 0.74266051), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.30853754, 0.1604311, 0.08147133, 0.041317452), tolerance = 1e-07) 
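# Hedged side note: designGS1 was built with futilityBounds = c(-0.5, 0.5) on
# the z scale; the futilityBoundsPValueScale values recurring in these tests
# are simply the one-sided p-value transforms of those bounds:
1 - pnorm(c(-0.5, 0.5))  # ~ c(0.69146246, 0.30853754)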
expect_equal(powerResult$futilityPerStage[2, ], c(0.40193671, 0.26813346, 0.13835614, 0.061634556), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.71047424, 0.42856456, 0.21982747, 0.10295201), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.75208089, 0.58625412, 0.5639732, 0.6473032), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 2.2513678, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.3650021, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.1988902, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.63788392, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.92784212, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS1, eventTime = 120, pi2 = 0.4, thetaH0 = 0.8, hazardRatio = seq(0.8,1.4,0.2), directionUpper = TRUE, maxNumberOfSubjects = 200, maxNumberOfEvents = 65, allocationRatioPlanned = 1) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$pi1, c(0.33546019, 0.4, 0.45827173, 0.51088413), tolerance = 1e-07) expect_equal(powerResult$median1, c(203.53732, 162.82985, 135.69154, 116.30704), tolerance = 1e-07) expect_equal(powerResult$median2, 162.82985, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.0034055042, 0.0042568802, 0.0051082562, 0.0059596323), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.0042568802, tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(96.86335, 86.356678, 78.102375, 71.398147), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(32.816894, 30.124548, 27.945787, 26.142615), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(73.505015, 66.662265, 61.211479, 56.744296), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(108.86335, 98.356678, 90.102375, 83.398147), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(69.262697, 72.57735, 68.358222, 60.378881), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(108.86335, 98.356678, 90.102375, 83.398147), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 19.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 45.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 65) expect_equal(powerResult$expectedNumberOfEvents, c(42.02201, 48.445748, 49.742518, 47.47852), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(200, 200, 200, 200)) expect_equal(powerResult$numberOfSubjects[2, ], c(200, 200, 200, 200)) expect_equal(powerResult$numberOfSubjects[3, ], 
c(200, 200, 200, 200)) expect_equal(powerResult$expectedNumberOfSubjects, c(200, 200, 200, 200)) expect_equal(powerResult$rejectPerStage[1, ], c(0.011170644, 0.03658032, 0.082375002, 0.14710823), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.030436001, 0.12110923, 0.26177073, 0.39724295), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.025842077, 0.10091538, 0.17793787, 0.19830932), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.067448723, 0.25860493, 0.52208361, 0.74266051), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.30853754, 0.1604311, 0.08147133, 0.041317452), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.40193671, 0.26813346, 0.13835614, 0.061634556), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.71047424, 0.42856456, 0.21982747, 0.10295201), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.75208089, 0.58625412, 0.5639732, 0.6473032), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 2.2513678, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.3650021, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.1988902, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.63788392, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.92784212, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) }) test_that("'getPowerSurvival': power calculation for survival design for two-sided group sequential design ", { designGS2 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), alpha = 0.11, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.32) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS2, pi2 = 0.4, pi1 = c(0.2, 0.4, 0.6), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 180, maxNumberOfEvents = 55, allocationRatioPlanned = 0.3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(74.550809, 32.565971, 18.155299), tolerance = 1e-07) expect_equal(powerResult$median2, 32.565971, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.009297648, 0.021284401, 0.03817878), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.021284401, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(0.43682921, 1, 1.7937447), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 15) expect_equal(powerResult$followUpTime, c(14.361102, 11.603566, 9.1966475), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(11.329861, 10.584003, 9.8033045), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(19.345216, 17.58497, 15.96575), 
tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(26.361102, 23.603566, 21.196648), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(20.378955, 22.994709, 18.586202), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(26.361102, 23.603566, 21.196648), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 16.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 38.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 55) expect_equal(powerResult$expectedNumberOfEvents, c(40.275667, 53.258703, 46.484493), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(169.94792, 158.76005, 147.04957), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(180, 180, 180)) expect_equal(powerResult$numberOfSubjects[3, ], c(180, 180, 180)) expect_equal(powerResult$expectedNumberOfSubjects, c(177.90788, 179.45429, 176.38168), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.20812766, 0.025692757, 0.10981107), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.4067526, 0.045583354, 0.25986553), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.19467465, 0.038723888, 0.1839545), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.80955491, 0.11, 0.5536311), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.61488026, 0.071276112, 0.3696766), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.27158358, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.48064547, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5627937, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 3.6821078, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 2.0805356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.77685, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025692757, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.055458318, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.072467622, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS2, typeOfComputation = "Freedman", pi2 = 0.4, pi1 = c(0.2, 0.4, 0.6), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 180, maxNumberOfEvents = 55, allocationRatioPlanned = 0.3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(74.550809, 32.565971, 18.155299), tolerance = 1e-07) expect_equal(powerResult$median2, 32.565971, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.009297648, 0.021284401, 0.03817878), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.021284401, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(0.43682921, 1, 1.7937447), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 15) expect_equal(powerResult$followUpTime, c(14.361102, 11.603566, 9.1966475), tolerance = 1e-07) 
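# Note (an observation on the runs in this test, not a change in behavior):
# the Schoenfeld, Freedman, and Hsieh runs share the same design and event
# schedule, so their boundary values on the effect and p-value scales
# coincide; typeOfComputation only changes the shift (drift) parameter and
# hence the power-type outputs (rejectPerStage, overallReject, earlyStop,
# studyDuration, expected numbers of events and subjects).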
expect_equal(powerResult$analysisTime[1, ], c(11.329861, 10.584003, 9.8033045), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(19.345216, 17.58497, 15.96575), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(26.361102, 23.603566, 21.196648), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(22.281639, 22.994709, 17.952578), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(26.361102, 23.603566, 21.196648), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 16.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 38.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 55) expect_equal(powerResult$expectedNumberOfEvents, c(44.992896, 53.258703, 44.408918), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(169.94792, 158.76005, 147.04957), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(180, 180, 180)) expect_equal(powerResult$numberOfSubjects[3, ], c(180, 180, 180)) expect_equal(powerResult$expectedNumberOfSubjects, c(178.68182, 179.45429, 175.39233), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.13113454, 0.025692757, 0.13983652), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.30051056, 0.045583354, 0.31559857), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.19586767, 0.038723888, 0.19878897), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.62751278, 0.11, 0.65422406), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.4316451, 0.071276112, 0.45543509), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.27158358, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.48064547, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5627937, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 3.6821078, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 2.0805356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.77685, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025692757, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.055458318, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.072467622, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterSurvivalHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS2, typeOfComputation = "Hsieh", pi2 = 0.4, pi1 = c(0.2, 0.4, 0.6), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 180, maxNumberOfEvents = 55, allocationRatioPlanned = 0.3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(74.550809, 32.565971, 18.155299), tolerance = 1e-07) expect_equal(powerResult$median2, 32.565971, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.009297648, 0.021284401, 0.03817878), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.021284401, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(0.43682921, 1, 1.7937447), tolerance = 1e-07) 
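# Hedged sketch (two-sided analogue of the Schoenfeld mapping noted earlier):
# here criticalValuesPValueScale is two-sided, so with allocation ratio
# r = 0.3 and d = c(16.5, 38.5, 55) events per stage,
u <- qnorm(1 - c(0.025692757, 0.055458318, 0.072467622) / 2)
exp(u * (1 + 0.3) / sqrt(0.3 * c(16.5, 38.5, 55)))  # ~ criticalValuesEffectScaleUpper above
# and the lower boundaries are the reciprocals, exp(-u * (1 + 0.3) / sqrt(0.3 * d)).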
expect_equal(powerResult$accrualIntensity, 15) expect_equal(powerResult$followUpTime, c(14.361102, 11.603566, 9.1966475), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(11.329861, 10.584003, 9.8033045), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(19.345216, 17.58497, 15.96575), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(26.361102, 23.603566, 21.196648), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(20.85758, 22.994709, 18.697033), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(26.361102, 23.603566, 21.196648), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 16.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 38.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 55) expect_equal(powerResult$expectedNumberOfEvents, c(41.467466, 53.258703, 46.846888), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(169.94792, 158.76005, 147.04957), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(180, 180, 180)) expect_equal(powerResult$numberOfSubjects[3, ], c(180, 180, 180)) expect_equal(powerResult$expectedNumberOfSubjects, c(178.11906, 179.45429, 176.54633), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.18711904, 0.025692757, 0.10481397), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.38354247, 0.045583354, 0.24956205), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.19996364, 0.038723888, 0.18005389), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.77062516, 0.11, 0.53442991), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.57066151, 0.071276112, 0.35437602), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.27158358, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.48064547, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5627937, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 3.6821078, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 2.0805356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.77685, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025692757, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.055458318, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.072467622, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS2, lambda2 = 0.04, hazardRatio = c(0.4,1,1.8), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 180, maxNumberOfEvents = 55, allocationRatioPlanned = 0.3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$pi1, c(0.31886857, 0.61710711, 0.82236067), tolerance = 1e-07) expect_equal(powerResult$pi2, 0.61710711, tolerance = 1e-07) expect_equal(powerResult$median1, c(43.321699, 17.32868, 9.6270442), tolerance = 1e-07) expect_equal(powerResult$median2, 
17.32868, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.016, 0.04, 0.072), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 15) expect_equal(powerResult$followUpTime, c(5.0603074, 3.4618446, 2.2113432), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(8.4193774, 7.8220347, 7.2555625), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(13.316286, 12.307189, 11.466641), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(17.060307, 15.461845, 14.211343), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(13.20331, 15.121757, 12.72043), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(17.060307, 15.461845, 14.211343), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 16.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 38.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 55) expect_equal(powerResult$expectedNumberOfEvents, c(37.895698, 53.258703, 46.404972), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(126.29066, 117.33052, 108.83344), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(180, 180, 171.99961), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[3, ], c(180, 180, 180)) expect_equal(powerResult$expectedNumberOfSubjects, c(166.366, 178.38985, 170.00949), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.25384788, 0.025692757, 0.11091682), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.44431262, 0.045583354, 0.26210486), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.17592811, 0.038723888, 0.18475659), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.8740886, 0.11, 0.55777827), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.6981605, 0.071276112, 0.37302168), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.27158358, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.48064547, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5627937, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 3.6821078, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 2.0805356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.77685, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025692757, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.055458318, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.072467622, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS2, typeOfComputation = "Freedman", lambda2 = 0.04, hazardRatio = c(0.4,1,1.8), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 180, maxNumberOfEvents = 55, allocationRatioPlanned = 0.3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$pi1, c(0.31886857, 0.61710711, 0.82236067), tolerance = 1e-07) expect_equal(powerResult$pi2, 
0.61710711, tolerance = 1e-07) expect_equal(powerResult$median1, c(43.321699, 17.32868, 9.6270442), tolerance = 1e-07) expect_equal(powerResult$median2, 17.32868, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.016, 0.04, 0.072), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 15) expect_equal(powerResult$followUpTime, c(5.0603074, 3.4618446, 2.2113432), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(8.4193774, 7.8220347, 7.2555625), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(13.316286, 12.307189, 11.466641), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(17.060307, 15.461845, 14.211343), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(14.524507, 15.121757, 12.352885), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(17.060307, 15.461845, 14.211343), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 16.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 38.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 55) expect_equal(powerResult$expectedNumberOfEvents, c(43.761896, 53.258703, 44.296935), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(126.29066, 117.33052, 108.83344), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(180, 180, 171.99961), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[3, ], c(180, 180, 180)) expect_equal(powerResult$expectedNumberOfSubjects, c(171.95824, 178.38985, 167.38024), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.14972738, 0.025692757, 0.14152926), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.33173334, 0.045583354, 0.31843565), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.20093576, 0.038723888, 0.19924141), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.68239647, 0.11, 0.65920633), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.48146072, 0.071276112, 0.45996492), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.27158358, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.48064547, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5627937, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 3.6821078, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 2.0805356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.77685, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025692757, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.055458318, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.072467622, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterSurvivalHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS2, typeOfComputation = "Hsieh", lambda2 = 0.04, hazardRatio = c(0.4,1,1.8), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 180, maxNumberOfEvents = 55, allocationRatioPlanned = 0.3) ## ## Comparison of the results of TrialDesignPlanSurvival object 
'powerResult' with expected results ## expect_equal(powerResult$pi1, c(0.31886857, 0.61710711, 0.82236067), tolerance = 1e-07) expect_equal(powerResult$pi2, 0.61710711, tolerance = 1e-07) expect_equal(powerResult$median1, c(43.321699, 17.32868, 9.6270442), tolerance = 1e-07) expect_equal(powerResult$median2, 17.32868, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.016, 0.04, 0.072), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 15) expect_equal(powerResult$followUpTime, c(5.0603074, 3.4618446, 2.2113432), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(8.4193774, 7.8220347, 7.2555625), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(13.316286, 12.307189, 11.466641), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(17.060307, 15.461845, 14.211343), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(13.562832, 15.121757, 12.784878), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(17.060307, 15.461845, 14.211343), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 16.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 38.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 55) expect_equal(powerResult$expectedNumberOfEvents, c(39.493229, 53.258703, 46.77542), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(126.29066, 117.33052, 108.83344), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(180, 180, 171.99961), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[3, ], c(180, 180, 180)) expect_equal(powerResult$expectedNumberOfSubjects, c(168.04554, 178.38985, 170.45805), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.2225769, 0.025692757, 0.10579404), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.42045819, 0.045583354, 0.25160664), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.18963039, 0.038723888, 0.18085515), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.83266548, 0.11, 0.53825584), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.64303509, 0.071276112, 0.35740069), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.27158358, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.48064547, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5627937, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 3.6821078, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 2.0805356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.77685, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025692757, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.055458318, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.072467622, tolerance = 1e-07) }) context("Testing the power calculation of survival data for other parameter variants") test_that("'getPowerSurvival': Fixed sample size with minimum required definitions, pi1 = c(0.4, 0.5, 0.6) and pi2 = 0.2 at event time 12, accrual time 12 and follow-up time 6 as default ", { powerResult <- getPowerSurvival(maxNumberOfEvents = 40, maxNumberOfSubjects = 200) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(37.275405, 23.320299, 
16.282985, 12), tolerance = 1e-07) expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(6.1115255, 3.442577, 1.6316894, 0.30440109), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(18.111525, 15.442577, 13.631689, 12.304401), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(18.111525, 15.442577, 13.631689, 12.304401), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(40, 40, 40, 40)) expect_equal(powerResult$expectedNumberOfSubjects, c(200, 200, 200, 200)) expect_equal(powerResult$overallReject, c(0.025, 0.31674317, 0.74507635, 0.94783846), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8585471, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) }) test_that("'getPowerSurvival': Four stage O'Brien and Fleming group sequential design with minimum required definitions, pi1 = c(0.4, 0.5, 0.6) and pi2 = 0.2 at event time 12, accrual time 12 and follow-up time 6 as default ", { powerResult <- getPowerSurvival(design = getDesignGroupSequential(kMax = 4), maxNumberOfEvents = 40, maxNumberOfSubjects = 200) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(6.1115255, 3.442577, 1.6316894, 0.30440109), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(8.2382885, 7.2643376, 6.5021817, 5.8683997), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(11.775158, 10.405299, 9.3411982, 8.4606249), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(14.851313, 12.90759, 11.580651, 10.517763), tolerance = 1e-07) expect_equal(powerResult$analysisTime[4, ], c(18.111525, 15.442577, 13.631689, 12.304401), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(18.070854, 14.972567, 12.292784, 10.112156), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(18.111525, 15.442577, 13.631689, 12.304401), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 10) expect_equal(powerResult$eventsPerStage[2, ], 20) expect_equal(powerResult$eventsPerStage[3, ], 30) expect_equal(powerResult$eventsPerStage[4, ], 40) expect_equal(powerResult$expectedNumberOfEvents, c(39.87408, 38.142534, 33.62741, 28.346513), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(137.30481, 121.07229, 108.36969, 97.806661), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(196.25264, 173.42164, 155.68664, 141.01041), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[3, ], c(200, 200, 193.01085, 175.29605), tolerance = 1e-07) 
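# Hedged sketch: for the default four-stage O'Brien & Fleming design used
# here, the z-scale rejection boundaries follow the shape u_k ~ sqrt(kMax / k),
# and 1 - pnorm(.) maps them to the criticalValuesPValueScale values below:
u <- getDesignGroupSequential(kMax = 4)$criticalValues
u / u[4]      # ~ sqrt(4 / (1:4)), the O'Brien & Fleming shape
1 - pnorm(u)  # ~ c(2.5763449e-05, 0.0020996694, 0.0097077663, 0.021469878)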
expect_equal(powerResult$numberOfSubjects[4, ], c(200, 200, 200, 200)) expect_equal(powerResult$expectedNumberOfSubjects, c(199.99057, 199.0474, 190.68267, 167.42879), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(2.5763449e-05, 0.00047146778, 0.0030806507, 0.012020122), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.0020845834, 0.034441261, 0.15314753, 0.35953485), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.0083455469, 0.11544971, 0.32172195, 0.41021864), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[4, ], c(0.014544106, 0.15846685, 0.25680093, 0.16196846), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.025, 0.30882929, 0.73475105, 0.94374207), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.010455897, 0.15036244, 0.47795013, 0.78177362), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 12.942983, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 3.5976357, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 2.3478921, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[4, ], 1.8967435, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 2.5763449e-05, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.0020996694, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.0097077663, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[4, ], 0.021469878, tolerance = 1e-07) }) test_that("'getPowerSurvival': For fixed sample design, determine necessary accrual time if 200 subjects and 30 subjects per time unit can be recruited ", { powerResult <- getPowerSurvival(maxNumberOfEvents = 40, accrualTime = c(0), accrualIntensity = 30, maxNumberOfSubjects = 200) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(powerResult$accrualTime, 6.6666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(8.7010979, 6.004962, 4.1561659, 2.779256), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(15.367765, 12.671629, 10.822833, 9.4459226), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(15.367765, 12.671629, 10.822833, 9.4459226), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(40, 40, 40, 40)) expect_equal(powerResult$expectedNumberOfSubjects, c(200, 200, 200, 200)) expect_equal(powerResult$overallReject, c(0.025, 0.31674317, 0.74507635, 0.94783846), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8585471, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) }) test_that("'getPowerSurvival': Determine necessary accrual time if 200 subjects and if the first 6 time units 20 subjects per time unit can be recruited, then 30 subjects per time unit ", { powerResult <- getPowerSurvival(maxNumberOfEvents = 40, accrualTime = c(0, 6), accrualIntensity = c(20, 30), maxNumberOfSubjects = 200) ## ## Comparison 
of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(powerResult$accrualTime, c(6, 8.6666667), tolerance = 1e-07) expect_equal(powerResult$totalAccrualTime, 8.6666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(8.127286, 5.4402735, 3.6040872, 2.2435211), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(16.793953, 14.10694, 12.270754, 10.910188), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(16.793953, 14.10694, 12.270754, 10.910188), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(40, 40, 40, 40)) expect_equal(powerResult$expectedNumberOfSubjects, c(200, 200, 200, 200)) expect_equal(powerResult$overallReject, c(0.025, 0.31674317, 0.74507635, 0.94783846), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8585471, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) }) test_that("'getPowerSurvival': Determine maximum number of Subjects if the first 6 time units 20 subjects per time unit can be recruited, and after 10 time units 30 subjects per time unit", { powerResult <- getPowerSurvival(maxNumberOfEvents = 40, accrualTime = c(0, 6, 10), accrualIntensity = c(20, 30)) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(powerResult$maxNumberOfSubjects, 240) expect_equal(powerResult$totalAccrualTime, 10) expect_equal(powerResult$followUpTime, c(5.3825871, 3.1889048, 1.691326, 0.58951828), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(15.382587, 13.188905, 11.691326, 10.589518), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(15.382587, 13.188905, 11.691326, 10.589518), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(40, 40, 40, 40)) expect_equal(powerResult$expectedNumberOfSubjects, c(240, 240, 240, 240)) expect_equal(powerResult$overallReject, c(0.025, 0.31674317, 0.74507635, 0.94783846), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8585471, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) }) test_that("'getPowerSurvival': Specify accrual time as a list", { at <- list("0 - <6" = 20, "6 - Inf" = 30) powerResult <- getPowerSurvival(maxNumberOfEvents = 40, accrualTime = at, maxNumberOfSubjects = 200) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07) 
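# Hedged side note on the exponential-model conversions used throughout these
# tests: lambda = -log(1 - pi) / eventTime and median = log(2) / lambda. The
# tested medians imply the defaults in play here are pi1 = c(0.2, 0.3, 0.4,
# 0.5) and pi2 = 0.2 at eventTime = 12; for example:
lambda2 <- -log(1 - 0.2) / 12
c(lambda2, log(2) / lambda2)  # ~ c(0.018595296, 37.275405), i.e. lambda2 and median2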
expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(powerResult$accrualTime, c(6, 8.6666667), tolerance = 1e-07) expect_equal(powerResult$totalAccrualTime, 8.6666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(8.127286, 5.4402735, 3.6040872, 2.2435211), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(16.793953, 14.10694, 12.270754, 10.910188), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(16.793953, 14.10694, 12.270754, 10.910188), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(40, 40, 40, 40)) expect_equal(powerResult$expectedNumberOfSubjects, c(200, 200, 200, 200)) expect_equal(powerResult$overallReject, c(0.025, 0.31674317, 0.74507635, 0.94783846), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8585471, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) }) test_that("'getPowerSurvival': Specify accrual time as a list, if maximum number of subjects need to be calculated", { at <- list("0 - <6" = 20, "6 - <=10" = 30) powerResult <- getPowerSurvival(maxNumberOfEvents = 40, accrualTime = at) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(powerResult$maxNumberOfSubjects, 240) expect_equal(powerResult$totalAccrualTime, 10) expect_equal(powerResult$followUpTime, c(5.3825871, 3.1889048, 1.691326, 0.58951828), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(15.382587, 13.188905, 11.691326, 10.589518), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(15.382587, 13.188905, 11.691326, 10.589518), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(40, 40, 40, 40)) expect_equal(powerResult$expectedNumberOfSubjects, c(240, 240, 240, 240)) expect_equal(powerResult$overallReject, c(0.025, 0.31674317, 0.74507635, 0.94783846), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8585471, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) }) test_that("'getPowerSurvival': Specify effect size for a two-stage group design with O'Brien & Fleming boundaries Effect size is based on event rates at specified event time, directionUpper = FALSE needs to be specified because it should be shown that hazard ratio < 1", { powerResult <- getPowerSurvival(design = getDesignGroupSequential(kMax = 2), pi1 = 0.2, pi2 = 0.3, eventTime = 24, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$median1, 74.550809, tolerance = 1e-07) expect_equal(powerResult$median2, 46.640597, tolerance = 1e-07) expect_equal(powerResult$lambda1, 0.009297648, tolerance = 1e-07) 
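# Hedged side note: with exponential survival the hazard ratio implied by two
# event rates is log(1 - pi1) / log(1 - pi2), independent of eventTime:
log(1 - 0.2) / log(1 - 0.3)  # ~ 0.62562161, the hazardRatio tested below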
expect_equal(powerResult$lambda2, 0.014861456, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, 0.62562161, tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, 12.65889, tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], 14.822645, tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], 24.65889, tolerance = 1e-07) expect_equal(powerResult$studyDuration, 24.262964, tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, 24.65889, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 20) expect_equal(powerResult$eventsPerStage[2, ], 40) expect_equal(powerResult$expectedNumberOfEvents, 39.194966, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 200) expect_equal(powerResult$numberOfSubjects[2, ], 200) expect_equal(powerResult$expectedNumberOfSubjects, 200) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.04025172), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.27369279), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.31394451, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.040251721, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.28632231, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.53509093, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07) }) test_that("'getPowerSurvival': Effect size is based on event rate at specified event time for the reference group and hazard ratio, directionUpper = FALSE needs to be specified because it should be shown that hazard ratio < 1", { powerResult <- getPowerSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, pi2 = 0.3, eventTime = 24, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$pi1, 0.16333997, tolerance = 1e-07) expect_equal(powerResult$median1, 93.281194, tolerance = 1e-07) expect_equal(powerResult$median2, 46.640597, tolerance = 1e-07) expect_equal(powerResult$lambda1, 0.007430728, tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.014861456, tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, 14.346945, tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], 15.582247, tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], 26.346945, tolerance = 1e-07) expect_equal(powerResult$studyDuration, 25.202929, tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, 26.346945, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 20) expect_equal(powerResult$eventsPerStage[2, ], 40) expect_equal(powerResult$expectedNumberOfEvents, 37.874505, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 200) expect_equal(powerResult$numberOfSubjects[2, ], 200) expect_equal(powerResult$expectedNumberOfSubjects, 200, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.10627477), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.48165803), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.5879328, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.10627477, tolerance 
= 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.28632231, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.53509093, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07) }) test_that("'getPowerSurvival': Effect size is based on hazard rate for the reference group and hazard ratio, directionUpper = FALSE needs to be specified because it should be shown that hazard ratio < 1", { powerResult <- getPowerSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, lambda2 = 0.02, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$pi1, 0.11307956, tolerance = 1e-07) expect_equal(powerResult$pi2, 0.21337214, tolerance = 1e-07) expect_equal(powerResult$median1, 69.314718, tolerance = 1e-07) expect_equal(powerResult$median2, 34.657359, tolerance = 1e-07) expect_equal(powerResult$lambda1, 0.01, tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, 9.1631017, tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], 13.164641, tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], 21.163102, tolerance = 1e-07) expect_equal(powerResult$studyDuration, 20.313067, tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, 21.163102, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 20) expect_equal(powerResult$eventsPerStage[2, ], 40) expect_equal(powerResult$expectedNumberOfEvents, 37.874505, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 200) expect_equal(powerResult$numberOfSubjects[2, ], 200) expect_equal(powerResult$expectedNumberOfSubjects, 200, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.10627477), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.48165803), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.5879328, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.10627477, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.28632231, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.53509093, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07) }) test_that("'getPowerSurvival': Specification of piecewise exponential survival time and hazard ratios ", { powerResult <- getPowerSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01,0.02,0.04), hazardRatio = c(1.5, 1.8, 2), maxNumberOfEvents = 40, maxNumberOfSubjects = 200) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(4.2070411, 3.5734432, 3.2068918), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(12.173669, 11.705285, 11.428161), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(16.207041, 15.573443, 15.206892), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(16.088508, 15.305974, 14.805308), tolerance = 1e-07) 
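## The vector arguments above encode a piecewise exponential model for the
## control arm: hazard 0.01 on [0, 5), 0.02 on [5, 10), and 0.04 from time 10
## onwards, evaluated at the hazard ratios 1.5, 1.8, and 2; the list notation
## used in the next test specifies the same model.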
expect_equal(powerResult$maxStudyDuration, c(16.207041, 15.573443, 15.206892), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 20) expect_equal(powerResult$eventsPerStage[2, ], 40) expect_equal(powerResult$expectedNumberOfEvents, c(39.412236, 38.617073, 37.874505), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(200, 195.08808, 190.46935), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(200, 200, 200)) expect_equal(powerResult$expectedNumberOfSubjects, c(200, 199.66036, 198.98713), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.0293882, 0.069146371, 0.10627477), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.21729291, 0.38699311, 0.48165803), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.24668111, 0.45613948, 0.5879328), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.029388201, 0.069146372, 0.10627477), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 3.4925675, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.8688412, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07) }) test_that("'getPowerSurvival': Specification of piecewise exponential survival time as list and hazard ratios ", { pws <- list("0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) powerResult <- getPowerSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2), maxNumberOfEvents = 40, maxNumberOfSubjects = 200) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(4.2070411, 3.5734432, 3.2068918), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(12.173669, 11.705285, 11.428161), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(16.207041, 15.573443, 15.206892), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(16.088508, 15.305974, 14.805308), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(16.207041, 15.573443, 15.206892), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 20) expect_equal(powerResult$eventsPerStage[2, ], 40) expect_equal(powerResult$expectedNumberOfEvents, c(39.412236, 38.617073, 37.874505), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(200, 195.08808, 190.46935), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(200, 200, 200)) expect_equal(powerResult$expectedNumberOfSubjects, c(200, 199.66036, 198.98713), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.0293882, 0.069146371, 0.10627477), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.21729291, 0.38699311, 0.48165803), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.24668111, 0.45613948, 0.5879328), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.029388201, 0.069146372, 0.10627477), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 3.4925675, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.8688412, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.023996469, 
tolerance = 1e-07) }) test_that("'getPowerSurvival': Specification of piecewise exponential survival time for both treatment arms ", { powerResult <- getPowerSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), lambda1 = c(0.015,0.03,0.06), maxNumberOfEvents = 40, maxNumberOfSubjects = 200) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$lambda1, c(0.015, 0.03, 0.06), tolerance = 1e-07) expect_equal(powerResult$hazardRatio, 1.5, tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, 4.2070411, tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], 12.173669, tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], 16.207041, tolerance = 1e-07) expect_equal(powerResult$studyDuration, 16.088508, tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, 16.207041, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 20) expect_equal(powerResult$eventsPerStage[2, ], 40) expect_equal(powerResult$expectedNumberOfEvents, 39.412236, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 200) expect_equal(powerResult$numberOfSubjects[2, ], 200) expect_equal(powerResult$expectedNumberOfSubjects, 200) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.0293882), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.21729291), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.24668111, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.029388201, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 3.4925675, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.8688412, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07) }) test_that("'getPowerSurvival': Specification of piecewise exponential survival time as a list", { pws <- list("0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) powerResult <- getPowerSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2), maxNumberOfEvents = 40, maxNumberOfSubjects = 200) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(4.2070411, 3.5734432, 3.2068918), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(12.173669, 11.705285, 11.428161), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(16.207041, 15.573443, 15.206892), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(16.088508, 15.305974, 14.805308), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(16.207041, 15.573443, 15.206892), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 20) expect_equal(powerResult$eventsPerStage[2, ], 40) expect_equal(powerResult$expectedNumberOfEvents, c(39.412236, 38.617073, 37.874505), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(200, 195.08808, 190.46935), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(200, 200, 200)) expect_equal(powerResult$expectedNumberOfSubjects, c(200, 199.66036, 198.98713), tolerance = 1e-07) 
expect_equal(powerResult$rejectPerStage[1, ], c(0.0293882, 0.069146371, 0.10627477), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.21729291, 0.38699311, 0.48165803), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.24668111, 0.45613948, 0.5879328), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.029388201, 0.069146372, 0.10627477), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 3.4925675, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.8688412, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07) }) test_that("'getPowerSurvival': Specify effect size based on median survival times (median1 = 5, median2 = 3)", { powerResult <- getPowerSurvival(lambda1 = log(2) / 5, lambda2 = log(2) / 3, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$pi1, 0.81053543, tolerance = 1e-07) expect_equal(powerResult$pi2, 0.9375, tolerance = 1e-07) expect_equal(powerResult$median1, 5, tolerance = 1e-07) expect_equal(powerResult$median2, 3) expect_equal(powerResult$hazardRatio, 0.6, tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, -5.9093279, tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], 6.0906721, tolerance = 1e-07) expect_equal(powerResult$studyDuration, 6.0906721, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, 40) expect_equal(powerResult$expectedNumberOfSubjects, 101.5112, tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.36520074, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.53805471, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) }) test_that("'getPowerSurvival': Specify effect size based on median survival times of a Weibull distribution with kappa = 2 (median1 = 5, median2 = 3)", { powerResult <- getPowerSurvival( lambda1 = getLambdaByMedian(median = 5, kappa = 2), lambda2 = getLambdaByMedian(median = 3, kappa = 2), kappa = 2, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$pi1, 0.98154699, tolerance = 1e-07) expect_equal(powerResult$pi2, 0.99998474, tolerance = 1e-07) expect_equal(powerResult$median1, 5, tolerance = 1e-07) expect_equal(powerResult$median2, 3) expect_equal(powerResult$hazardRatio, 0.36, tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, -5.7378582, tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], 6.2621418, tolerance = 1e-07) expect_equal(powerResult$studyDuration, 6.2621418, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, 40) expect_equal(powerResult$expectedNumberOfSubjects, 104.36903, tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.8980967, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.53805471, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) }) context("Testing the follow-up time calculation")
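## The tests in this context check the accounting identity that the analysis
## time of the final stage equals the total accrual time plus the follow-up
## time. A minimal plain-R sketch of the invariant, reusing the parameters of
## the first test below (illustrative only):
# x <- getPowerSurvival(getDesignGroupSequential(typeOfDesign = "P"),
#     accrualTime = 12, lambda2 = 0.005, lambda1 = 0.01,
#     maxNumberOfSubjects = 766, maxNumberOfEvents = 76)
# stopifnot(isTRUE(all.equal(x$analysisTime[3], x$accrualTime + x$followUpTime)))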
test_that("'getPowerSurvival': analysis time at last stage equals accrual time + follow-up time", { x1 <- getPowerSurvival(getDesignGroupSequential(typeOfDesign = "P"), accrualTime = 12, lambda2 = 0.005, lambda1 = 0.01, maxNumberOfSubjects = 766, maxNumberOfEvents = 76) expect_equal(x1$overallReject, 1 - x1$.design$beta, tolerance = 0.01) expect_equal(x1$analysisTime[3], x1$accrualTime + x1$followUpTime) x2 <- getPowerSurvival(getDesignGroupSequential(typeOfDesign = "P"), accrualTime = 12, maxNumberOfEvents = 76, maxNumberOfSubjects = 766, lambda2 = 0.005, lambda1 = 0.01) expect_equal(x2$analysisTime[3], x2$accrualTime + x2$followUpTime) x3 <- getPowerSurvival(getDesignGroupSequential(typeOfDesign = "WT"), accrualTime = c(0, 12, 15), accrualIntensity = c(20, 30), lambda2 = 0.005, lambda1 = 0.01, maxNumberOfEvents = 76) expect_equal(x3$analysisTime[length(x3$analysisTime)], x3$accrualTime[length(x3$accrualTime)] + x3$followUpTime) x4 <- getPowerSurvival(getDesignGroupSequential(typeOfDesign = "WT"), accrualTime = c(0, 12, 15), accrualIntensity = c(40, 60), maxNumberOfEvents = 76, piecewiseSurvivalTime = c(0, 5), lambda2 = c(0.005, 0.01), hazardRatio = 0.8) expect_equal(x4$analysisTime[length(x4$analysisTime)], x4$accrualTime[length(x4$accrualTime)] + x4$followUpTime) }) rpact/tests/testthat/test-f_analysis_base_rates.R0000644000176200001440000035647213574164753022045 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 26 November 2019, 08:45:16 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing the analysis rates functionality for one treatment") test_that("'getAnalysisResults' for a group sequential design and one treatment", { design1 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.8, 1), futilityBounds = c(-0.5, 0, 0.5), typeOfDesign = "asKD", gammaA = 2.8) dataExample1 <- getDataset( n = c(8, 10, 9, 11), events = c(4, 5, 5, 6) ) x1 <- getAnalysisResults(design = design1, dataInput = dataExample1, stage = 3, thetaH0 = 0.75, normalApproximation = FALSE, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x1$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x1$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$pValues, c(0.11381531, 0.078126907, 0.16572571, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, 
c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x1$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities, c(0.055828724, 0.21032099, 0.60291694, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x1$pi2, NA_real_) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.04626695, 0.1766668, 0.29544407, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.95373305, 0.8233332, 0.73635572, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.49999905, 0.26917981, 0.015800491, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, 3) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, 0.0089457853, NA_real_), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.32991006, NA_real_), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.70969307, NA_real_), tolerance = 1e-07) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.51904357, NA_real_), tolerance = 1e-07) expect_equal(x1$normalApproximation, FALSE) expect_equal(x1$directionUpper, FALSE) expect_equal(x1$overallTestStatistics, c(1.2064848, 2.0674098, 2.4192811), tolerance = 1e-07) expect_equal(x1$overallPValues, c(0.11381531, 0.01934778, 0.0077756083), tolerance = 1e-07) .skipTestifDisabled() x2 <- getAnalysisResults(design = design1, dataInput = dataExample1, stage = 3, thetaH0 = 0.75, normalApproximation = TRUE, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2, 3, 4)) expect_equal(x2$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x2$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x2$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x2$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(-1.6329932, -1.8257419, -1.3471506, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.051235217, 0.033944577, 0.088965863, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x2$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities, c(0.088079629, 0.32476642, 0.83593758, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$allocationRatioPlanned, 1) expect_equal(x2$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x2$pi2, NA_real_) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.11314483, 0.21639861, 0.31742335, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.88685517, 0.78360139, 0.71378821, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.49999905, 0.10104164, 0.0056362503, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, 3) 
expect_equal(x2$finalPValues, c(NA_real_, NA_real_, 0.0042246203, NA_real_), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.29660132, NA_real_), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.68026724, NA_real_), tolerance = 1e-07) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.48659273, NA_real_), tolerance = 1e-07) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, FALSE) expect_equal(x2$overallTestStatistics, c(-1.6329932, -2.4494897, -2.7777778, NA_real_), tolerance = 1e-07) expect_equal(x2$overallPValues, c(0.051235217, 0.0071529392, 0.0027366018, NA_real_), tolerance = 1e-07) x3 <- getAnalysisResults(design = design1, dataInput = dataExample1, stage = 2, thetaH0 = 0.75, nPlanned = c(12,6), pi1 = 0.5, normalApproximation = FALSE, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x3' with expected results ## expect_equal(x3$stages, c(1, 2, 3, 4)) expect_equal(x3$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x3$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x3$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x3$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x3$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x3$effectSizes, c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$pValues, c(0.11381531, 0.078126907, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x3$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x3$conditionalRejectionProbabilities, c(0.055828724, 0.21032099, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$nPlanned, c(NA_real_, NA_real_, 12, 6)) expect_equal(x3$allocationRatioPlanned, 1) expect_equal(x3$pi1, 0.5, tolerance = 1e-07) expect_equal(x3$pi2, NA_real_) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_, 0.6918414, 0.87964625), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(0.04626695, 0.1766668, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(0.95373305, 0.8233332, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.49999905, 0.26917981, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$finalStage, NA_integer_) expect_equal(x3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$normalApproximation, FALSE) expect_equal(x3$directionUpper, FALSE) expect_equal(x3$overallTestStatistics, c(1.2064848, 2.0674098), tolerance = 1e-07) expect_equal(x3$overallPValues, c(0.11381531, 0.01934778), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x3, piRange = seq(0.45, 0.75, 0.05)) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.94525609, 
0.87964625, 0.77671901, 0.6376454, 0.47357888, 0.30528352, 0.15917802), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.91393119, 1, 0.91393119, 0.69767633, 0.44485807, 0.23692776, 0.10539922), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 18") x4 <- getAnalysisResults(design = design1, dataInput = dataExample1, stage = 2, thetaH0 = 0.75, nPlanned = c(12,6), pi1 = 0.5, normalApproximation = TRUE, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x4' with expected results ## expect_equal(x4$stages, c(1, 2, 3, 4)) expect_equal(x4$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x4$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x4$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x4$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x4$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x4$effectSizes, c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$testStatistics, c(-1.6329932, -1.8257419, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$pValues, c(0.051235217, 0.033944577, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x4$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x4$conditionalRejectionProbabilities, c(0.088079629, 0.32476642, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$nPlanned, c(NA_real_, NA_real_, 12, 6)) expect_equal(x4$allocationRatioPlanned, 1) expect_equal(x4$pi1, 0.5, tolerance = 1e-07) expect_equal(x4$pi2, NA_real_) expect_equal(x4$conditionalPower, c(NA_real_, NA_real_, 0.85377193, 0.95011174), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalLowerBounds, c(0.11314483, 0.21639861, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds, c(0.88685517, 0.78360139, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues, c(0.49999905, 0.10104164, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$finalStage, NA_integer_) expect_equal(x4$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$normalApproximation, TRUE) expect_equal(x4$directionUpper, FALSE) expect_equal(x4$overallTestStatistics, c(-1.6329932, -2.4494897, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$overallPValues, c(0.051235217, 0.0071529392, NA_real_, NA_real_), tolerance = 1e-07) plotData2 <- testGetAnalysisResultsPlotData(x4, piRange = seq(0.45, 0.75, 0.05)) ## ## Comparison of the results of list object 'plotData2' with expected results ## expect_equal(plotData2$xValues, c(0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(0.98086022, 0.95011174, 0.89224094, 0.79890753, 0.66697034, 0.50241609, 0.32350374), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(0.91393119, 1, 0.91393119, 0.69767633, 0.44485807, 
0.23692776, 0.10539922), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power Plot with Likelihood") expect_equal(plotData2$xlab, "pi1") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 18") x5 <- getAnalysisResults(design = design1, dataInput = dataExample1, stage = 3, thetaH0 = 0.25, normalApproximation = FALSE, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x5' with expected results ## expect_equal(x5$stages, c(1, 2, 3, 4)) expect_equal(x5$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x5$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x5$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x5$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x5$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x5$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x5$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$pValues, c(0.11381531, 0.078126907, 0.048927307, NA_real_), tolerance = 1e-07) expect_equal(x5$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x5$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x5$conditionalRejectionProbabilities, c(0.055828724, 0.21032099, 0.85300796, NA_real_), tolerance = 1e-07) expect_equal(x5$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$allocationRatioPlanned, 1) expect_equal(x5$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x5$pi2, NA_real_) expect_equal(x5$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$repeatedConfidenceIntervalLowerBounds, c(0.04626695, 0.1766668, 0.29544407, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedConfidenceIntervalUpperBounds, c(0.95373305, 0.8233332, 0.73635572, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedPValues, c(0.49999905, 0.26917981, 0.0050506954, NA_real_), tolerance = 1e-07) expect_equal(x5$finalStage, 3) expect_equal(x5$finalPValues, c(NA_real_, NA_real_, 0.003964958, NA_real_), tolerance = 1e-07) expect_equal(x5$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.32244641, NA_real_), tolerance = 1e-07) expect_equal(x5$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.70667629, NA_real_), tolerance = 1e-07) expect_equal(x5$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.51656189, NA_real_), tolerance = 1e-07) expect_equal(x5$normalApproximation, FALSE) expect_equal(x5$directionUpper, TRUE) expect_equal(x5$overallTestStatistics, c(1.2064848, 2.0674098, 2.8135397), tolerance = 1e-07) expect_equal(x5$overallPValues, c(0.11381531, 0.01934778, 0.0024499668), tolerance = 1e-07) x6 <- getAnalysisResults(design = design1, dataInput = dataExample1, stage = 3, thetaH0 = 0.25, normalApproximation = TRUE, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x6' with expected results ## expect_equal(x6$stages, c(1, 2, 3, 4)) expect_equal(x6$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x6$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x6$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x6$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x6$stageLevels, c(0.00027594593, 
0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x6$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x6$testStatistics, c(1.6329932, 1.8257419, 2.116951, NA_real_), tolerance = 1e-07) expect_equal(x6$pValues, c(0.051235217, 0.033944577, 0.017132004, NA_real_), tolerance = 1e-07) expect_equal(x6$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x6$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x6$conditionalRejectionProbabilities, c(0.088079629, 0.32476642, 0.96903431, NA_real_), tolerance = 1e-07) expect_equal(x6$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$allocationRatioPlanned, 1) expect_equal(x6$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x6$pi2, NA_real_) expect_equal(x6$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$repeatedConfidenceIntervalLowerBounds, c(0.11314483, 0.21639861, 0.31742335, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedConfidenceIntervalUpperBounds, c(0.88685517, 0.78360139, 0.71378821, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedPValues, c(0.49999905, 0.10104164, 0.0013294657, NA_real_), tolerance = 1e-07) expect_equal(x6$finalStage, 3) expect_equal(x6$finalPValues, c(NA_real_, NA_real_, 0.0023857966, NA_real_), tolerance = 1e-07) expect_equal(x6$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.34941079, NA_real_), tolerance = 1e-07) expect_equal(x6$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.74332995, NA_real_), tolerance = 1e-07) expect_equal(x6$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.55110366, NA_real_), tolerance = 1e-07) expect_equal(x6$normalApproximation, TRUE) expect_equal(x6$directionUpper, TRUE) expect_equal(x6$overallTestStatistics, c(1.6329932, 2.4494897, 3.2222222, NA_real_), tolerance = 1e-07) expect_equal(x6$overallPValues, c(0.051235217, 0.0071529392, 0.00063600219, NA_real_), tolerance = 1e-07) x7 <- getAnalysisResults(design = design1, dataInput = dataExample1, stage = 2, thetaH0 = 0.25, nPlanned = c(12,6), pi1 = 0.5, normalApproximation = FALSE, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x7' with expected results ## expect_equal(x7$stages, c(1, 2, 3, 4)) expect_equal(x7$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x7$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x7$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x7$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x7$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x7$effectSizes, c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$pValues, c(0.11381531, 0.078126907, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x7$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x7$conditionalRejectionProbabilities, c(0.055828724, 0.21032099, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$nPlanned, c(NA_real_, NA_real_, 12, 6)) expect_equal(x7$allocationRatioPlanned, 1) expect_equal(x7$pi1, 0.5, tolerance = 1e-07) expect_equal(x7$pi2, NA_real_) expect_equal(x7$conditionalPower, c(NA_real_, NA_real_, 0.6918414, 0.87964625), tolerance = 1e-07) 
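## Note the symmetry of the exact binomial test around the observed rate of
## 0.5: thetaH0 = 0.25 with directionUpper = TRUE (x7) yields the same
## conditional rejection probabilities and conditional power as
## thetaH0 = 0.75 with directionUpper = FALSE (x3 above).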
expect_equal(x7$repeatedConfidenceIntervalLowerBounds, c(0.04626695, 0.1766668, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$repeatedConfidenceIntervalUpperBounds, c(0.95373305, 0.8233332, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$repeatedPValues, c(0.49999905, 0.26917981, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$finalStage, NA_integer_) expect_equal(x7$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$normalApproximation, FALSE) expect_equal(x7$directionUpper, TRUE) expect_equal(x7$overallTestStatistics, c(1.2064848, 2.0674098), tolerance = 1e-07) expect_equal(x7$overallPValues, c(0.11381531, 0.01934778), tolerance = 1e-07) plotData3 <- testGetAnalysisResultsPlotData(x7, piRange = seq(0.25, 0.55, 0.05)) ## ## Comparison of the results of list object 'plotData3' with expected results ## expect_equal(plotData3$xValues, c(0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55), tolerance = 1e-07) expect_equal(plotData3$condPowerValues, c(0.15917802, 0.30528352, 0.47357888, 0.6376454, 0.77671901, 0.87964625, 0.94525609), tolerance = 1e-07) expect_equal(plotData3$likelihoodValues, c(0.10539922, 0.23692776, 0.44485807, 0.69767633, 0.91393119, 1, 0.91393119), tolerance = 1e-07) expect_equal(plotData3$main, "Conditional Power Plot with Likelihood") expect_equal(plotData3$xlab, "pi1") expect_equal(plotData3$ylab, "Conditional power / Likelihood") expect_equal(plotData3$sub, "Stage = 2, # of remaining subjects = 18") x8 <- getAnalysisResults(design = design1, dataInput = dataExample1, stage = 2, thetaH0 = 0.25, nPlanned = c(12,6), pi1 = 0.5, normalApproximation = TRUE, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x8' with expected results ## expect_equal(x8$stages, c(1, 2, 3, 4)) expect_equal(x8$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x8$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x8$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x8$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x8$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x8$effectSizes, c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$testStatistics, c(1.6329932, 1.8257419, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$pValues, c(0.051235217, 0.033944577, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x8$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x8$conditionalRejectionProbabilities, c(0.088079629, 0.32476642, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$nPlanned, c(NA_real_, NA_real_, 12, 6)) expect_equal(x8$allocationRatioPlanned, 1) expect_equal(x8$pi1, 0.5, tolerance = 1e-07) expect_equal(x8$pi2, NA_real_) expect_equal(x8$conditionalPower, c(NA_real_, NA_real_, 0.85377193, 0.95011174), tolerance = 1e-07) expect_equal(x8$repeatedConfidenceIntervalLowerBounds, c(0.11314483, 0.21639861, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$repeatedConfidenceIntervalUpperBounds, c(0.88685517, 0.78360139, NA_real_, NA_real_), tolerance = 
1e-07) expect_equal(x8$repeatedPValues, c(0.49999905, 0.10104164, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$finalStage, NA_integer_) expect_equal(x8$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$normalApproximation, TRUE) expect_equal(x8$directionUpper, TRUE) expect_equal(x8$overallTestStatistics, c(1.6329932, 2.4494897, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$overallPValues, c(0.051235217, 0.0071529392, NA_real_, NA_real_), tolerance = 1e-07) plotData4 <- testGetAnalysisResultsPlotData(x8, piRange = seq(0.25, 0.55, 0.05)) ## ## Comparison of the results of list object 'plotData4' with expected results ## expect_equal(plotData4$xValues, c(0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55), tolerance = 1e-07) expect_equal(plotData4$condPowerValues, c(0.32350374, 0.50241609, 0.66697034, 0.79890753, 0.89224094, 0.95011174, 0.98086022), tolerance = 1e-07) expect_equal(plotData4$likelihoodValues, c(0.10539922, 0.23692776, 0.44485807, 0.69767633, 0.91393119, 1, 0.91393119), tolerance = 1e-07) expect_equal(plotData4$main, "Conditional Power Plot with Likelihood") expect_equal(plotData4$xlab, "pi1") expect_equal(plotData4$ylab, "Conditional power / Likelihood") expect_equal(plotData4$sub, "Stage = 2, # of remaining subjects = 18") }) test_that("'getAnalysisResults' for an inverse normal design and one treatment", { .skipTestifDisabled() design2 <- getDesignInverseNormal(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.8, 1), futilityBounds = c(-0.5, 0, 0.5), typeOfDesign = "asKD", gammaA = 2.8) dataExample2 <- getDataset( n = c(8, 10, 9, 11), # cumsum, overall n = (8, 18, 27, 38) events = c(4, 5, 5, 6) # cumsum, overall events = (4, 9, 14, 20) ) x1 <- getAnalysisResults(design = design2, dataInput = dataExample2, stage = 3, thetaH0 = 0.75, normalApproximation = FALSE, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x1$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x1$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$pValues, c(0.11381531, 0.078126907, 0.16572571, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(x1$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities, c(0.055828724, 0.15918316, 0.28098687, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x1$pi2, NA_real_) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_))
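## The inverse normal design combines the stagewise p-values via weighted
## z-scores; with equal information increments of 0.2 at the first two stages,
## the second-stage combination statistic reduces to (z1 + z2) / sqrt(2).
## A plain-R sketch of this check (illustrative only):
# z1 <- qnorm(1 - 0.11381531)
# z2 <- qnorm(1 - 0.078126907)
# stopifnot(isTRUE(all.equal((z1 + z2) / sqrt(2), 1.8556383, tolerance = 1e-06)))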
expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.04626695, 0.16132367, 0.26858957, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.95373305, 0.83867633, 0.76870152, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.49999905, 0.43799317, 0.045574143, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, NA_integer_) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$normalApproximation, FALSE) expect_equal(x1$directionUpper, FALSE) expect_equal(x1$combinationTestStatistics, c(1.2064848, 1.8556383, 1.9988727, NA_real_), tolerance = 1e-07) .skipTestifDisabled() x2 <- getAnalysisResults(design = design2, dataInput = dataExample2, stage = 3, thetaH0 = 0.75, normalApproximation = TRUE, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2, 3, 4)) expect_equal(x2$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x2$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x2$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x2$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(-1.6329932, -1.8257419, -1.3471506, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.051235217, 0.033944577, 0.088965863, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x2$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities, c(0.088079629, 0.32350577, 0.78413538, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$allocationRatioPlanned, 1) expect_equal(x2$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x2$pi2, NA_real_) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.11314483, 0.21610036, 0.31861038, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.88685517, 0.78389964, 0.72001941, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.49999905, 0.1020964, 0.0075111702, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, 3) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, 0.0050707339, NA_real_), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.30413229, NA_real_), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.68870859, NA_real_), tolerance = 1e-07) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.49547717, NA_real_), tolerance = 1e-07) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, FALSE) expect_equal(x2$combinationTestStatistics, c(1.6329932, 2.445695, 2.6819469, NA_real_), tolerance = 1e-07) x3 <- getAnalysisResults(design = design2, dataInput = 
dataExample2, stage = 2, thetaH0 = 0.75, nPlanned = c(12,6), pi1 = 0.5, normalApproximation = FALSE, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x3' with expected results ## expect_equal(x3$stages, c(1, 2, 3, 4)) expect_equal(x3$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x3$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x3$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x3$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x3$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x3$effectSizes, c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$pValues, c(0.11381531, 0.078126907, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x3$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x3$conditionalRejectionProbabilities, c(0.055828724, 0.15918316, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$nPlanned, c(NA_real_, NA_real_, 12, 6)) expect_equal(x3$allocationRatioPlanned, 1) expect_equal(x3$pi1, 0.5, tolerance = 1e-07) expect_equal(x3$pi2, NA_real_) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_, 0.69921202, 0.88465983), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(0.04626695, 0.16132367, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(0.95373305, 0.83867633, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.49999905, 0.43799317, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$finalStage, NA_integer_) expect_equal(x3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$normalApproximation, FALSE) expect_equal(x3$directionUpper, FALSE) expect_equal(x3$combinationTestStatistics, c(1.2064848, 1.8556383, NA_real_, NA_real_), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x3, piRange = seq(0.45, 0.75, 0.05)) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.94793138, 0.88465983, 0.78396384, 0.64581102, 0.48045808, 0.30888816, 0.15917802), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.91393119, 1, 0.91393119, 0.69767633, 0.44485807, 0.23692776, 0.10539922), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 18") x4 <- getAnalysisResults(design = design2, dataInput = dataExample2, stage = 2, thetaH0 = 0.75, nPlanned = c(12,6), pi1 = 0.5, normalApproximation = TRUE, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x4' with expected results ## expect_equal(x4$stages, c(1, 2, 3, 4)) expect_equal(x4$informationRates, c(0.2, 0.4, 
0.8, 1), tolerance = 1e-07) expect_equal(x4$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x4$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x4$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x4$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x4$effectSizes, c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$testStatistics, c(-1.6329932, -1.8257419, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$pValues, c(0.051235217, 0.033944577, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x4$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x4$conditionalRejectionProbabilities, c(0.088079629, 0.32350577, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$nPlanned, c(NA_real_, NA_real_, 12, 6)) expect_equal(x4$allocationRatioPlanned, 1) expect_equal(x4$pi1, 0.5, tolerance = 1e-07) expect_equal(x4$pi2, NA_real_) expect_equal(x4$conditionalPower, c(NA_real_, NA_real_, 0.85385983, 0.95015898), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalLowerBounds, c(0.11314483, 0.21610036, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds, c(0.88685517, 0.78389964, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues, c(0.49999905, 0.1020964, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$finalStage, NA_integer_) expect_equal(x4$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$normalApproximation, TRUE) expect_equal(x4$directionUpper, FALSE) expect_equal(x4$combinationTestStatistics, c(1.6329932, 2.445695, NA_real_, NA_real_), tolerance = 1e-07) plotData2 <- testGetAnalysisResultsPlotData(x4, piRange = seq(0.45, 0.75, 0.05)) ## ## Comparison of the results of list object 'plotData2' with expected results ## expect_equal(plotData2$xValues, c(0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(0.98088099, 0.95015898, 0.89232288, 0.79901831, 0.66708346, 0.50248974, 0.32350374), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(0.91393119, 1, 0.91393119, 0.69767633, 0.44485807, 0.23692776, 0.10539922), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power Plot with Likelihood") expect_equal(plotData2$xlab, "pi1") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 18") x5 <- getAnalysisResults(design = design2, dataInput = dataExample2, stage = 3, thetaH0 = 0.25, normalApproximation = FALSE, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x5' with expected results ## expect_equal(x5$stages, c(1, 2, 3, 4)) expect_equal(x5$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x5$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x5$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x5$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x5$stageLevels, 
c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x5$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x5$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$pValues, c(0.11381531, 0.078126907, 0.048927307, NA_real_), tolerance = 1e-07) expect_equal(x5$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x5$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x5$conditionalRejectionProbabilities, c(0.055828724, 0.15918316, 0.6508521, NA_real_), tolerance = 1e-07) expect_equal(x5$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$allocationRatioPlanned, 1) expect_equal(x5$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x5$pi2, NA_real_) expect_equal(x5$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$repeatedConfidenceIntervalLowerBounds, c(0.04626695, 0.16132367, 0.26858957, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedConfidenceIntervalUpperBounds, c(0.95373305, 0.83867633, 0.76870152, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedPValues, c(0.49999905, 0.43799317, 0.013282796, NA_real_), tolerance = 1e-07) expect_equal(x5$finalStage, 3) expect_equal(x5$finalPValues, c(NA_real_, NA_real_, 0.007752129, NA_real_), tolerance = 1e-07) expect_equal(x5$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.29554194, NA_real_), tolerance = 1e-07) expect_equal(x5$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.67875285, NA_real_), tolerance = 1e-07) expect_equal(x5$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.48769645, NA_real_), tolerance = 1e-07) expect_equal(x5$normalApproximation, FALSE) expect_equal(x5$directionUpper, TRUE) expect_equal(x5$combinationTestStatistics, c(1.2064848, 1.8556383, 2.4826398, NA_real_), tolerance = 1e-07) x6 <- getAnalysisResults(design = design2, dataInput = dataExample2, stage = 3, thetaH0 = 0.25, normalApproximation = TRUE, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x6' with expected results ## expect_equal(x6$stages, c(1, 2, 3, 4)) expect_equal(x6$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x6$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x6$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x6$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x6$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x6$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x6$testStatistics, c(1.6329932, 1.8257419, 2.116951, NA_real_), tolerance = 1e-07) expect_equal(x6$pValues, c(0.051235217, 0.033944577, 0.017132004, NA_real_), tolerance = 1e-07) expect_equal(x6$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x6$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x6$conditionalRejectionProbabilities, c(0.088079629, 0.32350577, 0.96959663, NA_real_), tolerance = 1e-07) expect_equal(x6$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$allocationRatioPlanned, 1) expect_equal(x6$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x6$pi2, NA_real_) expect_equal(x6$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$repeatedConfidenceIntervalLowerBounds, c(0.11314483, 0.21610036, 0.31861038, NA_real_), tolerance = 1e-07) 
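## As the recorded values show, the repeated confidence intervals are
## unaffected by thetaH0 and by the test direction: the bounds for x5/x6
## (thetaH0 = 0.25) coincide with those of x1/x2 (thetaH0 = 0.75) above.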
expect_equal(x6$repeatedConfidenceIntervalUpperBounds, c(0.88685517, 0.78389964, 0.72001941, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedPValues, c(0.49999905, 0.1020964, 0.0013103922, NA_real_), tolerance = 1e-07) expect_equal(x6$finalStage, 3) expect_equal(x6$finalPValues, c(NA_real_, NA_real_, 0.002378519, NA_real_), tolerance = 1e-07) expect_equal(x6$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.3437363, NA_real_), tolerance = 1e-07) expect_equal(x6$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.73847376, NA_real_), tolerance = 1e-07) expect_equal(x6$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.54446903, NA_real_), tolerance = 1e-07) expect_equal(x6$normalApproximation, TRUE) expect_equal(x6$directionUpper, TRUE) expect_equal(x6$combinationTestStatistics, c(1.6329932, 2.445695, 3.2262779, NA_real_), tolerance = 1e-07) x7 <- getAnalysisResults(design = design2, dataInput = dataExample2, stage = 2, thetaH0 = 0.25, nPlanned = c(12,6), pi1 = 0.5, normalApproximation = FALSE, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x7' with expected results ## expect_equal(x7$stages, c(1, 2, 3, 4)) expect_equal(x7$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x7$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x7$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x7$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x7$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x7$effectSizes, c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$pValues, c(0.11381531, 0.078126907, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x7$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x7$conditionalRejectionProbabilities, c(0.055828724, 0.15918316, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$nPlanned, c(NA_real_, NA_real_, 12, 6)) expect_equal(x7$allocationRatioPlanned, 1) expect_equal(x7$pi1, 0.5, tolerance = 1e-07) expect_equal(x7$pi2, NA_real_) expect_equal(x7$conditionalPower, c(NA_real_, NA_real_, 0.69921202, 0.88465983), tolerance = 1e-07) expect_equal(x7$repeatedConfidenceIntervalLowerBounds, c(0.04626695, 0.16132367, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$repeatedConfidenceIntervalUpperBounds, c(0.95373305, 0.83867633, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$repeatedPValues, c(0.49999905, 0.43799317, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$finalStage, NA_integer_) expect_equal(x7$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$normalApproximation, FALSE) expect_equal(x7$directionUpper, TRUE) expect_equal(x7$combinationTestStatistics, c(1.2064848, 1.8556383, NA_real_, NA_real_), tolerance = 1e-07) plotData3 <- testGetAnalysisResultsPlotData(x7, piRange = seq(0.25, 0.55, 0.05)) ## ## Comparison of the results of list object 'plotData3' with expected results ## expect_equal(plotData3$xValues, c(0.25, 0.3, 0.35, 0.4, 
0.45, 0.5, 0.55), tolerance = 1e-07) expect_equal(plotData3$condPowerValues, c(0.15917802, 0.30888816, 0.48045808, 0.64581102, 0.78396384, 0.88465983, 0.94793138), tolerance = 1e-07) expect_equal(plotData3$likelihoodValues, c(0.10539922, 0.23692776, 0.44485807, 0.69767633, 0.91393119, 1, 0.91393119), tolerance = 1e-07) expect_equal(plotData3$main, "Conditional Power Plot with Likelihood") expect_equal(plotData3$xlab, "pi1") expect_equal(plotData3$ylab, "Conditional power / Likelihood") expect_equal(plotData3$sub, "Stage = 2, # of remaining subjects = 18") x8 <- getAnalysisResults(design = design2, dataInput = dataExample2, stage = 2, thetaH0 = 0.25, nPlanned = c(12,6), pi1 = 0.5, normalApproximation = TRUE, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x8' with expected results ## expect_equal(x8$stages, c(1, 2, 3, 4)) expect_equal(x8$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x8$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x8$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x8$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x8$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x8$effectSizes, c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$testStatistics, c(1.6329932, 1.8257419, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$pValues, c(0.051235217, 0.033944577, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x8$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x8$conditionalRejectionProbabilities, c(0.088079629, 0.32350577, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$nPlanned, c(NA_real_, NA_real_, 12, 6)) expect_equal(x8$allocationRatioPlanned, 1) expect_equal(x8$pi1, 0.5, tolerance = 1e-07) expect_equal(x8$pi2, NA_real_) expect_equal(x8$conditionalPower, c(NA_real_, NA_real_, 0.85385983, 0.95015898), tolerance = 1e-07) expect_equal(x8$repeatedConfidenceIntervalLowerBounds, c(0.11314483, 0.21610036, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$repeatedConfidenceIntervalUpperBounds, c(0.88685517, 0.78389964, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$repeatedPValues, c(0.49999905, 0.1020964, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$finalStage, NA_integer_) expect_equal(x8$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$normalApproximation, TRUE) expect_equal(x8$directionUpper, TRUE) expect_equal(x8$combinationTestStatistics, c(1.6329932, 2.445695, NA_real_, NA_real_), tolerance = 1e-07) plotData4 <- testGetAnalysisResultsPlotData(x8, piRange = seq(0.25, 0.55, 0.05)) ## ## Comparison of the results of list object 'plotData4' with expected results ## expect_equal(plotData4$xValues, c(0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55), tolerance = 1e-07) expect_equal(plotData4$condPowerValues, c(0.32350374, 0.50248974, 0.66708346, 0.79901831, 0.89232288, 0.95015898, 0.98088099), tolerance = 1e-07) expect_equal(plotData4$likelihoodValues, c(0.10539922, 0.23692776, 0.44485807, 0.69767633, 0.91393119, 1, 
0.91393119), tolerance = 1e-07) expect_equal(plotData4$main, "Conditional Power Plot with Likelihood") expect_equal(plotData4$xlab, "pi1") expect_equal(plotData4$ylab, "Conditional power / Likelihood") expect_equal(plotData4$sub, "Stage = 2, # of remaining subjects = 18") }) test_that("'getAnalysisResults' for a Fisher design and one treatment", { design3 <- getDesignFisher(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.8, 1)) dataExample3 <- getDataset( n = c(8, 10, 9, 11), # cumsum, overall n = (8, 18, 27, 38) events = c(4, 5, 5, 6) # cumsum, overall events = (4, 9, 14, 20) ) x1 <- getAnalysisResults(design = design3, dataInput = dataExample3, stage = 3, thetaH0 = 0.75, normalApproximation = FALSE, directionUpper = FALSE, iterations = 1000, seed = 123) ## ## Comparison of the results of AnalysisResultsFisher object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(0.00979594, 0.001278498, 5.8066063e-05, 1.276231e-05), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(1, 1, 1)) expect_equal(x1$alphaSpent, c(0.00979594, 0.01571, 0.022184266, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.00979594, 0.00979594, 0.00979594, 0.00979594), tolerance = 1e-07) expect_equal(x1$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$pValues, c(0.11381531, 0.078126907, 0.16572571, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(x1$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities, c(0.027980027, 0.040164764, 0.018233808, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x1$pi2, NA_real_) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.12025548, 0.19023888, 0.23521677, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.87974452, 0.80976112, 0.79971589, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.23393398, 0.11483365, 0.11050779, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, NA_integer_) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$normalApproximation, FALSE) expect_equal(x1$directionUpper, FALSE) expect_equal(x1$combinationTestStatistics, c(0.11381531, 0.008892038, 0.00069992563, NA_real_), tolerance = 1e-07) .skipTestifDisabled() x2 <- getAnalysisResults(design = design3, dataInput = dataExample3, stage = 3, thetaH0 = 0.75, normalApproximation = TRUE, directionUpper = FALSE, iterations = 1000, seed = 123) ## ## Comparison of the results of AnalysisResultsFisher object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2, 3, 4)) expect_equal(x2$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(0.00979594, 0.001278498, 5.8066063e-05, 1.276231e-05), tolerance = 1e-07) 
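# Unlike the exact analysis x1, which continues at all three observed stages,
# the normal-approximation analysis x2 reaches "reject and stop" at stage 3;
# its conditional rejection probability is accordingly capped at 1 below.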
expect_equal(x2$futilityBounds, c(1, 1, 1)) expect_equal(x2$alphaSpent, c(0.00979594, 0.01571, 0.022184266, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.00979594, 0.00979594, 0.00979594, 0.00979594), tolerance = 1e-07) expect_equal(x2$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(-1.6329932, -1.8257419, -1.3471506, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.051235217, 0.033944577, 0.088965863, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x2$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities, c(0.051264101, 0.1206033, 1, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$allocationRatioPlanned, 1) expect_equal(x2$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x2$pi2, NA_real_) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.18175814, 0.2424364, 0.28642867, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.81824186, 0.7575636, 0.7496417, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.11554509, 0.032131177, 0.024656293, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, FALSE) expect_equal(x2$combinationTestStatistics, c(0.051235217, 0.0017391578, 5.6795832e-05, NA_real_), tolerance = 1e-07) x3 <- getAnalysisResults(design = design3, dataInput = dataExample3, stage = 2, thetaH0 = 0.75, nPlanned = c(12, 6), pi1 = 0.5, normalApproximation = FALSE, directionUpper = FALSE, iterations = 1000, seed = 123) ## ## Comparison of the results of AnalysisResultsFisher object 'x3' with expected results ## expect_equal(x3$stages, c(1, 2, 3, 4)) expect_equal(x3$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x3$criticalValues, c(0.00979594, 0.001278498, 5.8066063e-05, 1.276231e-05), tolerance = 1e-07) expect_equal(x3$futilityBounds, c(1, 1, 1)) expect_equal(x3$alphaSpent, c(0.00979594, 0.01571, 0.022184266, 0.025), tolerance = 1e-07) expect_equal(x3$stageLevels, c(0.00979594, 0.00979594, 0.00979594, 0.00979594), tolerance = 1e-07) expect_equal(x3$effectSizes, c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$pValues, c(0.11381531, 0.078126907, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x3$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x3$conditionalRejectionProbabilities, c(0.027980027, 0.040164764, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$nPlanned, c(NA_real_, NA_real_, 12, 6)) expect_equal(x3$allocationRatioPlanned, 1) expect_equal(x3$pi1, 0.5, tolerance = 1e-07) expect_equal(x3$pi2, NA_real_) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(0.12025548, 0.19023888, NA_real_, NA_real_), tolerance = 1e-07) 
expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(0.87974452, 0.80976112, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.23393398, 0.11483365, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$finalStage, NA_integer_) expect_equal(x3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$normalApproximation, FALSE) expect_equal(x3$directionUpper, FALSE) expect_equal(x3$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.522, 0.672), tolerance = 1e-07) expect_equal(x3$combinationTestStatistics, c(0.11381531, 0.008892038, NA_real_, NA_real_), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x3, piRange = seq(0.25, 0.55, 0.05)) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.997, 0.99, 0.967, 0.9, 0.822, 0.659, 0.534), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.10539922, 0.23692776, 0.44485807, 0.69767633, 0.91393119, 1, 0.91393119), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 18") x4 <- getAnalysisResults(design = design3, dataInput = dataExample3, stage = 3, thetaH0 = 0.25, normalApproximation = FALSE, directionUpper = TRUE, iterations = 1000, seed = 123) ## ## Comparison of the results of AnalysisResultsFisher object 'x4' with expected results ## expect_equal(x4$stages, c(1, 2, 3, 4)) expect_equal(x4$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x4$criticalValues, c(0.00979594, 0.001278498, 5.8066063e-05, 1.276231e-05), tolerance = 1e-07) expect_equal(x4$futilityBounds, c(1, 1, 1)) expect_equal(x4$alphaSpent, c(0.00979594, 0.01571, 0.022184266, 0.025), tolerance = 1e-07) expect_equal(x4$stageLevels, c(0.00979594, 0.00979594, 0.00979594, 0.00979594), tolerance = 1e-07) expect_equal(x4$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x4$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$pValues, c(0.11381531, 0.078126907, 0.048927307, NA_real_), tolerance = 1e-07) expect_equal(x4$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(x4$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x4$conditionalRejectionProbabilities, c(0.027980027, 0.040164764, 0.10237226, NA_real_), tolerance = 1e-07) expect_equal(x4$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$allocationRatioPlanned, 1) expect_equal(x4$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x4$pi2, NA_real_) expect_equal(x4$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$repeatedConfidenceIntervalLowerBounds, c(0.12025548, 0.19023888, 0.23521677, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds, c(0.87974452, 0.80976112, 0.79971589, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues, c(0.23393398, 0.11483365, 0.040061917, NA_real_), tolerance = 1e-07) expect_equal(x4$finalStage, NA_integer_) 
expect_equal(x4$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$normalApproximation, FALSE) expect_equal(x4$directionUpper, TRUE) expect_equal(x4$combinationTestStatistics, c(0.11381531, 0.008892038, 0.00012466571, NA_real_), tolerance = 1e-07) x5 <- getAnalysisResults(design = design3, dataInput = dataExample3, stage = 3, thetaH0 = 0.25, normalApproximation = TRUE, directionUpper = TRUE, iterations = 1000, seed = 123) ## ## Comparison of the results of AnalysisResultsFisher object 'x5' with expected results ## expect_equal(x5$stages, c(1, 2, 3, 4)) expect_equal(x5$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x5$criticalValues, c(0.00979594, 0.001278498, 5.8066063e-05, 1.276231e-05), tolerance = 1e-07) expect_equal(x5$futilityBounds, c(1, 1, 1)) expect_equal(x5$alphaSpent, c(0.00979594, 0.01571, 0.022184266, 0.025), tolerance = 1e-07) expect_equal(x5$stageLevels, c(0.00979594, 0.00979594, 0.00979594, 0.00979594), tolerance = 1e-07) expect_equal(x5$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x5$testStatistics, c(1.6329932, 1.8257419, 2.116951, NA_real_), tolerance = 1e-07) expect_equal(x5$pValues, c(0.051235217, 0.033944577, 0.017132004, NA_real_), tolerance = 1e-07) expect_equal(x5$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x5$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x5$conditionalRejectionProbabilities, c(0.051264101, 0.1206033, 1, NA_real_), tolerance = 1e-07) expect_equal(x5$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$allocationRatioPlanned, 1) expect_equal(x5$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x5$pi2, NA_real_) expect_equal(x5$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$repeatedConfidenceIntervalLowerBounds, c(0.18175814, 0.2424364, 0.28642867, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedConfidenceIntervalUpperBounds, c(0.81824186, 0.7575636, 0.7496417, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedPValues, c(0.11554509, 0.032131177, 0.0055275316, NA_real_), tolerance = 1e-07) expect_equal(x5$finalStage, NA_integer_) expect_equal(x5$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$normalApproximation, TRUE) expect_equal(x5$directionUpper, TRUE) expect_equal(x5$combinationTestStatistics, c(0.051235217, 0.0017391578, 5.527981e-06, NA_real_), tolerance = 1e-07) x6 <- getAnalysisResults(design = design3, dataInput = dataExample3, stage = 2, thetaH0 = 0.25, nPlanned = c(12,6), pi1 = 0.5, normalApproximation = FALSE, directionUpper = TRUE, iterations = 1000, seed = 123) ## ## Comparison of the results of AnalysisResultsFisher object 'x6' with expected results ## expect_equal(x6$stages, c(1, 2, 3, 4)) expect_equal(x6$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x6$criticalValues, c(0.00979594, 0.001278498, 5.8066063e-05, 1.276231e-05), tolerance = 1e-07) 
expect_equal(x6$futilityBounds, c(1, 1, 1)) expect_equal(x6$alphaSpent, c(0.00979594, 0.01571, 0.022184266, 0.025), tolerance = 1e-07) expect_equal(x6$stageLevels, c(0.00979594, 0.00979594, 0.00979594, 0.00979594), tolerance = 1e-07) expect_equal(x6$effectSizes, c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x6$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$pValues, c(0.11381531, 0.078126907, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x6$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x6$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x6$conditionalRejectionProbabilities, c(0.027980027, 0.040164764, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x6$nPlanned, c(NA_real_, NA_real_, 12, 6)) expect_equal(x6$allocationRatioPlanned, 1) expect_equal(x6$pi1, 0.5, tolerance = 1e-07) expect_equal(x6$pi2, NA_real_) expect_equal(x6$repeatedConfidenceIntervalLowerBounds, c(0.12025548, 0.19023888, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedConfidenceIntervalUpperBounds, c(0.87974452, 0.80976112, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedPValues, c(0.23393398, 0.11483365, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x6$finalStage, NA_integer_) expect_equal(x6$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$normalApproximation, FALSE) expect_equal(x6$directionUpper, TRUE) expect_equal(x6$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.522, 0.672), tolerance = 1e-07) expect_equal(x6$combinationTestStatistics, c(0.11381531, 0.008892038, NA_real_, NA_real_), tolerance = 1e-07) plotData2 <- testGetAnalysisResultsPlotData(x6, piRange = seq(0.25, 0.55, 0.05)) ## ## Comparison of the results of list object 'plotData2' with expected results ## expect_equal(plotData2$xValues, c(0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(0.039, 0.114, 0.208, 0.363, 0.54, 0.659, 0.817), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(0.10539922, 0.23692776, 0.44485807, 0.69767633, 0.91393119, 1, 0.91393119), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power Plot with Likelihood") expect_equal(plotData2$xlab, "pi1") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 18") }) test_that("'getAnalysisResults' produces the correct exact tests and final CIs", { .skipTestifDisabled() dataExample4 <- getDataset( n1 = c(10, 80), n2 = c(15, 100), events1 = c(8, 54), events2 = c(6, 45) ) design4 <- getDesignGroupSequential(kMax = 2, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.1, informationRates = c(0.3, 1)) x1 <- getAnalysisResults(design4, dataExample4, thetaH0 = 0, stage = 2, directionUpper = TRUE, normalApproximation = FALSE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2)) expect_equal(x1$informationRates, c(0.3, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(3.1833546, 1.9666792), tolerance = 1e-07) expect_equal(x1$futilityBounds, -6) expect_equal(x1$alphaSpent, c(0.00072789603, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, 
c(0.00072789603, 0.024610104), tolerance = 1e-07) expect_equal(x1$effectSizes, c(0.4, 0.24541063), tolerance = 1e-07) expect_equal(x1$testStatistics, c(NA_real_, NA_real_)) expect_equal(x1$pValues, c(0.057571679, 0.0019934481), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "reject")) expect_equal(x1$thetaH0, 0) expect_equal(x1$conditionalRejectionProbabilities, c(0.093545976, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$pi1, 0.68888889, tolerance = 1e-07) expect_equal(x1$pi2, 0.44347826, tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(-0.23265736, 0.10906229), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.78187776, 0.37165341), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.19076123, 0.00035290512), tolerance = 1e-07) expect_equal(x1$finalStage, 2) expect_equal(x1$finalPValues, c(NA_real_, 0.0010527587), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, 0.08946214), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, 0.35871085), tolerance = 1e-07) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, 0.2258854), tolerance = 1e-07) expect_equal(x1$normalApproximation, FALSE) expect_equal(x1$directionUpper, TRUE) expect_equal(x1$overallTestStatistics, c(1.5754901, 3.3869257), tolerance = 1e-07) expect_equal(x1$overallPValues, c(0.057571679, 0.00035340268), tolerance = 1e-07) design5 <- getDesignInverseNormal(kMax = 2, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.1, informationRates = c(0.3, 1)) x2 <- getAnalysisResults(design5, dataExample4, thetaH0 = 0, stage = 2, directionUpper = TRUE, normalApproximation = FALSE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2)) expect_equal(x2$informationRates, c(0.3, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(3.1833546, 1.9666792), tolerance = 1e-07) expect_equal(x2$futilityBounds, -6) expect_equal(x2$alphaSpent, c(0.00072789603, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.00072789603, 0.024610104), tolerance = 1e-07) expect_equal(x2$effectSizes, c(0.4, 0.24541063), tolerance = 1e-07) expect_equal(x2$testStatistics, c(NA_real_, NA_real_)) expect_equal(x2$pValues, c(0.057571679, 0.0019934481), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "reject")) expect_equal(x2$thetaH0, 0) expect_equal(x2$conditionalRejectionProbabilities, c(0.093545976, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_)) expect_equal(x2$allocationRatioPlanned, 1) expect_equal(x2$pi1, 0.68888889, tolerance = 1e-07) expect_equal(x2$pi2, 0.44347826, tolerance = 1e-07) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(-0.23265736, 0.11944223), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.78187776, 0.38794979), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.19076123, 0.00053410288), tolerance = 1e-07) expect_equal(x2$finalStage, 2) expect_equal(x2$finalPValues, c(NA_real_, 0.0012242304), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, 0.088125224), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, 0.36146576), tolerance = 1e-07) 
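# The group sequential (x1) and inverse normal (x2) analyses agree closely at
# the final stage: final p-values 0.0010527587 vs. 0.0012242304 and
# median-unbiased estimates 0.2258854 vs. 0.2258396. For the Fisher design
# checked next, final confidence bounds and median-unbiased estimates are
# returned as NA.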
expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, 0.2258396), tolerance = 1e-07) expect_equal(x2$normalApproximation, FALSE) expect_equal(x2$directionUpper, TRUE) expect_equal(x2$combinationTestStatistics, c(1.5754901, 3.2718402), tolerance = 1e-07) design6 <- getDesignFisher(kMax = 2, alpha = 0.025, method = "fullAlpha", informationRates = c(0.3, 1)) x3 <- getAnalysisResults(design6, dataExample4, thetaH0 = 0, stage = 2, directionUpper = TRUE, normalApproximation = FALSE, seed = 123) ## ## Comparison of the results of AnalysisResultsFisher object 'x3' with expected results ## expect_equal(x3$stages, c(1, 2)) expect_equal(x3$informationRates, c(0.3, 1), tolerance = 1e-07) expect_equal(x3$criticalValues, c(0.00076737019, 0.00076737019), tolerance = 1e-07) expect_equal(x3$futilityBounds, 1) expect_equal(x3$alphaSpent, c(0.00076737019, 0.025), tolerance = 1e-07) expect_equal(x3$stageLevels, c(0.00076737019, 0.025), tolerance = 1e-07) expect_equal(x3$effectSizes, c(0.4, 0.24541063), tolerance = 1e-07) expect_equal(x3$testStatistics, c(NA_real_, NA_real_)) expect_equal(x3$pValues, c(0.057571679, 0.0019934481), tolerance = 1e-07) expect_equal(x3$testActions, c("continue", "reject")) expect_equal(x3$thetaH0, 0) expect_equal(x3$conditionalRejectionProbabilities, c(0.059209424, NA_real_), tolerance = 1e-07) expect_equal(x3$nPlanned, c(NA_real_, NA_real_)) expect_equal(x3$allocationRatioPlanned, 1) expect_equal(x3$pi1, 0.68888889, tolerance = 1e-07) expect_equal(x3$pi2, 0.44347826, tolerance = 1e-07) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(-0.22999653, 0.10478651), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(0.78083174, 0.37635443), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.33766302, 0.00088314698), tolerance = 1e-07) expect_equal(x3$finalStage, 2) expect_equal(x3$finalPValues, c(NA_real_, 0.0015832283), tolerance = 1e-07) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_)) expect_equal(x3$normalApproximation, FALSE) expect_equal(x3$directionUpper, TRUE) expect_equal(x3$combinationTestStatistics, c(0.057571679, 4.3180464e-06), tolerance = 1e-07) }) context("Testing the analysis rates functionality for two treatments") test_that("'getAnalysisResults' for a group sequential design and two treatments", { .skipTestifDisabled() design7 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.25, informationRates = c(0.2, 0.4, 0.8, 1), futilityBounds = c(0, 0.5, 0.8), bindingFutility = T) dataExample5 <- getDataset( n1 = c(17, 23, 22), n2 = c(18, 20, 19), events1 = c(11, 12, 17), events2 = c(5, 10, 7) ) x1 <- getAnalysisResults(design7, dataExample5, thetaH0 = 0, stage = 2, nPlanned = c(60,30), pi2 = 0.4, pi1 = 0.8, directionUpper = TRUE, allocationRatioPlanned = 2) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(3.0522063, 2.5665893, 2.1582358, 2.0411334), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(0, 0.5, 0.8), tolerance = 1e-07) expect_equal(x1$alphaSpent, c(0.0011358297, 0.0058207373, 0.017395547, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.0011358297, 0.0051352086, 
0.015454753, 0.020618787), tolerance = 1e-07) expect_equal(x1$effectSizes, c(0.36928105, 0.18026316, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(2.1918708, 0.14224412, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$pValues, c(0.014194417, 0.44344359, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x1$thetaH0, 0) expect_equal(x1$conditionalRejectionProbabilities, c(0.19002543, 0.11795654, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, 60, 30)) expect_equal(x1$allocationRatioPlanned, 2) expect_equal(x1$pi1, 0.8, tolerance = 1e-07) expect_equal(x1$pi2, 0.4, tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, 0.95912173, 0.99561789), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(-0.14000098, -0.10866984, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.72492405, 0.44094175, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.083297284, 0.1166436, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, NA_integer_) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$normalApproximation, TRUE) expect_equal(x1$directionUpper, TRUE) expect_equal(x1$overallTestStatistics, c(2.1918708, 1.5920411, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$overallPValues, c(0.014194417, 0.055687735, NA_real_, NA_real_), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x1, piRange = seq(0.5, 0.8, 0.05), allocationRatioPlanned = 2) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.39134298, 0.56404834, 0.72892392, 0.85852169, 0.94050627, 0.98087239, 0.99561789), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.63105765, 0.95013529, 0.95013529, 0.63105765, 0.27837883, 0.081561833, 0.015871623), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 90, pi2 = 0.4, allocation ratio = 2") # reversed "directionUpper" x2 <- getAnalysisResults(design7, dataExample5, thetaH0 = 0, stage = 2, nPlanned = c(60,30), pi2 = 0.8, pi1 = 0.4, directionUpper = FALSE, allocationRatioPlanned = 0.5) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2, 3, 4)) expect_equal(x2$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(3.0522063, 2.5665893, 2.1582358, 2.0411334), tolerance = 1e-07) expect_equal(x2$futilityBounds, c(0, 0.5, 0.8), tolerance = 1e-07) expect_equal(x2$alphaSpent, c(0.0011358297, 0.0058207373, 0.017395547, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.0011358297, 0.0051352086, 0.015454753, 0.020618787), tolerance = 1e-07) expect_equal(x2$effectSizes, c(0.36928105, 0.18026316, 
NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(2.1918708, 0.14224412, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.98580558, 0.55655641, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("accept and stop", "accept and stop", NA_character_, NA_character_)) expect_equal(x2$thetaH0, 0) expect_equal(x2$conditionalRejectionProbabilities, c(0, 0, NA_real_, NA_real_)) expect_equal(x2$nPlanned, c(NA_real_, NA_real_, 60, 30)) expect_equal(x2$allocationRatioPlanned, 0.5, tolerance = 1e-07) expect_equal(x2$pi1, 0.4, tolerance = 1e-07) expect_equal(x2$pi2, 0.8, tolerance = 1e-07) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, 6.6613381e-16, 6.6613381e-16), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(-0.14000098, -0.10866984, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.72492405, 0.44094175, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.21185459, 0.21185459, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, 1) expect_equal(x2$finalPValues, c(0.98580558, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(0.03932898, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(0.62730993, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$medianUnbiasedEstimates, c(0.36928105, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, FALSE) expect_equal(x2$overallTestStatistics, c(2.1918708, 1.5920411, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$overallPValues, c(0.98580558, 0.94431227, NA_real_, NA_real_), tolerance = 1e-07) plotData2 <- testGetAnalysisResultsPlotData(x2, piRange = seq(0.2, 0.5, 0.05), allocationRatioPlanned = 0.5) ## ## Comparison of the results of list object 'plotData2' with expected results ## expect_equal(plotData2$xValues, c(0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(6.6613381e-16, 6.6613381e-16, 6.6613381e-16, 6.6613381e-16, 6.6613381e-16, 1, 1), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(1.003982e-05, 0.00017609247, 0.0020513476, 0.015871623, 0.081561833, 0.27837883, 0.63105765), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power Plot with Likelihood") expect_equal(plotData2$xlab, "pi1") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 90, pi2 = 0.8, allocation ratio = 0.5") }) test_that("'getAnalysisResults' for an inverse normal design and two treatments", { .skipTestifDisabled() design8 <- getDesignInverseNormal(kMax = 4, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.25, informationRates = c(0.2, 0.4, 0.8, 1), futilityBounds = c(0, 0.5, 0.8), bindingFutility = T) dataExample6 <- getDataset( n1 = c(17, 23, 22), n2 = c(18, 20, 19), events1 = c(11, 12, 17), events2 = c(5, 10, 7) ) x1 <- getAnalysisResults(design8, dataExample6, thetaH0 = 0.0, stage = 2, nPlanned = c(30,30), pi2 = 0.2, pi1 = 0.4, directionUpper = T) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(3.0522063, 2.5665893, 2.1582358, 2.0411334), tolerance = 
1e-07) expect_equal(x1$futilityBounds, c(0, 0.5, 0.8), tolerance = 1e-07) expect_equal(x1$alphaSpent, c(0.0011358297, 0.0058207373, 0.017395547, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.0011358297, 0.0051352086, 0.015454753, 0.020618787), tolerance = 1e-07) expect_equal(x1$effectSizes, c(0.36928105, 0.18026316, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(2.1918708, 0.14224412, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$pValues, c(0.014194417, 0.44344359, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x1$thetaH0, 0) expect_equal(x1$conditionalRejectionProbabilities, c(0.19002543, 0.12887611, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, 30, 30)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$pi1, 0.4, tolerance = 1e-07) expect_equal(x1$pi2, 0.2, tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, 0.42200962, 0.67359244), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(-0.14000098, -0.10230475, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.72492405, 0.44728185, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.083297284, 0.10825489, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, NA_integer_) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$normalApproximation, TRUE) expect_equal(x1$directionUpper, TRUE) expect_equal(x1$combinationTestStatistics, c(2.1918708, 1.6504685, NA_real_, NA_real_), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x1, piRange = seq(0.4, 0.7, 0.05), nPlanned = c(30,30)) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.67359244, 0.79683049, 0.88832834, 0.9471853, 0.97926376, 0.99357391, 0.99853781), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.081561833, 0.27837883, 0.63105765, 0.95013529, 0.95013529, 0.63105765, 0.27837883), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 60, pi2 = 0.2, allocation ratio = 1") x2 <- getAnalysisResults(design8, dataExample6, thetaH0 = 0.0, stage = 2, nPlanned = c(30,30), pi2 = 0.2, pi1 = 0.4, directionUpper = T) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2, 3, 4)) expect_equal(x2$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(3.0522063, 2.5665893, 2.1582358, 2.0411334), tolerance = 1e-07) expect_equal(x2$futilityBounds, c(0, 0.5, 0.8), tolerance = 1e-07) expect_equal(x2$alphaSpent, c(0.0011358297, 0.0058207373, 0.017395547, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.0011358297, 0.0051352086, 0.015454753, 0.020618787), tolerance 
= 1e-07) expect_equal(x2$effectSizes, c(0.36928105, 0.18026316, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(2.1918708, 0.14224412, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.014194417, 0.44344359, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x2$thetaH0, 0) expect_equal(x2$conditionalRejectionProbabilities, c(0.19002543, 0.12887611, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_, 30, 30)) expect_equal(x2$allocationRatioPlanned, 1) expect_equal(x2$pi1, 0.4, tolerance = 1e-07) expect_equal(x2$pi2, 0.2, tolerance = 1e-07) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, 0.42200962, 0.67359244), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(-0.14000098, -0.10230475, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.72492405, 0.44728185, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.083297284, 0.10825489, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, TRUE) expect_equal(x2$combinationTestStatistics, c(2.1918708, 1.6504685, NA_real_, NA_real_), tolerance = 1e-07) plotData2 <- testGetAnalysisResultsPlotData(x2, piRange = seq(0.4, 0.7, 0.05), nPlanned = c(30, 30)) ## ## Comparison of the results of list object 'plotData2' with expected results ## expect_equal(plotData2$xValues, c(0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(0.67359244, 0.79683049, 0.88832834, 0.9471853, 0.97926376, 0.99357391, 0.99853781), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(0.081561833, 0.27837883, 0.63105765, 0.95013529, 0.95013529, 0.63105765, 0.27837883), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power Plot with Likelihood") expect_equal(plotData2$xlab, "pi1") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 60, pi2 = 0.2, allocation ratio = 1") plotData3 <- testGetAnalysisResultsPlotData(x2, piRange = seq(0.4, 0.7, 0.05)) ## ## Comparison of the results of list object 'plotData3' with expected results ## expect_equal(plotData3$xValues, c(0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7), tolerance = 1e-07) expect_equal(plotData3$condPowerValues, c(0.67359244, 0.79683049, 0.88832834, 0.9471853, 0.97926376, 0.99357391, 0.99853781), tolerance = 1e-07) expect_equal(plotData3$likelihoodValues, c(0.081561833, 0.27837883, 0.63105765, 0.95013529, 0.95013529, 0.63105765, 0.27837883), tolerance = 1e-07) expect_equal(plotData3$main, "Conditional Power Plot with Likelihood") expect_equal(plotData3$xlab, "pi1") expect_equal(plotData3$ylab, "Conditional power / Likelihood") expect_equal(plotData3$sub, "Stage = 2, # of remaining subjects = 60, pi2 = 0.2, allocation ratio = 1") x3 <- getAnalysisResults(design8, dataExample6, thetaH0 = 0, stage = 2, nPlanned = c(60,30), pi2 = 0.4, pi1 = 0.8, directionUpper = TRUE, 
allocationRatioPlanned = 2) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x3' with expected results ## expect_equal(x3$stages, c(1, 2, 3, 4)) expect_equal(x3$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x3$criticalValues, c(3.0522063, 2.5665893, 2.1582358, 2.0411334), tolerance = 1e-07) expect_equal(x3$futilityBounds, c(0, 0.5, 0.8), tolerance = 1e-07) expect_equal(x3$alphaSpent, c(0.0011358297, 0.0058207373, 0.017395547, 0.025), tolerance = 1e-07) expect_equal(x3$stageLevels, c(0.0011358297, 0.0051352086, 0.015454753, 0.020618787), tolerance = 1e-07) expect_equal(x3$effectSizes, c(0.36928105, 0.18026316, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$testStatistics, c(2.1918708, 0.14224412, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$pValues, c(0.014194417, 0.44344359, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x3$thetaH0, 0) expect_equal(x3$conditionalRejectionProbabilities, c(0.19002543, 0.12887611, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$nPlanned, c(NA_real_, NA_real_, 60, 30)) expect_equal(x3$allocationRatioPlanned, 2) expect_equal(x3$pi1, 0.8, tolerance = 1e-07) expect_equal(x3$pi2, 0.4, tolerance = 1e-07) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_, 0.95920074, 0.9956319), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(-0.14000098, -0.10230475, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(0.72492405, 0.44728185, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.083297284, 0.10825489, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$finalStage, NA_integer_) expect_equal(x3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$normalApproximation, TRUE) expect_equal(x3$directionUpper, TRUE) expect_equal(x3$combinationTestStatistics, c(2.1918708, 1.6504685, NA_real_, NA_real_), tolerance = 1e-07) plotData4 <- testGetAnalysisResultsPlotData(x3, piRange = seq(0.5, 0.8, 0.05), allocationRatioPlanned = 2) ## ## Comparison of the results of list object 'plotData4' with expected results ## expect_equal(plotData4$xValues, c(0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8), tolerance = 1e-07) expect_equal(plotData4$condPowerValues, c(0.39156853, 0.5643629, 0.72924187, 0.85876179, 0.94064025, 0.98092555, 0.9956319), tolerance = 1e-07) expect_equal(plotData4$likelihoodValues, c(0.63105765, 0.95013529, 0.95013529, 0.63105765, 0.27837883, 0.081561833, 0.015871623), tolerance = 1e-07) expect_equal(plotData4$main, "Conditional Power Plot with Likelihood") expect_equal(plotData4$xlab, "pi1") expect_equal(plotData4$ylab, "Conditional power / Likelihood") expect_equal(plotData4$sub, "Stage = 2, # of remaining subjects = 90, pi2 = 0.4, allocation ratio = 2") # reversed "directionUpper" x4 <- getAnalysisResults(design8, dataExample6, thetaH0 = 0, stage = 2, nPlanned = c(60,30), pi2 = 0.8, pi1 = 0.4, directionUpper = FALSE, allocationRatioPlanned = 0.5) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x4' with expected results ## expect_equal(x4$stages, c(1, 2, 3, 4)) expect_equal(x4$informationRates, 
c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x4$criticalValues, c(3.0522063, 2.5665893, 2.1582358, 2.0411334), tolerance = 1e-07) expect_equal(x4$futilityBounds, c(0, 0.5, 0.8), tolerance = 1e-07) expect_equal(x4$alphaSpent, c(0.0011358297, 0.0058207373, 0.017395547, 0.025), tolerance = 1e-07) expect_equal(x4$stageLevels, c(0.0011358297, 0.0051352086, 0.015454753, 0.020618787), tolerance = 1e-07) expect_equal(x4$effectSizes, c(0.36928105, 0.18026316, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$testStatistics, c(2.1918708, 0.14224412, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$pValues, c(0.98580558, 0.55655641, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$testActions, c("accept and stop", "accept and stop", NA_character_, NA_character_)) expect_equal(x4$thetaH0, 0) expect_equal(x4$conditionalRejectionProbabilities, c(0, 0, NA_real_, NA_real_)) expect_equal(x4$nPlanned, c(NA_real_, NA_real_, 60, 30)) expect_equal(x4$allocationRatioPlanned, 0.5, tolerance = 1e-07) expect_equal(x4$pi1, 0.4, tolerance = 1e-07) expect_equal(x4$pi2, 0.8, tolerance = 1e-07) expect_equal(x4$conditionalPower, c(NA_real_, NA_real_, 6.6613381e-16, 6.6613381e-16), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalLowerBounds, c(-0.14000098, -0.10230475, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds, c(0.72492405, 0.44728185, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues, c(0.21185459, 0.21185459, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$finalStage, 1) expect_equal(x4$finalPValues, c(0.98580558, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$finalConfidenceIntervalLowerBounds, c(0.03932898, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$finalConfidenceIntervalUpperBounds, c(0.62730993, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$medianUnbiasedEstimates, c(0.36928105, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$normalApproximation, TRUE) expect_equal(x4$directionUpper, FALSE) expect_equal(x4$combinationTestStatistics, c(-2.1918708, -1.6504685, NA_real_, NA_real_), tolerance = 1e-07) plotData5 <- testGetAnalysisResultsPlotData(x4, piRange = seq(0.2, 0.5, 0.05), allocationRatioPlanned = 0.5) ## ## Comparison of the results of list object 'plotData5' with expected results ## expect_equal(plotData5$xValues, c(0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5), tolerance = 1e-07) expect_equal(plotData5$condPowerValues, c(6.6613381e-16, 6.6613381e-16, 6.6613381e-16, 6.6613381e-16, 6.6613381e-16, 1, 1), tolerance = 1e-07) expect_equal(plotData5$likelihoodValues, c(1.003982e-05, 0.00017609247, 0.0020513476, 0.015871623, 0.081561833, 0.27837883, 0.63105765), tolerance = 1e-07) expect_equal(plotData5$main, "Conditional Power Plot with Likelihood") expect_equal(plotData5$xlab, "pi1") expect_equal(plotData5$ylab, "Conditional power / Likelihood") expect_equal(plotData5$sub, "Stage = 2, # of remaining subjects = 90, pi2 = 0.8, allocation ratio = 0.5") }) test_that("'getAnalysisResults' for a Fisher design and two treatments", { .skipTestifDisabled() design9 <- getDesignFisher(kMax = 4, alpha = 0.025, method = "equalAlpha", informationRates = c(0.2, 0.4, 0.8, 1)) dataExample7 <- getDataset( n1 = c(17, 23, 22), n2 = c(18, 20, 19), events1 = c(11, 12, 17), events2 = c(5, 10, 7) ) x1 <- getAnalysisResults(design9, dataExample7, thetaH0 = 0, stage = 2, nPlanned = c(60,30), pi2 = 0.4, pi1 = 0.8, directionUpper = TRUE, 
allocationRatioPlanned = 2, seed = 123) ## ## Comparison of the results of AnalysisResultsFisher object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(0.00979594, 0.001278498, 5.8066063e-05, 1.276231e-05), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(1, 1, 1)) expect_equal(x1$alphaSpent, c(0.00979594, 0.01571, 0.022184266, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.00979594, 0.00979594, 0.00979594, 0.00979594), tolerance = 1e-07) expect_equal(x1$effectSizes, c(0.36928105, 0.18026316, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(2.1918708, 0.14224412, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$pValues, c(0.014194417, 0.44344359, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x1$thetaH0, 0) expect_equal(x1$conditionalRejectionProbabilities, c(0.13898608, 0.050808351, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, 60, 30)) expect_equal(x1$allocationRatioPlanned, 2) expect_equal(x1$pi1, 0.8, tolerance = 1e-07) expect_equal(x1$pi2, 0.4, tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(-0.023853561, -0.068378457, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.66428984, 0.40418869, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.035427069, 0.088523734, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, NA_integer_) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$normalApproximation, TRUE) expect_equal(x1$directionUpper, TRUE) expect_equal(x1$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.925, 0.972), tolerance = 1e-07) expect_equal(x1$combinationTestStatistics, c(0.014194417, 0.0062944232, NA_real_, NA_real_), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x1, piRange = seq(0.5, 0.8, 0.05), allocationRatioPlanned = 2) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.199, 0.364, 0.506, 0.686, 0.839, 0.927, 0.979), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.63105765, 0.95013529, 0.95013529, 0.63105765, 0.27837883, 0.081561833, 0.015871623), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 90, pi2 = 0.4, allocation ratio = 2") # reversed "directionUpper" x2 <- getAnalysisResults(design9, dataExample7, thetaH0 = 0, stage = 2, nPlanned = c(60,30), pi2 = 0.8, pi1 = 0.4, directionUpper = FALSE, allocationRatioPlanned = 0.5, seed = 123) ## ## Comparison of the results of AnalysisResultsFisher object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2, 3, 4)) expect_equal(x2$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) 
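# Reversing 'directionUpper' replaces the stage-wise p-values by their
# complements, as checked below: 0.98580558 = 1 - 0.014194417 and
# 0.55655641 = 1 - 0.44344359.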
expect_equal(x2$criticalValues, c(0.00979594, 0.001278498, 5.8066063e-05, 1.276231e-05), tolerance = 1e-07) expect_equal(x2$futilityBounds, c(1, 1, 1)) expect_equal(x2$alphaSpent, c(0.00979594, 0.01571, 0.022184266, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.00979594, 0.00979594, 0.00979594, 0.00979594), tolerance = 1e-07) expect_equal(x2$effectSizes, c(0.36928105, 0.18026316, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(2.1918708, 0.14224412, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.98580558, 0.55655641, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x2$thetaH0, 0) expect_equal(x2$conditionalRejectionProbabilities, c(0.0056634595, 0.0023089469, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_, 60, 30)) expect_equal(x2$allocationRatioPlanned, 0.5, tolerance = 1e-07) expect_equal(x2$pi1, 0.4, tolerance = 1e-07) expect_equal(x2$pi2, 0.8, tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(-0.023853561, -0.068378457, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.66428984, 0.40418869, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.49999905, 0.49999905, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, FALSE) expect_equal(x2$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.591, 0.788), tolerance = 1e-07) expect_equal(x2$combinationTestStatistics, c(0.98580558, 0.54865641, NA_real_, NA_real_), tolerance = 1e-07) plotData2 <- testGetAnalysisResultsPlotData(x2,piRange = seq(0.2, 0.5, 0.05), allocationRatioPlanned = 0.5) ## ## Comparison of the results of list object 'plotData2' with expected results ## expect_equal(plotData2$xValues, c(0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(0.998, 0.992, 0.967, 0.892, 0.807, 0.623, 0.493), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(1.003982e-05, 0.00017609247, 0.0020513476, 0.015871623, 0.081561833, 0.27837883, 0.63105765), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power Plot with Likelihood") expect_equal(plotData2$xlab, "pi1") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 90, pi2 = 0.8, allocation ratio = 0.5") }) test_that("'getAnalysisResults' produces the correct exact tests and final CIs", { .skipTestifDisabled() dataExample8 <- getDataset( n2 = c(10, 80), n1 = c(15, 100), events2 = c(8, 54), events1 = c(6, 45) ) design10 <- getDesignGroupSequential(kMax = 2, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.1, informationRates = c(0.3, 1)) x1 <- getAnalysisResults(design10, dataExample8, thetaH0 = 0, stage = 2, directionUpper = FALSE, normalApproximation = FALSE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2)) expect_equal(x1$informationRates, 
c(0.3, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(3.1833546, 1.9666792), tolerance = 1e-07) expect_equal(x1$futilityBounds, -6) expect_equal(x1$alphaSpent, c(0.00072789603, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.00072789603, 0.024610104), tolerance = 1e-07) expect_equal(x1$effectSizes, c(-0.4, -0.24541063), tolerance = 1e-07) expect_equal(x1$testStatistics, c(NA_real_, NA_real_)) expect_equal(x1$pValues, c(0.057571679, 0.0019934481), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "reject")) expect_equal(x1$thetaH0, 0) expect_equal(x1$conditionalRejectionProbabilities, c(0.093545976, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$pi1, 0.44347826, tolerance = 1e-07) expect_equal(x1$pi2, 0.68888889, tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(-0.78187776, -0.37165341), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.23265736, -0.10906229), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.19076123, 0.00035290512), tolerance = 1e-07) expect_equal(x1$finalStage, 2) expect_equal(x1$finalPValues, c(NA_real_, 0.0010527587), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, -0.35871085), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, -0.08946214), tolerance = 1e-07) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, -0.2258854), tolerance = 1e-07) expect_equal(x1$normalApproximation, FALSE) expect_equal(x1$directionUpper, FALSE) expect_equal(x1$overallTestStatistics, c(1.5754901, 3.3869257), tolerance = 1e-07) expect_equal(x1$overallPValues, c(0.057571679, 0.00035340268), tolerance = 1e-07) design11 <- getDesignInverseNormal(kMax = 2, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.1, informationRates = c(0.3, 1)) x2 <- getAnalysisResults(design11, dataExample8, thetaH0 = 0, stage = 2, directionUpper = FALSE, normalApproximation = FALSE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2)) expect_equal(x2$informationRates, c(0.3, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(3.1833546, 1.9666792), tolerance = 1e-07) expect_equal(x2$futilityBounds, -6) expect_equal(x2$alphaSpent, c(0.00072789603, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.00072789603, 0.024610104), tolerance = 1e-07) expect_equal(x2$effectSizes, c(-0.4, -0.24541063), tolerance = 1e-07) expect_equal(x2$testStatistics, c(NA_real_, NA_real_)) expect_equal(x2$pValues, c(0.057571679, 0.0019934481), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "reject")) expect_equal(x2$thetaH0, 0) expect_equal(x2$conditionalRejectionProbabilities, c(0.093545976, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_)) expect_equal(x2$allocationRatioPlanned, 1) expect_equal(x2$pi1, 0.44347826, tolerance = 1e-07) expect_equal(x2$pi2, 0.68888889, tolerance = 1e-07) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(-0.78187776, -0.38794979), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.23265736, -0.11944223), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.19076123, 0.00053410288), tolerance = 1e-07) expect_equal(x2$finalStage, 2) expect_equal(x2$finalPValues, c(NA_real_, 
0.0012242304), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, -0.36146576), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, -0.088125224), tolerance = 1e-07) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, -0.2258396), tolerance = 1e-07) expect_equal(x2$normalApproximation, FALSE) expect_equal(x2$directionUpper, FALSE) expect_equal(x2$combinationTestStatistics, c(1.5754901, 3.2718402), tolerance = 1e-07) design12 <- getDesignFisher(kMax = 2, alpha = 0.025, method = "fullAlpha", informationRates = c(0.3, 1)) x3 <- getAnalysisResults(design12, dataExample8, thetaH0 = 0, stage = 2, directionUpper = FALSE, normalApproximation = FALSE, seed = 123) ## ## Comparison of the results of AnalysisResultsFisher object 'x3' with expected results ## expect_equal(x3$stages, c(1, 2)) expect_equal(x3$informationRates, c(0.3, 1), tolerance = 1e-07) expect_equal(x3$criticalValues, c(0.00076737019, 0.00076737019), tolerance = 1e-07) expect_equal(x3$futilityBounds, 1) expect_equal(x3$alphaSpent, c(0.00076737019, 0.025), tolerance = 1e-07) expect_equal(x3$stageLevels, c(0.00076737019, 0.025), tolerance = 1e-07) expect_equal(x3$effectSizes, c(-0.4, -0.24541063), tolerance = 1e-07) expect_equal(x3$testStatistics, c(NA_real_, NA_real_)) expect_equal(x3$pValues, c(0.057571679, 0.0019934481), tolerance = 1e-07) expect_equal(x3$testActions, c("continue", "reject")) expect_equal(x3$thetaH0, 0) expect_equal(x3$conditionalRejectionProbabilities, c(0.059209424, NA_real_), tolerance = 1e-07) expect_equal(x3$nPlanned, c(NA_real_, NA_real_)) expect_equal(x3$allocationRatioPlanned, 1) expect_equal(x3$pi1, 0.44347826, tolerance = 1e-07) expect_equal(x3$pi2, 0.68888889, tolerance = 1e-07) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(-0.78083174, -0.37635443), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(0.22999653, -0.10478651), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.33766302, 0.00088314698), tolerance = 1e-07) expect_equal(x3$finalStage, 2) expect_equal(x3$finalPValues, c(NA_real_, 0.0015832283), tolerance = 1e-07) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_)) expect_equal(x3$normalApproximation, FALSE) expect_equal(x3$directionUpper, FALSE) expect_equal(x3$combinationTestStatistics, c(0.057571679, 4.3180464e-06), tolerance = 1e-07) }) test_that("'getAnalysisResults' produces the correct non-inferiority results for a group sequential design", { .skipTestifDisabled() design13 <- getDesignGroupSequential(kMax = 2, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.1, informationRates = c(0.3, 1)) dataExample9 <- getDataset( n1 = c(10, 80), n2 = c(15, 100), events1 = c(8, 54), events2 = c(6, 45) ) x1 <- getAnalysisResults(design13, dataExample9, thetaH0 = -0.1, stage = 2, directionUpper = TRUE, normalApproximation = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2)) expect_equal(x1$informationRates, c(0.3, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(3.1833546, 1.9666792), tolerance = 1e-07) expect_equal(x1$futilityBounds, -6) expect_equal(x1$alphaSpent, c(0.00072789603, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.00072789603, 0.024610104), tolerance = 1e-07) 
expect_equal(x1$effectSizes, c(0.4, 0.24541063), tolerance = 1e-07) expect_equal(x1$testStatistics, c(2.4676423, 4.366446), tolerance = 1e-07) expect_equal(x1$pValues, c(0.0068003075, 6.3142236e-06), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "reject")) expect_equal(x1$thetaH0, -0.1, tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities, c(0.2311149, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$pi1, 0.68888889, tolerance = 1e-07) expect_equal(x1$pi2, 0.44347826, tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(-0.23265736, 0.10906229), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.78187776, 0.37165341), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.067077549, 1.9536724e-06), tolerance = 1e-07) expect_equal(x1$finalStage, 2) expect_equal(x1$finalPValues, c(NA_real_, 0.00072814991), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, 0.046389254), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, 0.3577016), tolerance = 1e-07) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, 0.2183453), tolerance = 1e-07) expect_equal(x1$normalApproximation, TRUE) expect_equal(x1$directionUpper, TRUE) expect_equal(x1$overallTestStatistics, c(2.4676423, 4.9460155), tolerance = 1e-07) expect_equal(x1$overallPValues, c(0.0068003075, 3.7873958e-07), tolerance = 1e-07) x2 <- getAnalysisResults(design13, dataExample9, thetaH0 = -0.1, stage = 1, nPlanned = 40, pi1 = 0.45, pi2 = 0.4, directionUpper = TRUE, normalApproximation = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2)) expect_equal(x2$informationRates, c(0.3, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(3.1833546, 1.9666792), tolerance = 1e-07) expect_equal(x2$futilityBounds, -6) expect_equal(x2$alphaSpent, c(0.00072789603, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.00072789603, 0.024610104), tolerance = 1e-07) expect_equal(x2$effectSizes, c(0.4, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(2.4676423, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.0068003075, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", NA_character_)) expect_equal(x2$thetaH0, -0.1, tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities, c(0.2311149, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, 40)) expect_equal(x2$allocationRatioPlanned, 1) expect_equal(x2$pi1, 0.45, tolerance = 1e-07) expect_equal(x2$pi2, 0.4, tolerance = 1e-07) expect_equal(x2$conditionalPower, c(NA_real_, 0.59014508), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(-0.23265736, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.78187776, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.067077549, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_)) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, TRUE) 
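# Cross-check: the conditional power of 0.59014508 asserted above is computed
# under the assumed rates pi1 = 0.45 and pi2 = 0.4 with nPlanned = 40; the
# same value reappears below in plotData1$condPowerValues at pi1 = 0.45,
# tying the plot data back to the analysis results.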
expect_equal(x2$overallTestStatistics, c(2.4676423, NA_real_), tolerance = 1e-07) expect_equal(x2$overallPValues, c(0.0068003075, NA_real_), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x2, piRange = seq(0.25, 0.7, 0.05)) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.13978279, 0.2311149, 0.34247666, 0.46554605, 0.59014508, 0.70618885, 0.80546789, 0.88295965, 0.937434, 0.97121381), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(7.8444044e-05, 0.00040464517, 0.0017853782, 0.006737947, 0.021750359, 0.060054668, 0.14183016, 0.2865048, 0.4950359, 0.73161563), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 1, # of remaining subjects = 40, pi2 = 0.4, allocation ratio = 1") # non-inferiority, reversed "directionUpper" x3 <- getAnalysisResults(design13, dataExample9, thetaH0 = 0.1, stage = 2, directionUpper = FALSE, normalApproximation = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x3' with expected results ## expect_equal(x3$stages, c(1, 2)) expect_equal(x3$informationRates, c(0.3, 1), tolerance = 1e-07) expect_equal(x3$criticalValues, c(3.1833546, 1.9666792), tolerance = 1e-07) expect_equal(x3$futilityBounds, -6) expect_equal(x3$alphaSpent, c(0.00072789603, 0.025), tolerance = 1e-07) expect_equal(x3$stageLevels, c(0.00072789603, 0.024610104), tolerance = 1e-07) expect_equal(x3$effectSizes, c(0.4, 0.24541063), tolerance = 1e-07) expect_equal(x3$testStatistics, c(1.4985437, 1.6883572), tolerance = 1e-07) expect_equal(x3$pValues, c(0.93300397, 0.95432866), tolerance = 1e-07) expect_equal(x3$testActions, c("continue", "accept")) expect_equal(x3$thetaH0, 0.1, tolerance = 1e-07) expect_equal(x3$conditionalRejectionProbabilities, c(0.00043165085, NA_real_), tolerance = 1e-07) expect_equal(x3$nPlanned, c(NA_real_, NA_real_)) expect_equal(x3$allocationRatioPlanned, 1) expect_equal(x3$pi1, 0.68888889, tolerance = 1e-07) expect_equal(x3$pi2, 0.44347826, tolerance = 1e-07) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(-0.23265736, 0.10906229), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(0.78187776, 0.37165341), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.49999905, 0.49999905), tolerance = 1e-07) expect_equal(x3$finalStage, 2) expect_equal(x3$finalPValues, c(NA_real_, 0.9819019), tolerance = 1e-07) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, 0.10906703), tolerance = 1e-07) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, 0.37282539), tolerance = 1e-07) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, 0.24094623), tolerance = 1e-07) expect_equal(x3$normalApproximation, TRUE) expect_equal(x3$directionUpper, FALSE) expect_equal(x3$overallTestStatistics, c(1.4985437, 2.0947166), tolerance = 1e-07) expect_equal(x3$overallPValues, c(0.93300397, 0.9819019), tolerance = 1e-07) x4 <- getAnalysisResults(design13, dataExample9, thetaH0 = 0.1, stage = 1, nPlanned = 40, pi1 = 0.4, pi2 = 0.45, directionUpper = FALSE, normalApproximation = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x4' with expected results ## 
expect_equal(x4$stages, c(1, 2)) expect_equal(x4$informationRates, c(0.3, 1), tolerance = 1e-07) expect_equal(x4$criticalValues, c(3.1833546, 1.9666792), tolerance = 1e-07) expect_equal(x4$futilityBounds, -6) expect_equal(x4$alphaSpent, c(0.00072789603, 0.025), tolerance = 1e-07) expect_equal(x4$stageLevels, c(0.00072789603, 0.024610104), tolerance = 1e-07) expect_equal(x4$effectSizes, c(0.4, NA_real_), tolerance = 1e-07) expect_equal(x4$testStatistics, c(1.4985437, NA_real_), tolerance = 1e-07) expect_equal(x4$pValues, c(0.93300397, NA_real_), tolerance = 1e-07) expect_equal(x4$testActions, c("continue", NA_character_)) expect_equal(x4$thetaH0, 0.1, tolerance = 1e-07) expect_equal(x4$conditionalRejectionProbabilities, c(0.00043165085, NA_real_), tolerance = 1e-07) expect_equal(x4$nPlanned, c(NA_real_, 40)) expect_equal(x4$allocationRatioPlanned, 1) expect_equal(x4$pi1, 0.4, tolerance = 1e-07) expect_equal(x4$pi2, 0.45, tolerance = 1e-07) expect_equal(x4$conditionalPower, c(NA_real_, 0.009129264), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalLowerBounds, c(-0.23265736, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds, c(0.78187776, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues, c(0.49999905, NA_real_), tolerance = 1e-07) expect_equal(x4$finalStage, NA_integer_) expect_equal(x4$finalPValues, c(NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_)) expect_equal(x4$medianUnbiasedEstimates, c(NA_real_, NA_real_)) expect_equal(x4$normalApproximation, TRUE) expect_equal(x4$directionUpper, FALSE) expect_equal(x4$overallTestStatistics, c(1.4985437, NA_real_), tolerance = 1e-07) expect_equal(x4$overallPValues, c(0.93300397, NA_real_), tolerance = 1e-07) }) rpact/tests/testthat/test-f_design_utilities.R0000644000176200001440000017431613567165663021374 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 06 November 2019, 17:12:56 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing design utility functions") test_that("'getPiByLambda' and 'getLambdaByPi' produce corresponding results", { expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 1, kappa = 2), eventTime = 1, kappa = 2), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 1, kappa = 2), eventTime = 1, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 1, kappa = 2), eventTime = 1, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 1, kappa = 2), eventTime = 1, kappa = 2), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 1, kappa = 2), eventTime = 1, kappa = 2), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 1, kappa = 2), eventTime = 1, kappa = 2), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 1, kappa = 2), eventTime = 1, kappa = 2), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 1, kappa = 2), eventTime = 1, kappa = 2), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, 
eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 1, kappa = 4), eventTime = 1, kappa = 4), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 1, kappa = 4), eventTime = 1, kappa = 4), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 1, kappa = 4), eventTime = 1, kappa = 4), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 1, kappa = 4), eventTime = 1, kappa = 4), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 1, kappa = 4), eventTime = 1, kappa = 4), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 1, kappa = 4), eventTime = 1, kappa = 4), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 1, kappa = 4), eventTime = 1, kappa = 4), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 1, kappa = 4), eventTime = 1, kappa = 4), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 2, kappa = 1), eventTime = 2, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 2, kappa = 1), eventTime = 2, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 2, kappa = 1), eventTime = 2, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 2, kappa = 1), eventTime = 2, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 2, kappa = 1), eventTime = 2, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 2, kappa = 1), eventTime = 2, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 2, kappa = 1), eventTime = 2, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 2, kappa = 1), eventTime = 2, kappa = 1), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 2, kappa = 2), eventTime = 2, kappa = 2), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 2, kappa = 2), eventTime = 2, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 2, kappa = 2), eventTime = 2, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 2, kappa = 2), eventTime = 2, kappa = 2), 0.4, tolerance = 1e-04) 
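# The checks in this test exercise the round-trip identity
# getLambdaByPi(getPiByLambda(pi, ...), ...) == pi. A minimal sketch of the
# two conversions, assuming a Weibull-type parameterization
# S(t) = exp(-(lambda * t)^kappa) (an assumption of this sketch, with
# kappa = 1 as the exponential special case):
#   pi     = 1 - exp(-(lambda * eventTime)^kappa)    # getPiByLambda
#   lambda = (-log(1 - pi))^(1 / kappa) / eventTime  # getLambdaByPi
# These maps are exact inverses of each other, so each expectation recovers
# its input pi up to the stated numerical tolerance.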
expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 2, kappa = 2), eventTime = 2, kappa = 2), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 2, kappa = 2), eventTime = 2, kappa = 2), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 2, kappa = 2), eventTime = 2, kappa = 2), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 2, kappa = 2), eventTime = 2, kappa = 2), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 2, kappa = 3), eventTime = 2, kappa = 3), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 2, kappa = 3), eventTime = 2, kappa = 3), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 2, kappa = 3), eventTime = 2, kappa = 3), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 2, kappa = 3), eventTime = 2, kappa = 3), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 2, kappa = 3), eventTime = 2, kappa = 3), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 2, kappa = 3), eventTime = 2, kappa = 3), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 2, kappa = 3), eventTime = 2, kappa = 3), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 2, kappa = 3), eventTime = 2, kappa = 3), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 2, kappa = 4), eventTime = 2, kappa = 4), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 2, kappa = 4), eventTime = 2, kappa = 4), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 2, kappa = 4), eventTime = 2, kappa = 4), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 2, kappa = 4), eventTime = 2, kappa = 4), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 2, kappa = 4), eventTime = 2, kappa = 4), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 2, kappa = 4), eventTime = 2, kappa = 4), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 2, kappa = 4), eventTime = 2, kappa = 4), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 2, kappa = 4), eventTime = 2, kappa = 4), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 2, kappa = 5), eventTime = 2, kappa = 5), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 2, kappa = 5), eventTime = 2, kappa = 5), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 2, kappa = 5), eventTime = 2, kappa = 5), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 2, kappa = 5), eventTime = 2, kappa = 5), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 2, kappa = 5), eventTime = 2, kappa = 5), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 2, kappa = 5), eventTime = 2, kappa = 5), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 2, kappa = 5), eventTime = 2, kappa = 5), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 2, kappa = 5), eventTime = 2, kappa = 5), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 3, kappa = 1), eventTime 
= 3, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 3, kappa = 1), eventTime = 3, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 3, kappa = 1), eventTime = 3, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 3, kappa = 1), eventTime = 3, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 3, kappa = 1), eventTime = 3, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 3, kappa = 1), eventTime = 3, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 3, kappa = 1), eventTime = 3, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 3, kappa = 1), eventTime = 3, kappa = 1), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 3, kappa = 2), eventTime = 3, kappa = 2), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 3, kappa = 2), eventTime = 3, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 3, kappa = 2), eventTime = 3, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 3, kappa = 2), eventTime = 3, kappa = 2), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 3, kappa = 2), eventTime = 3, kappa = 2), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 3, kappa = 2), eventTime = 3, kappa = 2), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 3, kappa = 2), eventTime = 3, kappa = 2), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 3, kappa = 2), eventTime = 3, kappa = 2), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 3, kappa = 3), eventTime = 3, kappa = 3), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 3, kappa = 3), eventTime = 3, kappa = 3), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 3, kappa = 3), eventTime = 3, kappa = 3), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 3, kappa = 3), eventTime = 3, kappa = 3), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 3, kappa = 3), eventTime = 3, kappa = 3), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 3, kappa = 3), eventTime = 3, kappa = 3), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 3, kappa = 3), eventTime = 3, kappa = 3), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 3, kappa = 3), eventTime = 3, kappa = 3), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 3, kappa = 4), eventTime = 3, kappa = 4), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 3, kappa = 4), eventTime = 3, kappa = 4), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 3, kappa = 4), eventTime = 3, kappa = 4), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 3, kappa = 4), eventTime = 3, kappa = 4), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 3, kappa = 4), eventTime = 3, kappa = 4), 0.5, tolerance = 1e-04) 
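# Worked example for one of the eventTime = 2, kappa = 2 checks above, under
# the parameterization sketched earlier:
#   lambda = sqrt(-log(1 - 0.3)) / 2 = sqrt(0.35667494) / 2 ~ 0.29861135
#   pi     = 1 - exp(-(0.29861135 * 2)^2) = 1 - exp(-0.35667494) = 0.3
# The round trip returns the input 0.3 exactly, well within tolerance = 1e-04.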
expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 3, kappa = 4), eventTime = 3, kappa = 4), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 3, kappa = 4), eventTime = 3, kappa = 4), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 3, kappa = 4), eventTime = 3, kappa = 4), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 3, kappa = 5), eventTime = 3, kappa = 5), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 3, kappa = 5), eventTime = 3, kappa = 5), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 3, kappa = 5), eventTime = 3, kappa = 5), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 3, kappa = 5), eventTime = 3, kappa = 5), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 3, kappa = 5), eventTime = 3, kappa = 5), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 3, kappa = 5), eventTime = 3, kappa = 5), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 4, kappa = 1), eventTime = 4, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 4, kappa = 1), eventTime = 4, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 4, kappa = 1), eventTime = 4, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 4, kappa = 1), eventTime = 4, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 4, kappa = 1), eventTime = 4, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 4, kappa = 1), eventTime = 4, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 4, kappa = 1), eventTime = 4, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 4, kappa = 1), eventTime = 4, kappa = 1), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 4, kappa = 2), eventTime = 4, kappa = 2), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 4, kappa = 2), eventTime = 4, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 4, kappa = 2), eventTime = 4, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 4, kappa = 2), eventTime = 4, kappa = 2), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 4, kappa = 2), eventTime = 4, kappa = 2), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 4, kappa = 2), eventTime = 4, kappa = 2), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 4, kappa = 2), eventTime = 4, kappa = 2), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 4, kappa = 2), eventTime = 4, kappa = 2), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 4, kappa = 3), eventTime = 4, kappa = 3), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 4, kappa = 3), eventTime = 4, kappa = 3), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 4, kappa = 3), eventTime = 4, kappa = 3), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 4, kappa = 3), eventTime 
= 4, kappa = 3), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 4, kappa = 3), eventTime = 4, kappa = 3), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 4, kappa = 3), eventTime = 4, kappa = 3), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 4, kappa = 3), eventTime = 4, kappa = 3), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 4, kappa = 3), eventTime = 4, kappa = 3), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 4, kappa = 4), eventTime = 4, kappa = 4), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 4, kappa = 4), eventTime = 4, kappa = 4), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 4, kappa = 4), eventTime = 4, kappa = 4), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 4, kappa = 4), eventTime = 4, kappa = 4), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 4, kappa = 4), eventTime = 4, kappa = 4), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 4, kappa = 4), eventTime = 4, kappa = 4), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 4, kappa = 5), eventTime = 4, kappa = 5), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 4, kappa = 5), eventTime = 4, kappa = 5), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 4, kappa = 5), eventTime = 4, kappa = 5), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 4, kappa = 5), eventTime = 4, kappa = 5), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 4, kappa = 5), eventTime = 4, kappa = 5), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 5, kappa = 1), eventTime = 5, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 5, kappa = 1), eventTime = 5, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 5, kappa = 1), eventTime = 5, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 5, kappa = 1), eventTime = 5, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 5, kappa = 1), eventTime = 5, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 5, kappa = 1), eventTime = 5, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 5, kappa = 1), eventTime = 5, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 5, kappa = 1), eventTime = 5, kappa = 1), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 5, kappa = 2), eventTime = 5, kappa = 2), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 5, kappa = 2), eventTime = 5, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 5, kappa = 2), eventTime = 5, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 5, kappa = 2), eventTime = 5, kappa = 2), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 5, kappa = 2), eventTime = 5, kappa = 2), 0.5, tolerance = 1e-04) 
expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 5, kappa = 2), eventTime = 5, kappa = 2), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 5, kappa = 2), eventTime = 5, kappa = 2), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 5, kappa = 2), eventTime = 5, kappa = 2), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 5, kappa = 3), eventTime = 5, kappa = 3), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 5, kappa = 3), eventTime = 5, kappa = 3), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 5, kappa = 3), eventTime = 5, kappa = 3), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 5, kappa = 3), eventTime = 5, kappa = 3), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 5, kappa = 3), eventTime = 5, kappa = 3), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 5, kappa = 3), eventTime = 5, kappa = 3), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 5, kappa = 4), eventTime = 5, kappa = 4), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 5, kappa = 4), eventTime = 5, kappa = 4), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 5, kappa = 4), eventTime = 5, kappa = 4), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 5, kappa = 4), eventTime = 5, kappa = 4), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 5, kappa = 5), eventTime = 5, kappa = 5), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 5, kappa = 5), eventTime = 5, kappa = 5), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 5, kappa = 5), eventTime = 5, kappa = 5), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 5, kappa = 5), eventTime = 5, kappa = 5), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 6, kappa = 1), eventTime = 6, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 6, kappa = 1), eventTime = 6, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 6, kappa = 1), eventTime = 6, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 6, kappa = 1), eventTime = 6, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 6, kappa = 1), eventTime = 6, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 6, kappa = 1), eventTime = 6, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 6, kappa = 1), eventTime = 6, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 6, kappa = 1), eventTime = 6, kappa = 1), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 6, kappa = 2), eventTime = 6, kappa = 2), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 6, kappa = 2), eventTime = 6, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 6, kappa = 2), eventTime = 6, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 6, kappa = 2), eventTime 
= 6, kappa = 2), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 6, kappa = 2), eventTime = 6, kappa = 2), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 6, kappa = 2), eventTime = 6, kappa = 2), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 6, kappa = 2), eventTime = 6, kappa = 2), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 6, kappa = 2), eventTime = 6, kappa = 2), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 6, kappa = 3), eventTime = 6, kappa = 3), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 6, kappa = 3), eventTime = 6, kappa = 3), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 6, kappa = 3), eventTime = 6, kappa = 3), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 6, kappa = 3), eventTime = 6, kappa = 3), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 6, kappa = 3), eventTime = 6, kappa = 3), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 6, kappa = 4), eventTime = 6, kappa = 4), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 6, kappa = 4), eventTime = 6, kappa = 4), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 6, kappa = 4), eventTime = 6, kappa = 4), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 6, kappa = 4), eventTime = 6, kappa = 4), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 6, kappa = 5), eventTime = 6, kappa = 5), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 6, kappa = 5), eventTime = 6, kappa = 5), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 6, kappa = 5), eventTime = 6, kappa = 5), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 7, kappa = 1), eventTime = 7, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 7, kappa = 1), eventTime = 7, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 7, kappa = 1), eventTime = 7, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 7, kappa = 1), eventTime = 7, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 7, kappa = 1), eventTime = 7, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 7, kappa = 1), eventTime = 7, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 7, kappa = 1), eventTime = 7, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 7, kappa = 1), eventTime = 7, kappa = 1), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 7, kappa = 2), eventTime = 7, kappa = 2), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 7, kappa = 2), eventTime = 7, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 7, kappa = 2), eventTime = 7, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 7, kappa = 2), eventTime = 7, kappa = 2), 0.4, tolerance = 1e-04) 
expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 7, kappa = 2), eventTime = 7, kappa = 2), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 7, kappa = 2), eventTime = 7, kappa = 2), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 7, kappa = 2), eventTime = 7, kappa = 2), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 7, kappa = 2), eventTime = 7, kappa = 2), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 7, kappa = 3), eventTime = 7, kappa = 3), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 7, kappa = 3), eventTime = 7, kappa = 3), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 7, kappa = 3), eventTime = 7, kappa = 3), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 7, kappa = 3), eventTime = 7, kappa = 3), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 7, kappa = 4), eventTime = 7, kappa = 4), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 7, kappa = 4), eventTime = 7, kappa = 4), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 7, kappa = 4), eventTime = 7, kappa = 4), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 7, kappa = 5), eventTime = 7, kappa = 5), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 7, kappa = 5), eventTime = 7, kappa = 5), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 8, kappa = 1), eventTime = 8, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 8, kappa = 1), eventTime = 8, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 8, kappa = 1), eventTime = 8, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 8, kappa = 1), eventTime = 8, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 8, kappa = 1), eventTime = 8, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 8, kappa = 1), eventTime = 8, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 8, kappa = 1), eventTime = 8, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 8, kappa = 1), eventTime = 8, kappa = 1), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 8, kappa = 2), eventTime = 8, kappa = 2), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 8, kappa = 2), eventTime = 8, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 8, kappa = 2), eventTime = 8, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 8, kappa = 2), eventTime = 8, kappa = 2), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 8, kappa = 2), eventTime = 8, kappa = 2), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 8, kappa = 2), eventTime = 8, kappa = 2), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 8, kappa = 2), eventTime = 8, kappa = 2), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 8, kappa = 3), eventTime 
= 8, kappa = 3), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 8, kappa = 3), eventTime = 8, kappa = 3), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 8, kappa = 3), eventTime = 8, kappa = 3), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 8, kappa = 3), eventTime = 8, kappa = 3), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 8, kappa = 4), eventTime = 8, kappa = 4), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 8, kappa = 4), eventTime = 8, kappa = 4), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 8, kappa = 4), eventTime = 8, kappa = 4), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 8, kappa = 5), eventTime = 8, kappa = 5), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 8, kappa = 5), eventTime = 8, kappa = 5), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 9, kappa = 1), eventTime = 9, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 9, kappa = 1), eventTime = 9, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 9, kappa = 1), eventTime = 9, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 9, kappa = 1), eventTime = 9, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 9, kappa = 1), eventTime = 9, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 9, kappa = 1), eventTime = 9, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 9, kappa = 1), eventTime = 9, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 9, kappa = 1), eventTime = 9, kappa = 1), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 9, kappa = 2), eventTime = 9, kappa = 2), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 9, kappa = 2), eventTime = 9, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 9, kappa = 2), eventTime = 9, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 9, kappa = 2), eventTime = 9, kappa = 2), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 9, kappa = 2), eventTime = 9, kappa = 2), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 9, kappa = 2), eventTime = 9, kappa = 2), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 9, kappa = 3), eventTime = 9, kappa = 3), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 9, kappa = 3), eventTime = 9, kappa = 3), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 9, kappa = 3), eventTime = 9, kappa = 3), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 9, kappa = 4), eventTime = 9, kappa = 4), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 9, kappa = 4), eventTime = 9, kappa = 4), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 9, kappa = 5), eventTime = 9, kappa = 5), 0.1, tolerance = 1e-04) 
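# Note the coverage pattern: the grid of tested pi values shrinks as eventTime
# and kappa grow. For kappa = 1 the full range pi = 0.1, ..., 0.8 is checked
# throughout, whereas for eventTime = 9, kappa = 5 only pi = 0.1 and 0.2 are
# checked.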
expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 9, kappa = 5), eventTime = 9, kappa = 5), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 10, kappa = 1), eventTime = 10, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 10, kappa = 1), eventTime = 10, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 10, kappa = 1), eventTime = 10, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 10, kappa = 1), eventTime = 10, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 10, kappa = 1), eventTime = 10, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 10, kappa = 1), eventTime = 10, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 10, kappa = 1), eventTime = 10, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 10, kappa = 1), eventTime = 10, kappa = 1), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 10, kappa = 2), eventTime = 10, kappa = 2), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 10, kappa = 2), eventTime = 10, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 10, kappa = 2), eventTime = 10, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 10, kappa = 2), eventTime = 10, kappa = 2), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 10, kappa = 2), eventTime = 10, kappa = 2), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 10, kappa = 3), eventTime = 10, kappa = 3), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 10, kappa = 3), eventTime = 10, kappa = 3), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 10, kappa = 3), eventTime = 10, kappa = 3), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 10, kappa = 4), eventTime = 10, kappa = 4), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 10, kappa = 4), eventTime = 10, kappa = 4), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 10, kappa = 5), eventTime = 10, kappa = 5), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 10, kappa = 5), eventTime = 10, kappa = 5), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 11, kappa = 1), eventTime = 11, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 11, kappa = 1), eventTime = 11, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 11, kappa = 1), eventTime = 11, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 11, kappa = 1), eventTime = 11, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 11, kappa = 1), eventTime = 11, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 11, kappa = 1), eventTime = 11, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 11, kappa = 1), eventTime = 11, kappa = 1), 0.7, tolerance = 1e-04) 
expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 11, kappa = 1), eventTime = 11, kappa = 1), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 11, kappa = 2), eventTime = 11, kappa = 2), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 11, kappa = 2), eventTime = 11, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 11, kappa = 2), eventTime = 11, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 11, kappa = 2), eventTime = 11, kappa = 2), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 11, kappa = 2), eventTime = 11, kappa = 2), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 11, kappa = 3), eventTime = 11, kappa = 3), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 11, kappa = 3), eventTime = 11, kappa = 3), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 11, kappa = 4), eventTime = 11, kappa = 4), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 11, kappa = 4), eventTime = 11, kappa = 4), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 11, kappa = 5), eventTime = 11, kappa = 5), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 12, kappa = 1), eventTime = 12, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 12, kappa = 1), eventTime = 12, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 12, kappa = 1), eventTime = 12, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 12, kappa = 1), eventTime = 12, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 12, kappa = 1), eventTime = 12, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 12, kappa = 1), eventTime = 12, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 12, kappa = 1), eventTime = 12, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 12, kappa = 1), eventTime = 12, kappa = 1), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 12, kappa = 2), eventTime = 12, kappa = 2), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 12, kappa = 2), eventTime = 12, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 12, kappa = 2), eventTime = 12, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 12, kappa = 2), eventTime = 12, kappa = 2), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 12, kappa = 3), eventTime = 12, kappa = 3), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 12, kappa = 3), eventTime = 12, kappa = 3), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 12, kappa = 4), eventTime = 12, kappa = 4), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 12, kappa = 4), eventTime = 12, kappa = 4), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 12, kappa = 5), eventTime = 12, kappa = 5), 0.1, tolerance = 1e-04) 
expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 13, kappa = 1), eventTime = 13, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 13, kappa = 1), eventTime = 13, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 13, kappa = 1), eventTime = 13, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 13, kappa = 1), eventTime = 13, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 13, kappa = 1), eventTime = 13, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 13, kappa = 1), eventTime = 13, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 13, kappa = 1), eventTime = 13, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 13, kappa = 1), eventTime = 13, kappa = 1), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 13, kappa = 2), eventTime = 13, kappa = 2), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 13, kappa = 2), eventTime = 13, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 13, kappa = 2), eventTime = 13, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 13, kappa = 2), eventTime = 13, kappa = 2), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 13, kappa = 3), eventTime = 13, kappa = 3), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 13, kappa = 3), eventTime = 13, kappa = 3), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 13, kappa = 4), eventTime = 13, kappa = 4), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 13, kappa = 5), eventTime = 13, kappa = 5), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 14, kappa = 1), eventTime = 14, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 14, kappa = 1), eventTime = 14, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 14, kappa = 1), eventTime = 14, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 14, kappa = 1), eventTime = 14, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 14, kappa = 1), eventTime = 14, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 14, kappa = 1), eventTime = 14, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 14, kappa = 1), eventTime = 14, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 14, kappa = 1), eventTime = 14, kappa = 1), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 14, kappa = 2), eventTime = 14, kappa = 2), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 14, kappa = 2), eventTime = 14, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 14, kappa = 2), eventTime = 14, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 14, kappa = 2), eventTime = 14, kappa = 2), 0.4, tolerance = 1e-04) 
expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 14, kappa = 3), eventTime = 14, kappa = 3), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 14, kappa = 3), eventTime = 14, kappa = 3), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 14, kappa = 4), eventTime = 14, kappa = 4), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 14, kappa = 5), eventTime = 14, kappa = 5), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 15, kappa = 1), eventTime = 15, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 15, kappa = 1), eventTime = 15, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 15, kappa = 1), eventTime = 15, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 15, kappa = 1), eventTime = 15, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 15, kappa = 1), eventTime = 15, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 15, kappa = 1), eventTime = 15, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 15, kappa = 1), eventTime = 15, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 15, kappa = 1), eventTime = 15, kappa = 1), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 15, kappa = 2), eventTime = 15, kappa = 2), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 15, kappa = 2), eventTime = 15, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 15, kappa = 2), eventTime = 15, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 15, kappa = 3), eventTime = 15, kappa = 3), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 15, kappa = 3), eventTime = 15, kappa = 3), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 15, kappa = 4), eventTime = 15, kappa = 4), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 15, kappa = 5), eventTime = 15, kappa = 5), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 16, kappa = 1), eventTime = 16, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 16, kappa = 1), eventTime = 16, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 16, kappa = 1), eventTime = 16, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 16, kappa = 1), eventTime = 16, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 16, kappa = 1), eventTime = 16, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 16, kappa = 1), eventTime = 16, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 16, kappa = 1), eventTime = 16, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 16, kappa = 1), eventTime = 16, kappa = 1), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 16, kappa = 2), eventTime = 16, kappa = 2), 0.1, tolerance = 1e-04) 
expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 16, kappa = 2), eventTime = 16, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 16, kappa = 2), eventTime = 16, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 16, kappa = 3), eventTime = 16, kappa = 3), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 16, kappa = 3), eventTime = 16, kappa = 3), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 16, kappa = 4), eventTime = 16, kappa = 4), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 16, kappa = 5), eventTime = 16, kappa = 5), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 17, kappa = 1), eventTime = 17, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 17, kappa = 1), eventTime = 17, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 17, kappa = 1), eventTime = 17, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 17, kappa = 1), eventTime = 17, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 17, kappa = 1), eventTime = 17, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 17, kappa = 1), eventTime = 17, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 17, kappa = 1), eventTime = 17, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 17, kappa = 1), eventTime = 17, kappa = 1), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 17, kappa = 2), eventTime = 17, kappa = 2), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 17, kappa = 2), eventTime = 17, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 17, kappa = 2), eventTime = 17, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 17, kappa = 3), eventTime = 17, kappa = 3), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 17, kappa = 4), eventTime = 17, kappa = 4), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 17, kappa = 5), eventTime = 17, kappa = 5), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 18, kappa = 1), eventTime = 18, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 18, kappa = 1), eventTime = 18, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 18, kappa = 1), eventTime = 18, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 18, kappa = 1), eventTime = 18, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 18, kappa = 1), eventTime = 18, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 18, kappa = 1), eventTime = 18, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 18, kappa = 1), eventTime = 18, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 18, kappa = 1), eventTime = 18, kappa = 1), 0.8, tolerance = 1e-04) 
expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 18, kappa = 2), eventTime = 18, kappa = 2), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 18, kappa = 2), eventTime = 18, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 18, kappa = 2), eventTime = 18, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 18, kappa = 3), eventTime = 18, kappa = 3), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 18, kappa = 4), eventTime = 18, kappa = 4), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 18, kappa = 5), eventTime = 18, kappa = 5), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 19, kappa = 1), eventTime = 19, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 19, kappa = 1), eventTime = 19, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 19, kappa = 1), eventTime = 19, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 19, kappa = 1), eventTime = 19, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 19, kappa = 1), eventTime = 19, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 19, kappa = 1), eventTime = 19, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 19, kappa = 1), eventTime = 19, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 19, kappa = 1), eventTime = 19, kappa = 1), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 19, kappa = 2), eventTime = 19, kappa = 2), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 19, kappa = 2), eventTime = 19, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 19, kappa = 2), eventTime = 19, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 19, kappa = 3), eventTime = 19, kappa = 3), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 19, kappa = 4), eventTime = 19, kappa = 4), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 19, kappa = 5), eventTime = 19, kappa = 5), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 20, kappa = 1), eventTime = 20, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 20, kappa = 1), eventTime = 20, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 20, kappa = 1), eventTime = 20, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 20, kappa = 1), eventTime = 20, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 20, kappa = 1), eventTime = 20, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 20, kappa = 1), eventTime = 20, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 20, kappa = 1), eventTime = 20, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 20, kappa = 1), eventTime = 20, kappa = 1), 0.8, tolerance = 1e-04) 
    expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 20, kappa = 2), eventTime = 20, kappa = 2), 0.1, tolerance = 1e-04)
    expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 20, kappa = 2), eventTime = 20, kappa = 2), 0.2, tolerance = 1e-04)
    expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 20, kappa = 3), eventTime = 20, kappa = 3), 0.1, tolerance = 1e-04)
    expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 20, kappa = 4), eventTime = 20, kappa = 4), 0.1, tolerance = 1e-04)
    expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 20, kappa = 5), eventTime = 20, kappa = 5), 0.1, tolerance = 1e-04)
})
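# The following tests check that the piecewise exponential quantile functions invert
# the corresponding distribution functions. As a point of reference (a sketch, not
# rpact code), the assumed piecewise exponential CDF with interval start times 's'
# (s[1] = 0) and constant hazards 'lambda' per interval can be written in base R as:
#
#     pwexpCdf <- function(t, s, lambda) {
#         upper <- c(s[-1], Inf)                          # interval end points
#         H <- sum(lambda * pmax(0, pmin(t, upper) - s))  # cumulative hazard H(t)
#         1 - exp(-H)                                     # CDF: F(t) = 1 - exp(-H(t))
#     }
#     pwexpCdf(30, s = c(0, 16, 22), lambda = c(0.03, 0.05, 0.08))  # ~0.7583
#
# The quantile function solves F(t) = q for t interval by interval, so the round
# trips below should reproduce 'time' up to numerical tolerance.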
test_that("'getPiecewiseExponentialDistribution' and 'getPiecewiseExponentialQuantile' produce corresponding results", {
    piecewiseLambda <- c(0.03, 0.05, 0.08)
    piecewiseSurvivalTime <- c(0, 16, 22)
    time <- seq(2, 50, 4)
    quantile <- getPiecewiseExponentialDistribution(time,
        piecewiseSurvivalTime = piecewiseSurvivalTime, piecewiseLambda = piecewiseLambda)
    y <- getPiecewiseExponentialQuantile(quantile,
        piecewiseSurvivalTime = piecewiseSurvivalTime, piecewiseLambda = piecewiseLambda)
    expect_equal(y, time, tolerance = 1e-06)
})

test_that("'ppwexp' and 'qpwexp' produce corresponding results", {
    piecewiseLambda <- c(0.03, 0.05, 0.08)
    piecewiseSurvivalTime <- c(0, 16, 22)
    time <- seq(2, 50, 4)
    quantile <- ppwexp(time, s = piecewiseSurvivalTime, lambda = piecewiseLambda)
    y <- qpwexp(quantile, s = piecewiseSurvivalTime, lambda = piecewiseLambda)
    expect_equal(y, time, tolerance = 1e-06)
})

test_that("'getPiecewiseExponentialDistribution' and 'getPiecewiseExponentialQuantile' produce corresponding results ('piecewiseSurvivalTime' defined as list)", {
    piecewiseSurvivalTime <- list(
        "<16" = 0.03,
        "16 - <22" = 0.05,
        ">=22" = 0.08)
    time <- seq(2, 50, 4)
    quantile <- getPiecewiseExponentialDistribution(time, piecewiseSurvivalTime = piecewiseSurvivalTime)
    y <- getPiecewiseExponentialQuantile(quantile, piecewiseSurvivalTime = piecewiseSurvivalTime)
    expect_equal(y, time, tolerance = 1e-06)
})

test_that("'ppwexp' and 'qpwexp' produce corresponding results ('piecewiseSurvivalTime' defined as list)", {
    piecewiseSurvivalTime <- list(
        "<16" = 0.03,
        "16 - <22" = 0.05,
        ">=22" = 0.08)
    time <- seq(2, 50, 4)
    quantile <- ppwexp(time, s = piecewiseSurvivalTime)
    y <- qpwexp(quantile, s = piecewiseSurvivalTime)
    expect_equal(y, time, tolerance = 1e-06)
})

test_that("'getPiecewiseExponentialRandomNumbers': test that mean random numbers are as expected", {
    piecewiseSurvivalTime <- c(0, 16, 22)
    piecewiseLambda <- c(0.003, 0.003, 0.003)
    y <- 1 / mean(getPiecewiseExponentialRandomNumbers(5000,
        piecewiseSurvivalTime = piecewiseSurvivalTime, piecewiseLambda = piecewiseLambda, kappa = 1))
    expect_equal(y, piecewiseLambda[1], tolerance = 5e-04)
})

test_that("'rpwexp': test that mean random numbers are as expected", {
    piecewiseSurvivalTime <- c(0, 16, 22)
    piecewiseLambda <- c(0.003, 0.003, 0.003)
    y <- 1 / mean(rpwexp(5000, s = piecewiseSurvivalTime, lambda = piecewiseLambda, kappa = 1))
    expect_equal(y, piecewiseLambda[1], tolerance = 5e-04)
})

test_that("'getPiecewiseExponentialRandomNumbers': test that mean random numbers are as expected ('piecewiseSurvivalTime' defined as list)", {
    piecewiseSurvivalTime <- list(
        "<16" = 0.003,
        "16 - <22" = 0.003,
        ">=22" = 0.003)
    y <- 1 / mean(getPiecewiseExponentialRandomNumbers(5000,
        piecewiseSurvivalTime = piecewiseSurvivalTime, kappa = 1))
    expect_equal(y, 0.003, tolerance = 5e-04)
})

test_that("'rpwexp': test that mean random numbers are as expected ('piecewiseSurvivalTime' defined as list)", {
    piecewiseSurvivalTime <- list(
        "<16" = 0.003,
        "16 - <22" = 0.003,
        ">=22" = 0.003)
    y <- 1 / mean(rpwexp(5000, s = piecewiseSurvivalTime, kappa = 1))
    expect_equal(y, 0.003, tolerance = 5e-04)
})

test_that("'getPiecewiseExponentialDistribution': test that function call with single lambda is working", {
    expect_equal(getPiecewiseExponentialDistribution(4, piecewiseLambda = 0.003), 0.01192829, tolerance = 5e-05)
})
rpact/tests/testthat/helper-f_analysis_rates.R0000644000176200001440000000546013567153535021325 0ustar liggesusers######################################################################################
#                                                                                    #
# -- Unit test helper functions --                                                   #
#                                                                                    #
# This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. #
#                                                                                    #
# File version: 1.0.0                                                                #
# Date: 25-09-2018                                                                   #
# Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD                             #
# Licensed under "GNU Lesser General Public License" version 3                       #
# License text can be found here: https://www.r-project.org/Licenses/LGPL-3          #
#                                                                                    #
# RPACT company website: https://www.rpact.com                                       #
# RPACT package website: https://www.rpact.org                                       #
#                                                                                    #
# Contact us for information about our services: info@rpact.com                      #
#                                                                                    #
######################################################################################

# Test helper: builds the plot data of a conditional power plot for the given
# analysis results, forwarding an assumed standard deviation (means datasets) or
# control rate 'pi2' (rates datasets) when these are not supplied via '...'.
testGetAnalysisResultsPlotData <- function(x, ..., nPlanned = NA_real_,
        stage = x$getNumberOfStages(), allocationRatioPlanned = NA_real_) {
    plotArgs <- .getAnalysisResultsPlotArguments(x = x, nPlanned = nPlanned,
        stage = stage, allocationRatioPlanned = allocationRatioPlanned)
    if (x$getDataInput()$isDatasetMeans()) {
        assumedStDev <- .getOptionalArgument("assumedStDev", ...)
        if (is.null(assumedStDev)) {
            assumedStDev <- x$assumedStDev
            return(.getConditionalPowerPlot(stageResults = plotArgs$stageResults,
                nPlanned = plotArgs$nPlanned, stage = plotArgs$stage,
                allocationRatioPlanned = plotArgs$allocationRatioPlanned,
                assumedStDev = assumedStDev, ...))
        }
    } else if (x$getDataInput()$isDatasetRates()) {
        pi2 <- .getOptionalArgument("pi2", ...)
        if (is.null(pi2)) {
            pi2 <- x$pi2
            return(.getConditionalPowerPlot(stageResults = plotArgs$stageResults,
                nPlanned = plotArgs$nPlanned, stage = plotArgs$stage,
                allocationRatioPlanned = plotArgs$allocationRatioPlanned,
                pi2 = pi2, ...))
        }
    }
    return(.getConditionalPowerPlot(stageResults = plotArgs$stageResults,
        nPlanned = plotArgs$nPlanned, stage = plotArgs$stage,
        allocationRatioPlanned = plotArgs$allocationRatioPlanned, ...))
}
rpact/tests/testthat/test-f_design_group_sequential_design.R0000644000176200001440000007061613567165663024266 0ustar liggesusers######################################################################################
#                                                                                    #
# -- Unit tests --                                                                   #
#                                                                                    #
# This file is part of the R package RPACT - R Package for Adaptive Clinical Trials.
# # # # File version: 1.0.0 # # Date: 06 November 2019, 17:12:28 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing the group sequential and inverse normal design functionality") test_that("'getDesignInverseNormal' with default parameters: parameters and results are as expected", { # @refFS[Formula]{fs:criticalValuesOBrienFleming} x1 <- getDesignInverseNormal() ## ## Comparison of the results of TrialDesignInverseNormal object 'x1' with expected results ## expect_equal(x1$alphaSpent, c(0.00025917372, 0.0071600594, 0.025), tolerance = 1e-07) expect_equal(x1$criticalValues, c(3.4710914, 2.4544323, 2.0040356), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.00025917372, 0.0070553616, 0.022533125), tolerance = 1e-07) }) test_that("'getDesignInverseNormal' and 'getDesignCharacteristics' with kMax = 4: parameters and results are as expected for different arguments", { # @refFS[Formula]{fs:alphaSpendingConcept} # @refFS[Formula]{fs:alphaSpendingHwangShiDeCani} # @refFS[Formula]{fs:betaSpendingApproach} # @refFS[Formula]{fs:betaSpendingHwangShiDeCani} x2 <- getDesignInverseNormal(kMax = 4, alpha = 0.07, sided = 1, beta = 0.14, typeOfDesign = "asHSD",gammaA = -1, typeBetaSpending = C_TYPE_OF_DESIGN_BS_HSD, gammaB = -2) ## ## Comparison of the results of TrialDesignInverseNormal object 'x2' with expected results ## expect_equal(x2$power, c(0.18540359, 0.47374657, 0.7208955, 0.86), tolerance = 1e-07) expect_equal(x2$futilityBounds, c(-0.81517021, 0.063469084, 0.84025384), tolerance = 1e-07) expect_equal(x2$alphaSpent, c(0.011570732, 0.026427847, 0.045504759, 0.07), tolerance = 1e-07) expect_equal(x2$betaSpent, c(0.014215085, 0.037651799, 0.076292407, 0.14), tolerance = 1e-07) expect_equal(x2$criticalValues, c(2.2710911, 2.0692301, 1.8645608, 1.6606881), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.011570732, 0.01926225, 0.031121494, 0.048388055), tolerance = 1e-07) y1 <- getDesignCharacteristics(x2) ## ## Comparison of the results of TrialDesignCharacteristics object 'y1' with expected results ## expect_equal(y1$nFixed, 6.5337002, tolerance = 1e-07) expect_equal(y1$shift, 7.5749205, tolerance = 1e-07) expect_equal(y1$inflationFactor, 1.1593615, tolerance = 1e-07) expect_equal(y1$information, c(1.8937301, 3.7874603, 5.6811904, 7.5749205), tolerance = 1e-07) expect_equal(y1$power, c(0.18540359, 0.47374657, 0.7208955, 0.86), tolerance = 1e-07) expect_equal(y1$rejectionProbabilities, c(0.18540359, 0.28834298, 0.24714893, 0.1391045), tolerance = 1e-07) expect_equal(y1$futilityProbabilities, c(0.014215085, 0.023436714, 0.038640608), tolerance = 1e-07) expect_equal(y1$averageSampleNumber1, 0.72222281, tolerance = 1e-07) expect_equal(y1$averageSampleNumber01, 0.82592961, tolerance = 1e-07) expect_equal(y1$averageSampleNumber0, 0.68240644, tolerance = 1e-07) # @refFS[Formula]{fs:alphaSpendingConcept} # @refFS[Formula]{fs:alphaSpendingHwangShiDeCani} # @refFS[Formula]{fs:betaSpendingApproach} # @refFS[Formula]{fs:betaSpendingHwangShiDeCani} x3 <- getDesignInverseNormal(kMax = 4, informationRates = c(0.2, 0.4, 0.8, 1), alpha = 0.07, sided = 1, beta = 0.14, typeOfDesign = 
"asHSD",gammaA = -1, typeBetaSpending = C_TYPE_OF_DESIGN_BS_HSD, gammaB = -2) ## ## Comparison of the results of TrialDesignInverseNormal object 'x3' with expected results ## expect_equal(x3$power, c(0.12840586, 0.34869365, 0.76424148, 0.86), tolerance = 1e-07) expect_equal(x3$futilityBounds, c(-1.0672796, -0.30464832, 1.028624), tolerance = 1e-07) expect_equal(x3$alphaSpent, c(0.0090195874, 0.020036136, 0.049926539, 0.07), tolerance = 1e-07) expect_equal(x3$betaSpent, c(0.010777094, 0.026854629, 0.086620705, 0.14), tolerance = 1e-07) expect_equal(x3$criticalValues, c(2.364813, 2.1928805, 1.7718975, 1.6682985), tolerance = 1e-07) expect_equal(x3$stageLevels, c(0.0090195874, 0.014157994, 0.038205784, 0.047628242), tolerance = 1e-07) # @refFS[Formula]{fs:inflationFactor} # @refFS[Formula]{fs:expectedReduction} y2 <- getDesignCharacteristics(x3) ## ## Comparison of the results of TrialDesignCharacteristics object 'y2' with expected results ## expect_equal(y2$nFixed, 6.5337002, tolerance = 1e-07) expect_equal(y2$shift, 7.5750078, tolerance = 1e-07) expect_equal(y2$inflationFactor, 1.1593749, tolerance = 1e-07) expect_equal(y2$information, c(1.5150016, 3.0300031, 6.0600063, 7.5750078), tolerance = 1e-07) expect_equal(y2$power, c(0.12840586, 0.34869365, 0.76424148, 0.86), tolerance = 1e-07) expect_equal(y2$rejectionProbabilities, c(0.12840586, 0.22028779, 0.41554783, 0.095758523), tolerance = 1e-07) expect_equal(y2$futilityProbabilities, c(0.010777094, 0.016077535, 0.059766076), tolerance = 1e-07) expect_equal(y2$averageSampleNumber1, 0.75564768, tolerance = 1e-07) expect_equal(y2$averageSampleNumber01, 0.85242855, tolerance = 1e-07) expect_equal(y2$averageSampleNumber0, 0.720263, tolerance = 1e-07) }) test_that("'getDesignInverseNormal' with binding futility bounds", { # @refFS[Formula]{fs:criticalValuesWithFutility} # @refFS[Formula]{fs:criticalValuesWangTiatis} x4 <- getDesignInverseNormal(kMax = 4, alpha = 0.025, futilityBounds = rep(0.5244, 3), bindingFutility = TRUE, typeOfDesign = "WT", deltaWT = 0.4) ## ## Comparison of the results of TrialDesignInverseNormal object 'x4' with expected results ## expect_equal(x4$alphaSpent, c(0.0062828133, 0.013876673, 0.02015684, 0.025), tolerance = 1e-07) expect_equal(x4$criticalValues, c(2.4958485, 2.328709, 2.2361766, 2.1727623), tolerance = 1e-07) expect_equal(x4$stageLevels, c(0.0062828133, 0.0099372444, 0.012670104, 0.014899106), tolerance = 1e-07) }) test_that("'getDesignGroupSequential' with type of design = 'asUser'", { # @refFS[Formula]{fs:alphaSpendingConcept} x5 <- getDesignGroupSequential(typeOfDesign = "asUser", userAlphaSpending = c(0.01, 0.02, 0.03, 0.05)) ## ## Comparison of the results of TrialDesignGroupSequential object 'x5' with expected results ## expect_equal(x5$alphaSpent, c(0.01, 0.02, 0.03, 0.05), tolerance = 1e-07) expect_equal(x5$criticalValues, c(2.3263479, 2.2192994, 2.1201347, 1.8189562), tolerance = 1e-07) expect_equal(x5$stageLevels, c(0.01, 0.01323318, 0.016997342, 0.034459058), tolerance = 1e-07) }) test_that("'getDesignGroupSequential' with type of design = 'asOF' and 'bsUser'", { # @refFS[Formula]{fs:alphaSpendingConcept} # @refFS[Formula]{fs:alphaSpendingOBrienFleming} # @refFS[Formula]{fs:betaSpendingApproach} x6 <- getDesignGroupSequential(kMax = 3, alpha = 0.03, typeOfDesign = "asOF", typeBetaSpending = "bsUser", userBetaSpending = c(0.01, 0.05, 0.3)) ## ## Comparison of the results of TrialDesignGroupSequential object 'x6' with expected results ## expect_equal(x6$power, c(0.014685829, 0.33275272, 0.7), tolerance 
= 1e-07) expect_equal(x6$futilityBounds, c(-0.92327973, 0.29975473), tolerance = 1e-07) expect_equal(x6$alphaSpent, c(0.00017079385, 0.0078650906, 0.03), tolerance = 1e-07) expect_equal(x6$betaSpent, c(0.01, 0.05, 0.3), tolerance = 1e-07) expect_equal(x6$criticalValues, c(3.5815302, 2.417863, 1.9175839), tolerance = 1e-07) expect_equal(x6$stageLevels, c(0.00017079385, 0.0078059773, 0.027581894), tolerance = 1e-07) }) test_that("'getDesignGroupSequential' with type of design = 'asOF' and 'bsP'", { # @refFS[Formula]{fs:alphaSpendingConcept} # @refFS[Formula]{fs:alphaSpendingOBrienFleming} # @refFS[Formula]{fs:betaSpendingApproach} # @refFS[Formula]{fs:betaSpendingPocock} x7 <- getDesignGroupSequential(kMax = 3, alpha = 0.03, typeOfDesign = "asOF", typeBetaSpending = "bsP", userBetaSpending = c(0.01, 0.05, 0.3)) ## ## Comparison of the results of TrialDesignGroupSequential object 'x7' with expected results ## expect_equal(x7$power, c(0.03410434, 0.52267986, 0.8), tolerance = 1e-07) expect_equal(x7$futilityBounds, c(0.42062972, 1.2539286), tolerance = 1e-07) expect_equal(x7$alphaSpent, c(0.00017079385, 0.0078650906, 0.03), tolerance = 1e-07) expect_equal(x7$betaSpent, c(0.090566485, 0.1526765, 0.2), tolerance = 1e-07) expect_equal(x7$criticalValues, c(3.5815302, 2.417863, 1.9175839), tolerance = 1e-07) expect_equal(x7$stageLevels, c(0.00017079385, 0.0078059773, 0.027581894), tolerance = 1e-07) }) test_that("'getDesignGroupSequential' with binding futility bounds ", { # @refFS[Formula]{fs:criticalValuesWithFutility} # @refFS[Formula]{fs:criticalValuesWangTiatis} x8 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, futilityBounds = rep(0.5244, 3), bindingFutility = TRUE, typeOfDesign = "WT", deltaWT = 0.4) ## ## Comparison of the results of TrialDesignGroupSequential object 'x8' with expected results ## expect_equal(x8$alphaSpent, c(0.0062828133, 0.013876673, 0.02015684, 0.025), tolerance = 1e-07) expect_equal(x8$criticalValues, c(2.4958485, 2.328709, 2.2361766, 2.1727623), tolerance = 1e-07) expect_equal(x8$stageLevels, c(0.0062828133, 0.0099372444, 0.012670104, 0.014899106), tolerance = 1e-07) }) test_that("'getDesignGroupSequential' with Haybittle Peto boundaries ", { # @refFS[Formula]{fs:criticalValuesHaybittlePeto} x9 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, typeOfDesign = "HP") ## ## Comparison of the results of TrialDesignGroupSequential object 'x9' with expected results ## expect_equal(x9$alphaSpent, c(0.001349898, 0.0024617416, 0.0033695882, 0.025), tolerance = 1e-07) expect_equal(x9$criticalValues, c(3, 3, 3, 1.9827514), tolerance = 1e-07) expect_equal(x9$stageLevels, c(0.001349898, 0.001349898, 0.001349898, 0.023697604), tolerance = 1e-07) }) test_that("'getDesignInverseNormal': illegal arguments throw exceptions as expected", { expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.023, 0.023, 0.025), kMax = 4), paste0("Conflicting arguments: length of 'userAlphaSpending' (5) ", "must be equal to 'kMax' (4)"), fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.023, 0.023, 0.021)), paste0("'userAlphaSpending' = c(0.01, 0.02, 0.023, 0.023, 0.021) must be a vector that ", "satisfies the following condition: 0 <= alpha_1 <= .. 
<= alpha_5 <= alpha = 0.021"), fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = "asUser", userAlphaSpending = c(0.01, 0.02, 0.023), alpha = 0.02), paste0("'userAlphaSpending' = c(0.01, 0.02, 0.023) must be a vector that ", "satisfies the following condition: 0 <= alpha_1 <= .. <= alpha_3 <= alpha = 0.02"), fixed = TRUE) expect_equal(getDesignInverseNormal(typeOfDesign = "asUser", userAlphaSpending = c(0.01, 0.02, 0.023))$alpha, 0.023) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_WT, deltaWT = NA_real_), "Missing argument: parameter 'deltaWT' must be specified in design", fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_WT_OPTIMUM, optimizationCriterion = NA_character_), "Missing argument: parameter 'optimizationCriterion' must be specified in design", fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_KD, gammaA = NA_real_), "Missing argument: parameter 'gammaA' must be specified in design", fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_HSD, gammaA = NA_real_), "Missing argument: parameter 'gammaA' must be specified in design", fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER), "Missing argument: parameter 'userAlphaSpending' must be specified in design", fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = NA_character_), "Missing argument: parameter 'typeBetaSpending' must be specified in design", fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER), "Missing argument: parameter 'userBetaSpending' must be specified in design", fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER, userBetaSpending = c(0.1, 0.2)), paste0("Conflicting arguments: length of 'userBetaSpending' (2) must ", "be equal to length of 'informationRates' (3)"), fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER, userBetaSpending = c(0.2, 0.1, 0.05)), paste0("'userBetaSpending' = c(0.2, 0.1, 0.05) must be a vector that satisfies the ", "following condition: 0 <= beta_1 <= .. <= beta_3 <= beta = 0.05"), fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER, userBetaSpending = c(0.1, 0.2, 0.3), beta = 0.2), paste0("'userBetaSpending' = c(0.1, 0.2, 0.3) must be a vector that satisfies the ", "following condition: 0 <= beta_1 <= .. 
<= beta_3 <= beta = 0.2"), fixed = TRUE) expect_error(getDesignInverseNormal(kMax = Inf), paste0("Argument out of bounds: 'kMax' (Inf) is out of bounds [1; ", C_KMAX_UPPER_BOUND, "]"), fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -Inf), paste0("Argument out of bounds: 'kMax' (-Inf) is out of bounds [1; ", C_KMAX_UPPER_BOUND, "]"), fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -Inf), "Argument out of bounds: 'kMax' (-Inf) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -10), "Argument out of bounds: 'kMax' (-10) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -9), "Argument out of bounds: 'kMax' (-9) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -8), "Argument out of bounds: 'kMax' (-8) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -7), "Argument out of bounds: 'kMax' (-7) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -6), "Argument out of bounds: 'kMax' (-6) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -5), "Argument out of bounds: 'kMax' (-5) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -4), "Argument out of bounds: 'kMax' (-4) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -3), "Argument out of bounds: 'kMax' (-3) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -2), "Argument out of bounds: 'kMax' (-2) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -1), "Argument out of bounds: 'kMax' (-1) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 0), "Argument out of bounds: 'kMax' (0) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 11), "Argument out of bounds: 'kMax' (11) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 12), "Argument out of bounds: 'kMax' (12) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 13), "Argument out of bounds: 'kMax' (13) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 14), "Argument out of bounds: 'kMax' (14) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 15), "Argument out of bounds: 'kMax' (15) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 16), "Argument out of bounds: 'kMax' (16) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 17), "Argument out of bounds: 'kMax' (17) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 18), "Argument out of bounds: 'kMax' (18) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 19), "Argument out of bounds: 'kMax' (19) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 20), "Argument out of bounds: 'kMax' (20) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = Inf), "Argument out of bounds: 'kMax' (Inf) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 2, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (2) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 3, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length 
of 'futilityBounds' (4) must be equal to 'kMax' (3) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 4, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (4) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 6, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (6) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 7, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (7) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 8, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (8) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 9, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (9) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 10, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (10) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(futilityBounds = c(-7, 5)), "Argument out of bounds: 'futilityBounds' (-7, 5) is out of bounds [-6; 6]", fixed = TRUE) expect_error(getDesignInverseNormal(futilityBounds = c(1, 7)), "Argument out of bounds: 'futilityBounds' (1, 7) is out of bounds [-6; 6]", fixed = TRUE) }) test_that("'getDesignGroupSequential': illegal arguments throw exceptions as expected", { expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.023, 0.023, 0.025), kMax = 4), paste0("Conflicting arguments: length of 'userAlphaSpending' (5) ", "must be equal to 'kMax' (4)"), fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.023, 0.023, 0.021)), paste0("'userAlphaSpending' = c(0.01, 0.02, 0.023, 0.023, 0.021) must be a vector that ", "satisfies the following condition: 0 <= alpha_1 <= .. <= alpha_5 <= alpha = 0.021"), fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = "asUser", userAlphaSpending = c(0.01, 0.02, 0.023), alpha = 0.02), paste0("'userAlphaSpending' = c(0.01, 0.02, 0.023) must be a vector that ", "satisfies the following condition: 0 <= alpha_1 <= .. 
<= alpha_3 <= alpha = 0.02"), fixed = TRUE) expect_equal(getDesignGroupSequential(typeOfDesign = "asUser", userAlphaSpending = c(0.01, 0.02, 0.023))$alpha, 0.023) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_WT, deltaWT = NA_real_), "Missing argument: parameter 'deltaWT' must be specified in design", fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_WT_OPTIMUM, optimizationCriterion = NA_character_), "Missing argument: parameter 'optimizationCriterion' must be specified in design", fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_KD, gammaA = NA_real_), "Missing argument: parameter 'gammaA' must be specified in design", fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_HSD, gammaA = NA_real_), "Missing argument: parameter 'gammaA' must be specified in design", fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER), "Missing argument: parameter 'userAlphaSpending' must be specified in design", fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = NA_character_), "Missing argument: parameter 'typeBetaSpending' must be specified in design", fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER), "Missing argument: parameter 'userBetaSpending' must be specified in design", fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER, userBetaSpending = c(0.1, 0.2)), paste0("Conflicting arguments: length of 'userBetaSpending' (2) must ", "be equal to length of 'informationRates' (3)"), fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER, userBetaSpending = c(0.2, 0.1, 0.05)), paste0("'userBetaSpending' = c(0.2, 0.1, 0.05) must be a vector that satisfies the ", "following condition: 0 <= beta_1 <= .. <= beta_3 <= beta = 0.05"), fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER, userBetaSpending = c(0.1, 0.2, 0.3), beta = 0.2), paste0("'userBetaSpending' = c(0.1, 0.2, 0.3) must be a vector that satisfies the ", "following condition: 0 <= beta_1 <= .. 
<= beta_3 <= beta = 0.2"), fixed = TRUE) expect_error(getDesignGroupSequential(kMax = Inf), paste0("Argument out of bounds: 'kMax' (Inf) is out of bounds [1; ", C_KMAX_UPPER_BOUND, "]"), fixed = TRUE) expect_error(getDesignGroupSequential(kMax = -Inf), paste0("Argument out of bounds: 'kMax' (-Inf) is out of bounds [1; ", C_KMAX_UPPER_BOUND, "]"), fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -Inf), "Argument out of bounds: 'kMax' (-Inf) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -10), "Argument out of bounds: 'kMax' (-10) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -9), "Argument out of bounds: 'kMax' (-9) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -8), "Argument out of bounds: 'kMax' (-8) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -7), "Argument out of bounds: 'kMax' (-7) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -6), "Argument out of bounds: 'kMax' (-6) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -5), "Argument out of bounds: 'kMax' (-5) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -4), "Argument out of bounds: 'kMax' (-4) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -3), "Argument out of bounds: 'kMax' (-3) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -2), "Argument out of bounds: 'kMax' (-2) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -1), "Argument out of bounds: 'kMax' (-1) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 0), "Argument out of bounds: 'kMax' (0) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 11), "Argument out of bounds: 'kMax' (11) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 12), "Argument out of bounds: 'kMax' (12) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 13), "Argument out of bounds: 'kMax' (13) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 14), "Argument out of bounds: 'kMax' (14) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 15), "Argument out of bounds: 'kMax' (15) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 16), "Argument out of bounds: 'kMax' (16) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 17), "Argument out of bounds: 'kMax' (17) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 18), "Argument out of bounds: 'kMax' (18) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 19), "Argument out of bounds: 'kMax' (19) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 20), "Argument out of bounds: 'kMax' (20) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = Inf), "Argument out of bounds: 'kMax' (Inf) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 2, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (2) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 3, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: 
length of 'futilityBounds' (4) must be equal to 'kMax' (3) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 4, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (4) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 6, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (6) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 7, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (7) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 8, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (8) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 9, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (9) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 10, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (10) - 1", fixed = TRUE) expect_error(getDesignGroupSequential(futilityBounds = c(-7, 5)), "Argument out of bounds: 'futilityBounds' (-7, 5) is out of bounds [-6; 6]", fixed = TRUE) expect_error(getDesignGroupSequential(futilityBounds = c(1, 7)), "Argument out of bounds: 'futilityBounds' (1, 7) is out of bounds [-6; 6]", fixed = TRUE) }) rpact/tests/testthat/helper-f_core_utilities.R0000644000176200001440000000710613375253362021332 0ustar liggesusers###################################################################################### # # # -- Unit test helper functions -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### getTestInformationRatesDefault <- function(kMax) { return((1:kMax) / kMax) } getTestFutilityBoundsDefault <- function(kMax) { return(rep(C_FUTILITY_BOUNDS_DEFAULT, kMax - 1)) } getTestAlpha0VecDefault <- function(kMax) { return(rep(C_ALPHA_0_VEC_DEFAULT, kMax - 1)) } getTestInformationRates <- function(kMax) { if (kMax == 1L) { return(1) } a <- 0.8 / kMax b <- c() for (i in 1:(kMax - 1)) { b <- c(b, a * i) } return(c(b, 1)) } getTestFutilityBounds <- function(kMax) { if (kMax < 2) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'kMax' must be >= 2") } k <- kMax - 1 futilityBounds <- c(2) k <- k - 1 if (k > 0) { futilityBounds <- c(1, futilityBounds) k <- k - 1 } if (k > 0) { futilityBounds <- c(rep(0, k), futilityBounds) } return(futilityBounds) } getTestDesign <- function(kMax = NA_real_, informationRates = NA_real_, futilityBounds = NA_real_, designClass = C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL) { if (designClass == C_CLASS_NAME_TRIAL_DESIGN_FISHER) { return(TrialDesignFisher( kMax = as.integer(kMax), alpha = C_ALPHA_DEFAULT, method = C_FISHER_METHOD_DEFAULT, alpha0Vec = futilityBounds, informationRates = informationRates, tolerance = C_ANALYSIS_TOLERANCE_FISHER_DEFAULT )) } return(.createDesign( designClass = designClass, kMax = as.integer(kMax), alpha = C_ALPHA_DEFAULT, beta = C_BETA_DEFAULT, sided = 1, informationRates = informationRates, futilityBounds = futilityBounds, typeOfDesign = C_DEFAULT_TYPE_OF_DESIGN, deltaWT = 0, optimizationCriterion = C_OPTIMIZATION_CRITERION_DEFAULT, gammaA = 1, typeBetaSpending = C_TYPE_OF_DESIGN_BS_NONE, userAlphaSpending = NA_real_, userBetaSpending = NA_real_, gammaB = 1, tolerance = 1e-06)) } rpact/tests/testthat/helper-f_analysis_survival.R0000644000176200001440000000546013566731557022077 0ustar liggesusers###################################################################################### # # # -- Unit test helper functions -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### testGetAnalysisResultsPlotData <- function(x, ..., nPlanned = NA_real_, stage = x$getNumberOfStages(), allocationRatioPlanned = NA_real_) { plotArgs <- .getAnalysisResultsPlotArguments(x = x, nPlanned = nPlanned, stage = stage, allocationRatioPlanned = allocationRatioPlanned) if (x$getDataInput()$isDatasetMeans()) { assumedStDev <- .getOptionalArgument("assumedStDev", ...) 
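        # If no 'assumedStDev' was passed via '...', fall back to the value stored in
        # the analysis results object: the conditional power plot for a means dataset
        # needs an assumed standard deviation, just as the rates branch below falls
        # back to the stored control rate 'pi2' in the same way.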
if (is.null(assumedStDev)) { assumedStDev <- x$assumedStDev return(.getConditionalPowerPlot(stageResults = plotArgs$stageResults, nPlanned = plotArgs$nPlanned, stage = plotArgs$stage, allocationRatioPlanned = plotArgs$allocationRatioPlanned, assumedStDev = assumedStDev, ...)) } } else if (x$getDataInput()$isDatasetRates()) { pi2 <- .getOptionalArgument("pi2", ...) if (is.null(pi2)) { pi2 <- x$pi2 return(.getConditionalPowerPlot(stageResults = plotArgs$stageResults, nPlanned = plotArgs$nPlanned, stage = plotArgs$stage, allocationRatioPlanned = plotArgs$allocationRatioPlanned, pi2 = pi2, ...)) } } return(.getConditionalPowerPlot(stageResults = plotArgs$stageResults, nPlanned = plotArgs$nPlanned, stage = plotArgs$stage, allocationRatioPlanned = plotArgs$allocationRatioPlanned, ...)) } rpact/tests/testthat/helper-f_analysis.R0000644000176200001440000000737513357572731020146 0ustar liggesusers###################################################################################### # # # -- Unit test helper functions -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### # Example in book p. 207f, one-treatment case, variance known getTestSettings1 = function() { kMax = 2 nActual = c(20, 60) return(list( alpha = 0.025, kMax = kMax, typeOfDesign = C_TYPE_OF_DESIGN_WT, deltaWT = 0.25, fisherMethod = "equalAlpha", informationRates = (1 : kMax) / kMax, futilityBounds = c(stats::qnorm(0.7)), alpha0Vec = c(1), sided = 1, nActual = nActual, dataExample = DatasetMeans(dataFrame = data.frame(stage = (1 : kMax), n1 = c(20, 20), n2 = nActual, means1 = c(0.32, 0.35), means2 = c(1.92, 0.56), stds1 = c(1, 1), stds2 = c(1, 1))), stage = kMax )) } getTestSettings2 = function() { kMax = 4 return(list( alpha = 0.025, kMax = kMax, typeOfDesign = C_TYPE_OF_DESIGN_WT, deltaWT = 0.25, fisherMethod = "equalAlpha", informationRates = (1 : kMax) / kMax, sided = 1, futilityBoundsForPower = c(-0.5, 0, 0.5), futilityBounds = rep(C_FUTILITY_BOUNDS_DEFAULT, kMax - 1), alpha0Vec = c(0.7, 0.6, 0.5), alpha0Vec = rep(1, kMax - 1), nPlanned = rep(11, kMax), nActual = rep(11, kMax), dataExample = DatasetMeans(dataFrame = data.frame(stage = (1 : kMax), n1 = c(8, 10, 9, 11), n2 = c(11, 13, 12, 13), means1 = c(323, 514, 511, 611), means2 = c(452, 561, 635, 698), stds1 = c(111, 131, 145, 111), stds2 = c(118, 117, 104, 119))), stage = kMax )) } getTestSettings3 = function() { kMax = 4 return(list( alpha = 0.025, kMax = kMax, typeOfDesign = C_TYPE_OF_DESIGN_WT_OPTIMUM, deltaWT = 0.25, fisherMethod = "equalAlpha", informationRates = (1 : kMax) / kMax, sided = 1, futilityBoundsForPower = c(-0.5, 0, 0.5), futilityBounds = rep(C_FUTILITY_BOUNDS_DEFAULT, kMax - 1), alpha0Vec = c(0.7, 0.6, 0.5), alpha0Vec = rep(1, kMax - 1), nPlanned = rep(11, kMax), nActual = rep(11, kMax), dataExample = DatasetMeans(dataFrame = data.frame(stage = (1 : kMax), n1 = c(8, 10, 9, 11), n2 = c(11, 13, 12, 13), means1 = c(323, 514, 511, 611), means2 = c(452, 561, 635, 698), stds1 = c(111, 131, 145, 111), stds2 = c(118, 117, 
104, 119))), stage = kMax )) } rpact/tests/testthat/test-f_analysis_base_means.R0000644000176200001440000027110613574374172022016 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 25 November 2019, 11:23:25 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing the analysis means functionality for one treatment") test_that("'getAnalysisResults' for group sequential design and a dataset of one mean per stage (bindingFutility = TRUE)", { dataExample1 <- getDataset( n = c(120, 130, 130), means = c(0.45, 0.51, 0.45) * 100, stDevs = c(1.3, 1.4, 1.2) * 100 ) design1 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, futilityBounds = rep(0.5244, 3), bindingFutility = TRUE, typeOfDesign = "WT", deltaWT = 0.4) # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} # @refFS[Formula]{fs:testStatisticsGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCIOneMean} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneMeanEffect} result1 <- getAnalysisResults(design = design1, dataInput = dataExample1, nPlanned = 130, thetaH1 = 50, assumedStDev = 100, thetaH0 = 10) result1 result1$.design$alphaSpent #plot(result1, thetaRange = c(0, 100)) dataExample1b <- rpact::getDataset( n = c(120, 130, 130), means = c(0.45, 0.51, 0.45) * 100, stDevs = c(1.3, 1.4, 1.2) * 100 ) design1b <- rpact::getDesignGroupSequential(kMax = 4, alpha = 0.025, futilityBounds = rep(0.5244, 3), bindingFutility = TRUE, typeOfDesign = "WT", deltaWT = 0.4) # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} # @refFS[Formula]{fs:testStatisticsGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCIOneMean} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneMeanEffect} result1b <- rpact::getAnalysisResults(design = design1b, dataInput = dataExample1b, nPlanned = 130, thetaH1 = 50, assumedStDev = 100, thetaH0 = 10) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'result1' with expected results ## expect_equal(result1$.design$stages, c(1, 2, 3, 4)) expect_equal(result1$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result1$.design$criticalValues, c(2.4958485, 2.328709, 2.2361766, 2.1727623), tolerance = 1e-07) expect_equal(result1$.design$futilityBounds, c(0.5244, 0.5244, 0.5244), tolerance = 1e-07) expect_equal(result1$.design$alphaSpent, c(0.0062828133, 0.013876673, 
0.02015684, 0.025), tolerance = 1e-07) expect_equal(result1$.design$stageLevels, c(0.0062828133, 0.0099372444, 0.012670104, 0.014899106), tolerance = 1e-07) expect_equal(result1$.stageResults$effectSizes, c(45, 48.12, 47.052632, NA_real_), tolerance = 1e-07) expect_equal(result1$.stageResults$testStatistics, c(2.9492753, 3.3390852, 3.3255117, NA_real_), tolerance = 1e-07) expect_equal(result1$.stageResults$pValues, c(0.0019178249, 0.00054956317, 0.00057478599, NA_real_), tolerance = 1e-07) expect_equal(result1$testActions, c("reject and stop", "reject and stop", "reject and stop", NA_character_)) expect_equal(result1$thetaH0, 10) expect_equal(result1$thetaH1, 50) expect_equal(result1$assumedStDev, 100) expect_equal(result1$conditionalRejectionProbabilities, c(0.4390924, 0.99186839, 0.99999982, NA_real_), tolerance = 1e-07) expect_equal(result1$nPlanned, c(NA_real_, NA_real_, NA_real_, 130)) expect_equal(result1$allocationRatioPlanned, 1) expect_equal(result1$conditionalPower, c(NA_real_, NA_real_, NA_real_, 1), tolerance = 1e-07) expect_equal(result1$repeatedConfidenceIntervalLowerBounds, c(14.924587, 28.099918, 32.086386, NA_real_), tolerance = 1e-07) expect_equal(result1$repeatedConfidenceIntervalUpperBounds, c(75.075413, 68.140082, 62.018878, NA_real_), tolerance = 1e-07) expect_equal(result1$repeatedPValues, c(0.010254306, 3.704879e-05, 1.572203e-06, NA_real_), tolerance = 1e-07) expect_equal(result1$finalStage, 1) expect_equal(result1$finalPValues, c(0.0019178249, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$finalConfidenceIntervalLowerBounds, c(21.740476, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$finalConfidenceIntervalUpperBounds, c(68.259524, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$medianUnbiasedEstimates, c(45, NA_real_, NA_real_, NA_real_)) expect_equal(result1$normalApproximation, FALSE) expect_equal(result1$equalVariances, TRUE) expect_equal(result1$directionUpper, TRUE) expect_equal(result1$overallTestStatistics, c(2.9492753, 4.4628381, 5.558203, NA_real_), tolerance = 1e-07) expect_equal(result1$overallPValues, c(0.0019178249, 6.1303223e-06, 2.5741923e-08, NA_real_), tolerance = 1e-07) }) test_that("'getStageResults' for group sequential design and a dataset of one mean per stage (bindingFutility = TRUE)", { .skipTestifDisabled() dataExample1 <- getDataset( n = c(20, 30, 30), means = c(0.45, 0.51, 0.45) * 100, stDevs = c(1.3, 1.4, 1.2) * 100 ) design1 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, futilityBounds = rep(0.5244, 3), bindingFutility = TRUE, typeOfDesign = "WT", deltaWT = 0.4) # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} stageResults1 <- getStageResults(design1, dataExample1, thetaH0 = 10, stage = 2) ## ## Comparison of the results of StageResultsMeans object 'stageResults1' with expected results ## expect_equal(stageResults1$overallTestStatistics, c(1.2040366, 2.025312, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$overallPValues, c(0.12168078, 0.02415027, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$overallMeans, c(45, 48.6, 47.25), tolerance = 1e-07) expect_equal(stageResults1$overallStDevs, c(130, 134.76601, 128.66279), tolerance = 1e-07) expect_equal(stageResults1$overallSampleSizes, c(20, 50)) expect_equal(stageResults1$testStatistics, c(1.2040366, 1.6040446, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$pValues, c(0.12168078, 0.059770605, NA_real_, 
NA_real_), tolerance = 1e-07) expect_equal(stageResults1$effectSizes, c(45, 48.6, NA_real_, NA_real_), tolerance = 1e-07) # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneMeanEffect} plotData1 <- testGetStageResultsPlotData(stageResults1, stage = 2, nPlanned = c(30, 20), thetaRange = seq(10, 80, 5), assumedStDev = 100) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80)) expect_equal(plotData1$condPowerValues, c(0.20492816, 0.31007642, 0.43512091, 0.5683138, 0.6950205, 0.80243295, 0.88343665, 0.93770927, 0.96998259, 0.98700232, 0.99495733, 0.99825113, 0.99945881, 0.9998508, 0.9999634), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.12861339, 0.21139553, 0.32435073, 0.46456173, 0.62112851, 0.77522713, 0.90320416, 0.98231862, 0.99730568, 0.94517816, 0.83619688, 0.69057821, 0.53238607, 0.38313335, 0.25738469), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "Effect size") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 50, std = 100") }) test_that("'getAnalysisResults' for inverse normal and Fisher designs and a dataset of one mean per stage (bindingFutility = TRUE)", { .skipTestifDisabled() dataExample1 <- getDataset( n = c(20, 30, 30), means = c(0.45, 0.51, 0.45) * 100, stDevs = c(1.3, 1.4, 1.2) * 100 ) design2 <- getDesignInverseNormal(kMax = 4, alpha = 0.025, futilityBounds = rep(0.5244, 3), bindingFutility = TRUE, typeOfDesign = "WT", deltaWT = 0.4) # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} stageResults2 <- getStageResults(design2, dataExample1, thetaH0 = 10, stage = 2) ## ## Comparison of the results of StageResultsMeans object 'stageResults2' with expected results ## expect_equal(stageResults2$overallTestStatistics, c(1.2040366, 2.025312, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$overallPValues, c(0.12168078, 0.02415027, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$overallMeans, c(45, 48.6, 47.25), tolerance = 1e-07) expect_equal(stageResults2$overallStDevs, c(130, 134.76601, 128.66279), tolerance = 1e-07) expect_equal(stageResults2$overallSampleSizes, c(20, 50)) expect_equal(stageResults2$testStatistics, c(1.2040366, 1.6040446, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$pValues, c(0.12168078, 0.059770605, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$effectSizes, c(45, 48.6, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$combInverseNormal, c(1.1666257, 1.9256836, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$weightsInverseNormal, c(0.5, 0.5, 0.5, 0.5), tolerance = 1e-07) # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneMeanEffect} plotData2 <- testGetStageResultsPlotData(stageResults2, stage = 2, nPlanned = c(30, 20), thetaRange = seq(10, 80, 5), assumedStDev = 100) ## ## Comparison of the results of list object 'plotData2' with expected results ## expect_equal(plotData2$xValues, c(10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80)) 
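# Illustration (an assumption for orientation, using the equal stage weights
# w_k = 0.5 from weightsInverseNormal above): the stage-2 inverse normal
# combination statistic is sum(w_k * qnorm(1 - p_k)) / sqrt(sum(w_k^2)).
zStagewise <- stats::qnorm(1 - c(0.12168078, 0.059770605))
sum(0.5 * zStagewise) / sqrt(sum(c(0.5, 0.5)^2)) # ~ 1.9256836 = combInverseNormal[2]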
expect_equal(plotData2$condPowerValues, c(0.18776792, 0.28883478, 0.41147918, 0.5447391, 0.67401995, 0.78575942, 0.87165951, 0.93031941, 0.96586805, 0.98497137, 0.99406923, 0.99790729, 0.999341, 0.99981509, 0.99995383), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(0.12861339, 0.21139553, 0.32435073, 0.46456173, 0.62112851, 0.77522713, 0.90320416, 0.98231862, 0.99730568, 0.94517816, 0.83619688, 0.69057821, 0.53238607, 0.38313335, 0.25738469), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power Plot with Likelihood") expect_equal(plotData2$xlab, "Effect size") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 50, std = 100") # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCIOneMean} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneMeanEffect} result2 <- getAnalysisResults(design = design2, dataInput = dataExample1, nPlanned = 30, thetaH1 = 50, assumedStDev = 100, thetaH0 = 10) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'result2' with expected results ## expect_equal(result2$.design$stages, c(1, 2, 3, 4)) expect_equal(result2$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result2$.design$criticalValues, c(2.4958485, 2.328709, 2.2361766, 2.1727623), tolerance = 1e-07) expect_equal(result2$.design$futilityBounds, c(0.5244, 0.5244, 0.5244), tolerance = 1e-07) expect_equal(result2$.design$alphaSpent, c(0.0062828133, 0.013876673, 0.02015684, 0.025), tolerance = 1e-07) expect_equal(result2$.design$stageLevels, c(0.0062828133, 0.0099372444, 0.012670104, 0.014899106), tolerance = 1e-07) expect_equal(result2$.stageResults$effectSizes, c(45, 48.6, 47.25, NA_real_), tolerance = 1e-07) expect_equal(result2$.stageResults$testStatistics, c(1.2040366, 1.6040446, 1.5975241, NA_real_), tolerance = 1e-07) expect_equal(result2$.stageResults$pValues, c(0.12168078, 0.059770605, 0.060494785, NA_real_), tolerance = 1e-07) expect_equal(result2$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(result2$thetaH0, 10) expect_equal(result2$thetaH1, 50) expect_equal(result2$assumedStDev, 100) expect_equal(result2$conditionalRejectionProbabilities, c(0.054544013, 0.18776792, 0.47147471, NA_real_), tolerance = 1e-07) expect_equal(result2$nPlanned, c(NA_real_, NA_real_, NA_real_, 30)) expect_equal(result2$allocationRatioPlanned, 1) expect_equal(result2$conditionalPower, c(NA_real_, NA_real_, NA_real_, 0.98296857), tolerance = 1e-07) expect_equal(result2$repeatedConfidenceIntervalLowerBounds, c(-35.118855, 1.5735511, 13.58964, NA_real_), tolerance = 1e-07) expect_equal(result2$repeatedConfidenceIntervalUpperBounds, c(125.11886, 94.865725, 80.385626, NA_real_), tolerance = 1e-07) expect_equal(result2$repeatedPValues, c(0.18164628, 0.056608473, 0.014183052, NA_real_), tolerance = 1e-07) expect_equal(result2$finalStage, 3) expect_equal(result2$finalPValues, c(NA_real_, NA_real_, 0.016754234, NA_real_), tolerance = 1e-07) expect_equal(result2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 13.011822, NA_real_), tolerance 
= 1e-07) expect_equal(result2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 82.848073, NA_real_), tolerance = 1e-07) expect_equal(result2$medianUnbiasedEstimates, c(NA_real_, NA_real_, 45.714272, NA_real_), tolerance = 1e-07) expect_equal(result2$normalApproximation, FALSE) expect_equal(result2$equalVariances, TRUE) expect_equal(result2$directionUpper, TRUE) expect_equal(result2$combinationTestStatistics, c(1.1666257, 1.9256836, 2.4675727, NA_real_), tolerance = 1e-07) design3 <- getDesignFisher(kMax = 4, alpha = 0.025, alpha0Vec = rep(0.4, 3), bindingFutility = TRUE) # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} stageResults3 <- getStageResults(design3, dataExample1, thetaH0 = 10, stage = 2) ## ## Comparison of the results of StageResultsMeans object 'stageResults3' with expected results ## expect_equal(stageResults3$overallTestStatistics, c(1.2040366, 2.025312, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$overallPValues, c(0.12168078, 0.02415027, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$overallMeans, c(45, 48.6, 47.25), tolerance = 1e-07) expect_equal(stageResults3$overallStDevs, c(130, 134.76601, 128.66279), tolerance = 1e-07) expect_equal(stageResults3$overallSampleSizes, c(20, 50)) expect_equal(stageResults3$testStatistics, c(1.2040366, 1.6040446, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$pValues, c(0.12168078, 0.059770605, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$effectSizes, c(45, 48.6, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$combFisher, c(0.12168078, 0.007272934, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$weightsFisher, c(1, 1, 1, 1)) # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} # @refFS[Formula]{fs:definitionRCIFisherCombination} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:finalPValueFisherCombinationTest} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} result3 <- getAnalysisResults(design = design3, dataInput = dataExample1, thetaH0 = 10, nPlanned = 30, thetaH1 = 50, assumedStDev = 100, seed = 123456789) ## ## Comparison of the results of AnalysisResultsFisher object 'result3' with expected results ## expect_equal(result3$.design$stages, c(1, 2, 3, 4)) expect_equal(result3$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result3$.design$criticalValues, c(0.013928445, 0.0019196833, 0.00034092609, 6.8425459e-05), tolerance = 1e-07) expect_equal(result3$.design$alpha0Vec, c(0.4, 0.4, 0.4), tolerance = 1e-07) expect_equal(result3$.design$alphaSpent, c(0.013928445, 0.020373842, 0.0235151, 0.025), tolerance = 1e-07) expect_equal(result3$.design$stageLevels, c(0.013928445, 0.013928445, 0.013928445, 0.013928445), tolerance = 1e-07) expect_equal(result3$.stageResults$effectSizes, c(45, 48.6, 47.25, NA_real_), tolerance = 1e-07) expect_equal(result3$.stageResults$testStatistics, c(1.2040366, 1.6040446, 1.5975241, NA_real_), tolerance = 1e-07) expect_equal(result3$.stageResults$pValues, c(0.12168078, 0.059770605, 0.060494785, NA_real_), tolerance = 1e-07) expect_equal(result3$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(result3$thetaH0, 10) expect_equal(result3$thetaH1, 50) expect_equal(result3$assumedStDev, 100) 
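# Illustration (assuming the unit weights from weightsFisher above): Fisher's
# combination statistic is the product of the stage-wise p-values,
prod(c(0.12168078, 0.059770605)) # ~ 0.007272934 = combFisher[2]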
expect_equal(result3$conditionalRejectionProbabilities, c(0.029249394, 0.067046868, 0.15552139, NA_real_), tolerance = 1e-07) expect_equal(result3$nPlanned, c(NA_real_, NA_real_, NA_real_, 30)) expect_equal(result3$allocationRatioPlanned, 1) expect_equal(result3$conditionalPower, c(NA_real_, NA_real_, NA_real_, 0.88057256), tolerance = 1e-07) expect_equal(result3$repeatedConfidenceIntervalLowerBounds, c(-24.226675, 0.014834887, 8.7947814, NA_real_), tolerance = 1e-07) expect_equal(result3$repeatedConfidenceIntervalUpperBounds, c(114.22668, 96.713521, 85.125684, NA_real_), tolerance = 1e-07) expect_equal(result3$repeatedPValues, c(0.165096, 0.068572907, 0.029926287, NA_real_), tolerance = 1e-07) expect_equal(result3$finalStage, NA_integer_) expect_equal(result3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$normalApproximation, FALSE) expect_equal(result3$equalVariances, TRUE) expect_equal(result3$directionUpper, TRUE) expect_equal(result3$combinationTestStatistics, c(0.12168078, 0.007272934, 0.00043997458, NA_real_), tolerance = 1e-07) }) test_that("'getAnalysisResults' for different designs and a dataset of one mean per stage (bindingFutility = FALSE)", { .skipTestifDisabled() dataExample2 <- getDataset( n = c(20, 30, 30), means = c(0.45, 0.51, 0.45) * 100, stDevs = c(1.3, 1.4, 1.2) * 100 ) design4 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.4) stageResults1 <- getStageResults(design4, dataExample2, thetaH0 = 10, stage = 2) ## ## Comparison of the results of StageResultsMeans object 'stageResults1' with expected results ## expect_equal(stageResults1$overallTestStatistics, c(1.2040366, 2.025312, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$overallPValues, c(0.12168078, 0.02415027, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$overallMeans, c(45, 48.6, 47.25), tolerance = 1e-07) expect_equal(stageResults1$overallStDevs, c(130, 134.76601, 128.66279), tolerance = 1e-07) expect_equal(stageResults1$overallSampleSizes, c(20, 50)) expect_equal(stageResults1$testStatistics, c(1.2040366, 1.6040446, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$pValues, c(0.12168078, 0.059770605, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$effectSizes, c(45, 48.6, NA_real_, NA_real_), tolerance = 1e-07) plotData1 <- testGetStageResultsPlotData(stageResults1, stage = 2, nPlanned = c(30, 20), thetaRange = seq(10, 80, 5), assumedStDev = 100) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80)) expect_equal(plotData1$condPowerValues, c(0.17749108, 0.27572975, 0.39647686, 0.52937537, 0.65998377, 0.77434444, 0.86340967, 0.9250277, 0.96285863, 0.98345513, 0.99339288, 0.99764031, 0.99924778, 0.9997863, 0.99994597), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.12861339, 0.21139553, 0.32435073, 0.46456173, 0.62112851, 0.77522713, 0.90320416, 0.98231862, 0.99730568, 0.94517816, 0.83619688, 0.69057821, 0.53238607, 0.38313335, 0.25738469), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") 
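# Sketch of the likelihood curve checked above (assumption: normal likelihood of
# the effect at the overall estimate 48.6 with se = overallStDev / sqrt(n),
# rescaled to 1 at its maximum):
exp(-0.5 * ((45 - 48.6) / (134.76601 / sqrt(50)))^2) # ~ 0.98231862, the value at theta = 45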
expect_equal(plotData1$xlab, "Effect size") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 50, std = 100") # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} # @refFS[Formula]{fs:testStatisticsGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCIOneMean} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneMeanEffect} result1 <- getAnalysisResults(design = design4, dataInput = dataExample2, thetaH0 = 10) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'result1' with expected results ## expect_equal(result1$.design$stages, c(1, 2, 3, 4)) expect_equal(result1$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result1$.design$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(result1$.design$futilityBounds, c(-6, -6, -6)) expect_equal(result1$.design$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(result1$.design$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) expect_equal(result1$.stageResults$effectSizes, c(45, 48.6, 47.25, NA_real_), tolerance = 1e-07) expect_equal(result1$.stageResults$testStatistics, c(1.2040366, 1.6040446, 1.5975241, NA_real_), tolerance = 1e-07) expect_equal(result1$.stageResults$pValues, c(0.12168078, 0.059770605, 0.060494785, NA_real_), tolerance = 1e-07) expect_equal(result1$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(result1$thetaH0, 10) expect_equal(result1$thetaH1, 47.25, tolerance = 1e-07) expect_equal(result1$assumedStDev, 128.66279, tolerance = 1e-07) expect_equal(result1$conditionalRejectionProbabilities, c(0.046837862, 0.17749108, 0.46585158, NA_real_), tolerance = 1e-07) expect_equal(result1$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$allocationRatioPlanned, 1) expect_equal(result1$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$repeatedConfidenceIntervalLowerBounds, c(-37.7517, 1.3684534, 13.520683, NA_real_), tolerance = 1e-07) expect_equal(result1$repeatedConfidenceIntervalUpperBounds, c(127.7517, 95.831547, 80.979317, NA_real_), tolerance = 1e-07) expect_equal(result1$repeatedPValues, c(0.28074785, 0.063917079, 0.013597508, NA_real_), tolerance = 1e-07) expect_equal(result1$finalStage, 3) expect_equal(result1$finalPValues, c(NA_real_, NA_real_, 0.014875116, NA_real_), tolerance = 1e-07) expect_equal(result1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 13.605112, NA_real_), tolerance = 1e-07) expect_equal(result1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 73.306215, NA_real_), tolerance = 1e-07) expect_equal(result1$medianUnbiasedEstimates, c(NA_real_, NA_real_, 44.286284, NA_real_), tolerance = 1e-07) expect_equal(result1$normalApproximation, FALSE) expect_equal(result1$equalVariances, TRUE) expect_equal(result1$directionUpper, TRUE) expect_equal(result1$overallTestStatistics, c(1.2040366, 2.025312, 2.5895142, NA_real_), tolerance = 1e-07) expect_equal(result1$overallPValues, c(0.12168078, 0.02415027, 0.0057194973, NA_real_), tolerance = 1e-07) design5 <- 
getDesignInverseNormal(kMax = 4, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.4) ## ## Comparison of the results of TrialDesignInverseNormal object 'design5' with expected results ## expect_equal(design5$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(design5$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(design5$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) stageResults2 <- getStageResults(design5, dataExample2, thetaH0 = 10, stage = 2) ## ## Comparison of the results of StageResultsMeans object 'stageResults2' with expected results ## expect_equal(stageResults2$overallTestStatistics, c(1.2040366, 2.025312, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$overallPValues, c(0.12168078, 0.02415027, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$overallMeans, c(45, 48.6, 47.25), tolerance = 1e-07) expect_equal(stageResults2$overallStDevs, c(130, 134.76601, 128.66279), tolerance = 1e-07) expect_equal(stageResults2$overallSampleSizes, c(20, 50)) expect_equal(stageResults2$testStatistics, c(1.2040366, 1.6040446, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$pValues, c(0.12168078, 0.059770605, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$effectSizes, c(45, 48.6, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$combInverseNormal, c(1.1666257, 1.9256836, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$weightsInverseNormal, c(0.5, 0.5, 0.5, 0.5), tolerance = 1e-07) plotData2 <- testGetStageResultsPlotData(stageResults2, stage = 2, nPlanned = c(30, 20), thetaRange = seq(10, 80, 5), assumedStDev = 100) ## ## Comparison of the results of list object 'plotData2' with expected results ## expect_equal(plotData2$xValues, c(10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80)) expect_equal(plotData2$condPowerValues, c(0.16190673, 0.25578292, 0.37352456, 0.50571691, 0.63820191, 0.75647342, 0.85036726, 0.91657302, 0.95799593, 0.98097594, 0.99227321, 0.99719262, 0.99908938, 0.99973673, 0.99993225), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(0.12861339, 0.21139553, 0.32435073, 0.46456173, 0.62112851, 0.77522713, 0.90320416, 0.98231862, 0.99730568, 0.94517816, 0.83619688, 0.69057821, 0.53238607, 0.38313335, 0.25738469), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power Plot with Likelihood") expect_equal(plotData2$xlab, "Effect size") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 50, std = 100") # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCIOneMean} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneMeanEffect} result2 <- getAnalysisResults(design = design5, dataInput = dataExample2, thetaH0 = 10) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'result2' with expected results ## expect_equal(result2$.design$stages, c(1, 2, 3, 4)) expect_equal(result2$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) 
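# Sketch of the Wang & Tsiatis boundary shape behind the critical values checked
# next: with deltaWT = 0.4, u_k = c * t_k^(deltaWT - 0.5) for t_k = k/4, e.g.
2.2330242 * 0.25^(0.4 - 0.5) # ~ 2.5650713, the first-stage critical value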
expect_equal(result2$.design$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(result2$.design$futilityBounds, c(-6, -6, -6)) expect_equal(result2$.design$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(result2$.design$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) expect_equal(result2$.stageResults$effectSizes, c(45, 48.6, 47.25, NA_real_), tolerance = 1e-07) expect_equal(result2$.stageResults$testStatistics, c(1.2040366, 1.6040446, 1.5975241, NA_real_), tolerance = 1e-07) expect_equal(result2$.stageResults$pValues, c(0.12168078, 0.059770605, 0.060494785, NA_real_), tolerance = 1e-07) expect_equal(result2$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(result2$thetaH0, 10) expect_equal(result2$thetaH1, 47.25, tolerance = 1e-07) expect_equal(result2$assumedStDev, 128.66279, tolerance = 1e-07) expect_equal(result2$conditionalRejectionProbabilities, c(0.046837862, 0.16190673, 0.42383694, NA_real_), tolerance = 1e-07) expect_equal(result2$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$allocationRatioPlanned, 1) expect_equal(result2$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$repeatedConfidenceIntervalLowerBounds, c(-37.7517, 0.20066782, 12.631309, NA_real_), tolerance = 1e-07) expect_equal(result2$repeatedConfidenceIntervalUpperBounds, c(127.7517, 96.240714, 81.345632, NA_real_), tolerance = 1e-07) expect_equal(result2$repeatedPValues, c(0.28074785, 0.070627118, 0.016069426, NA_real_), tolerance = 1e-07) expect_equal(result2$finalStage, 3) expect_equal(result2$finalPValues, c(NA_real_, NA_real_, 0.015631623, NA_real_), tolerance = 1e-07) expect_equal(result2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 13.353451, NA_real_), tolerance = 1e-07) expect_equal(result2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 73.21831, NA_real_), tolerance = 1e-07) expect_equal(result2$medianUnbiasedEstimates, c(NA_real_, NA_real_, 44.191393, NA_real_), tolerance = 1e-07) expect_equal(result2$normalApproximation, FALSE) expect_equal(result2$equalVariances, TRUE) expect_equal(result2$directionUpper, TRUE) expect_equal(result2$combinationTestStatistics, c(1.1666257, 1.9256836, 2.4675727, NA_real_), tolerance = 1e-07) design6 <- getDesignFisher(kMax = 4, alpha = 0.025) ## ## Comparison of the results of TrialDesignFisher object 'design6' with expected results ## expect_equal(design6$alphaSpent, c(0.010404785, 0.016661203, 0.021286477, 0.025), tolerance = 1e-07) expect_equal(design6$criticalValues, c(0.010404785, 0.0013703718, 0.00023506069, 4.5812899e-05), tolerance = 1e-07) expect_equal(design6$stageLevels, c(0.010404785, 0.010404785, 0.010404785, 0.010404785), tolerance = 1e-07) expect_equal(design6$scale, c(1, 1, 1)) expect_equal(design6$nonStochasticCurtailment, FALSE) stageResults3 <- getStageResults(design6, dataExample2, thetaH0 = 10, stage = 2) ## ## Comparison of the results of StageResultsMeans object 'stageResults3' with expected results ## expect_equal(stageResults3$overallTestStatistics, c(1.2040366, 2.025312, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$overallPValues, c(0.12168078, 0.02415027, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$overallMeans, c(45, 48.6, 47.25), tolerance = 1e-07) expect_equal(stageResults3$overallStDevs, c(130, 134.76601, 128.66279), tolerance = 1e-07) 
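# Illustration: the cumulative (overall) mean checked above pools the stage-wise
# means of dataExample2 by sample size, e.g. at stage 2:
(20 * 45 + 30 * 51) / 50 # = 48.6 = overallMeans[2]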
expect_equal(stageResults3$overallSampleSizes, c(20, 50)) expect_equal(stageResults3$testStatistics, c(1.2040366, 1.6040446, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$pValues, c(0.12168078, 0.059770605, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$effectSizes, c(45, 48.6, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$combFisher, c(0.12168078, 0.007272934, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$weightsFisher, c(1, 1, 1, 1)) # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} # @refFS[Formula]{fs:definitionRCIFisherCombination} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:finalPValueFisherCombinationTest} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} result3 <- getAnalysisResults(design = design6, dataInput = dataExample2, stage = 2, thetaH0 = 10, nPlanned = c(30, 20), thetaH1 = 50, assumedStDev = 100, iterations = 800, seed = 31082018) ## ## Comparison of the results of AnalysisResultsFisher object 'result3' with expected results ## expect_equal(result3$.design$stages, c(1, 2, 3, 4)) expect_equal(result3$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result3$.design$criticalValues, c(0.010404785, 0.0013703718, 0.00023506069, 4.5812899e-05), tolerance = 1e-07) expect_equal(result3$.design$alpha0Vec, c(1, 1, 1)) expect_equal(result3$.design$alphaSpent, c(0.010404785, 0.016661203, 0.021286477, 0.025), tolerance = 1e-07) expect_equal(result3$.design$stageLevels, c(0.010404785, 0.010404785, 0.010404785, 0.010404785), tolerance = 1e-07) expect_equal(result3$.stageResults$effectSizes, c(45, 48.6, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$.stageResults$testStatistics, c(1.2040366, 1.6040446, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$.stageResults$pValues, c(0.12168078, 0.059770605, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(result3$thetaH0, 10) expect_equal(result3$thetaH1, 50) expect_equal(result3$assumedStDev, 100) expect_equal(result3$conditionalRejectionProbabilities, c(0.026695414, 0.053938868, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$nPlanned, c(NA_real_, NA_real_, 30, 20)) expect_equal(result3$allocationRatioPlanned, 1) expect_equal(result3$repeatedConfidenceIntervalLowerBounds, c(-28.274837, -2.3519587, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$repeatedConfidenceIntervalUpperBounds, c(118.27484, 99.090567, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$repeatedPValues, c(0.23830752, 0.094039775, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$finalStage, NA_integer_) expect_equal(result3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$normalApproximation, FALSE) expect_equal(result3$equalVariances, TRUE) expect_equal(result3$directionUpper, TRUE) expect_equal(result3$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.6175, 0.84875), tolerance = 1e-07) expect_equal(result3$combinationTestStatistics, 
c(0.12168078, 0.007272934, NA_real_, NA_real_), tolerance = 1e-07) }) context("Testing the analysis means functionality for two treatments") test_that("'getAnalysisResults' for a Fisher design and a dataset of two means per stage", { .skipTestifDisabled() # note: if third stage value of means1 (4.5) increases, lower bound of RCI does not increase design7 <- getDesignFisher(kMax = 4, alpha = 0.025) ## ## Comparison of the results of TrialDesignFisher object 'design7' with expected results ## expect_equal(design7$alphaSpent, c(0.010404785, 0.016661203, 0.021286477, 0.025), tolerance = 1e-07) expect_equal(design7$criticalValues, c(0.010404785, 0.0013703718, 0.00023506069, 4.5812899e-05), tolerance = 1e-07) expect_equal(design7$stageLevels, c(0.010404785, 0.010404785, 0.010404785, 0.010404785), tolerance = 1e-07) expect_equal(design7$scale, c(1, 1, 1)) expect_equal(design7$nonStochasticCurtailment, FALSE) dataExample3 <- getDataset( n1 = c(23, 13, 22, 13), n2 = c(22, 11, 22, 11), means1 = c(2.7, 1.5, 4.5, 2.5) * 100, means2 = c(1, 1.1, 1.3, 1) * 100, stds1 = c(1.3, 2.4, 2.2, 1.3) * 100, stds2 = c(1.2, 2.2, 2.1, 1.3) * 100 ) # @refFS[Formula]{fs:testStatisticTwoMeansEqualVariances} # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} # @refFS[Formula]{fs:definitionRCIFisherCombination} # @refFS[Formula]{fs:definitionRCIwithFutilityFisherCombination} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:finalPValueFisherCombinationTest} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} result <- getAnalysisResults(design = design7, dataInput = dataExample3, equalVariances = TRUE, directionUpper = TRUE, seed = 123456789) ## ## Comparison of the results of AnalysisResultsFisher object 'result' with expected results ## expect_equal(result$.design$stages, c(1, 2, 3, 4)) expect_equal(result$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result$.design$criticalValues, c(0.010404785, 0.0013703718, 0.00023506069, 4.5812899e-05), tolerance = 1e-07) expect_equal(result$.design$alpha0Vec, c(1, 1, 1)) expect_equal(result$.design$alphaSpent, c(0.010404785, 0.016661203, 0.021286477, 0.025), tolerance = 1e-07) expect_equal(result$.design$stageLevels, c(0.010404785, 0.010404785, 0.010404785, 0.010404785), tolerance = 1e-07) expect_equal(result$.stageResults$effectSizes, c(170, 123.33333, 197.37931, 188.47418), tolerance = 1e-07) expect_equal(result$.stageResults$testStatistics, c(4.552582, 0.42245245, 4.9350374, 2.8165036), tolerance = 1e-07) expect_equal(result$.stageResults$pValues, c(2.1583718e-05, 0.33839752, 6.5708867e-06, 0.0050256902), tolerance = 1e-07) expect_equal(result$testActions, c("reject and stop", "reject and stop", "reject and stop", "reject")) expect_equal(result$thetaH0, 0) expect_equal(result$thetaH1, 188.47418, tolerance = 1e-07) expect_equal(result$assumedStDev, 192.76382, tolerance = 1e-07) expect_equal(result$conditionalRejectionProbabilities, c(1, 1, 1, NA_real_)) expect_equal(result$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result$allocationRatioPlanned, 1) expect_equal(result$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result$repeatedConfidenceIntervalLowerBounds, c(80.389809, 58.773337, 126.21876, 121.44462), tolerance = 1e-07) expect_equal(result$repeatedConfidenceIntervalUpperBounds, c(259.61019, 232.56315, 252.86796, 238.01813), tolerance = 1e-07) expect_equal(result$repeatedPValues, 
c(6.2988707e-05, 0.00026325991, 1.9536724e-06, 1.9536724e-06), tolerance = 1e-07) expect_equal(result$finalStage, 1) expect_equal(result$finalPValues, c(2.1583718e-05, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result$finalConfidenceIntervalLowerBounds, c(96.812108, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result$finalConfidenceIntervalUpperBounds, c(243.18789, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result$medianUnbiasedEstimates, c(170, NA_real_, NA_real_, NA_real_)) expect_equal(result$normalApproximation, FALSE) expect_equal(result$equalVariances, TRUE) expect_equal(result$directionUpper, TRUE) expect_equal(result$combinationTestStatistics, c(2.1583718e-05, 7.3038765e-06, 4.7992944e-11, 2.4119767e-13), tolerance = 1e-07) }) test_that("'getAnalysisResults' for a group sequential design and a dataset of two means per stage, stages: default, 2, 3, and 4", { .skipTestifDisabled() dataExample4 <- getDataset( n1 = c(23, 13, 22, 13), n2 = c(22, 11, 22, 11), means1 = c(1.7, 1.5, 1.8, 2.5) * 100, means2 = c(1, 1.1, 1.3, 1) * 100, stds1 = c(1.3, 2.4, 2.2, 1.3) * 100, stds2 = c(1.2, 2.2, 2.1, 1.3) * 100 ) design8 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.4) ## ## Comparison of the results of TrialDesignGroupSequential object 'design8' with expected results ## expect_equal(design8$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(design8$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(design8$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) # @refFS[Formula]{fs:testStatisticTwoMeansEqualVariances} # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances} # @refFS[Formula]{fs:testStatisticsGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCITwoMeans} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoMeansEffect} result1 <- getAnalysisResults(design = design8, dataInput = dataExample4, equalVariances = TRUE, stage = 2, nPlanned = c(15, 15), thetaH0 = 0, thetaH1 = 130, assumedStDev = 100, allocationRatioPlanned = 2) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'result1' with expected results ## expect_equal(result1$.design$stages, c(1, 2, 3, 4)) expect_equal(result1$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result1$.design$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(result1$.design$futilityBounds, c(-6, -6, -6)) expect_equal(result1$.design$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(result1$.design$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) expect_equal(result1$.stageResults$effectSizes, c(70, 59.444444), tolerance = 1e-07) expect_equal(result1$.stageResults$testStatistics, c(1.8745926, 0.42245245, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$.stageResults$pValues, c(0.033826026, 0.33839752, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(result1$thetaH0, 0) 
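# Illustration: the stage-2 overall effect size checked above is the difference
# of the sample-size-pooled group means of dataExample4:
(23 * 170 + 13 * 150) / 36 - (22 * 100 + 11 * 110) / 33 # ~ 59.444444 = effectSizes[2]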
expect_equal(result1$thetaH1, 130) expect_equal(result1$assumedStDev, 100) expect_equal(result1$conditionalRejectionProbabilities, c(0.12319684, 0.060559169, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$nPlanned, c(NA_real_, NA_real_, 15, 15)) expect_equal(result1$allocationRatioPlanned, 2) expect_equal(result1$conditionalPower, c(NA_real_, NA_real_, 0.67921715, 0.95627008), tolerance = 1e-07) expect_equal(result1$repeatedConfidenceIntervalLowerBounds, c(-30.185323, -38.955154, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$repeatedConfidenceIntervalUpperBounds, c(170.18532, 157.84404, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$repeatedPValues, c(0.10782416, 0.16254779, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$finalStage, NA_integer_) expect_equal(result1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$normalApproximation, FALSE) expect_equal(result1$equalVariances, TRUE) expect_equal(result1$directionUpper, TRUE) expect_equal(result1$overallTestStatistics, c(1.8745926, 1.4830004, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$overallPValues, c(0.033826026, 0.071381585, NA_real_, NA_real_), tolerance = 1e-07) # @refFS[Formula]{fs:testStatisticTwoMeansEqualVariances} # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances} # @refFS[Formula]{fs:testStatisticsGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCITwoMeans} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoMeansEffect} result4 <- getAnalysisResults(design = design8, dataInput = dataExample4, equalVariances = TRUE, stage = 3, nPlanned = 15, thetaH0 = 0, thetaH1 = 130, assumedStDev = 100, allocationRatioPlanned = 2) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'result4' with expected results ## expect_equal(result4$.design$stages, c(1, 2, 3, 4)) expect_equal(result4$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result4$.design$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(result4$.design$futilityBounds, c(-6, -6, -6)) expect_equal(result4$.design$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(result4$.design$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) expect_equal(result4$.stageResults$effectSizes, c(70, 59.444444, 55.310345), tolerance = 1e-07) expect_equal(result4$.stageResults$testStatistics, c(1.8745926, 0.42245245, 0.7710996, NA_real_), tolerance = 1e-07) expect_equal(result4$.stageResults$pValues, c(0.033826026, 0.33839752, 0.22248223, NA_real_), tolerance = 1e-07) expect_equal(result4$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(result4$thetaH0, 0) expect_equal(result4$thetaH1, 130) expect_equal(result4$assumedStDev, 100) expect_equal(result4$conditionalRejectionProbabilities, c(0.12319684, 0.060559169, 0.040934114, NA_real_), 
tolerance = 1e-07) expect_equal(result4$nPlanned, c(NA_real_, NA_real_, NA_real_, 15)) expect_equal(result4$allocationRatioPlanned, 2) expect_equal(result4$conditionalPower, c(NA_real_, NA_real_, NA_real_, 0.73680191), tolerance = 1e-07) expect_equal(result4$repeatedConfidenceIntervalLowerBounds, c(-30.185323, -38.955154, -25.969325, NA_real_), tolerance = 1e-07) expect_equal(result4$repeatedConfidenceIntervalUpperBounds, c(170.18532, 157.84404, 136.59001, NA_real_), tolerance = 1e-07) expect_equal(result4$repeatedPValues, c(0.10782416, 0.16254779, 0.12132816, NA_real_), tolerance = 1e-07) expect_equal(result4$finalStage, NA_integer_) expect_equal(result4$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result4$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result4$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result4$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result4$normalApproximation, FALSE) expect_equal(result4$equalVariances, TRUE) expect_equal(result4$directionUpper, TRUE) expect_equal(result4$overallTestStatistics, c(1.8745926, 1.4830004, 1.5863394, NA_real_), tolerance = 1e-07) expect_equal(result4$overallPValues, c(0.033826026, 0.071381585, 0.057753539, NA_real_), tolerance = 1e-07) # @refFS[Formula]{fs:testStatisticTwoMeansEqualVariances} # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances} # @refFS[Formula]{fs:testStatisticsGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCITwoMeans} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoMeansEffect} result7 <- getAnalysisResults(design = design8, dataInput = dataExample4, equalVariances = TRUE, stage = 4, nPlanned = numeric(0), thetaH0 = 0, thetaH1 = 130, assumedStDev = 100, allocationRatioPlanned = 2) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'result7' with expected results ## expect_equal(result7$.design$stages, c(1, 2, 3, 4)) expect_equal(result7$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result7$.design$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(result7$.design$futilityBounds, c(-6, -6, -6)) expect_equal(result7$.design$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(result7$.design$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) expect_equal(result7$.stageResults$effectSizes, c(70, 59.444444, 55.310345, 72.41784), tolerance = 1e-07) expect_equal(result7$.stageResults$testStatistics, c(1.8745926, 0.42245245, 0.7710996, 2.8165036), tolerance = 1e-07) expect_equal(result7$.stageResults$pValues, c(0.033826026, 0.33839752, 0.22248223, 0.0050256902), tolerance = 1e-07) expect_equal(result7$testActions, c("continue", "continue", "continue", "reject")) expect_equal(result7$thetaH0, 0) expect_equal(result7$thetaH1, 130) expect_equal(result7$assumedStDev, 100) expect_equal(result7$conditionalRejectionProbabilities, c(0.12319684, 0.060559169, 0.040934114, NA_real_), tolerance = 1e-07) expect_equal(result7$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result7$allocationRatioPlanned, 2) 
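# Sketch: the final-stage rejection checked above amounts to comparing the overall
# p-value (see overallPValues below) with the stage-4 significance level:
0.0091998951 <= 0.012773673 # TRUE, hence testActions[4] == "reject"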
expect_equal(result7$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result7$repeatedConfidenceIntervalLowerBounds, c(-30.185323, -38.955154, -25.969325, 3.8960985), tolerance = 1e-07) expect_equal(result7$repeatedConfidenceIntervalUpperBounds, c(170.18532, 157.84404, 136.59001, 140.93958), tolerance = 1e-07) expect_equal(result7$repeatedPValues, c(0.10782416, 0.16254779, 0.12132816, 0.017942439), tolerance = 1e-07) expect_equal(result7$finalStage, 4) expect_equal(result7$finalPValues, c(NA_real_, NA_real_, NA_real_, 0.022610692), tolerance = 1e-07) expect_equal(result7$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, 1.5235285), tolerance = 1e-07) expect_equal(result7$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, 127.93924), tolerance = 1e-07) expect_equal(result7$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, 66.58768), tolerance = 1e-07) expect_equal(result7$normalApproximation, FALSE) expect_equal(result7$equalVariances, TRUE) expect_equal(result7$directionUpper, TRUE) expect_equal(result7$overallTestStatistics, c(1.8745926, 1.4830004, 1.5863394, 2.3864368), tolerance = 1e-07) expect_equal(result7$overallPValues, c(0.033826026, 0.071381585, 0.057753539, 0.0091998951), tolerance = 1e-07) }) test_that("'getAnalysisResults' for an inverse normal design and a dataset of two means per stage, stages: default, 2, 3, and 4", { .skipTestifDisabled() dataExample5 <- getDataset( n1 = c(23, 13, 22, 13), n2 = c(22, 11, 22, 11), means1 = c(1.7, 1.5, 1.8, 2.5) * 100, means2 = c(1, 1.1, 1.3, 1) * 100, stds1 = c(1.3, 2.4, 2.2, 1.3) * 100, stds2 = c(1.2, 2.2, 2.1, 1.3) * 100 ) design9 <- getDesignInverseNormal(kMax = 4, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.4) ## ## Comparison of the results of TrialDesignInverseNormal object 'design9' with expected results ## expect_equal(design9$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(design9$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(design9$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterUnequalVariances} # @refFS[Formula]{fs:testStatisticsGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCITwoMeans} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoMeansEffect} result2 <- getAnalysisResults(design = design9, dataInput = dataExample5, equalVariances = FALSE, stage = 2, nPlanned = c(15, 15), thetaH0 = 0, thetaH1 = 130, assumedStDev = 100, allocationRatioPlanned = 2) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'result2' with expected results ## expect_equal(result2$.design$stages, c(1, 2, 3, 4)) expect_equal(result2$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result2$.design$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(result2$.design$futilityBounds, c(-6, -6, -6)) expect_equal(result2$.design$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(result2$.design$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) 
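# With equalVariances = FALSE the stage-wise statistics use the unequal-variance
# (Welch-type) t-test, which is why the first-stage test statistic below
# (1.8780002) differs slightly from the equal-variance value 1.8745926 seen earlier.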
expect_equal(result2$.stageResults$effectSizes, c(70, 59.444444), tolerance = 1e-07) expect_equal(result2$.stageResults$testStatistics, c(1.8780002, 0.42565792, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$.stageResults$pValues, c(0.033590771, 0.33726198, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(result2$thetaH0, 0) expect_equal(result2$thetaH1, 130) expect_equal(result2$assumedStDev, 100) expect_equal(result2$conditionalRejectionProbabilities, c(0.12372016, 0.08089089, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$nPlanned, c(NA_real_, NA_real_, 15, 15)) expect_equal(result2$allocationRatioPlanned, 2) expect_equal(result2$conditionalPower, c(NA_real_, NA_real_, 0.7399771, 0.96741599), tolerance = 1e-07) expect_equal(result2$repeatedConfidenceIntervalLowerBounds, c(-30.008991, -32.585516, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$repeatedConfidenceIntervalUpperBounds, c(170.00899, 154.76457, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$repeatedPValues, c(0.10725005, 0.13184907, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$finalStage, NA_integer_) expect_equal(result2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$normalApproximation, FALSE) expect_equal(result2$equalVariances, FALSE) expect_equal(result2$directionUpper, TRUE) expect_equal(result2$combinationTestStatistics, c(1.8304576, 1.5912766, NA_real_, NA_real_), tolerance = 1e-07) # @refFS[Formula]{fs:testStatisticDifferenceMeansUnequalVariances} # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterUnequalVariances} # @refFS[Formula]{fs:testStatisticsGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCITwoMeans} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoMeansEffect} result5 <- getAnalysisResults(design = design9, dataInput = dataExample5, equalVariances = FALSE, stage = 3, nPlanned = 15, thetaH0 = 0, thetaH1 = 130, assumedStDev = 100, allocationRatioPlanned = 2) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'result5' with expected results ## expect_equal(result5$.design$stages, c(1, 2, 3, 4)) expect_equal(result5$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result5$.design$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(result5$.design$futilityBounds, c(-6, -6, -6)) expect_equal(result5$.design$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(result5$.design$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) expect_equal(result5$.stageResults$effectSizes, c(70, 59.444444, 55.310345), tolerance = 1e-07) expect_equal(result5$.stageResults$testStatistics, c(1.8780002, 0.42565792, 0.7710996, NA_real_), tolerance = 1e-07) expect_equal(result5$.stageResults$pValues, c(0.033590771, 0.33726198, 
0.22248687, NA_real_), tolerance = 1e-07) expect_equal(result5$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(result5$thetaH0, 0) expect_equal(result5$thetaH1, 130) expect_equal(result5$assumedStDev, 100) expect_equal(result5$conditionalRejectionProbabilities, c(0.12372016, 0.08089089, 0.073275512, NA_real_), tolerance = 1e-07) expect_equal(result5$nPlanned, c(NA_real_, NA_real_, NA_real_, 15)) expect_equal(result5$allocationRatioPlanned, 2) expect_equal(result5$conditionalPower, c(NA_real_, NA_real_, NA_real_, 0.82164236), tolerance = 1e-07) expect_equal(result5$repeatedConfidenceIntervalLowerBounds, c(-30.008991, -32.585516, -19.230333, NA_real_), tolerance = 1e-07) expect_equal(result5$repeatedConfidenceIntervalUpperBounds, c(170.00899, 154.76457, 134.96564, NA_real_), tolerance = 1e-07) expect_equal(result5$repeatedPValues, c(0.10725005, 0.13184907, 0.088247169, NA_real_), tolerance = 1e-07) expect_equal(result5$finalStage, NA_integer_) expect_equal(result5$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result5$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result5$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result5$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result5$normalApproximation, FALSE) expect_equal(result5$equalVariances, FALSE) expect_equal(result5$directionUpper, TRUE) expect_equal(result5$combinationTestStatistics, c(1.8304576, 1.5912766, 1.7402643, NA_real_), tolerance = 1e-07) # @refFS[Formula]{fs:testStatisticDifferenceMeansUnequalVariances} # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterUnequalVariances} # @refFS[Formula]{fs:testStatisticsGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCITwoMeans} result8 <- getAnalysisResults(design = design9, dataInput = dataExample5, equalVariances = FALSE, stage = 4, nPlanned = numeric(0), thetaH0 = 0, thetaH1 = 130, assumedStDev = 100, allocationRatioPlanned = 2) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'result8' with expected results ## expect_equal(result8$.design$stages, c(1, 2, 3, 4)) expect_equal(result8$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result8$.design$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(result8$.design$futilityBounds, c(-6, -6, -6)) expect_equal(result8$.design$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(result8$.design$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) expect_equal(result8$.stageResults$effectSizes, c(70, 59.444444, 55.310345, 72.41784), tolerance = 1e-07) expect_equal(result8$.stageResults$testStatistics, c(1.8780002, 0.42565792, 0.7710996, 2.8165036), tolerance = 1e-07) expect_equal(result8$.stageResults$pValues, c(0.033590771, 0.33726198, 0.22248687, 0.0051181248), tolerance = 1e-07) expect_equal(result8$testActions, c("continue", "continue", "continue", "reject")) expect_equal(result8$thetaH0, 0) expect_equal(result8$thetaH1, 130) expect_equal(result8$assumedStDev, 100) expect_equal(result8$conditionalRejectionProbabilities, c(0.12372016, 0.08089089, 0.073275512, NA_real_), tolerance = 1e-07) expect_equal(result8$nPlanned, c(NA_real_, NA_real_, NA_real_, 
NA_real_)) expect_equal(result8$allocationRatioPlanned, 2) expect_equal(result8$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result8$repeatedConfidenceIntervalLowerBounds, c(-30.008991, -32.585516, -19.230333, 16.862491), tolerance = 1e-07) expect_equal(result8$repeatedConfidenceIntervalUpperBounds, c(170.00899, 154.76457, 134.96564, 146.10543), tolerance = 1e-07) expect_equal(result8$repeatedPValues, c(0.10725005, 0.13184907, 0.088247169, 0.0050030118), tolerance = 1e-07) expect_equal(result8$finalStage, 4) expect_equal(result8$finalPValues, c(NA_real_, NA_real_, NA_real_, 0.019192988), tolerance = 1e-07) expect_equal(result8$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, 4.0866333), tolerance = 1e-07) expect_equal(result8$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, 135.35067), tolerance = 1e-07) expect_equal(result8$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, 71.819795), tolerance = 1e-07) expect_equal(result8$normalApproximation, FALSE) expect_equal(result8$equalVariances, FALSE) expect_equal(result8$directionUpper, TRUE) expect_equal(result8$combinationTestStatistics, c(1.8304576, 1.5912766, 1.7402643, 2.7909855), tolerance = 1e-07) }) test_that("'getAnalysisResults' for a Fisher design and a dataset of two means per stage, stages: default, 2, 3, and 4", { .skipTestifDisabled() dataExample6 <- getDataset( n1 = c(23, 13, 22, 13), n2 = c(22, 11, 22, 11), means1 = c(1.7, 1.5, 1.8, 2.5) * 100, means2 = c(1, 1.1, 1.3, 1) * 100, stds1 = c(1.3, 2.4, 2.2, 1.3) * 100, stds2 = c(1.2, 2.2, 2.1, 1.3) * 100 ) design10 <- getDesignFisher(kMax = 4, alpha = 0.025) ## ## Comparison of the results of TrialDesignFisher object 'design10' with expected results ## expect_equal(design10$alphaSpent, c(0.010404785, 0.016661203, 0.021286477, 0.025), tolerance = 1e-07) expect_equal(design10$criticalValues, c(0.010404785, 0.0013703718, 0.00023506069, 4.5812899e-05), tolerance = 1e-07) expect_equal(design10$stageLevels, c(0.010404785, 0.010404785, 0.010404785, 0.010404785), tolerance = 1e-07) expect_equal(design10$scale, c(1, 1, 1)) expect_equal(design10$nonStochasticCurtailment, FALSE) result3 <- getAnalysisResults(design = design10, dataInput = dataExample6, equalVariances = TRUE, stage = 2, nPlanned = c(15, 15), thetaH0 = 0, thetaH1 = 130, assumedStDev = 100, allocationRatioPlanned = 2, seed = 123456789) ## ## Comparison of the results of AnalysisResultsFisher object 'result3' with expected results ## expect_equal(result3$.design$stages, c(1, 2, 3, 4)) expect_equal(result3$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result3$.design$criticalValues, c(0.010404785, 0.0013703718, 0.00023506069, 4.5812899e-05), tolerance = 1e-07) expect_equal(result3$.design$alpha0Vec, c(1, 1, 1)) expect_equal(result3$.design$alphaSpent, c(0.010404785, 0.016661203, 0.021286477, 0.025), tolerance = 1e-07) expect_equal(result3$.design$stageLevels, c(0.010404785, 0.010404785, 0.010404785, 0.010404785), tolerance = 1e-07) expect_equal(result3$.stageResults$effectSizes, c(70, 59.444444), tolerance = 1e-07) expect_equal(result3$.stageResults$testStatistics, c(1.8745926, 0.42245245, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$.stageResults$pValues, c(0.033826026, 0.33839752, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(result3$thetaH0, 0) expect_equal(result3$thetaH1, 130) 
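# Here the conditional power is obtained by simulation (see
# conditionalPowerSimulated below); the fixed seed = 123456789 makes the
# simulated values 0.599 and 0.917 reproducible across test runs.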
expect_equal(result3$assumedStDev, 100) expect_equal(result3$conditionalRejectionProbabilities, c(0.077408717, 0.036086707, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$nPlanned, c(NA_real_, NA_real_, 15, 15)) expect_equal(result3$allocationRatioPlanned, 2) expect_equal(result3$repeatedConfidenceIntervalLowerBounds, c(-19.610191, -28.583726, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$repeatedConfidenceIntervalUpperBounds, c(159.61019, 157.36315, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$repeatedPValues, c(0.07529439, 0.13212373, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$finalStage, NA_integer_) expect_equal(result3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$normalApproximation, FALSE) expect_equal(result3$equalVariances, TRUE) expect_equal(result3$directionUpper, TRUE) expect_equal(result3$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.599, 0.917), tolerance = 1e-07) expect_equal(result3$combinationTestStatistics, c(0.033826026, 0.011446643, NA_real_, NA_real_), tolerance = 1e-07) result6 <- getAnalysisResults(design = design10, dataInput = dataExample6, equalVariances = TRUE, stage = 3, nPlanned = 15, thetaH0 = 0, thetaH1 = 130, assumedStDev = 100, allocationRatioPlanned = 2, seed = 123456789) ## ## Comparison of the results of AnalysisResultsFisher object 'result6' with expected results ## expect_equal(result6$.design$stages, c(1, 2, 3, 4)) expect_equal(result6$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result6$.design$criticalValues, c(0.010404785, 0.0013703718, 0.00023506069, 4.5812899e-05), tolerance = 1e-07) expect_equal(result6$.design$alpha0Vec, c(1, 1, 1)) expect_equal(result6$.design$alphaSpent, c(0.010404785, 0.016661203, 0.021286477, 0.025), tolerance = 1e-07) expect_equal(result6$.design$stageLevels, c(0.010404785, 0.010404785, 0.010404785, 0.010404785), tolerance = 1e-07) expect_equal(result6$.stageResults$effectSizes, c(70, 59.444444, 55.310345), tolerance = 1e-07) expect_equal(result6$.stageResults$testStatistics, c(1.8745926, 0.42245245, 0.7710996, NA_real_), tolerance = 1e-07) expect_equal(result6$.stageResults$pValues, c(0.033826026, 0.33839752, 0.22248223, NA_real_), tolerance = 1e-07) expect_equal(result6$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(result6$thetaH0, 0) expect_equal(result6$thetaH1, 130) expect_equal(result6$assumedStDev, 100) expect_equal(result6$conditionalRejectionProbabilities, c(0.077408717, 0.036086707, 0.017989301, NA_real_), tolerance = 1e-07) expect_equal(result6$nPlanned, c(NA_real_, NA_real_, NA_real_, 15)) expect_equal(result6$allocationRatioPlanned, 2) expect_equal(result6$conditionalPower, c(NA_real_, NA_real_, NA_real_, 0.60883935), tolerance = 1e-07) expect_equal(result6$repeatedConfidenceIntervalLowerBounds, c(-19.610191, -28.583726, -24.875191, NA_real_), tolerance = 1e-07) expect_equal(result6$repeatedConfidenceIntervalUpperBounds, c(159.61019, 157.36315, 146.25589, NA_real_), tolerance = 1e-07) expect_equal(result6$repeatedPValues, c(0.07529439, 0.13212373, 0.13321282, NA_real_), tolerance = 1e-07) expect_equal(result6$finalStage, NA_integer_) 
expect_equal(result6$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result6$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result6$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result6$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result6$normalApproximation, FALSE) expect_equal(result6$equalVariances, TRUE) expect_equal(result6$directionUpper, TRUE) expect_equal(result6$combinationTestStatistics, c(0.033826026, 0.011446643, 0.0025466747, NA_real_), tolerance = 1e-07) result9 <- getAnalysisResults(design = design10, dataInput = dataExample6, equalVariances = TRUE, stage = 4, nPlanned = numeric(0), thetaH0 = 0, thetaH1 = 130, assumedStDev = 100, allocationRatioPlanned = 2, seed = 123456789) ## ## Comparison of the results of AnalysisResultsFisher object 'result9' with expected results ## expect_equal(result9$.design$stages, c(1, 2, 3, 4)) expect_equal(result9$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result9$.design$criticalValues, c(0.010404785, 0.0013703718, 0.00023506069, 4.5812899e-05), tolerance = 1e-07) expect_equal(result9$.design$alpha0Vec, c(1, 1, 1)) expect_equal(result9$.design$alphaSpent, c(0.010404785, 0.016661203, 0.021286477, 0.025), tolerance = 1e-07) expect_equal(result9$.design$stageLevels, c(0.010404785, 0.010404785, 0.010404785, 0.010404785), tolerance = 1e-07) expect_equal(result9$.stageResults$effectSizes, c(70, 59.444444, 55.310345, 72.41784), tolerance = 1e-07) expect_equal(result9$.stageResults$testStatistics, c(1.8745926, 0.42245245, 0.7710996, 2.8165036), tolerance = 1e-07) expect_equal(result9$.stageResults$pValues, c(0.033826026, 0.33839752, 0.22248223, 0.0050256902), tolerance = 1e-07) expect_equal(result9$testActions, c("continue", "continue", "continue", "reject")) expect_equal(result9$thetaH0, 0) expect_equal(result9$thetaH1, 130) expect_equal(result9$assumedStDev, 100) expect_equal(result9$conditionalRejectionProbabilities, c(0.077408717, 0.036086707, 0.017989301, NA_real_), tolerance = 1e-07) expect_equal(result9$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result9$allocationRatioPlanned, 2) expect_equal(result9$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result9$repeatedConfidenceIntervalLowerBounds, c(-19.610191, -28.583726, -24.875191, 10.125544), tolerance = 1e-07) expect_equal(result9$repeatedConfidenceIntervalUpperBounds, c(159.61019, 157.36315, 146.25589, 154.53063), tolerance = 1e-07) expect_equal(result9$repeatedPValues, c(0.07529439, 0.13212373, 0.13321282, 0.010110881), tolerance = 1e-07) expect_equal(result9$finalStage, NA_integer_) expect_equal(result9$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result9$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result9$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result9$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result9$normalApproximation, FALSE) expect_equal(result9$equalVariances, TRUE) expect_equal(result9$directionUpper, TRUE) expect_equal(result9$combinationTestStatistics, c(0.033826026, 0.011446643, 0.0025466747, 1.2798798e-05), tolerance = 1e-07) }) test_that("Check that the conditional power is as expected for different designs and datasets", { .skipTestifDisabled() informationRates <- c(0.2, 0.5, 0.8, 1) 
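# Common setup for the conditional power checks below: one four-stage dataset of
# two means per stage (dataExample7) is analyzed under three designs that share
# the unequally spaced information rates defined above: a Wang & Tsiatis group
# sequential design with binding futility bounds (design11), the corresponding
# inverse normal design (design12), and a Fisher combination design (design13).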
dataExample7 <- getDataset( n1 = c(22, 13, 22, 13), n2 = c(22, 11, 22, 11), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 1, 2.5), stds1 = c(1, 2, 2, 1.3), stds2 = c(1, 2, 2, 1.3)) design11 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, informationRates = informationRates, futilityBounds = rep(0.5244, 3), bindingFutility = TRUE, typeOfDesign = "WT", deltaWT = 0.45) ## ## Comparison of the results of TrialDesignGroupSequential object 'design11' with expected results ## expect_equal(design11$alphaSpent, c(0.008066711, 0.01611168, 0.021671928, 0.025), tolerance = 1e-07) expect_equal(design11$criticalValues, c(2.4058832, 2.2981456, 2.2447684, 2.2198623), tolerance = 1e-07) expect_equal(design11$stageLevels, c(0.008066711, 0.010776752, 0.012391502, 0.013214058), tolerance = 1e-07) result1 <- getAnalysisResults(design = design11, dataInput = dataExample7, equalVariances = TRUE, directionUpper = FALSE, stage = 2, thetaH0 = 0.2, thetaH1 = -0.2, nPlanned = c(96, 64), allocationRatioPlanned = 3, normalApproximation = FALSE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'result1' with expected results ## expect_equal(result1$.design$stages, c(1, 2, 3, 4)) expect_equal(result1$.design$informationRates, c(0.2, 0.5, 0.8, 1), tolerance = 1e-07) expect_equal(result1$.design$criticalValues, c(2.4058832, 2.2981456, 2.2447684, 2.2198623), tolerance = 1e-07) expect_equal(result1$.design$futilityBounds, c(0.5244, 0.5244, 0.5244), tolerance = 1e-07) expect_equal(result1$.design$alphaSpent, c(0.008066711, 0.01611168, 0.021671928, 0.025), tolerance = 1e-07) expect_equal(result1$.design$stageLevels, c(0.008066711, 0.010776752, 0.012391502, 0.013214058), tolerance = 1e-07) expect_equal(result1$.stageResults$effectSizes, c(-0.4, -0.39619048), tolerance = 1e-07) expect_equal(result1$.stageResults$testStatistics, c(-1.9899749, -0.73229093, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$.stageResults$pValues, c(0.026564837, 0.23586057, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(result1$thetaH0, 0.2, tolerance = 1e-07) expect_equal(result1$thetaH1, -0.2, tolerance = 1e-07) expect_equal(result1$assumedStDev, 1.4042956, tolerance = 1e-07) expect_equal(result1$conditionalRejectionProbabilities, c(0.13790633, 0.11434101, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$nPlanned, c(NA_real_, NA_real_, 96, 64)) expect_equal(result1$allocationRatioPlanned, 3) expect_equal(result1$conditionalPower, c(NA_real_, NA_real_, 0.4081395, 0.60690858), tolerance = 1e-07) expect_equal(result1$repeatedConfidenceIntervalLowerBounds, c(-1.1558731, -1.198323, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$repeatedConfidenceIntervalUpperBounds, c(0.35587299, 0.40594209, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$repeatedPValues, c(0.06267268, 0.077641512, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$finalStage, NA_integer_) expect_equal(result1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$normalApproximation, FALSE) expect_equal(result1$equalVariances, TRUE) expect_equal(result1$directionUpper, FALSE) 
expect_equal(result1$overallTestStatistics, c(-1.9899749, -1.7496977, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$overallPValues, c(0.026564837, 0.042409297, NA_real_, NA_real_), tolerance = 1e-07) design12 <- getDesignInverseNormal(kMax = 4, alpha = 0.025, informationRates = informationRates, typeOfDesign = "WT", deltaWT = 0.45) ## ## Comparison of the results of TrialDesignInverseNormal object 'design12' with expected results ## expect_equal(design12$alphaSpent, c(0.0064937119, 0.013848609, 0.020340933, 0.025), tolerance = 1e-07) expect_equal(design12$criticalValues, c(2.484114, 2.3728731, 2.3177603, 2.2920443), tolerance = 1e-07) expect_equal(design12$stageLevels, c(0.0064937119, 0.0088251631, 0.010231176, 0.010951542), tolerance = 1e-07) stageResults <- getStageResults(design = design12, dataInput = dataExample7, equalVariances = TRUE, directionUpper = TRUE, stage = 2, thetaH0 = -1) ## ## Comparison of the results of StageResultsMeans object 'stageResults' with expected results ## expect_equal(stageResults$overallTestStatistics, c(1.9899749, 1.7720581, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults$overallPValues, c(0.026564837, 0.040500218, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults$overallMeans1, c(1, 1.0371429, 1.022807, 1.0185714), tolerance = 1e-07) expect_equal(stageResults$overallMeans2, c(1.4, 1.4333333, 1.26, 1.4666667), tolerance = 1e-07) expect_equal(stageResults$overallStDevs1, c(1, 1.4254175, 1.6534615, 1.5851935), tolerance = 1e-07) expect_equal(stageResults$overallStDevs2, c(1, 1.3814998, 1.6530107, 1.6573689), tolerance = 1e-07) expect_equal(stageResults$overallSampleSizes1, c(22, 35)) expect_equal(stageResults$overallSampleSizes2, c(22, 33)) expect_equal(stageResults$testStatistics, c(1.9899749, 0.73229093, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults$pValues, c(0.026564837, 0.23586057, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults$effectSizes, c(-0.4, -0.39619048), tolerance = 1e-07) expect_equal(stageResults$combInverseNormal, c(1.9338654, 1.7805468, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults$weightsInverseNormal, c(0.4472136, 0.54772256, 0.54772256, 0.4472136), tolerance = 1e-07) conditionalPower <- getConditionalPower(design = design12, stageResults = stageResults, stage = 2, thetaH1 = 0.840, nPlanned = c(96, 64), assumedStDev = 2) ## ## Comparison of the results of list object 'conditionalPower' with expected results ## expect_equal(conditionalPower$nPlanned, c(NA_real_, NA_real_, 96, 64)) expect_equal(conditionalPower$conditionalPower, c(NA_real_, NA_real_, 0.99873967, 0.99999483), tolerance = 1e-07) conditionalPowerPlot <- .getConditionalPowerPlot(stageResults = stageResults, stage = 2, thetaRange = seq(-0.8, 0.5, 0.1), nPlanned = c(96, 64), assumedStDev = 2, allocationRatioPlanned = 3) ## ## Comparison of the results of list object 'conditionalPowerPlot' with expected results ## expect_equal(conditionalPowerPlot$xValues, c(-0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5), tolerance = 1e-07) expect_equal(conditionalPowerPlot$condPowerValues, c(0.22956329, 0.31502432, 0.41251256, 0.51641352, 0.6197496, 0.71556407, 0.79832397, 0.86487377, 0.91466948, 0.94932539, 0.97175172, 0.98524183, 0.99278324, 0.99670055), tolerance = 1e-07) expect_equal(conditionalPowerPlot$likelihoodValues, c(0.49547937, 0.67200309, 0.83620171, 0.95465162, 0.9999375, 0.96093693, 0.84724887, 0.68536385, 0.50865752, 0.34635689, 0.21637958, 0.12402316, 
0.065220394, 0.031467201), tolerance = 1e-07) expect_equal(conditionalPowerPlot$main, "Conditional Power Plot with Likelihood") expect_equal(conditionalPowerPlot$xlab, "Effect size") expect_equal(conditionalPowerPlot$ylab, "Conditional power / Likelihood") expect_equal(conditionalPowerPlot$sub, "Stage = 2, # of remaining subjects = 160, std = 2, allocation ratio = 3") result2 <- getAnalysisResults(design = design12, dataInput = dataExample7, equalVariances = TRUE, directionUpper = FALSE, stage = 2, thetaH0 = 0.2, thetaH1 = -0.2, nPlanned = c(96, 64), allocationRatioPlanned = 3, normalApproximation = FALSE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'result2' with expected results ## expect_equal(result2$.design$stages, c(1, 2, 3, 4)) expect_equal(result2$.design$informationRates, c(0.2, 0.5, 0.8, 1), tolerance = 1e-07) expect_equal(result2$.design$criticalValues, c(2.484114, 2.3728731, 2.3177603, 2.2920443), tolerance = 1e-07) expect_equal(result2$.design$futilityBounds, c(-6, -6, -6)) expect_equal(result2$.design$alphaSpent, c(0.0064937119, 0.013848609, 0.020340933, 0.025), tolerance = 1e-07) expect_equal(result2$.design$stageLevels, c(0.0064937119, 0.0088251631, 0.010231176, 0.010951542), tolerance = 1e-07) expect_equal(result2$.stageResults$effectSizes, c(-0.4, -0.39619048), tolerance = 1e-07) expect_equal(result2$.stageResults$testStatistics, c(-1.9899749, -0.73229093, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$.stageResults$pValues, c(0.026564837, 0.23586057, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(result2$thetaH0, 0.2, tolerance = 1e-07) expect_equal(result2$thetaH1, -0.2, tolerance = 1e-07) expect_equal(result2$assumedStDev, 1.4042956, tolerance = 1e-07) expect_equal(result2$conditionalRejectionProbabilities, c(0.11857307, 0.10556981, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$nPlanned, c(NA_real_, NA_real_, 96, 64)) expect_equal(result2$allocationRatioPlanned, 3) expect_equal(result2$conditionalPower, c(NA_real_, NA_real_, 0.39060766, 0.5889102), tolerance = 1e-07) expect_equal(result2$repeatedConfidenceIntervalLowerBounds, c(-1.182291, -1.2104795, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$repeatedConfidenceIntervalUpperBounds, c(0.3822909, 0.41047947, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$repeatedPValues, c(0.081445577, 0.092870573, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$finalStage, NA_integer_) expect_equal(result2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$normalApproximation, FALSE) expect_equal(result2$equalVariances, TRUE) expect_equal(result2$directionUpper, FALSE) expect_equal(result2$combinationTestStatistics, c(1.9338654, 1.7805468, NA_real_, NA_real_), tolerance = 1e-07) design13 <- getDesignFisher(kMax = 4, alpha = 0.025, informationRates = informationRates) ## ## Comparison of the results of TrialDesignFisher object 'design13' with expected results ## expect_equal(design13$alphaSpent, c(0.0099747046, 0.017168497, 0.022142404, 0.025), tolerance = 1e-07) expect_equal(design13$criticalValues, c(0.0099747046, 0.00059134153, 
6.046221e-05, 1.3203687e-05), tolerance = 1e-07) expect_equal(design13$stageLevels, c(0.0099747046, 0.0099747046, 0.0099747046, 0.0099747046), tolerance = 1e-07) expect_equal(design13$scale, c(1.2247449, 1.2247449, 1), tolerance = 1e-07) expect_equal(design13$nonStochasticCurtailment, FALSE) result3 <- getAnalysisResults(design = design13, dataInput = dataExample7, equalVariances = TRUE, directionUpper = FALSE, stage = 2, nPlanned = c(96, 64), thetaH1 = -0.4, allocationRatioPlanned = 2, normalApproximation = FALSE, iterations = 10000, seed = 442018) ## ## Comparison of the results of AnalysisResultsFisher object 'result3' with expected results ## expect_equal(result3$.design$stages, c(1, 2, 3, 4)) expect_equal(result3$.design$informationRates, c(0.2, 0.5, 0.8, 1), tolerance = 1e-07) expect_equal(result3$.design$criticalValues, c(0.0099747046, 0.00059134153, 6.046221e-05, 1.3203687e-05), tolerance = 1e-07) expect_equal(result3$.design$alpha0Vec, c(1, 1, 1)) expect_equal(result3$.design$alphaSpent, c(0.0099747046, 0.017168497, 0.022142404, 0.025), tolerance = 1e-07) expect_equal(result3$.design$stageLevels, c(0.0099747046, 0.0099747046, 0.0099747046, 0.0099747046), tolerance = 1e-07) expect_equal(result3$.stageResults$effectSizes, c(-0.4, -0.39619048), tolerance = 1e-07) expect_equal(result3$.stageResults$testStatistics, c(-1.3266499, -0.48819395, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$.stageResults$pValues, c(0.095896458, 0.31512146, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(result3$thetaH0, 0) expect_equal(result3$thetaH1, -0.4, tolerance = 1e-07) expect_equal(result3$assumedStDev, 1.4042956, tolerance = 1e-07) expect_equal(result3$conditionalRejectionProbabilities, c(0.031447357, 0.012731128, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$nPlanned, c(NA_real_, NA_real_, 96, 64)) expect_equal(result3$allocationRatioPlanned, 2) expect_equal(result3$repeatedConfidenceIntervalLowerBounds, c(-1.1295139, -1.2072533, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$repeatedConfidenceIntervalUpperBounds, c(0.32951385, 0.40725333, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$repeatedPValues, c(0.19930232, 0.29225486, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$finalStage, NA_integer_) expect_equal(result3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$normalApproximation, FALSE) expect_equal(result3$equalVariances, TRUE) expect_equal(result3$directionUpper, FALSE) expect_equal(result3$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.1353, 0.2436), tolerance = 1e-07) expect_equal(result3$combinationTestStatistics, c(0.095896458, 0.023311276, NA_real_, NA_real_), tolerance = 1e-07) }) context("Testing 'getStageResultsMeans'") test_that("'getStageResultsMeans' for an inverse normal design and one or two treatments", { .skipTestifDisabled() designInverseNormal <- getDesignInverseNormal(kMax = 2, alpha = 0.025, sided = 1, informationRates = c(0.5, 1), typeOfDesign = "WT", deltaWT = 0.25, futilityBounds = qnorm(0.7)) dataExample8 <- getDataset( n = c(10, 10), means = c(2, 3), stDevs = c(1, 1.5)) stageResults1 <- 
getStageResults(design = designInverseNormal, dataInput = dataExample8, stage = 2, thetaH0 = C_THETA_H0_MEANS_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT) ## ## Comparison of the results of StageResultsMeans object 'stageResults1' with expected results ## expect_equal(stageResults1$overallTestStatistics, c(6.3245553, 8.3272484), tolerance = 1e-07) expect_equal(stageResults1$overallPValues, c(6.846828e-05, 4.5964001e-08), tolerance = 1e-07) expect_equal(stageResults1$overallMeans, c(2, 2.5), tolerance = 1e-07) expect_equal(stageResults1$overallStDevs, c(1, 1.3426212), tolerance = 1e-07) expect_equal(stageResults1$overallSampleSizes, c(10, 20)) expect_equal(stageResults1$testStatistics, c(6.3245553, 6.3245553), tolerance = 1e-07) expect_equal(stageResults1$pValues, c(6.846828e-05, 6.846828e-05), tolerance = 1e-07) expect_equal(stageResults1$effectSizes, c(2, 2.5), tolerance = 1e-07) expect_equal(stageResults1$combInverseNormal, c(3.813637, 5.3932972), tolerance = 1e-07) expect_equal(stageResults1$weightsInverseNormal, c(0.70710678, 0.70710678), tolerance = 1e-07) dataExample9 <- getDataset( n1 = c(22, 11, 22, 11), n2 = c(22, 13, 22, 13), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 3, 2.5), stDevs1 = c(1, 2, 2, 1.3), stDevs2 = c(1, 2, 2, 1.3) ) stageResults2 <- getStageResults(design = designInverseNormal, dataInput = dataExample9, stage = 2, thetaH0 = C_THETA_H0_MEANS_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT) ## ## Comparison of the results of StageResultsMeans object 'stageResults2' with expected results ## expect_equal(stageResults2$overallTestStatistics, c(-1.3266499, -1.1850988), tolerance = 1e-07) expect_equal(stageResults2$overallPValues, c(0.90410354, 0.87988596), tolerance = 1e-07) expect_equal(stageResults2$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(stageResults2$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143), tolerance = 1e-07) expect_equal(stageResults2$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(stageResults2$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056), tolerance = 1e-07) expect_equal(stageResults2$overallSampleSizes1, c(22, 33)) expect_equal(stageResults2$overallSampleSizes2, c(22, 35)) expect_equal(stageResults2$testStatistics, c(-1.3266499, -0.48819395), tolerance = 1e-07) expect_equal(stageResults2$pValues, c(0.90410354, 0.68487854), tolerance = 1e-07) expect_equal(stageResults2$effectSizes, c(-0.4, -0.40380952), tolerance = 1e-07) expect_equal(stageResults2$combInverseNormal, c(-1.3052935, -1.2633725), tolerance = 1e-07) expect_equal(stageResults2$weightsInverseNormal, c(0.70710678, 0.70710678), tolerance = 1e-07) }) test_that("'getStageResultsMeans' for a Fisher design and one or two treatments", { .skipTestifDisabled() designFisher <- getDesignFisher(kMax = 2, alpha = 0.025, alpha0Vec = 1, informationRates = c(0.5, 1), method = "equalAlpha") dataExample10 <- getDataset( n = c(10, 10), means = c(2, 3), stDevs = c(1, 1.5)) stageResults3 <- getStageResults(design = designFisher, dataInput = dataExample10, stage = 2, thetaH0 = C_THETA_H0_MEANS_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT) ## ## Comparison of the results of StageResultsMeans 
object 'stageResults3' with expected results ## expect_equal(stageResults3$overallTestStatistics, c(6.3245553, 8.3272484), tolerance = 1e-07) expect_equal(stageResults3$overallPValues, c(6.846828e-05, 4.5964001e-08), tolerance = 1e-07) expect_equal(stageResults3$overallMeans, c(2, 2.5), tolerance = 1e-07) expect_equal(stageResults3$overallStDevs, c(1, 1.3426212), tolerance = 1e-07) expect_equal(stageResults3$overallSampleSizes, c(10, 20)) expect_equal(stageResults3$testStatistics, c(6.3245553, 6.3245553), tolerance = 1e-07) expect_equal(stageResults3$pValues, c(6.846828e-05, 6.846828e-05), tolerance = 1e-07) expect_equal(stageResults3$effectSizes, c(2, 2.5), tolerance = 1e-07) expect_equal(stageResults3$combFisher, c(6.846828e-05, 4.6879053e-09), tolerance = 1e-07) expect_equal(stageResults3$weightsFisher, c(1, 1)) dataExample11 <- getDataset( n1 = c(22, 11, 22, 11), n2 = c(22, 13, 22, 13), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 3, 2.5), stDevs1 = c(1, 2, 2, 1.3), stDevs2 = c(1, 2, 2, 1.3) ) stageResults4 <- getStageResults(design = designFisher, dataInput = dataExample11, stage = 2, thetaH0 = C_THETA_H0_MEANS_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT) ## ## Comparison of the results of StageResultsMeans object 'stageResults4' with expected results ## expect_equal(stageResults4$overallTestStatistics, c(-1.3266499, -1.1850988), tolerance = 1e-07) expect_equal(stageResults4$overallPValues, c(0.90410354, 0.87988596), tolerance = 1e-07) expect_equal(stageResults4$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(stageResults4$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143), tolerance = 1e-07) expect_equal(stageResults4$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(stageResults4$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056), tolerance = 1e-07) expect_equal(stageResults4$overallSampleSizes1, c(22, 33)) expect_equal(stageResults4$overallSampleSizes2, c(22, 35)) expect_equal(stageResults4$testStatistics, c(-1.3266499, -0.48819395), tolerance = 1e-07) expect_equal(stageResults4$pValues, c(0.90410354, 0.68487854), tolerance = 1e-07) expect_equal(stageResults4$effectSizes, c(-0.4, -0.40380952), tolerance = 1e-07) expect_equal(stageResults4$combFisher, c(0.90410354, 0.61920111), tolerance = 1e-07) expect_equal(stageResults4$weightsFisher, c(1, 1)) }) rpact/tests/testthat/test-class_time.R0000644000176200001440000017061213567165663017641 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 07 November 2019, 10:22:23 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing class 'PiecewiseSurvivalTime'") test_that("Testing 'getPiecewiseSurvivalTime': isPiecewiseSurvivalEnabled()", { expect_false(getPiecewiseSurvivalTime()$isPiecewiseSurvivalEnabled()) expect_false(getPiecewiseSurvivalTime(piecewiseSurvivalTime = NA)$isPiecewiseSurvivalEnabled()) }) test_that("Testing 'getPiecewiseSurvivalTime': simple vector based definition", { pwSurvivalTime1 <- getPiecewiseSurvivalTime(lambda2 = 0.5, hazardRatio = 0.8) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime1' with expected results ## expect_equal(pwSurvivalTime1$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime1$lambda1, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime1$lambda2, 0.5, tolerance = 1e-07) expect_equal(pwSurvivalTime1$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime1$pi1, 0.99177025, tolerance = 1e-07) expect_equal(pwSurvivalTime1$pi2, 0.99752125, tolerance = 1e-07) expect_equal(pwSurvivalTime1$median1, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime1$median2, 1.3862944, tolerance = 1e-07) expect_equal(pwSurvivalTime1$eventTime, 12) expect_equal(pwSurvivalTime1$kappa, 1) expect_equal(pwSurvivalTime1$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime1$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime1$delayedResponseEnabled, FALSE) pwSurvivalTime2 <- getPiecewiseSurvivalTime(lambda2 = 0.5, lambda1 = 0.4) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime2' with expected results ## expect_equal(pwSurvivalTime2$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime2$lambda1, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime2$lambda2, 0.5, tolerance = 1e-07) expect_equal(pwSurvivalTime2$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime2$pi1, 0.99177025, tolerance = 1e-07) expect_equal(pwSurvivalTime2$pi2, 0.99752125, tolerance = 1e-07) expect_equal(pwSurvivalTime2$median1, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime2$median2, 1.3862944, tolerance = 1e-07) expect_equal(pwSurvivalTime2$eventTime, 12) expect_equal(pwSurvivalTime2$kappa, 1) expect_equal(pwSurvivalTime2$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime2$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime2$delayedResponseEnabled, FALSE) pwSurvivalTime2 <- getPiecewiseSurvivalTime(pi2 = 0.5, hazardRatio = 0.8) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime2' with expected results ## expect_equal(pwSurvivalTime2$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime2$lambda1, 0.046209812, tolerance = 1e-07) expect_equal(pwSurvivalTime2$lambda2, 0.057762265, tolerance = 1e-07) expect_equal(pwSurvivalTime2$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime2$pi1, 0.42565082, tolerance = 1e-07) expect_equal(pwSurvivalTime2$pi2, 0.5, tolerance = 1e-07) expect_equal(pwSurvivalTime2$median1, 15) expect_equal(pwSurvivalTime2$median2, 12) expect_equal(pwSurvivalTime2$eventTime, 12) 
expect_equal(pwSurvivalTime2$kappa, 1) expect_equal(pwSurvivalTime2$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime2$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime2$delayedResponseEnabled, FALSE) pwSurvivalTime2 <- getPiecewiseSurvivalTime(pi2 = 0.5, pi1 = 0.4) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime2' with expected results ## expect_equal(pwSurvivalTime2$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime2$lambda1, 0.042568802, tolerance = 1e-07) expect_equal(pwSurvivalTime2$lambda2, 0.057762265, tolerance = 1e-07) expect_equal(pwSurvivalTime2$hazardRatio, 0.73696559, tolerance = 1e-07) expect_equal(pwSurvivalTime2$pi1, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime2$pi2, 0.5, tolerance = 1e-07) expect_equal(pwSurvivalTime2$median1, 16.282985, tolerance = 1e-07) expect_equal(pwSurvivalTime2$median2, 12) expect_equal(pwSurvivalTime2$eventTime, 12) expect_equal(pwSurvivalTime2$kappa, 1) expect_equal(pwSurvivalTime2$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime2$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime2$delayedResponseEnabled, FALSE) pwSurvivalTime3 <- getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), lambda2 = 0.4, pi2 = 0.4) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime3' with expected results ## expect_equal(pwSurvivalTime3$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime3$lambda1, c(0.24, 0.32), tolerance = 1e-07) expect_equal(pwSurvivalTime3$lambda2, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime3$hazardRatio, c(0.6, 0.8), tolerance = 1e-07) expect_equal(pwSurvivalTime3$pi1, c(0.94386524, 0.9785064), tolerance = 1e-07) expect_equal(pwSurvivalTime3$pi2, 0.99177025, tolerance = 1e-07) expect_equal(pwSurvivalTime3$median1, c(2.8881133, 2.1660849), tolerance = 1e-07) expect_equal(pwSurvivalTime3$median2, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime3$eventTime, 12) expect_equal(pwSurvivalTime3$kappa, 1) expect_equal(pwSurvivalTime3$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime3$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime3$delayedResponseEnabled, FALSE) pwSurvivalTime4 <- getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), lambda2 = 0.4, pi2 = 0.4, pi1 = 0.3) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime4' with expected results ## expect_equal(pwSurvivalTime4$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime4$lambda1, c(0.24, 0.32), tolerance = 1e-07) expect_equal(pwSurvivalTime4$lambda2, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime4$hazardRatio, c(0.6, 0.8), tolerance = 1e-07) expect_equal(pwSurvivalTime4$pi1, c(0.94386524, 0.9785064), tolerance = 1e-07) expect_equal(pwSurvivalTime4$pi2, 0.99177025, tolerance = 1e-07) expect_equal(pwSurvivalTime4$median1, c(2.8881133, 2.1660849), tolerance = 1e-07) expect_equal(pwSurvivalTime4$median2, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime4$eventTime, 12) expect_equal(pwSurvivalTime4$kappa, 1) expect_equal(pwSurvivalTime4$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime4$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime4$delayedResponseEnabled, FALSE) pwSurvivalTime5 <- getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), lambda2 = 0.4, pi2 = 0.4, pi1 = 0.3) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime5' with expected results ## expect_equal(pwSurvivalTime5$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime5$lambda1, c(0.24, 0.32), tolerance 
= 1e-07) expect_equal(pwSurvivalTime5$lambda2, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime5$hazardRatio, c(0.6, 0.8), tolerance = 1e-07) expect_equal(pwSurvivalTime5$pi1, c(0.94386524, 0.9785064), tolerance = 1e-07) expect_equal(pwSurvivalTime5$pi2, 0.99177025, tolerance = 1e-07) expect_equal(pwSurvivalTime5$median1, c(2.8881133, 2.1660849), tolerance = 1e-07) expect_equal(pwSurvivalTime5$median2, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime5$eventTime, 12) expect_equal(pwSurvivalTime5$kappa, 1) expect_equal(pwSurvivalTime5$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime5$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime5$delayedResponseEnabled, FALSE) pwSurvivalTime6 <- getPiecewiseSurvivalTime(lambda2 = 0.4, lambda1 = 0.3, pi2 = 0.4, pi1 = 0.3) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime6' with expected results ## expect_equal(pwSurvivalTime6$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime6$lambda1, 0.3, tolerance = 1e-07) expect_equal(pwSurvivalTime6$lambda2, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime6$hazardRatio, 0.75, tolerance = 1e-07) expect_equal(pwSurvivalTime6$pi1, 0.97267628, tolerance = 1e-07) expect_equal(pwSurvivalTime6$pi2, 0.99177025, tolerance = 1e-07) expect_equal(pwSurvivalTime6$median1, 2.3104906, tolerance = 1e-07) expect_equal(pwSurvivalTime6$median2, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime6$eventTime, 12) expect_equal(pwSurvivalTime6$kappa, 1) expect_equal(pwSurvivalTime6$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime6$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime6$delayedResponseEnabled, FALSE) pwSurvivalTime7 <- getPiecewiseSurvivalTime(lambda2 = 0.4, lambda1 = 0.3, pi2 = 0.4, pi1 = 0.3) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime7' with expected results ## expect_equal(pwSurvivalTime7$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime7$lambda1, 0.3, tolerance = 1e-07) expect_equal(pwSurvivalTime7$lambda2, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime7$hazardRatio, 0.75, tolerance = 1e-07) expect_equal(pwSurvivalTime7$pi1, 0.97267628, tolerance = 1e-07) expect_equal(pwSurvivalTime7$pi2, 0.99177025, tolerance = 1e-07) expect_equal(pwSurvivalTime7$median1, 2.3104906, tolerance = 1e-07) expect_equal(pwSurvivalTime7$median2, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime7$eventTime, 12) expect_equal(pwSurvivalTime7$kappa, 1) expect_equal(pwSurvivalTime7$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime7$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime7$delayedResponseEnabled, FALSE) pwSurvivalTime8 <- getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), pi2 = 0.4, pi1 = 0.3) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime8' with expected results ## expect_equal(pwSurvivalTime8$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime8$lambda1, 0.029722912, tolerance = 1e-07) expect_equal(pwSurvivalTime8$lambda2, 0.042568802, tolerance = 1e-07) expect_equal(pwSurvivalTime8$hazardRatio, 0.69823229, tolerance = 1e-07) expect_equal(pwSurvivalTime8$pi1, 0.3, tolerance = 1e-07) expect_equal(pwSurvivalTime8$pi2, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime8$median1, 23.320299, tolerance = 1e-07) expect_equal(pwSurvivalTime8$median2, 16.282985, tolerance = 1e-07) expect_equal(pwSurvivalTime8$eventTime, 12) expect_equal(pwSurvivalTime8$kappa, 1) expect_equal(pwSurvivalTime8$piecewiseSurvivalEnabled, FALSE) 
expect_equal(pwSurvivalTime8$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime8$delayedResponseEnabled, FALSE) pwSurvivalTime9 <- getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), pi1 = 0.3) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime9' with expected results ## expect_equal(pwSurvivalTime9$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime9$lambda1, 0.029722912, tolerance = 1e-07) expect_equal(pwSurvivalTime9$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(pwSurvivalTime9$hazardRatio, 1.5984103, tolerance = 1e-07) expect_equal(pwSurvivalTime9$pi1, 0.3, tolerance = 1e-07) expect_equal(pwSurvivalTime9$pi2, 0.2, tolerance = 1e-07) expect_equal(pwSurvivalTime9$median1, 23.320299, tolerance = 1e-07) expect_equal(pwSurvivalTime9$median2, 37.275405, tolerance = 1e-07) expect_equal(pwSurvivalTime9$eventTime, 12) expect_equal(pwSurvivalTime9$kappa, 1) expect_equal(pwSurvivalTime9$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime9$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime9$delayedResponseEnabled, FALSE) pwSurvivalTime10 <- getPiecewiseSurvivalTime(median2 = 1.386294, hazardRatio = 0.8) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime10' with expected results ## expect_equal(pwSurvivalTime10$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime10$lambda1, 0.4000001, tolerance = 1e-07) expect_equal(pwSurvivalTime10$lambda2, 0.50000013, tolerance = 1e-07) expect_equal(pwSurvivalTime10$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime10$pi1, 0.99177026, tolerance = 1e-07) expect_equal(pwSurvivalTime10$pi2, 0.99752125, tolerance = 1e-07) expect_equal(pwSurvivalTime10$median1, 1.7328675, tolerance = 1e-07) expect_equal(pwSurvivalTime10$median2, 1.386294, tolerance = 1e-07) expect_equal(pwSurvivalTime10$eventTime, 12) expect_equal(pwSurvivalTime10$kappa, 1) expect_equal(pwSurvivalTime10$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime10$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime10$delayedResponseEnabled, FALSE) pwSurvivalTime11 <- getPiecewiseSurvivalTime(median2 = 1.386294, lambda1 = 0.4) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime11' with expected results ## expect_equal(pwSurvivalTime11$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime11$lambda1, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime11$lambda2, 0.50000013, tolerance = 1e-07) expect_equal(pwSurvivalTime11$hazardRatio, 0.79999979, tolerance = 1e-07) expect_equal(pwSurvivalTime11$pi1, 0.99177025, tolerance = 1e-07) expect_equal(pwSurvivalTime11$pi2, 0.99752125, tolerance = 1e-07) expect_equal(pwSurvivalTime11$median1, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime11$median2, 1.386294, tolerance = 1e-07) expect_equal(pwSurvivalTime11$eventTime, 12) expect_equal(pwSurvivalTime11$kappa, 1) expect_equal(pwSurvivalTime11$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime11$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime11$delayedResponseEnabled, FALSE) pwSurvivalTime12 <- getPiecewiseSurvivalTime(median2 = 5, median1 = 6) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime12' with expected results ## expect_equal(pwSurvivalTime12$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime12$lambda1, 0.11552453, tolerance = 1e-07) expect_equal(pwSurvivalTime12$lambda2, 0.13862944, tolerance = 1e-07) expect_equal(pwSurvivalTime12$hazardRatio, 0.83333333, tolerance = 1e-07) 
expect_equal(pwSurvivalTime12$pi1, 0.75, tolerance = 1e-07) expect_equal(pwSurvivalTime12$pi2, 0.81053543, tolerance = 1e-07) expect_equal(pwSurvivalTime12$median1, 6) expect_equal(pwSurvivalTime12$median2, 5) expect_equal(pwSurvivalTime12$eventTime, 12) expect_equal(pwSurvivalTime12$kappa, 1) expect_equal(pwSurvivalTime12$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime12$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime12$delayedResponseEnabled, FALSE) pwSurvivalTime13 <- getPiecewiseSurvivalTime(median2 = 1.386294, lambda1 = c(0.3, 0.4)) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime13' with expected results ## expect_equal(pwSurvivalTime13$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime13$lambda1, c(0.3, 0.4), tolerance = 1e-07) expect_equal(pwSurvivalTime13$lambda2, 0.50000013, tolerance = 1e-07) expect_equal(pwSurvivalTime13$hazardRatio, c(0.59999984, 0.79999979), tolerance = 1e-07) expect_equal(pwSurvivalTime13$pi1, c(0.97267628, 0.99177025), tolerance = 1e-07) expect_equal(pwSurvivalTime13$pi2, 0.99752125, tolerance = 1e-07) expect_equal(pwSurvivalTime13$median1, c(2.3104906, 1.732868), tolerance = 1e-07) expect_equal(pwSurvivalTime13$median2, 1.386294, tolerance = 1e-07) expect_equal(pwSurvivalTime13$eventTime, 12) expect_equal(pwSurvivalTime13$kappa, 1) expect_equal(pwSurvivalTime13$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime13$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime13$delayedResponseEnabled, FALSE) pwSurvivalTime14 <- getPiecewiseSurvivalTime(median2 = 5, median1 = c(6:8)) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime14' with expected results ## expect_equal(pwSurvivalTime14$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime14$lambda1, c(0.11552453, 0.099021026, 0.086643398), tolerance = 1e-07) expect_equal(pwSurvivalTime14$lambda2, 0.13862944, tolerance = 1e-07) expect_equal(pwSurvivalTime14$hazardRatio, c(0.83333333, 0.71428571, 0.625), tolerance = 1e-07) expect_equal(pwSurvivalTime14$pi1, c(0.75, 0.69524659, 0.64644661), tolerance = 1e-07) expect_equal(pwSurvivalTime14$pi2, 0.81053543, tolerance = 1e-07) expect_equal(pwSurvivalTime14$median1, c(6, 7, 8)) expect_equal(pwSurvivalTime14$median2, 5) expect_equal(pwSurvivalTime14$eventTime, 12) expect_equal(pwSurvivalTime14$kappa, 1) expect_equal(pwSurvivalTime14$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime14$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime14$delayedResponseEnabled, FALSE) pwSurvivalTime15 <- getPiecewiseSurvivalTime(median2 = 2, hazardRatio = 0.8) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime15' with expected results ## expect_equal(pwSurvivalTime15$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime15$lambda1, 0.27725887, tolerance = 1e-07) expect_equal(pwSurvivalTime15$lambda2, 0.34657359, tolerance = 1e-07) expect_equal(pwSurvivalTime15$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime15$pi1, 0.96410318, tolerance = 1e-07) expect_equal(pwSurvivalTime15$pi2, 0.984375, tolerance = 1e-07) expect_equal(pwSurvivalTime15$median1, 2.5, tolerance = 1e-07) expect_equal(pwSurvivalTime15$median2, 2) expect_equal(pwSurvivalTime15$eventTime, 12) expect_equal(pwSurvivalTime15$kappa, 1) expect_equal(pwSurvivalTime15$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime15$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime15$delayedResponseEnabled, FALSE) pwSurvivalTime16 <- 
getPiecewiseSurvivalTime(median1 = c(2, 3), hazardRatio = 0.8) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime16' with expected results ## expect_equal(pwSurvivalTime16$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime16$lambda1, c(0.34657359, 0.23104906), tolerance = 1e-07) expect_equal(pwSurvivalTime16$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(pwSurvivalTime16$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime16$pi1, c(0.984375, 0.9375), tolerance = 1e-07) expect_equal(pwSurvivalTime16$pi2, 0.2, tolerance = 1e-07) expect_equal(pwSurvivalTime16$median1, c(2, 3)) expect_equal(pwSurvivalTime16$median2, 37.275405, tolerance = 1e-07) expect_equal(pwSurvivalTime16$eventTime, 12) expect_equal(pwSurvivalTime16$kappa, 1) expect_equal(pwSurvivalTime16$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime16$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime16$delayedResponseEnabled, FALSE) pwSurvivalTime17 <- getPiecewiseSurvivalTime(median1 = c(2, 3), median2 = 4) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime17' with expected results ## expect_equal(pwSurvivalTime17$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime17$lambda1, c(0.34657359, 0.23104906), tolerance = 1e-07) expect_equal(pwSurvivalTime17$lambda2, 0.1732868, tolerance = 1e-07) expect_equal(pwSurvivalTime17$hazardRatio, c(2, 1.3333333), tolerance = 1e-07) expect_equal(pwSurvivalTime17$pi1, c(0.984375, 0.9375), tolerance = 1e-07) expect_equal(pwSurvivalTime17$pi2, 0.875, tolerance = 1e-07) expect_equal(pwSurvivalTime17$median1, c(2, 3)) expect_equal(pwSurvivalTime17$median2, 4) expect_equal(pwSurvivalTime17$eventTime, 12) expect_equal(pwSurvivalTime17$kappa, 1) expect_equal(pwSurvivalTime17$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime17$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime17$delayedResponseEnabled, FALSE) pwSurvivalTime18 <- getPiecewiseSurvivalTime(median1 = c(2, 3), lambda2 = 0.4) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime18' with expected results ## expect_equal(pwSurvivalTime18$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime18$lambda1, c(0.34657359, 0.23104906), tolerance = 1e-07) expect_equal(pwSurvivalTime18$lambda2, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime18$hazardRatio, c(0.86643398, 0.57762265), tolerance = 1e-07) expect_equal(pwSurvivalTime18$pi1, c(0.984375, 0.9375), tolerance = 1e-07) expect_equal(pwSurvivalTime18$pi2, 0.99177025, tolerance = 1e-07) expect_equal(pwSurvivalTime18$median1, c(2, 3)) expect_equal(pwSurvivalTime18$median2, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime18$eventTime, 12) expect_equal(pwSurvivalTime18$kappa, 1) expect_equal(pwSurvivalTime18$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime18$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime18$delayedResponseEnabled, FALSE) pwSurvivalTime19 <- getPiecewiseSurvivalTime(median1 = c(2, 3), pi2 = 0.4) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime19' with expected results ## expect_equal(pwSurvivalTime19$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime19$lambda1, c(0.34657359, 0.23104906), tolerance = 1e-07) expect_equal(pwSurvivalTime19$lambda2, 0.042568802, tolerance = 1e-07) expect_equal(pwSurvivalTime19$hazardRatio, c(8.1414927, 5.4276618), tolerance = 1e-07) expect_equal(pwSurvivalTime19$pi1, c(0.984375, 0.9375), tolerance = 1e-07) 
expect_equal(pwSurvivalTime19$pi2, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime19$median1, c(2, 3)) expect_equal(pwSurvivalTime19$median2, 16.282985, tolerance = 1e-07) expect_equal(pwSurvivalTime19$eventTime, 12) expect_equal(pwSurvivalTime19$kappa, 1) expect_equal(pwSurvivalTime19$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime19$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime19$delayedResponseEnabled, FALSE) pwSurvivalTime20 <- getPiecewiseSurvivalTime(median1 = c(2, 3), hazardRatio = 0.8) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime20' with expected results ## expect_equal(pwSurvivalTime20$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime20$lambda1, c(0.34657359, 0.23104906), tolerance = 1e-07) expect_equal(pwSurvivalTime20$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(pwSurvivalTime20$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime20$pi1, c(0.984375, 0.9375), tolerance = 1e-07) expect_equal(pwSurvivalTime20$pi2, 0.2, tolerance = 1e-07) expect_equal(pwSurvivalTime20$median1, c(2, 3)) expect_equal(pwSurvivalTime20$median2, 37.275405, tolerance = 1e-07) expect_equal(pwSurvivalTime20$eventTime, 12) expect_equal(pwSurvivalTime20$kappa, 1) expect_equal(pwSurvivalTime20$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime20$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime20$delayedResponseEnabled, FALSE) pwSurvivalTime21 <- getPiecewiseSurvivalTime(median1 = 3, hazardRatio = 0.8) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime21' with expected results ## expect_equal(pwSurvivalTime21$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime21$lambda1, 0.23104906, tolerance = 1e-07) expect_equal(pwSurvivalTime21$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(pwSurvivalTime21$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime21$pi1, 0.9375, tolerance = 1e-07) expect_equal(pwSurvivalTime21$pi2, 0.2, tolerance = 1e-07) expect_equal(pwSurvivalTime21$median1, 3) expect_equal(pwSurvivalTime21$median2, 37.275405, tolerance = 1e-07) expect_equal(pwSurvivalTime21$eventTime, 12) expect_equal(pwSurvivalTime21$kappa, 1) expect_equal(pwSurvivalTime21$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime21$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime21$delayedResponseEnabled, FALSE) getPiecewiseSurvivalTime(median1 = c(2, 3), lambda2 = 0.8) expect_error(getPiecewiseSurvivalTime(median2 = 1.386294, lambda2 = 0.4, hazardRatio = 0.8)) expect_error(getPiecewiseSurvivalTime(median2 = c(1.5, 1.7), lambda1 = c(0.3, 0.4))) }) test_that("Testing 'getPiecewiseSurvivalTime': vector based definition", { pwSurvivalTime1 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 6, 9), lambda2 = c(0.025, 0.04, 0.015), hazardRatio = 0.8) expect_equal(pwSurvivalTime1$hazardRatio, 0.8) expect_equal(pwSurvivalTime1$lambda1, c(0.025, 0.04, 0.015) * 0.8) expect_false(pwSurvivalTime1$isDelayedResponseEnabled()) pwSurvivalTime2 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.1, 0.2, 0.8), hazardRatio = 0.8) expect_true(pwSurvivalTime2$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime2$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime2$hazardRatio, 0.8) expect_equal(pwSurvivalTime2$piecewiseSurvivalTime, c(0, 5, 10)) expect_equal(pwSurvivalTime2$lambda2, c(0.1, 0.2, 0.8)) pwSurvivalTime3 <- getPiecewiseSurvivalTime(c(0, 6), lambda2 = c(0.01, 0.03), hazardRatio = 0.8) 
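# As the expectations below confirm, the piecewise survival time can also be
# passed as the first (unnamed) argument; the two-piece lambda2 vector and the
# single hazard ratio are taken over unchanged.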
expect_true(pwSurvivalTime3$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime3$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime3$hazardRatio, 0.8) expect_equal(pwSurvivalTime3$piecewiseSurvivalTime, c(0, 6)) expect_equal(pwSurvivalTime3$lambda2, c(0.01, 0.03)) pwSurvivalTime4 <- getPiecewiseSurvivalTime(0, lambda2 = 0.01, hazardRatio = 0.8) expect_true(pwSurvivalTime4$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime4$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime4$hazardRatio, 0.8) expect_equal(pwSurvivalTime4$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime4$lambda2, 0.01) expect_equal(pwSurvivalTime4$lambda1, 0.01 * 0.8) pwSurvivalTime5 <- getPiecewiseSurvivalTime(NA_real_, lambda2 = 0.01, hazardRatio = 0.8) expect_true(pwSurvivalTime5$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime5$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime5$hazardRatio, 0.8) expect_equal(pwSurvivalTime5$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime5$lambda2, 0.01) expect_equal(pwSurvivalTime5$lambda1, 0.01 * 0.8) pwSurvivalTime6 <- getPiecewiseSurvivalTime(0, lambda2 = 0.01, lambda1 = 0.008) expect_true(pwSurvivalTime6$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime6$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime6$hazardRatio, 0.8) expect_equal(pwSurvivalTime6$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime6$lambda2, 0.01) expect_equal(pwSurvivalTime6$lambda1, 0.008) pwSurvivalTime7 <- getPiecewiseSurvivalTime(NA_real_, lambda2 = 0.01, lambda1 = 0.008) expect_true(pwSurvivalTime7$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime7$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime7$hazardRatio, 0.8) expect_equal(pwSurvivalTime7$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime7$lambda2, 0.01) expect_equal(pwSurvivalTime7$lambda1, 0.008) # case 2.2 pwSurvivalTime9 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 6, 9), lambda2 = c(0.025, 0.04, 0.015), lambda1 = c(0.025, 0.04, 0.015) * 0.8) expect_true(pwSurvivalTime9$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime9$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime9$hazardRatio, 0.8) # case 2.2: error expected expect_error(getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 6, 9), lambda2 = c(0.025, 0.04, 0.015), lambda1 = c(0.03, 0.04, 0.025)), paste0("Illegal argument: 'hazardRatio' can only be calculated if ", "'unique(lambda1 / lambda2)' result in a single value; ", "current result = c(1.2, 1, 1.667) (delayed response is not allowed)"), fixed = TRUE) # case 3 expect_false(getPiecewiseSurvivalTime(delayedResponseAllowed = TRUE)$isPiecewiseSurvivalEnabled()) expect_false(getPiecewiseSurvivalTime(piecewiseSurvivalTime = NA, delayedResponseAllowed = TRUE)$isPiecewiseSurvivalEnabled()) # case 3.1 pwSurvivalTimeSim1 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 6, 9), lambda2 = c(0.025, 0.04, 0.015), hazardRatio = 0.8, delayedResponseAllowed = TRUE) expect_equal(pwSurvivalTimeSim1$hazardRatio, 0.8) expect_equal(pwSurvivalTimeSim1$lambda1, c(0.025, 0.04, 0.015) * 0.8) expect_false(pwSurvivalTimeSim1$isDelayedResponseEnabled()) # case 3.2 pwSurvivalTimeSim2 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 6, 9), lambda2 = c(0.025, 0.04, 0.015), lambda1 = c(0.03, 0.04, 0.025), delayedResponseAllowed = TRUE) expect_true(pwSurvivalTimeSim2$isPiecewiseSurvivalEnabled()) expect_true(pwSurvivalTimeSim2$isDelayedResponseEnabled()) expect_equal(pwSurvivalTimeSim2$hazardRatio, c(1.2, 1, 5/3)) pwsTime1 <- 
getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), lambda2 = 0.4) expect_equal(pwsTime1$.isLambdaBased(minNumberOfLambdas = 1), TRUE) }) test_that("Testing 'getPiecewiseSurvivalTime': check warnings", { expect_warning(getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), lambda2 = 0.4, pi2 = 0.4), "'pi2' (0.4) will be ignored", fixed = TRUE) expect_warning(getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), lambda2 = 0.4, pi2 = 0.4, pi1 = 0.3), "'pi1' (0.3) will be ignored", fixed = TRUE) expect_warning(getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), lambda2 = 0.4, pi2 = 0.4, pi1 = 0.3), "'pi2' (0.4) will be ignored", fixed = TRUE) expect_warning(getPiecewiseSurvivalTime(lambda2 = 0.4, lambda1 = 0.3, pi2 = 0.4, pi1 = 0.3), "'pi1' (0.3) will be ignored", fixed = TRUE) expect_warning(getPiecewiseSurvivalTime(lambda2 = 0.4, lambda1 = 0.3, pi2 = 0.4, pi1 = 0.3), "'pi2' (0.4) will be ignored", fixed = TRUE) expect_equal(getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), pi2 = 0.4)$.isPiBased(), TRUE) expect_warning(getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), pi2 = 0.4, pi1 = 0.3), "'hazardRatio' (0.6, 0.8) will be ignored because it will be calculated", fixed = TRUE) expect_warning(getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), pi1 = 0.3), "'hazardRatio' (0.6, 0.8) will be ignored because it will be calculated", fixed = TRUE) }) test_that("Testing 'getPiecewiseSurvivalTime': list-wise definition", { pwSurvivalTime8 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = list( "<6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, "15 - <21" = 0.01, ">=21" = 0.007), hazardRatio = 0.6) expect_true(pwSurvivalTime8$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime8$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime8$hazardRatio, 0.6) expect_equal(pwSurvivalTime8$piecewiseSurvivalTime, c(0, 6,9, 15, 21)) expect_equal(pwSurvivalTime8$lambda2, c(0.025, 0.040, 0.015, 0.010, 0.007)) expect_equal(pwSurvivalTime8$lambda1, c(0.0150, 0.0240, 0.0090, 0.0060, 0.0042)) result1 <- getPiecewiseSurvivalTime(list( "<5" = 0.1, "5 - <10" = 0.2, ">=10" = 0.8), hazardRatio = 0.8) expect_equal(result1$piecewiseSurvivalTime, c(0, 5, 10)) expect_equal(result1$lambda2, c(0.1, 0.2, 0.8)) result2 <- getPiecewiseSurvivalTime(list( "0 - <5" = 0.1, "5 - <10" = 0.2, "10 - Inf" = 0.8), hazardRatio = 0.8) expect_equal(result2$piecewiseSurvivalTime, c(0, 5, 10)) expect_equal(result2$lambda2, c(0.1, 0.2, 0.8)) pwSurvivalTime2 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.1, 0.2, 0.8), hazardRatio = 0.8) expect_equal(pwSurvivalTime2$piecewiseSurvivalTime, c(0, 5, 10)) expect_equal(pwSurvivalTime2$lambda2, c(0.1, 0.2, 0.8)) pwSurvivalTime3 <- getPiecewiseSurvivalTime(c(0, 6), lambda2 = c(0.01, 0.03), hazardRatio = 0.8) expect_equal(pwSurvivalTime3$piecewiseSurvivalTime, c(0, 6)) expect_equal(pwSurvivalTime3$lambda2, c(0.01, 0.03)) }) context("Testing class 'AccrualTime'") test_that("Testing 'getAccrualTime': isAccrualTimeEnabled()", { expect_true(getAccrualTime()$isAccrualTimeEnabled()) expect_true(getAccrualTime(maxNumberOfSubjects = 100)$isAccrualTimeEnabled()) }) test_that("Testing 'getAccrualTime': vector based definition", { accrualTime1 <- getAccrualTime(accrualTime = c(0, 6, 9, 15), accrualIntensity = c(15, 21, 27), maxNumberOfSubjects = 315) expect_equal(accrualTime1$accrualTime, c(0, 6, 9, 15)) expect_equal(accrualTime1$accrualIntensity, c(15, 21, 27)) expect_equal(accrualTime1$remainingTime, 6) accrualTime2 <- getAccrualTime(accrualTime = c(0, 6, 9), accrualIntensity = 
c(15, 21, 27), maxNumberOfSubjects = 1000) expect_equal(accrualTime2$accrualTime, c(0, 6, 9, 40.37037)) expect_equal(accrualTime2$accrualIntensity, c(15, 21, 27)) expect_equal(accrualTime2$remainingTime, 31.37037) accrualTime3 <- getAccrualTime(accrualTime = c(0, 12, 13, 14, 15, 16), accrualIntensity = c(15, 21, 27, 33, 39, 45), maxNumberOfSubjects = 1405) expect_equal(accrualTime3$accrualTime, c( 0, 12, 13, 14, 15, 16, 40.55555556)) expect_equal(accrualTime3$accrualIntensity, c(15, 21, 27, 33, 39, 45)) expect_equal(accrualTime3$remainingTime, 24.55555556) accrualTime4 <- getAccrualTime(accrualTime = c(0, 24), accrualIntensity = c(30, 45), maxNumberOfSubjects = 720) ## ## Comparison of the results of AccrualTime object 'accrualTime4' with expected results ## expect_equal(accrualTime4$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime4$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime4$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime4$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime4$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime4$accrualTime, c(0, 24)) expect_equal(accrualTime4$accrualIntensity, 30) expect_equal(accrualTime4$accrualIntensityRelative, NA_real_) expect_equal(accrualTime4$maxNumberOfSubjects, 720) expect_equal(accrualTime4$remainingTime, 24) expect_equal(accrualTime4$piecewiseAccrualEnabled, FALSE) accrualTime5 <- getAccrualTime(accrualTime = c(0, 24, 30), accrualIntensity = c(30, 45)) ## ## Comparison of the results of AccrualTime object 'accrualTime5' with expected results ## expect_equal(accrualTime5$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime5$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime5$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime5$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime5$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime5$accrualTime, c(0, 24, 30)) expect_equal(accrualTime5$accrualIntensity, c(30, 45)) expect_equal(accrualTime5$accrualIntensityRelative, NA_real_) expect_equal(accrualTime5$maxNumberOfSubjects, 990) expect_equal(accrualTime5$remainingTime, 6) expect_equal(accrualTime5$piecewiseAccrualEnabled, TRUE) accrualTime6 <- getAccrualTime(accrualTime = c(0, 24, 30), accrualIntensity = c(30, 45, 55), maxNumberOfSubjects = 720) ## ## Comparison of the results of AccrualTime object 'accrualTime6' with expected results ## expect_equal(accrualTime6$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime6$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime6$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime6$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime6$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime6$accrualTime, c(0, 24)) expect_equal(accrualTime6$accrualIntensity, 30) expect_equal(accrualTime6$accrualIntensityRelative, NA_real_) expect_equal(accrualTime6$maxNumberOfSubjects, 720) expect_equal(accrualTime6$remainingTime, 24) expect_equal(accrualTime6$piecewiseAccrualEnabled, FALSE) accrualTime7 <- getAccrualTime(accrualTime = c(0, 24, 30, 40), accrualIntensity = c(30, 45, 55, 66), maxNumberOfSubjects = 720) ## ## Comparison of the results of AccrualTime object 'accrualTime7' with expected results ## expect_equal(accrualTime7$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime7$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime7$maxNumberOfSubjectsIsUserDefined, TRUE) 
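## Plausibility: 24 months at 30 subjects/month already give 24 * 30 = 720 = maxNumberOfSubjects,
## so accrual ends at t = 24 and the later accrual time/intensity values are dropped
## (the corresponding warnings are covered in 'check expected warnings and errors' below).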
expect_equal(accrualTime7$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime7$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime7$accrualTime, c(0, 24)) expect_equal(accrualTime7$accrualIntensity, 30) expect_equal(accrualTime7$accrualIntensityRelative, NA_real_) expect_equal(accrualTime7$maxNumberOfSubjects, 720) expect_equal(accrualTime7$remainingTime, 24) expect_equal(accrualTime7$piecewiseAccrualEnabled, FALSE) accrualTime8 <- getAccrualTime(accrualTime = 0, accrualIntensity = 15, maxNumberOfSubjects = 1000) ## ## Comparison of the results of AccrualTime object 'accrualTime8' with expected results ## expect_equal(accrualTime8$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime8$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime8$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime8$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime8$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime8$accrualTime, c(0, 66.666667), tolerance = 1e-07) expect_equal(accrualTime8$accrualIntensity, 15) expect_equal(accrualTime8$accrualIntensityRelative, NA_real_) expect_equal(accrualTime8$maxNumberOfSubjects, 1000) expect_equal(accrualTime8$remainingTime, 66.666667, tolerance = 1e-07) expect_equal(accrualTime8$piecewiseAccrualEnabled, FALSE) accrualTime9 <- getAccrualTime(accrualTime = c(0, 5), accrualIntensity = 15) ## ## Comparison of the results of AccrualTime object 'accrualTime9' with expected results ## expect_equal(accrualTime9$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime9$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime9$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime9$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime9$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime9$accrualTime, c(0, 5)) expect_equal(accrualTime9$accrualIntensity, 15) expect_equal(accrualTime9$accrualIntensityRelative, NA_real_) expect_equal(accrualTime9$maxNumberOfSubjects, 75) expect_equal(accrualTime9$remainingTime, 5) expect_equal(accrualTime9$piecewiseAccrualEnabled, FALSE) accrualTime10 <- getAccrualTime(accrualTime = 0, accrualIntensity = 15, maxNumberOfSubjects = 10) ## ## Comparison of the results of AccrualTime object 'accrualTime10' with expected results ## expect_equal(accrualTime10$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime10$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime10$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime10$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime10$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime10$accrualTime, c(0, 0.66666667), tolerance = 1e-07) expect_equal(accrualTime10$accrualIntensity, 15) expect_equal(accrualTime10$accrualIntensityRelative, NA_real_) expect_equal(accrualTime10$maxNumberOfSubjects, 10) expect_equal(accrualTime10$remainingTime, 0.66666667, tolerance = 1e-07) expect_equal(accrualTime10$piecewiseAccrualEnabled, FALSE) accrualTime11 <- getAccrualTime(accrualTime = c(0, 5), accrualIntensity = 15, maxNumberOfSubjects = 10) ## ## Comparison of the results of AccrualTime object 'accrualTime11' with expected results ## expect_equal(accrualTime11$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime11$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime11$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime11$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) 
expect_equal(accrualTime11$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime11$accrualTime, c(0, 0.66666667), tolerance = 1e-07) expect_equal(accrualTime11$accrualIntensity, 15) expect_equal(accrualTime11$accrualIntensityRelative, NA_real_) expect_equal(accrualTime11$maxNumberOfSubjects, 10) expect_equal(accrualTime11$remainingTime, 0.66666667, tolerance = 1e-07) expect_equal(accrualTime11$piecewiseAccrualEnabled, FALSE) accrualTime12 <- getAccrualTime(accrualTime = c(0, 6, 15, 25), accrualIntensity = c(22, 0, 33)) ## ## Comparison of the results of AccrualTime object 'accrualTime12' with expected results ## expect_equal(accrualTime12$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime12$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime12$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime12$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime12$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime12$accrualTime, c(0, 6, 15, 25)) expect_equal(accrualTime12$accrualIntensity, c(22, 0, 33)) expect_equal(accrualTime12$accrualIntensityRelative, NA_real_) expect_equal(accrualTime12$maxNumberOfSubjects, 462) expect_equal(accrualTime12$remainingTime, 10) expect_equal(accrualTime12$piecewiseAccrualEnabled, TRUE) accrualTime13 <- getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(22, 33), maxNumberOfSubjects = 1000) ## ## Comparison of the results of AccrualTime object 'accrualTime13' with expected results ## expect_equal(accrualTime13$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime13$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime13$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime13$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime13$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime13$accrualTime, c(0, 6, 32.30303), tolerance = 1e-07) expect_equal(accrualTime13$accrualIntensity, c(22, 33)) expect_equal(accrualTime13$accrualIntensityRelative, NA_real_) expect_equal(accrualTime13$maxNumberOfSubjects, 1000) expect_equal(accrualTime13$remainingTime, 26.30303, tolerance = 1e-07) expect_equal(accrualTime13$piecewiseAccrualEnabled, TRUE) }) test_that("Testing 'getAccrualTime': test absolute and relative definition", { accrualTime1 <- getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(22, 33), maxNumberOfSubjects = 924) ## ## Comparison of the results of AccrualTime object 'accrualTime1' with expected results ## expect_equal(accrualTime1$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime1$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime1$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime1$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime1$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime1$accrualTime, c(0, 6, 30)) expect_equal(accrualTime1$accrualIntensity, c(22, 33)) expect_equal(accrualTime1$accrualIntensityRelative, NA_real_) expect_equal(accrualTime1$maxNumberOfSubjects, 924) expect_equal(accrualTime1$remainingTime, 24) expect_equal(accrualTime1$piecewiseAccrualEnabled, TRUE) accrualTime2 <- getAccrualTime(list( "0 - <6" = 22, "6 - <=30" = 33), maxNumberOfSubjects = 924) ## ## Comparison of the results of AccrualTime object 'accrualTime2' with expected results ## expect_equal(accrualTime2$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime2$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime2$maxNumberOfSubjectsIsUserDefined, TRUE) 
expect_equal(accrualTime2$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime2$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime2$accrualTime, c(0, 6, 30)) expect_equal(accrualTime2$accrualIntensity, c(22, 33)) expect_equal(accrualTime2$accrualIntensityRelative, NA_real_) expect_equal(accrualTime2$maxNumberOfSubjects, 924) expect_equal(accrualTime2$remainingTime, 24) expect_equal(accrualTime2$piecewiseAccrualEnabled, TRUE) accrualTime3 <- getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(0.22, 0.33), maxNumberOfSubjects = 1000) ## ## Comparison of the results of AccrualTime object 'accrualTime3' with expected results ## expect_equal(accrualTime3$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime3$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime3$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime3$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime3$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime3$accrualTime, c(0, 6, 30)) expect_equal(accrualTime3$accrualIntensity, c(23.809524, 35.714286), tolerance = 1e-07) expect_equal(accrualTime3$accrualIntensityRelative, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime3$maxNumberOfSubjects, 1000) expect_equal(accrualTime3$remainingTime, 24) expect_equal(accrualTime3$piecewiseAccrualEnabled, TRUE) accrualTime4 <- getAccrualTime(list( "0 - <6" = 0.22, "6 - <=30" = 0.33), maxNumberOfSubjects = 1000) ## ## Comparison of the results of AccrualTime object 'accrualTime4' with expected results ## expect_equal(accrualTime4$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime4$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime4$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime4$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime4$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime4$accrualTime, c(0, 6, 30)) expect_equal(accrualTime4$accrualIntensity, c(23.809524, 35.714286), tolerance = 1e-07) expect_equal(accrualTime4$accrualIntensityRelative, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime4$maxNumberOfSubjects, 1000) expect_equal(accrualTime4$remainingTime, 24) expect_equal(accrualTime4$piecewiseAccrualEnabled, TRUE) accrualTime5 <- getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(22, 33)) ## ## Comparison of the results of AccrualTime object 'accrualTime5' with expected results ## expect_equal(accrualTime5$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime5$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime5$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime5$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime5$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime5$accrualTime, c(0, 6, 30)) expect_equal(accrualTime5$accrualIntensity, c(22, 33)) expect_equal(accrualTime5$accrualIntensityRelative, NA_real_) expect_equal(accrualTime5$maxNumberOfSubjects, 924) expect_equal(accrualTime5$remainingTime, 24) expect_equal(accrualTime5$piecewiseAccrualEnabled, TRUE) accrualTime6 <- getAccrualTime(list( "0 - <6" = 22, "6 - <=30" = 33)) ## ## Comparison of the results of AccrualTime object 'accrualTime6' with expected results ## expect_equal(accrualTime6$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime6$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime6$maxNumberOfSubjectsIsUserDefined, FALSE) 
expect_equal(accrualTime6$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime6$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime6$accrualTime, c(0, 6, 30)) expect_equal(accrualTime6$accrualIntensity, c(22, 33)) expect_equal(accrualTime6$accrualIntensityRelative, NA_real_) expect_equal(accrualTime6$maxNumberOfSubjects, 924) expect_equal(accrualTime6$remainingTime, 24) expect_equal(accrualTime6$piecewiseAccrualEnabled, TRUE) accrualTime7 <- getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(0.22, 0.33)) ## ## Comparison of the results of AccrualTime object 'accrualTime7' with expected results ## expect_equal(accrualTime7$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime7$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime7$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime7$maxNumberOfSubjectsCanBeCalculatedDirectly, FALSE) expect_equal(accrualTime7$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime7$accrualTime, c(0, 6, 30)) expect_equal(accrualTime7$accrualIntensity, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime7$accrualIntensityRelative, NA_real_) expect_equal(accrualTime7$maxNumberOfSubjects, NA_real_) expect_equal(accrualTime7$remainingTime, NA_real_) expect_equal(accrualTime7$piecewiseAccrualEnabled, TRUE) accrualTime8 <- getAccrualTime(list( "0 - <6" = 0.22, "6 - <=30" = 0.33)) ## ## Comparison of the results of AccrualTime object 'accrualTime8' with expected results ## expect_equal(accrualTime8$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime8$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime8$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime8$maxNumberOfSubjectsCanBeCalculatedDirectly, FALSE) expect_equal(accrualTime8$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime8$accrualTime, c(0, 6, 30)) expect_equal(accrualTime8$accrualIntensity, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime8$accrualIntensityRelative, NA_real_) expect_equal(accrualTime8$maxNumberOfSubjects, NA_real_) expect_equal(accrualTime8$remainingTime, NA_real_) expect_equal(accrualTime8$piecewiseAccrualEnabled, TRUE) accrualTime9 <- getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(22, 33), maxNumberOfSubjects = 1000) ## ## Comparison of the results of AccrualTime object 'accrualTime9' with expected results ## expect_equal(accrualTime9$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime9$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime9$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime9$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime9$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime9$accrualTime, c(0, 6, 32.30303), tolerance = 1e-07) expect_equal(accrualTime9$accrualIntensity, c(22, 33)) expect_equal(accrualTime9$accrualIntensityRelative, NA_real_) expect_equal(accrualTime9$maxNumberOfSubjects, 1000) expect_equal(accrualTime9$remainingTime, 26.30303, tolerance = 1e-07) expect_equal(accrualTime9$piecewiseAccrualEnabled, TRUE) accrualTime10 <- getAccrualTime(list( "0 - <6" = 22, "6" = 33), maxNumberOfSubjects = 1000) ## ## Comparison of the results of AccrualTime object 'accrualTime10' with expected results ## expect_equal(accrualTime10$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime10$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime10$maxNumberOfSubjectsIsUserDefined, TRUE) 
expect_equal(accrualTime10$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime10$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime10$accrualTime, c(0, 6, 32.30303), tolerance = 1e-07) expect_equal(accrualTime10$accrualIntensity, c(22, 33)) expect_equal(accrualTime10$accrualIntensityRelative, NA_real_) expect_equal(accrualTime10$maxNumberOfSubjects, 1000) expect_equal(accrualTime10$remainingTime, 26.30303, tolerance = 1e-07) expect_equal(accrualTime10$piecewiseAccrualEnabled, TRUE) accrualTime11 <- getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(0.22, 0.33), maxNumberOfSubjects = 1000) ## ## Comparison of the results of AccrualTime object 'accrualTime11' with expected results ## expect_equal(accrualTime11$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime11$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime11$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime11$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime11$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime11$accrualTime, c(0, 6)) expect_equal(accrualTime11$accrualIntensity, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime11$accrualIntensityRelative, NA_real_) expect_equal(accrualTime11$maxNumberOfSubjects, 1000) expect_equal(accrualTime11$remainingTime, NA_real_) expect_equal(accrualTime11$piecewiseAccrualEnabled, TRUE) accrualTime12 <- getAccrualTime(list( "0 - <6" = 0.22, "6" = 0.33), maxNumberOfSubjects = 1000) ## ## Comparison of the results of AccrualTime object 'accrualTime12' with expected results ## expect_equal(accrualTime12$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime12$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime12$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime12$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime12$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime12$accrualTime, c(0, 6)) expect_equal(accrualTime12$accrualIntensity, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime12$accrualIntensityRelative, NA_real_) expect_equal(accrualTime12$maxNumberOfSubjects, 1000) expect_equal(accrualTime12$remainingTime, NA_real_) expect_equal(accrualTime12$piecewiseAccrualEnabled, TRUE) accrualTime13 <- getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(22, 33)) ## ## Comparison of the results of AccrualTime object 'accrualTime13' with expected results ## expect_equal(accrualTime13$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime13$followUpTimeMustBeUserDefined, TRUE) expect_equal(accrualTime13$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime13$maxNumberOfSubjectsCanBeCalculatedDirectly, FALSE) expect_equal(accrualTime13$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime13$accrualTime, c(0, 6)) expect_equal(accrualTime13$accrualIntensity, c(22, 33)) expect_equal(accrualTime13$accrualIntensityRelative, NA_real_) expect_equal(accrualTime13$maxNumberOfSubjects, NA_real_) expect_equal(accrualTime13$remainingTime, NA_real_) expect_equal(accrualTime13$piecewiseAccrualEnabled, FALSE) accrualTime14 <- getAccrualTime(list( "0 - <6" = 22, "6" = 33)) ## ## Comparison of the results of AccrualTime object 'accrualTime14' with expected results ## expect_equal(accrualTime14$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime14$followUpTimeMustBeUserDefined, TRUE) expect_equal(accrualTime14$maxNumberOfSubjectsIsUserDefined, FALSE) 
expect_equal(accrualTime14$maxNumberOfSubjectsCanBeCalculatedDirectly, FALSE) expect_equal(accrualTime14$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime14$accrualTime, c(0, 6)) expect_equal(accrualTime14$accrualIntensity, c(22, 33)) expect_equal(accrualTime14$accrualIntensityRelative, NA_real_) expect_equal(accrualTime14$maxNumberOfSubjects, NA_real_) expect_equal(accrualTime14$remainingTime, NA_real_) expect_equal(accrualTime14$piecewiseAccrualEnabled, FALSE) accrualTime15 <- getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(0.22, 0.33)) ## ## Comparison of the results of AccrualTime object 'accrualTime15' with expected results ## expect_equal(accrualTime15$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime15$followUpTimeMustBeUserDefined, TRUE) expect_equal(accrualTime15$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime15$maxNumberOfSubjectsCanBeCalculatedDirectly, FALSE) expect_equal(accrualTime15$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime15$accrualTime, c(0, 6)) expect_equal(accrualTime15$accrualIntensity, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime15$accrualIntensityRelative, NA_real_) expect_equal(accrualTime15$maxNumberOfSubjects, NA_real_) expect_equal(accrualTime15$remainingTime, NA_real_) expect_equal(accrualTime15$piecewiseAccrualEnabled, FALSE) accrualTime16 <- getAccrualTime(list( "0 - <6" = 0.22, "6" = 0.33)) ## ## Comparison of the results of AccrualTime object 'accrualTime16' with expected results ## expect_equal(accrualTime16$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime16$followUpTimeMustBeUserDefined, TRUE) expect_equal(accrualTime16$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime16$maxNumberOfSubjectsCanBeCalculatedDirectly, FALSE) expect_equal(accrualTime16$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime16$accrualTime, c(0, 6)) expect_equal(accrualTime16$accrualIntensity, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime16$accrualIntensityRelative, NA_real_) expect_equal(accrualTime16$maxNumberOfSubjects, NA_real_) expect_equal(accrualTime16$remainingTime, NA_real_) expect_equal(accrualTime16$piecewiseAccrualEnabled, FALSE) }) test_that("Testing 'getAccrualTime': check expected warnings and errors", { expect_warning(getAccrualTime(accrualTime = c(0, 24), accrualIntensity = c(30, 45), maxNumberOfSubjects = 720), "Last accrual intensity value (45) ignored", fixed = TRUE) expect_warning(getAccrualTime(accrualTime = c(0, 24, 30), accrualIntensity = c(30, 45, 55), maxNumberOfSubjects = 720), "Last 2 accrual intensity values (45, 55) ignored", fixed = TRUE) expect_warning(getAccrualTime(accrualTime = c(0, 24, 30, 40), accrualIntensity = c(30, 45, 55, 66), maxNumberOfSubjects = 720), "Last 2 accrual time values (30, 40) ignored", fixed = TRUE) expect_warning(getAccrualTime(accrualTime = c(0, 24, 30, 40), accrualIntensity = c(30, 45, 55, 66), maxNumberOfSubjects = 720), "Last 3 accrual intensity values (45, 55, 66) ignored", fixed = TRUE) expect_warning(getAccrualTime(accrualTime = c(0, 6, 15, 25), accrualIntensity = c(0, 22, 33)), "It makes no sense to start 'accrualIntensity' (0, 22, 33) with 0", fixed = TRUE) expect_error(getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(0)), "Illegal argument: at least one 'accrualIntensity' value must be > 0", fixed = TRUE) expect_error(getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(22, 33), maxNumberOfSubjects = 1000), paste0("Conflicting arguments: 'maxNumberOfSubjects' 
(1000) disagrees with the defined ", "accrual time and intensity: 6 * 22 + 24 * 33 = 924"), fixed = TRUE) }) test_that("Testing 'getAccrualTime': list-wise definition", { accrualTime <- list( "0 - <12" = 15, "12 - <13" = 21, "13 - <14" = 27, "14 - <15" = 33, "15 - <16" = 39, ">=16" = 45) accrualTime4 <- getAccrualTime(accrualTime = accrualTime, maxNumberOfSubjects = 1405) expect_equal(accrualTime4$accrualTime, c( 0, 12, 13, 14, 15, 16, 40.55555556)) expect_equal(accrualTime4$accrualIntensity, c(15, 21, 27, 33, 39, 45)) expect_equal(accrualTime4$remainingTime, 24.55555556) accrualTime <- list( "0 - <12" = 15, "12 - <13" = 21, "13 - <14" = 27, "14 - <15" = 33, "15 - <16" = 39, "16 - ?" = 45) accrualTime5 <- getAccrualTime(accrualTime = accrualTime, maxNumberOfSubjects = 1405) expect_equal(accrualTime5$accrualTime, c( 0, 12, 13, 14, 15, 16, 40.55555556)) expect_equal(accrualTime5$accrualIntensity, c(15, 21, 27, 33, 39, 45)) expect_equal(accrualTime5$remainingTime, 24.55555556) accrualTime <- list( "0 - <11" = 20, "11 - <16" = 40, ">=16" = 60) accrualTime6 <- getAccrualTime(accrualTime = accrualTime, maxNumberOfSubjects = 800) expect_equal(accrualTime6$accrualTime, c(0, 11, 16, 22.3333333)) expect_equal(accrualTime6$accrualIntensity, c(20, 40, 60)) expect_equal(accrualTime6$remainingTime, 6.33333333) accrualTime <- list( "0 - <11" = 20, "11 - <16" = 40, "16 - ?" = 60) accrualTime7 <- getAccrualTime(accrualTime = accrualTime, maxNumberOfSubjects = 800) expect_equal(accrualTime7$accrualTime, c(0, 11, 16, 22.3333333)) expect_equal(accrualTime7$accrualIntensity, c(20, 40, 60)) expect_equal(accrualTime7$remainingTime, 6.33333333) }) rpact/tests/testthat/test-f_core_utilities.R0000644000176200001440000015064613567165663021053 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 06 November 2019, 17:12:24 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing core utility functions") test_that("'getValidatedInformationRates': 'informationRates' must be generated correctly based on specified 'kMax'", { design1 <- getTestDesign(kMax = 1L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design1), 1, tolerance = 1e-08) design2 <- getTestDesign(kMax = 2L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design2), c(0.5, 1), tolerance = 1e-08) design3 <- getTestDesign(kMax = 3L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design3), c(0.33333333, 0.66666667, 1), tolerance = 1e-08) design4 <- getTestDesign(kMax = 4L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design4), c(0.25, 0.5, 0.75, 1), tolerance = 1e-08) design5 <- getTestDesign(kMax = 5L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design5), c(0.2, 0.4, 0.6, 0.8, 1), tolerance = 1e-08) design6 <- getTestDesign(kMax = 6L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design6), c(0.16666667, 0.33333333, 0.5, 0.66666667, 0.83333333, 1), tolerance = 1e-08) design7 <- getTestDesign(kMax = 1L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design7), 1, tolerance = 1e-08) design8 <- getTestDesign(kMax = 2L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design8), c(0.5, 1), tolerance = 1e-08) design9 <- getTestDesign(kMax = 3L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design9), c(0.33333333, 0.66666667, 1), tolerance = 1e-08) design10 <- getTestDesign(kMax = 4L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design10), c(0.25, 0.5, 0.75, 1), tolerance = 1e-08) design11 <- getTestDesign(kMax = 5L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design11), c(0.2, 0.4, 0.6, 0.8, 1), tolerance = 1e-08) design12 <- getTestDesign(kMax = 6L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design12), c(0.16666667, 0.33333333, 0.5, 0.66666667, 0.83333333, 1), tolerance = 1e-08) design13 <- getTestDesign(kMax = 1L, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design13), 1, tolerance = 1e-08) design14 <- getTestDesign(kMax = 2L, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design14), c(0.5, 1), tolerance = 1e-08) design15 <- getTestDesign(kMax = 3L, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design15), c(0.33333333, 0.66666667, 1), tolerance = 1e-08) design16 <- getTestDesign(kMax = 4L, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design16), c(0.25, 0.5, 0.75, 1), tolerance = 1e-08) design17 <- getTestDesign(kMax = 5L, designClass = "TrialDesignFisher") 
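## The generated information rates are equidistant, (1:kMax)/kMax; e.g., kMax = 5 yields c(0.2, 0.4, 0.6, 0.8, 1).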
expect_equal(.getValidatedInformationRates(design17), c(0.2, 0.4, 0.6, 0.8, 1), tolerance = 1e-08) design18 <- getTestDesign(kMax = 6L, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design18), c(0.16666667, 0.33333333, 0.5, 0.66666667, 0.83333333, 1), tolerance = 1e-08) }) test_that("'getValidatedInformationRates': 'informationRates' must be set correctly based on specified 'informationRates'", { design19 <- getTestDesign(informationRates = 1, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design19), 1, tolerance = 1e-07) design20 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design20), c(0.4, 1), tolerance = 1e-07) design21 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design21), c(0.26666667, 0.53333333, 1), tolerance = 1e-07) design22 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design22), c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) design23 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design23), c(0.16, 0.32, 0.48, 0.64, 1), tolerance = 1e-07) design24 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design24), c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), tolerance = 1e-07) design25 <- getTestDesign(informationRates = 1, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design25), 1, tolerance = 1e-07) design26 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design26), c(0.4, 1), tolerance = 1e-07) design27 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design27), c(0.26666667, 0.53333333, 1), tolerance = 1e-07) design28 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design28), c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) design29 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design29), c(0.16, 0.32, 0.48, 0.64, 1), tolerance = 1e-07) design30 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design30), c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), tolerance = 1e-07) design31 <- getTestDesign(informationRates = 1, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design31), 1, tolerance = 1e-07) design32 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design32), c(0.4, 1), tolerance = 1e-07) design33 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design33), c(0.26666667, 0.53333333, 1), tolerance = 1e-07) design34 <- 
getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design34), c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) design35 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design35), c(0.16, 0.32, 0.48, 0.64, 1), tolerance = 1e-07) design36 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design36), c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), tolerance = 1e-07) design37 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design37), c(0.5, 1), tolerance = 1e-07) design38 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design38), c(0.33333333, 0.66666667, 1), tolerance = 1e-07) design39 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design39), c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) design40 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design40), c(0.2, 0.4, 0.6, 0.8, 1), tolerance = 1e-07) design41 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design41), c(0.16666667, 0.33333333, 0.5, 0.66666667, 0.83333333, 1), tolerance = 1e-07) design42 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design42), c(0.5, 1), tolerance = 1e-07) design43 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design43), c(0.33333333, 0.66666667, 1), tolerance = 1e-07) design44 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design44), c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) design45 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design45), c(0.2, 0.4, 0.6, 0.8, 1), tolerance = 1e-07) design46 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design46), c(0.16666667, 0.33333333, 0.5, 0.66666667, 0.83333333, 1), tolerance = 1e-07) design47 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design47), c(0.5, 1), tolerance = 1e-07) design48 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design48), c(0.33333333, 0.66666667, 1), tolerance = 1e-07) design49 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design49), c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) design50 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design50), c(0.2, 0.4, 0.6, 0.8, 1), tolerance = 1e-07) design51 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design51), 
c(0.16666667, 0.33333333, 0.5, 0.66666667, 0.83333333, 1), tolerance = 1e-07) }) test_that("'getValidatedInformationRates': 'kMax' must be set correctly based on specified 'informationRates'", { design52 <- getTestDesign(informationRates = 1, designClass = "TrialDesignGroupSequential") .getValidatedInformationRates(design52) expect_equal(design52$kMax, 1, tolerance = 1e-07) design53 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignGroupSequential") .getValidatedInformationRates(design53) expect_equal(design53$kMax, 2, tolerance = 1e-07) design54 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignGroupSequential") .getValidatedInformationRates(design54) expect_equal(design54$kMax, 3, tolerance = 1e-07) design55 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignGroupSequential") .getValidatedInformationRates(design55) expect_equal(design55$kMax, 4, tolerance = 1e-07) design56 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignGroupSequential") .getValidatedInformationRates(design56) expect_equal(design56$kMax, 5, tolerance = 1e-07) design57 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignGroupSequential") .getValidatedInformationRates(design57) expect_equal(design57$kMax, 6, tolerance = 1e-07) design58 <- getTestDesign(informationRates = 1, designClass = "TrialDesignInverseNormal") .getValidatedInformationRates(design58) expect_equal(design58$kMax, 1, tolerance = 1e-07) design59 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignInverseNormal") .getValidatedInformationRates(design59) expect_equal(design59$kMax, 2, tolerance = 1e-07) design60 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignInverseNormal") .getValidatedInformationRates(design60) expect_equal(design60$kMax, 3, tolerance = 1e-07) design61 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignInverseNormal") .getValidatedInformationRates(design61) expect_equal(design61$kMax, 4, tolerance = 1e-07) design62 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignInverseNormal") .getValidatedInformationRates(design62) expect_equal(design62$kMax, 5, tolerance = 1e-07) design63 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignInverseNormal") .getValidatedInformationRates(design63) expect_equal(design63$kMax, 6, tolerance = 1e-07) design64 <- getTestDesign(informationRates = 1, designClass = "TrialDesignFisher") .getValidatedInformationRates(design64) expect_equal(design64$kMax, 1, tolerance = 1e-07) design65 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignFisher") .getValidatedInformationRates(design65) expect_equal(design65$kMax, 2, tolerance = 1e-07) design66 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignFisher") .getValidatedInformationRates(design66) expect_equal(design66$kMax, 3, tolerance = 1e-07) design67 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignFisher") .getValidatedInformationRates(design67) expect_equal(design67$kMax, 4, tolerance = 1e-07) design68 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignFisher") .getValidatedInformationRates(design68) 
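## kMax is inferred from the number of specified information rates, here length(informationRates) = 5.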
expect_equal(design68$kMax, 5, tolerance = 1e-07) design69 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignFisher") .getValidatedInformationRates(design69) expect_equal(design69$kMax, 6, tolerance = 1e-07) design70 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design70) expect_equal(design70$kMax, 2, tolerance = 1e-07) design71 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design71) expect_equal(design71$kMax, 3, tolerance = 1e-07) design72 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design72) expect_equal(design72$kMax, 4, tolerance = 1e-07) design73 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design73) expect_equal(design73$kMax, 5, tolerance = 1e-07) design74 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design74) expect_equal(design74$kMax, 6, tolerance = 1e-07) design75 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design75) expect_equal(design75$kMax, 2, tolerance = 1e-07) design76 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design76) expect_equal(design76$kMax, 3, tolerance = 1e-07) design77 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design77) expect_equal(design77$kMax, 4, tolerance = 1e-07) design78 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design78) expect_equal(design78$kMax, 5, tolerance = 1e-07) design79 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design79) expect_equal(design79$kMax, 6, tolerance = 1e-07) design80 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design80) expect_equal(design80$kMax, 2, tolerance = 1e-07) design81 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design81) expect_equal(design81$kMax, 3, tolerance = 1e-07) design82 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design82) expect_equal(design82$kMax, 4, tolerance = 1e-07) design83 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design83) expect_equal(design83$kMax, 5, tolerance = 1e-07) design84 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design84) expect_equal(design84$kMax, 6, tolerance = 1e-07) }) test_that("'getValidatedFutilityBounds': 'futilityBounds' must be generated correctly based on specified 'kMax'", { design85 <- getTestDesign(kMax = 1L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design85), numeric(0), tolerance = 1e-08) design86 <- getTestDesign(kMax = 2L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design86), -6, tolerance = 1e-08) design87 <- getTestDesign(kMax = 3L, designClass = 
"TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design87), c(-6, -6), tolerance = 1e-08) design88 <- getTestDesign(kMax = 4L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design88), c(-6, -6, -6), tolerance = 1e-08) design89 <- getTestDesign(kMax = 5L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design89), c(-6, -6, -6, -6), tolerance = 1e-08) design90 <- getTestDesign(kMax = 6L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design90), c(-6, -6, -6, -6, -6), tolerance = 1e-08) design91 <- getTestDesign(kMax = 7L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design91), c(-6, -6, -6, -6, -6, -6), tolerance = 1e-08) design92 <- getTestDesign(kMax = 8L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design92), c(-6, -6, -6, -6, -6, -6, -6), tolerance = 1e-08) design93 <- getTestDesign(kMax = 9L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design93), c(-6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-08) design94 <- getTestDesign(kMax = 10L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design94), c(-6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-08) design95 <- getTestDesign(kMax = 1L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design95), numeric(0), tolerance = 1e-08) design96 <- getTestDesign(kMax = 2L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design96), -6, tolerance = 1e-08) design97 <- getTestDesign(kMax = 3L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design97), c(-6, -6), tolerance = 1e-08) design98 <- getTestDesign(kMax = 4L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design98), c(-6, -6, -6), tolerance = 1e-08) design99 <- getTestDesign(kMax = 5L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design99), c(-6, -6, -6, -6), tolerance = 1e-08) design100 <- getTestDesign(kMax = 6L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design100), c(-6, -6, -6, -6, -6), tolerance = 1e-08) design101 <- getTestDesign(kMax = 7L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design101), c(-6, -6, -6, -6, -6, -6), tolerance = 1e-08) design102 <- getTestDesign(kMax = 8L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design102), c(-6, -6, -6, -6, -6, -6, -6), tolerance = 1e-08) design103 <- getTestDesign(kMax = 9L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design103), c(-6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-08) design104 <- getTestDesign(kMax = 10L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design104), c(-6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-08) design105 <- getTestDesign(kMax = 1L, designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design105), numeric(0), tolerance = 1e-08) design106 <- getTestDesign(kMax = 2L, designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design106), 1, tolerance = 1e-08) design107 <- getTestDesign(kMax = 3L, designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design107), c(1, 1), tolerance = 1e-08) design108 <- 
getTestDesign(kMax = 4L, designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design108), c(1, 1, 1), tolerance = 1e-08) design109 <- getTestDesign(kMax = 5L, designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design109), c(1, 1, 1, 1), tolerance = 1e-08) design110 <- getTestDesign(kMax = 6L, designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design110), c(1, 1, 1, 1, 1), tolerance = 1e-08) }) test_that("'getValidatedFutilityBounds': 'futilityBounds' must be set correctly based on specified 'futilityBounds'", { design111 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design111), 2, tolerance = 1e-07) design112 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design112), c(1, 2), tolerance = 1e-07) design113 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design113), c(0, 1, 2), tolerance = 1e-07) design114 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design114), c(0, 0, 1, 2), tolerance = 1e-07) design115 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design115), c(0, 0, 0, 1, 2), tolerance = 1e-07) design116 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design116), c(0, 0, 0, 0, 1, 2), tolerance = 1e-07) design117 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design117), c(0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design118 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design118), c(0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design119 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design119), c(0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design120 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design120), 2, tolerance = 1e-07) design121 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design121), c(1, 2), tolerance = 1e-07) design122 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design122), c(0, 1, 2), tolerance = 1e-07) design123 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design123), c(0, 0, 1, 2), tolerance = 1e-07) design124 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design124), c(0, 0, 0, 1, 2), tolerance = 1e-07) design125 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design125), c(0, 0, 0, 0, 1, 2), tolerance = 1e-07) design126 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") 
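## As the expectations in this test assert, user-specified futility bounds pass through
## validation unchanged: on the z scale for group sequential and inverse normal designs,
## and on the alpha0 scale (.getValidatedAlpha0Vec) for Fisher designs.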
expect_equal(.getValidatedFutilityBounds(design126), c(0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design127 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design127), c(0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design128 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design128), c(0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design129 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design129), 2, tolerance = 1e-07) design130 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design130), c(1, 2), tolerance = 1e-07) design131 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design131), c(0, 1, 2), tolerance = 1e-07) design132 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design132), c(0, 0, 1, 2), tolerance = 1e-07) design133 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design133), c(0, 0, 0, 1, 2), tolerance = 1e-07) design134 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design134), -6, tolerance = 1e-07) design135 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design135), c(-6, -6), tolerance = 1e-07) design136 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design136), c(-6, -6, -6), tolerance = 1e-07) design137 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design137), c(-6, -6, -6, -6), tolerance = 1e-07) design138 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design138), c(-6, -6, -6, -6, -6), tolerance = 1e-07) design139 <- getTestDesign(informationRates = c(0.11428571, 0.22857143, 0.34285714, 0.45714286, 0.57142857, 0.68571429, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design139), c(-6, -6, -6, -6, -6, -6), tolerance = 1e-07) design140 <- getTestDesign(informationRates = c(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design140), c(-6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design141 <- getTestDesign(informationRates = c(0.088888889, 0.17777778, 0.26666667, 0.35555556, 0.44444444, 0.53333333, 0.62222222, 0.71111111, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design141), c(-6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design142 <- getTestDesign(informationRates = c(0.08, 0.16, 0.24, 0.32, 0.4, 0.48, 0.56, 0.64, 0.72, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design142), c(-6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design143 <- getTestDesign(informationRates = c(0.4, 1), designClass = 
"TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design143), -6, tolerance = 1e-07) design144 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design144), c(-6, -6), tolerance = 1e-07) design145 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design145), c(-6, -6, -6), tolerance = 1e-07) design146 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design146), c(-6, -6, -6, -6), tolerance = 1e-07) design147 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design147), c(-6, -6, -6, -6, -6), tolerance = 1e-07) design148 <- getTestDesign(informationRates = c(0.11428571, 0.22857143, 0.34285714, 0.45714286, 0.57142857, 0.68571429, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design148), c(-6, -6, -6, -6, -6, -6), tolerance = 1e-07) design149 <- getTestDesign(informationRates = c(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design149), c(-6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design150 <- getTestDesign(informationRates = c(0.088888889, 0.17777778, 0.26666667, 0.35555556, 0.44444444, 0.53333333, 0.62222222, 0.71111111, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design150), c(-6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design151 <- getTestDesign(informationRates = c(0.08, 0.16, 0.24, 0.32, 0.4, 0.48, 0.56, 0.64, 0.72, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design151), c(-6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design152 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design152), 1, tolerance = 1e-07) design153 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design153), c(1, 1), tolerance = 1e-07) design154 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design154), c(1, 1, 1), tolerance = 1e-07) design155 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design155), c(1, 1, 1, 1), tolerance = 1e-07) design156 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design156), c(1, 1, 1, 1, 1), tolerance = 1e-07) }) test_that("'getValidatedInformationRates': 'kMax' must be set correctly based on specified 'futilityBounds'", { design157 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design157) expect_equal(design157$kMax, 2, tolerance = 1e-07) design158 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design158) expect_equal(design158$kMax, 3, tolerance = 1e-07) design159 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignGroupSequential") 
.getValidatedFutilityBounds(design159) expect_equal(design159$kMax, 4, tolerance = 1e-07) design160 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design160) expect_equal(design160$kMax, 5, tolerance = 1e-07) design161 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design161) expect_equal(design161$kMax, 6, tolerance = 1e-07) design162 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design162) expect_equal(design162$kMax, 7, tolerance = 1e-07) design163 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design163) expect_equal(design163$kMax, 8, tolerance = 1e-07) design164 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design164) expect_equal(design164$kMax, 9, tolerance = 1e-07) design165 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design165) expect_equal(design165$kMax, 10, tolerance = 1e-07) design166 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design166) expect_equal(design166$kMax, 2, tolerance = 1e-07) design167 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design167) expect_equal(design167$kMax, 3, tolerance = 1e-07) design168 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design168) expect_equal(design168$kMax, 4, tolerance = 1e-07) design169 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design169) expect_equal(design169$kMax, 5, tolerance = 1e-07) design170 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design170) expect_equal(design170$kMax, 6, tolerance = 1e-07) design171 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design171) expect_equal(design171$kMax, 7, tolerance = 1e-07) design172 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design172) expect_equal(design172$kMax, 8, tolerance = 1e-07) design173 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design173) expect_equal(design173$kMax, 9, tolerance = 1e-07) design174 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design174) expect_equal(design174$kMax, 10, tolerance = 1e-07) design175 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design175) expect_equal(design175$kMax, 2, tolerance = 1e-07) design176 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design176) expect_equal(design176$kMax, 3, tolerance = 1e-07) design177 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design177) expect_equal(design177$kMax, 4, tolerance = 1e-07) 
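    # Editor's note (illustration, not part of the original test suite): the pattern
    # exercised above and below is that 'kMax' is inferred from the futility bounds as
    # length(futilityBounds) + 1. A sketch of the same check via the public API,
    # kept commented out like the exploratory code elsewhere in these tests:
    # design <- getDesignGroupSequential(futilityBounds = c(0, 1, 2))
    # design$kMax  # expected: 4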
    design178 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignFisher")
    .getValidatedAlpha0Vec(design178)
    expect_equal(design178$kMax, 5, tolerance = 1e-07)
    design179 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignFisher")
    .getValidatedAlpha0Vec(design179)
    expect_equal(design179$kMax, 6, tolerance = 1e-07)
    design180 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignGroupSequential")
    .getValidatedFutilityBounds(design180)
    expect_equal(design180$kMax, 2, tolerance = 1e-07)
    design181 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignGroupSequential")
    .getValidatedFutilityBounds(design181)
    expect_equal(design181$kMax, 3, tolerance = 1e-07)
    design182 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignGroupSequential")
    .getValidatedFutilityBounds(design182)
    expect_equal(design182$kMax, 4, tolerance = 1e-07)
    design183 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignGroupSequential")
    .getValidatedFutilityBounds(design183)
    expect_equal(design183$kMax, 5, tolerance = 1e-07)
    design184 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1),
        designClass = "TrialDesignGroupSequential")
    .getValidatedFutilityBounds(design184)
    expect_equal(design184$kMax, 6, tolerance = 1e-07)
    design185 <- getTestDesign(informationRates = c(0.11428571, 0.22857143, 0.34285714, 0.45714286, 0.57142857, 0.68571429, 1),
        designClass = "TrialDesignGroupSequential")
    .getValidatedFutilityBounds(design185)
    expect_equal(design185$kMax, 7, tolerance = 1e-07)
    design186 <- getTestDesign(informationRates = c(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1), designClass = "TrialDesignGroupSequential")
    .getValidatedFutilityBounds(design186)
    expect_equal(design186$kMax, 8, tolerance = 1e-07)
    design187 <- getTestDesign(informationRates = c(0.088888889, 0.17777778, 0.26666667, 0.35555556, 0.44444444,
        0.53333333, 0.62222222, 0.71111111, 1), designClass = "TrialDesignGroupSequential")
    .getValidatedFutilityBounds(design187)
    expect_equal(design187$kMax, 9, tolerance = 1e-07)
    design188 <- getTestDesign(informationRates = c(0.08, 0.16, 0.24, 0.32, 0.4, 0.48, 0.56, 0.64, 0.72, 1),
        designClass = "TrialDesignGroupSequential")
    .getValidatedFutilityBounds(design188)
    expect_equal(design188$kMax, 10, tolerance = 1e-07)
    design189 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignInverseNormal")
    .getValidatedFutilityBounds(design189)
    expect_equal(design189$kMax, 2, tolerance = 1e-07)
    design190 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignInverseNormal")
    .getValidatedFutilityBounds(design190)
    expect_equal(design190$kMax, 3, tolerance = 1e-07)
    design191 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignInverseNormal")
    .getValidatedFutilityBounds(design191)
    expect_equal(design191$kMax, 4, tolerance = 1e-07)
    design192 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignInverseNormal")
    .getValidatedFutilityBounds(design192)
    expect_equal(design192$kMax, 5, tolerance = 1e-07)
    design193 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1),
        designClass = "TrialDesignInverseNormal")
    .getValidatedFutilityBounds(design193)
    expect_equal(design193$kMax, 6, tolerance = 1e-07)
    design194 <- getTestDesign(informationRates = c(0.11428571, 0.22857143, 0.34285714, 0.45714286,
        0.57142857, 0.68571429, 1), designClass = "TrialDesignInverseNormal")
    .getValidatedFutilityBounds(design194)
    expect_equal(design194$kMax, 7, tolerance = 1e-07)
    design195 <- getTestDesign(informationRates = c(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1), designClass = "TrialDesignInverseNormal")
    .getValidatedFutilityBounds(design195)
    expect_equal(design195$kMax, 8, tolerance = 1e-07)
    design196 <- getTestDesign(informationRates = c(0.088888889, 0.17777778, 0.26666667, 0.35555556, 0.44444444,
        0.53333333, 0.62222222, 0.71111111, 1), designClass = "TrialDesignInverseNormal")
    .getValidatedFutilityBounds(design196)
    expect_equal(design196$kMax, 9, tolerance = 1e-07)
    design197 <- getTestDesign(informationRates = c(0.08, 0.16, 0.24, 0.32, 0.4, 0.48, 0.56, 0.64, 0.72, 1),
        designClass = "TrialDesignInverseNormal")
    .getValidatedFutilityBounds(design197)
    expect_equal(design197$kMax, 10, tolerance = 1e-07)
    design198 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignFisher")
    .getValidatedAlpha0Vec(design198)
    expect_equal(design198$kMax, 2, tolerance = 1e-07)
    design199 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignFisher")
    .getValidatedAlpha0Vec(design199)
    expect_equal(design199$kMax, 3, tolerance = 1e-07)
    design200 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignFisher")
    .getValidatedAlpha0Vec(design200)
    expect_equal(design200$kMax, 4, tolerance = 1e-07)
    design201 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignFisher")
    .getValidatedAlpha0Vec(design201)
    expect_equal(design201$kMax, 5, tolerance = 1e-07)
    design202 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1),
        designClass = "TrialDesignFisher")
    .getValidatedAlpha0Vec(design202)
    expect_equal(design202$kMax, 6, tolerance = 1e-07)
})

context("Testing utilities")

test_that("Testing '.toCapitalized'", {
    expect_equal(.toCapitalized("zip code"), "Zip Code")
    expect_equal(.toCapitalized("state of the art"), "State of the Art")
    expect_equal(.toCapitalized("final and count"), "Final and Count")
})

test_that("Testing '.equalsRegexpIgnoreCase'", {
    expect_equal(.equalsRegexpIgnoreCase("stage2", "^stages?$"), FALSE)
    expect_equal(.equalsRegexpIgnoreCase("stage", "^stages?$"), TRUE)
    expect_equal(.equalsRegexpIgnoreCase("stages", "^stages?$"), TRUE)
    expect_equal(.equalsRegexpIgnoreCase("Stage", "^stages?$"), TRUE)
    expect_equal(.equalsRegexpIgnoreCase("STAGES", "^stages?$"), TRUE)
    expect_equal(.equalsRegexpIgnoreCase("stages2", "^stages?$"), FALSE)
    expect_equal(.equalsRegexpIgnoreCase(" stages", "^stages?$"), FALSE)
    expect_equal(.equalsRegexpIgnoreCase("stages2", "stages?"), TRUE)
    expect_equal(.equalsRegexpIgnoreCase("1stage2", "stages?"), TRUE)
})

test_that("Testing '.isUndefinedArgument' and '.isDefinedArgument'", {
    expect_equal(.isUndefinedArgument(NULL), TRUE)
    expect_equal(.isUndefinedArgument(numeric(0)), TRUE)
    expect_equal(.isUndefinedArgument(NA), TRUE)
    expect_equal(.isUndefinedArgument(NA_integer_), TRUE)
    expect_equal(.isUndefinedArgument(NA_real_), TRUE)
    expect_equal(.isUndefinedArgument(NA_complex_), TRUE)
    expect_equal(.isUndefinedArgument(NA_character_), TRUE)
    expect_equal(.isUndefinedArgument(c(NA, NA)), FALSE)
    expect_equal(.isUndefinedArgument(c(1, NA, NA)), FALSE)
    expect_equal(.isUndefinedArgument(c(NA, NA, 1)), FALSE)
    expect_equal(.isUndefinedArgument(1), FALSE)
    expect_equal(.isDefinedArgument(NULL), FALSE)
    expect_equal(.isDefinedArgument(numeric(0)), FALSE)
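    # Editor's note (illustration, not part of the original test suite): taken together,
    # these expectations pin down the rule that a value counts as "undefined" if and only
    # if it is NULL, has length 0, or is a single NA of any type; vectors of length >= 2
    # count as defined even if all entries are NA. A rough stand-in for the internal
    # predicate (a sketch, not the package implementation):
    # isUndefinedSketch <- function(x) {
    #     is.null(x) || length(x) == 0 || (length(x) == 1 && is.na(x))
    # }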
    expect_equal(.isDefinedArgument(NA), FALSE)
    expect_equal(.isDefinedArgument(NA_integer_), FALSE)
    expect_equal(.isDefinedArgument(NA_real_), FALSE)
    expect_equal(.isDefinedArgument(NA_complex_), FALSE)
    expect_equal(.isDefinedArgument(NA_character_), FALSE)
    expect_equal(.isDefinedArgument(c(NA, NA)), TRUE)
    expect_equal(.isDefinedArgument(c(1, NA, NA)), TRUE)
    expect_equal(.isDefinedArgument(c(NA, NA, 1)), TRUE)
    expect_equal(.isDefinedArgument(1), TRUE)
    skip_if_translated()
    expect_error(.isDefinedArgument(notExistingTestVariable, argumentExistsValidationEnabled = FALSE),
        "object 'notExistingTestVariable' not found", fixed = TRUE)
    expect_error(.isDefinedArgument(notExistingTestVariable),
        "Missing argument: the object 'notExistingTestVariable' has not been defined anywhere. Please define it first, e.g., run 'notExistingTestVariable <- 1'",
        fixed = TRUE)
})

test_that("Result of 'setSeed(seed)' is working for different arguments, incl. NULL and NA", {
    # @refFS[Sec.]{fs:subsec:reproducibilityOfSimulationResults}
    expect_false(is.null(.setSeed()))
    expect_false(is.na(.setSeed()))
    expect_true(is.numeric(.setSeed()))
    expect_false(is.null(.setSeed(NULL)))
    expect_false(is.na(.setSeed(NULL)))
    expect_true(is.numeric(.setSeed(NULL)))
    expect_false(is.null(.setSeed(NA)))
    expect_false(is.na(.setSeed(NA)))
    expect_true(is.numeric(.setSeed(NA)))
    expect_true(.setSeed() != .setSeed())
    expect_equal(.setSeed(123), 123)
    expect_equal(.setSeed(0), 0)
    expect_equal(.setSeed(5e-5), 5e-5)
})

test_that("Testing '.getInputForZeroOutputInsideTolerance'", {
    input <- 99
    tolerance <- 1e-05
    epsilon <- 1e-08
    expect_equal(.getInputForZeroOutputInsideTolerance(input, tolerance, tolerance), input)
    expect_equal(.getInputForZeroOutputInsideTolerance(input, tolerance + epsilon, tolerance), NA_real_)
    expect_equal(.getInputForZeroOutputInsideTolerance(input, tolerance - epsilon, tolerance), input)
})

test_that("Testing '.arrayToString'", {
    expect_equal(.arrayToString(NA, vectorLookAndFeelEnabled = TRUE), "NA")
    expect_equal(.arrayToString(NULL, vectorLookAndFeelEnabled = TRUE), "NULL")
    expect_equal(.arrayToString(c(1, 2, 3), vectorLookAndFeelEnabled = TRUE), "c(1, 2, 3)")
    expect_equal(.arrayToString(c(NA, 2, 3), vectorLookAndFeelEnabled = TRUE), "c(NA, 2, 3)")
    expect_equal(.arrayToString(c(1, 2, NA), vectorLookAndFeelEnabled = TRUE), "c(1, 2, NA)")
    expect_equal(.arrayToString(c(NA, NA, NA), vectorLookAndFeelEnabled = TRUE), "c(NA, NA, NA)")
    expect_equal(.arrayToString(c(1, NULL, 3), vectorLookAndFeelEnabled = TRUE), "c(1, 3)")
})

test_that("Testing '.getInputProducingZeroOutput'", {
    tolerance <- 1e-05
    epsilon <- 1e-08
    expect_equal(.getInputProducingZeroOutput(1, 0, 2, 99, tolerance), 1)
    expect_equal(.getInputProducingZeroOutput(1, 99, 2, 0, tolerance), 2)
    expect_equal(.getInputProducingZeroOutput(1, 0, NA, 0, tolerance), 1)
    expect_equal(.getInputProducingZeroOutput(NA, 0, 2, 0, tolerance), 2)
    expect_equal(.getInputProducingZeroOutput(1, 0, NA, NA, tolerance), 1)
    expect_equal(.getInputProducingZeroOutput(NA, NA, 2, 0, tolerance), 2)
    expect_equal(.getInputProducingZeroOutput(1, 0, 2, NA, tolerance), 1)
    expect_equal(.getInputProducingZeroOutput(1, NA, 2, 0, tolerance), 2)
    expect_equal(.getInputProducingZeroOutput(1, tolerance, 2, 99, tolerance), 1)
    expect_equal(.getInputProducingZeroOutput(1, 99, 2, tolerance, tolerance), 2)
    expect_equal(.getInputProducingZeroOutput(1, tolerance, 2, tolerance + epsilon, tolerance), 1)
    expect_equal(.getInputProducingZeroOutput(1, tolerance + epsilon, 2, tolerance, tolerance), 2)
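    # Editor's note (inferred from the surrounding expectations, not documented behavior):
    # '.getInputProducingZeroOutput' appears to return whichever of the two inputs produced
    # the output closer to zero, never choosing an input whose output is NA; e.g., with
    # outputs 'tolerance' and 'tolerance + epsilon' the first input is returned, and with
    # outputs 'tolerance + epsilon' and 'tolerance' the second one is.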
    expect_equal(.getInputProducingZeroOutput(1, tolerance, 2, tolerance - epsilon, tolerance), 2)
    expect_equal(.getInputProducingZeroOutput(1, tolerance - epsilon, 2, tolerance, tolerance), 1)
    expect_equal(.getInputProducingZeroOutput(1, tolerance - epsilon, 2, tolerance, tolerance), 1)
    expect_equal(.getInputProducingZeroOutput(1, tolerance, 2, tolerance - epsilon, tolerance), 2)
})

test_that("Testing '.getOneDimensionalRoot'", {
    tolerance <- 1e-08
    expect_equal(.getOneDimensionalRoot(f = function(x) {x - 2}, lower = -1, upper = 1, tolerance = tolerance), NA_real_)
    expect_equal(.getOneDimensionalRoot(f = function(x) {x + 2}, lower = -1, upper = 1, tolerance = tolerance), NA_real_)
    expect_equal(.getOneDimensionalRoot(f = function(x) {x - 1 - tolerance}, lower = -1, upper = 1, tolerance = tolerance), 1)
    expect_equal(.getOneDimensionalRoot(f = function(x) {x + 1 + tolerance}, lower = -1, upper = 1, tolerance = tolerance), -1)
    expect_equal(.getOneDimensionalRoot(f = function(x) {x - 1}, lower = -1, upper = 1, tolerance = tolerance), 1)
    expect_equal(.getOneDimensionalRoot(f = function(x) {x + 1}, lower = -1, upper = 1, tolerance = tolerance), -1)
    expect_equal(.getOneDimensionalRoot(f = function(x) {x - 1}, lower = 0, upper = 1, tolerance = tolerance), 1)
    expect_equal(.getOneDimensionalRoot(f = function(x) {x - 1}, lower = tolerance, upper = 1, tolerance = tolerance), 1)
    expect_equal(.getOneDimensionalRoot(f = function(x) {x + 1}, lower = -1, upper = 0, tolerance = tolerance), -1)
    expect_equal(.getOneDimensionalRoot(f = function(x) {x + 1}, lower = -1, upper = 1 - tolerance, tolerance = tolerance), -1)
    expect_equal(.getOneDimensionalRoot(f = function(x) {x - 3}, lower = 1, upper = 5, tolerance = tolerance), 3)
    expect_equal(.getOneDimensionalRoot(f = function(x) {x + 3}, lower = -5, upper = -1, tolerance = tolerance), -3)
    expect_equal(.getOneDimensionalRoot(f = function(x) {3 * x - 700}, lower = 100, upper = 1000, tolerance = tolerance), 233.33333333)
    expect_equal(.getOneDimensionalRoot(f = function(x) {3 * x + 700}, lower = -1000, upper = -100, tolerance = tolerance), -233.33333333)
    expect_equal(.getOneDimensionalRoot(f = function(x) {x - 4}, lower = -10, upper = 10), 4, tolerance = tolerance)
    expect_equal(.getOneDimensionalRoot(f = function(x) {x + 4}, lower = -10, upper = 10), -4, tolerance = tolerance)
    dataExample1 <- getDataset(
        overallEvents = c(33, 55, 129),
        overallAllocationRatios = c(1, 1, 4),
        overallLogRanks = c(1.02, 1.38, 2.2)
    )
    design1 <- getDesignGroupSequential(kMax = 3, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.25)
    result1 <- getRepeatedConfidenceIntervals(design1, dataExample1, stage = 3)
    ##
    ## Comparison of the results of matrix object 'result1' with expected results
    ##
    expect_equal(result1[1, ], c(0.54923831, 0.77922365, 1.0261298), tolerance = 1e-07)
    expect_equal(result1[2, ], c(3.7041718, 2.7014099, 2.5669073), tolerance = 1e-07)
    design2 <- getDesignGroupSequential(kMax = 3, alpha = 0.025, informationRates = c(0.4, 0.7, 1),
        typeOfDesign = "WT", deltaWT = 0.35)
    dataExample2 <- getDataset(
        overallN2 = c(30, 80, 100),
        overallN1 = c(30, 80, 100),
        overallEvents2 = c(10, 25, 36),
        overallEvents1 = c(14, 35, 53))
    result2 <- getRepeatedConfidenceIntervals(design = design2, dataInput = dataExample2, stage = 3,
        normalApproximation = TRUE, directionUpper = TRUE)
    ##
    ## Comparison of the results of matrix object 'result2' with expected results
    ##
    expect_equal(result2[1, ], c(-0.17491836, -0.048575353, 0.018957992), tolerance = 1e-07)
    expect_equal(result2[2, ], c(0.41834422, 0.29168781, 0.31353692), tolerance = 1e-07)
    design3 <- getDesignInverseNormal(kMax = 2, alpha = 0.025, informationRates = c(0.5, 1),
        typeOfDesign = "WT", deltaWT = 0.25)
    dataExample3 <- getDataset(
        events1 = c(7, 57),
        events2 = c(7, 57),
        n1 = c(30, 300),
        n2 = c(30, 300)
    )
    result3 <- getRepeatedConfidenceIntervals(design3, dataExample3)
    ##
    ## Comparison of the results of matrix object 'result3' with expected results
    ##
    expect_equal(result3[1, ], c(-0.26729325, -0.071745801), tolerance = 1e-07)
    expect_equal(result3[2, ], c(0.26729325, 0.071745801), tolerance = 1e-07)
    design4 <- getDesignInverseNormal(kMax = 2, alpha = 0.025, informationRates = c(0.5, 1),
        typeOfDesign = "WT", deltaWT = 0.25)
    dataExample4 <- getDataset(
        events1 = c(4, 55),
        events2 = c(4, 46),
        n1 = c(30, 300),
        n2 = c(30, 300))
    result4 <- getRepeatedConfidenceIntervals(design4, dataExample4)
    ##
    ## Comparison of the results of matrix object 'result4' with expected results
    ##
    expect_equal(result4[1, ], c(-0.23589449, -0.043528513), tolerance = 1e-07)
    expect_equal(result4[2, ], c(0.23589449, 0.088471324), tolerance = 1e-07)
})
rpact/tests/testthat/test-f_simulation_rates.R0000644000176200001440000006352613567165663021402 0ustar liggesusers
######################################################################################
#                                                                                    #
# -- Unit tests --                                                                   #
#                                                                                    #
# This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. #
#                                                                                    #
# File version: 1.0.0                                                                #
# Date: 06 November 2019, 17:12:58                                                   #
# Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD                             #
# Licensed under "GNU Lesser General Public License" version 3                       #
# License text can be found here: https://www.r-project.org/Licenses/LGPL-3          #
#                                                                                    #
# RPACT company website: https://www.rpact.com                                       #
# RPACT package website: https://www.rpact.org                                       #
#                                                                                    #
# Contact us for information about our services: info@rpact.com                      #
#                                                                                    #
######################################################################################

context("Testing simulation rates function")

test_that("'getSimulationRates': check several configurations", {
    .skipTestifDisabled()

    # @refFS[Sec.]{fs:subsec:seed}
    seed <- 99123
    maxNumberOfIterations <- 100
    options(width = 180)
    maxNumberOfSubjects <- 90
    informationRates <- (1:3) / 3
    plannedSubjects <- round(informationRates * maxNumberOfSubjects)

    x1 <- getSimulationRates(design = getDesignInverseNormal(futilityBounds = c(-0.5, 0.5), informationRates = informationRates),
        groups = 2, riskRatio = TRUE, thetaH0 = 0.8, plannedSubjects = plannedSubjects,
        maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, seed = seed)
    ##
    ## Comparison of the results of SimulationResultsRates object 'x1' with expected results
    ##
    expect_equal(x1$effect, c(0.2, 0.7, 1.2, 1.7), tolerance = 1e-07)
    expect_equal(x1$iterations[1, ], c(100, 100, 100, 100))
    expect_equal(x1$iterations[2, ], c(78, 93, 99, 96))
    expect_equal(x1$iterations[3, ], c(41, 68, 56, 40))
    expect_equal(x1$sampleSizes[1, ], c(30, 30, 30, 30))
    expect_equal(x1$sampleSizes[2, ], c(30, 30, 30, 30))
    expect_equal(x1$sampleSizes[3, ], c(30, 30, 30, 30))
    expect_equal(x1$rejectPerStage[1, ], c(0, 0, 0, 0.04), tolerance = 1e-07)
    expect_equal(x1$rejectPerStage[2, ], c(0.02, 0.04, 0.34, 0.54), tolerance = 1e-07)
    expect_equal(x1$rejectPerStage[3, ], c(0.03, 0.19, 0.4, 0.3), tolerance = 1e-07)
    expect_equal(x1$overallReject, c(0.05, 0.23, 0.74, 0.88), tolerance = 1e-07)
    expect_equal(x1$futilityPerStage[1, ], c(0.22, 0.07, 0.01, 0), tolerance = 1e-07)
    expect_equal(x1$futilityPerStage[2, ], c(0.35, 0.21, 0.09, 0.02), tolerance = 1e-07)
    expect_equal(x1$futilityStop, c(0.57, 0.28, 0.1, 0.02), tolerance = 1e-07)
    expect_equal(x1$earlyStop, c(0.59, 0.32, 0.44, 0.6), tolerance = 1e-07)
    expect_equal(x1$expectedNumberOfSubjects, c(65.7, 78.3, 76.5, 70.8), tolerance = 1e-07)
    expect_equal(x1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x1$conditionalPowerAchieved[2, ], c(0.090943215, 0.15808459, 0.48521663, 0.52642331), tolerance = 1e-07)
    expect_equal(x1$conditionalPowerAchieved[3, ], c(0.22475932, 0.38294099, 0.60961381, 0.67377136), tolerance = 1e-07)

    x2 <- getSimulationRates(design = getDesignInverseNormal(futilityBounds = c(-0.5, 0.5), informationRates = informationRates),
        groups = 2, riskRatio = FALSE, thetaH0 = -0.1, plannedSubjects = plannedSubjects,
        maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, seed = seed)
    ##
    ## Comparison of the results of SimulationResultsRates object 'x2' with expected results
    ##
    expect_equal(x2$effect, c(0.1, 0.2, 0.3, 0.4), tolerance = 1e-07)
    expect_equal(x2$iterations[1, ], c(100, 100, 100, 100))
    expect_equal(x2$iterations[2, ], c(84, 95, 100, 97))
    expect_equal(x2$iterations[3, ], c(55, 73, 64, 42))
    expect_equal(x2$sampleSizes[1, ], c(30, 30, 30, 30))
    expect_equal(x2$sampleSizes[2, ], c(30, 30, 30, 30))
    expect_equal(x2$sampleSizes[3, ], c(30, 30, 30, 30))
    expect_equal(x2$rejectPerStage[1, ], c(0, 0, 0, 0.03), tolerance = 1e-07)
    expect_equal(x2$rejectPerStage[2, ], c(0.02, 0.09, 0.33, 0.53), tolerance = 1e-07)
    expect_equal(x2$rejectPerStage[3, ], c(0.06, 0.3, 0.48, 0.32), tolerance = 1e-07)
    expect_equal(x2$overallReject, c(0.08, 0.39, 0.81, 0.88), tolerance = 1e-07)
    expect_equal(x2$futilityPerStage[1, ], c(0.16, 0.05, 0, 0), tolerance = 1e-07)
    expect_equal(x2$futilityPerStage[2, ], c(0.27, 0.13, 0.03, 0.02), tolerance = 1e-07)
    expect_equal(x2$futilityStop, c(0.43, 0.18, 0.03, 0.02), tolerance = 1e-07)
    expect_equal(x2$earlyStop, c(0.45, 0.27, 0.36, 0.58), tolerance = 1e-07)
    expect_equal(x2$expectedNumberOfSubjects, c(71.7, 80.4, 79.2, 71.7), tolerance = 1e-07)
    expect_equal(x2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x2$conditionalPowerAchieved[2, ], c(0.10237911, 0.25306891, 0.43740091, 0.54067879), tolerance = 1e-07)
    expect_equal(x2$conditionalPowerAchieved[3, ], c(0.30171473, 0.4623858, 0.59071853, 0.68245332), tolerance = 1e-07)

    x3 <- getSimulationRates(design = getDesignInverseNormal(futilityBounds = c(-0.5, 0.5), informationRates = informationRates),
        groups = 1, thetaH0 = 0.2, pi1 = seq(0.2, 0.4, 0.05), plannedSubjects = plannedSubjects,
        maxNumberOfIterations = maxNumberOfIterations, seed = seed)
    ##
    ## Comparison of the results of SimulationResultsRates object 'x3' with expected results
    ##
    expect_equal(x3$effect, c(0, 0.05, 0.1, 0.15, 0.2), tolerance = 1e-07)
    expect_equal(x3$iterations[1, ], c(100, 100, 100, 100, 100))
    expect_equal(x3$iterations[2, ], c(78, 91, 96, 90, 72))
    expect_equal(x3$iterations[3, ], c(32, 65, 62, 37, 6))
    expect_equal(x3$sampleSizes[1, ], c(30, 30, 30, 30, 30))
    expect_equal(x3$sampleSizes[2, ], c(30, 30, 30, 30, 30))
    expect_equal(x3$sampleSizes[3, ], c(30, 30, 30, 30, 30))
    expect_equal(x3$rejectPerStage[1, ], c(0, 0.02, 0.04, 0.1, 0.28), tolerance = 1e-07)
    expect_equal(x3$rejectPerStage[2, ], c(0.01, 0.06, 0.28, 0.53, 0.66), tolerance = 1e-07)
    expect_equal(x3$rejectPerStage[3, ], c(0.02, 0.22, 0.28, 0.3, 0.05), tolerance = 1e-07)
    expect_equal(x3$overallReject, c(0.03, 0.3, 0.6, 0.93, 0.99), tolerance = 1e-07)
    expect_equal(x3$futilityPerStage[1, ], c(0.22, 0.07, 0, 0, 0), tolerance = 1e-07)
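    # Editor's note (consistency check, not part of the original test suite): with 100
    # iterations and a fixed stage size of 30, 'expectedNumberOfSubjects' equals
    # 30 * (1 + iterations[2, ]/100 + iterations[3, ]/100); e.g., for pi1 = 0.2:
    # 30 * (1 + 0.78 + 0.32)  # = 63, matching x3$expectedNumberOfSubjects[1] below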
    expect_equal(x3$futilityPerStage[2, ], c(0.45, 0.2, 0.06, 0, 0), tolerance = 1e-07)
    expect_equal(x3$futilityStop, c(0.67, 0.27, 0.06, 0, 0), tolerance = 1e-07)
    expect_equal(x3$earlyStop, c(0.68, 0.35, 0.38, 0.63, 0.94), tolerance = 1e-07)
    expect_equal(x3$expectedNumberOfSubjects, c(63, 76.8, 77.4, 68.1, 53.4), tolerance = 1e-07)
    expect_equal(x3$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x3$conditionalPowerAchieved[2, ], c(0.12773913, 0.18983473, 0.36146118, 0.53982038, 0.7268178), tolerance = 1e-07)
    expect_equal(x3$conditionalPowerAchieved[3, ], c(0.32676971, 0.35596086, 0.46114911, 0.56126649, 0.75350644), tolerance = 1e-07)

    x4 <- getSimulationRates(design = getDesignInverseNormal(futilityBounds = c(0.5, 0.5), informationRates = informationRates),
        groups = 2, riskRatio = TRUE, thetaH0 = 1.5, pi1 = seq(0.05, 0.25, 0.05), plannedSubjects = plannedSubjects,
        maxNumberOfIterations = maxNumberOfIterations, directionUpper = FALSE, allocationRatioPlanned = 3, seed = seed)
    ##
    ## Comparison of the results of SimulationResultsRates object 'x4' with expected results
    ##
    expect_equal(x4$effect, c(-1.25, -1, -0.75, -0.5, -0.25), tolerance = 1e-07)
    expect_equal(x4$iterations[1, ], c(100, 100, 100, 100, 100))
    expect_equal(x4$iterations[2, ], c(74, 64, 47, 36, 39))
    expect_equal(x4$iterations[3, ], c(28, 28, 30, 20, 25))
    expect_equal(x4$sampleSizes[1, ], c(30, 30, 30, 30, 30))
    expect_equal(x4$sampleSizes[2, ], c(30, 30, 30, 30, 30))
    expect_equal(x4$sampleSizes[3, ], c(30, 30, 30, 30, 30))
    expect_equal(x4$rejectPerStage[1, ], c(0.06, 0.05, 0.02, 0, 0), tolerance = 1e-07)
    expect_equal(x4$rejectPerStage[2, ], c(0.43, 0.29, 0.09, 0.04, 0.04), tolerance = 1e-07)
    expect_equal(x4$rejectPerStage[3, ], c(0.17, 0.17, 0.08, 0.04, 0.06), tolerance = 1e-07)
    expect_equal(x4$overallReject, c(0.66, 0.51, 0.19, 0.08, 0.1), tolerance = 1e-07)
    expect_equal(x4$futilityPerStage[1, ], c(0.2, 0.31, 0.51, 0.64, 0.61), tolerance = 1e-07)
    expect_equal(x4$futilityPerStage[2, ], c(0.03, 0.07, 0.08, 0.12, 0.1), tolerance = 1e-07)
    expect_equal(x4$futilityStop, c(0.23, 0.38, 0.59, 0.76, 0.71), tolerance = 1e-07)
    expect_equal(x4$earlyStop, c(0.72, 0.72, 0.7, 0.8, 0.75), tolerance = 1e-07)
    expect_equal(x4$expectedNumberOfSubjects, c(60.6, 57.6, 53.1, 46.8, 49.2), tolerance = 1e-07)
    expect_equal(x4$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x4$conditionalPowerAchieved[2, ], c(0.65569733, 0.50411153, 0.40992455, 0.37112776, 0.28877148), tolerance = 1e-07)
    expect_equal(x4$conditionalPowerAchieved[3, ], c(0.52876953, 0.55375049, 0.46252843, 0.37280654, 0.34687207), tolerance = 1e-07)

    x5 <- getSimulationRates(design = getDesignInverseNormal(futilityBounds = c(0.5, 0.5), informationRates = informationRates),
        groups = 2, riskRatio = FALSE, thetaH0 = 0.1, plannedSubjects = plannedSubjects,
        maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, directionUpper = FALSE, seed = seed)
    ##
    ## Comparison of the results of SimulationResultsRates object 'x5' with expected results
    ##
    expect_equal(x5$effect, c(-0.1, 2.7755576e-17, 0.1, 0.2), tolerance = 1e-07)
    expect_equal(x5$iterations[1, ], c(100, 100, 100, 100))
    expect_equal(x5$iterations[2, ], c(50, 41, 12, 2))
    expect_equal(x5$iterations[3, ], c(34, 29, 3, 0))
    expect_equal(x5$sampleSizes[1, ], c(30, 30, 30, 30))
    expect_equal(x5$sampleSizes[2, ], c(30, 30, 30, 30))
    expect_equal(x5$sampleSizes[3, ], c(30, 30, 30, NaN))
    expect_equal(x5$rejectPerStage[1, ], c(0.01, 0, 0, 0), tolerance = 1e-07)
    expect_equal(x5$rejectPerStage[2, ], c(0.09, 0.02, 0, 0), tolerance = 1e-07)
    expect_equal(x5$rejectPerStage[3, ], c(0.12, 0.01, 0, 0), tolerance = 1e-07)
    expect_equal(x5$overallReject, c(0.22, 0.03, 0, 0), tolerance = 1e-07)
    expect_equal(x5$futilityPerStage[1, ], c(0.49, 0.59, 0.88, 0.98), tolerance = 1e-07)
    expect_equal(x5$futilityPerStage[2, ], c(0.07, 0.1, 0.09, 0.02), tolerance = 1e-07)
    expect_equal(x5$futilityStop, c(0.56, 0.69, 0.97, 1), tolerance = 1e-07)
    expect_equal(x5$earlyStop, c(0.66, 0.71, 0.97, 1), tolerance = 1e-07)
    expect_equal(x5$expectedNumberOfSubjects, c(55.2, 51, 34.5, 30.6), tolerance = 1e-07)
    expect_equal(x5$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x5$conditionalPowerAchieved[2, ], c(0.36523014, 0.20927326, 0.16995311, 0.25129054), tolerance = 1e-07)
    expect_equal(x5$conditionalPowerAchieved[3, ], c(0.43064609, 0.32068397, 0.041565592, NaN), tolerance = 1e-07)

    x6 <- getSimulationRates(design = getDesignInverseNormal(futilityBounds = c(0.5, 0.5), informationRates = informationRates),
        groups = 1, thetaH0 = 0.4, pi1 = seq(0.2, 0.4, 0.05), plannedSubjects = plannedSubjects,
        directionUpper = FALSE, maxNumberOfIterations = maxNumberOfIterations, seed = seed)
    ##
    ## Comparison of the results of SimulationResultsRates object 'x6' with expected results
    ##
    expect_equal(x6$effect, c(-0.2, -0.15, -0.1, -0.05, 0), tolerance = 1e-07)
    expect_equal(x6$iterations[1, ], c(100, 100, 100, 100, 100))
    expect_equal(x6$iterations[2, ], c(91, 89, 66, 56, 39))
    expect_equal(x6$iterations[3, ], c(19, 49, 51, 48, 24))
    expect_equal(x6$sampleSizes[1, ], c(30, 30, 30, 30, 30))
    expect_equal(x6$sampleSizes[2, ], c(30, 30, 30, 30, 30))
    expect_equal(x6$sampleSizes[3, ], c(30, 30, 30, 30, 30))
    expect_equal(x6$rejectPerStage[1, ], c(0.03, 0.01, 0, 0, 0), tolerance = 1e-07)
    expect_equal(x6$rejectPerStage[2, ], c(0.72, 0.4, 0.14, 0.01, 0.01), tolerance = 1e-07)
    expect_equal(x6$rejectPerStage[3, ], c(0.17, 0.37, 0.26, 0.14, 0.02), tolerance = 1e-07)
    expect_equal(x6$overallReject, c(0.92, 0.78, 0.4, 0.15, 0.03), tolerance = 1e-07)
    expect_equal(x6$futilityPerStage[1, ], c(0.06, 0.1, 0.34, 0.44, 0.61), tolerance = 1e-07)
    expect_equal(x6$futilityPerStage[2, ], c(0, 0, 0.01, 0.07, 0.14), tolerance = 1e-07)
    expect_equal(x6$futilityStop, c(0.06, 0.1, 0.35, 0.51, 0.75), tolerance = 1e-07)
    expect_equal(x6$earlyStop, c(0.81, 0.51, 0.49, 0.52, 0.76), tolerance = 1e-07)
    expect_equal(x6$expectedNumberOfSubjects, c(63, 71.4, 65.1, 61.2, 48.9), tolerance = 1e-07)
    expect_equal(x6$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x6$conditionalPowerAchieved[2, ], c(0.72335875, 0.55247274, 0.3843863, 0.29482523, 0.18598438), tolerance = 1e-07)
    expect_equal(x6$conditionalPowerAchieved[3, ], c(0.71459365, 0.68392316, 0.54740245, 0.39208559, 0.15519282), tolerance = 1e-07)

    x7 <- getSimulationRates(design = getDesignInverseNormal(futilityBounds = c(0.5), typeOfDesign = "P"),
        thetaH0 = 0.3, groups = 1, plannedSubjects = c(30, 60), pi1 = seq(0.3, 0.5, 0.05),
        maxNumberOfIterations = maxNumberOfIterations, conditionalPower = 0.8,
        minNumberOfSubjectsPerStage = c(30, 30), maxNumberOfSubjectsPerStage = 5 * c(30, 30),
        directionUpper = TRUE, seed = seed)
    ##
    ## Comparison of the results of SimulationResultsRates object 'x7' with expected results
    ##
    expect_equal(x7$effect, c(0, 0.05, 0.1, 0.15, 0.2), tolerance = 1e-07)
    expect_equal(x7$iterations[1, ], c(100, 100, 100, 100, 100))
    expect_equal(x7$iterations[2, ], c(25, 41, 53, 50, 35))
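    # Editor's note (illustration, not part of the original test suite): unlike x1 to x6,
    # design x7 combines 'conditionalPower' with min/maxNumberOfSubjectsPerStage, so the
    # stage-2 sample size is recalculated from the interim data. The stage-2 values tested
    # below are therefore averages over the iterations that reached stage 2, bounded by
    # minNumberOfSubjectsPerStage[2] = 30 and maxNumberOfSubjectsPerStage[2] = 150.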
    expect_equal(x7$sampleSizes[1, ], c(30, 30, 30, 30, 30))
    expect_equal(x7$sampleSizes[2, ], c(114.24, 115.68293, 100.39623, 101.92, 82.371429), tolerance = 1e-07)
    expect_equal(x7$rejectPerStage[1, ], c(0.02, 0.06, 0.15, 0.36, 0.59), tolerance = 1e-07)
    expect_equal(x7$rejectPerStage[2, ], c(0.03, 0.12, 0.32, 0.41, 0.32), tolerance = 1e-07)
    expect_equal(x7$overallReject, c(0.05, 0.18, 0.47, 0.77, 0.91), tolerance = 1e-07)
    expect_equal(x7$futilityPerStage[1, ], c(0.73, 0.53, 0.32, 0.14, 0.06), tolerance = 1e-07)
    expect_equal(x7$futilityStop, c(0.73, 0.53, 0.32, 0.14, 0.06), tolerance = 1e-07)
    expect_equal(x7$earlyStop, c(0.75, 0.59, 0.47, 0.5, 0.65), tolerance = 1e-07)
    expect_equal(x7$expectedNumberOfSubjects, c(58.56, 77.43, 83.21, 80.96, 58.83), tolerance = 1e-07)
    expect_equal(x7$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x7$conditionalPowerAchieved[2, ], c(0.60107965, 0.60407724, 0.68409402, 0.68536207, 0.68807468), tolerance = 1e-07)

    x8 <- getSimulationRates(design = getDesignGroupSequential(futilityBounds = c(0.5, 0.5), typeOfDesign = "P"),
        thetaH0 = 0.3, groups = 2, allocationRatioPlanned = 3, plannedSubjects = (1:3) * 100,
        pi1 = seq(0.2, 0.4, 0.05), pi2 = 0.2, maxNumberOfIterations = maxNumberOfIterations,
        conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(100, 100, 100),
        maxNumberOfSubjectsPerStage = 5 * c(100, 100, 100), directionUpper = FALSE, seed = seed)
    ##
    ## Comparison of the results of SimulationResultsRates object 'x8' with expected results
    ##
    expect_equal(x8$effect, c(-0.3, -0.25, -0.2, -0.15, -0.1), tolerance = 1e-07)
    expect_equal(x8$iterations[1, ], c(100, 100, 100, 100, 100))
    expect_equal(x8$iterations[2, ], c(7, 23, 41, 52, 59))
    expect_equal(x8$iterations[3, ], c(0, 1, 1, 11, 20))
    expect_equal(x8$sampleSizes[1, ], c(100, 100, 100, 100, 100))
    expect_equal(x8$sampleSizes[2, ], c(225.57143, 148.73913, 239.7561, 361.73077, 405.05085), tolerance = 1e-07)
    expect_equal(x8$sampleSizes[3, ], c(NaN, 112, 316, 398, 405.85), tolerance = 1e-07)
    expect_equal(x8$rejectPerStage[1, ], c(0.93, 0.75, 0.54, 0.29, 0.1), tolerance = 1e-07)
    expect_equal(x8$rejectPerStage[2, ], c(0.07, 0.22, 0.4, 0.41, 0.37), tolerance = 1e-07)
    expect_equal(x8$rejectPerStage[3, ], c(0, 0.01, 0.01, 0.11, 0.14), tolerance = 1e-07)
    expect_equal(x8$overallReject, c(1, 0.98, 0.95, 0.81, 0.61), tolerance = 1e-07)
    expect_equal(x8$futilityPerStage[1, ], c(0, 0.02, 0.05, 0.19, 0.31), tolerance = 1e-07)
    expect_equal(x8$futilityPerStage[2, ], c(0, 0, 0, 0, 0.02), tolerance = 1e-07)
    expect_equal(x8$futilityStop, c(0, 0.02, 0.05, 0.19, 0.33), tolerance = 1e-07)
    expect_equal(x8$earlyStop, c(1, 0.99, 0.99, 0.89, 0.8), tolerance = 1e-07)
    expect_equal(x8$expectedNumberOfSubjects, c(115.79, 135.33, 201.46, 331.88, 420.15), tolerance = 1e-07)
    expect_equal(x8$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x8$conditionalPowerAchieved[2, ], c(0.79294349, 0.80728899, 0.77763316, 0.64160567, 0.53147513), tolerance = 1e-07)
    expect_equal(x8$conditionalPowerAchieved[3, ], c(NaN, 0.80069037, 0.80071364, 0.56677072, 0.57523679), tolerance = 1e-07)

    x9 <- getSimulationRates(design = getDesignGroupSequential(futilityBounds = c(0), typeOfDesign = "P"),
        thetaH0 = 0.8, groups = 2, riskRatio = TRUE, allocationRatioPlanned = 3,
        maxNumberOfIterations = maxNumberOfIterations, plannedSubjects = c(100, 200),
        pi1 = seq(0.15, 0.4, 0.05), pi2 = 0.2, conditionalPower = 0.8,
        minNumberOfSubjectsPerStage = c(100, 100), maxNumberOfSubjectsPerStage = 5 * c(100, 100),
        directionUpper = TRUE, seed = seed)
    ##
    ## Comparison of the results of SimulationResultsRates object 'x9' with expected results
    ##
    expect_equal(x9$effect, c(-0.05, 0.2, 0.45, 0.7, 0.95, 1.2), tolerance = 1e-07)
    expect_equal(x9$iterations[1, ], c(100, 100, 100, 100, 100, 100))
    expect_equal(x9$iterations[2, ], c(48, 66, 75, 74, 57, 35))
    expect_equal(x9$sampleSizes[1, ], c(100, 100, 100, 100, 100, 100))
    expect_equal(x9$sampleSizes[2, ], c(466.29167, 407.39394, 382.84, 357.2973, 256.61404, 268.45714), tolerance = 1e-07)
    expect_equal(x9$rejectPerStage[1, ], c(0.01, 0.02, 0.11, 0.24, 0.41, 0.65), tolerance = 1e-07)
    expect_equal(x9$rejectPerStage[2, ], c(0, 0.05, 0.34, 0.62, 0.51, 0.35), tolerance = 1e-07)
    expect_equal(x9$overallReject, c(0.01, 0.07, 0.45, 0.86, 0.92, 1), tolerance = 1e-07)
    expect_equal(x9$futilityPerStage[1, ], c(0.51, 0.32, 0.14, 0.02, 0.02, 0), tolerance = 1e-07)
    expect_equal(x9$futilityStop, c(0.51, 0.32, 0.14, 0.02, 0.02, 0), tolerance = 1e-07)
    expect_equal(x9$earlyStop, c(0.52, 0.34, 0.25, 0.26, 0.43, 0.65), tolerance = 1e-07)
    expect_equal(x9$expectedNumberOfSubjects, c(323.82, 368.88, 387.13, 364.4, 246.27, 193.96), tolerance = 1e-07)
    expect_equal(x9$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x9$conditionalPowerAchieved[2, ], c(0.32248415, 0.49314797, 0.522945, 0.55888112, 0.72047998, 0.75410423), tolerance = 1e-07)

    mySampleSizeCalculationFunction <- function(..., stage, plannedSubjects,
            minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage,
            conditionalPower, conditionalCriticalValue, overallRate) {
        if (overallRate[1] - overallRate[2] < 0.1) {
            return(plannedSubjects[stage] - plannedSubjects[stage - 1])
        } else {
            rateUnderH0 <- (overallRate[1] + overallRate[2]) / 2
            stageSubjects <- 2 * (max(0, conditionalCriticalValue * sqrt(2 * rateUnderH0 * (1 - rateUnderH0)) +
                stats::qnorm(conditionalPower) * sqrt(overallRate[1] * (1 - overallRate[1]) +
                overallRate[2] * (1 - overallRate[2]))))^2 / (max(1e-12, (overallRate[1] - overallRate[2])))^2
            stageSubjects <- ceiling(min(max(minNumberOfSubjectsPerStage[stage], stageSubjects),
                maxNumberOfSubjectsPerStage[stage]))
            return(stageSubjects)
        }
    }
    x10 <- getSimulationRates(design = getDesignInverseNormal(kMax = 2), pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3,
        plannedSubjects = c(40, 80), minNumberOfSubjectsPerStage = c(40, 20), maxNumberOfSubjectsPerStage = c(40, 160),
        conditionalPower = 0.8, calcSubjectsFunction = mySampleSizeCalculationFunction,
        maxNumberOfIterations = maxNumberOfIterations, seed = seed)
    ##
    ## Comparison of the results of SimulationResultsRates object 'x10' with expected results
    ##
    expect_equal(x10$effect, c(0, 0.1, 0.2, 0.3), tolerance = 1e-07)
    expect_equal(x10$iterations[1, ], c(100, 100, 100, 100))
    expect_equal(x10$iterations[2, ], c(100, 99, 95, 75))
    expect_equal(x10$sampleSizes[1, ], c(40, 40, 40, 40))
    expect_equal(x10$sampleSizes[2, ], c(64.34, 74.444444, 65.126316, 58.253333), tolerance = 1e-07)
    expect_equal(x10$rejectPerStage[1, ], c(0, 0.01, 0.05, 0.25), tolerance = 1e-07)
    expect_equal(x10$rejectPerStage[2, ], c(0.02, 0.19, 0.47, 0.64), tolerance = 1e-07)
    expect_equal(x10$overallReject, c(0.02, 0.2, 0.52, 0.89), tolerance = 1e-07)
    expect_equal(x10$futilityPerStage[1, ], c(0, 0, 0, 0))
    expect_equal(x10$futilityStop, c(0, 0, 0, 0))
    expect_equal(x10$earlyStop, c(0, 0.01, 0.05, 0.25), tolerance = 1e-07)
    expect_equal(x10$expectedNumberOfSubjects, c(104.34, 113.7, 101.87, 83.69), tolerance = 1e-07)
    expect_equal(x10$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x10$conditionalPowerAchieved[2, ], c(0.20349537, 0.39194633, 0.57556995, 0.71162895), tolerance = 1e-07)

    #options(width = 180)
    #maxNumberOfSubjects <- 300
    #informationRates <- (1:2)/2
    #plannedSubjects <- round(informationRates*maxNumberOfSubjects)
    #maxNumberOfIterations <- 10000
    #
    #x <- getSimulationRates(design = getDesignInverseNormal(futilityBounds = c(-1), informationRates = informationRates,typeOfDesign = "P"),
    #	thetaH0 = 0.4, groups = 1, plannedSubjects = plannedSubjects, pi1 = seq(0.3,0.4,0.02),maxNumberOfIterations = maxNumberOfIterations,
    #	conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(100,100), maxNumberOfSubjectsPerStage = 5*c(100,100), directionUpper = FALSE)
    #x$overallReject
    #x$expectedNumberOfSubjects
    #x$conditionalPowerAchieved
    #x$sampleSizes
    #x$rejectPerStage
    #x$futilityStop
    #y <- getPowerRates(design = getDesignGroupSequential(futilityBounds = c(-1,1), informationRates = informationRates,typeOfDesign = "P"),
    #	thetaH0 = 0.4, groups = 1, pi1 = seq(0.3,0.4,0.02), directionUpper = FALSE, maxNumberOfSubjects = maxNumberOfSubjects)
    #round((x$expectedNumberOfSubjects - y$expectedNumberOfSubjects)/maxNumberOfSubjects,4)
    #x$expectedNumberOfSubjects
    #y$expectedNumberOfSubjects
    #x$overallReject
    #round(x$overallReject - y$overallReject,4)
    #round(x$rejectPerStage - y$rejectPerStage,4)
    #round(x$futilityPerStage - y$futilityPerStage,4)
    #
    #x <- getSimulationRates(design = getDesignGroupSequential(futilityBounds = c(-1,1), typeOfDesign = "P"),
    #	thetaH0 = 0.3, groups = 2, allocationRatioPlanned = 2, plannedSubjects = (1:3)*100, pi1 = seq(0.2, 0.4, 0.05), pi2 = 0.1,
    #	conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(100,100,100), maxNumberOfSubjectsPerStage = 1*c(100,100,100), directionUpper = FALSE)
    #
    #y <- getPowerRates(design = getDesignGroupSequential(futilityBounds = c(-1,1), typeOfDesign = "P"),
    #	thetaH0 = 0.3, groups = 2, allocationRatioPlanned = 2, pi1 = seq(0.2, 0.4, 0.05), pi2 = 0.1, directionUpper = FALSE, maxNumberOfSubjects = 300)
    #
    #round((x$expectedNumberOfSubjects - y$expectedNumberOfSubjects)/maxNumberOfSubjects,4)
    #x$expectedNumberOfSubjects
    #y$expectedNumberOfSubjects
    #x$overallReject
    #round(x$overallReject - y$overallReject,4)
    #round(x$rejectPerStage - y$rejectPerStage,4)
    #round(x$futilityPerStage - y$futilityPerStage,4)
    #
    #x <- getSimulationRates(design = getDesignGroupSequential(futilityBounds = c(-1,1), informationRates = informationRates,typeOfDesign = "P"),
    #	thetaH0 = 0.8, groups = 2, riskRatio = TRUE, allocationRatioPlanned = 2, plannedSubjects = plannedSubjects, pi1 = seq(0.15,0.4,0.05), pi2 = 0.2,
    #	conditionalPower = 0.8, minNumberOfSubjectsPerStage = plannedSubjects, maxNumberOfSubjectsPerStage = c(100,200,300), directionUpper = TRUE)
    #
    #y <- getPowerRates(design = getDesignGroupSequential(futilityBounds = c(-1,1), informationRates = informationRates,typeOfDesign = "P"),
    #	thetaH0 = 0.8, groups = 2, riskRatio = TRUE, allocationRatioPlanned = 2, pi1 = seq(0.15,0.4,0.05), pi2 = 0.2, maxNumberOfSubjects = maxNumberOfSubjects,
    #	directionUpper = TRUE)
    #
    #round((x$expectedNumberOfSubjects - y$expectedNumberOfSubjects)/maxNumberOfSubjects,4)
    #x$expectedNumberOfSubjects
    #y$expectedNumberOfSubjects
    #x$overallReject
    #round(x$overallReject - y$overallReject,4)
    #round(x$rejectPerStage - y$rejectPerStage,4)
    #round(x$futilityPerStage - y$futilityPerStage,4)
    #
    ##############################################################################################################################
    #
    #x <- getSimulationSurvival(design = getDesignInverseNormal(typeOfDesign = "P", futilityBounds = c(0,0)),
    #	pi1 = seq(0.2, 0.4, 0.05), maxNumberOfIterations = 10000, accrualTime = 24, plannedEvents = c(67,134,201), maxNumberOfSubjects = 396, allocation1 = 1, allocation2 = 1)
    #toc()
    #y <- getPowerSurvival(design = getDesignInverseNormal(typeOfDesign = "P", futilityBounds = c(0,0)),
    #	pi1 = seq(0.2, 0.4, 0.05), maxNumberOfEvents = 201, accrualTime = 24, maxNumberOfSubjects = 396, allocationRatioPlanned = 1)
    #
    #round(x$expectedNumberOfEvents - y$expectedNumberOfEvents,1)
    #round(x$expectedNumberOfSubjects - y$expectedNumberOfSubjects,1)
    #round(x$numberOfSubjects - y$numberOfSubjects,1)
    #round(x$rejectPerStage - y$rejectPerStage,4)
    #round(x$overallReject - y$overallReject,4)
    #round(x$earlyStop - y$earlyStop,4)
    #round(x$futilityPerStage - y$futilityPerStage,4)
    #round(x$futilityStop - y$futilityStop,4)
    #round(x$analysisTime - y$analysisTime,4)
    #round(x$studyDuration - y$studyDuration,4)
    #x$conditionalPowerAchieved
})
rpact/tests/testthat/test-f_core_output_formats.R0000644000176200001440000001073613574412501022106 0ustar liggesusers
######################################################################################
#                                                                                    #
# -- Unit tests --                                                                   #
#                                                                                    #
# This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. #
#                                                                                    #
# File version: 1.0.0                                                                #
# Date: 06 November 2019, 17:12:23                                                   #
# Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD                             #
# Licensed under "GNU Lesser General Public License" version 3                       #
# License text can be found here: https://www.r-project.org/Licenses/LGPL-3          #
#                                                                                    #
# RPACT company website: https://www.rpact.com                                       #
# RPACT package website: https://www.rpact.org                                       #
#                                                                                    #
# Contact us for information about our services: info@rpact.com                      #
#                                                                                    #
######################################################################################

context("Testing the output format functions")

# @refFS[Sec.]{fs:sec:outputFormats}
test_that("'formatPValues'", {
    x <- formatPValues(0.0000234)
    ##
    ## Comparison of the results of character object 'x' with expected results
    ##
    expect_equal(x, "<0.0001")

    x <- formatPValues(c(0.0000234, 0.0000134, 0.1234))
    ##
    ## Comparison of the results of character object 'x' with expected results
    ##
    expect_equal(x, c("<0.0001", "<0.0001", "0.1234"))

    x <- formatPValues(c(0.0002345678, 0.0000134, 0.1234, 0.000000000001, .00000009999))
    ##
    ## Comparison of the results of character object 'x' with expected results
    ##
    expect_equal(x, c("0.0002346", "0.0000134", "0.1234000", "<0.000001", "<0.000001"))

    x <- formatPValues(c(0.00234, 0.000013, 0.1234, 0.000000000001, .00000009999))
    ##
    ## Comparison of the results of character object 'x' with expected results
    ##
    expect_equal(x, c("0.00234", "<0.0001", "0.12340", "<0.0001", "<0.0001"))

    x <- formatPValues(c(6.244e-05, 4.906e-02, 1.446e-02, NA_real_))
    ##
    ## Comparison of the results of character object 'x' with expected results
    ##
    expect_equal(x, c("<0.0001", "0.04906", "0.01446", "NA"))

    x <- formatPValues(c(6.24408201934656e-05, 7.55449751868031e-05, 1.23207030919836e-05, NA_real_))
    ##
    ## Comparison of the results of character object 'x' with expected results
    ##
    expect_equal(x, c("<0.0001", "<0.0001", "<0.0001", "NA"))
    # @refFS[Sec.]{fs:sec:outputFormats}
})

test_that("'formatRepeatedPValues'", {
    x <- formatRepeatedPValues(c(0.0000234, 0.0000134, 0.1234))
    ##
    ## Comparison of the results of character object 'x' with expected results
    ##
    expect_equal(x, c("<0.0001", "<0.0001", "0.1234"))

    x <- formatRepeatedPValues(c(0.0000234, 0.0000134,
        0.5234))
    ##
    ## Comparison of the results of character object 'x' with expected results
    ##
    expect_equal(x, c("<0.0001", "<0.0001", ">0.5"))

    x <- formatRepeatedPValues(c(0.0000234, 0.0000134, 0.5234, NA_real_))
    ##
    ## Comparison of the results of character object 'x' with expected results
    ##
    expect_equal(x, c("<0.0001", "<0.0001", ">0.5", "NA"))
    # @refFS[Sec.]{fs:sec:outputFormats}
})

test_that("'formatConditionalPower'", {
    x <- formatConditionalPower(c(0.0000234, 0.0000134, 0.5234, NA_real_))
    ##
    ## Comparison of the results of character object 'x' with expected results
    ##
    expect_equal(x, c("0", "0", "0.5234", "NA"))

    x <- formatConditionalPower(c(0.234, 0.123456, 0.6, 0.000001))
    ##
    ## Comparison of the results of character object 'x' with expected results
    ##
    expect_equal(x, c("0.2340", "0.1235", "0.6000", "0"))
    # @refFS[Sec.]{fs:sec:outputFormats}
})

test_that("'formatProbabilities'", {
    x <- formatProbabilities(c(NA_real_, NA_real_, 0.4536623, 0.7713048))
    ##
    ## Comparison of the results of character object 'x' with expected results
    ##
    expect_equal(x, c("NA", "NA", "0.4537", "0.7713"))
})
rpact/tests/testthat/test-f_design_sample_size_calculator.R0000644000176200001440000134066113574422572024073 0ustar liggesusers
######################################################################################
#                                                                                    #
# -- Unit tests --                                                                   #
#                                                                                    #
# This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. #
#                                                                                    #
# File version: 1.0.0                                                                #
# Date: 12 December 2019, 12:31:28                                                   #
# Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD                             #
# Licensed under "GNU Lesser General Public License" version 3                       #
# License text can be found here: https://www.r-project.org/Licenses/LGPL-3          #
#                                                                                    #
# RPACT company website: https://www.rpact.com                                       #
# RPACT package website: https://www.rpact.org                                       #
#                                                                                    #
# Contact us for information about our services: info@rpact.com                      #
#                                                                                    #
######################################################################################

context("Testing internal functions of the sample size calculator")

test_that("'.getLambdaStepFunctionByTime': return correct lambda for specified time and piecewise exponential bounds", {
    lambda1 <- .getLambdaStepFunctionByTime(time = 1, c(6, 9, 15, 21), c(0.025, 0.04, 0.015, 0.01, 0.007))
    ##
    ## Comparison of the results of numeric object 'lambda1' with expected results
    ##
    expect_equal(lambda1, 0.025, tolerance = 1e-07)

    lambda2 <- .getLambdaStepFunctionByTime(time = 6, c(6, 9, 15, 21), c(0.025, 0.04, 0.015, 0.01, 0.007))
    ##
    ## Comparison of the results of numeric object 'lambda2' with expected results
    ##
    expect_equal(lambda2, 0.025, tolerance = 1e-07)

    lambda3 <- .getLambdaStepFunctionByTime(time = 7, c(6, 9, 15, 21), c(0.025, 0.04, 0.015, 0.01, 0.007))
    ##
    ## Comparison of the results of numeric object 'lambda3' with expected results
    ##
    expect_equal(lambda3, 0.04, tolerance = 1e-07)

    lambda4 <- .getLambdaStepFunctionByTime(time = 9, c(6, 9, 15, 21), c(0.025, 0.04, 0.015, 0.01, 0.007))
    ##
    ## Comparison of the results of numeric object 'lambda4' with expected results
    ##
    expect_equal(lambda4, 0.04, tolerance = 1e-07)

    lambda5 <- .getLambdaStepFunctionByTime(time = 14, c(6, 9, 15, 21), c(0.025, 0.04, 0.015, 0.01, 0.007))
    ##
    ## Comparison of the results of numeric object 'lambda5' with expected results
    ##
    expect_equal(lambda5, 0.015, tolerance = 1e-07)

    lambda6 <- .getLambdaStepFunctionByTime(time = 15, c(6, 9, 15, 21), c(0.025, 0.04, 0.015, 0.01, 0.007))
    ##
    ## Comparison of the results of numeric object 'lambda6' with expected results
    ##
    expect_equal(lambda6, 0.015, tolerance = 1e-07)

    lambda7 <- .getLambdaStepFunctionByTime(time = 16, c(6, 9, 15, 21), c(0.025, 0.04, 0.015, 0.01, 0.007))
    ##
    ## Comparison of the results of numeric object 'lambda7' with expected results
    ##
    expect_equal(lambda7, 0.01, tolerance = 1e-07)

    lambda8 <- .getLambdaStepFunctionByTime(time = 21, c(6, 9, 15, 21), c(0.025, 0.04, 0.015, 0.01, 0.007))
    ##
    ## Comparison of the results of numeric object 'lambda8' with expected results
    ##
    expect_equal(lambda8, 0.01, tolerance = 1e-07)

    lambda9 <- .getLambdaStepFunctionByTime(time = 50, c(6, 9, 15, 21), c(0.025, 0.04, 0.015, 0.01, 0.007))
    ##
    ## Comparison of the results of numeric object 'lambda9' with expected results
    ##
    expect_equal(lambda9, 0.007, tolerance = 1e-07)
})

context("Testing the sample size calculation of means for different designs and arguments")

test_that("'getSampleSizeMeans': sample size calculation of means for one sided group sequential design", {
    # @refFS[Formula]{fs:criticalValuesWangTiatis}
    # @refFS[Formula]{fs:inflationFactor}
    # @refFS[Formula]{fs:expectedReduction}
    designGS1pretest <- getDesignGroupSequential(informationRates = c(0.2, 0.5, 1), sided = 1,
        beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3)
    ##
    ## Comparison of the results of TrialDesignGroupSequential object 'designGS1pretest' with expected results
    ##
    expect_equal(designGS1pretest$alphaSpent, c(0.0020595603, 0.0098772988, 0.024999999), tolerance = 1e-07)
    expect_equal(designGS1pretest$criticalValues, c(2.8688923, 2.3885055, 2.0793148), tolerance = 1e-07)
    expect_equal(designGS1pretest$stageLevels, c(0.0020595603, 0.0084585282, 0.018794214), tolerance = 1e-07)

    designGS1 <- getDesignGroupSequential(informationRates = c(0.2, 0.5, 1), sided = 1,
        beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3)

    # @refFS[Formula]{fs:sampleSizeOneMeanVarianceUnknownOnesided}
    # @refFS[Formula]{fs:sampleSizePerStageOneMean}
    sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 1, thetaH0 = 0.5, stDev = 2,
        normalApproximation = FALSE, alternative = 0.8)
    ##
    ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 494.6455, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 98.929099, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 247.32275, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 494.6455, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 491.89699, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 462.87248, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 360.24062, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 1.090771, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.80583608, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.68748891, tolerance = 1e-07)
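    # Editor's note (illustration, not part of the original test suite):
    # 'criticalValuesEffectScale' re-expresses the rejection boundaries on the scale of
    # the effect under test (here the mean tested against thetaH0 = 0.5), shrinking from
    # about 1.09 at the first interim to about 0.687 at the final analysis as information
    # accumulates, while 'criticalValuesPValueScale' states the same boundaries as
    # one-sided stage-wise p-value thresholds (identical to the design's stageLevels above).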
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeOneMeanVarianceKnownOnesided} # @refFS[Formula]{fs:sampleSizePerStageOneMean} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 1, thetaH0 = 0.5, stDev = 2, normalApproximation = TRUE, alternative = 0.8) ## ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 492.61495, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 98.522991, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 246.30748, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 492.61495, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 489.87773, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 460.97237, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 358.76182, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 1.0780634, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.80438093, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.68736844, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceUnknownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = FALSE, alternative = 1.3, allocationRatioPlanned = 1) ## ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 107.00299, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 53.501497, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 53.501497, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 21.400599, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 53.501497, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 107.00299, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 106.40843, tolerance = 1e-07) 
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 100.12977, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 77.928183, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.8110917, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.3500437, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.81436669, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceKnownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = TRUE, alternative = 1.3, allocationRatioPlanned = 1) ## ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 104.93573, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 52.467865, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 52.467865, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 20.987146, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 52.467865, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 104.93573, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 104.35265, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 98.195298, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 76.422636, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.5049412, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.318984, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.81192991, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceUnknownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = FALSE, alternative = 1.3, 

# @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceUnknownOnesided}
# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = FALSE, alternative = 1.3, allocationRatioPlanned = 3)

##
## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 141.97133, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 106.4785, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 35.492832, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 28.394266, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 70.985664, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 141.97133, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 21.295699, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 53.239248, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 106.4785, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 7.0985664, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 17.746416, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 35.492832, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 141.18246, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 132.85195, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 103.39494, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.7228801, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.3419598, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.81376184, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

# @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceKnownOnesided}
# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = TRUE, alternative = 1.3, allocationRatioPlanned = 3)

##
## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 139.91431, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 104.93573, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 34.978577, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 27.982861, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 69.957153, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 139.91431, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 20.987146, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 52.467865, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 104.93573, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 6.9957153, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 17.489288, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 34.978577, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 139.13687, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 130.92706, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 101.89685, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.5049412, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.318984, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.81192991, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)
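
# Additional consistency check, not part of the reference results: the
# per-group maxima should respect the planned 3:1 allocation and sum to the
# overall maximum sample size (implied by the values asserted above).
expect_equal(sampleSizeResult$maxNumberOfSubjects1 / sampleSizeResult$maxNumberOfSubjects2, 3, tolerance = 1e-06)
expect_equal(sampleSizeResult$maxNumberOfSubjects1 + sampleSizeResult$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-07)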

# @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceUnknownOnesided}
# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0.5, stDev = 2, normalApproximation = FALSE, alternative = 2.1, allocationRatioPlanned = 1)

##
## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 71.36231, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 35.681155, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 35.681155, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 14.272462, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 35.681155, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 71.36231, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 70.965784, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 66.77843, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 51.971772, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.222748, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.1829515, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5038177, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

# @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceKnownOnesided}
# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0.5, stDev = 2, normalApproximation = TRUE, alternative = 2.1, allocationRatioPlanned = 1)

##
## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 69.273978, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 34.636989, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 34.636989, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 13.854796, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 34.636989, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 69.273978, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 68.889056, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 64.824239, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 50.450881, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.5830046, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.123365, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.4992983, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

# @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceUnknownOnesided}
# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0.5, stDev = 2, normalApproximation = FALSE, alternative = 2.1, allocationRatioPlanned = 0.4)

##
## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 86.937573, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 24.839307, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 62.098267, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 17.387515, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 43.468787, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 86.937573, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 4.9678613, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 12.419653, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 24.839307, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 12.419653, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 31.049133, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 62.098267, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 86.454503, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 81.353233, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 63.314931, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.0734522, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.1712593, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5029983, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

# @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceKnownOnesided}
# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0.5, stDev = 2, normalApproximation = TRUE, alternative = 2.1, allocationRatioPlanned = 0.4)

##
## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 84.860623, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 24.245892, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 60.614731, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 16.972125, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 42.430311, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 84.860623, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 4.8491785, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 12.122946, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 24.245892, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 12.122946, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 30.307365, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 60.614731, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 84.389093, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 79.409693, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 61.802329, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.5830046, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.123365, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.4992983, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

# @refFS[Formula]{fs:sampleSizeTwoMeansRatioVarianceUnknownOnesided}
# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, meanRatio = TRUE, thetaH0 = 0.9, stDev = 3, normalApproximation = FALSE, alternative = 1.9, allocationRatioPlanned = 1)

##
## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 363.14949, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 181.57474, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 181.57474, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 72.629897, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 181.57474, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 363.14949, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 361.13164, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 339.82298, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 264.47466, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.8861856, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9212807, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5251098, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)
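
# Additional sanity check, not part of the reference results: on the effect
# (ratio) scale the final-stage rejection boundary must lie above the
# hypothesized ratio thetaH0 = 0.9, since rejection requires an observed
# ratio beyond the null value.
expect_true(sampleSizeResult$criticalValuesEffectScale[3, ] > 0.9)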

# @refFS[Formula]{fs:sampleSizeTwoMeansRatioVarianceKnownOnesided}
# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, meanRatio = TRUE, thetaH0 = 0.9, stDev = 3, normalApproximation = TRUE, alternative = 1.9, allocationRatioPlanned = 1)

##
## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 361.11139, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 180.5557, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 180.5557, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 72.222278, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 180.5557, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 361.11139, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 359.10487, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 337.9158, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 262.99035, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.8268779, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9146031, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5245615, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

# @refFS[Formula]{fs:sampleSizeTwoMeansRatioVarianceUnknownOnesided}
# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, meanRatio = TRUE, thetaH0 = 0.9, stDev = 3, normalApproximation = FALSE, alternative = 1.9, allocationRatioPlanned = 3)

##
## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 458.2463, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 343.68473, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 114.56158, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 91.64926, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 229.12315, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 458.2463, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 68.736945, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 171.84236, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 343.68473, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 22.912315, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 57.280788, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 114.56158, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 455.70005, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 428.81135, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 333.7318, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.8732837, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9198713, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5249957, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

# @refFS[Formula]{fs:sampleSizeTwoMeansRatioVarianceKnownOnesided}
# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, meanRatio = TRUE, thetaH0 = 0.9, stDev = 3, normalApproximation = TRUE, alternative = 1.9, allocationRatioPlanned = 3)

##
## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 456.21071, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 342.15803, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 114.05268, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 91.242142, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 228.10535, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 456.21071, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 68.431606, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 171.07902, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 342.15803, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 22.810535, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 57.026339, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 114.05268, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 453.67577, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 426.90651, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 332.24932, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.8268779, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9146031, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5245615, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)
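
# Hedged observation derived from the asserted values (not from the
# reference spec): with normalApproximation = TRUE the calculation is based
# on the normal instead of the t distribution, so the maximum sample size is
# slightly smaller than in the otherwise identical case above (456.21 vs. 458.25).
expect_lt(sampleSizeResult$maxNumberOfSubjects, 458.2463)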

# @refFS[Formula]{fs:sampleSizeTwoMeansRatioVarianceKnownOnesided}
# @refFS[Formula]{fs:sampleSizeRatioMeansOptimumAllocationRatio}
# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, meanRatio = TRUE, thetaH0 = 0.9, stDev = 3, normalApproximation = TRUE, alternative = 1.9, allocationRatioPlanned = 0)

##
## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$allocationRatioPlanned, 1.1111111, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 360.11385, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 189.5336, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 170.58024, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 72.022769, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 180.05692, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 360.11385, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 37.906721, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 94.766802, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 189.5336, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 34.116049, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 85.290122, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 170.58024, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 358.11287, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 336.98233, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 262.26386, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.8268779, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9146031, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5245615, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)
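
# Hedged observation, not a general claim: the optimum allocation ratio
# computed here coincides numerically with 1 / thetaH0 = 1 / 0.9.
expect_equal(sampleSizeResult$allocationRatioPlanned, 1 / 0.9, tolerance = 1e-06)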
})

test_that("'getSampleSizeMeans': sample size calculation of means for two sided group sequential design", {
	# @refFS[Formula]{fs:criticalValuesWangTiatis}
	# @refFS[Formula]{fs:inflationFactor}
	# @refFS[Formula]{fs:expectedReduction}
	designGS2pretest <- getDesignGroupSequential(informationRates = c(0.2, 0.5, 1), alpha = 0.4, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3)

	##
	## Comparison of the results of TrialDesignGroupSequential object 'designGS2pretest' with expected results
	##
	expect_equal(designGS2pretest$alphaSpent, c(0.12265406, 0.26238998, 0.4), tolerance = 1e-07)
	expect_equal(designGS2pretest$criticalValues, c(1.5437287, 1.2852363, 1.1188632), tolerance = 1e-07)
	expect_equal(designGS2pretest$stageLevels, c(0.06132703, 0.099354859, 0.13159925), tolerance = 1e-07)

	designGS2 <- getDesignGroupSequential(informationRates = c(0.2, 0.5, 1), alpha = 0.4, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3)

	# @refFS[Formula]{fs:sampleSizeOneMeanVarianceUnknownOnesided}
	# @refFS[Formula]{fs:sampleSizePerStageOneMean}
	sampleSizeResult <- getSampleSizeMeans(designGS2, groups = 1, thetaH0 = 0.5, stDev = 2, normalApproximation = FALSE, alternative = 0.8)

	##
	## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
	##
	expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[3, ], 1)
	expect_equal(sampleSizeResult$maxNumberOfSubjects, 234.92433, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[1, ], 46.984866, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[2, ], 117.46217, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[3, ], 234.92433, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 195.45911, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 176.81177, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 134.60888, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
	expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.041134725, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.26146972, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.3536511, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 0.95886527, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 0.73853028, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.6463489, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)
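
	# Additional consistency check, not part of the reference results: for this
	# two-sided design the boundaries on the effect scale are symmetric around
	# thetaH0 = 0.5, i.e., lower + upper = 2 * thetaH0 at each stage.
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ] + sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 1, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ] + sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1, tolerance = 1e-07)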

	# @refFS[Formula]{fs:sampleSizeOneMeanVarianceKnownOnesided}
	# @refFS[Formula]{fs:sampleSizePerStageOneMean}
	sampleSizeResult <- getSampleSizeMeans(designGS2, groups = 1, thetaH0 = 0.5, stDev = 2, normalApproximation = TRUE, alternative = 0.8)

	##
	## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
	##
	expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[3, ], 1)
	expect_equal(sampleSizeResult$maxNumberOfSubjects, 234.50706, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[1, ], 46.901412, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[2, ], 117.25353, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[3, ], 234.50706, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 195.11194, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 176.49772, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 134.36979, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
	expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.049174965, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.26261678, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.35387349, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 0.95082503, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 0.73738322, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.64612651, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

	# @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceUnknownOnesided}
	# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
	sampleSizeResult <- getSampleSizeMeans(designGS2, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = FALSE, alternative = 1.3, allocationRatioPlanned = 1)

	##
	## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
	##
	expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[3, ], 1)
	expect_equal(sampleSizeResult$maxNumberOfSubjects, 50.39219, tolerance = 1e-07)
	expect_equal(sampleSizeResult$maxNumberOfSubjects1, 25.196095, tolerance = 1e-07)
	expect_equal(sampleSizeResult$maxNumberOfSubjects2, 25.196095, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[1, ], 10.078438, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[2, ], 25.196095, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[3, ], 50.39219, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 41.926745, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 37.926818, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 28.874132, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
	expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -2.1720469, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], -1.0543228, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], -0.63787834, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 2.1720469, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.0543228, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.63787834, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

	# @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceKnownOnesided}
	# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
	sampleSizeResult <- getSampleSizeMeans(designGS2, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = TRUE, alternative = 1.3, allocationRatioPlanned = 1)

	##
	## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
	##
	expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[3, ], 1)
	expect_equal(sampleSizeResult$maxNumberOfSubjects, 49.954167, tolerance = 1e-07)
	expect_equal(sampleSizeResult$maxNumberOfSubjects1, 24.977083, tolerance = 1e-07)
	expect_equal(sampleSizeResult$maxNumberOfSubjects2, 24.977083, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[1, ], 9.9908334, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[2, ], 24.977083, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[3, ], 49.954167, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 41.562306, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 37.597148, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 28.62315, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
	expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -1.9535752, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], -1.0286606, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], -0.63321489, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 1.9535752, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.0286606, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.63321489, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)
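
	# Additional consistency check, not part of the reference results: with
	# thetaH0 = 0 the two-sided boundaries on the effect scale are mirror
	# images of each other (lower = -upper).
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -sampleSizeResult$criticalValuesEffectScaleUpper[1, ], tolerance = 1e-07)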

	# @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceUnknownOnesided}
	# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
	sampleSizeResult <- getSampleSizeMeans(designGS2, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = FALSE, alternative = 1.3, allocationRatioPlanned = 3)

	##
	## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
	##
	expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[3, ], 1)
	expect_equal(sampleSizeResult$maxNumberOfSubjects, 67.037534, tolerance = 1e-07)
	expect_equal(sampleSizeResult$maxNumberOfSubjects1, 50.27815, tolerance = 1e-07)
	expect_equal(sampleSizeResult$maxNumberOfSubjects2, 16.759383, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[1, ], 13.407507, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[2, ], 33.518767, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[3, ], 67.037534, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 10.05563, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 25.139075, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 50.27815, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 3.3518767, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 8.3796917, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 16.759383, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 55.775818, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 50.454651, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 38.411718, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
	expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -2.1030977, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], -1.0473776, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], -0.63668307, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 2.1030977, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.0473776, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.63668307, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

	# @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceKnownOnesided}
	# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
	sampleSizeResult <- getSampleSizeMeans(designGS2, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = TRUE, alternative = 1.3, allocationRatioPlanned = 3)

	##
	## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
	##
	expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[3, ], 1)
	expect_equal(sampleSizeResult$maxNumberOfSubjects, 66.605556, tolerance = 1e-07)
	expect_equal(sampleSizeResult$maxNumberOfSubjects1, 49.954167, tolerance = 1e-07)
	expect_equal(sampleSizeResult$maxNumberOfSubjects2, 16.651389, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[1, ], 13.321111, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[2, ], 33.302778, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[3, ], 66.605556, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 9.9908334, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 24.977083, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 49.954167, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 3.3302778, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 8.3256945, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 16.651389, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 55.416408, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 50.12953, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 38.164199, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
	expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -1.9535752, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], -1.0286606, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], -0.63321489, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 1.9535752, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.0286606, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.63321489, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)
})

context("Testing the sample size calculation of rates for different designs and arguments")

test_that("'getSampleSizeRates': sample size calculation of rates for one sided group sequential design", {
	designGS1 <- getDesignGroupSequential(informationRates = c(0.2, 0.5, 1), sided = 1, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3)

	# @refFS[Formula]{fs:sampleSizeOneRateExactOnesidedLargerpi1}
	# @refFS[Formula]{fs:sampleSizePerStageOneRate}
	sampleSizeResult <- getSampleSizeRates(designGS1, groups = 1, thetaH0 = 0.5, pi1 = 0.8, normalApproximation = FALSE)

	##
	## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
	##
	expect_equal(sampleSizeResult$directionUpper, TRUE)
	expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[3, ], 1)
	expect_equal(sampleSizeResult$maxNumberOfSubjects, 29.536017, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[1, ], 5.9072033, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[2, ], 14.768008, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[3, ], 29.536017, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 29.371899, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 27.638803, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 21.510502, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
	expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 1.090192, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.81076728, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.6912997, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)
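
	# Additional sanity check, not part of the reference results: the rejection
	# boundaries on the effect scale decrease from stage to stage for this
	# Wang & Tsiatis design.
	expect_true(sampleSizeResult$criticalValuesEffectScale[3, ] < sampleSizeResult$criticalValuesEffectScale[1, ])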

	# @refFS[Formula]{fs:sampleSizeOneRateExactOnesidedSmallerpi1}
	# @refFS[Formula]{fs:sampleSizePerStageOneRate}
	sampleSizeResult <- getSampleSizeRates(designGS1, groups = 1, thetaH0 = 0.5, pi1 = 0.2, normalApproximation = FALSE)

	##
	## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
	##
	expect_equal(sampleSizeResult$directionUpper, FALSE)
	expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[3, ], 1)
	expect_equal(sampleSizeResult$maxNumberOfSubjects, 29.536017, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[1, ], 5.9072033, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[2, ], 14.768008, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[3, ], 29.536017, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 29.371899, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 27.638803, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 21.510502, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
	expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], -0.090191958, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.18923272, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.3087003, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

	# @refFS[Formula]{fs:sampleSizeOneRateApproximation}
	# @refFS[Formula]{fs:sampleSizePerStageOneRate}
	sampleSizeResult <- getSampleSizeRates(designGS1, groups = 1, thetaH0 = 0.5, pi1 = 0.8, normalApproximation = TRUE)

	##
	## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
	##
	expect_equal(sampleSizeResult$directionUpper, TRUE)
	expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[3, ], 1)
	expect_equal(sampleSizeResult$maxNumberOfSubjects, 26.111979, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[1, ], 5.2223957, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[2, ], 13.055989, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[3, ], 26.111979, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 25.966887, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 24.434704, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 19.016842, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
	expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 1.127696, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.83051514, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.70345593, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)
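
	# Hedged observation derived from the asserted values: in this small-sample
	# setting the normal approximation needs noticeably fewer subjects than the
	# exact binomial calculation above (26.11 vs. 29.54).
	expect_lt(sampleSizeResult$maxNumberOfSubjects, 29.536017)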

	# @refFS[Formula]{fs:sampleSizeOneRateExactOnesidedSmallerpi1}
	# @refFS[Formula]{fs:sampleSizePerStageTwoRates}
	sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, thetaH0 = 0, pi1 = 0.5, pi2 = 0.3, allocationRatioPlanned = 1)

	##
	## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
	##
	expect_equal(sampleSizeResult$directionUpper, TRUE)
	expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[3, ], 1)
	expect_equal(sampleSizeResult$maxNumberOfSubjects, 261.60183, tolerance = 1e-07)
	expect_equal(sampleSizeResult$maxNumberOfSubjects1, 130.80091, tolerance = 1e-07)
	expect_equal(sampleSizeResult$maxNumberOfSubjects2, 130.80091, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[1, ], 52.320365, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[2, ], 130.80091, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[3, ], 261.60183, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 260.14823, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 244.79812, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 190.51949, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
	expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.39662162, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.20482715, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.12354802, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

	# @refFS[Formula]{fs:sampleSizeTwoRatesDiff}
	# @refFS[Formula]{fs:sampleSizePerStageTwoRates}
	sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, thetaH0 = 0, pi1 = 0.5, pi2 = 0.3, allocationRatioPlanned = 3)

	##
	## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
	##
	expect_equal(sampleSizeResult$directionUpper, TRUE)
	expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[3, ], 1)
	expect_equal(sampleSizeResult$maxNumberOfSubjects, 349.41307, tolerance = 1e-07)
	expect_equal(sampleSizeResult$maxNumberOfSubjects1, 262.0598, tolerance = 1e-07)
	expect_equal(sampleSizeResult$maxNumberOfSubjects2, 87.353268, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[1, ], 69.882614, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[2, ], 174.70654, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[3, ], 349.41307, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 52.411961, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 131.0299, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 262.0598, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 17.470654, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 43.676634, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 87.353268, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 347.47155, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 326.9689, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 254.47069, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
	expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.38949339, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.20784714, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.12553463, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

	# @refFS[Formula]{fs:EstimatesDiffFarringtonManning}
	# @refFS[Formula]{fs:sampleSizeTwoRatesDiff}
	# @refFS[Formula]{fs:sampleSizePerStageTwoRates}
	sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, thetaH0 = 0.2, pi1 = 0.5, pi2 = 0.1, allocationRatioPlanned = 1)

	##
	## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
	##
	expect_equal(sampleSizeResult$directionUpper, TRUE)
	expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
	expect_equal(sampleSizeResult$informationRates[3, ], 1)
	expect_equal(sampleSizeResult$maxNumberOfSubjects, 201.70565, tolerance = 1e-07)
	expect_equal(sampleSizeResult$maxNumberOfSubjects1, 100.85283, tolerance = 1e-07)
	expect_equal(sampleSizeResult$maxNumberOfSubjects2, 100.85283, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[1, ], 40.341131, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[2, ], 100.85283, tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[3, ], 201.70565, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 200.58487, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 188.74931, tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 146.89828, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
	expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.6326463, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.40827798, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.32212934, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)
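
	# Additional sanity check, not part of the reference results: under the
	# shifted null hypothesis thetaH0 = 0.2 the final-stage boundary on the
	# rate-difference scale lies between thetaH0 and the assumed difference
	# pi1 - pi2 = 0.4.
	expect_true(sampleSizeResult$criticalValuesEffectScale[3, ] > 0.2)
	expect_true(sampleSizeResult$criticalValuesEffectScale[3, ] < 0.4)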
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 95.531671, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 191.06334, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 266.00237, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 250.30683, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 194.80676, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.59822838, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.40051537, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.32119139, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:EstimatesDiffFarringtonManning} # @refFS[Formula]{fs:sampleSizeTwoRatesDiff} # @refFS[Formula]{fs:sampleSizeRatesDiffOptimumAllocationRatio} # @refFS[Formula]{fs:sampleSizePerStageTwoRates} sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, thetaH0 = 0.2, pi1 = 0.5, pi2 = 0.1, allocationRatioPlanned = 0) ## ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$allocationRatioPlanned, 1.1669392, tolerance = 1e-07) expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 200.45189, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 107.94727, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 92.504622, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 40.090378, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 100.22594, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 200.45189, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 21.589453, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 53.973634, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 107.94727, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 18.500924, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 46.252311, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 92.504622, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 199.33807, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 187.57608, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 145.98518, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) 
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.63834776, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.41018483, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.32243267, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:EstimatesRatioFarringtonManning} # @refFS[Formula]{fs:sampleSizeTwoRatesRatio} # @refFS[Formula]{fs:sampleSizePerStageTwoRates} sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, riskRatio = TRUE, thetaH0 = 0.9, pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1) ## ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 171.20812, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 85.604059, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 85.604059, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 34.241624, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 85.604059, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 171.20812, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 170.2568, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 160.21075, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 124.68752, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.1899424, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.0225352, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5569402, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:EstimatesRatioFarringtonManning} # @refFS[Formula]{fs:sampleSizeTwoRatesRatio} # @refFS[Formula]{fs:sampleSizePerStageTwoRates} sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, riskRatio = TRUE, thetaH0 = 0.9, pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) 
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 221.72371, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 166.29278, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 55.430927, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 44.344741, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 110.86185, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 221.72371, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 33.258556, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 83.14639, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 166.29278, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 11.086185, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 27.715463, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 55.430927, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 220.4917, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 207.48153, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 161.47703, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.1917697, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.0740853, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5843199, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:EstimatesRatioFarringtonManning} # @refFS[Formula]{fs:sampleSizePerStageTwoRates} # @refFS[Formula]{fs:sampleSizeTwoRatesRatioOptimumAllocationRatio} sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, riskRatio = TRUE, thetaH0 = 0.9, pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 0) ## ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$allocationRatioPlanned, 1.0304199, tolerance = 1e-07) expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 171.17189, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 86.868201, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 84.303693, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 34.234379, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 85.585947, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 171.17189, tolerance = 1e-07) 
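    # Sanity sketch (not part of the validated expectations): with the
    # optimized allocation ratio rOpt = 1.0304199 tested above, the group
    # maxima should split the overall maximum as rOpt / (1 + rOpt) and
    # 1 / (1 + rOpt); 'rOptSketch' is a local helper name, not rpact API.
    rOptSketch <- 1.0304199
    rOptSketch / (1 + rOptSketch) * 171.17189 # ~86.8682 = maxNumberOfSubjects1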
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 17.37364, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 43.434101, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 86.868201, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 16.860739, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 42.151846, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 84.303693, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 170.22077, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 160.17685, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 124.66114, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.1919838, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.0241846, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5576701, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)
})

test_that("'getSampleSizeRates': sample size calculation of rates for two sided group sequential design", {
    designGS2 <- getDesignGroupSequential(informationRates = c(0.2, 0.5, 1),
        alpha = 0.4, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3)

    # @refFS[Formula]{fs:sampleSizeOneRateApproximation}
    # @refFS[Formula]{fs:sampleSizePerStageOneRate}
    sampleSizeResult <- getSampleSizeRates(designGS2, groups = 1, thetaH0 = 0.5,
        pi1 = 0.8, normalApproximation = TRUE)

    ##
    ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 11.331566, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 2.2663131, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 5.6657828, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 11.331566, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 9.4279622, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 8.5285086, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 6.4928537, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -0.01272092, tolerance = 1e-07)
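    # Hedged check of the first-stage lower bound above, assuming the
    # one-sample effect-scale bounds are pi0 -/+ z_k * sqrt(pi0 * (1 - pi0) / n_k)
    # with z_k = qnorm(1 - alpha_k / 2) taken from the two-sided stage level
    # alpha_1 = 0.12265406 (tested on the p-value scale below):
    zSketch <- qnorm(1 - 0.12265406 / 2)
    0.5 - zSketch * sqrt(0.5 * 0.5 / 2.2663131) # ~ -0.0127, slightly negative
    # The negative value is an artifact of the normal approximation at the
    # very small first-stage sample size of about 2.27 subjects.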
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.23002532, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.33381109, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 1.0127209, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 0.76997468, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.66618891, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeTwoRatesDiff}
    # @refFS[Formula]{fs:sampleSizePerStageTwoRates}
    sampleSizeResult <- getSampleSizeRates(designGS2, groups = 2, thetaH0 = 0,
        pi1 = 0.5, pi2 = 0.3, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 123.43553, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 61.717765, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 61.717765, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 24.687106, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 61.717765, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 123.43553, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 102.69945, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 92.901636, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 70.727105, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -0.23899172, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], -0.13791313, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], -0.087906186, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 0.30941892, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 0.15876644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.095938144, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeTwoRatesDiff}
    # @refFS[Formula]{fs:sampleSizePerStageTwoRates}
    sampleSizeResult <- getSampleSizeRates(designGS2, groups = 2, thetaH0 = 0,
        pi1 = 0.5, pi2 = 0.3, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 162.30744, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 121.73058, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 40.576859, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 32.461488, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 81.153719, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 162.30744, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 24.346116, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 60.865289, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 121.73058, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 8.1153719, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 20.28843, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 40.576859, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 135.04122, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 122.15791, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 93.000251, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -0.21587527, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], -0.13203224, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], -0.086052993, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 0.31213587, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 0.16272503, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.09811449, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)
})

context("Testing the sample size calculation of survival data for different designs and arguments")

test_that("'getSampleSizeSurvival': sample size calculation of survival data for one sided group sequential design and typeOfComputation = 'Schoenfeld'", {
    designGS1 <- getDesignGroupSequential(informationRates = c(0.2, 0.5, 1),
        sided = 1, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld",
        pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 218.43651, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 109.21825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 109.21825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 64.634742, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 18.203042, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0974672, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.495939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 14.423875, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 12.926948, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 32.317371, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 64.634742, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 64.275598, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 60.483, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 47.072216, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 129.1955, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 209.26106, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 218.43651, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 207.20268, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.9325954, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3170793, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6774418, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld",
        pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 250.03082, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 187.52311, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 62.507704, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 86.179656, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 20.835901, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0363139, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.430238, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 14.391854, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 17.235931, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 43.089828, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 86.179656, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 85.700797, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 80.644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 62.762955, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 146.60794, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 238.15931, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 250.03082, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 109.95596, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 178.61948, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 187.52311, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 36.651986, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 59.539826, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 62.507704, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 236.50497, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.9325954, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3170793, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6774418, tolerance = 1e-07)
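    # Hedged check (assumption: Schoenfeld effect-scale bounds follow
    # thetaH0 * exp(z_k * (1 + r) / sqrt(r * d_k)) with stage-wise events d_k
    # and allocation ratio r; here thetaH0 = 1, r = 3, d_1 = 17.235931):
    zSketch <- qnorm(1 - 0.0020595603)
    exp(zSketch * (1 + 3) / sqrt(3 * 17.235931)) # ~4.93, the stage 1 bound
    # Note the bounds agree with the r = 1 case above because d_k scales
    # with (1 + r)^2 / r, which cancels in the exponent.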
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld",
        thetaH0 = 1.2, pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 359.16189, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 179.58095, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 179.58095, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 106.27498, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 29.930158, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0974672, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.495939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 14.423875, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 21.254997, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 53.137492, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 106.27498, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 105.68446, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 99.448526, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 77.397988, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 212.42831, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 344.07526, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 359.16189, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 340.69079, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld",
        thetaH0 = 1.2, pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 411.1105, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 308.33287, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 102.77762, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 141.69998, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 34.259208, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0363139, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.430238, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 14.391854, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 28.339996, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 70.849989, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 141.69998, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 140.91262, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 132.59803, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 103.19732, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 241.05854, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 391.59089, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 411.1105, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 180.79391, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 293.69317, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 308.33287, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 60.264635, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 97.897723, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 102.77762, tolerance = 1e-07)
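    # The Schoenfeld event count scales with (1 + r)^2 / r in the allocation
    # ratio r, so moving from r = 1 (maxNumberOfEvents = 106.27498 above) to
    # r = 3 inflates the required events by ((1 + 3)^2 / 3) / ((1 + 1)^2 / 1),
    # i.e. by 4/3 -- a hedged cross-check, not an rpact call:
    106.27498 * 4 / 3 # ~141.69998 = maxNumberOfEvents tested above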
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 388.87078, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 216.4138, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 108.2069, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 108.2069, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 64.634742, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 27.051725, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 13.872604, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 12.926948, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 32.317371, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 64.634742, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 64.275598, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 60.483, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 47.072216, tolerance = 1e-07)
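    # Hedged cross-check of expectedEventsH1 above: with no binding futility,
    # the expected number of events under H1 follows the usual ASN recursion
    # over the stage-wise rejection probabilities (values from this test):
    dSketch <- c(12.926948, 32.317371, 64.634742)
    rejectSketch <- c(0.08379162, 0.40937259)
    dSketch[1] + (1 - rejectSketch[1]) * (dSketch[2] - dSketch[1]) +
        (1 - sum(rejectSketch)) * (dSketch[3] - dSketch[2]) # ~47.072216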
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 168.44491, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 216.4138, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 216.4138, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 212.39441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.9325954, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3170793, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6774418, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 247.45413, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 185.5906, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 61.863534, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 86.179656, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 30.931767, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1694167, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 13.832344, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 17.235931, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 43.089828, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 86.179656, tolerance = 1e-07)
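    # With uniform recruitment over accrualTime = 8, the accrual intensity is
    # just the maximum sample size divided by the accrual duration -- a small
    # consistency sketch for the value tested above:
    247.45413 / 8 # ~30.931767 = accrualIntensity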
    expect_equal(sampleSizeResult$expectedEventsH0, 85.700797, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 80.644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 62.762955, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 190.83096, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 247.45413, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 247.45413, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 143.12322, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 185.5906, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 185.5906, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 47.70774, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 61.863534, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 61.863534, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 242.70959, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.9325954, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3170793, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6774418, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld",
        thetaH0 = 1.2, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 355.83608, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 177.91804, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 177.91804, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 106.27498, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 44.47951, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 13.872604, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 21.254997, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 53.137492, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 106.27498, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 105.68446, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 99.448526, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 77.397988, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 276.96374, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 355.83608, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 355.83608, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 349.22724, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld",
        thetaH0 = 1.2, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 406.87381, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 305.15536, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 101.71845, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 141.69998, tolerance = 1e-07)
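    # Note: maxNumberOfEvents (141.69998) equals the value from the
    # thetaH0 = 1.2, allocation ratio 3 case above that does not account for
    # observation times: the required events depend only on the design,
    # thetaH0, and the hazard ratio, whereas accrual, event time, and
    # follow-up assumptions only shift subject numbers and analysis times.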
expect_equal(sampleSizeResult$accrualIntensity, 50.859227, tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 6.1694167, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDurationH1, 13.832344, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 28.339996, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 70.849989, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 141.69998, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 140.91262, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 132.59803, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 103.19732, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 313.77176, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 406.87381, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 406.87381, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 235.32882, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 305.15536, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 305.15536, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 78.442941, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 101.71845, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 101.71845, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 399.07264, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime =16, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) ## ## Comparison of the results of TrialDesignPlanSurvival object 
'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 224.258, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 112.129, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 112.129, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 64.634742, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 28.03225, tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 6.1480342, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.21033, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDurationH1, 13.818027, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 12.926948, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 32.317371, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 64.634742, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 64.275598, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 60.483, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 47.072216, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 172.34323, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 224.258, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 224.258, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 219.90797, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.9325954, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3170793, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6774418, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, 
pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime =16, accountForObservationTimes = TRUE, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 257.43359, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 193.07519, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 64.358398, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 86.179656, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 32.179199, tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 6.0820828, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.109775, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDurationH1, 13.771337, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 17.235931, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 43.089828, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 86.179656, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 85.700797, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 80.644, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 62.762955, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 195.71655, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 257.43359, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 257.43359, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 146.78741, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 193.07519, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 193.07519, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 48.929138, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 64.358398, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 64.358398, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 252.26222, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.9325954, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3170793, tolerance = 1e-07) 
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6774418, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

# @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
# @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
# @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
# @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
# @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1.2, pi1 = 0.4,
pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1,
dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

##
## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$directionUpper, TRUE)
expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 368.73381, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 184.36691, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 184.36691, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfEvents, 106.27498, tolerance = 1e-07)
expect_equal(sampleSizeResult$accrualIntensity, 46.091727, tolerance = 1e-07)
expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$analysisTime[1, ], 6.1480342, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[2, ], 10.21033, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[3, ], 18)
expect_equal(sampleSizeResult$studyDurationH1, 13.818027, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxStudyDuration, 18)
expect_equal(sampleSizeResult$eventsPerStage[1, ], 21.254997, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[2, ], 53.137492, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[3, ], 106.27498, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH0, 105.68446, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH01, 99.448526, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH1, 77.397988, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 283.37351, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 368.73381, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 368.73381, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 361.58134, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579,
tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

# @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
# @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
# @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
# @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
# @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1.2, pi1 = 0.4,
pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1,
dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 3)

##
## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$directionUpper, TRUE)
expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 423.28243, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 317.46182, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 105.82061, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfEvents, 141.69998, tolerance = 1e-07)
expect_equal(sampleSizeResult$accrualIntensity, 52.910303, tolerance = 1e-07)
expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$analysisTime[1, ], 6.0820828, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[2, ], 10.109775, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[3, ], 18)
expect_equal(sampleSizeResult$studyDurationH1, 13.771337, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxStudyDuration, 18)
expect_equal(sampleSizeResult$eventsPerStage[1, ], 28.339996, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[2, ], 70.849989, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[3, ], 141.69998, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH0, 140.91262, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH01, 132.59803, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH1, 103.19732, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 321.80485, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 423.28243, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 423.28243, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 241.35364, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 317.46182, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 317.46182, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 80.451212, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 105.82061, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 105.82061, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 414.77946, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

# @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
# @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
# @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
# @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
# @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
# @refFS[Formula]{fs:sampleSizeSurvivalOptimumAllocationRatio}
sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1.2, pi1 = 0.4,
pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1,
dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 0)

##
## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$directionUpper, TRUE)
expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 359.78876, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 206.97289, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 152.81587, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfEvents, 108.73874, tolerance = 1e-07)
expect_equal(sampleSizeResult$allocationRatioPlanned, 1.3543939, tolerance = 1e-07)
expect_equal(sampleSizeResult$accrualIntensity, 44.973595, tolerance = 1e-07)
expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$analysisTime[1, ], 6.1258711, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[2, ], 10.176695,
tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[3, ], 18)
expect_equal(sampleSizeResult$studyDurationH1, 13.802401, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxStudyDuration, 18)
expect_equal(sampleSizeResult$eventsPerStage[1, ], 21.747749, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[2, ], 54.369372, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[3, ], 108.73874, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH0, 108.13454, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH01, 101.75403, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH1, 79.192297, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 275.50245, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 359.78876, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 359.78876, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 158.48615, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 206.97289, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 206.97289, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 117.01629, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 152.81587, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 152.81587, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 352.72627, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)
})

test_that("'getSampleSizeSurvival': sample size calculation of survival data for one sided group sequential design and typeOfComputation = 'Freedman'", {
designGS1 <- getDesignGroupSequential(informationRates = c(0.2, 0.5, 1), sided = 1,
beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3)

# @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
# @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
# @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
# @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Freedman",
pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1)

##
## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$directionUpper, TRUE)
expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
expect_equal(sampleSizeResult$hazardRatio,
2.2892242, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 243.82563, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 121.91282, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 121.91282, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfEvents, 72.147312, tolerance = 1e-07)
expect_equal(sampleSizeResult$accrualIntensity, 20.318803, tolerance = 1e-07)
expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$analysisTime[1, ], 7.0974672, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[2, ], 11.495939, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[3, ], 18)
expect_equal(sampleSizeResult$studyDurationH1, 14.423875, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxStudyDuration, 18)
expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.429462, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.073656, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[3, ], 72.147312, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH0, 71.746424, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH01, 67.513008, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH1, 52.543474, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 144.21204, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 233.58371, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 243.82563, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 231.28609, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

# @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
# @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
# @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
# @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Freedman",
pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3)

##
## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$directionUpper, TRUE)
expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance =
1e-07)
expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 399.20253, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 299.4019, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 99.800633, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfEvents, 137.59559, tolerance = 1e-07)
expect_equal(sampleSizeResult$accrualIntensity, 33.266878, tolerance = 1e-07)
expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$analysisTime[1, ], 7.0363139, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[2, ], 11.430238, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[3, ], 18)
expect_equal(sampleSizeResult$studyDurationH1, 14.391854, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxStudyDuration, 18)
expect_equal(sampleSizeResult$eventsPerStage[1, ], 27.519117, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[2, ], 68.797794, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[3, ], 137.59559, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH0, 136.83103, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH01, 128.75728, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH1, 100.20817, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 234.07619, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 380.24832, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 399.20253, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 175.55715, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 285.18624, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 299.4019, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 58.519049, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 95.06208, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 99.800633, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 377.60699, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.5359417, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9445403, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5058707, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

# @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
# @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
# @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
# @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
# @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Freedman", thetaH0 = 1, pi1 = 0.4,
pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

##
## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$directionUpper, TRUE)
expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 241.56782, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 120.78391, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 120.78391, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfEvents, 72.147312, tolerance = 1e-07)
expect_equal(sampleSizeResult$accrualIntensity, 30.195978, tolerance = 1e-07)
expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[3, ], 18)
expect_equal(sampleSizeResult$studyDurationH1, 13.872604, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxStudyDuration, 18)
expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.429462, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.073656, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[3, ], 72.147312, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH0, 71.746424, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH01, 67.513008, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH1, 52.543474, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 188.02345, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 241.56782, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 241.56782, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 237.08125, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)
# @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
# @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
# @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
# @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
# @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Freedman", thetaH0 = 1, pi1 = 0.4,
pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
accountForObservationTimes = TRUE, allocationRatioPlanned = 3)

##
## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$directionUpper, TRUE)
expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 395.08857, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 296.31643, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 98.772142, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfEvents, 137.59559, tolerance = 1e-07)
expect_equal(sampleSizeResult$accrualIntensity, 49.386071, tolerance = 1e-07)
expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$analysisTime[1, ], 6.1694167, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[3, ], 18)
expect_equal(sampleSizeResult$studyDurationH1, 13.832344, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxStudyDuration, 18)
expect_equal(sampleSizeResult$eventsPerStage[1, ], 27.519117, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[2, ], 68.797794, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[3, ], 137.59559, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH0, 136.83103, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH01, 128.75728, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH1, 100.20817, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 304.68325, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 395.08857, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 395.08857, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 228.51244, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 296.31643, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 296.31643, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 76.170813, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 98.772142, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 98.772142, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 387.51336, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
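# Aside (a sketch under the assumption that the Freedman and Schoenfeld
# event numbers differ, for equal allocation, by the factor
# (1 + HR)^2 * log(HR)^2 / (4 * (HR - 1)^2)): applying this factor to the
# Schoenfeld maximum event number from the first test above reproduces the
# Freedman maximum event number to within rounding of the hazard ratio.
freedmanEventsSketch <- 64.634742 * (1 + 2.2892242)^2 * log(2.2892242)^2 / (4 * (2.2892242 - 1)^2)
# freedmanEventsSketch is ~ 72.15, in line with maxNumberOfEvents = 72.147312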
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.5359417, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9445403, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5058707, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

# @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
# @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
# @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
# @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
# @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Freedman", thetaH0 = 1, pi1 = 0.4,
pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1,
dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

##
## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$directionUpper, TRUE)
expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 250.32376, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 125.16188, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 125.16188, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfEvents, 72.147312, tolerance = 1e-07)
expect_equal(sampleSizeResult$accrualIntensity, 31.29047, tolerance = 1e-07)
expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$analysisTime[1, ], 6.1480342, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[2, ], 10.21033, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[3, ], 18)
expect_equal(sampleSizeResult$studyDurationH1, 13.818027, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxStudyDuration, 18)
expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.429462, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.073656, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[3, ], 72.147312, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH0, 71.746424, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH01, 67.513008, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH1, 52.543474, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 192.37488, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 250.32376, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 250.32376, tolerance =
1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 245.46813, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

# @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
# @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
# @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
# @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
# @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
# @refFS[Formula]{fs:sampleSizeSurvivalOptimumAllocationRatio}
sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Freedman", thetaH0 = 1, pi1 = 0.4,
pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1,
dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 0)

##
## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$directionUpper, TRUE)
expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 236.31869, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 89.479288, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 146.8394, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfEvents, 62.770758, tolerance = 1e-07)
expect_equal(sampleSizeResult$allocationRatioPlanned, 0.60936839, tolerance = 1e-07)
expect_equal(sampleSizeResult$accrualIntensity, 29.539836, tolerance = 1e-07)
expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$analysisTime[1, ], 6.1891498, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[2, ], 10.272294, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[3, ], 18)
expect_equal(sampleSizeResult$studyDurationH1, 13.846839, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxStudyDuration, 18)
expect_equal(sampleSizeResult$eventsPerStage[1, ], 12.554152, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[2, ], 31.385379, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[3, ], 62.770758, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH0, 62.421971, tolerance =
1e-07)
expect_equal(sampleSizeResult$expectedEventsH01, 58.738747, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH1, 45.714713, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 182.82647, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 236.31869, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 236.31869, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 69.22509, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 89.479288, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 89.479288, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 113.60138, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 146.8394, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 146.8394, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 231.83649, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 5.3084847, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.4084373, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7178517, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)
})

test_that("'getSampleSizeSurvival': sample size calculation of survival data for one sided group sequential design and typeOfComputation = 'HsiehFreedman'", {
designGS1 <- getDesignGroupSequential(informationRates = c(0.2, 0.5, 1), sided = 1,
beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3)

# @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
# @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
# @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
# @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "HsiehFreedman",
pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1)

##
## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$directionUpper, TRUE)
expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 243.82563, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 121.91282, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 121.91282, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfEvents, 72.147312, tolerance = 1e-07)
expect_equal(sampleSizeResult$accrualIntensity, 20.318803, tolerance = 1e-07)
expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
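# Note (an observation from the expected values, stated as an assumption
# about the implementation rather than a documented property): for equal
# allocation the Hsieh-Freedman variant coincides with the plain Freedman
# computation, so this test reproduces the Freedman numbers above, e.g. the
# same maximum number of events, 72.147312.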
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$analysisTime[1, ], 7.0974672, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[2, ], 11.495939, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[3, ], 18)
expect_equal(sampleSizeResult$studyDurationH1, 14.423875, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxStudyDuration, 18)
expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.429462, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.073656, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[3, ], 72.147312, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH0, 71.746424, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH01, 67.513008, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH1, 52.543474, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 144.21204, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 233.58371, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 243.82563, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 231.28609, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

# @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
# @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
# @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
# @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "HsiehFreedman",
pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3)

##
## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$directionUpper, TRUE)
expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 279.09218, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 209.31914, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 69.773046, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfEvents, 96.196416, tolerance = 1e-07)
expect_equal(sampleSizeResult$accrualIntensity, 23.257682, tolerance = 1e-07)
expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$analysisTime[1, ], 7.0363139, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[2, ], 11.430238, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[3, ], 18)
expect_equal(sampleSizeResult$studyDurationH1, 14.391854, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxStudyDuration, 18)
expect_equal(sampleSizeResult$eventsPerStage[1, ], 19.239283, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[2, ], 48.098208, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[3, ], 96.196416, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH0, 95.661899, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH01, 90.017344, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH1, 70.057965, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 163.64835, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 265.84083, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 279.09218, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 122.73626, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 199.38062, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 209.31914, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 40.912088, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 66.460208, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 69.773046, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 263.99422, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

# @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
# @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
# @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
# @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
# @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "HsiehFreedman", thetaH0 = 1,
pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

##
## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$directionUpper, TRUE)
expect_equal(sampleSizeResult$median1,
18.996816, tolerance = 1e-07)
expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 241.56782, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 120.78391, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 120.78391, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfEvents, 72.147312, tolerance = 1e-07)
expect_equal(sampleSizeResult$accrualIntensity, 30.195978, tolerance = 1e-07)
expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[3, ], 18)
expect_equal(sampleSizeResult$studyDurationH1, 13.872604, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxStudyDuration, 18)
expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.429462, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.073656, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[3, ], 72.147312, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH0, 71.746424, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH01, 67.513008, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH1, 52.543474, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 188.02345, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 241.56782, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 241.56782, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 237.08125, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

# @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
# @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
# @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
# @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
# @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "HsiehFreedman", thetaH0 = 1,
pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
accountForObservationTimes = TRUE,
allocationRatioPlanned = 3)

##
## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$directionUpper, TRUE)
expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 276.21601, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 207.16201, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 69.054003, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfEvents, 96.196416, tolerance = 1e-07)
expect_equal(sampleSizeResult$accrualIntensity, 34.527001, tolerance = 1e-07)
expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$analysisTime[1, ], 6.1694167, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[3, ], 18)
expect_equal(sampleSizeResult$studyDurationH1, 13.832344, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxStudyDuration, 18)
expect_equal(sampleSizeResult$eventsPerStage[1, ], 19.239283, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[2, ], 48.098208, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[3, ], 96.196416, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH0, 95.661899, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH01, 90.017344, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH1, 70.057965, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 213.01146, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 276.21601, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 276.21601, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 159.75859, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 207.16201, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 207.16201, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 53.252865, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 69.054003, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 69.054003, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 270.92, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance =
1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

# @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
# @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
# @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
# @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
# @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "HsiehFreedman", thetaH0 = 1,
pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1,
dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

##
## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$directionUpper, TRUE)
expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 250.32376, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 125.16188, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 125.16188, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfEvents, 72.147312, tolerance = 1e-07)
expect_equal(sampleSizeResult$accrualIntensity, 31.29047, tolerance = 1e-07)
expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$analysisTime[1, ], 6.1480342, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[2, ], 10.21033, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[3, ], 18)
expect_equal(sampleSizeResult$studyDurationH1, 13.818027, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxStudyDuration, 18)
expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.429462, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.073656, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[3, ], 72.147312, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH0, 71.746424, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH01, 67.513008, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH1, 52.543474, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 192.37488, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 250.32376, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 250.32376, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 245.46813, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07)
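# A numeric illustration (sketch) of the unequal-allocation rescaling
# assumed for the Hsieh-Freedman variant: the equal-allocation event number
# is multiplied by (1 + r)^2 / (4 * r), which reproduces the r = 3 maximum
# event number checked earlier in this test block.
hsiehScalingSketch <- (1 + 3)^2 / (4 * 3) * 72.147312
# hsiehScalingSketch is ~ 96.196416, the maxNumberOfEvents for allocationRatioPlanned = 3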
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

# @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
# @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
# @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
# @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
# @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
# @refFS[Formula]{fs:sampleSizeSurvivalOptimumAllocationRatio}
sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "HsiehFreedman", thetaH0 = 1,
pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1,
dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 0)

##
## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
##
expect_equal(sampleSizeResult$directionUpper, TRUE)
expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 244.2512, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 140.50849, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 103.74271, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfEvents, 73.819895, tolerance = 1e-07)
expect_equal(sampleSizeResult$allocationRatioPlanned, 1.3543939, tolerance = 1e-07)
expect_equal(sampleSizeResult$accrualIntensity, 30.5314, tolerance = 1e-07)
expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$analysisTime[1, ], 6.1258711, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[2, ], 10.176695, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[3, ], 18)
expect_equal(sampleSizeResult$studyDurationH1, 13.802401, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxStudyDuration, 18)
expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.763979, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.909947, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[3, ], 73.819895, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH0, 73.409713, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH01, 69.078154, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH1, 53.761583, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 187.03142, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 244.2512, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 244.2512, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 107.59211, tolerance = 1e-07)
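# What allocationRatioPlanned = 0 requests (a sketch, assuming the optimum
# criterion is the minimal maximum number of subjects): a brute-force
# cross-check could search a grid of ratios; left commented out because it
# recomputes the design many times.
# ratios <- seq(0.5, 3, by = 0.01)
# nMax <- sapply(ratios, function(r) getSampleSizeSurvival(designGS1,
#     typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2,
#     eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1,
#     dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE,
#     allocationRatioPlanned = r)$maxNumberOfSubjects)
# ratios[which.min(nMax)]  # expected to land near 1.3543939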
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 140.50849, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 140.50849, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 79.439306, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 103.74271, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 103.74271, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 239.45666, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': sample size calculation of survival data for two sided group sequential design and typeOfComputation = 'Schoenfeld'", { designGS2 <- getDesignGroupSequential(informationRates = c(0.2,0.5,1), alpha = 0.4, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3) # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Schoenfeld", pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 103.98569, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 51.992843, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 51.992843, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 30.769069, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 8.6654738, tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 7.0974672, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 11.495939, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDurationH1, 12.356848, tolerance = 1e-07) 
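
# A minimal cross-check sketch for the following test, assuming the classical
# Schoenfeld approximation d = (1 + r)^2 / r * ((z_{1-alpha/2} + z_{1-beta}) / log(HR))^2
# for the fixed-design number of events (two-sided alpha = 0.4, beta = 0.1 as below);
# the group sequential design needs roughly this value times its inflation factor:
if (FALSE) {
    hr <- 2.2892242 # hazard ratio implied by pi1 = 0.4, pi2 = 0.2: log(1 - 0.4) / log(1 - 0.2)
    r <- 1 # allocation ratio n1 / n2
    dFixed <- (1 + r)^2 / r * ((qnorm(1 - 0.4 / 2) + qnorm(1 - 0.1)) / log(hr))^2
    # dFixed is about 26.3; multiplied by the inflation factor of 'designGS2'
    # this is close to the maxNumberOfEvents of about 30.77 expected below
}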

test_that("'getSampleSizeSurvival': sample size calculation of survival data for two sided group sequential design and typeOfComputation = 'Schoenfeld'", {
    designGS2 <- getDesignGroupSequential(informationRates = c(0.2, 0.5, 1), alpha = 0.4,
        sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Schoenfeld",
        pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 103.98569, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 51.992843, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 51.992843, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 30.769069, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 8.6654738, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0974672, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.495939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 12.356848, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.1538138, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 15.384534, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 30.769069, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 25.600136, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 23.157812, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 17.630314, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 61.502916, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 99.617756, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 103.98569, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 89.550349, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.28805692, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.51926225, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.66803619, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.4715361, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.9258092, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4969249, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)
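
    # Same rates with 3:1 randomization: under the Schoenfeld approximation the
    # event number scales with (1 + r)^2 / (4 * r), i.e. by 4/3 relative to 1:1
    # allocation, so about 30.77 events become about 41.03 (checked below)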

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Schoenfeld",
        pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 119.02601, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 89.269507, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 29.756502, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 41.025425, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 9.9188341, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0363139, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.430238, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 12.314673, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 8.205085, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 20.512713, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 41.025425, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 34.133514, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 30.877083, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 23.507086, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 69.79203, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 113.37463, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 119.02601, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 52.344023, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 85.030974, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 89.269507, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 17.448008, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 28.343658, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 29.756502, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 102.08444, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.28805692, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.51926225, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.66803619, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.4715361, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.9258092, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4969249, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)
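
    # With accountForObservationTimes = TRUE, event time, accrual time, and
    # follow-up time enter the event probabilities: the required number of
    # events stays at about 30.77, while subject numbers and analysis times change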

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Schoenfeld",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 103.02279, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 51.511393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 51.511393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 30.769069, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 12.877848, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 11.672467, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.1538138, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 15.384534, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 30.769069, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 25.600136, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 23.157812, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 17.630314, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 80.187417, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 103.02279, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 103.02279, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 96.108996, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.28805692, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.51926225, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.66803619, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.4715361, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.9258092, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4969249, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Schoenfeld",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 117.79939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 88.349544, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 29.449848, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 41.025425, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 14.724924, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1694167, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 11.623913, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 8.205085, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 20.512713, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 41.025425, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 34.133514, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 30.877083, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 23.507086, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 90.844192, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 117.79939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 117.79939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 68.133144, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 88.349544, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 88.349544, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 22.711048, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 29.449848, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 29.449848, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 109.63825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.28805692, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.51926225, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.66803619, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.4715361, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.9258092, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4969249, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)
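
    # Adding dropout (10% and 5% by dropout time 16) leaves the required number
    # of events unchanged and is compensated by recruiting more subjects
    # (about 106.76 instead of about 103.02, checked below)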

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Schoenfeld",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 106.75698, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 53.378489, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 53.378489, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 30.769069, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 13.344622, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1480342, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.21033, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 11.606421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.1538138, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 15.384534, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 30.769069, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 25.600136, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 23.157812, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 17.630314, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 82.043195, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 106.75698, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 106.75698, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 99.274467, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.28805692, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.51926225, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.66803619, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.4715361, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.9258092, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4969249, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)
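
    # allocationRatioPlanned = 0 requests the optimum allocation ratio: the
    # result object then reports the computed ratio (about 1.35 here) instead
    # of echoing the input value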

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    # @refFS[Formula]{fs:sampleSizeSurvivalOptimumAllocationRatio}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Schoenfeld",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 0)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 104.16718, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 59.923444, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 44.243734, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 31.482385, tolerance = 1e-07)
    expect_equal(sampleSizeResult$allocationRatioPlanned, 1.3543939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 13.020897, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1258711, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.176695, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 11.587598, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.2964769, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 15.741192, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 31.482385, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 26.193621, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 23.694677, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 18.039036, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 79.764338, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 104.16718, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 104.16718, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 45.885412, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 59.923444, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 59.923444, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 33.878926, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 44.243734, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 44.243734, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 96.778811, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.28805692, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.51926225, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.66803619, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.4715361, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.9258092, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4969249, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)
})
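
# A minimal cross-check sketch for the following test, assuming Freedman's
# approximation d = (z_{1-alpha/2} + z_{1-beta})^2 * (1 + r * HR)^2 / (r * (1 - HR)^2)
# for the fixed-design number of events; as above, the group sequential design
# needs roughly this value times its inflation factor:
if (FALSE) {
    hr <- 2.2892242
    r <- 1
    dFixed <- (qnorm(1 - 0.4 / 2) + qnorm(1 - 0.1))^2 * (1 + r * hr)^2 / (r * (1 - hr)^2)
    # dFixed is about 29.3, close to the expected maxNumberOfEvents of about
    # 34.35 below after multiplication by the inflation factor
}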

test_that("'getSampleSizeSurvival': sample size calculation of survival data for two sided group sequential design and typeOfComputation = 'Freedman'", {
    designGS2 <- getDesignGroupSequential(informationRates = c(0.2, 0.5, 1), alpha = 0.4,
        sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Freedman",
        pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 116.07206, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 58.03603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 58.03603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 34.345393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 9.6726717, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0974672, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.495939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 12.356848, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.8690786, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 17.172696, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 34.345393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 28.575669, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 25.849471, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 19.679506, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 68.65147, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 111.19644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 116.07206, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 99.958888, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.30788852, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.53778926, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.68260947, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.2479288, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.8594644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4649665, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Freedman",
        pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 190.03851, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 142.52888, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 47.509628, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 65.50174, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 15.836543, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0363139, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.430238, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 12.314673, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 13.100348, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 32.75087, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 65.50174, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 54.498024, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 49.298762, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 37.531726, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 111.43089, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 181.01545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 190.03851, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 83.573164, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 135.76159, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 142.52888, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 27.857721, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 45.253862, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 47.509628, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 162.98937, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.37344541, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.59532615, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.72668369, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 2.6777676, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.6797515, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.3761146, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)
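
    # In these scenarios the Freedman approximation requires noticeably more
    # events than Schoenfeld's (about 65.50 versus about 41.03 for 3:1
    # allocation in the test above)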

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Freedman",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 114.99724, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 57.49862, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 57.49862, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 34.345393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 14.374655, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 11.672467, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.8690786, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 17.172696, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 34.345393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 28.575669, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 25.849471, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 19.679506, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 89.507692, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 114.99724, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 114.99724, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 107.27985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.30788852, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.53778926, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.68260947, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.2479288, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.8594644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4649665, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Freedman",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 188.08008, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 141.06006, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 47.02002, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 65.50174, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 23.51001, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1694167, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 11.623913, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 13.100348, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 32.75087, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 65.50174, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 54.498024, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 49.298762, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 37.531726, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 145.04305, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 188.08008, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 188.08008, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 108.78229, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 141.06006, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 141.06006, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 36.260762, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 47.02002, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 47.02002, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 175.0499, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.37344541, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.59532615, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.72668369, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 2.6777676, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.6797515, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.3761146, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Freedman",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 119.16546, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 59.582732, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 59.582732, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 34.345393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 14.895683, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1480342, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.21033, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 11.606421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.8690786, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 17.172696, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 34.345393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 28.575669, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 25.849471, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 19.679506, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 91.579168, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 119.16546, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 119.16546, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 110.81325, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.30788852, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.53778926, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.68260947, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.2479288, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.8594644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4649665, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)
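
    # With allocationRatioPlanned = 0 the optimum ratio is computed under the
    # Freedman approximation; unlike the Schoenfeld case above (about 1.35),
    # the optimum here is below one (about 0.61, checked below)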

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    # @refFS[Formula]{fs:sampleSizeSurvivalOptimumAllocationRatio}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Freedman",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 0)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 112.49841, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 42.596199, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 69.902213, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 29.881728, tolerance = 1e-07)
    expect_equal(sampleSizeResult$allocationRatioPlanned, 0.60936839, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 14.062302, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1891498, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.272294, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 11.641184, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 5.9763456, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 14.940864, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 29.881728, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 24.86186, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 22.48997, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 17.121878, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 87.033692, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 112.49841, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 112.49841, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 32.954283, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 42.596199, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 42.596199, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 54.079409, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 69.902213, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 69.902213, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 104.78854, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.2720218, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.50383572, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.65574857, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.676176, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.9847739, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.5249747, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)
})
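
# A minimal cross-check sketch for the following test, assuming the Hsieh-Freedman
# variant: Freedman's equal-allocation event number combined with the allocation
# correction (1 + r)^2 / (4 * r):
if (FALSE) {
    hr <- 2.2892242
    dEqual <- (qnorm(1 - 0.4 / 2) + qnorm(1 - 0.1))^2 * ((1 + hr) / (1 - hr))^2
    r <- 3
    dFixed <- dEqual * (1 + r)^2 / (4 * r)
    # for r = 1 this reduces to Freedman's value; for r = 3 it is about 39.1,
    # i.e. about 45.79 events after the inflation factor, as checked below
}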

test_that("'getSampleSizeSurvival': sample size calculation of survival data for two sided group sequential design and typeOfComputation = 'HsiehFreedman'", {
    designGS2 <- getDesignGroupSequential(informationRates = c(0.2, 0.5, 1), alpha = 0.4,
        sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "HsiehFreedman",
        pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 116.07206, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 58.03603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 58.03603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 34.345393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 9.6726717, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0974672, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.495939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 12.356848, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.8690786, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 17.172696, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 34.345393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 28.575669, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 25.849471, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 19.679506, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 68.65147, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 111.19644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 116.07206, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 99.958888, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.30788852, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.53778926, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.68260947, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.2479288, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.8594644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4649665, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)
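
    # For 1:1 allocation the Hsieh-Freedman results coincide with the Freedman
    # results of the previous test (same maxNumberOfEvents of about 34.35);
    # the allocation correction matters only for unequal allocation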

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "HsiehFreedman",
        pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 132.86054, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 99.645403, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 33.215134, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 45.793857, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 11.071711, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0363139, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.430238, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 12.314673, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 9.1587714, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 22.896929, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 45.793857, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 38.100892, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 34.465962, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 26.239341, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 77.904037, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 126.55229, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 132.86054, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 58.428028, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 94.914221, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 99.645403, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 19.476009, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 31.638074, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 33.215134, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 113.94983, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.30788852, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.53778926, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.68260947, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.2479288, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.8594644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4649665, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "HsiehFreedman",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 114.99724, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 57.49862, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 57.49862, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 34.345393, tolerance
= 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 14.374655, tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDurationH1, 11.672467, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.8690786, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 17.172696, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 34.345393, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 28.575669, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 25.849471, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 19.679506, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 89.507692, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 114.99724, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 114.99724, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 107.27985, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.30788852, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.53778926, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.68260947, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.2479288, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.8594644, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4649665, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) 
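# Hand-check of the exponential conversions behind the expected values
# (standard identities, assumed to be what is used here):
# lambda = -log(1 - pi) / eventTime and median = log(2) / lambda; the hazard
# ratio lambda1 / lambda2 does not depend on eventTime, which is why it
# remains 2.2892242 for eventTime = 14 as well.
expect_equal(-log(1 - 0.4) / 14, 0.036487545, tolerance = 1e-6)
expect_equal(log(2) / 0.036487545, 18.996816, tolerance = 1e-6)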
expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 131.49135, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 98.618512, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 32.872837, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 45.793857, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 16.436419, tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 6.1694167, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDurationH1, 11.623913, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 9.1587714, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 22.896929, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 45.793857, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 38.100892, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 34.465962, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 26.239341, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 101.40312, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 131.49135, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 131.49135, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 76.052337, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 98.618512, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 98.618512, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 25.350779, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 32.872837, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 32.872837, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 122.38163, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.30788852, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.53778926, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.68260947, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.2479288, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.8594644, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4649665, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) 
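# Note: the boundaries on the p-value scale (0.12265406, 0.19870972,
# 0.26319851) are a property of 'designGS2' alone; they are identical in
# every parameter variant tested in this block, regardless of pi1, pi2,
# accrual, dropout, or allocation ratio.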
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = c(0.3,0.4), pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime =16, accountForObservationTimes = TRUE, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE)) expect_equal(sampleSizeResult$median1, c(27.207015, 18.996816), tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, c(0.025476782, 0.036487545), tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, c(1.5984103, 2.2892242), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, c(503.85206, 136.7942), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, c(377.88904, 102.59565), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, c(125.96301, 34.19855), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, c(132.64612, 45.793857), tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, c(62.981507, 17.099275), tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], c(6.2267383, 6.0820828), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], c(10.326085, 10.109775), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], c(18, 18)) expect_equal(sampleSizeResult$studyDurationH1, c(11.671936, 11.550242), tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, c(18, 18)) expect_equal(sampleSizeResult$eventsPerStage[1, ], c(26.529224, 9.1587714), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], c(66.32306, 22.896929), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], c(132.64612, 45.793857), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, c(110.36274, 38.100892), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, c(99.833829, 34.465962), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, c(76.004665, 26.239341), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], c(392.16937, 103.99921), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], c(503.85206, 136.7942), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], c(503.85206, 136.7942), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], c(294.12702, 77.999405), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], c(377.88904, 102.59565), tolerance = 1e-07) 
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], c(377.88904, 102.59565), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], c(98.042341, 25.999802), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], c(125.96301, 34.19855), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], c(125.96301, 34.19855), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, c(470.03826, 126.86497), tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], c(0.50049257, 0.30788852), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], c(0.6945715, 0.53778926), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], c(0.79903418, 0.68260947), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], c(1.9980317, 3.2479288), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], c(1.4397366, 1.8594644), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], c(1.2515109, 1.4649665), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = 0.3, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime =16, accountForObservationTimes = TRUE, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 27.207015, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.025476782, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 1.5984103, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 503.85206, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 377.88904, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 125.96301, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 132.64612, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 62.981507, tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) 
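# Cross-check: this scalar run with pi1 = 0.3 reproduces the first component
# of every result vector from the preceding pi1 = c(0.3, 0.4) call (e.g.
# median1 = 27.207015 and maxNumberOfSubjects = 503.85206).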
expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267383, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.326085, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDurationH1, 11.671936, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 26.529224, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 66.32306, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 132.64612, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 110.36274, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 99.833829, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 76.004665, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 392.16937, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 503.85206, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 503.85206, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 294.12702, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 377.88904, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 377.88904, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 98.042341, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 125.96301, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 125.96301, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 470.03826, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.50049257, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.6945715, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.79903418, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 1.9980317, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.4397366, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.2515109, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime =16, accountForObservationTimes = TRUE, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## 
expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 136.7942, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 102.59565, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 34.19855, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 45.793857, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 17.099275, tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 6.0820828, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.109775, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDurationH1, 11.550242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 9.1587714, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 22.896929, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 45.793857, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 38.100892, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 34.465962, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 26.239341, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 103.99921, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 136.7942, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 136.7942, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 77.999405, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 102.59565, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 102.59565, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 25.999802, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 34.19855, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 34.19855, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 126.86497, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.30788852, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.53778926, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.68260947, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.2479288, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.8594644, tolerance = 1e-07) 
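# Illustrative check (assumed log-rank normal approximation with allocation
# ratio k; it is consistent with the expected values here but not quoted from
# the rpact documentation): a two-sided p-value boundary p maps to the hazard
# ratio scale as exp(qnorm(1 - p / 2) * (1 + k) / sqrt(k * events)).
# Final stage with k = 3 and 45.793857 events:
expect_equal(exp(qnorm(1 - 0.26319851 / 2) * (1 + 3) / sqrt(3 * 45.793857)), 1.4649665, tolerance = 1e-4)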
expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4649665, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS2, maxNumberOfSubjects = 0, typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = c(0.3,0.4), pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE)) expect_equal(sampleSizeResult$median1, c(27.207015, 18.996816), tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, c(0.025476782, 0.036487545), tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, c(1.5984103, 2.2892242), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, c(503.85206, 136.7942), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, c(377.88904, 102.59565), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, c(125.96301, 34.19855), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, c(132.64612, 45.793857), tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, c(62.981507, 17.099275), tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], c(6.2267383, 6.0820828), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], c(10.326085, 10.109775), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], c(18, 18)) expect_equal(sampleSizeResult$studyDurationH1, c(11.671936, 11.550242), tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, c(18, 18)) expect_equal(sampleSizeResult$eventsPerStage[1, ], c(26.529224, 9.1587714), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], c(66.32306, 22.896929), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], c(132.64612, 45.793857), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, c(110.36274, 38.100892), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, c(99.833829, 34.465962), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, c(76.004665, 26.239341), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], c(392.16937, 103.99921), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], c(503.85206, 136.7942), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], c(503.85206, 136.7942), 
tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], c(294.12702, 77.999405), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], c(377.88904, 102.59565), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], c(377.88904, 102.59565), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], c(98.042341, 25.999802), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], c(125.96301, 34.19855), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], c(125.96301, 34.19855), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, c(470.03826, 126.86497), tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], c(0.50049257, 0.30788852), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], c(0.6945715, 0.53778926), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], c(0.79903418, 0.68260947), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], c(1.9980317, 3.2479288), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], c(1.4397366, 1.8594644), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], c(1.2515109, 1.4649665), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} # @refFS[Formula]{fs:sampleSizeSurvivalOptimumAllocationRatio} sampleSizeResult <- getSampleSizeSurvival(maxNumberOfSubjects = 194, designGS2, typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = c(0.3,0.4), pi2 = 0.2, eventTime = 14, accrualTime = 8, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime =16, accountForObservationTimes = TRUE, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE)) expect_equal(sampleSizeResult$median1, c(27.207015, 18.996816), tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, c(0.025476782, 0.036487545), tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, c(1.5984103, 2.2892242), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, c(145.5, 145.5), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, c(48.5, 48.5), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, c(132.64612, 45.793857), tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 24.25, 
tolerance = 1e-07) expect_equal(sampleSizeResult$followUpTime, c(65.207473, 5.0567417), tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], c(10.593965, 5.0743995), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], c(23.577694, 8.185751), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], c(73.207473, 13.056742), tolerance = 1e-07) expect_equal(sampleSizeResult$studyDurationH1, c(36.377625, 8.8858243), tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, c(73.207473, 13.056742), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[1, ], c(26.529224, 9.1587714), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], c(66.32306, 22.896929), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], c(132.64612, 45.793857), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, c(110.36274, 38.100892), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, c(99.833829, 34.465962), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, c(76.004665, 26.239341), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], c(194, 123.05419), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], c(194, 194)) expect_equal(sampleSizeResult$numberOfSubjects[3, ], c(194, 194)) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], c(145.5, 92.290642), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], c(145.5, 145.5), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], c(145.5, 145.5), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], c(48.5, 30.763547), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], c(48.5, 48.5), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], c(48.5, 48.5), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, c(194, 172.51997), tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], c(0.50049257, 0.30788852), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], c(0.6945715, 0.53778926), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], c(0.79903418, 0.68260947), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], c(1.9980317, 3.2479288), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], c(1.4397366, 1.8594644), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], c(1.2515109, 1.4649665), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) }) context("Testing the 
sample size calculation of survival data for other parameter variants") test_that("'getSampleSizeSurvival': Fixed sample size with minimum required definitions, pi1 = c(0.4, 0.5, 0.6) and pi2 = 0.2 at event time 12, accrual time 12 and follow-up time 6 as default, only alpha = 0.01 is specified ", { sampleSizeResult <- getSampleSizeSurvival(alpha = 0.01) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, c(58.52451, 31.248898, 20.120262), tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$nFixed, c(197.78666, 90.804254, 51.314209), tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed1, c(98.893329, 45.402127, 25.657105), tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed2, c(98.893329, 45.402127, 25.657105), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 18) expect_equal(sampleSizeResult$studyDuration, 18) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.8370942, 2.2986321, 2.821477), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.01, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Four stage O'Brien and Fleming group sequential design with minimum required definitions, pi1 = c(0.4, 0.5, 0.6) and pi2 = 0.2 at event time 12, accrual time 12 and follow-up time 6 as default", { sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 4)) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, c(158.37172, 72.708775, 41.088309), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, c(79.185858, 36.354387, 20.544155), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, c(79.185858, 36.354387, 20.544155), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, c(46.861741, 25.021615, 16.110694), tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, c(13.197643, 6.0590646, 3.4240258), tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.25, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 0.75, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[4, ], 1) expect_equal(sampleSizeResult$analysisTime[1, 
], c(7.9739331, 7.8101434, 7.6105076), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], c(11.495939, 11.330412, 11.125901), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], c(14.574435, 14.425585, 14.235444), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[4, ], c(18, 18, 18)) expect_equal(sampleSizeResult$studyDurationH1, c(15.491732, 15.406299, 15.298535), tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, c(18, 18, 18)) expect_equal(sampleSizeResult$eventsPerStage[1, ], c(11.715435, 6.2554038, 4.0276736), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], c(23.43087, 12.510808, 8.0553472), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], c(35.146306, 18.766211, 12.083021), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[4, ], c(46.861741, 25.021615, 16.110694), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, c(46.71422, 24.942847, 16.059978), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, c(44.876904, 23.961821, 15.428323), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, c(38.052731, 20.318084, 13.082227), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], c(105.23712, 47.322163, 26.058574), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], c(151.7193, 68.651695, 38.095372), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], c(158.37172, 72.708775, 41.088309), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[4, ], c(158.37172, 72.708775, 41.088309), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, c(156.87296, 71.824595, 40.451776), tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.0042542622, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.19131467, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.35652274, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[4, ], 0.24790832, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.55209168, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(10.651203, 25.469293, 56.523607), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], c(3.2636181, 5.0467111, 7.5182183), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], c(2.2002206, 2.9422007, 3.8377495), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[4, ], c(1.8065487, 2.2464886, 2.741937), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 2.5763449e-05, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0020996694, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.0097077663, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[4, ], 0.021469878, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': For fixed sample design, determine necessary accrual time if 200 subjects and 30 subjects per time unit can be recruited ", { sampleSizeResult <- getSampleSizeSurvival(accrualTime = c(0), accrualIntensity = c(30), maxNumberOfSubjects = 200) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$median1, c(16.282985, 12, 9.0776496), tolerance 
= 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07) expect_equal(sampleSizeResult$accrualTime, 6.6666667, tolerance = 1e-07) expect_equal(sampleSizeResult$followUpTime, c(5.4039758, 0.22825781, -1.7164516), tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult$nFixed, c(200, 200, 200)) expect_equal(sampleSizeResult$nFixed1, c(100, 100, 100)) expect_equal(sampleSizeResult$nFixed2, c(100, 100, 100)) expect_equal(sampleSizeResult$analysisTime[1, ], c(12.070642, 6.8949245, 4.950215), tolerance = 1e-07) expect_equal(sampleSizeResult$studyDuration, c(12.070642, 6.8949245, 4.950215), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.7849857, 2.2098739, 2.686355), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Determine necessary accrual time if 200 subjects and if the first 6 time units 20 subjects per time unit can be recruited, then 30 subjects per time unit", { sampleSizeResult <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(20, 30), maxNumberOfSubjects = 200) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07) expect_equal(sampleSizeResult$accrualTime, c(6, 8.6666667), tolerance = 1e-07) expect_equal(sampleSizeResult$totalAccrualTime, 8.6666667, tolerance = 1e-07) expect_equal(sampleSizeResult$followUpTime, c(4.8516734, -0.31523272, -2.5326655), tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult$nFixed, c(200, 200, 200)) expect_equal(sampleSizeResult$nFixed1, c(100, 100, 100)) expect_equal(sampleSizeResult$nFixed2, c(100, 100, 100)) expect_equal(sampleSizeResult$analysisTime[1, ], c(13.51834, 8.3514339, 6.1340011), tolerance = 1e-07) expect_equal(sampleSizeResult$studyDuration, c(13.51834, 8.3514339, 6.1340011), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.7849857, 2.2098739, 2.686355), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Determine maximum number of Subjects if the first 6 time units 20 subjects per time unit can be recruited, and after 10 time units 30 subjects per time unit", { sampleSizeResult <- getSampleSizeSurvival(accrualTime = c(0, 6, 10), accrualIntensity = c(20, 30)) ## ## Comparison of the results of TrialDesignPlanSurvival object 
'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, c(240, 240, 240)) expect_equal(sampleSizeResult$maxNumberOfEvents, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07) expect_equal(sampleSizeResult$totalAccrualTime, 10) expect_equal(sampleSizeResult$followUpTime, c(2.6783764, -1.6485661, -3.8659989), tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult$nFixed, c(240, 240, 240)) expect_equal(sampleSizeResult$nFixed1, c(120, 120, 120)) expect_equal(sampleSizeResult$nFixed2, c(120, 120, 120)) expect_equal(sampleSizeResult$analysisTime[1, ], c(12.678376, 8.3514339, 6.1340011), tolerance = 1e-07) expect_equal(sampleSizeResult$studyDuration, c(12.678376, 8.3514339, 6.1340011), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.7849857, 2.2098739, 2.686355), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Specify accrual time as a list", { at <- list("0 - <6" = 20, "6 - Inf" = 30) sampleSizeResult <- getSampleSizeSurvival(accrualTime = at, maxNumberOfSubjects = 200) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07) expect_equal(sampleSizeResult$accrualTime, c(6, 8.6666667), tolerance = 1e-07) expect_equal(sampleSizeResult$totalAccrualTime, 8.6666667, tolerance = 1e-07) expect_equal(sampleSizeResult$followUpTime, c(4.8516734, -0.31523272, -2.5326655), tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult$nFixed, c(200, 200, 200)) expect_equal(sampleSizeResult$nFixed1, c(100, 100, 100)) expect_equal(sampleSizeResult$nFixed2, c(100, 100, 100)) expect_equal(sampleSizeResult$analysisTime[1, ], c(13.51834, 8.3514339, 6.1340011), tolerance = 1e-07) expect_equal(sampleSizeResult$studyDuration, c(13.51834, 8.3514339, 6.1340011), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.7849857, 2.2098739, 2.686355), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Specify accrual time as a list, if maximum number of subjects need to be calculated", { at <- list("0 - <6" = 20, "6 - <=10" = 30) sampleSizeResult <- getSampleSizeSurvival(accrualTime = at) ## ## Comparison of 
the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, c(240, 240, 240)) expect_equal(sampleSizeResult$maxNumberOfEvents, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07) expect_equal(sampleSizeResult$totalAccrualTime, 10) expect_equal(sampleSizeResult$followUpTime, c(2.6783764, -1.6485661, -3.8659989), tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult$nFixed, c(240, 240, 240)) expect_equal(sampleSizeResult$nFixed1, c(120, 120, 120)) expect_equal(sampleSizeResult$nFixed2, c(120, 120, 120)) expect_equal(sampleSizeResult$analysisTime[1, ], c(12.678376, 8.3514339, 6.1340011), tolerance = 1e-07) expect_equal(sampleSizeResult$studyDuration, c(12.678376, 8.3514339, 6.1340011), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.7849857, 2.2098739, 2.686355), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Specify effect size for a two-stage group sequential design with O'Brien & Fleming boundaries. The effect size is based on event rates at a specified event time and needs to be specified because it should be shown that hazard ratio < 1", { sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), pi1 = 0.2, pi2 = 0.3, eventTime = 24) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, FALSE) expect_equal(sampleSizeResult$median1, 74.550809, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 46.640597, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.009297648, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.014861456, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 0.62562161, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 1076.0672, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 538.03358, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 538.03358, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 143.8377, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 89.672263, tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 11.811468, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 18) expect_equal(sampleSizeResult$studyDurationH1, 16.702852, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 71.918848, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 143.8377, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 143.65194, tolerance = 1e-07)
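# Hand-check (standard exponential identity, assumed): for event
# probabilities given at a common event time,
# hazardRatio = log(1 - pi1) / log(1 - pi2); the event time itself cancels.
expect_equal(log(1 - 0.2) / log(1 - 0.3), 0.62562161, tolerance = 1e-6)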
expect_equal(sampleSizeResult$expectedEventsH01, 141.26582, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 128.76314, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 1059.161, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 1076.0672, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 1072.5235, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.59039494, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.51710185, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.71909794, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Effect size is based on event rate at specified event time for the reference group and hazard ratio ", { sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, pi2 = 0.3, eventTime = 24) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, FALSE) expect_equal(sampleSizeResult$pi1, 0.16333997, tolerance = 1e-07) expect_equal(sampleSizeResult$median1, 93.281194, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 46.640597, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.007430728, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.014861456, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 532.72433, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 266.36217, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 266.36217, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 65.854457, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 44.393694, tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 11.816947, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 18) expect_equal(sampleSizeResult$studyDurationH1, 16.704001, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 32.927229, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 65.854457, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 65.76941, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 64.676952, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 58.952743, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 524.59793, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 532.72433, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 531.021, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.59039494, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.20960506, tolerance = 1e-07) 
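# Hand-check (same assumed identity, inverted): specifying hazardRatio = 0.5
# together with pi2 = 0.3 implies pi1 = 1 - (1 - pi2)^hazardRatio, matching
# the expected pi1 above.
expect_equal(1 - (1 - 0.3)^0.5, 0.16333997, tolerance = 1e-6)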
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.37730742, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.61425355, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Effect size is based on hazard rate for the reference group and hazard ratio", { sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, lambda2 = 0.02) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, FALSE) expect_equal(sampleSizeResult$pi1, 0.11307956, tolerance = 1e-07) expect_equal(sampleSizeResult$pi2, 0.21337214, tolerance = 1e-07) expect_equal(sampleSizeResult$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.01, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 406.47112, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 203.23556, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 203.23556, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 65.854457, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 33.872594, tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 11.754955, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 18) expect_equal(sampleSizeResult$studyDurationH1, 16.691007, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 32.927229, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 65.854457, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 65.76941, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 64.676952, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 58.952743, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 398.17083, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 406.47112, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 404.73134, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.59039494, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.37730742, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.61425355, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Specification of piecewise exponential survival time and hazard ratios ", { sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), hazardRatio = c(1.5, 1.8, 2)) ## ## Comparison of the 
results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$maxNumberOfSubjects, c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, c(381.35099, 166.59922, 113.99774), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, c(381.35099, 166.59922, 113.99774), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, c(192.45497, 91.579156, 65.854457), tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, c(63.558499, 27.766537, 18.999624), tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], c(13.350554, 13.286013, 13.241069), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], c(18, 18, 18)) expect_equal(sampleSizeResult$studyDurationH1, c(17.025453, 17.011925, 17.002504), tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, c(18, 18, 18)) expect_equal(sampleSizeResult$eventsPerStage[1, ], c(96.227483, 45.789578, 32.927229), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], c(192.45497, 91.579156, 65.854457), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, c(192.20642, 91.460887, 65.76941), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, c(189.01379, 89.941683, 64.676952), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, c(172.2852, 81.981429, 58.952743), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.59039494, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.76855, 2.2853938, 2.6503587), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], c(1.3298684, 1.5117519, 1.6279922), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Specification of piecewise exponential survival time as a list and hazard ratios ", { pws <- list("0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2)) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$maxNumberOfSubjects, c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, c(381.35099, 166.59922, 113.99774), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, c(381.35099, 166.59922, 113.99774), tolerance = 1e-07) 
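## NOTE (editorial): the named list 'pws' above is simply another way of writing
## piecewiseSurvivalTime = c(0, 5, 10) with lambda2 = c(0.01, 0.02, 0.04), which is why
## every expected value in this test matches the preceding vector-based test exactly.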
expect_equal(sampleSizeResult$maxNumberOfEvents, c(192.45497, 91.579156, 65.854457), tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, c(63.558499, 27.766537, 18.999624), tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], c(13.350554, 13.286013, 13.241069), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], c(18, 18, 18)) expect_equal(sampleSizeResult$studyDurationH1, c(17.025453, 17.011925, 17.002504), tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, c(18, 18, 18)) expect_equal(sampleSizeResult$eventsPerStage[1, ], c(96.227483, 45.789578, 32.927229), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], c(192.45497, 91.579156, 65.854457), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, c(192.20642, 91.460887, 65.76941), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, c(189.01379, 89.941683, 64.676952), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, c(172.2852, 81.981429, 58.952743), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.59039494, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.76855, 2.2853938, 2.6503587), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], c(1.3298684, 1.5117519, 1.6279922), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Specification of piecewise exponential survival time for both treatment arms", { sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), lambda1 = c(0.015, 0.03, 0.06)) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$lambda1, c(0.015, 0.03, 0.06), tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 1.5, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 762.70199, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 381.35099, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 381.35099, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 192.45497, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 63.558499, tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 13.350554, tolerance = 1e-07) 
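## NOTE (editorial): a quick sanity check of the hazardRatio expectation above - with both
## hazards given piecewise, the stage-wise ratio must be constant:
## c(0.015, 0.03, 0.06) / c(0.01, 0.02, 0.04) evaluates to c(1.5, 1.5, 1.5).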
expect_equal(sampleSizeResult$analysisTime[2, ], 18) expect_equal(sampleSizeResult$studyDurationH1, 17.025453, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 96.227483, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 192.45497, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 192.20642, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 189.01379, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 172.2852, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 762.70199, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 762.70199, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 762.70199, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.59039494, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 1.76855, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.3298684, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Specification of piecewise exponential survival time as a list", { pws <- list("0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2)) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$maxNumberOfSubjects, c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, c(381.35099, 166.59922, 113.99774), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, c(381.35099, 166.59922, 113.99774), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, c(192.45497, 91.579156, 65.854457), tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, c(63.558499, 27.766537, 18.999624), tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], c(13.350554, 13.286013, 13.241069), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], c(18, 18, 18)) expect_equal(sampleSizeResult$studyDurationH1, c(17.025453, 17.011925, 17.002504), tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, c(18, 18, 18)) expect_equal(sampleSizeResult$eventsPerStage[1, ], c(96.227483, 45.789578, 32.927229), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], c(192.45497, 91.579156, 65.854457), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, c(192.20642, 91.460887, 65.76941), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, c(189.01379, 89.941683, 64.676952), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, c(172.2852, 81.981429, 58.952743), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, 
], c(762.70199, 333.19844, 227.99548), tolerance = 1e-07)
	expect_equal(sampleSizeResult$numberOfSubjects[2, ], c(762.70199, 333.19844, 227.99548), tolerance = 1e-07)
	expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, c(762.70199, 333.19844, 227.99548), tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.20960506, tolerance = 1e-07)
	expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.59039494, tolerance = 1e-07)
	expect_equal(sampleSizeResult$earlyStop, 0.20960506, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.76855, 2.2853938, 2.6503587), tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], c(1.3298684, 1.5117519, 1.6279922), tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07)
})

test_that("'getSampleSizeSurvival': Specify effect size based on median survival times (median1 = 5, median2 = 3)", {
	sampleSizeResult <- getSampleSizeSurvival(lambda1 = log(2) / 5, lambda2 = log(2) / 3)

	##
	## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
	##
	expect_equal(sampleSizeResult$directionUpper, FALSE)
	expect_equal(sampleSizeResult$pi1, 0.81053543, tolerance = 1e-07)
	expect_equal(sampleSizeResult$pi2, 0.9375, tolerance = 1e-07)
	expect_equal(sampleSizeResult$median1, 5, tolerance = 1e-07)
	expect_equal(sampleSizeResult$median2, 3)
	expect_equal(sampleSizeResult$hazardRatio, 0.6, tolerance = 1e-07)
	expect_equal(sampleSizeResult$maxNumberOfEvents, 120.3157, tolerance = 1e-07)
	expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
	expect_equal(sampleSizeResult$nFixed, 141.26641, tolerance = 1e-07)
	expect_equal(sampleSizeResult$nFixed1, 70.633206, tolerance = 1e-07)
	expect_equal(sampleSizeResult$nFixed2, 70.633206, tolerance = 1e-07)
	expect_equal(sampleSizeResult$analysisTime[1, ], 18)
	expect_equal(sampleSizeResult$studyDuration, 18)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.6995143, tolerance = 1e-07)
	expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07)
})

test_that("'getSampleSizeSurvival': Specify effect size based on median survival times of Weibull distribution with kappa = 2 (median1 = 5, median2 = 3)", {
	sampleSizeResult <- getSampleSizeSurvival(
		lambda1 = getLambdaByMedian(median = 5, kappa = 2),
		lambda2 = getLambdaByMedian(median = 3, kappa = 2), kappa = 2)

	##
	## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
	##
	expect_equal(sampleSizeResult$directionUpper, FALSE)
	expect_equal(sampleSizeResult$pi1, 0.98154699, tolerance = 1e-07)
	expect_equal(sampleSizeResult$pi2, 0.99998474, tolerance = 1e-07)
	expect_equal(sampleSizeResult$median1, 5, tolerance = 1e-07)
	expect_equal(sampleSizeResult$median2, 3)
	expect_equal(sampleSizeResult$hazardRatio, 0.36, tolerance = 1e-07)
	expect_equal(sampleSizeResult$maxNumberOfEvents, 30.078926, tolerance = 1e-07)
	expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
	expect_equal(sampleSizeResult$nFixed, 31.248566, tolerance = 1e-07)
	expect_equal(sampleSizeResult$nFixed1, 15.624283, tolerance = 1e-07)
	expect_equal(sampleSizeResult$nFixed2, 15.624283, tolerance = 1e-07)
	expect_equal(sampleSizeResult$analysisTime[1, ], 18)
	expect_equal(sampleSizeResult$studyDuration, 18)
	expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.48932026, tolerance = 1e-07)
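## NOTE (editorial): sketch of the Weibull relation assumed by the test above, following the
## explicit formula lambda = log(2)^(1 / kappa) / median used in the 'median1 and median2'
## test further below: with kappa = 2 the implied hazard ratio is
## (lambda1 / lambda2)^kappa = (3 / 5)^2 = 0.36, matching the expectation above.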
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Calculation of maximum number of subjects for given follow-up time", { sampleSizeResult <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, lambda1 = c(0.01), followUpTime = 5) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, FALSE) expect_equal(sampleSizeResult$pi1, 0.11307956, tolerance = 1e-07) expect_equal(sampleSizeResult$pi2, 0.21337214, tolerance = 1e-07) expect_equal(sampleSizeResult$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 477.30924, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualTime, c(6, 12.515269), tolerance = 1e-07) expect_equal(sampleSizeResult$totalAccrualTime, 12.515269, tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult$nFixed, 477.30924, tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed1, 238.65462, tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed2, 238.65462, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 17.515269, tolerance = 1e-07) expect_equal(sampleSizeResult$studyDuration, 17.515269, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeResult2 <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, lambda1 = c(0.01), followUpTime = -1) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult2' with expected results ## expect_equal(sampleSizeResult2$directionUpper, FALSE) expect_equal(sampleSizeResult2$pi1, 0.11307956, tolerance = 1e-07) expect_equal(sampleSizeResult2$pi2, 0.21337214, tolerance = 1e-07) expect_equal(sampleSizeResult2$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult2$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult2$hazardRatio, 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult2$maxNumberOfSubjects, 741.77932, tolerance = 1e-07) expect_equal(sampleSizeResult2$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult2$accrualTime, c(6, 17.50527), tolerance = 1e-07) expect_equal(sampleSizeResult2$totalAccrualTime, 17.50527, tolerance = 1e-07) expect_equal(sampleSizeResult2$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult2$nFixed, 741.77932, tolerance = 1e-07) expect_equal(sampleSizeResult2$nFixed1, 370.88966, tolerance = 1e-07) expect_equal(sampleSizeResult2$nFixed2, 370.88966, tolerance = 1e-07) expect_equal(sampleSizeResult2$analysisTime[1, ], 16.50527, tolerance = 1e-07) expect_equal(sampleSizeResult2$studyDuration, 16.50527, tolerance = 1e-07) expect_equal(sampleSizeResult2$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) expect_equal(sampleSizeResult2$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) .skipTestifDisabled() sampleSizeResult3 <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, lambda1 = c(0.01), followUpTime = 200) ## ## Comparison of the results of 
TrialDesignPlanSurvival object 'sampleSizeResult3' with expected results ## expect_equal(sampleSizeResult3$directionUpper, FALSE) expect_equal(sampleSizeResult3$pi1, 0.11307956, tolerance = 1e-07) expect_equal(sampleSizeResult3$pi2, 0.21337214, tolerance = 1e-07) expect_equal(sampleSizeResult3$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult3$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult3$hazardRatio, 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult3$maxNumberOfSubjects, 70.679258, tolerance = 1e-07) expect_equal(sampleSizeResult3$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult3$accrualTime, 3.2126936, tolerance = 1e-07) expect_equal(sampleSizeResult3$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult3$nFixed, 70.679258, tolerance = 1e-07) expect_equal(sampleSizeResult3$nFixed1, 35.339629, tolerance = 1e-07) expect_equal(sampleSizeResult3$nFixed2, 35.339629, tolerance = 1e-07) expect_equal(sampleSizeResult3$analysisTime[1, ], 203.2127, tolerance = 1e-07) expect_equal(sampleSizeResult3$studyDuration, 203.2127, tolerance = 1e-07) expect_equal(sampleSizeResult3$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) expect_equal(sampleSizeResult3$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeResult4 <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, lambda1 = c(0.01), followUpTime = -200) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult4' with expected results ## expect_equal(sampleSizeResult4$directionUpper, FALSE) expect_equal(sampleSizeResult4$pi1, 0.11307956, tolerance = 1e-07) expect_equal(sampleSizeResult4$pi2, 0.21337214, tolerance = 1e-07) expect_equal(sampleSizeResult4$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult4$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult4$hazardRatio, 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult4$maxNumberOfSubjects, 11288.779, tolerance = 1e-07) expect_equal(sampleSizeResult4$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult4$accrualTime, c(6, 216.50527), tolerance = 1e-07) expect_equal(sampleSizeResult4$totalAccrualTime, 216.50527, tolerance = 1e-07) expect_equal(sampleSizeResult4$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult4$nFixed, 11288.779, tolerance = 1e-07) expect_equal(sampleSizeResult4$nFixed1, 5644.3897, tolerance = 1e-07) expect_equal(sampleSizeResult4$nFixed2, 5644.3897, tolerance = 1e-07) expect_equal(sampleSizeResult4$analysisTime[1, ], 16.50527, tolerance = 1e-07) expect_equal(sampleSizeResult4$studyDuration, 16.50527, tolerance = 1e-07) expect_equal(sampleSizeResult4$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) expect_equal(sampleSizeResult4$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeResult5 <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, lambda1 = c(0.01), followUpTime = 44.43107095) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult5' with expected results ## expect_equal(sampleSizeResult5$directionUpper, FALSE) expect_equal(sampleSizeResult5$pi1, 0.11307956, tolerance = 1e-07) expect_equal(sampleSizeResult5$pi2, 0.21337214, tolerance = 1e-07) expect_equal(sampleSizeResult5$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult5$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult5$hazardRatio, 0.5, 
tolerance = 1e-07) expect_equal(sampleSizeResult5$maxNumberOfSubjects, 131.99999, tolerance = 1e-07) expect_equal(sampleSizeResult5$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult5$accrualTime, 5.9999996, tolerance = 1e-07) expect_equal(sampleSizeResult5$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult5$nFixed, 131.99999, tolerance = 1e-07) expect_equal(sampleSizeResult5$nFixed1, 65.999995, tolerance = 1e-07) expect_equal(sampleSizeResult5$nFixed2, 65.999995, tolerance = 1e-07) expect_equal(sampleSizeResult5$analysisTime[1, ], 50.43107, tolerance = 1e-07) expect_equal(sampleSizeResult5$studyDuration, 50.43107, tolerance = 1e-07) expect_equal(sampleSizeResult5$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) expect_equal(sampleSizeResult5$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeResult6 <- getSampleSizeSurvival(accrualTime = c(0, 60), accrualIntensity = c(22, 53), lambda2 = 0.02, lambda1 = c(0.01), maxNumberOfSubjects = 500000) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult6' with expected results ## expect_equal(sampleSizeResult6$directionUpper, FALSE) expect_equal(sampleSizeResult6$pi1, 0.11307956, tolerance = 1e-07) expect_equal(sampleSizeResult6$pi2, 0.21337214, tolerance = 1e-07) expect_equal(sampleSizeResult6$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult6$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult6$hazardRatio, 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult6$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult6$accrualTime, c(60, 9469.0566), tolerance = 1e-07) expect_equal(sampleSizeResult6$totalAccrualTime, 9469.0566, tolerance = 1e-07) expect_equal(sampleSizeResult6$followUpTime, -9448.0008, tolerance = 1e-07) expect_equal(sampleSizeResult6$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult6$nFixed, 5e+05) expect_equal(sampleSizeResult6$nFixed1, 250000) expect_equal(sampleSizeResult6$nFixed2, 250000) expect_equal(sampleSizeResult6$analysisTime[1, ], 21.055818, tolerance = 1e-07) expect_equal(sampleSizeResult6$studyDuration, 21.055818, tolerance = 1e-07) expect_equal(sampleSizeResult6$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) expect_equal(sampleSizeResult6$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeResult7 <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, lambda1 = c(0.01), followUpTime = 44) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult7' with expected results ## expect_equal(sampleSizeResult7$directionUpper, FALSE) expect_equal(sampleSizeResult7$pi1, 0.11307956, tolerance = 1e-07) expect_equal(sampleSizeResult7$pi2, 0.21337214, tolerance = 1e-07) expect_equal(sampleSizeResult7$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult7$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult7$hazardRatio, 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult7$maxNumberOfSubjects, 132.8172, tolerance = 1e-07) expect_equal(sampleSizeResult7$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult7$accrualTime, c(6, 6.0154188), tolerance = 1e-07) expect_equal(sampleSizeResult7$totalAccrualTime, 6.0154188, tolerance = 1e-07) expect_equal(sampleSizeResult7$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult7$nFixed, 132.8172, tolerance = 1e-07) expect_equal(sampleSizeResult7$nFixed1, 66.408599, tolerance = 
1e-07) expect_equal(sampleSizeResult7$nFixed2, 66.408599, tolerance = 1e-07) expect_equal(sampleSizeResult7$analysisTime[1, ], 50.015396, tolerance = 1e-07) expect_equal(sampleSizeResult7$studyDuration, 50.015396, tolerance = 1e-07) expect_equal(sampleSizeResult7$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) expect_equal(sampleSizeResult7$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeResult8 <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, lambda1 = c(0.01), followUpTime = 45) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult8' with expected results ## expect_equal(sampleSizeResult8$directionUpper, FALSE) expect_equal(sampleSizeResult8$pi1, 0.11307956, tolerance = 1e-07) expect_equal(sampleSizeResult8$pi2, 0.21337214, tolerance = 1e-07) expect_equal(sampleSizeResult8$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult8$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult8$hazardRatio, 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult8$maxNumberOfSubjects, 130.99398, tolerance = 1e-07) expect_equal(sampleSizeResult8$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult8$accrualTime, 5.9542719, tolerance = 1e-07) expect_equal(sampleSizeResult8$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult8$nFixed, 130.99398, tolerance = 1e-07) expect_equal(sampleSizeResult8$nFixed1, 65.496991, tolerance = 1e-07) expect_equal(sampleSizeResult8$nFixed2, 65.496991, tolerance = 1e-07) expect_equal(sampleSizeResult8$analysisTime[1, ], 50.954287, tolerance = 1e-07) expect_equal(sampleSizeResult8$studyDuration, 50.954287, tolerance = 1e-07) expect_equal(sampleSizeResult8$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) expect_equal(sampleSizeResult8$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Calculation of median1 and median2", { sampleSizeResult <- getSampleSizeSurvival(lambda1 = log(2) / 3, lambda2 = log(2) / 5) expect_equal(sampleSizeResult$median1, 3) expect_equal(sampleSizeResult$median2, 5) kappa <- 2 sampleSizeResult2 <- getSampleSizeSurvival(lambda1 = log(2)^(1 / kappa) / 3, lambda2 = log(2)^(1 / kappa) / 5, kappa = kappa) expect_equal(sampleSizeResult2$median1, 3) expect_equal(sampleSizeResult2$median2, 5) sampleSizeResult1 <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), median2 = 4, median1 = c(5), followUpTime = 5) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult1' with expected results ## expect_equal(sampleSizeResult1$directionUpper, FALSE) expect_equal(sampleSizeResult1$pi1, 0.81053543, tolerance = 1e-07) expect_equal(sampleSizeResult1$pi2, 0.875, tolerance = 1e-07) expect_equal(sampleSizeResult1$lambda1, 0.13862944, tolerance = 1e-07) expect_equal(sampleSizeResult1$lambda2, 0.1732868, tolerance = 1e-07) expect_equal(sampleSizeResult1$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(sampleSizeResult1$maxNumberOfSubjects, 770.8069, tolerance = 1e-07) expect_equal(sampleSizeResult1$maxNumberOfEvents, 630.52017, tolerance = 1e-07) expect_equal(sampleSizeResult1$accrualTime, c(6, 18.05296), tolerance = 1e-07) expect_equal(sampleSizeResult1$totalAccrualTime, 18.05296, tolerance = 1e-07) expect_equal(sampleSizeResult1$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult1$nFixed, 770.8069, tolerance = 1e-07) expect_equal(sampleSizeResult1$nFixed1, 385.40345, tolerance = 1e-07) 
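## NOTE (editorial): for the exponential case (kappa = 1) the relation median = log(2) / lambda
## applies, so median1 = 5 and median2 = 4 imply lambda1 = log(2) / 5 = 0.13862944 and
## lambda2 = log(2) / 4 = 0.1732868, exactly as asserted above.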
expect_equal(sampleSizeResult1$nFixed2, 385.40345, tolerance = 1e-07) expect_equal(sampleSizeResult1$analysisTime[1, ], 23.052959, tolerance = 1e-07) expect_equal(sampleSizeResult1$studyDuration, 23.052959, tolerance = 1e-07) expect_equal(sampleSizeResult1$criticalValuesEffectScale[1, ], 0.85546574, tolerance = 1e-07) expect_equal(sampleSizeResult1$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeResult2 <- getSampleSizeSurvival(median2 = 25, lambda1 = c(0.021, 0.023), maxNumberOfSubjects = 2280) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult2' with expected results ## expect_equal(sampleSizeResult2$directionUpper, c(FALSE, FALSE)) expect_equal(sampleSizeResult2$pi1, c(0.22275526, 0.24118707), tolerance = 1e-07) expect_equal(sampleSizeResult2$pi2, 0.28302238, tolerance = 1e-07) expect_equal(sampleSizeResult2$median1, c(33.007009, 30.136834), tolerance = 1e-07) expect_equal(sampleSizeResult2$lambda2, 0.027725887, tolerance = 1e-07) expect_equal(sampleSizeResult2$hazardRatio, c(0.7574149, 0.82954965), tolerance = 1e-07) expect_equal(sampleSizeResult2$maxNumberOfEvents, c(406.69171, 899.03732), tolerance = 1e-07) expect_equal(sampleSizeResult2$accrualIntensity, 190) expect_equal(sampleSizeResult2$followUpTime, c(2.2277357, 13.964693), tolerance = 1e-07) expect_equal(sampleSizeResult2$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult2$nFixed, c(2280, 2280)) expect_equal(sampleSizeResult2$nFixed1, c(1140, 1140)) expect_equal(sampleSizeResult2$nFixed2, c(1140, 1140)) expect_equal(sampleSizeResult2$analysisTime[1, ], c(14.227736, 25.964693), tolerance = 1e-07) expect_equal(sampleSizeResult2$studyDuration, c(14.227736, 25.964693), tolerance = 1e-07) expect_equal(sampleSizeResult2$criticalValuesEffectScale[1, ], c(0.82334724, 0.87745097), tolerance = 1e-07) expect_equal(sampleSizeResult2$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeResult3 <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), median2 = 50, lambda1 = 0.01, followUpTime = 5) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult3' with expected results ## expect_equal(sampleSizeResult3$directionUpper, FALSE) expect_equal(sampleSizeResult3$pi1, 0.11307956, tolerance = 1e-07) expect_equal(sampleSizeResult3$pi2, 0.15325469, tolerance = 1e-07) expect_equal(sampleSizeResult3$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult3$lambda2, 0.013862944, tolerance = 1e-07) expect_equal(sampleSizeResult3$hazardRatio, 0.72134752, tolerance = 1e-07) expect_equal(sampleSizeResult3$maxNumberOfSubjects, 1477.2065, tolerance = 1e-07) expect_equal(sampleSizeResult3$maxNumberOfEvents, 294.26878, tolerance = 1e-07) expect_equal(sampleSizeResult3$accrualTime, c(6, 31.381254), tolerance = 1e-07) expect_equal(sampleSizeResult3$totalAccrualTime, 31.381254, tolerance = 1e-07) expect_equal(sampleSizeResult3$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult3$nFixed, 1477.2065, tolerance = 1e-07) expect_equal(sampleSizeResult3$nFixed1, 738.60324, tolerance = 1e-07) expect_equal(sampleSizeResult3$nFixed2, 738.60324, tolerance = 1e-07) expect_equal(sampleSizeResult3$analysisTime[1, ], 36.381254, tolerance = 1e-07) expect_equal(sampleSizeResult3$studyDuration, 36.381254, tolerance = 1e-07) expect_equal(sampleSizeResult3$criticalValuesEffectScale[1, ], 0.79571801, tolerance = 1e-07) expect_equal(sampleSizeResult3$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeResult4 <- 
getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, median1 = 32, followUpTime = 5) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult4' with expected results ## expect_equal(sampleSizeResult4$directionUpper, TRUE) expect_equal(sampleSizeResult4$pi1, 0.22889459, tolerance = 1e-07) expect_equal(sampleSizeResult4$pi2, 0.21337214, tolerance = 1e-07) expect_equal(sampleSizeResult4$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult4$lambda1, 0.021660849, tolerance = 1e-07) expect_equal(sampleSizeResult4$hazardRatio, 1.0830425, tolerance = 1e-07) expect_equal(sampleSizeResult4$maxNumberOfSubjects, 7086.5152, tolerance = 1e-07) expect_equal(sampleSizeResult4$maxNumberOfEvents, 4933.3616, tolerance = 1e-07) expect_equal(sampleSizeResult4$accrualTime, c(6, 137.21727), tolerance = 1e-07) expect_equal(sampleSizeResult4$totalAccrualTime, 137.21727, tolerance = 1e-07) expect_equal(sampleSizeResult4$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult4$nFixed, 7086.5152, tolerance = 1e-07) expect_equal(sampleSizeResult4$nFixed1, 3543.2576, tolerance = 1e-07) expect_equal(sampleSizeResult4$nFixed2, 3543.2576, tolerance = 1e-07) expect_equal(sampleSizeResult4$analysisTime[1, ], 142.21727, tolerance = 1e-07) expect_equal(sampleSizeResult4$studyDuration, 142.21727, tolerance = 1e-07) expect_equal(sampleSizeResult4$criticalValuesEffectScale[1, ], 1.057396, tolerance = 1e-07) expect_equal(sampleSizeResult4$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) }) context("Testing the follow-up time calculation") test_that("'getSampleSizeSurvival': analysis time at last stage equals accrual time + follow-up time", { x1 <- getSampleSizeSurvival(getDesignGroupSequential(typeOfDesign = "P"), accrualTime = 12, maxNumberOfSubjects = 766, pi2 = 0.05, pi1 = 0.1) expect_equal(x1$analysisTime[3], x1$accrualTime + x1$followUpTime) x2 <- getSampleSizeSurvival(getDesignGroupSequential(typeOfDesign = "P"), accrualTime = 12, maxNumberOfSubjects = 766, lambda2 = 0.005, lambda1 = 0.01) expect_equal(x2$analysisTime[3], x2$accrualTime + x2$followUpTime) x3 <- getSampleSizeSurvival(getDesignGroupSequential(typeOfDesign = "WT"), accrualTime = c(0, 12, 15), accrualIntensity = c(20, 30), lambda2 = 0.005, lambda1 = 0.01) expect_equal(x3$analysisTime[length(x3$analysisTime)], x3$accrualTime[length(x3$accrualTime)] + x3$followUpTime) x4 <- getSampleSizeSurvival(getDesignGroupSequential(typeOfDesign = "WT"), accrualTime = c(0, 12, 15), accrualIntensity = c(40, 60), piecewiseSurvivalTime = c(0, 5), lambda2 = c(0.005, 0.01), hazardRatio = 0.8) expect_equal(x4$analysisTime[length(x4$analysisTime)], x4$accrualTime[length(x4$accrualTime)] + x4$followUpTime) }) test_that("'getSampleSizeSurvival': follow-up time is equal for different argument-target constellations", { designGS1 <- getDesignGroupSequential(informationRates = c(0.2,0.5,1), sided = 1, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3) x5 <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) x6 <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, maxNumberOfSubjects = x5$maxNumberOfSubjects, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) expect_equal(x5$followUpTime, x6$followUpTime) x7 <- getSampleSizeSurvival(designGS1, 
typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 6, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) x8 <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 6, accrualTime = 8, maxNumberOfSubjects = x7$maxNumberOfSubjects, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) expect_equal(x7$followUpTime, x8$followUpTime) x9 <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 16, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) x10 <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 16, accrualTime = 8, maxNumberOfSubjects = x9$maxNumberOfSubjects, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) expect_equal(x9$followUpTime, x10$followUpTime) }) context("Testing expected warnings and errors") test_that("'getSampleSizeSurvival': illegal arguments", { expect_error(getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, lambda1 = c(0.01, 0.015), followUpTime = 5), paste0("Illegal argument: the calulation of 'maxNumberOfSubjects' for given 'followUpTime' ", "is only available for a single 'lambda1'; lambda1 = c(0.01, 0.015)"), fixed = TRUE) expect_error(getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, median1 = c(5, 6), followUpTime = 5), paste0("Illegal argument: the calulation of 'maxNumberOfSubjects' for given 'followUpTime' ", "is only available for a single 'lambda1'; lambda1 = c(0.139, 0.116)"), fixed = TRUE) expect_error(getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), median2 = 4, median1 = c(5, 6), followUpTime = 5), paste0("Illegal argument: the calulation of 'maxNumberOfSubjects' for given 'followUpTime' ", "is only available for a single 'median1'; median1 = c(5, 6)"), fixed = TRUE) expect_error(getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), pi2 = 0.213, pi1 = c(0.113, 0.165), followUpTime = 5), paste0("Illegal argument: the calulation of 'maxNumberOfSubjects' for given 'followUpTime' ", "is only available for a single 'pi1'; pi1 = c(0.113, 0.165)"), fixed = TRUE) expect_error(getSampleSizeSurvival(accrualTime = c(0), pi1 = c(0.4, 0.5), accrualIntensity = c(22), followUpTime = 6), paste0("Illegal argument: the calulation of 'maxNumberOfSubjects' for given 'followUpTime' ", "is only available for a single 'pi1'; pi1 = c(0.4, 0.5)"), fixed = TRUE) expect_warning(getSampleSizeSurvival(accrualTime = c(0, 6, 30), pi1 = 0.4, accrualIntensity = c(0.22, 0.53), maxNumberOfSubjects = 1000), "Accrual duration longer than maximal study duration (time to maximal number of events); followUpTime = -17.501", fixed = TRUE) expect_error(getSampleSizeSurvival(accrualTime = c(0, 6), pi1 = 0.4, accrualIntensity = c(0.22, 0.53), maxNumberOfSubjects = 1000), paste0("Illegal argument: the calulation of 'followUpTime' for given 'maxNumberOfSubjects' and ", "relative accrual intensities (< 1) can only be done if end of accrual is defined"), fixed = TRUE) expect_error(getSampleSizeSurvival(lambda2 = -1, hazardRatio = 2), "Argument out of bounds: 'lambda2' (-1) must be >= 0", fixed = TRUE) expect_error(getSampleSizeSurvival(lambda2 = 0, hazardRatio = 2), "Illegal argument: 'lambda2' (0) not allowed: at least one lambda value must be > 
0", fixed = TRUE) expect_error(getSampleSizeSurvival(lambda2 = 0.9, hazardRatio = 0.8, kappa = 0), "Argument out of bounds: 'kappa' (0) must be > 0", fixed = TRUE) }) context("Testing the calculation of event probabilities and number of subjects") test_that("'getEventProbabilities': check expected events over time for overall survival (case 1)", { design <- getDesignGroupSequential( sided = 1, alpha = 0.025, beta = 0.2, informationRates = c(0.33, 0.7, 1), futilityBounds = c(0, 0), bindingFutility = FALSE) piecewiseSurvivalTime <- list( "0 - <6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, "15 - <21" = 0.01, ">=21" = 0.007) accrualTime <- list( "0 - <12" = 15, "12 - <13" = 21, "13 - <14" = 27, "14 - <15" = 33, "15 - <16" = 39, ">=16" = 45) powerResults <- getPowerSurvival( design = design, typeOfComputation = "Schoenfeld", thetaH0 = 1, directionUpper = FALSE, dropoutRate1 = 0.05, dropoutRate2 = 0.05, dropoutTime = 12, allocationRatioPlanned = 1, accrualTime = accrualTime, piecewiseSurvivalTime = piecewiseSurvivalTime, hazardRatio = seq(0.6, 1, 0.05), maxNumberOfEvents = 404, maxNumberOfSubjects = 1405) piecewiseSurvivalTimeOS <- list( "0 - <14" = 0.015, "14 - <24" = 0.01, "24 - <44" = 0.005, ">=44" = 0.0025 ) timeOS <- c(powerResults$analysisTime[2:3, 4], 17 + 3.5 * 12) eventsOS <- getEventProbabilities( timeOS, accrualTime = accrualTime, piecewiseSurvivalTime = piecewiseSurvivalTimeOS, kappa = 1, allocationRatioPlanned = 1, hazardRatio = 0.8, dropoutRate1 = 0.05, dropoutRate2 = 0.05, dropoutTime = 12, maxNumberOfSubjects = 1405)$overallEventProbabilities eventsOS <- eventsOS * 1405 expect_equal(round(timeOS, 2), c(37.60, 46.72, 59.00)) expect_equal(round(eventsOS, 1), c(194.1, 288.7, 365.1)) }) test_that("'getEventProbabilities': check expected events over time for overall survival (case 2)", { accrualTime <- list( "0 - <12" = 15, "12 - <13" = 21, "13 - <14" = 27, "14 - <15" = 33, "15 - <16" = 39, ">=16" = 45) piecewiseSurvivalTimeOS <- list( "0 - <14" = 0.015, "14 - <24" = 0.01, "24 - <44" = 0.005, ">=44" = 0.0025 ) timeOS <- c(37.59823, 46.71658, 59) eventsOS <- getEventProbabilities( timeOS, accrualTime = accrualTime, piecewiseSurvivalTime = piecewiseSurvivalTimeOS, kappa = 1, allocationRatioPlanned = 1, hazardRatio = 0.8, dropoutRate1 = 0.05, dropoutRate2 = 0.05, dropoutTime = 12, maxNumberOfSubjects = 1405) ## ## Comparison of the results of EventProbabilities object 'eventsOS' with expected results ## expect_equal(eventsOS$time, c(37.59823, 46.71658, 59), tolerance = 1e-07) expect_equal(eventsOS$accrualTime, c(12, 13, 14, 15, 16, 40.555556), tolerance = 1e-07) expect_equal(eventsOS$lambda1, c(0.012, 0.008, 0.004, 0.002), tolerance = 1e-07) expect_equal(eventsOS$overallEventProbabilities, c(0.13811859, 0.20546928, 0.2598385), tolerance = 1e-07) expect_equal(eventsOS$eventProbabilities1, c(0.12437783, 0.18544801, 0.23527681), tolerance = 1e-07) expect_equal(eventsOS$eventProbabilities2, c(0.15185935, 0.22549055, 0.28440019), tolerance = 1e-07) }) test_that("'getNumberOfSubjects': check the number of recruited subjects at given time vector", { accrualTime1 <- list( "0 - <12" = 12, "12 - <13" = 21, "13 - <14" = 27, "14 - <15" = 33, "15 - <16" = 39, ">=16" = 45) numberOfSubjects1 <- getNumberOfSubjects(time = 1:3, accrualTime = getAccrualTime(accrualTime1, maxNumberOfSubjects = 1405)) ## ## Comparison of the results of NumberOfSubjects object 'numberOfSubjects1' with expected results ## expect_equal(numberOfSubjects1$time, c(1, 2, 3)) expect_equal(numberOfSubjects1$accrualTime, c(12, 13, 
14, 15, 16, 41.355556), tolerance = 1e-07)
	expect_equal(numberOfSubjects1$numberOfSubjects, c(12, 24, 36), tolerance = 1e-07)

	accrualTime2 <- list( "0 - <12" = 12, "12 - <13" = 21, "13 - <14" = 27, "14 - <15" = 33, "15 - <16" = 39)
	numberOfSubjects2 <- getNumberOfSubjects(time = 1:3, accrualTime = getAccrualTime(accrualTime2))

	##
	## Comparison of the results of NumberOfSubjects object 'numberOfSubjects2' with expected results
	##
	expect_equal(numberOfSubjects2$time, c(1, 2, 3))
	expect_equal(numberOfSubjects2$maxNumberOfSubjects, 264)
	expect_equal(numberOfSubjects2$numberOfSubjects, c(12, 24, 36))
})

test_that("'getSampleSizeSurvival': check the calculation of 'maxNumberOfSubjects' for given 'followUpTime'", {
	sampleSizeSurvival1 <- getSampleSizeSurvival(lambda2 = c(0.02, 0.03), piecewiseSurvivalTime = c(0, 12), hazardRatio = 0.6, followUpTime = 8, accrualIntensity = 30, accrualTime = 0)

	##
	## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeSurvival1' with expected results
	##
	expect_equal(sampleSizeSurvival1$directionUpper, FALSE)
	expect_equal(sampleSizeSurvival1$lambda1, c(0.012, 0.018), tolerance = 1e-07)
	expect_equal(sampleSizeSurvival1$maxNumberOfSubjects, 484.65038, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival1$maxNumberOfEvents, 120.3157, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival1$accrualTime, 16.155013, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival1$calculateFollowUpTime, TRUE)
	expect_equal(sampleSizeSurvival1$nFixed, 484.65038, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival1$nFixed1, 242.32519, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival1$nFixed2, 242.32519, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival1$analysisTime[1, ], 24.155014, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival1$studyDuration, 24.155014, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival1$criticalValuesEffectScale[1, ], 0.6995143, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival1$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07)

	sampleSizeSurvival2 <- getSampleSizeSurvival(piecewiseSurvivalTime = list( "<12" = 0.02, ">=12" = 0.03), hazardRatio = 0.6, followUpTime = 8, accrualIntensity = 30, accrualTime = 0)

	##
	## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeSurvival2' with expected results
	##
	expect_equal(sampleSizeSurvival2$directionUpper, FALSE)
	expect_equal(sampleSizeSurvival2$lambda1, c(0.012, 0.018), tolerance = 1e-07)
	expect_equal(sampleSizeSurvival2$maxNumberOfSubjects, 484.65038, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival2$maxNumberOfEvents, 120.3157, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival2$accrualTime, 16.155013, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival2$calculateFollowUpTime, TRUE)
	expect_equal(sampleSizeSurvival2$nFixed, 484.65038, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival2$nFixed1, 242.32519, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival2$nFixed2, 242.32519, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival2$analysisTime[1, ], 24.155014, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival2$studyDuration, 24.155014, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival2$criticalValuesEffectScale[1, ], 0.6995143, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival2$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07)

	sampleSizeSurvival3 <- getSampleSizeSurvival(lambda2 = c(0.02, 0.03), piecewiseSurvivalTime = c(0, 12), hazardRatio = 0.6, followUpTime = 8, accrualIntensity = 30, accrualTime = 0)

	##
	## Comparison of the results of TrialDesignPlanSurvival object 
'sampleSizeSurvival3' with expected results ## expect_equal(sampleSizeSurvival3$directionUpper, FALSE) expect_equal(sampleSizeSurvival3$lambda1, c(0.012, 0.018), tolerance = 1e-07) expect_equal(sampleSizeSurvival3$maxNumberOfSubjects, 484.65038, tolerance = 1e-07) expect_equal(sampleSizeSurvival3$maxNumberOfEvents, 120.3157, tolerance = 1e-07) expect_equal(sampleSizeSurvival3$accrualTime, 16.155013, tolerance = 1e-07) expect_equal(sampleSizeSurvival3$calculateFollowUpTime, TRUE) expect_equal(sampleSizeSurvival3$nFixed, 484.65038, tolerance = 1e-07) expect_equal(sampleSizeSurvival3$nFixed1, 242.32519, tolerance = 1e-07) expect_equal(sampleSizeSurvival3$nFixed2, 242.32519, tolerance = 1e-07) expect_equal(sampleSizeSurvival3$analysisTime[1, ], 24.155014, tolerance = 1e-07) expect_equal(sampleSizeSurvival3$studyDuration, 24.155014, tolerance = 1e-07) expect_equal(sampleSizeSurvival3$criticalValuesEffectScale[1, ], 0.6995143, tolerance = 1e-07) expect_equal(sampleSizeSurvival3$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeSurvival4 <- getSampleSizeSurvival(lambda2 = c(0.02, 0.03), piecewiseSurvivalTime = c(0, 12), hazardRatio = 0.8, followUpTime = 8, accrualIntensity = 30, accrualTime = 0) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeSurvival4' with expected results ## expect_equal(sampleSizeSurvival4$directionUpper, FALSE) expect_equal(sampleSizeSurvival4$lambda1, c(0.016, 0.024), tolerance = 1e-07) expect_equal(sampleSizeSurvival4$maxNumberOfSubjects, 1325.4661, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$maxNumberOfEvents, 630.52017, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$accrualTime, 44.182203, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$calculateFollowUpTime, TRUE) expect_equal(sampleSizeSurvival4$nFixed, 1325.4661, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$nFixed1, 662.73305, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$nFixed2, 662.73305, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$analysisTime[1, ], 52.182201, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$studyDuration, 52.182201, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$criticalValuesEffectScale[1, ], 0.85546574, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeSurvival5 <- getSampleSizeSurvival(lambda1 = 0.03, lambda2 = 0.2, hazardRatio = 0.6, followUpTime = 8, accrualIntensity = 30, accrualTime = 0) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeSurvival5' with expected results ## expect_equal(sampleSizeSurvival5$directionUpper, FALSE) expect_equal(sampleSizeSurvival5$pi1, 0.30232367, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$pi2, 0.90928205, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$median1, 23.104906, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$median2, 3.4657359, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$hazardRatio, 0.15, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$maxNumberOfSubjects, 16.953283, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$maxNumberOfEvents, 8.723245, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$accrualTime, 0.56510944, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$calculateFollowUpTime, TRUE) expect_equal(sampleSizeSurvival5$nFixed, 16.953283, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$nFixed1, 8.4766417, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$nFixed2, 8.4766417, tolerance = 1e-07) 
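## NOTE (editorial): 'sampleSizeSurvival5' above was created with lambda1 = 0.03, lambda2 = 0.2
## and additionally hazardRatio = 0.6, yet the expected hazardRatio is 0.15 = 0.03 / 0.2;
## the ratio of the supplied lambdas apparently takes precedence over the hazardRatio argument.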
expect_equal(sampleSizeSurvival5$analysisTime[1, ], 8.5650223, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival5$studyDuration, 8.5650223, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival5$criticalValuesEffectScale[1, ], 0.26521666, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival5$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07)

	sampleSizeSurvival6 <- getSampleSizeSurvival(lambda1 = 0.03, lambda2 = 0.2, hazardRatio = c(0.6, 0.7), followUpTime = 8, accrualIntensity = 30, accrualTime = 0)

	##
	## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeSurvival6' with expected results
	##
	expect_equal(sampleSizeSurvival6$directionUpper, FALSE)
	expect_equal(sampleSizeSurvival6$pi1, 0.30232367, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival6$pi2, 0.90928205, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival6$median1, 23.104906, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival6$median2, 3.4657359, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival6$hazardRatio, 0.15, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival6$maxNumberOfSubjects, 16.953283, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival6$maxNumberOfEvents, 8.723245, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival6$accrualTime, 0.56510944, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival6$calculateFollowUpTime, TRUE)
	expect_equal(sampleSizeSurvival6$nFixed, 16.953283, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival6$nFixed1, 8.4766417, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival6$nFixed2, 8.4766417, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival6$analysisTime[1, ], 8.5650223, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival6$studyDuration, 8.5650223, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival6$criticalValuesEffectScale[1, ], 0.26521666, tolerance = 1e-07)
	expect_equal(sampleSizeSurvival6$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07)

	expect_error(getSampleSizeSurvival(lambda2 = 0.2, hazardRatio = c(0.6, 0.7), followUpTime = 8, accrualIntensity = 30, accrualTime = 0))
	expect_error(getSampleSizeSurvival(lambda1 = c(0.02, 0.03), lambda2 = 0.2, hazardRatio = 0.6, followUpTime = 8, accrualIntensity = 30, accrualTime = 0))
	expect_error(getSampleSizeSurvival(lambda2 = c(0.02, 0.03), piecewiseSurvivalTime = c(0, 12), hazardRatio = c(0.6, 0.8), followUpTime = 8, accrualIntensity = 30, accrualTime = 0))
})
rpact/tests/testthat/helper-f_analysis_means.R0000644000176200001440000000477513363325361021310 0ustar liggesusers######################################################################################
#                                                                                    #
# -- Unit test helper functions --                                                   #
#                                                                                    #
# This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. #
#                                                                                    #
# File version: 1.0.0                                                                #
# Date: 25-09-2018                                                                   #
# Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD                             #
# Licensed under "GNU Lesser General Public License" version 3                       #
# License text can be found here: https://www.r-project.org/Licenses/LGPL-3          #
#                                                                                    #
# RPACT company website: https://www.rpact.com                                       #
# RPACT package website: https://www.rpact.org                                       #
#                                                                                    #
# Contact us for information about our services: info@rpact.com                      #
#                                                                                    #
######################################################################################

testGetStageResultsPlotData <- function(x, ..., nPlanned, stage = x$getNumberOfStages(), allocationRatioPlanned = 1) {
	if (x$getDataInput()$isDatasetMeans()) {
		assumedStDev <- .getOptionalArgument("assumedStDev", ...)
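		# When 'assumedStDev' was not passed via '...', take it from the stage results object and
		# call .getConditionalPowerPlot() with it explicitly; otherwise control falls through to
		# the generic call at the end of this function, which forwards '...' unchanged.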
		if (is.null(assumedStDev)) {
			assumedStDev <- x$assumedStDev
			return(.getConditionalPowerPlot(stageResults = x, nPlanned = nPlanned, stage = stage, allocationRatioPlanned = allocationRatioPlanned, assumedStDev = assumedStDev, ...))
		}
	} else if (x$getDataInput()$isDatasetRates()) {
		pi2 <- .getOptionalArgument("pi2", ...)
		if (is.null(pi2)) {
			pi2 <- x$pi2
			return(.getConditionalPowerPlot(stageResults = x, nPlanned = nPlanned, stage = stage, allocationRatioPlanned = allocationRatioPlanned, pi2 = pi2, ...))
		}
	}
	return(.getConditionalPowerPlot(stageResults = x, nPlanned = nPlanned, stage = stage, allocationRatioPlanned = allocationRatioPlanned, ...))
}
rpact/tests/testthat/test-class_analysis_dataset.R0000644000176200001440000015732013567165663022224 0ustar liggesusers######################################################################################
#                                                                                    #
# -- Unit tests --                                                                   #
#                                                                                    #
# This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. #
#                                                                                    #
# File version: 1.0.0                                                                #
# Date: 26 November 2019, 10:07:41                                                   #
# Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD                             #
# Licensed under "GNU Lesser General Public License" version 3                       #
# License text can be found here: https://www.r-project.org/Licenses/LGPL-3          #
#                                                                                    #
# RPACT company website: https://www.rpact.com                                       #
# RPACT package website: https://www.rpact.org                                       #
#                                                                                    #
# Contact us for information about our services: info@rpact.com                      #
#                                                                                    #
######################################################################################

context("Testing the class 'Dataset'")

test_that("Usage of 'getDataset'", {
	datasetOfMeans1 <- getDataset(
		n1 = c(22, 11, 22, 11),
		n2 = c(22, 13, 22, 13),
		means1 = c(1, 1.1, 1, 1),
		means2 = c(1.4, 1.5, 3, 2.5),
		stDevs1 = c(1, 2, 2, 1.3),
		stDevs2 = c(1, 2, 2, 1.3)
	)

	##
	## Comparison of the results of DatasetMeans object 'datasetOfMeans1' with expected results
	##
	expect_equal(datasetOfMeans1$stages, c(1, 1, 2, 2, 3, 3, 4, 4))
	expect_equal(datasetOfMeans1$groups, c(1, 2, 1, 2, 1, 2, 1, 2))
	expect_equal(datasetOfMeans1$sampleSizes, c(22, 22, 11, 13, 22, 22, 11, 13))
	expect_equal(datasetOfMeans1$means, c(1, 1.4, 1.1, 1.5, 1, 3, 1, 2.5), tolerance = 1e-07)
	expect_equal(datasetOfMeans1$stDevs, c(1, 1, 2, 2, 2, 2, 1.3, 1.3), tolerance = 1e-07)
	expect_equal(datasetOfMeans1$overallSampleSizes, c(22, 22, 33, 35, 55, 57, 66, 70))
	expect_equal(datasetOfMeans1$overallMeans, c(1, 1.4, 1.0333333, 1.4371429, 1.02, 2.0403509, 1.0166667, 2.1257143), tolerance = 1e-07)
	expect_equal(datasetOfMeans1$overallStDevs, c(1, 1, 1.3814998, 1.4254175, 1.6391506, 1.8228568, 1.5786638, 1.7387056), tolerance = 1e-07)

	##
	## Comparison of the results of data.frame object 'datasetOfMeans1$.data' with expected results
	##
	expect_equal(datasetOfMeans1$.data$stage, c(1, 1, 2, 2, 3, 3, 4, 4))
	expect_equal(datasetOfMeans1$.data$group, c(1, 2, 1, 2, 1, 2, 1, 2))
	expect_equal(datasetOfMeans1$.data$sampleSize, c(22, 22, 11, 13, 22, 22, 11, 13))
	expect_equal(datasetOfMeans1$.data$mean, c(1, 1.4, 1.1, 1.5, 1, 3, 1, 2.5), tolerance = 1e-07)
	expect_equal(datasetOfMeans1$.data$stDev, c(1, 1, 2, 2, 2, 2, 1.3, 1.3), tolerance = 1e-07)
	expect_equal(datasetOfMeans1$.data$overallSampleSize, c(22, 22, 33, 35, 55, 57, 66, 70))
	expect_equal(datasetOfMeans1$.data$overallMean, c(1, 1.4, 1.0333333, 1.4371429, 1.02, 2.0403509, 1.0166667, 2.1257143), tolerance = 1e-07)
	expect_equal(datasetOfMeans1$.data$overallStDev, c(1, 1, 1.3814998, 1.4254175, 1.6391506, 1.8228568, 1.5786638, 1.7387056), tolerance = 1e-07)
	expect_equal(datasetOfMeans1$stages, datasetOfMeans1$.data$stage, 
tolerance = 1e-07) expect_equal(datasetOfMeans1$groups, datasetOfMeans1$.data$group, tolerance = 1e-07) expect_equal(datasetOfMeans1$sampleSizes, datasetOfMeans1$.data$sampleSize, tolerance = 1e-07) expect_equal(datasetOfMeans1$means, datasetOfMeans1$.data$mean, tolerance = 1e-07) expect_equal(datasetOfMeans1$stDevs, datasetOfMeans1$.data$stDev, tolerance = 1e-07) expect_equal(datasetOfMeans1$overallSampleSizes, datasetOfMeans1$.data$overallSampleSize, tolerance = 1e-07) expect_equal(datasetOfMeans1$overallMeans, datasetOfMeans1$.data$overallMean, tolerance = 1e-07) expect_equal(datasetOfMeans1$overallStDevs, datasetOfMeans1$.data$overallStDev, tolerance = 1e-07) x <- getMultipleStageResultsForDataset(datasetOfMeans1) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults1' with expected results ## expect_equal(x$stageResults1$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(x$stageResults1$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056), tolerance = 1e-07) expect_equal(x$stageResults1$overallSampleSizes1, c(22, 33, 55, 66)) expect_equal(x$stageResults1$overallSampleSizes2, c(22, 35, 57, 70)) expect_equal(x$stageResults1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(-0.4, -0.40380952, -1.0203509, -1.1090476), tolerance = 1e-07) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults2' with expected results ## expect_equal(x$stageResults2$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(x$stageResults2$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056), tolerance = 1e-07) expect_equal(x$stageResults2$overallSampleSizes1, c(22, 33, 55, 66)) expect_equal(x$stageResults2$overallSampleSizes2, c(22, 35, 57, 70)) expect_equal(x$stageResults2$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(-0.4, -0.40380952, -1.0203509, -1.1090476), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults3' with expected results ## expect_equal(x$stageResults3$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, 
NA_real_)) expect_equal(x$stageResults3$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(x$stageResults3$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056), tolerance = 1e-07) expect_equal(x$stageResults3$overallSampleSizes1, c(22, 33, 55, 66)) expect_equal(x$stageResults3$overallSampleSizes2, c(22, 35, 57, 70)) expect_equal(x$stageResults3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(-0.4, -0.40380952, -1.0203509, -1.1090476), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) datasetOfMeans2 <- getDataset(data.frame( stages = 1:4, n1 = c(22, 11, 22, 11), n2 = c(22, 13, 22, 13), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 3, 2.5), stDevs1 = c(1, 2, 2, 1.3), stDevs2 = c(1, 2, 2, 1.3) )) x <- getMultipleStageResultsForDataset(datasetOfMeans2) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults1' with expected results ## expect_equal(x$stageResults1$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(x$stageResults1$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056), tolerance = 1e-07) expect_equal(x$stageResults1$overallSampleSizes1, c(22, 33, 55, 66)) expect_equal(x$stageResults1$overallSampleSizes2, c(22, 35, 57, 70)) expect_equal(x$stageResults1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(-0.4, -0.40380952, -1.0203509, -1.1090476), tolerance = 1e-07) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults2' with expected results ## expect_equal(x$stageResults2$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(x$stageResults2$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056), tolerance = 1e-07) expect_equal(x$stageResults2$overallSampleSizes1, c(22, 33, 55, 66)) expect_equal(x$stageResults2$overallSampleSizes2, c(22, 35, 57, 70)) expect_equal(x$stageResults2$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(-0.4, -0.40380952, 
-1.0203509, -1.1090476), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults3' with expected results ## expect_equal(x$stageResults3$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(x$stageResults3$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056), tolerance = 1e-07) expect_equal(x$stageResults3$overallSampleSizes1, c(22, 33, 55, 66)) expect_equal(x$stageResults3$overallSampleSizes2, c(22, 35, 57, 70)) expect_equal(x$stageResults3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(-0.4, -0.40380952, -1.0203509, -1.1090476), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) datasetOfMeans3 <- getDataset( overallSampleSizes1 = c(22, 33, 55, 66), overallSampleSizes2 = c(22, 35, 57, 70), overallMeans1 = c(1, 1.033333, 1.02, 1.016667), overallMeans2 = c(1.4, 1.437143, 2.040351, 2.125714), overallStDevs1 = c(1, 1.381500, 1.639151, 1.578664), overallStDevs2 = c(1, 1.425418, 1.822857, 1.738706) ) ## ## Comparison of the results of DatasetMeans object 'datasetOfMeans3' with expected results ## expect_equal(datasetOfMeans3$stages, c(1, 1, 2, 2, 3, 3, 4, 4)) expect_equal(datasetOfMeans3$groups, c(1, 2, 1, 2, 1, 2, 1, 2)) expect_equal(datasetOfMeans3$sampleSizes, c(22, 22, 11, 13, 22, 22, 11, 13)) expect_equal(datasetOfMeans3$means, c(1, 1.4, 1.099999, 1.5000004, 1.0000005, 3.0000001, 1.000002, 2.4999979), tolerance = 1e-07) expect_equal(datasetOfMeans3$stDevs, c(1, 1, 2.0000005, 2.0000009, 2.0000005, 1.9999999, 1.2999989, 1.3000023), tolerance = 1e-07) expect_equal(datasetOfMeans3$overallSampleSizes, c(22, 22, 33, 35, 55, 57, 66, 70)) expect_equal(datasetOfMeans3$overallMeans, c(1, 1.4, 1.033333, 1.437143, 1.02, 2.040351, 1.016667, 2.125714), tolerance = 1e-07) expect_equal(datasetOfMeans3$overallStDevs, c(1, 1, 1.3815, 1.425418, 1.639151, 1.822857, 1.578664, 1.738706), tolerance = 1e-07) ## ## Comparison of the results of data.frame object 'datasetOfMeans3$.data' with expected results ## expect_equal(datasetOfMeans3$.data$stage, c(1, 1, 2, 2, 3, 3, 4, 4)) expect_equal(datasetOfMeans3$.data$group, c(1, 2, 1, 2, 1, 2, 1, 2)) expect_equal(datasetOfMeans3$.data$sampleSize, c(22, 22, 11, 13, 22, 22, 11, 13)) expect_equal(datasetOfMeans3$.data$mean, c(1, 1.4, 1.099999, 1.5000004, 1.0000005, 3.0000001, 1.000002, 2.4999979), tolerance = 1e-07) expect_equal(datasetOfMeans3$.data$stDev, c(1, 1, 2.0000005, 2.0000009, 2.0000005, 1.9999999, 1.2999989, 1.3000023), tolerance = 1e-07) expect_equal(datasetOfMeans3$.data$overallSampleSize, c(22, 22, 33, 35, 55, 57, 66, 70)) expect_equal(datasetOfMeans3$.data$overallMean, 
c(1, 1.4, 1.033333, 1.437143, 1.02, 2.040351, 1.016667, 2.125714), tolerance = 1e-07) expect_equal(datasetOfMeans3$.data$overallStDev, c(1, 1, 1.3815, 1.425418, 1.639151, 1.822857, 1.578664, 1.738706), tolerance = 1e-07) x <- getMultipleStageResultsForDataset(datasetOfMeans3) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults1' with expected results ## expect_equal(x$stageResults1$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallMeans1, c(1, 1.033333, 1.02, 1.016667), tolerance = 1e-07) expect_equal(x$stageResults1$overallMeans2, c(1.4, 1.437143, 2.040351, 2.125714), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs1, c(1, 1.3815, 1.639151, 1.578664), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs2, c(1, 1.425418, 1.822857, 1.738706), tolerance = 1e-07) expect_equal(x$stageResults1$overallSampleSizes1, c(22, 33, 55, 66)) expect_equal(x$stageResults1$overallSampleSizes2, c(22, 35, 57, 70)) expect_equal(x$stageResults1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(-0.4, -0.40381, -1.020351, -1.109047), tolerance = 1e-07) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults2' with expected results ## expect_equal(x$stageResults2$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallMeans1, c(1, 1.033333, 1.02, 1.016667), tolerance = 1e-07) expect_equal(x$stageResults2$overallMeans2, c(1.4, 1.437143, 2.040351, 2.125714), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs1, c(1, 1.3815, 1.639151, 1.578664), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs2, c(1, 1.425418, 1.822857, 1.738706), tolerance = 1e-07) expect_equal(x$stageResults2$overallSampleSizes1, c(22, 33, 55, 66)) expect_equal(x$stageResults2$overallSampleSizes2, c(22, 35, 57, 70)) expect_equal(x$stageResults2$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(-0.4, -0.40381, -1.020351, -1.109047), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults3' with expected results ## expect_equal(x$stageResults3$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallMeans1, c(1, 1.033333, 1.02, 1.016667), tolerance = 1e-07) expect_equal(x$stageResults3$overallMeans2, c(1.4, 1.437143, 2.040351, 2.125714), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs1, c(1, 1.3815, 1.639151, 1.578664), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs2, c(1, 1.425418, 1.822857, 1.738706), tolerance = 1e-07) expect_equal(x$stageResults3$overallSampleSizes1, c(22, 33, 55, 66)) 
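# Note on the targets asserted for 'datasetOfMeans3': this dataset was created
# from overall (cumulative) summaries rounded to six decimal places, so the
# back-calculated stage-wise means and standard deviations reproduce the exact
# stage-wise inputs of 'datasetOfMeans1' only up to that rounding (e.g.
# 1.099999 instead of 1.1), which is why these assertions carry slightly
# perturbed target values.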
expect_equal(x$stageResults3$overallSampleSizes2, c(22, 35, 57, 70)) expect_equal(x$stageResults3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(-0.4, -0.40381, -1.020351, -1.109047), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) }) test_that("Creation of a dataset of means using stage wise data (one group)", { datasetOfMeans4 <- getDataset( n = c(22, 11, 22, 11), means = c(1, 1.1, 1, 1), stDevs = c(1, 2, 2, 1.3) ) ## ## Comparison of the results of DatasetMeans object 'datasetOfMeans4' with expected results ## expect_equal(datasetOfMeans4$stages, c(1, 2, 3, 4)) expect_equal(datasetOfMeans4$groups, c(1, 1, 1, 1)) expect_equal(datasetOfMeans4$sampleSizes, c(22, 11, 22, 11)) expect_equal(datasetOfMeans4$means, c(1, 1.1, 1, 1), tolerance = 1e-07) expect_equal(datasetOfMeans4$stDevs, c(1, 2, 2, 1.3), tolerance = 1e-07) expect_equal(datasetOfMeans4$overallSampleSizes, c(22, 33, 55, 66)) expect_equal(datasetOfMeans4$overallMeans, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(datasetOfMeans4$overallStDevs, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) ## ## Comparison of the results of data.frame object 'datasetOfMeans4$.data' with expected results ## expect_equal(datasetOfMeans4$.data$stage, c(1, 2, 3, 4)) expect_equal(datasetOfMeans4$.data$group, c(1, 1, 1, 1)) expect_equal(datasetOfMeans4$.data$sampleSize, c(22, 11, 22, 11)) expect_equal(datasetOfMeans4$.data$mean, c(1, 1.1, 1, 1), tolerance = 1e-07) expect_equal(datasetOfMeans4$.data$stDev, c(1, 2, 2, 1.3), tolerance = 1e-07) expect_equal(datasetOfMeans4$.data$overallSampleSize, c(22, 33, 55, 66)) expect_equal(datasetOfMeans4$.data$overallMean, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(datasetOfMeans4$.data$overallStDev, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) x <- getMultipleStageResultsForDataset(datasetOfMeans4) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults1' with expected results ## expect_equal(x$stageResults1$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallMeans, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(x$stageResults1$overallSampleSizes, c(22, 33, 55, 66)) expect_equal(x$stageResults1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(1, 1.0333333, 1.02, 1.0166667, NA_real_), tolerance = 1e-07) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults2' with expected results ## expect_equal(x$stageResults2$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallMeans, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) 
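# The inverse normal weights asserted for these stage results, rep(0.4472136, 5),
# equal 1 / sqrt(5), i.e. equal weights under a five-stage design with equal
# information increments (a sketch; kMax = 5 is inferred here from the length
# of the asserted vectors, not from an explicit argument):
#   sqrt(diff(c(0, (1:5) / 5)))  # each element is sqrt(1/5) ~ 0.4472136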
expect_equal(x$stageResults2$overallSampleSizes, c(22, 33, 55, 66)) expect_equal(x$stageResults2$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(1, 1.0333333, 1.02, 1.0166667, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults3' with expected results ## expect_equal(x$stageResults3$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallMeans, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(x$stageResults3$overallSampleSizes, c(22, 33, 55, 66)) expect_equal(x$stageResults3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(1, 1.0333333, 1.02, 1.0166667, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) }) test_that("Creation of a dataset of means using overall data (one group)", { datasetOfMeans5 <- getDataset( overallSampleSizes = c(22, 33, 55, 66), overallMeans = c(1.000, 1.033, 1.020, 1.017 ), overallStDevs = c(1.00, 1.38, 1.64, 1.58) ) ## ## Comparison of the results of DatasetMeans object 'datasetOfMeans5' with expected results ## expect_equal(datasetOfMeans5$stages, c(1, 2, 3, 4)) expect_equal(datasetOfMeans5$groups, c(1, 1, 1, 1)) expect_equal(datasetOfMeans5$sampleSizes, c(22, 11, 22, 11)) expect_equal(datasetOfMeans5$means, c(1, 1.099, 1.0005, 1.002), tolerance = 1e-07) expect_equal(datasetOfMeans5$stDevs, c(1, 1.9967205, 2.003374, 1.3047847), tolerance = 1e-07) expect_equal(datasetOfMeans5$overallSampleSizes, c(22, 33, 55, 66)) expect_equal(datasetOfMeans5$overallMeans, c(1, 1.033, 1.02, 1.017), tolerance = 1e-07) expect_equal(datasetOfMeans5$overallStDevs, c(1, 1.38, 1.64, 1.58), tolerance = 1e-07) ## ## Comparison of the results of data.frame object 'datasetOfMeans5$.data' with expected results ## expect_equal(datasetOfMeans5$.data$stage, c(1, 2, 3, 4)) expect_equal(datasetOfMeans5$.data$group, c(1, 1, 1, 1)) expect_equal(datasetOfMeans5$.data$sampleSize, c(22, 11, 22, 11)) expect_equal(datasetOfMeans5$.data$mean, c(1, 1.099, 1.0005, 1.002), tolerance = 1e-07) expect_equal(datasetOfMeans5$.data$stDev, c(1, 1.9967205, 2.003374, 1.3047847), tolerance = 1e-07) expect_equal(datasetOfMeans5$.data$overallSampleSize, c(22, 33, 55, 66)) expect_equal(datasetOfMeans5$.data$overallMean, c(1, 1.033, 1.02, 1.017), tolerance = 1e-07) expect_equal(datasetOfMeans5$.data$overallStDev, c(1, 1.38, 1.64, 1.58), tolerance = 1e-07) x <- getMultipleStageResultsForDataset(datasetOfMeans5) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults1' with expected results ## expect_equal(x$stageResults1$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) 
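# Back-calculation sketch for the stage-wise values of 'datasetOfMeans5'
# asserted above (illustrative variable names, not package API): with
# cumulative sizes n and cumulative means m,
#   nStage <- diff(c(0, n))               # 22 11 22 11
#   mStage <- diff(c(0, n * m)) / nStage  # stage-wise means
# e.g. stage 2: (33 * 1.033 - 22 * 1.000) / 11 = 1.099, matching the asserted
# value; the stage-wise standard deviations follow from the analogous
# pooled-variance decomposition.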
expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallMeans, c(1, 1.033, 1.02, 1.017), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs, c(1, 1.38, 1.64, 1.58), tolerance = 1e-07) expect_equal(x$stageResults1$overallSampleSizes, c(22, 33, 55, 66)) expect_equal(x$stageResults1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(1, 1.033, 1.02, 1.017, NA_real_), tolerance = 1e-07) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults2' with expected results ## expect_equal(x$stageResults2$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallMeans, c(1, 1.033, 1.02, 1.017), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs, c(1, 1.38, 1.64, 1.58), tolerance = 1e-07) expect_equal(x$stageResults2$overallSampleSizes, c(22, 33, 55, 66)) expect_equal(x$stageResults2$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(1, 1.033, 1.02, 1.017, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults3' with expected results ## expect_equal(x$stageResults3$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallMeans, c(1, 1.033, 1.02, 1.017), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs, c(1, 1.38, 1.64, 1.58), tolerance = 1e-07) expect_equal(x$stageResults3$overallSampleSizes, c(22, 33, 55, 66)) expect_equal(x$stageResults3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(1, 1.033, 1.02, 1.017, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) }) test_that("Creation of a dataset of rates using stage wise data (one group)", { datasetOfRates1 <- getDataset( n = c(8, 10, 9, 11), events = c(4, 5, 5, 6) ) ## ## Comparison of the results of DatasetRates object 'datasetOfRates1' with expected results ## expect_equal(datasetOfRates1$stages, c(1, 2, 3, 4)) expect_equal(datasetOfRates1$groups, c(1, 1, 1, 1)) expect_equal(datasetOfRates1$sampleSizes, c(8, 10, 9, 11)) expect_equal(datasetOfRates1$events, c(4, 5, 5, 6)) expect_equal(datasetOfRates1$overallSampleSizes, c(8, 18, 27, 38)) expect_equal(datasetOfRates1$overallEvents, c(4, 9, 14, 20)) ## ## Comparison of the results of data.frame object 'datasetOfRates1$.data' with expected results ## expect_equal(datasetOfRates1$.data$stage, c(1, 2, 3, 4)) expect_equal(datasetOfRates1$.data$group, c(1, 1, 1, 1)) 
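# Illustrative base-R check of the cumulative bookkeeping asserted above
# (plain arithmetic, not part of the original test suite):
stopifnot(identical(cumsum(c(8, 10, 9, 11)), c(8, 18, 27, 38)))  # overall sample sizes
stopifnot(identical(cumsum(c(4, 5, 5, 6)), c(4, 9, 14, 20)))     # overall events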
expect_equal(datasetOfRates1$.data$sampleSize, c(8, 10, 9, 11)) expect_equal(datasetOfRates1$.data$event, c(4, 5, 5, 6)) expect_equal(datasetOfRates1$.data$overallSampleSize, c(8, 18, 27, 38)) expect_equal(datasetOfRates1$.data$overallEvent, c(4, 9, 14, 20)) x <- getMultipleStageResultsForDataset(datasetOfRates1, thetaH0 = 0.99) ## ## Comparison of the results of StageResultsRates object 'x$stageResults1' with expected results ## expect_equal(x$stageResults1$overallTestStatistics, c(-13.929113, -20.89367, -24.622317, -28.727412, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$overallPValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults1$overallEvents, c(4, 9, 14, 20)) expect_equal(x$stageResults1$overallSampleSizes, c(8, 18, 27, 38)) expect_equal(x$stageResults1$testStatistics, c(-13.929113, -15.573222, -13.098993, -14.818182, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$pValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(0.5, 0.5, 0.51851852, 0.52631579, NA_real_), tolerance = 1e-07) ## ## Comparison of the results of StageResultsRates object 'x$stageResults2' with expected results ## expect_equal(x$stageResults2$overallTestStatistics, c(-13.929113, -20.89367, -24.622317, -28.727412, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$overallPValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults2$overallEvents, c(4, 9, 14, 20)) expect_equal(x$stageResults2$overallSampleSizes, c(8, 18, 27, 38)) expect_equal(x$stageResults2$testStatistics, c(-13.929113, -15.573222, -13.098993, -14.818182, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$pValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(0.5, 0.5, 0.51851852, 0.52631579, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(-Inf, -Inf, -Inf, -Inf, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) ## ## Comparison of the results of StageResultsRates object 'x$stageResults3' with expected results ## expect_equal(x$stageResults3$overallTestStatistics, c(-13.929113, -20.89367, -24.622317, -28.727412, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$overallPValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults3$overallEvents, c(4, 9, 14, 20)) expect_equal(x$stageResults3$overallSampleSizes, c(8, 18, 27, 38)) expect_equal(x$stageResults3$testStatistics, c(-13.929113, -15.573222, -13.098993, -14.818182, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$pValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(0.5, 0.5, 0.51851852, 0.52631579, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) }) test_that("Creation of a dataset of rates using stage wise data (two groups)", { datasetOfRates2 <- getDataset( n2 = c(8, 10, 9, 11), n1 = c(11, 13, 12, 13), events2 = c(3, 5, 5, 6), events1 = c(10, 10, 12, 12) ) ## ## Comparison of the results of DatasetRates object 'datasetOfRates2' with expected results ## expect_equal(datasetOfRates2$stages, c(1, 1, 2, 2, 3, 3, 4, 4)) expect_equal(datasetOfRates2$groups, c(1, 2, 1, 2, 1, 2, 1, 2)) expect_equal(datasetOfRates2$sampleSizes, c(11, 8, 13, 10, 12, 9, 13, 11)) expect_equal(datasetOfRates2$events, c(10, 3, 10, 5, 12, 5, 12, 6)) expect_equal(datasetOfRates2$overallSampleSizes, c(11, 8, 24, 18, 36, 27, 49, 38)) 
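# With two groups the result vectors interleave the groups within each stage
# (stages 1, 1, 2, 2, ... and groups 1, 2, 1, 2, ...), so the cumulative sums
# run separately per group, e.g. group 1: cumsum(c(11, 13, 12, 13)) gives
# 11 24 36 49, as asserted above.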
expect_equal(datasetOfRates2$overallEvents, c(10, 3, 20, 8, 32, 13, 44, 19)) ## ## Comparison of the results of data.frame object 'datasetOfRates2$.data' with expected results ## expect_equal(datasetOfRates2$.data$stage, c(1, 1, 2, 2, 3, 3, 4, 4)) expect_equal(datasetOfRates2$.data$group, c(1, 2, 1, 2, 1, 2, 1, 2)) expect_equal(datasetOfRates2$.data$sampleSize, c(11, 8, 13, 10, 12, 9, 13, 11)) expect_equal(datasetOfRates2$.data$event, c(10, 3, 10, 5, 12, 5, 12, 6)) expect_equal(datasetOfRates2$.data$overallSampleSize, c(11, 8, 24, 18, 36, 27, 49, 38)) expect_equal(datasetOfRates2$.data$overallEvent, c(10, 3, 20, 8, 32, 13, 44, 19)) x <- getMultipleStageResultsForDataset(datasetOfRates2, thetaH0 = 0.99) ## ## Comparison of the results of StageResultsRates object 'x$stageResults1' with expected results ## expect_equal(x$stageResults1$overallTestStatistics, c(-13.397899, -26.707477, -31.300879, -37.503444, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$overallPValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults1$overallEvents1, c(10, 20, 32, 44)) expect_equal(x$stageResults1$overallEvents2, c(3, 8, 13, 19)) expect_equal(x$stageResults1$overallSampleSizes1, c(11, 24, 36, 49)) expect_equal(x$stageResults1$overallSampleSizes2, c(8, 18, 27, 38)) expect_equal(x$stageResults1$testStatistics, c(-13.397899, -23.909016, -16.449119, -20.614826, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$pValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(0.53409091, 0.38888889, 0.40740741, 0.39795918, NA_real_), tolerance = 1e-07) ## ## Comparison of the results of StageResultsRates object 'x$stageResults2' with expected results ## expect_equal(x$stageResults2$overallTestStatistics, c(-13.397899, -26.707477, -31.300879, -37.503444, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$overallPValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults2$overallEvents1, c(10, 20, 32, 44)) expect_equal(x$stageResults2$overallEvents2, c(3, 8, 13, 19)) expect_equal(x$stageResults2$overallSampleSizes1, c(11, 24, 36, 49)) expect_equal(x$stageResults2$overallSampleSizes2, c(8, 18, 27, 38)) expect_equal(x$stageResults2$testStatistics, c(-13.397899, -23.909016, -16.449119, -20.614826, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$pValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(0.53409091, 0.38888889, 0.40740741, 0.39795918, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(-Inf, -Inf, -Inf, -Inf, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) ## ## Comparison of the results of StageResultsRates object 'x$stageResults3' with expected results ## expect_equal(x$stageResults3$overallTestStatistics, c(-13.397899, -26.707477, -31.300879, -37.503444, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$overallPValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults3$overallEvents1, c(10, 20, 32, 44)) expect_equal(x$stageResults3$overallEvents2, c(3, 8, 13, 19)) expect_equal(x$stageResults3$overallSampleSizes1, c(11, 24, 36, 49)) expect_equal(x$stageResults3$overallSampleSizes2, c(8, 18, 27, 38)) expect_equal(x$stageResults3$testStatistics, c(-13.397899, -23.909016, -16.449119, -20.614826, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$pValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(0.53409091, 0.38888889, 0.40740741, 0.39795918, NA_real_), tolerance = 
1e-07) expect_equal(x$stageResults3$combFisher, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) }) test_that("Creation of a dataset of rates using stage wise data (three groups)", { datasetOfRates3 <- getDataset( n1 = c(11, 13, 12, 13), n2 = c(8, 10, 9, 11), n3 = c(7, 10, 8, 9), events1 = c(10, 10, 12, 12), events2 = c(3, 5, 5, 6), events3 = c(2, 4, 3, 5) ) ## ## Comparison of the results of DatasetRates object 'datasetOfRates3' with expected results ## expect_equal(datasetOfRates3$stages, c(1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4)) expect_equal(datasetOfRates3$groups, c(1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3)) expect_equal(datasetOfRates3$sampleSizes, c(11, 8, 7, 13, 10, 10, 12, 9, 8, 13, 11, 9)) expect_equal(datasetOfRates3$events, c(10, 3, 2, 10, 5, 4, 12, 5, 3, 12, 6, 5)) expect_equal(datasetOfRates3$overallSampleSizes, c(11, 8, 7, 24, 18, 17, 36, 27, 25, 49, 38, 34)) expect_equal(datasetOfRates3$overallEvents, c(10, 3, 2, 20, 8, 6, 32, 13, 9, 44, 19, 14)) ## ## Comparison of the results of data.frame object 'datasetOfRates3$.data' with expected results ## expect_equal(datasetOfRates3$.data$stage, c(1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4)) expect_equal(datasetOfRates3$.data$group, c(1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3)) expect_equal(datasetOfRates3$.data$sampleSize, c(11, 8, 7, 13, 10, 10, 12, 9, 8, 13, 11, 9)) expect_equal(datasetOfRates3$.data$event, c(10, 3, 2, 10, 5, 4, 12, 5, 3, 12, 6, 5)) expect_equal(datasetOfRates3$.data$overallSampleSize, c(11, 8, 7, 24, 18, 17, 36, 27, 25, 49, 38, 34)) expect_equal(datasetOfRates3$.data$overallEvent, c(10, 3, 2, 20, 8, 6, 32, 13, 9, 44, 19, 14)) }) test_that("Creation of a dataset of rates using overall data (two groups)", { datasetOfRates4 <- getDataset( overallSampleSizes1 = c(11, 24, 36, 49), overallSampleSizes2 = c(8, 18, 27, 38), overallEvents1 = c(10, 20, 32, 44), overallEvents2 = c(3, 8, 13, 19) ) ## ## Comparison of the results of DatasetRates object 'datasetOfRates4' with expected results ## expect_equal(datasetOfRates4$stages, c(1, 1, 2, 2, 3, 3, 4, 4)) expect_equal(datasetOfRates4$groups, c(1, 2, 1, 2, 1, 2, 1, 2)) expect_equal(datasetOfRates4$sampleSizes, c(11, 8, 13, 10, 12, 9, 13, 11)) expect_equal(datasetOfRates4$events, c(10, 3, 10, 5, 12, 5, 12, 6)) expect_equal(datasetOfRates4$overallSampleSizes, c(11, 8, 24, 18, 36, 27, 49, 38)) expect_equal(datasetOfRates4$overallEvents, c(10, 3, 20, 8, 32, 13, 44, 19)) ## ## Comparison of the results of data.frame object 'datasetOfRates4$.data' with expected results ## expect_equal(datasetOfRates4$.data$stage, c(1, 1, 2, 2, 3, 3, 4, 4)) expect_equal(datasetOfRates4$.data$group, c(1, 2, 1, 2, 1, 2, 1, 2)) expect_equal(datasetOfRates4$.data$sampleSize, c(11, 8, 13, 10, 12, 9, 13, 11)) expect_equal(datasetOfRates4$.data$event, c(10, 3, 10, 5, 12, 5, 12, 6)) expect_equal(datasetOfRates4$.data$overallSampleSize, c(11, 8, 24, 18, 36, 27, 49, 38)) expect_equal(datasetOfRates4$.data$overallEvent, c(10, 3, 20, 8, 32, 13, 44, 19)) x <- getMultipleStageResultsForDataset(datasetOfRates4, thetaH0 = 0.99) ## ## Comparison of the results of StageResultsRates object 'x$stageResults1' with expected results ## expect_equal(x$stageResults1$overallTestStatistics, c(-13.397899, -26.707477, -31.300879, -37.503444, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$overallPValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults1$overallEvents1, c(10, 20, 32, 44)) expect_equal(x$stageResults1$overallEvents2, c(3, 8, 13, 19)) 
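# thetaH0 = 0.99 is a deliberately extreme null value for the rate difference:
# the observed stage-1 difference is only 10/11 - 3/8 = 0.53409, so every
# z-statistic is strongly negative and the one-sided p-values asserted here
# equal 1 (consistent with testing against an upper alternative).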
expect_equal(x$stageResults1$overallSampleSizes1, c(11, 24, 36, 49)) expect_equal(x$stageResults1$overallSampleSizes2, c(8, 18, 27, 38)) expect_equal(x$stageResults1$testStatistics, c(-13.397899, -23.909016, -16.449119, -20.614826, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$pValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(0.53409091, 0.38888889, 0.40740741, 0.39795918, NA_real_), tolerance = 1e-07) ## ## Comparison of the results of StageResultsRates object 'x$stageResults2' with expected results ## expect_equal(x$stageResults2$overallTestStatistics, c(-13.397899, -26.707477, -31.300879, -37.503444, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$overallPValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults2$overallEvents1, c(10, 20, 32, 44)) expect_equal(x$stageResults2$overallEvents2, c(3, 8, 13, 19)) expect_equal(x$stageResults2$overallSampleSizes1, c(11, 24, 36, 49)) expect_equal(x$stageResults2$overallSampleSizes2, c(8, 18, 27, 38)) expect_equal(x$stageResults2$testStatistics, c(-13.397899, -23.909016, -16.449119, -20.614826, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$pValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(0.53409091, 0.38888889, 0.40740741, 0.39795918, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(-Inf, -Inf, -Inf, -Inf, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) ## ## Comparison of the results of StageResultsRates object 'x$stageResults3' with expected results ## expect_equal(x$stageResults3$overallTestStatistics, c(-13.397899, -26.707477, -31.300879, -37.503444, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$overallPValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults3$overallEvents1, c(10, 20, 32, 44)) expect_equal(x$stageResults3$overallEvents2, c(3, 8, 13, 19)) expect_equal(x$stageResults3$overallSampleSizes1, c(11, 24, 36, 49)) expect_equal(x$stageResults3$overallSampleSizes2, c(8, 18, 27, 38)) expect_equal(x$stageResults3$testStatistics, c(-13.397899, -23.909016, -16.449119, -20.614826, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$pValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(0.53409091, 0.38888889, 0.40740741, 0.39795918, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) }) test_that("Creation of a dataset of rates using overall data (three groups)", { datasetOfRates5 <- getDataset( overallSampleSizes1 = c(11, 24, 36, 49), overallSampleSizes2 = c(8, 18, 27, 38), overallSampleSizes3 = c(8, 18, 27, 38), overallEvents1 = c(10, 20, 32, 44), overallEvents2 = c(3, 8, 13, 19), overallEvents3 = c(3, 7, 12, 20) ) ## ## Comparison of the results of DatasetRates object 'datasetOfRates5' with expected results ## expect_equal(datasetOfRates5$stages, c(1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4)) expect_equal(datasetOfRates5$groups, c(1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3)) expect_equal(datasetOfRates5$sampleSizes, c(11, 8, 8, 13, 10, 10, 12, 9, 9, 13, 11, 11)) expect_equal(datasetOfRates5$events, c(10, 3, 3, 10, 5, 4, 12, 5, 5, 12, 6, 8)) expect_equal(datasetOfRates5$overallSampleSizes, c(11, 8, 8, 24, 18, 18, 36, 27, 27, 49, 38, 38)) expect_equal(datasetOfRates5$overallEvents, c(10, 3, 3, 20, 8, 7, 32, 13, 12, 44, 19, 20)) ## ## Comparison of the results of 
data.frame object 'datasetOfRates5$.data' with expected results ## expect_equal(datasetOfRates5$.data$stage, c(1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4)) expect_equal(datasetOfRates5$.data$group, c(1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3)) expect_equal(datasetOfRates5$.data$sampleSize, c(11, 8, 8, 13, 10, 10, 12, 9, 9, 13, 11, 11)) expect_equal(datasetOfRates5$.data$event, c(10, 3, 3, 10, 5, 4, 12, 5, 5, 12, 6, 8)) expect_equal(datasetOfRates5$.data$overallSampleSize, c(11, 8, 8, 24, 18, 18, 36, 27, 27, 49, 38, 38)) expect_equal(datasetOfRates5$.data$overallEvent, c(10, 3, 3, 20, 8, 7, 32, 13, 12, 44, 19, 20)) }) test_that("Creation of a dataset of survival data using stage wise data", { datasetSurvival1 <- getDataset( events = c(8, 7, 4, 12), allocationRatios = c(1, 1, 1, 3.58333333333333), logRanks = c(1.520, 1.273, 0.503, 0.887) ) ## ## Comparison of the results of DatasetSurvival object 'datasetSurvival1' with expected results ## expect_equal(datasetSurvival1$stages, c(1, 2, 3, 4)) expect_equal(datasetSurvival1$groups, c(1, 1, 1, 1)) expect_equal(datasetSurvival1$overallEvents, c(8, 15, 19, 31)) expect_equal(datasetSurvival1$overallAllocationRatios, c(1, 1, 1, 2), tolerance = 1e-07) expect_equal(datasetSurvival1$overallLogRanks, c(1.52, 1.9796756, 1.9897802, 2.1096275), tolerance = 1e-07) expect_equal(datasetSurvival1$events, c(8, 7, 4, 12)) expect_equal(datasetSurvival1$allocationRatios, c(1, 1, 1, 3.5833333), tolerance = 1e-07) expect_equal(datasetSurvival1$logRanks, c(1.52, 1.273, 0.503, 0.887), tolerance = 1e-07) ## ## Comparison of the results of data.frame object 'datasetSurvival1$.data' with expected results ## expect_equal(datasetSurvival1$.data$stage, c(1, 2, 3, 4)) expect_equal(datasetSurvival1$.data$group, c(1, 1, 1, 1)) expect_equal(datasetSurvival1$.data$overallEvent, c(8, 15, 19, 31)) expect_equal(datasetSurvival1$.data$overallAllocationRatio, c(1, 1, 1, 2), tolerance = 1e-07) expect_equal(datasetSurvival1$.data$overallLogRank, c(1.52, 1.9796756, 1.9897802, 2.1096275), tolerance = 1e-07) expect_equal(datasetSurvival1$.data$event, c(8, 7, 4, 12)) expect_equal(datasetSurvival1$.data$allocationRatio, c(1, 1, 1, 3.5833333), tolerance = 1e-07) expect_equal(datasetSurvival1$.data$logRanks, c(1.52, 1.273, 0.503, 0.887), tolerance = 1e-07) x <- getMultipleStageResultsForDataset(datasetSurvival1) ## ## Comparison of the results of StageResultsSurvival object 'x$stageResults1' with expected results ## expect_equal(x$stageResults1$overallLogRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallEvents, c(8, 15, 19, 31, NA_real_)) expect_equal(x$stageResults1$overallAllocationRatios, c(1, 1, 1, 2, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$events, c(8, 7, 4, 12, NA_real_)) expect_equal(x$stageResults1$allocationRatios, c(1, 1, 1, 3.5833333, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$logRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(2.9294137, 2.7795807, 2.4917213, 2.2339445, NA_real_), tolerance = 1e-07) ## ## Comparison of the results of StageResultsSurvival object 'x$stageResults2' with expected results ## expect_equal(x$stageResults2$overallLogRanks, c(NA_real_, NA_real_, NA_real_, 
NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallEvents, c(8, 15, 19, 31, NA_real_)) expect_equal(x$stageResults2$overallAllocationRatios, c(1, 1, 1, 2, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$events, c(8, 7, 4, 12, NA_real_)) expect_equal(x$stageResults2$allocationRatios, c(1, 1, 1, 3.5833333, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$logRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(2.9294137, 2.7795807, 2.4917213, 2.2339445, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) ## ## Comparison of the results of StageResultsSurvival object 'x$stageResults3' with expected results ## expect_equal(x$stageResults3$overallLogRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallEvents, c(8, 15, 19, 31, NA_real_)) expect_equal(x$stageResults3$overallAllocationRatios, c(1, 1, 1, 2, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$events, c(8, 7, 4, 12, NA_real_)) expect_equal(x$stageResults3$allocationRatios, c(1, 1, 1, 3.5833333, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$logRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(2.9294137, 2.7795807, 2.4917213, 2.2339445, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) expect_equal(datasetSurvival1$stages, datasetSurvival1$.data$stage, tolerance = 1e-07) expect_equal(datasetSurvival1$groups, datasetSurvival1$.data$group, tolerance = 1e-07) expect_equal(datasetSurvival1$events, datasetSurvival1$.data$event, tolerance = 1e-07) expect_equal(datasetSurvival1$allocationRatios, datasetSurvival1$.data$allocationRatio, tolerance = 1e-07) expect_equal(datasetSurvival1$logRanks, datasetSurvival1$.data$logRank, tolerance = 1e-07) expect_equal(datasetSurvival1$overallEvents, datasetSurvival1$.data$overallEvent, tolerance = 1e-07) expect_equal(datasetSurvival1$overallAllocationRatios, datasetSurvival1$.data$overallAllocationRatio, tolerance = 1e-07) expect_equal(datasetSurvival1$overallLogRanks, datasetSurvival1$.data$overallLogRank, tolerance = 1e-07) }) test_that("Creation of a dataset of survival data using overall data", { datasetSurvival2 <- getDataset( overallEvents = c(8, 15, 19, 31), overallAllocationRatios = c(1, 1, 1, 2), overallLogRanks = c(1.52, 1.98, 1.99, 2.11) ) ## ## Comparison of the results of DatasetSurvival object 'datasetSurvival2' with expected results ## expect_equal(datasetSurvival2$stages, c(1, 2, 3, 4)) expect_equal(datasetSurvival2$groups, c(1, 1, 1, 1)) 
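# Sketch of the back-calculation behind the stage-wise log-rank statistics
# asserted below: with cumulative events d and overall statistics Z, the
# stage-wise increment is (sqrt(d_k) * Z_k - sqrt(d_{k-1}) * Z_{k-1}) /
# sqrt(d_k - d_{k-1}). Illustrative base-R check (the helper name 'zIncrement'
# is hypothetical, not package API):
zIncrement <- function(d, Z) diff(c(0, sqrt(d) * Z)) / sqrt(diff(c(0, d)))
stopifnot(all(abs(zIncrement(c(8, 15, 19, 31), c(1.52, 1.98, 1.99, 2.11)) -
    c(1.52, 1.2734749, 0.50285094, 0.8873221)) < 1e-06))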
expect_equal(datasetSurvival2$overallEvents, c(8, 15, 19, 31)) expect_equal(datasetSurvival2$overallAllocationRatios, c(1, 1, 1, 2)) expect_equal(datasetSurvival2$overallLogRanks, c(1.52, 1.98, 1.99, 2.11), tolerance = 1e-07) expect_equal(datasetSurvival2$events, c(8, 7, 4, 12)) expect_equal(datasetSurvival2$allocationRatios, c(1, 1, 1, 3.5833333), tolerance = 1e-07) expect_equal(datasetSurvival2$logRanks, c(1.52, 1.2734749, 0.50285094, 0.8873221), tolerance = 1e-07) ## ## Comparison of the results of data.frame object 'datasetSurvival2$.data' with expected results ## expect_equal(datasetSurvival2$.data$stage, c(1, 2, 3, 4)) expect_equal(datasetSurvival2$.data$group, c(1, 1, 1, 1)) expect_equal(datasetSurvival2$.data$overallEvent, c(8, 15, 19, 31)) expect_equal(datasetSurvival2$.data$overallAllocationRatio, c(1, 1, 1, 2)) expect_equal(datasetSurvival2$.data$overallLogRank, c(1.52, 1.98, 1.99, 2.11), tolerance = 1e-07) expect_equal(datasetSurvival2$.data$event, c(8, 7, 4, 12)) expect_equal(datasetSurvival2$.data$allocationRatio, c(1, 1, 1, 3.5833333), tolerance = 1e-07) expect_equal(datasetSurvival2$.data$logRanks, c(1.52, 1.2734749, 0.50285094, 0.8873221), tolerance = 1e-07) x <- getMultipleStageResultsForDataset(datasetSurvival2) ## ## Comparison of the results of StageResultsSurvival object 'x$stageResults1' with expected results ## expect_equal(x$stageResults1$overallLogRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallEvents, c(8, 15, 19, 31, NA_real_)) expect_equal(x$stageResults1$overallAllocationRatios, c(1, 1, 1, 2, NA_real_)) expect_equal(x$stageResults1$events, c(8, 7, 4, 12, NA_real_)) expect_equal(x$stageResults1$allocationRatios, c(1, 1, 1, 3.5833333, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$logRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(2.9294137, 2.7800464, 2.4919726, 2.2342616, NA_real_), tolerance = 1e-07) ## ## Comparison of the results of StageResultsSurvival object 'x$stageResults2' with expected results ## expect_equal(x$stageResults2$overallLogRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallEvents, c(8, 15, 19, 31, NA_real_)) expect_equal(x$stageResults2$overallAllocationRatios, c(1, 1, 1, 2, NA_real_)) expect_equal(x$stageResults2$events, c(8, 7, 4, 12, NA_real_)) expect_equal(x$stageResults2$allocationRatios, c(1, 1, 1, 3.5833333, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$logRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(2.9294137, 2.7800464, 2.4919726, 2.2342616, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) ## ## Comparison of the results of 
StageResultsSurvival object 'x$stageResults3' with expected results ## expect_equal(x$stageResults3$overallLogRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallEvents, c(8, 15, 19, 31, NA_real_)) expect_equal(x$stageResults3$overallAllocationRatios, c(1, 1, 1, 2, NA_real_)) expect_equal(x$stageResults3$events, c(8, 7, 4, 12, NA_real_)) expect_equal(x$stageResults3$allocationRatios, c(1, 1, 1, 3.5833333, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$logRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(2.9294137, 2.7800464, 2.4919726, 2.2342616, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) }) context("Testing that 'getDataset' throws exceptions as expected") test_that("Wrong parameter usage of 'getDataset'", { expect_error(getDataset(), "Missing argument: data.frame or data vectors expected", fixed = TRUE) expect_error(getDataset(1), "Illegal argument: all parameters must be named", fixed = TRUE) expect_error(getDataset(n = 1), "Illegal argument: failed to identify dataset type", fixed = TRUE) expect_error(getDataset(1, x = 2), "Illegal argument: all parameters must be named", fixed = TRUE) expect_error(getDataset( overallSampleSizes1 = c(11, 24, 36, 49), overallSampleSizes2 = c(8, 18, 27, 38), overallSampleSizes3 = c(8, 18, 27, 38), overallEvents1 = c(10, 20, 32, 44), overallEvents2 = c(3, 8, 13, 19), overallEvents3 = c(3, 8, 13, 19), overallEvents1 = c(3, 8, 13, 19) ), "Illegal argument: the parameter names must be unique", fixed = TRUE) }) rpact/tests/testthat/test-f_analysis_base_survival.R0000644000176200001440000012407413574172040022555 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 26 November 2019, 10:08:38 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing the analysis survival functionality for the group sequential design") test_that("'getAnalysisResults' for a group sequential design and survival data", { design1 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.6, 1), bindingFutility = F, typeOfDesign = "WT", deltaWT = 0.45, futilityBounds = c(0, 0, 0)) dataExample1 <- getDataset( overallEvents = c(8, 15, 29), overallAllocationRatios = c(1, 1, 1), overallLogRanks = c(1.52, 1.38, 2.9) ) x1 <- getAnalysisResults(design1, dataExample1, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(2.4878815, 2.4031352, 2.3549063, 2.2955206), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(0, 0, 0)) expect_equal(x1$alphaSpent, c(0.0064253267, 0.012720859, 0.01826361, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.0064253267, 0.0081275893, 0.0092636882, 0.010851653), tolerance = 1e-07) expect_equal(x1$effectSizes, c(2.9294137, 2.0393455, 2.9359555, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(1.52, 0.3951648, 2.7453772, NA_real_), tolerance = 1e-07) expect_equal(x1$pValues, c(0.064255488, 0.34636063, 0.003022069, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x1$thetaH0, 1) expect_equal(x1$thetaH1, 2.9359555, tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities, c(0.07432319, 0.044563047, 0.46900287, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.50439514, 0.58958009, 1.2243899, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(17.013382, 7.0540547, 7.0401059, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.16918725, 0.19438137, 0.0054226276, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, 3) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, 0.013371274, NA_real_), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 1.1294538, NA_real_), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 5.6956842, NA_real_), tolerance = 1e-07) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, 2.632639, NA_real_), tolerance = 1e-07) expect_equal(x1$normalApproximation, TRUE) expect_equal(x1$directionUpper, TRUE) expect_equal(x1$overallTestStatistics, c(1.52, 1.38, 2.9, NA_real_), tolerance = 1e-07) expect_equal(x1$overallPValues, c(0.064255488, 0.083793322, 0.0018658133, NA_real_), tolerance = 1e-07) .skipTestifDisabled() x2 <- getAnalysisResults(design1, 
dataExample1, stage = 2, nPlanned = c(20,40), allocationRatioPlanned = 2, thetaH1 = 2, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2, 3, 4)) expect_equal(x2$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(2.4878815, 2.4031352, 2.3549063, 2.2955206), tolerance = 1e-07) expect_equal(x2$futilityBounds, c(0, 0, 0)) expect_equal(x2$alphaSpent, c(0.0064253267, 0.012720859, 0.01826361, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.0064253267, 0.0081275893, 0.0092636882, 0.010851653), tolerance = 1e-07) expect_equal(x2$effectSizes, c(2.9294137, 2.0393455, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(1.52, 0.3951648, 2.7453772, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.064255488, 0.34636063, 0.003022069, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x2$thetaH0, 1) expect_equal(x2$thetaH1, 2) expect_equal(x2$conditionalRejectionProbabilities, c(0.07432319, 0.044563047, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_, 20, 40)) expect_equal(x2$allocationRatioPlanned, 2) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, 0.24122422, 0.76137238), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.50439514, 0.58958009, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(17.013382, 7.0540547, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.16918725, 0.19438137, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, TRUE) expect_equal(x2$overallTestStatistics, c(1.52, 1.38, 2.9, NA_real_), tolerance = 1e-07) expect_equal(x2$overallPValues, c(0.064255488, 0.083793322, 0.0018658133, NA_real_), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x2, thetaRange = seq(1, 2.5, 0.05)) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9, 1.95, 2, 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.042054884, 0.058920703, 0.079860688, 0.10500347, 0.13429067, 0.16748187, 0.20417526, 0.24383962, 0.28585263, 0.32954089, 0.37421781, 0.41921675, 0.46391757, 0.50776612, 0.55028679, 0.59108872, 0.62986668, 0.6663978, 0.70053535, 0.73220037, 0.76137238, 0.78807962, 0.81238956, 0.83439998, 0.85423087, 0.87201737, 0.88790373, 0.90203829, 0.91456948, 0.92564264, 0.93539766), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.38589113, 0.43767503, 0.48942229, 0.54046191, 0.59019718, 0.63811141, 0.6837696, 0.72681695, 0.76697506, 0.8040366, 0.83785893, 0.86835727, 0.89549763, 0.91928996, 0.93978142, 0.95705021, 0.97119988, 0.98235405, 0.99065189, 0.99624395, 0.99928862, 0.99994909, 0.9983907, 
0.99477877, 0.98927675, 0.98204476, 0.97323838, 0.96300773, 0.95149676, 0.93884271, 0.92517582), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "Hazard ratio") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, maximum number of remaining events = 60, allocation ratio = 2") x3 <- getAnalysisResults(design1, dataExample1, thetaH0 = 0.95, stage = 2, nPlanned = c(20, 40), allocationRatioPlanned = 2, thetaH1 = 2, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x3' with expected results ## expect_equal(x3$stages, c(1, 2, 3, 4)) expect_equal(x3$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x3$criticalValues, c(2.4878815, 2.4031352, 2.3549063, 2.2955206), tolerance = 1e-07) expect_equal(x3$futilityBounds, c(0, 0, 0)) expect_equal(x3$alphaSpent, c(0.0064253267, 0.012720859, 0.01826361, 0.025), tolerance = 1e-07) expect_equal(x3$stageLevels, c(0.0064253267, 0.0081275893, 0.0092636882, 0.010851653), tolerance = 1e-07) expect_equal(x3$effectSizes, c(2.9294137, 2.0393455, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$testStatistics, c(1.5925397, 0.46301945, 2.8413382, NA_real_), tolerance = 1e-07) expect_equal(x3$pValues, c(0.055631748, 0.32167521, 0.0022462323, NA_real_), tolerance = 1e-07) expect_equal(x3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x3$thetaH0, 0.95, tolerance = 1e-07) expect_equal(x3$thetaH1, 2) expect_equal(x3$conditionalRejectionProbabilities, c(0.082607165, 0.055558825, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$nPlanned, c(NA_real_, NA_real_, 20, 40)) expect_equal(x3$allocationRatioPlanned, 2) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_, 0.32497202, 0.83762717), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(0.50439514, 0.58958009, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(17.013382, 7.0540547, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.15076802, 0.16617365, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$finalStage, NA_integer_) expect_equal(x3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$normalApproximation, TRUE) expect_equal(x3$directionUpper, TRUE) expect_equal(x3$overallTestStatistics, c(1.5925397, 1.479329, 3.0381114, NA_real_), tolerance = 1e-07) expect_equal(x3$overallPValues, c(0.055631748, 0.069526198, 0.0011903296, NA_real_), tolerance = 1e-07) plotData2 <- testGetAnalysisResultsPlotData(x3, thetaRange = seq(1, 2.5, 0.05)) ## ## Comparison of the results of list object 'plotData2' with expected results ## expect_equal(plotData2$xValues, c(1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9, 1.95, 2, 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(0.073284723, 0.098867221, 0.12923781, 0.16416019, 0.20317699, 0.24565174, 0.29082221, 0.33785714, 0.3859101, 0.43416582, 0.48187662, 0.52838782, 0.57315275, 0.61573859, 0.65582473, 0.69319563, 0.72772995, 0.75938755, 
0.7881956, 0.81423493, 0.83762717, 0.85852313, 0.87709283, 0.89351704, 0.9079804, 0.92066609, 0.93175171, 0.94140636, 0.94978864, 0.95704547, 0.96331147), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(0.38589113, 0.43767503, 0.48942229, 0.54046191, 0.59019718, 0.63811141, 0.6837696, 0.72681695, 0.76697506, 0.8040366, 0.83785893, 0.86835727, 0.89549763, 0.91928996, 0.93978142, 0.95705021, 0.97119988, 0.98235405, 0.99065189, 0.99624395, 0.99928862, 0.99994909, 0.9983907, 0.99477877, 0.98927675, 0.98204476, 0.97323838, 0.96300773, 0.95149676, 0.93884271, 0.92517582), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power Plot with Likelihood") expect_equal(plotData2$xlab, "Hazard ratio") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, maximum number of remaining events = 60, allocation ratio = 2") }) test_that("'getAnalysisResults' for a group sequential design and survival data ('directionUpper' reversed)", { .skipTestifDisabled() design2 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.6, 1), bindingFutility = F, typeOfDesign = "WT", deltaWT = 0.45, futilityBounds = c(0, 0, 0)) dataExample2 <- getDataset( overallEvents = c(8, 15, 29), overallAllocationRatios = c(1, 1, 1), overallLogRanks = -c(1.52, 1.38, 2.9) ) x1 <- getAnalysisResults(design2, dataExample2, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(2.4878815, 2.4031352, 2.3549063, 2.2955206), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(0, 0, 0)) expect_equal(x1$alphaSpent, c(0.0064253267, 0.012720859, 0.01826361, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.0064253267, 0.0081275893, 0.0092636882, 0.010851653), tolerance = 1e-07) expect_equal(x1$effectSizes, c(0.34136523, 0.49035339, 0.34060461, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(-1.52, -0.3951648, -2.7453772, NA_real_), tolerance = 1e-07) expect_equal(x1$pValues, c(0.064255488, 0.34636063, 0.003022069, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x1$thetaH0, 1) expect_equal(x1$thetaH1, 0.34060461, tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities, c(0.07432319, 0.044563047, 0.46900287, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.058777136, 0.14176244, 0.14204332, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(1.9825732, 1.6961224, 0.81673327, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.16918725, 0.19438137, 0.0054226276, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, 3) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, 0.013371274, NA_real_), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.17557153, NA_real_), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.88538374, NA_real_), tolerance = 1e-07) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.379847, NA_real_), tolerance = 
1e-07) expect_equal(x1$normalApproximation, TRUE) expect_equal(x1$directionUpper, FALSE) expect_equal(x1$overallTestStatistics, c(-1.52, -1.38, -2.9, NA_real_), tolerance = 1e-07) expect_equal(x1$overallPValues, c(0.064255488, 0.083793322, 0.0018658133, NA_real_), tolerance = 1e-07) x2 <- getAnalysisResults(design2, dataExample2, thetaH0 = 1.1, stage = 2, nPlanned = c(20, 40), allocationRatioPlanned = 0.5, thetaH1 = 0.5, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2, 3, 4)) expect_equal(x2$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(2.4878815, 2.4031352, 2.3549063, 2.2955206), tolerance = 1e-07) expect_equal(x2$futilityBounds, c(0, 0, 0)) expect_equal(x2$alphaSpent, c(0.0064253267, 0.012720859, 0.01826361, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.0064253267, 0.0081275893, 0.0092636882, 0.010851653), tolerance = 1e-07) expect_equal(x2$effectSizes, c(0.34136523, 0.49035339, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(-1.6547889, -0.52124832, -2.9236862, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.048983658, 0.3010969, 0.0017295662, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x2$thetaH0, 1.1, tolerance = 1e-07) expect_equal(x2$thetaH1, 0.5, tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities, c(0.090339948, 0.066890003, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_, 20, 40)) expect_equal(x2$allocationRatioPlanned, 0.5, tolerance = 1e-07) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, 0.40494574, 0.88883511), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.058777136, 0.14176244, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(1.9825732, 1.6961224, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.13608528, 0.14422583, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, FALSE) expect_equal(x2$overallTestStatistics, c(-1.6547889, -1.5645674, -3.1566305, NA_real_), tolerance = 1e-07) expect_equal(x2$overallPValues, c(0.048983658, 0.058842192, 0.00079801722, NA_real_), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x2, thetaRange = seq(0.4, 1, 0.05)) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.97858552, 0.94519604, 0.88883511, 0.81002306, 0.71447863, 0.61071863, 0.50731205, 0.41100476, 0.32600179, 0.25411912, 0.19536859, 0.14863199, 0.11223419), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.92517582, 0.98626675, 0.99928862, 0.9755955, 0.92648393, 0.86161675, 0.78854281, 0.71277663, 0.63811141, 0.56698955, 0.50084781, 0.44040564, 0.38589113), 
tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "Hazard ratio") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, maximum number of remaining events = 60, allocation ratio = 0.5") }) context("Testing the analysis survival functionality for the inverse normal design") test_that("'getAnalysisResults' for an inverse normal design and survival data", { design3 <- getDesignInverseNormal(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.6, 1), bindingFutility = F, typeOfDesign = "WT", deltaWT = 0.45, futilityBounds = c(0, 0, 0)) dataExample3 <- getDataset( overallEvents = c(8, 15, 29), overallAllocationRatios = c(1, 1, 1), overallLogRanks = c(1.52, 1.38, 2.9) ) x1 <- getAnalysisResults(design3, dataExample3, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(2.4878815, 2.4031352, 2.3549063, 2.2955206), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(0, 0, 0)) expect_equal(x1$alphaSpent, c(0.0064253267, 0.012720859, 0.01826361, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.0064253267, 0.0081275893, 0.0092636882, 0.010851653), tolerance = 1e-07) expect_equal(x1$effectSizes, c(2.9294137, 2.0393455, 2.9359555, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(1.52, 0.3951648, 2.7453772, NA_real_), tolerance = 1e-07) expect_equal(x1$pValues, c(0.064255488, 0.34636063, 0.003022069, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x1$thetaH0, 1) expect_equal(x1$thetaH1, 2.9359555, tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities, c(0.07432319, 0.042056716, 0.36917623, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.50439514, 0.5816096, 1.1345596, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(17.013382, 6.9683119, 6.6631754, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.16918725, 0.20216143, 0.010091808, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, 3) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, 0.014307783, NA_real_), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 1.121428, NA_real_), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 5.6413216, NA_real_), tolerance = 1e-07) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, 2.6253218, NA_real_), tolerance = 1e-07) expect_equal(x1$normalApproximation, TRUE) expect_equal(x1$directionUpper, TRUE) expect_equal(x1$combinationTestStatistics, c(1.52, 1.354226, 2.6907652, NA_real_), tolerance = 1e-07) x2 <- getAnalysisResults(design3, stage = 2, nPlanned = c(20,40), allocationRatioPlanned = 2, thetaH1 = 2, dataExample3, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2, 3, 4)) expect_equal(x2$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) 
expect_equal(x2$criticalValues, c(2.4878815, 2.4031352, 2.3549063, 2.2955206), tolerance = 1e-07) expect_equal(x2$futilityBounds, c(0, 0, 0)) expect_equal(x2$alphaSpent, c(0.0064253267, 0.012720859, 0.01826361, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.0064253267, 0.0081275893, 0.0092636882, 0.010851653), tolerance = 1e-07) expect_equal(x2$effectSizes, c(2.9294137, 2.0393455, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(1.52, 0.3951648, 2.7453772, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.064255488, 0.34636063, 0.003022069, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x2$thetaH0, 1) expect_equal(x2$thetaH1, 2) expect_equal(x2$conditionalRejectionProbabilities, c(0.07432319, 0.042056716, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_, 20, 40)) expect_equal(x2$allocationRatioPlanned, 2) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, 0.24122422, 0.76137238), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.50439514, 0.5816096, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(17.013382, 6.9683119, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.16918725, 0.20216143, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, TRUE) expect_equal(x2$combinationTestStatistics, c(1.52, 1.354226, NA_real_, NA_real_), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x2, thetaRange = seq(1, 2.5, 0.05)) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9, 1.95, 2, 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.042054884, 0.058920703, 0.079860688, 0.10500347, 0.13429067, 0.16748187, 0.20417526, 0.24383962, 0.28585263, 0.32954089, 0.37421781, 0.41921675, 0.46391757, 0.50776612, 0.55028679, 0.59108872, 0.62986668, 0.6663978, 0.70053535, 0.73220037, 0.76137238, 0.78807962, 0.81238956, 0.83439998, 0.85423087, 0.87201737, 0.88790373, 0.90203829, 0.91456948, 0.92564264, 0.93539766), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.38589113, 0.43767503, 0.48942229, 0.54046191, 0.59019718, 0.63811141, 0.6837696, 0.72681695, 0.76697506, 0.8040366, 0.83785893, 0.86835727, 0.89549763, 0.91928996, 0.93978142, 0.95705021, 0.97119988, 0.98235405, 0.99065189, 0.99624395, 0.99928862, 0.99994909, 0.9983907, 0.99477877, 0.98927675, 0.98204476, 0.97323838, 0.96300773, 0.95149676, 0.93884271, 0.92517582), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "Hazard ratio") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, maximum number of remaining events = 60, allocation ratio = 2") x3 <- 
getAnalysisResults(design3, dataExample3, thetaH0 = 0.95, stage = 2, nPlanned = c(20,40), allocationRatioPlanned = 2, thetaH1 = 2, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x3' with expected results ## expect_equal(x3$stages, c(1, 2, 3, 4)) expect_equal(x3$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x3$criticalValues, c(2.4878815, 2.4031352, 2.3549063, 2.2955206), tolerance = 1e-07) expect_equal(x3$futilityBounds, c(0, 0, 0)) expect_equal(x3$alphaSpent, c(0.0064253267, 0.012720859, 0.01826361, 0.025), tolerance = 1e-07) expect_equal(x3$stageLevels, c(0.0064253267, 0.0081275893, 0.0092636882, 0.010851653), tolerance = 1e-07) expect_equal(x3$effectSizes, c(2.9294137, 2.0393455, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$testStatistics, c(1.5925397, 0.46301945, 2.8413382, NA_real_), tolerance = 1e-07) expect_equal(x3$pValues, c(0.055631748, 0.32167521, 0.0022462323, NA_real_), tolerance = 1e-07) expect_equal(x3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x3$thetaH0, 0.95, tolerance = 1e-07) expect_equal(x3$thetaH1, 2) expect_equal(x3$conditionalRejectionProbabilities, c(0.082607165, 0.052483916, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$nPlanned, c(NA_real_, NA_real_, 20, 40)) expect_equal(x3$allocationRatioPlanned, 2) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_, 0.32497202, 0.83762717), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(0.50439514, 0.5816096, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(17.013382, 6.9683119, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.15076802, 0.17323655, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$finalStage, NA_integer_) expect_equal(x3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$normalApproximation, TRUE) expect_equal(x3$directionUpper, TRUE) expect_equal(x3$combinationTestStatistics, c(1.5925397, 1.4534998, NA_real_, NA_real_), tolerance = 1e-07) plotData2 <- testGetAnalysisResultsPlotData(x3, thetaRange = seq(1, 2.5, 0.05)) ## ## Comparison of the results of list object 'plotData2' with expected results ## expect_equal(plotData2$xValues, c(1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9, 1.95, 2, 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(0.073284723, 0.098867221, 0.12923781, 0.16416019, 0.20317699, 0.24565174, 0.29082221, 0.33785714, 0.3859101, 0.43416582, 0.48187662, 0.52838782, 0.57315275, 0.61573859, 0.65582473, 0.69319563, 0.72772995, 0.75938755, 0.7881956, 0.81423493, 0.83762717, 0.85852313, 0.87709283, 0.89351704, 0.9079804, 0.92066609, 0.93175171, 0.94140636, 0.94978864, 0.95704547, 0.96331147), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(0.38589113, 0.43767503, 0.48942229, 0.54046191, 0.59019718, 0.63811141, 0.6837696, 0.72681695, 0.76697506, 0.8040366, 0.83785893, 0.86835727, 0.89549763, 0.91928996, 0.93978142, 0.95705021, 0.97119988, 0.98235405, 0.99065189, 0.99624395, 0.99928862, 0.99994909, 0.9983907, 0.99477877, 0.98927675, 
0.98204476, 0.97323838, 0.96300773, 0.95149676, 0.93884271, 0.92517582), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power Plot with Likelihood") expect_equal(plotData2$xlab, "Hazard ratio") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, maximum number of remaining events = 60, allocation ratio = 2") }) test_that("'getAnalysisResults' for an inverse normal design and survival data ('directionUpper' reversed)", { .skipTestifDisabled() design4 <- getDesignInverseNormal(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.6, 1), bindingFutility = F, typeOfDesign = "WT", deltaWT = 0.45, futilityBounds = c(0, 0, 0)) dataExample4 <- getDataset( overallEvents = c(8, 15, 29), overallAllocationRatios = c(1, 1, 1), overallLogRanks = -c(1.52, 1.38, 2.9) ) x1 <- getAnalysisResults(design4, dataExample4, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(2.4878815, 2.4031352, 2.3549063, 2.2955206), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(0, 0, 0)) expect_equal(x1$alphaSpent, c(0.0064253267, 0.012720859, 0.01826361, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.0064253267, 0.0081275893, 0.0092636882, 0.010851653), tolerance = 1e-07) expect_equal(x1$effectSizes, c(0.34136523, 0.49035339, 0.34060461, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(-1.52, -0.3951648, -2.7453772, NA_real_), tolerance = 1e-07) expect_equal(x1$pValues, c(0.064255488, 0.34636063, 0.003022069, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x1$thetaH0, 1) expect_equal(x1$thetaH1, 0.34060461, tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities, c(0.07432319, 0.042056716, 0.36917623, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.058777136, 0.14350678, 0.15007853, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(1.9825732, 1.7193664, 0.88139925, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.16918725, 0.20216143, 0.010091808, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, 3) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, 0.014307783, NA_real_), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.17726343, NA_real_), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.89172021, NA_real_), tolerance = 1e-07) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.38090568, NA_real_), tolerance = 1e-07) expect_equal(x1$normalApproximation, TRUE) expect_equal(x1$directionUpper, FALSE) expect_equal(x1$combinationTestStatistics, c(1.52, 1.354226, 2.6907652, NA_real_), tolerance = 1e-07) x2 <- getAnalysisResults(design4, dataExample4, thetaH0 = 1.1, stage = 2, nPlanned = c(20, 40), allocationRatioPlanned = 0.5, thetaH1 = 0.5, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2, 3, 4)) 
expect_equal(x2$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(2.4878815, 2.4031352, 2.3549063, 2.2955206), tolerance = 1e-07) expect_equal(x2$futilityBounds, c(0, 0, 0)) expect_equal(x2$alphaSpent, c(0.0064253267, 0.012720859, 0.01826361, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.0064253267, 0.0081275893, 0.0092636882, 0.010851653), tolerance = 1e-07) expect_equal(x2$effectSizes, c(0.34136523, 0.49035339, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(-1.6547889, -0.52124832, -2.9236862, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.048983658, 0.3010969, 0.0017295662, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x2$thetaH0, 1.1, tolerance = 1e-07) expect_equal(x2$thetaH1, 0.5, tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities, c(0.090339948, 0.063249751, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_, 20, 40)) expect_equal(x2$allocationRatioPlanned, 0.5, tolerance = 1e-07) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, 0.40494574, 0.88883511), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.058777136, 0.14350678, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(1.9825732, 1.7193664, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.13608528, 0.15066694, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, FALSE) expect_equal(x2$combinationTestStatistics, c(1.6547889, 1.5386907, NA_real_, NA_real_), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x2, thetaRange = seq(0.4, 1, 0.05)) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.97858552, 0.94519604, 0.88883511, 0.81002306, 0.71447863, 0.61071863, 0.50731205, 0.41100476, 0.32600179, 0.25411912, 0.19536859, 0.14863199, 0.11223419), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.92517582, 0.98626675, 0.99928862, 0.9755955, 0.92648393, 0.86161675, 0.78854281, 0.71277663, 0.63811141, 0.56698955, 0.50084781, 0.44040564, 0.38589113), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "Hazard ratio") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, maximum number of remaining events = 60, allocation ratio = 0.5") }) context("Testing the analysis survival functionality for the Fisher design") test_that("'getAnalysisResults' for a Fisher design and 'bindingFutility = TRUE'", { .skipTestifDisabled() design5 <- getDesignFisher(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.6, 1), alpha0Vec = c(0.6,0.5,0.4), bindingFutility = TRUE) dataExample5 <- getDataset( overallEvents = c(8, 15, 29), 
overallAllocationRatios = c(1, 1, 1), overallLogRanks = c(1.52, 1.38, 2) ) x1 <- getAnalysisResults(design5, dataExample5, thetaH1 = 2, allocationRatioPlanned = 2, nPlanned = 50, directionUpper = TRUE, seed = 123456789) ## ## Comparison of the results of AnalysisResultsFisher object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(0.012419362, 0.0016809245, 0.00029441484, 1.8548902e-05), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(0.6, 0.5, 0.4), tolerance = 1e-07) expect_equal(x1$alphaSpent, c(0.012419362, 0.018937437, 0.022642761, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.012419362, 0.012419362, 0.012419362, 0.012419362), tolerance = 1e-07) expect_equal(x1$effectSizes, c(2.9294137, 2.0393455, 2.1017732, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(1.52, 0.3951648, 1.450056, NA_real_), tolerance = 1e-07) expect_equal(x1$pValues, c(0.064255488, 0.34636063, 0.073521457, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(x1$thetaH0, 1) expect_equal(x1$thetaH1, 2) expect_equal(x1$conditionalRejectionProbabilities, c(0.046367462, 0.024190775, 0.042101664, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, NA_real_, 50)) expect_equal(x1$allocationRatioPlanned, 2) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, 0.72028527), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.59937028, 0.5945604, 0.81409304, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(14.31747, 6.9389819, 5.3768854, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.10915739, 0.16855974, 0.081195715, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, NA_integer_) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$normalApproximation, TRUE) expect_equal(x1$directionUpper, TRUE) expect_equal(x1$combinationTestStatistics, c(0.064255488, 0.022255572, 0.0016362621, NA_real_), tolerance = 1e-07) }) test_that("'getAnalysisResults' for a Fisher design and 'bindingFutility = TRUE' ('directionUpper' reversed)", { .skipTestifDisabled() design6 <- getDesignFisher(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.6, 1), alpha0Vec = c(0.6,0.5,0.4), bindingFutility = TRUE) dataExample6 <- getDataset( overallEvents = c(8, 15, 29), overallAllocationRatios = c(1, 1, 1), overallLogRanks = -c(1.52, 1.38, 2) ) x1 <- getAnalysisResults(design6, dataExample6, thetaH1 = 0.5, allocationRatioPlanned = 0.5, nPlanned = 50, directionUpper = FALSE, seed = 123456789) ## ## Comparison of the results of AnalysisResultsFisher object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(0.012419362, 0.0016809245, 0.00029441484, 1.8548902e-05), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(0.6, 0.5, 0.4), tolerance = 1e-07) expect_equal(x1$alphaSpent, c(0.012419362, 0.018937437, 0.022642761, 0.025), tolerance = 1e-07) 
expect_equal(x1$stageLevels, c(0.012419362, 0.012419362, 0.012419362, 0.012419362), tolerance = 1e-07) expect_equal(x1$effectSizes, c(0.34136523, 0.49035339, 0.47578874, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(-1.52, -0.3951648, -1.450056, NA_real_), tolerance = 1e-07) expect_equal(x1$pValues, c(0.064255488, 0.34636063, 0.073521457, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(x1$thetaH0, 1) expect_equal(x1$thetaH1, 0.5, tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities, c(0.046367462, 0.024190775, 0.042101664, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, NA_real_, 50)) expect_equal(x1$allocationRatioPlanned, 0.5, tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, 0.72028527), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.069844861, 0.14411336, 0.18598127, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(1.6684179, 1.6819149, 1.2283608, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.10915739, 0.16855974, 0.081195715, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, NA_integer_) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$normalApproximation, TRUE) expect_equal(x1$directionUpper, FALSE) expect_equal(x1$combinationTestStatistics, c(0.064255488, 0.022255572, 0.0016362621, NA_real_), tolerance = 1e-07) }) rpact/tests/testthat/test-f_simulation_means.R0000644000176200001440000007755013567165663021401 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 06 November 2019, 17:12:56 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing simulation means function") test_that("'getSimulationMeans': several configurations", { .skipTestifDisabled() # @refFS[Sec.]{fs:subsec:seed} maxNumberOfIterations <- 100 seed <- 99123 options(width = 180) maxNumberOfSubjects <- 90 informationRates <- c(0.2, 0.5, 1) plannedSubjects <- round(informationRates * maxNumberOfSubjects) x1 <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = TRUE, thetaH0 = 0.4, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5, seed = seed) ## ## Comparison of the results of SimulationResultsMeans object 'x1' with expected results ## expect_equal(x1$effect, c(-0.4, -0.2, 0, 0.2, 0.4, 0.6), tolerance = 1e-07) expect_equal(x1$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x1$iterations[2, ], c(42, 49, 68, 87, 94, 97)) expect_equal(x1$iterations[3, ], c(4, 9, 23, 43, 65, 68)) expect_equal(x1$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x1$sampleSizes[2, ], c(27, 27, 27, 27, 27, 27)) expect_equal(x1$sampleSizes[3, ], c(45, 45, 45, 45, 45, 45)) expect_equal(x1$rejectPerStage[1, ], c(0, 0, 0, 0, 0, 0)) expect_equal(x1$rejectPerStage[2, ], c(0, 0, 0, 0.01, 0.09, 0.23), tolerance = 1e-07) expect_equal(x1$rejectPerStage[3, ], c(0, 0, 0.01, 0.13, 0.33, 0.53), tolerance = 1e-07) expect_equal(x1$overallReject, c(0, 0, 0.01, 0.14, 0.42, 0.76), tolerance = 1e-07) expect_equal(x1$futilityPerStage[1, ], c(0.58, 0.51, 0.32, 0.13, 0.06, 0.03), tolerance = 1e-07) expect_equal(x1$futilityPerStage[2, ], c(0.38, 0.4, 0.45, 0.43, 0.2, 0.06), tolerance = 1e-07) expect_equal(x1$futilityStop, c(0.96, 0.91, 0.77, 0.56, 0.26, 0.09), tolerance = 1e-07) expect_equal(x1$earlyStop, c(0.96, 0.91, 0.77, 0.57, 0.35, 0.32), tolerance = 1e-07) expect_equal(x1$expectedNumberOfSubjects, c(31.14, 35.28, 46.71, 60.84, 72.63, 74.79), tolerance = 1e-07) expect_equal(x1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$conditionalPowerAchieved[2, ], c(0.017557086, 0.030814475, 0.058601262, 0.09027436, 0.17816715, 0.24070046), tolerance = 1e-07) expect_equal(x1$conditionalPowerAchieved[3, ], c(0.10771631, 0.32388388, 0.32415334, 0.38125404, 0.51933559, 0.59400955), tolerance = 1e-07) x2 <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = FALSE, thetaH0 = 0.2, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5, seed = seed) ## ## Comparison of the results of SimulationResultsMeans object 'x2' with expected results ## expect_equal(x2$effect, c(-0.2, 0, 0.2, 0.4, 0.6, 0.8), tolerance = 1e-07) expect_equal(x2$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x2$iterations[2, ], c(63, 73, 84, 83, 89, 96)) expect_equal(x2$iterations[3, ], c(15, 24, 
42, 53, 69, 76)) expect_equal(x2$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x2$sampleSizes[2, ], c(27, 27, 27, 27, 27, 27)) expect_equal(x2$sampleSizes[3, ], c(45, 45, 45, 45, 45, 45)) expect_equal(x2$rejectPerStage[1, ], c(0, 0, 0, 0, 0, 0)) expect_equal(x2$rejectPerStage[2, ], c(0, 0, 0.02, 0.03, 0.06, 0.1), tolerance = 1e-07) expect_equal(x2$rejectPerStage[3, ], c(0, 0.02, 0.05, 0.15, 0.27, 0.43), tolerance = 1e-07) expect_equal(x2$overallReject, c(0, 0.02, 0.07, 0.18, 0.33, 0.53), tolerance = 1e-07) expect_equal(x2$futilityPerStage[1, ], c(0.37, 0.27, 0.16, 0.17, 0.11, 0.04), tolerance = 1e-07) expect_equal(x2$futilityPerStage[2, ], c(0.48, 0.49, 0.4, 0.27, 0.14, 0.1), tolerance = 1e-07) expect_equal(x2$futilityStop, c(0.85, 0.76, 0.56, 0.44, 0.25, 0.14), tolerance = 1e-07) expect_equal(x2$earlyStop, c(0.85, 0.76, 0.58, 0.47, 0.31, 0.24), tolerance = 1e-07) expect_equal(x2$expectedNumberOfSubjects, c(41.76, 48.51, 59.58, 64.26, 73.08, 78.12), tolerance = 1e-07) expect_equal(x2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$conditionalPowerAchieved[2, ], c(0.056595809, 0.082243527, 0.1171868, 0.14183443, 0.20192022, 0.18371302), tolerance = 1e-07) expect_equal(x2$conditionalPowerAchieved[3, ], c(0.36165449, 0.31543938, 0.36771185, 0.4758946, 0.54527876, 0.61204049), tolerance = 1e-07) x3 <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 1, thetaH0 = 0.2, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, stDev = 1.5, seed = seed) ## ## Comparison of the results of SimulationResultsMeans object 'x3' with expected results ## expect_equal(x3$effect, c(-0.2, 0, 0.2, 0.4, 0.6, 0.8), tolerance = 1e-07) expect_equal(x3$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x3$iterations[2, ], c(50, 71, 87, 96, 97, 99)) expect_equal(x3$iterations[3, ], c(9, 21, 63, 67, 49, 29)) expect_equal(x3$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x3$sampleSizes[2, ], c(27, 27, 27, 27, 27, 27)) expect_equal(x3$sampleSizes[3, ], c(45, 45, 45, 45, 45, 45)) expect_equal(x3$rejectPerStage[1, ], c(0, 0, 0, 0, 0, 0.01), tolerance = 1e-07) expect_equal(x3$rejectPerStage[2, ], c(0, 0, 0.03, 0.21, 0.47, 0.7), tolerance = 1e-07) expect_equal(x3$rejectPerStage[3, ], c(0, 0.02, 0.18, 0.38, 0.47, 0.29), tolerance = 1e-07) expect_equal(x3$overallReject, c(0, 0.02, 0.21, 0.59, 0.94, 1), tolerance = 1e-07) expect_equal(x3$futilityPerStage[1, ], c(0.5, 0.29, 0.13, 0.04, 0.03, 0), tolerance = 1e-07) expect_equal(x3$futilityPerStage[2, ], c(0.41, 0.5, 0.21, 0.08, 0.01, 0), tolerance = 1e-07) expect_equal(x3$futilityStop, c(0.91, 0.79, 0.34, 0.12, 0.04, 0), tolerance = 1e-07) expect_equal(x3$earlyStop, c(0.91, 0.79, 0.37, 0.33, 0.51, 0.71), tolerance = 1e-07) expect_equal(x3$expectedNumberOfSubjects, c(35.55, 46.62, 69.84, 74.07, 66.24, 57.78), tolerance = 1e-07) expect_equal(x3$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$conditionalPowerAchieved[2, ], c(0.047252355, 0.074094582, 0.18424333, 0.30402818, 0.54078356, 0.67131653), tolerance = 1e-07) expect_equal(x3$conditionalPowerAchieved[3, ], c(0.27249296, 0.30454177, 0.45212728, 0.62638376, 0.84307565, 0.91215549), tolerance = 1e-07) x4 <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = TRUE, thetaH0 = 1.1, 
plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5, directionUpper = FALSE, seed = seed) ## ## Comparison of the results of SimulationResultsMeans object 'x4' with expected results ## expect_equal(x4$effect, c(-1.1, -0.9, -0.7, -0.5, -0.3, -0.1), tolerance = 1e-07) expect_equal(x4$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x4$iterations[2, ], c(95, 97, 88, 83, 82, 80)) expect_equal(x4$iterations[3, ], c(74, 76, 68, 55, 50, 41)) expect_equal(x4$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x4$sampleSizes[2, ], c(27, 27, 27, 27, 27, 27)) expect_equal(x4$sampleSizes[3, ], c(45, 45, 45, 45, 45, 45)) expect_equal(x4$rejectPerStage[1, ], c(0, 0, 0, 0, 0, 0)) expect_equal(x4$rejectPerStage[2, ], c(0.16, 0.12, 0.06, 0.06, 0.01, 0), tolerance = 1e-07) expect_equal(x4$rejectPerStage[3, ], c(0.56, 0.52, 0.38, 0.2, 0.11, 0.04), tolerance = 1e-07) expect_equal(x4$overallReject, c(0.72, 0.64, 0.44, 0.26, 0.12, 0.04), tolerance = 1e-07) expect_equal(x4$futilityPerStage[1, ], c(0.05, 0.03, 0.12, 0.17, 0.18, 0.2), tolerance = 1e-07) expect_equal(x4$futilityPerStage[2, ], c(0.05, 0.09, 0.14, 0.22, 0.31, 0.39), tolerance = 1e-07) expect_equal(x4$futilityStop, c(0.1, 0.12, 0.26, 0.39, 0.49, 0.59), tolerance = 1e-07) expect_equal(x4$earlyStop, c(0.26, 0.24, 0.32, 0.45, 0.5, 0.59), tolerance = 1e-07) expect_equal(x4$expectedNumberOfSubjects, c(76.95, 78.39, 72.36, 65.16, 62.64, 58.05), tolerance = 1e-07) expect_equal(x4$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$conditionalPowerAchieved[2, ], c(0.32767401, 0.23178095, 0.2071599, 0.2070829, 0.10752485, 0.096294166), tolerance = 1e-07) expect_equal(x4$conditionalPowerAchieved[3, ], c(0.75737536, 0.64651318, 0.56642877, 0.51397128, 0.44717442, 0.36357098), tolerance = 1e-07) x5 <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = FALSE, thetaH0 = 1.1, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5, directionUpper = FALSE, seed = seed) ## ## Comparison of the results of SimulationResultsMeans object 'x5' with expected results ## expect_equal(x5$effect, c(-1.1, -0.9, -0.7, -0.5, -0.3, -0.1), tolerance = 1e-07) expect_equal(x5$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x5$iterations[2, ], c(98, 96, 88, 84, 82, 79)) expect_equal(x5$iterations[3, ], c(77, 74, 69, 58, 54, 43)) expect_equal(x5$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x5$sampleSizes[2, ], c(27, 27, 27, 27, 27, 27)) expect_equal(x5$sampleSizes[3, ], c(45, 45, 45, 45, 45, 45)) expect_equal(x5$rejectPerStage[1, ], c(0, 0, 0, 0, 0, 0)) expect_equal(x5$rejectPerStage[2, ], c(0.19, 0.14, 0.08, 0.06, 0, 0), tolerance = 1e-07) expect_equal(x5$rejectPerStage[3, ], c(0.59, 0.57, 0.43, 0.21, 0.13, 0.04), tolerance = 1e-07) expect_equal(x5$overallReject, c(0.78, 0.71, 0.51, 0.27, 0.13, 0.04), tolerance = 1e-07) expect_equal(x5$futilityPerStage[1, ], c(0.02, 0.04, 0.12, 0.16, 0.18, 0.21), tolerance = 1e-07) expect_equal(x5$futilityPerStage[2, ], c(0.02, 0.08, 0.11, 0.2, 0.28, 0.36), tolerance = 1e-07) expect_equal(x5$futilityStop, c(0.04, 0.12, 0.23, 0.36, 0.46, 0.57), tolerance = 1e-07) expect_equal(x5$earlyStop, c(0.23, 0.26, 0.31, 0.42, 0.46, 0.57), tolerance = 1e-07) expect_equal(x5$expectedNumberOfSubjects, c(79.11, 77.22, 72.81, 66.78, 64.44, 
58.68), tolerance = 1e-07) expect_equal(x5$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$conditionalPowerAchieved[2, ], c(0.33588936, 0.25194744, 0.19824827, 0.19178721, 0.11444971, 0.092566355), tolerance = 1e-07) expect_equal(x5$conditionalPowerAchieved[3, ], c(0.74226501, 0.69902839, 0.55641803, 0.50033698, 0.45636572, 0.33236099), tolerance = 1e-07) x6 <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 1, thetaH0 = 0.8, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, stDev = 1.5, directionUpper = FALSE, seed = seed) ## ## Comparison of the results of SimulationResultsMeans object 'x6' with expected results ## expect_equal(x6$effect, c(-0.8, -0.6, -0.4, -0.2, 0, 0.2), tolerance = 1e-07) expect_equal(x6$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x6$iterations[2, ], c(100, 99, 96, 81, 70, 49)) expect_equal(x6$iterations[3, ], c(22, 43, 75, 57, 27, 7)) expect_equal(x6$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x6$sampleSizes[2, ], c(27, 27, 27, 27, 27, 27)) expect_equal(x6$sampleSizes[3, ], c(45, 45, 45, 45, 45, 45)) expect_equal(x6$rejectPerStage[1, ], c(0, 0, 0, 0, 0, 0)) expect_equal(x6$rejectPerStage[2, ], c(0.78, 0.56, 0.13, 0.05, 0, 0), tolerance = 1e-07) expect_equal(x6$rejectPerStage[3, ], c(0.22, 0.4, 0.53, 0.21, 0.02, 0), tolerance = 1e-07) expect_equal(x6$overallReject, c(1, 0.96, 0.66, 0.26, 0.02, 0), tolerance = 1e-07) expect_equal(x6$futilityPerStage[1, ], c(0, 0.01, 0.04, 0.19, 0.3, 0.51), tolerance = 1e-07) expect_equal(x6$futilityPerStage[2, ], c(0, 0, 0.08, 0.19, 0.43, 0.42), tolerance = 1e-07) expect_equal(x6$futilityStop, c(0, 0.01, 0.12, 0.38, 0.73, 0.93), tolerance = 1e-07) expect_equal(x6$earlyStop, c(0.78, 0.57, 0.25, 0.43, 0.73, 0.93), tolerance = 1e-07) expect_equal(x6$expectedNumberOfSubjects, c(54.9, 64.08, 77.67, 65.52, 49.05, 34.38), tolerance = 1e-07) expect_equal(x6$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$conditionalPowerAchieved[2, ], c(0.67267344, 0.52857476, 0.27194206, 0.18361852, 0.064769395, 0.04670856), tolerance = 1e-07) expect_equal(x6$conditionalPowerAchieved[3, ], c(0.81011604, 0.77276452, 0.65795757, 0.50391481, 0.35327029, 0.24591214), tolerance = 1e-07) x7 <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 1, thetaH0 = -0.2, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, stDev = 3.5, alternative = seq(-1.2,-0.2,0.2), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10,10,10), maxNumberOfSubjectsPerStage = c(100,100,100), directionUpper = FALSE, seed = seed) ## ## Comparison of the results of SimulationResultsMeans object 'x7' with expected results ## expect_equal(x7$effect, c(-1, -0.8, -0.6, -0.4, -0.2, 0), tolerance = 1e-07) expect_equal(x7$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x7$iterations[2, ], c(93, 97, 88, 78, 78, 74)) expect_equal(x7$iterations[3, ], c(52, 77, 69, 57, 51, 35)) expect_equal(x7$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x7$sampleSizes[2, ], c(74.918717, 83.151367, 90.734126, 88.517379, 94.605927, 95.502536), tolerance = 1e-07) expect_equal(x7$sampleSizes[3, ], c(34.779445, 56.130993, 68.133125, 83.503922, 92.63947, 93.575595), tolerance = 1e-07) expect_equal(x7$rejectPerStage[1, ], c(0, 
0, 0, 0, 0, 0)) expect_equal(x7$rejectPerStage[2, ], c(0.4, 0.19, 0.12, 0.07, 0, 0), tolerance = 1e-07) expect_equal(x7$rejectPerStage[3, ], c(0.41, 0.63, 0.47, 0.25, 0.12, 0.03), tolerance = 1e-07) expect_equal(x7$overallReject, c(0.81, 0.82, 0.59, 0.32, 0.12, 0.03), tolerance = 1e-07) expect_equal(x7$futilityPerStage[1, ], c(0.07, 0.03, 0.12, 0.22, 0.22, 0.26), tolerance = 1e-07) expect_equal(x7$futilityPerStage[2, ], c(0.01, 0.01, 0.07, 0.14, 0.27, 0.39), tolerance = 1e-07) expect_equal(x7$futilityStop, c(0.08, 0.04, 0.19, 0.36, 0.49, 0.65), tolerance = 1e-07) expect_equal(x7$earlyStop, c(0.48, 0.23, 0.31, 0.43, 0.49, 0.65), tolerance = 1e-07) expect_equal(x7$expectedNumberOfSubjects, c(105.75972, 141.87769, 144.85789, 134.64079, 139.03875, 121.42333), tolerance = 1e-07) expect_equal(x7$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$conditionalPowerAchieved[2, ], c(0.48960058, 0.35501907, 0.33230293, 0.3239724, 0.20164899, 0.17099815), tolerance = 1e-07) expect_equal(x7$conditionalPowerAchieved[3, ], c(0.75975737, 0.70067902, 0.61722401, 0.51061814, 0.40378864, 0.28388391), tolerance = 1e-07) x8 <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5)), groups = 2, meanRatio = FALSE, thetaH0 = -0.1, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 3.5, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10,40,40), maxNumberOfSubjectsPerStage = c(100,400,400), seed = seed) ## ## Comparison of the results of SimulationResultsMeans object 'x8' with expected results ## expect_equal(x8$effect, c(0.1, 0.3, 0.5, 0.7, 0.9, 1.1), tolerance = 1e-07) expect_equal(x8$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x8$iterations[2, ], c(75, 75, 82, 77, 85, 88)) expect_equal(x8$iterations[3, ], c(32, 45, 59, 64, 62, 66)) expect_equal(x8$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x8$sampleSizes[2, ], c(312.537, 315.47118, 298.71109, 298.99103, 273.70172, 271.45585), tolerance = 1e-07) expect_equal(x8$sampleSizes[3, ], c(337.66315, 320.33174, 340.67902, 295.14071, 245.92316, 230.91095), tolerance = 1e-07) expect_equal(x8$rejectPerStage[1, ], c(0, 0, 0, 0.01, 0, 0), tolerance = 1e-07) expect_equal(x8$rejectPerStage[2, ], c(0.02, 0, 0.05, 0.08, 0.21, 0.21), tolerance = 1e-07) expect_equal(x8$rejectPerStage[3, ], c(0.02, 0.06, 0.22, 0.27, 0.42, 0.5), tolerance = 1e-07) expect_equal(x8$overallReject, c(0.04, 0.06, 0.27, 0.36, 0.63, 0.71), tolerance = 1e-07) expect_equal(x8$futilityPerStage[1, ], c(0.25, 0.25, 0.18, 0.22, 0.15, 0.12), tolerance = 1e-07) expect_equal(x8$futilityPerStage[2, ], c(0.41, 0.3, 0.18, 0.05, 0.02, 0.01), tolerance = 1e-07) expect_equal(x8$futilityStop, c(0.66, 0.55, 0.36, 0.27, 0.17, 0.13), tolerance = 1e-07) expect_equal(x8$earlyStop, c(0.68, 0.55, 0.41, 0.36, 0.38, 0.34), tolerance = 1e-07) expect_equal(x8$expectedNumberOfSubjects, c(360.45496, 398.75267, 463.94372, 437.11315, 403.11882, 409.28238), tolerance = 1e-07) expect_equal(x8$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$conditionalPowerAchieved[2, ], c(0.31465275, 0.38687556, 0.41716705, 0.36457183, 0.46957137, 0.48650775), tolerance = 1e-07) expect_equal(x8$conditionalPowerAchieved[3, ], c(0.36402168, 0.44332107, 0.47182355, 0.52975853, 0.68482255, 0.64923586), tolerance = 1e-07) x9 <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5)), 
groups = 2, meanRatio = TRUE, thetaH0 = 1.6, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5, alternative = seq(0.8,1.6,0.2), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(50,50,50), maxNumberOfSubjectsPerStage = c(400,400,400), directionUpper = FALSE, seed = seed) ## ## Comparison of the results of SimulationResultsMeans object 'x9' with expected results ## expect_equal(x9$effect, c(-0.8, -0.6, -0.4, -0.2, 0), tolerance = 1e-07) expect_equal(x9$iterations[1, ], c(100, 100, 100, 100, 100)) expect_equal(x9$iterations[2, ], c(84, 86, 78, 67, 70)) expect_equal(x9$iterations[3, ], c(53, 62, 62, 45, 23)) expect_equal(x9$sampleSizes[1, ], c(18, 18, 18, 18, 18)) expect_equal(x9$sampleSizes[2, ], c(257.73836, 278.8361, 303.27301, 306.23977, 339.30408), tolerance = 1e-07) expect_equal(x9$sampleSizes[3, ], c(153.57289, 230.09947, 313.53643, 325.28234, 342.27563), tolerance = 1e-07) expect_equal(x9$rejectPerStage[1, ], c(0, 0, 0, 0.01, 0), tolerance = 1e-07) expect_equal(x9$rejectPerStage[2, ], c(0.3, 0.21, 0.06, 0.03, 0.02), tolerance = 1e-07) expect_equal(x9$rejectPerStage[3, ], c(0.44, 0.45, 0.39, 0.09, 0.01), tolerance = 1e-07) expect_equal(x9$overallReject, c(0.74, 0.66, 0.45, 0.13, 0.03), tolerance = 1e-07) expect_equal(x9$futilityPerStage[1, ], c(0.16, 0.14, 0.22, 0.32, 0.3), tolerance = 1e-07) expect_equal(x9$futilityPerStage[2, ], c(0.01, 0.03, 0.1, 0.19, 0.45), tolerance = 1e-07) expect_equal(x9$futilityStop, c(0.17, 0.17, 0.32, 0.51, 0.75), tolerance = 1e-07) expect_equal(x9$earlyStop, c(0.47, 0.38, 0.38, 0.55, 0.77), tolerance = 1e-07) expect_equal(x9$expectedNumberOfSubjects, c(315.89385, 400.46072, 448.94553, 369.5577, 334.23625), tolerance = 1e-07) expect_equal(x9$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x9$conditionalPowerAchieved[2, ], c(0.63427493, 0.5497222, 0.49353349, 0.49721138, 0.34872602), tolerance = 1e-07) expect_equal(x9$conditionalPowerAchieved[3, ], c(0.84712236, 0.77882668, 0.6178067, 0.51284066, 0.41825576), tolerance = 1e-07) myStageSubjects <- function(..., stage, thetaH0, allocationRatioPlanned, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, sampleSizesPerStage, thetaStandardized, conditionalPower, conditionalCriticalValue) { mult <- 1 if (stage == 2){ stageSubjects <- (1 + 1/allocationRatioPlanned + thetaH0^2 * (1 + allocationRatioPlanned))* (max(0, conditionalCriticalValue + stats::qnorm(conditionalPower)))^2 * mult / (max(1e-12, thetaStandardized))^2 stageSubjects <- min(max(minNumberOfSubjectsPerStage[stage], stageSubjects), maxNumberOfSubjectsPerStage[stage]) } else { stageSubjects <- sampleSizesPerStage[stage - 1] } return(stageSubjects) } x10 <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(0.5,0.5)), groups = 2, meanRatio = TRUE, thetaH0 = 1.6, plannedSubjects = c(80,160,240), maxNumberOfIterations = maxNumberOfIterations, stDev = 1.5, alternative = seq(0.8,1.6,0.2), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10,40,40), maxNumberOfSubjectsPerStage = c(200,400,400), allocationRatioPlanned = 3, directionUpper = FALSE, seed = seed, calcSubjectsFunction = myStageSubjects) ## ## Comparison of the results of SimulationResultsMeans object 'x10' with expected results ## expect_equal(x10$effect, c(-0.8, -0.6, -0.4, -0.2, 0), tolerance = 1e-07) expect_equal(x10$iterations[1, ], c(100, 100, 100, 100, 100)) expect_equal(x10$iterations[2, ], c(76, 73, 61, 39, 31)) 
expect_equal(x10$iterations[3, ], c(26, 36, 49, 28, 25)) expect_equal(x10$sampleSizes[1, ], c(80, 80, 80, 80, 80)) expect_equal(x10$sampleSizes[2, ], c(226.65982, 237.82641, 309.41238, 322.48967, 279.64545), tolerance = 1e-07) expect_equal(x10$sampleSizes[3, ], c(192.90106, 242.76454, 304.34981, 316.96502, 285.17103), tolerance = 1e-07) expect_equal(x10$rejectPerStage[1, ], c(0.01, 0, 0, 0, 0), tolerance = 1e-07) expect_equal(x10$rejectPerStage[2, ], c(0.5, 0.37, 0.11, 0.04, 0.02), tolerance = 1e-07) expect_equal(x10$rejectPerStage[3, ], c(0.23, 0.26, 0.27, 0.07, 0.01), tolerance = 1e-07) expect_equal(x10$overallReject, c(0.74, 0.63, 0.38, 0.11, 0.03), tolerance = 1e-07) expect_equal(x10$futilityPerStage[1, ], c(0.23, 0.27, 0.39, 0.61, 0.69), tolerance = 1e-07) expect_equal(x10$futilityPerStage[2, ], c(0, 0, 0.01, 0.07, 0.04), tolerance = 1e-07) expect_equal(x10$futilityStop, c(0.23, 0.27, 0.4, 0.68, 0.73), tolerance = 1e-07) expect_equal(x10$earlyStop, c(0.74, 0.64, 0.51, 0.72, 0.75), tolerance = 1e-07) expect_equal(x10$expectedNumberOfSubjects, c(302.41574, 341.00851, 417.87296, 294.52118, 237.98285), tolerance = 1e-07) expect_equal(x10$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x10$conditionalPowerAchieved[2, ], c(0.83285854, 0.79260753, 0.67563397, 0.60585275, 0.66737426), tolerance = 1e-07) expect_equal(x10$conditionalPowerAchieved[3, ], c(0.84091805, 0.78999444, 0.60898256, 0.53740242, 0.36130254), tolerance = 1e-07) #x <- getSimulationRates(getDesignGroupSequential(),plannedSubjects = c(33,67,100)) #y <- getPowerRates(getDesignGroupSequential(),maxNumberOfSubjects = 100) # #plot(x, type = 5) #plot(y, type = 5) #options(width = 180) #maxNumberOfSubjects <- 200 #informationRates <- c(0.2,0.5,1) #plannedSubjects <- round(informationRates*maxNumberOfSubjects) #maxNumberOfIterations <- 10000 # #x <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = TRUE, thetaH0 = 0.4, # plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5) #y <- getPowerMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = TRUE, thetaH0 = 0.4, # maxNumberOfSubjects = maxNumberOfSubjects, allocationRatioPlanned = 3, stDev = 1.5, normalApproximation = TRUE) #round((x$expectedNumberOfSubjects - y$expectedNumberOfSubjects)/maxNumberOfSubjects,4) #round(x$overallReject - y$overallReject,4) #round(x$futilityStop - y$futilityStop,4) #x$overallReject # #x <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = FALSE, thetaH0 = 0.2, # plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5) #y <- getPowerMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = FALSE, thetaH0 = 0.2, # maxNumberOfSubjects = maxNumberOfSubjects, allocationRatioPlanned = 3, stDev = 1.5, normalApproximation = TRUE) #round((x$expectedNumberOfSubjects - y$expectedNumberOfSubjects)/maxNumberOfSubjects,4) #round(x$overallReject - y$overallReject,4) #round(x$futilityStop - y$futilityStop,4) #x$overallReject # #x <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 1, 
thetaH0 = 0.2, # plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, stDev = 1.5) #y <- getPowerMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 1, thetaH0 = 0.2, # maxNumberOfSubjects = maxNumberOfSubjects, stDev = 1.5, normalApproximation = TRUE) #round((x$expectedNumberOfSubjects - y$expectedNumberOfSubjects)/maxNumberOfSubjects,4) #round(x$overallReject - y$overallReject,4) #round(x$futilityStop - y$futilityStop,4) #x$overallReject # #x <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = TRUE, thetaH0 = 1.1, # plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5, directionUpper = FALSE) #y <- getPowerMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = TRUE, thetaH0 = 1.1, # maxNumberOfSubjects = maxNumberOfSubjects, allocationRatioPlanned = 3, stDev = 1.5, normalApproximation = TRUE, directionUpper = FALSE) #round((x$expectedNumberOfSubjects - y$expectedNumberOfSubjects)/maxNumberOfSubjects,4) #round(x$overallReject - y$overallReject,4) #round(x$futilityStop - y$futilityStop,4) #x$overallReject # #x <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = FALSE, thetaH0 = 1.1, # plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5, directionUpper = FALSE) #y <- getPowerMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = FALSE, thetaH0 = 1.1, # maxNumberOfSubjects = maxNumberOfSubjects, allocationRatioPlanned = 3, stDev = 1.5, normalApproximation = TRUE, directionUpper = FALSE) #round((x$expectedNumberOfSubjects - y$expectedNumberOfSubjects)/maxNumberOfSubjects,4) #round(x$overallReject - y$overallReject,4) #round(x$futilityStop - y$futilityStop,4) #x$overallReject # #x <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 1, thetaH0 = 0.8, # plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, stDev = 1.5, directionUpper = FALSE) #y <- getPowerMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 1, thetaH0 = 0.8, # maxNumberOfSubjects = maxNumberOfSubjects, stDev = 1.5, normalApproximation = TRUE, directionUpper = FALSE) #round((x$expectedNumberOfSubjects - y$expectedNumberOfSubjects)/maxNumberOfSubjects,4) #round(x$overallReject - y$overallReject,4) #round(x$futilityStop - y$futilityStop,4) #x$overallReject # # #options(width = 180) #maxNumberOfSubjects <- 150 #informationRates <- (1:3)/3 #plannedSubjects <- round(informationRates*maxNumberOfSubjects) #maxNumberOfIterations <- 20000 # #x <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(0.5,0.5), informationRates = informationRates), groups = 1, thetaH0 = 0, # plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, stDev = 3.5, alternative = seq(-1,0,0.2), # conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10,10,10), maxNumberOfSubjectsPerStage = c(100,100,100), directionUpper = FALSE) #x$overallReject #x$futilityStop #x$expectedNumberOfSubjects 
#x$conditionalPowerAchieved #x$sampleSizes # #x <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(0.5, 0.5)), groups = 2, meanRatio = FALSE, thetaH0 = -0.1, # plannedSubjects = c(80,160,240), maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 3.5, # conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 40, 40), maxNumberOfSubjectsPerStage = c(100,400, 400)) #x$overallReject #x$futilityStop #x$expectedNumberOfSubjects #x$conditionalPowerAchieved #x$sampleSizes # #x <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(0.5,0.5)), groups = 2, meanRatio = TRUE, thetaH0 = 1.6, # plannedSubjects = c(80,160,240), maxNumberOfIterations = maxNumberOfIterations, stDev = 1.5, alternative = seq(0.8,1.6,0.2), # conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10,40,40), maxNumberOfSubjectsPerStage = c(200,400,400), # allocationRatioPlanned = 3, directionUpper = FALSE) # #x$overallReject #x$futilityStop #x$expectedNumberOfSubjects #x$conditionalPowerAchieved #x$sampleSizes # #myStageSubjects <- function(..., stage, thetaH0, allocationRatioPlanned, # minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, # sampleSizesPerStage, thetaStandardized, conditionalPower, conditionalCriticalValue) { # if (stage == 2){ # stageSubjects <- (1 + 1/allocationRatioPlanned + thetaH0^2 * (1 + allocationRatioPlanned))* # (max(0, conditionalCriticalValue + stats::qnorm(conditionalPower)))^2 * mult / # (max(1e-12, thetaStandardized))^2 # stageSubjects <- min(max(minNumberOfSubjectsPerStage[stage], stageSubjects), # maxNumberOfSubjectsPerStage[stage]) # } else { # stageSubjects <- sampleSizesPerStage[stage - 1] # } # return(stageSubjects) #} # #x <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(0.5,0.5)), groups = 2, meanRatio = TRUE, thetaH0 = 1.6, # plannedSubjects = c(80,160,240), maxNumberOfIterations = maxNumberOfIterations, stDev = 1.5, alternative = seq(0.8,1.6,0.2), # conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10,40,40), maxNumberOfSubjectsPerStage = c(200,400,400), # allocationRatioPlanned = 3, directionUpper = FALSE, calcSubjectsFunction = myStageSubjects) # # }) rpact/tests/testthat/test-f_core_assertions.R0000644000176200001440000002146413567423107021213 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 06 November 2019, 17:12:23 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing assertion functions") test_that("Testing '.assertIsInClosedInterval'", { invisible(capture.output(expect_error(.assertIsInClosedInterval(x = 0, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = 1, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = 0.0001, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = 0.9999, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = NA_real_, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = NA_real_, xName = "x", lower = 0, upper = 1, naAllowed = TRUE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = c(1, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = TRUE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = c(NA_real_, 0, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = TRUE), NA))) expect_error(.assertIsInClosedInterval(x = -0.0001, xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInClosedInterval(x = -0.0001, xName = "x", lower = 0, upper = NA_real_, naAllowed = FALSE)) expect_error(.assertIsInClosedInterval(x = 1.0001, xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInClosedInterval(x = c(1, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInClosedInterval(x = c(-1, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = TRUE)) expect_error(.assertIsInClosedInterval(x = c(NA_real_, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInClosedInterval(x = c(0, -1, 1, 2), xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) }) test_that("Testing '.assertIsInOpenInterval'", { invisible(capture.output(expect_error(.assertIsInOpenInterval(x = 0.0001, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInOpenInterval(x = 0.9999, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInOpenInterval(x = NA_real_, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInOpenInterval(x = NA_real_, xName = "x", lower = 0, upper = 1, naAllowed = TRUE), NA))) invisible(capture.output(expect_error(.assertIsInOpenInterval(x = c(0.9999, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = TRUE), NA))) invisible(capture.output(expect_error(.assertIsInOpenInterval(x = c(NA_real_, 0.0001, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = TRUE), NA))) expect_error(.assertIsInOpenInterval(x = 0, xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) 
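	# Unlike '.assertIsInClosedInterval' above, the boundary values 0 and 1 must be
	# rejected here, because the open interval (0, 1) excludes its endpoints: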
expect_error(.assertIsInOpenInterval(x = 0, xName = "x", lower = 0, upper = NA_real_, naAllowed = FALSE)) expect_error(.assertIsInOpenInterval(x = 1, xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInOpenInterval(x = c(1.0001, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInOpenInterval(x = c(-1, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = TRUE)) expect_error(.assertIsInOpenInterval(x = c(NA_real_, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInOpenInterval(x = c(0, -1, 1, 2), xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) }) test_that("Testing '.assertDesignParameterExists'", { expect_error(.assertDesignParameterExists(), "Missing argument: 'design' must be defined", fixed = TRUE) expect_error(.assertDesignParameterExists(design = getAssertionTestDesign()), "Missing argument: 'parameterName' must be defined", fixed = TRUE) expect_error(.assertDesignParameterExists(design = getAssertionTestDesign(), parameterName = "kMax"), "Missing argument: 'defaultValue' must be defined", fixed = TRUE) expect_error(.assertDesignParameterExists(design = getAssertionTestDesign(), parameterName = "kMax", defaultValue = C_KMAX_DEFAULT), "Missing argument: parameter 'kMax' must be specified in design", fixed = TRUE) expect_error(.assertDesignParameterExists(design = getAssertionTestDesign(kMax = NA_integer_), parameterName = "kMax", defaultValue = C_KMAX_DEFAULT), "Missing argument: parameter 'kMax' must be specified in design", fixed = TRUE) }) test_that("Testing '.assertIsValidThetaRange' ", { expect_error(.assertIsValidThetaRange(thetaRange = c()), "Illegal argument: 'thetaRange' must be a vector with two entries defining minimum and maximum or a sequence of values with length > 2", fixed = TRUE) #expect_error(.assertIsValidThetaRange(thetaRange = c(1, -2)), # "Illegal argument: 'thetaRange' with length 2 must contain minimum < maximum (1 >= -2)", fixed = TRUE) expect_equal(.assertIsValidThetaRange(thetaRange = c(1, 2, 3)), c(1, 2, 3)) expect_equal(.assertIsValidThetaRange(thetaRange = c(-1, 2)), seq(-1, 2, 3 / C_THETA_RANGE_SEQUENCE_LENGTH_DEFAULT)) }) test_that("Testing '.assertIsSingleNumber'", { expect_error(.assertIsSingleNumber(NA, "x"), "Illegal argument: 'x' (NA) must be a valid single numerical value", fixed = TRUE) expect_error(.assertIsSingleNumber(NULL, "x"), "Missing argument: 'x' must be a valid single numerical value", fixed = TRUE) expect_error(.assertIsSingleNumber(c(1, 2), "x"), "Illegal argument: 'x' c(1, 2) must be a single numerical value", fixed = TRUE) expect_error(.assertIsSingleNumber(numeric(0), "x"), "Missing argument: 'x' must be a valid single numerical value", fixed = TRUE) }) test_that("Testing '.assertAssociatedArgumentsAreDefined'", { expect_error(.assertAssociatedArgumentsAreDefined(a = NA, b = 1), "Missing argument: 'a' must be defined because 'b' is defined", fixed = TRUE) expect_error(.assertAssociatedArgumentsAreDefined(a = NA, b = 1, c = NA), "Missing argument: 'a', 'c' must be defined because 'b' is defined", fixed = TRUE) expect_error(.assertAssociatedArgumentsAreDefined(a = NA, b = 1, c = 2), "Missing argument: 'a' must be defined because 'b', 'c' are defined", fixed = TRUE) }) test_that("Testing '.associatedArgumentsAreDefined'", { expect_equal(.associatedArgumentsAreDefined(nPlanned = NA_real_, thetaH1 = NA_real_), FALSE) expect_equal(.associatedArgumentsAreDefined(nPlanned = NA_real_, thetaH1 = 1), FALSE) 
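	# Only when all associated arguments are specified (non-missing) does the
	# group count as jointly defined: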
expect_equal(.associatedArgumentsAreDefined(nPlanned = 1, thetaH1 = 1), TRUE) }) test_that("Testing '.isValidNPlanned", { expect_equal(.isValidNPlanned(nPlanned = c(1, 2), kMax = 4, stage = 2), TRUE) expect_silent(.isValidNPlanned(nPlanned = NA_real_, kMax = 4, stage = 2)) expect_warning(.isValidNPlanned(nPlanned = c(1), kMax = 4, stage = 2), "'nPlanned' (1) will be ignored: length must be equal to 'kMax' (4) - 'stage' (2)", fixed = TRUE) expect_warning(.isValidNPlanned(nPlanned = c(1, 2, 3), kMax = 4, stage = 2), "'nPlanned' (1, 2, 3) will be ignored: length must be equal to 'kMax' (4) - 'stage' (2)", fixed = TRUE) }) rpact/tests/testthat/helper-f_core_assertions.R0000644000176200001440000000545713370565006021514 0ustar liggesusers###################################################################################### # # # -- Unit test helper functions -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 07-11-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### getAssertionTestDesign <- function(kMax = NA_integer_, informationRates = NA_real_, futilityBounds = NA_real_, designClass = C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL) { if (designClass == C_CLASS_NAME_TRIAL_DESIGN_FISHER) { return(TrialDesignFisher( kMax = kMax, alpha = C_ALPHA_DEFAULT, method = C_FISHER_METHOD_DEFAULT, alpha0Vec = futilityBounds, informationRates = informationRates, tolerance = C_ANALYSIS_TOLERANCE_FISHER_DEFAULT, iterations = 0, seed = 9498485 )) } return(.createDesign( designClass = designClass, kMax = kMax, alpha = C_ALPHA_DEFAULT, beta = C_BETA_DEFAULT, sided = 1, informationRates = informationRates, futilityBounds = futilityBounds, typeOfDesign = C_DEFAULT_TYPE_OF_DESIGN, delta = 0, optimizationCriterion = C_OPTIMIZATION_CRITERION_DEFAULT, gammaA = 1, typeBetaSpending = C_TYPE_OF_DESIGN_BS_NONE, userAlphaSpending = NA_real_, userBetaSpending = NA_real_, gammaB = 1, tolerance = 1e-06)) } rpact/tests/testthat/test-f_design_fisher_combination_test.R0000644000176200001440000003427213567165663024256 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 06 November 2019, 17:12:27 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing the Fisher design functionality") test_that("'getDesignFisher' with default parameters: parameters and results are as expected", { # @refFS[Formula]{fs:FisherCombinationEqualAlpha} x <- getDesignFisher() ## ## Comparison of the results of TrialDesignFisher object 'x' with expected results ## expect_equal(x$alphaSpent, c(0.012308547, 0.01962413, 0.025), tolerance = 1e-07) expect_equal(x$criticalValues, c(0.012308547, 0.0016635923, 0.00029106687), tolerance = 1e-07) expect_equal(x$stageLevels, c(0.012308547, 0.012308547, 0.012308547), tolerance = 1e-07) expect_equal(x$scale, c(1, 1)) expect_equal(x$nonStochasticCurtailment, FALSE) }) test_that("'getDesignFisher' with kMax = 4: parameters and results are as expected for different arguments", { # @refFS[Formula]{fs:FisherCombinationEqualAlpha} designFisher1 <- getDesignFisher(kMax = 4) ## ## Comparison of the results of TrialDesignFisher object 'designFisher1' with expected results ## expect_equal(designFisher1$alphaSpent, c(0.010404785, 0.016661203, 0.021286477, 0.025), tolerance = 1e-07) expect_equal(designFisher1$criticalValues, c(0.010404785, 0.0013703718, 0.00023506069, 4.5812899e-05), tolerance = 1e-07) expect_equal(designFisher1$stageLevels, c(0.010404785, 0.010404785, 0.010404785, 0.010404785), tolerance = 1e-07) expect_equal(designFisher1$scale, c(1, 1, 1)) expect_equal(designFisher1$nonStochasticCurtailment, FALSE) # @refFS[Formula]{fs:FisherCombinationEqualAlpha} designFisher2 <- getDesignFisher(kMax = 4, alpha0Vec = c(0.7,0.6,0.5), informationRates = c(0.1,0.3,0.7,1)) ## ## Comparison of the results of TrialDesignFisher object 'designFisher2' with expected results ## expect_equal(designFisher2$alphaSpent, c(0.010565317, 0.017774885, 0.022713904, 0.025), tolerance = 1e-07) expect_equal(designFisher2$criticalValues, c(0.010565317, 0.00031144789, 2.8609076e-06, 1.4533579e-07), tolerance = 1e-07) expect_equal(designFisher2$stageLevels, c(0.010565317, 0.010565317, 0.010565317, 0.010565317), tolerance = 1e-07) expect_equal(designFisher2$scale, c(1.4142136, 2, 1.7320508), tolerance = 1e-07) expect_equal(designFisher2$nonStochasticCurtailment, FALSE) # @refFS[Formula]{fs:FisherCombinationFullAlpha} designFisher3 <- getDesignFisher(kMax = 4, method = "fullAlpha") ## ## Comparison of the results of TrialDesignFisher object 'designFisher3' with expected results ## expect_equal(designFisher3$alphaSpent, c(0.00015574772, 0.0015212305, 0.0075070105, 0.025), tolerance = 1e-07) expect_equal(designFisher3$criticalValues, c(0.00015574772, 0.00015574772, 0.00015574772, 0.00015574772), tolerance = 1e-07) expect_equal(designFisher3$stageLevels, c(0.00015574772, 0.0015212305, 0.0075070105, 0.025), tolerance = 1e-07) expect_equal(designFisher3$scale, c(1, 1, 1)) expect_equal(designFisher3$nonStochasticCurtailment, TRUE) # @refFS[Formula]{fs:FisherCombinationFullAlpha} designFisher4 <- getDesignFisher(kMax = 4, alpha0Vec = c(0.7,0.6,0.5), informationRates = c(0.1,0.3,0.7,1), method = "fullAlpha") ## ## Comparison of 
the results of TrialDesignFisher object 'designFisher4' with expected results ## expect_equal(designFisher4$alphaSpent, c(0.0075234886, 0.012807964, 0.016496254, 0.025), tolerance = 1e-07) expect_equal(designFisher4$criticalValues, c(0.0075234886, 0.00019010097, 1.4149989e-06, 1.0550077e-06), tolerance = 1e-07) expect_equal(designFisher4$stageLevels, c(0.0075234886, 0.0075234886, 0.0075234886, 0.025), tolerance = 1e-07) expect_equal(designFisher4$scale, c(1.4142136, 2, 1.7320508), tolerance = 1e-07) expect_equal(designFisher4$nonStochasticCurtailment, FALSE) # @refFS[Formula]{fs:FisherCombinationNoTreatmentStageInteraction} designFisher5 <- getDesignFisher(kMax = 4, alpha0Vec = c(0.7,0.6,0.5), method = "noInteraction") ## ## Comparison of the results of TrialDesignFisher object 'designFisher5' with expected results ## expect_equal(designFisher5$alphaSpent, c(0.0098603693, 0.012073314, 0.018133935, 0.025), tolerance = 1e-07) expect_equal(designFisher5$criticalValues, c(0.0098603693, 0.00051915905, 0.00031149543, 0.00015574772), tolerance = 1e-07) expect_equal(designFisher5$stageLevels, c(0.0098603693, 0.0044457148, 0.012979977, 0.025), tolerance = 1e-07) expect_equal(designFisher5$scale, c(1, 1, 1)) expect_equal(designFisher5$nonStochasticCurtailment, FALSE) # @refFS[Formula]{fs:FisherCombinationNoTreatmentStageInteraction} designFisher6 <- getDesignFisher(kMax = 4, alpha0Vec = c(0.7,0.6,0.5), informationRates = c(0.1,0.3,0.7,1), method = "noInteraction") ## ## Comparison of the results of TrialDesignFisher object 'designFisher6' with expected results ## expect_equal(designFisher6$alphaSpent, c(0.01128689, 0.011490625, 0.016266616, 0.025), tolerance = 1e-07) expect_equal(designFisher6$criticalValues, c(0.01128689, 2.0322622e-06, 1.5741835e-06, 1.0550077e-06), tolerance = 1e-07) expect_equal(designFisher6$stageLevels, c(0.01128689, 0.0003175156, 0.0079214091, 0.025), tolerance = 1e-07) expect_equal(designFisher6$scale, c(1.4142136, 2, 1.7320508), tolerance = 1e-07) expect_equal(designFisher6$nonStochasticCurtailment, FALSE) # @refFS[Formula]{fs:FisherCombinationUserDefinedAlphaPending} designFisher7 <- getDesignFisher(kMax = 4, alpha0Vec = c(0.7,0.6,0.5), method = "userDefinedAlpha", userAlphaSpending = c(0.01,0.015,0.02,0.025)) ## ## Comparison of the results of TrialDesignFisher object 'designFisher7' with expected results ## expect_equal(designFisher7$alphaSpent, c(0.01, 0.015, 0.02, 0.025), tolerance = 1e-07) expect_equal(designFisher7$criticalValues, c(0.01, 0.0011768873, 0.00031357454, 0.00011586425), tolerance = 1e-07) expect_equal(designFisher7$stageLevels, c(0.01, 0.0091148534, 0.013047692, 0.020300118), tolerance = 1e-07) expect_equal(designFisher7$scale, c(1, 1, 1)) expect_equal(designFisher7$nonStochasticCurtailment, FALSE) # @refFS[Formula]{fs:FisherCombinationUserDefinedAlphaPending} designFisher8 <- getDesignFisher(kMax = 4, alpha0Vec = c(0.7,0.6,0.5), informationRates = c(0.1,0.3,0.7,1), method = "userDefinedAlpha", userAlphaSpending = c(0.01,0.015,0.02,0.025)) ## ## Comparison of the results of TrialDesignFisher object 'designFisher8' with expected results ## expect_equal(designFisher8$alphaSpent, c(0.01, 0.015, 0.02, 0.025), tolerance = 1e-07) expect_equal(designFisher8$criticalValues, c(0.01, 0.00018389153, 2.6484943e-06, 5.2344628e-07), tolerance = 1e-07) expect_equal(designFisher8$stageLevels, c(0.01, 0.0073532156, 0.0101804, 0.018500415), tolerance = 1e-07) expect_equal(designFisher8$scale, c(1.4142136, 2, 1.7320508), tolerance = 1e-07) 
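	# The 'scale' values are the weights of the Fisher combination test,
	# sqrt((t_k - t_{k-1}) / t_1) for informationRates t = (0.1, 0.3, 0.7, 1),
	# i.e., sqrt(2), sqrt(4) = 2, and sqrt(3).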
expect_equal(designFisher8$nonStochasticCurtailment, FALSE) }) test_that("'getDesignFisher': illegal arguments throw exceptions as expected", { expect_error(getDesignFisher(method = C_FISHER_METHOD_USER_DEFINED_ALPHA, userAlphaSpending = c(0.01, 0.02, 0.023, 0.023, 0.025), kMax = 4), paste0("Conflicting arguments: length of 'userAlphaSpending' (5) ", "must be equal to 'kMax' (4)"), fixed = TRUE) expect_error(getDesignFisher(method = C_FISHER_METHOD_USER_DEFINED_ALPHA, userAlphaSpending = c(0.01, 0.02, 0.025), informationRates = c(0.5, 1)), paste0("Conflicting arguments: length of 'userAlphaSpending' (3) ", "must be equal to length of 'informationRates' (2)"), fixed = TRUE) expect_error(getDesignFisher(method = C_FISHER_METHOD_USER_DEFINED_ALPHA, userAlphaSpending = c(0.01, 0.02, 0.025), informationRates = c(0.4, 1)), paste0("Conflicting arguments: length of 'userAlphaSpending' (3) ", "must be equal to length of 'informationRates' (2)"), fixed = TRUE) expect_error(getDesignFisher(method = C_FISHER_METHOD_USER_DEFINED_ALPHA, userAlphaSpending = c(0.01, 0.02, 0.023, 0.023, 0.021)), paste0("'userAlphaSpending' = c(0.01, 0.02, 0.023, 0.023, 0.021) must be a vector that ", "satisfies the following condition: 0 <= alpha_1 <= .. <= alpha_5 <= alpha = 0.021"), fixed = TRUE) expect_error(getDesignFisher(method = C_FISHER_METHOD_USER_DEFINED_ALPHA, userAlphaSpending = c(0.01, 0.02, 0.023), alpha = 0.02), paste0("'userAlphaSpending' = c(0.01, 0.02, 0.023) must be a vector that ", "satisfies the following condition: 0 <= alpha_1 <= .. <= alpha_3 <= alpha = 0.02"), fixed = TRUE) expect_equal(getDesignFisher(method = C_FISHER_METHOD_USER_DEFINED_ALPHA, userAlphaSpending = c(0.01, 0.02, 0.023))$alpha, 0.023) expect_error(getDesignFisher(method = C_FISHER_METHOD_USER_DEFINED_ALPHA), "Missing argument: parameter 'userAlphaSpending' must be specified in design", fixed = TRUE) expect_error(getDesignFisher(kMax = Inf), paste0("Argument out of bounds: 'kMax' (Inf) is out of bounds [1; ", C_KMAX_UPPER_BOUND_FISHER, "]"), fixed = TRUE) expect_error(getDesignFisher(kMax = -Inf), paste0("Argument out of bounds: 'kMax' (-Inf) is out of bounds [1; ", C_KMAX_UPPER_BOUND_FISHER, "]"), fixed = TRUE) expect_error(getDesignFisher(kMax = -Inf), "Argument out of bounds: 'kMax' (-Inf) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = -10), "Argument out of bounds: 'kMax' (-10) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = -9), "Argument out of bounds: 'kMax' (-9) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = -8), "Argument out of bounds: 'kMax' (-8) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = -7), "Argument out of bounds: 'kMax' (-7) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = -6), "Argument out of bounds: 'kMax' (-6) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = -5), "Argument out of bounds: 'kMax' (-5) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = -4), "Argument out of bounds: 'kMax' (-4) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = -3), "Argument out of bounds: 'kMax' (-3) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = -2), "Argument out of bounds: 'kMax' (-2) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = -1), "Argument out of bounds: 'kMax' (-1) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 0), 
"Argument out of bounds: 'kMax' (0) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 7), "Argument out of bounds: 'kMax' (7) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 8), "Argument out of bounds: 'kMax' (8) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 9), "Argument out of bounds: 'kMax' (9) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 10), "Argument out of bounds: 'kMax' (10) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 11), "Argument out of bounds: 'kMax' (11) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 12), "Argument out of bounds: 'kMax' (12) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 13), "Argument out of bounds: 'kMax' (13) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 14), "Argument out of bounds: 'kMax' (14) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 15), "Argument out of bounds: 'kMax' (15) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 16), "Argument out of bounds: 'kMax' (16) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = Inf), "Argument out of bounds: 'kMax' (Inf) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 2, informationRates = c(0.01, 0.02, 0.04, 0.05)), "Conflicting arguments: length of 'informationRates' (4) must be equal to 'kMax' (2)", fixed = TRUE) expect_error(getDesignFisher(kMax = 3, informationRates = c(0.01, 0.02, 0.04, 0.05)), "Conflicting arguments: length of 'informationRates' (4) must be equal to 'kMax' (3)", fixed = TRUE) expect_error(getDesignFisher(kMax = 5, informationRates = c(0.01, 0.02, 0.04, 0.05)), "Conflicting arguments: length of 'informationRates' (4) must be equal to 'kMax' (5)", fixed = TRUE) expect_error(getDesignFisher(kMax = 6, informationRates = c(0.01, 0.02, 0.04, 0.05)), "Conflicting arguments: length of 'informationRates' (4) must be equal to 'kMax' (6)", fixed = TRUE) expect_error(getDesignFisher(alpha0Vec = c(0, 1)), "Argument out of bounds: 'alpha0Vec' (0, 1) is out of bounds (0; 1]", fixed = TRUE) expect_error(getDesignFisher(alpha0Vec = c(0.1, 1.01)), "Argument out of bounds: 'alpha0Vec' (0.1, 1.01) is out of bounds (0; 1]", fixed = TRUE) }) rpact/tests/testthat.R0000644000176200001440000000007413352126153014512 0ustar liggesusers library(testthat) library(rpact) test_check("rpact") rpact/src/0000755000176200001440000000000013574442444012165 5ustar liggesusersrpact/src/f_simulation_survival.cpp0000644000176200001440000011171213504353366017315 0ustar liggesusers/** * * -- Simulation of survival data with group sequential and combination test -- * * This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
 *
 * File version: 1.0.1
 * Date: 09-05-2019
 * Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
 * Licensed under "GNU Lesser General Public License" version 3
 * License text can be found here: https://www.r-project.org/Licenses/LGPL-3
 *
 * RPACT company website: https://www.rpact.com
 * RPACT package website: https://www.rpact.org
 *
 * Contact us for information about our services: info@rpact.com
 *
 */

#include <Rcpp.h>
using namespace Rcpp;

// Element-wise sum of two vectors of equal length.
NumericVector vectorSum(NumericVector x, NumericVector y) {
	int n = x.size();
	NumericVector result = NumericVector(n, NA_REAL);
	for (int i = 0; i < n; i++) {
		result[i] = x[i] + y[i];
	}
	return result;
}

// Element-wise square root.
NumericVector vectorSqrt(NumericVector x) {
	int n = x.size();
	NumericVector result = NumericVector(n, NA_REAL);
	for (int i = 0; i < n; i++) {
		result[i] = sqrt(x[i]);
	}
	return result;
}

// Divides each entry of 'x' by the scalar 'value'.
NumericVector vectorDivide(NumericVector x, double value) {
	int n = x.size();
	NumericVector result = NumericVector(n, NA_REAL);
	for (int i = 0; i < n; i++) {
		result[i] = x[i] / value;
	}
	return result;
}

// Divides the entries of row 'rowIndex' of 'x' by the scalar 'value'.
// Note: the loop must run over the number of columns, not over x.size()
// (= nrow * ncol), which would read beyond the selected row.
NumericVector vectorDivide(NumericMatrix x, int rowIndex, double value) {
	int n = x.ncol();
	NumericVector result = NumericVector(n, NA_REAL);
	for (int i = 0; i < n; i++) {
		result[i] = x(rowIndex, i) / value;
	}
	return result;
}

//double vectorSum(NumericMatrix x, int rowIndex) {
//	double s = 0;
//	for (int i = 0; i < x.ncol(); i++) {
//		s += x(rowIndex, i);
//	}
//	return s;
//}

// Element-wise division; entries with a zero divisor remain NA.
NumericVector vectorDivide(NumericVector x, NumericVector y) {
	int n = x.size();
	NumericVector result = NumericVector(n, NA_REAL);
	for (int i = 0; i < n; i++) {
		if (y[i] != 0.0) {
			result[i] = x[i] / y[i];
		}
	}
	return result;
}

// Multiplies each entry of 'x' by the scalar 'multiplier'.
NumericVector vectorMultiply(NumericVector x, double multiplier) {
	int n = x.size();
	NumericVector result = NumericVector(n, NA_REAL);
	for (int i = 0; i < n; i++) {
		result[i] = x[i] * multiplier;
	}
	return result;
}

// Element-wise product of two vectors of equal length.
NumericVector vectorMultiply(NumericVector x, NumericVector y) {
	int n = x.size();
	NumericVector result = NumericVector(n, NA_REAL);
	for (int i = 0; i < n; i++) {
		result[i] = x[i] * y[i];
	}
	return result;
}

// Element-wise power x[i]^y[i].
NumericVector vectorPow(NumericVector x, NumericVector y) {
	int n = x.size();
	NumericVector result = NumericVector(n, NA_REAL);
	for (int i = 0; i < n; i++) {
		result[i] = pow(x[i], y[i]);
	}
	return result;
}

// Element-wise power x^y[i] for a scalar base.
NumericVector vectorPow(double x, NumericVector y) {
	int n = y.size();
	NumericVector result = NumericVector(n, NA_REAL);
	for (int i = 0; i < n; i++) {
		result[i] = pow(x, y[i]);
	}
	return result;
}

// Repeats each value of 'x' 'kMax' times, e.g., (a, b) -> (a, a, b, b) for kMax = 2.
NumericVector vectorRepEachValue(NumericVector x, int kMax) {
	int n = x.size();
	NumericVector result = NumericVector(n * kMax, NA_REAL);
	for (int i = 0; i < n; i++) {
		for (int j = 0; j < kMax; j++) {
			result[i * kMax + j] = x[i];
		}
	}
	return result;
}

// Product of all entries of 'x'; 0 for an empty vector.
double vectorProduct(NumericVector x) {
	int n = x.size();
	if (n == 0) {
		return 0;
	}
	if (n == 1) {
		return x[0];
	}
	double s = x[0];
	for (int i = 1; i < n; i++) {
		s *= x[i];
	}
	return s;
}

// Inner product of 'x' and 'y' (sum of the element-wise products).
double vectorProduct(NumericVector x, NumericVector y) {
	int n = x.size();
	double s = 0;
	for (int i = 0; i < n; i++) {
		s += x[i] * y[i];
	}
	return s;
}

// Rounds 'value' to 'digits' decimal places.
double round(double value, int digits) {
	double mult = std::pow(10.0, (double)digits);
	return round(value * mult) / mult;
}

// Determines the time point at which 'requiredStageEvents' events have been
// observed: an upper bound is found by repeated doubling, then the time is
// located by bisection.
double findObservationTime(
		NumericVector accrualTime,
		NumericVector survivalTime,
		NumericVector dropoutTime,
		double requiredStageEvents) {

	int numberOfSubjects = accrualTime.size();

	double upperBound = 1;
	double numberOfEvents;
	while (true) {
		numberOfEvents = 0;
		for (int i = 0; i < numberOfSubjects; i++) {
			if (accrualTime[i] + survivalTime[i] < upperBound && (R_IsNA(dropoutTime[i])
					|| dropoutTime[i] > survivalTime[i])) {
				numberOfEvents = numberOfEvents + 1;
			}
		}
		upperBound = 2 * upperBound;
		if (numberOfEvents > requiredStageEvents || upperBound > 1E12) {
			break;
		}
	}
	if (upperBound > 1E12) {
		return NA_REAL;
	}

	double lower = 0;
	double upper = upperBound;
	double time;
	while (true) {
		time = (lower + upper) / 2;
		numberOfEvents = 0;
		for (int i = 0; i < numberOfSubjects; i++) {
			if (accrualTime[i] + survivalTime[i] <= time
					&& (R_IsNA(dropoutTime[i]) || dropoutTime[i] > survivalTime[i])) {
				numberOfEvents = numberOfEvents + 1;
			}
		}
		if (numberOfEvents >= requiredStageEvents) {
			upper = time;
		} else {
			lower = time;
		}
		if (upper - lower < 1E-05) {
			break;
		}
	}
	if (numberOfEvents > requiredStageEvents) {
		time -= 1E-05;
	} else if (numberOfEvents < requiredStageEvents) {
		time += 1E-05;
	}
	return time;
}

// Log Rank Test
//
// This function calculates the logrank test statistic for the survival data set
// at a given time, i.e., it determines for each subject whether an event or a
// dropout was observed, calculates the time under risk, and the logrank statistic.
//
// @param accrualTime A numeric vector of subject accrual times
//
List logRankTest(NumericVector accrualTime, NumericVector survivalTime,
		NumericVector dropoutTime, IntegerVector treatmentGroup, double time,
		bool directionUpper, double thetaH0, bool returnRawData) {

	int numberOfSubjects = accrualTime.size();

	int subjectsT1 = 0;
	int subjectsT2 = 0;

	NumericVector timeUnderObservation = NumericVector(numberOfSubjects, 0.0);
	LogicalVector event = LogicalVector(numberOfSubjects, NA_LOGICAL);
	LogicalVector dropoutEvent = LogicalVector(numberOfSubjects, NA_LOGICAL);

	for (int i = 0; i < numberOfSubjects; i++) {
		if (accrualTime[i] > time) {
			// Subject is not recruited yet at 'time': mark it by negating the
			// treatment group (restored below via abs()) so that it is
			// excluded from the test statistic.
			treatmentGroup[i] = -treatmentGroup[i];
			event[i] = false;
			dropoutEvent[i] = false;
		} else {
			if (treatmentGroup[i] == 1) {
				subjectsT1++;
			} else if (treatmentGroup[i] == 2) {
				subjectsT2++;
			}

			if (treatmentGroup[i] > 0 && accrualTime[i] + survivalTime[i] < time
					&& (R_IsNA(dropoutTime[i]) || dropoutTime[i] > survivalTime[i])) {
				event[i] = true;
			} else {
				event[i] = false;
			}

			if (treatmentGroup[i] > 0 && accrualTime[i] + dropoutTime[i] < time
					&& !R_IsNA(dropoutTime[i]) && dropoutTime[i] < survivalTime[i]) {
				dropoutEvent[i] = true;
			} else {
				dropoutEvent[i] = false;
			}
		}

		if (event[i]) {
			timeUnderObservation[i] = survivalTime[i];
		} else if (dropoutEvent[i]) {
			timeUnderObservation[i] = dropoutTime[i];
		} else {
			timeUnderObservation[i] = time - accrualTime[i];
		}
	}

	int numberOfSubjets = subjectsT1 + subjectsT2;

	NumericVector timeUnderObservationSorted = clone(timeUnderObservation).sort();
	IntegerVector sortedIndex = match(timeUnderObservationSorted, timeUnderObservation);
	sortedIndex = sortedIndex - 1;
	LogicalVector eventSorted = event[sortedIndex];
	IntegerVector treatmentGroupSorted = treatmentGroup[sortedIndex];
	eventSorted = eventSorted[treatmentGroupSorted > 0];
	treatmentGroupSorted = treatmentGroupSorted[treatmentGroupSorted > 0];

	treatmentGroup = abs(treatmentGroup);

	double numerator = 0;
	double denominator = 0;
	int events1 = 0;
	int events2 = 0;
	for (int i = 0; i < eventSorted.size(); i++) {
		if (eventSorted[i]) {
			if (treatmentGroupSorted[i] == 1) {
				if (subjectsT1 + subjectsT2 > 0) {
					numerator -= subjectsT2 / (thetaH0 * subjectsT1 + subjectsT2);
				}
				events1++;
			} else if (treatmentGroupSorted[i] == 2) {
				if (subjectsT1 + subjectsT2 > 0) {
					numerator += 1 - subjectsT2 / (thetaH0 * subjectsT1 + subjectsT2);
				}
				events2++;
			}
			if (subjectsT1 + subjectsT2 > 0) {
				denominator += thetaH0 * subjectsT1 * subjectsT2 /
					pow(thetaH0 * subjectsT1 + subjectsT2, 2);
			}
		}
		if
(treatmentGroupSorted[i] == 1) { subjectsT1--; } else if (treatmentGroupSorted[i] == 2) { subjectsT2--; } } double logRank; if (denominator > 0) { logRank = -numerator / sqrt(denominator); } else { logRank = R_NegInf; } if (!directionUpper) { logRank = -logRank; } NumericVector out(4); out[0] = logRank; out[1] = numberOfSubjets; out[2] = events1; out[3] = events2; if (returnRawData) { return List::create( _["result"] = out, _["timeUnderObservation"] = timeUnderObservation, _["event"] = event, _["dropoutEvent"] = dropoutEvent ); } return List::create( _["result"] = out ); } NumericVector getIndependentIncrements(int stage, NumericVector eventsPerStage, NumericVector logRankOverStages) { NumericVector independentIncrements = NumericVector(stage, NA_REAL); independentIncrements[0] = logRankOverStages[0]; const IntegerVector indices1 = seq(0, stage - 2); const IntegerVector indices2 = seq(1, stage - 1); independentIncrements[indices2] = vectorDivide( vectorMultiply(vectorSqrt(eventsPerStage[indices2]), logRankOverStages[indices2]) - vectorMultiply(vectorSqrt(eventsPerStage[indices1]), logRankOverStages[indices1]), vectorSqrt(eventsPerStage[indices2] - eventsPerStage[indices1])); return independentIncrements; } // Get Test Statistics // @param designNumber The design number: // 1: Group sequential design // 2: Inverse normal design // 3: Fisher design // NumericVector getTestStatistics(int stage, int designNumber, NumericVector informationRates, NumericVector eventsPerStage, NumericVector logRankOverStages) { // Group sequential design if (designNumber == 1) { return NumericVector::create(logRankOverStages[stage - 1], NA_REAL); } // Inverse normal design if (designNumber == 2) { if (stage == 1) { return NumericVector::create(logRankOverStages[0], NA_REAL); } NumericVector independentIncrements = getIndependentIncrements(stage, eventsPerStage, logRankOverStages); const IntegerVector indices1 = seq(0, stage - 2); const IntegerVector indices2 = seq(1, stage - 1); double value = (sqrt(informationRates[0]) * independentIncrements[0] + vectorProduct(vectorSqrt(informationRates[indices2] - informationRates[indices1]), independentIncrements[indices2])) / sqrt(informationRates[stage - 1]); return NumericVector::create(value, NA_REAL); } // Fisher design NumericVector independentIncrements = NumericVector(stage, NA_REAL); independentIncrements[0] = logRankOverStages[0]; NumericVector weightFisher = NumericVector(stage, NA_REAL); weightFisher[0] = 1; if (stage > 1) { independentIncrements = getIndependentIncrements(stage, eventsPerStage, logRankOverStages); const IntegerVector indices1 = seq(0, stage - 2); const IntegerVector indices2 = seq(1, stage - 1); weightFisher[indices2] = vectorDivide( vectorSqrt(informationRates[indices2] - informationRates[indices1]), sqrt(informationRates[0])); } const IntegerVector indices0 = seq(0, stage - 1); double value = vectorProduct(vectorPow(1 - pnorm(as(independentIncrements[indices0])), as(weightFisher[indices0]))); double pValueSeparate = 1 - ::Rf_pnorm5(independentIncrements[stage - 1], 0.0, 1.0, 1, 0); return NumericVector::create(value, pValueSeparate); } // Get Recalculated Event Sizes // @param designNumber The design number: // 1: Group sequential design // 2: Inverse normal design // 3: Fisher design // NumericVector getRecalculatedEventSizes(int designNumber, int stage, int kMax, NumericVector criticalValues, NumericVector informationRates, double conditionalPower, NumericVector plannedEvents, double thetaH1, NumericVector eventsPerStage, NumericVector 
		logRankOverStages, NumericVector testStatisticOverStages,
		NumericVector minNumberOfEventsPerStage, NumericVector maxNumberOfEventsPerStage,
		bool directionUpper, double allocation1, double allocation2) {

	double requiredStageEvents = plannedEvents[stage - 1];
	if (stage == 1) {
		NumericVector result = NumericVector(3, NA_REAL);
		result[0] = requiredStageEvents;
		return result;
	}

	// The effect size used is either estimated from the test statistic or pre-fixed
	double estimatedTheta;
	if (R_IsNA(thetaH1)) {
		estimatedTheta = exp(logRankOverStages[stage - 2] *
			(1 + allocation1 / allocation2) /
			sqrt(allocation1 / allocation2 * eventsPerStage[stage - 2]));
	} else {
		estimatedTheta = thetaH1;
		if (!directionUpper) {
			estimatedTheta = 1 / estimatedTheta;
		}
	}

	// Conditional critical value to reject the null hypothesis at the last stage of the trial
	double conditionalCriticalValue;
	if (designNumber == 3) { // Fisher design
		conditionalCriticalValue = ::Rf_qnorm5(
			1 - pow(criticalValues[stage - 1] / testStatisticOverStages[stage - 2],
				1 / sqrt((informationRates[stage - 1] - informationRates[stage - 2]) /
					informationRates[0])),
			0.0, 1.0, 1, 0);
	} else {
		// Group sequential and inverse normal designs: based on the
		// independent-increment decomposition of the test statistic
		conditionalCriticalValue = (sqrt(informationRates[stage - 1]) * criticalValues[stage - 1] -
			testStatisticOverStages[stage - 2] * sqrt(informationRates[stage - 2])) /
			sqrt(informationRates[stage - 1] - informationRates[stage - 2]);
	}

	if (!R_IsNA(conditionalPower)) {
		double theta;
		theta = max(NumericVector::create(1 + 1E-12, estimatedTheta));
		requiredStageEvents = pow(max(NumericVector::create(0, conditionalCriticalValue +
			::Rf_qnorm5(conditionalPower, 0.0, 1.0, 1, 0))), 2) *
			pow(1 + allocation1 / allocation2, 2) * allocation2 / allocation1 / pow(log(theta), 2);
		requiredStageEvents = min(NumericVector::create(
			max(NumericVector::create(minNumberOfEventsPerStage[stage - 1], requiredStageEvents)),
			maxNumberOfEventsPerStage[stage - 1])) + eventsPerStage[stage - 2];
	}

	NumericVector result = NumericVector(3, NA_REAL);
	result[0] = requiredStageEvents;
	result[1] = conditionalCriticalValue;
	result[2] = estimatedTheta;
	return result;
}

NumericMatrix getSimulationStepResultsSurvival(
		int designNumber, int kMax, int sided,
		NumericVector criticalValues, NumericVector informationRates,
		double conditionalPower, NumericVector plannedEvents, double thetaH1,
		NumericVector minNumberOfEventsPerStage, NumericVector maxNumberOfEventsPerStage,
		bool directionUpper, double allocation1, double allocation2,
		NumericVector accrualTime, NumericVector survivalTime, NumericVector dropoutTime,
		IntegerVector treatmentGroup, double thetaH0,
		NumericVector futilityBounds, NumericVector alpha0Vec) {

	NumericVector eventsPerStage = NumericVector(kMax, 0.0);
	NumericVector logRankOverStages = NumericVector(kMax, 0.0);
	NumericVector testStatisticOverStages = NumericVector(kMax, 0.0);
	NumericVector analysisTime = NumericVector(kMax, 0.0);
	NumericVector subjects = NumericVector(kMax, 0.0);
	NumericVector expectedNumberOfEvents1 = NumericVector(kMax, 0.0);
	NumericVector expectedNumberOfEvents2 = NumericVector(kMax, 0.0);
	NumericVector expectedNumberOfEvents = NumericVector(kMax, 0.0);
	NumericVector rejections = NumericVector(kMax, 0.0);
	NumericVector eventsNotAchieved = NumericVector(kMax, 0.0);
	NumericVector futilityStops = NumericVector(kMax, 0.0);
	NumericVector duration = NumericVector(kMax, 0.0);
	NumericVector iterations = NumericVector(kMax, 0.0);
	NumericVector hazardRates1 = NumericVector(kMax, 0.0);
	NumericVector hazardRates2 = NumericVector(kMax, 0.0);
	NumericVector hazardRatiosEstimate =
NumericVector(kMax, 0.0); NumericVector observationTimePerStage = NumericVector(kMax, NA_REAL); NumericVector conditionalPowerAchieved = NumericVector(kMax, 0.0); for (int k = 1; k <= kMax; k++) { NumericVector recalculatedEventSizes = getRecalculatedEventSizes( designNumber, k, kMax, criticalValues, informationRates, conditionalPower, plannedEvents, thetaH1, eventsPerStage, logRankOverStages, testStatisticOverStages, minNumberOfEventsPerStage, maxNumberOfEventsPerStage, directionUpper, allocation1, allocation2); double requiredStageEvents = recalculatedEventSizes[0]; double observationTime = findObservationTime(accrualTime, survivalTime, dropoutTime, requiredStageEvents); if (R_IsNA(observationTime)) { eventsNotAchieved[k - 1]++; break; } if (k > 1) { double conditionalCriticalValue = recalculatedEventSizes[1]; double theta = recalculatedEventSizes[2]; conditionalPowerAchieved[k - 1] = 1 - ::Rf_pnorm5(conditionalCriticalValue - log(theta) * sqrt(requiredStageEvents - eventsPerStage[k - 2]) * sqrt(allocation1 / allocation2) / (1 + allocation1 / allocation2), 0.0, 1.0, 1, 0); } else { conditionalPowerAchieved[k - 1] = NA_REAL; } observationTimePerStage[k - 1] = observationTime; List result = logRankTest( accrualTime, survivalTime, dropoutTime, treatmentGroup, observationTime, directionUpper, thetaH0, false); NumericVector survivalResult = result["result"]; double logRank = survivalResult[0]; double numberOfSubjects = survivalResult[1]; double events1 = survivalResult[2]; double events2 = survivalResult[3]; hazardRates1[k - 1] = NA_REAL; hazardRates2[k - 1] = NA_REAL; hazardRatiosEstimate[k - 1] = NA_REAL; eventsPerStage[k - 1] = events1 + events2; logRankOverStages[k - 1] = logRank; NumericVector testStatistic = getTestStatistics(k, designNumber, informationRates, eventsPerStage, logRankOverStages); testStatisticOverStages[k - 1] = testStatistic[0]; int trialStopEventCounter = 0; if (designNumber == 3) { // Fisher design if (testStatistic[0] <= criticalValues[k - 1]) { rejections[k - 1]++; trialStopEventCounter++; } if (k < kMax && (testStatistic[1] >= alpha0Vec[k - 1])) { futilityStops[k - 1]++; trialStopEventCounter++; } } else { // all other designs if ((sided == 1 && testStatistic[0] >= criticalValues[k - 1]) || (sided == 2 && std::abs(testStatistic[0]) >= criticalValues[k - 1])) { rejections[k - 1]++; trialStopEventCounter++; } if (sided == 1 && k < kMax && testStatistic[0] <= futilityBounds[k - 1]) { futilityStops[k - 1]++; trialStopEventCounter++; } } if (trialStopEventCounter > 0) { for (int i = 0; i < trialStopEventCounter; i++) { duration[k - 1] += observationTime; subjects[k - 1] += numberOfSubjects; } } else { subjects[k - 1] += numberOfSubjects; if (k == kMax) { duration[k - 1] += observationTime; } } expectedNumberOfEvents1[k - 1] += events1; expectedNumberOfEvents2[k - 1] += events2; expectedNumberOfEvents[k - 1] += events1 + events2; analysisTime[k - 1] += observationTime; iterations[k - 1]++; if (trialStopEventCounter > 0) { break; } } NumericMatrix result(kMax, 17); result(_, 0) = analysisTime; result(_, 1) = subjects; result(_, 2) = expectedNumberOfEvents1; result(_, 3) = expectedNumberOfEvents2; result(_, 4) = expectedNumberOfEvents; result(_, 5) = rejections; result(_, 6) = eventsNotAchieved; result(_, 7) = futilityStops; result(_, 8) = duration; result(_, 9) = iterations; result(_, 10) = testStatisticOverStages; result(_, 11) = logRankOverStages; result(_, 12) = hazardRates1; result(_, 13) = hazardRates2; result(_, 14) = hazardRatiosEstimate; result(_, 15) = 
observationTimePerStage; result(_, 16) = conditionalPowerAchieved; return result; } /** * Weibull: (-log(1 - runif(0.0, 1.0)))^(1 / kappa) / rate */ // [[Rcpp::export]] double getRandomSurvivalDistribution(double rate, double kappa) { return pow(-log(1 - R::runif(0.0, 1.0)), 1 / kappa) / rate; } NumericMatrix getExtendedSurvivalDataSet(IntegerVector treatmentGroup, int maxNumberOfSubjects, double lambda1, double lambda2, double phi1, double phi2, double kappa) { NumericVector survivalTime = NumericVector(maxNumberOfSubjects, NA_REAL); NumericVector dropoutTime = NumericVector(maxNumberOfSubjects, NA_REAL); for (int i = 0; i < maxNumberOfSubjects; i++) { if (treatmentGroup[i] == 1) { survivalTime[i] = getRandomSurvivalDistribution(lambda1, kappa); if (phi1 > 0) { dropoutTime[i] = getRandomSurvivalDistribution(phi1, 1); } } else { survivalTime[i] = getRandomSurvivalDistribution(lambda2, kappa); if (phi2 > 0) { dropoutTime[i] = getRandomSurvivalDistribution(phi2, 1); } } } NumericMatrix result(maxNumberOfSubjects, 2); result(_, 0) = survivalTime; result(_, 1) = dropoutTime; return result; } // [[Rcpp::export]] double getRandomPiecewiseExponentialDistribution(NumericVector cdfValues, NumericVector piecewiseLambda, NumericVector piecewiseSurvivalTime) { double y; NumericVector s; double p = R::runif(0.0, 1.0); int n = piecewiseSurvivalTime.size(); if (n == 0) { return -log(1 - p) / piecewiseLambda[0]; } for (int i = 0; i < n; i++) { if (p <= cdfValues[i]) { if (i == 0) { return -log(1 - p) / piecewiseLambda[0]; } y = piecewiseLambda[0] * piecewiseSurvivalTime[0]; if (i > 1) { s = vectorSum(piecewiseSurvivalTime[seq(1, i - 1)], -piecewiseSurvivalTime[seq(0, i - 2)]); y += vectorProduct(piecewiseLambda[seq(1, i - 1)], s); } return piecewiseSurvivalTime[i - 1] - (log(1 - p) + y) / piecewiseLambda[i]; } } if (n == 1) { return piecewiseSurvivalTime[0] - (log(1 - p) + piecewiseLambda[0] * piecewiseSurvivalTime[0]) / piecewiseLambda[1]; } s = vectorSum(piecewiseSurvivalTime[seq(1, n - 1)], -piecewiseSurvivalTime[seq(0, n - 2)]); y = piecewiseLambda[0] * piecewiseSurvivalTime[0] + vectorProduct(piecewiseLambda[seq(1, n - 1)], s); return piecewiseSurvivalTime[n - 1] - (log(1 - p) + y) / piecewiseLambda[n]; } NumericMatrix getExtendedSurvivalDataSet(IntegerVector treatmentGroup, int maxNumberOfSubjects, NumericVector piecewiseSurvivalTime, NumericVector cdfValues1, NumericVector cdfValues2, NumericVector lambdaVec1, NumericVector lambdaVec2, double phi1, double phi2) { NumericVector survivalTime = NumericVector(maxNumberOfSubjects, NA_REAL); NumericVector dropoutTime = NumericVector(maxNumberOfSubjects, NA_REAL); for (int i = 0; i < maxNumberOfSubjects; i++) { if (treatmentGroup[i] == 1) { survivalTime[i] = getRandomPiecewiseExponentialDistribution( cdfValues1, lambdaVec1, piecewiseSurvivalTime); if (phi1 > 0) { dropoutTime[i] = getRandomPiecewiseExponentialDistribution( cdfValues1, rep(phi1, lambdaVec1.size()), piecewiseSurvivalTime); } } else { survivalTime[i] = getRandomPiecewiseExponentialDistribution(cdfValues2, lambdaVec2, piecewiseSurvivalTime); if (phi2 > 0) { dropoutTime[i] = getRandomPiecewiseExponentialDistribution( cdfValues2, rep(phi2, lambdaVec2.size()), piecewiseSurvivalTime); } } } NumericMatrix result(maxNumberOfSubjects, 2); result(_, 0) = survivalTime; result(_, 1) = dropoutTime; return result; } void assertArgumentsAreValid( int kMax, NumericVector plannedEvents, NumericVector minNumberOfEventsPerStage, NumericVector maxNumberOfEventsPerStage ) { if (kMax > 
minNumberOfEventsPerStage.size()) { throw Rcpp::exception(tfm::format( "'minNumberOfEventsPerStage' must have length %s (is %s)", kMax, minNumberOfEventsPerStage.size()).c_str()); } if (kMax > maxNumberOfEventsPerStage.size()) { throw Rcpp::exception(tfm::format( "'maxNumberOfEventsPerStage' must have length %s (is %s)", kMax, maxNumberOfEventsPerStage.size()).c_str()); } if (kMax > plannedEvents.size()) { throw Rcpp::exception(tfm::format( "'plannedEvents' must have length %s (is %s)", kMax, plannedEvents.size()).c_str()); } } void vectorSumC(int i, int j, int kMax, double* x, NumericMatrix y) { for (int k = 0; k < kMax; k++) { x[i * kMax + k] += y(k, j); } } void vectorInitC(int i, int kMax, double* x, double value) { for (int k = 0; k < kMax; k++) { x[i * kMax + k] = value; } } void logDebug(std::string s) { Rcout << s << std::endl; } bool isPiecewiseExponentialSurvivalEnabled(NumericVector lambdaVec2) { if (lambdaVec2.size() == 0) { return false; } for (int i = 0; i < lambdaVec2.size(); i++) { if (R_IsNA(lambdaVec2[i])) { return false; } } return true; } double getLambdaByPi(double pi, double eventTime, double kappa) { return pow(-log(1 - pi), 1 / kappa) / eventTime; } double getPiByLambda(double lambda, double eventTime, double kappa) { return 1 - exp(-pow(lambda * eventTime, kappa)); } double getHazardRatio(double pi1, double pi2, double eventTime, double kappa) { return pow(getLambdaByPi(pi1, eventTime, kappa) / getLambdaByPi(pi2, eventTime, kappa), kappa); } /* Get Simulation Results * * This function calculates the simulation results for survival data. * * @param kappa The kappa value for the Weibull distribution; * if kappa = 1, then the exponential distribution will be used for simulation. */ // [[Rcpp::export]] List getSimulationSurvivalCpp( int designNumber, int kMax, int sided, NumericVector criticalValues, NumericVector informationRates, double conditionalPower, NumericVector plannedEvents, double thetaH1, NumericVector minNumberOfEventsPerStage, NumericVector maxNumberOfEventsPerStage, bool directionUpper, double allocation1, double allocation2, NumericVector accrualTime, IntegerVector treatmentGroup, double thetaH0, NumericVector futilityBounds, NumericVector alpha0Vec, NumericVector pi1Vec, double pi2, double eventTime, NumericVector piecewiseSurvivalTime, NumericVector cdfValues1, NumericVector cdfValues2, NumericVector lambdaVec1, NumericVector lambdaVec2, NumericVector phi, int maxNumberOfSubjects, int maxNumberOfIterations, int maxNumberOfRawDatasetsPerStage, double kappa) { bool pwExpEnabled = isPiecewiseExponentialSurvivalEnabled(lambdaVec2); int n = 1; if (!pwExpEnabled) { n = pi1Vec.size(); } if (n < 1) { throw Rcpp::exception(tfm::format( "'pi1Vec' must have minimum length %s (is %s)", 1, pi1Vec.size()).c_str()); } int sumVectorLength = kMax * n; IntegerVector stages = IntegerVector(sumVectorLength, NA_INTEGER); NumericVector pi1Column = NumericVector(sumVectorLength, 0.0); NumericVector hazardRatioColumn = NumericVector(sumVectorLength, 0.0); NumericVector analysisTimeSum = NumericVector(sumVectorLength, 0.0); NumericVector subjectsSum = NumericVector(sumVectorLength, 0.0); NumericVector eventsSum = NumericVector(sumVectorLength, 0.0); NumericVector rejectionsSum = NumericVector(sumVectorLength, 0.0); NumericVector eventsNotAchievedSum = NumericVector(sumVectorLength, 0.0); NumericVector futilityStopsSum = NumericVector(sumVectorLength, 0.0); NumericVector durationsSum = NumericVector(sumVectorLength, 0.0); NumericVector iterationsSum = 
NumericVector(sumVectorLength, 0.0); NumericVector conditionalPowerAchievedSum = NumericVector(sumVectorLength, 0.0); int simResultsVectorLength = sumVectorLength * maxNumberOfIterations; IntegerVector iterationNumbers = IntegerVector(simResultsVectorLength, NA_INTEGER); IntegerVector stageNumbers = IntegerVector(simResultsVectorLength, NA_INTEGER); NumericVector pi1Values = NumericVector(simResultsVectorLength, NA_REAL); NumericVector hazardRatios = NumericVector(simResultsVectorLength, NA_REAL); NumericVector analysisTime = NumericVector(simResultsVectorLength, NA_REAL); NumericVector subjects = NumericVector(simResultsVectorLength, NA_REAL); NumericVector events1 = NumericVector(simResultsVectorLength, NA_REAL); NumericVector events2 = NumericVector(simResultsVectorLength, NA_REAL); NumericVector events = NumericVector(simResultsVectorLength, NA_REAL); NumericVector rejections = NumericVector(simResultsVectorLength, NA_REAL); NumericVector eventsNotAchieved = NumericVector(simResultsVectorLength, NA_REAL); NumericVector futilityStops = NumericVector(simResultsVectorLength, NA_REAL); NumericVector testStatistics = NumericVector(simResultsVectorLength, NA_REAL); NumericVector logRankStatistics = NumericVector(simResultsVectorLength, NA_REAL); NumericVector hazardRates1 = NumericVector(simResultsVectorLength, NA_REAL); NumericVector hazardRates2 = NumericVector(simResultsVectorLength, NA_REAL); NumericVector hazardRatiosEstimate = NumericVector(simResultsVectorLength, NA_REAL); NumericVector conditionalPowerAchieved = NumericVector(simResultsVectorLength, NA_REAL); // raw datasets per stage int rawDataVectorLength = maxNumberOfRawDatasetsPerStage * n * kMax *maxNumberOfSubjects; IntegerVector rawDataPerStage = IntegerVector(kMax, 0); NumericVector rawDataIterationNumbers = NumericVector(rawDataVectorLength, NA_REAL); IntegerVector rawDataStageNumbers = IntegerVector(rawDataVectorLength, NA_INTEGER); NumericVector rawDataPi1Values = NumericVector(rawDataVectorLength, NA_REAL); IntegerVector rawDataSubjectIds = IntegerVector(rawDataVectorLength, NA_INTEGER); NumericVector rawDataAccrualTime = NumericVector(rawDataVectorLength, NA_REAL); IntegerVector rawDataTreatmentGroups = IntegerVector(rawDataVectorLength, NA_INTEGER); NumericVector rawDataSurvivalTime = NumericVector(rawDataVectorLength, NA_REAL); NumericVector rawDataDropoutTime = NumericVector(rawDataVectorLength, NA_REAL); NumericVector rawDataObservationTime = NumericVector(rawDataVectorLength, NA_REAL); NumericVector rawDataTimeUnderObservation = NumericVector(rawDataVectorLength, NA_REAL); LogicalVector rawDataEvent = LogicalVector(rawDataVectorLength, NA_LOGICAL); LogicalVector rawDataDropoutEvent = LogicalVector(rawDataVectorLength, NA_LOGICAL); IntegerVector rawDataCensorIndicator = IntegerVector(rawDataVectorLength, NA_INTEGER); NumericMatrix survivalDataSet; int index = 0; for (int pi1Index = 0; pi1Index < n; pi1Index++) { double pi1 = NA_REAL; double hazardRatio = NA_REAL; double lambda1 = NA_REAL; double lambda2 = NA_REAL; if (!pwExpEnabled) { pi1 = pi1Vec[pi1Index]; lambda1 = getLambdaByPi(pi1, eventTime, kappa); lambda2 = getLambdaByPi(pi2, eventTime, kappa); hazardRatio = pow(lambda1 / lambda2, kappa); } for (int k = 0; k < kMax; k++) { stages[pi1Index * kMax + k] = k + 1; } vectorInitC(pi1Index, kMax, REAL(pi1Column), pi1); vectorInitC(pi1Index, kMax, REAL(hazardRatioColumn), hazardRatio); for (int iterationIndex = 0; iterationIndex < maxNumberOfIterations; iterationIndex++) { if (!pwExpEnabled) { survivalDataSet = 
getExtendedSurvivalDataSet( treatmentGroup, maxNumberOfSubjects, lambda1, lambda2, phi[0], phi[1], kappa); } else { survivalDataSet = getExtendedSurvivalDataSet(treatmentGroup, maxNumberOfSubjects, piecewiseSurvivalTime, cdfValues1, cdfValues2, lambdaVec1, lambdaVec2, phi[0], phi[1]); } NumericVector survivalTime = survivalDataSet(_, 0); NumericVector dropoutTime = survivalDataSet(_, 1); NumericMatrix stepResults = getSimulationStepResultsSurvival( designNumber, kMax, sided, criticalValues, informationRates, conditionalPower, plannedEvents, thetaH1, minNumberOfEventsPerStage, maxNumberOfEventsPerStage, directionUpper, allocation1, allocation2, accrualTime, survivalTime, dropoutTime, treatmentGroup, thetaH0, futilityBounds, alpha0Vec); vectorSumC(pi1Index, 0, kMax, REAL(analysisTimeSum), stepResults); vectorSumC(pi1Index, 1, kMax, REAL(subjectsSum), stepResults); vectorSumC(pi1Index, 4, kMax, REAL(eventsSum), stepResults); vectorSumC(pi1Index, 5, kMax, REAL(rejectionsSum), stepResults); vectorSumC(pi1Index, 6, kMax, REAL(eventsNotAchievedSum), stepResults); vectorSumC(pi1Index, 7, kMax, REAL(futilityStopsSum), stepResults); vectorSumC(pi1Index, 8, kMax, REAL(durationsSum), stepResults); vectorSumC(pi1Index, 9, kMax, REAL(iterationsSum), stepResults); vectorSumC(pi1Index, 16, kMax, REAL(conditionalPowerAchievedSum), stepResults); // conditionalPowerAchieved // get data for (int k = 0; k < kMax; k++) { if (stepResults(k, 9) > 0) { iterationNumbers[index] = iterationIndex + 1; stageNumbers[index] = k + 1; pi1Values[index] = pi1; hazardRatios[index] = hazardRatio; analysisTime[index] = stepResults(k, 0); subjects[index] = stepResults(k, 1); events1[index] = stepResults(k, 2); events2[index] = stepResults(k, 3); events[index] = stepResults(k, 4); rejections[index] = stepResults(k, 5); eventsNotAchieved[index] = stepResults(k, 6); futilityStops[index] = stepResults(k, 7); testStatistics[index] = stepResults(k, 10); logRankStatistics[index] = stepResults(k, 11); hazardRates1[index] = stepResults(k, 12); hazardRates2[index] = stepResults(k, 13); hazardRatiosEstimate[index] = stepResults(k, 14); conditionalPowerAchieved[index] = stepResults(k, 16); index++; } } // get raw datasets per stage if (maxNumberOfRawDatasetsPerStage > 0) { for (int k = kMax - 1; k >= 0; k--) { if (rawDataPerStage[k] < maxNumberOfRawDatasetsPerStage && stepResults(k, 9) > 0) { int start = k * maxNumberOfSubjects + pi1Index * kMax * maxNumberOfSubjects + rawDataPerStage[k] * n * kMax * maxNumberOfSubjects; double observationTime = stepResults(k, 15); if (R_IsNA(observationTime)) { break; } List logRankResult = logRankTest( accrualTime, survivalTime, dropoutTime, treatmentGroup, observationTime, directionUpper, thetaH0, true); NumericVector timeUnderObservation = logRankResult["timeUnderObservation"]; LogicalVector event = logRankResult["event"]; LogicalVector dropoutEvent = logRankResult["dropoutEvent"]; for (int i = 0; i < maxNumberOfSubjects; i++) { rawDataPi1Values[start + i] = pi1; rawDataIterationNumbers[start + i] = iterationIndex + 1; rawDataStageNumbers[start + i] = k + 1; rawDataSubjectIds[start + i] = i + 1; rawDataAccrualTime[start + i] = accrualTime[i]; rawDataTreatmentGroups[start + i] = treatmentGroup[i]; rawDataSurvivalTime[start + i] = survivalTime[i]; rawDataDropoutTime[start + i] = dropoutTime[i]; rawDataObservationTime[start + i] = observationTime; rawDataTimeUnderObservation[start + i] = timeUnderObservation[i]; rawDataEvent[start + i] = event[i]; rawDataDropoutEvent[start + i] = dropoutEvent[i]; if 
(survivalTime[i] >= dropoutTime[i]) {
							rawDataCensorIndicator[start + i] = 0;
						} else {
							rawDataCensorIndicator[start + i] = 1;
						}
					}
					rawDataPerStage[k]++;
					break;
				}
			}
		}
	}
}

NumericVector overallRejections = NumericVector(n, 0.0);
NumericVector overallFutilityStops = NumericVector(n, 0.0);
NumericVector duration = NumericVector(n, 0.0);
NumericVector rejectionsRelative = vectorDivide(rejectionsSum, maxNumberOfIterations);
NumericVector futilityStopsRelative = vectorDivide(futilityStopsSum, maxNumberOfIterations);
for (int i = 0; i < n; i++) {
	double s1 = 0;
	double s2 = 0;
	double s3 = 0;
	for (int j = 0; j < kMax; j++) {
		s1 += rejectionsRelative[i * kMax + j];
		s2 += futilityStopsRelative[i * kMax + j];
		s3 += durationsSum[i * kMax + j];
	}
	overallRejections[i] = s1;
	overallFutilityStops[i] = s2;
	duration[i] = s3 / maxNumberOfIterations;
}
DataFrame overview = DataFrame::create(
	Named("stages") = stages,
	Named("pi2") = NumericVector(sumVectorLength, pi2),
	Named("pi1") = pi1Column,
	Named("hazardRatioEstimate1") = hazardRatioColumn,
	Named("iterations") = iterationsSum,
	Named("eventsPerStage") = vectorDivide(eventsSum, iterationsSum),
	Named("eventsNotAchieved") = vectorDivide(eventsNotAchievedSum, maxNumberOfIterations),
	Named("numberOfSubjects") = vectorDivide(subjectsSum, iterationsSum),
	Named("rejectPerStage") = rejectionsRelative,
	Named("overallReject") = vectorRepEachValue(overallRejections, kMax),
	Named("futilityPerStage") = futilityStopsRelative,
	Named("futilityStop") = vectorRepEachValue(overallFutilityStops, kMax),
	Named("analysisTime") = vectorDivide(analysisTimeSum, iterationsSum),
	Named("studyDuration") = vectorRepEachValue(duration, kMax),
	Named("conditionalPowerAchieved") = vectorDivide(conditionalPowerAchievedSum, iterationsSum)
);
DataFrame data = DataFrame::create(
	Named("iterationNumber") = iterationNumbers,
	Named("stageNumber") = stageNumbers,
	Named("pi1") = pi1Values,
	Named("pi2") = NumericVector(simResultsVectorLength, pi2),
	Named("hazardRatio") = hazardRatios,
	Named("analysisTime") = analysisTime,
	Named("numberOfSubjects") = subjects,
	Named("eventsPerStage1") = events1,
	Named("eventsPerStage2") = events2,
	Named("eventsPerStage") = events,
	Named("rejectPerStage") = rejections,
	Named("eventsNotAchieved") = eventsNotAchieved,
	Named("futilityPerStage") = futilityStops,
	Named("testStatistic") = testStatistics,
	Named("logRankStatistic") = logRankStatistics,
	Named("conditionalPowerAchieved") = conditionalPowerAchieved
);
if (maxNumberOfRawDatasetsPerStage > 0) {
	DataFrame rawData = DataFrame::create(
		Named("iterationNumber") = rawDataIterationNumbers,
		Named("stopStage") = rawDataStageNumbers,
		Named("pi1") = rawDataPi1Values,
		Named("pi2") = NumericVector(rawDataVectorLength, pi2),
		Named("subjectId") = rawDataSubjectIds,
		Named("accrualTime") = rawDataAccrualTime,
		Named("treatmentGroup") = rawDataTreatmentGroups,
		Named("survivalTime") = rawDataSurvivalTime,
		Named("dropoutTime") = rawDataDropoutTime,
		Named("observationTime") = rawDataObservationTime,
		Named("timeUnderObservation") = rawDataTimeUnderObservation,
		Named("event") = rawDataEvent,
		Named("dropoutEvent") = rawDataDropoutEvent,
		Named("censorIndicator") = rawDataCensorIndicator
	);
	return List::create(
		_["overview"] = overview,
		_["data"] = data,
		_["rawData"] = rawData
	);
}
return List::create(
	_["overview"] = overview,
	_["data"] = data
);
}
rpact/src/rpact.c0000644000176200001440000000415613425756316013445 0ustar liggesusers#include <R.h>
#include <Rinternals.h>
#include <Rmath.h>
#include <math.h>
//#include <R_ext/BLAS.h> /* for dgemm */
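/*
 * dnorm2: density of the normal distribution with the given mean and
 * standard deviation, evaluated directly in C (no call back into R)
 */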
double dnorm2(const double x, const double mean, const double stDev) {
	static const double inv_sqrt_2pi = 0.3989422804014327;
	double a = (x - mean) / stDev;
	return inv_sqrt_2pi / stDev * exp(-0.5 * a * a);
}

SEXP R_dnorm(SEXP x, SEXP mean, SEXP stDev) {
	double result = dnorm2(REAL(x)[0], REAL(mean)[0], REAL(stDev)[0]);
	return ScalarReal(result);
}

/**
 * k must be >= 3
 */
double getDensityValue(double x, int k, double *informationRates,
		double *epsilonVec, double *x2, double *dn2, int n) {
	int i;
	double resultValue;
	double prod;
	double dnormValue;
	k = k - 1;
	double part1 = sqrt(informationRates[k - 1] / epsilonVec[k - 1]);
	double sqrtInfRates1 = sqrt(informationRates[k - 1]);
	double sqrtInfRates2 = sqrt(informationRates[k - 2]);
	const double mean = 0;
	const double stDev = 1;
	double prod1 = x * sqrtInfRates1;
	double divisor = sqrt(epsilonVec[k - 1]);
	resultValue = 0;
	for (i = 0; i < n; i++) {
		dnormValue = dnorm2((prod1 - (x2[i] * sqrtInfRates2)) / divisor, mean, stDev);
		prod = part1 * dnormValue * dn2[i];
		resultValue += prod;
	}
	//Free(p);
	//Free(dnormValues);
	return(resultValue);
}

void getDensityValues(double *x, int *k, double *informationRates,
		double *epsilonVec, double *x2, double *dn2, int n, double *results) {
	int i;
	for (i = 0; i < n; i++) {
		if (*k == 2) {
			results[i] = dnorm2(x[i], 0.0, 1.0);
		} else {
			results[i] = getDensityValue(x[i], *k, informationRates, epsilonVec, x2, dn2, n);
		}
	}
}

/**
 * .Call interface to R_getDensityValues
 * \param x a numeric vector
 */
SEXP R_getDensityValues(SEXP x, SEXP k, SEXP informationRates, SEXP epsilonVec, SEXP x2, SEXP dn2) {
	SEXP results;
	int n;
	n = LENGTH(x2);
	PROTECT(results = allocVector(REALSXP, n));
	getDensityValues(REAL(x), INTEGER(k), REAL(informationRates), REAL(epsilonVec),
		REAL(x2), REAL(dn2), n, REAL(results));
	UNPROTECT(1);
	return(results);
}
rpact/src/RcppExports.cpp0000644000176200001440000001567413574432653015174 0ustar liggesusers// Generated by using Rcpp::compileAttributes() -> do not edit by hand
// Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

#include <Rcpp.h>

using namespace Rcpp;

// getRandomSurvivalDistribution
double getRandomSurvivalDistribution(double rate, double kappa);
RcppExport SEXP _rpact_getRandomSurvivalDistribution(SEXP rateSEXP, SEXP kappaSEXP) {
BEGIN_RCPP
	Rcpp::RObject rcpp_result_gen;
	Rcpp::RNGScope rcpp_rngScope_gen;
	Rcpp::traits::input_parameter< double >::type rate(rateSEXP);
	Rcpp::traits::input_parameter< double >::type kappa(kappaSEXP);
	rcpp_result_gen = Rcpp::wrap(getRandomSurvivalDistribution(rate, kappa));
	return rcpp_result_gen;
END_RCPP
}
// getRandomPiecewiseExponentialDistribution
double getRandomPiecewiseExponentialDistribution(NumericVector cdfValues, NumericVector piecewiseLambda, NumericVector piecewiseSurvivalTime);
RcppExport SEXP _rpact_getRandomPiecewiseExponentialDistribution(SEXP cdfValuesSEXP, SEXP piecewiseLambdaSEXP, SEXP piecewiseSurvivalTimeSEXP) {
BEGIN_RCPP
	Rcpp::RObject rcpp_result_gen;
	Rcpp::RNGScope rcpp_rngScope_gen;
	Rcpp::traits::input_parameter< NumericVector >::type cdfValues(cdfValuesSEXP);
	Rcpp::traits::input_parameter< NumericVector >::type piecewiseLambda(piecewiseLambdaSEXP);
	Rcpp::traits::input_parameter< NumericVector >::type piecewiseSurvivalTime(piecewiseSurvivalTimeSEXP);
	rcpp_result_gen = Rcpp::wrap(getRandomPiecewiseExponentialDistribution(cdfValues, piecewiseLambda, piecewiseSurvivalTime));
	return rcpp_result_gen;
END_RCPP
}
// getSimulationSurvivalCpp
List getSimulationSurvivalCpp(int designNumber, int kMax, int sided, NumericVector criticalValues, NumericVector informationRates, double conditionalPower,
NumericVector plannedEvents, double thetaH1, NumericVector minNumberOfEventsPerStage, NumericVector maxNumberOfEventsPerStage, bool directionUpper, double allocation1, double allocation2, NumericVector accrualTime, IntegerVector treatmentGroup, double thetaH0, NumericVector futilityBounds, NumericVector alpha0Vec, NumericVector pi1Vec, double pi2, double eventTime, NumericVector piecewiseSurvivalTime, NumericVector cdfValues1, NumericVector cdfValues2, NumericVector lambdaVec1, NumericVector lambdaVec2, NumericVector phi, int maxNumberOfSubjects, int maxNumberOfIterations, int maxNumberOfRawDatasetsPerStage, double kappa); RcppExport SEXP _rpact_getSimulationSurvivalCpp(SEXP designNumberSEXP, SEXP kMaxSEXP, SEXP sidedSEXP, SEXP criticalValuesSEXP, SEXP informationRatesSEXP, SEXP conditionalPowerSEXP, SEXP plannedEventsSEXP, SEXP thetaH1SEXP, SEXP minNumberOfEventsPerStageSEXP, SEXP maxNumberOfEventsPerStageSEXP, SEXP directionUpperSEXP, SEXP allocation1SEXP, SEXP allocation2SEXP, SEXP accrualTimeSEXP, SEXP treatmentGroupSEXP, SEXP thetaH0SEXP, SEXP futilityBoundsSEXP, SEXP alpha0VecSEXP, SEXP pi1VecSEXP, SEXP pi2SEXP, SEXP eventTimeSEXP, SEXP piecewiseSurvivalTimeSEXP, SEXP cdfValues1SEXP, SEXP cdfValues2SEXP, SEXP lambdaVec1SEXP, SEXP lambdaVec2SEXP, SEXP phiSEXP, SEXP maxNumberOfSubjectsSEXP, SEXP maxNumberOfIterationsSEXP, SEXP maxNumberOfRawDatasetsPerStageSEXP, SEXP kappaSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type designNumber(designNumberSEXP); Rcpp::traits::input_parameter< int >::type kMax(kMaxSEXP); Rcpp::traits::input_parameter< int >::type sided(sidedSEXP); Rcpp::traits::input_parameter< NumericVector >::type criticalValues(criticalValuesSEXP); Rcpp::traits::input_parameter< NumericVector >::type informationRates(informationRatesSEXP); Rcpp::traits::input_parameter< double >::type conditionalPower(conditionalPowerSEXP); Rcpp::traits::input_parameter< NumericVector >::type plannedEvents(plannedEventsSEXP); Rcpp::traits::input_parameter< double >::type thetaH1(thetaH1SEXP); Rcpp::traits::input_parameter< NumericVector >::type minNumberOfEventsPerStage(minNumberOfEventsPerStageSEXP); Rcpp::traits::input_parameter< NumericVector >::type maxNumberOfEventsPerStage(maxNumberOfEventsPerStageSEXP); Rcpp::traits::input_parameter< bool >::type directionUpper(directionUpperSEXP); Rcpp::traits::input_parameter< double >::type allocation1(allocation1SEXP); Rcpp::traits::input_parameter< double >::type allocation2(allocation2SEXP); Rcpp::traits::input_parameter< NumericVector >::type accrualTime(accrualTimeSEXP); Rcpp::traits::input_parameter< IntegerVector >::type treatmentGroup(treatmentGroupSEXP); Rcpp::traits::input_parameter< double >::type thetaH0(thetaH0SEXP); Rcpp::traits::input_parameter< NumericVector >::type futilityBounds(futilityBoundsSEXP); Rcpp::traits::input_parameter< NumericVector >::type alpha0Vec(alpha0VecSEXP); Rcpp::traits::input_parameter< NumericVector >::type pi1Vec(pi1VecSEXP); Rcpp::traits::input_parameter< double >::type pi2(pi2SEXP); Rcpp::traits::input_parameter< double >::type eventTime(eventTimeSEXP); Rcpp::traits::input_parameter< NumericVector >::type piecewiseSurvivalTime(piecewiseSurvivalTimeSEXP); Rcpp::traits::input_parameter< NumericVector >::type cdfValues1(cdfValues1SEXP); Rcpp::traits::input_parameter< NumericVector >::type cdfValues2(cdfValues2SEXP); Rcpp::traits::input_parameter< NumericVector >::type lambdaVec1(lambdaVec1SEXP); Rcpp::traits::input_parameter< 
NumericVector >::type lambdaVec2(lambdaVec2SEXP); Rcpp::traits::input_parameter< NumericVector >::type phi(phiSEXP); Rcpp::traits::input_parameter< int >::type maxNumberOfSubjects(maxNumberOfSubjectsSEXP); Rcpp::traits::input_parameter< int >::type maxNumberOfIterations(maxNumberOfIterationsSEXP); Rcpp::traits::input_parameter< int >::type maxNumberOfRawDatasetsPerStage(maxNumberOfRawDatasetsPerStageSEXP); Rcpp::traits::input_parameter< double >::type kappa(kappaSEXP); rcpp_result_gen = Rcpp::wrap(getSimulationSurvivalCpp(designNumber, kMax, sided, criticalValues, informationRates, conditionalPower, plannedEvents, thetaH1, minNumberOfEventsPerStage, maxNumberOfEventsPerStage, directionUpper, allocation1, allocation2, accrualTime, treatmentGroup, thetaH0, futilityBounds, alpha0Vec, pi1Vec, pi2, eventTime, piecewiseSurvivalTime, cdfValues1, cdfValues2, lambdaVec1, lambdaVec2, phi, maxNumberOfSubjects, maxNumberOfIterations, maxNumberOfRawDatasetsPerStage, kappa)); return rcpp_result_gen; END_RCPP } RcppExport SEXP R_getDensityValues(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); static const R_CallMethodDef CallEntries[] = { {"_rpact_getRandomSurvivalDistribution", (DL_FUNC) &_rpact_getRandomSurvivalDistribution, 2}, {"_rpact_getRandomPiecewiseExponentialDistribution", (DL_FUNC) &_rpact_getRandomPiecewiseExponentialDistribution, 3}, {"_rpact_getSimulationSurvivalCpp", (DL_FUNC) &_rpact_getSimulationSurvivalCpp, 31}, {"R_getDensityValues", (DL_FUNC) &R_getDensityValues, 6}, {NULL, NULL, 0} }; RcppExport void R_init_rpact(DllInfo *dll) { R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } rpact/R/0000755000176200001440000000000013574442443011576 5ustar liggesusersrpact/R/class_core_parameter_set.R0000644000176200001440000012212313573721013016742 0ustar liggesusers###################################################################################### # # # -- Parameter set classes -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### #' @include f_core_constants.R NULL PlotSubTitleItem <- setRefClass("PlotSubTitleItem", fields = list( title = "character", subscript = "character", value = "numeric" ), methods = list( toQuote = function() { if (!is.null(subscript) && length(subscript) == 1 && !is.na(subscript)) { return(bquote(' '*.(title)[.(subscript)] == .(value))) } return(bquote(' '*.(title) == .(value))) } ) ) PlotSubTitleItems <- setRefClass("PlotSubTitleItems", fields = list( title = "character", subTitle = "character", items = "list" ), methods = list( initialize = function(...) { callSuper(...) 
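# start with an empty list of PlotSubTitleItem objects; items are appended
# via add()/addItem() and rendered as a plotmath subtitle by toQuote()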
items <<- list() }, addItem = function(item) { items <<- c(items, item) }, add = function(title, value, subscript = NA_character_) { titleTemp <- title if (length(items) == 0) { titleTemp <- .firstCharacterToUpperCase(titleTemp) } titleTemp <- paste0(' ', titleTemp) if (length(subscript) > 0 && !is.na(subscript)) { subscript <- paste0(as.character(subscript), ' ') } else { titleTemp <- paste0(titleTemp, ' ') } addItem(PlotSubTitleItem(title = titleTemp, subscript = subscript, value = value)) }, toQuote = function() { quotedItems <- .getQuotedItems() if (is.null(quotedItems)) { if (length(subTitle) > 0) { return(bquote(atop(bold(.(title)), atop(.(subTitle))))) } return(title) } if (length(subTitle) > 0) { return(bquote(atop(bold(.(title)), atop(.(subTitle)*','~.(quotedItems))))) } return(bquote(atop(bold(.(title)), atop(.(quotedItems))))) }, .getQuotedItems = function() { item1 <- NULL item2 <- NULL item3 <- NULL item4 <- NULL if (length(items) > 0) { item1 <- items[[1]] } if (length(items) > 1) { item2 <- items[[2]] } if (length(items) > 2) { item3 <- items[[3]] } if (length(items) > 3) { item4 <- items[[4]] } if (!is.null(item1) && !is.null(item2) && !is.null(item3) && !is.null(item4)) { if (length(item1$subscript) == 1 && !is.na(item1$subscript) && length(item2$subscript) == 1 && !is.na(item2$subscript)) { return(bquote(' '*.(item1$title)[.(item1$subscript)] == .(item1$value)*','~.(item2$title)[.(item2$subscript)] == .(item2$value)*','~.(item3$title) == .(item3$value)*','~.(item4$title) == .(item4$value)*'')) } if (length(item1$subscript) == 1 && !is.na(item1$subscript)) { return(bquote(' '*.(item1$title)[.(item1$subscript)] == .(item1$value)*','~.(item2$title) == .(item2$value)*','~.(item3$title) == .(item3$value)*','~.(item4$title) == .(item4$value)*'')) } if (length(item2$subscript) == 1 && !is.na(item2$subscript)) { return(bquote(' '*.(item1$title) == .(item1$value)*','~.(item2$title)[.(item2$subscript)] == .(item2$value)*','~.(item3$title) == .(item3$value)*','~.(item4$title) == .(item4$value)*'')) } return(bquote(' '*.(item1$title) == .(item1$value)*','~.(item2$title) == .(item2$value)*','~.(item3$title) == .(item3$value)*','~.(item4$title) == .(item4$value)*'')) } if (!is.null(item1) && !is.null(item2) && !is.null(item3)) { if (length(item1$subscript) == 1 && !is.na(item1$subscript) && length(item2$subscript) == 1 && !is.na(item2$subscript)) { return(bquote(' '*.(item1$title)[.(item1$subscript)] == .(item1$value)*','~.(item2$title)[.(item2$subscript)] == .(item2$value)*','~.(item3$title) == .(item3$value)*'')) } if (length(item1$subscript) == 1 && !is.na(item1$subscript)) { return(bquote(' '*.(item1$title)[.(item1$subscript)] == .(item1$value)*','~.(item2$title) == .(item2$value)*','~.(item3$title) == .(item3$value)*'')) } if (length(item2$subscript) == 1 && !is.na(item2$subscript)) { return(bquote(' '*.(item1$title) == .(item1$value)*','~.(item2$title)[.(item2$subscript)] == .(item2$value)*','~.(item3$title) == .(item3$value)*'')) } return(bquote(' '*.(item1$title) == .(item1$value)*','~.(item2$title) == .(item2$value)*','~.(item3$title) == .(item3$value)*'')) } if (!is.null(item1) && !is.null(item2)) { if (length(item1$subscript) == 1 && !is.na(item1$subscript) && length(item2$subscript) == 1 && !is.na(item2$subscript)) { return(bquote(' '*.(item1$title)[.(item1$subscript)] == .(item1$value)*','~.(item2$title)[.(item2$subscript)] == .(item2$value)*'')) } if (length(item1$subscript) == 1 && !is.na(item1$subscript)) { return(bquote(' '*.(item1$title)[.(item1$subscript)] == 
.(item1$value)*','~.(item2$title) == .(item2$value)*'')) } if (length(item2$subscript) == 1 && !is.na(item2$subscript)) { return(bquote(' '*.(item1$title) == .(item1$value)*','~.(item2$title)[.(item2$subscript)] == .(item2$value)*'')) } return(bquote(' '*.(item1$title) == .(item1$value)*','~.(item2$title) == .(item2$value)*'')) } if (!is.null(item1)) { if (length(item1$subscript) == 1 && !is.na(item1$subscript)) { return(bquote(' '*.(item1$title)[.(item1$subscript)] == .(item1$value)*'')) } return(bquote(' '*.(item1$title) == .(item1$value)*'')) } return(NULL) } ) ) #' #' @name FieldSet #' #' @title #' Field Set #' #' @description #' Basic class for field sets. #' #' @details #' The field set implements basic functions for a set of fields. #' #' @include class_core_plot_settings.R #' #' @keywords internal #' #' @importFrom methods new #' FieldSet <- setRefClass("FieldSet", fields = list( .parameterTypes = "list", .parameterNames = "list", .parameterFormatFunctions = "list", .showParameterTypeEnabled = "logical", .catLines = "character" ), methods = list( .getFieldNames = function() { return(names(.self$getRefClass()$fields())) }, .getVisibleFieldNames = function() { fieldNames <- names(.self$getRefClass()$fields()) fieldNames <- fieldNames[!startsWith(fieldNames, ".")] return(fieldNames) }, .resetCat = function() { .catLines <<- character(0) }, .cat = function(..., file = "", sep = "", fill = FALSE, labels = NULL, append = FALSE, heading = 0, tableColumns = 0, consoleOutputEnabled = TRUE) { if (consoleOutputEnabled) { cat(..., file = file, sep = sep, fill = fill, labels = labels, append = append) return(invisible()) } args <- list(...) line <- "" if (length(args) > 0) { if (tableColumns > 0) { values <- unlist(args, use.names = FALSE) values <- values[values != "\n"] for (i in 1:length(values)) { values[i] <- gsub("\n", "", values[i]) } line <- paste0(values, collapse = "| ") if (trimws(line) != "" && !grepl("\\| *$", line)) { line <- paste0(line, "|") } line <- paste0("| ", line) extraCells <- tableColumns - length(values) if (extraCells > 0 && trimws(line) != "") { line <- paste0(line, paste0(rep(" |", extraCells), collapse = "")) } line <- paste0(line, "\n") } else { line <- paste0(args, collapse = sep) listItemEnabled <- grepl("^ ", line) if (heading > 0) { headingCmd <- paste0(rep("#", heading + 1), collapse = "") line <- paste0(headingCmd, " ", sub(": *", "", line)) } else { parts <- strsplit(line, " *: ")[[1]] if (length(parts) == 2) { line <- paste0("*", trimws(parts[1]), "*: ", parts[2]) } } if (listItemEnabled) { if (grepl("^ ", line)) { line <- sub("^ ", "* ", line) } else { line <- paste0("* ", line) } } } } if (length(.catLines) == 0) { .catLines <<- line } else { .catLines <<- c(.catLines, line) } return(invisible()) }, .getFields = function(values) { flds = names(.self$getRefClass()$fields()) if (!missing(values)) { flds = flds[flds %in% values] } result = setNames(vector("list", length(flds)), flds) for (fld in flds) { result[[fld]] = .self[[fld]] } return(result) } ) ) #' #' @name ParameterSet #' #' @title #' Parameter Set #' #' @description #' Basic class for parameter sets. #' #' @details #' The parameter set implements basic functions for a set of parameters. 
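#'
#' @examples
#' # Illustrative sketch only: rpact result objects (e.g., a design created
#' # with getDesignGroupSequential()) inherit from this class, so the generic
#' # accessors defined for parameter sets apply to them.
#' \dontrun{
#' design <- getDesignGroupSequential(kMax = 3, alpha = 0.025)
#' names(design)          # visible field names of the parameter set
#' as.data.frame(design)  # coercion provided by the parameter set
#' }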
#' #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' ParameterSet <- setRefClass("ParameterSet", contains = "FieldSet", fields = list( .parameterTypes = "list", .parameterNames = "list", .parameterFormatFunctions = "list", .showParameterTypeEnabled = "logical", .catLines = "character" ), methods = list( initialize = function(..., .showParameterTypeEnabled = TRUE) { callSuper(..., .showParameterTypeEnabled = .showParameterTypeEnabled) .parameterTypes <<- list() .parameterNames <<- list() .parameterFormatFunctions <<- list() .catLines <<- character(0) }, .initParameterTypes = function() { for (parameterName in names(.parameterNames)) { .parameterTypes[[parameterName]] <<- C_PARAM_TYPE_UNKNOWN } }, .getParameterType = function(parameterName) { if (is.null(parameterName) || length(parameterName) == 0 || is.na(parameterName)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'parameterName' must be a valid character with length > 0") } parameterType <- .parameterTypes[[parameterName]] if (is.null(parameterType)) { return(C_PARAM_TYPE_UNKNOWN) } return(parameterType[1]) }, .getParametersToShow = function() { return(.getVisibleFieldNames()) }, .setParameterType = function(parameterName, parameterType) { if (is.null(parameterName) || length(parameterName) == 0 || is.na(parameterName)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'parameterName' must be a valid character with length > 0") } parameterType <- parameterType[1] if (!all(parameterType %in% c(C_PARAM_USER_DEFINED, C_PARAM_DEFAULT_VALUE, C_PARAM_GENERATED, C_PARAM_DERIVED, C_PARAM_NOT_APPLICABLE))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'parameterType' ('", parameterType, "') is invalid") } .parameterTypes[[parameterName]] <<- parameterType invisible(parameterType) }, isUserDefinedParameter = function(parameterName) { return(.getParameterType(parameterName) == C_PARAM_USER_DEFINED) }, isDefaultParameter = function(parameterName) { return(.getParameterType(parameterName) == C_PARAM_DEFAULT_VALUE) }, isGeneratedParameter = function(parameterName) { return(.getParameterType(parameterName) == C_PARAM_GENERATED) }, isDerivedParameter = function(parameterName) { return(.getParameterType(parameterName) == C_PARAM_DERIVED) }, isUndefinedParameter = function(parameterName) { return(.getParameterType(parameterName) == C_PARAM_TYPE_UNKNOWN) }, .getInputParameters = function() { params <- .getParametersOfOneGroup(c(C_PARAM_USER_DEFINED, C_PARAM_DEFAULT_VALUE)) if (inherits(.self, "TrialDesignPlanSurvival") && .self$.objectType == "sampleSize") { params <- params[params != "calculateFollowUpTime"] } return(params) }, .getUserDefinedParameters = function() { return(.getParametersOfOneGroup(C_PARAM_USER_DEFINED)) }, .getDefaultParameters = function() { return(.getParametersOfOneGroup(C_PARAM_DEFAULT_VALUE)) }, .getGeneratedParameters = function() { return(.getParametersOfOneGroup(C_PARAM_GENERATED)) }, .getDerivedParameters = function() { return(.getParametersOfOneGroup(C_PARAM_DERIVED)) }, .getUndefinedParameters = function() { return(.getParametersOfOneGroup(C_PARAM_TYPE_UNKNOWN)) }, .getParameterValueIfUserDefinedOrDefault = function(parameterName) { if (isUserDefinedParameter(parameterName) || isDefaultParameter(parameterName)) { return(.self[[parameterName]]) } parameterType <- .self$getRefClass()$fields()[[parameterName]] if (parameterType == "numeric") { return(NA_real_) } if (parameterType == "integer") { return(NA_integer_) } if (parameterType == "character") { return(NA_character_) } return(NA) }, 
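# .getParametersOfOneGroup() returns the visible field names whose parameter
# type matches one of the requested types, in the display order defined by
# .getParametersToShow()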
.getParametersOfOneGroup = function(parameterType) { if (length(parameterType) == 1) { parameterNames <- names(.parameterTypes[.parameterTypes == parameterType]) } else { parameterNames <- names(.parameterTypes[which(.parameterTypes %in% parameterType)]) } parametersToShow <- .getParametersToShow() if (is.null(parametersToShow) || length(parametersToShow) == 0) { return(parameterNames) } return(parametersToShow[parametersToShow %in% parameterNames]) }, .showParameterType = function(parameterName) { if (!.showParameterTypeEnabled) { return(" ") } return(paste0("[", .getParameterType(parameterName), "]")) }, .isMatrix = function(param) { if (missing(param) || is.null(param) || is.list(param)) { return(FALSE) } return(is.matrix(param)) }, .isVector = function(param) { if (missing(param) || is.null(param) || is.list(param)) { return(FALSE) } return(length(param) > 1) }, .showAllParameters = function(consoleOutputEnabled = TRUE) { parametersToShow <- .getVisibleFieldNamesOrdered() for (parameter in parametersToShow) { .showParameter(parameter, showParameterType = TRUE, consoleOutputEnabled = consoleOutputEnabled) } }, .getVisibleFieldNamesOrdered = function() { visibleFieldNames <- .getVisibleFieldNames() parametersToShowSorted <- .getParametersToShow() if (is.null(parametersToShowSorted) || length(parametersToShowSorted) == 0) { return(visibleFieldNames) } visibleFieldNames <- visibleFieldNames[!(visibleFieldNames %in% parametersToShowSorted)] visibleFieldNames <- c(parametersToShowSorted, visibleFieldNames) return(visibleFieldNames) }, .show = function(..., consoleOutputEnabled = FALSE) { stop("Method '.show()' is not implemented in class '", class(.self), "'") }, .catMarkdownText = function() { .show(consoleOutputEnabled = FALSE) if (length(.catLines) == 0) { return(invisible()) } for (line in .catLines) { cat(line) } }, .showParametersOfOneGroup = function(parameters, title, orderByParameterName = TRUE, consoleOutputEnabled = TRUE) { output <- "" if (is.null(parameters) || length(parameters) == 0 || all(is.na(parameters))) { if (!missing(title) && !is.null(title) && !is.na(title) && consoleOutputEnabled) { output <- paste0(title, ": not available\n\n") .cat(output, heading = 2, consoleOutputEnabled = consoleOutputEnabled) } invisible(output) } else { if (orderByParameterName) { parameters <- sort(parameters) } if (!missing(title) && !is.null(title) && !is.na(title)) { output <- paste0(title, ":\n") .cat(output, heading = 2, consoleOutputEnabled = consoleOutputEnabled) } for (parameterName in parameters) { output <- paste0(output, .showParameter(parameterName, consoleOutputEnabled = consoleOutputEnabled)) } .cat("\n", consoleOutputEnabled = consoleOutputEnabled) output <- paste0(output, "\n") invisible(output) } }, .showParameter = function(parameterName, showParameterType = FALSE, consoleOutputEnabled = TRUE) { param <- .getParameterValueFormatted(parameterName = parameterName) output <- "" if (!is.null(param)) { if (param$isMatrix) { for (i in 1:length(param$paramValueFormatted)) { output <- paste0(output, .showParameterFormatted(paramName = param$paramName, paramValue = param$paramValue[i, ], paramValueFormatted = param$paramValueFormatted[[i]], showParameterType = showParameterType, matrixRow = i, consoleOutputEnabled = consoleOutputEnabled, paramNameRaw = parameterName)) } } else { output <- .showParameterFormatted(paramName = param$paramName, paramValue = param$paramValue, paramValueFormatted = param$paramValueFormatted, showParameterType = showParameterType, consoleOutputEnabled = 
consoleOutputEnabled, paramNameRaw = parameterName) } } invisible(output) }, .extractParameterNameAndValue = function(parameterName) { d <- regexpr(paste0("\\..+\\$"), parameterName) if (d[1] != 1) { return(list(parameterName = parameterName, paramValue = get(parameterName))) } index <- attr(d, "match.length") objectName <- substr(parameterName, 1, index - 1) parameterName <- substr(parameterName, index + 1, nchar(parameterName)) paramValue <- get(objectName)[[parameterName]] return(list(parameterName = parameterName, paramValue = paramValue)) }, .getParameterValueFormatted = function(parameterName) { tryCatch({ result <- .extractParameterNameAndValue(parameterName) parameterName <- result$parameterName paramValue <- result$paramValue if (isS4(paramValue)) { return(NULL) } paramValueFormatted <- paramValue formatFunctionName <- .parameterFormatFunctions[[parameterName]] if (!is.null(formatFunctionName)) { paramValueFormatted <- eval(call(formatFunctionName, paramValueFormatted)) } isMatrix <- FALSE if (.isMatrix(paramValue)) { matrixFormatted <- paramValueFormatted paramValueFormatted <- .arrayToString(matrixFormatted[1, ]) if (nrow(matrixFormatted) > 1 && ncol(matrixFormatted) > 0) { isMatrix <- TRUE paramValueFormatted <- list(paramValueFormatted) for (i in 2:nrow(matrixFormatted)) { paramValueFormatted <- c(paramValueFormatted, .arrayToString(matrixFormatted[i, ])) } } } else if (.isVector(paramValue)) { paramValueFormatted <- .arrayToString(paramValueFormatted) } else if (parameterName == "sided") { paramValueFormatted <- ifelse(paramValue == 1, "one-sided", "two-sided") } return(list( paramName = parameterName, paramValue = paramValue, paramValueFormatted = paramValueFormatted, isMatrix = isMatrix )) }, error = function(e) { .logError(paste0("Error in '.getParameterValueFormatted'. 
", "Failed to show parameter '%s' (class '%s'): %s"), parameterName, class(.self), e) }) return(NULL) }, .showUnknownParameters = function(consoleOutputEnabled = TRUE) { params <- .getUndefinedParameters() if (length(params) > 0) { .showParametersOfOneGroup(params, "ISSUES (parameters with undefined type)", consoleOutputEnabled = consoleOutputEnabled) } }, .showParameterFormatted = function(paramName, paramValue, paramValueFormatted = NA_character_, showParameterType = FALSE, matrixRow = NA_real_, consoleOutputEnabled = TRUE, paramNameRaw = NA_character_) { if (!is.na(paramNameRaw)) { paramCaption <- .parameterNames[[paramNameRaw]] } if (is.null(paramCaption)) { paramCaption <- .parameterNames[[paramName]] } if (is.null(paramCaption)) { paramCaption <- paste0("%", paramName, "%") } if (!is.na(matrixRow)) { if (inherits(.self, "AnalysisResultsMultiArm")) { paramCaption <- paste0(paramCaption, " (", matrixRow, ")") } else { paramCaption <- paste0(paramCaption, " [", matrixRow, "]") } } if (is.null(paramValueFormatted) || length(paramValueFormatted) == 0 || is.na(paramValueFormatted)) { paramValueFormatted <- paramValue } if (is.list(paramValueFormatted)) { paramValueFormatted <- .listToString(paramValueFormatted) } prefix <- ifelse(showParameterType, .showParameterType(paramName), "") variableNameFormatted <- formatVariableName(name = paramCaption, n = .getNChar(), prefix = prefix) output <- paste(variableNameFormatted, paramValueFormatted, "\n") .cat(output, consoleOutputEnabled = consoleOutputEnabled) invisible(output) }, .getNChar = function() { if (length(.parameterNames) == 0) { return(40) } return(min(40, max(nchar(.parameterNames))) + 4) }, .showParameterTypeDescription = function(consoleOutputEnabled = consoleOutputEnabled) { .cat("\n", consoleOutputEnabled = consoleOutputEnabled) .cat("Legend:\n", heading = 2, consoleOutputEnabled = consoleOutputEnabled) .cat(" ", C_PARAM_USER_DEFINED, ": user defined\n", consoleOutputEnabled = consoleOutputEnabled) .cat(" ", C_PARAM_DERIVED, ": derived value\n", consoleOutputEnabled = consoleOutputEnabled) .cat(" ", C_PARAM_DEFAULT_VALUE, ": default value\n", consoleOutputEnabled = consoleOutputEnabled) .cat(" ", C_PARAM_GENERATED, ": generated/calculated value\n", consoleOutputEnabled = consoleOutputEnabled) .cat(" ", C_PARAM_NOT_APPLICABLE, ": not applicable or hidden\n", consoleOutputEnabled = consoleOutputEnabled) }, .printAsDataFrame = function(parameterNames, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, handleParameterNamesAsToBeExcluded = FALSE, lineBreakEnabled = FALSE) { if (.isTrialDesign(.self)) { tableColumnNames <- .getTableColumnNames(design = .self) } else { tableColumnNames <- C_TABLE_COLUMN_NAMES } if (.isTrialDesignPlan(.self)) { parameterNames <- NULL } dataFrame <- .getAsDataFrame(parameterNames, niceColumnNamesEnabled = niceColumnNamesEnabled, includeAllParameters = includeAllParameters, handleParameterNamesAsToBeExcluded = handleParameterNamesAsToBeExcluded, returnParametersAsCharacter = TRUE, tableColumnNames = tableColumnNames) result <- as.matrix(dataFrame) if (.isTrialDesignPlan(.self)) { dimnames(result)[[1]] <- paste(" ", c(1:nrow(dataFrame))) } else if (!is.null(.self[["stages"]])) { dimnames(result)[[1]] <- paste(" Stage", c(1:nrow(dataFrame))) } print(result, quote = FALSE, right = FALSE) }, .getNumberOfRows = function(parameterNames) { numberOfRows <- 1 for (parameterName in parameterNames) { parameterValues <- .self[[parameterName]] if (is.vector(parameterValues) && length(parameterValues) > 
numberOfRows) { numberOfRows <- length(parameterValues) } else if (is.matrix(parameterValues) && (nrow(parameterValues) == 1 || ncol(parameterValues) == 1) && length(parameterValues) > numberOfRows) { numberOfRows <- length(parameterValues) } } return(numberOfRows) }, .containsMultidimensionalParameters = function(parameterNames) { for (parameterName in parameterNames) { parameterValues <- .self[[parameterName]] if (!is.null(parameterValues) && is.matrix(parameterValues) && nrow(parameterValues) > 0 && ncol(parameterValues) > 0) { return(TRUE) } } return(FALSE) }, .getMultidimensionalNumberOfVariants = function(parameterNames) { n <- 1 for (parameterName in parameterNames) { parameterValues <- .self[[parameterName]] if (!is.null(parameterValues)) { if (is.matrix(parameterValues)) { if (nrow(parameterValues) > 0 && ncol(parameterValues) > n) { n <- ncol(parameterValues) } } else if (length(parameterValues) > n && !(parameterName %in% c("accrualTime", "accrualIntensity", "plannedSubjects", "plannedEvents", "minNumberOfSubjectsPerStage", "maxNumberOfSubjectsPerStage", "minNumberOfEventsPerStage", "maxNumberOfEventsPerStage", "piecewiseSurvivalTime", "lambda2"))) { n <- length(parameterValues) } } } return(n) }, .getMultidimensionalNumberOfStages = function(parameterNames) { n <- 1 for (parameterName in parameterNames) { parameterValues <- .self[[parameterName]] if (!is.null(parameterValues) && is.matrix(parameterValues) && ncol(parameterValues) > 0 && nrow(parameterValues) > n) { n <- nrow(parameterValues) } } return(n) }, .getVariedParameter = function(parameterNames, numberOfVariants) { for (parameterName in parameterNames) { parameterValues <- .self[[parameterName]] if (!is.null(parameterValues) && !is.matrix(parameterValues) && length(parameterValues) == numberOfVariants && parameterName %in% C_VARIABLE_DESIGN_PLAN_PARAMETERS && .getParameterType(parameterName) == C_PARAM_USER_DEFINED) { return(parameterName) } } for (parameterName in parameterNames) { parameterValues <- .self[[parameterName]] if (!is.null(parameterValues) && !is.matrix(parameterValues) && length(parameterValues) == numberOfVariants && parameterName %in% C_VARIABLE_DESIGN_PLAN_PARAMETERS && .getParameterType(parameterName) == C_PARAM_DEFAULT_VALUE) { return(parameterName) } } #stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "failed to find varied parameter") return(NULL) }, .getDataFrameColumnValues = function(parameterName, numberOfVariants, numberOfStages, includeAllParameters) { if (.getParameterType(parameterName) == C_PARAM_TYPE_UNKNOWN) { return(NULL) } if (!includeAllParameters && .getParameterType(parameterName) == C_PARAM_NOT_APPLICABLE) { return(NULL) } parameterValues <- .self[[parameterName]] if (is.null(parameterValues) || length(parameterValues) == 0) { return(NULL) } if (!is.matrix(parameterValues)) { if (length(parameterValues) == 1) { return(rep(parameterValues, numberOfVariants * numberOfStages)) } if (length(parameterValues) == numberOfVariants) { return(rep(parameterValues, numberOfStages)) } if (parameterName %in% c("accrualTime", "accrualIntensity", "plannedEvents", "plannedSubjects", "minNumberOfEventsPerStage", "maxNumberOfEventsPerStage", "minNumberOfSubjectsPerStage", "maxNumberOfSubjectsPerStage", "piecewiseSurvivalTime", "lambda2")) { return(NULL) } stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "parameter '", parameterName, "' has an invalid ", "dimension (length is ", length(parameterValues), ")") } if (grepl("futility|alpha0Vec", parameterName) && nrow(parameterValues) == numberOfStages - 1) { parameterValues <- 
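# futility bounds (and alpha0Vec) only exist for stages 1, ..., kMax - 1,
# so pad the final stage with NA before building the column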
rbind(parameterValues, rep(NA_real_, ncol(parameterValues))) } if (nrow(parameterValues) == numberOfStages && ncol(parameterValues) == 1) { columnValues <- c() for (parameterValue in parameterValues) { columnValues <- c(columnValues, rep(parameterValue, numberOfVariants)) } return(columnValues) } if (nrow(parameterValues) == numberOfStages && ncol(parameterValues) == numberOfVariants) { columnValues <- c() for (i in 1:nrow(parameterValues)) { for (j in 1:ncol(parameterValues)) { columnValues <- c(columnValues, parameterValues[i, j]) } } return(columnValues) } stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "parameter '", parameterName, "' has an invalid ", "dimension (", nrow(parameterValues), " x ", ncol(parameterValues), ")") }, .getAsDataFrameMultidimensional = function(parameterNames, niceColumnNamesEnabled, includeAllParameters, returnParametersAsCharacter, tableColumnNames) { numberOfVariants <- .getMultidimensionalNumberOfVariants(parameterNames) numberOfStages <- .getMultidimensionalNumberOfStages(parameterNames) stagesCaption <- .getDataFrameColumnCaption("stages", tableColumnNames, niceColumnNamesEnabled) dataFrame <- data.frame( stages = sort(rep(1:numberOfStages, numberOfVariants)) ) names(dataFrame) <- stagesCaption variedParameter <- .getVariedParameter(parameterNames, numberOfVariants) if (!is.null(variedParameter) && variedParameter != "stages") { variedParameterCaption <- .getDataFrameColumnCaption(variedParameter, tableColumnNames, niceColumnNamesEnabled) dataFrame[[variedParameterCaption]] <- rep(.self[[variedParameter]], numberOfStages) } for (parameterName in parameterNames) { if (parameterName != "stages" && (is.null(variedParameter) || parameterName != variedParameter)) { columnValues <- .getDataFrameColumnValues(parameterName, numberOfVariants, numberOfStages, includeAllParameters) if (!is.null(columnValues)) { columnCaption <- .getDataFrameColumnCaption(parameterName, tableColumnNames, niceColumnNamesEnabled) dataFrame[[columnCaption]] <- columnValues if (returnParametersAsCharacter) { .formatDataFrameParametersAsCharacter(dataFrame, parameterName, columnValues, columnCaption) } } } } return(dataFrame) }, .getDataFrameColumnCaption = function(parameterName, tableColumnNames, niceColumnNamesEnabled) { if (length(parameterName) == 0 || parameterName == "") { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'parameterName' must be a valid parameter name") } tableColumnName <- tableColumnNames[[parameterName]] return(ifelse(niceColumnNamesEnabled && !is.null(tableColumnName), tableColumnName, parameterName)) }, .getUnidimensionalNumberOfStages = function(parameterNames) { kMax <- .self[["kMax"]] if (is.null(kMax) && !is.null(.self[[".design"]])) { kMax <- .self[[".design"]][["kMax"]] } if (!is.null(kMax) && length(kMax) == 1 && is.integer(kMax)) { return(kMax) } n <- 1 for (parameterName in parameterNames) { parameterValues <- .self[[parameterName]] if (!is.null(parameterValues) && !is.matrix(parameterValues) && length(parameterValues) > n) { n <- length(parameterValues) } } return(n) }, .formatDataFrameParametersAsCharacter = function(dataFrame, parameterName, parameterValues, parameterCaption) { tryCatch({ formatFunctionName <- .parameterFormatFunctions[[parameterName]] if (!is.null(formatFunctionName)) { parameterValuesFormatted <- eval(call(formatFunctionName, parameterValues)) } else { parameterValuesFormatted <- as.character(parameterValues) } if (parameterName == "sided") { parameterValuesFormatted <- ifelse(parameterValues == 1, "one-sided", "two-sided") } if 
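# blank out formatted values wherever the underlying column is NA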
(!is.null(dataFrame[[parameterCaption]])) { parameterValuesFormatted[is.na(dataFrame[[parameterCaption]])] <- "" } parameterValuesFormatted[is.na(parameterValuesFormatted)] <- "" parameterValuesFormatted[parameterValuesFormatted == "NA"] <- "" if (is.null(dataFrame)) { dataFrame <- data.frame(x = parameterValuesFormatted) names(dataFrame) <- parameterCaption } else { dataFrame[[parameterCaption]] <- parameterValuesFormatted } }, error = function(e) { .logError(paste0("Error in '.getAsDataFrame'. Failed to show parameter '%s' ", "(class '%s'): %s"), parameterName, class(.self), e) }) }, .getAsDataFrameUnidimensional = function(parameterNames, niceColumnNamesEnabled, includeAllParameters, returnParametersAsCharacter, tableColumnNames) { numberOfStages <- .getUnidimensionalNumberOfStages(parameterNames) dataFrame <- NULL for (parameterName in parameterNames) { tryCatch({ parameterCaption <- ifelse(niceColumnNamesEnabled && !is.null(tableColumnNames[[parameterName]]), tableColumnNames[[parameterName]], parameterName) parameterValues <- .self[[parameterName]] if (parameterName == "futilityBounds") { parameterValues[parameterValues == C_FUTILITY_BOUNDS_DEFAULT] <- -Inf } if (length(parameterValues) == 1) { parameterValues <- rep(parameterValues, numberOfStages) } else { while (length(parameterValues) < numberOfStages) { parameterValues <- c(parameterValues, NA) } } if (includeAllParameters || ( .getParameterType(parameterName) != C_PARAM_NOT_APPLICABLE && sum(is.na(parameterValues)) < length(parameterValues))) { if (is.null(dataFrame)) { dataFrame <- data.frame(x = parameterValues) names(dataFrame) <- parameterCaption } else { dataFrame[[parameterCaption]] <- parameterValues } } if (returnParametersAsCharacter) { .formatDataFrameParametersAsCharacter(dataFrame, parameterName, parameterValues, parameterCaption) } }, error = function(e) { .logError("Failed to add parameter '%s' to data.frame: %s", parameterName, e) }) } return(dataFrame) }, .getAsDataFrame = function(parameterNames, niceColumnNamesEnabled = TRUE, includeAllParameters = FALSE, handleParameterNamesAsToBeExcluded = FALSE, returnParametersAsCharacter = FALSE, tableColumnNames = C_TABLE_COLUMN_NAMES) { parameterNamesToBeExcluded <- c() if (handleParameterNamesAsToBeExcluded) { parameterNamesToBeExcluded <- parameterNames parameterNames <- .getVisibleFieldNamesOrdered() if (!is.null(parameterNamesToBeExcluded) && length(parameterNamesToBeExcluded) > 0) { parameterNames <- parameterNames[!(parameterNames %in% parameterNamesToBeExcluded)] } } else if (is.null(parameterNames)) { parameterNames <- .getVisibleFieldNamesOrdered() } if (.containsMultidimensionalParameters(parameterNames)) { return(.getAsDataFrameMultidimensional(parameterNames, niceColumnNamesEnabled, includeAllParameters, returnParametersAsCharacter, tableColumnNames)) } # remove matrices for (parameterName in parameterNames) { parameterValues <- .self[[parameterName]] if (is.matrix(parameterValues) && nrow(parameterValues) != 1 && ncol(parameterValues) != 1) { parameterNames <- parameterNames[parameterNames != parameterName] } } if (length(parameterNames) == 0) { return(data.frame()) } return(.getAsDataFrameUnidimensional(parameterNames, niceColumnNamesEnabled, includeAllParameters, returnParametersAsCharacter, tableColumnNames)) }, # # Returns a sub-list. # # @param x A list from which you would like to get a sub-list. # @param listEntryNames A vector of names which specify the entries of the sub-list to return. 
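# Example (illustrative sketch):
#   .getSubListByNames(list(a = 1, b = 2, c = 3), c("a", "c"))
#   # returns list(a = 1, c = 3)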
#
		.getSubListByNames = function(x, listEntryNames) {
			"Returns a sub-list."
			if (!is.list(x)) {
				stop("'x' must be a list")
			}
			if (!is.character(listEntryNames)) {
				stop("'listEntryNames' must be a character vector")
			}
			return(x[which(names(x) %in% listEntryNames)])
		}
	)
)

#'
#' @name FieldSet_names
#'
#' @title
#' The Names of a Field Set object
#'
#' @description
#' Function to get the names of a \code{FieldSet} object.
#'
#' @details
#' Returns the names of a field set that can be accessed by the user.
#'
#' @export
#'
#' @keywords internal
#'
names.FieldSet <- function(x) {
	return(x$.getVisibleFieldNames())
}

#'
#' @name FieldSet_print
#'
#' @title
#' Print Field Set Values
#'
#' @description
#' \code{print} prints its \code{FieldSet} argument and returns it invisibly (via \code{invisible(x)}).
#'
#' @details
#' Prints the field set.
#'
#' @export
#'
#' @keywords internal
#'
print.FieldSet <- function(x, ...) {
	x$show()
	invisible(x)
}

#'
#' @name ParameterSet_as.data.frame
#'
#' @title
#' Coerce Parameter Set to a Data Frame
#'
#' @description
#' Returns the \code{ParameterSet} as a data frame.
#'
#' @details
#' Coerces the parameter set to a data frame.
#'
#' @export
#'
#' @keywords internal
#'
as.data.frame.ParameterSet <- function(x, row.names = NULL, optional = FALSE,
		niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, ...) {
	return(x$.getAsDataFrame(parameterNames = NULL,
		niceColumnNamesEnabled = niceColumnNamesEnabled,
		includeAllParameters = includeAllParameters))
}

#'
#' @name FrameSet_as.matrix
#'
#' @title
#' Coerce Field Set to a Matrix
#'
#' @description
#' Returns the \code{FieldSet} as a matrix.
#'
#' @details
#' Coerces the field set to a matrix.
#'
#' @export
#'
#' @keywords internal
#'
as.matrix.FieldSet <- function(x, rownames.force = NA, ...) {
	dataFrame <- as.data.frame(x)
	result <- as.matrix(dataFrame)
	if ((is.na(rownames.force) || isTRUE(rownames.force)) && nrow(result) > 0) {
		if (.isTrialDesignPlan(x)) {
			dimnames(result)[[1]] <- paste(" ", c(1:nrow(dataFrame)))
		} else if (inherits(x, "PowerAndAverageSampleNumberResult")) {
			dimnames(result)[[1]] <- rep("", nrow(dataFrame))
		} else {
			if (!is.null(x[["stages"]]) || class(x) == "PowerAndAverageSampleNumberResult") {
				dimnames(result)[[1]] <- paste(" Stage", c(1:nrow(dataFrame)))
			}
		}
	}
	return(result)
}

#'
#' @name ParameterSet_summary
#'
#' @title
#' Parameter Set Summary
#'
#' @param digits defines how many digits are to be used for numeric values.
#'
#' @description
#' Displays a summary of a \code{ParameterSet} object.
#'
#' @details
#' Summarizes the parameters and results of a parameter set.
#'
#' The following options can be set globally:
#' \enumerate{
#'   \item \code{rpact.summary.justify}: one of \code{c("right", "left", "centre")};
#'         shall the values be right-justified (the default), left-justified or centred.
#'   \item \code{rpact.summary.digits}: defines how many digits are to be used for numeric values (default is 3).
#'   \item \code{rpact.summary.digits.fixed}: if \code{FALSE} (default), probabilities are displayed with one more
#'         digit than defined by \code{rpact.summary.digits}.
#'   \item \code{rpact.summary.trim.zeroes}: if \code{TRUE} (default), zeroes will always be displayed as "0",
#'         e.g. "0.000" will become "0".
#' }
#'
#' @export
#'
#' @keywords internal
#'
summary.ParameterSet <- function(object, ..., type = 1, digits = NA_integer_) {
	.warnInCaseOfUnknownArguments(functionName = "summary.ParameterSet", ...)
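	# type == 1 produces the formatted summary for the common result classes;
	# for all other objects the generic parameter listing below is shown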
if (type == 1 && (inherits(object, "TrialDesign") || inherits(object, "TrialDesignPlan") || inherits(object, "SimulationResults"))) { return(.createSummary(object, digits = digits)) } object$.cat("This output summarizes the ", object$.toString(), " specification.\n\n", heading = 1) object$show() object$.cat("\n") object$show(showType = 2) object$.cat("\n") object$.cat(object$.toString(startWithUpperCase = TRUE), " table:\n", heading = 1) parametersToShow <- object$.getParametersToShow() parametersToShow <- parametersToShow[parametersToShow != "stages" & parametersToShow != "stage"] for (parameter in parametersToShow) { if (length(object[[parameter]]) == 1) { parametersToShow <- parametersToShow[parametersToShow != parameter] } } object$.printAsDataFrame(parameterNames = parametersToShow) invisible(object) } #' #' @name ParameterSet_print #' #' @title #' Print Parameter Set Values #' #' @description #' \code{print} prints its \code{ParameterSet} argument and returns it invisibly (via \code{invisible(x)}). #' #' @param x The object to print. #' @param markdown If \code{TRUE}, the object \code{x} will be printed using markdown syntax; #' normal representation will be used otherwise (default is \code{FALSE}) #' #' @details #' Prints the parameters and results of a parameter set. #' #' @export #' #' @keywords internal #' print.ParameterSet <- function(x, ..., markdown = FALSE) { if (markdown) { x$.catMarkdownText() return(invisible(x)) } x$show() invisible(x) } rpact/R/class_design_plan.R0000644000176200001440000020401213573662712015372 0ustar liggesusers###################################################################################### # # # -- Trial design plan classes -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### #' @include f_core_constants.R #' @include f_design_utilities.R NULL C_VARIABLE_DESIGN_PLAN_PARAMETERS <- c("lambda1", "pi1", "median1", "alternative", "hazardRatio") C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_MEANS <- list( normalApproximation = FALSE, meanRatio = FALSE, thetaH0 = 0, alternative = seq(0.2, 1, 0.2), stDev = 1, groups = 2L, allocationRatioPlanned = 1 ) C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_RATES <- list( normalApproximation = TRUE, riskRatio = FALSE, thetaH0 = 0, pi1 = seq(0.4, 0.6, 0.1), pi2 = 0.2, groups = 2L, allocationRatioPlanned = 1 ) C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_SURVIVAL <- list( typeOfComputation = "Schoenfeld", thetaH0 = 1, pi2 = 0.2, pi1 = seq(0.4, 0.6, 0.1), allocationRatioPlanned = 1, accountForObservationTimes = NA, eventTime = 12, accrualTime = C_ACCRUAL_TIME_DEFAULT, accrualIntensity = C_ACCRUAL_INTENSITY_DEFAULT, kappa = 1, piecewiseSurvivalTime = NA_real_, lambda2 = NA_real_, lambda1 = NA_real_, followUpTime = C_FOLLOW_UP_TIME_DEFAULT, maxNumberOfSubjects = 0, dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12 ) #' #' @name TrialDesignPlan #' #' @title #' Basic Trial Design Plan #' #' @description #' Basic class for trial design plans. 
#' #' @details #' \code{TrialDesignPlan} is the basic class for #' \itemize{ #' \item \code{TrialDesignPlanMeans}, #' \item \code{TrialDesignPlanRates}, and #' \item \code{TrialDesignPlanSurvival}. #' } #' #' @include f_core_constants.R #' @include class_core_parameter_set.R #' @include class_design.R #' @include class_design_set.R #' @include f_core_plot.R #' #' @keywords internal #' #' @importFrom methods new #' TrialDesignPlan <- setRefClass("TrialDesignPlan", contains = "ParameterSet", fields = list( .plotSettings = "PlotSettings", .design = "TrialDesign", .objectType = "character" # "sampleSize" or "power" ), methods = list( initialize = function(design, ...) { callSuper(.design = design, ...) .plotSettings <<- PlotSettings() .parameterNames <<- .getParameterNames(design, .self) .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS if (.isTrialDesignPlanMeans(.self)) { defaultValueList <- C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_MEANS } else if (.isTrialDesignPlanRates(.self)) { defaultValueList <- C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_RATES } else if (.isTrialDesignPlanSurvival(.self)) { defaultValueList <- C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_SURVIVAL } for (parameterName in .getVisibleFieldNames()) { defaultValue <- defaultValueList[[parameterName]] existingValue <- .self[[parameterName]] if (all(is.na(existingValue))) { .setParameterType(parameterName, C_PARAM_DEFAULT_VALUE) } else if (!is.null(defaultValue) && length(defaultValue) == length(existingValue) && !any(is.na(defaultValue)) && !any(is.na(existingValue)) && sum(defaultValue == existingValue) == length(defaultValue)) { .setParameterType(parameterName, C_PARAM_DEFAULT_VALUE) } else { .setParameterType(parameterName, C_PARAM_USER_DEFINED) } } .setParameterType("optimumAllocationRatio", C_PARAM_NOT_APPLICABLE) }, .setSampleSizeObject = function(objectType) { if (length(objectType) == 0 || !(objectType %in% c("sampleSize", "power"))) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'.objectType' (", objectType, ") must be specified as 'sampleSize' or 'power'") } .objectType <<- objectType }, .isSampleSizeObject = function() { if (length(.objectType) == 0 || !(.objectType %in% c("sampleSize", "power"))) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'.objectType' must be specified as 'sampleSize' or 'power'") } return(.objectType == "sampleSize") }, .isPowerObject = function() { if (length(.objectType) == 0 || !(.objectType %in% c("sampleSize", "power"))) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'.objectType' must be specified as 'sampleSize' or 'power'") } return(.objectType == "power") }, getPlotSettings = function() { return(.plotSettings) }, show = function(showType = 1, digits = NA_integer_) { .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE) }, .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) { 'Method for automatically printing trial plan objects' .resetCat() if (showType == 3) { .createSummary(.self, digits = digits)$.show(showType = 1, digits = digits, consoleOutputEnabled = consoleOutputEnabled) } else if (showType == 2) { .cat("Technical summary of the design plan object of class ", methods::classLabel(class(.self)), ":\n\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showAllParameters(consoleOutputEnabled = consoleOutputEnabled) .showParameterTypeDescription(consoleOutputEnabled = consoleOutputEnabled) } else { .cat("Design plan parameters and output for ", .toString(), ":\n\n", heading = 1, consoleOutputEnabled = consoleOutputEnabled) 
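# assemble the design parameters to display: alpha and sided are always
# shown; beta and twoSidedPower are added only when they are relevant
# for the object type (sample size vs. power)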
designParametersToShow <- c(".design$alpha") if (.objectType == "sampleSize" || (inherits(.self, "TrialDesignPlanSurvival") && .isBetaSpendingDesignType(.design$typeBetaSpending))) { designParametersToShow <- c(designParametersToShow, ".design$beta") } if (.objectType == "sampleSize" && !is.null(.design$sided) && !is.na(.design$sided) && .design$sided == 2) { designParametersToShow <- c(designParametersToShow, ".design$twoSidedPower") } designParametersToShow <- c(designParametersToShow, ".design$sided") .showParametersOfOneGroup(designParametersToShow, "Design parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getUserDefinedParameters(), "User defined parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getDefaultParameters(), "Default parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getGeneratedParameters(), "Sample size and output", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled) if (inherits(.self, "TrialDesignPlanSurvival") || groups == 2 || .design$kMax > 1) { .cat("Legend:\n", heading = 2, consoleOutputEnabled = consoleOutputEnabled) if (inherits(.self, "TrialDesignPlanSurvival") || groups == 2) { .cat(" (i): values of treatment arm i\n", consoleOutputEnabled = consoleOutputEnabled) } if (.design$kMax > 1) { .cat(" [k]: values at stage k\n", consoleOutputEnabled = consoleOutputEnabled) } } .cat("\n", consoleOutputEnabled = consoleOutputEnabled) } }, getAlpha = function() { return(.design$alpha) }, getBeta = function() { if (.isTrialDesignInverseNormalOrGroupSequential(.design)) { return(.design$beta) } return(NA_real_) }, getSided = function() { return(.design$sided) }, getTwoSidedPower = function() { if (.isTrialDesignInverseNormalOrGroupSequential(.design)) { return(.design$twoSidedPower) } return(NA) }, .toString = function(startWithUpperCase = FALSE) { if (.isTrialDesignPlanMeans(.self)) { s <- "means" } else if (.isTrialDesignPlanRates(.self)) { s <- "rates" } else if (.isTrialDesignPlanSurvival(.self)) { s <- "survival data" } else { s <- paste0("unknown data class '", class(.self), "'") } return(ifelse(startWithUpperCase, .firstCharacterToUpperCase(s), s)) } ) ) #' #' @name TrialDesignPlan_as.data.frame #' #' @title #' Coerce Trial Design Plan to a Data Frame #' #' @description #' Returns the \code{TrialDesignPlan} as data frame. #' #' @details #' Coerces the design plan to a data frame. #' #' @export #' #' @keywords internal #' as.data.frame.TrialDesignPlan <- function(x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, ...) { return(x$.getAsDataFrame(parameterNames = NULL, niceColumnNamesEnabled = niceColumnNamesEnabled, includeAllParameters = includeAllParameters)) } #' #' @name TrialDesignPlanMeans #' #' @title #' Trial Design Plan Means #' #' @description #' Trial design plan for means. #' #' @details #' This object can not be created directly; use \code{\link{getSampleSizeMeans}} #' with suitable arguments to create a design plan for a dataset of means. 
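#'
#' @examples
#' # Illustrative sketch only (hypothetical values): a TrialDesignPlanMeans
#' # object is typically obtained via getSampleSizeMeans().
#' \dontrun{
#' plan <- getSampleSizeMeans(design = getDesignGroupSequential(), alternative = 0.8)
#' plan
#' }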
#' #' @include class_core_parameter_set.R #' @include class_design.R #' @include class_design_set.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' TrialDesignPlanMeans <- setRefClass("TrialDesignPlanMeans", contains = "TrialDesignPlan", fields = list( normalApproximation = "logical", meanRatio = "logical", thetaH0 = "numeric", alternative = "numeric", stDev = "numeric", groups = "numeric", allocationRatioPlanned = "numeric", optimumAllocationRatio = "logical", directionUpper = "logical", nFixed = "numeric", nFixed1 = "numeric", nFixed2 = "numeric", informationRates = "matrix", maxNumberOfSubjects = "numeric", maxNumberOfSubjects1 = "numeric", maxNumberOfSubjects2 = "numeric", numberOfSubjects = "matrix", numberOfSubjects1 = "matrix", numberOfSubjects2 = "matrix", expectedNumberOfSubjectsH0 = "numeric", expectedNumberOfSubjectsH01 = "numeric", expectedNumberOfSubjectsH1 = "numeric", effect = "numeric", expectedNumberOfSubjects = "numeric", rejectPerStage = "matrix", overallReject = "numeric", futilityPerStage = "matrix", futilityStop = "numeric", earlyStop = "numeric", criticalValuesEffectScale = "matrix", criticalValuesEffectScaleLower = "matrix", criticalValuesEffectScaleUpper = "matrix", criticalValuesPValueScale = "matrix", futilityBoundsEffectScale = "matrix", futilityBoundsPValueScale = "matrix" ), methods = list( initialize = function(..., normalApproximation = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_MEANS[["normalApproximation"]], meanRatio = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_MEANS[["meanRatio"]], thetaH0 = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_MEANS[["thetaH0"]], alternative = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_MEANS[["alternative"]], stDev = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_MEANS[["stDev"]], groups = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_MEANS[["groups"]], allocationRatioPlanned = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_MEANS[["allocationRatioPlanned"]]) { callSuper(..., normalApproximation = normalApproximation, meanRatio = meanRatio, thetaH0 = thetaH0, alternative = alternative, stDev = stDev, groups = groups, allocationRatioPlanned = allocationRatioPlanned ) optimumAllocationRatio <<- FALSE visibleFieldNames <- .getVisibleFieldNames() startIndex <- which(visibleFieldNames == "directionUpper") for (i in startIndex:length(visibleFieldNames)) { .setParameterType(visibleFieldNames[i], C_PARAM_NOT_APPLICABLE) } if (groups == 1) { .setParameterType("meanRatio", C_PARAM_NOT_APPLICABLE) .setParameterType("allocationRatioPlanned", C_PARAM_NOT_APPLICABLE) } .setParameterType("maxNumberOfSubjects1", C_PARAM_NOT_APPLICABLE) .setParameterType("maxNumberOfSubjects2", C_PARAM_NOT_APPLICABLE) .setParameterType("criticalValuesEffectScale", C_PARAM_NOT_APPLICABLE) .setParameterType("criticalValuesEffectScaleLower", C_PARAM_NOT_APPLICABLE) .setParameterType("criticalValuesEffectScaleUpper", C_PARAM_NOT_APPLICABLE) }, clone = function(alternative = NA_real_) { alternativeTemp <- alternative if (any(is.na(alternative))) { alternativeTemp <- .self$alternative } if (.objectType == "sampleSize") { return(getSampleSizeMeans(design = .self$.design, normalApproximation = .self$.getParameterValueIfUserDefinedOrDefault("normalApproximation"), meanRatio = .self$meanRatio, #.getParameterValueIfUserDefinedOrDefault("meanRatio"), thetaH0 = .self$.getParameterValueIfUserDefinedOrDefault("thetaH0"), alternative = alternativeTemp, stDev = .self$.getParameterValueIfUserDefinedOrDefault("stDev"), groups = .self$.getParameterValueIfUserDefinedOrDefault("groups"), allocationRatioPlanned = 
.self$.getParameterValueIfUserDefinedOrDefault("allocationRatioPlanned"))) } else { return(getPowerMeans(design = .self$.design, normalApproximation = .self$.getParameterValueIfUserDefinedOrDefault("normalApproximation"), meanRatio = .self$meanRatio, #.getParameterValueIfUserDefinedOrDefault("meanRatio"), thetaH0 = .self$.getParameterValueIfUserDefinedOrDefault("thetaH0"), alternative = alternativeTemp, stDev = .self$.getParameterValueIfUserDefinedOrDefault("stDev"), directionUpper = .self$.getParameterValueIfUserDefinedOrDefault("directionUpper"), maxNumberOfSubjects = .self$.getParameterValueIfUserDefinedOrDefault("maxNumberOfSubjects"), groups = .self$.getParameterValueIfUserDefinedOrDefault("groups"), allocationRatioPlanned = .self$.getParameterValueIfUserDefinedOrDefault("allocationRatioPlanned"))) } }, show = function(showType = 1, digits = NA_integer_) { 'Method for automatically printing trial plan objects' callSuper(showType = showType, digits = digits) } ) ) #' #' @name TrialDesignPlanRates #' #' @title #' Trial Design Plan Rates #' #' @description #' Trial design plan for rates. #' #' @details #' This object can not be created directly; use \code{\link{getSampleSizeRates}} #' with suitable arguments to create a design plan for a dataset of rates. #' #' @include class_core_parameter_set.R #' @include class_design.R #' @include class_design_set.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' TrialDesignPlanRates <- setRefClass("TrialDesignPlanRates", contains = "TrialDesignPlan", fields = list( normalApproximation = "logical", riskRatio = "logical", thetaH0 = "numeric", pi1 = "numeric", pi2 = "numeric", groups = "numeric", allocationRatioPlanned = "numeric", optimumAllocationRatio = "logical", directionUpper = "logical", nFixed = "numeric", nFixed1 = "numeric", nFixed2 = "numeric", informationRates = "matrix", maxNumberOfSubjects = "numeric", maxNumberOfSubjects1 = "numeric", maxNumberOfSubjects2 = "numeric", numberOfSubjects = "matrix", numberOfSubjects1 = "matrix", numberOfSubjects2 = "matrix", expectedNumberOfSubjectsH0 = "numeric", expectedNumberOfSubjectsH01 = "numeric", expectedNumberOfSubjectsH1 = "numeric", effect = "numeric", expectedNumberOfSubjects = "numeric", rejectPerStage = "matrix", overallReject = "numeric", futilityPerStage = "matrix", futilityStop = "numeric", earlyStop = "numeric", criticalValuesEffectScale = "matrix", criticalValuesEffectScaleLower = "matrix", criticalValuesEffectScaleUpper = "matrix", criticalValuesPValueScale = "matrix", futilityBoundsEffectScale = "matrix", futilityBoundsPValueScale = "matrix" ), methods = list( initialize = function(..., normalApproximation = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_RATES[["normalApproximation"]], riskRatio = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_RATES[["riskRatio"]], thetaH0 = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_RATES[["thetaH0"]], pi1 = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_RATES[["pi1"]], pi2 = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_RATES[["pi2"]], groups = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_RATES[["groups"]], allocationRatioPlanned = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_RATES[["allocationRatioPlanned"]]) { callSuper(..., normalApproximation = normalApproximation, riskRatio = riskRatio, thetaH0 = thetaH0, pi1 = pi1, pi2 = pi2, groups = groups, allocationRatioPlanned = allocationRatioPlanned) optimumAllocationRatio <<- FALSE visibleFieldNames <- .getVisibleFieldNames() startIndex <- which(visibleFieldNames == "directionUpper") for (i in startIndex:length(visibleFieldNames)) { 
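				# mark every visible field from 'directionUpper' onward as 'not applicable';
				# the sample size / power calculation overwrites these markers later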
				.setParameterType(visibleFieldNames[i], C_PARAM_NOT_APPLICABLE)
			}
			if (groups == 1) {
				.setParameterType("riskRatio", C_PARAM_NOT_APPLICABLE)
				.setParameterType("allocationRatioPlanned", C_PARAM_NOT_APPLICABLE)
			}
			.setParameterType("maxNumberOfSubjects1", C_PARAM_NOT_APPLICABLE)
			.setParameterType("maxNumberOfSubjects2", C_PARAM_NOT_APPLICABLE)
			.setParameterType("criticalValuesEffectScale", C_PARAM_NOT_APPLICABLE)
			.setParameterType("criticalValuesEffectScaleLower", C_PARAM_NOT_APPLICABLE)
			.setParameterType("criticalValuesEffectScaleUpper", C_PARAM_NOT_APPLICABLE)
		},
		clone = function(pi1 = NA_real_) {
			pi1Temp <- pi1
			if (any(is.na(pi1))) {
				pi1Temp <- .self$pi1
			}
			if (.objectType == "sampleSize") {
				return(getSampleSizeRates(design = .self$.design,
					normalApproximation = .self$.getParameterValueIfUserDefinedOrDefault("normalApproximation"),
					riskRatio = .self$riskRatio, #.getParameterValueIfUserDefinedOrDefault("riskRatio"),
					thetaH0 = .self$.getParameterValueIfUserDefinedOrDefault("thetaH0"),
					pi1 = pi1Temp,
					pi2 = .self$.getParameterValueIfUserDefinedOrDefault("pi2"),
					groups = .self$.getParameterValueIfUserDefinedOrDefault("groups"),
					allocationRatioPlanned = .self$.getParameterValueIfUserDefinedOrDefault("allocationRatioPlanned")))
			} else {
				return(getPowerRates(design = .self$.design,
					riskRatio = .self$riskRatio, #.getParameterValueIfUserDefinedOrDefault("riskRatio"),
					thetaH0 = .self$.getParameterValueIfUserDefinedOrDefault("thetaH0"),
					pi1 = pi1Temp,
					pi2 = .self$.getParameterValueIfUserDefinedOrDefault("pi2"),
					directionUpper = .self$.getParameterValueIfUserDefinedOrDefault("directionUpper"),
					maxNumberOfSubjects = .self$.getParameterValueIfUserDefinedOrDefault("maxNumberOfSubjects"),
					groups = .self$.getParameterValueIfUserDefinedOrDefault("groups"),
					allocationRatioPlanned = .self$.getParameterValueIfUserDefinedOrDefault("allocationRatioPlanned")))
			}
		},
		show = function(showType = 1, digits = NA_integer_) {
			'Method for automatically printing trial plan objects'
			callSuper(showType = showType, digits = digits)
		}
	)
)

#'
#' @name TrialDesignPlanSurvival
#'
#' @title
#' Trial Design Plan Survival
#'
#' @description
#' Trial design plan for survival data.
#'
#' @details
#' This object cannot be created directly; use \code{\link{getSampleSizeSurvival}}
#' with suitable arguments to create a design plan for a dataset of survival data.
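#'
#' @examples
#' # Construction sketch (illustrative argument values only):
#' \donttest{
#' plan <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2),
#'     pi1 = 0.4, pi2 = 0.2)
#' plan
#' }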
#' #' @include class_core_parameter_set.R #' @include class_design.R #' @include class_design_set.R #' @include class_time.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' TrialDesignPlanSurvival <- setRefClass("TrialDesignPlanSurvival", contains = "TrialDesignPlan", fields = list( .piecewiseSurvivalTime = "PiecewiseSurvivalTime", .accrualTime = "AccrualTime", typeOfComputation = "character", thetaH0 = "numeric", directionUpper = "logical", pi1 = "numeric", pi2 = "numeric", median1 = "numeric", median2 = "numeric", lambda1 = "numeric", lambda2 = "numeric", hazardRatio = "numeric", maxNumberOfSubjects = "numeric", maxNumberOfSubjects1 = "numeric", maxNumberOfSubjects2 = "numeric", maxNumberOfEvents = "numeric", allocationRatioPlanned = "numeric", optimumAllocationRatio = "logical", accountForObservationTimes = "logical", eventTime = "numeric", accrualTime = "numeric", totalAccrualTime = "numeric", accrualIntensity = "numeric", accrualIntensityRelative = "numeric", kappa = "numeric", piecewiseSurvivalTime = "numeric", followUpTime = "numeric", dropoutRate1 = "numeric", dropoutRate2 = "numeric", dropoutTime = "numeric", omega = "numeric", calculateFollowUpTime = "logical", eventsFixed = "numeric", nFixed = "numeric", nFixed1 = "numeric", nFixed2 = "numeric", informationRates = "matrix", analysisTime = "matrix", studyDurationH1 = "numeric", studyDuration = "numeric", maxStudyDuration = "numeric", eventsPerStage = "matrix", expectedEventsH0 = "numeric", expectedEventsH01 = "numeric", expectedEventsH1 = "numeric", expectedNumberOfEvents = "numeric", numberOfSubjects = "matrix", numberOfSubjects1 = "matrix", numberOfSubjects2 = "matrix", expectedNumberOfSubjectsH1 = "numeric", expectedNumberOfSubjects = "numeric", rejectPerStage = "matrix", overallReject = "numeric", futilityPerStage = "matrix", futilityStop = "numeric", earlyStop = "numeric", criticalValuesEffectScale = "matrix", criticalValuesEffectScaleLower = "matrix", criticalValuesEffectScaleUpper = "matrix", criticalValuesPValueScale = "matrix", futilityBoundsEffectScale = "matrix", futilityBoundsPValueScale = "matrix" ), methods = list( initialize = function(...) { callSuper(...) 
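			# as for means and rates: first flag all result fields as 'not applicable';
			# .setDefaultValue() below then assigns package defaults to the
			# time-related arguments that were not supplied by the user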
optimumAllocationRatio <<- FALSE visibleFieldNames <- .getVisibleFieldNames() startIndex <- which(visibleFieldNames == "hazardRatio") for (i in startIndex:length(visibleFieldNames)) { .setParameterType(visibleFieldNames[i], C_PARAM_NOT_APPLICABLE) } .setParameterType("maxNumberOfSubjects", C_PARAM_NOT_APPLICABLE) .setParameterType("maxNumberOfSubjects1", C_PARAM_NOT_APPLICABLE) .setParameterType("maxNumberOfSubjects2", C_PARAM_NOT_APPLICABLE) .setParameterType("median1", C_PARAM_NOT_APPLICABLE) .setParameterType("median2", C_PARAM_NOT_APPLICABLE) .setParameterType("accountForObservationTimes", C_PARAM_NOT_APPLICABLE) .setParameterType("omega", C_PARAM_NOT_APPLICABLE) .setParameterType("maxStudyDuration", C_PARAM_NOT_APPLICABLE) .setParameterType("accrualIntensityRelative", C_PARAM_NOT_APPLICABLE) .setParameterType("criticalValuesEffectScale", C_PARAM_NOT_APPLICABLE) .setParameterType("criticalValuesEffectScaleLower", C_PARAM_NOT_APPLICABLE) .setParameterType("criticalValuesEffectScaleUpper", C_PARAM_NOT_APPLICABLE) # set default values for (parameterName in c("eventTime", "accrualTime", "accrualIntensity", "kappa", "piecewiseSurvivalTime", "lambda1", "lambda2", "followUpTime", "dropoutTime")) { .setDefaultValue(parameterName) } }, clone = function(hazardRatio = NA_real_, pi1 = NA_real_) { hr <- NA_real_ if (.getParameterType("hazardRatio") == C_PARAM_USER_DEFINED) { hr <- hazardRatio if (any(is.na(hazardRatio))) { hr <- .self$hazardRatio } } pi1Temp <- NA_real_ if (.getParameterType("pi1") == C_PARAM_USER_DEFINED) { pi1Temp <- pi1 if (any(is.na(pi1))) { pi1Temp <- .self$pi1 } } accrualTimeTemp <- .self$.getParameterValueIfUserDefinedOrDefault("accrualTime") if (!is.null(accrualTimeTemp) && length(accrualTimeTemp) > 0 && !all(is.na(accrualTimeTemp)) && accrualTimeTemp[1] != 0) { accrualTimeTemp <- c(0, accrualTimeTemp) } accrualIntensityTemp <- .self$.getParameterValueIfUserDefinedOrDefault("accrualIntensity") if (all(is.na(accrualIntensityTemp))) { accrualIntensityTemp <- C_ACCRUAL_INTENSITY_DEFAULT } if (.objectType == "sampleSize") { return(getSampleSizeSurvival(design = .self$.design, typeOfComputation = .self$.getParameterValueIfUserDefinedOrDefault("typeOfComputation"), thetaH0 = .self$.getParameterValueIfUserDefinedOrDefault("thetaH0"), pi1 = pi1Temp, pi2 = .self$.getParameterValueIfUserDefinedOrDefault("pi2"), allocationRatioPlanned = .self$allocationRatioPlanned, accountForObservationTimes = .self$.getParameterValueIfUserDefinedOrDefault("accountForObservationTimes"), eventTime = .self$eventTime, accrualTime = accrualTimeTemp, accrualIntensity = accrualIntensityTemp, kappa = .self$kappa, piecewiseSurvivalTime = .self$.getParameterValueIfUserDefinedOrDefault("piecewiseSurvivalTime"), lambda2 = .self$.getParameterValueIfUserDefinedOrDefault("lambda2"), lambda1 = .self$.getParameterValueIfUserDefinedOrDefault("lambda1"), followUpTime = .self$.getParameterValueIfUserDefinedOrDefault("followUpTime"), maxNumberOfSubjects = .self$.getParameterValueIfUserDefinedOrDefault("maxNumberOfSubjects"), dropoutRate1 = .self$dropoutRate1, dropoutRate2 = .self$dropoutRate2, dropoutTime = .self$dropoutTime, hazardRatio = hr)) } else { directionUpperTemp <- directionUpper if (length(directionUpperTemp) > 1) { directionUpperTemp <- directionUpperTemp[1] } return(getPowerSurvival(design = .self$.design, typeOfComputation = .self$.getParameterValueIfUserDefinedOrDefault("typeOfComputation"), thetaH0 = .self$.getParameterValueIfUserDefinedOrDefault("thetaH0"), pi1 = pi1Temp, pi2 = 
.self$.getParameterValueIfUserDefinedOrDefault("pi2"), directionUpper = directionUpperTemp, allocationRatioPlanned = .self$allocationRatioPlanned, eventTime = .self$eventTime, accrualTime = accrualTimeTemp, accrualIntensity = accrualIntensityTemp, kappa = .self$kappa, piecewiseSurvivalTime = .self$.getParameterValueIfUserDefinedOrDefault("piecewiseSurvivalTime"), lambda2 = .self$.getParameterValueIfUserDefinedOrDefault("lambda2"), lambda1 = .self$.getParameterValueIfUserDefinedOrDefault("lambda1"), hazardRatio = hr, maxNumberOfSubjects = .self$.getParameterValueIfUserDefinedOrDefault("maxNumberOfSubjects"), maxNumberOfEvents = .self$.getParameterValueIfUserDefinedOrDefault("maxNumberOfEvents"), dropoutRate1 = .self$dropoutRate1, dropoutRate2 = .self$dropoutRate2, dropoutTime = .self$dropoutTime)) } }, .setDefaultValue = function(argumentName) { if (is.null(.self[[argumentName]]) || all(is.na(.self[[argumentName]]))) { .self[[argumentName]] <<- C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_SURVIVAL[[argumentName]] .setParameterType(argumentName, C_PARAM_DEFAULT_VALUE) } }, show = function(showType = 1, digits = NA_integer_) { 'Method for automatically printing trial plan objects' callSuper(showType = showType, digits = digits) }, .warnInCaseArgumentExists = function(argument, argumentName) { if (!all(is.na(argument)) && any(argument > 0)) { warning(sprintf("Specified '%s' (%s) not taken into account", argumentName, .arrayToString(argument)), call. = FALSE) } } ) ) #' #' @name TrialDesignPlanSurvival_summary #' #' @title #' Trial Design Plan Survival Set Summary #' #' @description #' Displays a summary of \code{TrialDesignPlanSurvival} object. #' #' @details #' Summarizes the parameters and results of a survival design. #' #' @export #' #' @keywords internal #' summary.TrialDesignPlanSurvival <- function(object, ..., type = 1, digits = NA_integer_) { if (type == 1) { return(summary.ParameterSet(object = object, ..., type = type, digits = digits)) } object$.cat("This output summarizes the ", object$.toString(), " specification.\n\n", heading = 1) object$show() object$.cat("\n") object$.piecewiseSurvivalTime$show() object$.cat("\n") object$.accrualTime$show() object$.cat("\n") object$show(showType = 2) object$.cat("\n") object$.cat(object$.toString(startWithUpperCase = TRUE), " table:\n", heading = 1) parametersToShow <- object$.getParametersToShow() parametersToShow <- parametersToShow[parametersToShow != "stages" & parametersToShow != "stage"] for (parameter in parametersToShow) { if (length(object[[parameter]]) == 1) { parametersToShow <- parametersToShow[parametersToShow != parameter] } } object$.printAsDataFrame(parameterNames = parametersToShow) invisible(object) } .addPlotSubTitleItems <- function(designPlan, designMaster, items, type) { if (type %in% c(1, 3, 4)) { return(invisible()) } if (.isTrialDesignPlanMeans(designPlan)) { nMax <- designPlan$maxNumberOfSubjects[1] # use first value for plotting if (!(type %in% c(5))) { items$add("N", round(nMax, 1), "max") } if ((type %in% c(5)) && !(items$title == "Sample Size")) { items$add("N", round(nMax, 1), "max") } if (designPlan$meanRatio) { items$add("coefficient of variation", designPlan$stDev) } else { items$add("standard deviation", designPlan$stDev) } if (designPlan$groups == 1) { if (type %in% c(2,(5:9))) { items$add("H0: mu", designPlan$thetaH0) items$add("allocation ratio", round(designPlan$allocationRatioPlanned, 2)) } } else { if (type %in% c(2,(5:9))) { if (designPlan$meanRatio) { items$add("H0: mean ratio", designPlan$thetaH0) } else { 
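				# two-group case without ratio testing: H0 is stated as a mean difference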
items$add("H0: mean difference", designPlan$thetaH0) } items$add("allocation ratio", round(designPlan$allocationRatioPlanned, 2)) } } } else if (.isTrialDesignPlanRates(designPlan)) { nMax <- designPlan$maxNumberOfSubjects[1] # use first value for plotting if (!(type %in% c(5))){ items$add("N", round(nMax, 1), "max") } if ((type %in% c(5)) && !(items$title == "Sample Size")) { items$add("N", round(nMax, 1), "max") } if (designPlan$groups == 2 && !(type %in% c(3, 4)) && length(designPlan$pi2) == 1 && !is.na(designPlan$pi2)) { items$add("pi", designPlan$pi2, 2) } if (designPlan$groups == 1) { if (type %in% c(2,(5:9))) { items$add("H0: pi", designPlan$thetaH0) } } else { if (type %in% c(2,(5:9))) { if (designPlan$riskRatio) { items$add("H0: risk ratio", designPlan$thetaH0) } else { items$add("H0: risk difference", designPlan$thetaH0) } items$add("allocation ratio", round(designPlan$allocationRatioPlanned, 2)) } } } else if (.isTrialDesignPlanSurvival(designPlan)) { if (designPlan$.isPowerObject() && !(type %in% (13:14))) { items$add("maximum number of events", designPlan$maxNumberOfEvents[1]) } if (type %in% (10:12)) { items$add("maximum number of subjects", designPlan$maxNumberOfSubjects[1]) } if (type %in% c(2,(5:12))) { items$add("H0: hazard ratio", designPlan$thetaH0) items$add("allocation ratio", round(designPlan$allocationRatioPlanned, 2)) } } } .assertIsValidVariedParameterVectorForPlotting <- function(designPlan, plotType) { if (.isTrialDesignPlanMeans(designPlan)) { if (is.null(designPlan$alternative) || is.na(designPlan$alternative) || length(designPlan$alternative) <= 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "plot type ", plotType, " is only available if 'alternative' with length > 1 is defined") } } else if (.isTrialDesignPlanRates(designPlan)) { if (is.null(designPlan$pi1) || is.na(designPlan$pi1) || length(designPlan$pi1) <= 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "plot type ", plotType, " is only available if 'pi1' with length > 1 is defined") } } else if (.isTrialDesignPlanSurvival(designPlan)) { if (is.null(designPlan$hazardRatio) || is.na(designPlan$hazardRatio) || length(designPlan$hazardRatio) <= 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "plot type ", plotType, " is only available if 'hazardRatio' with length > 1 is defined") } } } .plotTrialDesignPlan <- function(designPlan, designMaster, type = 1L, main = NA_character_, xlab = NA_character_, ylab = NA_character_, palette = "Set1", theta = seq(-1, 1, 0.02), plotPointsEnabled = NA, legendPosition = NA_integer_, showSource = FALSE, designPlanName = NA_character_, ...) 
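# Internal plot dispatcher: maps 'type' (1..14) to the data preparation for the
# requested plot and forwards to .plotParameterSet() or .plotDataFrame();
# types 10 to 14 are only available for survival design plans (checked below).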
{ .assertGgplotIsInstalled() .assertIsTrialDesignPlan(designPlan) .assertIsValidLegendPosition(legendPosition) theta <- .assertIsValidThetaRange(thetaRange = theta) nMax <- ifelse(.isTrialDesignPlanSurvival(designPlan), designPlan$maxNumberOfEvents[1], designPlan$maxNumberOfSubjects[1]) # use first value for plotting plotSettings <- designPlan$.plotSettings if (designMaster$kMax == 1 && (type %in% c(1:4))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' (", type, ") is not available for 'kMax' = 1") } if (designPlan$.isSampleSizeObject()) { if (.isTrialDesignPlanSurvival(designPlan)) { if (!(type %in% c(1:5, 13, 14))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' (", type, ") is not allowed; must be 1, 2, 3, 4, 5, 13 or 14") } } else { if (!(type %in% c(1:5))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' (", type, ") is not allowed; must be 1, 2, 3, 4, 5") } } } if (is.na(plotPointsEnabled)) { plotPointsEnabled <- type < 4 } ratioEnabled <- (.isTrialDesignPlanSurvival(designPlan) || (.isTrialDesignPlanMeans(designPlan) && designPlan$meanRatio) || (.isTrialDesignPlanRates(designPlan) && designPlan$riskRatio)) variedParameters <- logical(0) showSourceHint <- "" if (type %in% c(5:12)) { if (.isTrialDesignPlanMeans(designPlan) && length(designPlan$alternative) == 2 && designPlan$.getParameterType("alternative") == C_PARAM_USER_DEFINED) { if (showSource) { showSourceHint <- .getVariedParameterHint(designPlan$alternative, "alternative") } designPlan <- designPlan$clone(alternative = .getVariedParameterVector(designPlan$alternative, "alternative")) } else if ((.isTrialDesignPlanRates(designPlan) || .isTrialDesignPlanSurvival(designPlan)) && length(designPlan$pi1) == 2 && designPlan$.getParameterType("pi1") == C_PARAM_USER_DEFINED) { if (showSource) { showSourceHint <- .getVariedParameterHint(designPlan$pi1, "pi1") } designPlan <- designPlan$clone(pi1 = .getVariedParameterVector(designPlan$pi1, "pi1")) } else if (.isTrialDesignPlanSurvival(designPlan) && length(designPlan$hazardRatio) == 2 && designPlan$.getParameterType("hazardRatio") == C_PARAM_USER_DEFINED) { if (showSource) { showSourceHint <- .getVariedParameterHint(designPlan$hazardRatio, "hazardRatio") } designPlan <- designPlan$clone(hazardRatio = .getVariedParameterVector(designPlan$hazardRatio, "hazardRatio")) } } if (type == 1) { # Boundary plot if (.isTrialDesignPlanSurvival(designPlan)) { main <- ifelse(is.na(main), "Boundaries Z Scale", main) if (designMaster$sided == 1) { designPlan <- data.frame( eventsPerStage = designPlan$eventsPerStage[, 1], criticalValues = designMaster$criticalValues, futilityBounds = c(designMaster$futilityBounds, designMaster$criticalValues[designMaster$kMax]) ) } else { designPlan <- data.frame( eventsPerStage = designPlan$eventsPerStage[, 1], criticalValues = designMaster$criticalValues, criticalValuesMirrored = -designMaster$criticalValues ) } xParameterName <- "eventsPerStage" if (designMaster$sided == 1) { if (any(designMaster$futilityBounds > -6)) { yParameterNames <- c("futilityBounds", "criticalValues") } else { yParameterNames <- "criticalValues" } } else { yParameterNames <- c("criticalValues", "criticalValuesMirrored") } if (is.na(legendPosition)) { legendPosition <- C_POSITION_RIGHT_TOP } .showPlotSourceInformation(objectName = designPlanName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, showSource = showSource) } else { designSet <- TrialDesignSet(design = designMaster, singleDesign = TRUE) return(.plotTrialDesignSet(x = 
designSet, y = NULL, main = main, xlab = xlab, ylab = ylab,
				type = type, palette = palette, theta = theta, nMax = nMax,
				plotPointsEnabled = plotPointsEnabled, legendPosition = legendPosition,
				designSetName = designPlanName, ...))
		}
	} else if (type == 2) { # Effect Scale Boundary plot
		reducedParam <- .warnInCaseOfUnusedValuesForPlotting(designPlan)
		if (is.na(main)) {
			items <- PlotSubTitleItems(title = "Boundaries Effect Scale")
			.addPlotSubTitleItems(designPlan, designMaster, items, type)
			if (!is.null(reducedParam)) {
				items$add(reducedParam$title, reducedParam$value, reducedParam$subscript)
			}
			main <- items$toQuote()
		}
		if (is.na(ylab)) {
			if (.isTrialDesignPlanMeans(designPlan)) {
				if (designPlan$groups == 1) {
					ylab <- "Mean"
				} else if (!designPlan$meanRatio) {
					ylab <- "Mean Difference"
				} else {
					ylab <- "Mean Ratio"
				}
			} else if (.isTrialDesignPlanRates(designPlan)) {
				if (designPlan$groups == 1) {
					ylab <- "Rate"
				} else if (!designPlan$riskRatio) {
					ylab <- "Rate Difference"
				} else {
					ylab <- "Risk Ratio"
				}
			} else if (.isTrialDesignPlanSurvival(designPlan)) {
				ylab <- "Hazard Ratio"
			}
		}
		yParameterNamesSrc <- c()
		if (designMaster$sided == 1) {
			data <- data.frame(
				criticalValues = designPlan$criticalValuesEffectScale[, 1],
				futilityBounds = c(designPlan$futilityBoundsEffectScale[, 1],
					designPlan$criticalValuesEffectScale[designMaster$kMax, 1])
			)
			yParameterNamesSrc <- c(yParameterNamesSrc, "criticalValuesEffectScaleUpper[, 1]")
			yParameterNamesSrc <- c(yParameterNamesSrc, "futilityBoundsEffectScale[, 1]")
		} else {
			data <- data.frame(
				criticalValues = designPlan$criticalValuesEffectScaleUpper[, 1],
				criticalValuesMirrored = designPlan$criticalValuesEffectScaleLower[, 1]
			)
			yParameterNamesSrc <- c(yParameterNamesSrc, "criticalValuesEffectScaleUpper[, 1]")
			yParameterNamesSrc <- c(yParameterNamesSrc, "criticalValuesEffectScaleLower[, 1]")
		}
		if (.isTrialDesignPlanSurvival(designPlan)) {
			xParameterName <- "eventsPerStage"
			data <- cbind(data.frame(eventsPerStage = designPlan$eventsPerStage[, 1]), data)
		} else {
			xParameterName <- "informationRates"
			data <- cbind(data.frame(informationRates = designMaster$informationRates), data)
		}
		if (designMaster$sided == 1) {
			if (any(designMaster$futilityBounds > -6)) {
				yParameterNames <- c("futilityBounds", "criticalValues")
			} else {
				yParameterNames <- "criticalValues"
			}
		} else {
			yParameterNames <- c("criticalValues", "criticalValuesMirrored")
		}
		if (is.na(legendPosition)) {
			legendPosition <- C_POSITION_RIGHT_TOP
		}
		designPlan <- data
		.showPlotSourceInformation(objectName = designPlanName, xParameterName = xParameterName,
			yParameterNames = yParameterNamesSrc, hint = showSourceHint, nMax = nMax,
			showSource = showSource)
	} else if (type == 3) { # Stage Levels
		if (is.na(main)) {
			items <- PlotSubTitleItems(title = "Boundaries p Values Scale")
			.addPlotSubTitleItems(designPlan, designMaster, items, type)
			main <- items$toQuote()
		}
		if (.isTrialDesignPlanSurvival(designPlan)) {
			xParameterName <- "eventsPerStage"
			yParameterNames <- "stageLevels"
			designPlan <- data.frame(
				eventsPerStage = designPlan$eventsPerStage[, 1],
				stageLevels = designMaster$stageLevels
			)
			xParameterNameSrc <- "eventsPerStage[, 1]"
			yParameterNamesSrc <- ".design$stageLevels"
		} else {
			xParameterName <- "informationRates"
			yParameterNames <- "stageLevels"
			designPlan <- TrialDesignSet(design = designMaster, singleDesign = TRUE)
			xParameterNameSrc <- ".design$informationRates"
			yParameterNamesSrc <- ".design$stageLevels"
		}
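		# optionally print the underlying x/y parameter names (showSource = TRUE)
		# before the data is handed to the shared plotting routine at the end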
.showPlotSourceInformation(objectName = designPlanName, xParameterName = xParameterNameSrc, yParameterNames = yParameterNamesSrc, hint = showSourceHint, nMax = nMax, showSource = showSource) } else if (type == 4) { # Alpha Spending if (is.na(main)) { items <- PlotSubTitleItems(title = "Type One Error Spending") .addPlotSubTitleItems(designPlan, designMaster, items, type) main <- items$toQuote() } if (.isTrialDesignPlanSurvival(designPlan)) { xParameterName <- "eventsPerStage" yParameterNames <- "alphaSpent" designPlan <- data.frame( eventsPerStage = designPlan$eventsPerStage[, 1], alphaSpent = designMaster$alphaSpent ) xParameterNameSrc <- "eventsPerStage[, 1]" yParameterNamesSrc <- ".design$alphaSpent" } else { xParameterName <- "informationRates" yParameterNames <- "alphaSpent" designPlan <- TrialDesignSet(design = designMaster, singleDesign = TRUE) xParameterNameSrc <- ".design$informationRates" yParameterNamesSrc <- ".design$alphaSpent" } plotPointsEnabled <- ifelse(is.na(plotPointsEnabled), FALSE, plotPointsEnabled) .showPlotSourceInformation(objectName = designPlanName, xParameterName = xParameterNameSrc, yParameterNames = yParameterNamesSrc, hint = showSourceHint, nMax = nMax, showSource = showSource) } else if (type == 5) { # Power and Stopping Probabilities .assertIsValidVariedParameterVectorForPlotting(designPlan, type) if (designPlan$.isSampleSizeObject()) { if (is.na(main)) { items <- PlotSubTitleItems(title = "Sample Size") .addPlotSubTitleItems(designPlan, designMaster, items, type) main <- items$toQuote() } yAxisScalingEnabled <- TRUE if (.isTrialDesignPlanMeans(designPlan)) { xParameterName <- "alternative" yParameterNames <- c("nFixed") if (designMaster$kMax > 1) { yParameterNames <- c(yParameterNames, "maxNumberOfSubjects", "expectedNumberOfSubjectsH1") } if (is.na(ylab)) { ylab <- "Sample Size" } yAxisScalingEnabled <- FALSE if (is.na(legendPosition)) { legendPosition <- C_POSITION_RIGHT_TOP } yParameterNamesSrc <- yParameterNames } else if (.isTrialDesignPlanRates(designPlan)) { xParameterName <- "pi1" yParameterNames <- c("nFixed") if (designMaster$kMax > 1) { yParameterNames <- c(yParameterNames, "maxNumberOfSubjects", "expectedNumberOfSubjectsH1") } if (is.na(ylab)) { ylab <- "Sample Size" } yAxisScalingEnabled <- FALSE if (is.na(legendPosition)) { legendPosition <- C_POSITION_RIGHT_TOP } yParameterNamesSrc <- yParameterNames } else if (.isTrialDesignPlanSurvival(designPlan)) { designPlan <- data.frame( hazardRatio = designPlan$hazardRatio, eventsFixed = designPlan$eventsFixed, maxNumberOfEvents = designPlan$eventsPerStage[designMaster$kMax, ], expectedEventsH1 = designPlan$expectedEventsH1 ) xParameterName <- "hazardRatio" yParameterNames <- c("eventsFixed") if (designMaster$kMax > 1) { yParameterNames <- c(yParameterNames, "maxNumberOfEvents", "expectedEventsH1") } if (is.na(ylab)) { ylab <- "# Events" } if (is.na(legendPosition)) { legendPosition <- C_POSITION_RIGHT_TOP } yParameterNamesSrc <- c("eventsFixed", paste0("eventsPerStage[", designMaster$kMax, ", ]"), "expectedEventsH1") } .showPlotSourceInformation(objectName = designPlanName, xParameterName = xParameterName, yParameterNames = yParameterNamesSrc, hint = showSourceHint, nMax = nMax, showSource = showSource) return(.plotParameterSet(parameterSet = designPlan, designMaster = designMaster, xParameterName = xParameterName, yParameterNames = yParameterNames, mainTitle = main, xlab = xlab, ylab = ylab, palette = palette, theta = theta, nMax = nMax, plotPointsEnabled = plotPointsEnabled, legendPosition = 
				legendPosition, variedParameters = variedParameters,
				qnormAlphaLineEnabled = FALSE, yAxisScalingEnabled = FALSE, ...))
		} else {
			if (is.na(main)) {
				items <- PlotSubTitleItems(title = "Overall Power and Early Stopping")
				.addPlotSubTitleItems(designPlan, designMaster, items, type)
				main <- items$toQuote()
			}
			if (.isTrialDesignPlanSurvival(designPlan)) {
				xParameterName <- "hazardRatio"
			} else {
				xParameterName <- "effect"
			}
			yParameterNames <- c("overallReject", "futilityStop", "earlyStop")
			if (is.na(ylab)) {
				ylab <- ""
			}
			if (is.na(legendPosition)) {
				legendPosition <- C_POSITION_LEFT_TOP
			}
			# print the plot source information before the plot is returned
			.showPlotSourceInformation(objectName = designPlanName, xParameterName = xParameterName,
				yParameterNames = yParameterNames, hint = showSourceHint,
				nMax = nMax, showSource = showSource)
			if (is.null(list(...)[["ylim"]])) {
				ylim <- c(0, 1)
				return(.plotParameterSet(parameterSet = designPlan, designMaster = designMaster,
					xParameterName = xParameterName, yParameterNames = yParameterNames,
					mainTitle = main, xlab = xlab, ylab = ylab, palette = palette,
					theta = theta, nMax = nMax, plotPointsEnabled = plotPointsEnabled,
					legendPosition = legendPosition, variedParameters = variedParameters,
					qnormAlphaLineEnabled = FALSE, yAxisScalingEnabled = FALSE, ylim = ylim, ...))
			} else {
				return(.plotParameterSet(parameterSet = designPlan, designMaster = designMaster,
					xParameterName = xParameterName, yParameterNames = yParameterNames,
					mainTitle = main, xlab = xlab, ylab = ylab, palette = palette,
					theta = theta, nMax = nMax, plotPointsEnabled = plotPointsEnabled,
					legendPosition = legendPosition, variedParameters = variedParameters,
					qnormAlphaLineEnabled = FALSE, yAxisScalingEnabled = FALSE, ...))
			}
		}
	} else if (type == 6) { # Average Sample Size / Average Event Number
		.assertIsValidVariedParameterVectorForPlotting(designPlan, type)
		if (is.na(main)) {
			titlePart <- ifelse(.isTrialDesignPlanSurvival(designPlan), "Number of Events", "Sample Size")
			items <- PlotSubTitleItems(title = paste0("Expected ", titlePart, " and Power / Early Stop"))
			.addPlotSubTitleItems(designPlan, designMaster, items, type)
			main <- items$toQuote()
		}
		if (.isTrialDesignPlanSurvival(designPlan)) {
			xParameterName <- "hazardRatio"
			yParameterNames <- "expectedNumberOfEvents"
			expectedNumberOfEvents <- designPlan[["expectedNumberOfEvents"]]
			if (is.null(expectedNumberOfEvents) || length(expectedNumberOfEvents) == 0) {
				yParameterNames <- "expectedEventsH1"
			}
			yParameterNames <- c(yParameterNames, "overallReject", "earlyStop") # overallReject = power
			if (is.na(legendPosition)) {
				legendPosition <- C_POSITION_RIGHT_CENTER
			}
		} else {
			xParameterName <- "effect"
			yParameterNames <- c("expectedNumberOfSubjects", "overallReject", "earlyStop") # overallReject = power
		}
		.showPlotSourceInformation(objectName = designPlanName, xParameterName = xParameterName,
			yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax,
			showSource = showSource)
	} else if (type == 7) {
		.assertIsValidVariedParameterVectorForPlotting(designPlan, type)
		if (is.na(main)) {
			items <- PlotSubTitleItems(title = "Overall Power")
			.addPlotSubTitleItems(designPlan, designMaster, items, type)
			main <- items$toQuote()
		}
		if (.isTrialDesignPlanSurvival(designPlan)) {
			xParameterName <- "hazardRatio"
		} else {
			xParameterName <- "effect"
		}
		yParameterNames <- "overallReject"
		if (is.na(legendPosition)) {
			legendPosition <- C_POSITION_RIGHT_CENTER
		}
		.showPlotSourceInformation(objectName = designPlanName, xParameterName = xParameterName,
			yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, showSource =
showSource) } else if (type == 8) { .assertIsValidVariedParameterVectorForPlotting(designPlan, type) if (is.na(main)) { items <- PlotSubTitleItems(title = "Overall Early Stopping") .addPlotSubTitleItems(designPlan, designMaster, items, type) main <- items$toQuote() } if (.isTrialDesignPlanSurvival(designPlan)) { xParameterName <- "hazardRatio" } else { xParameterName <- "effect" } yParameterNames <- c("earlyStop", "futilityStop") if (is.na(legendPosition)) { legendPosition <- C_POSITION_RIGHT_CENTER } .showPlotSourceInformation(objectName = designPlanName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, showSource = showSource) } else if (type == 9) { .assertIsValidVariedParameterVectorForPlotting(designPlan, type) if (is.na(main)) { if (.isTrialDesignPlanSurvival(designPlan)) { items <- PlotSubTitleItems(title = "Expected Number of Events") } else { items <- PlotSubTitleItems(title = "Expected Sample Size") } .addPlotSubTitleItems(designPlan, designMaster, items, type) main <- items$toQuote() } if (.isTrialDesignPlanSurvival(designPlan)) { xParameterName <- "hazardRatio" yParameterNames <- "expectedNumberOfEvents" expectedNumberOfEvents <- designPlan[["expectedNumberOfEvents"]] if (is.null(expectedNumberOfEvents) || length(expectedNumberOfEvents) == 0) { yParameterNames <- c("expectedEventsH0", "expectedEventsH1") if (is.na(legendPosition)) { legendPosition <- C_POSITION_RIGHT_CENTER } } } else { xParameterName <- "effect" yParameterNames <- "expectedNumberOfSubjects" } .showPlotSourceInformation(objectName = designPlanName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, showSource = showSource) } else if (.isTrialDesignPlanSurvival(designPlan)) { if (type == 10) { # Study Duration .assertIsValidVariedParameterVectorForPlotting(designPlan, type) if (is.na(main)) { items <- PlotSubTitleItems(title = "Study Duration") .addPlotSubTitleItems(designPlan, designMaster, items, type) main <- items$toQuote() } xParameterName <- "hazardRatio" yParameterNames <- "studyDuration" .showPlotSourceInformation(objectName = designPlanName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, showSource = showSource) } else if (type == 11) { .assertIsValidVariedParameterVectorForPlotting(designPlan, type) if (is.na(main)) { items <- PlotSubTitleItems(title = "Expected Number of Subjects") .addPlotSubTitleItems(designPlan, designMaster, items, type) main <- items$toQuote() } xParameterName <- "hazardRatio" yParameterNames <- "expectedNumberOfSubjects" .showPlotSourceInformation(objectName = designPlanName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, showSource = showSource) } else if (type == 12) { # Analysis Time .assertIsValidVariedParameterVectorForPlotting(designPlan, type) if (is.na(main)) { items <- PlotSubTitleItems(title = "Analysis Times") .addPlotSubTitleItems(designPlan, designMaster, items, type) main <- items$toQuote() } xParameterName <- "hazardRatio" yParameterNames <- "analysisTime" data <- NULL for (k in 1:designMaster$kMax) { part <- data.frame( categories = rep(k, length(designPlan$hazardRatio)), xValues = designPlan$hazardRatio, yValues = designPlan$analysisTime[k, ] ) if (is.null(data)) { data <- part } else { data <- rbind(data, part) } } .showPlotSourceInformation(objectName = designPlanName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = 
showSourceHint, showSource = showSource) return(.plotDataFrame(data, mainTitle = main, xlab = NA_character_, ylab = NA_character_, xAxisLabel = "Hazard Ratio", yAxisLabel1 = "Analysis Time", yAxisLabel2 = NA_character_, plotPointsEnabled = TRUE, legendTitle = "Stage", legendPosition = legendPosition, sided = designMaster$sided, ...)) } else if (type == 13 || type == 14) { # Cumulative Distribution Function / Survival function return(.plotSurvivalFunction(designPlan, designMaster = designMaster, type = type, main = main, xlab = xlab, ylab = ylab, palette = palette, legendPosition = legendPosition, showSource = showSource, ...)) } else { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' (", type, ") is not allowed; must be 1, 2, ..., 14") } } else { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' (", type, ") is not allowed; must be 1, 2, ..., 9") } return(.plotParameterSet(parameterSet = designPlan, designMaster = designMaster, xParameterName = xParameterName, yParameterNames = yParameterNames, mainTitle = main, xlab = xlab, ylab = ylab, palette = palette, theta = theta, nMax = nMax, plotPointsEnabled = plotPointsEnabled, legendPosition = legendPosition, variedParameters = variedParameters, qnormAlphaLineEnabled = (type != 2), ratioEnabled = ratioEnabled, plotSettings = plotSettings, ...)) } # Cumulative Distribution Function / Survival function .plotSurvivalFunction <- function(designPlan, ..., designMaster, type = 1L, main = NA_character_, xlab = NA_character_, ylab = NA_character_, palette = "Set1", legendPosition = NA_integer_, showSource = FALSE) { if (is.null(designPlan$piecewiseSurvivalTime) || length(designPlan$piecewiseSurvivalTime) == 0) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'piecewiseSurvivalTime' must be specified") } lambda1 <- designPlan[["lambda1"]] lambda2 <- designPlan[["lambda2"]] if (is.null(lambda2) || length(lambda2) == 0) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'lambda2' must be specified") } if (is.null(designPlan$kappa) || length(designPlan$kappa) == 0) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'kappa' must be specified") } if (is.null(designPlan$hazardRatio) || length(designPlan$hazardRatio) == 0) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'hazardRatio' must be specified") } piecewiseSurvivalEnabled <- designPlan$.piecewiseSurvivalTime$piecewiseSurvivalEnabled if (is.na(main)) { if (type == 13) { items <- PlotSubTitleItems(title = "Cumulative Distribution Function") } else { items <- PlotSubTitleItems(title = "Survival Function") } .addPlotSubTitleItems(designPlan, designMaster, items, type) if (!piecewiseSurvivalEnabled) { if (designPlan$.piecewiseSurvivalTime$.isLambdaBased(minNumberOfLambdas = 1)) { items$add("lambda", round(designPlan$lambda1[1],4), 1) items$add("lambda", round(designPlan$lambda2,4), 2) } else { items$add("pi", round(designPlan$pi1[1],3), 1) items$add("pi", round(designPlan$pi2,3), 2) } } else if (length(designPlan$hazardRatio) == 1) { items$add("Hazard Ratio", round(designPlan$hazardRatio[1],3)) } main <- items$toQuote() } if (!piecewiseSurvivalEnabled || (length(designPlan$piecewiseSurvivalTime) == 1 && designPlan$piecewiseSurvivalTime[1] == 0)) { timeTo <- max(designPlan$analysisTime[designMaster$kMax, ]) } else { timeTo <- max(designPlan$piecewiseSurvivalTime) } if (is.na(timeTo) || !is.numeric(timeTo) || is.infinite(timeTo)) { #warning("Unable to determine upper bound of time values", call. 
= FALSE) timeTo <- 0 } timeValues <- seq(0, timeTo + 10, 0.1) data <- data.frame( time = timeValues, lambdaGroup1 = rep(-1, length(timeValues)), lambdaGroup2 = rep(-1, length(timeValues)), survival1 = rep(-1, length(timeValues)), survival2 = rep(-1, length(timeValues)), survivalGroup1 = rep(-1, length(timeValues)), survivalGroup2 = rep(-1, length(timeValues)) ) if (piecewiseSurvivalEnabled) { data$survival2 <- .getPiecewiseExponentialDistribution(timeValues, lambda2, designPlan$piecewiseSurvivalTime, designPlan$kappa) if (!is.null(lambda1) && !is.na(lambda1) && length(lambda1) == length(lambda2)) { data$survival1 <- .getPiecewiseExponentialDistribution(timeValues, lambda1, designPlan$piecewiseSurvivalTime, designPlan$kappa) } else { .warnInCaseOfUnusedValuesForPlottingSurvival(designPlan$hazardRatio) data$survival1 <- data$survival2 * designPlan$hazardRatio[1] } } else { if (designPlan$.piecewiseSurvivalTime$.isLambdaBased(minNumberOfLambdas = 1)) { if (length(designPlan$lambda1) > 1) { warning("Only the first 'lambda1' (", round(designPlan$lambda1[1],4), ") was used for plotting", call. = FALSE) } } else { .warnInCaseOfUnusedValuesForPlottingRates(designPlan$pi1) } lambda2 <- (-log(1 - designPlan$pi2))^(1/designPlan$kappa) / designPlan$eventTime lambda1 <- (-log(1 - designPlan$pi1[1]))^(1/designPlan$kappa) / designPlan$eventTime data$survival2 <- .getPiecewiseExponentialDistribution(timeValues, lambda2, 0, designPlan$kappa) data$survival1 <- .getPiecewiseExponentialDistribution(timeValues, lambda1, 0, designPlan$kappa) } # two groups: 1 = treatment, 2 = control if (type == 14) { data$survival1 <- 1 - data$survival1 data$survival2 <- 1 - data$survival2 } if (piecewiseSurvivalEnabled) { data$lambdaGroup2 <- .getLambdaStepFunction(timeValues, designPlan$piecewiseSurvivalTime, lambda2) if (length(lambda1) == 1) { if (!is.na(lambda1)) { data$lambdaGroup1 <- rep(lambda1, length(data$lambdaGroup2)) } else { data$lambdaGroup1 <- data$lambdaGroup2 * designPlan$hazardRatio[1] } } else { data$lambdaGroup1 <- .getLambdaStepFunction(timeValues, designPlan$piecewiseSurvivalTime, lambda1) } } else { data$lambdaGroup2 <- .getLambdaStepFunction(timeValues, 0, lambda2) data$lambdaGroup1 <- .getLambdaStepFunction(timeValues, 0, lambda1) } scalingBaseValues1 <- na.omit(c(data$survival1, data$survival2)) scalingBaseValues2 <- na.omit(c(data$lambdaGroup1, data$lambdaGroup2)) scalingFactor <- max(scalingBaseValues1) / max(.getNextHigherValue(scalingBaseValues2)) data2 <- data.frame( categories = c( rep("Treatm. piecew. exp.", nrow(data)), rep("Control piecew. exp.", nrow(data)), rep("Treatm. piecew. lambda", nrow(data)), rep("Control piecew. lambda", nrow(data)) ), xValues = rep(data$time, 4), yValues = c( data$survival1, data$survival2, data$lambdaGroup1 * scalingFactor, data$lambdaGroup2 * scalingFactor ) ) if (is.na(legendPosition)) { if (type == 13) { legendPosition <- C_POSITION_LEFT_TOP } else { legendPosition <- C_POSITION_RIGHT_TOP } } if (is.na(palette) || palette == "Set1") { palette <- "Paired" } if (type == 13) { yAxisLabel1 <- "Cumulative Distribution Function" } else { yAxisLabel1 <- "Survival Function" } if (showSource) { warning("'showSource' = TRUE is not supported yet for plot type ", type, call. 
= FALSE) } return(.plotDataFrame(data2, mainTitle = main, xlab = xlab, ylab = ylab, xAxisLabel = "Time", yAxisLabel1 = yAxisLabel1, yAxisLabel2 = "Lambda", plotPointsEnabled = FALSE, legendTitle = NA_character_, legendPosition = legendPosition, scalingFactor1 = 1, scalingFactor2 = scalingFactor, palette = palette, sided = designMaster$sided)) } .warnInCaseOfUnusedValuesForPlottingMeans <- function(alternative) { if (length(alternative) > 1) { warning("Only the first 'alternative' (", round(alternative[1], 3), ") was used for plotting", call. = FALSE) return(list(title = "alternative", value = alternative[1], subscript = NA_character_)) } return(NULL) } .warnInCaseOfUnusedValuesForPlottingRates <- function(pi1) { if (length(pi1) > 1) { warning("Only the first 'pi1' (", round(pi1[1], 3), ") was used for plotting", call. = FALSE) return(list(title = "pi", value = pi1[1], subscript = "1")) } return(NULL) } .warnInCaseOfUnusedValuesForPlottingSurvival <- function(hazardRatio) { if (length(hazardRatio) > 1) { warning("Only the first 'hazardRatio' (", round(hazardRatio[1], 3), ") was used for plotting", call. = FALSE) return(list(title = "hazardRatio", value = hazardRatio[1], subscript = NA_character_)) } return(NULL) } .warnInCaseOfUnusedValuesForPlotting <- function(designPlan) { if (.isTrialDesignPlanMeans(designPlan) && designPlan$.isSampleSizeObject()) { return(.warnInCaseOfUnusedValuesForPlottingMeans(designPlan$alternative)) } if (.isTrialDesignPlanRates(designPlan) && designPlan$.isSampleSizeObject()) { return(.warnInCaseOfUnusedValuesForPlottingRates(designPlan$pi1)) } if (.isTrialDesignPlanSurvival(designPlan) && designPlan$.isSampleSizeObject()) { return(.warnInCaseOfUnusedValuesForPlottingSurvival(designPlan$hazardRatio)) } return(NULL) } #' #' @title #' Trial Design Plan Plotting #' #' @param x The trial design plan, obtained from \cr #' \code{\link{getSampleSizeMeans}}, \cr #' \code{\link{getSampleSizeRates}}, \cr #' \code{\link{getSampleSizeSurvival}}, \cr #' \code{\link{getPowerMeans}}, \cr #' \code{\link{getPowerRates}} or \cr #' \code{\link{getPowerSurvival}}. #' @param y Not available for this kind of plot (is only defined to be compatible to the generic plot function). #' @param main The main title. #' @param xlab The x-axis label. #' @param ylab The y-axis label. #' @param palette The palette, default is \code{"Set1"}. #' @param theta A vector of theta values. #' @param plotPointsEnabled If \code{TRUE}, additional points will be plotted. #' @param showSource If \code{TRUE}, the parameter names of the object will #' be printed which were used to create the plot; that may be, e.g., #' useful to check the values or to create own plots with \code{\link[graphics]{plot}}. #' @param legendPosition The position of the legend. #' By default (\code{NA_integer_}) the algorithm tries to find a suitable position. #' Choose one of the following values to specify the position manually: #' \itemize{ #' \item \code{-1}: no legend will be shown #' \item \code{NA}: the algorithm tries to find a suitable position #' \item \code{0}: legend position outside plot #' \item \code{1}: legend position left top #' \item \code{2}: legend position left center #' \item \code{3}: legend position left bottom #' \item \code{4}: legend position right top #' \item \code{5}: legend position right center #' \item \code{6}: legend position right bottom #' } #' @param type The plot type (default = \code{1}). 
The following plot types are available: #' \itemize{ #' \item \code{1}: creates a 'Boundaries' plot #' \item \code{2}: creates a 'Boundaries Effect Scale' plot #' \item \code{3}: creates a 'Boundaries p Values Scale' plot #' \item \code{4}: creates a 'Type One Error Spending' plot #' \item \code{5}: creates a 'Sample Size' or 'Overall Power and Early Stopping' plot #' \item \code{6}: creates a 'Number of Events' or 'Sample Size' plot #' \item \code{7}: creates an 'Overall Power' plot #' \item \code{8}: creates an 'Overall Early Stopping' plot #' \item \code{9}: creates an 'Expected Number of Events' or 'Expected Sample Size' plot #' \item \code{10}: creates a 'Study Duration' plot #' \item \code{11}: creates an 'Expected Number of Subjects' plot #' \item \code{12}: creates an 'Analysis Times' plot #' \item \code{13}: creates a 'Cumulative Distribution Function' plot #' \item \code{14}: creates a 'Survival Function' plot #' } #' @param ... Optional \code{ggplot2} arguments. #' #' @description #' Plots a trial design plan. #' #' @details #' Generic function to plot all kinds of trial design plans. #' #' @return #' A \code{ggplot2} object. #' #' @export #' plot.TrialDesignPlan = function(x, y, main = NA_character_, xlab = NA_character_, ylab = NA_character_, type = ifelse(x$.design$kMax == 1, 5, 1), palette = "Set1", theta = seq(-1, 1, 0.01), plotPointsEnabled = NA, legendPosition = NA_integer_, showSource = FALSE, ...) { fCall = match.call(expand.dots = FALSE) designPlanName <- as.character(fCall$x)[1] nMax <- list(...)[["nMax"]] if (!is.null(nMax)) { warning(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'nMax' (", nMax, ") will be ignored because it will be taken from design plan") } .plotTrialDesignPlan(designPlan = x, designMaster = x$.design, main = main, xlab = xlab, ylab = ylab, type = type, palette = palette, theta = theta, plotPointsEnabled = plotPointsEnabled, legendPosition = legendPosition, showSource = showSource, designPlanName = designPlanName, ...) } rpact/R/class_design_power_and_asn.R0000644000176200001440000002567113556061055017265 0ustar liggesusers###################################################################################### # # # -- Power and average sample number result classes -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### #' #' @name PowerAndAverageSampleNumberResult #' #' @title #' Power and Average Sample Number Result #' #' @description #' Class for power and average sample number (ASN) results. #' #' @details #' This object can not be created directly; use \code{getPowerAndAverageSampleNumber} #' with suitable arguments to create it. 
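#'
#' @examples
#' # Construction sketch (illustrative values):
#' \donttest{
#' pasn <- getPowerAndAverageSampleNumber(getDesignGroupSequential(),
#'     theta = seq(-1, 1, 0.5), nMax = 50)
#' as.data.frame(pasn)
#' }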
#' #' @include class_core_parameter_set.R #' @include class_design.R #' #' @keywords internal #' #' @importFrom methods new #' PowerAndAverageSampleNumberResult <- setRefClass("PowerAndAverageSampleNumberResult", contains = "ParameterSet", fields = list( .design = "TrialDesign", nMax = "numeric", theta = "numeric", averageSampleNumber = "numeric", calculatedPower = "numeric", overallEarlyStop = "numeric", earlyStop = "matrix", overallReject = "numeric", rejectPerStage = "matrix", overallFutility = "numeric", futilityPerStage = "matrix" ), methods = list( initialize = function(design, theta = seq(-1, 1, 0.05), nMax = 100L, ...) { callSuper(.design = design, theta = theta, nMax = nMax, ...) theta <<- .assertIsValidThetaRange(thetaRange = theta, thetaAutoSeqEnabled = FALSE) .initPowerAndAverageSampleNumber(design = design, theta = .self$theta, nMax = nMax) .parameterNames <<- .getParameterNames(design) .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS }, clone = function() { return(PowerAndAverageSampleNumberResult(design = .self$.design, theta = .self$theta, nMax = .self$nMax)) }, show = function(showType = 1, digits = NA_integer_) { .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE) }, .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) { 'Method for automatically printing a power and average sample size (ASN) result' .resetCat() if (showType == 2) { .cat("Technical summary of the power and average sample size (ASN) object:\n", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showAllParameters(consoleOutputEnabled = consoleOutputEnabled) .showParameterTypeDescription(consoleOutputEnabled = consoleOutputEnabled) } else { .cat("Power and average sample size (ASN):\n\n", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getUserDefinedParameters(), "User defined parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getDefaultParameters(), "Default parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getGeneratedParameters(), "Output", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled) if (.design$kMax > 1) { .cat("Legend:\n", heading = 2, consoleOutputEnabled = consoleOutputEnabled) if (.design$kMax > 1) { .cat(" [k]: values at stage k\n", consoleOutputEnabled = consoleOutputEnabled) } .cat("\n", consoleOutputEnabled = consoleOutputEnabled) } } }, .toString = function(startWithUpperCase = FALSE) { s <- "power and average sample size (ASN)" return(ifelse(startWithUpperCase, .firstCharacterToUpperCase(s), s)) }, .initPowerAndAverageSampleNumber = function(design, theta = C_POWER_ASN_THETA_DEFAULT, nMax = C_NA_MAX_DEFAULT) { .assertIsTrialDesignInverseNormalOrGroupSequential(design) .assertIsValidSidedParameter(design$sided) if (nMax <= 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'nMax' must be an integer > 0") } .setParameterType("nMax", ifelse(nMax == C_NA_MAX_DEFAULT, C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) thetaIsDefault <- length(theta) == length(C_POWER_ASN_THETA_DEFAULT) && sum(theta == C_POWER_ASN_THETA_DEFAULT) == length(theta) .setParameterType("theta", ifelse(thetaIsDefault, C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) kMax <- design$kMax # initialization numberOfThetas <- length(theta) averageSampleNumber <<- rep(NA_real_, numberOfThetas) 
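			# result containers: one entry per theta value; stage-wise results are
			# stored as stage-by-theta matrices and filled in the loop below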
.setParameterType("averageSampleNumber", C_PARAM_GENERATED) calculatedPower <<- rep(NA_real_, numberOfThetas) .setParameterType("calculatedPower", C_PARAM_GENERATED) earlyStop <<- matrix(NA_real_, kMax, numberOfThetas) .setParameterType("earlyStop", C_PARAM_GENERATED) rejectPerStage <<- matrix(NA_real_, kMax, numberOfThetas) .setParameterType("rejectPerStage", C_PARAM_GENERATED) futilityPerStage <<- matrix(NA_real_, kMax - 1, numberOfThetas) .setParameterType("futilityPerStage", C_PARAM_GENERATED) rowNames <- paste("stage =", c(1:kMax)) rownames(earlyStop) <<- rowNames rownames(rejectPerStage) <<- rowNames if (kMax > 1) { rownames(futilityPerStage) <<- rowNames[1:(kMax - 1)] } for (i in 1:numberOfThetas) { result <- .getPowerAndAverageSampleNumber(kMax = design$kMax, informationRates = design$informationRates, futilityBounds = design$futilityBounds, criticalValues = design$criticalValues, sided = design$sided, theta = theta[i], nMax) averageSampleNumber[i] <<- result$averageSampleNumber calculatedPower[i] <<- result$calculatedPower earlyStop[1:(kMax - 1), i] <<- result$earlyStop[1:(kMax - 1)] rejectPerStage[, i] <<- result$rejectPerStage[1:kMax] futilityPerStage[, i] <<- result$futilityPerStage[1:(kMax - 1)] } overallEarlyStop <<- .getOverallParameter(earlyStop) .setParameterType("overallEarlyStop", C_PARAM_GENERATED) overallReject <<- .getOverallParameter(rejectPerStage) .setParameterType("overallReject", C_PARAM_GENERATED) overallFutility <<- .getOverallParameter(futilityPerStage) .setParameterType("overallFutility", C_PARAM_GENERATED) }, .getPowerAndAverageSampleNumberByDesign = function(design, theta, nMax) { if (.isTrialDesignFisher(design)) { futilityBounds <- design$alpha0Vec } else { futilityBounds <- design$futilityBounds } return(.getPowerAndAverageSampleNumber(kMax = design$kMax, informationRates = design$informationRates, futilityBounds = futilityBounds, criticalValues = design$criticalValues, sided = design$sided, theta = theta, nMax = nMax)) }, .getPowerAndAverageSampleNumber = function(kMax, informationRates, futilityBounds, criticalValues, sided, theta, nMax) { if (sided == 2) { decisionMatrix <- matrix(c(-criticalValues - theta * sqrt(nMax * informationRates), criticalValues - theta * sqrt(nMax * informationRates)), nrow = 2, byrow = TRUE) } else { shiftedFutilityBounds <- futilityBounds - theta * sqrt(nMax * informationRates[1:(kMax - 1)]) shiftedFutilityBounds[futilityBounds <= C_FUTILITY_BOUNDS_DEFAULT] <- C_FUTILITY_BOUNDS_DEFAULT decisionMatrix <- matrix(c(shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT, criticalValues - theta * sqrt(nMax * informationRates)), nrow = 2, byrow = TRUE) } probs <- .getGroupSequentialProbabilities(decisionMatrix, informationRates) .averageSampleNumber <- nMax - sum((probs[3, 1:(kMax - 1)] - probs[2, 1:(kMax - 1)] + probs[1, 1:(kMax - 1)]) * (informationRates[kMax] - informationRates[1:(kMax - 1)]) * nMax) .futilityPerStage <- rep(NA_real_, kMax) if (sided == 2) { .calculatedPower <- sum(probs[3, 1:kMax] - probs[2, 1:kMax] + probs[1, 1:kMax]) .rejectPerStage <- probs[3, 1:kMax] - probs[2, 1:kMax] + probs[1, 1:kMax] } else { .calculatedPower <- sum(probs[3, 1:kMax] - probs[2, 1:kMax]) .rejectPerStage <- probs[3, 1:kMax] - probs[2, 1:kMax] if (kMax > 1) { .futilityPerStage <- probs[1, 1:(kMax - 1)] } } .earlyStop <- rep(NA_real_, kMax) if (kMax > 1) { .earlyStop <- probs[3, 1:(kMax - 1)] - probs[2, 1:(kMax - 1)] + probs[1, 1:(kMax - 1)] } return(list( averageSampleNumber = .averageSampleNumber, calculatedPower = .calculatedPower, earlyStop 
= .earlyStop, rejectPerStage = .rejectPerStage, futilityPerStage = .futilityPerStage )) }, .getOverallParameter = function(parameter) { if (is.null(parameter) || length(parameter) == 0) { return(rep(NA_real_, length(theta))) } overallParameter <- parameter overallParameter[is.na(overallParameter)] <- 0 overallParameter <- colSums(overallParameter) return(overallParameter) } ) ) #' #' @name PowerAndAverageSampleNumberResult_as.data.frame #' #' @title #' Coerce Power And Average Sample Number Result to a Data Frame #' #' @description #' Returns the \code{PowerAndAverageSampleNumberResult} as data frame. #' #' @details #' Coerces the object to a data frame. #' #' @export #' #' @keywords internal #' as.data.frame.PowerAndAverageSampleNumberResult <- function(x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, ...) { parameterNames <- x$.getVisibleFieldNames() parameterNames <- parameterNames[parameterNames != "nMax"] dataFrame <- x$.getAsDataFrame(parameterNames = parameterNames, niceColumnNamesEnabled = niceColumnNamesEnabled, includeAllParameters = includeAllParameters, tableColumnNames = .getTableColumnNames(design = x$.design)) return(dataFrame) } rpact/R/f_design_group_sequential.R0000644000176200001440000017017113574436571017161 0ustar liggesusers###################################################################################### # # # -- Group sequential design -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### #' @include f_core_constants.R NULL .getGroupSequentialProbabilities <- function(decisionMatrix, informationRates) { .assertAreValidInformationRates(informationRates) if (length(decisionMatrix) != 2 * length(informationRates)) { stop(sprintf(paste0(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "length of 'decisionMatrix' (%s) must be equal to 2 x length of 'informationRates' (%s)"), length(decisionMatrix), length(informationRates))) } decisionMatrix[decisionMatrix <= C_FUTILITY_BOUNDS_DEFAULT] <- C_FUTILITY_BOUNDS_DEFAULT decisionMatrix[decisionMatrix >= C_UPPER_BOUNDS_DEFAULT] <- C_UPPER_BOUNDS_DEFAULT M <- C_CONST_NEWTON_COTES * 6 + 1 # number of grid points with constant of Newton Cotes algorithm (n*6 + 1) dn <- rep(NA_real_, M) # density values w <- rep(NA_real_, M) # weights x <- rep(NA_real_, M) # grid points dn2 <- rep(NA_real_, M) # density values in recursion x2 <- rep(NA_real_, M) # grid points in recursion kMax <- length(informationRates) # maximum number of stages probs <- matrix(NA_real_, 3, kMax) # probability matrix output probs[, 1] <- c(stats::pnorm(decisionMatrix[1, 1]), stats::pnorm(decisionMatrix[2, 1]), 1) if (kMax <= 1) { return(probs) } epsilonVec <- informationRates epsilonVec[2 : kMax] <- informationRates[2 : kMax] - informationRates[1 : (kMax - 1)] informationRatesSqrt <- sqrt(informationRates) epsilonVecSqrt <- sqrt(epsilonVec) for (k in 2 : kMax) { dx <- (decisionMatrix[2, k - 1] - decisionMatrix[1, k - 1]) / (M - 1) w <- c(rep(c(492, 1296, 162, 1632, 162, 
1296) * dx / 840, M %/% 6), 246 * dx / 840) w[1] <- 246 * dx / 840 x <- rep(decisionMatrix[1, k - 1], M) + (0 : (M - 1)) * dx dn <- w * .getDnormValues(x, k, informationRates, epsilonVec, x2, dn2) # as alternative, use crossprod (x, y) seq1 <- stats::pnorm((decisionMatrix[1, k] * informationRatesSqrt[k] - x * informationRatesSqrt[k - 1]) / epsilonVecSqrt[k]) %*% dn seq2 <- stats::pnorm((decisionMatrix[2, k] * informationRatesSqrt[k] - x * informationRatesSqrt[k - 1]) / epsilonVecSqrt[k]) %*% dn x2 <- x dn2 <- dn probs[, k] <- c(seq1, seq2, probs[2, k - 1] - probs[1, k - 1]) } .validateGroupSequentialProbabilityResultsMulti(dn = dn, dn2 = dn2, x = x, x2 = x2, w = w) return(probs) } .getDnormValues <- function(x, k, informationRates, epsilonVec, x2, dn2) { # Old R-based call: return(sapply(x, .getDnormValuesSlow, k = k, informationRates = informationRates, epsilonVec = epsilonVec, x2 = x2, dn2 = dn2)) return(.Call("R_getDensityValues", x, as.integer(k), informationRates, epsilonVec, x2, dn2)) } .getDnormValuesSlow <- function(x, k, informationRates, epsilonVec, x2, dn2) { if (k == 2) { return(stats::dnorm(x)) } sum(sqrt(informationRates[k - 1] / epsilonVec[k - 1]) * stats::dnorm((x * sqrt(informationRates[k - 1]) - x2 * sqrt(informationRates[k - 2])) / sqrt(epsilonVec[k - 1])) * dn2) } .validateGroupSequentialProbabilityResultsMulti <- function(...) { args <- list(...) for (variableName in names(args)) { if (!.validateGroupSequentialProbabilityResults(results = args[[variableName]], variableName)) { return(invisible()) } } } .validateGroupSequentialProbabilityResults <- function(results, variableName) { numberOfNAs <- sum(is.na(results)) if (numberOfNAs == 0) { return(TRUE) } warning(sprintf(paste0(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "in .getGroupSequentialProbabilities(): ", "variable '%s' contains %s NA's (%.1f%%)"), variableName, numberOfNAs, 100 * numberOfNAs / length(results)), call. 
= FALSE) return(FALSE) } .getSpendingValue <- function(alpha, x, sided, typeOfDesign, gamma = 1) { if (typeOfDesign == C_TYPE_OF_DESIGN_AS_P || typeOfDesign == C_TYPE_OF_DESIGN_BS_P) { return(alpha * log(1 + (exp(1) - 1) * x)) } if (typeOfDesign == C_TYPE_OF_DESIGN_AS_OF || typeOfDesign == C_TYPE_OF_DESIGN_BS_OF) { return(2 * sided * (1 - stats::pnorm(stats::qnorm(1 - alpha / (2 * sided)) / sqrt(x)))) } if (typeOfDesign == C_TYPE_OF_DESIGN_AS_KD || typeOfDesign == C_TYPE_OF_DESIGN_BS_KD) { return(alpha * x^gamma) } if (typeOfDesign == C_TYPE_OF_DESIGN_AS_HSD || typeOfDesign == C_TYPE_OF_DESIGN_BS_HSD) { if (gamma == 0) { return(alpha * x) } return(alpha * (1 - exp(-gamma * x)) / (1 - exp(-gamma))) } return(NA) } .getOptimumDesign <- function(deltaWT, design) { scale <- .getOneDimensionalRoot(function(scale) { criticalValues <- scale * design$informationRates^(deltaWT - 0.5) if (design$sided == 2) { decisionMatrix <- (matrix(c(-criticalValues, criticalValues), nrow = 2, byrow = TRUE)) probs <- .getGroupSequentialProbabilities(decisionMatrix, design$informationRates) return(sum(probs[3, ] - probs[2, ] + probs[1, ]) - design$alpha) } else { if (design$bindingFutility) { decisionMatrix <- matrix(c(design$futilityBounds, C_FUTILITY_BOUNDS_DEFAULT, criticalValues), nrow = 2, byrow = TRUE) } else { decisionMatrix <- matrix(c(rep(C_FUTILITY_BOUNDS_DEFAULT, design$kMax), criticalValues), nrow = 2, byrow = TRUE) } probs <- .getGroupSequentialProbabilities(decisionMatrix, design$informationRates) return(sum(probs[3, ] - probs[2, ]) - design$alpha) } }, lower = 0, upper = 5, tolerance = design$tolerance) design$criticalValues <- scale * design$informationRates^(deltaWT - 0.5) designCharacteristics <- .getDesignCharacteristics(design = design) y <- NA_integer_ if (design$optimizationCriterion == C_OPTIMIZATION_CRITERION_ASNH1) { y <- designCharacteristics$averageSampleNumber1 } if (design$optimizationCriterion == C_OPTIMIZATION_CRITERION_ASNIFH1) { y <- designCharacteristics$inflationFactor + designCharacteristics$averageSampleNumber1 } if (design$optimizationCriterion == C_OPTIMIZATION_CRITERION_ASN_SUM) { y <- designCharacteristics$averageSampleNumber0 + designCharacteristics$averageSampleNumber01 + designCharacteristics$averageSampleNumber1 } return(y) } .validateTypeOfDesign <- function(design) { .assertDesignParameterExists(design, "typeOfDesign", C_DEFAULT_TYPE_OF_DESIGN) design$.setParameterType("userAlphaSpending", C_PARAM_NOT_APPLICABLE) design$.setParameterType("userBetaSpending", C_PARAM_NOT_APPLICABLE) design$.setParameterType("deltaWT", C_PARAM_NOT_APPLICABLE) design$.setParameterType("optimizationCriterion", C_PARAM_NOT_APPLICABLE) design$.setParameterType("gammaA", C_PARAM_NOT_APPLICABLE) design$.setParameterType("gammaB", C_PARAM_NOT_APPLICABLE) design$.setParameterType("typeBetaSpending", C_PARAM_NOT_APPLICABLE) design$.setParameterType("constantBoundsHP", C_PARAM_NOT_APPLICABLE) if (!(design$typeOfDesign %in% .getDesignTypes())) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "type of design (", design$typeOfDesign, ") must be one of the following : ", .printDesignTypes()) } if (design$typeOfDesign == C_TYPE_OF_DESIGN_WT) { .assertDesignParameterExists(design, "deltaWT", 0) if (design$deltaWT < -0.5 || design$deltaWT > 1) { stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'deltaWT' (", design$deltaWT, ") out of bounds [-0.5; 1]") } } else if (design$typeOfDesign == C_TYPE_OF_DESIGN_WT_OPTIMUM) { .assertDesignParameterExists(design, "optimizationCriterion", C_OPTIMIZATION_CRITERION_DEFAULT) if 
(!.isOptimizationCriterion(design$optimizationCriterion)) {
            stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "optimization criterion must be one of the following : ", .printOptimizationCriterion())
        }
    } else if (design$typeOfDesign == C_TYPE_OF_DESIGN_HP) {
        .assertDesignParameterExists(design, "constantBoundsHP", C_CONST_BOUND_HP_DEFAULT)
        .assertIsSingleNumber(design$constantBoundsHP, "constantBoundsHP")
        .assertIsInClosedInterval(design$constantBoundsHP, "constantBoundsHP", lower = 2, upper = NULL)
    } else if (design$typeOfDesign == C_TYPE_OF_DESIGN_AS_KD) {
        .assertDesignParameterExists(design, "gammaA", 1)
        if (design$gammaA < 0.4 || design$gammaA > 8) {
            stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "Parameter 'gammaA' (", design$gammaA,
                ") for Kim & DeMets alpha ", "spending function out of bounds [0.4; 8]")
        }
    } else if (design$typeOfDesign == C_TYPE_OF_DESIGN_AS_HSD) {
        .assertDesignParameterExists(design, "gammaA", 1)
        if (design$gammaA < -10 || design$gammaA > 5) {
            stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "Parameter 'gammaA' (", design$gammaA,
                ") for Hwang, Shih & DeCani ", "alpha spending function out of bounds [-10; 5]")
        }
    } else if (design$typeOfDesign == C_TYPE_OF_DESIGN_AS_USER) {
        .validateUserAlphaSpending(design)
    }
    if (.isUndefinedArgument(design$alpha)) {
        design$alpha <- C_ALPHA_DEFAULT
        design$.setParameterType("alpha", C_PARAM_DEFAULT_VALUE)
    }
    if (.isAlphaSpendingDesignType(design$typeOfDesign)) {
        .assertDesignParameterExists(design, "typeBetaSpending", C_TYPE_OF_DESIGN_BS_NONE)
        if (!.isBetaSpendingDesignType(design$typeBetaSpending, noneIncluded = TRUE)) {
            stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "type of beta spending must be one of the following : ", .printBetaSpendingDesignTypes())
        }
        if (design$typeBetaSpending == C_TYPE_OF_DESIGN_BS_KD) {
            .assertDesignParameterExists(design, "gammaB", 1)
            if (design$gammaB < 0.4 || design$gammaB > 8) {
                stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "Parameter 'gammaB' (", design$gammaB,
                    ") for Kim & DeMets beta ", "spending function out of bounds [0.4; 8]")
            }
        }
        if (design$typeBetaSpending == C_TYPE_OF_DESIGN_BS_HSD) {
            .assertDesignParameterExists(design, "gammaB", 1)
            if (design$gammaB < -10 || design$gammaB > 5) {
                stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "Parameter 'gammaB' (", design$gammaB,
                    ") for Hwang, Shih & DeCani ", "beta spending out of bounds [-10; 5]")
            }
        }
        if (design$typeBetaSpending == C_TYPE_OF_DESIGN_BS_USER) {
            .validateUserBetaSpending(design)
        }
    } else {
        if (.designParameterExists(design, "typeBetaSpending") &&
                design$typeBetaSpending != C_TYPE_OF_DESIGN_BS_NONE) {
            warning("'typeBetaSpending' (", design$typeBetaSpending, ") will be ignored ",
                "because 'typeOfDesign' (", design$typeOfDesign, ") is not an alpha spending design",
                call. = FALSE)
            design$typeBetaSpending <- C_TYPE_OF_DESIGN_BS_NONE
            design$.setParameterType("typeBetaSpending", C_PARAM_DEFAULT_VALUE)
        }
        if (.designParameterExists(design, "userBetaSpending")) {
            warning("'userBetaSpending' (", .arrayToString(design$userBetaSpending), ") will be ignored ",
                "because 'typeOfDesign' (", design$typeOfDesign, ") is not an alpha spending design",
                call. = FALSE)
            # reset the ignored user defined beta spending on the design object
            design$userBetaSpending <- NA_real_
        }
    }
    if (.isUndefinedArgument(design$beta)) {
        design$beta <- C_BETA_DEFAULT
        design$.setParameterType("beta", C_PARAM_DEFAULT_VALUE)
    }
    invisible(design)
}

.validateBaseParameters <- function(design) {
    if (.isDefinedArgument(design$kMax)) {
        .assertDesignParameterExists(design, "kMax", C_KMAX_DEFAULT)
        .assertIsValidKMax(design$kMax)
        if (.isDefinedArgument(design$informationRates)) {
            .assertAreValidInformationRates(design$informationRates, design$kMax)
        }
        if (.isDefinedArgument(design$futilityBounds)) {
            .assertAreValidFutilityBounds(design$futilityBounds, design$kMax)
        }
    }
    .assertDesignParameterExists(design, "sided", 1)
    .assertIsValidSidedParameter(design$sided)
    .setKmaxBasedOnAlphaSpendingDefintion(design)
    design$informationRates <- .getValidatedInformationRates(design)
    design$futilityBounds <- .getValidatedFutilityBounds(design)
    .assertDesignParameterExists(design, "tolerance", C_DESIGN_TOLERANCE_DEFAULT)
    if (design$tolerance < 1e-10 || design$tolerance > 1e-03) {
        stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS,
            "'tolerance' (", design$tolerance, ") out of bounds [1e-10; 1e-03]")
    }
    invisible(design)
}

.createDesign <- function(
        designClass,
        kMax = NA_integer_,
        alpha = NA_real_,
        beta = NA_real_,
        sided = 1,
        informationRates = NA_real_,
        futilityBounds = NA_real_,
        typeOfDesign = C_DEFAULT_TYPE_OF_DESIGN,
        deltaWT = 0,
        optimizationCriterion = C_OPTIMIZATION_CRITERION_DEFAULT,
        gammaA = 1,
        typeBetaSpending = C_TYPE_OF_DESIGN_BS_NONE,
        userAlphaSpending = NA_real_,
        userBetaSpending = NA_real_,
        gammaB = 1,
        bindingFutility = C_BINDING_FUTILITY_DEFAULT,
        constantBoundsHP = C_CONST_BOUND_HP_DEFAULT,
        twoSidedPower = NA,
        tolerance = C_DESIGN_TOLERANCE_DEFAULT) {
    if (designClass == C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL) {
        design <- TrialDesignInverseNormal(kMax = kMax)
    } else if (designClass == C_CLASS_NAME_TRIAL_DESIGN_GROUP_SEQUENTIAL) {
        design <- TrialDesignGroupSequential(kMax = kMax)
    } else {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'designClass' ('", designClass, "') must be '",
            C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL, "' or ",
            "'", C_CLASS_NAME_TRIAL_DESIGN_GROUP_SEQUENTIAL, "'")
    }
    if (!is.integer(sided) && sided %in% c(1, 2)) {
        sided <- as.integer(sided)
    }
    design$alpha <- alpha
    design$beta <- beta
    design$sided <- sided
    design$typeOfDesign <- typeOfDesign
    design$deltaWT <- deltaWT
    design$gammaA <- gammaA
    design$gammaB <- gammaB
    design$optimizationCriterion <- optimizationCriterion
    design$typeBetaSpending <- typeBetaSpending
    design$futilityBounds <- futilityBounds
    design$informationRates <- informationRates
    design$userAlphaSpending <- userAlphaSpending
    design$userBetaSpending <- userBetaSpending
    design$bindingFutility <- bindingFutility
    if (design$typeOfDesign == C_TYPE_OF_DESIGN_HP) {
        .assertIsSingleNumber(constantBoundsHP, "constantBoundsHP")
        .assertIsInClosedInterval(constantBoundsHP, "constantBoundsHP", lower = 2, upper = NULL)
        design$constantBoundsHP <- constantBoundsHP
    } else if (constantBoundsHP != C_CONST_BOUND_HP_DEFAULT) {
        warning("'constantBoundsHP' (", constantBoundsHP,
            ") will be ignored because it is only applicable for 'typeOfDesign' = \"",
            C_TYPE_OF_DESIGN_HP, "\"")
    }
    if (is.na(twoSidedPower)) {
        design$twoSidedPower <- FALSE
        design$.setParameterType("twoSidedPower", C_PARAM_DEFAULT_VALUE)
    } else {
        design$twoSidedPower <- twoSidedPower
        design$.setParameterType("twoSidedPower", C_PARAM_USER_DEFINED)
    }
    design$tolerance <- tolerance
    return(design)
}

.getDesignGroupSequentialKMax1 <- function(design) {
    design$criticalValues <- stats::qnorm(1 - design$alpha / design$sided)
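    # Fixed-sample case (kMax = 1): the single critical value is the
    # (1 - alpha / sided) standard normal quantile, e.g.,
    # qnorm(1 - 0.025) = 1.959964 for a one-sided design with alpha = 0.025.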
design$alphaSpent[1] <- design$alpha invisible(design) } # # Wang and Tsiatis design # .getDesignGroupSequentialWangAndTsiatis <- function(design) { if (design$typeOfDesign == C_TYPE_OF_DESIGN_P) { design$deltaWT <- 0.5 } else if (design$typeOfDesign == C_TYPE_OF_DESIGN_OF) { design$deltaWT <- 0 } scale <- .getOneDimensionalRoot(function(scale) { design$criticalValues <- scale * design$informationRates^(design$deltaWT - 0.5) if (design$sided == 2) { decisionMatrix <- matrix(c(-design$criticalValues, design$criticalValues), nrow = 2, byrow = TRUE) probs <- .getGroupSequentialProbabilities(decisionMatrix, design$informationRates) return(sum(probs[3, ] - probs[2, ] + probs[1, ]) - design$alpha) } else { if (design$bindingFutility) { decisionMatrix <- matrix(c(design$futilityBounds, C_FUTILITY_BOUNDS_DEFAULT, design$criticalValues), nrow = 2, byrow = TRUE) } else { decisionMatrix <- matrix(c(rep(C_FUTILITY_BOUNDS_DEFAULT, design$kMax), design$criticalValues), nrow = 2, byrow = TRUE) } probs <- .getGroupSequentialProbabilities(decisionMatrix, design$informationRates) return(sum(probs[3, ] - probs[2, ]) - design$alpha) } }, lower = 0, upper = 8, tolerance = design$tolerance) design$criticalValues <- scale * design$informationRates^(design$deltaWT - 0.5) .calculateAlphaSpent(design) invisible(design) } .calculateAlphaSpent <- function(design) { if (design$sided == 2) { decisionMatrix <- matrix(c(-design$criticalValues, design$criticalValues), nrow = 2, byrow = TRUE) } else { if (design$bindingFutility) { decisionMatrix <- matrix(c(design$futilityBounds, C_FUTILITY_BOUNDS_DEFAULT, design$criticalValues), nrow = 2, byrow = TRUE) } else { decisionMatrix <- matrix(c(rep(C_FUTILITY_BOUNDS_DEFAULT, design$kMax), design$criticalValues), nrow = 2, byrow = TRUE) } } tryCatch({ probs <- .getGroupSequentialProbabilities(decisionMatrix, design$informationRates) if (design$sided == 1) { design$alphaSpent <- cumsum(probs[3, ] - probs[2, ]) } else { design$alphaSpent <- cumsum(probs[3, ] - probs[2, ] + probs[1, ]) } if (!is.na(design$alphaSpent[design$kMax])) { design$alphaSpent[design$kMax] <- floor(design$alphaSpent[design$kMax] * 1e8) / 1e8 } design$.setParameterType("alphaSpent", C_PARAM_GENERATED) }, error = function(e) { warning("Failed to calculate 'alphaSpent': ", e, call. 
= FALSE) }) } # # Haybittle & Peto design # .getDesignGroupSequentialHaybittleAndPeto <- function(design) { scale <- .getOneDimensionalRoot(function(scale) { design$criticalValues <- c(rep(design$constantBoundsHP, design$kMax - 1), scale) if (design$sided == 2) { decisionMatrix <- matrix(c(-design$criticalValues, design$criticalValues), nrow = 2, byrow = TRUE) probs <- .getGroupSequentialProbabilities(decisionMatrix, design$informationRates) return(sum(probs[3, ] - probs[2, ] + probs[1, ]) - design$alpha) } else { if (design$bindingFutility) { decisionMatrix <- matrix(c(design$futilityBounds, C_FUTILITY_BOUNDS_DEFAULT, design$criticalValues), nrow = 2, byrow = TRUE) } else { decisionMatrix <- matrix(c(rep(C_FUTILITY_BOUNDS_DEFAULT, design$kMax), design$criticalValues), nrow = 2, byrow = TRUE) } probs <- .getGroupSequentialProbabilities(decisionMatrix, design$informationRates) return(sum(probs[3, ] - probs[2, ]) - design$alpha) } }, lower = 0, upper = 8, tolerance = design$tolerance) design$criticalValues <- c(rep(design$constantBoundsHP, design$kMax - 1), scale) .calculateAlphaSpent(design) if (!is.na(design$criticalValues[design$kMax]) && !is.na(design$alphaSpent[design$kMax]) && (design$criticalValues[design$kMax] > 6 || abs(design$alphaSpent[design$kMax] - design$alpha) > 0.001)) { stop(sprintf(paste0(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "critical values according to the Haybittle & Peto design cannot be calculated ", "(criticalValues[%s] = %s, alpha = %s)"), design$kMax, design$criticalValues[design$kMax], design$alpha)) } invisible(design) } # # Optimum design within Wang and Tsiatis class # .getDesignGroupSequentialWangAndTsiatisOptimum <- function(design) { .assertDesignParameterExists(design, "optimizationCriterion", C_OPTIMIZATION_CRITERION_DEFAULT) .assertIsOptimizationCriterion(design$optimizationCriterion) optimumDesign <- stats::optimize(.getOptimumDesign, design = design, interval = c(0, 1), tol = 0.001) design$deltaWT <- round(optimumDesign$minimum, 2) design$.setParameterType("deltaWT", C_PARAM_GENERATED) # Recalculation of design characteristics with rounded design$deltaWT scale <- .getOneDimensionalRoot(function(scale) { design$criticalValues <- scale * design$informationRates^(design$deltaWT - 0.5) if (design$sided == 2) { decisionMatrix <- matrix(c(-design$criticalValues, design$criticalValues), nrow = 2, byrow = TRUE) probs <- .getGroupSequentialProbabilities(decisionMatrix, design$informationRates) return(sum(probs[3, ] - probs[2, ] + probs[1, ]) - design$alpha) } else{ if (design$bindingFutility) { decisionMatrix <- matrix(c(design$futilityBounds, C_FUTILITY_BOUNDS_DEFAULT, design$criticalValues), nrow = 2, byrow = TRUE) } else { decisionMatrix <- matrix(c(rep(C_FUTILITY_BOUNDS_DEFAULT, design$kMax), design$criticalValues), nrow = 2, byrow = TRUE) } probs <- .getGroupSequentialProbabilities(decisionMatrix, design$informationRates) return(sum(probs[3, ] - probs[2, ]) - design$alpha) } }, lower = 0, upper = 5, tolerance = design$tolerance) design$criticalValues <- scale * design$informationRates^(design$deltaWT - 0.5) designCharacteristics <- .getDesignCharacteristics(design = design) design$power <- designCharacteristics$power design$.setParameterType("power", C_PARAM_GENERATED) .calculateAlphaSpent(design) invisible(design) } # # alpha spending approaches # .getDesignGroupSequentialAlphaSpending <- function(design) { design$criticalValues <- rep(NA_real_, design$kMax) spendingValue <- .getSpendingValue(design$alpha, design$informationRates[1], design$sided, 
design$typeOfDesign, design$gammaA) if (spendingValue < 0) { .logWarn("Cannot calculate alpha spent : 'spendingValue' (%s) is < 0", spendingValue) design$alphaSpent <- NA_real_ design$.setParameterType("alphaSpent", C_PARAM_GENERATED) return(.getDesignGroupSequentialBetaSpendingApproaches(design)) } design$criticalValues[1] <- stats::qnorm(1 - spendingValue / design$sided) for (k in 2 : design$kMax) { design$criticalValues[k] <- .getOneDimensionalRoot(function(scale) { design$criticalValues[k] <- scale if (design$sided == 2) { decisionMatrix <- matrix(c(-design$criticalValues, design$criticalValues), nrow = 2, byrow = TRUE) probs <- .getGroupSequentialProbabilities(decisionMatrix[, 1 : k], design$informationRates[1 : k]) return(sum(probs[3, ] - probs[2, ] + probs[1, ]) - .getSpendingValue(design$alpha, design$informationRates[k], design$sided, design$typeOfDesign, design$gammaA)) } else{ if (design$bindingFutility) { decisionMatrix <- matrix(c(design$futilityBounds, C_FUTILITY_BOUNDS_DEFAULT, design$criticalValues), nrow = 2, byrow = TRUE) } else { decisionMatrix <- matrix(c(rep(C_FUTILITY_BOUNDS_DEFAULT, design$kMax), design$criticalValues), nrow = 2, byrow = TRUE) } probs <- .getGroupSequentialProbabilities(decisionMatrix[, 1 : k], design$informationRates[1 : k]) return(sum(probs[3, ] - probs[2, ]) - .getSpendingValue(design$alpha, design$informationRates[k], design$sided, design$typeOfDesign, design$gammaA)) } }, lower = 0, upper = 8, tolerance = design$tolerance) } .calculateAlphaSpent(design) .getDesignGroupSequentialBetaSpendingApproaches(design) } # # User defined alpha spending approach # .getDesignGroupSequentialUserDefinedAlphaSpending <- function(design) { design$criticalValues <- rep(NA_real_, design$kMax) design$criticalValues[1] <- stats::qnorm(1 - design$userAlphaSpending[1] / design$sided) for (k in (2 : design$kMax)) { design$criticalValues[k] <- .getOneDimensionalRoot(function(scale) { design$criticalValues[k] <- scale if (design$sided == 2) { decisionMatrix <- matrix(c(-design$criticalValues, design$criticalValues), nrow = 2, byrow = TRUE) probs <- .getGroupSequentialProbabilities(decisionMatrix[, 1 : k], design$informationRates[1 : k]) return(sum(probs[3, ] - probs[2, ] + probs[1, ]) - design$userAlphaSpending[k]) } else{ if (design$bindingFutility) { decisionMatrix <- matrix(c(design$futilityBounds, C_FUTILITY_BOUNDS_DEFAULT, design$criticalValues), nrow = 2, byrow = TRUE) } else { decisionMatrix <- matrix(c(rep(C_FUTILITY_BOUNDS_DEFAULT, design$kMax), design$criticalValues), nrow = 2, byrow = TRUE) } probs <- .getGroupSequentialProbabilities(decisionMatrix[, 1 : k], design$informationRates[1 : k]) return(sum(probs[3, ] - probs[2, ]) - design$userAlphaSpending[k]) } }, lower = 0, upper = 8, tolerance = design$tolerance) } .calculateAlphaSpent(design) invisible(.getDesignGroupSequentialBetaSpendingApproaches(design)) } # # Only for alpha spending approaches # .getDesignGroupSequentialBetaSpendingApproaches <- function(design) { # beta spending approaches (additional to alpha spending)! 
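    # Dispatch: built-in beta spending families are handled by
    # .getDesignGroupSequentialBetaSpending(); a user defined cumulative beta
    # spending vector (C_TYPE_OF_DESIGN_BS_USER) is handled separately by
    # .getDesignGroupSequentialUserDefinedBetaSpending().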
    if (.isBetaSpendingDesignType(design$typeBetaSpending,
            userDefinedBetaSpendingIncluded = FALSE, noneIncluded = FALSE)) {
        .getDesignGroupSequentialBetaSpending(design)
    }

    # User defined beta spending
    if (design$typeBetaSpending == C_TYPE_OF_DESIGN_BS_USER) {
        .getDesignGroupSequentialUserDefinedBetaSpending(design)
    }

    invisible(design)
}

#
# Beta spending approaches (additional to alpha spending)
# Find shift for the beta spending bounds such that the last futility bound
# and the last critical value coincide
#
.getDesignGroupSequentialBetaSpending <- function(design) {
    # Note: calculated without .getOneDimensionalRoot because results may not be achieved in the inner search
    # Direct bisection produced reliable results (although sometimes slowly)
    if (design$sided == 2) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'sided' = 2 not allowed; for beta-spending approach, only one-sided testing is possible")
    }
    if (design$bindingFutility) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "Binding futility is not available for the beta spending function approach")
    }

    iteration <- design$kMax * 1000
    cLower1 <- -4
    cUpper1 <- 10
    prec1 <- 1
    while (prec1 > design$tolerance) {
        shift <- (cLower1 + cUpper1) / 2
        futilityBounds <- rep(NA_real_, design$kMax)
        futilityBounds[1] <- stats::qnorm(.getSpendingValue(design$beta,
            design$informationRates[1], design$sided, design$typeBetaSpending, design$gammaB)) +
            sqrt(design$informationRates[1]) * shift
        for (k in 2 : design$kMax) {
            cLower2 <- -6
            cUpper2 <- 5
            prec2 <- 1
            while (prec2 > design$tolerance) {
                scale <- (cLower2 + cUpper2) / 2
                futilityBounds[k] <- scale
                decisionMatrix <- matrix(c(futilityBounds - sqrt(design$informationRates) * shift,
                    design$criticalValues - sqrt(design$informationRates) * shift),
                    nrow = 2, byrow = TRUE)
                probs <- .getGroupSequentialProbabilities(decisionMatrix[, 1 : k],
                    design$informationRates[1 : k])
                ifelse(sum(probs[1, ]) < .getSpendingValue(design$beta, design$informationRates[k],
                    design$sided, design$typeBetaSpending, design$gammaB),
                    cLower2 <- scale, cUpper2 <- scale)
                ifelse(iteration > 0, prec2 <- cUpper2 - cLower2, prec2 <- 0)
                iteration <- iteration - 1
            }
        }
        ifelse(futilityBounds[design$kMax] < design$criticalValues[design$kMax],
            cLower1 <- shift, cUpper1 <- shift)
        ifelse(iteration > 0, prec1 <- cUpper1 - cLower1, prec1 <- 0)
    }

    if ((abs(futilityBounds[design$kMax] - design$criticalValues[design$kMax]) > 1e-05) ||
            (iteration < 0)) {
        stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "calculation of 'betaSpent' and 'power' ",
            "not possible due to numerical overflow")
    }

    decisionMatrix <- matrix(c(futilityBounds - sqrt(design$informationRates) * shift,
        design$criticalValues - sqrt(design$informationRates) * shift), nrow = 2, byrow = TRUE)
    probs <- .getGroupSequentialProbabilities(decisionMatrix, design$informationRates)
    design$betaSpent <- cumsum(probs[1, ])
    design$power <- cumsum(probs[3, ] - probs[2, ])
    design$.setParameterType("betaSpent", C_PARAM_GENERATED)
    design$.setParameterType("power", C_PARAM_GENERATED)
    design$futilityBounds <- futilityBounds[1 : (design$kMax - 1)]
    design$.setParameterType("futilityBounds", C_PARAM_GENERATED)
    invisible(design)
}

#
# User defined beta spending.
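#
# Algorithm sketch (same nested bisection as in .getDesignGroupSequentialBetaSpending):
# - outer bisection on the non-centrality shift in [-4, 10] until the last
#   futility bound coincides with the last critical value;
# - inner bisection per stage k on the futility bound in [-6, 5] until the
#   cumulative futility probability sum(probs[1, ]) matches the user defined
#   cumulative beta spending userBetaSpending[k];
# - the iteration budget of kMax * 1000 guards against non-convergence.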
#
# Find shift for the beta spending bounds such that the last futility bound
# and the last critical value coincide
#
.getDesignGroupSequentialUserDefinedBetaSpending <- function(design) {
    # Note: calculated without .getOneDimensionalRoot because results may not be achieved in the inner search
    # Direct bisection produced reliable results (although sometimes slowly)
    if (design$typeBetaSpending != C_TYPE_OF_DESIGN_BS_USER) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'typeBetaSpending' ('", design$typeBetaSpending,
            "') must be '", C_TYPE_OF_DESIGN_BS_USER, "'")
    }
    if (design$sided == 2) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'sided' = 2 not allowed; for beta-spending approach, only one-sided testing is possible")
    }
    if (design$bindingFutility) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "Binding futility is not available for the beta spending function approach")
    }

    iteration <- design$kMax * 1000
    cLower1 <- -4
    cUpper1 <- 10
    prec1 <- 1
    while (prec1 > design$tolerance) {
        shift <- (cLower1 + cUpper1) / 2
        futilityBounds <- rep(NA_real_, design$kMax)
        futilityBounds[1] <- stats::qnorm(design$userBetaSpending[1]) +
            sqrt(design$informationRates[1]) * shift
        for (k in (2 : design$kMax)) {
            cLower2 <- -6
            cUpper2 <- 5
            prec2 <- 1
            while (prec2 > design$tolerance) {
                scale <- (cLower2 + cUpper2) / 2
                futilityBounds[k] <- scale
                nDecisionMatrix <- matrix(c(futilityBounds - sqrt(design$informationRates) * shift,
                    design$criticalValues - sqrt(design$informationRates) * shift),
                    nrow = 2, byrow = TRUE)
                probs <- .getGroupSequentialProbabilities(nDecisionMatrix[, 1 : k],
                    design$informationRates[1 : k])
                ifelse(sum(probs[1, ]) < design$userBetaSpending[k], cLower2 <- scale, cUpper2 <- scale)
                ifelse(iteration > 0, prec2 <- cUpper2 - cLower2, prec2 <- 0)
                iteration <- iteration - 1
            }
        }
        ifelse(futilityBounds[design$kMax] < design$criticalValues[design$kMax],
            cLower1 <- shift, cUpper1 <- shift)
        ifelse(iteration > 0, prec1 <- cUpper1 - cLower1, prec1 <- 0)
    }

    if ((abs(futilityBounds[design$kMax] - design$criticalValues[design$kMax]) > 1e-05) ||
            (iteration < 0)) {
        stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "calculation of 'betaSpent' and 'power' ",
            "not possible due to numerical overflow")
    }

    nDecisionMatrix <- matrix(c(futilityBounds - sqrt(design$informationRates) * shift,
        design$criticalValues - sqrt(design$informationRates) * shift), nrow = 2, byrow = TRUE)
    probs <- .getGroupSequentialProbabilities(nDecisionMatrix, design$informationRates)
    design$betaSpent <- cumsum(probs[1, ])
    design$power <- cumsum(probs[3, ] - probs[2, ])
    design$.setParameterType("betaSpent", C_PARAM_GENERATED)
    design$.setParameterType("power", C_PARAM_GENERATED)
    design$futilityBounds <- futilityBounds[1 : (design$kMax - 1)]
    design$.setParameterType("futilityBounds", C_PARAM_GENERATED)
    invisible(design)
}

#'
#' @title
#' Get Design Inverse Normal
#'
#' @description
#' Provides adjusted boundaries and defines a group sequential design for its use in
#' the inverse normal combination test.
#'
#' @param kMax The maximum number of stages K. K = 1, 2, 3,..., 10, default is \code{3}.
#' @param alpha The significance level alpha, default is \code{0.025}.
#' @param beta Type II error rate, necessary for providing sample size calculations \cr
#' (e.g., \code{\link{getSampleSizeMeans}}), beta spending function designs,
#' or optimum designs, default is \code{0.20}.
#' @param sided One-sided or two-sided, default is \code{1}.
#' @param typeOfDesign The type of design, one of the following:
#' O'Brien & Fleming ("OF"), Pocock ("P"), Wang & Tsiatis Delta class ("WT"),
#' Haybittle & Peto ("HP"), Optimum design within Wang & Tsiatis class ("WToptimum"),
#' O'Brien & Fleming type alpha spending ("asOF"), Pocock type alpha spending ("asP"),
#' Kim & DeMets alpha spending ("asKD"), Hwang, Shih & DeCani alpha spending ("asHSD"),
#' user defined alpha spending ("asUser"), default is \code{"OF"}.
#' @param informationRates The information rates, default is \code{(1 : kMax)/kMax}.
#' @param futilityBounds The futility bounds (vector of length K - 1).
#' @param bindingFutility If \code{bindingFutility = TRUE} is specified, the calculation of
#' the critical values is affected by the futility bounds (default is \code{FALSE}).
#' @param deltaWT Delta for Wang & Tsiatis Delta class.
#' @param constantBoundsHP The constant bounds up to stage K - 1 for the
#' Haybittle & Peto design (default is \code{3}).
#' @param optimizationCriterion Optimization criterion for optimum design within
#' Wang & Tsiatis class ("ASNH1", "ASNIFH1", "ASNsum"), default is \code{"ASNH1"}.
#' @param typeBetaSpending The type of beta spending, one of the following:
#' O'Brien & Fleming type beta spending, Pocock type beta spending,
#' Kim & DeMets beta spending, Hwang, Shih & DeCani beta spending, user defined
#' beta spending ("bsOF", "bsP",...).
#' @param gammaA Parameter for alpha spending function, default is \code{1}.
#' @param gammaB Parameter for beta spending function, default is \code{1}.
#' @param userAlphaSpending The user defined alpha spending. Vector of length kMax containing the cumulative
#' alpha-spending up to each interim stage.
#' @param userBetaSpending The user defined beta spending. Vector of length kMax containing the cumulative
#' beta-spending up to each interim stage.
#' @param twoSidedPower For two-sided testing, if \code{twoSidedPower = TRUE} is specified,
#' the sample size calculation is performed by considering both tails of the distribution.
#' Default is \code{FALSE}, i.e., it is assumed that one tail probability is equal to 0 or that the power
#' should be directed towards one side.
#' @param tolerance The tolerance, default is \code{1e-08}.
#' @param ... Ensures that all arguments are named and
#' that a warning will be displayed if unknown arguments are passed.
#'
#' @details
#' Depending on \code{typeOfDesign}, some parameters need to be specified and others do not.
#' For example, only if \code{typeOfDesign} "asHSD" is selected, \code{gammaA} needs to be specified.
#'
#' If an alpha spending approach was specified ("asOF", "asP", "asKD", "asHSD", or "asUser"),
#' additionally a beta spending function can be specified to produce futility bounds.
#'
#' @return Returns a \code{\link{TrialDesignInverseNormal}} object.
#'
#' @export
#'
#' @seealso \code{\link{getDesignSet}} for creating a set of designs to compare.
#' #' @examples #' #' # Run with default values #' getDesignInverseNormal() #' #' # Calculate the Pocock type alpha spending critical values if the second #' # interim analysis was performed after 70% of information was observed #' getDesignInverseNormal(informationRates = c(0.4, 0.7), #' typeOfDesign = "asP") #' getDesignInverseNormal <- function( ..., kMax = NA_integer_, alpha = NA_real_, beta = NA_real_, sided = 1, informationRates = NA_real_, futilityBounds = NA_real_, typeOfDesign = C_DEFAULT_TYPE_OF_DESIGN, deltaWT = 0, optimizationCriterion = C_OPTIMIZATION_CRITERION_DEFAULT, gammaA = 1, typeBetaSpending = C_TYPE_OF_DESIGN_BS_NONE, userAlphaSpending = NA_real_, userBetaSpending = NA_real_, gammaB = 1, bindingFutility = NA, constantBoundsHP = C_CONST_BOUND_HP_DEFAULT, twoSidedPower = C_TWO_SIDED_POWER_DEFAULT, tolerance = C_DESIGN_TOLERANCE_DEFAULT) { .warnInCaseOfUnknownArguments(functionName = "getDesignInverseNormal", ...) return(.getDesignGroupSequential( designClass = C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL, kMax = kMax, alpha = alpha, beta = beta, sided = sided, informationRates = informationRates, futilityBounds = futilityBounds, typeOfDesign = typeOfDesign, deltaWT = deltaWT, optimizationCriterion = optimizationCriterion, gammaA = gammaA, typeBetaSpending = typeBetaSpending, userAlphaSpending = userAlphaSpending, userBetaSpending = userBetaSpending, gammaB = gammaB, bindingFutility = bindingFutility, constantBoundsHP = constantBoundsHP, twoSidedPower = twoSidedPower, tolerance = tolerance, userFunctionCallEnabled = TRUE )) } .getDesignInverseNormal <- function( ..., kMax = NA_integer_, alpha = NA_real_, beta = NA_real_, sided = 1, informationRates = NA_real_, futilityBounds = NA_real_, typeOfDesign = C_DEFAULT_TYPE_OF_DESIGN, deltaWT = 0, optimizationCriterion = C_OPTIMIZATION_CRITERION_DEFAULT, gammaA = 1, typeBetaSpending = C_TYPE_OF_DESIGN_BS_NONE, userAlphaSpending = NA_real_, userBetaSpending = NA_real_, gammaB = 1, bindingFutility = NA, constantBoundsHP = C_CONST_BOUND_HP_DEFAULT, twoSidedPower = C_TWO_SIDED_POWER_DEFAULT, tolerance = C_DESIGN_TOLERANCE_DEFAULT) { .warnInCaseOfUnknownArguments(functionName = "getDesignInverseNormal", ...) return(.getDesignGroupSequential( designClass = C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL, kMax = kMax, alpha = alpha, beta = beta, sided = sided, informationRates = informationRates, futilityBounds = futilityBounds, typeOfDesign = typeOfDesign, deltaWT = deltaWT, optimizationCriterion = optimizationCriterion, gammaA = gammaA, typeBetaSpending = typeBetaSpending, userAlphaSpending = userAlphaSpending, userBetaSpending = userBetaSpending, gammaB = gammaB, bindingFutility = bindingFutility, constantBoundsHP = constantBoundsHP, twoSidedPower = twoSidedPower, tolerance = tolerance, userFunctionCallEnabled = FALSE )) } .getDesignGroupSequentialDefaultValues <- function() { return(list( kMax = NA_integer_, alpha = NA_real_, beta = NA_real_, sided = 1, informationRates = NA_real_, futilityBounds = NA_real_, typeOfDesign = C_DEFAULT_TYPE_OF_DESIGN, deltaWT = 0, optimizationCriterion = C_OPTIMIZATION_CRITERION_DEFAULT, gammaA = 1, typeBetaSpending = C_TYPE_OF_DESIGN_BS_NONE, userAlphaSpending = NA_real_, userBetaSpending = NA_real_, gammaB = 1, twoSidedPower = C_TWO_SIDED_POWER_DEFAULT, tolerance = C_DESIGN_TOLERANCE_DEFAULT )) } .getDesignInverseNormalDefaultValues <- function() { return(.getDesignGroupSequentialDefaultValues()) } # # Param: userFunctionCallEnabled if \code{TRUE}, additional parameter validation methods will be called. 
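#
# Example (illustration only, not executed): the exported constructors are thin
# wrappers around this function and differ only in 'designClass', e.g.,
#
#   design <- .getDesignGroupSequential(
#       designClass = C_CLASS_NAME_TRIAL_DESIGN_GROUP_SEQUENTIAL,
#       kMax = 3, alpha = 0.025, typeOfDesign = C_DEFAULT_TYPE_OF_DESIGN,
#       userFunctionCallEnabled = TRUE)
#
# With 'userFunctionCallEnabled = TRUE' the full argument validation is performed;
# internal callers such as .getDesignInverseNormal() skip it for speed.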
# .getDesignGroupSequential <- function( ..., designClass = C_CLASS_NAME_TRIAL_DESIGN_GROUP_SEQUENTIAL, kMax = NA_integer_, alpha = NA_real_, beta = NA_real_, sided = 1, informationRates = NA_real_, futilityBounds = NA_real_, typeOfDesign = C_DEFAULT_TYPE_OF_DESIGN, deltaWT = 0, optimizationCriterion = C_OPTIMIZATION_CRITERION_DEFAULT, gammaA = 1, typeBetaSpending = C_TYPE_OF_DESIGN_BS_NONE, userAlphaSpending = NA_real_, userBetaSpending = NA_real_, gammaB = 1, bindingFutility = C_BINDING_FUTILITY_DEFAULT, constantBoundsHP = C_CONST_BOUND_HP_DEFAULT, twoSidedPower = NA, tolerance = C_DESIGN_TOLERANCE_DEFAULT, userFunctionCallEnabled = FALSE) { if (.isDefinedArgument(kMax, argumentExistsValidationEnabled = userFunctionCallEnabled)) { .assertIsValidKMax(kMax) if (!is.integer(kMax)) { kMax <- as.integer(kMax) } } if (is.na(bindingFutility)) { bindingFutility <- C_BINDING_FUTILITY_DEFAULT } else if (userFunctionCallEnabled && ((!is.na(kMax) && kMax == 1) || (!any(is.na(futilityBounds)) && all(futilityBounds == C_FUTILITY_BOUNDS_DEFAULT)))) { warning("'bindingFutility' (", bindingFutility, ") will be ignored", call. = FALSE) } design <- .createDesign( designClass = designClass, kMax = kMax, alpha = alpha, beta = beta, sided = sided, informationRates = informationRates, futilityBounds = futilityBounds, typeOfDesign = typeOfDesign, deltaWT = deltaWT, optimizationCriterion = optimizationCriterion, gammaA = gammaA, typeBetaSpending = typeBetaSpending, userAlphaSpending = userAlphaSpending, userBetaSpending = userBetaSpending, gammaB = gammaB, bindingFutility = bindingFutility, constantBoundsHP = constantBoundsHP, twoSidedPower = twoSidedPower, tolerance = tolerance) if (userFunctionCallEnabled) { .validateBaseParameters(design) .validateTypeOfDesign(design) .assertDesignParameterExists(design, "alpha", C_ALPHA_DEFAULT) .assertDesignParameterExists(design, "beta", C_BETA_DEFAULT) .assertDesignParameterExists(design, "sided", 1) .assertDesignParameterExists(design, "typeOfDesign", C_DEFAULT_TYPE_OF_DESIGN) .assertDesignParameterExists(design, "bindingFutility", C_BINDING_FUTILITY_DEFAULT) #.assertDesignParameterExists(design, "twoSidedPower", C_TWO_SIDED_POWER_DEFAULT) .assertDesignParameterExists(design, "tolerance", C_DESIGN_TOLERANCE_DEFAULT) } if (design$sided == 2 && design$bindingFutility) { warning("'bindingFutility' will be ignored because the test is defined as two-sided", call. = FALSE) design$bindingFutility <- FALSE } if (design$sided == 1 && design$twoSidedPower) { warning("'twoSidedPower' will be ignored because the test is defined as one-sided", call. 
= FALSE)
        design$twoSidedPower <- FALSE
    }

    if (userFunctionCallEnabled) {
        .validateAlphaAndBeta(design)
    }

    design$alphaSpent <- rep(NA_real_, design$kMax)
    design$betaSpent <- rep(NA_real_, design$kMax)
    design$power <- rep(NA_real_, design$kMax)
    if (userFunctionCallEnabled) {
        design$.setParameterType("betaSpent", C_PARAM_NOT_APPLICABLE)
        design$.setParameterType("power", C_PARAM_NOT_APPLICABLE)
        design$.setParameterType("alphaSpent", C_PARAM_NOT_APPLICABLE)
        design$.setParameterType("criticalValues", C_PARAM_GENERATED)
    }

    if (design$kMax == 1) {
        .getDesignGroupSequentialKMax1(design)
    } else {
        # Wang and Tsiatis design
        if (design$typeOfDesign == C_TYPE_OF_DESIGN_WT || design$typeOfDesign == C_TYPE_OF_DESIGN_P ||
                design$typeOfDesign == C_TYPE_OF_DESIGN_OF) {
            .getDesignGroupSequentialWangAndTsiatis(design)
        }
        # Haybittle & Peto design
        else if (design$typeOfDesign == C_TYPE_OF_DESIGN_HP) {
            .getDesignGroupSequentialHaybittleAndPeto(design)
        }
        # Optimum design within Wang and Tsiatis class
        else if (design$typeOfDesign == C_TYPE_OF_DESIGN_WT_OPTIMUM) {
            .getDesignGroupSequentialWangAndTsiatisOptimum(design)
        }
        # alpha spending approaches
        else if (.isAlphaSpendingDesignType(design$typeOfDesign, userDefinedAlphaSpendingIncluded = FALSE)) {
            .getDesignGroupSequentialAlphaSpending(design)
        }
        # user defined alpha spending approach
        else if (design$typeOfDesign == C_TYPE_OF_DESIGN_AS_USER) {
            .getDesignGroupSequentialUserDefinedAlphaSpending(design)
        }
    }

    design$stageLevels <- 1 - stats::pnorm(design$criticalValues)
    design$.setParameterType("stageLevels", C_PARAM_GENERATED)

    if (design$kMax == 1) {
        design$.setParameterType("futilityBounds", C_PARAM_NOT_APPLICABLE)
    }
    if (length(design$futilityBounds) == 0 || all(design$futilityBounds == C_FUTILITY_BOUNDS_DEFAULT)) {
        # design$bindingFutility <- NA
        design$.setParameterType("bindingFutility", C_PARAM_NOT_APPLICABLE)
        design$.setParameterType("futilityBounds", C_PARAM_NOT_APPLICABLE)
    }

    return(design)
}

#' @title
#' Get Design Group Sequential
#'
#' @description
#' Provides adjusted boundaries and defines a group sequential design.
#'
#' @param kMax The maximum number of stages K. K = 1, 2, 3,..., 10, default is \code{3}.
#' @param alpha The significance level alpha, default is \code{0.025}.
#' @param beta Type II error rate, necessary for providing sample size calculations \cr
#' (e.g., \code{\link{getSampleSizeMeans}}), beta spending function designs,
#' or optimum designs, default is \code{0.20}.
#' @param sided One-sided or two-sided, default is \code{1}.
#' @param typeOfDesign The type of design, one of the following:
#' O'Brien & Fleming ("OF"), Pocock ("P"), Wang & Tsiatis Delta class ("WT"),
#' Haybittle & Peto ("HP"), Optimum design within Wang & Tsiatis class ("WToptimum"),
#' O'Brien & Fleming type alpha spending ("asOF"), Pocock type alpha spending ("asP"),
#' Kim & DeMets alpha spending ("asKD"), Hwang, Shih & DeCani alpha spending ("asHSD"),
#' user defined alpha spending ("asUser"), default is \code{"OF"}.
#' @param informationRates The information rates, default is \code{(1 : kMax)/kMax}.
#' @param futilityBounds The futility bounds, defined on the test statistic z scale (vector of length K - 1).
#' @param bindingFutility If \code{bindingFutility = TRUE} is specified, the calculation of
#' the critical values is affected by the futility bounds (default is \code{FALSE}).
#' @param deltaWT Delta for Wang & Tsiatis Delta class.
#' @param constantBoundsHP The constant bounds up to stage K - 1 for the
#' Haybittle & Peto design (default is \code{3}).
#' @param optimizationCriterion Optimization criterion for optimum design within
#' Wang & Tsiatis class ("ASNH1", "ASNIFH1", "ASNsum"), default is \code{"ASNH1"}.
#' @param typeBetaSpending The type of beta spending, one of the following:
#' O'Brien & Fleming type beta spending, Pocock type beta spending,
#' Kim & DeMets beta spending, Hwang, Shih & DeCani beta spending, user defined
#' beta spending ("bsOF", "bsP",...).
#' @param gammaA Parameter for alpha spending function, default is \code{1}.
#' @param gammaB Parameter for beta spending function, default is \code{1}.
#' @param userAlphaSpending The user defined alpha spending. Vector of length kMax containing the cumulative
#' alpha-spending up to each interim stage.
#' @param userBetaSpending The user defined beta spending. Vector of length kMax containing the cumulative
#' beta-spending up to each interim stage.
#' @param twoSidedPower For two-sided testing, if \code{twoSidedPower = TRUE} is specified,
#' the sample size calculation is performed by considering both tails of the distribution.
#' Default is \code{FALSE}, i.e., it is assumed that one tail probability is equal to 0 or that the power
#' should be directed towards one side.
#' @param tolerance The tolerance, default is \code{1e-08}.
#' @param ... Ensures that all arguments are named and
#' that a warning will be displayed if unknown arguments are passed.
#'
#' @details
#' Depending on \code{typeOfDesign}, some parameters need to be specified and others do not.
#' For example, only if \code{typeOfDesign} "asHSD" is selected, \code{gammaA} needs to be specified.
#'
#' If an alpha spending approach was specified ("asOF", "asP", "asKD", "asHSD", or "asUser"),
#' additionally a beta spending function can be specified to produce futility bounds.
#'
#' @return Returns a \code{\link{TrialDesignGroupSequential}} object.
#'
#' @export
#'
#' @seealso \code{\link{getDesignSet}} for creating a set of designs to compare.
#'
#' @examples
#'
#' # Run with default values
#' getDesignGroupSequential()
#'
#' # Calculate the Pocock type alpha spending critical values if the second
#' # interim analysis was performed after 70% of information was observed
#' getDesignGroupSequential(informationRates = c(0.4, 0.7), typeOfDesign = "asP")
#'
getDesignGroupSequential <- function(
        ...,
        kMax = NA_integer_,
        alpha = NA_real_,
        beta = NA_real_,
        sided = 1,
        informationRates = NA_real_,
        futilityBounds = NA_real_,
        typeOfDesign = C_DEFAULT_TYPE_OF_DESIGN,
        deltaWT = 0,
        optimizationCriterion = C_OPTIMIZATION_CRITERION_DEFAULT,
        gammaA = 1,
        typeBetaSpending = C_TYPE_OF_DESIGN_BS_NONE,
        userAlphaSpending = NA_real_,
        userBetaSpending = NA_real_,
        gammaB = 1,
        bindingFutility = NA,
        constantBoundsHP = C_CONST_BOUND_HP_DEFAULT,
        twoSidedPower = NA,
        tolerance = C_DESIGN_TOLERANCE_DEFAULT) {

    .warnInCaseOfUnknownArguments(functionName = "getDesignGroupSequential", ...)
return(.getDesignGroupSequential( designClass = C_CLASS_NAME_TRIAL_DESIGN_GROUP_SEQUENTIAL, kMax = kMax, alpha = alpha, beta = beta, sided = sided, informationRates = informationRates, futilityBounds = futilityBounds, typeOfDesign = typeOfDesign, deltaWT = deltaWT, optimizationCriterion = optimizationCriterion, gammaA = gammaA, typeBetaSpending = typeBetaSpending, userAlphaSpending = userAlphaSpending, userBetaSpending = userBetaSpending, gammaB = gammaB, bindingFutility = bindingFutility, constantBoundsHP = constantBoundsHP, twoSidedPower = twoSidedPower, tolerance = tolerance, userFunctionCallEnabled = TRUE)) } .getFixedSampleSize <- function(alpha, beta, sided, twoSidedPower = C_TWO_SIDED_POWER_DEFAULT) { .assertIsValidAlphaAndBeta(alpha = alpha, beta = beta) .assertIsValidSidedParameter(sided) if (sided == 1) { return((stats::qnorm(1 - alpha) + stats::qnorm(1 - beta))^2) } if (twoSidedPower) { n <- .getOneDimensionalRoot(function(n) { stats::pnorm(-stats::qnorm(1 - alpha / 2) - sqrt(n)) - stats::pnorm(stats::qnorm(1 - alpha / 2) - sqrt(n)) + beta }, lower = 0, upper = 2 * (stats::qnorm(1 - alpha / 2) + stats::qnorm(1 - beta))^2, tolerance = 1e-08) } else { n <- (stats::qnorm(1 - alpha / 2) + stats::qnorm(1 - beta))^2 } return(n) } #' @title #' Get Design Characteristics #' #' @description #' Calculates the characteristics of a design and returns it. #' #' @param design The design. #' #' @details #' Calculates the inflation factor (IF), #' the expected reduction in sample size under H1, under H0, and under a value in between H0 and H1. #' Furthermore, absolute information values are calculated #' under the prototype case testing H0: mu = 0 against H1: mu = 1. #' #' @return Returns a \code{\link{TrialDesignCharacteristics}} object. #' #' @export #' #' @examples #' #' # Run with default values #' getDesignCharacteristics(getDesignGroupSequential()) #' getDesignCharacteristics <- function(design) { return(.getDesignCharacteristics(design = design, userFunctionCallEnabled = TRUE) ) } .getDesignCharacteristics <- function(..., design, userFunctionCallEnabled = FALSE) { .assertIsTrialDesignInverseNormalOrGroupSequential(design) .assertDesignParameterExists(design, "sided", 1) .assertIsValidSidedParameter(design$sided) if (userFunctionCallEnabled) { .validateAlphaAndBeta(design = design) } design$informationRates <- .getValidatedInformationRates(design, writeToDesign = FALSE) design$futilityBounds <- .getValidatedFutilityBounds(design, writeToDesign = FALSE) designCharacteristics <- TrialDesignCharacteristics(design = design) designCharacteristics$rejectionProbabilities <- rep(NA_real_, design$kMax) designCharacteristics$.setParameterType("rejectionProbabilities", C_PARAM_NOT_APPLICABLE) designCharacteristics$futilityProbabilities <- rep(NA_real_, design$kMax - 1) designCharacteristics$.setParameterType("futilityProbabilities", C_PARAM_NOT_APPLICABLE) nFixed <- .getFixedSampleSize(alpha = design$alpha, beta = design$beta, sided = design$sided, twoSidedPower = design$twoSidedPower) designCharacteristics$nFixed <- nFixed designCharacteristics$.setParameterType("nFixed", C_PARAM_GENERATED) if (design$kMax == 1) { designCharacteristics$shift <- nFixed designCharacteristics$.setParameterType("shift", C_PARAM_GENERATED) designCharacteristics$inflationFactor <- designCharacteristics$shift / nFixed designCharacteristics$.setParameterType("inflationFactor", C_PARAM_GENERATED) designCharacteristics$power <- 1 - design$beta designCharacteristics$.setParameterType("power", 
design$.getParameterType("power")) designCharacteristics$.setParameterType("information", C_PARAM_NOT_APPLICABLE) designCharacteristics$.setParameterType("averageSampleNumber1", C_PARAM_NOT_APPLICABLE) designCharacteristics$.setParameterType("averageSampleNumber01", C_PARAM_NOT_APPLICABLE) designCharacteristics$.setParameterType("averageSampleNumber0", C_PARAM_NOT_APPLICABLE) designCharacteristics$.setParameterType(".probs", C_PARAM_NOT_APPLICABLE) return(designCharacteristics) } informationRates <- design$informationRates shift <- .getOneDimensionalRoot(function(shift) { if (design$sided == 2) { decisionMatrix <- matrix(c(-design$criticalValues - sqrt(shift*informationRates), design$criticalValues - sqrt(shift*informationRates)), nrow = 2, byrow = TRUE) probs <- .getGroupSequentialProbabilities(decisionMatrix, informationRates) if (design$twoSidedPower) { return(sum(probs[3, ] - probs[2, ] + probs[1, ]) - 1 + design$beta) } else { return(sum(probs[3, ] - probs[2, ]) - 1 + design$beta) } } else { shiftedFutilityBounds <- design$futilityBounds - sqrt(shift*informationRates[1 : (design$kMax - 1)]) shiftedFutilityBounds[design$futilityBounds <= C_FUTILITY_BOUNDS_DEFAULT] <- C_FUTILITY_BOUNDS_DEFAULT decisionMatrix <- matrix(c(shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT, design$criticalValues - sqrt(shift*informationRates)), nrow = 2, byrow = TRUE) probs <- .getGroupSequentialProbabilities(decisionMatrix , informationRates) return(sum(probs[3, ] - probs[2, ]) - 1 + design$beta) } }, lower = 0, upper = 4 * (stats::qnorm(1 - design$alpha / design$sided) + stats::qnorm(1 - design$beta))^2, tolerance = design$tolerance) if (design$sided == 2) { decisionMatrix <- matrix(c(-design$criticalValues - sqrt(shift*informationRates), design$criticalValues - sqrt(shift*informationRates)), nrow = 2, byrow = TRUE) } else { shiftedFutilityBounds <- design$futilityBounds - sqrt(shift*informationRates[1 : (design$kMax - 1)]) shiftedFutilityBounds[design$futilityBounds <= C_FUTILITY_BOUNDS_DEFAULT] <- C_FUTILITY_BOUNDS_DEFAULT decisionMatrix <- matrix(c(shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT, design$criticalValues - sqrt(shift*informationRates)), nrow = 2, byrow = TRUE) } probs <- .getGroupSequentialProbabilities(decisionMatrix , informationRates) designCharacteristics$shift <- shift designCharacteristics$.setParameterType("shift", C_PARAM_GENERATED) designCharacteristics$.probs <- probs designCharacteristics$.setParameterType(".probs", C_PARAM_GENERATED) if (design$twoSidedPower) { designCharacteristics$power <- cumsum(probs[3, ] - probs[2, ] + probs[1, ]) } else { designCharacteristics$power <- cumsum(probs[3, ] - probs[2, ]) } designCharacteristics$.setParameterType("power", C_PARAM_GENERATED) if (design$twoSidedPower) { designCharacteristics$rejectionProbabilities <- probs[3, ] - probs[2, ] + probs[1, ] } else { designCharacteristics$rejectionProbabilities <- probs[3, ] - probs[2, ] } designCharacteristics$.setParameterType("rejectionProbabilities", C_PARAM_GENERATED) if (design$kMax > 1) { if (design$sided == 2) { designCharacteristics$futilityProbabilities <- rep(0, design$kMax - 1) } else { designCharacteristics$futilityProbabilities <- probs[1, 1 : (design$kMax - 1)] } designCharacteristics$.setParameterType("futilityProbabilities", C_PARAM_GENERATED) } designCharacteristics$information <- informationRates * shift designCharacteristics$.setParameterType("information", C_PARAM_GENERATED) designCharacteristics$averageSampleNumber1 <- .getAverageSampleNumber(design$kMax, 
        design$informationRates, probs, shift, nFixed)
    designCharacteristics$.setParameterType("averageSampleNumber1", C_PARAM_GENERATED)

    if (design$sided == 2) {
        decisionMatrix <- matrix(c(-design$criticalValues, design$criticalValues), nrow = 2, byrow = TRUE)
    } else {
        decisionMatrix <- matrix(c(design$futilityBounds, C_FUTILITY_BOUNDS_DEFAULT,
            design$criticalValues), nrow = 2, byrow = TRUE)
    }
    probs0 <- .getGroupSequentialProbabilities(decisionMatrix, informationRates)
    designCharacteristics$averageSampleNumber0 <- .getAverageSampleNumber(design$kMax,
        design$informationRates, probs0, shift, nFixed)
    designCharacteristics$.setParameterType("averageSampleNumber0", C_PARAM_GENERATED)

    if (design$sided == 2) {
        decisionMatrix <- matrix(c(-design$criticalValues - sqrt(shift * informationRates) / 2,
            design$criticalValues - sqrt(shift * informationRates) / 2), nrow = 2, byrow = TRUE)
    } else {
        shiftedFutilityBounds <- design$futilityBounds -
            sqrt(shift * informationRates[1 : (design$kMax - 1)]) / 2
        shiftedFutilityBounds[design$futilityBounds <= C_FUTILITY_BOUNDS_DEFAULT] <- C_FUTILITY_BOUNDS_DEFAULT
        decisionMatrix <- matrix(c(shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT,
            design$criticalValues - sqrt(shift * informationRates) / 2), nrow = 2, byrow = TRUE)
    }
    probs01 <- .getGroupSequentialProbabilities(decisionMatrix, informationRates)
    designCharacteristics$averageSampleNumber01 <- .getAverageSampleNumber(design$kMax,
        design$informationRates, probs01, shift, nFixed)
    designCharacteristics$.setParameterType("averageSampleNumber01", C_PARAM_GENERATED)

    designCharacteristics$inflationFactor <- shift / nFixed
    designCharacteristics$.setParameterType("inflationFactor", C_PARAM_GENERATED)
    if ((designCharacteristics$inflationFactor > 4) || (designCharacteristics$inflationFactor < 1)) {
        stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "Inflation factor cannot be calculated")
    }

    return(designCharacteristics)
}

.getAverageSampleNumber <- function(kMax, informationRates, probs, shift, nFixed) {
    return((shift - sum((probs[3, 1 : (kMax - 1)] - probs[2, 1 : (kMax - 1)] + probs[1, 1 : (kMax - 1)]) *
        (informationRates[kMax] - informationRates[1 : (kMax - 1)]) * shift)) / nFixed)
}

#'
#' @title
#' Get Power And Average Sample Number
#'
#' @description
#' Returns the power and average sample number of the specified design.
#'
#' @param design The design.
#' @param theta A vector of standardized effect sizes.
#' @param nMax The maximum sample size.
#'
#' @details
#' This function returns the power and average sample number (ASN) of the specified design
#' for the prototype case, i.e., testing H0: mu = mu0 in a one-sample design.
#' theta represents the standardized effect (mu - mu0)/sigma, and power and ASN are calculated
#' for the maximum sample size nMax.
#' For designs other than the one-sample test of a mean, the standardized effect needs to be
#' adjusted accordingly.
#'
#' @return Returns a \code{\link{PowerAndAverageSampleNumberResult}} object.
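#' The result can be coerced to a data frame with \code{as.data.frame}
#' (see \code{as.data.frame.PowerAndAverageSampleNumberResult}).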
#' #' @export #' #' @examples #' #' getPowerAndAverageSampleNumber( #' getDesignGroupSequential(), #' theta = seq(-1, 1, 0.5), nMax = 100) #' getPowerAndAverageSampleNumber <- function(design, theta = seq(-1, 1, 0.02), nMax = 100) { .assertIsTrialDesign(design) .assertIsSingleNumber(nMax, "nMax") .assertIsInClosedInterval(nMax, "nMax", lower = 1, upper = NULL) return(PowerAndAverageSampleNumberResult(design = design, theta = theta, nMax = nMax)) } rpact/R/f_core_plot.R0000644000176200001440000007367013533453523014225 0ustar liggesusers###################################################################################### # # # -- Plot functions -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 25-02-2019 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### .addNumberToPlotCaption <- function(caption, type, numberInCaptionEnabled = FALSE) { if (!numberInCaptionEnabled) { return(caption) } return(paste0(caption, " [", type, "]")) } .getPlotCaption <- function(obj, type, numberInCaptionEnabled = FALSE) { if (is.null(obj) || length(type) == 0) { return(NA_character_) } if (inherits(obj, "TrialDesignPlan") || inherits(obj, "SimulationResults")) { if (type == 1) { if (.isTrialDesignPlanSurvival(obj)) { return(.addNumberToPlotCaption("Boundaries Z Scale", type, numberInCaptionEnabled)) } else { return(.addNumberToPlotCaption("Boundaries", type, numberInCaptionEnabled)) } } else if (type == 2) { return(.addNumberToPlotCaption("Boundaries Effect Scale", type, numberInCaptionEnabled)) } else if (type == 3) { return(.addNumberToPlotCaption("Boundaries p Values Scale", type, numberInCaptionEnabled)) } else if (type == 4) { return(.addNumberToPlotCaption("Type One Error Spending", type, numberInCaptionEnabled)) } else if (type == 5) { if (obj$.isSampleSizeObject()) { return(.addNumberToPlotCaption("Sample Size", type, numberInCaptionEnabled)) } else { return(.addNumberToPlotCaption("Overall Power and Early Stopping", type, numberInCaptionEnabled)) } } else if (type == 6) { return(.addNumberToPlotCaption(ifelse(.isTrialDesignPlanSurvival(obj) || inherits(obj, "SimulationResultsSurvival"), "Number of Events", "Sample Size"), type, numberInCaptionEnabled)) } else if (type == 7) { return(.addNumberToPlotCaption("Overall Power", type, numberInCaptionEnabled)) } else if (type == 8) { return(.addNumberToPlotCaption("Overall Early Stopping", type, numberInCaptionEnabled)) } else if (type == 9) { if (.isTrialDesignPlanSurvival(obj) || inherits(obj, "SimulationResultsSurvival")) { return(.addNumberToPlotCaption("Expected Number of Events", type, numberInCaptionEnabled)) } else { return(.addNumberToPlotCaption("Expected Sample Size", type, numberInCaptionEnabled)) } } else if (type == 10) { return(.addNumberToPlotCaption("Study Duration", type, numberInCaptionEnabled)) } else if (type == 11) { return(.addNumberToPlotCaption("Expected Number of Subjects", type, numberInCaptionEnabled)) } else if (type == 12) { return(.addNumberToPlotCaption("Analysis Times", type, numberInCaptionEnabled)) } else if (type == 13) { 
return(.addNumberToPlotCaption("Cumulative Distribution Function", type, numberInCaptionEnabled)) } else if (type == 14) { return(.addNumberToPlotCaption("Survival Function", type, numberInCaptionEnabled)) } } else if (inherits(obj, "TrialDesign") || inherits(obj, "TrialDesignSet")) { if (type == 1) { return(.addNumberToPlotCaption("Boundaries", type, numberInCaptionEnabled)) } else if (type == 3) { return(.addNumberToPlotCaption("Stage Levels", type, numberInCaptionEnabled)) } else if (type == 4) { return(.addNumberToPlotCaption("Type One Error Spending", type, numberInCaptionEnabled)) } else if (type == 5) { return(.addNumberToPlotCaption('Power and Early Stopping', type, numberInCaptionEnabled)) } else if (type == 6) { return(.addNumberToPlotCaption('Average Sample Size and Power / Early Stop', type, numberInCaptionEnabled)) } else if (type == 7) { return(.addNumberToPlotCaption('Power', type, numberInCaptionEnabled)) } else if (type == 8) { return(.addNumberToPlotCaption('Early Stopping', type, numberInCaptionEnabled)) } else if (type == 9) { return(.addNumberToPlotCaption('Average Sample Size', type, numberInCaptionEnabled)) } } return(NA_character_) } #' #' @title #' Get Available Plot Types #' #' @description #' Function to identify the available plot types of an object. #' #' @param obj The object for which the plot types shall be identified, e.g. produced by #' \code{\link{getDesignGroupSequential}} or \code{\link{getSampleSizeMeans}}. #' @param output The output type. Can be one of \code{c("numeric", "caption", "numcap", "capnum")}. #' @param numberInCaptionEnabled If \code{TRUE}, the number will be added to the #' caption, default is \code{FALSE}. #' #' @details #' #' \code{output}: #' \enumerate{ #' \item \code{numeric}: numeric output #' \item \code{caption}: caption as character output #' \item \code{numcap}: list with number and caption #' \item \code{capnum}: list with caption and number #' } #' #' @keywords internal #' #' @export #' getAvailablePlotTypes <- function(obj, output = c("numeric", "caption", "numcap", "capnum"), numberInCaptionEnabled = FALSE) { output <- match.arg(output) if (is.null(obj)) { if (output == "numeric") { return(NA_real_) } if (output == "caption") { return(NA_character_) } return(list()) } types <- c() if (inherits(obj, "TrialDesignPlan")) { if (obj$.design$kMax > 1) { types <- c(types, 1:4) } types <- c(types, 5) if (obj$.isSampleSizeObject()) { if (.isTrialDesignPlanSurvival(obj)) { types <- c(types, 13, 14) } } else { types <- c(types, 6:9) if (.isTrialDesignPlanSurvival(obj)) { types <- c(types, 10:14) } } } else if (inherits(obj, "SimulationResults")) { types <- c(types, 5:9) if (inherits(obj, "SimulationResultsSurvival")) { types <- c(types, 10:14) } } else if (inherits(obj, "TrialDesign") || inherits(obj, "TrialDesignSet")) { if (inherits(obj, "TrialDesignFisher")) { types <- c(1, 3, 4) } else { types <- c(types, 1, 3:9) } } if (output == "numeric") { return(types) } if (output == "caption") { captions <- c() for (type in types) { captions <- c(captions, .getPlotCaption(obj, type = type, numberInCaptionEnabled = numberInCaptionEnabled)) } return(captions) } if (output == "numcap") { numcap <- list() for (type in types) { numcap[[as.character(type)]] <- .getPlotCaption(obj, type = type, numberInCaptionEnabled = numberInCaptionEnabled) } return(numcap) } capnum <- list() for (type in types) { capnum[[.getPlotCaption(obj, type = type, numberInCaptionEnabled = numberInCaptionEnabled)]] <- type } return(capnum) } .getVariedParameterHint <- 
function(variedParameter, variedParameterName) {
	return(paste0("Note: interim values between ", round(variedParameter[1], 4),
		" and ", round(variedParameter[2], 4),
		" were calculated to get smoother lines; use, e.g., '",
		variedParameterName, " = ", .getVariedParameterVectorSeqCommand(variedParameter),
		"' to get all interim values"))
}

.showPlotSourceInformation <- function(objectName, ..., xParameterName, yParameterNames,
		hint = NA_character_, nMax = NA_integer_, showSource = FALSE) {
	if (!isTRUE(showSource)) {
		return(invisible())
	}

	cat("Source data of the plot:\n")
	if (length(objectName) == 0 || is.na(objectName)) {
		objectName <- "x"
	}
	if (!is.na(nMax) && length(yParameterNames) < 3 && xParameterName == "informationRates") {
		cat(" x-axis: ", objectName, "$", xParameterName, " * ", nMax, "\n", sep = "")
	} else {
		cat(" x-axis: ", objectName, "$", xParameterName, "\n", sep = "")
	}
	if (length(yParameterNames) == 1) {
		cat(" y-axis: ", objectName, "$", yParameterNames, "\n", sep = "")
	} else {
		cat(" y-axes:\n")
		for (i in 1:length(yParameterNames)) {
			cat(" y", i, ": ", objectName, "$", yParameterNames[i], "\n", sep = "")
		}
	}
	if (!is.na(hint) && is.character(hint) && nchar(hint) > 0) {
		cat(hint, "\n", sep = "")
	}
	# Open work: add simple plot command example
}

.getParameterSetAsDataFrame <- function(parameterSet, designMaster,
		addPowerAndAverageSampleNumber = FALSE, theta = seq(-1, 1, 0.02), nMax = NA_integer_) {
	if (.isTrialDesignSet(parameterSet) && parameterSet$getSize() > 1 &&
			(is.null(parameterSet$variedParameters) ||
			length(parameterSet$variedParameters) == 0)) {
		stop("'variedParameters' must not be empty; ",
			"use 'DesignSet$addVariedParameters(character)' to add one or more varied parameters")
	}

	data <- as.data.frame(parameterSet, niceColumnNamesEnabled = FALSE,
		includeAllParameters = TRUE,
		addPowerAndAverageSampleNumber = addPowerAndAverageSampleNumber,
		theta = theta, nMax = nMax)

	if (!.isTrialDesignSet(parameterSet)) {
		return(list(data = data, variedParameters = character(0)))
	}

	if (parameterSet$getSize() <= 1) {
		return(list(data = data, variedParameters = parameterSet$variedParameters))
	}

	variedParameters <- parameterSet$variedParameters
	if (nrow(data) > 1) {
		for (variedParameter in variedParameters) {
			column <- data[[variedParameter]]
			if (length(column) <= 1) {
				stop("Varied parameter '", variedParameter, "' has length ", length(column))
			}
			valueBefore <- column[1]
			for (i in 2:length(column)) {
				if (is.na(column[i])) {
					column[i] <- valueBefore
				} else {
					valueBefore <- column[i]
				}
			}
			data[[variedParameter]] <- column
		}
	}

	variedParameterNames <- c()
	for (variedParameter in variedParameters) {
		variedParameterNames <- c(variedParameterNames,
			.getTableColumnNames(design = designMaster)[[variedParameter]])
	}
	names(variedParameters) <- variedParameterNames

	return(list(data = data, variedParameters = variedParameters))
}

.getCategories <- function(data, yParameterName, tableColumnNames) {
	if (is.null(data$categories) || sum(is.na(data$categories)) > 0) {
		return(rep(.getAxisLabel(yParameterName, tableColumnNames), nrow(data)))
	}
	return(paste(data$categories, .getAxisLabel(yParameterName, tableColumnNames), sep = ", "))
}

.getAxisLabel <- function(parameterName, tableColumnNames) {
	axisLabel <- tableColumnNames[[parameterName]]
	if (is.null(axisLabel)) {
		return(paste0("%", parameterName, "%"))
	}
	return(axisLabel)
}

.plotParameterSet <- function(parameterSet, designMaster, xParameterName,
		yParameterNames, mainTitle = NA_character_, xlab = NA_character_,
		ylab = NA_character_, palette = "Set1", theta = 
seq(-1, 1, 0.02), nMax = NA_integer_,
		plotPointsEnabled = NA, legendPosition = NA_integer_, variedParameters = logical(0),
		qnormAlphaLineEnabled = TRUE, yAxisScalingEnabled = TRUE, ratioEnabled = NA,
		plotSettings = NULL, ...) {

	if (.isParameterSet(parameterSet) || .isTrialDesignSet(parameterSet)) {
		parameterNames <- c(xParameterName, yParameterNames)
		parameterNames <- parameterNames[!(parameterNames %in% c("theta",
			"averageSampleNumber", "overallEarlyStop", "calculatedPower"))]
		fieldNames <- c(names(parameterSet$getRefClass()$fields()),
			names(designMaster$getRefClass()$fields()))
		for (parameterName in parameterNames) {
			if (!is.na(parameterName) && !(parameterName %in% fieldNames)) {
				print(fieldNames)
				stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", class(parameterSet),
					"' and '", class(designMaster), "' ",
					"do not contain a field with name '", parameterName, "'")
			}
		}
		if (is.null(plotSettings) || !inherits(plotSettings, "PlotSettings")) {
			plotSettings <- parameterSet$getPlotSettings()
		}
	} else {
		if (is.null(plotSettings) || !inherits(plotSettings, "PlotSettings")) {
			plotSettings <- PlotSettings()
		}
	}

	if (.isTrialDesignSet(parameterSet)) {
		parameterSet$assertHaveEqualSidedValues()
	}

	addPowerAndAverageSampleNumber <- xParameterName == "theta" &&
		yParameterNames[1] %in% c("averageSampleNumber", "calculatedPower",
			"overallEarlyStop", "overallReject", "overallFutility")
	if (!addPowerAndAverageSampleNumber) {
		addPowerAndAverageSampleNumber <- xParameterName == "effect" &&
			yParameterNames[1] %in% c("overallReject", "futilityStop",
				"earlyStop", "expectedNumberOfSubjects")
	}

	if (.isParameterSet(parameterSet) || .isTrialDesignSet(parameterSet)) {
		df <- .getParameterSetAsDataFrame(parameterSet, designMaster,
			addPowerAndAverageSampleNumber = addPowerAndAverageSampleNumber,
			theta = theta, nMax = nMax)
		data <- df$data
		variedParameters <- df$variedParameters
		variedParameters <- variedParameters[!is.na(variedParameters) & variedParameters != "NA"]
	} else if (is.data.frame(parameterSet)) {
		data <- parameterSet
	} else {
		stop("'parameterSet' (", class(parameterSet), ") must be a data.frame, a 'TrialDesignSet' ",
			"or an object that inherits from 'ParameterSet'")
	}

	if (length(variedParameters) > 0) {
		legendTitle <- paste(names(variedParameters), collapse = "\n")
		categoryParameterName <- variedParameters[1]
	} else {
		legendTitle <- NA_character_
		categoryParameterName <- NA_character_
	}

	yParameterName1 <- yParameterNames[1]
	yParameterName2 <- NULL
	yParameterName3 <- NULL
	if (length(yParameterNames) >= 2) {
		yParameterName2 <- yParameterNames[2]
	}
	if (length(yParameterNames) >= 3) {
		yParameterName3 <- yParameterNames[3]
	}

	mirrorModeEnabled <- FALSE
	if (!is.null(yParameterName2) && !is.na(yParameterName2)) {
		mirrorModeEnabled <- paste0(yParameterName1, "Mirrored") == yParameterName2
	}

	tableColumnNames <- .getTableColumnNames(design = designMaster)
	xAxisLabel <- .getAxisLabel(xParameterName, tableColumnNames)
	yAxisLabel1 <- .getAxisLabel(yParameterName1, tableColumnNames)
	yAxisLabel2 <- NULL
	if (!is.null(yParameterName2) && !is.null(yParameterName3)) {
		if (!is.na(yParameterName2)) {
			pn2 <- .getAxisLabel(yParameterName2, tableColumnNames)
			if (yParameterName2 == "overallEarlyStop") {
				pn2 <- "Stopping Probability"
			}
			yAxisLabel2 <- paste(pn2, .getAxisLabel(yParameterName3, tableColumnNames), sep = " and ")
		} else {
			yAxisLabel2 <- .getAxisLabel(yParameterName3, tableColumnNames)
		}
	} else if (!is.null(yParameterName2) && !mirrorModeEnabled) {
		yAxisLabel1 <- paste(yAxisLabel1, .getAxisLabel(yParameterName2, tableColumnNames), sep = " and ")
	}

	if (yParameterName1 %in% c("alphaSpent", "betaSpent")) {
		yAxisLabel1 <- "Cumulative Error"
		if (is.null(yParameterName2)) {
			yAxisLabel1 <- paste0(yAxisLabel1, " (", .getAxisLabel(yParameterName1, tableColumnNames), ")")
		}
	}

	data$xValues <- data[[xParameterName]]
	data$yValues <- data[[yParameterName1]]
	if (is.null(yParameterName2) || is.na(yParameterName2)) {
		data$yValues2 <- rep(NA_real_, nrow(data))
	} else {
		data$yValues2 <- data[[yParameterName2]]
	}
	if (is.null(yParameterName3)) {
		data$yValues3 <- rep(NA_real_, nrow(data))
	} else {
		data$yValues3 <- data[[yParameterName3]]
	}

	if (!is.na(categoryParameterName)) {
		data$categories <- data[[categoryParameterName]]
		if (length(variedParameters) > 1) {
			data$categories <- paste0(variedParameters[1], " = ", data$categories, ", ",
				variedParameters[2], " = ", data[[variedParameters[2]]])
		}
	} else {
		data$categories <- rep(NA_character_, nrow(data))
	}

	if (!is.na(nMax) && is.null(yParameterName3) && xParameterName == "informationRates") {
		xAxisLabel <- "Sample Size"
		data$xValues <- data$xValues * nMax
	}

	# add zero point to data
	if (yParameterName1 %in% c("alphaSpent", "betaSpent")) {
		data <- data[, c("xValues", "yValues", "yValues2", "categories")]
		uc <- unique(data$categories)
		data <- rbind(data.frame(
			xValues = rep(-0.00001, length(uc)),
			yValues = rep(0, length(uc)),
			yValues2 = rep(0, length(uc)),
			categories = uc
		), data)
	}

	scalingFactor1 <- 1
	scalingFactor2 <- 1
	if (!is.null(yParameterName2)) {
		if (yAxisScalingEnabled && !is.null(yParameterName3)) {
			if (is.na(yParameterName2)) {
				scalingFactors <- .getScalingFactors(data$yValues, data$yValues3)
			} else {
				scalingFactors <- .getScalingFactors(data$yValues, c(data$yValues2, data$yValues3))
			}
			scalingFactor1 <- scalingFactors$scalingFactor1
			scalingFactor2 <- scalingFactors$scalingFactor2
		}
		df1 <- data.frame(
			xValues = data$xValues,
			yValues = data$yValues * scalingFactor1,
			categories = .getCategories(data, yParameterName1, tableColumnNames)
		)
		if (!is.na(yParameterName2)) {
			df2 <- data.frame(
				xValues = data$xValues,
				yValues = data$yValues2 * scalingFactor2,
				categories = .getCategories(data, yParameterName2, tableColumnNames)
			)
		}
		if (!is.null(yParameterName3)) {
			df3 <- data.frame(
				xValues = data$xValues,
				yValues = data$yValues3 * scalingFactor2,
				categories = .getCategories(data, yParameterName3, tableColumnNames)
			)
			if (is.na(yParameterName2)) {
				data <- rbind(df1, df3)
			} else {
				data <- rbind(df1, df2, df3)
			}
		} else {
			data <- rbind(df1, df2)
		}

		# sort categories for pairwise printing of the legend
		uniqueValues <- unique(as.character(data$categories))
		decreasing <- addPowerAndAverageSampleNumber && xParameterName == "effect"
		data$categories <- factor(data$categories,
			levels = uniqueValues[order(uniqueValues, decreasing = decreasing)])

		if (yParameterName1 == "alphaSpent" && yParameterName2 == "betaSpent") {
			sep <- ifelse(length(legendTitle) > 0 && nchar(legendTitle) > 0, "\n", "")
			legendTitle <- paste(legendTitle, "Type of error", sep = sep)
		}
	}

	if (is.na(legendPosition)) {
		legendPosition <- .getLegendPosition(plotSettings, designMaster, data,
			yParameterName1, yParameterName2, addPowerAndAverageSampleNumber)
	}

	if (is.na(ratioEnabled)) {
		ratioEnabled <- .isTrialDesignPlanSurvival(parameterSet) ||
			(.isTrialDesignPlanMeans(parameterSet) && parameterSet$meanRatio) ||
			(.isTrialDesignPlanRates(parameterSet) && parameterSet$riskRatio)
	}

	p <- .plotDataFrame(data, mainTitle = mainTitle,
		xlab = xlab, ylab = ylab, xAxisLabel = xAxisLabel,
		yAxisLabel1 = yAxisLabel1, yAxisLabel2 = yAxisLabel2, palette = palette, 
plotPointsEnabled = plotPointsEnabled, legendTitle = legendTitle, legendPosition = legendPosition, scalingFactor1 = scalingFactor1, scalingFactor2 = scalingFactor2, addPowerAndAverageSampleNumber = addPowerAndAverageSampleNumber, mirrorModeEnabled = mirrorModeEnabled, ratioEnabled = ratioEnabled, plotSettings = plotSettings, sided = designMaster$sided, ...) if (xParameterName == "informationRates") { p <- p + ggplot2::scale_x_continuous(breaks=c(0, round(data$xValues, 3))) } if (!is.data.frame(parameterSet) && yParameterName1 == "criticalValues" && designMaster$sided == 2) { p <- plotSettings$mirrorYValues(p, yValues = data$yValues, plotPointsEnabled = !addPowerAndAverageSampleNumber, pointBorder = .getPointBorder(data, plotSettings)) } if (!.isTrialDesignFisher(designMaster) && qnormAlphaLineEnabled && ((!is.data.frame(parameterSet) && (yParameterName1 == "criticalValues" || (yParameterName1 == "futilityBounds" && !is.null(yParameterName2) && yParameterName2 == "criticalValues"))) || (yParameterName1 %in% c("futilityBounds", "criticalValues") && !is.null(yParameterName2) && yParameterName2 %in% c("criticalValues", "criticalValuesMirrored")))) { p <- .addQnormAlphaLine(p, designMaster, plotSettings, data) } if (!.isTrialDesignFisher(designMaster) && (xParameterName == "informationRates" || xParameterName == "eventsPerStage") && yParameterName1 == "stageLevels") { yValue <- designMaster$alpha if (designMaster$sided == 2) { yValue <- yValue / 2 } p <- p + ggplot2::geom_hline(yintercept = yValue, linetype = "dashed") yValueLabel <- paste0("alpha == ", round(yValue, 4)) hjust <- -0.2 p <- p + ggplot2::annotate("label", x = -Inf, hjust = hjust, y = yValue, label = yValueLabel, size = 2.5, parse = TRUE, colour = "white", fill = "white") p <- p + ggplot2::annotate("text", x = -Inf, hjust = hjust - 0.15, y = yValue, label = yValueLabel, size = 2.5, parse = TRUE) } return(p) } .getScalingFactors <- function(leftAxisValues, rightAxisValues) { m1 <- ifelse(length(na.omit(leftAxisValues)) == 0, 1, max(na.omit(leftAxisValues))) m2 <- ifelse(length(na.omit(rightAxisValues)) == 0, 1, max(na.omit(rightAxisValues))) if (is.na(m1)) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "y-values, left (", .arrayToString(leftAxisValues), ") are not specified correctly") } if (is.na(m2)) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "y-values, right (", .arrayToString(rightAxisValues), ") are not specified correctly") } if (m1 > m2) { scalingFactor1 <- 1 scalingFactor2 <- m1 / m2 } else if (m1 < m2) { scalingFactor1 <- m2 / m1 scalingFactor2 <- 1 } else { scalingFactor1 <- 1 scalingFactor2 <- 1 } return(list(scalingFactor1 = scalingFactor1, scalingFactor2 = scalingFactor2)) } .plotDataFrame <- function(data, mainTitle = NA_character_, xlab = NA_character_, ylab = NA_character_, xAxisLabel = NA_character_, yAxisLabel1 = NA_character_, yAxisLabel2 = NA_character_, palette = "Set1", plotPointsEnabled = NA, legendTitle = NA_character_, legendPosition = NA_integer_, scalingFactor1 = 1, scalingFactor2 = 1, addPowerAndAverageSampleNumber = FALSE, mirrorModeEnabled = FALSE, ratioEnabled = FALSE, plotSettings = NULL, sided = 1, ...) 
{ if (!is.data.frame(data)) { stop("'data' must be a data.frame (is ", class(data), ")") } if (is.null(plotSettings)) { plotSettings <- PlotSettings() } nRow <- nrow(data) data <- data[!(data$xValues == 0 & data$xValues == data$yValues), ] removedRows1 <- nRow - nrow(data) nRow <- nrow(data) data <- data[!is.na(data$yValues), ] removedRows2 <- nRow - nrow(data) if (getLogLevel() == C_LOG_LEVEL_WARN && (removedRows1 > 0 || removedRows2 > 0)) { warning(sprintf("Removed %s rows containing (0, 0)-points and %s rows containing missing values", removedRows1, removedRows2), call. = FALSE) } categoryEnabled <- !is.null(data$categories) && (sum(is.na(data$categories)) < length(data$categories)) if (categoryEnabled) { data <- data[, c("xValues", "yValues", "categories")] } else { data <- data[, c("xValues", "yValues")] } if (mirrorModeEnabled) { p <- ggplot2::ggplot(data, ggplot2::aes(x = data$xValues, y = data$yValues, fill = factor(data$categories))) } else if (categoryEnabled) { p <- ggplot2::ggplot(data, ggplot2::aes(x = data$xValues, y = data$yValues, colour = factor(data$categories))) } else { p <- ggplot2::ggplot(data, ggplot2::aes(x = data$xValues, y = data$yValues)) } p <- plotSettings$setTheme(p) p <- plotSettings$hideGridLines(p) # set main title p <- plotSettings$setMainTitle(p, mainTitle) # set legend if (mirrorModeEnabled || (!is.na(legendPosition) && legendPosition == -1)) { p <- p + ggplot2::theme(legend.position = "none") } else if (categoryEnabled) { p <- plotSettings$setLegendPosition(p, legendPosition = legendPosition) p <- plotSettings$setLegendBorder(p) p <- plotSettings$setLegendTitle(p, legendTitle) p <- plotSettings$setLegendLabelSize(p) } # set optional scale limits xLim <- .getOptionalArgument("xlim", ...) yLim <- .getOptionalArgument("ylim", ...) if (is.null(yLim) && !missing(yAxisLabel1) && !is.na(yAxisLabel1) && yAxisLabel1 == "Critical value") { yMax <- max(na.omit(data$yValues)) if (length(yMax) == 1 && yMax < 0.1) { yLim <- c(0, 2 * yMax) } } if ((!is.null(xLim) && is.numeric(xLim) && length(xLim) == 2) || (!is.null(yLim) && is.numeric(yLim) && length(yLim) == 2)) { p <- p + ggplot2::coord_cartesian(xlim = xLim, ylim = yLim, expand = TRUE, default = FALSE, clip = "on") } # add dashed line to y = 0 or y = 1 if (mirrorModeEnabled) { p <- p + ggplot2::geom_hline(yintercept = ifelse(ratioEnabled, 1, 0), linetype = "dashed") } xAxisLabel <- .toCapitalized(xAxisLabel) yAxisLabel1 <- .toCapitalized(yAxisLabel1) yAxisLabel2 <- .toCapitalized(yAxisLabel2) p <- plotSettings$setAxesLabels(p, xAxisLabel = xAxisLabel, yAxisLabel1 = yAxisLabel1, yAxisLabel2 = yAxisLabel2, xlab = xlab, ylab = ylab, scalingFactor1 = scalingFactor1, scalingFactor2 = scalingFactor2) # plot lines and points plotPointsEnabled <- ifelse(is.na(plotPointsEnabled), !addPowerAndAverageSampleNumber, plotPointsEnabled) if (length(data$xValues) > 20) { plotPointsEnabled <- FALSE } p <- plotSettings$plotValues(p, plotPointsEnabled = plotPointsEnabled, pointBorder = .getPointBorder(data, plotSettings)) p <- plotSettings$setAxesAppearance(p) p <- plotSettings$setColorPalette(p, palette) p <- plotSettings$enlargeAxisTicks(p) companyAnnotationEnabled <- .getOptionalArgument("companyAnnotationEnabled", ...) 
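	# treat a missing or non-logical 'companyAnnotationEnabled' argument as FALSE (annotation off)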
if (is.null(companyAnnotationEnabled) || !is.logical(companyAnnotationEnabled)) {
		companyAnnotationEnabled <- FALSE
	}
	p <- plotSettings$addCompanyAnnotation(p, enabled = companyAnnotationEnabled)

	# start plot generation
	return(p)
}

.getPointBorder <- function(data, plotSettings) {
	numberOfCategories <- 1
	if (sum(is.na(data$categories)) < length(data$categories)) {
		numberOfCategories <- length(unique(as.character(data$categories)))
	}
	pointBorder <- 4
	if (length(data$xValues) / numberOfCategories > 10) {
		pointBorder <- 1
		plotSettings$adjustPointSize(-2)
	} else if (numberOfCategories > 8) {
		pointBorder <- 1
	} else if (numberOfCategories > 6) {
		pointBorder <- 2
	} else if (numberOfCategories > 4) {
		pointBorder <- 3
	}
	return(pointBorder)
}

.getLegendPosition <- function(plotSettings, designMaster, data,
		yParameterName1, yParameterName2, addPowerAndAverageSampleNumber) {
	if (length(unique(data$categories)) > 6) {
		plotSettings$adjustPointSize(-0.5)
		plotSettings$adjustLegendFontSize(-2)
		return(C_POSITION_OUTSIDE_PLOT)
	}

	if (.isTrialDesignWithValidFutilityBounds(designMaster) &&
			yParameterName1 == "futilityBounds" && yParameterName2 == "criticalValues") {
		return(C_POSITION_RIGHT_BOTTOM)
	}
	if (.isTrialDesignWithValidAlpha0Vec(designMaster) &&
			yParameterName1 == "alpha0Vec" && yParameterName2 == "criticalValues") {
		return(C_POSITION_RIGHT_TOP)
	}
	if (yParameterName1 == "criticalValues") {
		return(C_POSITION_RIGHT_TOP)
	}
	if (yParameterName1 %in% c("stageLevels", "alphaSpent", "betaSpent")) {
		return(C_POSITION_LEFT_TOP)
	}
	if (addPowerAndAverageSampleNumber) {
		return(C_POSITION_LEFT_CENTER)
	}
	return(C_POSITION_OUTSIDE_PLOT)
}

.addQnormAlphaLine <- function(p, designMaster, plotSettings, data, annotationEnabled = TRUE) {
	alpha <- designMaster$alpha
	if (designMaster$sided == 2) {
		alpha <- alpha / 2
	}
	yValue <- stats::qnorm(1 - alpha)
	yValueLabel <- paste0("qnorm(1 - ", alpha, ") == ", round(yValue, 4))
	p <- p + ggplot2::geom_hline(yintercept = yValue, linetype = "dashed")
	if (designMaster$sided == 2) {
		p <- p + ggplot2::geom_hline(yintercept = -yValue, linetype = "dashed")
	}
	if (annotationEnabled) {
		p <- p + ggplot2::annotate("label", x = -Inf, hjust = -0.1, y = yValue,
			label = yValueLabel, size = 2.5, parse = TRUE, colour = "white", fill = "white")
		p <- p + ggplot2::annotate("text", x = -Inf, hjust = -0.15, y = yValue,
			label = yValueLabel, size = 2.5, parse = TRUE)
	}

	# expand y-axis range
	if (designMaster$sided == 1) {
		yMax <- max(stats::na.omit(data$yValues))
		if (!is.null(data$yValues2) && length(data$yValues2) > 0) {
			yMax <- max(yMax, stats::na.omit(data$yValues2))
		}
		eps <- (yMax - yValue) * 0.15
		p <- plotSettings$expandAxesRange(p, y = yValue - eps)
	}

	return(p)
}

.getLambdaStepFunctionByTime <- function(time, piecewiseSurvivalTime, lambda2) {
	if (length(piecewiseSurvivalTime) == 0 || any(is.na(piecewiseSurvivalTime))) {
		return(lambda2[1])
	}
	for (i in 1:length(piecewiseSurvivalTime)) {
		if (time <= piecewiseSurvivalTime[i]) {
			return(lambda2[i])
		}
	}
	return(lambda2[length(lambda2)])
}

.getLambdaStepFunction <- function(timeValues, piecewiseSurvivalTime, lambda) {
	if (length(piecewiseSurvivalTime) != length(lambda)) {
		stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "length of 'piecewiseSurvivalTime' (",
			length(piecewiseSurvivalTime), ") must be equal to length of 'lambda' (",
			length(lambda), ")")
	}

	piecewiseSurvivalTime <- .getPiecewiseExpStartTimesWithoutLeadingZero(piecewiseSurvivalTime)
	if (length(piecewiseSurvivalTime) == 0) { 
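		# no change points remain after stripping a leading zero:
		# the hazard is constant, so the first lambda applies to all time values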
return(lambda[1]) } lambdaValues <- c() for (time in timeValues) { lambdaValues <- c(lambdaValues, .getLambdaStepFunctionByTime(time, piecewiseSurvivalTime, lambda)) } return(lambdaValues) } rpact/R/class_analysis_dataset.R0000644000176200001440000030347513573662652016457 0ustar liggesusers###################################################################################### # # # -- Dataset classes -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.1 # # Date: 23-11-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### C_KEY_WORDS_GROUPS <- c("group", "groups") C_KEY_WORDS_STAGES <- c("stage", "stages") C_KEY_WORDS_SAMPLE_SIZES <- c("n", "sampleSizes", "sampleSize") C_KEY_WORDS_MEANS <- c("means", "mean") C_KEY_WORDS_ST_DEVS <- c("stDevs", "stDev", "stds", "st.dev", "sd") C_KEY_WORDS_EVENTS <- c("event", "events") C_KEY_WORDS_EVENTS_1 <- c("event1", "events1") C_KEY_WORDS_EVENTS_2 <- c("event2", "events2") C_KEY_WORDS_OVERALL_EVENTS <- c("overallEvents", "overallEvent", "overall.events", "overall.event") C_KEY_WORDS_OVERALL_EVENTS_1 <- c("overallEvents1", "overallEvent1", "overall.events.1", "overall.event.1") C_KEY_WORDS_OVERALL_EVENTS_2 <- c("overallEvents2", "overallEvent2", "overall.events.2", "overall.event.2") C_KEY_WORDS_SAMPLE_SIZES_1 <- c("n1", "sampleSize1", "sampleSizes1") C_KEY_WORDS_MEANS_1 <- c("means1", "mean1") C_KEY_WORDS_ST_DEVS_1 <- c("stDevs1", "stDev1", "stds1", "st.dev1", "sd1") C_KEY_WORDS_SAMPLE_SIZES_2 <- c("n2", "sampleSize2", "sampleSizes2") C_KEY_WORDS_MEANS_2 <- c("means2", "mean2") C_KEY_WORDS_ST_DEVS_2 <- c("stDevs2", "stDev2", "stds2", "st.dev2", "sd2") C_KEY_WORDS_OVERALL_SAMPLE_SIZES <- c("overallN", "overall.n", "overallSampleSizes", "overallSampleSize") C_KEY_WORDS_OVERALL_MEANS <- c("overallMeans", "overallMean", "overall.means", "overall.mean") C_KEY_WORDS_OVERALL_ST_DEVS <- c("overallStDevs", "overallStDev", "overall.st.dev", "overall.stds", "overall.sd") C_KEY_WORDS_OVERALL_SAMPLE_SIZES_1 <- c("overallN1", "overall.n.1", "overallSampleSizes1", "overallSampleSize1") C_KEY_WORDS_OVERALL_MEANS_1 <- c("overallMeans1", "overallMean1", "overall.means.1", "overall.mean.1") C_KEY_WORDS_OVERALL_ST_DEVS_1 <- c("overallStDevs1", "overallStDev1", "overall.st.dev.1", "overall.stds.1", "overall.sd.1") C_KEY_WORDS_OVERALL_SAMPLE_SIZES_2 <- c("overallN2", "overall.n.2", "overallSampleSizes2", "overallSampleSize2") C_KEY_WORDS_OVERALL_MEANS_2 <- c("overallMeans2", "overallMean2", "overall.means.2", "overall.mean.2") C_KEY_WORDS_OVERALL_ST_DEVS_2 <- c("overallStDevs2", "overallStDev2", "overall.st.dev.2", "overall.stds.2", "overall.sd.2") C_KEY_WORDS_ALLOCATION_RATIOS <- c("allocationRatios", "allocationRatio", "ar", "allocation.ratios", "allocation.ratio") C_KEY_WORDS_LOG_RANKS <- c("logRanks", "logRank", "lr", "log.ranks", "log.rank") C_KEY_WORDS_OVERALL_ALLOCATION_RATIOS <- c("overallAllocationRatios", "overallAllocationRatio", "oar", "overall.allocation.ratios", "overall.allocation.ratio") C_KEY_WORDS_OVERALL_LOG_RANKS <- c("overallLogRanks", "overallLogRank", "olr", 
"overall.log.ranks", "overall.log.rank") C_KEY_WORDS <- c( C_KEY_WORDS_GROUPS, C_KEY_WORDS_STAGES, C_KEY_WORDS_SAMPLE_SIZES, C_KEY_WORDS_MEANS, C_KEY_WORDS_ST_DEVS, C_KEY_WORDS_EVENTS, C_KEY_WORDS_EVENTS_1, C_KEY_WORDS_EVENTS_2, C_KEY_WORDS_OVERALL_EVENTS, C_KEY_WORDS_OVERALL_EVENTS_1, C_KEY_WORDS_OVERALL_EVENTS_2, C_KEY_WORDS_SAMPLE_SIZES_1, C_KEY_WORDS_MEANS_1, C_KEY_WORDS_ST_DEVS_1, C_KEY_WORDS_SAMPLE_SIZES_2, C_KEY_WORDS_MEANS_2, C_KEY_WORDS_ST_DEVS_2, C_KEY_WORDS_OVERALL_SAMPLE_SIZES, C_KEY_WORDS_OVERALL_MEANS, C_KEY_WORDS_OVERALL_ST_DEVS, C_KEY_WORDS_OVERALL_SAMPLE_SIZES_1, C_KEY_WORDS_OVERALL_MEANS_1, C_KEY_WORDS_OVERALL_ST_DEVS_1, C_KEY_WORDS_OVERALL_SAMPLE_SIZES_2, C_KEY_WORDS_OVERALL_MEANS_2, C_KEY_WORDS_OVERALL_ST_DEVS_2, C_KEY_WORDS_ALLOCATION_RATIOS, C_KEY_WORDS_LOG_RANKS, C_KEY_WORDS_OVERALL_ALLOCATION_RATIOS, C_KEY_WORDS_OVERALL_LOG_RANKS ) #' @title #' Read Dataset #' #' @description #' Reads a data file and returns it as dataset object. #' #' @param file A CSV file (see \code{\link[utils]{read.table}}). #' @param header A logical value indicating whether the file contains the names of #' the variables as its first line. #' @param sep The field separator character. Values on each line of the file are separated #' by this character. If sep = "," (the default for \code{readDataset}) the separator is a comma. #' @param quote The set of quoting characters. To disable quoting altogether, use #' quote = "". See scan for the behavior on quotes embedded in quotes. Quoting is only #' considered for columns read as character, which is all of them unless \code{colClasses} is specified. #' @param dec The character used in the file for decimal points. #' @param fill logical. If \code{TRUE} then in case the rows have unequal length, blank fields #' are implicitly added. #' @param comment.char character: a character vector of length one containing a single character #' or an empty string. Use "" to turn off the interpretation of comments altogether. #' @param fileEncoding character string: if non-empty declares the encoding used on a file #' (not a connection) so the character data can be re-encoded. #' See the 'Encoding' section of the help for file, the 'R Data Import/Export Manual' and 'Note'. #' @param ... Further arguments to be passed to code{\link[utils]{read.table}}. #' #' @details #' \code{readDataset} is a wrapper function that uses \code{\link[utils]{read.table}} to read the #' CSV file into a data frame, transfers it from long to wide format with \code{\link[stats]{reshape}} #' and puts the data to \code{\link{getDataset}}. #' #' @return Returns a \code{\link{Dataset}} object. #' #' @seealso #' \itemize{ #' \item \code{\link{readDatasets}} for reading multiple datasets, #' \item \code{\link{writeDataset}} for writing a single dataset, #' \item \code{\link{writeDatasets}} for writing multiple datasets. #' } #' #' @export #' readDataset <- function(file, ..., header = TRUE, sep = ",", quote = "\"", dec = ".", fill = TRUE, comment.char = "", fileEncoding = "UTF-8") { if (!file.exists(file)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the file '", file, "' does not exist") } data <- utils::read.table(file = file, header = header, sep = sep, quote = quote, dec = dec, fill = fill, fileEncoding = fileEncoding, ...) dataWide <- stats::reshape(data = data, direction = "wide", idvar = "stages", timevar = "groups") colnames(dataWide) <- gsub("\\.", "", colnames(dataWide)) return(getDataset(dataWide)) } #' @title #' Write Dataset #' #' @description #' Writes a dataset to a CSV file. 
#' #' @param dataset A dataset. #' @param file The target CSV file. #' @param append Logical. Only relevant if file is a character string. #' If \code{TRUE}, the output is appended to the file. If \code{FALSE}, any existing file of the name is destroyed. #' @param sep The field separator character. Values on each line of the file are separated #' by this character. If sep = "," (the default for \code{writeDataset}) the separator is a comma. #' @param quote The set of quoting characters. To disable quoting altogether, use #' quote = "". See scan for the behavior on quotes embedded in quotes. Quoting is only #' considered for columns read as character, which is all of them unless \code{colClasses} is specified. #' @param dec The character used in the file for decimal points. #' @param eol The character(s) to print at the end of each line (row). #' @param na The string to use for missing values in the data. #' @param row.names Either a logical value indicating whether the row names of \code{dataset} are #' to be written along with \code{dataset}, or a character vector of row names to be written. #' @param col.names Either a logical value indicating whether the column names of \code{dataset} are #' to be written along with \code{dataset}, or a character vector of column names to be written. #' See the section on 'CSV files' for the meaning of \code{col.names = NA}. #' @param qmethod A character string specifying how to deal with embedded double quote characters #' when quoting strings. Must be one of "double" (default in \code{writeDataset}) or "escape". #' @param fileEncoding Character string: if non-empty declares the encoding used on a file #' (not a connection) so the character data can be re-encoded. #' See the 'Encoding' section of the help for file, the 'R Data Import/Export Manual' and 'Note'. #' @param ... Further arguments to be passed to \code{\link[utils]{write.table}}. #' #' @details #' \code{\link{writeDataset}} is a wrapper function that coerces the dataset to a data frame and uses \cr #' \code{\link[utils]{write.table}} to write it to a CSV file. #' #' @seealso #' \itemize{ #' \item \code{\link{writeDatasets}} for writing multiple datasets, #' \item \code{\link{readDataset}} for reading a single dataset, #' \item \code{\link{readDatasets}} for reading multiple datasets. #' } #' #' @export #' writeDataset <- function(dataset, file, ..., append = FALSE, quote = TRUE, sep = ",", eol = "\n", na = "NA", dec = ".", row.names = TRUE, col.names = NA, qmethod = "double", fileEncoding = "UTF-8") { .assertIsDataset(dataset) x <- as.data.frame(dataset, niceColumnNamesEnabled = FALSE) utils::write.table(x = x, file = file, append = append, quote = quote, sep = sep, eol = eol, na = na, dec = dec, row.names = FALSE, col.names = TRUE, qmethod = qmethod, fileEncoding = fileEncoding) } #' @title #' Read Multiple Datasets #' #' @description #' Reads a data file and returns it as a list of dataset objects. #' #' @param file A CSV file (see \code{\link[utils]{read.table}}). #' @param header A logical value indicating whether the file contains the names of #' the variables as its first line. #' @param sep The field separator character. Values on each line of the file are separated #' by this character. If sep = "," (the default for \code{readDatasets}) the separator is a comma. #' @param quote The set of quoting characters. To disable quoting altogether, use #' quote = "". See scan for the behavior on quotes embedded in quotes. 
Quoting is only #' considered for columns read as character, which is all of them unless \code{colClasses} is specified. #' @param dec The character used in the file for decimal points. #' @param fill logical. If \code{TRUE} then in case the rows have unequal length, blank fields #' are implicitly added. #' @param comment.char character: a character vector of length one containing a single character #' or an empty string. Use "" to turn off the interpretation of comments altogether. #' @param fileEncoding character string: if non-empty declares the encoding used on a file #' (not a connection) so the character data can be re-encoded. #' See the 'Encoding' section of the help for file, the 'R Data Import/Export Manual' and 'Note'. #' @param ... Further arguments to be passed to \code{\link[utils]{read.table}}. #' #' @details #' Reads a file that was written by \code{\link{writeDatasets}} before. #' #' @return Returns a list of \code{\link{Dataset}} objects. #' #' @seealso #' \itemize{ #' \item \code{\link{readDataset}} for reading a single dataset, #' \item \code{\link{writeDatasets}} for writing multiple datasets, #' \item \code{\link{writeDataset}} for writing a single dataset. #' } #' #' @export #' readDatasets <- function(file, ..., header = TRUE, sep = ",", quote = "\"", dec = ".", fill = TRUE, comment.char = "", fileEncoding = "UTF-8") { if (!file.exists(file)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the file '", file, "' does not exist") } data <- utils::read.table(file = file, header = header, sep = sep, quote = quote, dec = dec, fill = fill, fileEncoding = fileEncoding, ...) if (is.null(data[["datasetId"]])) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "data file must contain the column 'datasetId'") } datasets <- list() for (datasetId in unique(data$datasetId)) { subData <- data[data$datasetId == datasetId, ] dataFrame <- subset(subData, select = -datasetId) description <- NA_character_ if (!is.null(dataFrame[["description"]])) { description <- as.character(dataFrame$description[1]) dataFrame <- subset(dataFrame, select = -description) } if (length(unique(subData$groups)) == 2) { dataWide <- stats::reshape(dataFrame, direction = "wide", idvar = "stages", timevar = "groups") colnames(dataWide) <- gsub("\\.", "", colnames(dataWide)) dataset <- getDataset(dataWide) } else { dataset <- getDataset(dataFrame) } dataset$setDescription(description) datasets <- c(datasets, dataset) } return(datasets) } #' @title #' Write Multiple Datasets #' #' @description #' Writes a list of datasets to a CSV file. #' #' @param datasets A list of datasets. #' @param file The target CSV file. #' @param append Logical. Only relevant if file is a character string. #' If \code{TRUE}, the output is appended to the file. If FALSE, any existing file of the name is destroyed. #' @param sep The field separator character. Values on each line of the file are separated #' by this character. If sep = "," (the default for \code{writeDatasets}) the separator is a comma. #' @param quote The set of quoting characters. To disable quoting altogether, use #' quote = "". See scan for the behavior on quotes embedded in quotes. Quoting is only #' considered for columns read as character, which is all of them unless \code{colClasses} is specified. #' @param dec The character used in the file for decimal points. #' @param eol The character(s) to print at the end of each line (row). #' @param na The string to use for missing values in the data. 
#' @param row.names Either a logical value indicating whether the row names of \code{dataset} are
#' to be written along with \code{dataset}, or a character vector of row names to be written.
#' @param col.names Either a logical value indicating whether the column names of \code{dataset} are
#' to be written along with \code{dataset}, or a character vector of column names to be written.
#' See the section on 'CSV files' for the meaning of \code{col.names = NA}.
#' @param qmethod A character string specifying how to deal with embedded double quote characters
#' when quoting strings. Must be one of "double" (default in \code{writeDatasets}) or "escape".
#' @param fileEncoding Character string: if non-empty declares the encoding used on a file
#' (not a connection) so the character data can be re-encoded.
#' See the 'Encoding' section of the help for file, the 'R Data Import/Export Manual' and 'Note'.
#' @param ... Further arguments to be passed to \code{\link[utils]{write.table}}.
#'
#' @details
#' The format of the CSV file is optimized for usage of \code{\link{readDatasets}}.
#'
#' @seealso
#' \itemize{
#' \item \code{\link{writeDataset}} for writing a single dataset,
#' \item \code{\link{readDatasets}} for reading multiple datasets,
#' \item \code{\link{readDataset}} for reading a single dataset.
#' }
#'
#' @export
#'
writeDatasets <- function(datasets, file, ..., append = FALSE, quote = TRUE, sep = ",",
		eol = "\n", na = "NA", dec = ".", row.names = TRUE, col.names = NA,
		qmethod = "double", fileEncoding = "UTF-8") {
	if (!is.list(datasets)) {
		stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'datasets' must be a list of datasets")
	}
	if (length(datasets) == 0) {
		stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'datasets' is empty")
	}

	datasetType <- NA_character_
	dataFrames <- NULL
	for (i in 1:length(datasets)) {
		dataset <- datasets[[i]]
		.assertIsDataset(dataset)
		if (is.na(datasetType)) {
			datasetType <- class(dataset)
		} else if (class(dataset) != datasetType) {
			stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all datasets must have the same type")
		}
		data <- as.data.frame(dataset, niceColumnNamesEnabled = FALSE)
		datasetId <- ifelse(!is.null(dataset$getId()) && !is.na(dataset$getId()), dataset$getId(), i)
		data <- cbind(rep(datasetId, nrow(data)), data)
		colnames(data)[1] <- "datasetId"
		if (!is.null(dataset$getDescription()) && !is.na(dataset$getDescription())) {
			data <- cbind(data, rep(dataset$getDescription(), nrow(data)))
			colnames(data)[ncol(data)] <- "description"
		}
		if (is.null(dataFrames)) {
			dataFrames <- data
		} else {
			dataFrames <- rbind(dataFrames, data)
		}
	}
	if (is.null(dataFrames)) {
		stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "failed to bind datasets")
	}

	utils::write.table(x = dataFrames, file = file, append = append, quote = quote,
		sep = sep, eol = eol, na = na, dec = dec, row.names = FALSE,
		col.names = TRUE, qmethod = qmethod, fileEncoding = fileEncoding)
}

#' @title
#' Get Dataset
#'
#' @description
#' Creates a dataset object and returns it.
#'
#' @param ... A \code{data.frame} or some data vectors defining the dataset.
#' @param floatingPointNumbersEnabled If \code{TRUE},
#' sample sizes can be specified as floating-point numbers
#' (in general this only makes sense for simulation purposes); \cr
#' by default \code{floatingPointNumbersEnabled = FALSE}, i.e.,
#' sample sizes defined as floating-point numbers will be truncated.
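#' For example, with the default \code{floatingPointNumbersEnabled = FALSE}, a stagewise
#' sample size of 22.7 would be truncated to 22.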
#'
#' @details
#' The different dataset types \code{DatasetMeans}, \code{DatasetRates}, and
#' \code{DatasetSurvival} can be created as follows:
#' \itemize{
#' \item An element of \code{\link{DatasetMeans}} for one sample is created by \cr
#' \code{getDataset(sampleSizes =, means =, stDevs =)} where \cr
#' \code{sampleSizes}, \code{means}, \code{stDevs} are vectors with stagewise sample sizes,
#' means and standard deviations of length given by the number of available stages.
#' \item An element of \code{\link{DatasetMeans}} for two samples is created by \cr
#' \code{getDataset(sampleSizes1 =, sampleSizes2 =, means1 =, means2 =, } \cr
#' \code{stDevs1 =, stDevs2 =)} where
#' \code{sampleSizes1}, \code{sampleSizes2}, \code{means1}, \code{means2},
#' \code{stDevs1}, \code{stDevs2} are vectors with
#' stagewise sample sizes, means and standard deviations for the two treatment groups
#' of length given by the number of available stages.
#' \item An element of \code{\link{DatasetRates}} for one sample is created by \cr
#' \code{getDataset(sampleSizes =, events =)} where \code{sampleSizes}, \code{events} are vectors
#' with stagewise sample sizes and events of length given by the number of available stages.
#' \item An element of \code{\link{DatasetRates}} for two samples is created by \cr
#' \code{getDataset(sampleSizes1 =, sampleSizes2 =, events1 =, events2 =)} where
#' \code{sampleSizes1}, \code{sampleSizes2}, \code{events1}, \code{events2}
#' are vectors with stagewise sample sizes
#' and events for the two treatment groups of length given by the number of available stages.
#' \item An element of \code{\link{DatasetSurvival}} is created by \cr
#' \code{getDataset(events =, logRanks =, allocationRatios =)} where
#' \code{events}, \code{logRanks}, and \code{allocationRatios} are the stagewise events,
#' (one-sided) logrank statistics, and allocation ratios.
#' }
#' Prefixing a variable name with \code{overall} (and capitalizing its first letter,
#' e.g., \code{overallMeans} instead of \code{means}) enables entering the overall
#' (cumulative) results instead; the stagewise statistics are then calculated internally.
#'
#' Note that in survival designs usually the overall events and logrank test statistics are
#' provided in the output, so \cr
#' \code{getDataset(overallEvents =, overallLogRanks =, overallAllocationRatios =)} \cr
#' is the usual command for entering survival data. Note also that z scores from a
#' Cox regression can be used in place of \code{overallLogRanks}.
#'
#' \code{n} can be used in place of \code{sampleSizes}.
#'
#' @return Returns a \code{\link{Dataset}} object.
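#'
#' @seealso
#' \itemize{
#' \item \code{\link{readDataset}} for reading a single dataset from a CSV file,
#' \item \code{\link{writeDataset}} for writing a single dataset to a CSV file.
#' }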
#' #' @examples #' #' # Create a Dataset of Means (one group): #' #' datasetOfMeans <- getDataset( #' n = c(22, 11, 22, 11), #' means = c(1, 1.1, 1, 1), #' stDevs = c(1, 2, 2, 1.3) #' ) #' datasetOfMeans #' datasetOfMeans$show(showType = 2) #' #' datasetOfMeans <- getDataset( #' overallSampleSizes = c(22, 33, 55, 66), #' overallMeans = c(1.000, 1.033, 1.020, 1.017 ), #' overallStDevs = c(1.00, 1.38, 1.64, 1.58) #' ) #' datasetOfMeans #' datasetOfMeans$show(showType = 2) #' as.data.frame(datasetOfMeans) #' #' # Create a Dataset of Means (two groups): #' #' datasetOfMeans <- getDataset( #' n1 = c(22, 11, 22, 11), #' n2 = c(22, 13, 22, 13), #' means1 = c(1, 1.1, 1, 1), #' means2 = c(1.4, 1.5, 3, 2.5), #' stDevs1 = c(1, 2, 2, 1.3), #' stDevs2 = c(1, 2, 2, 1.3) #' ) #' datasetOfMeans #' #' datasetOfMeans <- getDataset( #' overallSampleSizes1 = c(22, 33, 55, 66), #' overallSampleSizes2 = c(22, 35, 57, 70), #' overallMeans1 = c(1, 1.033, 1.020, 1.017), #' overallMeans2 = c(1.4, 1.437, 2.040, 2.126), #' overallStDevs1 = c(1, 1.38, 1.64, 1.58), #' overallStDevs2 = c(1, 1.43, 1.82, 1.74) #' ) #' datasetOfMeans #' #' df <- data.frame( #' stages = 1:4, #' n1 = c(22, 11, 22, 11), #' n2 = c(22, 13, 22, 13), #' means1 = c(1, 1.1, 1, 1), #' means2 = c(1.4, 1.5, 3, 2.5), #' stDevs1 = c(1, 2, 2, 1.3), #' stDevs2 = c(1, 2, 2, 1.3) #' ) #' datasetOfMeans <- getDataset(df) #' datasetOfMeans #' #' ## Create a Dataset of Rates (one group): #' #' datasetOfRates <- getDataset( #' n = c(8, 10, 9, 11), #' events = c(4, 5, 5, 6) #' ) #' datasetOfRates #' #' ## Create a Dataset of Rates (two groups): #' #' datasetOfRates <- getDataset( #' n2 = c(8, 10, 9, 11), #' n1 = c(11, 13, 12, 13), #' events2 = c(3, 5, 5, 6), #' events1 = c(10, 10, 12, 12) #' ) #' datasetOfRates #' #' #' ## Create a Survival Dataset #' #' dataset <- getDataset( #' overallEvents = c(8, 15, 19, 31), #' overallAllocationRatios = c(1, 1, 1, 2), #' overallLogRanks = c(1.52, 1.98, 1.99, 2.11) #' ) #' dataset #' #' @export #' #' @name getDataset #' getDataset <- function(..., floatingPointNumbersEnabled = FALSE) { args <- list(...) if (length(args) == 0) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data.frame or data vectors expected") } exampleType <- args[["example"]] if (!is.null(exampleType) && exampleType %in% c("means", "rates", "survival")) { return(.getDatasetExample(exampleType = exampleType)) } dataFrame <- .getDataFrameFromArgs(...) if (is.null(dataFrame)) { paramNames <- names(args) paramNames <- paramNames[paramNames != ""] if (length(paramNames) != length(args)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all parameters must be named") } if (length(paramNames) != length(unique(paramNames))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the parameter names must be unique") } dataFrame <- .createDataFrame(...) 
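		# at this point the named stagewise vectors have been validated and
		# assembled into a data.frame with a 'stages' column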
} if (.isDataObjectMeans(...)) { return(DatasetMeans(dataFrame = dataFrame, floatingPointNumbersEnabled = floatingPointNumbersEnabled)) } if (.isDataObjectRates(...)) { return(DatasetRates(dataFrame = dataFrame, floatingPointNumbersEnabled = floatingPointNumbersEnabled)) } if (.isDataObjectSurvival(...)) { return(DatasetSurvival(dataFrame = dataFrame, floatingPointNumbersEnabled = floatingPointNumbersEnabled)) } stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "failed to identify dataset type") } .getDatasetExample <- function(exampleType) { if (exampleType == "means") { return(getDataset( n1 = c(13, 25), n2 = c(15, NA), n3 = c(14, 27), n4 = c(12, 29), means1 = c(24.2, 22.2), means2 = c(18.8, NA), means3 = c(26.7, 27.7), means4 = c(9.2, 12.2), stDevs1 = c(24.4, 22.1), stDevs2 = c(21.2, NA), stDevs3 = c(25.6, 23.2), stDevs4 = c(21.5, 22.7))) } else if (exampleType == "rates") { return(getDataset( n1 = c(23, 25), n2 = c(25, NA), n3 = c(24, 27), n4 = c(22, 29), events1 = c(15, 12), events2 = c(19, NA), events3 = c(18, 22), events4 = c(12, 13))) } else if (exampleType == "survival") { return(getDataset( events1 = c(25, 32), events2 = c(18, NA), events3 = c(22, 36), logRanks1 = c(2.2,1.8), logRanks2 = c(1.99, NA), logRanks3 = c(2.32, 2.11))) } stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'exampleType' (", exampleType, ") is not allowed") } .arraysAreEqual <- function(a1, a2) { if (length(a1) != length(a2)) { return(FALSE) } l <- length(a1) if (l > 0) { a1 <- sort(a1) a2 <- sort(a2) if (base::sum(a1 == a2) < l) { return(FALSE) } } return(TRUE) } .createDataFrame <- function(...) { args <- list(...) argNames <- .getArgumentNames(...) if (length(args) == 0 || length(argNames) == 0) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data.frame or data vectors expected") } multiArmEnabled <- any(grep("3", argNames)) numberOfValues <- length(args[[1]]) naIndicesBefore <- NULL for (argName in argNames) { argValues <- args[[argName]] if (length(argValues) != numberOfValues) { stop(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "all data vectors must have the same length: '", argName, "' (", length(argValues), ") differs from '", argNames[1], "' (", numberOfValues, ")") } if (.equalsRegexpIgnoreCase(argName, "^stages?$")) { if (length(stats::na.omit(argValues)) != length(argValues)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "NA's not allowed for '", argName, "'; stages must be defined completely") } definedStages <- sort(intersect(unique(argValues), 1:numberOfValues)) if (length(definedStages) < numberOfValues) { if (length(definedStages) == 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "no valid stages are defined; ", "stages must be defined completely (", .arrayToString(1:numberOfValues), ")") } msg <- ifelse(length(definedStages) == 1, paste0("only stage ", definedStages, " is defined"), paste0("only stages ", .arrayToString(definedStages), " are defined")) stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, msg, "; stages must be defined completely") } } naIndices <- which(is.na(argValues)) if (length(naIndices) > 0) { stageIndex <- naIndices[length(naIndices)] if (stageIndex != numberOfValues) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argName, "' contains a NA at stage ", stageIndex, " followed by a value for a higher stage; NA's must be the last values") } } if (length(naIndices) > 1) { indexBefore <- naIndices[length(naIndices)] for (i in (length(naIndices) - 1):1) { index <- naIndices[i] if (indexBefore - index > 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argName, "' contains alternating values and NA's; ", "NA's must be 
the last values")
				}
				indexBefore <- index
			}
		}

		if (!multiArmEnabled && !is.null(naIndicesBefore) &&
				!.equalsRegexpIgnoreCase(argName, "^stages?$")) {
			if (!.arraysAreEqual(naIndicesBefore, naIndices)) {
				stop(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
					"if NA's exist, then they are mandatory for each data vector at the same position")
			}
		}
		naIndicesBefore <- naIndices

		if (base::sum(is.infinite(argValues)) > 0) {
			stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all data values must be finite; ",
				"'", argName, "' contains infinite values")
		}

		if (!is.numeric(argValues)) {
			stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
				"all data vectors must be numeric ('", argName, "' is ", class(argValues), ")")
		}

		if (length(argValues) > C_KMAX_UPPER_BOUND) {
			stop(C_EXCEPTION_TYPE_ARGUMENT_LENGTH_OUT_OF_BOUNDS,
				"'", argName, "' is out of bounds [1, ", C_KMAX_UPPER_BOUND, "]")
		}
	}

	dataFrame <- as.data.frame(args)
	if (length(intersect(tolower(names(dataFrame)), c("stage", "stages"))) == 0) {
		dataFrame$stages <- 1:nrow(dataFrame)
	}
	return(dataFrame)
}

.getDataFrameFromArgs <- function(...) {
	args <- list(...)
	if (length(args) == 0) {
		stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT,
			"cannot initialize dataset because no data are defined")
	}

	dataFrame <- NULL
	dataFrameCounter <- 0
	for (arg in args) {
		if (is.data.frame(arg)) {
			dataFrameCounter <- dataFrameCounter + 1
			if (is.null(dataFrame)) {
				dataFrame <- arg
			}
		}
	}

	if (dataFrameCounter > 1) {
		warning("Found ", dataFrameCounter, " data.frame arguments; ",
			"only the first data.frame will be used for the initialization of the dataset",
			call. = FALSE)
	}

	return(dataFrame)
}

.getArgumentNames <- function(...) {
	dataFrame <- .getDataFrameFromArgs(...)
	if (!is.null(dataFrame)) {
		return(names(dataFrame))
	}

	args <- list(...)
	if (length(args) == 0) {
		return(character(0))
	}
	return(names(args))
}

.assertIsValidDatasetArgument <- function(...) {
	argNames <- .getArgumentNames(...)
	if (length(argNames) == 0) {
		return(TRUE)
	}

	argNamesLower <- tolower(argNames)
	dataObjectkeyWords <- tolower(C_KEY_WORDS)
	multiArmKeywords <- tolower(c(
		C_KEY_WORDS_EVENTS, C_KEY_WORDS_OVERALL_EVENTS,
		C_KEY_WORDS_SAMPLE_SIZES, C_KEY_WORDS_OVERALL_SAMPLE_SIZES,
		C_KEY_WORDS_MEANS, C_KEY_WORDS_OVERALL_MEANS,
		C_KEY_WORDS_ST_DEVS, C_KEY_WORDS_OVERALL_ST_DEVS,
		C_KEY_WORDS_ALLOCATION_RATIOS, C_KEY_WORDS_OVERALL_ALLOCATION_RATIOS,
		C_KEY_WORDS_LOG_RANKS, C_KEY_WORDS_OVERALL_LOG_RANKS))
	unknownArgs <- setdiff(argNamesLower, dataObjectkeyWords)
	unknownArgsChecked <- unknownArgs
	unknownArgs <- c()
	for (unknownArg in unknownArgsChecked) {
		unknown <- TRUE
		for (multiArmKeyword in multiArmKeywords) {
			if (grepl(paste0(multiArmKeyword, "\\d{1,4}"), unknownArg)) {
				unknown <- FALSE
			}
		}
		if (unknown) {
			unknownArgs <- c(unknownArgs, unknownArg)
		}
	}

	if (length(unknownArgs) > 0) {
		for (i in 1:length(unknownArgs)) {
			unknownArgs[i] <- argNames[argNamesLower == unknownArgs[i]][1]
		}
		if (length(unknownArgs) == 1) {
			stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
				"the argument '", unknownArgs, "' is not a valid dataset argument")
		} else {
			stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
				"the arguments ", .arrayToString(unknownArgs, encapsulate = TRUE),
				" are not valid dataset arguments")
		}
	}

	invisible(TRUE)
}

.isDataObject <- function(..., dataObjectkeyWords) {
	.assertIsValidDatasetArgument(...)

	argNames <- .getArgumentNames(...)
	if (length(argNames) == 0) {
		return(FALSE)
	}

	argNames <- tolower(argNames)
	matching <- intersect(argNames, tolower(dataObjectkeyWords))
	return(length(matching) > 0)
}

.isDataObjectMeans <- function(...) 
{ return(.isDataObject(..., dataObjectkeyWords = c(C_KEY_WORDS_MEANS, C_KEY_WORDS_ST_DEVS, C_KEY_WORDS_MEANS_1, C_KEY_WORDS_ST_DEVS_1, C_KEY_WORDS_MEANS_2, C_KEY_WORDS_ST_DEVS_2, C_KEY_WORDS_OVERALL_MEANS, C_KEY_WORDS_OVERALL_ST_DEVS, C_KEY_WORDS_OVERALL_MEANS_1, C_KEY_WORDS_OVERALL_ST_DEVS_1, C_KEY_WORDS_OVERALL_MEANS_2, C_KEY_WORDS_OVERALL_ST_DEVS_2))) } .isDataObjectRates <- function(...) { dataObjectkeyWords1 <- c(C_KEY_WORDS_EVENTS, C_KEY_WORDS_OVERALL_EVENTS) dataObjectkeyWords2 <- c(C_KEY_WORDS_OVERALL_LOG_RANKS, C_KEY_WORDS_LOG_RANKS, C_KEY_WORDS_OVERALL_ALLOCATION_RATIOS, C_KEY_WORDS_ALLOCATION_RATIOS) dataObjectkeyWords1 <- c(dataObjectkeyWords1, paste0(dataObjectkeyWords1, c(1, 2))) dataObjectkeyWords2 <- c(dataObjectkeyWords2, paste0(dataObjectkeyWords2, c(1, 2))) return(.isDataObject(..., dataObjectkeyWords = dataObjectkeyWords1) && !.isDataObject(..., dataObjectkeyWords = dataObjectkeyWords2)) } .isDataObjectSurvival <- function(...) { dataObjectkeyWords <- c(C_KEY_WORDS_OVERALL_LOG_RANKS, C_KEY_WORDS_LOG_RANKS, C_KEY_WORDS_OVERALL_ALLOCATION_RATIOS, C_KEY_WORDS_ALLOCATION_RATIOS) dataObjectkeyWords <- c(dataObjectkeyWords, paste0(dataObjectkeyWords, c(1, 2))) return(.isDataObject(..., dataObjectkeyWords = dataObjectkeyWords)) } #' #' @name Dataset #' #' @title #' Dataset #' #' @description #' Basic class for datasets. #' #' @field stages The stage numbers. #' @field groups The group numbers. #' #' @details #' \code{Dataset} is the basic class for #' \itemize{ #' \item \code{\link{DatasetMeans}}, #' \item \code{\link{DatasetRates}}, and #' \item \code{\link{DatasetSurvival}}. #' } #' This basic class contains the fields \code{stages} and \code{groups} and several commonly used #' functions. #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' Dataset <- setRefClass("Dataset", contains = "ParameterSet", fields = list( .data = "data.frame", .plotSettings = "PlotSettings", .id = "integer", .description = "character", .floatingPointNumbersEnabled = "logical", stages = "integer", groups = "integer" ), methods = list( initialize = function(dataFrame, ..., floatingPointNumbersEnabled = FALSE) { callSuper(...) 
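			# initialize defaults (plot settings, parameter names, and parameter
			# format functions) before the data frame, if any, is read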
.plotSettings <<- PlotSettings() .parameterNames <<- .getParameterNames() .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS .floatingPointNumbersEnabled <<- floatingPointNumbersEnabled .id <<- NA_integer_ .description <<- NA_character_ if (!missing(dataFrame)) { .initByDataFrame(dataFrame) } }, getPlotSettings = function() { return(.plotSettings) }, show = function(showType = 1, digits = NA_integer_) { 'Method for automatically printing dataset objects' .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE) }, .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) { .resetCat() if (showType == 2) { .cat("Technical summary of the dataset object of class", methods::classLabel(class(.self)), ":\n", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showAllParameters(consoleOutputEnabled = consoleOutputEnabled) .showParameterTypeDescription(consoleOutputEnabled = consoleOutputEnabled) } else { .showParametersOfOneGroup(.getUserDefinedParameters(), title = .toString(startWithUpperCase = TRUE), orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getGeneratedParameters(), title = "Calculated data", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled) if (!is.na(.description) && nchar(.description) > 0) { .cat("Description: ", .description, "\n\n", consoleOutputEnabled = consoleOutputEnabled) } } }, .initByDataFrame = function(dataFrame) { if (!is.data.frame(dataFrame)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataFrame' must be a data.frame (is an instance of class ", class(dataFrame), ")") } if (!.paramExists(dataFrame, "stage") && !.paramExists(dataFrame, "stages")) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataFrame' must contain parameter 'stages' or 'stage'") } stages <<- as.integer(.getValuesByParameterName(dataFrame, c("stages", "stage"))) if (length(unique(stages)) < length(stages)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'stages' (", .arrayToString(stages), ") must be a unique vector of stage numbers") } groups <<- rep(1L, length(stages)) .setParameterType("groups", C_PARAM_USER_DEFINED) .setParameterType("stages", C_PARAM_USER_DEFINED) }, .validateDataset = function() { .assertIsValidKMax(kMax = getNumberOfStages()) }, .validateValues = function(values, name) { l1 <- length(unique(stages)) l2 <- length(values) if (l1 != l2) { stop(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "there ", ifelse(l1 == 1, paste("is", l1, "stage"), paste("are", l1, "stages")), " defined", " (", .arrayToString(unique(stages)), ") and '", name, "' has length ", l2) } }, .fillWithNAs = function(kMax) { numberOfStages <- getNumberOfStages() if (numberOfStages >= kMax) { return(invisible()) } numberOfGroups <- getNumberOfGroups() for (s in (numberOfStages + 1):kMax) { for (g in 1:numberOfGroups) { stages <<- c(stages, s) groups <<- c(groups, g) } } }, .orderDataByStageAndGroup = function() { .data <<- .data[order(.data[, 1], .data[, 2]), ] }, .getNumberOfNAsToAdd = function(kMax) { n <- kMax - getNumberOfStages() if (n <= 0) { return(0) } n <- n * getNumberOfGroups() return(n) }, .paramExists = function(dataFrame, parameterName) { for (p in parameterName) { value <- dataFrame[[p]] if (!is.null(value)) { return(TRUE) } } return(FALSE) }, .getValuesByParameterName = function(dataFrame, parameterNameVariants, ..., defaultValues = NULL, suffix = "") { for (parameterName in parameterNameVariants) { key <- 
paste0(parameterName, suffix) if (.paramExists(dataFrame, key)) { return(dataFrame[[key]]) } } if (!is.null(defaultValues)) { return(defaultValues) } stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "parameter '", paste0(parameterNameVariants[1], suffix), "' is missing or not correctly specified") },
.getIndices = function(stage, group) { if (is.null(.data)) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'.data' must be defined") } if (!is.null(stage) && !any(is.na(stage)) && all(stage < 0)) { i <- 1:getNumberOfStages() stage <- i[!(i %in% abs(stage))] } if (!is.null(group) && !any(is.na(group)) && all(group < 0)) { i <- 1:getNumberOfGroups() group <- i[!(i %in% abs(group))] } if (!is.null(group) && length(group) == 1 && is.na(group)) { if (!all(stage %in% .data$stage)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'stage' (", .arrayToString(stage), ") out of range [", .arrayToString(sort(unique(.data$stage))), "]") } indices <- (.data$stage %in% stage) indices[is.na(indices)] <- FALSE return(indices) } if (!is.null(stage) && length(stage) == 1 && is.na(stage)) { if (!all(group %in% .data$group)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'group' (", .arrayToString(group), ") out of range [", .arrayToString(sort(unique(.data$group))), "]") } indices <- (.data$group %in% group) indices[is.na(indices)] <- FALSE return(indices) } if (!all(stage %in% .data$stage)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'stage' (", .arrayToString(stage), ") out of range [", .arrayToString(sort(unique(.data$stage))), "]") } if (!all(group %in% .data$group)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'group' (", .arrayToString(group), ") out of range [", .arrayToString(sort(unique(.data$group))), "]") } indices <- (.data$stage %in% stage & .data$group %in% group) indices[is.na(indices)] <- FALSE return(indices) },
.getValidatedFloatingPointNumbers = function(n, type = "Sample sizes") { if (.floatingPointNumbersEnabled) { return(n) } nToCheck <- stats::na.omit(n) if (any(nToCheck != as.integer(nToCheck))) { warning(type, " specified as floating-point numbers were truncated", call.
= FALSE) } n[!is.na(n)] <- as.integer(n[!is.na(n)]) return(n) },
.keyWordExists = function(dataFrame, keyWords, suffix = "") { for (key in keyWords) { if (.paramExists(dataFrame, paste0(key, suffix))) { return(TRUE) } } return(FALSE) },
.getNumberOfGroups = function(dataFrame, keyWords) { for (group in 3:1000) { if (!.keyWordExists(dataFrame, keyWords, group)) { return(group - 1) } } return(2) },
.getValidatedStage = function(..., stage = NA_integer_, group = NA_integer_) { if (length(list(...)) > 0) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "argument 'stage' or 'group' is missing, all arguments must be named") } if (all(is.na(stage))) { stage <- c(1:getNumberOfStages()) } return(stage) },
getNumberOfGroups = function() { data <- stats::na.omit(.data) return(length(unique(data$group))) },
getNumberOfStages = function() { data <- stats::na.omit(.data) return(length(unique(data$stage))) },
isDatasetMeans = function() { return(class(.self) == "DatasetMeans") },
isDatasetRates = function() { return(class(.self) == "DatasetRates") },
isDatasetSurvival = function() { return(class(.self) == "DatasetSurvival") },
setId = function(id) { .id <<- as.integer(id) },
getId = function() { return(.id) },
setDescription = function(description) { .description <<- description },
getDescription = function() { return(.description) },
.toString = function(startWithUpperCase = FALSE) { s <- "unknown dataset" if (isDatasetMeans()) { s <- "dataset of means" } else if (isDatasetRates()) { s <- "dataset of rates" } else if (isDatasetSurvival()) { s <- "dataset of survival data" } return(ifelse(startWithUpperCase, .firstCharacterToUpperCase(s), s)) } ) )
#'
#' @name DatasetMeans
#'
#' @title
#' Dataset of Means
#'
#' @description
#' Class for a dataset of means.
#'
#' @field groups The group numbers.
#' @field stages The stage numbers.
#' @field sampleSizes The sample sizes.
#' @field means The means.
#' @field stDevs The standard deviations.
#'
#' @details
#' This object cannot be created directly; use \code{\link{getDataset}}
#' with suitable arguments to create a dataset of means instead.
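#'
#' As an illustration (the numbers are arbitrary), a dataset of means for one
#' group and two stages can be created from stage-wise results as follows:
#' \preformatted{datasetOfMeans <- getDataset(
#'     n      = c(22, 11),
#'     means  = c(1, 1.1),
#'     stDevs = c(1, 2))}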
#' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' DatasetMeans <- setRefClass("DatasetMeans", contains = "Dataset", fields = list( sampleSizes = "numeric", means = "numeric", stDevs = "numeric", overallSampleSizes = "numeric", overallMeans = "numeric", overallStDevs = "numeric" ), methods = list( getSampleSize = function(stage, group = 1) { return(.data$sampleSize[.getIndices(stage = stage, group = group)]) }, getMean = function(stage, group = 1) { return(.data$mean[.getIndices(stage = stage, group = group)]) }, getStDev = function(stage, group = 1) { return(.data$stDev[.getIndices(stage = stage, group = group)]) }, getSampleSizes = function(..., stage = NA_integer_, group = NA_integer_) { stage <- .getValidatedStage(..., stage = stage, group = group) return(.data$sampleSize[.getIndices(stage = stage, group = group)]) }, getMeans = function(..., stage = NA_integer_, group = NA_integer_) { stage <- .getValidatedStage(..., stage = stage, group = group) return(.data$mean[.getIndices(stage = stage, group = group)]) }, getStDevs = function(..., stage = NA_integer_, group = NA_integer_) { stage <- .getValidatedStage(..., stage = stage, group = group) return(.data$stDev[.getIndices(stage = stage, group = group)]) }, getSampleSizesUpTo = function(to, group = 1) { return(.data$sampleSize[.getIndices(stage = c(1:to), group = group)]) }, getMeansUpTo = function(to, group = 1) { return(.data$mean[.getIndices(stage = c(1:to), group = group)]) }, getStDevsUpTo = function(to, group = 1) { return(.data$stDev[.getIndices(stage = c(1:to), group = group)]) }, getOverallSampleSize = function(stage, group = 1) { return(.data$overallSampleSize[.getIndices(stage = stage, group = group)]) }, getOverallMean = function(stage, group = 1) { return(.data$overallMean[.getIndices(stage = stage, group = group)]) }, getOverallStDev = function(stage, group = 1) { return(.data$overallStDev[.getIndices(stage = stage, group = group)]) }, getOverallSampleSizes = function(..., stage = NA_integer_, group = NA_integer_) { stage <- .getValidatedStage(..., stage = stage, group = group) return(.data$overallSampleSize[.getIndices(stage = stage, group = group)]) }, getOverallMeans = function(..., stage = NA_integer_, group = NA_integer_) { stage <- .getValidatedStage(..., stage = stage, group = group) return(.data$overallMean[.getIndices(stage = stage, group = group)]) }, getOverallStDevs = function(..., stage = NA_integer_, group = NA_integer_) { stage <- .getValidatedStage(..., stage = stage, group = group) return(.data$overallStDev[.getIndices(stage = stage, group = group)]) }, getOverallSampleSizesUpTo = function(to, group = 1) { return(.data$overallSampleSize[.getIndices(stage = c(1:to), group = group)]) }, getOverallMeansUpTo = function(to, group = 1) { return(.data$overallMean[.getIndices(stage = c(1:to), group = group)]) }, getOverallStDevsUpTo = function(to, group = 1) { return(.data$overallStDev[.getIndices(stage = c(1:to), group = group)]) }, .getValidatedSampleSizes = function(n) { return(.getValidatedFloatingPointNumbers(n, type = "Sample sizes")) }, .initByDataFrame = function(dataFrame) { callSuper(dataFrame) # case: one mean - stage wise if (.paramExists(dataFrame, C_KEY_WORDS_SAMPLE_SIZES)) { sampleSizes <<- .getValidatedSampleSizes(.getValuesByParameterName(dataFrame, C_KEY_WORDS_SAMPLE_SIZES)) .validateValues(sampleSizes, "n") means <<- .getValuesByParameterName(dataFrame, C_KEY_WORDS_MEANS) 
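# Stage-wise input: after validating the user-supplied values, the cumulative
# (overall) sample sizes, means, and standard deviations are derived via .getOverallData().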
.validateValues(means, "means") stDevs <<- .getValuesByParameterName(dataFrame, C_KEY_WORDS_ST_DEVS) .validateValues(stDevs, "stDevs") kMax <- length(sampleSizes) stageNumber <- length(stats::na.omit(sampleSizes)) dataInput <- data.frame( sampleSizes = sampleSizes, means = means, stDevs = stDevs) dataInput <- .getOverallData(dataInput, kMax, stage = stageNumber) overallSampleSizes <<- .getValidatedSampleSizes(dataInput$overallSampleSizes) overallMeans <<- dataInput$overallMeans overallStDevs <<- dataInput$overallStDevs .setParameterType("sampleSizes", C_PARAM_USER_DEFINED) .setParameterType("means", C_PARAM_USER_DEFINED) .setParameterType("stDevs", C_PARAM_USER_DEFINED) .setParameterType("overallSampleSizes", C_PARAM_GENERATED) .setParameterType("overallMeans", C_PARAM_GENERATED) .setParameterType("overallStDevs", C_PARAM_GENERATED) } # case: one mean - overall else if (.paramExists(dataFrame, C_KEY_WORDS_OVERALL_SAMPLE_SIZES)) { overallSampleSizes <<- .getValidatedSampleSizes(.getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_SAMPLE_SIZES)) .validateValues(overallSampleSizes, "overallSampleSizes") overallMeans <<- .getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_MEANS) .validateValues(overallMeans, "overallMeans") overallStDevs <<- .getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_ST_DEVS) .validateValues(overallStDevs, "overallStDevs") kMax <- length(overallSampleSizes) stageNumber <- length(stats::na.omit(overallSampleSizes)) dataInput <- data.frame( overallSampleSizes = overallSampleSizes, overallMeans = overallMeans, overallStDevs = overallStDevs) dataInput <- .getStageWiseData(dataInput, kMax, stage = stageNumber) sampleSizes <<- .getValidatedSampleSizes(dataInput$sampleSizes) means <<- dataInput$means stDevs <<- dataInput$stDevs .setParameterType("sampleSizes", C_PARAM_GENERATED) .setParameterType("means", C_PARAM_GENERATED) .setParameterType("stDevs", C_PARAM_GENERATED) .setParameterType("overallSampleSizes", C_PARAM_USER_DEFINED) .setParameterType("overallMeans", C_PARAM_USER_DEFINED) .setParameterType("overallStDevs", C_PARAM_USER_DEFINED) } # case: two or more means - stage wise else if (.paramExists(dataFrame, paste0(C_KEY_WORDS_SAMPLE_SIZES, 1)) && .paramExists(dataFrame, paste0(C_KEY_WORDS_SAMPLE_SIZES, 2))) { numberOfTreatmentGroups <- .getNumberOfGroups(dataFrame, C_KEY_WORDS_SAMPLE_SIZES) stages <<- rep(stages, numberOfTreatmentGroups) groups <<- integer(0) sampleSizes <<- numeric(0) means <<- numeric(0) stDevs <<- numeric(0) overallSampleSizes <<- numeric(0) overallMeans <<- numeric(0) overallStDevs <<- numeric(0) for (group in 1:numberOfTreatmentGroups) { sampleSizesTemp <- .getValidatedSampleSizes(.getValuesByParameterName( dataFrame, C_KEY_WORDS_SAMPLE_SIZES, suffix = group)) .validateValues(sampleSizesTemp, paste0("n", group)) sampleSizes <<- c(sampleSizes, sampleSizesTemp) meansTemp <- .getValuesByParameterName(dataFrame, C_KEY_WORDS_MEANS, suffix = group) .validateValues(meansTemp, paste0("means", group)) means <<- c(means, meansTemp) stDevsTemp <- .getValuesByParameterName(dataFrame, C_KEY_WORDS_ST_DEVS, suffix = group) .validateValues(stDevsTemp, paste0("stDevs", group)) stDevs <<- c(stDevs, stDevsTemp) groups <<- c(groups, rep(as.integer(group), length(sampleSizesTemp))) kMax <- length(sampleSizesTemp) numberOfValidStages <- length(stats::na.omit(sampleSizesTemp)) overallData <- .getOverallData(data.frame( sampleSizes = sampleSizesTemp, means = meansTemp, stDevs = stDevsTemp), kMax, stage = numberOfValidStages) overallSampleSizes <<- 
c(overallSampleSizes, .getValidatedSampleSizes(overallData$overallSampleSizes)) overallMeans <<- c(overallMeans, overallData$overallMeans) overallStDevs <<- c(overallStDevs, overallData$overallStDevs) } if (base::sum(stats::na.omit(sampleSizes) < 0) > 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all sample sizes must be >= 0") } if (base::sum(stats::na.omit(stDevs) < 0) > 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all standard deviations must be >= 0") } .setParameterType("sampleSizes", C_PARAM_USER_DEFINED) .setParameterType("means", C_PARAM_USER_DEFINED) .setParameterType("stDevs", C_PARAM_USER_DEFINED) .setParameterType("overallSampleSizes", C_PARAM_GENERATED) .setParameterType("overallMeans", C_PARAM_GENERATED) .setParameterType("overallStDevs", C_PARAM_GENERATED) } # case: two or more means - overall else if (.paramExists(dataFrame, paste0(C_KEY_WORDS_OVERALL_SAMPLE_SIZES, 1)) && .paramExists(dataFrame, paste0(C_KEY_WORDS_OVERALL_SAMPLE_SIZES, 2))) { numberOfTreatmentGroups <- .getNumberOfGroups(dataFrame, C_KEY_WORDS_OVERALL_SAMPLE_SIZES) stages <<- rep(stages, numberOfTreatmentGroups) groups <<- integer(0) sampleSizes <<- numeric(0) means <<- numeric(0) stDevs <<- numeric(0) overallSampleSizes <<- numeric(0) overallMeans <<- numeric(0) overallStDevs <<- numeric(0) for (group in 1:numberOfTreatmentGroups) { overallSampleSizesTemp <- .getValidatedSampleSizes(.getValuesByParameterName( dataFrame, C_KEY_WORDS_OVERALL_SAMPLE_SIZES, suffix = group)) .validateValues(overallSampleSizesTemp, paste0("overallSampleSizes", group)) overallSampleSizes <<- c(overallSampleSizes, overallSampleSizesTemp) overallMeansTemp <- .getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_MEANS, suffix = group) .validateValues(overallMeansTemp, paste0("overallMeans", group)) overallMeans <<- c(overallMeans, overallMeansTemp) overallStDevsTemp <- .getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_ST_DEVS, suffix = group) .validateValues(overallStDevsTemp, paste0("overallStDevs", group)) overallStDevs <<- c(overallStDevs, overallStDevsTemp) groups <<- c(groups, rep(as.integer(group), length(overallSampleSizesTemp))) kMax <- length(overallSampleSizesTemp) numberOfValidStages <- length(stats::na.omit(overallSampleSizesTemp)) overallData <- .getStageWiseData(data.frame( overallSampleSizes = overallSampleSizesTemp, overallMeans = overallMeansTemp, overallStDevs = overallStDevsTemp), kMax, stage = numberOfValidStages) validatedSampleSizes <- .getValidatedSampleSizes(overallData$sampleSizes) .validateValues(validatedSampleSizes, paste0("n", group)) sampleSizes <<- c(sampleSizes, validatedSampleSizes) means <<- c(means, overallData$means) stDevs <<- c(stDevs, overallData$stDevs) if (base::sum(stats::na.omit(sampleSizes) < 0) > 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all sample sizes must be >= 0") } if (base::sum(stats::na.omit(stDevs) < 0) > 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all standard deviations must be >= 0") } } .setParameterType("sampleSizes", C_PARAM_GENERATED) .setParameterType("means", C_PARAM_GENERATED) .setParameterType("stDevs", C_PARAM_GENERATED) .setParameterType("overallSampleSizes", C_PARAM_USER_DEFINED) .setParameterType("overallMeans", C_PARAM_USER_DEFINED) .setParameterType("overallStDevs", C_PARAM_USER_DEFINED) } else { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "sample sizes are missing or not correctly specified") } .data <<- data.frame(stage = stages, group = groups, sampleSize = sampleSizes, mean = means, stDev = stDevs, overallSampleSize = overallSampleSizes, 
overallMean = overallMeans, overallStDev = overallStDevs) .orderDataByStageAndGroup() .setDataToVariables() .validateDataset() }, .setDataToVariables = function() { stages <<- .data$stage groups <<- .data$group sampleSizes <<- .data$sampleSize means <<- .data$mean stDevs <<- .data$stDev overallSampleSizes <<- .data$overallSampleSize overallMeans <<- .data$overallMean overallStDevs <<- .data$overallStDev }, .fillWithNAs = function(kMax) { callSuper(kMax) n <- .getNumberOfNAsToAdd(kMax) naRealsToAdd <- rep(NA_real_, n) sampleSizes <<- c(sampleSizes, naRealsToAdd) means <<- c(means, naRealsToAdd) stDevs <<- c(stDevs, naRealsToAdd) overallSampleSizes <<- c(overallSampleSizes, naRealsToAdd) overallMeans <<- c(overallMeans, naRealsToAdd) overallStDevs <<- c(overallStDevs, naRealsToAdd) .data <<- data.frame(stage = stages, group = groups, sampleSize = sampleSizes, mean = means, stDev = stDevs, overallSampleSize = overallSampleSizes, overallMean = overallMeans, overallStDev = overallStDevs) .orderDataByStageAndGroup() .setDataToVariables() }, getRandomData = function() { data <- NULL for (stage in 1:getNumberOfStages()) { for (group in 1:getNumberOfGroups()) { randomData <- stats::rnorm(n = getSampleSize(stage = stage, group = group), mean = getMean(stage = stage, group = group), sd = getStDev(stage = stage, group = group)) row <- data.frame( stage = stage, group = group, randomData = randomData ) if (is.null(data)) { data <- row } else { data <- rbind(data, row) } } } data$stage <- factor(data$stage) data$group <- factor(data$group, label=paste("Group", c(1:getNumberOfGroups()))) return(data) }, .getOverallData = function(dataInput, kMax, stage) { "Calculates overall means and standard deviation if stagewise data is available" if (is.null(dataInput[["sampleSizes"]])) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data input must contain variable 'sampleSizes'") } if (is.null(dataInput[["means"]])) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data input must contain variable 'means'") } if (is.null(dataInput[["stDevs"]])) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data input must contain variable 'stDevs'") } dataInput$overallSampleSizes <- c(base::cumsum(dataInput$sampleSizes[1:stage]), rep(NA_real_, kMax - stage)) dataInput$overallMeans <- c(base::cumsum(dataInput$sampleSizes[1:stage] * dataInput$means[1:stage]) / base::cumsum(dataInput$sampleSizes[1:stage]), rep(NA_real_, kMax - stage)) dataInput$overallStDevs <- rep(NA_real_, kMax) for (k in 1:stage) { dataInput$overallStDevs[k] <- base::sqrt((base::sum((dataInput$sampleSizes[1:k] - 1) * dataInput$stDevs[1:k]^2) + base::sum(dataInput$sampleSizes[1:k] * (dataInput$means[1:k] - dataInput$overallMeans[k])^2)) / (base::sum(dataInput$sampleSizes[1:k]) - 1)) } return(dataInput) }, .getStageWiseData = function(dataInput, kMax, stage) { "Calculates stagewise means and standard deviation if overall data is available" if (is.null(dataInput[["overallSampleSizes"]])) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data input must contain variable 'overallSampleSizes'") } if (is.null(dataInput[["overallMeans"]])) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data input must contain variable 'overallMeans'") } if (is.null(dataInput[["overallStDevs"]])) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data input must contain variable 'overallStDevs'") } dataInput$sampleSizes <- c(dataInput$overallSampleSizes[1:stage], rep(NA_real_, kMax - stage)) if (stage > 1) { dataInput$sampleSizes[2:stage] <- dataInput$overallSampleSizes[2:stage] - dataInput$overallSampleSizes[1:(stage - 
1)] } dataInput$means <- c(dataInput$overallMeans[1:stage], rep(NA_real_, kMax - stage)) if (stage > 1) { for (k in 2:stage) { dataInput$means[k] <- (dataInput$overallSampleSizes[k] * dataInput$overallMeans[k] - dataInput$overallSampleSizes[k - 1] * dataInput$overallMeans[k - 1]) / dataInput$sampleSizes[k] } } dataInput$stDevs <- c(dataInput$overallStDevs[1:stage], rep(NA_real_, kMax - stage)) if (stage > 1) { for (k in 2:stage) { dataInput$stDevs[k] <- base::sqrt(((dataInput$overallSampleSizes[k] - 1) * dataInput$overallStDevs[k]^2 - (dataInput$overallSampleSizes[k - 1] - 1) * dataInput$overallStDevs[k - 1]^2 + base::sum(dataInput$sampleSizes[1:(k - 1)] * (dataInput$means[1:(k - 1)] - dataInput$overallMeans[k - 1])^2) - base::sum(dataInput$sampleSizes[1:k] * (dataInput$means[1:k] - dataInput$overallMeans[k])^2)) / (dataInput$sampleSizes[k] - 1)) } } return(dataInput) } ) )
#'
#' @title
#' Dataset Plotting
#'
#' @param x The \code{\link{Dataset}} object to plot.
#' @param y Not available for this kind of plot (it is only defined for compatibility
#' with the generic plot function).
#' @param main The main title, default is \code{"Dataset"}.
#' @param xlab The x-axis label, default is \code{"Stage"}.
#' @param ylab The y-axis label.
#' @param legendTitle The legend title, default is \code{"Group"}.
#' @param palette The palette, default is \code{"Set1"}.
#' @param showSource If \code{TRUE}, the parameter names of the object that were
#' used to create the plot will be printed; this may be useful, e.g.,
#' to check the values or to create your own plots with \code{\link[graphics]{plot}}.
#' @param ... Optional \code{ggplot2} arguments.
#'
#' @description
#' Plots a dataset.
#'
#' @details
#' Generic function to plot all kinds of datasets.
#'
#' @return
#' A \code{ggplot2} object.
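#'
#' Note that plots are currently implemented for datasets of means and of rates only;
#' for a dataset of survival data the function stops with a message that this plot
#' is not available yet.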
#' #' @examples #' #' # Plot a dataset of means #' dataExample <- getDataset( #' n1 = c(22, 11, 22, 11), #' n2 = c(22, 13, 22, 13), #' means1 = c(1, 1.1, 1, 1), #' means2 = c(1.4, 1.5, 3, 2.5), #' stDevs1 = c(1, 2, 2, 1.3), #' stDevs2 = c(1, 2, 2, 1.3)) #' #' if (require(ggplot2)) plot(dataExample, main = "Comparison of means") #' #' # Plot a dataset of rates #' dataExample <- getDataset( #' n1 = c(8, 10, 9, 11), #' n2 = c(11, 13, 12, 13), #' events1 = c(3, 5, 5, 6), #' events2 = c(8, 10, 12, 12) #' ) #' #' if (require(ggplot2)) plot(dataExample, main = "Comparison of rates") #' #' @export #' plot.Dataset <- function(x, y, ..., main = "Dataset", xlab = "Stage", ylab = NA_character_, legendTitle = "Group", palette = "Set1", showSource = FALSE) { .assertGgplotIsInstalled() if (x$isDatasetMeans()) { data <- x$getRandomData() if (is.na(ylab)) { ylab <- "Random data" } } else if (x$isDatasetRates()) { data <- x$.data if (is.na(ylab)) { ylab <- "Frequency (Events and Sample Size)" } } else if (x$isDatasetSurvival()) { # Open work: implement dataset plot of survival data stop("Plot of survival data is not implemented yet") } if (showSource) { warning("'showSource' = TRUE is not implemented yet for class ", class(x)) } if (x$getNumberOfGroups() == 1) { if (x$isDatasetMeans()) { p <- ggplot2::ggplot(ggplot2::aes(y = data$randomData, x = factor(data$stage)), data = data) p <- p + ggplot2::geom_boxplot(ggplot2::aes(fill = data$stage)) p <- p + ggplot2::geom_point(colour = "#0e414e", shape = 20, position = ggplot2::position_jitter(width = .1), size = x$getPlotSettings()$pointSize) p <- p + ggplot2::stat_summary(fun.y = "mean", geom = "point", shape = 21, position = ggplot2::position_dodge(.75), size = 4, fill = "white", colour = "black", show.legend = FALSE) } else if (x$isDatasetRates()) { p <- ggplot2::ggplot(show.legend = FALSE) # plot sample size p <- p + ggplot2::geom_bar(ggplot2::aes(y = data$sampleSize, x = factor(data$stage), fill = factor(data$stage)), data = data, position = "dodge", stat = "identity", alpha = 0.4) # plot events p <- p + ggplot2::geom_bar(ggplot2::aes(y = data$event, x = factor(data$stage), fill = factor(data$stage)), data = data, position = "dodge", stat = "identity") } else if (x$isDatasetSurvival()) { # implement survival plot here } } else { data$stageGroup <- interaction(data$stage, data$group) if (x$isDatasetMeans()) { p <- ggplot2::ggplot(ggplot2::aes(y = data$randomData, x = factor(data$stage), fill = factor(data$group)), data = data) p <- p + ggplot2::geom_point(ggplot2::aes(colour = data$group), shape = 20, position = ggplot2::position_dodge(.75), size = x$getPlotSettings()$pointSize) p <- p + ggplot2::geom_boxplot() p <- p + ggplot2::stat_summary(ggplot2::aes(colour = data$group), fun.y = "mean", geom = "point", shape = 21, position = ggplot2::position_dodge(.75), size = 4, fill = "white", show.legend = FALSE) } else if (x$isDatasetRates()) { p <- ggplot2::ggplot(show.legend = FALSE) # plot sample size p <- p + ggplot2::geom_bar(ggplot2::aes(y = data$sampleSize, x = factor(data$stage), fill = factor(data$group)), data = data, position = "dodge", stat = "identity", alpha = 0.4) # plot events p <- p + ggplot2::geom_bar(ggplot2::aes(y = data$event, x = factor(data$stage), fill = factor(data$group)), data = data, position = "dodge", stat = "identity") } else if (x$isDatasetSurvival()) { # implement survival plot here } } # hide second legend if (x$getNumberOfGroups() == 1) { p <- p + ggplot2::guides(fill = FALSE, colour = FALSE) } else { p <- p + 
ggplot2::guides(colour = FALSE) }
# set theme
p <- x$getPlotSettings()$setTheme(p) #p <- designSet$getPlotSettings()$hideGridLines(p)
# set main title
p <- x$getPlotSettings()$setMainTitle(p, main)
# set axes labels
p <- x$getPlotSettings()$setAxesLabels(p, xlab = xlab, ylab = ylab)
# set legend
if (x$getNumberOfGroups() > 1) { p <- x$getPlotSettings()$setLegendPosition(p, legendPosition = C_POSITION_OUTSIDE_PLOT) p <- x$getPlotSettings()$setLegendBorder(p) p <- x$getPlotSettings()$setLegendTitle(p, legendTitle, mode = "fill") p <- x$getPlotSettings()$setLegendLabelSize(p) } p <- x$getPlotSettings()$setAxesAppearance(p) p <- x$getPlotSettings()$setColorPalette(p, palette, mode = "all") p <- x$getPlotSettings()$enlargeAxisTicks(p) companyAnnotationEnabled <- .getOptionalArgument("companyAnnotationEnabled", ...) if (is.null(companyAnnotationEnabled) || !is.logical(companyAnnotationEnabled)) { companyAnnotationEnabled <- FALSE } p <- x$getPlotSettings()$addCompanyAnnotation(p, enabled = companyAnnotationEnabled) p }
#'
#' @name DatasetRates
#'
#' @title
#' Dataset of Rates
#'
#' @description
#' Class for a dataset of rates.
#'
#' @field groups The group numbers.
#' @field stages The stage numbers.
#' @field sampleSizes The sample sizes.
#' @field events The events.
#'
#' @details
#' This object cannot be created directly; use \code{\link{getDataset}}
#' with suitable arguments to create a dataset of rates instead.
#'
#' @include class_core_parameter_set.R
#' @include class_core_plot_settings.R
#' @include f_core_constants.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
DatasetRates <- setRefClass("DatasetRates", contains = "Dataset", fields = list( sampleSizes = "numeric", events = "numeric", overallSampleSizes = "numeric", overallEvents = "numeric" ), methods = list( getSampleSize = function(stage, group = 1) { return(.data$sampleSize[.getIndices(stage = stage, group = group)]) }, getSampleSizes = function(..., stage = NA_integer_, group = NA_integer_) { stage <- .getValidatedStage(..., stage = stage, group = group) return(.data$sampleSize[.getIndices(stage = stage, group = group)]) }, getSampleSizesUpTo = function(to, group = 1) { return(.data$sampleSize[.getIndices(stage = c(1:to), group = group)]) }, getEvent = function(stage, group = 1) { return(.data$event[.getIndices(stage = stage, group = group)]) }, getEvents = function(..., stage = NA_integer_, group = NA_integer_) { stage <- .getValidatedStage(..., stage = stage, group = group) return(.data$event[.getIndices(stage = stage, group = group)]) }, getEventsUpTo = function(to, group = 1) { return(.data$event[.getIndices(stage = c(1:to), group = group)]) }, getOverallSampleSize = function(stage, group = 1) { return(.data$overallSampleSize[.getIndices(stage = stage, group = group)]) }, getOverallSampleSizes = function(..., stage = NA_integer_, group = NA_integer_) { stage <- .getValidatedStage(..., stage = stage, group = group) return(.data$overallSampleSize[.getIndices(stage = stage, group = group)]) }, getOverallSampleSizesUpTo = function(to, group = 1) { return(.data$overallSampleSize[.getIndices(stage = c(1:to), group = group)]) }, getOverallEvent = function(stage, group = 1) { return(.data$overallEvent[.getIndices(stage = stage, group = group)]) }, getOverallEvents = function(..., stage = NA_integer_, group = NA_integer_) { stage <- .getValidatedStage(..., stage = stage, group = group) return(.data$overallEvent[.getIndices(stage = stage, group = group)]) }, getOverallEventsUpTo = function(to, group = 1) {
return(.data$overallEvent[.getIndices(stage = c(1:to), group = group)]) }, .getValidatedSampleSizes = function(n) { return(.getValidatedFloatingPointNumbers(n, type = "Sample sizes")) }, .initByDataFrame = function(dataFrame) { callSuper(dataFrame) # case: one rate - stage wise if (.paramExists(dataFrame, C_KEY_WORDS_SAMPLE_SIZES)) { sampleSizes <<- .getValidatedSampleSizes( .getValuesByParameterName(dataFrame, C_KEY_WORDS_SAMPLE_SIZES)) .validateValues(sampleSizes, "n") events <<- .getValuesByParameterName(dataFrame, C_KEY_WORDS_EVENTS) .validateValues(events, "events") kMax <- length(sampleSizes) stageNumber <- length(stats::na.omit(sampleSizes)) dataInput <- data.frame( sampleSizes = sampleSizes, events = events) dataInput <- .getOverallData(dataInput, kMax, stage = stageNumber) overallSampleSizes <<- .getValidatedSampleSizes(dataInput$overallSampleSizes) overallEvents <<- dataInput$overallEvents .setParameterType("sampleSizes", C_PARAM_USER_DEFINED) .setParameterType("events", C_PARAM_USER_DEFINED) .setParameterType("overallSampleSizes", C_PARAM_GENERATED) .setParameterType("overallEvents", C_PARAM_GENERATED) } # case: one rate - overall else if (.paramExists(dataFrame, C_KEY_WORDS_OVERALL_SAMPLE_SIZES)) { overallSampleSizes <<- .getValidatedSampleSizes(.getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_SAMPLE_SIZES)) .validateValues(overallSampleSizes, "overallSampleSizes") overallEvents <<- .getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_EVENTS) .validateValues(overallEvents, "overallEvents") kMax <- length(overallSampleSizes) stageNumber <- length(stats::na.omit(overallSampleSizes)) stageWiseData <- .getStageWiseData(data.frame( overallSampleSizes = overallSampleSizes, overallEvents = overallEvents), kMax, stage = stageNumber) sampleSizes <<- .getValidatedSampleSizes(stageWiseData$sampleSizes) events <<- stageWiseData$events .setParameterType("sampleSizes", C_PARAM_GENERATED) .setParameterType("events", C_PARAM_GENERATED) .setParameterType("overallSampleSizes", C_PARAM_USER_DEFINED) .setParameterType("overallEvents", C_PARAM_USER_DEFINED) } # case: two or more rates - stage wise else if (.paramExists(dataFrame, paste0(C_KEY_WORDS_SAMPLE_SIZES, 1)) && .paramExists(dataFrame, paste0(C_KEY_WORDS_SAMPLE_SIZES, 2))) { numberOfTreatmentGroups <- .getNumberOfGroups(dataFrame, C_KEY_WORDS_SAMPLE_SIZES) stages <<- rep(stages, numberOfTreatmentGroups) groups <<- integer(0) sampleSizes <<- numeric(0) events <<- numeric(0) overallSampleSizes <<- numeric(0) overallEvents <<- numeric(0) for (group in 1:numberOfTreatmentGroups) { sampleSizesTemp <- .getValidatedSampleSizes(.getValuesByParameterName( dataFrame, C_KEY_WORDS_SAMPLE_SIZES, suffix = group)) .validateValues(sampleSizesTemp, paste0("n", group)) sampleSizes <<- c(sampleSizes, sampleSizesTemp) eventsTemp <- .getValuesByParameterName(dataFrame, C_KEY_WORDS_EVENTS, suffix = group) .validateValues(eventsTemp, paste0("events", group)) events <<- c(events, eventsTemp) groups <<- c(groups, rep(as.integer(group), length(sampleSizesTemp))) kMax <- length(sampleSizesTemp) numberOfValidStages <- length(stats::na.omit(sampleSizesTemp)) overallData <- .getOverallData(data.frame( sampleSizes = sampleSizesTemp, events = eventsTemp), kMax, stage = numberOfValidStages) overallSampleSizes <<- c(overallSampleSizes, .getValidatedSampleSizes(overallData$overallSampleSizes)) overallEvents <<- c(overallEvents, overallData$overallEvents) } if (base::sum(stats::na.omit(sampleSizes) < 0) > 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all sample 
sizes must be >= 0") } .setParameterType("sampleSizes", C_PARAM_USER_DEFINED) .setParameterType("events", C_PARAM_USER_DEFINED) .setParameterType("overallSampleSizes", C_PARAM_GENERATED) .setParameterType("overallEvents", C_PARAM_GENERATED) } # case: two or more rates - overall else if (.paramExists(dataFrame, paste0(C_KEY_WORDS_OVERALL_SAMPLE_SIZES, 1)) && .paramExists(dataFrame, paste0(C_KEY_WORDS_OVERALL_SAMPLE_SIZES, 2))) { numberOfTreatmentGroups <- .getNumberOfGroups(dataFrame, C_KEY_WORDS_OVERALL_SAMPLE_SIZES) stages <<- rep(stages, numberOfTreatmentGroups) groups <<- integer(0) sampleSizes <<- numeric(0) events <<- numeric(0) overallSampleSizes <<- numeric(0) overallEvents <<- numeric(0) for (group in 1:numberOfTreatmentGroups) { overallSampleSizesTemp <- .getValidatedSampleSizes(.getValuesByParameterName( dataFrame, C_KEY_WORDS_OVERALL_SAMPLE_SIZES, suffix = group)) .validateValues(overallSampleSizesTemp, paste0("overallSampleSizes", group)) overallSampleSizes <<- c(overallSampleSizes, overallSampleSizesTemp) overallEventsTemp <- .getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_EVENTS, suffix = group) .validateValues(overallEventsTemp, paste0("overallEvents", group)) overallEvents <<- c(overallEvents, overallEventsTemp) groups <<- c(groups, rep(as.integer(group), length(overallSampleSizesTemp))) kMax <- length(overallSampleSizesTemp) numberOfValidStages <- length(stats::na.omit(overallSampleSizesTemp)) stageWiseData <- .getStageWiseData(data.frame( overallSampleSizes = overallSampleSizesTemp, overallEvents = overallEventsTemp), kMax, stage = numberOfValidStages) validatedSampleSizes <- .getValidatedSampleSizes(stageWiseData$sampleSizes) .validateValues(validatedSampleSizes, paste0("n", group)) sampleSizes <<- c(sampleSizes, validatedSampleSizes) events <<- c(events, stageWiseData$events) if (base::sum(stats::na.omit(sampleSizes) < 0) > 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all sample sizes must be >= 0") } } .setParameterType("sampleSizes", C_PARAM_GENERATED) .setParameterType("events", C_PARAM_GENERATED) .setParameterType("overallSampleSizes", C_PARAM_USER_DEFINED) .setParameterType("overallEvents", C_PARAM_USER_DEFINED) } else { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "sample sizes are missing or not correctly specified") } if (base::sum(stats::na.omit(events) < 0) > 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all events must be >= 0") } .data <<- data.frame(stage = stages, group = groups, sampleSize = sampleSizes, event = events, overallSampleSize = overallSampleSizes, overallEvent = overallEvents) .orderDataByStageAndGroup() .setDataToVariables() .validateDataset() }, .setDataToVariables = function() { stages <<- .data$stage groups <<- .data$group sampleSizes <<- .data$sampleSize events <<- .data$event overallSampleSizes <<- .data$overallSampleSize overallEvents <<- .data$overallEvent }, .fillWithNAs = function(kMax) { callSuper(kMax) n <- .getNumberOfNAsToAdd(kMax) sampleSizes <<- c(sampleSizes, rep(NA_real_, n)) events <<- c(events, rep(NA_real_, n)) overallSampleSizes <<- c(overallSampleSizes, rep(NA_real_, n)) overallEvents <<- c(overallEvents, rep(NA_real_, n)) .data <<- data.frame(stage = stages, group = groups, sampleSize = sampleSizes, event = events, overallSampleSize = overallSampleSizes, overallEvent = overallEvents) .orderDataByStageAndGroup() .setDataToVariables() }, getRandomData = function() { data <- NULL for (stage in 1:getNumberOfStages()) { for (group in 1:getNumberOfGroups()) { n = getSampleSize(stage = stage, group = group) numberOfEvents 
<- getEvent(stage = stage, group = group) randomIndices <- sample(x = c(1:n), size = numberOfEvents, replace = FALSE) randomData <- rep(0, n) randomData[randomIndices] <- 1 row <- data.frame( stage = stage, group = group, randomData = randomData ) if (is.null(data)) { data <- row } else { data <- rbind(data, row) } } } data$stage <- factor(data$stage) data$group <- factor(data$group, labels = paste("Group", c(1:getNumberOfGroups()))) return(data) },
.getOverallData = function(dataInput, kMax, stage) { "Calculates overall values if stagewise data is available" if (is.null(dataInput[["sampleSizes"]])) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data input must contain variable 'sampleSizes'") } if (is.null(dataInput[["events"]])) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data input must contain variable 'events'") } dataInput$overallSampleSizes <- c(base::cumsum(dataInput$sampleSizes[1:stage]), rep(NA_real_, kMax - stage)) dataInput$overallEvents <- c(base::cumsum(dataInput$events[1:stage]), rep(NA_real_, kMax - stage)) return(dataInput) },
.getStageWiseData = function(dataInput, kMax, stage) { "Calculates stagewise values if overall data is available" if (is.null(dataInput[["overallSampleSizes"]])) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data input must contain variable 'overallSampleSizes'") } if (is.null(dataInput[["overallEvents"]])) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data input must contain variable 'overallEvents'") } dataInput$sampleSizes <- c(dataInput$overallSampleSizes[1:stage], rep(NA_real_, kMax - stage)) if (stage > 1) { dataInput$sampleSizes[2:stage] <- dataInput$overallSampleSizes[2:stage] - dataInput$overallSampleSizes[1:(stage - 1)] } dataInput$events <- c(dataInput$overallEvents[1:stage], rep(NA_real_, kMax - stage)) if (stage > 1) { dataInput$events[2:stage] <- dataInput$overallEvents[2:stage] - dataInput$overallEvents[1:(stage - 1)] } return(dataInput) } ) )
#'
#' @name DatasetSurvival
#'
#' @title
#' Dataset of Survival Data
#'
#' @description
#' Class for a dataset of survival data.
#'
#' @field groups The group numbers.
#' @field stages The stage numbers.
#' @field overallEvents The overall events.
#' @field overallAllocationRatios The overall allocation ratios.
#' @field overallLogRanks The overall logrank test statistics.
#'
#' @details
#' This object cannot be created directly; use \code{\link{getDataset}}
#' with suitable arguments to create a dataset of survival data instead.
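#'
#' As an illustration (the numbers are arbitrary), a dataset of survival data
#' comparing two groups can be created from stage-wise logrank statistics as follows:
#' \preformatted{datasetSurvival <- getDataset(
#'     events           = c(28, 31),
#'     logRanks         = c(1.52, 1.38),
#'     allocationRatios = c(1, 1))}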
#' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' DatasetSurvival <- setRefClass("DatasetSurvival", contains = "Dataset", fields = list( overallEvents = "numeric", overallAllocationRatios = "numeric", overallLogRanks = "numeric", events = "numeric", allocationRatios = "numeric", logRanks = "numeric" ), methods = list( getEvent = function(stage, group = 1) { return(.data$event[.getIndices(stage = stage, group = group)]) }, # getEvents = function(stage = NA_integer_, group = 1) { # return(.data$event[.getIndices(stage = stage, group = group)]) # }, getEvents = function(..., stage = NA_integer_, group = NA_integer_) { stage <- .getValidatedStage(..., stage = stage, group = group) return(.data$event[.getIndices(stage = stage, group = group)]) }, getEventsUpTo = function(to, group = 1) { return(.data$event[.getIndices(stage = c(1:to), group = group)]) }, getAllocationRatio = function(stage, group = 1) { return(.data$allocationRatio[.getIndices(stage = stage, group = group)]) }, # getAllocationRatios = function(stage = NA_integer_, group = 1) { # return(.data$allocationRatio[.getIndices(stage = stage, group = group)]) # }, getAllocationRatios = function(..., stage = NA_integer_, group = NA_integer_) { stage <- .getValidatedStage(..., stage = stage, group = group) return(.data$allocationRatio[.getIndices(stage = stage, group = group)]) }, getAllocationRatiosUpTo = function(to, group = 1) { return(.data$allocationRatio[.getIndices(stage = c(1:to), group = group)]) }, getLogRank = function(stage, group = 1) { return(.data$logRank[.getIndices(stage = stage, group = group)]) }, # getLogRanks = function(stage = NA_integer_, group = 1) { # return(.data$logRank[.getIndices(stage = stage, group = group)]) # }, getLogRanks = function(..., stage = NA_integer_, group = NA_integer_) { stage <- .getValidatedStage(..., stage = stage, group = group) return(.data$logRank[.getIndices(stage = stage, group = group)]) }, getLogRanksUpTo = function(to, group = 1) { return(.data$logRank[.getIndices(stage = c(1:to), group = group)]) }, getOverallEvent = function(stage, group = 1) { return(.data$overallEvent[.getIndices(stage = stage, group = group)]) }, # getOverallEvents = function(stage = NA_integer_, group = 1) { # return(.data$overallEvent[.getIndices(stage = stage, group = group)]) # }, getOverallEvents = function(..., stage = NA_integer_, group = NA_integer_) { stage <- .getValidatedStage(..., stage = stage, group = group) return(.data$overallEvent[.getIndices(stage = stage, group = group)]) }, getOverallEventsUpTo = function(to, group = 1) { return(.data$overallEvent[.getIndices(stage = c(1:to), group = group)]) }, getOverallAllocationRatio = function(stage, group = 1) { return(.data$overallAllocationRatio[.getIndices(stage = stage, group = group)]) }, # getOverallAllocationRatios = function(stage = NA_integer_, group = 1) { # return(.data$overallAllocationRatio[.getIndices(stage = stage, group = group)]) # }, getOverallAllocationRatios = function(..., stage = NA_integer_, group = NA_integer_) { stage <- .getValidatedStage(..., stage = stage, group = group) return(.data$overallAllocationRatio[.getIndices(stage = stage, group = group)]) }, getOverallAllocationRatiosUpTo = function(to, group = 1) { return(.data$overallAllocationRatio[.getIndices(stage = c(1:to), group = group)]) }, getOverallLogRank = function(stage, group = 1) { return(.data$overallLogRank[.getIndices(stage = stage, group = 
group)]) },
# getOverallLogRanks = function(stage = NA_integer_, group = 1) {
# return(.data$overallLogRank[.getIndices(stage = stage, group = group)])
# },
getOverallLogRanks = function(..., stage = NA_integer_, group = NA_integer_) { stage <- .getValidatedStage(..., stage = stage, group = group) return(.data$overallLogRank[.getIndices(stage = stage, group = group)]) }, getOverallLogRanksUpTo = function(to, group = 1) { return(.data$overallLogRank[.getIndices(stage = c(1:to), group = group)]) },
.getValidatedEvents = function(n) { return(.getValidatedFloatingPointNumbers(n, type = "Events")) },
.getAllocationRatioDefaultValues = function(stages, events, logRanks) { allocationRatioDefaultValues <- rep(C_ALLOCATION_RATIO_DEFAULT, length(stages)) indices <- which(is.na(events) | is.na(logRanks)) allocationRatioDefaultValues[indices] <- NA_real_ return(allocationRatioDefaultValues) },
.initByDataFrame = function(dataFrame) { callSuper(dataFrame)
# case: survival, two groups - overall
if (.paramExists(dataFrame, C_KEY_WORDS_OVERALL_LOG_RANKS)) { overallEvents <<- .getValidatedEvents( .getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_EVENTS)) .validateValues(overallEvents, "overallEvents") overallLogRanks <<- .getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_LOG_RANKS) .validateValues(overallLogRanks, "overallLogRanks") overallAllocationRatios <<- .getValuesByParameterName( dataFrame, parameterNameVariants = C_KEY_WORDS_OVERALL_ALLOCATION_RATIOS, defaultValues = .getAllocationRatioDefaultValues(stages, overallEvents, overallLogRanks)) .validateValues(overallAllocationRatios, "overallAllocationRatios") kMax <- length(overallEvents) stageNumber <- length(stats::na.omit(overallEvents)) dataInput <- data.frame( overallEvents = overallEvents, overallAllocationRatios = overallAllocationRatios, overallLogRanks = overallLogRanks) dataInput <- .getStageWiseData(dataInput, kMax, stage = stageNumber) events <<- .getValidatedEvents(dataInput$events) allocationRatios <<- dataInput$allocationRatios logRanks <<- dataInput$logRanks .setParameterType("events", C_PARAM_GENERATED) .setParameterType("allocationRatios", C_PARAM_GENERATED) .setParameterType("logRanks", C_PARAM_GENERATED) .setParameterType("overallEvents", C_PARAM_USER_DEFINED) .setParameterType("overallAllocationRatios", C_PARAM_USER_DEFINED) .setParameterType("overallLogRanks", C_PARAM_USER_DEFINED) .setParameterType("groups", C_PARAM_NOT_APPLICABLE) }
# case: survival, two groups - stage wise
else if (.paramExists(dataFrame, C_KEY_WORDS_LOG_RANKS)) { events <<- .getValidatedEvents(.getValuesByParameterName(dataFrame, C_KEY_WORDS_EVENTS)) .validateValues(events, "events") logRanks <<- .getValuesByParameterName(dataFrame, C_KEY_WORDS_LOG_RANKS) .validateValues(logRanks, "logRanks") allocationRatios <<- .getValuesByParameterName( dataFrame, C_KEY_WORDS_ALLOCATION_RATIOS, defaultValues = .getAllocationRatioDefaultValues(stages, events, logRanks)) .validateValues(allocationRatios, "allocationRatios") kMax <- length(events) stageNumber <- length(stats::na.omit(events)) dataInput <- data.frame( events = events, allocationRatios = allocationRatios, logRanks = logRanks) dataInput <- .getOverallData(dataInput, kMax, stage = stageNumber) overallEvents <<- .getValidatedEvents(dataInput$overallEvents) overallAllocationRatios <<- dataInput$overallAllocationRatios overallLogRanks <<- dataInput$overallLogRanks .setParameterType("events", C_PARAM_USER_DEFINED) .setParameterType("allocationRatios", C_PARAM_USER_DEFINED) .setParameterType("logRanks",
C_PARAM_USER_DEFINED) .setParameterType("overallEvents", C_PARAM_GENERATED) .setParameterType("overallAllocationRatios", C_PARAM_GENERATED) .setParameterType("overallLogRanks", C_PARAM_GENERATED) .setParameterType("groups", C_PARAM_NOT_APPLICABLE) }
# TODO case: survival, three or more groups - overall
else if (.paramExists(dataFrame, paste0(C_KEY_WORDS_OVERALL_LOG_RANKS, 1)) && .paramExists(dataFrame, paste0(C_KEY_WORDS_OVERALL_LOG_RANKS, 2))) { numberOfTreatmentGroups <- .getNumberOfGroups(dataFrame, C_KEY_WORDS_OVERALL_LOG_RANKS) stages <<- rep(stages, numberOfTreatmentGroups) groups <<- integer(0) events <<- numeric(0) allocationRatios <<- numeric(0) logRanks <<- numeric(0) overallEvents <<- numeric(0) overallAllocationRatios <<- numeric(0) overallLogRanks <<- numeric(0) for (group in 1:numberOfTreatmentGroups) { overallEventsTemp <- .getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_EVENTS, suffix = group) .validateValues(overallEventsTemp, paste0("overallEvents", group)) overallEvents <<- c(overallEvents, overallEventsTemp) overallLogRanksTemp <- .getValuesByParameterName( dataFrame, C_KEY_WORDS_OVERALL_LOG_RANKS, suffix = group) .validateValues(overallLogRanksTemp, paste0("overallLogRanks", group)) overallLogRanks <<- c(overallLogRanks, overallLogRanksTemp) overallAllocationRatiosTemp <- .getValuesByParameterName( dataFrame, C_KEY_WORDS_OVERALL_ALLOCATION_RATIOS, suffix = group, defaultValues = .getAllocationRatioDefaultValues(overallEventsTemp, overallEventsTemp, overallLogRanksTemp)) .validateValues(overallAllocationRatiosTemp, paste0("overallAllocationRatios", group)) overallAllocationRatios <<- c(overallAllocationRatios, overallAllocationRatiosTemp) groups <<- c(groups, rep(as.integer(group), length(overallLogRanksTemp))) kMax <- length(overallLogRanksTemp) numberOfValidStages <- length(stats::na.omit(overallLogRanksTemp)) stageWiseData <- .getStageWiseData(data.frame( overallLogRanks = overallLogRanksTemp, overallAllocationRatios = overallAllocationRatiosTemp, overallEvents = overallEventsTemp), kMax, stage = numberOfValidStages) validatedLogRanks <- stageWiseData$logRanks .validateValues(validatedLogRanks, paste0("logRanks", group)) logRanks <<- c(logRanks, validatedLogRanks) allocationRatios <<- c(allocationRatios, stageWiseData$allocationRatios) events <<- c(events, stageWiseData$events)
# if (base::sum(stats::na.omit(logRanks) < 0) > 0) {
# stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all log ranks must be >= 0")
# }
} .setParameterType("events", C_PARAM_GENERATED) .setParameterType("allocationRatios", C_PARAM_GENERATED) .setParameterType("logRanks", C_PARAM_GENERATED) .setParameterType("overallEvents", C_PARAM_USER_DEFINED) .setParameterType("overallAllocationRatios", C_PARAM_USER_DEFINED) .setParameterType("overallLogRanks", C_PARAM_USER_DEFINED) }
# case: survival, three or more groups - stage wise
else if (.paramExists(dataFrame, paste0(C_KEY_WORDS_LOG_RANKS, 1)) && .paramExists(dataFrame, paste0(C_KEY_WORDS_LOG_RANKS, 2))) { numberOfTreatmentGroups <- .getNumberOfGroups(dataFrame, C_KEY_WORDS_LOG_RANKS) stages <<- rep(stages, numberOfTreatmentGroups) groups <<- integer(0) events <<- numeric(0) allocationRatios <<- numeric(0) logRanks <<- numeric(0) overallEvents <<- numeric(0) overallAllocationRatios <<- numeric(0) overallLogRanks <<- numeric(0) for (group in 1:numberOfTreatmentGroups) { eventsTemp <- .getValidatedEvents(.getValuesByParameterName( dataFrame, C_KEY_WORDS_EVENTS, suffix = group)) events <<- c(events, eventsTemp) logRanksTemp <-
.getValuesByParameterName( dataFrame, C_KEY_WORDS_LOG_RANKS, suffix = group) .validateValues(logRanksTemp, paste0("logRanks", group)) logRanks <<- c(logRanks, logRanksTemp) allocationRatiosTemp <- .getValuesByParameterName( dataFrame, C_KEY_WORDS_ALLOCATION_RATIOS, suffix = group, defaultValues = .getAllocationRatioDefaultValues(eventsTemp, eventsTemp, logRanksTemp)) .validateValues(allocationRatiosTemp, paste0("allocationRatios", group)) allocationRatios <<- c(allocationRatios, allocationRatiosTemp) groups <<- c(groups, rep(as.integer(group), length(eventsTemp))) kMax <- length(eventsTemp) numberOfValidStages <- length(stats::na.omit(eventsTemp)) overallData <- .getOverallData(data.frame( events = eventsTemp, allocationRatios = allocationRatiosTemp, logRanks = logRanksTemp), kMax, stage = numberOfValidStages) overallEvents <<- c(overallEvents, overallData$overallEvents) overallAllocationRatios <<- c(overallAllocationRatios, overallData$overallAllocationRatios) overallLogRanks <<- c(overallLogRanks, overallData$overallLogRanks) } .setParameterType("events", C_PARAM_USER_DEFINED) .setParameterType("allocationRatios", C_PARAM_USER_DEFINED) .setParameterType("logRanks", C_PARAM_USER_DEFINED) .setParameterType("overallEvents", C_PARAM_GENERATED) .setParameterType("overallAllocationRatios", C_PARAM_GENERATED) .setParameterType("overallLogRanks", C_PARAM_GENERATED) } .data <<- data.frame(stage = stages, group = groups, overallEvent = overallEvents, overallAllocationRatio = overallAllocationRatios, overallLogRank = overallLogRanks, event = events, allocationRatio = allocationRatios, logRank = logRanks) .orderDataByStageAndGroup() .setDataToVariables() .validateDataset() },
.setDataToVariables = function() { stages <<- .data$stage groups <<- .data$group overallEvents <<- .data$overallEvent overallAllocationRatios <<- .data$overallAllocationRatio overallLogRanks <<- .data$overallLogRank events <<- .data$event allocationRatios <<- .data$allocationRatio logRanks <<- .data$logRank },
.fillWithNAs = function(kMax) { callSuper(kMax) n <- .getNumberOfNAsToAdd(kMax) overallEvents <<- c(overallEvents, rep(NA_real_, n)) overallAllocationRatios <<- c(overallAllocationRatios, rep(NA_real_, n)) overallLogRanks <<- c(overallLogRanks, rep(NA_real_, n)) events <<- c(events, rep(NA_real_, n)) allocationRatios <<- c(allocationRatios, rep(NA_real_, n)) logRanks <<- c(logRanks, rep(NA_real_, n)) .data <<- data.frame(stage = stages, group = groups, overallEvent = overallEvents, overallAllocationRatio = overallAllocationRatios, overallLogRank = overallLogRanks, event = events, allocationRatio = allocationRatios, logRank = logRanks) .orderDataByStageAndGroup() .setDataToVariables() },
getRandomData = function() { stop("The function 'DatasetSurvival.getRandomData()' is not implemented yet") },
.getOverallData = function(dataInput, kMax, stage) { "Calculates overall logrank statistics, events, and allocation ratios if stagewise data is available" if (is.null(dataInput[["events"]])) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data input must contain variable 'events'") } if (is.null(dataInput[["logRanks"]])) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data input must contain variable 'logRanks'") } if (is.null(dataInput[["allocationRatios"]])) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data input must contain variable 'allocationRatios'") } dataInput$overallEvents <- c(base::cumsum(dataInput$events[1:stage]), rep(NA_real_, kMax - stage)) dataInput$overallLogRanks <- c(dataInput$logRanks[1:stage], rep(NA_real_, kMax - stage)) if
(stage > 1) { for (k in 2:stage){ dataInput$overallLogRanks[k] <- (base::sqrt(dataInput$events[k]) * dataInput$logRanks[k] + base::sqrt(dataInput$overallEvents[k - 1]) * dataInput$overallLogRanks[k - 1]) / base::sqrt(dataInput$overallEvents[k]) } } dataInput$overallAllocationRatios <- c(dataInput$allocationRatios[1:stage], rep(NA_real_, kMax - stage)) if (stage > 1) { for (k in 2:stage){ dataInput$overallAllocationRatios[k] <- (dataInput$events[k] * dataInput$allocationRatios[k] + dataInput$overallEvents[k - 1] * dataInput$overallAllocationRatios[k - 1]) / dataInput$overallEvents[k] } } return(dataInput) }, .getStageWiseData = function(dataInput, kMax, stage) { "Calculates stagewise logrank statistics, events, and allocation ratios if overall data is available" if (is.null(dataInput[["overallEvents"]])) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data input must contain variable 'overallEvents'") } if (is.null(dataInput[["overallLogRanks"]])) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data input must contain variable 'overallLogRanks'") } if (is.null(dataInput[["overallAllocationRatios"]])) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data input must contain variable 'overallAllocationRatios'") } dataInput$events <- c(dataInput$overallEvents[1:stage], rep(NA_real_, kMax - stage)) if (stage > 1) { dataInput$events[2:stage] <- dataInput$overallEvents[2:stage] - dataInput$overallEvents[1:(stage - 1)] } dataInput$logRanks <- c(dataInput$overallLogRanks[1:stage], rep(NA_real_, kMax - stage)) if (stage > 1) { dataInput$logRanks[2:stage] <- (base::sqrt(dataInput$overallEvents[2:stage]) * dataInput$overallLogRanks[2:stage] - base::sqrt(dataInput$overallEvents[1:(stage - 1)]) * dataInput$overallLogRanks[1:(stage - 1)]) / base::sqrt(dataInput$overallEvents[2:stage] - dataInput$overallEvents[1:(stage - 1)]) } dataInput$allocationRatios <- c(dataInput$overallAllocationRatios[1:stage], rep(NA_real_, kMax - stage)) if (stage > 1) { dataInput$allocationRatios[2:stage] <- (dataInput$overallAllocationRatios[2:stage] - dataInput$overallAllocationRatios[1:(stage - 1)] * dataInput$overallEvents[1:(stage - 1)] / dataInput$overallEvents[2:stage]) / (dataInput$events[2:stage] / dataInput$overallEvents[2:stage]) } if (any(stats::na.omit(dataInput$allocationRatios) <= 0)) { stop("Overall allocation ratios not correctly specified") } return(dataInput) } ) ) rpact/R/class_time.R0000644000176200001440000024400513571465022014044 0ustar liggesusers###################################################################################### # # # -- Time classes -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 19-02-2019 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### C_REGEXP_GREATER_OR_EQUAL <- ">= ?" C_REGEXP_SMALLER <- "< ?" C_REGEXP_SMALLER_OR_EQUAL <- "<= ?" C_REGEXP_DECIMAL_NUMBER <- "\\d*(\\.{1}\\d*)?" TimeDefinition <- setRefClass("TimeDefinition", contains = "ParameterSet", methods = list( initialize = function(...) { callSuper(...) 
.parameterNames <<- C_PARAMETER_NAMES .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS }, .getRegexpFromTo = function(..., from, to, fromPrefix = "", toPrefix = "") { return(paste0("(^ *", fromPrefix, from, " *- *", toPrefix, to, " *$)")) }, .getRegexpSmallerThan = function() { return(paste0("(^ *", C_REGEXP_SMALLER, C_REGEXP_DECIMAL_NUMBER, " *$)")) }, .getRegexpDecimalNumber = function() { return(paste0("(^ *", C_REGEXP_DECIMAL_NUMBER, " *$)")) }, .getRegexpGreaterOrEqualThan = function() { return(paste0("(^ *", C_REGEXP_GREATER_OR_EQUAL, C_REGEXP_DECIMAL_NUMBER, " *$)")) }, .getRegexpDecimalRangeStart = function() { return(.getRegexpFromTo(from = "0", to = C_REGEXP_DECIMAL_NUMBER, toPrefix = C_REGEXP_SMALLER)) }, .getRegexpDecimalRange = function() { return(.getRegexpFromTo(from = C_REGEXP_DECIMAL_NUMBER, to = C_REGEXP_DECIMAL_NUMBER, toPrefix = C_REGEXP_SMALLER)) }, .getRegexpDecimalRangeEnd = function() { return(.getRegexpFromTo(from = C_REGEXP_DECIMAL_NUMBER, to = "(Inf|x|\\?)", toPrefix = paste0("(", C_REGEXP_SMALLER, " *)?"))) }, .getRegexpDecimalRangeFiniteEnd = function() { return(.getRegexpFromTo(from = C_REGEXP_DECIMAL_NUMBER, to = C_REGEXP_DECIMAL_NUMBER, toPrefix = "<=? ?")) }, .getRegexpOr = function(...) { args <- list(...) if (length(args) == 0) { return("") } if (length(args) == 1) { return(args[[1]]) } return(paste(unlist(args, recursive = FALSE, use.names = FALSE), collapse = "|")) }, .validateTimePeriod = function(timePeriod, i, n, accrualTimeMode = FALSE) { endOfAccrualIsUndefined = FALSE if (i == 1) { if (!grepl(.getRegexpOr(.getRegexpSmallerThan(), .getRegexpDecimalRangeStart()), timePeriod, perl = TRUE)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the name of the first region must have the format ", "\"=time\", \"time - Inf\" or \"time1 - <=time2\", ", "e.g., \"20\", \">=20\" or \"20 - Inf\" or \"20 - <=30\"") } if (grepl(.getRegexpOr(.getRegexpGreaterOrEqualThan(), .getRegexpDecimalRangeEnd()), timePeriod, perl = TRUE)) { endOfAccrualIsUndefined <- TRUE } timePeriod <- gsub("([Inf >=\\?x]*)|-", "", timePeriod) } else { if (!grepl(.getRegexpOr(.getRegexpGreaterOrEqualThan(), .getRegexpDecimalRangeEnd()), timePeriod, perl = TRUE)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the name of the last region must have the format ", "\">=time\" or \"time - Inf\", e.g., \">=20\" or \"20 - Inf\"") } } } else { if (!grepl(.getRegexpDecimalRange(), timePeriod, perl = TRUE)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the name of the inner regions must have the format \"time_1 - =21" = 0.007), hazardRatio = 0.75) #' pwst #' #' \donttest{ #' #' # The object created by getPiecewiseSurvivalTime() can be used directly in getSampleSizeSurvival(): #' getSampleSizeSurvival(piecewiseSurvivalTime = pwst) #' #' # The object created by getPiecewiseSurvivalTime() can be used directly in getPowerSurvival(): #' getPowerSurvival(piecewiseSurvivalTime = pwst, #' maxNumberOfEvents = 40, maxNumberOfSubjects = 100) #' #' } #' getPiecewiseSurvivalTime <- function(piecewiseSurvivalTime = NA_real_, ..., lambda1 = NA_real_, lambda2 = NA_real_, hazardRatio = NA_real_, pi1 = NA_real_, pi2 = NA_real_, median1 = NA_real_, median2 = NA_real_, eventTime = C_EVENT_TIME_DEFAULT, kappa = 1, delayedResponseAllowed = FALSE) { .warnInCaseOfUnknownArguments(functionName = "getPiecewiseSurvivalTime", ..., ignore = c(".pi1Default", ".silent")) if (inherits(piecewiseSurvivalTime, "PiecewiseSurvivalTime") || inherits(piecewiseSurvivalTime, "TrialDesignPlanSurvival")) { .warnInCaseOfUnusedArgument(lambda1, 
"lambda1", NA_real_, "getPiecewiseSurvivalTime") .warnInCaseOfUnusedArgument(lambda2, "lambda2", NA_real_, "getPiecewiseSurvivalTime") .warnInCaseOfUnusedArgument(hazardRatio, "hazardRatio", NA_real_, "getPiecewiseSurvivalTime") .warnInCaseOfUnusedArgument(pi1, "pi1", NA_real_, "getPiecewiseSurvivalTime") .warnInCaseOfUnusedArgument(pi2, "pi2", NA_real_, "getPiecewiseSurvivalTime") .warnInCaseOfUnusedArgument(eventTime, "eventTime", C_EVENT_TIME_DEFAULT, "getPiecewiseSurvivalTime") .warnInCaseOfUnusedArgument(kappa, "kappa", 1, "getPiecewiseSurvivalTime") .warnInCaseOfUnusedArgument(delayedResponseAllowed, "delayedResponseAllowed", FALSE, "getPiecewiseSurvivalTime") } if (inherits(piecewiseSurvivalTime, "PiecewiseSurvivalTime")) { return(piecewiseSurvivalTime) } if (inherits(piecewiseSurvivalTime, "TrialDesignPlanSurvival")) { return(piecewiseSurvivalTime$.piecewiseSurvivalTime) } .assertIsValidLambda(lambda1, 1) .assertIsValidLambda(lambda2, 2) .assertIsNumericVector(hazardRatio, "hazardRatio", naAllowed = TRUE) .assertIsNumericVector(pi1, "pi1", naAllowed = TRUE) .assertIsSingleNumber(pi2, "pi2", naAllowed = TRUE) .assertIsNumericVector(median1, "median1", naAllowed = TRUE) .assertIsSingleNumber(median2, "median2", naAllowed = TRUE) .assertIsSingleNumber(eventTime, "eventTime", naAllowed = TRUE) .assertIsValidKappa(kappa) .assertIsSingleLogical(delayedResponseAllowed, "delayedResponseAllowed") return(PiecewiseSurvivalTime(piecewiseSurvivalTime = piecewiseSurvivalTime, lambda1 = lambda1, lambda2 = lambda2, hazardRatio = hazardRatio, pi1 = pi1, pi2 = pi2, median1 = median1, median2 = median2, eventTime = eventTime, kappa = kappa, delayedResponseAllowed = delayedResponseAllowed, ...)) } #' @title #' Get Accrual Time #' #' @description #' Returns a \code{AccrualTime} object that contains the accrual time and the accrual intensity. #' #' @param accrualTime The assumed accrual time for the study, default is \code{c(0,12)} (see details). #' @param accrualIntensity A vector of accrual intensities, default is the relative #' intensity \code{0.1} (see details). #' @param maxNumberOfSubjects The maximum number of subjects. #' @param ... Ensures that all arguments after \code{accrualTime} are be named and #' that a warning will be displayed if unknown arguments are passed. #' #' @details #' #' \code{accrualTime} can also be used to define a non-constant accrual over time. #' For this, \code{accrualTime} needs to be a vector that defines the accrual intervals and #' \code{accrualIntensity} needs to be specified. The first element of \code{accrualTime} must be equal to 0.\cr #' \code{accrualTime} can also be a list that combines the definition of the accrual time and #' accrual intensity \code{accrualIntensity} (see below and examples for details). #' If the length of \code{accrualTime} and the length of \code{accrualIntensity} are #' the same (i.e., the end of accrual is undefined), \code{maxNumberOfPatients > 0} needs to #' be specified and the end of accrual is calculated. #' #' \code{accrualIntensity} needs to be defined if a vector of \code{accrualTime} is specified.\cr #' If the length of \code{accrualTime} and the length of \code{accrualIntensity} are the same #' (i.e., the end of accrual is undefined), \code{maxNumberOfPatients > 0} needs to be specified #' and the end of accrual is calculated. 
#' In that case, \code{accrualIntensity} is given by the number of subjects per time unit.\cr #' If the length of \code{accrualTime} equals the length of \code{accrualIntensity} + 1 #' (i.e., the end of accrual is defined), \code{maxNumberOfSubjects} is calculated. #' In that case, \code{accrualIntensity} defines the relative intensity with which subjects enter the trial. #' For example, \code{accrualIntensity = c(1,2)} specifies that in the second accrual interval #' the intensity is doubled as compared to the first accrual interval. The actual accrual intensity #' is calculated for the calculated \code{maxNumberOfSubjects}. #' #' @return Returns an \code{\link{AccrualTime}} object. #' #' @export #' #' @examples #' #' \donttest{ #' #' # Case 1 #' #' # > End of accrual, absolute accrual intensity and `maxNumberOfSubjects` are given, #' # > `followUpTime`** shall be calculated. #' #' ## Example: vector based definition #' #' accrualTime <- getAccrualTime(accrualTime = c(0, 6, 30), #' accrualIntensity = c(22, 33), maxNumberOfSubjects = 924) #' accrualTime #' #' #' ## Example: list based definition #' #' accrualTime <- getAccrualTime(list( #' "0 - <6" = 22, #' "6 - <=30" = 33), #' maxNumberOfSubjects = 924) #' accrualTime #' #' #' ## Example: how to use accrual time object #' #' getSampleSizeSurvival(accrualTime = accrualTime, pi1 = 0.4, pi2 = 0.2) #' #' #' # Case 2 #' #' # > End of accrual, relative accrual intensity and `maxNumberOfSubjects` are given, #' # > absolute accrual intensity* and `followUpTime`** shall be calculated. #' #' ## Example: vector based definition #' #' accrualTime <- getAccrualTime(accrualTime = c(0, 6, 30), #' accrualIntensity = c(0.22, 0.33), maxNumberOfSubjects = 1000) #' accrualTime #' #' #' ## Example: list based definition #' #' accrualTime <- getAccrualTime(list( #' "0 - <6" = 0.22, #' "6 - <=30" = 0.33), #' maxNumberOfSubjects = 1000) #' accrualTime #' #' #' ## Example: how to use accrual time object #' #' getSampleSizeSurvival(accrualTime = accrualTime, pi1 = 0.4, pi2 = 0.2) #' #' #' # Case 3 #' #' # > End of accrual and absolute accrual intensity are given, #' # > `maxNumberOfSubjects`* and `followUpTime`** shall be calculated. #' #' ## Example: vector based definition #' #' accrualTime <- getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(22, 33)) #' #' #' ## Example: list based definition #' #' accrualTime <- getAccrualTime(list( #' "0 - <6" = 22, #' "6 - <=30" = 33)) #' accrualTime #' #' #' ## Example: how to use accrual time object #' #' getSampleSizeSurvival(accrualTime = accrualTime, pi1 = 0.4, pi2 = 0.2) #' #' #' # Case 4 #' #' # > End of accrual, relative accrual intensity and `followUpTime` are given, #' # > absolute accrual intensity** and `maxNumberOfSubjects`** shall be calculated. 
#' #' ## Example: vector based definition #' #' accrualTime <- getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(0.22, 0.33)) #' accrualTime #' #' #' ## Example: list based definition #' #' accrualTime <- getAccrualTime(list( #' "0 - <6" = 0.22, #' "6 - <=30" = 0.33)) #' accrualTime #' #' #' ## Example: how to use accrual time object #' #' getSampleSizeSurvival(accrualTime = accrualTime, pi1 = 0.4, pi2 = 0.2) #' #' #' # Case 5 #' #' # > `maxNumberOfSubjects` and absolute accrual intensity are given, #' # > absolute accrual intensity*, end of accrual* and `followUpTime`** shall be calculated #' #' ## Example: vector based definition #' #' accrualTime <- getAccrualTime(accrualTime = c(0, 6), #' accrualIntensity = c(22, 33), maxNumberOfSubjects = 1000) #' accrualTime #' #' #' ## Example: list based definition #' #' accrualTime <- getAccrualTime(list( #' "0 - <6" = 22, #' "6" = 33), #' maxNumberOfSubjects = 1000) #' accrualTime #' #' #' ## Example: how to use accrual time object #' #' getSampleSizeSurvival(accrualTime = accrualTime, pi1 = 0.4, pi2 = 0.2) #' #' #' # Case 6 (not possible) #' #' # > `maxNumberOfSubjects` and relative accrual intensity are given, #' # > absolute accrual intensity[x], end of accrual* and `followUpTime`** shall be calculated #' #' ## Example: vector based definition #' #' accrualTime <- getAccrualTime(accrualTime = c(0, 6), #' accrualIntensity = c(0.22, 0.33), maxNumberOfSubjects = 1000) #' accrualTime #' #' #' ## Example: list based definition #' #' accrualTime <- getAccrualTime(list( #' "0 - <6" = 0.22, #' "6" = 0.33), #' maxNumberOfSubjects = 1000) #' accrualTime #' #' #' ## Example: how to use accrual time object #' #' # Case 6 is not allowed and therefore an error will be shown: #' #' tryCatch({ #' getSampleSizeSurvival(accrualTime = accrualTime, pi1 = 0.4, pi2 = 0.2) #' }, error = function(e) { #' print(e$message) #' }) #' #' #' # Case 7 #' #' # > `followUpTime` and absolute accrual intensity are given, #' # > end of accrual** and `maxNumberOfSubjects`** shall be calculated #' #' ## Example: vector based definition #' #' accrualTime <- getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(22, 33)) #' accrualTime #' #' #' ## Example: list based definition #' #' accrualTime <- getAccrualTime(list( #' "0 - <6" = 22, #' "6" = 33)) #' accrualTime #' #' #' ## Example: how to use accrual time object #' #' getSampleSizeSurvival(accrualTime = accrualTime, #' pi1 = 0.4, pi2 = 0.2, followUpTime = 6) #' #' #' # Case 8 (not possible) #' #' # > `followUpTime` and relative accrual intensity are given, #' # > absolute accrual intensity[x], end of accrual and `maxNumberOfSubjects` shall be calculated #' #' ## Example: vector based definition #' #' accrualTime <- getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(0.22, 0.33)) #' accrualTime #' #' #' ## Example: list based definition #' #' accrualTime <- getAccrualTime(list( #' "0 - <6" = 0.22, #' "6" = 0.33)) #' accrualTime #' #' #' ## Example: how to use accrual time object #' #' # Case 8 is not allowed and therefore an error will be shown: #' #' tryCatch({ #' getSampleSizeSurvival(accrualTime = accrualTime, pi1 = 0.4, pi2 = 0.2, followUpTime = 6) #' }, error = function(e) { #' print(e$message) #' }) #' #' #' # How to show accrual time details #' #' # You can use a sample size or power object as argument for function `getAccrualTime`: #' #' sampleSize <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), #' lambda2 = 0.05, hazardRatio = 0.8, followUpTime = 6) #' sampleSize #' 
accrualTime <- getAccrualTime(sampleSize) #' accrualTime #' #' } #' getAccrualTime <- function(accrualTime = NA_real_, ..., accrualIntensity = NA_real_, maxNumberOfSubjects = NA_real_) { .warnInCaseOfUnknownArguments(functionName = "getAccrualTime", ..., ignore = "showWarnings") if (inherits(accrualTime, "AccrualTime") || inherits(accrualTime, "TrialDesignPlanSurvival")) { if (!identical(accrualIntensity, C_ACCRUAL_INTENSITY_DEFAULT)) { .warnInCaseOfUnusedArgument(accrualIntensity, "accrualIntensity", NA_real_, "getAccrualTime") } .warnInCaseOfUnusedArgument(maxNumberOfSubjects, "maxNumberOfSubjects", NA_real_, "getAccrualTime") } if (inherits(accrualTime, "AccrualTime")) { return(accrualTime) } if (inherits(accrualTime, "TrialDesignPlanSurvival")) { return(accrualTime$.accrualTime) } .assertIsNumericVector(accrualIntensity, "accrualIntensity", naAllowed = TRUE) .assertIsSingleNumber(maxNumberOfSubjects, "maxNumberOfSubjects", naAllowed = TRUE) args <- list(...) showWarnings <- args[["showWarnings"]] if (is.null(showWarnings) || !is.logical(showWarnings)) { showWarnings <- TRUE } return(AccrualTime(accrualTime = accrualTime, accrualIntensity = accrualIntensity, maxNumberOfSubjects = maxNumberOfSubjects, showWarnings = showWarnings)) } #' #' @name PiecewiseSurvivalTime #' #' @title #' Piecewise Exponential Survival Time #' #' @description #' Class for definition of piecewise survival times. #' #' @details #' \code{PiecewiseSurvivalTime} is a class for definition of piecewise survival times. #' #' @include f_core_constants.R #' @include class_core_parameter_set.R #' #' @keywords internal #' #' @importFrom methods new #' PiecewiseSurvivalTime <- setRefClass("PiecewiseSurvivalTime", contains = "TimeDefinition", fields = list( .pi1Default = "numeric", .silent = "logical", piecewiseSurvivalTime = "numeric", lambda1 = "numeric", lambda2 = "numeric", hazardRatio = "numeric", pi1 = "numeric", pi2 = "numeric", median1 = "numeric", median2 = "numeric", eventTime = "numeric", kappa = "numeric", piecewiseSurvivalEnabled = "logical", delayedResponseAllowed = "logical", delayedResponseEnabled = "logical" ), methods = list( initialize = function(piecewiseSurvivalTime = NA_real_, ..., lambda1 = NA_real_, lambda2 = NA_real_, hazardRatio = NA_real_, pi1 = NA_real_, pi2 = NA_real_, median1 = NA_real_, median2 = NA_real_, eventTime = C_EVENT_TIME_DEFAULT, kappa = 1, delayedResponseAllowed = FALSE) { callSuper(piecewiseSurvivalTime = NA_real_, lambda1 = lambda1, lambda2 = lambda2, hazardRatio = hazardRatio, pi1 = pi1, pi2 = pi2, median1 = median1, median2 = median2, eventTime = eventTime, kappa = kappa, delayedResponseAllowed = delayedResponseAllowed, ...) 
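# NOTE (illustrative sketch, not part of the original source; kept as comments
# so the surrounding class definition stays valid): the list-based definition
# parsed by .initFromList() below is, per the documentation above, equivalent
# to a vector-based definition of start times and control-arm hazards. The
# values used here are hypothetical:
#
#   getPiecewiseSurvivalTime(list(
#       "0 - <6" = 0.025,
#       "6 - <9" = 0.04,
#       ">=9"    = 0.015),
#       hazardRatio = 0.8)
#   # is expected to be equivalent to:
#   getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 6, 9),
#       lambda2 = c(0.025, 0.04, 0.015), hazardRatio = 0.8)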
if (length(piecewiseSurvivalTime) == 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'piecewiseSurvivalTime' must be defined (set to NA_real_ if not applicable)") } .stopInCaseOfConflictingArguments(lambda1, "lambda1", median1, "median1") #.stopInCaseOfConflictingArguments(lambda2, "lambda2", median1, "median1") #.stopInCaseOfConflictingArguments(lambda1, "lambda1", median2, "median2") .stopInCaseOfConflictingArguments(lambda2, "lambda2", median2, "median2") .stopInCaseOfConflictingArguments(pi1, "pi1", median1, "median1") #.stopInCaseOfConflictingArguments(pi2, "pi2", median1, "median1") #.stopInCaseOfConflictingArguments(pi1, "pi1", median2, "median2") .stopInCaseOfConflictingArguments(pi2, "pi2", median2, "median2") if (length(median1) > 0 && !all(is.na(median1))) { .self$lambda1 <<- getLambdaByMedian(median1, kappa = kappa) .setParameterType("median1", C_PARAM_USER_DEFINED) .setParameterType("lambda1", C_PARAM_GENERATED) } else { .setParameterType("median1", C_PARAM_NOT_APPLICABLE) .setParameterType("lambda1", ifelse(length(lambda1) == 1 && is.na(lambda1), C_PARAM_NOT_APPLICABLE, C_PARAM_USER_DEFINED)) } if (length(median2) > 0 && !all(is.na(median2))) { .self$lambda2 <<- getLambdaByMedian(median2, kappa = kappa) .setParameterType("median2", C_PARAM_USER_DEFINED) .setParameterType("lambda2", C_PARAM_GENERATED) } else { .setParameterType("median2", C_PARAM_NOT_APPLICABLE) .setParameterType("lambda2", C_PARAM_NOT_APPLICABLE) } args <- list(...) if (!is.null(args[[".pi1Default"]])) { .pi1Default <<- args[[".pi1Default"]] } if (!is.null(args[[".silent"]])) { .silent <<- args[[".silent"]] } else { .silent <<- FALSE } piecewiseSurvivalEnabled <<- FALSE delayedResponseEnabled <<- FALSE .setParameterType("piecewiseSurvivalTime", C_PARAM_NOT_APPLICABLE) .setParameterType("piecewiseSurvivalEnabled", C_PARAM_GENERATED) .setParameterType("delayedResponseEnabled", ifelse(isTRUE(delayedResponseAllowed), C_PARAM_GENERATED, C_PARAM_NOT_APPLICABLE)) .setParameterType("delayedResponseAllowed", ifelse(isTRUE(delayedResponseAllowed), C_PARAM_USER_DEFINED, C_PARAM_DEFAULT_VALUE)) .setParameterType("pi1", C_PARAM_NOT_APPLICABLE) .setParameterType("pi2", C_PARAM_NOT_APPLICABLE) .setParameterType("eventTime", ifelse(length(eventTime) == 1 && is.na(eventTime), C_PARAM_NOT_APPLICABLE, ifelse(eventTime == C_EVENT_TIME_DEFAULT, C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED))) .setParameterType("kappa", ifelse(length(kappa) == 1 && !is.na(kappa) && kappa == 1, C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) .init(piecewiseSurvivalTime) if (.getParameterType("median1") == C_PARAM_USER_DEFINED && .getParameterType("lambda1") == C_PARAM_USER_DEFINED) { .setParameterType("lambda1", C_PARAM_GENERATED) } if (.getParameterType("median2") == C_PARAM_USER_DEFINED && .getParameterType("lambda2") == C_PARAM_USER_DEFINED) { .setParameterType("lambda2", C_PARAM_GENERATED) } .validateCalculatedArguments() }, .validateCalculatedArguments = function() { if (.getParameterType("median1") == C_PARAM_USER_DEFINED) { if (!isTRUE(all.equal(getLambdaByMedian(median1, kappa = kappa), lambda1, tolerance = 1e-05))) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'lambda1' must be ", round(getLambdaByMedian(median1, kappa = kappa), 5), ", but is ", round(lambda1, 5)) } if (!isTRUE(all.equal(getPiByMedian(median1, eventTime = eventTime, kappa = kappa), pi1, tolerance = 1e-05))) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'pi1' must be ", round(getPiByMedian(median1, eventTime = eventTime, kappa = kappa), 5), ", but is ", round(pi1, 5)) } } if 
(.getParameterType("median2") == C_PARAM_USER_DEFINED) { if (!isTRUE(all.equal(getLambdaByMedian(median2, kappa = kappa), lambda2, tolerance = 1e-05))) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'lambda2' must be ", round(getLambdaByMedian(median2, kappa = kappa), 5), ", but is ", round(lambda2, 5)) } if (!isTRUE(all.equal(getPiByMedian(median2, eventTime = eventTime, kappa = kappa), pi2, tolerance = 1e-05))) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'pi2' must be ", round(getPiByMedian(median2, eventTime = eventTime, kappa = kappa), 5), ", but is ", round(pi2, 5)) } } }, .stopInCaseOfConflictingArguments = function(arg1, argName1, arg2, argName2) { if (length(arg1) > 0 && !all(is.na(arg1)) && length(arg2) > 0 && !all(is.na(arg2))) { stop(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "it is not allowed to specifiy '", argName1, "' (", .arrayToString(arg1), ")", " and '", argName2, "' (", .arrayToString(arg2), ") concurrently") } }, .asDataFrame = function() { data <- data.frame( piecewiseSurvivalTime = piecewiseSurvivalTime, lambda1 = lambda1, lambda2 = lambda2 ) rownames(data) <- as.character(1:nrow(data)) colnames(data) <- c("Start time", C_PARAMETER_NAMES["lambda1"], # Hazard rate (1) C_PARAMETER_NAMES["lambda2"]) # Hazard rate (2) return(data) }, .isPiBased = function() { return(!.isLambdaBased()) }, .isLambdaBased = function(minNumberOfLambdas = 2) { if (.getParameterType("lambda2") == C_PARAM_USER_DEFINED || .getParameterType("median2") == C_PARAM_USER_DEFINED) { if (length(lambda2) >= minNumberOfLambdas && !any(is.na(lambda2))) { return(TRUE) } } return((length(pi1) == 0 || any(is.na(pi1))) && (length(pi2) == 0 || any(is.na(pi2)))) }, show = function(showType = 1, digits = NA_integer_) { .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE) }, .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) { 'Method for automatically printing piecewise survival time objects' .resetCat() if (showType == 2) { .cat("Technical summary of the piecewise survival time object of class", methods::classLabel(class(.self)), ":\n\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showAllParameters(consoleOutputEnabled = consoleOutputEnabled) .showParameterTypeDescription(consoleOutputEnabled = consoleOutputEnabled) } else { .cat("Piecewise exponential survival times:\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) if (!piecewiseSurvivalEnabled) { .cat(" Piecewise exponential survival is disabled.\n\n", consoleOutputEnabled = consoleOutputEnabled) } else if (length(piecewiseSurvivalTime) == 1) { .cat(" At all times:", lambda2[1], "\n\n", consoleOutputEnabled = consoleOutputEnabled) } else { piecewiseSurvivalTimeStr <- format(piecewiseSurvivalTime) lambda2Str <- format(lambda2) for (i in 1:length(piecewiseSurvivalTime)) { if (i < length(piecewiseSurvivalTime)) { .cat(" ", piecewiseSurvivalTimeStr[i], " - <", piecewiseSurvivalTimeStr[i + 1], ": ", lambda2Str[i], "\n", sep ="", consoleOutputEnabled = consoleOutputEnabled) } else { .cat(" ", rep(" ", 2 + max(nchar(piecewiseSurvivalTimeStr))), ">=", piecewiseSurvivalTimeStr[i], ": ", lambda2Str[i], "\n", sep ="", consoleOutputEnabled = consoleOutputEnabled) } } if (delayedResponseEnabled) { .cat("Delayed response is enabled.\n", consoleOutputEnabled = consoleOutputEnabled) } .cat("\n", consoleOutputEnabled = consoleOutputEnabled) } .cat("Details:\n\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getUserDefinedParameters(), "User 
defined parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getDefaultParameters(), "Default parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getGeneratedParameters(), "Generated parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled) } }, .toString = function(startWithUpperCase = FALSE) { s <- "piecewise survival time" return(ifelse(startWithUpperCase, .firstCharacterToUpperCase(s), s)) }, isDelayedResponseEnabled = function() { return(delayedResponseEnabled) }, isPiecewiseSurvivalEnabled = function() { if (length(piecewiseSurvivalTime) == 0) { return(FALSE) } if (length(piecewiseSurvivalTime) == 1 && is.na(piecewiseSurvivalTime)) { return(FALSE) } return(TRUE) }, .initFromList = function(pwSurvTimeList) { if (!is.list(pwSurvTimeList)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'piecewiseSurvivalTime' must be a list") } if (length(pwSurvTimeList) == 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'piecewiseSurvivalTime' must contain at least one entry") } if (!all(is.na(lambda2))) { warning("'lambda2' (", .arrayToString(lambda2), ") will be ignored because 'piecewiseSurvivalTime' is a list", call. = FALSE) } pwSurvStartTimes <- c(0) pwSurvLambda2 <- c() pwSurvTimeNames <- names(pwSurvTimeList) for (i in 1:length(pwSurvTimeNames)) { timePeriod <- pwSurvTimeNames[i] lambdaValue <- pwSurvTimeList[[timePeriod]] .assertIsSingleNumber(lambdaValue, paste0("pwSurvLambda[", i, "]")) timePeriod <- .validateTimePeriod(timePeriod, i = i, n = length(pwSurvTimeNames)) if (i < length(pwSurvTimeNames)) { parts <- strsplit(timePeriod, "- *(< *)?", perl = TRUE)[[1]] if (length(parts) != 2) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all regions (", timePeriod, ") must have the format ", "\"time_1 - 1 && delayedResponseAllowed) { warning("Only the first 'hazardRatio' (", hazardRatio[1], ") was used for piecewise survival time definition ", "(use a loop over the function to simulate different hazard ratios)", call. 
= FALSE) hazardRatio <<- hazardRatio[1] lambda1 <<- pwSurvLambda2 * hazardRatio^(1 / kappa) .setParameterType("lambda1", C_PARAM_GENERATED) } else { lambda1 <<- NA_real_ .setParameterType("lambda1", C_PARAM_NOT_APPLICABLE) } lambda2 <<- pwSurvLambda2 .setParameterType("lambda2", C_PARAM_USER_DEFINED) piecewiseSurvivalEnabled <<- !identical(piecewiseSurvivalTime, 0) }, .init = function(pwSurvTime) { .logDebug("pwSurvTime %s, %s", pwSurvTime, class(pwSurvTime)) .logDebug("lambda1 %s, %s", lambda1, .getParameterType("lambda1")) .logDebug("lambda2 %s, %s", lambda2, .getParameterType("lambda2")) # case 1: lambda1 and lambda2 = NA if (length(pwSurvTime) == 1 && (is.na(pwSurvTime) || is.numeric(pwSurvTime)) && #length(lambda1) == 1 && (all(is.na(lambda1)) || .getParameterType("lambda1") == C_PARAM_GENERATED) && length(lambda2) == 1 && (is.na(lambda2) || .getParameterType("lambda2") == C_PARAM_GENERATED) ) { .logDebug(".init, case 1: lambda1 and lambda2 = NA") if (!any(is.na(hazardRatio))) { .setParameterType("hazardRatio", C_PARAM_USER_DEFINED) } if (!is.na(pwSurvTime)) { warning("'piecewiseSurvivalTime' (", pwSurvTime, ") will be ignored") } if (is.na(pi2)) { if (!is.na(median2)) { .logDebug(".init: calculate pi2 by median2") pi2 <<- getPiByMedian(median2, eventTime, kappa = kappa) .setParameterType("pi2", C_PARAM_GENERATED) } else { .logDebug(".init: set pi2 to default") pi2 <<- C_PI_2_DEFAULT .setParameterType("pi2", C_PARAM_DEFAULT_VALUE) } } else { .assertIsSingleNumber(pi2, "pi2") .setParameterType("pi2", ifelse(pi2 == C_PI_2_DEFAULT, C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) if (!any(is.na(median2))) { warning("'median2' (", .arrayToString(median2), ") will be ignored") median2 <<- NA_real_ } } hazardRatioCalculationEnabled <- TRUE if (all(is.na(pi1))) { if (length(hazardRatio) > 0 && !all(is.na(hazardRatio))) { .setParameterType("hazardRatio", C_PARAM_USER_DEFINED) hazardRatioCalculationEnabled <- FALSE } if (!any(is.na(median1))) { .logDebug(".init: calculate pi1 by median1") pi1 <<- getPiByMedian(median1, eventTime, kappa = kappa) .setParameterType("pi1", C_PARAM_GENERATED) } else if (length(hazardRatio) > 0 && !all(is.na(hazardRatio))) { .logDebug(".init: calculate pi1 by pi2 and hazardRatio") pi1 <<- getPiByLambda( getLambdaByPi(pi2, eventTime, kappa = kappa) * hazardRatio^(1 / kappa), eventTime, kappa = kappa) .setParameterType("pi1", C_PARAM_GENERATED) } else { .logDebug(".init: set pi1 to default") if (!is.null(.pi1Default) && is.numeric(.pi1Default) && length(.pi1Default) > 0) { pi1 <<- .pi1Default } else { pi1 <<- C_PI_1_SAMPLE_SIZE_DEFAULT } .setParameterType("pi1", C_PARAM_DEFAULT_VALUE) } } else { .assertIsNumericVector(pi1, "pi1") if (!any(is.na(median1))) { .logDebug(".init: set median1 to NA") warning("'median1' (", .arrayToString(median1), ") will be ignored") median1 <<- NA_real_ } } if (hazardRatioCalculationEnabled) { if (length(hazardRatio) > 0 && !all(is.na(hazardRatio))) { warning("'hazardRatio' (", .arrayToString(hazardRatio), ") will be ignored because it will be calculated", call. 
= FALSE) } .logDebug(".init: calculate hazardRatio by pi1 and pi2") hazardRatio <<- getHazardRatioByPi(pi1, pi2, eventTime, kappa = kappa) .setParameterType("hazardRatio", C_PARAM_GENERATED) } if (length(pi1) > 0 && !any(is.na(pi1))) { pi1Default <- C_PI_1_SAMPLE_SIZE_DEFAULT if (!is.null(.pi1Default) && is.numeric(.pi1Default) && length(.pi1Default) > 0) { pi1Default <- .pi1Default } if (identical(pi1, pi1Default)) { .setParameterType("pi1", C_PARAM_DEFAULT_VALUE) } else if (hazardRatioCalculationEnabled && .getParameterType("pi1") != C_PARAM_GENERATED) { .setParameterType("pi1", C_PARAM_USER_DEFINED) } } if (length(pi2) == 1 && !is.na(pi2)) { if (length(eventTime) == 1 && !is.na(eventTime)) { lambda2 <<- getLambdaByPi(pi2, eventTime, kappa = kappa) .setParameterType("lambda2", C_PARAM_GENERATED) } if (length(pi1) == 1 && is.na(pi1) && !any(is.na(hazardRatio))) { pi1 <<- getPiByLambda(getLambdaByPi( pi2, eventTime, kappa = kappa) * hazardRatio^(1 / kappa), eventTime, kappa = kappa) .setParameterType("pi1", C_PARAM_GENERATED) } if (length(pi1) > 0 && !any(is.na(pi1)) && length(eventTime) == 1 && !is.na(eventTime)) { lambda1 <<- getLambdaByPi(pi1, eventTime, kappa = kappa) .setParameterType("lambda1", C_PARAM_GENERATED) } } .initMedian() return(invisible()) } if (length(pwSurvTime) == 1 && is.na(pwSurvTime)) { pwSurvTime <- NA_real_ } if (is.list(pwSurvTime)) { .assertIsValidHazardRatioVector(hazardRatio) .initFromList(pwSurvTime) .initHazardRatio() if (!piecewiseSurvivalEnabled) { .initPi() .initMedian() } } else if (delayedResponseAllowed && length(lambda2) == 1 && !is.na(lambda2) && length(hazardRatio) > 0) { .logDebug(".init, case 2: delayedResponseAllowed") piecewiseSurvivalEnabled <<- FALSE piecewiseSurvivalTime <<- 0 .initPi() .initHazardRatio() .initMedian() } else if (!is.numeric(pwSurvTime)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'piecewiseSurvivalTime' must be a list or a numeric vector") } else { piecewiseSurvivalTime <<- pwSurvTime if ((all(is.na(piecewiseSurvivalTime)) || identical(piecewiseSurvivalTime, 0)) && length(lambda2) == 1 && !is.na(lambda2)) { .logDebug(".init, case 3: piecewise survival is disabled") piecewiseSurvivalTime <<- 0 .setParameterType("piecewiseSurvivalTime", C_PARAM_DEFAULT_VALUE) piecewiseSurvivalEnabled <<- FALSE #piecewiseSurvivalTime <<- 0 .initHazardRatio() .initPi() .initMedian() } else { .logDebug(".init, case 3: piecewise survival is enabled") if (all(is.na(piecewiseSurvivalTime))) { if (.getParameterType("median1") == C_PARAM_USER_DEFINED) { stop(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "'median1' (", .arrayToString(median1), ") with length > 1 can only ", "defined together with a single 'median2', 'lambda2' or 'pi2'") } stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'piecewiseSurvivalTime' must be specified") } .setParameterType("piecewiseSurvivalTime", C_PARAM_USER_DEFINED) piecewiseSurvivalEnabled <<- TRUE .initHazardRatio() .initPi() } } if (piecewiseSurvivalEnabled) { pi1 <<- NA_real_ pi2 <<- NA_real_ median1 <<- NA_real_ median2 <<- NA_real_ .setParameterType("pi1", C_PARAM_NOT_APPLICABLE) .setParameterType("pi2", C_PARAM_NOT_APPLICABLE) .setParameterType("median1", C_PARAM_NOT_APPLICABLE) .setParameterType("median2", C_PARAM_NOT_APPLICABLE) .setParameterType("eventTime", C_PARAM_NOT_APPLICABLE) if (eventTime != C_EVENT_TIME_DEFAULT) { warning("Event time (", eventTime, ") will be ignored because it is not ", "applicable for piecewise exponential survival time", call. 
= FALSE) eventTime <<- C_EVENT_TIME_DEFAULT } } .validateInitialization() }, .initMedian = function() { if (length(eventTime) == 1 && !is.na(eventTime)) { if (length(pi1) > 0 && !all(is.na(pi1)) && .getParameterType("median1") != C_PARAM_USER_DEFINED) { median1 <<- getMedianByPi(pi1, eventTime, kappa = kappa) .setParameterType("median1", C_PARAM_GENERATED) } if (length(pi2) == 1 && !is.na(pi2) && .getParameterType("median2") != C_PARAM_USER_DEFINED) { median2 <<- getMedianByPi(pi2, eventTime, kappa = kappa) .setParameterType("median2", C_PARAM_GENERATED) } } else { if (length(lambda1) > 0 && !all(is.na(lambda1)) && .getParameterType("median1") != C_PARAM_USER_DEFINED) { median1 <<- getMedianByLambda(lambda1, kappa = kappa) .setParameterType("median1", C_PARAM_GENERATED) } if (length(lambda2) == 1 && !is.na(lambda2) && .getParameterType("median2") != C_PARAM_USER_DEFINED) { median2 <<- getMedianByLambda(lambda2, kappa = kappa) .setParameterType("median2", C_PARAM_GENERATED) } } }, .initPi = function() { .logDebug(".initPi: lambda2 is defined") if (length(eventTime) != 1 || is.na(eventTime)) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'eventTime' must be specified to calculate 'pi2' by 'lambda2'") } if (length(lambda2) == 0 || any(is.na(lambda2))) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'lambda2' must be defined before .initPi() can be called") } .setParameterType("lambda2", C_PARAM_USER_DEFINED) if (piecewiseSurvivalEnabled && length(hazardRatio) > 1) { return(invisible()) } pi2Calulated <- getPiByLambda(lambda2, eventTime, kappa = kappa) if (!is.na(pi2) && !isTRUE(all.equal(pi2, pi2Calulated))) { warning("'pi2' (", pi2, ") will be ignored", call. = FALSE) } pi2 <<- pi2Calulated .setParameterType("pi2", C_PARAM_GENERATED) if (length(lambda1) == 0 || any(is.na(lambda1))) { if (length(hazardRatio) > 0 && !any(is.na(hazardRatio))) { .logDebug(".initPi: calculate lambda1 by hazardRatio") lambda1 <<- lambda2 * hazardRatio^(1 / kappa) .setParameterType("lambda1", C_PARAM_GENERATED) } else if (length(lambda1) == 0) { lambda1 <<- NA_real_ } else if (delayedResponseAllowed) { .setParameterType("lambda1", C_PARAM_USER_DEFINED) } } if (length(lambda1) > 0 && !all(is.na(lambda1))) { .logDebug(".initPi: calculate p1 by lambda1") pi1Calulated <- getPiByLambda(lambda1, eventTime, kappa = kappa) if (!all(is.na(pi1)) && !isTRUE(all.equal(pi1, pi1Calulated))) { warning("'pi1' (", .arrayToString(pi1), ") will be ignored", call. = FALSE) } pi1 <<- pi1Calulated .setParameterType("pi1", C_PARAM_GENERATED) } }, .initHazardRatio = function() { .logDebug(".initHazardRatio") if (!is.null(hazardRatio) && length(hazardRatio) > 0 && !all(is.na(hazardRatio))) { if ((length(lambda1) == 1 && is.na(lambda1)) || .getParameterType("lambda1") == C_PARAM_GENERATED) { .setParameterType("hazardRatio", C_PARAM_USER_DEFINED) return(invisible()) } if (!.silent) { warning("'hazardRatio' (", .arrayToString(hazardRatio), ") will be ignored because it will be calculated", call. 
= FALSE) } } if (any(is.na(lambda2))) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'lambda2' must be specified") } if (any(is.na(lambda1))) { if (any(is.na(hazardRatio))) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'hazardRatio', 'lambda1' or 'median1' must be specified") } stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'lambda1' must be specified") } .setParameterType("lambda1", C_PARAM_USER_DEFINED) hr <- unique(round(lambda1 / lambda2, 8)^kappa) if (length(hr) != 1) { if (length(lambda2) == 1 && length(lambda1) > 1) { hazardRatio <<- (lambda1 / lambda2)^kappa .setParameterType("hazardRatio", C_PARAM_GENERATED) return(invisible()) } else if (delayedResponseAllowed) { hazardRatio <<- (lambda1 / lambda2)^kappa .setParameterType("hazardRatio", C_PARAM_GENERATED) delayedResponseEnabled <<- TRUE return(invisible()) } else { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'hazardRatio' can only be calculated if 'unique(lambda1 / lambda2)' ", "result in a single value; current result = ", .arrayToString(round(hr, 4), vectorLookAndFeelEnabled = TRUE), " (delayed response is not allowed)") } } hazardRatio <<- ((lambda1 / lambda2)^kappa)[1] .setParameterType("hazardRatio", C_PARAM_GENERATED) }, .validateInitialization = function() { if (length(piecewiseSurvivalTime) == 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'piecewiseSurvivalTime' must contain at least one urvival start time") } if (any(is.na(piecewiseSurvivalTime))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'piecewiseSurvivalTime' must contain valid survival start times") } if (piecewiseSurvivalTime[1] != 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the first value of 'piecewiseSurvivalTime' must be 0") } if (length(piecewiseSurvivalTime) != length(lambda2)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "length of 'piecewiseSurvivalTime' (", length(piecewiseSurvivalTime), ") must be equal to length of 'lambda2' (", length(lambda2), ")") } .assertValuesAreStrictlyIncreasing(piecewiseSurvivalTime, "piecewiseSurvivalTime") if ((length(lambda1) != 1 || is.na(lambda1)) && .getParameterType("lambda1") != C_PARAM_GENERATED) { if (length(hazardRatio) == 1 && !is.na(hazardRatio)) { lambda1 <<- lambda2 * hazardRatio^(1 / kappa) .setParameterType("lambda1", C_PARAM_GENERATED) } else if (length(hazardRatio) > 1 && delayedResponseAllowed && !is.na(hazardRatio[1])) { if (!delayedResponseEnabled && .isLambdaBased()) { warning("Only the first 'hazardRatio' (", hazardRatio[1], ") was used for piecewise survival time definition", call. = FALSE) hazardRatio <<- hazardRatio[1] lambda1 <<- lambda2 * hazardRatio^(1 / kappa) .setParameterType("lambda1", C_PARAM_GENERATED) } } else if (!delayedResponseEnabled && !(length(lambda2) == 1 && length(lambda1) > 1)) { if (length(lambda1) > 1) { warning("'lambda1' (", .arrayToString(lambda1), ") will be ignored", call. 
= FALSE) } lambda1 <<- NA_real_ .setParameterType("lambda1", C_PARAM_NOT_APPLICABLE) } } else if (length(hazardRatio) == 1 && !is.na(hazardRatio) && length(lambda1) > 0 && !any(is.na(lambda1)) && length(lambda2) > 0 && !any(is.na(lambda2))) { target <- lambda2 * hazardRatio^(1 / kappa) if (length(lambda1) > 0 && !all(is.na(lambda1)) && !isTRUE(all.equal(target, lambda1))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'lambda1' (", .arrayToString(lambda1), ") ", "is not as expected (", .arrayToString(target), ") for given hazard ratio ", hazardRatio) } } if (piecewiseSurvivalEnabled && !(length(lambda1) == 1 && is.na(lambda1)) && length(piecewiseSurvivalTime) != length(lambda1)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "length of 'piecewiseSurvivalTime' (", length(piecewiseSurvivalTime), ") must be equal to length of 'lambda1' (", length(lambda1), ")") } } ) ) #' #' @name AccrualTime #' #' @title #' Accrual Time #' #' @description #' Class for definition of accrual time and accrual intensity. #' #' @details #' \code{AccrualTime} is a class for definition of accrual time and accrual intensity. #' #' @include f_core_constants.R #' @include f_core_utilities.R #' @include class_core_parameter_set.R #' #' @keywords internal #' #' @importFrom methods new #' AccrualTime <- setRefClass("AccrualTime", contains = "TimeDefinition", fields = list( .showWarnings = "logical", endOfAccrualIsUserDefined = "logical", followUpTimeMustBeUserDefined = "logical", maxNumberOfSubjectsIsUserDefined = "logical", maxNumberOfSubjectsCanBeCalculatedDirectly = "logical", absoluteAccrualIntensityEnabled = "logical", accrualTime = "numeric", accrualIntensity = "numeric", accrualIntensityRelative = "numeric", maxNumberOfSubjects = "numeric", remainingTime = "numeric", piecewiseAccrualEnabled = "logical" ), methods = list( initialize = function(accrualTime = NA_real_, ..., accrualIntensity = NA_real_, maxNumberOfSubjects = NA_real_, showWarnings = TRUE) { callSuper(accrualTime = NA_real_, accrualIntensity = accrualIntensity, maxNumberOfSubjects = maxNumberOfSubjects, .showWarnings = showWarnings, ...) 
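# NOTE (illustrative sketch, not part of the original source; kept as comments
# so the surrounding class definition stays valid): the absolute/relative
# distinction used throughout this class follows .isRelativeAccrualIntensity()
# below: intensities are treated as relative if all values are < 1, otherwise
# as absolute. The two documented calls differ only in this interpretation:
#
#   # absolute intensities (subjects per time unit), documented case 1:
#   getAccrualTime(accrualTime = c(0, 6, 30),
#       accrualIntensity = c(22, 33), maxNumberOfSubjects = 924)
#   # relative intensities (all < 1), documented case 2; the absolute
#   # intensities are then derived from maxNumberOfSubjects:
#   getAccrualTime(accrualTime = c(0, 6, 30),
#       accrualIntensity = c(0.22, 0.33), maxNumberOfSubjects = 1000)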
endOfAccrualIsUserDefined <<- NA followUpTimeMustBeUserDefined <<- NA maxNumberOfSubjectsIsUserDefined <<- NA maxNumberOfSubjectsCanBeCalculatedDirectly <<- TRUE absoluteAccrualIntensityEnabled <<- NA .setParameterType("endOfAccrualIsUserDefined", C_PARAM_GENERATED) .setParameterType("followUpTimeMustBeUserDefined", C_PARAM_GENERATED) .setParameterType("maxNumberOfSubjectsIsUserDefined", C_PARAM_GENERATED) .setParameterType("maxNumberOfSubjectsCanBeCalculatedDirectly", C_PARAM_GENERATED) .setParameterType("absoluteAccrualIntensityEnabled", C_PARAM_GENERATED) accrualIntensityRelative <<- NA_real_ .setParameterType("accrualIntensityRelative", C_PARAM_NOT_APPLICABLE) remainingTime <<- NA_real_ .init(accrualTime) # case 6 correction if (!endOfAccrualIsUserDefined && maxNumberOfSubjectsIsUserDefined && !absoluteAccrualIntensityEnabled) { remainingTime <<- NA_real_ .setParameterType("remainingTime", C_PARAM_NOT_APPLICABLE) .self$accrualTime <<- .self$accrualTime[1:length(.self$accrualIntensity)] } .initAccrualIntensityAbsolute() .validateFormula() .showWarningIfCaseIsNotAllowd() }, .asDataFrame = function() { accrualIntensityTemp <- accrualIntensity if (!all(is.na(accrualIntensityRelative))) { accrualIntensityTemp <- accrualIntensityRelative } if (length(accrualIntensityTemp) + 1 == length(accrualTime)) { accrualIntensityTemp <- c(accrualIntensityTemp, NA_real_) } data <- data.frame( accrualTime = accrualTime, accrualIntensity = accrualIntensityTemp ) rownames(data) <- as.character(1:nrow(data)) colnames(data) <- c("Start time", C_PARAMETER_NAMES["accrualIntensity"]) return(data) }, show = function(showType = 1, digits = NA_integer_) { .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE) }, .isAbsoluteAccrualIntensity = function(x) { return(!.isRelativeAccrualIntensity(x)) }, .isRelativeAccrualIntensity = function(x) { return(all(x < 1)) }, .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) { 'Method for automatically printing accrual time objects' .resetCat() if (showType == 2) { .cat("Technical summary of the accrual time object of class", methods::classLabel(class(.self)), ":\n\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showAllParameters(consoleOutputEnabled = consoleOutputEnabled) .showParameterTypeDescription(consoleOutputEnabled = consoleOutputEnabled) } else { .cat("Accrual time and intensity:\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) if (!isAccrualTimeEnabled()) { .cat(" Accrual time is disabled.\n", consoleOutputEnabled = consoleOutputEnabled) } else if (length(accrualTime) == 1) { .cat(" At all times:", accrualIntensity[1], "\n", consoleOutputEnabled = consoleOutputEnabled) } else { accrualTimeStr <- format(accrualTime) accrualIntensityStr <- format(accrualIntensity) for (i in 1:length(accrualTime)) { prefix <- ifelse(i == length(accrualTime) - 1, "<=", " <") suffix <- "" if (!maxNumberOfSubjectsIsUserDefined) { suffix <- " " } if (i < length(accrualTime)) { .cat(" ", accrualTimeStr[i], " - ", prefix, accrualTimeStr[i + 1], suffix, ": ", accrualIntensityStr[i], "\n", consoleOutputEnabled = consoleOutputEnabled) } else if (!maxNumberOfSubjectsIsUserDefined) { .cat(" ", accrualTimeStr[i], " - <=[?]: ", accrualIntensityStr[i], "\n", consoleOutputEnabled = consoleOutputEnabled) } } .cat("", consoleOutputEnabled = consoleOutputEnabled) } .cat("\n", consoleOutputEnabled = consoleOutputEnabled) if (isAccrualTimeEnabled()) { .showFormula(consoleOutputEnabled = consoleOutputEnabled) 
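# NOTE (illustrative sketch, not part of the original source; kept as comments
# so the surrounding class definition stays valid): .validateFormula() below
# checks maxNumberOfSubjects against the sum of interval lengths times
# intensities. For the documented example the arithmetic is:
#
#   accrualTime <- c(0, 6, 30)
#   accrualIntensity <- c(22, 33)
#   sum(diff(accrualTime) * accrualIntensity)
#   # 6 * 22 + 24 * 33 = 132 + 792 = 924, hence maxNumberOfSubjects = 924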
.cat("\n", consoleOutputEnabled = consoleOutputEnabled) .showCase(consoleOutputEnabled = consoleOutputEnabled) .cat("\n", consoleOutputEnabled = consoleOutputEnabled) } .cat("Details:\n\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getUserDefinedParameters(), "User defined parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getDefaultParameters(), "Default parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getGeneratedParameters(), "Generated parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled) } }, .getFormula = function() { s <- "" for (i in 1:length(accrualTime)) { if (i < length(accrualTime)) { s <- paste0(s, (round(accrualTime[i + 1], 4) - round(accrualTime[i], 4)), " * ", round(accrualIntensity[i], 4)) if (!absoluteAccrualIntensityEnabled && (!maxNumberOfSubjectsIsUserDefined || !endOfAccrualIsUserDefined)) { s <- paste0(s, " * c ") } if (i < length(accrualIntensity)) { s <- paste0(s, " + ") } } } return(s) }, .validateFormula = function() { if (is.na(maxNumberOfSubjects) || length(accrualTime) != length(accrualIntensity) + 1) { return(invisible()) } numberOfSubjects <- 0 for (i in 1:length(accrualTime)) { if (i < length(accrualTime)) { numberOfSubjects <- numberOfSubjects + (accrualTime[i + 1] - accrualTime[i]) * accrualIntensity[i] } } if (!isTRUE(all.equal(numberOfSubjects, maxNumberOfSubjects, tolerance = 1e-03)) && .isAbsoluteAccrualIntensity(accrualIntensity)) { stop(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "'maxNumberOfSubjects' (", maxNumberOfSubjects, ") disagrees with ", "the defined accrual time and intensity: ", .getFormula(), " = ", numberOfSubjects) } }, .showWarningIfCaseIsNotAllowd = function() { caseIsAllowed <- TRUE if (!endOfAccrualIsUserDefined && maxNumberOfSubjectsIsUserDefined && !absoluteAccrualIntensityEnabled) { caseIsAllowed <- FALSE } else if (!endOfAccrualIsUserDefined && !maxNumberOfSubjectsIsUserDefined && followUpTimeMustBeUserDefined && !absoluteAccrualIntensityEnabled) { caseIsAllowed <- FALSE } if (!caseIsAllowed) { warning("The specified accrual time and intensity can not be ", "supplemented automatically with the missing information; ", "therefore further calculations are not possible", call. 
= FALSE) } }, .showFormula = function(consoleOutputEnabled) { .cat("Formula:\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .cat(" ", consoleOutputEnabled = consoleOutputEnabled) .cat("maxNumberOfSubjects = ", consoleOutputEnabled = consoleOutputEnabled) if (!is.na(maxNumberOfSubjects)) { .cat(maxNumberOfSubjects, " = ", consoleOutputEnabled = consoleOutputEnabled) } .cat(.getFormula(), consoleOutputEnabled = consoleOutputEnabled) if (length(accrualTime) == length(accrualIntensity)) { .cat("(x - ", accrualTime[length(accrualTime)], ") * ", accrualIntensity[length(accrualIntensity)], consoleOutputEnabled = consoleOutputEnabled) if (!absoluteAccrualIntensityEnabled && (!maxNumberOfSubjectsIsUserDefined || !endOfAccrualIsUserDefined)) { .cat(" * c ", consoleOutputEnabled = consoleOutputEnabled) } .cat(", where 'x' is the unknown last accrual time", consoleOutputEnabled = consoleOutputEnabled) if (!absoluteAccrualIntensityEnabled && (!maxNumberOfSubjectsIsUserDefined || !endOfAccrualIsUserDefined)) { .cat(" and 'c' a constant factor", consoleOutputEnabled = consoleOutputEnabled) } } else if (!absoluteAccrualIntensityEnabled && (!maxNumberOfSubjectsIsUserDefined || !endOfAccrualIsUserDefined)) { .cat(", where 'c' is a constant factor", consoleOutputEnabled = consoleOutputEnabled) } .cat("\n", consoleOutputEnabled = consoleOutputEnabled) }, .showCase = function(consoleOutputEnabled = TRUE) { caseIsAllowed <- TRUE prefix <- " " # Case 1 # example: getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(22, 33), # maxNumberOfSubjects = 1000) if (endOfAccrualIsUserDefined && maxNumberOfSubjectsIsUserDefined && absoluteAccrualIntensityEnabled) { .cat("Case (#1):\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "End of accrual, absolute accrual intensity and 'maxNumberOfSubjects' are given, ", " 'followUpTime'** shall be calculated.\n", consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "Example: getAccrualTime(accrualTime = c(0, 6, 30), ", "accrualIntensity = c(22, 33), maxNumberOfSubjects = 924)\n", consoleOutputEnabled = consoleOutputEnabled) } # Case 2 # example: getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(0.22, 0.33), # maxNumberOfSubjects = 1000) else if (endOfAccrualIsUserDefined && maxNumberOfSubjectsIsUserDefined && !absoluteAccrualIntensityEnabled) { .cat("Case (#2):\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "End of accrual, relative accrual intensity and 'maxNumberOfSubjects' are given, ", "absolute accrual intensity* and 'followUpTime'** shall be calculated.\n", consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "Example: getAccrualTime(accrualTime = c(0, 6, 30), ", "accrualIntensity = c(0.22, 0.33), maxNumberOfSubjects = 1000)\n", consoleOutputEnabled = consoleOutputEnabled) } # Case 3 # example: getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(22, 33)) else if (endOfAccrualIsUserDefined && !maxNumberOfSubjectsIsUserDefined && absoluteAccrualIntensityEnabled) { .cat("Case (#3):\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "End of accrual and absolute accrual intensity are given, ", "'maxNumberOfSubjects'* and 'followUpTime'** shall be calculated.\n", consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "Example: getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(22, 33))\n", consoleOutputEnabled = consoleOutputEnabled) } # Case 4 # example: getAccrualTime(accrualTime = c(0, 6, 
30), accrualIntensity = c(0.22, 0.33)) else if (endOfAccrualIsUserDefined && !maxNumberOfSubjectsIsUserDefined && !absoluteAccrualIntensityEnabled) { .cat("Case (#4):\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "End of accrual, relative accrual intensity and 'followUpTime' are given, ", "absolute accrual intensity** and 'maxNumberOfSubjects'** shall be calculated.\n", consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "Example: getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(0.22, 0.33))\n", consoleOutputEnabled = consoleOutputEnabled) } # Case 5 # example: getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(22, 33), # maxNumberOfSubjects = 1000) else if (!endOfAccrualIsUserDefined && maxNumberOfSubjectsIsUserDefined && absoluteAccrualIntensityEnabled) { .cat("Case (#5):\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "'maxNumberOfSubjects' and absolute accrual intensity are given, ", "end of accrual* and 'followUpTime'** shall be calculated\n", consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "Example: getAccrualTime(accrualTime = c(0, 6), ", "accrualIntensity = c(22, 33), maxNumberOfSubjects = 1000)\n", consoleOutputEnabled = consoleOutputEnabled) } # Case 6 # example: getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(0.22, 0.33), # maxNumberOfSubjects = 1000) else if (!endOfAccrualIsUserDefined && maxNumberOfSubjectsIsUserDefined && !absoluteAccrualIntensityEnabled) { caseIsAllowed <- FALSE .cat("Case (#6):\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "'maxNumberOfSubjects' and relative accrual intensity are given, ", "absolute accrual intensity[x], end of accrual* and 'followUpTime'** shall be calculated\n", consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "Example: getAccrualTime(accrualTime = c(0, 6), ", "accrualIntensity = c(0.22, 0.33), maxNumberOfSubjects = 1000)\n", consoleOutputEnabled = consoleOutputEnabled) } # Case 7 # example: getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(22, 33)) else if (!endOfAccrualIsUserDefined && !maxNumberOfSubjectsIsUserDefined && followUpTimeMustBeUserDefined && absoluteAccrualIntensityEnabled) { .cat("Case (#7):\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "'followUpTime' and absolute accrual intensity are given, ", "end of accrual** and 'maxNumberOfSubjects'** shall be calculated\n", consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "Example: getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(22, 33))\n", consoleOutputEnabled = consoleOutputEnabled) } # Case 8 # example: getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(0.22, 0.33)) else if (!endOfAccrualIsUserDefined && !maxNumberOfSubjectsIsUserDefined && followUpTimeMustBeUserDefined && !absoluteAccrualIntensityEnabled) { caseIsAllowed <- FALSE .cat("Case (#8):\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "'followUpTime' and relative accrual intensity are given, ", "absolute accrual intensity[x], end of accrual and 'maxNumberOfSubjects' shall be calculated\n", consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "Example: getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(0.22, 0.33))\n", consoleOutputEnabled = consoleOutputEnabled) } .cat("\n", consoleOutputEnabled = consoleOutputEnabled) if (!caseIsAllowed) { .cat(prefix, "[x] Cannot be calculated.\n", consoleOutputEnabled = 
consoleOutputEnabled) } .cat(prefix, "(*) Can be calculated directly.\n", consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "(**) Cannot be calculated directly but with ", "'getSampleSizeSurvival' or 'getPowerSurvival'.\n", consoleOutputEnabled = consoleOutputEnabled) }, .validate = function() { # Case 6
if (!endOfAccrualIsUserDefined && maxNumberOfSubjectsIsUserDefined && !absoluteAccrualIntensityEnabled) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the calculation of 'followUpTime' for given 'maxNumberOfSubjects' ", "and relative accrual intensities (< 1) ", "can only be done if end of accrual is defined") } # Case 8
else if (!endOfAccrualIsUserDefined && !maxNumberOfSubjectsIsUserDefined && followUpTimeMustBeUserDefined && !absoluteAccrualIntensityEnabled) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the calculation of 'maxNumberOfSubjects' for given 'followUpTime' ", "and relative accrual intensities (< 1) ", "can only be done if end of accrual is defined") } }, .toString = function(startWithUpperCase = FALSE) { s <- "accrual time" return(ifelse(startWithUpperCase, .firstCharacterToUpperCase(s), s)) }, .getAccrualTimeWithoutLeadingZero = function() { if (length(accrualTime) <= 1) { return(NA_real_) } return(accrualTime[2:length(accrualTime)]) }, isAccrualTimeEnabled = function() { if (length(accrualTime) == 0) { return(FALSE) } if (length(accrualTime) == 1 && is.na(accrualTime)) { return(FALSE) } return(TRUE) }, .initFromList = function(accrualTimeList) { if (!is.list(accrualTimeList)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'accrualTime' must be a list") } if (length(accrualTimeList) == 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'accrualTime' must contain at least one entry") } if (.showWarnings && !all(is.na(accrualIntensity)) && (length(accrualIntensity) != 1 || accrualIntensity != C_ACCRUAL_INTENSITY_DEFAULT)) { warning("'accrualIntensity' (", .arrayToString(accrualIntensity), ") will be ignored because 'accrualTime' is a list", call. 
= FALSE) } accrualTime <<- numeric(0) accrualIntensity <<- numeric(0) timeRegions <- names(accrualTimeList) endOfAccrualIsUndefined <- FALSE accrualTime <<- c(accrualTime, 0) for (i in 1:length(timeRegions)) { timePeriod <- timeRegions[i] accrualTimeValue <- accrualTimeList[[timePeriod]] .assertIsSingleNumber(accrualTimeValue, paste0("accrualTime[", i, "]")) settings <- .validateTimePeriod(timePeriod, i = i, n = length(timeRegions), accrualTimeMode = TRUE) timePeriod <- settings$timePeriod endOfAccrualIsUndefined <- settings$endOfAccrualIsUndefined if (i < length(timeRegions)) { parts <- strsplit(timePeriod, "- *(< *)?", perl = TRUE)[[1]] if (length(parts) != 2) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all regions (", timePeriod, ") must have the format ", "\"time_1 - = 2 && length(accrualTime) == length(accrualIntensity) + 1 && !any(is.na(accrualTime)) && !any(is.na(accrualIntensity))) { len <- length(accrualIntensity) accrualIntensityAbsolute <- maxNumberOfSubjects / sum((accrualTime[2:(len + 1)] - accrualTime[1:len]) * accrualIntensity) * accrualIntensity if (!isTRUE(all.equal(accrualIntensityAbsolute, accrualIntensity, tolerance = 1e-06)) && !isTRUE(all.equal(accrualIntensityAbsolute, 0, tolerance = 1e-06))) { .validateAccrualTimeAndIntensity() if (.isAbsoluteAccrualIntensity(accrualIntensity) && .getParameterType("maxNumberOfSubjects") == C_PARAM_USER_DEFINED) { stop(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "'maxNumberOfSubjects' (", maxNumberOfSubjects, ") disagrees with ", "the defined accrual time and intensity: ", .getFormula(), " = ", .getSampleSize()) } accrualIntensityRelative <<- accrualIntensity accrualIntensity <<- accrualIntensityAbsolute .setParameterType("accrualIntensity", C_PARAM_GENERATED) .setParameterType("accrualIntensityRelative", C_PARAM_USER_DEFINED) } } }, .isNoPiecewiseAccrualTime = function(accrualTimeArg) { if (length(accrualTimeArg) == 0 || any(is.na(accrualTimeArg)) || !all(is.numeric(accrualTimeArg))) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'accrualTimeArg' must a be valid numeric vector") } if (length(accrualTimeArg) == 1) { return(TRUE) } if (length(accrualTimeArg) == 2 && accrualTimeArg[1] == 0) { return(TRUE) } return(FALSE) }, .init = function(accrualTimeArg) { if (length(accrualTimeArg) == 0) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'accrualTime' must be defined") } if (length(accrualTimeArg) == 1 && is.numeric(accrualTimeArg) && is.na(accrualTimeArg)) { accrualTimeArg <- C_ACCRUAL_TIME_DEFAULT } calculateLastAccrualTimeEnabled <- FALSE if (is.list(accrualTimeArg)) { endOfAccrualIsUndefined <- .initFromList(accrualTimeArg) calculateLastAccrualTimeEnabled <- endOfAccrualIsUndefined && !is.null(maxNumberOfSubjects) && length(maxNumberOfSubjects) == 1 && !is.na(maxNumberOfSubjects) } else if (is.numeric(accrualTimeArg)) { .assertIsNumericVector(accrualTimeArg, "accrualTime") if (length(accrualIntensity) > 1) { .assertIsNumericVector(accrualIntensity, "accrualIntensity") } if (.isNoPiecewiseAccrualTime(accrualTimeArg) && (length(accrualIntensity) == 0 || is.null(accrualIntensity) || all(is.na(accrualIntensity)) || all(accrualIntensity == C_ACCRUAL_INTENSITY_DEFAULT))) { accrualTimeArg <- accrualTimeArg[length(accrualTimeArg)] accrualTime <<- c(0L, accrualTimeArg) .setParameterType("accrualTime", ifelse( identical(as.integer(accrualTime), C_ACCRUAL_TIME_DEFAULT), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) accrualIntensity <<- C_ACCRUAL_INTENSITY_DEFAULT .setParameterType("accrualIntensity", C_PARAM_DEFAULT_VALUE) 
.setParameterType("maxNumberOfSubjects", ifelse(length(maxNumberOfSubjects) == 1 && is.na(maxNumberOfSubjects), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) endOfAccrualIsUserDefined <<- length(accrualTime) == length(accrualIntensity) + 1 maxNumberOfSubjectsIsUserDefined <<- .getParameterType("maxNumberOfSubjects") == C_PARAM_USER_DEFINED followUpTimeMustBeUserDefined <<- !endOfAccrualIsUserDefined && !maxNumberOfSubjectsIsUserDefined absoluteAccrualIntensityEnabled <<- FALSE if (maxNumberOfSubjectsIsUserDefined) { accrualIntensity <<- maxNumberOfSubjects / accrualTime[length(accrualTime)] .setParameterType("accrualIntensity", C_PARAM_GENERATED) } return(invisible()) } accrualTime <<- accrualTimeArg if (length(accrualTime) == 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'accrualTime' must contain at least one time value") } if (accrualTime[1] != 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the first value of 'accrualTime' (", .arrayToString(accrualTime), ") must be 0") } .setParameterType("accrualTime", ifelse( identical(as.integer(accrualTime), C_ACCRUAL_TIME_DEFAULT), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) .setParameterType("accrualIntensity", C_PARAM_USER_DEFINED) } else { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'accrualTime' must be a list or a numeric vector") } absoluteAccrualIntensityEnabled <<- .isAbsoluteAccrualIntensity(accrualIntensity) if (is.null(maxNumberOfSubjects) || length(maxNumberOfSubjects) == 0 || any(is.na(maxNumberOfSubjects))) { if (length(accrualTime) != length(accrualIntensity) + 1 || !absoluteAccrualIntensityEnabled) { maxNumberOfSubjectsCanBeCalculatedDirectly <<- FALSE } .setParameterType("maxNumberOfSubjects", C_PARAM_NOT_APPLICABLE) } else { if (!(length(accrualTime) %in% c(length(accrualIntensity), length(accrualIntensity) + 1))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "length of 'accrualTime' (", length(accrualTime), ") must be equal to length of 'accrualIntensity' if the last 'accrualTime' ", "shall be calculated ", "based on 'maxNumberOfSubjects' or length of 'accrualIntensity' (", length(accrualIntensity), ") + 1 otherwise") } if (length(accrualTime) == length(accrualIntensity)) { calculateLastAccrualTimeEnabled <- TRUE } .setParameterType("maxNumberOfSubjects", C_PARAM_USER_DEFINED) } endOfAccrualIsUserDefined <<- length(accrualTime) == length(accrualIntensity) + 1 if (calculateLastAccrualTimeEnabled) { .calculateRemainingTime() } else if (maxNumberOfSubjectsCanBeCalculatedDirectly) { if (length(accrualTime) == 1) { if (length(maxNumberOfSubjects) > 0 && !is.na(maxNumberOfSubjects) && maxNumberOfSubjects > 0 && maxNumberOfSubjects < accrualIntensity[1]) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'maxNumberOfSubjects' (", maxNumberOfSubjects, ") ", "must be >= ", accrualIntensity[1], " ('accrualIntensity')") } remainingTime <<- accrualTime .setParameterType("remainingTime", C_PARAM_USER_DEFINED) } else if (length(accrualTime) > 1) { sampleSize <- .getSampleSize() if (length(maxNumberOfSubjects) > 0 && !is.na(maxNumberOfSubjects) && maxNumberOfSubjects > 0 && maxNumberOfSubjects < sampleSize) { if (length(accrualIntensity) == 1) { .setParameterType("maxNumberOfSubjects", C_PARAM_USER_DEFINED) accrualTime <<- 0 .calculateRemainingTime() } else { if (length(accrualTime) == length(accrualIntensity) + 1 && .isAbsoluteAccrualIntensity(accrualIntensity)) { stop(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "'maxNumberOfSubjects' (", maxNumberOfSubjects, ") disagrees with ", "the defined accrual time and intensity: ", .getFormula(), " = ", sampleSize) } 
else { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'maxNumberOfSubjects' (", maxNumberOfSubjects, ") ", "must be >= ", sampleSize) } } } else { if ((length(maxNumberOfSubjects) != 1 || is.na(maxNumberOfSubjects)) && .isAbsoluteAccrualIntensity(accrualIntensity)) { maxNumberOfSubjects <<- sampleSize .setParameterType("maxNumberOfSubjects", C_PARAM_GENERATED) } remainingTime <<- accrualTime[length(accrualTime)] - accrualTime[length(accrualTime) - 1] .setParameterType("remainingTime", C_PARAM_GENERATED) } } } .validateInitialization() maxNumberOfSubjectsIsUserDefined <<- .getParameterType("maxNumberOfSubjects") == C_PARAM_USER_DEFINED followUpTimeMustBeUserDefined <<- !endOfAccrualIsUserDefined && !maxNumberOfSubjectsIsUserDefined }, .getSampleSize = function() { if (length(accrualTime) < 2) { return(0) } sampleSize <- 0 for (i in 2:length(accrualTime)) { time <- accrualTime[i] - accrualTime[i - 1] sampleSize <- sampleSize + time * accrualIntensity[i - 1] } return(sampleSize) }, .getValuesAfterDecimalPoint = function(x) { values <- c() for (value in x) { baseLevel <- value - floor(value) if (baseLevel == 0) { baseLevel <- 1 } values <- c(values, baseLevel) } return(values) }, .getBaseLevel = function(x) { return(min(.getValuesAfterDecimalPoint(x[x > 0]))) }, .calcSampleSize = function() { if (length(accrualTime) <= 1) { return(0) } sampleSize <- 0 for (i in 2:length(accrualTime)) { time <- accrualTime[i] - accrualTime[i - 1] sampleSize <- sampleSize + time * accrualIntensity[i - 1] if (sampleSize >= maxNumberOfSubjects && length(accrualTime) == length(accrualIntensity)) { i2 <- i if (length(accrualTime) == length(accrualIntensity) + 1) { i2 <- i - 1 } if (.showWarnings) { n1 <- length(accrualTime) - i + 1 if (length(accrualTime) == length(accrualIntensity)) { n1 <- n1 - 1 } if (n1 == 1) { warning("Last accrual time value (", accrualTime[length(accrualTime)], ") ignored", call. = FALSE) } else if (n1 > 1) { warning("Last ", n1, " accrual time values (", .arrayToString(accrualTime[(length(accrualTime) - n1 + 1):length(accrualTime)]), ") ignored", call. = FALSE) } n2 <- length(accrualIntensity) - i2 + 1 if (n2 == 1) { warning("Last accrual intensity value (", accrualIntensity[length(accrualIntensity)], ") ignored", call. = FALSE) } else if (n2 > 1) { warning("Last ", n2, " accrual intensity values (", .arrayToString(accrualIntensity[i2:length(accrualIntensity)]), ") ignored", call. 
= FALSE) } } accrualTime <<- accrualTime[1:(i - 1)] accrualIntensity <<- accrualIntensity[1:(i2 - 1)] sampleSize <- 0 if (length(accrualTime) > 1) { sampleSize <- .getSampleSize() } return(sampleSize) } } return(sampleSize) }, .calculateRemainingTime = function() { .assertIsValidMaxNumberOfSubjects(maxNumberOfSubjects) if (length(accrualIntensity) == 1) { lastAccrualIntensity <- accrualIntensity[1] remainingSubjects <- maxNumberOfSubjects } else { sampleSize <- .calcSampleSize() remainingSubjects <- maxNumberOfSubjects - sampleSize if (remainingSubjects < 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'maxNumberOfSubjects' (", maxNumberOfSubjects, ") ", "is too small for the defined accrual time (minimum = ", sampleSize, ")") } } lastAccrualIntensity <- accrualIntensity[length(accrualIntensity)] remainingTime <<- remainingSubjects / lastAccrualIntensity .setParameterType("remainingTime", C_PARAM_GENERATED) accrualTime <<- c(accrualTime, accrualTime[length(accrualTime)] + remainingTime) .setParameterType("accrualTime", C_PARAM_GENERATED) if (any(accrualTime < 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'maxNumberOfSubjects' (", maxNumberOfSubjects, ") ", "is too small for the defined accrual time") } }, .validateAccrualTimeAndIntensity = function() { if ((length(accrualTime) >= 2 && any(accrualTime[2:length(accrualTime)] < 0))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'accrualTime' (", .arrayToString(accrualTime), ") must be >= 0") } .assertValuesAreStrictlyIncreasing(accrualTime, "accrualTime") if ((length(accrualTime) > 1) && any(accrualIntensity < 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'accrualIntensity' (", .arrayToString(accrualIntensity), ") must be >= 0") } if (length(accrualIntensity) == 1 && !is.na(accrualIntensity) && accrualIntensity == 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "at least one 'accrualIntensity' value must be > 0") } if (length(accrualIntensity) > 0 && accrualIntensity[1] == 0) { warning("It makes no sense to start 'accrualIntensity' (", .arrayToString(accrualIntensity), ") with 0") } if (sum(.isAbsoluteAccrualIntensity(accrualIntensity)) > 0 && sum(.isRelativeAccrualIntensity(accrualIntensity)) > 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the values of 'accrualIntensity' (", .arrayToString(accrualIntensity), ") ", "must be exclusively absolute or relative, i.e. all (= 0 || >= 1) or all (< 1)") } }, .validateInitialization = function() { .validateAccrualTimeAndIntensity() piecewiseAccrualEnabled <<- !.isNoPiecewiseAccrualTime(accrualTime) } ) ) rpact/R/class_design.R0000644000176200001440000007171613574122752014361 0ustar liggesusers###################################################################################### # # # -- Trial design classes -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 28-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### #' @include f_core_constants.R #' @include f_core_plot.R NULL #' #' @name TrialDesign #' #' @title #' Basic Trial Design #' #' @description #' Basic class for trial designs.
#' #' @details #' \code{TrialDesign} is the basic class for #' \itemize{ #' \item \code{\link{TrialDesignFisher}}, #' \item \code{\link{TrialDesignGroupSequential}}, and #' \item \code{\link{TrialDesignInverseNormal}}. #' } #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include f_core_constants.R #' @include f_core_plot.R #' #' @keywords internal #' #' @importFrom methods new #' TrialDesign <- setRefClass("TrialDesign", contains = "ParameterSet", fields = list( kMax = "integer", alpha = "numeric", stages = "integer", informationRates = "numeric", userAlphaSpending = "numeric", criticalValues = "numeric", stageLevels = "numeric", alphaSpent = "numeric", bindingFutility = "logical", tolerance = "numeric" ), methods = list( initialize = function(..., alpha = NA_real_, informationRates = NA_real_, userAlphaSpending = NA_real_, criticalValues = NA_real_, stageLevels = NA_real_, alphaSpent = NA_real_, bindingFutility = NA, tolerance = 1e-06 # C_ANALYSIS_TOLERANCE_DEFAULT ) { callSuper(..., alpha = alpha, informationRates = informationRates, userAlphaSpending = userAlphaSpending, criticalValues = criticalValues, stageLevels = stageLevels, alphaSpent = alphaSpent, bindingFutility = bindingFutility, tolerance = tolerance) if (inherits(.self, "TrialDesignConditionalDunnett")) { .parameterNames <<- C_PARAMETER_NAMES } else { .parameterNames <<- .getSubListByNames(.getParameterNames(design = .self), c( "stages", "kMax", "alpha", "informationRates", "userAlphaSpending", "criticalValues", "stageLevels", "alphaSpent", "bindingFutility", "tolerance" )) } .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS .initStages() }, show = function(showType = 1, digits = NA_integer_) { .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE) }, .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) { 'Method for automatically printing trial design objects' .resetCat() if (showType == 3) { .createSummary(.self, digits = digits)$.show(showType = 1, digits = digits, consoleOutputEnabled = consoleOutputEnabled) } else if (showType == 2) { .cat("Technical summary of the trial design object of class ", methods::classLabel(class(.self)), ":\n\n", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showAllParameters(consoleOutputEnabled = consoleOutputEnabled) .showParameterTypeDescription(consoleOutputEnabled = consoleOutputEnabled) } else { .cat("Design parameters and output of ", .toString(), ":\n\n", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getUserDefinedParameters(), "User defined parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getDerivedParameters(), "Derived from user defined parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getDefaultParameters(), "Default parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getGeneratedParameters(), "Output", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled) } }, .toString = function(startWithUpperCase = FALSE) { s <- "unknown trial design" if (.isTrialDesignGroupSequential(.self)) { s <- "group sequential design" } else if (.isTrialDesignInverseNormal(.self)) { s <- "inverse normal design" } else if (.isTrialDesignFisher(.self)) { s <- "Fisher design" } else if 
(.isTrialDesignConditionalDunnett(.self)) { s <- "conditional Dunnett test design" } return(ifelse(startWithUpperCase, .firstCharacterToUpperCase(s), s)) }, .initStages = function() { if (length(kMax) == 1 && !is.na(kMax) && kMax > 0) { stages <<- c(1L:kMax) if (kMax == C_KMAX_DEFAULT) { .setParameterType("stages", C_PARAM_DEFAULT_VALUE) } else { .setParameterType("stages", C_PARAM_USER_DEFINED) } } }, .setParameterType = function(parameterName, parameterType) { parameterType <- callSuper(parameterName = parameterName, parameterType = parameterType) if (parameterName == "futilityBounds" && !bindingFutility) { .parameterNames$futilityBounds <<- C_PARAMETER_NAMES[["futilityBoundsNonBinding"]] } invisible(parameterType) }, .isTrialDesignFisher = function(design = .self) { return(class(design) == C_CLASS_NAME_TRIAL_DESIGN_FISHER) } ) ) #' #' @name TrialDesignCharacteristics #' #' @title #' Trial Design Characteristics #' #' @description #' Class for trial design characteristics. #' #' @details #' \code{TrialDesignCharacteristics} contains all fields required to collect the characteristics of a design. #' This object should not be created directly; use \code{getDesignCharacteristics} #' with suitable arguments to create it. #' #' @seealso \code{\link{getDesignCharacteristics}} for getting the design characteristics. #' #' @include class_core_parameter_set.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' TrialDesignCharacteristics <- setRefClass("TrialDesignCharacteristics", contains = "ParameterSet", fields = list( .design = "TrialDesign", .probs = "matrix", nFixed = "numeric", shift = "numeric", inflationFactor = "numeric", stages = "integer", information = "numeric", power = "numeric", rejectionProbabilities = "numeric", # efficacy probabilities futilityProbabilities = "numeric", averageSampleNumber1 = "numeric", averageSampleNumber01 = "numeric", averageSampleNumber0 = "numeric" ), methods = list( initialize = function(design, ...) { callSuper(.design = design, ...) 
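# The display metadata (parameter labels and format functions) is taken over
# from the design to which these characteristics belong.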
.parameterNames <<- .getParameterNames(design) .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS }, show = function(showType = 1, digits = NA_integer_) { .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE) }, .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) { 'Method for automatically printing trial design characteristics objects' .resetCat() if (showType == 2) { .cat("Technical summary of the design characteristics object of class ", methods::classLabel(class(.self)), ":\n", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showAllParameters(consoleOutputEnabled = consoleOutputEnabled) .showParameterTypeDescription(consoleOutputEnabled = consoleOutputEnabled) } else { .showParametersOfOneGroup(.getGeneratedParameters(), title = .toString(startWithUpperCase = TRUE), orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled) } }, .initStages = function() { if (!is.na(.design$kMax) && .design$kMax > 0) { stages <<- c(1L:.design$kMax) if (.design$kMax == C_KMAX_DEFAULT) { .setParameterType("stages", C_PARAM_DEFAULT_VALUE) } else { .setParameterType("stages", C_PARAM_USER_DEFINED) } } }, .getUserDefinedParameters = function() { return("design") }, .toString = function(startWithUpperCase = FALSE) { return(paste(.design$.toString(startWithUpperCase = startWithUpperCase), "characteristics")) } ) ) #' #' @name TrialDesignCharacteristics_as.data.frame #' #' @title #' Coerce TrialDesignCharacteristics to a Data Frame #' #' @description #' Returns the \code{TrialDesignCharacteristics} as data frame. #' #' @param niceColumnNamesEnabled logical. If \code{TRUE}, nice looking names will be used; #' syntactic names otherwise (see \code{\link[base]{make.names}}). #' @param includeAllParameters logical. If \code{TRUE}, all parameters will be included; #' a meaningful parameter selection otherwise. #' #' @details #' Each element of the \code{TrialDesignCharacteristics} is converted to a column in the data frame. #' #' @export #' #' @keywords internal #' as.data.frame.TrialDesignCharacteristics <- function(x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, ...) { if (x$.design$kMax > 1) { parameterNamesToBeExcluded = c("nFixed", "shift") } else { parameterNamesToBeExcluded = c("inflationFactor") } return(x$.getAsDataFrame(parameterNames = parameterNamesToBeExcluded, niceColumnNamesEnabled = niceColumnNamesEnabled, includeAllParameters = includeAllParameters, handleParameterNamesAsToBeExcluded = TRUE, tableColumnNames = .getTableColumnNames(design = x$.design))) } #' #' @name TrialDesignFisher #' #' @title #' Fisher Design #' #' @description #' Trial design for Fisher's combination test. #' #' @details #' This object should not be created directly; use \code{\link{getDesignFisher}} #' with suitable arguments to create a Fisher design. #' #' @seealso \code{\link{getDesignFisher}} for creating a Fisher design. 
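#'
#' @examples
#' # Minimal sketch (hedged): a Fisher combination test design is created via
#' # the exported constructor, not by instantiating this class directly;
#' # the argument values below are illustrative only.
#' \dontrun{
#' design <- getDesignFisher(kMax = 2, alpha = 0.025)
#' }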
#' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' TrialDesignFisher <- setRefClass(C_CLASS_NAME_TRIAL_DESIGN_FISHER, contains = "TrialDesign", fields = list( method = "character", alpha0Vec = "numeric", scale = "numeric", nonStochasticCurtailment = "logical", sided = "integer", simAlpha = "numeric", iterations = "numeric", seed = "numeric" ), methods = list( initialize = function(..., method = NA_character_, alpha0Vec = NA_real_, scale = NA_real_, nonStochasticCurtailment = FALSE, sided = as.integer(1), simAlpha = NA_real_, iterations = 0, seed = NA_real_, tolerance = C_ANALYSIS_TOLERANCE_FISHER_DEFAULT) { callSuper(..., method = method, alpha0Vec = alpha0Vec, scale = scale, nonStochasticCurtailment = nonStochasticCurtailment, sided = sided, simAlpha = simAlpha, iterations = iterations, seed = seed, tolerance = tolerance ) .parameterNames <<- c(.parameterNames, .getSubListByNames( .getParameterNames(design = .self), c( "method", "alpha0Vec", "scale", "nonStochasticCurtailment", "sided", "simAlpha" ))) .parameterFormatFunctions$criticalValues <<- "formatFisherCriticalValues" .initParameterTypes() }, hasChanged = function(kMax, alpha, sided, method, informationRates, alpha0Vec, userAlphaSpending, bindingFutility) { informationRatesTemp <- informationRates if (any(is.na(informationRatesTemp))) { informationRatesTemp <- (1:kMax) / kMax } alpha0VecTemp <- alpha0Vec[1:(kMax - 1)] if (any(is.na(alpha0VecTemp))) { alpha0VecTemp <- rep(C_FUTILITY_BOUNDS_DEFAULT, kMax - 1) } if (!identical(kMax, .self$kMax)) return(TRUE) if (!identical(alpha, .self$alpha)) return(TRUE) if (!identical(sided, .self$sided)) return(TRUE) if (!identical(method, .self$method)) return(TRUE) if (!identical(informationRatesTemp, .self$informationRates)) return(TRUE) if (!identical(alpha0VecTemp, .self$alpha0Vec)) return(TRUE) if (!identical(userAlphaSpending, .self$userAlphaSpending)) return(TRUE) if (!identical(bindingFutility, .self$bindingFutility)) return(TRUE) return(FALSE) }, # Defines the order of the parameter output .getParametersToShow = function() { return(c( "method", "kMax", "stages", "informationRates", "alpha", "alpha0Vec", "bindingFutility", "sided", "tolerance", "iterations", "seed", "alphaSpent", "userAlphaSpending", "criticalValues", "stageLevels", "scale", "simAlpha", "nonStochasticCurtailment" )) } ) ) #' #' @name TrialDesignInverseNormal #' #' @title #' Inverse Normal Design #' #' @description #' Trial design for inverse normal method. #' #' @details #' This object should not be created directly; use \code{\link{getDesignInverseNormal}} #' with suitable arguments to create an inverse normal design. #' #' @seealso \code{\link{getDesignInverseNormal}} for creating an inverse normal design.
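#'
#' @examples
#' # Minimal sketch (hedged, argument values are illustrative): use the
#' # exported constructor rather than this class directly.
#' \dontrun{
#' design <- getDesignInverseNormal(kMax = 3, alpha = 0.025)
#' }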
#' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' TrialDesignInverseNormal <- setRefClass(C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL, contains = "TrialDesign", fields = list( typeOfDesign = "character", beta = "numeric", deltaWT = "numeric", futilityBounds = "numeric", gammaA = "numeric", gammaB = "numeric", optimizationCriterion = "character", sided = "integer", betaSpent = "numeric", typeBetaSpending = "character", userBetaSpending = "numeric", power = "numeric", twoSidedPower = "logical", constantBoundsHP = "numeric" ), methods = list( initialize = function(..., beta = C_BETA_DEFAULT, betaSpent = NA_real_, sided = 1L, futilityBounds = NA_real_, typeOfDesign = C_DEFAULT_TYPE_OF_DESIGN, deltaWT = 0.0, optimizationCriterion = C_OPTIMIZATION_CRITERION_DEFAULT, gammaA = 1.0, gammaB = 1.0, typeBetaSpending = C_TYPE_OF_DESIGN_BS_NONE, userBetaSpending = NA_real_, power = NA_real_, twoSidedPower = C_TWO_SIDED_POWER_DEFAULT, constantBoundsHP = NA_real_ ) { callSuper(..., beta = beta, betaSpent = betaSpent, sided = sided, futilityBounds = futilityBounds, typeOfDesign = typeOfDesign, deltaWT = deltaWT, optimizationCriterion = optimizationCriterion, gammaA = gammaA, gammaB = gammaB, typeBetaSpending = typeBetaSpending, userBetaSpending = userBetaSpending, power = power, twoSidedPower = twoSidedPower, constantBoundsHP = constantBoundsHP ) .parameterNames <<- c(.parameterNames, .getSubListByNames( .getParameterNames(design = .self), c( "beta", "betaSpent", "sided", "futilityBounds", "typeOfDesign", "deltaWT", "optimizationCriterion", "gammaA", "gammaB", "typeBetaSpending", "userBetaSpending", "power", "twoSidedPower", "constantBoundsHP" ))) .parameterFormatFunctions$criticalValues <<- "formatGroupSequentialCriticalValues" .initParameterTypes() }, .pasteComparisonResult = function(name, newValue, oldValue) { return(paste0(name, "_new = ", .arrayToString(newValue), " (", class(newValue), "), ", name, "_old = ", .arrayToString(oldValue), " (", class(oldValue), ")")) }, hasChanged = function(kMax, alpha, beta, sided, typeOfDesign, deltaWT, informationRates, futilityBounds, optimizationCriterion, typeBetaSpending, gammaA, gammaB, bindingFutility, userAlphaSpending, userBetaSpending, twoSidedPower, constantBoundsHP) { informationRatesTemp <- informationRates if (any(is.na(informationRatesTemp))) { informationRatesTemp <- (1:kMax) / kMax } futilityBoundsTemp <- futilityBounds[1:(kMax - 1)] if (any(is.na(futilityBoundsTemp))) { futilityBoundsTemp <- rep(C_FUTILITY_BOUNDS_DEFAULT, kMax - 1) } if (!identical(kMax, .self$kMax)) return(.pasteComparisonResult("kMax", kMax, .self$kMax)) if (!identical(alpha, .self$alpha)) return(.pasteComparisonResult("alpha", alpha, .self$alpha)) if (!identical(beta, .self$beta)) return(.pasteComparisonResult("beta", beta, .self$beta)) if (!identical(sided, .self$sided)) return(.pasteComparisonResult("sided", sided, .self$sided)) if (!identical(typeOfDesign, .self$typeOfDesign)) { return(.pasteComparisonResult("typeOfDesign", typeOfDesign, .self$typeOfDesign)) } if (typeOfDesign == C_TYPE_OF_DESIGN_WT) { if (!identical(deltaWT, .self$deltaWT)) { return(.pasteComparisonResult("deltaWT", deltaWT, .self$deltaWT)) } } if (!identical(informationRatesTemp, .self$informationRates)) { return(.pasteComparisonResult("informationRates", informationRatesTemp, .self$informationRates)) } if (!grepl("^as.*", typeOfDesign)) { if (!identical(futilityBoundsTemp, 
.self$futilityBounds)) { return(.pasteComparisonResult("futilityBounds", futilityBoundsTemp, .self$futilityBounds)) } } if (!identical(optimizationCriterion, .self$optimizationCriterion)) { return(.pasteComparisonResult("optimizationCriterion", optimizationCriterion, .self$optimizationCriterion)) } if (!identical(typeBetaSpending, .self$typeBetaSpending)) { return(.pasteComparisonResult("typeBetaSpending", typeBetaSpending, .self$typeBetaSpending)) } if (!identical(gammaA, .self$gammaA)) { return(.pasteComparisonResult("gammaA", gammaA, .self$gammaA)) } if (!identical(gammaB, .self$gammaB)) { return(.pasteComparisonResult("gammaB", gammaB, .self$gammaB)) } if (any(na.omit(futilityBounds) > -6) && !identical(bindingFutility, .self$bindingFutility)) { return(.pasteComparisonResult("bindingFutility", bindingFutility, .self$bindingFutility)) } if (!identical(userAlphaSpending, .self$userAlphaSpending)) { return(.pasteComparisonResult("userAlphaSpending", userAlphaSpending, .self$userAlphaSpending)) } if (!identical(userBetaSpending, .self$userBetaSpending)) { return(.pasteComparisonResult("userBetaSpending", userBetaSpending, .self$userBetaSpending)) } if (!identical(twoSidedPower, .self$twoSidedPower)) { return(.pasteComparisonResult("twoSidedPower", twoSidedPower, .self$twoSidedPower)) } if (typeOfDesign == C_TYPE_OF_DESIGN_HP) { if (!identical(constantBoundsHP, .self$constantBoundsHP)) { return(.pasteComparisonResult("constantBoundsHP", constantBoundsHP, .self$constantBoundsHP)) } } return(FALSE) }, # Defines the order of the parameter output .getParametersToShow = function() { return(c( "typeOfDesign", "kMax", "stages", "informationRates", "alpha", "beta", "power", "twoSidedPower", "deltaWT", "futilityBounds", "bindingFutility", "constantBoundsHP", "gammaA", "gammaB", "optimizationCriterion", "sided", "tolerance", "alphaSpent", "userAlphaSpending", "betaSpent", "typeBetaSpending", "userBetaSpending", "criticalValues", "stageLevels" )) } ) ) #' #' @name TrialDesignGroupSequential #' #' @title #' Group Sequential Design #' #' @description #' Trial design for group sequential design. #' #' @details #' This object should not be created directly; use \code{\link{getDesignGroupSequential}} #' with suitable arguments to create a group sequential design. #' #' @seealso \code{\link{getDesignGroupSequential}} for creating a group sequential design. #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' TrialDesignGroupSequential <- setRefClass( C_CLASS_NAME_TRIAL_DESIGN_GROUP_SEQUENTIAL, contains = C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL, methods = list( initialize = function(...) { callSuper(...) .parameterFormatFunctions$criticalValues <<- "formatGroupSequentialCriticalValues" }, show = function(showType = 1, digits = NA_integer_) { 'Method for automatically printing trial design objects' callSuper(showType = showType, digits = digits) } ) ) #' #' @name TrialDesignConditionalDunnett #' #' @title #' Conditional Dunnett Design #' #' @description #' Trial design for conditional Dunnett tests. #' #' @details #' This object should not be created directly. # This object should not be created directly; use \code{\link{getDesignConditionalDunnett}} # with suitable arguments to create a conditional Dunnett test design. 
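# A minimal sketch (hedged): the constructor defined below is currently not
# exported, so it could only be reached via the internal namespace; the
# argument values match its defaults and are illustrative only.
# design <- rpact:::getDesignConditionalDunnett(alpha = 0.025,
#     informationAtInterim = 0.5, secondStageConditioning = TRUE)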
#' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' # @seealso \code{\link{getDesignConditionalDunnett}} for creating a conditional Dunnett test design. TrialDesignConditionalDunnett <- setRefClass( C_CLASS_NAME_TRIAL_DESIGN_CONDITIONAL_DUNNETT, contains = "TrialDesign", fields = list( informationAtInterim = "numeric", secondStageConditioning = "logical", sided = "integer" ), methods = list( initialize = function(...) { callSuper(...) notApplicableParameters <- c( "kMax", "stages", "informationRates", "userAlphaSpending", "criticalValues", "stageLevels", "alphaSpent", "bindingFutility", "tolerance" ) for (notApplicableParameter in notApplicableParameters) { .setParameterType(notApplicableParameter, C_PARAM_NOT_APPLICABLE) } .setParameterType("alpha", ifelse( identical(alpha, C_ALPHA_DEFAULT), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) .setParameterType("informationAtInterim", ifelse( identical(informationAtInterim, 0.5), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) .setParameterType("secondStageConditioning", ifelse( identical(secondStageConditioning, TRUE), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) kMax <<- 2L sided <<- 1L }, show = function(showType = 1, digits = NA_integer_) { 'Method for automatically printing trial design objects' callSuper(showType = showType, digits = digits) } ) ) getDesignConditionalDunnett <- function(alpha = C_ALPHA_DEFAULT, informationAtInterim = 0.5, secondStageConditioning = TRUE) { .assertIsValidAlpha(alpha) .assertIsNumericVector(informationAtInterim, "informationAtInterim") return(TrialDesignConditionalDunnett(alpha = alpha, informationAtInterim = informationAtInterim, secondStageConditioning = secondStageConditioning)) } #' #' @title #' Trial Design Plotting #' #' @description #' Plots a trial design. #' #' @details #' Generic function to plot a trial design. #' #' @param x The trial design, obtained from \cr #' \code{\link{getDesignGroupSequential}}, \cr #' \code{\link{getDesignInverseNormal}} or \cr #' \code{\link{getDesignFisher}}. #' @param y Not available for this kind of plot (is only defined to be compatible to the generic plot function). #' @param main The main title. #' @param xlab The x-axis label. #' @param ylab The y-axis label. #' @param palette The palette, default is \code{"Set1"}. #' @param theta A vector of theta values. #' @param nMax The maximum sample size. #' @param plotPointsEnabled If \code{TRUE}, additional points will be plotted. #' @param showSource If \code{TRUE}, the parameter names of the object will #' be printed which were used to create the plot; that may be, e.g., #' useful to check the values or to create own plots with \code{\link[graphics]{plot}}. #' @param legendPosition The position of the legend. #' By default (\code{NA_integer_}) the algorithm tries to find a suitable position. #' Choose one of the following values to specify the position manually: #' \itemize{ #' \item \code{-1}: no legend will be shown #' \item \code{NA}: the algorithm tries to find a suitable position #' \item \code{0}: legend position outside plot #' \item \code{1}: legend position left top #' \item \code{2}: legend position left center #' \item \code{3}: legend position left bottom #' \item \code{4}: legend position right top #' \item \code{5}: legend position right center #' \item \code{6}: legend position right bottom #' } #' @param type The plot type (default = \code{1}). 
The following plot types are available: #' \itemize{ #' \item \code{1}: creates a 'Boundaries' plot #' \item \code{3}: creates a 'Stage Levels' plot #' \item \code{4}: creates a 'Type One Error Spending' plot #' \item \code{5}: creates a 'Power and Early Stopping' plot #' \item \code{6}: creates an 'Average Sample Size and Power / Early Stop' plot #' \item \code{7}: creates a 'Power' plot #' \item \code{8}: creates an 'Early Stopping' plot #' \item \code{9}: creates an 'Average Sample Size' plot #' } #' @param ... Optional \code{ggplot2} arguments. #' #' @details #' Generic function to plot a trial design. #' #' @seealso \code{\link{plot.TrialDesignSet}} to compare different designs or design parameters visually. #' #' @return #' A \code{ggplot2} object. #' #' @export #' #' @examples #' #' design <- getDesignInverseNormal(kMax = 3, alpha = 0.025, #' typeOfDesign = "asKD", gammaA = 2, #' informationRates = c(0.2, 0.7, 1), #' typeBetaSpending = "bsOF") #' #' if (require(ggplot2)) { #' plot(design) # default: type = 1 #' } #' plot.TrialDesign = function(x, y, main = NA_character_, xlab = NA_character_, ylab = NA_character_, type = 1, palette = "Set1", theta = seq(-1, 1, 0.01), nMax = NA_integer_, plotPointsEnabled = NA, legendPosition = NA_integer_, showSource = FALSE, ...) { fCall = match.call(expand.dots = FALSE) designName <- as.character(fCall$x)[1] .assertGgplotIsInstalled() if (.isTrialDesignFisher(x) && !(type %in% c(1, 3, 4))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' (", type, ") is not allowed; must be 1, 3 or 4") } designSet <- TrialDesignSet(design = x, singleDesign = TRUE) .plotTrialDesignSet(x = designSet, y = y, main = main, xlab = xlab, ylab = ylab, type = type, palette = palette, theta = theta, nMax = nMax, plotPointsEnabled = plotPointsEnabled, legendPosition = legendPosition, showSource = showSource, designSetName = designName, ...) } #' #' @name TrialDesign_as.data.frame #' #' @title #' Coerce TrialDesign to a Data Frame #' #' @description #' Returns the \code{TrialDesign} as data frame. #' #' @param niceColumnNamesEnabled logical. If \code{TRUE}, nice looking names will be used; #' syntactic names otherwise (see \code{\link[base]{make.names}}). #' @param includeAllParameters logical. If \code{TRUE}, all parameters will be included; #' a meaningful parameter selection otherwise. #' #' @details #' Each element of the \code{TrialDesign} is converted to a column in the data frame. #' #' @export #' #' @keywords internal #' as.data.frame.TrialDesign <- function(x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, ...) { .assertIsTrialDesign(x) if (includeAllParameters) { parameterNames <- NULL } else { parameterNames <- x$.getParametersToShow() } return(x$.getAsDataFrame(parameterNames = parameterNames, niceColumnNamesEnabled = niceColumnNamesEnabled, includeAllParameters = includeAllParameters, tableColumnNames = .getTableColumnNames(design = x))) } rpact/R/f_core_assertions.R0000644000176200001440000014652513574176206015436 0ustar liggesusers###################################################################################### # # # -- RPACT assertions -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials.
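# A minimal usage sketch (hedged, values illustrative) for the
# as.data.frame.TrialDesign method defined above:
# df <- as.data.frame(getDesignGroupSequential(kMax = 3),
#     niceColumnNamesEnabled = TRUE)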
# # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### .stopWithWrongDesignMessage <- function(design) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'design' must be an instance of ", .arrayToString( .getTrialDesignClassNames(), vectorLookAndFeelEnabled = FALSE), " (is '", class(design), "')") } .isParameterSet <- function(x) { return(isS4(x) && inherits(x, "ParameterSet")) } .assertIsParameterSetClass <- function(x, objectName = "x") { if (!.isParameterSet(x)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", objectName, "' (", class(x), ") must be a S4 class which inherits from class 'ParameterSet' ") } } .assertIsTrialDesignSet <- function(x, objectName = "x") { if (!.isTrialDesignSet(x)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'designSet' must be an instance of 'TrialDesignSet' (is '", class(x), "')") } } .isTrialDesignSet <- function(x) { return(class(x) == "TrialDesignSet") } .isTrialDesignGroupSequential <- function(design) { return(class(design) == C_CLASS_NAME_TRIAL_DESIGN_GROUP_SEQUENTIAL) } .isTrialDesignInverseNormal <- function(design) { return(class(design) == C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL) } .isTrialDesignFisher <- function(design) { return(class(design) == C_CLASS_NAME_TRIAL_DESIGN_FISHER) } .isTrialDesignConditionalDunnett <- function(design) { return(class(design) == C_CLASS_NAME_TRIAL_DESIGN_CONDITIONAL_DUNNETT) } .isTrialDesignInverseNormalOrGroupSequential <- function(design) { return(.isTrialDesignInverseNormal(design) || .isTrialDesignGroupSequential(design)) } .isTrialDesign <- function(design) { return(.isTrialDesignInverseNormal(design) || .isTrialDesignGroupSequential(design) || .isTrialDesignFisher(design) || .isTrialDesignConditionalDunnett(design)) } .isTrialDesignPlanMeans <- function(designPlan) { return(class(designPlan) == "TrialDesignPlanMeans") } .isTrialDesignPlanRates <- function(designPlan) { return(class(designPlan) == "TrialDesignPlanRates") } .isTrialDesignPlanSurvival <- function(designPlan) { return(class(designPlan) == "TrialDesignPlanSurvival") } .isTrialDesignPlan <- function(designPlan) { return(.isTrialDesignPlanMeans(designPlan) || .isTrialDesignPlanRates(designPlan) || .isTrialDesignPlanSurvival(designPlan)) } .assertIsTrialDesignPlan <- function(designPlan) { if (!.isTrialDesignPlan(designPlan)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'designPlan' must be an instance of 'TrialDesignPlan' (is '", class(designPlan), "')") } } .assertIsTrialDesign <- function(design) { if (!.isTrialDesign(design)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'design' must be an instance of ", .arrayToString( .getTrialDesignClassNames(), vectorLookAndFeelEnabled = FALSE), " (is '", class(design), "')") } } .assertIsTrialDesignInverseNormal <- function(design) { if (!.isTrialDesignInverseNormal(design)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'design' must be an instance of class 'TrialDesignInverseNormal' (is '", class(design), "')") } } .assertIsTrialDesignFisher <- function(design) { if (!.isTrialDesignFisher(design)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'design' must be an instance of 
class 'TrialDesignFisher' (is '", class(design), "')") } } .assertIsTrialDesignGroupSequential <- function(design) { if (!.isTrialDesignGroupSequential(design)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'design' must be an instance of class 'TrialDesignGroupSequential' (is '", class(design), "')") } } .assertIsTrialDesignConditionalDunnett <- function(design) { if (!.isTrialDesignConditionalDunnett(design)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'design' must be an instance of class 'TrialDesignConditionalDunnett' (is '", class(design), "')") } } .assertIsTrialDesignInverseNormalOrGroupSequential <- function(design) { if (!.isTrialDesignInverseNormalOrGroupSequential(design)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'design' must be an instance of class 'TrialDesignInverseNormal' or 'TrialDesignGroupSequential' (is '", class(design), "')") } } .isSimulationResults <- function(simulationResults) { return(inherits(simulationResults, "SimulationResults")) } .assertIsSimulationResults <- function(simulationResults) { if (!.isSimulationResults(simulationResults)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'simulationResults' must be an instance of SimulationResults (is '", class(simulationResults), "')") } } .isStageResults <- function(stageResults) { return(.isStageResultsMeans(stageResults) || .isStageResultsRates(stageResults) || .isStageResultsSurvival(stageResults) || .isStageResultsMeansMultiArm(stageResults) || .isStageResultsRatesMultiArm(stageResults) || .isStageResultsSurvivalMultiArm(stageResults)) } .isStageResultsMeans <- function(stageResults) { return(class(stageResults) == "StageResultsMeans") } .isStageResultsMeansMultiArm <- function(stageResults) { return(class(stageResults) == "StageResultsMultiArmMeans") } .isStageResultsRates <- function(stageResults) { return(class(stageResults) == "StageResultsRates") } .isStageResultsRatesMultiArm <- function(stageResults) { return(class(stageResults) == "StageResultsMultiArmRates") } .isStageResultsSurvival <- function(stageResults) { return(class(stageResults) == "StageResultsSurvival") } .isStageResultsSurvivalMultiArm <- function(stageResults) { return(class(stageResults) == "StageResultsMultiArmSurvival") } .assertIsStageResults <- function(stageResults) { if (!.isStageResults(stageResults)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'stageResults' must be an instance of ", .arrayToString(.getStageResultsClassNames(), vectorLookAndFeelEnabled = FALSE), " (is '", class(stageResults), "')") } } .assertIsInClosedInterval <- function(x, xName, ..., lower, upper, naAllowed = FALSE) { .warnInCaseOfUnknownArguments(functionName = ".assertIsInClosedInterval", ...) 
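# A single NA (or all-NA input) passes if naAllowed = TRUE; vectors mixing NA
# and non-NA values are always rejected. If 'upper' is NULL or NA, only the
# lower bound is checked, e.g., lower = 1 for 'maxNumberOfSubjects'.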
if (naAllowed && all(is.na(x))) { return(invisible()) } if (!naAllowed && length(x) > 1 && any(is.na(x))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", xName, "' (", .arrayToString(x), ") must be a valid numeric vector or a single NA") } if (is.null(upper) || is.na(upper)) { if (any(x < lower, na.rm = TRUE)) { prefix <- ifelse(length(x) > 1, "each value of ", "") stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, prefix, "'", xName, "' (", .arrayToString(x), ") must be >= ", lower) } } else if (any(x < lower, na.rm = TRUE) || any(x > upper, na.rm = TRUE)) { stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'", xName, "' (", .arrayToString(x), ") is out of bounds [", lower, "; ", upper, "]") } } .assertIsInOpenInterval <- function(x, xName, lower, upper, naAllowed = FALSE) { if (naAllowed && all(is.na(x))) { return(invisible()) } if (!naAllowed && length(x) > 1 && any(is.na(x))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", xName, "' (", .arrayToString(x), ") must be a valid numeric vector or a single NA") } if (is.null(upper) || is.na(upper)) { if (any(x <= lower, na.rm = TRUE)) { prefix <- ifelse(length(x) > 1, "each value of ", "") stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, prefix, "'", xName, "' (", .arrayToString(x), ") must be > ", lower) } } else if (any(x <= lower, na.rm = TRUE) || any(x >= upper, na.rm = TRUE)) { stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'", xName, "' (", .arrayToString(x), ") is out of bounds (", lower, "; ", upper, ")") } } .assertIsValidDataInput <- function(dataInput, design = NULL, stage = NULL) { .assertIsDataset(dataInput) if (!is.null(design)) { .assertIsTrialDesign(design) } stages <- dataInput$stages l1 <- length(stages) for (fieldName in dataInput$.getVisibleFieldNames()) { l2 <- length(dataInput[[fieldName]]) if (fieldName != "stages" && l1 != l2) { stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all parameters must have the same length ('stage' has length ", l1, ", '", fieldName, "' has length ", l2, ")") } } if (!is.null(stage)) { if (dataInput$getNumberOfGroups() == 1) { if (.isDatasetMeans(dataInput) ) { if (any(na.omit(dataInput$getStDevsUpTo(stage)) <= 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all standard deviations must be > 0") } if (any(na.omit(dataInput$getSampleSizesUpTo(stage)) <= 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all sample sizes must be > 0") } } else if (.isDatasetRates(dataInput) ) { if (any(na.omit(dataInput$getEventsUpTo(stage)) < 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all events must be >= 0") } if (any(na.omit(dataInput$getSampleSizesUpTo(stage)) <= 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all sample sizes must be > 0") } if (any(na.omit(dataInput$getEventsUpTo(stage)) > na.omit(dataInput$getSampleSizesUpTo(stage)))) { stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all events must be <= corresponding sample size") } } } else if (dataInput$getNumberOfGroups() == 2) { if (.isDatasetMeans(dataInput) ) { if (any(na.omit(dataInput$getStDevsUpTo(stage, 1)) <= 0) || any(na.omit(dataInput$getStDevsUpTo(stage, 2)) <= 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all standard deviations must be > 0") } if (any(na.omit(dataInput$getSampleSizesUpTo(stage, 1)) <= 0) || any(na.omit(dataInput$getSampleSizesUpTo(stage, 2)) <= 0) ) { stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all sample sizes must be > 0") } } else if (.isDatasetRates(dataInput) ) { if (any(na.omit(dataInput$getEventsUpTo(stage, 1)) < 0) || any(na.omit(dataInput$getEventsUpTo(stage, 2)) < 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all events 
must be >= 0") } if (any(na.omit(dataInput$getSampleSizesUpTo(stage, 1)) <= 0) || any(na.omit(dataInput$getSampleSizesUpTo(stage, 2)) <= 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all sample sizes must be > 0") } if (any(na.omit(dataInput$getEventsUpTo(stage, 1)) > na.omit(dataInput$getSampleSizesUpTo(stage, 1))) || any(na.omit(dataInput$getEventsUpTo(stage, 2)) > na.omit(dataInput$getSampleSizesUpTo(stage, 2)))) { stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all events must be <= corresponding sample size") } } } if (.isDatasetSurvival(dataInput) ) { if (any(na.omit(dataInput$getOverallEventsUpTo(stage)) < 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all overall events must be >= 0") } if (any(na.omit(dataInput$getOverallAllocationRatiosUpTo(stage)) <= 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all overall allocation ratios must be > 0") } } } if (!is.null(design)) { numberOfStages <- length(unique(stats::na.omit(stages))) kMax <- design$kMax if (numberOfStages > kMax) { s <- numberOfStages - kMax plural <- ifelse(s == 1, "", "s") warning(sprintf(paste0("The data of the last %s in the dataset will be ", "ignored because the design has specified kMax = %s"), ifelse(s == 1, "stage", paste0(s, " stages")), kMax, kMax), call. = FALSE) } else if (numberOfStages < kMax) { dataInput$.fillWithNAs(kMax) } } invisible(dataInput) } .assertIsDataset <- function(dataInput) { if (!.isDataset(dataInput)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' must be an instance of class ", "'DatasetMeans', 'DatasetRates' or 'DatasetSurvival' (is '", class(dataInput), "')") } } .assertIsDatasetMeans <- function(dataInput) { if (!.isDatasetMeans(dataInput = dataInput)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' must be an instance of class ", "'DatasetMeans' (is '", class(dataInput), "')") } } .assertIsDatasetRates <- function(dataInput) { if (!.isDatasetRates(dataInput = dataInput)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' must be an instance of class ", "'DatasetRates' (is '", class(dataInput), "')") } } .assertIsDatasetSurvival <- function(dataInput) { if (!.isDatasetSurvival(dataInput = dataInput)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' must be an instance of class ", "'DatasetSurvival' (is '", class(dataInput), "')") } } .isDataset <- function(dataInput) { return(.isDatasetMeans(dataInput) || .isDatasetRates(dataInput) || .isDatasetSurvival(dataInput)) } .isDatasetMeans <- function(dataInput) { return(class(dataInput) == "DatasetMeans") } .isDatasetRates <- function(dataInput) { return(class(dataInput) == "DatasetRates") } .isDatasetSurvival <- function(dataInput) { return(class(dataInput) == "DatasetSurvival") } .assertIsNumericVector <- function(x, argumentName, naAllowed = FALSE) { if (missing(x) || is.null(x) || length(x) == 0) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'", argumentName, "' must be a valid numerical value or vector") } if ((!naAllowed && any(is.na(x))) || !is.numeric(x)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' (", .arrayToString(x), ") must be a valid numerical value or vector") } } .assertIsIntegerVector <- function(x, argumentName, naAllowed = FALSE, validateType = TRUE) { if (missing(x) || is.null(x) || length(x) == 0) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'", argumentName, "' must be a valid integer value or vector") } if (naAllowed && all(is.na(x))) { return(invisible()) } if ((!naAllowed && any(is.na(x))) || (validateType && !is.integer(x)) || (!validateType && any(as.integer(x) != x))) { 
stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' (", .arrayToString(x), ") must be a valid integer value or vector") } } .assertIsSingleLogical <- function(x, argumentName, naAllowed = FALSE) { if (missing(x) || is.null(x) || length(x) == 0) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'", argumentName, "' must be a valid single logical value") } if (length(x) > 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' ", .arrayToString(x, vectorLookAndFeelEnabled = TRUE), " must be a single logical value") } if ((!naAllowed && is.na(x)) || !is.logical(x)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' (", x, ") must be a valid single logical value") } } .assertIsSingleNumber <- function(x, argumentName, naAllowed = FALSE) { if (missing(x) || is.null(x) || length(x) == 0) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'", argumentName, "' must be a valid single numerical value") } if (length(x) > 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' ", .arrayToString(x, vectorLookAndFeelEnabled = TRUE), " must be a single numerical value") } if ((!naAllowed && is.na(x)) || !is.numeric(x)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' (", x, ") must be a valid single numerical value") } } .assertIsSingleInteger <- function(x, argumentName, naAllowed = FALSE, validateType = TRUE) { if (missing(x) || is.null(x) || length(x) == 0) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'", argumentName, "' must be a valid single integer value") } if (length(x) > 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' ", .arrayToString(x, vectorLookAndFeelEnabled = TRUE), " must be a single integer value") } if ((!naAllowed && is.na(x)) || (validateType && !is.integer(x)) || (!validateType && !is.na(x) && as.integer(x) != x)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' (", x, ") must be a valid single integer value") } } .assertIsSinglePositiveInteger <- function(x, argumentName, naAllowed = FALSE, validateType = TRUE) { if (missing(x) || is.null(x) || length(x) == 0 || x <= 0) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'", argumentName, "' must be a positive single integer value") } if (length(x) > 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' ", .arrayToString(x, vectorLookAndFeelEnabled = TRUE), " must be a single integer value") } if ((!naAllowed && is.na(x)) || (validateType && !is.integer(x)) || (!validateType && as.integer(x) != x)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' (", x, ") must be a positive single integer value") } } .assertIsCharacter <- function(x, argumentName) { if (missing(x) || is.null(x) || length(x) == 0) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'", argumentName, "' must be a valid character value") } if (!is.character(x)) { stop(sprintf(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'%s' must be a valid character (is an instance of class '%s')", argumentName, class(x))) } } .assertDesignParameterExists <- function(design, parameterName, defaultValue) { if (missing(design)) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'design' must be defined") } if (missing(parameterName)) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'parameterName' must be defined") } if (missing(defaultValue)) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'defaultValue' must be defined") } value <- design[[parameterName]] if (is.null(value) || length(value) == 0 || all(is.na(value))) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "parameter '", parameterName, "' must be specified in design") } if 
(is.null(defaultValue) || length(defaultValue) == 0 || all(is.na(defaultValue))) { return(invisible()) } if (all(value == defaultValue)) { design$.setParameterType(parameterName, C_PARAM_DEFAULT_VALUE) } else { design$.setParameterType(parameterName, C_PARAM_USER_DEFINED) } } .designParameterExists <- function(design, parameterName) { if (missing(design)) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'design' must be defined") } if (missing(parameterName)) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'parameterName' must be defined") } value <- design[[parameterName]] if (is.null(value)) { return(FALSE) } if (length(value) > 1) { return(sum(is.na(value)) < length(value)) } return(!is.na(value)) } .assertIsOptimizationCriterion <- function(x) { if (!.isOptimizationCriterion(x)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "optimization criterion must be one of the following: ", .printOptimizationCriterion()) } } .assertIsValidAlpha <- function(alpha) { .assertIsSingleNumber(alpha, "alpha") if (alpha < 1e-06 || alpha >= 0.5) { stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'alpha' (", alpha, ") is out of bounds [1e-06; 0.5)") } } .assertIsValidKappa <- function(kappa) { .assertIsSingleNumber(kappa, "kappa") .assertIsInOpenInterval(kappa, "kappa", lower = 0, upper = NULL) } .assertIsValidLambda <- function(lambda, lambdaNumber = 0) { argumentName <- "lambda" if (lambdaNumber >= 1) { argumentName <- paste0("lambda", lambdaNumber) } .assertIsNumericVector(lambda, argumentName, naAllowed = TRUE) if (all(is.na(lambda))) { return(invisible()) } if (any(is.na(lambda))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' (", .arrayToString(lambda), ") must be a valid numeric vector") } .assertIsInClosedInterval(lambda, argumentName, lower = 0, upper = NULL) if (all(lambda == 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' (", .arrayToString(lambda), ") not allowed: ", "at least one lambda value must be > 0") } } .assertIsValidFollowUpTime <- function(followUpTime) { if (is.null(followUpTime) || length(followUpTime) == 0 || is.na(followUpTime)) { return(invisible()) } .assertIsSingleNumber(followUpTime, "followUpTime", naAllowed = TRUE) if (followUpTime < 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'followUpTime' (", followUpTime, ") must be >= 0") } } .assertIsValidAccrualTime <- function(accrualTime) { .assertIsNumericVector(accrualTime, "accrualTime", naAllowed = TRUE) if (is.null(accrualTime) || length(accrualTime) == 0 || all(is.na(accrualTime))) { return(invisible()) } if (any(accrualTime < 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'accrualTime' (", .arrayToString(accrualTime), ") must be >= 0") } } .assertIsValidMaxNumberOfSubjects <- function(maxNumberOfSubjects) { .assertIsSingleNumber(maxNumberOfSubjects, "maxNumberOfSubjects") .assertIsInClosedInterval(maxNumberOfSubjects, "maxNumberOfSubjects", lower = 1, upper = NULL) } .assertIsValidStandardDeviation <- function(stDev) { .assertIsSingleNumber(stDev, "stDev") if (stDev <= 0) { stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "standard deviation 'stDev' (", stDev, ") must be > 0") } } .assertIsValidBeta <- function(beta, alpha) { .assertIsSingleNumber(beta, "beta") .assertIsSingleNumber(alpha, "alpha") if (beta < 1e-04 || beta >= 1 - alpha) { stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'beta' (", beta, ") is out of bounds [1e-04; ", (1 - alpha), "); ", "condition: 1e-05 <= alpha < 1 - beta <= 1 - 1e-04") } } .assertIsValidAlphaAndBeta <- function(alpha, beta) { .assertIsValidAlpha(alpha) 
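# 'beta' is validated against 'alpha' so that alpha + beta < 1 holds, e.g.,
# alpha = 0.025 with beta = 0.2 passes, whereas beta = 0.99 fails.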
.assertIsValidBeta(beta, alpha) } .assertIsValidStage <- function(stage, kMax) { if (stage < 1 || stage > kMax) { stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'stage' (", stage, ") is out of bounds [1; ", kMax, "]") } } .assertIsValidIterationsAndSeed <- function(iterations, seed, zeroIterationsAllowed = TRUE) { if (is.null(iterations) || length(iterations) == 0 || !is.numeric(iterations)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'iterations' must be a valid integer value") } if (zeroIterationsAllowed) { if (iterations < 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'iterations' (", iterations, ") must be >= 0") } } else { if (iterations < 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'iterations' (", iterations, ") must be > 0") } } if (is.null(seed) || length(seed) == 0 || (!is.na(seed) && !is.numeric(seed))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'seed' (", seed, ") must be a valid integer value") } } .assertIsValidLegendPosition <- function(legendPosition) { if (is.null(legendPosition) || length(legendPosition) == 0 || (!is.na(legendPosition) && !is.numeric(legendPosition))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'legendPosition' (", legendPosition, ") must be a valid integer value") } if (!is.na(legendPosition) && (legendPosition < -1 || legendPosition > 6)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'legendPosition' (", legendPosition, ") must be an integer between -1 and 6") } } .assertIsValidKMax <- function(kMax, kMaxLowerBound = 1, kMaxUpperBound = C_KMAX_UPPER_BOUND) { if (missing(kMax)) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'kMax' must be defined") } if (is.null(kMax) || length(kMax) == 0 || sum(is.na(kMax)) > 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'kMax' is invalid") } if (kMax == Inf || kMax == -Inf) { stop(sprintf(paste0(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'kMax' (%s) is out of bounds [%s; %s]"), kMax, kMaxLowerBound, kMaxUpperBound)) } if (kMax != as.integer(kMax)) { stop(sprintf(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'kMax' (%s) must be a valid integer (is an instance of class '%s')", kMax, class(kMax))) } if (kMax < kMaxLowerBound || kMax > kMaxUpperBound) { stop(sprintf(paste0(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'kMax' (%s) is out of bounds [%s; %s]"), kMax, kMaxLowerBound, kMaxUpperBound)) } } .assertAreValidInformationRates <- function(informationRates, kMax = length(informationRates), kMaxLowerBound = 1, kMaxUpperBound = C_KMAX_UPPER_BOUND) { if (length(informationRates) < kMaxLowerBound) { stop(sprintf(paste0(C_EXCEPTION_TYPE_ARGUMENT_LENGTH_OUT_OF_BOUNDS, "length of 'informationRates' (%s) is out of bounds [%s; %s]"), length(informationRates), kMaxLowerBound, ifelse(kMax >= kMaxLowerBound && kMax < C_KMAX_UPPER_BOUND, kMax, C_KMAX_UPPER_BOUND))) } .assertIsValidKMax(kMax, kMaxLowerBound = kMaxLowerBound, kMaxUpperBound = kMaxUpperBound) if (length(informationRates) != kMax) { stop(sprintf(paste0(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "length of 'informationRates' (%s) must be equal to 'kMax' (%s)"), length(informationRates), kMax)) } if (length(informationRates) > kMaxUpperBound) { stop(sprintf(paste0(C_EXCEPTION_TYPE_ARGUMENT_LENGTH_OUT_OF_BOUNDS, "length of 'informationRates' (%s) is out of bounds [%s; %s]"), length(informationRates), kMaxLowerBound, kMax)) } if (kMax == 1) { return(invisible()) } .assertValuesAreAreInsideBounds("informationRates", informationRates, 0, 1, lowerBoundInclusive = FALSE) if (min(informationRates) <= 0 || max(informationRates) > 1 || any(informationRates[2 : kMax] <= informationRates[1 : (kMax - 1)])) 
{ stop(sprintf(paste0(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'informationRates' (%s) ", "must be strictly increasing: 0 < x_1 < .. < x_%s <= 1"), .arrayToString(informationRates, vectorLookAndFeelEnabled = FALSE), kMax)) } } .assertValuesAreAreInsideBounds <- function(parameterName, values, lowerBound, upperBound, lowerBoundInclusive = TRUE, upperBoundInclusive = TRUE) { lower <- min(values) upper <- max(values) lowerInvalid <- ifelse(lowerBoundInclusive, lower < lowerBound, lower <= lowerBound) upperInvalid <- ifelse(upperBoundInclusive, upper > upperBound, upper >= upperBound) if (lowerInvalid || upperInvalid) { stop(sprintf(paste0(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'%s' (%s) is out of bounds %s%s; %s%s"), parameterName, .arrayToString(values, vectorLookAndFeelEnabled = FALSE), ifelse(lowerBoundInclusive, "[", "("), lowerBound, upperBound, ifelse(upperBoundInclusive, "]", ")"))) } } .assertValuesAreStrictlyIncreasing <- function(values, parameterName) { len <- length(values) if (len <= 1) { return(invisible()) } if (any(values[2 : len] <= values[1 : (len - 1)])) { stop(sprintf(paste0(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'%s' (%s) ", "must be strictly increasing: x_1 < .. < x_%s"), parameterName, .arrayToString(values, vectorLookAndFeelEnabled = FALSE), len)) } } .assertValuesAreMonotoneIncreasing <- function(values, parameterName) { len <- length(values) if (len <= 1) { return(invisible()) } if (any(values[2 : len] < values[1 : (len - 1)])) { stop(sprintf(paste0(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'%s' (%s) ", "must be increasing: x_1 <= .. <= x_%s"), parameterName, .arrayToString(values, vectorLookAndFeelEnabled = FALSE), len)) } } .assertAreValidFutilityBounds <- function(futilityBounds, kMax = length(futilityBounds) + 1, kMaxLowerBound = 1, kMaxUpperBound = C_KMAX_UPPER_BOUND) { if (length(futilityBounds) < kMaxLowerBound - 1) { stop(sprintf(paste0(C_EXCEPTION_TYPE_ARGUMENT_LENGTH_OUT_OF_BOUNDS, "length of 'futilityBounds' (%s) is out of bounds [%s; %s]"), length(futilityBounds), kMaxLowerBound - 1, ifelse(kMax >= kMaxLowerBound && kMax < C_KMAX_UPPER_BOUND, kMax - 1, C_KMAX_UPPER_BOUND - 1))) } .assertIsValidKMax(kMax, kMaxLowerBound = kMaxLowerBound, kMaxUpperBound = kMaxUpperBound) if (length(futilityBounds) != kMax - 1) { stop(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "length of 'futilityBounds' (", length(futilityBounds), ") must be equal to 'kMax' (", kMax, ") - 1") } .assertValuesAreAreInsideBounds("futilityBounds", futilityBounds, -6, 6) } .assertIsValidAlpha0Vec <- function(alpha0Vec, kMax = length(alpha0Vec) - 1, kMaxLowerBound = 1, kMaxUpperBound = C_KMAX_UPPER_BOUND) { if (length(alpha0Vec) < kMaxLowerBound - 1) { stop(sprintf(paste0(C_EXCEPTION_TYPE_ARGUMENT_LENGTH_OUT_OF_BOUNDS, "length of 'alpha0Vec' (%s) is out of bounds [%s; %s]"), length(alpha0Vec), kMaxLowerBound - 1, kMax - 1)) } .assertIsValidKMax(kMax, kMaxLowerBound = kMaxLowerBound, kMaxUpperBound = kMaxUpperBound) if (length(alpha0Vec) != kMax - 1) { stop(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "length of 'alpha0Vec' (", length(alpha0Vec), ") must be equal to 'kMax' (", kMax, ") - 1") } .assertValuesAreAreInsideBounds("alpha0Vec", alpha0Vec, 0, 1, lowerBoundInclusive = FALSE) } .assertIsValidSidedParameter <- function(sided) { if (sided != 1 && sided != 2) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'sided' (", sided, ") must be 1 or 2") } } .assertIsValidGroupsParameter <- function(groups) { if (groups != 1 && groups != 2) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'groups' (", groups, ") must be 1 or 2") } } 
.assertIsValidForLogarithmization <- function(valueList) { if (getLogLevel() %in% c(C_LOG_LEVEL_PROGRESS, C_LOG_LEVEL_DISABLED)) { return(invisible()) } if (missing(valueList)) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'valueList' must be defined") } if (!is.list(valueList) || length(valueList) == 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'valueList' must be a valid list") } for (index in 1:length(valueList)) { value <- valueList[[index]] if (is.null(value) || is.na(value) || !is.numeric(value) || value < 0) { paramName <- names(valueList)[index] stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "logarithmization of '", paramName, "' (", value, ") produces NaN") } } } .allArgumentsAreNotNull <- function(...) { args <- list(...) naCounter <- 0 for (arg in args) { if (!is.null(arg)) { naCounter <- naCounter + sum(is.na(arg)) } } return(naCounter == 0) } .assertAssociatedArgumentsAreDefined <- function(...) { .associatedArgumentsAreDefined(..., warningOnlyEnabled = FALSE) } .associatedArgumentsAreDefined <- function(..., warningOnlyEnabled = TRUE) { args <- NULL tryCatch(expr = { args <- list(...) }, error = function(e) { stop(simpleError(paste0(C_EXCEPTION_TYPE_MISSING_ARGUMENT, e$message), call = e$call)) }) if (.allArgumentsAreNotNull(...)) { return(TRUE) } args <- args[args != "warningOnlyEnabled" & !is.null(args)] argNames <- names(args) if (sum(argNames == "") > 0) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "each argument must have a name defined, e.g. a = a") } definedArguments <- c() undefinedArguments <- c() for (i in 1:length(args)) { arg <- args[i] argName <- argNames[i] if (missing(arg) || (!is.null(arg) && sum(is.na(arg)) > 0)) { undefinedArguments <- c(undefinedArguments, argName) } else { definedArguments <- c(definedArguments, argName) } } if (length(undefinedArguments) > 0 && length(definedArguments) > 0) { message <- paste0(.arrayToString(undefinedArguments, encapsulate = TRUE), " ", ifelse(warningOnlyEnabled,"should","must"), " be defined because ", .arrayToString(definedArguments, encapsulate = TRUE), ifelse(length(definedArguments) > 1, " are", " is"), " defined") if (warningOnlyEnabled) { warning(C_EXCEPTION_TYPE_INCOMPLETE_ARGUMENTS, message, call. = FALSE) return(FALSE) } else { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, message) } } invisible(length(definedArguments) == length(args)) } .assertIsValidNPlanned <- function(nPlanned, kMax, stage) { if (length(nPlanned) != kMax - stage) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sprintf(paste0("'nPlanned' (%s) is invalid: ", "length must be equal to 'kMax' (%s) - 'stage' (%s)"), .arrayToString(nPlanned), kMax, stage)) } if (sum(is.na(nPlanned)) > 0 || sum(nPlanned <= 0) > 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sprintf(paste0("'nPlanned' (%s) is invalid: ", "all values must be > 0"), .arrayToString(nPlanned))) } } .isValidNPlanned <- function(nPlanned, kMax, stage) { if (missing(nPlanned)) { warning("'nPlanned' is missing", call. = FALSE) return(FALSE) } if (!any(is.na(nPlanned))) { if ((length(nPlanned) != kMax - stage)) { warning(sprintf(paste0("'nPlanned' (%s) will be ignored: ", "length must be equal to 'kMax' (%s) - 'stage' (%s)"), .arrayToString(nPlanned), kMax, stage), call. = FALSE) return(FALSE) } if (sum(is.na(nPlanned)) > 0 || sum(nPlanned <= 0) > 0) { warning(sprintf(paste0("'nPlanned' (%s) will be ignored: ", "all values must be > 0"), .arrayToString(nPlanned)), call. = FALSE) return(FALSE) } } return(TRUE) } .warnInCaseOfUnknownArguments <- function(..., functionName, ignore = c()) { args <- list(...) 
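    # Every named argument captured via '...' that is not listed in 'ignore'
    # triggers a warning below and is otherwise discarded.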
if (length(args) == 0) { return(invisible()) } argNames <- names(args) for (i in 1:length(args)) { arg <- args[i] argName <- ifelse(is.null(argNames[i]), paste0("%param", i, "%"), argNames[i]) if (!(argName %in% ignore)) { warning("Argument unknown in ", functionName, "(...): '", argName, "' = ", arg, " will be ignored", call. = FALSE) } } } .warnInCaseOfUnusedArgument <- function(arg, argName, defaultValue, functionName) { if (!identical(arg, defaultValue)) { warning("Unused argument in ", functionName, "(...): '", argName, "' = ", arg, " will be ignored", call. = FALSE) } } .assertIsDefined <- function(parameter, parameterName) { if (is.null(parameter) || any(is.na(parameter))) { stop("'", parameterName, "' must be defined") } } .isTrialDesignWithValidFutilityBounds <- function(design) { if (is.null(design) || !.isTrialDesignInverseNormalOrGroupSequential(design)) { return(FALSE) } futilityBounds <- design[["futilityBounds"]] if (is.null(futilityBounds)) { return(FALSE) } if (length(futilityBounds) == 0 || sum(is.na(futilityBounds)) == design$kMax) { return(FALSE) } return(sum(futilityBounds == C_FUTILITY_BOUNDS_DEFAULT) == 0) } .isTrialDesignWithValidAlpha0Vec <- function(design) { if (is.null(design) || !.isTrialDesignFisher(design)) { return(FALSE) } alpha0Vec <- design[["alpha0Vec"]] if (is.null(alpha0Vec)) { return(FALSE) } alpha0Vec <- na.omit(alpha0Vec) if (length(alpha0Vec) == 0 || all(is.na(alpha0Vec))) { return(FALSE) } return(sum(alpha0Vec == C_ALPHA_0_VEC_DEFAULT) == 0) } .assertPackageIsInstalled <- function(packageName) { if (!requireNamespace(packageName, quietly = TRUE)) { stop("Package \"", packageName, "\" is needed for this function to work. ", "Please install and load it.", call. = FALSE) } } .assertGgplotIsInstalled <- function() { .assertPackageIsInstalled("ggplot2") } .assertRcppIsInstalled <- function() { .assertPackageIsInstalled("Rcpp") } .assertTestthatIsInstalled <- function() { .assertPackageIsInstalled("testthat") } .assertIsValidThetaH0 <- function(thetaH0, ..., endpoint = c("means", "rates", "survival"), groups, ratioEnabled = FALSE) { .warnInCaseOfUnknownArguments(functionName = ".assertIsValidThetaH0", ...) 
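    # An NA value means "use the endpoint-specific default". Otherwise the
    # admissible range depends on the endpoint: ratios and hazard ratios must
    # be > 0, a one-sample rate must lie in (0; 1), a rate difference in (-1; 1).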
if (is.na(thetaH0)) { return(invisible()) } if (!is.numeric(thetaH0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'thetaH0' must be a valid numeric value") } endpoint <- match.arg(endpoint) if (endpoint == "means" || endpoint == "rates") { if (groups == 2 && ratioEnabled) { if (thetaH0 <= 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'thetaH0' (", thetaH0, ") must be > 0") } return(invisible()) } } if (endpoint == "rates") { if (groups == 1) { if (thetaH0 <= 0 || thetaH0 >= 1) { stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'thetaH0' (", thetaH0, ") is out of bounds (0; 1) or not specified") } } else { if (thetaH0 <= -1 || thetaH0 >= 1) { stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'thetaH0' (", thetaH0, ") is out of bounds (-1; 1)") } } } else if (endpoint == "survival") { if (thetaH0 <= 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'thetaH0' (", thetaH0, ") must be > 0") } } } .assertIsValidThetaH0DataInput <- function(thetaH0, dataInput) { if (.isDatasetRates(dataInput)) { endpoint <- "rates" } else if (.isDatasetSurvival(dataInput)) { endpoint <- "survival" } else { endpoint <- "means" } .assertIsValidThetaH0(thetaH0, endpoint = endpoint, groups = dataInput$getNumberOfGroups()) } .assertIsValidThetaRange <- function(..., thetaRange, thetaAutoSeqEnabled = TRUE, survivalDataEnabled = FALSE) { if (length(thetaRange) == 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'thetaRange' must be a vector with two entries defining minimum and maximum ", "or a sequence of values with length > 2") } else if (length(thetaRange) == 2 && thetaAutoSeqEnabled) { minValue <- thetaRange[1] maxValue <- thetaRange[2] if (survivalDataEnabled) { .assertIsValidHazardRatio(minValue, "thetaRange[1]") .assertIsValidHazardRatio(maxValue, "thetaRange[2]") } if (minValue == maxValue) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'thetaRange' with length 2 must contain minimum != maximum (", minValue, " == ", maxValue , ")") } by <- (maxValue - minValue) / C_THETA_RANGE_SEQUENCE_LENGTH_DEFAULT thetaRange <- seq(minValue, maxValue, by) } invisible(thetaRange) } .assertIsValidPiRange <- function(..., piRange, piAutoSeqEnabled = TRUE) { if (length(piRange) == 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'piRange' must be a vector with two entries defining minimum and maximum ", "or a sequence of values with length > 2") } else if (length(piRange) == 2) { if (piAutoSeqEnabled) { minValue <- piRange[1] maxValue <- piRange[2] .assertIsValidPi(minValue, "piRange[1]") .assertIsValidPi(maxValue, "piRange[2]") if (minValue == maxValue) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'piRange' with length 2 must contain minimum != maximum (", minValue, " == ", maxValue , ")") } by <- (maxValue - minValue) / C_THETA_RANGE_SEQUENCE_LENGTH_DEFAULT piRange <- seq(minValue, maxValue, by) } } invisible(piRange) } .assertIsValidPi <- function(piValue, piName) { if (is.null(piValue) || length(piValue) == 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", piName, "' must be a valid numeric value") } if (all(is.na(piValue))) { return(invisible()) } if (!is.numeric(piValue) || any(is.na(piValue))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", piName, "' (", .arrayToString(piValue), ") must be a valid numeric value") } if (any(piValue <= 0) || any(piValue >= 1)) { stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'", piName, "' (", .arrayToString(piValue), ") is out of bounds (0; 1) or event time too long") } } .assertIsValidPi1 <- function(pi1, stageResults = NULL, stage = NULL) { if (is.na(pi1) && !is.null(stageResults) && !is.null(stage)) { if 
(stageResults$isOneSampleDataset()) { pi1 <- stageResults$overallEvents[stage] / stageResults$overallSampleSizes[stage] } else { pi1 <- stageResults$overallEvents1[stage] / stageResults$overallSampleSizes1[stage] } } .assertIsInClosedInterval(pi1, "pi1", lower = 0, upper = 1) invisible(pi1) } .assertIsValidPi2 <- function(pi2, stageResults = NULL, stage = NULL) { if (is.na(pi2) && !is.null(stageResults) && !is.null(stage)) { pi2 <- stageResults$overallEvents2[stage]/stageResults$overallSampleSizes2[stage] } .assertIsInClosedInterval(pi2, "pi2", lower = 0, upper = 1) invisible(pi2) } .assertIsValidAllocationRatioPlanned <- function(allocationRatioPlanned, numberOfGroups) { .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") .assertIsInClosedInterval(allocationRatioPlanned, "allocationRatioPlanned", lower = 0.001, upper = 1000) if (allocationRatioPlanned != C_ALLOCATION_RATIO_DEFAULT && numberOfGroups == 1) { warning("Planned allocation ratio ", allocationRatioPlanned, " will be ignored ", "because the dataset has only one group", call. = FALSE) } } .assertIsValidAllocationRatioPlannedSampleSize <- function(allocationRatioPlanned, maxNumberOfSubjects = NA_real_) { .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") if (allocationRatioPlanned < 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'allocationRatioPlanned' (", allocationRatioPlanned, ") is not allowed to be negative") } if (length(maxNumberOfSubjects) > 0 && !is.na(maxNumberOfSubjects) && maxNumberOfSubjects > 0 && allocationRatioPlanned == 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "determination of optimal allocation ratio not possible ", "if explicit or implicit 'maxNumberOfSubjects' (", maxNumberOfSubjects, ") > 0, i.e., follow-up time should be calculated ", "(please specify an 'allocationRatioPlanned' > 0)") } } .assertIsValidThetaH1 <- function(thetaH1, stageResults = NULL, stage = NULL) { if (is.na(thetaH1) && !is.null(stageResults) && !is.null(stage)) { thetaH1 <- stageResults$effectSizes[stage] } .assertIsSingleNumber(thetaH1, "thetaH1") invisible(thetaH1) } .assertIsValidAssumedStDev <- function(assumedStDev, stageResults = NULL, stage = NULL) { if (is.na(assumedStDev) && !is.null(stageResults) && !is.null(stage)) { assumedStDev <- stageResults$overallStDevs[stage] } .assertIsSingleNumber(assumedStDev, "assumedStDev") if (assumedStDev <= 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'assumedStDev' (", assumedStDev, ") must be > 0") } invisible(assumedStDev) } .assertIsValidThetaH1ForMultiArm <- function(thetaH1, stageResults = NULL, stage = NULL) { if (is.na(thetaH1) && !is.null(stageResults) && !is.null(stage)) { thetaH1 <- stageResults$effectSizes[,stage] } .assertIsNumericVector(thetaH1, "thetaH1", naAllowed = TRUE) invisible(thetaH1) } .assertIsValidAssumedStDevForMultiArm <- function(assumedStDev, stageResults = NULL, stage = NULL) { if (is.na(assumedStDev) && !is.null(stageResults) && !is.null(stage)) { assumedStDev <- stageResults$overallStDevs[,stage] } .assertIsNumericVector(assumedStDev, "assumedStDev", naAllowed = TRUE) if (any(assumedStDev <= 0, na.rm = TRUE)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'assumedStDev' (", assumedStDev, ") must be > 0") } invisible(assumedStDev) } .assertIsValidPiTreatmentsForMultiArm <- function(piTreatments, stageResults = NULL, stage = NULL) { if (is.na(piTreatments) && !is.null(stageResults) && !is.null(stage)) { piTreatments <- stageResults$piTreatments[, stage] } .assertIsNumericVector(piTreatments, "piTreatments", naAllowed = TRUE) 
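    # NA entries are tolerated in both checks (naAllowed = TRUE) because
    # missing values are imputed stage-wise from the stage results above.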
.assertIsInClosedInterval(piTreatments, "piTreatments", lower = 0, upper = 1, naAllowed = TRUE) invisible(piTreatments) } .assertIsValidPiControlForMultiArm <- function(piControl, stageResults = NULL, stage = NULL) { if (is.na(piControl) && !is.null(stageResults) && !is.null(stage)) { piControl <- stageResults$piControl[,stage] } .assertIsNumericVector(piControl, "piControl", naAllowed = TRUE) .assertIsInClosedInterval(piControl, "piControl", lower = 0, upper = 1) invisible(piControl) } .isValidValueOrVector <- function(x) { if (is.null(x) || length(x) == 0) { return(FALSE) } return(!any(is.na(x))) } .assertIsValidHazardRatio <- function(hazardRatio, thetaH0) { .assertIsNumericVector(hazardRatio, "hazardRatio") if (any(hazardRatio == thetaH0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "alternative not correctly specified: ", "each hazard ratio (", .arrayToString(hazardRatio[1:min(length(hazardRatio), 10)]), ") must be unequal to 'thetaH0' (", thetaH0, ")") } } .assertIsValidHazardRatioVector <- function(hazardRatio) { .assertIsNumericVector(hazardRatio, "hazardRatio") if (any(hazardRatio <= 0)) { if (length(hazardRatio) == 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'hazardRatio' (", hazardRatio ,") must be > 0") } else { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "each 'hazardRatio' (", .arrayToString(hazardRatio[1:min(length(hazardRatio), 10)]), ") must be > 0") } } } .assertIsValidDirectionUpper <- function(directionUpper, sided, objectType = c("power", "sampleSize")) { objectType <- match.arg(objectType) .assertIsSingleLogical(directionUpper, "directionUpper", naAllowed = TRUE) if (objectType == "power") { if (sided == 1 && is.na(directionUpper)) { directionUpper <- TRUE } if (sided == 2 && !is.na(directionUpper)) { warning("'directionUpper' will be ignored because it ", "is not applicable for 'sided' = 2", call. = FALSE) } } else if (is.na(directionUpper)) { directionUpper <- TRUE } return(directionUpper) } .assertIsValidFunction <- function(fun, ..., funArgName = "fun", expectedArguments = NULL, expectedFunction = NULL, identical = FALSE, validateThreeDots = TRUE) { fCall = match.call(expand.dots = FALSE) if (is.null(expectedArguments) && is.null(expectedFunction)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'expectedArguments' or 'expectedFunction' must be not NULL") } if (!is.function(fun)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", funArgName, "' must be a function") } functionName <- as.character(fCall$fun) if (is.null(functionName) || functionName == funArgName) { functionName <- "function" } argNames <- methods::formalArgs(fun) if (!is.null(expectedArguments)) { argNamesExpected <-expectedArguments } else { if (!is.function(expectedFunction)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'expectedFunction' must be a function") } argNamesExpected <- methods::formalArgs(expectedFunction) } if (validateThreeDots) { if (!("..." %in% argNames)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", funArgName, "' must contain the three-dots argument '...', e.g., ", funArgName, " = ", functionName, "(", .arrayToString(argNames), ", ...)") } } argNames <- argNames[argNames != "..."] argNamesExpected <- argNamesExpected[argNamesExpected != "..."] for (argName in argNames) { if (argName != "..." && !(argName %in% argNamesExpected)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the argument '", argName, "' in '", funArgName, "' (", functionName, ") is not allowed. 
", "Use one or more of the following arguments: ", .arrayToString(argNamesExpected)) } } if (identical) { for (argNameExpected in argNamesExpected) { if (argNameExpected != "..." && !(argNameExpected %in% argNames)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", funArgName, "' (", functionName, ") must contain ", "an argument with name '", argNameExpected, "'") } } return(invisible()) } counter <- 0 unusedArgs <- c() for (argNameExpected in argNamesExpected) { if (argNameExpected %in% argNames) { counter <- counter + 1 } else { unusedArgs <- c(unusedArgs, argNameExpected) } } if (counter == 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", funArgName, "' (", functionName, ") must contain at ", "least one of the following arguments: ", .arrayToString(argNamesExpected)) } if (length(unusedArgs) > 0) { warning("Note that the following arguments can optionally be used in '", funArgName, "' (", functionName, "): \n", .arrayToString(unusedArgs), call. = FALSE) } } .assertIsValidMinNumberOfSubjectsPerStage <- function( parameterValues, parameterName, plannedSubjects, conditionalPower, kMax) { if (kMax == 1) { return(invisible(NA_real_)) } .assertIsNumericVector(parameterValues, parameterName, naAllowed = TRUE) if (is.na(conditionalPower)) { return(parameterValues) } if (length(parameterValues) == 0 || (length(parameterValues) == 1 && is.na(parameterValues))) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'", parameterName, "' must be defined ", "because 'conditionalPower' is defined") } if (length(parameterValues) != kMax) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", parameterName, "' (", .arrayToString(parameterValues), ") must have length ", kMax) } if (any(is.na(parameterValues[2:length(parameterValues)]))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", parameterName, "' (", .arrayToString(parameterValues), ") must contain valid numeric values") } if (!is.na(parameterValues[1]) && parameterValues[1] != plannedSubjects[1]) { warning("First value of '", parameterName, "' (", parameterValues[1], ") will be ignored") } parameterValues[1] <- plannedSubjects[1] .assertIsInClosedInterval(parameterValues, parameterName, lower = 1, upper = NULL) return(invisible(parameterValues)) } .assertAreSuitableInformationRates <- function(design, dataInput, stage) { if (!.isTrialDesignGroupSequential(design) || stage == 1) { return(invisible()) } param <- NA_character_ if (dataInput$isDatasetSurvival()) { if (any(abs(design$informationRates[2:stage] - dataInput$getOverallEventsUpTo(stage)[2:stage] / dataInput$getOverallEventsUpTo(1) * design$informationRates[1]) > C_ACCEPT_DEVIATION_INFORMATIONRATES)) { param <- "events" } } else { if (dataInput$getNumberOfGroups() == 1) { if (any(abs(design$informationRates[2:stage] - dataInput$getOverallSampleSizesUpTo(stage)[2:stage] / dataInput$getOverallSampleSizesUpTo(1) * design$informationRates[1]) > C_ACCEPT_DEVIATION_INFORMATIONRATES)) { param <- "sample sizes" } } else if (dataInput$getNumberOfGroups() == 2) { if (any(abs(design$informationRates[2:stage] - dataInput$getOverallSampleSizesUpTo(stage)[2:stage] / dataInput$getOverallSampleSizesUpTo(1) * design$informationRates[1]) > C_ACCEPT_DEVIATION_INFORMATIONRATES) || any(abs(design$informationRates[2:stage] - dataInput$getOverallSampleSizesUpTo(stage,2)[2:stage] / dataInput$getOverallSampleSizesUpTo(1,2)*design$informationRates[1]) > C_ACCEPT_DEVIATION_INFORMATIONRATES)) { param <- "sample sizes" } } } if (!is.na(param)) { warning("Observed ", param, " not according to specified information rates in ", "group sequential 
design. ", "Test procedure might not control Type I error rate.", call. = FALSE) } } rpact/R/class_analysis_stage_results.R0000644000176200001440000010462113574432627017704 0ustar liggesusers###################################################################################### # # # -- Stage results classes -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### .getStageResultsClassNames <- function() { return(c("StageResultsMeans", "StageResultsRates", "StageResultsSurvival", "StageResultsMultiArmMeans", "StageResultsMultiArmRates", "StageResultsMultiArmSurvival")) } #' #' @name StageResults #' #' @title #' Basic Stage Results #' #' @description #' Basic class for stage results. #' #' @details #' \code{StageResults} is the basic class for \code{StageResultsMeans}, #' \code{StageResultsRates}, and \code{StageResultsSurvival}. #' #' @field testStatistics The stage-wise test statistics. #' @field pValues The stage-wise p-values. #' @field combInverseNormal The inverse normal test. #' @field combFisher The Fisher's combination test. #' @field effectSizes The effect sizes for different designs. #' @field testActions The action drawn from test result. #' @field weightsFisher The weights for Fisher's combination test. #' @field weightsInverseNormal The weights for inverse normal statistic. #' #' @include class_core_parameter_set.R #' @include class_design.R #' @include class_analysis_dataset.R #' @include f_core_constants.R #' @include class_core_plot_settings.R #' #' @keywords internal #' #' @importFrom methods new #' StageResults <- setRefClass("StageResults", contains = "ParameterSet", fields = list( .plotSettings = "PlotSettings", .design = "TrialDesign", .dataInput = "Dataset", stages = "integer", pValues = "numeric", weightsFisher = "numeric", weightsInverseNormal = "numeric", thetaH0 = "numeric", direction = "character" ), methods = list( initialize = function(...) { callSuper(...) 
}, init = function(design, dataInput) { .design <<- design .dataInput <<- dataInput .plotSettings <<- PlotSettings() if (!missing(design)) { stages <<- c(1:design$kMax) if (design$kMax == C_KMAX_DEFAULT) { .setParameterType("stages", C_PARAM_DEFAULT_VALUE) } else { .setParameterType("stages", C_PARAM_USER_DEFINED) } .parameterNames <<- .getParameterNames(design) } .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS .setParameterType("pValues", ifelse( .isMultiArm(), C_PARAM_NOT_APPLICABLE, C_PARAM_GENERATED)) .setParameterType("thetaH0", ifelse( identical(thetaH0, C_THETA_H0_MEANS_DEFAULT), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) .setParameterType("direction", ifelse( identical(direction, C_DIRECTION_UPPER), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) }, getPlotSettings = function() { return(.plotSettings) }, show = function(showType = 1, digits = NA_integer_) { .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE) }, .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) { 'Method for automatically printing stage results' .resetCat() if (showType == 2) { .cat("Technical summary of the stage results object of class ", methods::classLabel(class(.self)), ":\n\n", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showAllParameters(consoleOutputEnabled = consoleOutputEnabled) .showParameterTypeDescription(consoleOutputEnabled = consoleOutputEnabled) } else { .cat(.toString(startWithUpperCase = TRUE), ":\n\n", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getUserDefinedParameters(), "User defined parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) # .showParametersOfOneGroup(.getDerivedParameters(), "Derived from user defined parameters", # orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getDefaultParameters(), "Default parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getGeneratedParameters(), "Output", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled) } }, isDirectionUpper = function() { return(direction == C_DIRECTION_UPPER) }, .isMultiArm = function() { return(grepl("multi", tolower(class(.self)))) }, .getParametersToShow = function() { return(c("stages")) }, .toString = function(startWithUpperCase = FALSE) { prefix <- paste(ifelse(startWithUpperCase, "Stage results of", "stage results of")) if (class(.self) == "StageResultsMeans") { return(paste(prefix, "means")) } if (class(.self) == "StageResultsMultiArmMeans") { return(paste(prefix, "multi-armed means")) } if (class(.self) == "StageResultsRates") { return(paste(prefix, "rates")) } if (class(.self) == "StageResultsMultiArmRates") { return(paste(prefix, "multi-armed rates")) } if (class(.self) == "StageResultsSurvival") { return(paste(prefix, "survival data")) } if (class(.self) == "StageResultsMultiArmSurvival") { return(paste(prefix, "multi-armed survival")) } return("unknown stage results") }, getDataInput = function() { return(.dataInput) }, getNumberOfGroups = function() { return(.dataInput$getNumberOfGroups()) }, isOneSampleDataset = function() { return(getNumberOfGroups() == 1) }, isTwoSampleDataset = function() { return(getNumberOfGroups() == 2) }, isDatasetMeans = function() { return(.dataInput$isDatasetMeans()) }, isDatasetRates = function() { return(.dataInput$isDatasetRates()) 
}, isDatasetSurvival = function() { return(.dataInput$isDatasetSurvival()) }, getNumberOfStages = function() { if (.isMultiArm()) { return(max(ncol(stats::na.omit(effectSizes)), ncol(stats::na.omit(separatePValues)))) } return(max(length(stats::na.omit(effectSizes)), length(stats::na.omit(pValues)))) } ) ) #' #' @name StageResultsMeans #' #' @title #' Stage Results of Means #' #' @description #' Class for stage results of means. #' #' @details #' This object can not be created directly; use \code{getStageResults} #' with suitable arguments to create the stage results of a dataset of means. #' #' @field testStatistics The stage-wise test statistics. #' @field pValues The stage-wise p-values. #' @field combInverseNormal The inverse normal test. #' @field combFisher The Fisher's combination test. #' @field effectSizes The effect sizes for different designs. #' @field testActions The action drawn from test result. #' @field weightsFisher The weights for Fisher's combination test. #' @field weightsInverseNormal The weights for inverse normal statistic. #' #' @include class_core_parameter_set.R #' @include class_design.R #' @include class_analysis_dataset.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' StageResultsMeans <- setRefClass("StageResultsMeans", contains = "StageResults", fields = list( combInverseNormal = "numeric", combFisher = "numeric", overallTestStatistics = "numeric", overallPValues = "numeric", effectSizes = "numeric", testStatistics = "numeric", overallMeans = "numeric", overallMeans1 = "numeric", overallMeans2 = "numeric", overallStDevs = "numeric", overallStDevs1 = "numeric", overallStDevs2 = "numeric", overallSampleSizes = "numeric", overallSampleSizes1 = "numeric", overallSampleSizes2 = "numeric", equalVariances = "logical", normalApproximation = "logical" ), methods = list( initialize = function(design, dataInput, ..., equalVariances = TRUE, normalApproximation = FALSE) { callSuper(.design = design, .dataInput = dataInput, ..., equalVariances = equalVariances, normalApproximation = normalApproximation) init(design = design, dataInput = dataInput) for (param in c( "weightsFisher", "weightsInverseNormal", "combFisher", "combInverseNormal")) { .setParameterType(param, C_PARAM_NOT_APPLICABLE) } for (param in .getParametersToShow()) { if (.getParameterType(param) == C_PARAM_TYPE_UNKNOWN) { .setParameterType(param, C_PARAM_GENERATED) } } .setParameterType("equalVariances", ifelse( identical(equalVariances, TRUE), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) .setParameterType("normalApproximation", ifelse( identical(normalApproximation, FALSE), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) }, .getParametersToShow = function() { parametersToShow <- c( "stages", "overallTestStatistics", "overallPValues" ) if (.dataInput$getNumberOfGroups() == 1) { parametersToShow <- c(parametersToShow, "overallMeans", "overallStDevs", "overallSampleSizes" ) } else if (.dataInput$getNumberOfGroups() == 2) { parametersToShow <- c(parametersToShow, "overallMeans1", "overallMeans2", "overallStDevs1", "overallStDevs2", "overallSampleSizes1", "overallSampleSizes2" ) } parametersToShow <- c(parametersToShow, "testStatistics", "pValues", "effectSizes" ) if (.isTrialDesignInverseNormal(.design)) { parametersToShow <- c(parametersToShow, "combInverseNormal", "weightsInverseNormal" ) } else if (.isTrialDesignFisher(.design)) { parametersToShow <- c(parametersToShow, "combFisher", "weightsFisher" ) } parametersToShow <- c(parametersToShow, "thetaH0", "direction", 
"normalApproximation" ) if (.dataInput$getNumberOfGroups() == 2) { parametersToShow <- c(parametersToShow, "equalVariances" ) } return(parametersToShow) } ) ) #' @include class_core_parameter_set.R #' @include class_design.R #' @include class_analysis_dataset.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' StageResultsMultiArmMeans <- setRefClass("StageResultsMultiArmMeans", contains = "StageResults", fields = list( stage = "integer", combInverseNormal = "matrix", combFisher = "matrix", overallTestStatistics = "matrix", overallStDevs = "matrix", overallPValues = "matrix", testStatistics = "matrix", separatePValues = "matrix", effectSizes = "matrix", singleStepAdjustedPValues = "matrix", intersectionTest = "character", varianceOption = "character", normalApproximation = "logical", directionUpper = "logical" ), methods = list( initialize = function(design, dataInput, ..., varianceOption = C_VARIANCE_OPTION_DEFAULT, normalApproximation = FALSE) { callSuper(.design = design, .dataInput = dataInput, ..., varianceOption = varianceOption, normalApproximation = normalApproximation) init(design = design, dataInput = dataInput) for (param in c("singleStepAdjustedPValues", "weightsFisher", "weightsInverseNormal", "combFisher", "combInverseNormal")) { .setParameterType(param, C_PARAM_NOT_APPLICABLE) } for (param in .getParametersToShow()) { if (.getParameterType(param) == C_PARAM_TYPE_UNKNOWN) { .setParameterType(param, C_PARAM_GENERATED) } } .setParameterType("varianceOption", ifelse( identical(varianceOption, C_VARIANCE_OPTION_DEFAULT), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) .setParameterType("normalApproximation", ifelse( identical(normalApproximation, FALSE), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) .setParameterType("directionUpper", ifelse( identical(directionUpper, C_DIRECTION_UPPER_DEFAULT), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) }, .getParametersToShow = function() { parametersToShow <- c( "stages", "thetaH0", "direction", "normalApproximation", #"directionUpper", "varianceOption", "overallTestStatistics", "overallPValues", "overallStDevs", "testStatistics", "separatePValues", "effectSizes", "singleStepAdjustedPValues" ) if (.isTrialDesignInverseNormal(.design)) { parametersToShow <- c(parametersToShow, "combInverseNormal", "weightsInverseNormal" ) } else if (.isTrialDesignFisher(.design)) { parametersToShow <- c(parametersToShow, "combFisher", "weightsFisher" ) } return(parametersToShow) } ) ) #' #' @name StageResultsRates #' #' @title #' Stage Results of Rates #' #' @description #' Class for stage results of rates. #' #' @details #' This object can not be created directly; use \code{getStageResults} #' with suitable arguments to create the stage results of a dataset of rates. #' #' @field testStatistics The stage-wise test statistics. #' @field pValues The stage-wise p-values. #' @field combInverseNormal The inverse normal test. #' @field combFisher The Fisher's combination test. #' @field effectSizes The effect sizes for different designs. #' @field testActions The action drawn from test result. #' @field weightsFisher The weights for Fisher's combination test. #' @field weightsInverseNormal The weights for inverse normal statistic. 
#' #' @include class_core_parameter_set.R #' @include class_design.R #' @include class_analysis_dataset.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' StageResultsRates <- setRefClass("StageResultsRates", contains = "StageResults", fields = list( combInverseNormal = "numeric", combFisher = "numeric", overallTestStatistics = "numeric", overallPValues = "numeric", effectSizes = "numeric", testStatistics = "numeric", overallEvents = "numeric", overallEvents1 = "numeric", overallEvents2 = "numeric", overallSampleSizes = "numeric", overallSampleSizes1 = "numeric", overallSampleSizes2 = "numeric", normalApproximation = "logical" ), methods = list( initialize = function(design, dataInput, ..., normalApproximation = TRUE) { callSuper(.design = design, .dataInput = dataInput, ..., normalApproximation = normalApproximation) init(design = design, dataInput = dataInput) for (param in c( "weightsFisher", "weightsInverseNormal", "combFisher", "combInverseNormal")) { .setParameterType(param, C_PARAM_NOT_APPLICABLE) } for (param in .getParametersToShow()) { if (.getParameterType(param) == C_PARAM_TYPE_UNKNOWN) { .setParameterType(param, C_PARAM_GENERATED) } } .setParameterType("normalApproximation", ifelse( identical(normalApproximation, TRUE), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) }, .getParametersToShow = function() { parametersToShow <- c( "stages", "overallTestStatistics", "overallPValues" ) if (.dataInput$getNumberOfGroups() == 1) { parametersToShow <- c(parametersToShow, "overallEvents", "overallSampleSizes" ) } else if (.dataInput$getNumberOfGroups() == 2) { parametersToShow <- c(parametersToShow, "overallEvents1", "overallEvents2", "overallSampleSizes1", "overallSampleSizes2" ) } parametersToShow <- c(parametersToShow, "testStatistics", "pValues", "effectSizes" ) if (.isTrialDesignInverseNormal(.design)) { parametersToShow <- c(parametersToShow, "combInverseNormal", "weightsInverseNormal" ) } else if (.isTrialDesignFisher(.design)) { parametersToShow <- c(parametersToShow, "combFisher", "weightsFisher" ) } parametersToShow <- c(parametersToShow, "thetaH0", "direction", "normalApproximation" ) return(parametersToShow) } ) ) #' @include class_core_parameter_set.R #' @include class_design.R #' @include class_analysis_dataset.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' StageResultsMultiArmRates <- setRefClass("StageResultsMultiArmRates", contains = "StageResults", fields = list( stage = "integer", piControl = "matrix", piTreatments = "matrix", combInverseNormal = "matrix", combFisher = "matrix", overallTestStatistics = "matrix", overallPValues = "matrix", testStatistics = "matrix", separatePValues = "matrix", singleStepAdjustedPValues = "matrix", intersectionTest = "character", normalApproximation = "logical", directionUpper = "logical" ), methods = list( initialize = function(design, dataInput, ..., normalApproximation = FALSE) { callSuper(.design = design, .dataInput = dataInput, ..., normalApproximation = normalApproximation) init(design = design, dataInput = dataInput) for (param in c("singleStepAdjustedPValues", "weightsFisher", "weightsInverseNormal", "combFisher", "combInverseNormal")) { .setParameterType(param, C_PARAM_NOT_APPLICABLE) } for (param in .getParametersToShow()) { if (.getParameterType(param) == C_PARAM_TYPE_UNKNOWN) { .setParameterType(param, C_PARAM_GENERATED) } } .setParameterType("normalApproximation", ifelse( identical(normalApproximation, FALSE), C_PARAM_DEFAULT_VALUE, 
C_PARAM_USER_DEFINED))
            .setParameterType("directionUpper", ifelse(
                identical(directionUpper, C_DIRECTION_UPPER_DEFAULT),
                C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED))
        },

        .getParametersToShow = function() {
            parametersToShow <- c(
                "stages",
                "thetaH0",
                "direction",
                "normalApproximation",
                #"directionUpper",
                "piControl",
                "piTreatments",
                "overallTestStatistics",
                "overallPValues",
                "overallStDevs",
                "testStatistics",
                "separatePValues",
                "singleStepAdjustedPValues"
            )
            if (.isTrialDesignInverseNormal(.design)) {
                parametersToShow <- c(parametersToShow,
                    "combInverseNormal",
                    "weightsInverseNormal"
                )
            } else if (.isTrialDesignFisher(.design)) {
                parametersToShow <- c(parametersToShow,
                    "combFisher",
                    "weightsFisher"
                )
            }
            return(parametersToShow)
        }
    )
)

#'
#' @name StageResultsSurvival
#'
#' @title
#' Stage Results of Survival Data
#'
#' @description
#' Class for stage results of survival data.
#'
#' @details
#' This object can not be created directly; use \code{getStageResults}
#' with suitable arguments to create the stage results of a dataset of survival data.
#'
#' @field testStatistics The stage-wise test statistics.
#' @field pValues The stage-wise p-values.
#' @field combInverseNormal The inverse normal test.
#' @field combFisher The Fisher's combination test.
#' @field effectSizes The effect sizes for different designs.
#' @field testActions The action drawn from test result.
#' @field weightsFisher The weights for Fisher's combination test.
#' @field weightsInverseNormal The weights for inverse normal statistic.
#'
#' @include class_core_parameter_set.R
#' @include class_design.R
#' @include class_analysis_dataset.R
#' @include f_core_constants.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
StageResultsSurvival <- setRefClass("StageResultsSurvival",
    contains = "StageResults",
    fields = list(
        combInverseNormal = "numeric",
        combFisher = "numeric",
        overallPValues = "numeric",
        effectSizes = "numeric",
        overallLogRanks = "numeric",
        overallEvents = "numeric",
        overallAllocationRatios = "numeric",
        events = "numeric",
        allocationRatios = "numeric",
        logRanks = "numeric"
    ),
    methods = list(
        initialize = function(design, dataInput, ...) {
            callSuper(.design = design, .dataInput = dataInput, ...)
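            # Shared bookkeeping (stages, parameter names, format functions)
            # is wired up by the StageResults init() called next.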
init(design = design, dataInput = dataInput) for (param in c( "weightsFisher", "weightsInverseNormal", "combFisher", "combInverseNormal")) { .setParameterType(param, C_PARAM_NOT_APPLICABLE) } for (param in .getParametersToShow()) { if (.getParameterType(param) == C_PARAM_TYPE_UNKNOWN) { .setParameterType(param, C_PARAM_GENERATED) } } }, .getParametersToShow = function() { parametersToShow <- c( "stages", "overallLogRanks", "overallPValues", "overallEvents", "overallAllocationRatios", "events", "allocationRatios", "logRanks", "pValues", "overallPValues", "effectSizes" ) if (.isTrialDesignInverseNormal(.design)) { parametersToShow <- c(parametersToShow, "combInverseNormal", "weightsInverseNormal" ) } else if (.isTrialDesignFisher(.design)) { parametersToShow <- c(parametersToShow, "combFisher", "weightsFisher" ) } parametersToShow <- c(parametersToShow, "thetaH0", "direction" ) return(parametersToShow) } ) ) #' @include class_core_parameter_set.R #' @include class_design.R #' @include class_analysis_dataset.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' StageResultsMultiArmSurvival <- setRefClass("StageResultsMultiArmSurvival", contains = "StageResults", fields = list( stage = "integer", combInverseNormal = "matrix", combFisher = "matrix", overallTestStatistics = "matrix", overallPValues = "matrix", testStatistics = "matrix", separatePValues = "matrix", effectSizes = "matrix", singleStepAdjustedPValues = "matrix", intersectionTest = "character", directionUpper = "logical" ), methods = list( initialize = function(design, dataInput, ..., normalApproximation = FALSE) { callSuper(.design = design, .dataInput = dataInput, ...) init(design = design, dataInput = dataInput) for (param in c("singleStepAdjustedPValues", "weightsFisher", "weightsInverseNormal", "combFisher", "combInverseNormal")) { .setParameterType(param, C_PARAM_NOT_APPLICABLE) } for (param in .getParametersToShow()) { if (.getParameterType(param) == C_PARAM_TYPE_UNKNOWN) { .setParameterType(param, C_PARAM_GENERATED) } } .setParameterType("directionUpper", ifelse( identical(directionUpper, C_DIRECTION_UPPER_DEFAULT), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) }, .getParametersToShow = function() { parametersToShow <- c( "stages", "thetaH0", "direction", #"directionUpper", "overallTestStatistics", "overallPValues", "testStatistics", "separatePValues", "effectSizes", "singleStepAdjustedPValues" ) if (.isTrialDesignInverseNormal(.design)) { parametersToShow <- c(parametersToShow, "combInverseNormal", "weightsInverseNormal" ) } else if (.isTrialDesignFisher(.design)) { parametersToShow <- c(parametersToShow, "combFisher", "weightsFisher" ) } return(parametersToShow) } ) ) #' #' @name StageResults_names #' #' @title #' The Names of a Stage Results object #' #' @description #' Function to get the names of a \code{StageResults} object. #' #' @details #' Returns the names of stage results that can be accessed by the user. #' #' @export #' #' @keywords internal #' names.StageResults <- function(x) { return(x$.getParametersToShow()) } #' #' @name StageResults_as.data.frame #' #' @title #' Coerce Stage Results to a Data Frame #' #' @description #' Returns the \code{StageResults} as data frame. #' #' @details #' Coerces the stage results to a data frame. #' #' @export #' #' @keywords internal #' as.data.frame.StageResults <- function(x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, type = 1, ...) 
{ if (type == 1) { parametersToShow <- x$.getParametersToShow() return(x$.getAsDataFrame(parameterNames = parametersToShow, niceColumnNamesEnabled = niceColumnNamesEnabled, includeAllParameters = includeAllParameters, tableColumnNames = .getTableColumnNames(design = x$.design))) } kMax <- length(x$stages) group1 <- rep(1, kMax) group2 <- rep(2, kMax) empty <- rep(NA_real_, kMax) stageResults <- data.frame( Stage = c(x$stages, x$stages), Group = c(group1, group2), "Overall Mean" = c(x$overallMeans1, x$overallMeans2), "Overall StDev" = c(x$overallStDevs1, x$overallStDevs2), "Overall test statistics" = c(x$overallTestStatistics, empty), "Overall p-value" = c(x$overallPValues, empty), "Overall StdDev" = c(x$overallStDevs, empty), "Test statistic" = c(x$testStatistics, empty), "p-value" = c(x$pValues, empty), "Comb Inverse Normal" = c(x$combInverseNormal, empty), "Comb Fisher" = c(x$combFisher, empty), "Weights Fisher" = c(x$weightsFisher, empty), "Weights Inverse Normal" = c(x$weightsInverseNormal, empty), row.names = row.names, ... ) stageResults <- stageResults[with(stageResults, order(Stage, Group)), ] return(stageResults) } #' #' @title #' Stage Results Plotting #' #' @description #' Plots the conditional power together with the likelihood function. #' #' @param x The stage results at given stage, obtained from \code{getStageResults} or \code{getAnalysisResults}. #' @param y Not available for this kind of plot (is only defined to be compatible to the generic plot function). #' @param stage The stage number (optional). Default: total number of existing stages in the data input #' used to create the stage results. #' @param nPlanned The additional (i.e. "new" and not cumulative) sample size planned for each of the subsequent stages. #' The argument should be a vector with length equal to the number of remaining stages and contain #' the combined sample size from both treatment groups if two groups are considered. For survival outcomes, #' it should contain the planned number of additional events. #' @param allocationRatioPlanned The allocation ratio for two treatment groups planned for #' the subsequent stages, the default value is 1. #' @param main The main title. #' @param xlab The x-axis label. #' @param ylab The y-axis label. #' @param legendTitle The legend title. #' @param palette The palette, default is \code{"Set1"}. #' @param showSource If \code{TRUE}, the parameter names of the object will #' be printed which were used to create the plot; that may be, e.g., #' useful to check the values or to create own plots with \code{\link[graphics]{plot}}. #' @param legendPosition The position of the legend. #' By default (\code{NA_integer_}) the algorithm tries to find a suitable position. #' Choose one of the following values to specify the position manually: #' \itemize{ #' \item \code{0}: legend position outside plot #' \item \code{1}: legend position left top #' \item \code{2}: legend position left center #' \item \code{3}: legend position left bottom #' \item \code{4}: legend position right top #' \item \code{5}: legend position right center #' \item \code{6}: legend position right bottom #' } #' @param type The plot type (default = 1). Note that at the moment only one type #' (the conditional power plot) is available. #' @param ... Optional \code{ggplot2} arguments. Furthermore the following arguments can be defined: #' \itemize{ #' \item \code{thetaRange}: A range of assumed effect sizes if testing means or a survival design was specified. 
#' Additionally, if testing means was selected, an assumed standard deviation can be specified (default is 1). #' \item \code{piRange}: A range of assumed rates pi1 to calculate the conditional power. #' Additionally, if a two-sample comparison was selected, pi2 can be specified (default is the value from #' \code{getAnalysisResults}). #' \item \code{directionUpper}: The direction of one-sided testing. #' Default is \code{directionUpper = TRUE} which means that larger values of the #' test statistics yield smaller p-values. #' \item \code{thetaH0}: The null hypothesis value, default is 0 for the normal and the binary case, #' it is 1 for the survival case. #' For testing a rate in one sample, a value thetaH0 in (0,1) has to be specified for #' defining the null hypothesis H0: pi = thetaH0. #' } #' #' @details #' Generic function to plot all kinds of stage results. #' The conditional power is calculated only if effect size and sample size is specified. #' #' @return #' A \code{ggplot2} object. #' #' @export #' #' @examples #' #' design <- getDesignGroupSequential(kMax = 4, alpha = 0.025, #' informationRates = c(0.2, 0.5, 0.8, 1), #' typeOfDesign = "WT", deltaWT = 0.25) #' #' dataExample <- getDataset( #' n = c(20, 30, 30), #' means = c(50, 51, 55), #' stDevs = c(130, 140, 120) #' ) #' #' stageResults <- getStageResults(design, dataExample, thetaH0 = 20) #' #' if (require(ggplot2)) plot(stageResults, nPlanned = c(30), thetaRange = c(0, 100)) #' plot.StageResults <- function(x, y, ..., type = 1L, nPlanned, stage = x$getNumberOfStages(), allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, main = NA_character_, xlab = NA_character_, ylab = NA_character_, legendTitle = NA_character_, palette = "Set1", legendPosition = NA_integer_, showSource = FALSE) { .assertGgplotIsInstalled() .assertIsStageResults(x) .assertIsValidLegendPosition(legendPosition) plotData <- .getConditionalPowerPlot(stageResults = x, nPlanned = nPlanned, stage = stage, allocationRatioPlanned = allocationRatioPlanned, ...) 
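    # 'plotData' carries the x-grid of assumed effect sizes (or rates) together
    # with two y-series, the conditional power values and the likelihood values,
    # plus the title and axis labels used below.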
yParameterName1 <- "Conditional power" yParameterName2 <- "Likelihood" if (x$.isMultiArm()) { numberOfTreatments <- nrow(x$testStatistics) treatmentArmsToShow <- as.integer(list(...)[["treatmentArms"]]) if (is.null(treatmentArmsToShow) || length(treatmentArmsToShow) == 0 || is.na(treatmentArmsToShow) || !is.numeric(treatmentArmsToShow)) { treatmentArmsToShow <- 1L:as.integer(numberOfTreatments) } data <- data.frame( xValues = numeric(0), yValues = numeric(0), categories = character(0) ) for (treatmentArm in treatmentArmsToShow) { legend1 <- ifelse(length(treatmentArmsToShow) == 1, yParameterName1, paste0(yParameterName1, " (", treatmentArm, ")")) legend2 <- ifelse(length(treatmentArmsToShow) == 1, yParameterName2, paste0(yParameterName2, " (", treatmentArm, ")")) if (all(is.na(plotData$condPowerValues[treatmentArm, ]))) { data <- rbind(data, data.frame( xValues = plotData$xValues, yValues = plotData$likelihoodValues[treatmentArm, ], categories = rep(legend2, length(plotData$xValues)) )) } else { data <- rbind(data, data.frame( xValues = c(plotData$xValues, plotData$xValues), yValues = c(plotData$condPowerValues[treatmentArm, ], plotData$likelihoodValues[treatmentArm, ]), categories = c(rep(legend1, length(plotData$xValues)), rep(legend2, length(plotData$xValues))) )) } } } else { data <- data.frame( xValues = c(plotData$xValues, plotData$xValues), yValues = c(plotData$condPowerValues, plotData$likelihoodValues), categories = c(rep(yParameterName1, length(plotData$xValues)), rep(yParameterName2, length(plotData$xValues))) ) } p <- ggplot2::ggplot(data, ggplot2::aes(x = data$xValues, y = data$yValues, colour = factor(data$categories))) p <- x$getPlotSettings()$setTheme(p) p <- x$getPlotSettings()$hideGridLines(p) # set main title mainTitle <- ifelse(is.na(main), plotData$main, main) p <- x$getPlotSettings()$setMainTitle(p, mainTitle, subtitle = plotData$sub) # set legend if (is.na(legendPosition)) { legendPosition <- C_POSITION_LEFT_TOP } p <- x$getPlotSettings()$setLegendPosition(p, legendPosition = legendPosition) p <- x$getPlotSettings()$setLegendBorder(p) p <- x$getPlotSettings()$setLegendTitle(p, legendTitle) p <- x$getPlotSettings()$setLegendLabelSize(p) # set axes labels p <- x$getPlotSettings()$setAxesLabels(p, xAxisLabel = plotData$xlab, yAxisLabel1 = plotData$ylab, xlab = xlab, ylab = ylab) # plot lines and points p <- x$getPlotSettings()$plotValues(p, plotPointsEnabled = FALSE, pointBorder = 1) p <- x$getPlotSettings()$setAxesAppearance(p) p <- x$getPlotSettings()$setColorPalette(p, palette) p <- x$getPlotSettings()$enlargeAxisTicks(p) companyAnnotationEnabled <- .getOptionalArgument("companyAnnotationEnabled", ...) if (is.null(companyAnnotationEnabled) || !is.logical(companyAnnotationEnabled)) { companyAnnotationEnabled <- FALSE } p <- x$getPlotSettings()$addCompanyAnnotation(p, enabled = companyAnnotationEnabled) # start plot generation return(p) } rpact/R/class_analysis_results.R0000644000176200001440000004474013574406064016522 0ustar liggesusers###################################################################################### # # # -- Analysis result classes -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### #' #' @name AnalysisResults #' #' @title #' Basic Class for Analysis Results #' #' @description #' A basic class for analysis results. #' #' @details #' \code{AnalysisResults} is the basic class for #' \itemize{ #' \item \code{\link{AnalysisResultsFisher}}, #' \item \code{\link{AnalysisResultsGroupSequential}}, and #' \item \code{\link{AnalysisResultsInverseNormal}}. #' } #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include class_analysis_dataset.R #' @include class_design.R #' @include f_core_constants.R #' @include class_analysis_stage_results.R #' #' @keywords internal #' #' @importFrom methods new #' AnalysisResults <- setRefClass("AnalysisResults", contains = "ParameterSet", fields = list( .plotSettings = "PlotSettings", .design = "TrialDesign", .dataInput = "Dataset", .stageResults = "StageResults", stages = "integer", informationRates = "numeric", criticalValues = "numeric", futilityBounds = "numeric", alphaSpent = "numeric", stageLevels = "numeric", effectSizes = "numeric", testStatistics = "numeric", pValues = "numeric", testActions = "character", thetaH0 = "numeric", thetaH1 = "numeric", assumedStDev = "numeric", conditionalRejectionProbabilities = "numeric", nPlanned = "numeric", allocationRatioPlanned = "numeric", pi1 = "numeric", pi2 = "numeric", conditionalPower = "numeric", repeatedConfidenceIntervalLowerBounds = "numeric", repeatedConfidenceIntervalUpperBounds = "numeric", repeatedPValues = "numeric", finalStage = "integer", finalPValues = "numeric", finalConfidenceIntervalLowerBounds = "numeric", finalConfidenceIntervalUpperBounds = "numeric", medianUnbiasedEstimates = "numeric", normalApproximation = "logical", equalVariances = "logical", directionUpper = "logical" ), methods = list( initialize = function(design, dataInput, ...) { callSuper(.design = design, .dataInput = dataInput, ...) 
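            # Mirror the design characteristics (information rates, critical
            # values, futility bounds, alpha spent, stage levels) into the
            # result object as derived parameters so that printed output is
            # self-contained.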
.plotSettings <<- PlotSettings() .parameterNames <<- .getParameterNames(design) .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS finalStage <<- NA_integer_ stages <<- c(1:design$kMax) if (design$kMax == C_KMAX_DEFAULT) { .setParameterType("stages", C_PARAM_DEFAULT_VALUE) } else { .setParameterType("stages", C_PARAM_USER_DEFINED) } informationRates <<- design$informationRates .setParameterType("informationRates", C_PARAM_DERIVED) criticalValues <<- design$criticalValues .setParameterType("criticalValues", C_PARAM_DERIVED) if (.isTrialDesignFisher(design)) { futilityBounds <<- design$alpha0Vec } else if (.isTrialDesignInverseNormalOrGroupSequential(design)) { futilityBounds <<- design$futilityBounds } .setParameterType("futilityBounds", C_PARAM_DERIVED) alphaSpent <<- design$alphaSpent .setParameterType("alphaSpent", C_PARAM_DERIVED) stageLevels <<- design$stageLevels .setParameterType("stageLevels", C_PARAM_DERIVED) }, getPlotSettings = function() { return(.plotSettings) }, show = function(showType = 1, digits = NA_integer_) { .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE) }, .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) { 'Method for automatically printing analysis result objects' .resetCat() if (showType == 2) { .cat("Technical summary of the analysis results object of class ", methods::classLabel(class(.self)), ":\n", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showAllParameters(consoleOutputEnabled = consoleOutputEnabled) .showParameterTypeDescription(consoleOutputEnabled = consoleOutputEnabled) } else { .showParametersOfOneGroup(parameters = .getParametersToShow(), title = .toString(startWithUpperCase = TRUE), orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled) } }, .toString = function(startWithUpperCase = FALSE) { prefix <- paste(ifelse(startWithUpperCase, "Analysis results", "analysis results")) if (class(.self) == "AnalysisResultsGroupSequential") { return(paste(prefix, "(group sequential design)")) } if (class(.self) == "AnalysisResultsInverseNormal") { return(paste(prefix, "(inverse normal design)")) } if (class(.self) == "AnalysisResultsFisher") { return(paste(prefix, "(Fisher design)")) } return("unknown analysis results") }, .getParametersToShow = function() { parametersToShow <- c( "stages", "informationRates", "criticalValues", "futilityBounds", "alphaSpent", "stageLevels", "effectSizes", "testStatistics", "pValues" ) if (.isTrialDesignGroupSequential(.design)) { parametersToShow <- c(parametersToShow, "overallTestStatistics", "overallPValues") } else { parametersToShow <- c(parametersToShow, "combinationTestStatistics") } parametersToShow <- c(parametersToShow, "testActions", "thetaH0", "conditionalRejectionProbabilities", "nPlanned", "allocationRatioPlanned" ) if (.dataInput$isDatasetRates()) { if (.dataInput$getNumberOfGroups() == 1) { parametersToShow <- c(parametersToShow, "pi1") } else { parametersToShow <- c(parametersToShow, "pi1", "pi2") } } else { parametersToShow <- c(parametersToShow, "thetaH1") } if (.dataInput$isDatasetMeans()) { parametersToShow <- c(parametersToShow, "assumedStDev") } if (.isTrialDesignFisher(.design) && length(conditionalPowerSimulated) > 0 && (length(conditionalPowerSimulated) != 1 || conditionalPowerSimulated != -1)) { parametersToShow <- c(parametersToShow, "conditionalPowerSimulated") } else { parametersToShow <- c(parametersToShow, "conditionalPower") } 
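            # Repeated confidence interval bounds, repeated p-values, and the
            # final inference quantities complete the list of shown parameters.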
            parametersToShow <- c(parametersToShow,
                "repeatedConfidenceIntervalLowerBounds",
                "repeatedConfidenceIntervalUpperBounds",
                "repeatedPValues",
                "finalStage",
                "finalPValues",
                "finalConfidenceIntervalLowerBounds",
                "finalConfidenceIntervalUpperBounds",
                "medianUnbiasedEstimates"
            )
            return(parametersToShow)
        },

        getNumberOfStages = function() {
            return(max(length(stats::na.omit(effectSizes)),
                length(stats::na.omit(testStatistics)),
                length(stats::na.omit(pValues))))
        },

        getDataInput = function() {
            return(.dataInput)
        }
    )
)

#'
#' @name AnalysisResults_as.data.frame
#'
#' @title
#' Coerce AnalysisResults to a Data Frame
#'
#' @description
#' Returns the \code{\link{AnalysisResults}} object as data frame.
#'
#' @details
#' Coerces the analysis results to a data frame.
#'
#' @return
#' A data frame
#'
#' @export
#'
#' @keywords internal
#'
as.data.frame.AnalysisResults <- function(x, row.names = NULL, optional = FALSE, ...) {
    parametersToShow <- x$.getParametersToShow()
    parametersToShow <- parametersToShow[!(parametersToShow %in% c(
        "finalStage",
        "allocationRatioPlanned",
        "thetaH0",
        "thetaH1",
        "pi1",
        "pi2"
    ))]
    return(x$.getAsDataFrame(parameterNames = parametersToShow,
        tableColumnNames = .getTableColumnNames(design = x$.design)))
}

#'
#' @name AnalysisResults_names
#'
#' @title
#' The Names of an Analysis Results object
#'
#' @description
#' Function to get the names of an \code{\link{AnalysisResults}} object.
#'
#' @details
#' Returns the names of an analysis results object that can be accessed by the user.
#'
#' @return
#' A character vector containing the names of the \code{\link{AnalysisResults}} object.
#'
#' @export
#'
#' @keywords internal
#'
names.AnalysisResults <- function(x) {
    return(x$.getVisibleFieldNames())
}

#'
#' @name AnalysisResultsGroupSequential
#'
#' @title
#' Analysis Results Group Sequential
#'
#' @description
#' Class for analysis results based on a group sequential design.
#'
#' @details
#' This object can not be created directly; use \code{\link{getAnalysisResults}}
#' with suitable arguments to create the analysis results of a group sequential design.
#'
#' @include class_core_parameter_set.R
#' @include class_core_plot_settings.R
#' @include class_analysis_dataset.R
#' @include class_design.R
#' @include f_core_constants.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
AnalysisResultsGroupSequential <- setRefClass("AnalysisResultsGroupSequential",
    contains = "AnalysisResults",
    fields = list(
        overallTestStatistics = "numeric",
        overallPValues = "numeric"
    )
)

#'
#' @name AnalysisResultsInverseNormal
#'
#' @title
#' Analysis Results Inverse Normal
#'
#' @description
#' Class for analysis results based on an inverse normal design.
#'
#' @details
#' This object can not be created directly; use \code{\link{getAnalysisResults}}
#' with suitable arguments to create the analysis results of an inverse normal design.
#'
#' @include class_core_parameter_set.R
#' @include class_core_plot_settings.R
#' @include class_analysis_dataset.R
#' @include class_design.R
#' @include f_core_constants.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
AnalysisResultsInverseNormal <- setRefClass("AnalysisResultsInverseNormal",
    contains = "AnalysisResults",
    fields = list(
        combinationTestStatistics = "numeric"
    )
)

#'
#' @name AnalysisResultsFisher
#'
#' @title
#' Analysis Results Fisher
#'
#' @description
#' Class for analysis results based on a Fisher design.
#' #' @details #' This object cannot be created directly; use \code{\link{getAnalysisResults}} #' with suitable arguments to create the analysis results of a Fisher design. #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include class_analysis_dataset.R #' @include class_design.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' AnalysisResultsFisher <- setRefClass("AnalysisResultsFisher", contains = "AnalysisResults", fields = list( conditionalPowerSimulated = "numeric", combinationTestStatistics = "numeric" ), methods = list( initialize = function(design, dataInput, ...) { callSuper(design = design, dataInput = dataInput, ...) conditionalPowerSimulated <<- -1 } ) ) .getAnalysisResultsPlotArguments <- function(x, nPlanned = NA_real_, stage = x$getNumberOfStages(), allocationRatioPlanned = NA_real_) { if (all(is.na(nPlanned))) { nPlanned <- stats::na.omit(x$nPlanned) stage <- x$.design$kMax - length(nPlanned) } else if (is.na(stage)) { stage <- x$getNumberOfStages() } if (is.na(allocationRatioPlanned)) { allocationRatioPlanned <- x$allocationRatioPlanned } return(list( stageResults = x$.stageResults, nPlanned = nPlanned, stage = stage, allocationRatioPlanned = allocationRatioPlanned )) } #' #' @title #' Analysis Results Plotting #' #' @description #' Plots the conditional power together with the likelihood function. #' #' @param x The analysis results at given stage, obtained from \code{\link{getAnalysisResults}}. #' @param y Not available for this kind of plot (is only defined to be compatible with the generic plot function). #' @param nPlanned The additional (i.e., "new" and not cumulative) sample size planned for each of the subsequent stages. #' The argument should be a vector with length equal to the number of remaining stages and contain #' the combined sample size from both treatment groups if two groups are considered. For survival outcomes, #' it should contain the planned number of additional events. #' @param stage The stage number (optional). Default: total number of existing stages in the data input #' used to create the analysis results. #' @param allocationRatioPlanned The allocation ratio n1/n2 for two treatment groups planned for #' the subsequent stages; the default value is 1. #' @param main The main title, default is \code{"Dataset"}. #' @param xlab The x-axis label, default is \code{"Stage"}. #' @param ylab The y-axis label. #' @param legendTitle The legend title, default is \code{""}. #' @param palette The palette, default is \code{"Set1"}. #' @param showSource If \code{TRUE}, the parameter names of the object that were used to create the plot #' will be printed; this may be useful, e.g., to check the values or to create your own plots with \code{\link[graphics]{plot}}. #' @param legendPosition The position of the legend. #' By default (\code{NA_integer_}) the algorithm tries to find a suitable position. #' Choose one of the following values to specify the position manually: #' \itemize{ #' \item \code{0}: legend position outside plot #' \item \code{1}: legend position left top #' \item \code{2}: legend position left center #' \item \code{3}: legend position left bottom #' \item \code{4}: legend position right top #' \item \code{5}: legend position right center #' \item \code{6}: legend position right bottom #' } #' @param type The plot type (default = 1). Note that at the moment only one type (the conditional power plot) is available. #' @param ... Optional \code{ggplot2} arguments.
Furthermore, the following arguments can be defined: #' \itemize{ #' \item \code{thetaRange}: A range of assumed effect sizes if testing means or a survival design was specified. #' Additionally, if testing means was selected, an assumed standard deviation can be specified (default is 1). #' \item \code{piRange}: A range of assumed rates pi1 to calculate the conditional power. #' Additionally, if a two-sample comparison was selected, pi2 can be specified (default is the value from #' \code{getAnalysisResults}). #' \item \code{directionUpper}: The direction of one-sided testing. #' Default is \code{directionUpper = TRUE} which means that larger values of the #' test statistics yield smaller p-values. #' \item \code{thetaH0}: The null hypothesis value, default is 0 for the normal and the binary case, #' it is 1 for the survival case. #' For testing a rate in one sample, a value thetaH0 in (0,1) has to be specified for #' defining the null hypothesis H0: pi = thetaH0. #' } #' #' @details #' The conditional power is calculated only if effect size and sample size are specified. #' #' @return #' A \code{ggplot2} object. #' #' @export #' #' @examples #' #' design <- getDesignGroupSequential(kMax = 2) #' #' dataExample <- getDataset( #' n = c(20, 30), #' means = c(50, 51), #' stDevs = c(130, 140) #' ) #' #' result <- getAnalysisResults(design = design, #' dataInput = dataExample, thetaH0 = 20, #' nPlanned = c(30), thetaH1 = 1.5, stage = 1) #' #' if (require(ggplot2)) plot(result, thetaRange = c(0, 100)) #' plot.AnalysisResults <- function(x, y, ..., type = 1L, nPlanned = NA_real_, stage = x$getNumberOfStages(), allocationRatioPlanned = NA_real_, main = NA_character_, xlab = NA_character_, ylab = NA_character_, legendTitle = "", palette = "Set1", legendPosition = NA_integer_, showSource = FALSE) { if (showSource) { warning("'showSource' = TRUE is not implemented yet for class ", class(x)) } .assertIsValidLegendPosition(legendPosition = legendPosition) plotArgs <- .getAnalysisResultsPlotArguments(x = x, nPlanned = nPlanned, stage = stage, allocationRatioPlanned = allocationRatioPlanned) nPlanned <- plotArgs$nPlanned stage <- plotArgs$stage allocationRatioPlanned <- plotArgs$allocationRatioPlanned if (x$getDataInput()$isDatasetMeans()) { assumedStDev <- .getOptionalArgument("assumedStDev", ...) if (is.null(assumedStDev)) { assumedStDev <- x$assumedStDev return(plot.StageResults(x = x$.stageResults, y = y, nPlanned = nPlanned, stage = stage, main = main, xlab = xlab, ylab = ylab, legendTitle = legendTitle, palette = palette, legendPosition = legendPosition, type = type, assumedStDev = assumedStDev, allocationRatioPlanned = allocationRatioPlanned, ...)) } } else if (x$getDataInput()$isDatasetRates()) { pi2 <- .getOptionalArgument("pi2", ...) if (is.null(pi2)) { pi2 <- x$pi2 return(plot.StageResults(x = x$.stageResults, y = y, nPlanned = nPlanned, stage = stage, main = main, xlab = xlab, ylab = ylab, legendTitle = legendTitle, palette = palette, legendPosition = legendPosition, type = type, pi2 = pi2, allocationRatioPlanned = allocationRatioPlanned, ...)) } } plot.StageResults(x = x$.stageResults, y = y, nPlanned = nPlanned, stage = stage, allocationRatioPlanned = allocationRatioPlanned, main = main, xlab = xlab, ylab = ylab, legendTitle = legendTitle, palette = palette, legendPosition = legendPosition, type = type, ...)
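# Note on the call above: it is the common fallback, used for survival
# endpoints as well as for means/rates analyses in which 'assumedStDev' or
# 'pi2' were passed explicitly via '...' (and are therefore forwarded
# unchanged to plot.StageResults).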
} rpact/R/f_core_constants.R0000644000176200001440000011367713574374022015266 0ustar liggesusers###################################################################################### # # # -- RPACT constants -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### C_LOG_LEVEL_TRACE <- "TRACE" C_LOG_LEVEL_DEBUG <- "DEBUG" C_LOG_LEVEL_INFO <- "INFO" C_LOG_LEVEL_WARN <- "WARN" C_LOG_LEVEL_ERROR <- "ERROR" C_LOG_LEVEL_PROGRESS <- "PROGRESS" C_LOG_LEVEL_DISABLED <- "DISABLED" # the ratio of the circumference of a circle to its diameter C_CIRCLE_CONSTANT_PI <- base::pi # 3.1415926535897931 # used in 'class_core_plot_settings.R' C_POSITION_OUTSIDE_PLOT <- 0 C_POSITION_LEFT_TOP <- 1 C_POSITION_LEFT_CENTER <- 2 C_POSITION_LEFT_BOTTOM <- 3 C_POSITION_RIGHT_TOP <- 4 C_POSITION_RIGHT_CENTER <- 5 C_POSITION_RIGHT_BOTTOM <- 6 C_DESIGN_TOLERANCE_DEFAULT <- 1e-08 C_CONST_NEWTON_COTES <- 15 C_TWO_SIDED_POWER_DEFAULT <- FALSE C_BINDING_FUTILITY_DEFAULT <- FALSE C_BINDING_FUTILITY_FISHER_DEFAULT <- TRUE C_CONST_BOUND_HP_DEFAULT <- 3 C_ALPHA_DEFAULT <- 0.025 C_BETA_DEFAULT <- 0.2 C_KMAX_DEFAULT <- 3L # L <- integer literal C_KMAX_UPPER_BOUND <- 10L C_KMAX_UPPER_BOUND_FISHER <- 6L C_NA_MAX_DEFAULT <- 100L C_POWER_ASN_THETA_DEFAULT <- seq(-1, 1, 0.02) C_ANALYSIS_TOLERANCE_DEFAULT <- 1e-06 C_ANALYSIS_TOLERANCE_FISHER_DEFAULT <- 1e-14 C_UPPER_BOUNDS_DEFAULT <- 8 C_FUTILITY_BOUNDS_DEFAULT <- -6 C_ALPHA_0_VEC_DEFAULT <- 1 C_THETA_H0_MEANS_DEFAULT <- 0 C_THETA_H0_RATES_DEFAULT <- 0 C_THETA_H0_SURVIVAL_DEFAULT <- 1 C_ALLOCATION_RATIO_DEFAULT <- 1 C_DIRECTION_UPPER_DEFAULT <- TRUE C_NORMAL_APPROXIMATION_MEANS_DEFAULT <- FALSE C_NORMAL_APPROXIMATION_RATES_DEFAULT <- TRUE C_EQUAL_VARIANCES_DEFAULT <- TRUE C_ITERATIONS_DEFAULT <- 1000 C_ACCEPT_DEVIATION_INFORMATIONRATES <- 0.05 C_THETA_RANGE_SEQUENCE_LENGTH_DEFAULT <- 50 C_VARIED_PARAMETER_SEQUENCE_LENGTH_DEFAULT <- 30 C_CLASS_NAME_TRIAL_DESIGN_GROUP_SEQUENTIAL <- "TrialDesignGroupSequential" C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL <- "TrialDesignInverseNormal" C_CLASS_NAME_TRIAL_DESIGN_FISHER <- "TrialDesignFisher" C_CLASS_NAME_TRIAL_DESIGN_CONDITIONAL_DUNNETT <- "TrialDesignConditionalDunnett" .getTrialDesignClassNames <- function(inclusiveConditionalDunnet = TRUE) { trialDesignClassNames <- c(C_CLASS_NAME_TRIAL_DESIGN_GROUP_SEQUENTIAL, C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL, C_CLASS_NAME_TRIAL_DESIGN_FISHER) if (inclusiveConditionalDunnet) { trialDesignClassNames <- c(trialDesignClassNames, C_CLASS_NAME_TRIAL_DESIGN_CONDITIONAL_DUNNETT) } return(trialDesignClassNames) } C_EXCEPTION_TYPE_RUNTIME_ISSUE = "Runtime exception: " C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT = "Illegal argument: " C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT = "Illegal data input: " C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS = "Conflicting arguments: " C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS = "Argument out of bounds: " C_EXCEPTION_TYPE_ARGUMENT_LENGTH_OUT_OF_BOUNDS = "Argument length out of bounds: " C_EXCEPTION_TYPE_INDEX_OUT_OF_BOUNDS = "Index out of bounds: " 
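# Usage sketch: these prefixes are concatenated with a specific message in
# stop() and warning() calls, e.g. (mirroring a call used in f_design_utilities.R):
#   stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'kappa' (", kappa, ") must be > 0")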
C_EXCEPTION_TYPE_MISSING_ARGUMENT = "Missing argument: " C_EXCEPTION_TYPE_INCOMPLETE_ARGUMENTS = "Incomplete associated arguments: " C_DIRECTION_LOWER = "lower" C_DIRECTION_UPPER = "upper" # # Constants used in 'f_analysis_multiarm' # C_INTERSECTIONTEST_MULTIARMED_DEFAULT <- "Dunnett" C_VARIANCE_OPTION_DEFAULT <- "overallPooled" # # Constants used in 'parameters.R' # C_PARAM_USER_DEFINED <- "u" C_PARAM_DEFAULT_VALUE <- "d" C_PARAM_GENERATED <- "g" C_PARAM_DERIVED <- ">" C_PARAM_NOT_APPLICABLE <- "." C_PARAM_TYPE_UNKNOWN <- "?" # # Constants used in 'f_simulation_survival.R' # C_PI_2_DEFAULT <- 0.2 C_PI_1_DEFAULT <- seq(0.2, 0.5, 0.1) C_PI_1_SAMPLE_SIZE_DEFAULT <- c(0.4, 0.5, 0.6) C_DROP_OUT_RATE_1_DEFAULT <- 0 C_DROP_OUT_RATE_2_DEFAULT <- 0 C_DROP_OUT_TIME_DEFAULT <- 12L C_EVENT_TIME_DEFAULT <- 12L C_ALLOCATION_1_DEFAULT <- 1 C_ALLOCATION_2_DEFAULT <- 1 C_MAX_ITERATIONS_DEFAULT <- 10L C_MAX_SIMULATION_ITERATIONS_DEFAULT <- 1000L C_ACCRUAL_TIME_DEFAULT <- c(0L, 12L) C_ACCRUAL_INTENSITY_DEFAULT <- 0.1 C_FOLLOW_UP_TIME_DEFAULT <- 6L # # Additional constants used in 'f_design_sample_size_calculator.R' # C_ALTERNATIVE_DEFAULT <- seq(0.2, 1, 0.2) C_ALTERNATIVE_POWER_SIMULATION_DEFAULT <- seq(0, 1, 0.2) C_STDEV_DEFAULT <- 1 # # Constants used in 'core_group_sequential_design.R' # # Type of design is one of the following: # O'Brien & Fleming, # Pocock, # Wang & Tsiatis Delta class, # Haybittle & Peto, # Optimum design within Wang & Tsiatis class, # Pocock type alpha spending, # O'Brien & Fleming type alpha spending, # Kim & DeMets alpha spending, # Hwang, Shi & DeCani alpha spending, # user defined alpha spending # C_TYPE_OF_DESIGN_OF <- "OF" # O'Brien & Fleming C_TYPE_OF_DESIGN_P <- "P" # Pocock, C_TYPE_OF_DESIGN_WT <- "WT" # Wang & Tsiatis Delta class C_TYPE_OF_DESIGN_HP <- "HP" # Haybittle & Peto C_TYPE_OF_DESIGN_WT_OPTIMUM <- "WToptimum" # Optimum design within Wang & Tsiatis class C_TYPE_OF_DESIGN_AS_P <- "asP" # Pocock type alpha spending C_TYPE_OF_DESIGN_AS_OF <- "asOF" # O'Brien & Fleming type alpha spending C_TYPE_OF_DESIGN_AS_KD <- "asKD" # Kim & DeMets alpha spending C_TYPE_OF_DESIGN_AS_HSD <- "asHSD" # Hwang, Shi & DeCani alpha spending C_TYPE_OF_DESIGN_AS_USER <- "asUser" # user defined alpha spending C_DEFAULT_TYPE_OF_DESIGN <- C_TYPE_OF_DESIGN_OF # the default type of design .getDesignTypes <- function() { return(c( C_TYPE_OF_DESIGN_OF, C_TYPE_OF_DESIGN_P, C_TYPE_OF_DESIGN_WT, C_TYPE_OF_DESIGN_HP, C_TYPE_OF_DESIGN_WT_OPTIMUM, C_TYPE_OF_DESIGN_AS_P, C_TYPE_OF_DESIGN_AS_OF, C_TYPE_OF_DESIGN_AS_KD, C_TYPE_OF_DESIGN_AS_HSD, C_TYPE_OF_DESIGN_AS_USER )) } .printDesignTypes <- function() { .arrayToString(.getDesignTypes(), encapsulate = TRUE) } .isAlphaSpendingDesignType <- function(typeOfDesign, userDefinedAlphaSpendingIncluded = TRUE) { if (userDefinedAlphaSpendingIncluded && typeOfDesign == C_TYPE_OF_DESIGN_AS_USER) { return(TRUE) } return(typeOfDesign %in% c(C_TYPE_OF_DESIGN_AS_P, C_TYPE_OF_DESIGN_AS_OF, C_TYPE_OF_DESIGN_AS_KD,C_TYPE_OF_DESIGN_AS_HSD)) } # # Type of beta spending design is one of the following: # Pocock type beta spending, # O'Brien & Fleming type beta spending, # Kim & DeMets beta spending, # Hwang, Shi & DeCani beta spending, # user defined beta spending # "none", "bsP", "bsOF", "bsKD", "bsHSD", "bsUser" C_TYPE_OF_DESIGN_BS_NONE <- "none" C_TYPE_OF_DESIGN_BS_P <- "bsP" # Pocock type beta spending C_TYPE_OF_DESIGN_BS_OF <- "bsOF" # O'Brien & Fleming type beta spending C_TYPE_OF_DESIGN_BS_KD <- "bsKD" # Kim & DeMets beta spending C_TYPE_OF_DESIGN_BS_HSD <- "bsHSD" # 
Hwang, Shi & DeCani beta spending C_TYPE_OF_DESIGN_BS_USER <- "bsUser" # user defined beta spending .getBetaSpendingDesignTypes <- function() { return(c( C_TYPE_OF_DESIGN_BS_NONE, C_TYPE_OF_DESIGN_BS_P, C_TYPE_OF_DESIGN_BS_OF, C_TYPE_OF_DESIGN_BS_KD, C_TYPE_OF_DESIGN_BS_HSD, C_TYPE_OF_DESIGN_BS_USER )) } .printBetaSpendingDesignTypes <- function() { .arrayToString(.getBetaSpendingDesignTypes(), encapsulate = TRUE) } .isBetaSpendingDesignType <- function(typeOfDesign, userDefinedBetaSpendingIncluded = TRUE, noneIncluded = FALSE) { if (userDefinedBetaSpendingIncluded && typeOfDesign == C_TYPE_OF_DESIGN_BS_USER) { return(TRUE) } if (noneIncluded && typeOfDesign == C_TYPE_OF_DESIGN_BS_NONE) { return(TRUE) } return(typeOfDesign %in% c( C_TYPE_OF_DESIGN_BS_P, C_TYPE_OF_DESIGN_BS_OF, C_TYPE_OF_DESIGN_BS_KD, C_TYPE_OF_DESIGN_BS_HSD )) } ## ## ------------------------------------------- ## C_OPTIMIZATION_CRITERION_ASNH1 <- "ASNH1" C_OPTIMIZATION_CRITERION_ASNIFH1 <- "ASNIFH1" C_OPTIMIZATION_CRITERION_ASN_SUM <- "ASNsum" C_OPTIMIZATION_CRITERION_DEFAULT <- C_OPTIMIZATION_CRITERION_ASNH1 .getOptimizationCriterions <- function() { return(c( C_OPTIMIZATION_CRITERION_ASNH1, C_OPTIMIZATION_CRITERION_ASNIFH1, C_OPTIMIZATION_CRITERION_ASN_SUM )) } .printOptimizationCriterion <- function() { .arrayToString(.getOptimizationCriterions(), encapsulate = TRUE) } .isOptimizationCriterion <- function(x) { return(x %in% .getOptimizationCriterions()) } ## ## ------------------------------------------- ## C_FISHER_METHOD_FULL_ALPHA <- "fullAlpha" C_FISHER_METHOD_EQUAL_ALPHA <- "equalAlpha" C_FISHER_METHOD_NO_INTERACTION <- "noInteraction" C_FISHER_METHOD_USER_DEFINED_ALPHA <- "userDefinedAlpha" C_FISHER_METHOD_DEFAULT <- C_FISHER_METHOD_EQUAL_ALPHA .getFisherMethods <- function() { return(c( C_FISHER_METHOD_FULL_ALPHA, C_FISHER_METHOD_EQUAL_ALPHA, C_FISHER_METHOD_NO_INTERACTION, C_FISHER_METHOD_USER_DEFINED_ALPHA )) } .printFisherMethods <- function() { .arrayToString(.getFisherMethods(), encapsulate = TRUE) } .isFisherMethod <- function(method) { return(method %in% .getFisherMethods()) } ## ## ------------------------------------------- ## C_PARAMETER_NAMES <- list( iterations = "Iterations", seed = "Seed", groups = "Treatment groups", stages = "Stages", sampleSizes = "Sample sizes", means = "Means", stDevs = "Standard deviations", overallEvents = "Overall events", overallAllocationRatios = "Overall allocation ratios", overallLogRanks = "Overall log-ranks", bindingFutility = "Binding futility", constantBoundsHP = "Haybittle Peto constants", kMax = "Maximum number of stages", alpha = "Significance level", finalStage = "Final stage", informationRates = "Information rates", # TODO: remove ending 's' (plural)?
criticalValues = "Critical values", stageLevels = "Stage levels", alphaSpent = "Cumulative alpha spending", tolerance = "Tolerance", method = "Method", alpha0Vec = "Alpha_0", scale = "Scale", nonStochasticCurtailment = "Non stochastic curtailment", simAlpha = "Simulated alpha", beta = "Type II error rate", betaSpent = "Cumulative beta spending", sided = "Test", futilityBounds = "Futility bounds (binding)", futilityBoundsNonBinding = "Futility bounds (non-binding)", typeOfDesign = "Type of design", deltaWT = "Delta for Wang & Tsiatis Delta class", optimizationCriterion = "Optimization criterion for optimum design within Wang & Tsiatis class", gammaA = "Parameter for alpha spending function", gammaB = "Parameter for beta spending function", typeBetaSpending = "Type of beta spending", userAlphaSpending = "User defined alpha spending", userBetaSpending = "User defined beta spending", probs = "Exit probabilities" , power = "Power", theta = "Effect", direction = "Direction", normalApproximation = "Normal approximation", equalVariances = "Equal variances", shift = "Shift", inflationFactor = "Inflation factor", information = "Informations", rejectionProbabilities = "Rejection probabilities", futilityProbabilities = "Futility probabilities", averageSampleNumber1 = "Ratio expected vs fixed sample size under H1", averageSampleNumber01 = "Ratio expected vs fixed sample size under a value between H0 and H1", averageSampleNumber0 = "Ratio expected vs fixed sample size under H0", allocationRatioPlanned = "Planned allocation ratio", thetaH0 = "Theta H0", thetaH1 = "Assumed effect", assumedStDev = "Assumed standard deviation", pi1 = "pi (1)", pi2 = "pi (2)", pi1H1 = "pi (1) under H1", pi2H1 = "pi (2) under H1", nPlanned = "Planned sample size", piControl = "Control rates", piTreatments = "Treatment rates", effectSizes = "Effect sizes", testStatistics = "Test statistics", pValues = "p-values", combinationTestStatistics = "Combination test statistics", testActions = "Actions", conditionalPower = "Conditional power", conditionalPowerAchieved = "Cond. power (achieved)", conditionalPowerSimulated = "Cond. power (simulated)", conditionalRejectionProbabilities = "Cond. 
rejection probability", repeatedConfidenceIntervalLowerBounds = "RCIs (lower)", repeatedConfidenceIntervalUpperBounds = "RCIs (upper)", repeatedPValues = "Repeated p-values", finalPValues = "Final p-value", finalConfidenceIntervalLowerBounds = "Final CIs (lower)", finalConfidenceIntervalUpperBounds = "Final CIs (upper)", medianUnbiasedEstimates = "Median unbiased estimate", overallSampleSizes = "Overall sample sizes", overallSampleSizes1 = "Overall sample sizes (1)", overallSampleSizes2 = "Overall sample sizes (2)", overallTestStatistics = "Overall test statistics", overallPValues = "Overall p-values", overallMeans = "Overall means", overallMeans1 = "Overall means (1)", overallMeans2 = "Overall means (2)", overallStDevs1 = "Overall standard deviations (1)", overallStDevs2 = "Overall standard deviations (2)", overallStDevs = "Overall standard deviations", testStatistics = "Test statistics", combInverseNormal = "Inverse normal combination", combFisher = "Fisher combination", weightsFisher = "Weights Fisher", weightsInverseNormal = "Weights inverse normal", overallLogRanks = "Overall log-ranks", overallEvents = "Overall number of events", overallEvents1 = "Overall number of events (1)", overallEvents2 = "Overall number of events (2)", overallAllocationRatios = "Overall allocation ratios", events = "Number of events", allocationRatios = "Allocation ratios", logRanks = "Log-ranks", nMax = "N_max", averageSampleNumber = "Average sample sizes (ASN)", calculatedPower = "Power", earlyStop = "Early stop", rejectPerStage = "Reject per stage", futilityPerStage = "Futility stop per stage", overallEarlyStop = "Overall Early stop", overallReject = "Overall reject", overallFutility = "Overall futility", riskRatio = "Risk ratio", meanRatio = "Mean ratio", alternative = "Alternatives", stDev = "Standard deviation", nFixed = "Number of subjects fixed", nFixed1 = "Number of subjects fixed (1)", nFixed2 = "Number of subjects fixed (2)", maxNumberOfSubjects = "Maximum number of subjects", maxNumberOfSubjects1 = "Maximum number of subjects (1)", maxNumberOfSubjects2 = "Maximum number of subjects (2)", numberOfSubjects = "Number of subjects", numberOfSubjects1 = "Number of subjects (1)", numberOfSubjects2 = "Number of subjects (2)", expectedNumberOfSubjectsH0 = "Expected number of subjects under H0", expectedNumberOfSubjectsH01 = "Expected number of subjects under H0/H1", expectedNumberOfSubjectsH1 = "Expected number of subjects under H1", expectedNumberOfSubjects = "Expected number of subjects", omega = "Probability of an event", hazardRatio = "Hazard ratio", typeOfComputation = "Type of computation", accountForObservationTimes = "Account for observation times", eventTime = "Event time", accrualTime = "Accrual time", totalAccrualTime = "Total accrual time", remainingTime = "Remaining time", followUpTime = "Follow up time", dropoutRate1 = "Drop-out rate (1)", dropoutRate2 = "Drop-out rate (2)", dropoutTime = "Drop-out time", calculateFollowUpTime = "Calculate follow up time", eventsFixed = "Number of events fixed", expectedEventsH0 = "Expected number of events under H0", expectedEventsH01 = "Expected number of events under H0/H1", expectedEventsH1 = "Expected number of events under H1", analysisTime = "Analysis times", studyDurationH1 = "Expected study duration under H1", eventsPerStage = "Number of events by stage", expectedNumberOfSubjectsH1 = "Expected number of subjects under H1", twoSidedPower = "Two-sided power", plannedEvents = "Planned events", plannedSubjects = "Planned subjects", 
minNumberOfEventsPerStage = "Minimum number of events per stage", maxNumberOfEventsPerStage = "Maximum number of events per stage", minNumberOfSubjectsPerStage = "Minimum number of subjects per stage", maxNumberOfSubjectsPerStage = "Maximum number of subjects per stage", accrualIntensity = "Accrual intensity", accrualIntensityRelative = "Accrual intensity (relative)", maxNumberOfIterations = "Maximum number of iterations", allocation1 = "Allocation 1", allocation2 = "Allocation 2", expectedNumberOfEvents = "Expected number of events", expectedNumberOfEventsPerStage = "Expected number of events by stage", eventsNotAchieved = "Events not achieved", subjects = "Subjects", overallReject = "Overall reject", futilityStop = "Futility stop", studyDuration = "Expected study duration", maxStudyDuration = "Maximal study duration", directionUpper = "Direction upper", piecewiseSurvivalTime = "Piecewise survival times", lambda2 = "lambda (2)", lambda1 = "lambda (1)", kappa = "kappa", earlyStopPerStage = "Early stop per stage", effect = "Effect", maxNumberOfEvents = "Maximum number of events", criticalValuesEffectScale = "Critical values (effect scale)", criticalValuesEffectScaleLower = "Lower critical values (effect scale)", criticalValuesEffectScaleUpper = "Upper critical values (effect scale)", criticalValuesPValueScale = "Local one-sided significance levels", ".design$stageLevels" = "Local one-sided significance levels", futilityBoundsEffectScale = "Futility bounds (effect scale)", futilityBoundsPValueScale = "Futility bounds (1-sided p-value scale)", analysisTime = "Analysis time", eventsPerStage1 = "Observed # events by stage (1)", eventsPerStage2 = "Observed # events by stage (2)", testStatistic = "Test statistic", logRankStatistic = "Log-rank statistic", hazardRatioEstimateLR = "Hazard ratio estimate LR", delayedResponseAllowed = "Delayed response allowed", delayedResponseEnabled = "Delayed response enabled", piecewiseSurvivalEnabled = "Piecewise exponential survival enabled", median1 = "median (1)", median2 = "median (2)", eventsPerStage = "Observed number of events by stage", expectedNumberOfEvents = "Observed number of events", expectedNumberOfSubjects = "Observed number of subjects", endOfAccrualIsUserDefined = "End of accrual is user defined", followUpTimeMustBeUserDefined = "Follow-up time must be user defined", maxNumberOfSubjectsIsUserDefined = "Max number of subjects is user defined", maxNumberOfSubjectsCanBeCalculatedDirectly = "Max number of subjects can be calculated directly", absoluteAccrualIntensityEnabled = "Absolute accrual intensity is enabled", time = "Time", overallEventProbabilities = "Overall event probabilities", eventProbabilities1 = "Event probabilities (1)", eventProbabilities2 = "Event probabilities (2)", informationAtInterim = "Information at interim", secondStageConditioning = "Conditional second stage p-values", separatePValues = "Separate p-values", singleStepAdjustedPValues = "Single step adjusted p-values", intersectionTest = "Intersection test", varianceOption = "Variance option", optimumAllocationRatio = "Optimum allocation ratio", rejected = "Rejected" ) .getParameterNames <- function(design = NULL, designPlan = NULL) { parameterNames <- C_PARAMETER_NAMES if (!is.null(design) && !is.na(design$bindingFutility) && !design$bindingFutility) { parameterNames$futilityBounds <- C_PARAMETER_NAMES[["futilityBoundsNonBinding"]] } if (!is.null(designPlan) && inherits(designPlan, "TrialDesignPlanSurvival") && !is.null(designPlan$.piecewiseSurvivalTime) && 
designPlan$.piecewiseSurvivalTime$piecewiseSurvivalEnabled) { parameterNames$lambda2 <- "Piecewise survival lambda (2)" parameterNames$lambda1 <- "Piecewise survival lambda (1)" } if (!is.null(designPlan) && inherits(designPlan, "TrialDesignPlanSurvival") && identical(designPlan$.design$kMax, 1L)) { parameterNames$maxNumberOfEvents <- "Number of events" } if (!is.null(designPlan) && inherits(designPlan, "TrialDesignPlan") && identical(designPlan$.design$kMax, 1L)) { parameterNames$studyDuration <- "Study duration" } if (!is.null(designPlan) && (inherits(designPlan, "TrialDesignPlanMeans") || inherits(designPlan, "SimulationResultsMeans")) && isTRUE(designPlan$meanRatio)) { parameterNames$stDev <- "Coefficient of variation" } if (!is.null(design) && class(design) != "TrialDesign" && design$sided == 2) { parameterNames$criticalValuesPValueScale <- "Local two-sided significance levels" } return(parameterNames) } C_TABLE_COLUMN_NAMES <- list( iterations = "Iterations", seed = "Seed", groups = "Treatment group", stages = "Stage", sampleSizes = "Sample size", means = "Mean", stDevs = "Standard deviation", overallEvents = "Overall event", overallAllocationRatios = "Overall allocation ratio", overallLogRanks = "Overall log-rank", overallMeans = "Overall mean", bindingFutility = "Binding futility", constantBoundsHP = "Haybittle Peto constant", kMax = "Maximum # stages", alpha = "Significance level", finalStage = "Final stage", informationRates = "Information rate", criticalValues = "Critical value", stageLevels = "Stage level", alphaSpent = "Cumulative alpha spending", tolerance = "Tolerance", method = "Method", alpha0Vec = "Alpha_0", scale = "Scale", nonStochasticCurtailment = "Non stochastic curtailment", simAlpha = "Simulated alpha", beta = "Type II error rate", betaSpent = "Cumulative beta spending", sided = "Test", futilityBounds = "Futility bound (binding)", futilityBoundsNonBinding = "Futility bound (non-binding)", typeOfDesign = "Type of design", deltaWT = "Delta (Wang & Tsiatis)", optimizationCriterion = "Optimization criterion (Wang & Tsiatis)", gammaA = "Parameter for alpha spending function", gammaB = "Parameter for beta spending function", typeBetaSpending = "Type of beta spending", userAlphaSpending = "User defined alpha spending", userBetaSpending = "User defined beta spending", probs = "Internal calculation probabilities" , power = "Power", theta = "Effect", direction = "Direction", normalApproximation = "Normal approximation", equalVariances = "Equal variance", assumedStDev = "Assumed standard deviation", shift = "Shift", inflationFactor = "Inflation factor", information = "Information", rejectionProbabilities = "Rejection probability", futilityProbabilities = "Futility probability", averageSampleNumber1 = "Ratio expected vs fixed sample size under H1", averageSampleNumber01 = "Ratio expected vs fixed sample size under a value between H0 and H1", averageSampleNumber0 = "Ratio expected vs fixed sample size under H0", allocationRatioPlanned = "Planned allocation ratio", thetaH0 = "Theta H0", # Effect thetaH1 = "Assumed effect", assumedStDev = "Assumed standard deviation", pi1 = "pi (1)", pi2 = "pi (2)", pi1H1 = "pi (1) under H1", pi2H1 = "pi (2) under H1", nPlanned = "Planned sample size", stages = "Stage", effectSizes = "Effect size", testStatistics = "Test statistic", pValues = "p-value", combinationTestStatistics = "Combination test statistic", testActions = "Action", conditionalPower = "Conditional power", conditionalPowerAchieved = "Cond. 
power (achieved)", conditionalPowerSimulated = "Cond. power (simulated)", conditionalRejectionProbabilities = "Cond. rejection probabilities", repeatedConfidenceIntervalLowerBounds = "RCI (lower)", repeatedConfidenceIntervalUpperBounds = "RCI (upper)", repeatedPValues = "Repeated p-value", finalPValues = "Final p-value", finalConfidenceIntervalLowerBounds = "Final CI (lower)", finalConfidenceIntervalUpperBounds = "Final CI (upper)", medianUnbiasedEstimates = "Median unbiased estimate", overallSampleSizes = "Overall sample size", overallSampleSizes1 = "Overall sample size (1)", overallSampleSizes2 = "Overall sample size (2)", overallTestStatistics = "Overall test statistic", overallPValues = "Overall p-value", overallMeans1 = "Overall mean (1)", overallMeans2 = "Overall mean (2)", overallStDevs1 = "Overall standard deviation (1)", overallStDevs2 = "Overall standard deviation (2)", overallStDevs = "Overall standard deviation", testStatistics = "Test statistic", combInverseNormal = "Inverse Normal Combination", combFisher = "Fisher Combination", weightsFisher = "Weight Fisher", weightsInverseNormal = "Weight Inverse Normal", overallLogRanks = "Overall log-rank", overallEvents = "Overall # events", overallEvents1 = "Overall # events (1)", overallEvents2 = "Overall #events (2)", overallAllocationRatios = "Overall allocation ratio", events = "# events", allocationRatios = "Allocation ratio", logRanks = "Log-rank", nMax = "N_max", averageSampleNumber = "Average sample size (ASN)", calculatedPower = "Power", earlyStop = "Early stop", rejectPerStage = "Reject per stage", futilityPerStage = "Futility stop per stage", overallEarlyStop = "Overall Early stop", overallReject = "Overall reject", overallFutility = "Overall futility", riskRatio = "Risk ratio", meanRatio = "Mean ratio", alternative = "Alternative", stDev = "Standard deviation", nFixed = "# subjects fixed", nFixed1 = "# subjects fixed (1)", nFixed2 = "# subjects fixed (2)", maxNumberOfSubjects = "Max # subjects", maxNumberOfSubjects1 = "Max # subjects (1)", maxNumberOfSubjects2 = "Max # subjects (2)", numberOfSubjects = "# subjects", numberOfSubjects1 = "# subjects (1)", numberOfSubjects2 = "# subjects (2)", expectedNumberOfSubjectsH0 = "Expected # subjects under H0", expectedNumberOfSubjectsH01 = "Expected # subjects under H0/H1", expectedNumberOfSubjectsH1 = "Expected # subjects under H1", expectedNumberOfSubjects = "Expected # subjects", omega = "Probability of an event", hazardRatio = "Hazard ratio", typeOfComputation = "Type of computation", accountForObservationTimes = "Account for observation times", eventTime = "Event time", accrualTime = "Accrual time", totalAccrualTime = "Total accrual time", remainingTime = "Remaining time", followUpTime = "Follow up time", dropoutRate1 = "Drop-out rate (1)", dropoutRate2 = "Drop-out rate (2)", dropoutTime = "Drop-out time", calculateFollowUpTime = "Calculate follow up time", eventsFixed = "# events fixed", expectedEventsH0 = "Expected # events under H0", expectedEventsH01 = "Expected # events under H0/H1", expectedEventsH1 = "Expected # events under H1", analysisTime = "Analysis time", eventsPerStage1 = "Observed # events by stage (1)", eventsPerStage2 = "Observed # events by stage (2)", studyDurationH1 = "Expected study duration H1", eventsPerStage = "# events by stage", expectedNumberOfSubjectsH1 = "Expected # subjects H1", twoSidedPower = "Two-sided power", plannedEvents = "Required planned events", plannedSubjects = "Required planned subjects", minNumberOfEventsPerStage = "Minimum # events 
per stage", maxNumberOfEventsPerStage = "Maximum # events per stage", minNumberOfSubjectsPerStage = "Minimum # of subjects per stage", maxNumberOfSubjectsPerStage = "Maximum # of subjects per stage", accrualIntensity = "Accrual intensity", accrualIntensityRelative = "Accrual intensity (relative)", maxNumberOfIterations = "Maximum # iterations", allocation1 = "Allocation 1", allocation2 = "Allocation 2", expectedNumberOfEvents = "Expected # events", expectedNumberOfEventsPerStage = "Expected # events by stage", eventsNotAchieved = "Events not achieved", subjects = "Subjects", futilityStop = "Futility stop", studyDuration = "Expected study duration", maxStudyDuration = "Maximal study duration", directionUpper = "Direction upper", piecewiseSurvivalTime = "Piecewise survival times", lambda2 = "lambda (2)", lambda1 = "lambda (1)", kappa = "kappa", earlyStopPerStage = "Early stop per stage", effect = "Effect", maxNumberOfEvents = "Maximum # events", criticalValuesEffectScale = "Critical value (effect scale)", criticalValuesEffectScaleLower = "Lower critical value (effect scale)", criticalValuesEffectScaleUpper = "Upper critical value (effect scale)", criticalValuesPValueScale = "Local one-sided significance level", ".design$stageLevels" = "Local one-sided significance level", futilityBoundsEffectScale = "Futility bound (effect scale)", futilityBoundsPValueScale = "Futility bound (1-sided p-value scale)", delayedResponseAllowed = "Delayed response allowed", delayedResponseEnabled = "Delayed response enabled", piecewiseSurvivalEnabled = "Piecewise exponential survival enabled", median1 = "median (1)", median2 = "median (2)", eventsPerStage = "Observed number of events by stage", expectedNumberOfEvents = "Observed number of events", expectedNumberOfSubjects = "Observed number of subjects", endOfAccrualIsUserDefined = "End of accrual is user defined", followUpTimeMustBeUserDefined = "Follow-up time must be user defined", maxNumberOfSubjectsIsUserDefined = "Max number of subjects is user defined", maxNumberOfSubjectsCanBeCalculatedDirectly = "Max number of subjects can be calculated directly", absoluteAccrualIntensityEnabled = "Absolute accrual intensity is enabled", time = "Time", overallEventProbabilities = "Overall event probability", eventProbabilities1 = "Event probability (1)", eventProbabilities2 = "Event probability (2)", informationAtInterim = "Information at interim", secondStageConditioning = "Conditional second stage p-value", separatePValues = "Separate p-value", singleStepAdjustedPValues = "Single step adjusted p-value", intersectionTest = "Intersection test", varianceOption = "Variance option", optimumAllocationRatio = "Optimum allocation ratio", rejected = "Rejected" ) .getTableColumnNames <- function(design = NULL, designPlan = NULL) { tableColumnNames <- C_TABLE_COLUMN_NAMES if (!is.null(design) && !is.na(design$bindingFutility) && !design$bindingFutility) { tableColumnNames$futilityBounds <- C_TABLE_COLUMN_NAMES[["futilityBoundsNonBinding"]] } if (!is.null(designPlan) && inherits(designPlan, "TrialDesignPlanSurvival") && !is.null(designPlan$.piecewiseSurvivalTime) && designPlan$.piecewiseSurvivalTime$piecewiseSurvivalEnabled) { tableColumnNames$lambda2 = "Piecewise survival lambda (2)" tableColumnNames$lambda1 = "Piecewise survival lambda (1)" } if (!is.null(designPlan) && inherits(designPlan, "TrialDesignPlanSurvival") && identical(designPlan$.design$kMax, 1L)) { tableColumnNames$maxNumberOfEvents <- "Number of events" } if (!is.null(designPlan) && inherits(designPlan, 
"TrialDesignPlan") && identical(designPlan$.design$kMax, 1L)) { tableColumnNames$studyDuration <- "Study duration" } if (!is.null(designPlan) && (inherits(designPlan, "TrialDesignPlanMeans") || inherits(designPlan, "SimulationResultsMeans")) && isTRUE(designPlan$meanRatio)) { tableColumnNames$stDev <- "Coefficient of variation" } if (!is.null(design) && class(design) != "TrialDesign" && design$sided == 2) { tableColumnNames$criticalValuesPValueScale <- "Local two-sided significance level" } return(tableColumnNames) } C_PARAMETER_FORMAT_FUNCTIONS <- list( means = "formatMeans", stDevs = "formatStDevs", stDev = "formatStDevs", assumedStDev = "formatStDevs", overallAllocationRatios = "formatRatios", overallLogRanks = "formatTestStatistics", allocationRatioPlanned = "formatRatios", alpha = "formatProbabilities", informationRates = "formatRates", stageLevels = "formatProbabilities", alphaSpent = "formatProbabilities", alpha0Vec = "formatProbabilities", simAlpha = "formatProbabilities", criticalValues = "formatFisherCriticalValues", # will be set in class TrialDesignFisher criticalValues = "formatGroupSequentialCriticalValues", # will be set in class TrialDesignGroupSequential betaSpent = "formatProbabilities", futilityBounds = "formatGroupSequentialCriticalValues", alpha0Vec = "formatFisherCriticalValues", constantBoundsHP = "formatGroupSequentialCriticalValues", nFixed = "formatSampleSizes", nFixed1 = "formatSampleSizes", nFixed2 = "formatSampleSizes", shift = "formatProbabilities", inflationFactor = "formatProbabilities", information = "formatProbabilities", power = "formatProbabilities", rejectionProbabilities = "formatProbabilities", futilityProbabilities = "formatFutilityProbabilities", probs = "formatProbabilities", averageSampleNumber1 = "formatProbabilities", averageSampleNumber01 = "formatProbabilities", averageSampleNumber0 = "formatProbabilities", effectSizes = "formatMeans", thetaH1 = "formatMeans", testStatistics = "formatTestStatistics", pValues = "formatPValues", combinationTestStatistics = "formatTestStatistics", conditionalPower = "formatConditionalPower", conditionalPowerAchieved = "formatConditionalPower", conditionalPowerSimulated = "formatConditionalPower", conditionalRejectionProbabilities = "formatProbabilities", repeatedConfidenceIntervalLowerBounds = "formatStDevs", repeatedConfidenceIntervalUpperBounds = "formatStDevs", repeatedPValues = "formatRepeatedPValues", finalPValues = "formatPValues", finalConfidenceIntervalLowerBounds = "formatStDevs", finalConfidenceIntervalUpperBounds = "formatStDevs", medianUnbiasedEstimates = "formatStDevs", overallTestStatistics = "formatTestStatistics", overallPValues = "formatPValues", overallMeans = "formatMeans", overallMeans1 = "formatMeans", overallMeans2 = "formatMeans", overallStDevs1 = "formatStDevs", overallStDevs2 = "formatStDevs", overallStDevs = "formatStDevs", testStatistics = "formatTestStatistics", combInverseNormal = "formatTestStatistics", combFisher = "formatFisherTestStatistics", weightsFisher = "formatRates", weightsInverseNormal = "formatRates", overallLogRanks = "formatTestStatistics", logRanks = "formatTestStatistics", theta = "formatDouble", averageSampleNumber = "formatDouble", calculatedPower = "formatProbabilities", earlyStop = "formatDouble", rejectPerStage = "formatDouble", futilityPerStage = "formatDouble", overallEarlyStop = "formatDouble", overallReject = "formatDouble", overallFutility = "formatDouble", maxNumberOfSubjects = "formatSampleSizes", maxNumberOfSubjects1 = "formatSampleSizes", 
maxNumberOfSubjects2 = "formatSampleSizes", maxNumberOfEvents = "formatSampleSizes", numberOfSubjects = "formatSampleSizes", numberOfSubjects1 = "formatSampleSizes", numberOfSubjects2 = "formatSampleSizes", expectedNumberOfSubjectsH0 = "formatSampleSizes", expectedNumberOfSubjectsH01 = "formatSampleSizes", expectedNumberOfSubjectsH1 = "formatSampleSizes", expectedNumberOfSubjects = "formatSampleSizes", omega = "formatRates", hazardRatio = "formatRates", pi1 = "formatRates", pi2 = "formatRates", pi1H1 = "formatRates", pi2H1 = "formatRates", piecewiseSurvivalTime = "formatTime", lambda2 = "formatRates", lambda1 = "formatRates", eventTime = "formatDouble", accrualTime = "formatTime", totalAccrualTime = "formatTime", remainingTime = "formatTime", followUpTime = "formatTime", dropoutRate1 = "formatRates", dropoutRate2 = "formatRates", dropoutTime = "formatTime", eventsFixed = "formatSampleSizes", expectedEventsH0 = "formatSampleSizes", expectedEventsH01 = "formatSampleSizes", expectedEventsH1 = "formatSampleSizes", analysisTime = "formatTime", studyDurationH1 = "formatDurations", eventsPerStage = "formatSampleSizes", expectedNumberOfSubjectsH1 = "formatSampleSizes", events = "formatSampleSizes", expectedNumberOfEvents = "formatSampleSizes", expectedNumberOfEventsPerStage = "formatSampleSizes", eventsNotAchieved = "formatDouble", subjects = "formatSampleSizes", futilityStop = "formatSimulationOutput", studyDuration = "formatDurations", maxStudyDuration = "formatDurations", earlyStopPerStage = "formatDouble", effect = "formatDouble", criticalValuesEffectScale = "formatGroupSequentialCriticalValues", criticalValuesEffectScaleLower = "formatGroupSequentialCriticalValues", criticalValuesEffectScaleUpper = "formatGroupSequentialCriticalValues", criticalValuesPValueScale = "formatProbabilities", futilityBoundsEffectScale = "formatGroupSequentialCriticalValues", futilityBoundsPValueScale = "formatProbabilities", median1 = "formatRatesDynamic", median2 = "formatRatesDynamic", accrualIntensity = "formatAccrualIntensities", accrualIntensityRelative = "formatAccrualIntensities", eventsPerStage = "formatSampleSizes", expectedNumberOfEvents = "formatSampleSizes", expectedNumberOfSubjects = "formatSampleSizes", time = "formatTime", overallEventProbabilities = "formatProbabilities", eventProbabilities1 = "formatProbabilities", eventProbabilities2 = "formatProbabilities", informationAtInterim = "formatRates", separatePValues = "formatPValues", singleStepAdjustedPValues = "formatPValues", userAlphaSpending = "formatHowItIs", userBetaSpending = "formatHowItIs" ) .getParameterFormatFunctions <- function() { return(C_PARAMETER_FORMAT_FUNCTIONS) } rpact/R/f_design_utilities.R0000644000176200001440000004075213555756357015615 0ustar liggesusers###################################################################################### # # # -- RPACT design utilities -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 26-02-2019 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### #' @include f_core_assertions.R NULL # This function computes the piecewise exponential distribution function (cdf) or (if kappa != 1) a Weibull cdf .getPiecewiseExponentialDistributionSingleTime <- function( time, piecewiseLambda, piecewiseSurvivalTime = NA_real_, kappa) { if (length(piecewiseLambda) == 1) { if (kappa <= 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'kappa' (", kappa, ") must be > 0") } return(pweibull(time, kappa, scale = 1 / piecewiseLambda, lower.tail = TRUE, log.p = FALSE)) } if (length(piecewiseSurvivalTime) != length(piecewiseLambda)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "length of 'piecewiseSurvivalTime' (", .arrayToString(piecewiseSurvivalTime), ") must be equal to length of 'piecewiseLambda' (", .arrayToString(piecewiseLambda), ")") } piecewiseSurvivalTime <- .getPiecewiseExpStartTimesWithoutLeadingZero(piecewiseSurvivalTime) if (kappa != 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "Weibull distribution cannot be used for piecewise survival definition") } len <- length(piecewiseSurvivalTime) for (i in 1:len) { if (time <= piecewiseSurvivalTime[i]) { if (i == 1) { return(1 - exp(-(piecewiseLambda[1] * time))) } y <- piecewiseLambda[1] * piecewiseSurvivalTime[1] if (i > 2) { y <- y + sum(piecewiseLambda[2:(i - 1)] * (piecewiseSurvivalTime[2:(i - 1)] - piecewiseSurvivalTime[1:(i - 2)])) } y <- y + piecewiseLambda[i] * (time - piecewiseSurvivalTime[i - 1]) return(1 - exp(-y)) } } if (len == 1) { y <- piecewiseLambda[1] * piecewiseSurvivalTime[1] + piecewiseLambda[len + 1] * (time - piecewiseSurvivalTime[len]) } else { y <- piecewiseLambda[1] * piecewiseSurvivalTime[1] + sum(piecewiseLambda[2:len] * (piecewiseSurvivalTime[2:len] - piecewiseSurvivalTime[1:(len - 1)])) + piecewiseLambda[len + 1] * (time - piecewiseSurvivalTime[len]) } return(1 - exp(-y)) } .getPiecewiseExponentialSingleQuantile <- function( quantile, piecewiseLambda, piecewiseSurvivalTime, kappa) { if (length(piecewiseLambda) == 1) { if (kappa <= 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'kappa' needs to be a positive number") } return((-log(1 - quantile))^(1 / kappa) / piecewiseLambda[1]) } if (kappa != 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "Weibull distribution cannot be used for piecewise survival definition") } cdfValues <- .getPiecewiseExponentialDistribution(piecewiseSurvivalTime, piecewiseSurvivalTime = piecewiseSurvivalTime, piecewiseLambda = piecewiseLambda, kappa = 1) cdfValues <- cdfValues[2:length(cdfValues)] # use values without a leading 0 piecewiseSurvivalTime <- .getPiecewiseExpStartTimesWithoutLeadingZero(piecewiseSurvivalTime) len <- length(piecewiseSurvivalTime) for (i in 1:len) { if (quantile <= cdfValues[i]) { if (i == 1) { return(-log(1 - quantile) / piecewiseLambda[1]) } y <- piecewiseLambda[1] * piecewiseSurvivalTime[1] if (i > 2) { y <- y + sum(piecewiseLambda[2:(i - 1)] * (piecewiseSurvivalTime[2:(i - 1)] - piecewiseSurvivalTime[1:(i - 2)])) } return(piecewiseSurvivalTime[i - 1] - (log(1 - quantile) + y) / piecewiseLambda[i]) } } if (len == 1) {
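# Single change point t1 = piecewiseSurvivalTime[1]: solving
# 1 - exp(-(lambda1 * t1 + lambda2 * (t - t1))) = quantile for t gives
# t = t1 - (log(1 - quantile) + lambda1 * t1) / lambda2, as returned below.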
return(piecewiseSurvivalTime[1] - (log(1 - quantile) + piecewiseLambda[1] * piecewiseSurvivalTime[1]) / piecewiseLambda[2]) } y <- piecewiseLambda[1] * piecewiseSurvivalTime[1] + sum(piecewiseLambda[2:len] * (piecewiseSurvivalTime[2:len] - piecewiseSurvivalTime[1:(len - 1)])) return(piecewiseSurvivalTime[len] - (log(1 - quantile) + y) / piecewiseLambda[len + 1]) } .getPiecewiseExponentialDistribution <- function(time, piecewiseLambda, piecewiseSurvivalTime, kappa) { if (length(time) == 1 && length(piecewiseSurvivalTime) == 1 && identical(time, piecewiseSurvivalTime) && length(piecewiseLambda) > 1) { result <- c() for (lambda in piecewiseLambda) { result <- c(result, .getPiecewiseExponentialDistributionSingleTime( time, lambda, piecewiseSurvivalTime, kappa)) } return(result) } result <- c() for (timeValue in time) { result <- c(result, .getPiecewiseExponentialDistributionSingleTime( timeValue, piecewiseLambda, piecewiseSurvivalTime, kappa)) } return(result) } .getPiecewiseExponentialSettings <- function(..., piecewiseSurvivalTime = NA_real_, piecewiseLambda = NA_real_, kappa = 1) { if (!all(is.na(piecewiseLambda)) && is.list(piecewiseSurvivalTime)) { stop("'piecewiseSurvivalTime' needs to be a numeric vector and not a list ", "because 'piecewiseLambda' (", piecewiseLambda, ") is defined separately") } if (any(is.na(piecewiseSurvivalTime))) { .assertIsSingleNumber(kappa, "kappa") } if (length(piecewiseLambda) == 1 && !is.na(piecewiseLambda) && length(piecewiseSurvivalTime) > 0 && !all(is.na(piecewiseSurvivalTime))) { warning("Argument 'piecewiseSurvivalTime' will be ignored because ", "length of 'piecewiseLambda' is 1", call. = FALSE) } setting <- PiecewiseSurvivalTime( piecewiseSurvivalTime = piecewiseSurvivalTime, lambda2 = piecewiseLambda, hazardRatio = 1, kappa = kappa, delayedResponseAllowed = FALSE) return(list(piecewiseSurvivalTime = setting$piecewiseSurvivalTime, piecewiseLambda = setting$lambda2)) } #' #' @title #' The Piecewise Exponential Distribution #' #' @description #' Distribution function, quantile function and random number generation for the #' piecewise exponential distribution. #' #' @param t,time Vector of time values. #' @param q,quantile Vector of quantiles. #' @param n Number of observations. #' @param s,piecewiseSurvivalTime Vector of start times defining the "time pieces". #' @param lambda,piecewiseLambda Vector of lambda values (hazard rates) corresponding to the start times. #' @param kappa The kappa value, needed for the specification of the Weibull distribution. #' In this case, no piecewise definition is possible, i.e., #' only lambda and kappa need to be specified. #' This function is equivalent to pweibull(t, kappa, 1 / lambda) of the R core system, i.e., #' the scale parameter is 1 / 'hazard rate'. #' For example, getPiecewiseExponentialDistribution(time = 130, #' piecewiseLambda = 0.01, kappa = 4.2) and #' pweibull(q = 130, shape = 4.2, scale = 1 / 0.01) provide the same result. #' @param ... Ensures that all arguments after \code{time} are named and #' that a warning will be displayed if unknown arguments are passed. #' #' @details #' #' \code{getPiecewiseExponentialDistribution} (short: \code{ppwexp}), #' \code{getPiecewiseExponentialQuantile} (short: \code{qpwexp}), and #' \code{getPiecewiseExponentialRandomNumbers} (short: \code{rpwexp}) provide #' probabilities, quantiles, and random numbers according to a piecewise #' exponential or a Weibull distribution.
The piecewise definition is performed through a vector of #' starting times (\code{piecewiseSurvivalTime}) and a vector of hazard rates (\code{piecewiseLambda}). #' You can also use a list that defines the starting times and piecewise #' lambdas together and define piecewiseSurvivalTime as this list. #' The list needs to have the form, for example, #' piecewiseSurvivalTime <- list( #' "0 - <6" = 0.025, #' "6 - <9" = 0.04, #' "9 - <15" = 0.015, #' ">=15" = 0.007) #' For the Weibull case, you can also specify a shape parameter kappa in order to #' calculate probabilities, quantiles, or random numbers. #' In this case, no piecewise definition is possible, i.e., only piecewiseLambda and #' kappa need to be specified. #' #' @examples #' #' # Calculate probabilities for a range of time values for a #' # piecewise exponential distribution with hazard rates #' # 0.025, 0.04, 0.015, and 0.01 in the intervals #' # [0, 6), [6, 9), [9, 15), [15,Inf), respectively, #' # and then recover the time values from the quantile function: #' piecewiseSurvivalTime <- list( #' "0 - <6" = 0.025, #' "6 - <9" = 0.04, #' "9 - <15" = 0.015, #' ">=15" = 0.01) #' y <- getPiecewiseExponentialDistribution(seq(0, 150, 15), #' piecewiseSurvivalTime = piecewiseSurvivalTime) #' getPiecewiseExponentialQuantile(y, #' piecewiseSurvivalTime = piecewiseSurvivalTime) #' #' @name utilitiesForPiecewiseExponentialDistribution #' NULL #' @rdname utilitiesForPiecewiseExponentialDistribution #' @export getPiecewiseExponentialDistribution <- function(time, ..., piecewiseSurvivalTime = NA_real_, piecewiseLambda = NA_real_, kappa = 1) { .warnInCaseOfUnknownArguments(functionName = "getPiecewiseExponentialDistribution", ...) .assertIsNumericVector(time, "time") if (any(time < 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "time needs to be a non-negative number") } settings <- .getPiecewiseExponentialSettings(piecewiseSurvivalTime = piecewiseSurvivalTime, piecewiseLambda = piecewiseLambda, kappa = kappa) return(.getPiecewiseExponentialDistribution(time = time, piecewiseSurvivalTime = settings$piecewiseSurvivalTime, piecewiseLambda = settings$piecewiseLambda, kappa = kappa)) } #' @rdname utilitiesForPiecewiseExponentialDistribution #' @export ppwexp <- function(t, ..., s = NA_real_, lambda = NA_real_, kappa = 1) { getPiecewiseExponentialDistribution(time = t, piecewiseSurvivalTime = s, piecewiseLambda = lambda, kappa = kappa, ...) } #' @rdname utilitiesForPiecewiseExponentialDistribution #' @export getPiecewiseExponentialQuantile <- function(quantile, ..., piecewiseSurvivalTime = NA_real_, piecewiseLambda = NA_real_, kappa = 1) { .warnInCaseOfUnknownArguments(functionName = "getPiecewiseExponentialQuantile", ...) .assertIsNumericVector(quantile, "quantile") if (any(quantile < 0) || any(quantile > 1)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "quantile needs to be within [0; 1]") } settings <- .getPiecewiseExponentialSettings(piecewiseSurvivalTime = piecewiseSurvivalTime, piecewiseLambda = piecewiseLambda, kappa = kappa) result <- c() for (quantileValue in quantile) { result <- c(result, .getPiecewiseExponentialSingleQuantile(quantileValue, piecewiseSurvivalTime = settings$piecewiseSurvivalTime, piecewiseLambda = settings$piecewiseLambda, kappa)) } return(result) } #' @rdname utilitiesForPiecewiseExponentialDistribution #' @export qpwexp <- function(q, ..., s = NA_real_, lambda = NA_real_, kappa = 1) { getPiecewiseExponentialQuantile(quantile = q, piecewiseSurvivalTime = s, piecewiseLambda = lambda, kappa = kappa, ...)
} .getPiecewiseExponentialRandomNumbersFast <- function(n, piecewiseSurvivalTime, piecewiseLambda) { result <- rexp(n, rate = piecewiseLambda[1]) if (length(piecewiseSurvivalTime) > 1) { for (i in 2:length(piecewiseSurvivalTime)) { result <- ifelse(result < piecewiseSurvivalTime[i], result, piecewiseSurvivalTime[i] + rexp(n, rate = piecewiseLambda[i])) } } result } #' @rdname utilitiesForPiecewiseExponentialDistribution #' @export getPiecewiseExponentialRandomNumbers <- function(n, ..., piecewiseSurvivalTime = NA_real_, piecewiseLambda = NA_real_, kappa = 1) { .warnInCaseOfUnknownArguments(functionName = "getPiecewiseExponentialRandomNumbers", ...) .assertIsSingleInteger(n, "n", validateType = FALSE) if (n <= 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "n needs to be a positive integer.") } settings <- .getPiecewiseExponentialSettings(piecewiseSurvivalTime = piecewiseSurvivalTime, piecewiseLambda = piecewiseLambda, kappa = kappa) if (kappa == 1) { return(.getPiecewiseExponentialRandomNumbersFast(n, piecewiseSurvivalTime = settings$piecewiseSurvivalTime, piecewiseLambda = settings$piecewiseLambda)) } randomQuantiles <- runif(n, 0, 1) result <- c() for (quantile in randomQuantiles) { result <- c(result, .getPiecewiseExponentialSingleQuantile(quantile, piecewiseSurvivalTime = settings$piecewiseSurvivalTime, piecewiseLambda = settings$piecewiseLambda, kappa = kappa)) } return(result) } #' @rdname utilitiesForPiecewiseExponentialDistribution #' @export rpwexp <- function(n, ..., s = NA_real_, lambda = NA_real_, kappa = 1) { getPiecewiseExponentialRandomNumbers(n = n, piecewiseSurvivalTime = s, piecewiseLambda = lambda, kappa = kappa, ...) } #' #' @title #' Survival Helper Functions for Conversion of Pi, Lambda, Median #' #' @description #' Functions to convert pi, lambda and median values into each other. #' #' @param piValue,pi1,pi2,lambda,median Value that shall be converted. #' @param eventTime The assumed time under which the event rates #' are calculated, default is \code{12}. #' @param kappa The scale parameter of the Weibull distribution, default is \code{1}. #' The Weibull distribution cannot be used for the piecewise #' definition of the survival time distribution. #' #' @details #' Can be used, e.g., to convert median values into pi or lambda values for usage in #' \code{\link{getSampleSizeSurvival}} or \code{\link{getPowerSurvival}}. 
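#'
#' @examples
#' # Illustrative values (chosen for demonstration; they are not taken from
#' # the package documentation): convert a median survival time of 18 months
#' # into a hazard rate, and into the event probability at month 12
#' # (kappa = 1, i.e., exponentially distributed survival times):
#' getLambdaByMedian(median = 18)              # log(2) / 18 = 0.0385
#' getPiByMedian(median = 18, eventTime = 12)  # 1 - 2^(-12/18) = 0.3700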
#' #' @name utilitiesForSurvivalTrials #' NULL #' @rdname utilitiesForSurvivalTrials #' @export getLambdaByPi <- function(piValue, eventTime = C_EVENT_TIME_DEFAULT, kappa = 1) { .assertIsValidPi(piValue, "pi") .assertIsValidKappa(kappa) for (value in piValue) { if (value > 1 - 1e-15 && value < 1 + 1e-15) { stop("'pi' must be != 1") } } return((-log(1 - piValue))^(1 / kappa) / eventTime) } #' @rdname utilitiesForSurvivalTrials #' @export getLambdaByMedian <- function(median, kappa = 1) { .assertIsValidKappa(kappa) return(log(2)^(1 / kappa) / median) } #' @rdname utilitiesForSurvivalTrials #' @export getHazardRatioByPi <- function(pi1, pi2, eventTime = C_EVENT_TIME_DEFAULT, kappa = 1) { .assertIsValidPi(pi1, "pi1") .assertIsValidPi(pi2, "pi2") .assertIsValidKappa(kappa) return((getLambdaByPi(pi1, eventTime, kappa) / getLambdaByPi(pi2, eventTime, kappa))^kappa) } #' @rdname utilitiesForSurvivalTrials #' @export getPiByLambda <- function(lambda, eventTime = C_EVENT_TIME_DEFAULT, kappa = 1) { .assertIsValidLambda(lambda) .assertIsValidKappa(kappa) return(1 - exp(-(lambda * eventTime)^kappa)) } # alternative: return(1 - exp(-(log(2)^(1 / kappa) / median * eventTime)^kappa)) #' @rdname utilitiesForSurvivalTrials #' @export getPiByMedian <- function(median, eventTime = C_EVENT_TIME_DEFAULT, kappa = 1) { .assertIsValidKappa(kappa) return(1 - 2^(-(eventTime / median)^kappa)) } #' @rdname utilitiesForSurvivalTrials #' @export getMedianByLambda <- function(lambda, kappa = 1) { .assertIsValidLambda(lambda) .assertIsValidKappa(kappa) return(log(2)^(1 / kappa) / lambda) } #' @rdname utilitiesForSurvivalTrials #' @export getMedianByPi <- function(piValue, eventTime = C_EVENT_TIME_DEFAULT, kappa = 1) { .assertIsValidPi(piValue, "piValue") getMedianByLambda(getLambdaByPi(piValue, eventTime, kappa), kappa) } rpact/R/RcppExports.R0000644000176200001440000000267513574432653014226 0ustar liggesusers# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 getRandomSurvivalDistribution <- function(rate, kappa) { .Call(`_rpact_getRandomSurvivalDistribution`, rate, kappa) } getRandomPiecewiseExponentialDistribution <- function(cdfValues, piecewiseLambda, piecewiseSurvivalTime) { .Call(`_rpact_getRandomPiecewiseExponentialDistribution`, cdfValues, piecewiseLambda, piecewiseSurvivalTime) } getSimulationSurvivalCpp <- function(designNumber, kMax, sided, criticalValues, informationRates, conditionalPower, plannedEvents, thetaH1, minNumberOfEventsPerStage, maxNumberOfEventsPerStage, directionUpper, allocation1, allocation2, accrualTime, treatmentGroup, thetaH0, futilityBounds, alpha0Vec, pi1Vec, pi2, eventTime, piecewiseSurvivalTime, cdfValues1, cdfValues2, lambdaVec1, lambdaVec2, phi, maxNumberOfSubjects, maxNumberOfIterations, maxNumberOfRawDatasetsPerStage, kappa) { .Call(`_rpact_getSimulationSurvivalCpp`, designNumber, kMax, sided, criticalValues, informationRates, conditionalPower, plannedEvents, thetaH1, minNumberOfEventsPerStage, maxNumberOfEventsPerStage, directionUpper, allocation1, allocation2, accrualTime, treatmentGroup, thetaH0, futilityBounds, alpha0Vec, pi1Vec, pi2, eventTime, piecewiseSurvivalTime, cdfValues1, cdfValues2, lambdaVec1, lambdaVec2, phi, maxNumberOfSubjects, maxNumberOfIterations, maxNumberOfRawDatasetsPerStage, kappa) } rpact/R/class_simulation_results.R0000644000176200001440000010405513573662763017070 0ustar liggesusers###################################################################################### # # # -- 
Simulation result classes -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.1.0 # # Date: 13-05-2019 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### #' #' @name SimulationResults #' #' @title #' Class for Simulation Results #' #' @description #' A class for simulation results. #' #' @details #' \code{SimulationResults} is the basic class for #' \itemize{ #' \item \code{\link{SimulationResultsMeans}}, #' \item \code{\link{SimulationResultsRates}}, and #' \item \code{\link{SimulationResultsSurvival}}. #' } #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include class_design.R #' @include f_core_constants.R #' @include class_time.R #' @include f_simulation_survival.R #' #' @keywords internal #' #' @importFrom methods new #' SimulationResults <- setRefClass("SimulationResults", contains = "ParameterSet", fields = list( .plotSettings = "PlotSettings", .design = "TrialDesign", .data = "data.frame", .rawData = "data.frame", .showStatistics = "logical" ), methods = list( initialize = function(design, ...) { callSuper(.design = design, ...) .showStatistics <<- TRUE .plotSettings <<- PlotSettings() .parameterNames <<- .getParameterNames(design, .self) .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS }, getPlotSettings = function() { return(.plotSettings) }, setShowStatistics = function(showStatistics) { .assertIsSingleLogical(showStatistics, "showStatistics") .showStatistics <<- showStatistics }, show = function(showType = 1, digits = NA_integer_, showStatistics = TRUE) { .show(showType = showType, digits = digits, showStatistics = showStatistics, consoleOutputEnabled = TRUE) }, .show = function(showType = 1, digits = NA_integer_, showStatistics = TRUE, consoleOutputEnabled = TRUE) { 'Method for automatically printing simulation result objects' .resetCat() if (showType == 3) { .createSummary(.self, digits = digits)$.show(showType = 1, digits = digits, consoleOutputEnabled = consoleOutputEnabled) } else if (showType == 2) { .cat("Technical summary of the simulation results object of class ", methods::classLabel(class(.self)), ":\n", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showAllParameters(consoleOutputEnabled = consoleOutputEnabled) .showParameterTypeDescription(consoleOutputEnabled = consoleOutputEnabled) } else { .cat(.toString(startWithUpperCase = TRUE), ":\n\n", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getUserDefinedParameters(), "User defined parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getDefaultParameters(), "Default parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getGeneratedParameters(), "Results", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled) ## statistics of simulated data if (showStatistics && .showStatistics) { .cat("Simulated data:\n", consoleOutputEnabled = 
consoleOutputEnabled)
if (inherits(.self, "SimulationResultsMeans")) { params <- c( "numberOfSubjects", "testStatistic") } else if (inherits(.self, "SimulationResultsRates")) { params <- c( "numberOfSubjects", "testStatistic") } else if (inherits(.self, "SimulationResultsSurvival")) { params <- c( "analysisTime", "numberOfSubjects", "eventsPerStage1", "eventsPerStage2", "eventsPerStage", "testStatistic", "logRankStatistic", "hazardRatioEstimateLR") }
if (!all(is.na(conditionalPowerAchieved)) && any(!is.na(conditionalPowerAchieved)) && any(na.omit(conditionalPowerAchieved) != 0)) { params <- c(params, "conditionalPowerAchieved") }
stages <- sort(unique(.self$.data$stageNumber))
if (inherits(.self, "SimulationResultsMeans")) { levelName <- "alternative" } else { levelName <- "pi1" }
levels <- unique(.self$.data[[levelName]])
if (length(levels) > 1 && !any(is.na(levels))) { levels <- sort(levels) } else { levels <- NA_real_ }
for (parameterName in params) { paramCaption <- .parameterNames[[parameterName]] if (is.null(paramCaption)) { paramCaption <- paste0("%", parameterName, "%") } for (stage in stages) { for (levelValue in levels) { .catStatisticsLine(stage, parameterName, paramCaption, levelValue = levelValue, levelName = levelName, consoleOutputEnabled = consoleOutputEnabled) } } }
.cat("\n", consoleOutputEnabled = consoleOutputEnabled) }
twoGroupsEnabled <- !inherits(.self, "SimulationResultsMeans")
if (.design$kMax > 1 || twoGroupsEnabled) { .cat("Legend:\n", heading = 2, consoleOutputEnabled = consoleOutputEnabled) if (twoGroupsEnabled) { .cat(" (i): values of treatment arm i\n", consoleOutputEnabled = consoleOutputEnabled) } if (.design$kMax > 1) { .cat(" [k]: values at stage k\n", consoleOutputEnabled = consoleOutputEnabled) } .cat("\n", consoleOutputEnabled = consoleOutputEnabled) } } },
.catStatisticsLine = function(stage, parameterName, paramCaption, levelValue, levelName, consoleOutputEnabled) { if (stage == 1 && parameterName == "conditionalPowerAchieved") { return(invisible()) } postfix <- paste0("[", stage, "]") if (!is.na(levelValue)) { if (inherits(.self, "SimulationResultsMeans")) { levelName <- "alternative" } else { levelName <- "pi1" } postfix <- paste0(postfix, ", ", levelName, " = ", round(levelValue, 4)) paramValue <- .self$.data[[parameterName]][ .self$.data$stageNumber == stage & .self$.data[[levelName]] == levelValue] } else { paramValue <- .self$.data[[parameterName]][ .self$.data$stageNumber == stage] } variableNameFormatted <- formatVariableName(name = paramCaption, n = .getNChar(), prefix = "", postfix = postfix) if (!is.null(paramValue) && length(paramValue) > 0 && is.numeric(paramValue)) { paramValueFormatted <- paste0("median [range]: ", round(stats::median(paramValue), 3), " [", paste(round(base::range(paramValue), 3), collapse = " - "), "]; ", "mean +/-sd: ", round(base::mean(paramValue), 3), " +/-", round(stats::sd(paramValue), 3)) } else { paramValueFormatted <- "median [range]: NA [NA - NA]; mean +/-sd: NA +/-NA" } output <- paste(variableNameFormatted, paramValueFormatted, "\n") if (!grepl(": median \\[range\\]: NA \\[NA - NA\\]", output)) { .cat(output, consoleOutputEnabled = consoleOutputEnabled) } },
.toString = function(startWithUpperCase = FALSE) { s <- "simulation" if (inherits(.self, "SimulationResultsMeans")) { s <- paste(s, "of means") } else if (inherits(.self, "SimulationResultsRates")) { s <- paste(s, "of rates") } else if (inherits(.self, "SimulationResultsSurvival")) { s <- paste(s, "of survival data") } else { s <- paste(s, "results") }
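# append the trial design type to the endpoint label assembled above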
if (.isTrialDesignGroupSequential(.design)) { s <- paste(s, "(group sequential design)") } else if (.isTrialDesignInverseNormal(.design)) { s <- paste(s, "(inverse normal design)") } else if (.isTrialDesignFisher(.design)) { s <- paste(s, "(Fisher design)") } else { s <- paste("unknown", s) } return(ifelse(startWithUpperCase, .firstCharacterToUpperCase(s), s)) } ) ) #' #' @name SimulationResultsMeans #' #' @title #' Class for Simulation Results Means #' #' @description #' A class for simulation results means. #' #' @details #' Use \code{\link{getSimulationMeans}} to create an object of this type. #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include class_design.R #' @include f_core_constants.R #' @include class_time.R #' @include f_simulation_survival.R #' #' @keywords internal #' #' @importFrom methods new #' SimulationResultsMeans <- setRefClass("SimulationResultsMeans", contains = "SimulationResults", fields = list( .plotSettings = "PlotSettings", .design = "TrialDesign", .data = "data.frame", .rawData = "data.frame", .showStatistics = "logical", alternative = "numeric", effect = "numeric", stDev = "numeric", groups = "integer", allocationRatioPlanned = "numeric", directionUpper = "logical", thetaH0 = "numeric", meanRatio = "logical", iterations = "matrix", sampleSizes = "matrix", rejectPerStage = "matrix", overallReject = "numeric", futilityPerStage = "matrix", futilityStop = "numeric", earlyStop = "numeric", expectedNumberOfSubjects = "numeric", plannedSubjects = "numeric", minNumberOfSubjectsPerStage = "numeric", maxNumberOfSubjectsPerStage = "numeric", conditionalPower = "numeric", thetaH1 = "numeric", maxNumberOfIterations = "integer", conditionalPowerAchieved = "matrix", seed = "numeric" ), methods = list( initialize = function(design, ...) { callSuper(design = design, ...) for (generatedParam in c( "effect", "iterations", "sampleSizes", "eventsNotAchieved", "expectedNumberOfSubjects", "rejectPerStage", "overallReject", "futilityPerStage", "futilityStop", "earlyStop", "analysisTime", "studyDuration")) { .setParameterType(generatedParam, C_PARAM_GENERATED) } } ) ) #' #' @name SimulationResultsRates #' #' @title #' Class for Simulation Results Rates #' #' @description #' A class for simulation results rates. #' #' @details #' Use \code{\link{getSimulationRates}} to create an object of this type. 
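#' The fields largely mirror those of \code{\link{SimulationResultsMeans}}, with the
#' rates-specific parameters \code{pi1}, \code{pi2}, \code{pi1H1}, and \code{pi2H1}.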
#' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include class_design.R #' @include f_core_constants.R #' @include class_time.R #' @include f_simulation_survival.R #' #' @keywords internal #' #' @importFrom methods new #' SimulationResultsRates <- setRefClass("SimulationResultsRates", contains = "SimulationResults", fields = list( .plotSettings = "PlotSettings", .design = "TrialDesign", .data = "data.frame", .rawData = "data.frame", .showStatistics = "logical", pi1 = "numeric", pi2 = "numeric", effect = "numeric", groups = "integer", allocationRatioPlanned = "numeric", directionUpper = "logical", thetaH0 = "numeric", riskRatio = "logical", pi1H1 = "numeric", pi2H1 = "numeric", iterations = "matrix", sampleSizes = "matrix", rejectPerStage = "matrix", overallReject = "numeric", futilityPerStage = "matrix", futilityStop = "numeric", earlyStop = "numeric", expectedNumberOfSubjects = "numeric", plannedSubjects = "numeric", minNumberOfSubjectsPerStage = "numeric", maxNumberOfSubjectsPerStage = "numeric", conditionalPower = "numeric", thetaH1 = "numeric", maxNumberOfIterations = "integer", conditionalPowerAchieved = "matrix", seed = "numeric" ), methods = list( initialize = function(design, ...) { callSuper(design = design, ...) for (generatedParam in c( "effect", "iterations", "sampleSizes", "eventsNotAchieved", "expectedNumberOfSubjects", "rejectPerStage", "overallReject", "futilityPerStage", "futilityStop", "earlyStop", "analysisTime", "studyDuration")) { .setParameterType(generatedParam, C_PARAM_GENERATED) } } ) ) #' #' @name SimulationResultsSurvival #' #' @title #' Class for Simulation Results Survival #' #' @description #' A class for simulation results survival. #' #' @details #' Use \code{\link{getSimulationSurvival}} to create an object of this type. #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include class_design.R #' @include f_core_constants.R #' @include class_time.R #' @include f_simulation_survival.R #' #' @keywords internal #' #' @importFrom methods new #' SimulationResultsSurvival <- setRefClass("SimulationResultsSurvival", contains = "SimulationResults", fields = list( .piecewiseSurvivalTime = "PiecewiseSurvivalTime", .accrualTime = "AccrualTime", maxNumberOfSubjects = "numeric", accrualTime = "numeric", accrualIntensity = "numeric", plannedEvents = "numeric", pi1 = "numeric", pi2 = "numeric", median1 = "numeric", median2 = "numeric", allocationRatioPlanned = "numeric", directionUpper = "logical", dropoutRate1 = "numeric", dropoutRate2 = "numeric", dropoutTime = "numeric", eventTime = "numeric", thetaH0 = "numeric", allocation1 = "numeric", allocation2 = "numeric", minNumberOfEventsPerStage = "numeric", maxNumberOfEventsPerStage = "numeric", conditionalPower = "numeric", thetaH1 = "numeric", maxNumberOfIterations = "integer", kappa = "numeric", piecewiseSurvivalTime = "numeric", lambda1 = "numeric", lambda2 = "numeric", hazardRatio = "numeric", iterations = "matrix", analysisTime = "matrix", studyDuration = "numeric", eventsPerStage = "matrix", expectedNumberOfEvents = "numeric", eventsNotAchieved = "matrix", numberOfSubjects = "matrix", numberOfSubjects1 = "matrix", numberOfSubjects2 = "matrix", expectedNumberOfSubjects = "numeric", rejectPerStage = "matrix", overallReject = "numeric", futilityPerStage = "matrix", futilityStop = "numeric", earlyStop = "numeric", conditionalPowerAchieved = "matrix", seed = "numeric" ), methods = list( initialize = function(design, ...) { callSuper(design = design, ...) 
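# mark all computed output fields as 'generated' so that they are listed
# under "Results" when the object is printed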
for (generatedParam in c( "hazardRatio", "iterations", "eventsPerStage", "expectedNumberOfEvents", "eventsNotAchieved", "numberOfSubjects", "expectedNumberOfSubjects", "rejectPerStage", "overallReject", "futilityPerStage", "futilityStop", "earlyStop", "analysisTime", "studyDuration", "allocationRatioPlanned")) { .setParameterType(generatedParam, C_PARAM_GENERATED) } .setParameterType("numberOfSubjects1", C_PARAM_NOT_APPLICABLE) .setParameterType("numberOfSubjects2", C_PARAM_NOT_APPLICABLE) .setParameterType("median1", C_PARAM_NOT_APPLICABLE) .setParameterType("median2", C_PARAM_NOT_APPLICABLE) } ) ) .assertIsValidVariedParameterVectorForSimulationResultsPlotting <- function(simulationResults, plotType) { if (inherits(simulationResults, "SimulationResultsMeans")) { if (is.null(simulationResults$alternative) || is.na(simulationResults$alternative) || length(simulationResults$alternative) <= 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "plot type ", plotType, " is only available if 'alternative' with length > 1 is defined") } } else if (inherits(simulationResults, "SimulationResultsRates")) { if (is.null(simulationResults$pi1) || is.na(simulationResults$pi1) || length(simulationResults$pi1) <= 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "plot type ", plotType, " is only available if 'pi1' with length > 1 is defined") } } else if (inherits(simulationResults, "SimulationResultsSurvival")) { if (is.null(simulationResults$hazardRatio) || is.na(simulationResults$hazardRatio) || length(simulationResults$hazardRatio) <= 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "plot type ", plotType, " is only available if 'hazardRatio' with length > 1 is defined or derived") } if (length(simulationResults$hazardRatio) != length(simulationResults$overallReject)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "plot type ", plotType, " is not available for piecewise survival (only type 13 and 14)") } } } .plotSimulationResults <- function(simulationResults, designMaster, type = 5L, main = NA_character_, xlab = NA_character_, ylab = NA_character_, palette = "Set1", theta = seq(-1, 1, 0.02), plotPointsEnabled = NA, legendPosition = NA_integer_, showSource = FALSE, simulationResultsName = NA_character_, ...) 
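# Internal plotting workhorse: validates the arguments, maps 'type' to the
# x/y parameter names, and delegates to .plotParameterSet() (or to
# .plotSurvivalFunction() for types 13 and 14).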
{ .assertGgplotIsInstalled()
.assertIsSimulationResults(simulationResults)
.assertIsValidLegendPosition(legendPosition)
theta <- .assertIsValidThetaRange(thetaRange = theta)
survivalEnabled <- inherits(simulationResults, "SimulationResultsSurvival")
meansEnabled <- inherits(simulationResults, "SimulationResultsMeans")
if (survivalEnabled) { nMax <- simulationResults$expectedNumberOfEvents[1] # use first value for plotting
} else { nMax <- simulationResults$expectedNumberOfSubjects[1] # use first value for plotting
}
if (type %in% c(1:4)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' (", type, ") is not available for simulation results (type must be > 4)") }
if (!survivalEnabled && type %in% c(10:14)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' (", type, ") is only available for survival simulation results") }
variedParameters <- logical(0)
if (is.na(plotPointsEnabled)) { plotPointsEnabled <- FALSE }
showSourceHint <- ""
if (type == 5) { # Power and Stopping Probabilities
.assertIsValidVariedParameterVectorForSimulationResultsPlotting(simulationResults, type)
if (is.na(main)) { items <- PlotSubTitleItems(title = "Overall Power and Early Stopping") .addPlotSubTitleItems(simulationResults, designMaster, items, type) main <- items$toQuote() }
if (survivalEnabled) { xParameterName <- "hazardRatio" } else { xParameterName <- "effect" }
yParameterNames <- c("overallReject", "earlyStop", "futilityStop")
if (is.na(legendPosition)) { legendPosition <- C_POSITION_LEFT_TOP }
if (is.na(ylab)) { ylab <- "" }
.showPlotSourceInformation(objectName = simulationResultsName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, showSource = showSource)
if (is.null(list(...)[["ylim"]])) { ylim <- c(0, 1) return(.plotParameterSet(parameterSet = simulationResults, designMaster = designMaster, xParameterName = xParameterName, yParameterNames = yParameterNames, mainTitle = main, xlab = xlab, ylab = ylab, palette = palette, theta = theta, nMax = nMax, plotPointsEnabled = plotPointsEnabled, legendPosition = legendPosition, variedParameters = variedParameters, qnormAlphaLineEnabled = FALSE, yAxisScalingEnabled = FALSE, ylim = ylim, ...)) # ratioEnabled = TRUE
} else { return(.plotParameterSet(parameterSet = simulationResults, designMaster = designMaster, xParameterName = xParameterName, yParameterNames = yParameterNames, mainTitle = main, xlab = xlab, ylab = ylab, palette = palette, theta = theta, nMax = nMax, plotPointsEnabled = plotPointsEnabled, legendPosition = legendPosition, variedParameters = variedParameters, qnormAlphaLineEnabled = FALSE, yAxisScalingEnabled = FALSE, ...)) }
} else if (type == 6) { # Average Sample Size / Average Event Number
.assertIsValidVariedParameterVectorForSimulationResultsPlotting(simulationResults, type)
if (is.na(main)) { titlePart <- ifelse(survivalEnabled, "Number of Events", "Number of Subjects") items <- PlotSubTitleItems(title = paste0("Expected ", titlePart, " and Power / Early Stop")) .addPlotSubTitleItems(simulationResults, designMaster, items, type) main <- items$toQuote() }
if (survivalEnabled) { xParameterName <- "hazardRatio" } else { xParameterName <- "effect" }
if (survivalEnabled) { yParameterNames <- "expectedNumberOfEvents" expectedNumberOfEvents <- simulationResults[["expectedNumberOfEvents"]] if (is.null(expectedNumberOfEvents) || length(expectedNumberOfEvents) == 0) { yParameterNames <- "observedEventsH1" } } else { yParameterNames <- "expectedNumberOfSubjects" }
yParameterNames <- c(yParameterNames,
"overallReject", "earlyStop") if (is.na(legendPosition)) { legendPosition <- C_POSITION_LEFT_CENTER } .showPlotSourceInformation(objectName = simulationResultsName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, showSource = showSource) } else if (type == 7) { .assertIsValidVariedParameterVectorForSimulationResultsPlotting(simulationResults, type) if (is.na(main)) { items <- PlotSubTitleItems(title = "Overall Power") .addPlotSubTitleItems(simulationResults, designMaster, items, type) main <- items$toQuote() } if (survivalEnabled) { xParameterName <- "hazardRatio" } else { xParameterName <- "effect" } yParameterNames <- "overallReject" if (is.na(legendPosition)) { legendPosition <- C_POSITION_RIGHT_CENTER } .showPlotSourceInformation(objectName = simulationResultsName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, showSource = showSource) } else if (type == 8) { .assertIsValidVariedParameterVectorForSimulationResultsPlotting(simulationResults, type) if (is.na(main)) { items <- PlotSubTitleItems(title = "Overall Early Stopping") .addPlotSubTitleItems(simulationResults, designMaster, items, type) main <- items$toQuote() } if (survivalEnabled) { xParameterName <- "hazardRatio" } else { xParameterName <- "effect" } yParameterNames <- c("earlyStop", "futilityStop") if (is.na(legendPosition)) { legendPosition <- C_POSITION_LEFT_CENTER } .showPlotSourceInformation(objectName = simulationResultsName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, showSource = showSource) } else if (type == 9) { .assertIsValidVariedParameterVectorForSimulationResultsPlotting(simulationResults, type) if (is.na(main)) { items <- PlotSubTitleItems(title = ifelse(survivalEnabled, "Expected Number of Events", "Expected Number of Subjects")) .addPlotSubTitleItems(simulationResults, designMaster, items, type) main <- items$toQuote() } if (survivalEnabled) { xParameterName <- "hazardRatio" } else { xParameterName <- "effect" } if (survivalEnabled) { yParameterNames <- "expectedNumberOfEvents" } else { yParameterNames <- "expectedNumberOfSubjects" } .showPlotSourceInformation(objectName = simulationResultsName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, showSource = showSource) } else if (type == 10) { # Study Duration .assertIsValidVariedParameterVectorForSimulationResultsPlotting(simulationResults, type) if (is.na(main)) { items <- PlotSubTitleItems(title = "Study Duration") .addPlotSubTitleItems(simulationResults, designMaster, items, type) main <- items$toQuote() } xParameterName <- "hazardRatio" yParameterNames <- "studyDuration" .showPlotSourceInformation(objectName = simulationResultsName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, showSource = showSource) } else if (type == 11) { .assertIsValidVariedParameterVectorForSimulationResultsPlotting(simulationResults, type) if (is.na(main)) { items <- PlotSubTitleItems(title = "Expected Number of Subjects") .addPlotSubTitleItems(simulationResults, designMaster, items, type) main <- items$toQuote() } xParameterName <- "hazardRatio" yParameterNames <- "expectedNumberOfSubjects" .showPlotSourceInformation(objectName = simulationResultsName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, showSource = showSource) } else if (type == 
12) { # Analysis Time .assertIsValidVariedParameterVectorForSimulationResultsPlotting(simulationResults, type) if (is.na(main)) { items <- PlotSubTitleItems(title = "Analysis Times") .addPlotSubTitleItems(simulationResults, designMaster, items, type) main <- items$toQuote() } xParameterName <- "hazardRatio" yParameterNames <- "analysisTime" data <- NULL for (k in 1:designMaster$kMax) { part <- data.frame( categories = rep(k, length(simulationResults$hazardRatio)), xValues = simulationResults$hazardRatio, yValues = simulationResults$analysisTime[k, ] ) if (is.null(data)) { data <- part } else { data <- rbind(data, part) } } if (is.na(legendPosition)) { legendPosition <- C_POSITION_LEFT_CENTER } .showPlotSourceInformation(objectName = simulationResultsName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, showSource = showSource) return(.plotDataFrame(data, mainTitle = main, xlab = NA_character_, ylab = NA_character_, xAxisLabel = "Hazard Ratio", yAxisLabel1 = "Analysis Time", yAxisLabel2 = NA_character_, plotPointsEnabled = TRUE, legendTitle = "Stage", legendPosition = legendPosition, sided = designMaster$sided)) } else if (type == 13 || type == 14) { # Cumulative Distribution Function / Survival function return(.plotSurvivalFunction(simulationResults, designMaster = designMaster, type = type, main = main, xlab = xlab, ylab = ylab, palette = palette, legendPosition = legendPosition, showSource = showSource)) } else { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' (", type, ") is not allowed; must be 5, 6,..., 14") } return(.plotParameterSet(parameterSet = simulationResults, designMaster = designMaster, xParameterName = xParameterName, yParameterNames = yParameterNames, mainTitle = main, xlab = xlab, ylab = ylab, palette = palette, theta = theta, nMax = nMax, plotPointsEnabled = plotPointsEnabled, legendPosition = legendPosition, variedParameters = variedParameters, qnormAlphaLineEnabled = (type != 2), ratioEnabled = TRUE, ...)) } #' #' @title #' Simulation Results Plotting #' #' @param x The simulation results, obtained from \cr #' \code{\link{getSimulationSurvival}}. #' @param y Not available for this kind of plot (is only defined to be compatible to the generic plot function). #' @param main The main title. #' @param xlab The x-axis label. #' @param ylab The y-axis label. #' @param palette The palette, default is \code{"Set1"}. #' @param theta A vector of theta values. #' @param plotPointsEnabled If \code{TRUE}, additional points will be plotted. #' @param showSource If \code{TRUE}, the parameter names of the object will #' be printed which were used to create the plot; that may be, e.g., #' useful to check the values or to create own plots with \code{\link[graphics]{plot}}. #' @param legendPosition The position of the legend. #' By default (\code{NA_integer_}) the algorithm tries to find a suitable position. #' Choose one of the following values to specify the position manually: #' \itemize{ #' \item \code{-1}: no legend will be shown #' \item \code{NA}: the algorithm tries to find a suitable position #' \item \code{0}: legend position outside plot #' \item \code{1}: legend position left top #' \item \code{2}: legend position left center #' \item \code{3}: legend position left bottom #' \item \code{4}: legend position right top #' \item \code{5}: legend position right center #' \item \code{6}: legend position right bottom #' } #' @param type The plot type (default = \code{1}). 
The following plot types are available:
#' \itemize{
#' \item \code{1}: creates a 'Boundaries' plot
#' \item \code{2}: creates a 'Boundaries Effect Scale' plot
#' \item \code{3}: creates a 'Boundaries p Values Scale' plot
#' \item \code{4}: creates a 'Type One Error Spending' plot
#' \item \code{5}: creates a 'Sample Size' or 'Overall Power and Early Stopping' plot
#' \item \code{6}: creates a 'Number of Events' or 'Sample Size' plot
#' \item \code{7}: creates an 'Overall Power' plot
#' \item \code{8}: creates an 'Overall Early Stopping' plot
#' \item \code{9}: creates an 'Expected Number of Events' or 'Expected Sample Size' plot
#' \item \code{10}: creates a 'Study Duration' plot
#' \item \code{11}: creates an 'Expected Number of Subjects' plot
#' \item \code{12}: creates an 'Analysis Times' plot
#' \item \code{13}: creates a 'Cumulative Distribution Function' plot
#' \item \code{14}: creates a 'Survival Function' plot
#' }
#' @param ... Optional \code{ggplot2} arguments.
#'
#' @description
#' Plots simulation results.
#'
#' @details
#' Generic function to plot all kinds of simulation results.
#'
#' @return
#' A \code{ggplot2} object.
#'
#' @export
#'
plot.SimulationResults = function(x, y, main = NA_character_, xlab = NA_character_, ylab = NA_character_, type = 1, palette = "Set1", theta = seq(-1, 1, 0.01), plotPointsEnabled = NA, legendPosition = NA_integer_, showSource = FALSE, ...) { fCall = match.call(expand.dots = FALSE) simulationResultsName <- as.character(fCall$x)[1] .plotSimulationResults(simulationResults = x, designMaster = x$.design, main = main, xlab = xlab, ylab = ylab, type = type, palette = palette, theta = theta, plotPointsEnabled = plotPointsEnabled, legendPosition = legendPosition, showSource = showSource, simulationResultsName = simulationResultsName, ...) }
#'
#' @title
#' Get Simulation Data
#'
#' @description
#' Returns the aggregated simulation data.
#'
#' @param x A \code{SimulationResults} object created by \code{\link{getSimulationMeans}},
#' \code{\link{getSimulationRates}}, or \code{\link{getSimulationSurvival}}.
#'
#' @details
#' These data are the basis for the creation of the summary statistics in the simulation results output.
#'
#' @keywords internal
#'
#' @export
#'
getData <- function(x) { if (!inherits(x, "SimulationResults")) { # or 'Dataset'
stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'x' must be a 'SimulationResults' object; for example, use getSimulationSurvival() to create one") } return(x$.data) }
.getAggregatedDataByIterationNumber <- function(rawData, iterationNumber) { subData <- rawData[rawData$iterationNumber == iterationNumber, ] eventsPerStage1 <- sum(subData$event[subData$treatmentGroup == 1]) eventsPerStage2 <- sum(subData$event[subData$treatmentGroup == 2]) return(data.frame( iterationNumber = iterationNumber, stageNumber = subData$stopStage[1], analysisTime = max(subData$observationTime), numberOfSubjects = nrow(subData), eventsPerStage1 = eventsPerStage1, eventsPerStage2 = eventsPerStage2, eventsPerStage = eventsPerStage1 + eventsPerStage2 )) }
.getAggregatedData <- function(rawData) { iterationNumbers <- sort(unique(rawData$iterationNumber)) data <- NULL for (iterationNumber in iterationNumbers) { row <- .getAggregatedDataByIterationNumber(rawData, iterationNumber) if (is.null(data)) { data <- row } else { data <- rbind(data, row) } } return(data) }
#'
#' @title
#' Get Simulation Raw Data
#'
#' @description
#' Returns the raw data which was generated randomly for simulation.
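#' See \code{\link{getData}} for the aggregated data. A hedged usage sketch:
#' \code{getRawData(getSimulationSurvival(..., maxNumberOfRawDatasetsPerStage = 1))}.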
#'
#' @param x A \code{SimulationResultsSurvival} object created by \code{\link{getSimulationSurvival}}.
#' @param aggregate If \code{TRUE}, the raw data will be aggregated similarly to
#' the result of \code{\link{getData}}, default is \code{FALSE}.
#'
#' @details
#' This function works only if \code{\link{getSimulationSurvival}} was called
#' with a \code{maxNumberOfRawDatasetsPerStage > 0} (default is \code{0}).
#'
#' @keywords internal
#'
#' @export
#'
getRawData <- function(x, aggregate = FALSE) { if (!inherits(x, "SimulationResultsSurvival")) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'x' must be a 'SimulationResultsSurvival' object; use getSimulationSurvival() to create one") } rawData <- x$.rawData if (is.null(rawData) || ncol(rawData) == 0 || nrow(rawData) == 0) { stop("Simulation results contain no raw data; ", "choose a 'maxNumberOfRawDatasetsPerStage' > 0, e.g., ", "getSimulationSurvival(..., maxNumberOfRawDatasetsPerStage = 1)") } if (!aggregate) { return(rawData) } return(.getAggregatedData(rawData)) }
rpact/R/f_simulation_means.R0000644000176200001440000010170313554520666015601 0ustar liggesusers################################################################################################## # # # -- Simulation of continuous data with group sequential and combination test -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 21-04-2019 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ##################################################################################################
.getTestStatisticsMeans <- function( designNumber, informationRates, groups, meanRatio, thetaH0, allocationRatioPlanned, sampleSizesPerStage, testStatisticsPerStage) { stage <- length(sampleSizesPerStage) overallTestStatistic <- sqrt(sampleSizesPerStage) %*% testStatisticsPerStage / sqrt(sum(sampleSizesPerStage)) pValuesSeparate <- 1 - pnorm(testStatisticsPerStage[stage]) if (designNumber == 1L) { value <- overallTestStatistic } else if (designNumber == 2L) { if (stage == 1) { value <- testStatisticsPerStage[1] } else { value <- (sqrt(informationRates[1]) * testStatisticsPerStage[1] + sqrt(informationRates[2:stage] - informationRates[1:(stage - 1)]) %*% testStatisticsPerStage[2:stage]) / sqrt(informationRates[stage]) } } else if (designNumber == 3L) { weightFisher <- rep(NA_real_, stage) weightFisher[1] <- 1 if (stage > 1) { weightFisher[2:stage] <- sqrt(informationRates[2:stage] - informationRates[1:(stage-1)]) / sqrt(informationRates[1]) } value <- prod((1 - pnorm(testStatisticsPerStage[1:stage]))^weightFisher[1:stage]) } if (groups == 1) { effectEstimate <- overallTestStatistic / sqrt(sum(sampleSizesPerStage)) } else { if (!meanRatio) { effectEstimate <- overallTestStatistic / sqrt(allocationRatioPlanned * sum(sampleSizesPerStage)) * (1 + allocationRatioPlanned) } else { effectEstimate <- overallTestStatistic / sqrt(allocationRatioPlanned*sum(sampleSizesPerStage)) * sqrt((1 + allocationRatioPlanned) * (1 + thetaH0^2 * allocationRatioPlanned)) } } return(list(value = value, overallTestStatistic = overallTestStatistic, effectEstimate = effectEstimate, pValuesSeparate = pValuesSeparate)) }
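# Editorial sketch (hypothetical helper, not part of the package API): a
# standalone illustration of the inverse normal combination rule implemented
# in .getTestStatisticsMeans above (designNumber == 2L). The stage-wise
# z-scores are weighted with the square roots of the information rate
# increments and standardized by the square root of the cumulative
# information rate; the default arguments are made-up demo values.
.exampleInverseNormalCombination <- function(testStatisticsPerStage = c(1.2, 0.8, 2.1),
		informationRates = c(1/3, 2/3, 1)) {
	stage <- length(testStatisticsPerStage)
	if (stage == 1) {
		return(testStatisticsPerStage[1])
	}
	# sqrt of the first information rate, then sqrt of the increments
	weights <- sqrt(c(informationRates[1],
		informationRates[2:stage] - informationRates[1:(stage - 1)]))
	sum(weights * testStatisticsPerStage) / sqrt(informationRates[stage])
}
# With equally spaced information rates all weights are equal, so the
# statistic coincides with the cumulative z-score sum(z) / sqrt(stage), e.g.:
# .exampleInverseNormalCombination(c(1.2, 0.8, 2.1), c(1/3, 2/3, 1))  # ~2.367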
.getSimulationMeansStageSubjects <- function(..., stage, meanRatio, thetaH0, groups, plannedSubjects, allocationRatioPlanned, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, sampleSizesPerStage, thetaStandardized, conditionalPower, conditionalCriticalValue) { if (is.na(conditionalPower)) { return(plannedSubjects[stage] - plannedSubjects[stage - 1]) } mult <- 1 if (groups == 2) { thetaH0 <- ifelse(meanRatio, thetaH0, 1) mult <- 1 + 1 / allocationRatioPlanned + thetaH0^2 * (1 + allocationRatioPlanned) } stageSubjects <- (max(0, conditionalCriticalValue + stats::qnorm(conditionalPower)))^2 * mult / (max(1e-12, thetaStandardized))^2 stageSubjects <- min(max(minNumberOfSubjectsPerStage[stage], stageSubjects), maxNumberOfSubjectsPerStage[stage]) return(stageSubjects) }
.getSimulationStepMeans <- function(..., k, kMax, designNumber, informationRates, futilityBounds, alpha0Vec, criticalValues, meanRatio, thetaH0, alternative, stDev, groups, plannedSubjects, directionUpper, allocationRatioPlanned, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, conditionalPower, thetaH1, effectEstimate, sampleSizesPerStage, testStatisticsPerStage, testStatistic, calcSubjectsFunction) { stageSubjects <- plannedSubjects[1]
# perform sample size recalculation for stages 2,..., kMax
simulatedConditionalPower <- 0 if (k > 1) {
# used effect size is either estimated from test statistic or pre-fixed
if (is.na(thetaH1)) { thetaStandardized <- effectEstimate } else { thetaStandardized <- (thetaH1 - thetaH0) / stDev } if (!directionUpper) { thetaStandardized <- -thetaStandardized }
# conditional critical value to reject the null hypothesis at the next stage of the trial
if (designNumber == 3L) { conditionalCriticalValue <- stats::qnorm(1 - (criticalValues[k] / testStatistic$value)^(1 / sqrt((informationRates[k] - informationRates[k - 1]) / informationRates[1]))) } else { conditionalCriticalValue <- (criticalValues[k] * sqrt(informationRates[k]) - testStatistic$value * sqrt(informationRates[k - 1])) / sqrt(informationRates[k] - informationRates[k - 1]) }
stageSubjects <- calcSubjectsFunction( stage = k, meanRatio = meanRatio, thetaH0 = thetaH0, groups = groups, plannedSubjects = plannedSubjects, sampleSizesPerStage = sampleSizesPerStage, allocationRatioPlanned = allocationRatioPlanned, minNumberOfSubjectsPerStage = minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage = maxNumberOfSubjectsPerStage, conditionalPower = conditionalPower, thetaStandardized = thetaStandardized, conditionalCriticalValue = conditionalCriticalValue)
if (is.null(stageSubjects) || length(stageSubjects) != 1 || !is.numeric(stageSubjects) || is.na(stageSubjects)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'calcSubjectsFunction' returned an illegal or undefined result (", stageSubjects,"); ", "the output must be a single numeric value") }
# calculate conditional power for computed stageSubjects
if (groups == 1) { thetaStandardized <- thetaStandardized } else { if (!meanRatio) { thetaStandardized <- thetaStandardized * sqrt(allocationRatioPlanned) / (1 + allocationRatioPlanned) } else { thetaStandardized <- thetaStandardized * sqrt(allocationRatioPlanned) / sqrt((1 + allocationRatioPlanned) * (1 + thetaH0^2 * allocationRatioPlanned)) } }
simulatedConditionalPower <- 1 - stats::pnorm(conditionalCriticalValue - thetaStandardized * sqrt(stageSubjects)) }
if (groups == 1) { testResult <- (2 * directionUpper - 1) * stats::rnorm(1, (alternative - thetaH0) / stDev * sqrt(stageSubjects)) } else { if (!meanRatio) { testResult <- (2 *
directionUpper - 1) * stats::rnorm(1, (alternative - thetaH0) / stDev * sqrt(allocationRatioPlanned * stageSubjects) / (1 + allocationRatioPlanned)) } else { testResult <- (2 * directionUpper - 1) * stats::rnorm(1, (alternative - thetaH0) / stDev * sqrt(allocationRatioPlanned * stageSubjects) / sqrt((1 + allocationRatioPlanned) * (1 + thetaH0^2 * allocationRatioPlanned))) } } sampleSizesPerStage <- c(sampleSizesPerStage, stageSubjects) testStatisticsPerStage <- c(testStatisticsPerStage, testResult) testStatistic <- .getTestStatisticsMeans(designNumber, informationRates, groups, meanRatio, thetaH0, allocationRatioPlanned, sampleSizesPerStage, testStatisticsPerStage) effectEstimate <- testStatistic$effectEstimate simulatedRejections <- 0 simulatedFutilityStop <- 0 trialStop <- FALSE if (k == kMax){ trialStop <- TRUE } if (designNumber <= 2) { if (testStatistic$value >= criticalValues[k]) { simulatedRejections <- 1 trialStop <- TRUE } # add small number to avoid ties if (k < kMax && testStatistic$value <= futilityBounds[k]) { simulatedFutilityStop <- 1 trialStop <- TRUE } } else { if (testStatistic$value <= criticalValues[k]) { simulatedRejections <- 1 trialStop <- TRUE } if (k < kMax && testStatistic$pValuesSeparate >= alpha0Vec[k]) { simulatedFutilityStop <- 1 trialStop <- TRUE } } if (!directionUpper){ effectEstimate <- -effectEstimate } return(list( trialStop = trialStop, sampleSizesPerStage = sampleSizesPerStage, testStatisticsPerStage = testStatisticsPerStage, testStatistic = testStatistic, effectEstimate = effectEstimate, simulatedSubjects = stageSubjects, simulatedRejections = simulatedRejections, simulatedFutilityStop = simulatedFutilityStop, simulatedConditionalPower = simulatedConditionalPower )) } #' @title #' Get Simulation Means #' #' @description #' Returns the simulated power, stopping probabilities, conditional power, and expected sample size #' for testing means in a one or two treatment groups testing situation. #' #' @param design The trial design. If no trial design is specified, a fixed sample size design is used. #' In this case, \code{alpha}, \code{beta}, and \code{sided} can be directly entered as argument. #' @param groups The number of treatment groups (1 or 2), default is \code{2}. #' @param meanRatio If \code{meanRatio = TRUE} is specified, the design characteristics for #' one-sided testing of H0: mu1/mu2 = thetaH0 are simulated, default is \code{FALSE}. #' @param thetaH0 The null hypothesis value. For one-sided testing, a value != 0 #' (or a value != 1 for testing the mean ratio) can be specified, default is #' \code{0} or \code{1} for difference and ratio testing, respectively. #' @param alternative The alternative hypothesis value. This can be a vector of assumed #' alternatives, default is \code{seq(0,1,0.2)}. #' @param directionUpper Specifies the direction of the alternative, only applicable #' for one-sided testing, default is \code{TRUE}. #' @param allocationRatioPlanned The planned allocation ratio for a two treatment groups #' design, default is \code{1}. #' @param plannedSubjects \code{plannedSubjects} is a vector of length \code{kMax} (the number of stages of the design) #' that determines the number of cumulated (overall) subjects when the interim stages are planned. #' @param minNumberOfSubjectsPerStage When performing a data driven sample size recalculation, #' the vector with length kMax \code{minNumberOfSubjectsPerStage} determines the #' minimum number of subjects per stage (i.e., not cumulated), the first element #' is not taken into account. 
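#' (A hedged illustration: with \code{kMax = 3} one might specify
#' \code{c(20, 20, 20)}; the first element is then ignored.)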
#' @param maxNumberOfSubjectsPerStage When performing a data driven sample size recalculation,
#' the vector with length kMax \code{maxNumberOfSubjectsPerStage} determines the maximum number
#' of subjects per stage (i.e., not cumulated), the first element is not taken into account.
#' @param conditionalPower The conditional power for the subsequent stage under which the sample size recalculation is performed.
#' @param thetaH1 If specified, the value of the alternative under which the conditional power calculation is performed.
#' @param stDev The standard deviation under which the conditional power calculation is performed, default is 1.
#' If \code{meanRatio = TRUE} is specified, stDev defines the coefficient of variation sigma/mu2.
#' @param maxNumberOfIterations The number of simulation iterations.
#' @param calcSubjectsFunction Optionally, a function can be entered that defines the way of performing the sample size
#' recalculation. By default, sample size recalculation is performed with conditional power with specified
#' \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (see details
#' and examples).
#' @param seed The seed to reproduce the simulation, default is a random seed.
#' @param ... Ensures that all arguments are named and
#' that a warning will be displayed if unknown arguments are passed.
#'
#' @details
#' At the given design, the function simulates the power, stopping probabilities, conditional power, and expected
#' sample size for the given number of subjects and parameter configuration.
#' Additionally, an allocation ratio = n1/n2 can be specified where n1 and n2 are the number
#' of subjects in the two treatment groups.
#'
#' calcSubjectsFunction\cr
#' This function returns the number of subjects at given conditional power and conditional Type I error rate for the specified
#' testing situation. The function might depend on variables \code{stage}, \code{meanRatio}, \code{thetaH0}, \code{groups},
#' \code{plannedSubjects}, \code{sampleSizesPerStage}, \code{directionUpper}, \code{allocationRatioPlanned},
#' \code{minNumberOfSubjectsPerStage},
#' \code{maxNumberOfSubjectsPerStage}, \code{conditionalPower}, \code{conditionalCriticalValue},
#' \code{thetaStandardized}.
#' The function has to contain the three-dots argument '...' (see examples).
#'
#' @section Simulation Data:
#' The summary statistics "Simulated data" contain the following parameters: median [range]; mean +/-sd\cr
#'
#' \code{$show(showStatistics = FALSE)} or \code{$setShowStatistics(FALSE)} can be used to disable
#' the output of the aggregated simulated data.\cr
#'
#' Example 1: \cr
#' \code{simulationResults <- getSimulationMeans(plannedSubjects = 40)} \cr
#' \code{simulationResults$show(showStatistics = FALSE)}\cr
#'
#' Example 2: \cr
#' \code{simulationResults <- getSimulationMeans(plannedSubjects = 40)} \cr
#' \code{simulationResults$setShowStatistics(FALSE)}\cr
#' \code{simulationResults}\cr
#'
#' \code{\link{getData}} can be used to get the aggregated simulated data from the
#' object as \code{\link[base]{data.frame}}. The data frame contains the following columns:
#' \enumerate{
#' \item \code{iterationNumber}: The number of the simulation iteration.
#' \item \code{stageNumber}: The stage.
#' \item \code{alternative}: The alternative hypothesis value.
#' \item \code{numberOfSubjects}: The number of subjects under consideration when the
#' (interim) analysis takes place.
#' \item \code{rejectPerStage}: 1 if the null hypothesis can be rejected, 0 otherwise.
#' \item \code{futilityPerStage}: 1 if the study should be stopped for futility, 0 otherwise.
#' \item \code{testStatistic}: The test statistic that is used for the test decision,
#' depends on which design was chosen (group sequential, inverse normal, or Fisher's combination test).
#' \item \code{testStatisticsPerStage}: The test statistic for each stage if only data from
#' the considered stage is taken into account.
#' \item \code{effectEstimate}: Standardized overall simulated effect estimate.
#' \item \code{trialStop}: \code{TRUE} if the study should be stopped for efficacy or futility, or the final stage has been reached; \code{FALSE} otherwise.
#' \item \code{conditionalPowerAchieved}: The conditional power for the subsequent stage of the trial for
#' the selected sample size and effect. The effect is either estimated from the data or can be
#' user defined with \code{thetaH1}.
#' }
#'
#' @return Returns a \code{\link{SimulationResultsMeans}} object.
#'
#' @export
#'
#' @examples
#'
#' # Three-stage group sequential design (the default) with minimum required
#' # definitions, alternative = 40, standard deviation = 50
#' getSimulationMeans(getDesignGroupSequential(), alternative = 40,
#' 	stDev = 50, plannedSubjects = c(20, 40, 60), thetaH1 = 60,
#' 	maxNumberOfIterations = 50)
#'
#' \donttest{
#'
#' # Increase the number of simulation iterations and compare results
#' # with the power calculator using the normal approximation
#' getSimulationMeans(alternative = 0:4, stDev = 5,
#' 	plannedSubjects = 40, maxNumberOfIterations = 50)
#' getPowerMeans(alternative = 0:4, stDev = 5,
#' 	maxNumberOfSubjects = 40, normalApproximation = TRUE)
#'
#' # Do the same for a three-stage O'Brien & Fleming inverse
#' # normal group sequential design with non-binding futility stops
#' designIN <- getDesignInverseNormal(typeOfDesign = "OF", futilityBounds = c(0, 0))
#' x <- getSimulationMeans(designIN, alternative = c(0:4), stDev = 5,
#' 	plannedSubjects = c(20, 40, 60), maxNumberOfIterations = 1000)
#' getPowerMeans(designIN, alternative = 0:4, stDev = 5,
#' 	maxNumberOfSubjects = 60, normalApproximation = TRUE)
#'
#' # Assess power and average sample size if a sample size increase is foreseen
#' # at conditional power 80% for each subsequent stage based on observed overall
#' # effect and specified minNumberOfSubjectsPerStage and
#' # maxNumberOfSubjectsPerStage
#' getSimulationMeans(designIN, alternative = 0:4, stDev = 5,
#' 	plannedSubjects = c(20, 40, 60),
#' 	minNumberOfSubjectsPerStage = c(20, 20, 20),
#' 	maxNumberOfSubjectsPerStage = c(80, 80, 80),
#' 	conditionalPower = 0.8,
#' 	maxNumberOfIterations = 50)
#'
#' # Do the same under the assumption that a sample size increase only takes
#' # place at the first interim. The sample size for the third stage is set equal
#' # to the second stage sample size.
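#' # (Any 'calcSubjectsFunction' must accept '...' and return a single
#' # numeric value, i.e., the stage-wise number of subjects.)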
#' mySampleSizeCalculationFunction <- function(..., stage, #' minNumberOfSubjectsPerStage, #' maxNumberOfSubjectsPerStage, #' sampleSizesPerStage, #' conditionalPower, #' conditionalCriticalValue, #' thetaStandardized) { #' if (stage == 2) { #' stageSubjects <- 4 * (max(0, conditionalCriticalValue + #' stats::qnorm(conditionalPower)))^2 / (max(1e-12, thetaStandardized))^2 #' stageSubjects <- min(max(minNumberOfSubjectsPerStage[stage], #' stageSubjects), maxNumberOfSubjectsPerStage[stage]) #' } else { #' stageSubjects <- sampleSizesPerStage[stage - 1] #' } #' return(stageSubjects) #' } #' getSimulationMeans(designIN, alternative = 2:4, stDev = 5, #' plannedSubjects = c(20, 40, 60), #' minNumberOfSubjectsPerStage = c(20, 20, 20), #' maxNumberOfSubjectsPerStage = c(40, 160, 160), #' conditionalPower = 0.8, #' calcSubjectsFunction = mySampleSizeCalculationFunction, #' maxNumberOfIterations = 50) #' #' } #' getSimulationMeans <- function( design = NULL, ..., groups = 2L, meanRatio = FALSE, thetaH0 = ifelse(meanRatio, 1, 0), alternative = C_ALTERNATIVE_POWER_SIMULATION_DEFAULT, stDev = C_STDEV_DEFAULT, plannedSubjects = NA_real_, directionUpper = C_DIRECTION_UPPER_DEFAULT, allocationRatioPlanned = NA_real_, minNumberOfSubjectsPerStage = NA_real_, maxNumberOfSubjectsPerStage = NA_real_, conditionalPower = NA_real_, thetaH1 = NA_real_, maxNumberOfIterations = C_MAX_SIMULATION_ITERATIONS_DEFAULT, seed = NA_real_, calcSubjectsFunction = NULL) { if (is.null(design)) { design <- .getDefaultDesignForSampleSizeCalculations(...) .warnInCaseOfUnknownArguments(functionName = "getSimulationMeans", ignore = c("alpha", "beta", "sided", "twoSidedPower"), ...) } else { .assertIsTrialDesign(design) .warnInCaseOfUnknownArguments(functionName = "getSimulationMeans", ...) .warnInCaseOfTwoSidedPowerArgument(...) } .assertIsSingleLogical(directionUpper, "directionUpper") .assertIsSingleNumber(thetaH0, "thetaH0") if (meanRatio) { .assertIsInOpenInterval(thetaH0, "thetaH0", 0, NULL, naAllowed = TRUE) } .assertIsNumericVector(alternative, "alternative", naAllowed = FALSE) .assertIsNumericVector(minNumberOfSubjectsPerStage, "minNumberOfSubjectsPerStage", naAllowed = TRUE) .assertIsNumericVector(maxNumberOfSubjectsPerStage, "maxNumberOfSubjectsPerStage", naAllowed = TRUE) .assertIsSingleNumber(conditionalPower, "conditionalPower", naAllowed = TRUE) .assertIsInOpenInterval(conditionalPower, "conditionalPower", 0, 1, naAllowed = TRUE) .assertIsSingleNumber(thetaH1, "thetaH1", naAllowed = TRUE) if (meanRatio) { .assertIsInOpenInterval(thetaH1, "thetaH1", 0, NULL, naAllowed = TRUE) } .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned", naAllowed = TRUE) .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, NULL, naAllowed = TRUE) .assertIsSinglePositiveInteger(maxNumberOfIterations, "maxNumberOfIterations", validateType = FALSE) .assertIsSingleNumber(seed, "seed", naAllowed = TRUE) .assertIsValidStandardDeviation(stDev) if (is.null(calcSubjectsFunction)) { calcSubjectsFunction <- .getSimulationMeansStageSubjects } .assertIsValidFunction(fun = calcSubjectsFunction, funArgName = "calcSubjectsFunction", expectedFunction = .getSimulationMeansStageSubjects) if (design$sided == 2) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "only one-sided case is implemented for the simulation design") } if (groups == 1L) { if (isTRUE(meanRatio)) { warning("'meanRatio' (", meanRatio, ") will be ignored ", "because it is not applicable for 'groups' = 1", call. 
= FALSE) }
if (!is.na(allocationRatioPlanned)) { warning("'allocationRatioPlanned' (", allocationRatioPlanned, ") will be ignored because it is not applicable for 'groups' = 1", call. = FALSE) } } else if (is.na(allocationRatioPlanned)) { allocationRatioPlanned <- C_ALLOCATION_RATIO_DEFAULT }
simulationResults <- SimulationResultsMeans(design, meanRatio = meanRatio)
minNumberOfSubjectsPerStage <- .assertIsValidMinNumberOfSubjectsPerStage(minNumberOfSubjectsPerStage, "minNumberOfSubjectsPerStage", plannedSubjects, conditionalPower, design$kMax)
maxNumberOfSubjectsPerStage <- .assertIsValidMinNumberOfSubjectsPerStage(maxNumberOfSubjectsPerStage, "maxNumberOfSubjectsPerStage", plannedSubjects, conditionalPower, design$kMax)
if (!is.na(conditionalPower)) { if (design$kMax > 1) { if (any(maxNumberOfSubjectsPerStage - minNumberOfSubjectsPerStage < 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'maxNumberOfSubjectsPerStage' (", .arrayToString(maxNumberOfSubjectsPerStage), ") must not be smaller than 'minNumberOfSubjectsPerStage' (", .arrayToString(minNumberOfSubjectsPerStage), ")") } .setValueAndParameterType(simulationResults, "minNumberOfSubjectsPerStage", minNumberOfSubjectsPerStage, NA_real_) .setValueAndParameterType(simulationResults, "maxNumberOfSubjectsPerStage", maxNumberOfSubjectsPerStage, NA_real_) } else { warning("'conditionalPower' will be ignored for fixed sample design", call. = FALSE) } } else { if (length(minNumberOfSubjectsPerStage) != 1 || !is.na(minNumberOfSubjectsPerStage)) { warning("'minNumberOfSubjectsPerStage' (", .arrayToString(minNumberOfSubjectsPerStage), ") ", "will be ignored because no 'conditionalPower' is defined", call. = FALSE) simulationResults$minNumberOfSubjectsPerStage <- NA_real_ } if (length(maxNumberOfSubjectsPerStage) != 1 || !is.na(maxNumberOfSubjectsPerStage)) { warning("'maxNumberOfSubjectsPerStage' (", .arrayToString(maxNumberOfSubjectsPerStage), ") ", "will be ignored because no 'conditionalPower' is defined", call.
= FALSE) simulationResults$maxNumberOfSubjectsPerStage <- NA_real_ } } .assertIsIntegerVector(plannedSubjects, "plannedSubjects", validateType = FALSE) if (length(plannedSubjects) != design$kMax) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'plannedSubjects' (", .arrayToString(plannedSubjects), ") must have length ", design$kMax) } .assertIsInClosedInterval(plannedSubjects, "plannedSubjects", lower = 1, upper = NULL) .assertValuesAreStrictlyIncreasing(plannedSubjects, "plannedSubjects") .assertIsSingleLogical(directionUpper, "directionUpper") seed <- .setSeed(seed) effect <- alternative - thetaH0 simulationResults$effect <- effect .setValueAndParameterType(simulationResults, "meanRatio", meanRatio, FALSE) .setValueAndParameterType(simulationResults, "thetaH0", thetaH0, ifelse(meanRatio, 1, 0)) .setValueAndParameterType(simulationResults, "alternative", alternative, C_ALTERNATIVE_POWER_SIMULATION_DEFAULT) .setValueAndParameterType(simulationResults, "stDev", stDev, C_STDEV_DEFAULT) .setValueAndParameterType(simulationResults, "groups", as.integer(groups), 2L) .setValueAndParameterType(simulationResults, "allocationRatioPlanned", allocationRatioPlanned, C_ALLOCATION_RATIO_DEFAULT) if (groups == 1L) { simulationResults$.setParameterType("allocationRatioPlanned", C_PARAM_NOT_APPLICABLE) } .setValueAndParameterType(simulationResults, "plannedSubjects", plannedSubjects, NA_real_) .setValueAndParameterType(simulationResults, "directionUpper", directionUpper, C_DIRECTION_UPPER_DEFAULT) .setValueAndParameterType(simulationResults, "minNumberOfSubjectsPerStage", minNumberOfSubjectsPerStage, NA_real_, notApplicableIfNA = TRUE) .setValueAndParameterType(simulationResults, "maxNumberOfSubjectsPerStage", maxNumberOfSubjectsPerStage, NA_real_, notApplicableIfNA = TRUE) .setValueAndParameterType(simulationResults, "conditionalPower", conditionalPower, NA_real_, notApplicableIfNA = TRUE) .setValueAndParameterType(simulationResults, "thetaH1", thetaH1, NA_real_, notApplicableIfNA = TRUE) .setValueAndParameterType(simulationResults, "maxNumberOfIterations", as.integer(maxNumberOfIterations), C_MAX_SIMULATION_ITERATIONS_DEFAULT) .setValueAndParameterType(simulationResults, "seed", seed, NA_real_) if (.isTrialDesignGroupSequential(design)) { designNumber <- 1L } else if (.isTrialDesignInverseNormal(design)) { designNumber <- 2L } else if (.isTrialDesignFisher(design)) { designNumber <- 3L } if (.isTrialDesignFisher(design)) { alpha0Vec <- design$alpha0Vec futilityBounds <- rep(NA_real_, design$kMax - 1) } else { alpha0Vec <- rep(NA_real_, design$kMax - 1) futilityBounds <- design$futilityBounds } informationRates <- design$informationRates criticalValues <- design$criticalValues kMax <- design$kMax cols <- length(alternative) sampleSizes <- matrix(0, kMax, cols) rejectPerStage <- matrix(0, kMax, cols) overallReject <- rep(0, cols) futilityPerStage <- matrix(0, kMax - 1, cols) futilityStop <- rep(0, cols) iterations <- matrix(0, kMax, cols) expectedNumberOfSubjects <- rep(0, cols) conditionalPowerAchieved <- matrix(NA_real_, kMax, cols) len <- length(alternative) * maxNumberOfIterations * kMax dataIterationNumber <- rep(NA_real_, len) dataStageNumber <- rep(NA_real_, len) dataAlternative <- rep(NA_real_, len) dataEffect <- rep(NA_real_, len) dataNumberOfSubjects <- rep(NA_real_, len) dataRejectPerStage <- rep(NA_real_, len) dataFutilityPerStage <- rep(NA_real_, len) dataTestStatisticsPerStage <- rep(NA_real_, len) dataTestStatistic <- rep(NA_real_, len) dataTrialStop <- rep(NA, len) dataConditionalPowerAchieved 
<- rep(NA_real_, len) dataEffectEstimate <- rep(NA_real_, len) if (designNumber == 3L) { dataPValuesSeparate <- rep(NA_real_, len) } index <- 1 for (i in 1:length(alternative)) { simulatedSubjects <- rep(0, kMax) simulatedOverallSubjects <- rep(0, kMax) simulatedRejections <- rep(0, kMax) simulatedFutilityStop <- rep(0, kMax - 1) simulatedOverallSubjects <- 0 simulatedConditionalPower <- rep(0, kMax) for (j in 1:maxNumberOfIterations) { trialStop <- FALSE sampleSizesPerStage <- c() testStatisticsPerStage <- c() testStatistic <- NULL effectEstimate <- NULL for (k in 1:kMax) { if (!trialStop) { stepResult <- .getSimulationStepMeans( k = k, kMax = kMax, designNumber = designNumber, informationRates = informationRates, futilityBounds = futilityBounds, alpha0Vec = alpha0Vec, criticalValues = criticalValues, meanRatio = meanRatio, thetaH0 = thetaH0, alternative = alternative[i], stDev = stDev, groups = groups, plannedSubjects = plannedSubjects, directionUpper = directionUpper, allocationRatioPlanned = allocationRatioPlanned, minNumberOfSubjectsPerStage = minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage = maxNumberOfSubjectsPerStage, conditionalPower = conditionalPower, thetaH1 = thetaH1, effectEstimate = effectEstimate, sampleSizesPerStage = sampleSizesPerStage, testStatisticsPerStage = testStatisticsPerStage, testStatistic = testStatistic, calcSubjectsFunction = calcSubjectsFunction) trialStop <- stepResult$trialStop sampleSizesPerStage <- stepResult$sampleSizesPerStage testStatisticsPerStage <- stepResult$testStatisticsPerStage testStatistic <- stepResult$testStatistic simulatedSubjectsStep <- stepResult$simulatedSubjects simulatedRejectionsStep <- stepResult$simulatedRejections simulatedFutilityStopStep <- stepResult$simulatedFutilityStop simulatedConditionalPowerStep <- NA_real_ effectEstimate <- stepResult$effectEstimate if (k > 1) { simulatedConditionalPowerStep <- stepResult$simulatedConditionalPower } iterations[k, i] <- iterations[k, i] + 1 simulatedSubjects[k] <- simulatedSubjects[k] + simulatedSubjectsStep simulatedRejections[k] <- simulatedRejections[k] + simulatedRejectionsStep if (k < kMax) { simulatedFutilityStop[k] <- simulatedFutilityStop[k] + simulatedFutilityStopStep } simulatedConditionalPower[k] <- simulatedConditionalPower[k] + simulatedConditionalPowerStep dataIterationNumber[index] <- j dataStageNumber[index] <- k dataAlternative[index] <- alternative[i] dataEffect[index] <- effect[i] dataNumberOfSubjects[index] <- simulatedSubjectsStep dataRejectPerStage[index] <- simulatedRejectionsStep dataFutilityPerStage[index] <- simulatedFutilityStopStep dataTestStatistic[index] <- testStatistic$value dataTestStatisticsPerStage[index] <- testStatisticsPerStage[k] dataTrialStop[index] <- trialStop dataConditionalPowerAchieved[index] <- simulatedConditionalPowerStep dataEffectEstimate[index] <- effectEstimate if (designNumber == 3L) { dataPValuesSeparate[index] <- testStatistic$pValuesSeparate } index <- index + 1 } } } simulatedOverallSubjects <- sum(simulatedSubjects[1:k]) sampleSizes[, i] <- simulatedSubjects / iterations[, i] rejectPerStage[, i] <- simulatedRejections / maxNumberOfIterations overallReject[i] <- sum(simulatedRejections / maxNumberOfIterations) futilityPerStage[, i] <- simulatedFutilityStop / maxNumberOfIterations futilityStop[i] <- sum(simulatedFutilityStop / maxNumberOfIterations) expectedNumberOfSubjects[i] <- simulatedOverallSubjects / maxNumberOfIterations if (kMax > 1) { conditionalPowerAchieved[2:kMax, i] <- simulatedConditionalPower[2:kMax] / 
iterations[2:kMax, i] } } simulationResults$iterations <- iterations simulationResults$sampleSizes <- sampleSizes simulationResults$rejectPerStage <- rejectPerStage simulationResults$overallReject <- overallReject simulationResults$futilityPerStage <- futilityPerStage simulationResults$futilityStop <- futilityStop if (kMax > 1) { if (length(alternative) == 1){ simulationResults$earlyStop <- sum(futilityPerStage) + sum(rejectPerStage[1:(kMax - 1)]) } else { if (kMax > 2) { rejectPerStageColSum <- colSums(rejectPerStage[1:(kMax - 1), ]) } else { rejectPerStageColSum <- rejectPerStage[1, ] } simulationResults$earlyStop <- colSums(futilityPerStage) + rejectPerStageColSum } } else { simulationResults$earlyStop <- rep(0, length(alternative)) } simulationResults$expectedNumberOfSubjects <- expectedNumberOfSubjects simulationResults$conditionalPowerAchieved <- conditionalPowerAchieved if (!all(is.na(simulationResults$conditionalPowerAchieved))) { simulationResults$.setParameterType("conditionalPowerAchieved", C_PARAM_GENERATED) } data <- data.frame( iterationNumber = dataIterationNumber, stageNumber = dataStageNumber, alternative = dataAlternative, effect = dataEffect, numberOfSubjects = dataNumberOfSubjects, rejectPerStage = dataRejectPerStage, futilityPerStage = dataFutilityPerStage, testStatistic = dataTestStatistic, testStatisticsPerStage = dataTestStatisticsPerStage, effectEstimate = dataEffectEstimate, trialStop = dataTrialStop, conditionalPowerAchieved = round(dataConditionalPowerAchieved,6) ) if (designNumber == 3L) { data$pValue <- dataPValuesSeparate } data <- data[!is.na(data$alternative), ] simulationResults$.data <- data return(simulationResults) } rpact/R/f_analysis_base_means.R0000644000176200001440000020055213574405002016220 0ustar liggesusers###################################################################################### # # # -- Analysis of means with group sequential and combination test -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.1 # # Date: 25-11-2019 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### # @title # Get Analysis Results Means # # @description # Returns an analysis result object. # # @param design The trial design. # # @return Returns a \code{AnalysisResultsMeans} object. # # @keywords internal # .getAnalysisResultsMeans <- function(design, dataInput, ...) 
{ if (.isTrialDesignGroupSequential(design)) { return(.getAnalysisResultsMeansGroupSequential(design = design, dataInput = dataInput, ...)) } if (.isTrialDesignInverseNormal(design)) { return(.getAnalysisResultsMeansInverseNormal(design = design, dataInput = dataInput, ...)) } if (.isTrialDesignFisher(design)) { return(.getAnalysisResultsMeansFisher(design = design, dataInput = dataInput, ...)) } .stopWithWrongDesignMessage(design) } .getAnalysisResultsMeansInverseNormal <- function( design, ..., dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, thetaH0 = C_THETA_H0_MEANS_DEFAULT, thetaH1 = NA_real_, nPlanned = NA_real_, assumedStDev = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, parallelComputingEnabled = FALSE) { .assertIsTrialDesignInverseNormal(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) .warnInCaseOfUnknownArguments(functionName = ".getAnalysisResultsMeansInverseNormal", ignore = c("stage"), ...) results <- AnalysisResultsInverseNormal(design = design, dataInput = dataInput) .getAnalysisResultsMeansAll(results = results, design = design, dataInput = dataInput, stage = stage, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, thetaH0 = thetaH0, thetaH1 = thetaH1, nPlanned = nPlanned, assumedStDev = assumedStDev, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance, parallelComputingEnabled = parallelComputingEnabled) return(results) } .getAnalysisResultsMeansGroupSequential <- function( design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, thetaH0 = C_THETA_H0_MEANS_DEFAULT, thetaH1 = NA_real_, nPlanned = NA_real_, assumedStDev = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, parallelComputingEnabled = FALSE, ...) { .assertIsTrialDesignGroupSequential(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) .warnInCaseOfUnknownArguments(functionName = ".getAnalysisResultsMeansGroupSequential", ignore = c("stage"), ...) results <- AnalysisResultsGroupSequential(design = design, dataInput = dataInput) .getAnalysisResultsMeansAll(results = results, design = design, dataInput = dataInput, stage = stage, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, thetaH0 = thetaH0, thetaH1 = thetaH1, nPlanned = nPlanned, assumedStDev = assumedStDev, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance, parallelComputingEnabled = parallelComputingEnabled) return(results) } .getAnalysisResultsMeansFisher <- function( design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, thetaH0 = C_THETA_H0_MEANS_DEFAULT, thetaH1 = NA_real_, nPlanned = NA_real_, assumedStDev = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_, parallelComputingEnabled = FALSE, ...) 
{ .assertIsTrialDesignFisher(design) .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) .warnInCaseOfUnknownArguments(functionName = ".getAnalysisResultsMeansFisher", ignore = c("stage"), ...) results <- AnalysisResultsFisher(design = design, dataInput = dataInput) .getAnalysisResultsMeansAll(results = results, design = design, dataInput = dataInput, stage = stage, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, thetaH0 = thetaH0, thetaH1 = thetaH1, nPlanned = nPlanned, assumedStDev = assumedStDev, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance, iterations = iterations, seed = seed, parallelComputingEnabled = parallelComputingEnabled) return(results) } # # The following parameters will be taken from 'design': # stages, informationRates, criticalValues, futilityBounds, alphaSpent, stageLevels # .getAnalysisResultsMeansAll <- function(results, design, dataInput, stage, directionUpper, normalApproximation, equalVariances, thetaH0, thetaH1, assumedStDev, nPlanned, allocationRatioPlanned, tolerance, iterations, seed, parallelComputingEnabled = FALSE) { startTime <- Sys.time() stageResults <- .getStageResultsMeans(design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances) results$.stageResults <- stageResults .logProgress("Stage results calculated", startTime = startTime) .assertIsValidAllocationRatioPlanned(allocationRatioPlanned, dataInput$getNumberOfGroups()) assumedStDev <- .assertIsValidAssumedStDev(assumedStDev, stageResults, stage) thetaH1 <- .assertIsValidThetaH1(thetaH1, stageResults, stage) results$directionUpper <- directionUpper results$normalApproximation <- normalApproximation results$equalVariances <- equalVariances results$allocationRatioPlanned <- allocationRatioPlanned results$thetaH0 <- thetaH0 results$thetaH1 <- thetaH1 results$nPlanned <- nPlanned results$assumedStDev <- assumedStDev while (length(results$nPlanned) < design$kMax) { results$nPlanned <- c(NA_real_, results$nPlanned) } # effect size results$effectSizes <- stageResults$effectSizes # test statistic results$testStatistics <- stageResults$testStatistics # p-value results$pValues <- stageResults$pValues # combined test statistic and test action if (.isTrialDesignInverseNormal(design)) { results$combinationTestStatistics <- stageResults$combInverseNormal } else if (.isTrialDesignGroupSequential(design)) { results$overallTestStatistics <- stageResults$overallTestStatistics results$overallPValues <- stageResults$overallPValues } else if (.isTrialDesignFisher(design)) { results$combinationTestStatistics <- stageResults$combFisher } # test actions results$testActions <- getTestActions(design = design, stageResults = stageResults, stage = stage) # conditional power startTime <- Sys.time() if (.isTrialDesignFisher(design)) { conditionalPowerList <- .getConditionalPowerMeans(design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, assumedStDev = assumedStDev, thetaH1 = thetaH1, iterations = iterations, seed = seed) if (conditionalPowerList$simulated) { results$conditionalPowerSimulated <- conditionalPowerList$conditionalPower } else { results$conditionalPower <- conditionalPowerList$conditionalPower results$conditionalPowerSimulated <- numeric(0) } } 
else { results$conditionalPower <- .getConditionalPowerMeans(design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, assumedStDev = assumedStDev, thetaH1 = thetaH1)$conditionalPower } .logProgress("Conditional power calculated", startTime = startTime) # CRP - conditional rejection probabilities startTime <- Sys.time() results$conditionalRejectionProbabilities <- getConditionalRejectionProbabilities( design = design, stageResults = stageResults, stage = stage) .logProgress("Conditional rejection probabilities (CRP) calculated", startTime = startTime) if (parallelComputingEnabled && .createParallelComputingCluster()) { startTime <- Sys.time() .parallelComputingCaseNumbers <<- c(1, 2) .parallelComputingArguments <<- list( results = results, design = design, dataInput = dataInput, stage = stage, normalApproximation = normalApproximation, equalVariances = equalVariances, tolerance = tolerance, stageResults = stageResults ) parallel::clusterExport(.parallelComputingCluster, c('.getAnalysisResultsMeansParallelComputing', '.parallelComputingCaseNumbers', '.parallelComputingArguments')) parallelComputingResults <- .runAnalysisResultsMeansParallelComputing() results$repeatedConfidenceIntervalLowerBounds <- parallelComputingResults[[1]]$repeatedConfidenceIntervalLowerBounds results$repeatedConfidenceIntervalUpperBounds <- parallelComputingResults[[1]]$repeatedConfidenceIntervalUpperBounds results$repeatedPValues <- parallelComputingResults[[2]]$repeatedPValues .logProgress("Repeated confidence interval and repeated p-values calculated", startTime = startTime) } else { # RCI - repeated confidence interval startTime <- Sys.time() repeatedConfidenceIntervals <- .getRepeatedConfidenceIntervalsMeans( design = design, dataInput = dataInput, stage = stage, normalApproximation = normalApproximation, equalVariances = equalVariances, tolerance = tolerance) results$repeatedConfidenceIntervalLowerBounds <- repeatedConfidenceIntervals[1, ] results$repeatedConfidenceIntervalUpperBounds <- repeatedConfidenceIntervals[2, ] .logProgress("Repeated confidence interval calculated", startTime = startTime) # repeated p-value startTime <- Sys.time() results$repeatedPValues <- getRepeatedPValues(design = design, stageResults = stageResults, stage = stage, tolerance = tolerance) .logProgress("Repeated p-values calculated", startTime = startTime) } # final p-value startTime <- Sys.time() finalPValue <- getFinalPValue(design = design, stageResults = stageResults, stage = stage) results$finalPValues <- .getVectorWithFinalValueAtFinalStage(kMax = design$kMax, finalValue = finalPValue$pFinal, finalStage = finalPValue$finalStage) results$finalStage <- finalPValue$finalStage .logProgress("Final p-value calculated", startTime = startTime) # final confidence interval & median unbiased estimate startTime <- Sys.time() finalConfidenceIntervals <- .getFinalConfidenceIntervalMeans(design = design, dataInput = dataInput, thetaH0 = thetaH0, stage = stage, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, tolerance = tolerance) .logProgress("Final confidence interval calculated", startTime = startTime) if (!is.null(finalConfidenceIntervals)) { finalStage <- finalConfidenceIntervals$finalStage results$finalConfidenceIntervalLowerBounds <- .getVectorWithFinalValueAtFinalStage(kMax = design$kMax, finalValue = finalConfidenceIntervals$finalConfidenceInterval[1], finalStage = finalStage) 
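		# as for the lower bound above, the upper bound and the median unbiased
		# estimate are only defined at 'finalStage'; all other stages remain NA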
results$finalConfidenceIntervalUpperBounds <- .getVectorWithFinalValueAtFinalStage(kMax = design$kMax, finalValue = finalConfidenceIntervals$finalConfidenceInterval[2], finalStage = finalStage) results$medianUnbiasedEstimates <- .getVectorWithFinalValueAtFinalStage(kMax = design$kMax, finalValue = finalConfidenceIntervals$medianUnbiased, finalStage = finalStage) } return(results) } .runAnalysisResultsMeansParallelComputing <- function() { results <- parallel::parLapply(.parallelComputingCluster, .parallelComputingCaseNumbers, function(i) { .getAnalysisResultsMeansParallelComputing(i, .parallelComputingArguments) } ) return(results) } # @title # Get Analysis Results Means Parallel Computing # # @description # Internal usage for parallel computing only. # # @details # Cluster based parallel computing requires exported functions. # # @keywords internal # #' @export .getAnalysisResultsMeansParallelComputing <- function(caseNumber, arguments) { results <- arguments$results design <- arguments$design dataInput <- arguments$dataInput stage <- arguments$stage normalApproximation <- arguments$normalApproximation equalVariances <- arguments$equalVariances tolerance <- arguments$tolerance stageResults <- arguments$stageResults # RCI - repeated confidence interval if (caseNumber == 1) { repeatedConfidenceIntervals <- .getRepeatedConfidenceIntervalsMeans( design = design, dataInput = dataInput, stage = stage, normalApproximation = normalApproximation, equalVariances = equalVariances, tolerance = tolerance) return(list(repeatedConfidenceIntervalLowerBounds = repeatedConfidenceIntervals[1, ], repeatedConfidenceIntervalUpperBounds = repeatedConfidenceIntervals[2, ])) } # repeated p-value else if (caseNumber == 2) { return(list(repeatedPValues = getRepeatedPValues(design = design, stageResults = stageResults, stage = stage, tolerance = tolerance))) } stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'caseNumber' (", caseNumber, ") must be 1 or 2") } # @title # Get Stage Results Means # # @description # Returns a stage results object. # # @param design the trial design. # # @return Returns a \code{StageResultsMeans} object. # # @keywords internal # .getStageResultsMeans <- function(..., design, dataInput, thetaH0 = C_THETA_H0_MEANS_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT) { .assertIsDatasetMeans(dataInput = dataInput) .assertIsValidThetaH0DataInput(thetaH0, dataInput) .warnInCaseOfUnknownArguments(functionName = "getStageResultsMeans", ignore = c("stage"), ...) 
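	# The overall (cumulative) test statistic computed below is, for one group,
	#   t_k = (overallMean_k - thetaH0) * sqrt(overallN_k) / overallStDev_k,
	# with one-sided p-value 1 - pnorm(t_k) under the normal approximation and
	# 1 - pt(t_k, overallN_k - 1) otherwise. Minimal numeric sketch
	# (hypothetical numbers, not taken from the package):
	#   (1.2 - 1.0) * sqrt(16) / 0.8   # t = 1, so 1 - pnorm(1) ~ 0.159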
stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) effectSizes <- rep(NA_real_, design$kMax) if (dataInput$getNumberOfGroups() == 1) { overallTestStatistics <- c((dataInput$getOverallMeansUpTo(stage) - thetaH0) / dataInput$getOverallStDevsUpTo(stage) * sqrt(dataInput$getOverallSampleSizesUpTo(stage)), rep(NA_real_, design$kMax - stage)) if (normalApproximation) { overallPValues <- 1 - stats::pnorm(overallTestStatistics) } else { overallPValues <- 1 - stats::pt(overallTestStatistics, dataInput$getOverallSampleSizesUpTo(stage) - 1) } effectSizes[1:stage] <- dataInput$getOverallMeansUpTo(stage) } if (dataInput$getNumberOfGroups() == 2) { # common variance overallStDevs <- rep(NA_real_, design$kMax) for (k in 1:stage) { overallStDevs[k] <- sqrt(((sum(dataInput$getSampleSizesUpTo(k, 1)) - 1) * dataInput$getOverallStDev(k)^2 + (sum(dataInput$getSampleSizesUpTo(k, 2)) - 1) * dataInput$getOverallStDev(k, 2)^2) / (sum(dataInput$getSampleSizesUpTo(k, 1)) + sum(dataInput$getSampleSizesUpTo(k, 2)) - 2)) } overallSampleSizes1 <- dataInput$getOverallSampleSizesUpTo(stage) overallSampleSizes2 <- dataInput$getOverallSampleSizesUpTo(stage, 2) if (equalVariances) { overallTestStatistics <- c((dataInput$getOverallMeansUpTo(stage) - dataInput$getOverallMeansUpTo(stage, 2) - thetaH0) / overallStDevs[1:stage] / sqrt(1 / overallSampleSizes1 + 1 / overallSampleSizes2), rep(NA_real_, design$kMax - stage)) } else { overallTestStatistics <- c((dataInput$getOverallMeansUpTo(stage) - dataInput$getOverallMeansUpTo(stage, 2) - thetaH0) / (sqrt(dataInput$getOverallStDevsUpTo(stage)^2 / overallSampleSizes1 + dataInput$getOverallStDevsUpTo(stage, 2)^2 / overallSampleSizes2)), rep(NA_real_, design$kMax - stage)) } if (normalApproximation) { overallPValues <- 1 - stats::pnorm(overallTestStatistics) } else { if (equalVariances) { overallPValues <- 1 - stats::pt(overallTestStatistics, overallSampleSizes1 + overallSampleSizes2 - 2) } else { u <- dataInput$getOverallStDevsUpTo(stage)^2 / overallSampleSizes1 / (dataInput$getOverallStDevsUpTo(stage)^2 / overallSampleSizes1 + dataInput$getOverallStDevsUpTo(stage, 2)^2 / overallSampleSizes2) overallPValues <- 1 - stats::pt(overallTestStatistics, 1 / (u^2 / (overallSampleSizes1 - 1) + (1 - u)^2 / (overallSampleSizes2 - 1))) } } effectSizes <- dataInput$getOverallMeansUpTo(stage) - dataInput$getOverallMeansUpTo(stage, 2) } if (!directionUpper) { overallPValues <- 1 - overallPValues } # calculation of stagewise test statistics and combination tests testStatistics <- rep(NA_real_, design$kMax) pValues <- rep(NA_real_, design$kMax) combInverseNormal <- rep(NA_real_, design$kMax) combFisher <- rep(NA_real_, design$kMax) weightsInverseNormal <- .getWeightsInverseNormal(design) weightsFisher <- .getWeightsFisher(design) for (k in 1:stage) { if (dataInput$getNumberOfGroups() == 1) { # stage-wise test statistics testStatistics[k] <- (dataInput$getMean(k) - thetaH0) / dataInput$getStDev(k) * sqrt(dataInput$getSampleSize(k)) if (normalApproximation) { # stage-wise p-values pValues[k] <- 1 - stats::pnorm(testStatistics[k]) } else { pValues[k] <- 1 - stats::pt(testStatistics[k], dataInput$getSampleSize(k) - 1) } } if (dataInput$getNumberOfGroups() == 2) { # stage-wise test statistics if (equalVariances) { testStatistics[k] <- (dataInput$getMean(k, 1) - dataInput$getMean(k, 2) - thetaH0) / sqrt(((dataInput$getSampleSize(k, 1) - 1) * dataInput$getStDev(k, 1)^2 + (dataInput$getSampleSize(k, 2) - 1) * dataInput$getStDev(k, 2)^2) / (dataInput$getSampleSize(k, 1) + 
dataInput$getSampleSize(k, 2) - 2)) / sqrt(1 / dataInput$getSampleSize(k, 1) + 1 / dataInput$getSampleSize(k, 2)) } else { testStatistics[k] <- (dataInput$getMean(k, 1) - dataInput$getMean(k, 2) - thetaH0) / sqrt(dataInput$getStDev(k, 1)^2 / dataInput$getSampleSize(k, 1) + dataInput$getStDev(k, 2)^2 / dataInput$getSampleSize(k, 2)) } if (normalApproximation) { # stage-wise p-values pValues[k] <- 1 - stats::pnorm(testStatistics[k]) } else { if (equalVariances) { pValues[k] <- 1 - stats::pt(testStatistics[k], dataInput$getSampleSize(k, 1) + dataInput$getSampleSize(k, 2) - 2) } else { u <- dataInput$getStDev(k, 1)^2 / dataInput$getSampleSize(k, 1) / (dataInput$getStDev(k, 1)^2 / dataInput$getSampleSize(k, 1) + dataInput$getStDev(k, 2)^2 / dataInput$getSampleSize(k, 2)) pValues[k] <- 1 - stats::pt(testStatistics[k], 1 / (u^2 / (dataInput$getSampleSize(k, 1) - 1) + (1 - u)^2 / (dataInput$getSampleSize(k, 2) - 1))) } } } if (!directionUpper) { pValues[k] <- 1 - pValues[k] } # inverse normal test combInverseNormal[k] <- (weightsInverseNormal[1:k] %*% stats::qnorm(1 - pValues[1:k])) / sqrt(sum(weightsInverseNormal[1:k]^2)) # Fisher combination test combFisher[k] <- prod(pValues[1:k]^weightsFisher[1:k]) } if (dataInput$getNumberOfGroups() == 1) { stageResults <- StageResultsMeans( design = design, dataInput = dataInput, overallTestStatistics = overallTestStatistics, overallPValues = overallPValues, overallMeans = dataInput$getOverallMeans(), overallStDevs = dataInput$getOverallStDevs(), overallSampleSizes = dataInput$getOverallSampleSizesUpTo(stage), testStatistics = testStatistics, effectSizes = effectSizes, pValues = pValues, combInverseNormal = combInverseNormal, combFisher = combFisher, weightsFisher = weightsFisher, weightsInverseNormal = weightsInverseNormal, thetaH0 = thetaH0, direction = ifelse(directionUpper, C_DIRECTION_UPPER, C_DIRECTION_LOWER), normalApproximation = normalApproximation, equalVariances = equalVariances ) } else if (dataInput$getNumberOfGroups() == 2) { stageResults <- StageResultsMeans( design = design, dataInput = dataInput, overallTestStatistics = overallTestStatistics, overallPValues = overallPValues, overallMeans1 = dataInput$getOverallMeans(group = 1), overallMeans2 = dataInput$getOverallMeans(group = 2), overallStDevs1 = dataInput$getOverallStDevs(group = 1), overallStDevs2 = dataInput$getOverallStDevs(group = 2), overallStDevs = overallStDevs, # common variance overallSampleSizes1 = dataInput$getOverallSampleSizesUpTo(stage), overallSampleSizes2 = dataInput$getOverallSampleSizesUpTo(stage, 2), effectSizes = effectSizes, testStatistics = testStatistics, pValues = pValues, combInverseNormal = combInverseNormal, combFisher = combFisher, weightsFisher = weightsFisher, weightsInverseNormal = weightsInverseNormal, thetaH0 = thetaH0, direction = ifelse(directionUpper, C_DIRECTION_UPPER, C_DIRECTION_LOWER), normalApproximation = normalApproximation, equalVariances = equalVariances ) } if (.isTrialDesignFisher(design)) { stageResults$.setParameterType("combFisher", C_PARAM_GENERATED) stageResults$.setParameterType("weightsFisher", C_PARAM_GENERATED) } else if (.isTrialDesignInverseNormal(design)) { stageResults$.setParameterType("combInverseNormal", C_PARAM_GENERATED) stageResults$.setParameterType("weightsInverseNormal", C_PARAM_GENERATED) } return(stageResults) } # # Calculation of lower and upper limits of repeated confidence intervals (RCIs) for Means # .getRepeatedConfidenceIntervalsMeans <- function(design, ...) 
{ if (.isTrialDesignGroupSequential(design)) { return(.getRepeatedConfidenceIntervalsMeansGroupSequential(design = design, ...)) } if (.isTrialDesignInverseNormal(design)) { return(.getRepeatedConfidenceIntervalsMeansInverseNormal(design = design, ...)) } if (.isTrialDesignFisher(design)) { return(.getRepeatedConfidenceIntervalsMeansFisher(design = design, ...)) } .stopWithWrongDesignMessage(design) } .getRootThetaMeans <- function(design, dataInput, stage, directionUpper, normalApproximation, equalVariances, thetaLow, thetaUp, firstParameterName, secondValue, tolerance) { result <- .getOneDimensionalRoot( function(theta) { stageResults <- .getStageResultsMeans(design = design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances) firstValue <- stageResults[[firstParameterName]][stage] if (.isTrialDesignGroupSequential(design)) { firstValue <- stats::qnorm(1 - firstValue) } return(firstValue - secondValue) }, lower = thetaLow, upper = thetaUp, tolerance = tolerance ) return(result) } .getUpperLowerThetaMeans <- function(design, dataInput, theta, stage, directionUpper, normalApproximation, equalVariances, conditionFunction, firstParameterName, secondValue) { stageResults <- .getStageResultsMeans(design = design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances) firstValue <- stageResults[[firstParameterName]][stage] if (.isTrialDesignGroupSequential(design)) { firstValue <- stats::qnorm(1 - firstValue) } maxSearchIterations <- 50 while (conditionFunction(secondValue, firstValue)) { theta <- 2 * theta stageResults <- .getStageResultsMeans(design = design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances) firstValue <- stageResults[[firstParameterName]][stage] if (.isTrialDesignGroupSequential(design)) { firstValue <- stats::qnorm(1 - firstValue) } maxSearchIterations <- maxSearchIterations - 1 if (maxSearchIterations < 0) { stop(sprintf(paste0("Failed to find theta (k = %s, firstValue = %s, ", "secondValue = %s, levels(firstValue) = %s, theta = %s)"), stage, stageResults[[firstParameterName]][stage], secondValue, firstValue, theta)) } } return(theta) } .getRepeatedConfidenceIntervalsMeansAll <- function(..., design, dataInput, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, firstParameterName) { stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) .assertIsValidStage(stage, design$kMax) futilityCorr <- rep(NA_real_, design$kMax) # necessary for adjustment for binding futility boundaries criticalValues <- design$criticalValues if (.isTrialDesignFisher(design)) { bounds <- design$alpha0Vec border <- C_ALPHA_0_VEC_DEFAULT conditionFunction <- .isFirstValueSmallerThanSecondValue } else { bounds <- design$futilityBounds border <- C_FUTILITY_BOUNDS_DEFAULT conditionFunction <- .isFirstValueGreaterThanSecondValue } repeatedConfidenceIntervals <- matrix(NA_real_, 2, design$kMax) for (k in 1:stage) { startTime <- Sys.time() # finding maximum upper and minimum lower bounds for RCIs thetaLow <- .getUpperLowerThetaMeans(design, dataInput, theta = -1, stage = k, directionUpper = TRUE, normalApproximation, 
equalVariances, conditionFunction = conditionFunction, firstParameterName = firstParameterName, secondValue = criticalValues[k]) thetaUp <- .getUpperLowerThetaMeans(design, dataInput, theta = 1, stage = k, directionUpper = FALSE, normalApproximation, equalVariances, conditionFunction = conditionFunction, firstParameterName = firstParameterName, secondValue = criticalValues[k]) # finding upper and lower RCI limits through root function repeatedConfidenceIntervals[1, k] <- .getRootThetaMeans(design = design, dataInput = dataInput, stage = k, directionUpper = TRUE, normalApproximation, equalVariances, thetaLow, thetaUp, firstParameterName = firstParameterName, secondValue = criticalValues[k], tolerance = tolerance) repeatedConfidenceIntervals[2, k] <- .getRootThetaMeans(design = design, dataInput = dataInput, stage = k, directionUpper = FALSE, normalApproximation, equalVariances, thetaLow, thetaUp, firstParameterName = firstParameterName, secondValue = criticalValues[k], tolerance = tolerance) # adjustment for binding futility bounds if (k > 1 && conditionFunction(bounds[k - 1], border) & design$bindingFutility) { parameterName <- ifelse(.isTrialDesignFisher(design), "pValues", firstParameterName) # Calculate new lower and upper bounds if (directionUpper) { thetaLow <- .getUpperLowerThetaMeans(design = design, dataInput = dataInput, theta = -1, stage = k - 1, directionUpper = TRUE, normalApproximation, equalVariances, conditionFunction = conditionFunction, firstParameterName = parameterName, secondValue = bounds[k - 1]) } else { thetaUp <- .getUpperLowerThetaMeans(design = design, dataInput = dataInput, theta = 1, stage = k - 1, directionUpper = FALSE, normalApproximation, equalVariances, conditionFunction = conditionFunction, firstParameterName = parameterName, secondValue = bounds[k - 1]) } futilityCorr[k] <- .getRootThetaMeans(design = design, dataInput = dataInput, stage = k - 1, directionUpper = directionUpper, normalApproximation, equalVariances, thetaLow, thetaUp, firstParameterName = parameterName, secondValue = bounds[k - 1], tolerance = tolerance) if (directionUpper) { repeatedConfidenceIntervals[1, k] <- min(min(futilityCorr[2:k]), repeatedConfidenceIntervals[1, k]) } else { repeatedConfidenceIntervals[2, k] <- max(max(futilityCorr[2:k]), repeatedConfidenceIntervals[2, k]) } } if (!is.na(repeatedConfidenceIntervals[1, k]) && !is.na(repeatedConfidenceIntervals[2, k]) && repeatedConfidenceIntervals[1, k] > repeatedConfidenceIntervals[2, k]){ repeatedConfidenceIntervals[, k] <- rep(NA_real_, 2) } .logProgress("Repeated confidence interval of stage %s calculated", startTime = startTime, k) } return(repeatedConfidenceIntervals) } # # RCIs based on group sequential combination test # .getRepeatedConfidenceIntervalsMeansGroupSequential <- function(..., design, dataInput, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .warnInCaseOfUnknownArguments(functionName = ".getRepeatedConfidenceIntervalsMeansGroupSequential", ignore = c("stage"), ...) 
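	# The repeated confidence bounds at stage k are found by test inversion:
	# each bound is the root in theta of
	#   qnorm(1 - overallPValues_k(theta)) - criticalValues[k] = 0
	# (see .getRootThetaMeans above), searched once upwards and once downwards.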
	return(.getRepeatedConfidenceIntervalsMeansAll(design = design, dataInput = dataInput,
		normalApproximation = normalApproximation, equalVariances = equalVariances,
		directionUpper = directionUpper, tolerance = tolerance,
		firstParameterName = "overallPValues", ...))
}

#
# RCIs based on inverse normal combination test
#
.getRepeatedConfidenceIntervalsMeansInverseNormal <- function(..., design, dataInput,
	normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT,
	equalVariances = C_EQUAL_VARIANCES_DEFAULT,
	directionUpper = C_DIRECTION_UPPER_DEFAULT,
	tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {

	.warnInCaseOfUnknownArguments(functionName = ".getRepeatedConfidenceIntervalsMeansInverseNormal",
		ignore = c("stage"), ...)

	return(.getRepeatedConfidenceIntervalsMeansAll(design = design, dataInput = dataInput,
		normalApproximation = normalApproximation, equalVariances = equalVariances,
		directionUpper = directionUpper, tolerance = tolerance,
		firstParameterName = "combInverseNormal", ...))
}

#
# RCIs based on Fisher's combination test
#
.getRepeatedConfidenceIntervalsMeansFisher <- function(..., design, dataInput,
	normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT,
	equalVariances = C_EQUAL_VARIANCES_DEFAULT,
	directionUpper = C_DIRECTION_UPPER_DEFAULT,
	tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {

	.warnInCaseOfUnknownArguments(functionName = ".getRepeatedConfidenceIntervalsMeansFisher",
		ignore = c("stage"), ...)

	return(.getRepeatedConfidenceIntervalsMeansAll(design = design, dataInput = dataInput,
		normalApproximation = normalApproximation, equalVariances = equalVariances,
		directionUpper = directionUpper, tolerance = tolerance,
		firstParameterName = "combFisher", ...))
}

#
# Calculation of conditional power based on group sequential method
#
.getConditionalPowerMeansGroupSequential <- function(..., design, stageResults,
	allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT,
	nPlanned = NA_real_, thetaH1 = NA_real_, assumedStDev = NA_real_) {

	.assertIsTrialDesignGroupSequential(design)
	stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput())
	.assertIsValidStage(stage, design$kMax)
	assumedStDev <- .assertIsValidAssumedStDev(assumedStDev, stageResults, stage)
	thetaH1 <- .assertIsValidThetaH1(thetaH1, stageResults, stage)
	.warnInCaseOfUnknownArguments(functionName = ".getConditionalPowerMeansGroupSequential",
		ignore = c("stage"), ...)
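	# For two-sample data, nPlanned is rescaled below by r / (1 + r)^2 with
	# r = allocationRatioPlanned (for r = 1 this is 1/4), and the effect is
	# standardized as (thetaH1 - thetaH0) / assumedStDev, with the sign flipped
	# for direction "lower"; the rescaling of nPlanned is undone before returning.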
kMax <- design$kMax conditionalPower <- rep(NA_real_, kMax) weights <- stageResults$weightsInverseNormal informationRates <- design$informationRates nPlanned <- c(rep(NA, stage), nPlanned) if (stage == kMax) { .logDebug("Conditional power will be calculated only for subsequent stages ", "(stage = ", stage, ", kMax = ", design$kMax, ")") return(list( nPlanned = nPlanned, conditionalPower = conditionalPower )) } criticalValues <- design$criticalValues if (stageResults$isTwoSampleDataset()) { .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned } if (stageResults$direction == "upper") { thetaH1 <- (thetaH1 - stageResults$thetaH0) / assumedStDev } else { thetaH1 <- -(thetaH1 - stageResults$thetaH0) / assumedStDev } # shifted decision region for use in getGroupSeqProbs # Group Sequential Method shiftedDecisionRegionUpper <- criticalValues[(stage + 1):kMax] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - stats::qnorm(1 - stageResults$overallPValues[stage]) * sqrt(sum(weights[1:stage]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) if (design$sided == 2) { shiftedDecisionRegionLower <- -criticalValues[(stage + 1):kMax] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - c(weights[1:stage] %*% stats::qnorm(1 - stageResults$pValues[1:stage])) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) } if (stage == kMax - 1) { shiftedFutilityBounds <- c() } else { shiftedFutilityBounds <- design$futilityBounds[(stage + 1):(kMax - 1)] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):(kMax - 1)]^2)) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) - stats::qnorm(1 - stageResults$overallPValues[stage]) * sqrt(sum(weights[1:stage]^2)) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):(kMax - 1)]) * weights[(stage + 1):(kMax - 1)]) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) } # scaled information for use in getGroupSeqProbs scaledInformation <- (informationRates[(stage + 1):kMax] - informationRates[stage]) / (1 - informationRates[stage]) if (design$sided == 2) { decisionMatrix <- matrix(c(shiftedDecisionRegionLower, shiftedDecisionRegionUpper), nrow = 2, byrow = TRUE) } else { decisionMatrix <- matrix(c(shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT, shiftedDecisionRegionUpper), nrow = 2, byrow = TRUE) } probs <- .getGroupSequentialProbabilities(decisionMatrix = decisionMatrix, informationRates = scaledInformation) if (design$twoSidedPower){ conditionalPower[(stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ] + probs[1, ]) } else { conditionalPower[(stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ]) } if (stageResults$isTwoSampleDataset()) { nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned } return(list(nPlanned = nPlanned, conditionalPower = conditionalPower )) } # # Calculation of conditional power based on inverse normal method # .getConditionalPowerMeansInverseNormal <- function(..., design, stageResults, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, nPlanned = NA_real_, thetaH1 = NA_real_, assumedStDev = NA_real_) { 
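	# Conditional power is computed here under the inverse normal combination,
	# whose statistic is
	#   Z*_k = sum(w_i * qnorm(1 - p_i), i = 1..k) / sqrt(sum(w_i^2, i = 1..k)).
	# Minimal numeric sketch (hypothetical weights and p-values):
	#   w <- c(1, 1); p <- c(0.02, 0.20)
	#   sum(w * qnorm(1 - p)) / sqrt(sum(w^2))   # ~ 2.05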
.assertIsTrialDesignInverseNormal(design) stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput()) .assertIsValidStage(stage, design$kMax) assumedStDev <- .assertIsValidAssumedStDev(assumedStDev, stageResults, stage) thetaH1 <- .assertIsValidThetaH1(thetaH1, stageResults, stage) .warnInCaseOfUnknownArguments(functionName = ".getConditionalPowerMeansInverseNormal", ignore = c("stage"), ...) kMax <- design$kMax conditionalPower <- rep(NA_real_, kMax) weights <- stageResults$weightsInverseNormal informationRates <- design$informationRates nPlanned <- c(rep(NA_real_, stage), nPlanned) if (stage == kMax) { .logDebug("Conditional power will be calculated only for subsequent stages ", "(stage = ", stage, ", kMax = ", design$kMax, ")") return(list( nPlanned = nPlanned, conditionalPower = conditionalPower )) } criticalValuesInverseNormal <- design$criticalValues if (stageResults$isTwoSampleDataset()) { .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned } if (stageResults$direction == "upper") { thetaH1 <- (thetaH1 - stageResults$thetaH0) / assumedStDev } else { thetaH1 <- -(thetaH1 - stageResults$thetaH0) / assumedStDev } # shifted decision region for use in getGroupSeqProbs # Inverse Normal Method shiftedDecisionRegionUpper <- criticalValuesInverseNormal[(stage + 1):kMax] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - c(weights[1:stage] %*% stats::qnorm(1 - stageResults$pValues[1:stage])) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) if (design$sided == 2) { shiftedDecisionRegionLower <- -criticalValuesInverseNormal[(stage + 1):kMax] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - c(weights[1:stage] %*% stats::qnorm(1 - stageResults$pValues[1:stage])) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) } if (stage == kMax - 1) { shiftedFutilityBounds <- c() } else { shiftedFutilityBounds <- design$futilityBounds[(stage + 1):(kMax - 1)] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):(kMax - 1)]^2)) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) - c(weights[1:stage] %*% stats::qnorm(1 - stageResults$pValues[1:stage])) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):(kMax - 1)]) * weights[(stage + 1):(kMax - 1)]) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) } # scaled information for use in getGroupSeqProbs scaledInformation <- (informationRates[(stage + 1):kMax] - informationRates[stage]) / (1 - informationRates[stage]) if (design$sided == 2) { decisionMatrix <- matrix(c(shiftedDecisionRegionLower, shiftedDecisionRegionUpper), nrow = 2, byrow = TRUE) } else { decisionMatrix <- matrix(c(shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT, shiftedDecisionRegionUpper), nrow = 2, byrow = TRUE) } probs <- .getGroupSequentialProbabilities(decisionMatrix = decisionMatrix, informationRates = scaledInformation) if (design$twoSidedPower){ conditionalPower[(stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ] + probs[1, ]) } else { conditionalPower[(stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ]) } if (stageResults$isTwoSampleDataset()) { nPlanned 
<- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned } return(list( nPlanned = nPlanned, conditionalPower = conditionalPower )) } # # Calculation of conditional power based on Fisher combination test # .getConditionalPowerMeansFisher <- function(..., design, stageResults, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, nPlanned = NA_real_, thetaH1 = NA_real_, assumedStDev = NA_real_, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) { .assertIsTrialDesignFisher(design) stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput()) .assertIsValidStage(stage, design$kMax) .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE) assumedStDev <- .assertIsValidAssumedStDev(assumedStDev, stageResults, stage) thetaH1 <- .assertIsValidThetaH1(thetaH1, stageResults, stage) .warnInCaseOfUnknownArguments(functionName = ".getConditionalPowerMeansFisher", ignore = c("stage"), ...) kMax <- design$kMax conditionalPower <- rep(NA_real_, kMax) seed <- .setSeed(seed) simulated <- FALSE .assertIsValidNPlanned(nPlanned, kMax, stage) nPlanned <- c(rep(NA_real_, stage), nPlanned) if (stageResults$isTwoSampleDataset()) { .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned } if (stageResults$direction == "upper") { thetaH1 <- (thetaH1 - stageResults$thetaH0) / assumedStDev } else { thetaH1 <- -(thetaH1 - stageResults$thetaH0) / assumedStDev } criticalValues <- design$criticalValues weightsFisher <- stageResults$weightsFisher pValues <- stageResults$pValues if (stage < kMax - 1) { for (k in (stage + 1):kMax) { reject <- 0 for (i in 1:iterations) { reject <- reject + .getRejectValueConditionalPowerFisher( kMax = kMax, alpha0Vec = design$alpha0Vec, criticalValues = criticalValues, weightsFisher = weightsFisher, pValues = pValues, currentKMax = k, thetaH1 = thetaH1, stage = stage, nPlanned = nPlanned) } conditionalPower[k] <- reject / iterations } simulated <- TRUE } else if (stage == kMax - 1) { divisor <- prod(pValues[1:(kMax - 1)]^weightsFisher[1:(kMax - 1)]) result <- 1 - (criticalValues[kMax] / divisor)^(1/weightsFisher[kMax]) if (result <= 0 || result >= 1) { warning("Could not calculate conditional power for stage ", kMax, call. 
= FALSE) conditionalPower[kMax] <- NA_real_ } else { conditionalPower[kMax] <- 1 - stats::pnorm(stats::qnorm(result) - thetaH1 * sqrt(nPlanned[kMax])) } } if (stageResults$isTwoSampleDataset()) { nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned } return(list( nPlanned = nPlanned, conditionalPower = conditionalPower, iterations = iterations, seed = seed, simulated = simulated )) } .getConditionalPowerMeans <- function(..., design, stageResults, nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, thetaH1 = NA_real_, assumedStDev = NA_real_) { if (any(is.na(nPlanned))) { return(list(conditionalPower = rep(NA_real_, design$kMax), simulated = FALSE)) } stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput()) .assertIsValidStage(stage, design$kMax) if (!.isValidNPlanned(nPlanned = nPlanned, kMax = design$kMax, stage = stage)) { return(list(conditionalPower = rep(NA_real_, design$kMax), simulated = FALSE)) } if (.isTrialDesignGroupSequential(design)) { return(.getConditionalPowerMeansGroupSequential(design = design, stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, assumedStDev = assumedStDev, ...)) } if (.isTrialDesignInverseNormal(design)) { return(.getConditionalPowerMeansInverseNormal(design = design, stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, assumedStDev = assumedStDev, ...)) } if (.isTrialDesignFisher(design)) { return(.getConditionalPowerMeansFisher(design = design, stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, assumedStDev = assumedStDev, ...)) } .stopWithWrongDesignMessage(design) } .getConditionalPowerPlotMeans <- function(..., design, stageResults, stage, nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, thetaRange, assumedStDev = NA_real_) { if (!.associatedArgumentsAreDefined(nPlanned = nPlanned, thetaRange = thetaRange)) { warning("A planned sample size (nPlanned) and ", "a range of effect sizes (thetaRange) must be specified", call. 
= FALSE) } .assertIsValidAllocationRatioPlanned(allocationRatioPlanned, stageResults$getDataInput()$getNumberOfGroups()) assumedStDev <- .assertIsValidAssumedStDev(assumedStDev, stageResults, stage) thetaRange <- .assertIsValidThetaRange(thetaRange = thetaRange) condPowerValues <- rep(NA, length(thetaRange)) likelihoodValues <- rep(NA, length(thetaRange)) if (stageResults$isOneSampleDataset()) { stdErr <- stageResults$overallStDevs[stage] / sqrt(stageResults$overallSampleSizes[stage]) } if (stageResults$isTwoSampleDataset()) { stdErr <- stageResults$overallStDevs[stage] * sqrt(1 / stageResults$overallSampleSizes1[stage] + 1 / stageResults$overallSampleSizes2[stage]) } for (i in seq(along = thetaRange)) { if (.isTrialDesignGroupSequential(design)) { condPowerValues[i] <- .getConditionalPowerMeansGroupSequential( design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaRange[i], assumedStDev = assumedStDev, ...)$conditionalPower[design$kMax] } if (.isTrialDesignInverseNormal(design)) { condPowerValues[i] <- .getConditionalPowerMeansInverseNormal( design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaRange[i], assumedStDev = assumedStDev, ...)$conditionalPower[design$kMax] } if (.isTrialDesignFisher(design)) { condPowerValues[i] <- .getConditionalPowerMeansFisher( design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaRange[i], assumedStDev = assumedStDev, ...)$conditionalPower[design$kMax] } likelihoodValues[i] <- stats::dnorm(thetaRange[i], stageResults$effectSizes[stage], stdErr) / stats::dnorm(0, 0, stdErr) } if (assumedStDev != round(assumedStDev, 0)) { assumedStDev <- sprintf("%.2f", assumedStDev) } if (stageResults$isOneSampleDataset()) { subTitle <- paste0("Stage = ", stage, ", # of remaining subjects = ", sum(nPlanned), ", std = ", assumedStDev) } else { subTitle <- paste0("Stage = ", stage, ", # of remaining subjects = ", sum(nPlanned), ", std = ", assumedStDev, ", allocation ratio = ", allocationRatioPlanned) } return(list( xValues = thetaRange, condPowerValues = condPowerValues, likelihoodValues = likelihoodValues, main = "Conditional Power Plot with Likelihood", xlab = "Effect size", ylab = "Conditional power / Likelihood", sub = subTitle )) } # # Calculation of final confidence interval # based on group sequential test without SSR (general case). 
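# At the first stage the interval reduces to the fixed-sample form
#   effectSizes[1] -/+ qnorm(1 - alpha / sided) * stderr;
# at later stages the bounds are obtained as roots of the decision matrix
# equations via .getDecisionMatrixRoot.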
# .getFinalConfidenceIntervalMeansGroupSequential <- function(design, dataInput, stage, thetaH0 = C_THETA_H0_MEANS_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { stageResults <- .getStageResultsMeans(design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances) finalConfidenceIntervalGeneral <- rep(NA_real_, 2) medianUnbiasedGeneral <- NA_real_ stageGroupSeq <- .getStageGroupSeq(design, stageResults, stage) finalStage <- min(stageGroupSeq, design$kMax) # early stopping or at end of study if (stageGroupSeq < design$kMax || stage == design$kMax) { if (stageGroupSeq == 1) { finalConfidenceIntervalGeneral[1] <- stats::qnorm(1 - stageResults$overallPValues[1]) - stats::qnorm(1 - design$alpha / design$sided) finalConfidenceIntervalGeneral[2] <- stats::qnorm(1 - stageResults$overallPValues[1]) + stats::qnorm(1 - design$alpha / design$sided) medianUnbiasedGeneral <- stats::qnorm(1 - stageResults$overallPValues[1]) if (dataInput$getNumberOfGroups() == 1) { finalConfidenceIntervalGeneral <- finalConfidenceIntervalGeneral / sqrt(stageResults$overallSampleSizes[1]) medianUnbiasedGeneral <- medianUnbiasedGeneral / sqrt(stageResults$overallSampleSizes[1]) } else { finalConfidenceIntervalGeneral <- finalConfidenceIntervalGeneral * sqrt(1/stageResults$overallSampleSizes1[finalStage] + 1/stageResults$overallSampleSizes2[finalStage]) medianUnbiasedGeneral <- medianUnbiasedGeneral * sqrt(1/stageResults$overallSampleSizes1[finalStage] + 1/stageResults$overallSampleSizes2[finalStage]) } } else { finalConfidenceIntervalGeneral[1] <- .getDecisionMatrixRoot(design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = "overallPValues", case = "finalConfidenceIntervalGeneralLower") finalConfidenceIntervalGeneral[2] <- .getDecisionMatrixRoot(design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = "overallPValues", case = "finalConfidenceIntervalGeneralUpper") medianUnbiasedGeneral <- .getDecisionMatrixRoot(design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = "overallPValues", case = "medianUnbiasedGeneral") } } if (is.na(finalConfidenceIntervalGeneral[1]) && (stageGroupSeq > 1)) { finalStage <- NA_integer_ } finalConfidenceInterval <- rep(NA_real_, 2) medianUnbiased <- NA_real_ if (!is.na(finalStage)) { # Retransformation if (dataInput$getNumberOfGroups() == 1) { stderr <- stageResults$overallStDevs[finalStage] / sqrt(stageResults$overallSampleSizes[finalStage]) } else { stderr <- stageResults$overallStDevs[finalStage] * sqrt(1/stageResults$overallSampleSizes1[finalStage] + 1/stageResults$overallSampleSizes2[finalStage]) } directionUpperSign <- ifelse(directionUpper, 1, -1) if (stageGroupSeq == 1) { finalConfidenceInterval[1] <- stageResults$effectSizes[1] - stats::qnorm(1 - design$alpha / design$sided)*stderr finalConfidenceInterval[2] <- stageResults$effectSizes[1] + stats::qnorm(1 - design$alpha / design$sided)*stderr medianUnbiased <- stageResults$effectSizes[1] } else { finalConfidenceInterval[1] <- finalConfidenceIntervalGeneral[1] * stageResults$overallStDevs[finalStage] + directionUpperSign * thetaH0 finalConfidenceInterval[2] <- finalConfidenceIntervalGeneral[2] * 
stageResults$overallStDevs[finalStage] + directionUpperSign * thetaH0 medianUnbiased <- medianUnbiasedGeneral * stageResults$overallStDevs[finalStage] + directionUpperSign * thetaH0 } } if (!directionUpper){ medianUnbiasedGeneral = -medianUnbiasedGeneral finalConfidenceIntervalGeneral = -finalConfidenceIntervalGeneral if (stageGroupSeq > 1){ medianUnbiased = -medianUnbiased finalConfidenceInterval = -finalConfidenceInterval } } return(list( finalStage = finalStage, medianUnbiasedGeneral = medianUnbiasedGeneral, finalConfidenceIntervalGeneral = sort(finalConfidenceIntervalGeneral), medianUnbiased = medianUnbiased, finalConfidenceInterval = sort(finalConfidenceInterval) )) } # # Calculation of final confidence interval # based on inverse normal method, only theoretically shown to be valid for kMax <= 2 or no SSR. # .getFinalConfidenceIntervalMeansInverseNormal <- function(design, dataInput, stage, thetaH0 = C_THETA_H0_MEANS_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { stageResults <- .getStageResultsMeans(design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances) finalConfidenceIntervalGeneral <- rep(NA_real_, 2) medianUnbiasedGeneral <- NA_real_ stageInvNormal <- .getStageInverseNormal(design, stageResults, stage) finalStage <- min(stageInvNormal, design$kMax) # early stopping or at end of study if (stageInvNormal < design$kMax || stage == design$kMax) { if (stageInvNormal == 1) { finalConfidenceIntervalGeneral[1] <- stageResults$combInverseNormal[1] - stats::qnorm(1 - design$alpha / design$sided) finalConfidenceIntervalGeneral[2] <- stageResults$combInverseNormal[1] + stats::qnorm(1 - design$alpha / design$sided) medianUnbiasedGeneral <- stageResults$combInverseNormal[1] if (dataInput$getNumberOfGroups() == 1) { finalConfidenceIntervalGeneral <- finalConfidenceIntervalGeneral / sqrt(stageResults$overallSampleSizes[1]) medianUnbiasedGeneral <- medianUnbiasedGeneral / sqrt(stageResults$overallSampleSizes[1]) } else { finalConfidenceIntervalGeneral <- finalConfidenceIntervalGeneral * sqrt(1/stageResults$overallSampleSizes1[finalStage] + 1/stageResults$overallSampleSizes2[finalStage]) medianUnbiasedGeneral <- medianUnbiasedGeneral * sqrt(1/stageResults$overallSampleSizes1[finalStage] + 1/stageResults$overallSampleSizes2[finalStage]) } } else { if (design$kMax > 2) { warning("Calculation of final confidence interval performed for kMax = ", design$kMax, " (for kMax > 2, it is theoretically shown that it is valid only ", "if no sample size change was performed)", call. 
= FALSE) } finalConfidenceIntervalGeneral[1] <- .getDecisionMatrixRoot(design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = "combInverseNormal", case = "finalConfidenceIntervalGeneralLower") finalConfidenceIntervalGeneral[2] <- .getDecisionMatrixRoot(design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = "combInverseNormal", case = "finalConfidenceIntervalGeneralUpper") medianUnbiasedGeneral <- .getDecisionMatrixRoot(design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = "combInverseNormal", case = "medianUnbiasedGeneral") } } if (is.na(finalConfidenceIntervalGeneral[1]) && (stageInvNormal > 1)) { finalStage <- NA_integer_ } finalConfidenceInterval <- rep(NA_real_, 2) medianUnbiased <- NA_real_ if (!is.na(finalStage)) { # Retransformation if (dataInput$getNumberOfGroups() == 1) { stderr <- stageResults$overallStDevs[finalStage] / sqrt(stageResults$overallSampleSizes[finalStage]) } else { stderr <- stageResults$overallStDevs[finalStage] * sqrt(1/stageResults$overallSampleSizes1[finalStage] + 1/stageResults$overallSampleSizes2[finalStage]) } directionUpperSign <- ifelse(directionUpper, 1, -1) if (stageInvNormal == 1) { finalConfidenceInterval[1] <- stageResults$effectSizes[1] - stats::qnorm(1 - design$alpha / design$sided)*stderr finalConfidenceInterval[2] <- stageResults$effectSizes[1] + stats::qnorm(1 - design$alpha / design$sided)*stderr medianUnbiased <- stageResults$effectSizes[1] } else { finalConfidenceInterval[1] <- finalConfidenceIntervalGeneral[1] * stageResults$overallStDevs[finalStage] + directionUpperSign * thetaH0 finalConfidenceInterval[2] <- finalConfidenceIntervalGeneral[2] * stageResults$overallStDevs[finalStage] + directionUpperSign * thetaH0 medianUnbiased <- medianUnbiasedGeneral * stageResults$overallStDevs[finalStage] + directionUpperSign * thetaH0 } } if (!directionUpper){ medianUnbiasedGeneral = -medianUnbiasedGeneral finalConfidenceIntervalGeneral = -finalConfidenceIntervalGeneral if (stageInvNormal > 1){ medianUnbiased = -medianUnbiased finalConfidenceInterval = -finalConfidenceInterval } } return(list( finalStage = finalStage, medianUnbiasedGeneral = medianUnbiasedGeneral, finalConfidenceIntervalGeneral = sort(finalConfidenceIntervalGeneral), medianUnbiased = medianUnbiased, finalConfidenceInterval = sort(finalConfidenceInterval) )) } .getQFunctionResultBasedOnDataInput <- function(..., design, dataInput, theta, stage, infRate, directionUpper, normalApproximation, equalVariances) { if (dataInput$getNumberOfGroups() == 1) { stageResults <- .getStageResultsMeans(design = design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = directionUpper, normalApproximation = normalApproximation) } if (dataInput$getNumberOfGroups() == 2) { stageResults <- .getStageResultsMeans(design = design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances) } return(.getQFunctionResult(design = design, stageResults = stageResults, theta = theta, infRate = infRate)) } # # Calculation of final confidence interval # based on Fisher combination test, only valid for kMax <= 2. 
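# The bounds would be the roots in theta of Q(theta) = alpha / sided and
# Q(theta) = 1 - alpha / sided for Fisher's combination Q function
# (see .getQFunctionResultBasedOnDataInput); beyond the first stage this is
# not yet implemented and NA is returned with a warning.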
# .getFinalConfidenceIntervalMeansFisher <- function(..., design, dataInput, stage, thetaH0 = C_THETA_H0_MEANS_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { stageResults <- .getStageResultsMeans(design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances) stageFisher <- .getStageFisher(design, stageResults, stage) finalStage <- min(stageFisher, design$kMax) finalConfidenceInterval <- rep(NA_real_, 2) medianUnbiased <- NA_real_ # early stopping or at end of study if (stageFisher < design$kMax || stage == design$kMax) { if (dataInput$getNumberOfGroups() == 1) { infRate <- sqrt(stageResults$overallSampleSizes[1]) stderr <- stageResults$overallStDevs[finalStage] / sqrt(stageResults$overallSampleSizes[finalStage]) } else { infRate <- 1 / sqrt(1 / stageResults$overallSampleSizes1[1] + 1 / stageResults$overallSampleSizes2[1]) stderr <- stageResults$overallStDevs[finalStage] * sqrt(1/stageResults$overallSampleSizes1[finalStage] + 1/stageResults$overallSampleSizes2[finalStage]) } if (stageFisher == 1) { finalConfidenceInterval[1] <- stageResults$effectSizes[1] - stats::qnorm(1 - design$alpha / design$sided)*stderr finalConfidenceInterval[2] <- stageResults$effectSizes[1] + stats::qnorm(1 - design$alpha / design$sided)*stderr medianUnbiased <- stageResults$effectSizes[1] } else { maxSearchIterations <- 50 if (design$kMax >= 1) { warning("Calculation of final confidence interval for Fisher's ", "design not implemented yet.", call. = FALSE) return(list(finalStage = NA_integer_ , medianUnbiased = NA_real_, finalConfidenceInterval = rep(NA_real_, design$kMax))) } thetaLow <- -1 .getQFunctionResult(design = design, stageResults = stageResults, theta = thetaLow, infRate = infRate) iteration <- 0 while(iteration <= maxSearchIterations && .getQFunctionResultBasedOnDataInput(design = design, dataInput = dataInput, theta = thetaLow, stage = finalStage, infRate = infRate, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances) > design$alpha / design$sided) { thetaLow <- 2 * thetaLow iteration <- iteration + 1 if (iteration == maxSearchIterations) { thetaLow <- -1 } } thetaUp <- 1 iteration <- 0 while(iteration <= maxSearchIterations && .getQFunctionResultBasedOnDataInput(design = design, dataInput = dataInput, theta = thetaUp, stage = finalStage, infRate = infRate, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances) < 1 - design$alpha / design$sided) { thetaUp <- 2 * thetaUp iteration <- iteration + 1 if (iteration == maxSearchIterations) { thetaUp <- 1 } } finalConfidenceInterval[1] <- .getOneDimensionalRoot(function(theta) { return(.getQFunctionResultBasedOnDataInput(design = design, dataInput = dataInput, theta = theta, stage = finalStage, infRate = infRate, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances) - design$alpha / design$sided) }, lower = thetaLow, upper = thetaUp, tolerance = tolerance) finalConfidenceInterval[2] <- .getOneDimensionalRoot(function(theta) { return(.getQFunctionResultBasedOnDataInput(design = design, dataInput = dataInput, theta = theta, stage = finalStage, infRate = infRate, directionUpper = directionUpper, normalApproximation = 
normalApproximation, equalVariances = equalVariances) - 1 + design$alpha / design$sided) }, lower = thetaLow, upper = thetaUp, tolerance = tolerance) medianUnbiased <- .getOneDimensionalRoot(function(theta) { return(.getQFunctionResultBasedOnDataInput(design = design, dataInput = dataInput, theta = theta, stage = finalStage, infRate = infRate, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances) - 0.5) }, lower = thetaLow, upper = thetaUp, tolerance = tolerance) } if (is.na(finalConfidenceInterval[1])) { finalStage <- NA_integer_ } } return(list( finalStage = finalStage, medianUnbiased = medianUnbiased, finalConfidenceInterval = finalConfidenceInterval )) } .getFinalConfidenceIntervalMeans <- function(..., design, dataInput, thetaH0 = NA_real_, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) .assertIsValidStage(stage, design$kMax) .assertIsValidThetaH0DataInput(thetaH0, dataInput) .warnInCaseOfUnknownArguments(functionName = "getFinalConfidenceIntervalMeans", ignore = c("stage"), ...) if (is.na(thetaH0)) { thetaH0 <- C_THETA_H0_MEANS_DEFAULT } if (.isTrialDesignGroupSequential(design)) { return(.getFinalConfidenceIntervalMeansGroupSequential( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, tolerance = tolerance)) } if (.isTrialDesignInverseNormal(design)) { return(.getFinalConfidenceIntervalMeansInverseNormal( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, tolerance = tolerance)) } if (.isTrialDesignFisher(design)) { return(.getFinalConfidenceIntervalMeansFisher( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, tolerance = tolerance)) } .stopWithWrongDesignMessage(design) } rpact/R/class_design_set.R0000644000176200001440000006556613573662737015265 0ustar liggesusers###################################################################################### # # # -- Trial design set classes -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### #' @include f_core_plot.R NULL #' @title #' Get Design Set #' #' @description #' Creates a trial design set object and returns it. #' #' @param ... 'designs' OR 'design' and one or more design parameters, e.g., deltaWT = c(0.1, 0.3, 0.4). #' \itemize{ #' \item \code{design} The master design (optional, you need to specify an #' additional parameter that shall be varied). #' \item \code{designs} The designs to compare (optional). 
#' } #' #' @details #' Specify a master design and one or more design parameters or a list of designs. #' #' @return Returns a \code{\link{TrialDesignSet}} object. #' #' @examples #' #' # Example 1 #' design <- getDesignGroupSequential(alpha = 0.05, kMax = 6, #' sided = 2, typeOfDesign = "WT", deltaWT = 0.1) #' designSet <- getDesignSet() #' designSet$add(design = design, deltaWT = c(0.3, 0.4)) #' if (require(ggplot2)) plot(designSet, type = 1) #' #' # Example 2 (shorter script) #' design <- getDesignGroupSequential(alpha = 0.05, kMax = 6, #' sided = 2, typeOfDesign = "WT", deltaWT = 0.1) #' designSet <- getDesignSet(design = design, deltaWT = c(0.3, 0.4)) #' if (require(ggplot2)) plot(designSet) #' #' @export #' getDesignSet <- function(...) { return(TrialDesignSet(...)) } #' #' @name TrialDesignSet #' #' @title #' Class for trial design sets. #' #' @description #' \code{TrialDesignSet} is a class for creating a collection of different trial designs. #' #' @field designs The designs (optional). #' @field design The master design (optional). #' #' @details #' This object cannot be created directly; use \code{\link{getDesignSet}} #' with suitable arguments to create a set of designs instead. #' #' @seealso \code{\link{getDesignSet}} #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include f_core_plot.R #' #' @keywords internal #' #' @importFrom methods new #' TrialDesignSet <- setRefClass("TrialDesignSet", contains = "FieldSet", fields = list( .plotSettings = "PlotSettings", designs = "list", variedParameters = "character" ), methods = list( # # @param ... 'designs' OR 'design' and one or more design parameters, e.g., deltaWT = c(0.1, 0.3, 0.4) # initialize = function(...) { .plotSettings <<- PlotSettings() designs <<- list() variedParameters <<- character(0) if (length(list(...)) > 0) { add(...) } }, getPlotSettings = function() { return(.plotSettings) }, show = function(showType = 1, digits = NA_integer_) { .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE) }, .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) { 'Method for automatically printing trial design sets' .resetCat() .cat("Trial design set with ", length(designs), " designs\n\n", heading = 1, consoleOutputEnabled = consoleOutputEnabled) for (design in designs) { design$.show(showType = showType, consoleOutputEnabled = consoleOutputEnabled) } }, isEmpty = function() { return(length(designs) == 0) }, getSize = function() { return(length(designs)) }, getDesignMaster = function() { if (length(designs) == 0) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "no design master defined") } return(designs[[1]]) }, .validateDesignsArgument = function(designsToAdd, args) { if (!is.list(designsToAdd)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'designsToAdd' must be a list") } if (length(designsToAdd) == 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'designsToAdd' must not be empty") } designsToAddValidated <- list() for (d in designsToAdd) { if (.isTrialDesign(d)) { designsToAddValidated <- c(designsToAddValidated, d) } else { parentDesign <- d[[".design"]] if (is.null(parentDesign)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'designsToAdd' must be a list of trial designs (found '", class(d), "')") } warning("Only the parent design of ", class(d), " was added to the trial design set", call.
= FALSE) designsToAddValidated <- c(designsToAddValidated, parentDesign) } } varPar <- args[["variedParameters"]] if (!is.null(varPar) && length(varPar) > 0) { variedParameters <<- c(variedParameters, varPar) } # drop 'designs' and 'variedParameters' from the remaining arguments (element-wise, hence %in%) args <- args[!(names(args) %in% c("designs", "variedParameters"))] if (length(args) > 0) { warning("Argument", ifelse(length(args) > 1, "s", ""), " ", .arrayToString(args, encapsulate = TRUE), " will be ignored ", "because, apart from 'designs', only the argument 'variedParameters' is respected", call. = FALSE) } designs <<- c(designs, designsToAddValidated) }, addVariedParameters = function(varPar) { if (is.null(varPar) || !is.character(varPar)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'varPar' must be a valid character vector") } variedParameters <<- c(variedParameters, varPar) }, .validateOptionalArguments = function(...) { args <- list(...) designsToAdd <- .getOptionalArgument(optionalArgumentName = "designs", ...) if (!is.null(designsToAdd)) { .validateDesignsArgument(designsToAdd = designsToAdd, args = args) return(NULL) } design <- .getOptionalArgument(optionalArgumentName = "design", ...) optionalArgumentsDefined <- (length(args) > 0) if (is.null(design) && !optionalArgumentsDefined) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "please specify a 'design' to add and/or a design parameter, ", "e.g., deltaWT = c(0.1, 0.3, 0.4)") } if (is.null(design) && optionalArgumentsDefined && length(designs) == 0) { stop(C_EXCEPTION_TYPE_INCOMPLETE_ARGUMENTS, "at least one design (master) must be defined in this ", "design set to respect any design parameters") } if (!is.null(design)) { designs <<- c(designs, design) } else if (length(designs) > 0) { design <- designs[[1]] # use design master } if (!.isTrialDesign(design)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'design' (", class(design), ") must be an instance of class 'TrialDesign'") } .getArgumentNames(validatedDesign = design, ...) invisible(design) }, .getArgumentNames = function(validatedDesign, ...) { args <- list(...) if (length(args) == 0) { return(character(0)) } argumentNames <- names(args) if (length(argumentNames) == 0) { warning("No argument names available for ", paste(args, collapse = ", "), call. = FALSE) return(character(0)) } argumentNames <- argumentNames[nchar(argumentNames) != 0] argumentNames <- argumentNames[!(argumentNames %in% c("design", "designs", "singleDesign"))] visibleFieldNames <- validatedDesign$.getVisibleFieldNames() for (arg in argumentNames) { if (!(arg %in% visibleFieldNames)) { stop(sprintf(paste0(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'%s' does not contain a field with name '%s'"), class(validatedDesign), arg)) } } invisible(argumentNames) }, add = function(...) { "Adds 'designs' OR a 'design' and/or a design parameter, e.g., deltaWT = c(0.1, 0.3, 0.4)" design <- .validateOptionalArguments(...) args <- list(...) singleDesign <- args[["singleDesign"]] if (!is.null(singleDesign) && is.logical(singleDesign) && singleDesign) { return(invisible()) } if (!is.null(design)) { d <- .createDesignVariants(validatedDesign = design, ...) designs <<- c(designs, d) } }, assertHaveEqualSidedValues = function() { if (length(designs) == 0) { return(invisible()) } sided <- getDesignMaster()$sided for (design in designs) { if (sided != design$sided) { stop(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "designs have different directions of alternative (design master is ", ifelse(sided == 1, "one", "two"), " sided)") } } }, .createDesignVariants = function(validatedDesign, ...)
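# Builds the design variants for up to two varied design parameters by
# recursively combining all parameter values (see .createDesignVariantsRecursive below)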
{ .assertIsTrialDesign(validatedDesign) argumentNames <- .getArgumentNames(validatedDesign = validatedDesign, ...) if (length(argumentNames) == 0) { warning("Creation of design variants stopped: no valid design parameters found", call. = FALSE) return(list()) } if (length(argumentNames) > 2) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "too many arguments (", .arrayToString(argumentNames, encapsulate = TRUE), "): up to 2 design parameters are allowed") } designVariants <- .createDesignVariantsRecursive(designMaster = validatedDesign, args = list(...), argumentIndex = 1, argumentNames = argumentNames) return(designVariants) }, .designSettingExists = function(parameterName, parameterValue, numberOfArguments = 1, parameterNameBefore = NULL, parameterValueBefore = NULL) { if (length(designs) == 0) { return(FALSE) } for (design in designs) { if (!is.null(parameterNameBefore) && !is.null(parameterValueBefore)) { if (design[[parameterNameBefore]] == parameterValueBefore && design[[parameterName]] == parameterValue) { return(TRUE) } } else if (numberOfArguments == 1) { if (design[[parameterName]] == parameterValue) { return(TRUE) } } } return(FALSE) }, .createDesignVariantsRecursive = function(designMaster, args, argumentIndex, argumentNames, parameterNameBefore = NULL, parameterValueBefore = NULL) { if (argumentIndex > length(argumentNames)) { return(list()) } designVariants <- list() argumentName <- argumentNames[argumentIndex] variedParameters <<- unique(c(variedParameters, argumentName)) argumentValues <- args[[argumentName]] for (argumentValue in argumentValues) { if (.designSettingExists(argumentName, argumentValue, numberOfArguments = length(argumentNames), parameterNameBefore, parameterValueBefore)) { if (!is.null(parameterNameBefore) && !is.null(parameterValueBefore)) { warning(sprintf("Argument ignored: there exists already a design with %s = %s (%s = %s)", argumentName, argumentValue, parameterNameBefore, parameterValueBefore), call. = FALSE) } else { warning(sprintf("Argument ignored: there exists already a design with %s = %s", argumentName, argumentValue), call. 
= FALSE) } } else { designMaster2 <- .createDesignVariant(designMaster = designMaster, argumentName = argumentName, argumentValue = argumentValue) if (argumentIndex == length(argumentNames)) { if (is.null(parameterNameBefore) || is.null(parameterValueBefore)) { .logDebug("Create design variant %s = %s", argumentName, argumentValue) } else { .logDebug("Create design variant %s = %s (%s = %s)", argumentName, argumentValue, parameterNameBefore, parameterValueBefore) } designVariants <- c(designVariants, designMaster2) } designCopies2 <- .createDesignVariantsRecursive(designMaster = designMaster2, args = args, argumentIndex = argumentIndex + 1, argumentNames = argumentNames, parameterNameBefore = argumentName, parameterValueBefore = argumentValue) if (length(designCopies2) > 0) { designVariants <- c(designVariants, designCopies2) } } } return(designVariants) }, .createDesignVariant = function(designMaster, argumentName, argumentValue) { if (.isTrialDesignGroupSequential(designMaster)) { defaultValues <- .getDesignGroupSequentialDefaultValues() } else if (.isTrialDesignInverseNormal(designMaster)) { defaultValues <- .getDesignInverseNormalDefaultValues() } else if (.isTrialDesignFisher(designMaster)) { defaultValues <- .getDesignFisherDefaultValues() } for (userDefinedParamName in designMaster$.getUserDefinedParameters()) { defaultValues[[userDefinedParamName]] <- designMaster[[userDefinedParamName]] } defaultValues[[argumentName]] <- argumentValue if (.isTrialDesignGroupSequential(designMaster)) { return(getDesignGroupSequential( kMax = defaultValues$kMax, alpha = defaultValues$alpha, beta = defaultValues$beta, sided = defaultValues$sided, informationRates = defaultValues$informationRates, futilityBounds = defaultValues$futilityBounds, typeOfDesign = defaultValues$typeOfDesign, deltaWT = defaultValues$deltaWT, optimizationCriterion = defaultValues$optimizationCriterion, gammaA = defaultValues$gammaA, typeBetaSpending = defaultValues$typeBetaSpending, userAlphaSpending = defaultValues$userAlphaSpending, userBetaSpending = defaultValues$userBetaSpending, gammaB = defaultValues$gammaB, tolerance = defaultValues$tolerance)) } else if (.isTrialDesignInverseNormal(designMaster)) { return(getDesignInverseNormal( kMax = defaultValues$kMax, alpha = defaultValues$alpha, beta = defaultValues$beta, sided = defaultValues$sided, informationRates = defaultValues$informationRates, futilityBounds = defaultValues$futilityBounds, typeOfDesign = defaultValues$typeOfDesign, deltaWT = defaultValues$deltaWT, optimizationCriterion = defaultValues$optimizationCriterion, gammaA = defaultValues$gammaA, typeBetaSpending = defaultValues$typeBetaSpending, userAlphaSpending = defaultValues$userAlphaSpending, userBetaSpending = defaultValues$userBetaSpending, gammaB = defaultValues$gammaB, tolerance = defaultValues$tolerance)) } else if (.isTrialDesignFisher(designMaster)) { return(getDesignFisher( kMax = defaultValues$kMax, alpha = defaultValues$alpha, method = defaultValues$method, userAlphaSpending = defaultValues$userAlphaSpending, informationRates = defaultValues$informationRates, alpha0Vec = defaultValues$alpha0Vec, sided = defaultValues$sided, tolerance = defaultValues$tolerance, iterations = defaultValues$iterations, seed = defaultValues$seed)) } } ) ) #' #' @title #' Access Trial Design by Index #' #' @description #' Function to access the \code{TrialDesign} at position \code{i} in a \code{TrialDesignSet} object. #' #' @details #' Can be used to iterate with "[index]"-syntax over all designs in a design set.
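#'
#' @examples
#' # A minimal sketch (mirrors the getDesignSet() examples; values are illustrative only):
#' design <- getDesignGroupSequential(alpha = 0.05, kMax = 6,
#'     sided = 2, typeOfDesign = "WT", deltaWT = 0.1)
#' designSet <- getDesignSet(design = design, deltaWT = c(0.3, 0.4))
#' designSet[2]             # the TrialDesign at position 2
#' designSet[2, "deltaWT"]  # a single field of the design at position 2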
#' #' @export #' #' @keywords internal #' setMethod("[", "TrialDesignSet", function(x, i, j = NA_character_) { if (length(x$designs) == 0) { return(NULL) } design <- x$designs[[i]] if (!is.na(j) && is.character(j)) { return(design[[j]]) } return(design) } ) #' #' @name TrialDesignSet_names #' #' @title #' The Names of a Trial Design Set object #' #' @description #' Function to get the names of a \code{TrialDesignSet} object. #' #' @details #' Returns the names of a design set that can be accessed by the user. #' #' @export #' #' @keywords internal #' names.TrialDesignSet <- function(x) { return(x$.getVisibleFieldNames()) } #' #' @name TrialDesignSet_length #' #' @title #' Length of Trial Design Set #' #' @description #' Returns the number of designs in a \code{TrialDesignSet}. #' #' @details #' Is helpful for iteration over all designs in a design set with "[index]"-syntax. #' #' @export #' #' @keywords internal #' length.TrialDesignSet <- function(x) { return(length(x$designs)) } #' #' @name TrialDesignSet_as.data.frame #' #' @title #' Coerce Trial Design Set to a Data Frame #' #' @description #' Returns the \code{TrialDesignSet} as data frame. #' #' @details #' Coerces the design set to a data frame. #' #' @export #' #' @keywords internal #' as.data.frame.TrialDesignSet <- function(x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, addPowerAndAverageSampleNumber = FALSE, theta = seq(-1, 1, 0.02), nMax = NA_integer_, ...) { .assertIsTrialDesignSet(x) if (x$isEmpty()) { stop("The design set is empty") } fCall = match.call(expand.dots = FALSE) theta <- .assertIsValidThetaRange(thetaRange = theta, thetaAutoSeqEnabled = (as.character(fCall$theta)[1] != "seq")) if (addPowerAndAverageSampleNumber) { .assertAssociatedArgumentsAreDefined( addPowerAndAverageSampleNumber = addPowerAndAverageSampleNumber, theta = theta, nMax = nMax) } fisherDesignEnabled <- .isTrialDesignFisher(x$getDesignMaster()) dataFrame <- NULL for (design in x$designs) { if (fisherDesignEnabled != .isTrialDesignFisher(design)) { stop("All trial designs must be from the same type ", "('", class(x$designs[[1]]), "' != '", class(design), ")'") } df <- as.data.frame(design, niceColumnNamesEnabled = niceColumnNamesEnabled, includeAllParameters = includeAllParameters) if (.isTrialDesignWithValidFutilityBounds(design)) { futilityBoundsName <- "futilityBounds" if (niceColumnNamesEnabled) { futilityBoundsName <- .getTableColumnNames(design = design)[["futilityBounds"]] } kMax <- design$kMax df[[futilityBoundsName]][kMax] <- design$criticalValues[kMax] } if (.isTrialDesignWithValidAlpha0Vec(design)) { alpha0VecName <- "alpha0Vec" if (niceColumnNamesEnabled) { alpha0VecName <- .getTableColumnNames(design = design)[["alpha0Vec"]] } kMax <- design$kMax df[[alpha0VecName]][kMax] <- design$criticalValues[kMax] } if (addPowerAndAverageSampleNumber) { results <- PowerAndAverageSampleNumberResult(design, theta = theta, nMax = nMax) df2 <- as.data.frame(results, niceColumnNamesEnabled = niceColumnNamesEnabled, includeAllParameters = includeAllParameters) df <- merge(df, df2, all.y = TRUE) } if (is.null(dataFrame)) { if (niceColumnNamesEnabled) { dataFrame <- cbind("Design number" = rep(1, nrow(df)), df) } else { dataFrame <- cbind(designNumber = rep(1, nrow(df)), df) } } else { if (niceColumnNamesEnabled) { df <- cbind("Design number" = rep(max(dataFrame$"Design number") + 1, nrow(df)), df) } else { df <- cbind(designNumber = rep(max(dataFrame$designNumber) + 1, nrow(df)), df) } dataFrame <- 
rbind(dataFrame, df) } } return(dataFrame) } #' #' @title #' Trial Design Set Plotting #' #' @description #' Plots a trial design set. #' #' @param x The trial design set, obtained from \code{\link{getDesignSet}}. #' @param y Not available for this kind of plot (is only defined to be compatible with the generic plot function). #' @param main The main title. #' @param xlab The x-axis label. #' @param ylab The y-axis label. #' @param palette The palette, default is \code{"Set1"}. #' @param theta A vector of theta values. #' @param nMax The maximum sample size. #' @param plotPointsEnabled If \code{TRUE}, additional points will be plotted. #' @param showSource If \code{TRUE}, the parameter names of the object will #' be printed which were used to create the plot; that may be, e.g., #' useful to check the values or to create your own plots with \code{\link[graphics]{plot}}. #' @param legendPosition The position of the legend. #' By default (\code{NA_integer_}) the algorithm tries to find a suitable position. #' Choose one of the following values to specify the position manually: #' \itemize{ #' \item \code{-1}: no legend will be shown #' \item \code{NA}: the algorithm tries to find a suitable position #' \item \code{0}: legend position outside plot #' \item \code{1}: legend position left top #' \item \code{2}: legend position left center #' \item \code{3}: legend position left bottom #' \item \code{4}: legend position right top #' \item \code{5}: legend position right center #' \item \code{6}: legend position right bottom #' } #' @param type The plot type (default = \code{1}). The following plot types are available: #' \itemize{ #' \item \code{1}: creates a 'Boundaries' plot #' \item \code{3}: creates a 'Stage Levels' plot #' \item \code{4}: creates a 'Type One Error Spending' plot #' \item \code{5}: creates a 'Power and Early Stopping' plot #' \item \code{6}: creates an 'Average Sample Size and Power / Early Stop' plot #' \item \code{7}: creates a 'Power' plot #' \item \code{8}: creates an 'Early Stopping' plot #' \item \code{9}: creates an 'Average Sample Size' plot #' } #' @param ... Optional \code{ggplot2} arguments. #' #' @details #' Generic function to plot a trial design set. #' It is, e.g., useful for visually comparing different designs or design parameters. #' #' @return #' A \code{ggplot2} object. #' #' @export #' #' @examples #' #' design <- getDesignInverseNormal(kMax = 3, alpha = 0.025, #' typeOfDesign = "asKD", gammaA = 2, #' informationRates = c(0.2, 0.7, 1), typeBetaSpending = "bsOF") #' #' # Create a set of designs based on the master design defined above #' # and varied parameter 'gammaA' #' designSet <- getDesignSet(design = design, gammaA = 4) #' #' if (require(ggplot2)) plot(designSet, type = 1, legendPosition = 6) #' plot.TrialDesignSet <- function(x, y, type = 1L, main = NA_character_, xlab = NA_character_, ylab = NA_character_, palette = "Set1", theta = seq(-1, 1, 0.02), nMax = NA_integer_, plotPointsEnabled = NA, legendPosition = NA_integer_, showSource = FALSE, ...) { fCall <- match.call(expand.dots = FALSE) designSetName <- as.character(fCall$x)[1] .plotTrialDesignSet(x = x, y = y, type = type, main = main, xlab = xlab, ylab = ylab, palette = palette, theta = theta, nMax = nMax, plotPointsEnabled = plotPointsEnabled, legendPosition = legendPosition, showSource = showSource, designSetName = designSetName, ...)
} .plotTrialDesignSet <- function(x, y, type = 1L, main = NA_character_, xlab = NA_character_, ylab = NA_character_, palette = "Set1", theta = seq(-1, 1, 0.02), nMax = NA_integer_, plotPointsEnabled = NA, legendPosition = NA_integer_, showSource = FALSE, designSetName = NA_character_, ...) { .assertGgplotIsInstalled() .assertIsValidLegendPosition(legendPosition) theta <- .assertIsValidThetaRange(thetaRange = theta) parameterSet <- x designMaster <- parameterSet$getDesignMaster() .assertIsTrialDesign(designMaster) if (type == 1) { main <- ifelse(is.na(main), "Boundaries", main) xParameterName <- "informationRates" yParameterNames <- c("criticalValues") if (designMaster$sided == 1) { if (.isTrialDesignWithValidFutilityBounds(designMaster)) { yParameterNames <- c("futilityBounds", "criticalValues") } if (.isTrialDesignWithValidAlpha0Vec(designMaster)) { yParameterNames <- c("alpha0Vec", "criticalValues") } } } else if (type == 2) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "designs with undefined endpoint do not support plot type 2") } else if (type == 3) { main <- ifelse(is.na(main), "Stage Levels", main) xParameterName <- "informationRates" yParameterNames <- "stageLevels" } else if (type == 4) { main <- ifelse(is.na(main), "Type One Error Spending", main) xParameterName <- "informationRates" yParameterNames <- c("alphaSpent") if (!.isTrialDesignFisher(designMaster) && designMaster$typeBetaSpending != C_TYPE_OF_DESIGN_BS_NONE) { yParameterNames <- c(yParameterNames, "betaSpent") palette <- "Paired" } plotPointsEnabled <- ifelse(is.na(plotPointsEnabled), FALSE, plotPointsEnabled) } else if (type == 5) { if (is.na(main)) { main <- bquote(atop(bold('Power and Early Stopping'), atop('(N'['max']*'='*.(nMax)*')'))) } xParameterName <- "theta" yParameterNames <- c("overallEarlyStop", "calculatedPower") } else if (type == 6) { if (is.na(main)) { main <- bquote(atop(bold('Average Sample Size and Power / Early Stop'), atop('(N'['max']*'='*.(nMax)*")"))) } xParameterName <- "theta" yParameterNames <- c("averageSampleNumber", "overallEarlyStop", "calculatedPower") } else if (type == 7) { if (is.na(main)) { main <- bquote(atop(bold('Power'), atop('(N'['max']*'='*.(nMax)*")"))) } xParameterName <- "theta" yParameterNames <- "calculatedPower" } else if (type == 8) { if (is.na(main)) { main <- bquote(atop(bold('Early Stopping'), atop('(N'['max']*'='*.(nMax)*")"))) } xParameterName <- "theta" yParameterNames <- "overallEarlyStop" } else if (type == 9) { if (is.na(main)) { main <- bquote(atop(bold('Average Sample Size'), atop('(N'['max']*'='*.(nMax)*")"))) } xParameterName <- "theta" yParameterNames <- "averageSampleNumber" } else { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' (", type, ") is not allowed; must be 1, 2, ..., 9") } .showPlotSourceInformation(objectName = designSetName, xParameterName = xParameterName, yParameterNames = yParameterNames, nMax = nMax, showSource = showSource) return(.plotParameterSet(parameterSet = parameterSet, designMaster = designMaster, xParameterName = xParameterName, yParameterNames = yParameterNames, mainTitle = main, xlab = xlab, ylab = ylab, palette = palette, theta = theta, nMax = nMax, plotPointsEnabled = plotPointsEnabled, legendPosition = legendPosition, ...)) } rpact/R/f_simulation_survival.R0000644000176200001440000012646513560550724016360 0ustar liggesusers###################################################################################### # # # -- Simulation of survival data with group sequential and combination test -- # # # # This file is part of the R 
package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.1 # # Date: 11-12-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### #' @include class_simulation_results.R NULL #' @title #' Get Simulation Survival #' #' @description #' Returns the analysis times, power, stopping probabilities, conditional power, and expected sample size #' for testing the hazard ratio in a survival design with two treatment groups. #' #' @param design The trial design. If no trial design is specified, a fixed sample size design is used. #' In this case, \code{alpha}, \code{beta}, \code{twoSidedPower}, #' and \code{sided} can be directly entered as arguments. #' @param thetaH0 The null hypothesis value. The default value is \code{1}. For one-sided testing, #' a bound for testing H0: hazard ratio = thetaH0 != 1 can be specified. #' @param directionUpper Specifies the direction of the alternative, only applicable #' for one-sided testing, default is \code{TRUE}. #' @param pi1 The assumed event rate in the treatment group, default is \code{seq(0.2,0.5,0.1)}. #' @param pi2 The assumed event rate in the control group, default is \code{0.2}. #' @param lambda1 The assumed hazard rate in the treatment group, there is no default. #' lambda1 can also be used to define piecewise exponentially distributed survival times #' (see details). #' @param lambda2 The assumed hazard rate in the reference group, there is no default. #' lambda2 can also be used to define piecewise exponentially distributed survival times #' (see details). #' @param median1 The assumed median survival time in the treatment group, there is no default. #' @param median2 The assumed median survival time in the reference group, there is no default. #' @param hazardRatio The vector of hazard ratios under consideration. #' If the event or hazard rates in both treatment groups are defined, the hazard ratio need #' not be specified as it is calculated. #' @param piecewiseSurvivalTime A vector that specifies the time intervals for the piecewise #' definition of the exponential survival time cumulative distribution function (see details). #' @param kappa The shape parameter of the Weibull distribution, default is \code{1}. #' The Weibull distribution cannot be used for the piecewise #' definition of the survival time distribution. #' Note that the parameters \code{shape} and \code{scale} in \code{\link[stats]{Weibull}} #' are equivalent to \code{kappa} and \code{1 / lambda}, respectively, in rpact. #' @param allocation1 The number of subjects that are consecutively assigned to treatment 1, #' default is \code{1}. #' @param allocation2 The number of subjects that are consecutively assigned to treatment 2, #' default is \code{1}. #' @param eventTime The assumed time under which the event rates are calculated, default is \code{12}. #' @param accrualTime The assumed accrual time for the study, default is \code{12} #' (see \code{\link{getAccrualTime}}). #' @param accrualIntensity A vector of accrual intensities, default is the relative #' intensity \code{0.1} (see \code{\link{getAccrualTime}}).
#' @param dropoutRate1 The assumed drop-out rate in the treatment group, default is \code{0}. #' @param dropoutRate2 The assumed drop-out rate in the control group, default is \code{0}. #' @param dropoutTime The assumed time for drop-out rates in the control and the #' treatment group, default is \code{12}. #' @param maxNumberOfSubjects \code{maxNumberOfSubjects > 0} needs to be specified. #' If accrual time and accrual intensity are specified, this will be calculated. #' @param plannedEvents \code{plannedEvents} is a vector of length kMax #' (the number of stages of the design) with increasing numbers #' that determines the number of cumulated (overall) events when the interim stages are planned. #' @param minNumberOfEventsPerStage When performing a data driven sample size recalculation, #' the vector with length kMax \code{minNumberOfEventsPerStage} determines the #' minimum number of events per stage (i.e., not cumulated); the first element #' is not taken into account. #' @param maxNumberOfEventsPerStage When performing a data driven sample size recalculation, #' the vector with length kMax \code{maxNumberOfEventsPerStage} determines the maximum number #' of events per stage (i.e., not cumulated); the first element is not taken into account. #' @param conditionalPower The conditional power for the subsequent stage under which the sample size recalculation is performed. #' @param thetaH1 If specified, the value of the hazard ratio under which the conditional power calculation is performed. #' @param maxNumberOfIterations The number of simulation iterations. #' @param maxNumberOfRawDatasetsPerStage The number of raw datasets per stage that shall #' be extracted and saved as \code{\link[base]{data.frame}}, default is \code{0}. #' \code{\link{getRawData}} can be used to get the extracted raw data from the object. #' @param longTimeSimulationAllowed Logical that indicates whether long time simulations #' that consume more than 30 seconds are allowed or not, default is \code{FALSE}. #' @param seed The seed to reproduce the simulation, default is a random seed. #' @param ... Ensures that all arguments are named and #' that a warning will be displayed if unknown arguments are passed. #' #' @details #' For a given design, the function simulates the power, stopping probabilities, conditional power, and expected #' sample size at a given number of events, number of subjects, and parameter configuration. #' It also simulates the time when the required events are expected under the given #' assumptions (exponentially, piecewise exponentially, or Weibull distributed survival times #' and constant or non-constant piecewise accrual). #' Additionally, integers \code{allocation1} and \code{allocation2} can be specified that determine the number of subjects allocated #' to treatment group 1 and treatment group 2, respectively. #' #' The formula of Kim & Tsiatis (Biometrics, 1990) #' is used to calculate the expected number of events under the alternative #' (see also Lakatos & Lan, Statistics in Medicine, 1992). These formulas are generalized #' to piecewise survival times and non-constant piecewise accrual over time.\cr #' #' \code{piecewiseSurvivalTime} #' The first element of this vector must be equal to \code{0}. \code{piecewiseSurvivalTime} can also #' be a list that combines the definition of the time intervals and hazard rates in the reference group. #' The definition of the survival time in the treatment group is obtained by the specification #' of the hazard ratio (see examples for details).
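#'
#' A brief sketch of the list-based definition (the same form is used in the examples below):
#' \preformatted{piecewiseSurvivalTime <- list(
#'     "0 - <5"  = 0.01,
#'     "5 - <10" = 0.02,
#'     ">=10"    = 0.04)}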
#' #' Note that \code{numberOfSubjects}, \code{numberOfSubjects1}, and \code{numberOfSubjects2} in the output #' are expected numbers of subjects. #' #' @section Simulation Data: #' The summary statistics "Simulated data" contain the following #' parameters: median [range]; mean +/- sd\cr #' #' \code{$show(showStatistics = FALSE)} or \code{$setShowStatistics(FALSE)} can be used to disable #' the output of the aggregated simulated data.\cr #' #' Example 1: \cr #' \code{simulationResults <- getSimulationSurvival(maxNumberOfSubjects = 100, plannedEvents = 30)} \cr #' \code{simulationResults$show(showStatistics = FALSE)}\cr #' #' Example 2: \cr #' \code{simulationResults <- getSimulationSurvival(maxNumberOfSubjects = 100, plannedEvents = 30)} \cr #' \code{simulationResults$setShowStatistics(FALSE)}\cr #' \code{simulationResults}\cr #' #' \code{\link{getData}} can be used to get the aggregated simulated data from the #' object as \code{\link[base]{data.frame}}. The data frame contains the following columns: #' \enumerate{ #' \item \code{iterationNumber}: The number of the simulation iteration. #' \item \code{stageNumber}: The stage. #' \item \code{pi1}: The assumed or derived event rate in the treatment group. #' \item \code{pi2}: The assumed or derived event rate in the control group. #' \item \code{hazardRatio}: The hazard ratio under consideration (if available). #' \item \code{analysisTime}: The analysis time. #' \item \code{numberOfSubjects}: The number of subjects under consideration when the #' (interim) analysis takes place. #' \item \code{eventsPerStage1}: The observed number of events per stage #' in treatment group 1. #' \item \code{eventsPerStage2}: The observed number of events per stage #' in treatment group 2. #' \item \code{eventsPerStage}: The observed number of events per stage #' in both treatment groups. #' \item \code{rejectPerStage}: 1 if the null hypothesis can be rejected, 0 otherwise. #' \item \code{futilityPerStage}: 1 if the study should be stopped for futility, 0 otherwise. #' \item \code{eventsNotAchieved}: 1 if the number of events could not be reached with the #' observed number of subjects, 0 otherwise. #' \item \code{testStatistic}: The test statistic that is used for the test decision, #' depends on which design was chosen (group sequential, inverse normal, #' or Fisher combination test). #' \item \code{logRankStatistic}: Z-score statistic which corresponds to a one-sided #' log-rank test at the considered stage. #' \item \code{hazardRatioEstimateLR}: The estimated hazard ratio, derived from the #' log-rank statistic. #' \item \code{trialStop}: \code{TRUE} if the study should be stopped for efficacy or futility or the final stage has been reached, \code{FALSE} otherwise. #' \item \code{conditionalPowerAchieved}: The conditional power for the subsequent stage of the trial for #' selected sample size and effect. The effect is either estimated from the data or can be #' user defined with \code{thetaH1}. #' } #' #' @section Raw Data: #' \code{\link{getRawData}} can be used to get the simulated raw data from the #' object as \code{\link[base]{data.frame}}. Note that \code{getSimulationSurvival} #' must be called beforehand with \code{maxNumberOfRawDatasetsPerStage} > 0. #' The data frame contains the following columns: #' \enumerate{ #' \item \code{iterationNumber}: The number of the simulation iteration. #' \item \code{stopStage}: The stage of stopping. #' \item \code{subjectId}: The subject id (increasing number 1, 2, 3, ...)
#' \item \code{accrualTime}: The accrual time, i.e., the time when the subject entered the trial. #' \item \code{treatmentGroup}: The treatment group number (1 or 2). #' \item \code{survivalTime}: The survival time of the subject. #' \item \code{dropoutTime}: The dropout time of the subject (may be \code{NA}). #' \item \code{observationTime}: The specific observation time. #' \item \code{timeUnderObservation}: The time under observation is defined as follows:\cr #' if (event == TRUE) {\cr #' timeUnderObservation <- survivalTime;\cr #' } else if (dropoutEvent == TRUE) {\cr #' timeUnderObservation <- dropoutTime;\cr #' } else {\cr #' timeUnderObservation <- observationTime - accrualTime;\cr #' } #' \item \code{event}: \code{TRUE} if an event occurred; \code{FALSE} otherwise. #' \item \code{dropoutEvent}: \code{TRUE} if a dropout event occurred; \code{FALSE} otherwise. #' } #' #' @return Returns a \code{\link{SimulationResultsSurvival}} object. #' #' @export #' #' @examples #' #' # Fixed sample size with minimum required definitions, pi1 = (0.3,0.4,0.5,0.6) and #' # pi2 = 0.3 at event time 12, and accrual time 24 #' getSimulationSurvival(pi1 = seq(0.3,0.6,0.1), pi2 = 0.3, eventTime = 12, #' accrualTime = 24, plannedEvents = 40, maxNumberOfSubjects = 200, #' maxNumberOfIterations = 50) #' #' \donttest{ #' #' # Increase number of simulation iterations #' getSimulationSurvival(pi1 = seq(0.3,0.6,0.1), pi2 = 0.3, eventTime = 12, #' accrualTime = 24, plannedEvents = 40, maxNumberOfSubjects = 200, #' maxNumberOfIterations = 50) #' #' # Determine necessary accrual time with default settings if 200 subjects and #' # 30 subjects per time unit can be recruited #' getSimulationSurvival(plannedEvents = 40, accrualTime = 0, #' accrualIntensity = 30, maxNumberOfSubjects = 200, maxNumberOfIterations = 50) #' #' # Determine necessary accrual time with default settings if 200 subjects and #' # if during the first 6 time units 20 subjects per time unit can be recruited, #' # then 30 subjects per time unit #' getSimulationSurvival(plannedEvents = 40, accrualTime = c(0, 6), #' accrualIntensity = c(20, 30), maxNumberOfSubjects = 200, #' maxNumberOfIterations = 50) #' #' # Determine maximum number of subjects with default settings if during the first #' # 6 time units 20 subjects per time unit can be recruited, and after #' # 10 time units 30 subjects per time unit #' getSimulationSurvival(plannedEvents = 40, accrualTime = c(0, 6, 10), #' accrualIntensity = c(20, 30), maxNumberOfIterations = 50) #' #' # Specify accrual time as a list #' at <- list( #' "0 - <6" = 20, #' "6 - Inf" = 30) #' getSimulationSurvival(plannedEvents = 40, accrualTime = at, #' maxNumberOfSubjects = 200, maxNumberOfIterations = 50) #' #' # Specify accrual time as a list, if the maximum number of subjects needs to be calculated #' at <- list( #' "0 - <6" = 20, #' "6 - <=10" = 30) #' getSimulationSurvival(plannedEvents = 40, accrualTime = at, maxNumberOfIterations = 50) #' #' # Specify effect size for a two-stage group sequential design with O'Brien & Fleming boundaries.
#' # Effect size is based on event rates at specified event time, directionUpper = FALSE #' # needs to be specified because it should be shown that hazard ratio < 1 #' getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), #' pi1 = 0.2, pi2 = 0.3, eventTime = 24, plannedEvents = c(20, 40), #' maxNumberOfSubjects = 200, directionUpper = FALSE, maxNumberOfIterations = 50) #' #' # As above, but with a three-stage O'Brien & Fleming design with #' # specified information rates, note that planned events consist of integer values #' d3 <- getDesignGroupSequential(informationRates = c(0.4, 0.7, 1)) #' getSimulationSurvival(design = d3, pi1 = 0.2, pi2 = 0.3, eventTime = 24, #' plannedEvents = round(d3$informationRates * 40), #' maxNumberOfSubjects = 200, directionUpper = FALSE, #' maxNumberOfIterations = 50) #' #' # Effect size is based on event rate at specified event time for the reference group and #' # hazard ratio, directionUpper = FALSE needs to be specified because it should be shown #' # that hazard ratio < 1 #' getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, #' pi2 = 0.3, eventTime = 24, plannedEvents = c(20, 40), maxNumberOfSubjects = 200, #' directionUpper = FALSE, maxNumberOfIterations = 50) #' #' # Effect size is based on hazard rate for the reference group and #' # hazard ratio, directionUpper = FALSE needs to be specified because #' # it should be shown that hazard ratio < 1 #' getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), #' hazardRatio = 0.5, lambda2 = 0.02, plannedEvents = c(20, 40), #' maxNumberOfSubjects = 200, directionUpper = FALSE, #' maxNumberOfIterations = 50) #' #' # Specification of piecewise exponential survival time and hazard ratios, #' # note that in getSimulationSurvival only one hazard ratio is used #' # in the case that the survival time is piecewise exponential #' getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), #' piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), #' hazardRatio = 1.5, plannedEvents = c(20, 40), maxNumberOfSubjects = 200, #' maxNumberOfIterations = 50) #' #' pws <- list( #' "0 - <5" = 0.01, #' "5 - <10" = 0.02, #' ">=10" = 0.04) #' getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), #' piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2), #' plannedEvents = c(20, 40), maxNumberOfSubjects = 200, #' maxNumberOfIterations = 50) #' #' # Specification of piecewise exponential survival time for both treatment arms #' getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), #' piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), #' lambda1 = c(0.015, 0.03, 0.06), plannedEvents = c(20, 40), #' maxNumberOfSubjects = 200, maxNumberOfIterations = 50) #' #' # Specification of piecewise exponential survival time as a list, #' # note that in getSimulationSurvival only one hazard ratio #' # (not a vector) can be used #' pws <- list( #' "0 - <5" = 0.01, #' "5 - <10" = 0.02, #' ">=10" = 0.04) #' getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), #' piecewiseSurvivalTime = pws, hazardRatio = 1.5, #' plannedEvents = c(20, 40), maxNumberOfSubjects = 200, #' maxNumberOfIterations = 50) #' #' # Specification of piecewise exponential survival time and delayed effect #' # (response after 5 time units) #' getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), #' piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), #' lambda1 = c(0.01, 0.02, 0.06), plannedEvents = c(20, 40), #'
maxNumberOfSubjects = 200, maxNumberOfIterations = 50) #' #' # Specify effect size based on median survival times #' getSimulationSurvival(median1 = 5, median2 = 3, plannedEvents = 40, #' maxNumberOfSubjects = 200, directionUpper = FALSE, #' maxNumberOfIterations = 50) #' #' # Specify effect size based on median survival #' # times of Weibull distribution with kappa = 2 #' getSimulationSurvival(median1 = 5, median2 = 3, kappa = 2, #' plannedEvents = 40, maxNumberOfSubjects = 200, #' directionUpper = FALSE, maxNumberOfIterations = 50) #' #' # Perform recalculation of number of events based on conditional power for a #' # three-stage design with inverse normal combination test, where the conditional power #' # is calculated under the specified effect size thetaH1 = 1.3 and up to a four-fold #' # increase in originally planned sample size (number of events) is allowed #' # Note that the first value in minNumberOfEventsPerStage and #' # maxNumberOfEventsPerStage is arbitrary, i.e., it has no effect. #' dIN <- getDesignInverseNormal(informationRates = c(0.4, 0.7, 1)) #' #' resultsWithSSR1 <- getSimulationSurvival(design = dIN, #' hazardRatio = seq(1, 1.6, 0.1), #' pi2 = 0.3, conditionalPower = 0.8, thetaH1 = 1.3, #' plannedEvents = c(58, 102, 146), #' minNumberOfEventsPerStage = c(58, 44, 44), #' maxNumberOfEventsPerStage = 4 * c(58, 44, 44), #' maxNumberOfSubjects = 800, maxNumberOfIterations = 50) #' resultsWithSSR1 #' #' # If thetaH1 is unspecified, the observed hazard ratio estimate #' # (calculated from the log-rank statistic) is used for performing the #' # recalculation of the number of events #' resultsWithSSR2 <- getSimulationSurvival(design = dIN, #' hazardRatio = seq(1, 1.6, 0.1), #' pi2 = 0.3, conditionalPower = 0.8, plannedEvents = c(58, 102, 146), #' minNumberOfEventsPerStage = c(58, 44, 44), #' maxNumberOfEventsPerStage = 4 * c(58, 44, 44), #' maxNumberOfSubjects = 800, maxNumberOfIterations = 50) #' resultsWithSSR2 #' #' # Compare it with the design without event size recalculation #' resultsWithoutSSR <- getSimulationSurvival(design = dIN, #' hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3, #' plannedEvents = c(58, 102, 145), maxNumberOfSubjects = 800, #' maxNumberOfIterations = 50) #' resultsWithoutSSR$overallReject #' resultsWithSSR1$overallReject #' resultsWithSSR2$overallReject #' #' # Confirm that event size recalculation increases the Type I error rate, #' # i.e., you have to use the combination test #' dGS <- getDesignGroupSequential(informationRates = c(0.4, 0.7, 1)) #' resultsWithSSRGS <- getSimulationSurvival(design = dGS, hazardRatio = seq(1), #' pi2 = 0.3, conditionalPower = 0.8, plannedEvents = c(58, 102, 145), #' minNumberOfEventsPerStage = c(58, 44, 44), #' maxNumberOfEventsPerStage = 4 * c(58, 44, 44), #' maxNumberOfSubjects = 800, maxNumberOfIterations = 50) #' resultsWithSSRGS$overallReject #' #' # Set seed to get reproducible results #' #' identical( #' getSimulationSurvival(plannedEvents = 40, maxNumberOfSubjects = 200, #' seed = 99)$analysisTime, #' getSimulationSurvival(plannedEvents = 40, maxNumberOfSubjects = 200, #' seed = 99)$analysisTime #' ) #' #' } #' getSimulationSurvival <- function(design = NULL, ..., thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, pi1 = NA_real_, pi2 = NA_real_, lambda1 = NA_real_, lambda2 = NA_real_, median1 = NA_real_, median2 = NA_real_, hazardRatio = NA_real_, kappa = 1, piecewiseSurvivalTime = NA_real_, allocation1 = C_ALLOCATION_1_DEFAULT, allocation2 = C_ALLOCATION_2_DEFAULT, eventTime =
C_EVENT_TIME_DEFAULT, accrualTime = C_ACCRUAL_TIME_DEFAULT, accrualIntensity = C_ACCRUAL_INTENSITY_DEFAULT, dropoutRate1 = C_DROP_OUT_RATE_1_DEFAULT, dropoutRate2 = C_DROP_OUT_RATE_2_DEFAULT, dropoutTime = C_DROP_OUT_TIME_DEFAULT, maxNumberOfSubjects = NA_real_, plannedEvents = NA_real_, minNumberOfEventsPerStage = NA_real_, maxNumberOfEventsPerStage = NA_real_, conditionalPower = NA_real_, thetaH1 = NA_real_, maxNumberOfIterations = C_MAX_SIMULATION_ITERATIONS_DEFAULT, maxNumberOfRawDatasetsPerStage = 0, longTimeSimulationAllowed = FALSE, seed = NA_real_) { .assertRcppIsInstalled() if (is.null(design)) { design <- .getDefaultDesignForSampleSizeCalculations(...) .warnInCaseOfUnknownArguments(functionName = "getSimulationSurvival", ignore = c("alpha", "beta", "sided", "twoSidedPower"), ...) } else { .assertIsTrialDesign(design) .warnInCaseOfUnknownArguments(functionName = "getSimulationSurvival", ...) .warnInCaseOfTwoSidedPowerArgument(...) } .assertIsSingleLogical(directionUpper, "directionUpper") .assertIsSingleNumber(thetaH0, "thetaH0") .assertIsInOpenInterval(thetaH0, "thetaH0", 0, NULL, naAllowed = TRUE) .assertIsNumericVector(minNumberOfEventsPerStage, "minNumberOfEventsPerStage", naAllowed = TRUE) .assertIsNumericVector(maxNumberOfEventsPerStage, "maxNumberOfEventsPerStage", naAllowed = TRUE) .assertIsSingleNumber(conditionalPower, "conditionalPower", naAllowed = TRUE) .assertIsInOpenInterval(conditionalPower, "conditionalPower", 0, 1, naAllowed = TRUE) .assertIsSingleNumber(thetaH1, "thetaH1", naAllowed = TRUE) .assertIsInOpenInterval(thetaH1, "thetaH1", 0, NULL, naAllowed = TRUE) .assertIsSinglePositiveInteger(maxNumberOfIterations, "maxNumberOfIterations", validateType = FALSE) .assertIsSingleNumber(seed, "seed", naAllowed = TRUE) .assertIsNumericVector(lambda1, "lambda1", naAllowed = TRUE) .assertIsNumericVector(lambda2, "lambda2", naAllowed = TRUE) .assertIsSinglePositiveInteger(allocation1, "allocation1", validateType = FALSE) .assertIsSinglePositiveInteger(allocation2, "allocation2", validateType = FALSE) if (!is.na(thetaH1) && is.na(conditionalPower)) { warning("'thetaH1' (", thetaH1, ") ", "will be ignored because no 'conditionalPower' is defined", call. = FALSE) } if (design$sided == 2) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "Only one-sided case is implemented for the survival simulation design") } if (!all(is.na(lambda2)) && !all(is.na(lambda1)) && length(lambda2) != length(lambda1)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "length of 'lambda2' (", length(lambda2), ") must be equal to length of 'lambda1' (", length(lambda1), ")") } if (all(is.na(lambda2)) && !all(is.na(lambda1))) { warning("'lambda1' (", .arrayToString(lambda1), ") will be ignored ", "because 'lambda2' (", .arrayToString(lambda2), ") is undefined", call. = FALSE) lambda1 <- NA_real_ } if (!all(is.na(lambda2)) && is.list(piecewiseSurvivalTime)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'piecewiseSurvivalTime' needs to be a numeric vector and not a list ", "because 'lambda2' (", .arrayToString(lambda2), ") is defined separately") } simulationResults <- SimulationResultsSurvival(design) if (is.na(conditionalPower)) { if (length(minNumberOfEventsPerStage) != 1 || !is.na(minNumberOfEventsPerStage)) { warning("'minNumberOfEventsPerStage' (", .arrayToString(minNumberOfEventsPerStage), ") ", "will be ignored because no 'conditionalPower' is defined", call. 
= FALSE) } if (length(maxNumberOfEventsPerStage) != 1 || !is.na(maxNumberOfEventsPerStage)) { warning("'maxNumberOfEventsPerStage' (", .arrayToString(maxNumberOfEventsPerStage), ") ", "will be ignored because no 'conditionalPower' is defined", call. = FALSE) } } minNumberOfEventsPerStage <- .assertIsValidMinNumberOfSubjectsPerStage(minNumberOfEventsPerStage, "minNumberOfEventsPerStage", plannedEvents, conditionalPower, design$kMax) maxNumberOfEventsPerStage <- .assertIsValidMinNumberOfSubjectsPerStage(maxNumberOfEventsPerStage, "maxNumberOfEventsPerStage", plannedEvents, conditionalPower, design$kMax) if (!is.na(conditionalPower)) { if (design$kMax > 1) { if (any(maxNumberOfEventsPerStage - minNumberOfEventsPerStage < 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'maxNumberOfEventsPerStage' (", .arrayToString(maxNumberOfEventsPerStage), ") must not be smaller than 'minNumberOfEventsPerStage' (", .arrayToString(minNumberOfEventsPerStage), ")") } .setValueAndParameterType(simulationResults, "minNumberOfEventsPerStage", minNumberOfEventsPerStage, NA_real_) .setValueAndParameterType(simulationResults, "maxNumberOfEventsPerStage", maxNumberOfEventsPerStage, NA_real_) } else { warning("'conditionalPower' will be ignored for a fixed sample design", call. = FALSE) } } else { simulationResults$minNumberOfEventsPerStage <- NA_real_ simulationResults$maxNumberOfEventsPerStage <- NA_real_ simulationResults$.setParameterType("minNumberOfEventsPerStage", C_PARAM_NOT_APPLICABLE) simulationResults$.setParameterType("maxNumberOfEventsPerStage", C_PARAM_NOT_APPLICABLE) simulationResults$.setParameterType("conditionalPower", C_PARAM_NOT_APPLICABLE) } accrualSetup <- getAccrualTime(accrualTime = accrualTime, accrualIntensity = accrualIntensity, maxNumberOfSubjects = maxNumberOfSubjects) if (is.na(accrualSetup$maxNumberOfSubjects)) { if (identical(accrualIntensity, 1L)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "choose an 'accrualIntensity' > 1 or define 'maxNumberOfSubjects'") } stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'maxNumberOfSubjects' must be defined") } simulationResults$.accrualTime <- accrualSetup accrualTime <- accrualSetup$.getAccrualTimeWithoutLeadingZero() simulationResults$maxNumberOfSubjects <- accrualSetup$maxNumberOfSubjects simulationResults$.setParameterType("maxNumberOfSubjects", accrualSetup$.getParameterType("maxNumberOfSubjects")) simulationResults$accrualTime <- accrualSetup$.getAccrualTimeWithoutLeadingZero() simulationResults$.setParameterType("accrualTime", accrualSetup$.getParameterType("accrualTime")) simulationResults$accrualIntensity <- accrualSetup$accrualIntensity simulationResults$.setParameterType("accrualIntensity", accrualSetup$.getParameterType("accrualIntensity")) .assertIsIntegerVector(plannedEvents, "plannedEvents", validateType = FALSE) if (length(plannedEvents) != design$kMax) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'plannedEvents' (", .arrayToString(plannedEvents), ") must have length ", design$kMax) } .assertIsInClosedInterval(plannedEvents, "plannedEvents", lower = 1, upper = NULL) .assertValuesAreStrictlyIncreasing(plannedEvents, "plannedEvents") simulationResults$plannedEvents <- plannedEvents simulationResults$.setParameterType("plannedEvents", C_PARAM_USER_DEFINED) pwsTimeObject <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = piecewiseSurvivalTime, lambda2 = lambda2, lambda1 = lambda1, median1 = median1, median2 = median2, hazardRatio = hazardRatio, pi1 = pi1, pi2 = pi2, eventTime = eventTime, kappa = kappa, delayedResponseAllowed = TRUE, .pi1Default =
C_PI_1_DEFAULT) simulationResults$.piecewiseSurvivalTime <- pwsTimeObject simulationResults$hazardRatio <- pwsTimeObject$hazardRatio simulationResults$.setParameterType("hazardRatio", pwsTimeObject$.getParameterType("hazardRatio")) if (pwsTimeObject$.isLambdaBased()) { simulationResults$piecewiseSurvivalTime <- pwsTimeObject$piecewiseSurvivalTime simulationResults$.setParameterType("piecewiseSurvivalTime", C_PARAM_USER_DEFINED) simulationResults$lambda2 <- pwsTimeObject$lambda2 simulationResults$.setParameterType("lambda2", pwsTimeObject$.getParameterType("lambda2")) lambdaVec2 <- simulationResults$lambda2 simulationResults$lambda1 <- pwsTimeObject$lambda1 simulationResults$.setParameterType("lambda1", pwsTimeObject$.getParameterType("lambda1")) if (any(is.na(pwsTimeObject$lambda1))) { .assertIsValidHazardRatioVector(pwsTimeObject$hazardRatio) .setValueAndParameterType(simulationResults, "hazardRatio", pwsTimeObject$hazardRatio, NA_real_) numberOfResults <- length(simulationResults$hazardRatio) lambdaVec1 <- simulationResults$lambda2 * pwsTimeObject$hazardRatio } else { .setValueAndParameterType(simulationResults, "hazardRatio", pwsTimeObject$hazardRatio, NA_real_) numberOfResults <- 1 lambdaVec1 <- pwsTimeObject$lambda1 } .warnInCaseOfDefinedPiValue(simulationResults, "pi1") .warnInCaseOfDefinedPiValue(simulationResults, "pi2") simulationResults$pi1 <- pwsTimeObject$pi1 simulationResults$pi2 <- pwsTimeObject$pi2 simulationResults$.setParameterType("pi1", pwsTimeObject$.getParameterType("pi1")) simulationResults$.setParameterType("pi2", pwsTimeObject$.getParameterType("pi2")) cdfValues1 <- .getPiecewiseExponentialDistribution( pwsTimeObject$piecewiseSurvivalTime, lambdaVec1, pwsTimeObject$piecewiseSurvivalTime, kappa = kappa) cdfValues2 <- .getPiecewiseExponentialDistribution( pwsTimeObject$piecewiseSurvivalTime, lambdaVec2, pwsTimeObject$piecewiseSurvivalTime, kappa = kappa) if (length(cdfValues1) == 1) { cdfValues1 <- NA_real_ cdfValues2 <- NA_real_ } else { cdfValues1 <- cdfValues1[2:length(cdfValues1)] # use values without a leading 0 cdfValues2 <- cdfValues2[2:length(cdfValues2)] } pi1 <- NA_real_ pi2 <- NA_real_ } else { numberOfResults <- .initDesignPlanSurvivalByPiecewiseSurvivalTimeObject( simulationResults, pwsTimeObject) pi1 <- simulationResults$pi1 pi2 <- simulationResults$pi2 simulationResults$piecewiseSurvivalTime <- NA_real_ lambdaVec1 <- NA_real_ lambdaVec2 <- NA_real_ cdfValues1 <- NA_real_ cdfValues2 <- NA_real_ } numberOfSimStepsTotal <- numberOfResults * maxNumberOfIterations * accrualSetup$maxNumberOfSubjects maxNumberOfSimStepsTotal <- 10 * 100000 * 100 if (numberOfSimStepsTotal > maxNumberOfSimStepsTotal) { if (!longTimeSimulationAllowed) { stop("Simulation stopped because long time simulation is disabled ", "and the defined number of single simulation steps (", numberOfSimStepsTotal, ") is larger than the threshold ", maxNumberOfSimStepsTotal, ". 
", "Set 'longTimeSimulationAllowed = TRUE' to enable simulations ", "that take a long time (> 30 sec)") } cat("Note that the simulation may take a long time because", sprintf("%.0f", numberOfSimStepsTotal), "single simulation steps must be calculated") } .setValueAndParameterType(simulationResults, "directionUpper", directionUpper, C_DIRECTION_UPPER_DEFAULT) .setValueAndParameterType(simulationResults, "dropoutRate1", dropoutRate1, C_DROP_OUT_RATE_1_DEFAULT) .setValueAndParameterType(simulationResults, "dropoutRate2", dropoutRate2, C_DROP_OUT_RATE_2_DEFAULT) .setValueAndParameterType(simulationResults, "dropoutTime", dropoutTime, C_DROP_OUT_TIME_DEFAULT) .setValueAndParameterType(simulationResults, "eventTime", eventTime, C_EVENT_TIME_DEFAULT) .setValueAndParameterType(simulationResults, "thetaH0", thetaH0, C_THETA_H0_SURVIVAL_DEFAULT) .setValueAndParameterType(simulationResults, "allocation1", allocation1, C_ALLOCATION_1_DEFAULT) .setValueAndParameterType(simulationResults, "allocation2", allocation2, C_ALLOCATION_2_DEFAULT) allocationRatioPlanned <- allocation1 / allocation2 .setValueAndParameterType(simulationResults, "allocationRatioPlanned", allocationRatioPlanned, C_ALLOCATION_RATIO_DEFAULT) .setValueAndParameterType(simulationResults, "conditionalPower", conditionalPower, NA_real_) if (!is.na(thetaH0) && !is.na(thetaH1) && thetaH0 != 1) { thetaH1 <- thetaH1 / thetaH0 .setValueAndParameterType(simulationResults, "thetaH1", thetaH1, NA_real_) simulationResults$.setParameterType("thetaH1", C_PARAM_GENERATED) } else { .setValueAndParameterType(simulationResults, "thetaH1", thetaH1, NA_real_) } if (is.na(conditionalPower)) { simulationResults$.setParameterType("thetaH1", C_PARAM_NOT_APPLICABLE) } .setValueAndParameterType(simulationResults, "kappa", kappa, 1) .setValueAndParameterType(simulationResults, "maxNumberOfIterations", as.integer(maxNumberOfIterations), C_MAX_SIMULATION_ITERATIONS_DEFAULT) .setValueAndParameterType(simulationResults, "seed", .setSeed(seed), NA_real_) if (is.na(seed)) { simulationResults$.setParameterType("seed", C_PARAM_DEFAULT_VALUE) } phi <- -c(log(1 - dropoutRate1), log(1 - dropoutRate2)) / dropoutTime densityIntervals <- accrualTime if (length(accrualTime) > 1) { densityIntervals[2:length(accrualTime)] <- accrualTime[2:length(accrualTime)] - accrualTime[1:(length(accrualTime) - 1)] } densityVector <- accrualSetup$accrualIntensity / sum(densityIntervals * accrualSetup$accrualIntensity) accrualTimeValue <- cumsum(rep(1 / (densityVector * accrualSetup$maxNumberOfSubjects), round(densityVector * densityIntervals * accrualSetup$maxNumberOfSubjects)))[1:accrualSetup$maxNumberOfSubjects] # to avoid last value to be NA_real_ i <- accrualSetup$maxNumberOfSubjects while (is.na(accrualTimeValue[i])) { accrualTimeValue[i] <- accrualTime[length(accrualTime)] i <- i - 1 } treatmentGroup <- rep(c(rep(1, allocation1), rep(2, allocation2)), ceiling(accrualSetup$maxNumberOfSubjects / (allocation1 + allocation2)))[1:accrualSetup$maxNumberOfSubjects] if (.isTrialDesignFisher(design)) { alpha0Vec <- design$alpha0Vec futilityBounds <- rep(NA_real_, design$kMax - 1) } else { alpha0Vec <- rep(NA_real_, design$kMax - 1) futilityBounds <- design$futilityBounds } if (.isTrialDesignGroupSequential(design)) { designNumber <- 1L } else if (.isTrialDesignInverseNormal(design)) { designNumber <- 2L } else if (.isTrialDesignFisher(design)) { designNumber <- 3L } resultData <- getSimulationSurvivalCpp( designNumber, design$kMax, design$sided, design$criticalValues, design$informationRates, 
conditionalPower, plannedEvents, thetaH1, minNumberOfEventsPerStage, maxNumberOfEventsPerStage, directionUpper, allocation1, allocation2, accrualTimeValue, treatmentGroup, thetaH0, futilityBounds, alpha0Vec, pi1, pi2, eventTime, .getPiecewiseExpStartTimesWithoutLeadingZero(pwsTimeObject$piecewiseSurvivalTime), cdfValues1, cdfValues2, lambdaVec1, lambdaVec2, phi, accrualSetup$maxNumberOfSubjects, maxNumberOfIterations, maxNumberOfRawDatasetsPerStage, kappa) overview <- resultData$overview if (length(overview) == 0 || nrow(overview) == 0) { stop("No simulation results calculated") } n <- nrow(overview) overview <- cbind( design = rep(sub("^TrialDesign", "", class(design)), n), overview) if (pwsTimeObject$.isPiBased() && pwsTimeObject$.getParameterType("hazardRatio") != C_PARAM_USER_DEFINED) { simulationResults$hazardRatio <- matrix(overview$hazardRatio, nrow = design$kMax)[1, ] } simulationResults$iterations <- matrix(as.integer(overview$iterations), nrow = design$kMax) if (!is.null(overview$eventsPerStage)) { simulationResults$eventsPerStage <- matrix(overview$eventsPerStage, nrow = design$kMax) } simulationResults$eventsNotAchieved <- matrix(overview$eventsNotAchieved, nrow = design$kMax) simulationResults$numberOfSubjects <- matrix(overview$numberOfSubjects, nrow = design$kMax) simulationResults$numberOfSubjects1 <- .getNumberOfSubjects1(simulationResults$numberOfSubjects, allocationRatioPlanned) simulationResults$numberOfSubjects2 <- .getNumberOfSubjects2(simulationResults$numberOfSubjects, allocationRatioPlanned) if (allocationRatioPlanned != 1) { simulationResults$.setParameterType("numberOfSubjects1", C_PARAM_GENERATED) simulationResults$.setParameterType("numberOfSubjects2", C_PARAM_GENERATED) } if (design$kMax > 1) { simulationResults$rejectPerStage <- matrix(overview$rejectPerStage, nrow = design$kMax) } simulationResults$overallReject <- matrix(overview$overallReject, nrow = design$kMax)[1, ] if (!all(is.na(overview$conditionalPowerAchieved))) { simulationResults$conditionalPowerAchieved <- matrix( overview$conditionalPowerAchieved, nrow = design$kMax) simulationResults$.setParameterType("conditionalPowerAchieved", C_PARAM_GENERATED) } if (design$kMax == 1) { simulationResults$.setParameterType("numberOfSubjects", C_PARAM_NOT_APPLICABLE) simulationResults$.setParameterType("eventsPerStage", C_PARAM_NOT_APPLICABLE) } if (design$kMax > 1) { if (numberOfResults == 1) { simulationResults$futilityPerStage <- matrix( overview$futilityPerStage[1:(design$kMax - 1)], nrow = design$kMax - 1) } else { simulationResults$futilityPerStage <- matrix(matrix( overview$futilityPerStage, nrow = design$kMax)[1:(design$kMax - 1), ], nrow = design$kMax - 1) } } if (design$kMax > 1) { simulationResults$futilityStop <- matrix(overview$futilityStop, nrow = design$kMax)[1, ] simulationResults$earlyStop <- simulationResults$futilityStop + simulationResults$overallReject - simulationResults$rejectPerStage[design$kMax, ] } else { simulationResults$futilityStop <- rep(0, numberOfResults) simulationResults$earlyStop <- rep(0, numberOfResults) } simulationResults$analysisTime <- matrix(overview$analysisTime, nrow = design$kMax) simulationResults$studyDuration <- matrix(overview$studyDuration, nrow = design$kMax)[1, ] if (design$kMax > 1) { subData <- simulationResults$rejectPerStage[1:(design$kMax - 1), ] + simulationResults$futilityPerStage pStop <- rbind(subData, 1 - colSums(subData)) numberOfSubjects <- simulationResults$numberOfSubjects numberOfSubjects[is.na(numberOfSubjects)] <- 0 
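# Expected numbers of subjects and events below are obtained by weighting the
# stage-wise values with the stopping probabilities: 'pStop' stacks, per
# scenario column, the probabilities of stopping at stages 1, ..., kMax - 1
# (rejection or futility) and assigns the remaining probability mass to the
# final stage, so diag(t(n) %*% pStop) yields sum_k n_k * P(stop at stage k).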
simulationResults$expectedNumberOfSubjects <- diag(t(numberOfSubjects) %*% pStop) if (nrow(simulationResults$eventsPerStage) > 0 && ncol(simulationResults$eventsPerStage) > 0) { eventsPerStage <- simulationResults$eventsPerStage eventsPerStage[is.na(eventsPerStage)] <- 0 simulationResults$expectedNumberOfEvents <- diag(t(eventsPerStage) %*% pStop) } } else { simulationResults$expectedNumberOfSubjects <- as.numeric(simulationResults$numberOfSubjects) if (nrow(simulationResults$eventsPerStage) > 0 && ncol(simulationResults$eventsPerStage) > 0) { simulationResults$expectedNumberOfEvents <- as.numeric(simulationResults$eventsPerStage) } } data <- resultData$data[!is.na(resultData$data$iterationNumber), ] data$trialStop <- (data$rejectPerStage == 1 | data$futilityPerStage == 1 | data$stageNumber == design$kMax) if (!is.null(data$eventsPerStage)) { if (directionUpper) { data$hazardRatioEstimateLR <- exp(data$logRankStatistic * (1 + allocation1 / allocation2) / sqrt(allocation1 / allocation2 * data$eventsPerStage)) } else { data$hazardRatioEstimateLR <- exp(-data$logRankStatistic * (1 + allocation1 / allocation2) / sqrt(allocation1 / allocation2 * data$eventsPerStage)) } } simulationResults$.data <- data stages <- 1:design$kMax rawData <- resultData$rawData if (!is.null(rawData) && nrow(rawData) > 0 && ncol(rawData) > 0) { rawData <- rawData[!is.na(rawData$iterationNumber), ] } if (!is.null(rawData) && nrow(rawData) > 0 && ncol(rawData) > 0) { stopStageNumbers <- rawData$stopStage missingStageNumbers <- c() if (length(stopStageNumbers) > 0) { stopStageNumbers <- sort(unique(stopStageNumbers)) missingStageNumbers <- stages[!(stages %in% stopStageNumbers)] } else { missingStageNumbers <- stages } if (length(missingStageNumbers) > 0) { warning("Could not get rawData (individual results) for stages ", .arrayToString(missingStageNumbers), call. = FALSE) } } else { rawData <- data.frame( iterationNumber = numeric(0), stopStage = numeric(0), pi1 = numeric(0), pi2 = numeric(0), subjectId = numeric(0), accrualTime = numeric(0), treatmentGroup = numeric(0), survivalTime = numeric(0), dropoutTime = numeric(0), observationTime = numeric(0), timeUnderObservation = numeric(0), event = logical(0), dropoutEvent = logical(0), censorIndicator = numeric(0) ) if (maxNumberOfRawDatasetsPerStage > 0) { warning("Could not get rawData (individual results) for stages ", .arrayToString(stages), call. = FALSE) } } if (pwsTimeObject$.isLambdaBased() || length(pi1) < 2) { rawData <- rawData[, !(colnames(rawData) %in% c("pi1", "pi2"))] } # Remove censorIndicator because it will not be calculated yet rawData <- rawData[, colnames(rawData) != "censorIndicator"] simulationResults$.rawData <- rawData return(simulationResults) } rpact/R/f_design_sample_size_calculator.R0000644000176200001440000061706113574426744020314 0ustar liggesusers###################################################################################### # # # -- Sample size calculator -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials.
# # # # File version: 1.1.0 # # Date: 29-01-2019 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### .addEffectScaleBoundaryDataToDesignPlan <- function(designPlan) { .assertIsTrialDesignPlan(designPlan) design <- designPlan$.design if (.isTrialDesignPlanMeans(designPlan)) { if (design$kMax == 1 && designPlan$.isSampleSizeObject()) { designPlan$maxNumberOfSubjects <- designPlan$nFixed } boundaries <- .getEffectScaleBoundaryDataMeans(design, designPlan$thetaH0, designPlan$meanRatio, designPlan$stDev, designPlan$maxNumberOfSubjects, designPlan$groups, designPlan$allocationRatioPlanned, designPlan$directionUpper, designPlan$normalApproximation) } else if (.isTrialDesignPlanRates(designPlan)) { if (designPlan$.isSampleSizeObject()) { # comes from getSampleSize if (designPlan$groups == 1) { designPlan$directionUpper <- (designPlan$pi1 > designPlan$thetaH0) } else { if (designPlan$riskRatio) { designPlan$directionUpper <- (designPlan$pi1 / designPlan$pi2 > designPlan$thetaH0) } else { designPlan$directionUpper <- (designPlan$pi1 - designPlan$pi2 > designPlan$thetaH0) } } designPlan$.setParameterType("directionUpper", C_PARAM_GENERATED) } if (design$kMax == 1 && designPlan$.isSampleSizeObject()) { designPlan$maxNumberOfSubjects <- designPlan$nFixed } boundaries <- .getEffectScaleBoundaryDataRates(design, designPlan$thetaH0, designPlan$pi2, designPlan$maxNumberOfSubjects, designPlan$groups, designPlan$riskRatio, designPlan$allocationRatioPlanned, designPlan$directionUpper) } else if (.isTrialDesignPlanSurvival(designPlan)) { if (designPlan$.isSampleSizeObject()) { # comes from getSampleSize designPlan$directionUpper <- (designPlan$hazardRatio > designPlan$thetaH0) designPlan$.setParameterType("directionUpper", C_PARAM_GENERATED) } if (design$kMax == 1 && designPlan$.isSampleSizeObject()) { designPlan$eventsPerStage <- matrix(designPlan$eventsFixed,nrow = 1) } boundaries <- .getEffectScaleBoundaryDataSurvival(design, designPlan$thetaH0, designPlan$eventsPerStage, designPlan$allocationRatioPlanned, designPlan$directionUpper) } if (designPlan$.design$sided == 1) { designPlan$criticalValuesEffectScale <- boundaries$criticalValuesEffectScaleUpper designPlan$.setParameterType("criticalValuesEffectScale", C_PARAM_GENERATED) } else { designPlan$criticalValuesEffectScaleUpper <- boundaries$criticalValuesEffectScaleUpper designPlan$criticalValuesEffectScaleLower <- boundaries$criticalValuesEffectScaleLower designPlan$.setParameterType("criticalValuesEffectScaleUpper", C_PARAM_GENERATED) designPlan$.setParameterType("criticalValuesEffectScaleLower", C_PARAM_GENERATED) } if (!.isTrialDesignFisher(design) && design$sided == 1 && any(design$futilityBounds > -6)) { designPlan$.setParameterType("futilityBoundsEffectScale", C_PARAM_GENERATED) } else { designPlan$.setParameterType("futilityBoundsEffectScale", C_PARAM_NOT_APPLICABLE) } designPlan$futilityBoundsEffectScale <- round(boundaries$futilityBoundsEffectScale,8) } .getEffectScaleBoundaryDataMeans <- function(design, thetaH0, meanRatio, stDev, maxNumberOfSubjects, groups, allocationRatioPlanned, directionUpper, normalApproximation) { 
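# Converts the design's critical values (and, for one-sided designs, the
# futility bounds) from the z-score scale to the effect scale of the mean,
# mean difference, or mean ratio. In the one-sample case, for example, the
# upper bound at stage k is thetaH0 + c_k * stDev / sqrt(n_k), where n_k is
# the cumulative sample size at stage k; with normalApproximation = FALSE,
# quantiles of the t distribution replace the z-scale critical values c_k.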
futilityBoundsEffectScale <- rep(NA_real_, design$kMax - 1) # Initialising effect scale matrix if (normalApproximation){ criticalValues <- design$criticalValues futilityBounds <- design$futilityBounds } else { criticalValues <- stats::qt(1 - design$stageLevels, design$informationRates %*% t(maxNumberOfSubjects) - groups) futilityBounds <- stats::qt(stats::pnorm(design$futilityBounds), design$informationRates[1:(design$kMax - 1)] %*% t(maxNumberOfSubjects) - groups) } if (groups == 1) { criticalValuesEffectScaleUpper <- thetaH0 + criticalValues * stDev / sqrt(design$informationRates %*% t(maxNumberOfSubjects)) criticalValuesEffectScaleLower <- thetaH0 - criticalValues * stDev / sqrt(design$informationRates %*% t(maxNumberOfSubjects)) if (!.isTrialDesignFisher(design) && design$sided == 1 && any(futilityBounds > -6)) { futilityBoundsEffectScale <- thetaH0 + futilityBounds * stDev / sqrt(design$informationRates[1:(design$kMax - 1)] %*% t(maxNumberOfSubjects)) } } else if (!meanRatio) { criticalValuesEffectScaleUpper <- thetaH0 + criticalValues * stDev * (1 + allocationRatioPlanned) / (sqrt(allocationRatioPlanned * design$informationRates %*% t(maxNumberOfSubjects))) criticalValuesEffectScaleLower <- thetaH0 - criticalValues * stDev * (1 + allocationRatioPlanned) / (sqrt(allocationRatioPlanned * design$informationRates %*% t(maxNumberOfSubjects))) if (!.isTrialDesignFisher(design) && design$sided == 1 && any(futilityBounds > -6)) { futilityBoundsEffectScale <- thetaH0 + futilityBounds * stDev * (1 + allocationRatioPlanned) / (sqrt(allocationRatioPlanned * design$informationRates[1:(design$kMax - 1)] %*% t(maxNumberOfSubjects))) } } else { criticalValuesEffectScaleUpper <- thetaH0 + criticalValues * stDev * sqrt(1 + 1 / allocationRatioPlanned + thetaH0^2 * (1 + allocationRatioPlanned)) / (sqrt(design$informationRates %*% t(maxNumberOfSubjects))) criticalValuesEffectScaleLower <- thetaH0 - criticalValues * stDev * sqrt(1 + 1 / allocationRatioPlanned + thetaH0^2 * (1 + allocationRatioPlanned)) / (sqrt(design$informationRates %*% t(maxNumberOfSubjects))) if (!.isTrialDesignFisher(design) && design$sided == 1 && any(futilityBounds > -6)) { futilityBoundsEffectScale <- thetaH0 + futilityBounds * stDev * sqrt(1 + 1 / allocationRatioPlanned + thetaH0^2 * (1 + allocationRatioPlanned)) / (sqrt(design$informationRates[1:(design$kMax - 1)] %*% t(maxNumberOfSubjects))) } } directionUpper[is.na(directionUpper)] <- TRUE if (length(directionUpper) > 0 && all(!directionUpper)) { criticalValuesEffectScaleUpper <- -criticalValuesEffectScaleUpper + 2 * thetaH0 criticalValuesEffectScaleLower <- -criticalValuesEffectScaleLower + 2 * thetaH0 if (!any(is.na(futilityBoundsEffectScale))) { futilityBoundsEffectScale <- -futilityBoundsEffectScale + 2 * thetaH0 } } return(list( criticalValuesEffectScaleUpper = matrix(criticalValuesEffectScaleUpper, nrow = design$kMax), criticalValuesEffectScaleLower = matrix(criticalValuesEffectScaleLower, nrow = design$kMax), futilityBoundsEffectScale = matrix(futilityBoundsEffectScale, nrow = design$kMax - 1) )) } .getEffectScaleBoundaryDataRates <- function(design, thetaH0, pi2, maxNumberOfSubjects, groups, riskRatio, allocationRatioPlanned, directionUpper) { nParameters <- length(maxNumberOfSubjects) directionUpper[is.na(directionUpper)] <- TRUE criticalValuesEffectScaleUpper <- matrix(, nrow = design$kMax, ncol = nParameters) criticalValuesEffectScaleLower <- matrix(, nrow = design$kMax, ncol = nParameters) futilityBoundsEffectScale <- matrix(, nrow = design$kMax - 1, ncol = 
nParameters) if (length(allocationRatioPlanned) == 1) { allocationRatioPlanned <- rep(allocationRatioPlanned, nParameters) } if (groups == 1) { n1 <- design$informationRates %*% t(maxNumberOfSubjects) for (j in (1:nParameters)) { criticalValuesEffectScaleUpper[, j] <- thetaH0 + (2 * directionUpper[j] - 1) * design$criticalValues * sqrt(thetaH0 * (1 - thetaH0)) / sqrt(n1[,j]) if (design$sided == 2) { criticalValuesEffectScaleLower[, j] <- thetaH0 - design$criticalValues * sqrt(thetaH0 * (1 - thetaH0)) / sqrt(n1[,j]) } if (!.isTrialDesignFisher(design) && (design$sided == 1) && any(design$futilityBounds > -6)) { futilityBoundsEffectScale[, j] <- thetaH0 + (2 * directionUpper[j] - 1) * design$futilityBounds * sqrt(thetaH0 * (1 - thetaH0)) / sqrt(n1[1:(design$kMax - 1), j]) } } } else if (!riskRatio) { boundaries <- design$criticalValues # calculate pi1 that solves (pi1 - pi2 - thetaH0) / SE(pi1 - pi2 - thetaH0) # = crit by using Farrington & Manning approach for (j in (1:nParameters)) { n1 <- allocationRatioPlanned[j] * design$informationRates * maxNumberOfSubjects[j] / (1 + allocationRatioPlanned[j]) n2 <- n1/allocationRatioPlanned[j] for (i in (1:length(boundaries))) { tryCatch({pi1Bound <- uniroot( function(x) { fm <- .getFarringtonManningValues(x, pi2, thetaH0, allocationRatioPlanned[j], method = "diff") (x - pi2 - thetaH0) / sqrt(fm$ml1 * (1 - fm$ml1) / n1[i] + fm$ml2 * (1 - fm$ml2) / n2[i]) - (2 * directionUpper[j] - 1) * boundaries[i] }, lower = 0, upper = 1, tol = .Machine$double.eps^0.5)$root }, error = function(e) { pi1Bound <<- NA_real_ }) criticalValuesEffectScaleUpper[i,j] <- pi1Bound - pi2 # difference to pi2 } if (design$sided == 2) { for (i in (1:length(boundaries))) { tryCatch({pi1Bound <- uniroot( function(x) { fm <- .getFarringtonManningValues(x, pi2, thetaH0, allocationRatioPlanned[j], method = "diff") (x - pi2 - thetaH0) / sqrt(fm$ml1 * (1 - fm$ml1) / n1[i] + fm$ml2 * (1 - fm$ml2) / n2[i]) + boundaries[i] }, lower = 0, upper = 1, tol = .Machine$double.eps^0.5)$root }, error = function(e) { pi1Bound <<- NA_real_ }) criticalValuesEffectScaleLower[i,j] <- pi1Bound - pi2 # difference to pi2 } } } if (!.isTrialDesignFisher(design) && (design$sided == 1) && any(design$futilityBounds > -6)) { boundaries <- design$futilityBounds for (j in (1:nParameters)) { n1 <- allocationRatioPlanned[j] * design$informationRates * maxNumberOfSubjects[j] / (1 + allocationRatioPlanned[j]) n2 <- n1/allocationRatioPlanned[j] for (i in (1:length(boundaries))) { tryCatch({pi1Bound <- uniroot( function(x) { fm <- .getFarringtonManningValues(x, pi2, thetaH0, allocationRatioPlanned[j], method = "diff") (x - pi2 - thetaH0) / sqrt(fm$ml1 * (1 - fm$ml1) / n1[i] + fm$ml2 * (1 - fm$ml2) / n2[i]) - (2 * directionUpper[j] - 1) * boundaries[i] }, lower = 0, upper = 1, tol = .Machine$double.eps^0.5)$root }, error = function(e) { pi1Bound <<- NA_real_ }) futilityBoundsEffectScale[i,j] <- pi1Bound - pi2 # difference to pi2 } } } } else { boundaries <- design$criticalValues # calculate pi1 that solves (pi1 - thetaH0 * pi2) / SE(pi1 - thetaH0 * pi2) # = crit by using Farrington & Manning approach for (j in (1:nParameters)) { n1 <- allocationRatioPlanned[j] * design$informationRates * maxNumberOfSubjects[j] / (1 + allocationRatioPlanned[j]) n2 <- n1/allocationRatioPlanned[j] for (i in (1:length(boundaries))) { tryCatch({pi1Bound <- uniroot( function(x) { fm <- .getFarringtonManningValues(x, pi2, thetaH0, allocationRatioPlanned[j], method = "ratio") (x - thetaH0 * pi2) / sqrt(fm$ml1 * (1 - fm$ml1) / n1[i] + thetaH0^2 
* fm$ml2 * (1 - fm$ml2) / n2[i]) - (2 * directionUpper[j] - 1) * boundaries[i] }, lower = 0, upper = 1, tol = .Machine$double.eps^0.5)$root }, error = function(e) { pi1Bound <<- NA_real_ }) criticalValuesEffectScaleUpper[i,j] <- pi1Bound / pi2 # ratio to pi2 } if (design$sided == 2) { for (i in (1:length(boundaries))) { tryCatch({pi1Bound <- uniroot( function(x) { fm <- .getFarringtonManningValues(x, pi2, thetaH0, allocationRatioPlanned[j], method = "ratio") (x - thetaH0 * pi2) / sqrt(fm$ml1 * (1 - fm$ml1) / n1[i] + thetaH0^2 * fm$ml2 * (1 - fm$ml2) / n2[i]) + boundaries[i] }, lower = 0, upper = 1, tol = .Machine$double.eps^0.5)$root }, error = function(e) { pi1Bound <<- NA_real_ }) criticalValuesEffectScaleLower[i,j] <- pi1Bound / pi2 # ratio to pi2 } } } if (!.isTrialDesignFisher(design) && (design$sided == 1) && any(design$futilityBounds > -6)) { boundaries <- design$futilityBounds for (j in (1:nParameters)) { n1 <- allocationRatioPlanned[j] * design$informationRates * maxNumberOfSubjects[j] / (1 + allocationRatioPlanned[j]) n2 <- n1/allocationRatioPlanned[j] for (i in (1:length(boundaries))) { tryCatch({pi1Bound <- uniroot( function(x) { fm <- .getFarringtonManningValues(x, pi2, thetaH0, allocationRatioPlanned[j], method = "ratio") (x - thetaH0 * pi2) / sqrt(fm$ml1 * (1 - fm$ml1) / n1[i] + thetaH0^2 * fm$ml2 * (1 - fm$ml2) / n2[i]) - (2 * directionUpper[j] - 1) * boundaries[i] }, lower = 0, upper = 1, tol = .Machine$double.eps^0.5)$root }, error = function(e) { pi1Bound <<- NA_real_ }) futilityBoundsEffectScale[i,j] <- pi1Bound / pi2 # ratio to pi2 } } } } return(list( criticalValuesEffectScaleUpper = matrix(criticalValuesEffectScaleUpper, nrow = design$kMax), criticalValuesEffectScaleLower = matrix(criticalValuesEffectScaleLower, nrow = design$kMax), futilityBoundsEffectScale = matrix(futilityBoundsEffectScale, nrow = design$kMax - 1) )) } .getEffectScaleBoundaryDataSurvival <- function( design, thetaH0, eventsPerStage, allocationRatioPlanned, directionUpper) { if (design$kMax == 1) { nParameters <- length(eventsPerStage) } else { nParameters <- ncol(eventsPerStage) } directionUpper[is.na(directionUpper)] <- TRUE if (length(allocationRatioPlanned) == 1) { allocationRatioPlanned <- rep(allocationRatioPlanned, nParameters) } criticalValuesEffectScaleUpper <- matrix(, nrow = design$kMax, ncol = nParameters) criticalValuesEffectScaleLower <- matrix(, nrow = design$kMax, ncol = nParameters) futilityBoundsEffectScale <- matrix(, nrow = design$kMax - 1, ncol = nParameters) for (j in (1:nParameters)) { if (design$sided == 1) { criticalValuesEffectScaleUpper[,j] <- thetaH0 * (exp(design$criticalValues * (1 + allocationRatioPlanned[j]) / sqrt(allocationRatioPlanned[j] * eventsPerStage[,j])))^(2 * directionUpper[j] - 1) } else { criticalValuesEffectScaleUpper[,j] <- thetaH0 * (exp(design$criticalValues * (1 + allocationRatioPlanned[j]) / sqrt(allocationRatioPlanned[j] * eventsPerStage[,j]))) criticalValuesEffectScaleLower[,j] <- thetaH0 * (exp(-design$criticalValues * (1 + allocationRatioPlanned[j]) / sqrt(allocationRatioPlanned[j] * eventsPerStage[,j]))) } if (!.isTrialDesignFisher(design) && design$sided == 1 && any(design$futilityBounds > -6)) { futilityBoundsEffectScale[,j] <- thetaH0 * (exp(design$futilityBounds * (1 + allocationRatioPlanned[j]) / sqrt(allocationRatioPlanned[j] * eventsPerStage[1:(design$kMax - 1),j])))^(2 * directionUpper[j] - 1) } } return(list( criticalValuesEffectScaleUpper = matrix(criticalValuesEffectScaleUpper, nrow = design$kMax), criticalValuesEffectScaleLower = 
matrix(criticalValuesEffectScaleLower, nrow = design$kMax), futilityBoundsEffectScale = matrix(futilityBoundsEffectScale, nrow = design$kMax - 1) )) } #' @title #' Get Sample Size Means #' #' @description #' Returns the sample size for testing means in one or two samples. #' #' @param design The trial design. If no trial design is specified, a fixed sample size design is used. #' In this case, \code{alpha}, \code{beta}, \code{twoSidedPower}, #' and \code{sided} can be directly entered as argument. #' @param groups The number of treatment groups (1 or 2), default is \code{2}. #' @param normalApproximation If \code{normalApproximation = TRUE} is specified, the variance is #' assumed to be known, default is \code{FALSE}, i.e., the calculations are performed #' with the t distribution. #' @param meanRatio If \code{meanRatio = TRUE} is specified, the sample size for #' one-sided testing of H0: mu1/mu2 = thetaH0 is calculated, default is \code{FALSE}. #' @param thetaH0 The null hypothesis value. For one-sided testing, a value != 0 #' (or a value != 1 for testing the mean ratio) can be specified, default is #' \code{0} or \code{1} for difference and ratio testing, respectively. #' @param alternative The alternative hypothesis value. This can be a vector of assumed #' alternatives, default is \code{seq(0.2,1,0.2)}. #' @param stDev The standard deviation, default is \code{1}. If \code{meanRatio = TRUE} #' is specified, stDev defines the coefficient of variation sigma/mu2. #' @param allocationRatioPlanned The planned allocation ratio for a two treatment groups #' design, default is \code{1}. If \code{allocationRatioPlanned = 0} is entered, #' the optimal allocation ratio yielding the smallest overall sample size is determined. #' @param ... Ensures that all arguments are named and #' that a warning will be displayed if unknown arguments are passed. #' #' @details #' At the given design, the function calculates the stage-wise (non-cumulated) and maximum #' sample size for testing means. #' In a two treatment groups design, additionally, an allocation ratio = n1/n2 can be specified. #' A null hypothesis value thetaH0 != 0 for testing the difference of two means or #' thetaH0 != 1 for testing the ratio of two means can be specified. #' Critical bounds and stopping for futility bounds are provided at the effect scale #' (mean, mean difference, or mean ratio, respectively) for each sample size calculation separately. #' #' @return Returns a \code{\link{TrialDesignPlanMeans}} object.
#' #' @export #' #' @examples #' #' # Calculate sample sizes in a fixed sample size parallel group design #' # with allocation ratio n1/n2 = 2 for a range of alternative values 1,...,5 #' # with assumed standard deviation = 3.5; two-sided alpha = 0.05, power 1 - beta = 90%: #' getSampleSizeMeans(alpha = 0.05, beta = 0.1, sided = 2, groups = 2, #' alternative = seq(1, 5, 1), stDev = 3.5, allocationRatioPlanned = 2) #' #' # Calculate sample sizes in a three-stage Pocock paired comparison design testing #' # H0: mu = 2 for a range of alternative values 3,4,5 with assumed standard #' # deviation = 3.5; one-sided alpha = 0.05, power 1 - beta = 90%: #' getSampleSizeMeans(getDesignGroupSequential(typeOfDesign = "P", alpha = 0.05, #' sided = 1, beta = 0.1), groups = 1, thetaH0 = 2, #' alternative = seq(3, 5, 1), stDev = 3.5) #' getSampleSizeMeans <- function(design = NULL, ..., groups = 2, normalApproximation = FALSE, meanRatio = FALSE, thetaH0 = ifelse(meanRatio, 1, 0), alternative = C_ALTERNATIVE_DEFAULT, stDev = C_STDEV_DEFAULT, allocationRatioPlanned = NA_real_) { if (is.null(design)) { design <- .getDefaultDesignForSampleSizeCalculations(...) .warnInCaseOfUnknownArguments(functionName = "getSampleSizeMeans", ignore = c("alpha", "beta", "sided", "twoSidedPower"), ...) } else { .assertIsTrialDesign(design) .warnInCaseOfUnknownArguments(functionName = "getSampleSizeMeans", ...) .warnInCaseOfTwoSidedPowerArgument(...) } designPlan <- .createDesignPlanMeans(objectType = "sampleSize", design = design, normalApproximation = normalApproximation, meanRatio = meanRatio, thetaH0 = thetaH0, alternative = alternative, stDev = stDev, groups = groups, allocationRatioPlanned = allocationRatioPlanned, ...) return(.getSampleSize(designPlan)) } .getDefaultDesignForSampleSizeCalculations <- function(..., powerEnabled = FALSE) { ignore = c() alpha <- .getOptionalArgument("alpha", ...) if (is.null(alpha)) { alpha <- NA_real_ } else { ignore <- c(ignore, "alpha") } beta <- .getOptionalArgument("beta", ...) if (is.null(beta)) { beta <- NA_real_ } else { ignore <- c(ignore, "beta") } sided <- .getOptionalArgument("sided", ...) if (is.null(sided)) { sided <- 1L } else { ignore <- c(ignore, "sided") } twoSidedPower <- .getOptionalArgument("twoSidedPower", ...) if (is.null(twoSidedPower)) { if (powerEnabled && sided == 2) { twoSidedPower <- TRUE } else { twoSidedPower <- C_TWO_SIDED_POWER_DEFAULT } } else { ignore <- c(ignore, "twoSidedPower") } .warnInCaseOfUnknownArguments(functionName = ".getDefaultDesignForSampleSizeCalculations", ignore = ignore, ...) design <- getDesignGroupSequential(kMax = 1, alpha = alpha, beta = beta, sided = sided, twoSidedPower = twoSidedPower) return(design) } .warnInCaseOfTwoSidedPowerArgument <- function(...) { if ("twoSidedPower" %in% names(list(...))) { warning("'twoSidedPower' can only be defined in 'design'", call. = FALSE) } } .warnInCaseOfTwoSidedPowerIsDisabled <- function(design) { if (design$sided == 2 && !is.na(design$twoSidedPower) && !design$twoSidedPower && design$.getParameterType("twoSidedPower") == C_PARAM_USER_DEFINED) { warning("design$twoSidedPower = FALSE will be ignored because design$sided = 2", call. = FALSE) } } #' @title #' Get Sample Size Rates #' #' @description #' Returns the sample size for testing rates in one or two samples. #' #' @param design The trial design. If no trial design is specified, a fixed sample size design is used. #' In this case, \code{alpha}, \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as argument. 
#' @param groups The number of treatment groups (1 or 2), default is \code{2}. #' @param normalApproximation If \code{normalApproximation = FALSE} is specified, the sample size #' for the case of one treatment group is calculated exactly using the binomial distribution, #' default is \code{TRUE}. #' @param riskRatio If \code{riskRatio = TRUE} is specified, the sample size for one-sided #' testing of H0: \code{pi1/pi2 = thetaH0} is calculated, default is \code{FALSE}. #' @param thetaH0 The null hypothesis value. For one-sided testing, a value != 0 #' (or != 1 for testing the risk ratio \code{pi1/pi2}) can be specified, default is \code{0} or \code{1} for difference and ratio testing, respectively. #' @param pi1 The assumed probability in the active treatment group if two treatment groups #' are considered, or the alternative probability for a one treatment group design, #' default is \code{seq(0.4,0.6,0.1)}. #' @param pi2 The assumed probability in the reference group if two treatment groups are considered, default is \code{0.2}. #' @param allocationRatioPlanned The planned allocation ratio for a two treatment groups design. \cr #' If \code{allocationRatioPlanned = 0} is entered, the optimal allocation ratio yielding the #' smallest overall sample size is determined, default is \code{1}. #' @param ... Ensures that all arguments are named and #' that a warning will be displayed if unknown arguments are passed. #' #' @details #' At the given design, the function calculates the stage-wise (non-cumulated) and maximum sample size for testing rates. #' In a two treatment groups design, additionally, an allocation ratio = n1/n2 can be specified. #' If a null hypothesis value thetaH0 != 0 for testing the difference of two rates or #' thetaH0 != 1 for testing the risk ratio is specified, the sample size #' formula according to Farrington & Manning (Statistics in Medicine, 1990) is used. #' Critical bounds and stopping for futility bounds are provided at the effect scale #' (rate, rate difference, or rate ratio, respectively) for each sample size calculation separately. #' For the two-sample case, the calculation here is performed at the fixed pi2 that is given as argument #' in the function. #' #' @return Returns a \code{\link{TrialDesignPlanRates}} object.
#' #' @export #' #' @examples #' #' # Calculate the stage-wise sample sizes, maximum sample sizes, and the optimum #' # allocation ratios for a range of pi1 values when testing #' # H0: pi1 - pi2 = -0.1 within a two-stage O'Brien & Fleming design; #' # alpha = 0.05 one-sided, power 1 - beta = 90%: #' getSampleSizeRates(design = getDesignGroupSequential(kMax = 2, alpha = 0.05, beta = 0.1, #' sided = 1), groups = 2, thetaH0 = -0.1, pi1 = seq(0.4, 0.55, 0.025), #' pi2 = 0.4, allocationRatioPlanned = 0) #' #' # Calculate the stage-wise sample sizes, maximum sample sizes, and the optimum #' # allocation ratios for a range of pi1 values when testing #' # H0: pi1 / pi2 = 0.80 within a three-stage O'Brien & Fleming design; #' # alpha = 0.025 one-sided, power 1 - beta = 90%: #' getSampleSizeRates(getDesignGroupSequential(kMax = 3, alpha = 0.025, beta = 0.1, #' sided = 1), groups = 2, riskRatio = TRUE, thetaH0 = 0.80, pi1 = seq(0.3,0.5,0.025), #' pi2 = 0.3, allocationRatioPlanned = 0) #' getSampleSizeRates <- function(design = NULL, ..., groups = 2, normalApproximation = TRUE, riskRatio = FALSE, thetaH0 = ifelse(riskRatio, 1, 0), pi1 = seq(0.4, 0.6, 0.1), pi2 = 0.2, allocationRatioPlanned = NA_real_) { if (is.null(design)) { design <- .getDefaultDesignForSampleSizeCalculations(...) .warnInCaseOfUnknownArguments(functionName = "getSampleSizeRates", ignore = c("alpha", "beta", "sided", "twoSidedPower"), ...) } else { .assertIsTrialDesign(design) .warnInCaseOfUnknownArguments(functionName = "getSampleSizeRates", ...) .warnInCaseOfTwoSidedPowerArgument(...) } designPlan <- .createDesignPlanRates(objectType = "sampleSize", design = design, normalApproximation = normalApproximation, riskRatio = riskRatio, thetaH0 = thetaH0, pi1 = pi1, pi2 = pi2, groups = groups, allocationRatioPlanned = allocationRatioPlanned, ...) return(.getSampleSize(designPlan)) } #' @title #' Get Sample Size Survival #' #' @description #' Returns the sample size for testing the hazard ratio in a two treatment groups survival design. #' #' @param design The trial design. If no trial design is specified, a fixed sample size design is used. #' In this case, \code{alpha}, \code{beta}, \code{twoSidedPower}, and #' \code{sided} can be directly entered as argument. #' @param typeOfComputation Three options are available: "Schoenfeld", "Freedman", "HsiehFreedman", #' the default is "Schoenfeld". For details, see Hsieh (Statistics in Medicine, 1992). #' For non-inferiority testing (i.e., thetaH0 != 1), only Schoenfeld's formula can be used. #' @param thetaH0 The null hypothesis value. The default value is \code{1}. For one-sided testing, #' a bound for testing H0: hazard ratio = thetaH0 != 1 can be specified. #' @param pi1 The assumed event rate in the active treatment group, default is \code{seq(0.4,0.6,0.1)}. #' @param pi2 The assumed event rate in the control group, default is \code{0.2}. #' @param lambda1 The assumed hazard rate in the treatment group, there is no default. #' lambda1 can also be used to define piecewise exponentially distributed survival times #' (see details). #' @param lambda2 The assumed hazard rate in the reference group, there is no default. #' lambda2 can also be used to define piecewise exponentially distributed survival times #' (see details). #' @param median1 The assumed median survival time in the treatment group, there is no default. #' @param median2 The assumed median survival time in the reference group, there is no default.
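#' For exponential survival times (\code{kappa = 1}), the median and the hazard rate are
#' related by \code{median = log(2) / lambda}; see also \code{\link{getMedianByLambda}}
#' and \code{\link{getLambdaByMedian}}.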
#' @param piecewiseSurvivalTime A vector that specifies the time intervals for the piecewise #' definition of the exponential survival time cumulative distribution function (see details). #' @param hazardRatio The vector of hazard ratios under consideration. #' If the event or hazard rates in both treatment groups are defined, the hazard ratio does not need #' to be specified as it is calculated. #' @param kappa The shape parameter of the Weibull distribution, default is \code{1}. #' The Weibull distribution cannot be used for the piecewise definition of the #' survival time distribution. #' Note that the parameters \code{shape} and \code{scale} in \code{\link[stats]{Weibull}} #' are equivalent to \code{kappa} and \code{1 / lambda}, respectively, in rpact. #' @param allocationRatioPlanned The planned allocation ratio, default is \code{1}. #' If \code{allocationRatioPlanned = 0} is entered, the optimal allocation ratio yielding the #' smallest number of subjects is determined. #' @param accountForObservationTimes If \code{accountForObservationTimes = TRUE}, the number of #' subjects is calculated assuming specific accrual and follow-up time, default is \code{TRUE} #' (see details). #' @param eventTime The assumed time under which the event rates are calculated, default is \code{12}. #' @param accrualTime The assumed accrual time intervals for the study, default is #' \code{c(0,12)} (see details). #' @param accrualIntensity A vector of accrual intensities, default is the relative #' intensity \code{0.1} (see details). #' @param followUpTime The assumed (additional) follow-up time for the study, default is \code{6}. #' The total study duration is \code{accrualTime + followUpTime}. #' @param dropoutRate1 The assumed drop-out rate in the treatment group, default is \code{0}. #' @param dropoutRate2 The assumed drop-out rate in the control group, default is \code{0}. #' @param dropoutTime The assumed time for drop-out rates in the control and the #' treatment group, default is \code{12}. #' @param maxNumberOfSubjects If \code{maxNumberOfSubjects > 0} is specified, #' the follow-up time for the required number of events is determined. #' @param ... Ensures that all arguments are named and #' that a warning will be displayed if unknown arguments are passed. #' #' @details #' At the given design, the function calculates the number of events and an estimate for the #' necessary number of subjects for testing the hazard ratio in a survival design. #' It also calculates the time when the required events are expected under the given #' assumptions (exponentially, piecewise exponentially, or Weibull distributed survival times #' and constant or non-constant piecewise accrual). #' Additionally, an allocation ratio = n1/n2 can be specified where n1 and n2 are the number #' of subjects in the two treatment groups. #' #' The formula of Kim & Tsiatis (Biometrics, 1990) #' is used to calculate the expected number of events under the alternative #' (see also Lakatos & Lan, Statistics in Medicine, 1992). These formulas are generalized #' to piecewise survival times and non-constant piecewise accrual over time.\cr #' If \code{accountForObservationTimes = FALSE}, only the event rates are used for the calculation #' of the maximum number of subjects. #' #' \code{piecewiseSurvivalTime} #' The first element of this vector must be equal to \code{0}. \code{piecewiseSurvivalTime} can also #' be a list that combines the definition of the time intervals and hazard rates in the reference group.
#' The definition of the survival time in the treatment group is obtained by the specification #' of the hazard ratio (see examples for details). #' #' \code{accrualTime} can also be used to define a non-constant accrual over time. #' For this, \code{accrualTime} needs to be a vector that defines the accrual intervals and #' \code{accrualIntensity} needs to be specified. The first element of #' \code{accrualTime} must be equal to 0.\cr #' \code{accrualTime} can also be a list that combines the definition of the accrual time and #' accrual intensity \code{accrualIntensity} (see below and examples for details). #' If the length of \code{accrualTime} and the length of \code{accrualIntensity} are #' the same (i.e., the end of accrual is undefined), \code{maxNumberOfSubjects > 0} needs to #' be specified and the end of accrual is calculated. #' #' \code{accrualIntensity} needs to be defined if a vector of \code{accrualTime} is specified.\cr #' If the length of \code{accrualTime} and the length of \code{accrualIntensity} are the same #' (i.e., the end of accrual is undefined), \code{maxNumberOfSubjects > 0} needs to be specified #' and the end of accrual is calculated. #' In that case, \code{accrualIntensity} is given by the number of subjects per time unit.\cr #' If the length of \code{accrualTime} equals the length of \code{accrualIntensity} + 1 #' (i.e., the end of accrual is defined), \code{maxNumberOfSubjects} is calculated. \cr #' If all elements in \code{accrualIntensity} are smaller than 1, \code{accrualIntensity} defines #' the *relative* intensity with which subjects enter the trial, and \code{maxNumberOfSubjects} must be #' given or can be calculated at a given follow-up time. #' For example, \code{accrualIntensity = c(0.1, 0.2)} specifies that in the second accrual interval #' the intensity is doubled as compared to the first accrual interval. The actual accrual intensity #' is calculated for the given (or calculated) \code{maxNumberOfSubjects}. #' Note that the default is \code{accrualIntensity = 0.1} meaning that the *absolute* accrual intensity #' will be calculated. #' #' \code{accountForObservationTimes} can be set to \code{FALSE}. In this case, #' the number of subjects is calculated from the event probabilities only. #' This kind of computation does not account for the specific accrual pattern and survival distribution. #' #' @return Returns a \code{\link{TrialDesignPlanSurvival}} object. #' #' @export #' #' @examples #' #' # Fixed sample size trial with median survival 20 vs. 30 months in treatment and #' # reference group, respectively, alpha = 0.05 (two-sided), and power 1 - beta = 90%. #' # 20 subjects will be recruited per month up to 400 subjects, i.e., accrual time is 20 months.
#' getSampleSizeSurvival(alpha = 0.05, sided = 2, beta = 0.1, lambda1 = log(2) / 20, #' lambda2 = log(2) / 30, accrualTime = c(0,20), accrualIntensity = 20) #' #' \donttest{ #' #' # Fixed sample size with minimum required definitions, pi1 = c(0.4,0.5,0.6) and #' # pi2 = 0.2 at event time 12, accrual time 12 and follow-up time 6 as default, #' # only alpha = 0.01 is specified #' getSampleSizeSurvival(alpha = 0.01) #' #' # Four stage O'Brien & Fleming group sequential design with minimum required #' # definitions, pi1 = c(0.4,0.5,0.6) and pi2 = 0.2 at event time 12, #' # accrual time 12 and follow-up time 6 as default #' getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 4)) #' #' # For fixed sample design, determine necessary accrual time if 200 subjects and #' # 30 subjects per time unit can be recruited #' getSampleSizeSurvival(accrualTime = c(0), accrualIntensity = c(30), #' maxNumberOfSubjects = 200) #' #' # Determine necessary accrual time if 200 subjects and if the first 6 time units #' # 20 subjects per time unit can be recruited, then 30 subjects per time unit #' getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(20, 30), #' maxNumberOfSubjects = 200) #' #' # Determine maximum number of Subjects if the first 6 time units 20 subjects #' # per time unit can be recruited, and after 10 time units 30 subjects per time unit #' getSampleSizeSurvival(accrualTime = c(0, 6, 10), accrualIntensity = c(20, 30)) #' #' # Specify accrual time as a list #' at <- list( #' "0 - <6" = 20, #' "6 - Inf" = 30) #' getSampleSizeSurvival(accrualTime = at, maxNumberOfSubjects = 200) #' #' # Specify accrual time as a list, if maximum number of subjects need to be calculated #' at <- list( #' "0 - <6" = 20, #' "6 - <=10" = 30) #' getSampleSizeSurvival(accrualTime = at) #' #' # Specify effect size for a two-stage group design with O'Brien & Fleming boundaries #' # Effect size is based on event rates at specified event time #' # needs to be specified because it should be shown that hazard ratio < 1 #' getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), #' pi1 = 0.2, pi2 = 0.3, eventTime = 24) #' #' # Effect size is based on event rate at specified event #' # time for the reference group and hazard ratio #' getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), #' hazardRatio = 0.5, pi2 = 0.3, eventTime = 24) #' #' # Effect size is based on hazard rate for the reference group and hazard ratio #' getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), #' hazardRatio = 0.5, lambda2 = 0.02) #' #' # Specification of piecewise exponential survival time and hazard ratios #' getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), #' piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), #' hazardRatio = c(1.5, 1.8, 2)) #' #' # Specification of piecewise exponential survival time as a list and hazard ratios #' pws <- list( #' "0 - <5" = 0.01, #' "5 - <10" = 0.02, #' ">=10" = 0.04) #' getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), #' piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2)) #' #' # Specification of piecewise exponential survival time for both treatment arms #' getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), #' piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), #' lambda1 = c(0.015, 0.03, 0.06)) #' #' # Specification of piecewise exponential survival time as a list #' pws <- list( #' "0 - <5" = 0.01, #' "5 - <10" = 0.02, #' ">=10" = 0.04) #' 
getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), #' piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2)) #' #' # Specify effect size based on median survival times #' getSampleSizeSurvival(median1 = 5, median2 = 3) #' #' # Specify effect size based on median survival times of a Weibull distribution with kappa = 2 #' getSampleSizeSurvival(median1 = 5, median2 = 3, kappa = 2) #' #' # Identify the minimal and maximal number of subjects required to #' # reach the required events in spite of dropouts #' getSampleSizeSurvival(accrualTime = c(0, 18), accrualIntensity = c(20, 30), #' lambda2 = 0.4, lambda1 = 0.3, followUpTime = Inf, dropoutRate1 = 0.001, #' dropoutRate2 = 0.005) #' getSampleSizeSurvival(accrualTime = c(0, 18), accrualIntensity = c(20, 30), #' lambda2 = 0.4, lambda1 = 0.3, followUpTime = 0, dropoutRate1 = 0.001, #' dropoutRate2 = 0.005) #' #' } #' getSampleSizeSurvival <- function(design = NULL, ..., typeOfComputation = c("Schoenfeld", "Freedman", "HsiehFreedman"), thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, pi1 = NA_real_, pi2 = NA_real_, lambda1 = NA_real_, lambda2 = NA_real_, median1 = NA_real_, median2 = NA_real_, kappa = 1, hazardRatio = NA_real_, piecewiseSurvivalTime = NA_real_, allocationRatioPlanned = NA_real_, accountForObservationTimes = TRUE, eventTime = C_EVENT_TIME_DEFAULT, accrualTime = C_ACCRUAL_TIME_DEFAULT, accrualIntensity = C_ACCRUAL_INTENSITY_DEFAULT, followUpTime = NA_real_, maxNumberOfSubjects = NA_real_, dropoutRate1 = C_DROP_OUT_RATE_1_DEFAULT, dropoutRate2 = C_DROP_OUT_RATE_2_DEFAULT, dropoutTime = C_DROP_OUT_TIME_DEFAULT) { if (is.null(design)) { design <- .getDefaultDesignForSampleSizeCalculations(...) .warnInCaseOfUnknownArguments(functionName = "getSampleSizeSurvival", ignore = c("alpha", "beta", "sided", "twoSidedPower"), ...) } else { .assertIsTrialDesign(design) .warnInCaseOfUnknownArguments(functionName = "getSampleSizeSurvival", ...) .warnInCaseOfTwoSidedPowerArgument(...)
} if (!is.na(maxNumberOfSubjects) && maxNumberOfSubjects == 0) { maxNumberOfSubjects <- NA_real_ } # identify accrual time case accrualSetup <- getAccrualTime(accrualTime = accrualTime, accrualIntensity = accrualIntensity, maxNumberOfSubjects = maxNumberOfSubjects, showWarnings = FALSE) accrualSetup$.validate() if (!accrualSetup$maxNumberOfSubjectsCanBeCalculatedDirectly && accrualSetup$followUpTimeMustBeUserDefined) { if (is.na(followUpTime)) { if (accrualSetup$piecewiseAccrualEnabled && !accrualSetup$endOfAccrualIsUserDefined) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'followUpTime', 'maxNumberOfSubjects' or end of accrual must be defined") } stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'followUpTime' or 'maxNumberOfSubjects' must be defined") } if (followUpTime == Inf) { followUpTime <- 1e12 } if (!any(is.na(hazardRatio)) && !is.na(thetaH0)) { .assertIsValidHazardRatio(hazardRatio, thetaH0) } pwst <- getPiecewiseSurvivalTime( piecewiseSurvivalTime = piecewiseSurvivalTime, lambda1 = lambda1, lambda2 = lambda2, pi1 = pi1, pi2 = pi2, median1 = median1, median2 = median2, hazardRatio = hazardRatio, eventTime = eventTime, kappa = kappa, .silent = TRUE) paramName <- NULL if (!pwst$piecewiseSurvivalEnabled) { if (pwst$.getParameterType("pi1") == C_PARAM_USER_DEFINED || pwst$.getParameterType("pi1") == C_PARAM_DEFAULT_VALUE || pwst$.getParameterType("pi2") == C_PARAM_USER_DEFINED) { paramName <- "pi1" } else if (pwst$.getParameterType("lambda1") == C_PARAM_USER_DEFINED || pwst$.getParameterType("lambda2") == C_PARAM_USER_DEFINED) { paramName <- "lambda1" } else if (pwst$.getParameterType("hazardRatio") == C_PARAM_USER_DEFINED) { paramName <- "hazardRatio" } else if (pwst$.getParameterType("median1") == C_PARAM_USER_DEFINED || pwst$.getParameterType("median2") == C_PARAM_USER_DEFINED) { paramName <- "median1" } } else if (pwst$.getParameterType("hazardRatio") == C_PARAM_USER_DEFINED) { paramName <- "hazardRatio" } if (!is.null(paramName)) { paramValue <- pwst[[paramName]] if (!is.null(paramValue) && length(paramValue) > 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the calculation of 'maxNumberOfSubjects' for given 'followUpTime' ", "is only available for a single '", paramName, "'; ", paramName, " = ", .arrayToString( paramValue, vectorLookAndFeelEnabled = TRUE)) } } hr <- hazardRatio if (all(is.na(hazardRatio))) { hr <- pwst$hazardRatio } if (all(is.na(hazardRatio))) { .assertIsValidHazardRatio(hr, thetaH0) } maxNumberOfSubjectsTarget <- NA_real_ withCallingHandlers({ # search for accrual time that provides a result at <- accrualSetup$accrualTime additionalAccrual <- 1 searchAccrualTimeEnabled <- TRUE maxSearchIterations <- 50 maxNumberOfSubjectsLower <- NA_real_ maxNumberOfSubjectsLowerBefore <- 0 sampleSize <- NULL expectionMessage <- NA_character_ while (searchAccrualTimeEnabled && maxSearchIterations >= 0 && (is.na(maxNumberOfSubjectsLower) || maxNumberOfSubjectsLower < maxNumberOfSubjectsLowerBefore || maxNumberOfSubjectsLower < 1e8)) { tryCatch({ maxNumberOfSubjectsLowerBefore <- ifelse(is.na(maxNumberOfSubjectsLower), 0, maxNumberOfSubjectsLower) maxNumberOfSubjectsLower <- getAccrualTime( accrualTime = c(at, at[length(at)] + additionalAccrual), accrualIntensity = accrualSetup$accrualIntensity)$maxNumberOfSubjects additionalAccrual <- 2 * additionalAccrual sampleSize <- .getSampleSizeSurvival(design = design, typeOfComputation = typeOfComputation, thetaH0 = thetaH0, pi2 = pi2, pi1 = pi1, allocationRatioPlanned = allocationRatioPlanned, accountForObservationTimes =
accountForObservationTimes, eventTime = eventTime, accrualTime = accrualSetup$accrualTime, accrualIntensity = accrualSetup$accrualIntensity, kappa = kappa, piecewiseSurvivalTime = piecewiseSurvivalTime, lambda2 = lambda2, lambda1 = lambda1, median1 = median1, median2 = median2, followUpTime = NA_real_, maxNumberOfSubjects = maxNumberOfSubjectsLower, dropoutRate1 = dropoutRate1, dropoutRate2 = dropoutRate2, dropoutTime = dropoutTime, hazardRatio = hazardRatio) searchAccrualTimeEnabled <- FALSE }, error = function(e) { expectionMessage <<- e$message }) maxSearchIterations <- maxSearchIterations - 1 } if (is.null(sampleSize) || is.na(sampleSize$followUpTime)) { if (!is.na(expectionMessage) && grepl("'allocationRatioPlanned' > 0", expectionMessage)) { stop(expectionMessage, call. = FALSE) } stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'additionalAccrual' could not be found, change accrual time specification", call. = FALSE) } # define lower bound for maxNumberOfSubjects maxNumberOfSubjectsLower <- ceiling(max(na.omit(c(sampleSize$eventsFixed, as.vector(sampleSize$eventsPerStage))))) if (is.na(maxNumberOfSubjectsLower)) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'maxNumberOfSubjectsLower' could not be found", call. = FALSE) } # check whether accrual time already fulfills requirement # (followUpTime < given value) or need to be increased, # then define upper bound for maxNumberOfSubjects maxSearchIterations <- 50 maxNumberOfSubjectsUpper <- NA_real_ fut <- sampleSize$followUpTime if (fut <= followUpTime) { fut <- 2 * followUpTime } while (!is.na(fut) && fut > followUpTime && maxSearchIterations >= 0) { maxNumberOfSubjectsUpper <- getAccrualTime( accrualTime = c(at, at[length(at)] + additionalAccrual), accrualIntensity = accrualSetup$accrualIntensity)$maxNumberOfSubjects additionalAccrual <- 2 * additionalAccrual fut <- .getSampleSizeSurvival(design = design, typeOfComputation = typeOfComputation, thetaH0 = thetaH0, pi2 = pi2, pi1 = pi1, allocationRatioPlanned = allocationRatioPlanned, accountForObservationTimes = accountForObservationTimes, eventTime = eventTime, accrualTime = accrualSetup$accrualTime, accrualIntensity = accrualSetup$accrualIntensity, kappa = kappa, piecewiseSurvivalTime = piecewiseSurvivalTime, lambda2 = lambda2, lambda1 = lambda1, median1 = median1, median2 = median2, followUpTime = NA_real_, maxNumberOfSubjects = maxNumberOfSubjectsUpper, dropoutRate1 = dropoutRate1, dropoutRate2 = dropoutRate2, dropoutTime = dropoutTime, hazardRatio = hazardRatio)$followUpTime maxSearchIterations <- maxSearchIterations - 1 } if (is.na(maxNumberOfSubjectsUpper)) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'maxNumberOfSubjectsUpper' could not be found ", "(fut = ", fut, ", followUpTime = ", followUpTime, ")", call. 
= FALSE) } # use maxNumberOfSubjectsLower and maxNumberOfSubjectsUpper to find end of accrual if (dropoutRate1 != 0 || dropoutRate2 != 0) { prec <- 1 maxSearchIterations <- 50 while (prec > 1e-04 && maxSearchIterations >= 0) { maxNumberOfSubjectsTarget <- (maxNumberOfSubjectsLower + maxNumberOfSubjectsUpper) / 2 fut <- .getSampleSizeSurvival(design = design, typeOfComputation = typeOfComputation, thetaH0 = thetaH0, pi2 = pi2, pi1 = pi1, allocationRatioPlanned = allocationRatioPlanned, accountForObservationTimes = accountForObservationTimes, eventTime = eventTime, accrualTime = accrualSetup$accrualTime, accrualIntensity = accrualSetup$accrualIntensity, kappa = kappa, piecewiseSurvivalTime = piecewiseSurvivalTime, lambda2 = lambda2, lambda1 = lambda1, median1 = median1, median2 = median2, followUpTime = NA_real_, maxNumberOfSubjects = maxNumberOfSubjectsTarget, dropoutRate1 = dropoutRate1, dropoutRate2 = dropoutRate2, dropoutTime = dropoutTime, hazardRatio = hazardRatio)$followUpTime ifelse(fut <= followUpTime, maxNumberOfSubjectsUpper <- maxNumberOfSubjectsTarget, maxNumberOfSubjectsLower <- maxNumberOfSubjectsTarget) prec <- maxNumberOfSubjectsUpper - maxNumberOfSubjectsLower maxSearchIterations <- maxSearchIterations - 1 } } else { maxNumberOfSubjectsTarget <- .getOneDimensionalRootBisectionMethod( f = function(x) { fut <- .getSampleSizeSurvival(design = design, typeOfComputation = typeOfComputation, thetaH0 = thetaH0, pi2 = pi2, pi1 = pi1, allocationRatioPlanned = allocationRatioPlanned, accountForObservationTimes = accountForObservationTimes, eventTime = eventTime, accrualTime = accrualSetup$accrualTime, accrualIntensity = accrualSetup$accrualIntensity, kappa = kappa, piecewiseSurvivalTime = piecewiseSurvivalTime, lambda2 = lambda2, lambda1 = lambda1, median1 = median1, median2 = median2, followUpTime = NA_real_, maxNumberOfSubjects = x, dropoutRate1 = dropoutRate1, dropoutRate2 = dropoutRate2, dropoutTime = dropoutTime, hazardRatio = hazardRatio)$followUpTime return(followUpTime - fut) }, lower = maxNumberOfSubjectsLower, upper = maxNumberOfSubjectsUpper, tolerance = 1e-04, acceptResultsOutOfTolerance = TRUE, maxSearchIterations = 50, direction = 0, suppressWarnings = FALSE) } }, warning = function(w) { invokeRestart("muffleWarning") }) if (is.na(maxNumberOfSubjectsTarget)) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "failed to calculate 'maxNumberOfSubjects' by given 'followUpTime' ", "(lower = ", maxNumberOfSubjectsLower, ", upper = ", maxNumberOfSubjectsUpper, ")") } sampleSizeSurvival <- .getSampleSizeSurvival(design = design, typeOfComputation = typeOfComputation, thetaH0 = thetaH0, pi2 = pi2, pi1 = pi1, allocationRatioPlanned = allocationRatioPlanned, accountForObservationTimes = accountForObservationTimes, eventTime = eventTime, accrualTime = accrualSetup$accrualTime, accrualIntensity = accrualSetup$accrualIntensity, kappa = kappa, piecewiseSurvivalTime = piecewiseSurvivalTime, lambda2 = lambda2, lambda1 = lambda1, median1 = median1, median2 = median2, followUpTime = NA_real_, maxNumberOfSubjects = maxNumberOfSubjectsTarget, dropoutRate1 = dropoutRate1, dropoutRate2 = dropoutRate2, dropoutTime = dropoutTime, hazardRatio = hazardRatio) sampleSizeSurvival$.setParameterType("followUpTime", C_PARAM_USER_DEFINED) if (!is.na(sampleSizeSurvival$followUpTime)) { if (followUpTime == 1e12) { followUpTime <- Inf } if (sampleSizeSurvival$followUpTime >= -1e-02 && sampleSizeSurvival$followUpTime <= 1e-02) { sampleSizeSurvival$followUpTime <- 0 } if (sampleSizeSurvival$followUpTime < followUpTime 
- 1e-02 || sampleSizeSurvival$followUpTime > followUpTime + 1e-02) { sampleSizeSurvival$.setParameterType("followUpTime", C_PARAM_GENERATED) warning("User defined 'followUpTime' (", followUpTime, ") ignored because ", "follow-up time is ", round(sampleSizeSurvival$followUpTime, 4), call. = FALSE) } } sampleSizeSurvival$.setParameterType("maxNumberOfSubjects", C_PARAM_GENERATED) sampleSizeSurvival$.setParameterType("accrualTime", C_PARAM_GENERATED) return(sampleSizeSurvival) } return(.getSampleSizeSurvival(design = design, typeOfComputation = typeOfComputation, thetaH0 = thetaH0, pi2 = pi2, pi1 = pi1, allocationRatioPlanned = allocationRatioPlanned, accountForObservationTimes = accountForObservationTimes, eventTime = eventTime, accrualTime = accrualTime, accrualIntensity = accrualIntensity, kappa = kappa, piecewiseSurvivalTime = piecewiseSurvivalTime, lambda2 = lambda2, lambda1 = lambda1, median1 = median1, median2 = median2, followUpTime = followUpTime, maxNumberOfSubjects = maxNumberOfSubjects, dropoutRate1 = dropoutRate1, dropoutRate2 = dropoutRate2, dropoutTime = dropoutTime, hazardRatio = hazardRatio, ...)) } .getSampleSizeSurvival <- function(design = NULL, ..., typeOfComputation = c("Schoenfeld", "Freedman", "HsiehFreedman"), thetaH0 = 1, pi2 = NA_real_, pi1 = NA_real_, allocationRatioPlanned = NA_real_, accountForObservationTimes = TRUE, eventTime = C_EVENT_TIME_DEFAULT, accrualTime = C_ACCRUAL_TIME_DEFAULT, accrualIntensity = C_ACCRUAL_INTENSITY_DEFAULT, kappa = 1, piecewiseSurvivalTime = NA_real_, lambda2 = NA_real_, lambda1 = NA_real_, median1 = NA_real_, median2 = NA_real_, followUpTime = NA_real_, maxNumberOfSubjects = NA_real_, dropoutRate1 = 0, dropoutRate2 = dropoutRate1, dropoutTime = NA_real_, hazardRatio = NA_real_) { designPlan <- .createDesignPlanSurvival(objectType = "sampleSize", design = design, typeOfComputation = typeOfComputation, thetaH0 = thetaH0, pi2 = pi2, pi1 = pi1, allocationRatioPlanned = allocationRatioPlanned, accountForObservationTimes = accountForObservationTimes, eventTime = eventTime, accrualTime = accrualTime, accrualIntensity = accrualIntensity, kappa = kappa, piecewiseSurvivalTime = piecewiseSurvivalTime, lambda2 = lambda2, lambda1 = lambda1, median1 = median1, median2 = median2, followUpTime = followUpTime, maxNumberOfSubjects = maxNumberOfSubjects, dropoutRate1 = dropoutRate1, dropoutRate2 = dropoutRate2, dropoutTime = dropoutTime, hazardRatio = hazardRatio) return(.getSampleSize(designPlan)) } .createDesignPlanSurvival = function(objectType = c("power", "sampleSize"), ..., design, typeOfComputation = c("Schoenfeld", "Freedman", "HsiehFreedman"), thetaH0, pi2, pi1, allocationRatioPlanned, accountForObservationTimes, eventTime, accrualTime, accrualIntensity, kappa, piecewiseSurvivalTime, lambda2, lambda1, median1, median2, followUpTime = NA_real_, directionUpper = NA, maxNumberOfEvents = NA_real_, maxNumberOfSubjects, dropoutRate1, dropoutRate2, dropoutTime, hazardRatio) { objectType <- match.arg(objectType) typeOfComputation <- match.arg(typeOfComputation) .assertIsTrialDesignInverseNormalOrGroupSequential(design) .assertIsValidAlphaAndBeta(design$alpha, design$beta) .assertIsValidSidedParameter(design$sided) .assertIsSingleLogical(accountForObservationTimes, "accountForObservationTimes", naAllowed = TRUE) .assertIsSingleNumber(thetaH0, "thetaH0") .assertIsValidThetaH0(thetaH0, endpoint = "survival", groups = 2) .assertIsValidKappa(kappa) directionUpper <- .assertIsValidDirectionUpper(directionUpper, design$sided, objectType) if (objectType == 
"power") { .assertIsSingleNumber(maxNumberOfEvents, "maxNumberOfEvents") .assertIsInClosedInterval(maxNumberOfEvents, "maxNumberOfEvents", lower = 1, upper = maxNumberOfSubjects) } if (!any(is.na(pi1)) && (any(pi1 <= 0) || any(pi1 >= 1))) { stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "event rate 'pi1' (", .arrayToString(pi1), ") is out of bounds (0; 1)") } if (!any(is.na(pi2)) && (any(pi2 <= 0) || any(pi2 >= 1))) { stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "event rate 'pi2' (", .arrayToString(pi2), ") is out of bounds (0; 1)") } if (design$sided == 2 && thetaH0 != 1) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "two-sided case is implemented for superiority testing only (i.e., thetaH0 = 1)") } if (thetaH0 <= 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "null hypothesis hazard ratio is not allowed be negative or zero") } if (!(typeOfComputation %in% c("Schoenfeld", "Freedman", "HsiehFreedman"))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "computation type ('", typeOfComputation, "') must be one of the following: ", "'Schoenfeld', 'Freedman', or 'HsiehFreedman' ") } if (typeOfComputation != "Schoenfeld" && thetaH0 != 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "Freedman test calculation is possible only for superiority testing (thetaH0 != 1)") } if (is.numeric(accrualTime) && all(is.na(accrualTime))) { accrualTime <- C_ACCRUAL_TIME_DEFAULT } if (all(is.na(accrualIntensity))) { accrualIntensity <- C_ACCRUAL_INTENSITY_DEFAULT } accrualSetup <- getAccrualTime(accrualTime = accrualTime, accrualIntensity = accrualIntensity, maxNumberOfSubjects = maxNumberOfSubjects) accrualSetup$.validate() if (is.na(allocationRatioPlanned)) { allocationRatioPlanned <- C_ALLOCATION_RATIO_DEFAULT } .assertIsValidAllocationRatioPlannedSampleSize(allocationRatioPlanned, maxNumberOfSubjects) designPlan <- TrialDesignPlanSurvival( design = design, typeOfComputation = typeOfComputation, thetaH0 = thetaH0, allocationRatioPlanned = allocationRatioPlanned, accountForObservationTimes = accountForObservationTimes, eventTime = eventTime, accrualTime = accrualSetup$.getAccrualTimeWithoutLeadingZero(), accrualIntensity = accrualSetup$accrualIntensity, kappa = kappa, followUpTime = followUpTime, maxNumberOfSubjects = maxNumberOfSubjects, dropoutRate1 = dropoutRate1, dropoutRate2 = dropoutRate2, dropoutTime = dropoutTime, hazardRatio = hazardRatio ) .setValueAndParameterType(designPlan, "allocationRatioPlanned", allocationRatioPlanned, C_ALLOCATION_RATIO_DEFAULT) .setValueAndParameterType(designPlan, "dropoutRate1", dropoutRate1, C_DROP_OUT_RATE_1_DEFAULT) .setValueAndParameterType(designPlan, "dropoutRate2", dropoutRate2, C_DROP_OUT_RATE_2_DEFAULT) .setValueAndParameterType(designPlan, "dropoutTime", dropoutTime, C_DROP_OUT_TIME_DEFAULT) designPlan$.setSampleSizeObject(objectType) designPlan$criticalValuesPValueScale <- matrix(design$stageLevels, ncol = 1) designPlan$.setParameterType("criticalValuesPValueScale", C_PARAM_GENERATED) if (design$sided == 2) { designPlan$criticalValuesPValueScale <- designPlan$criticalValuesPValueScale * 2 } if (any(design$futilityBounds > -6)) { designPlan$futilityBoundsPValueScale <- matrix(1 - stats::pnorm(design$futilityBounds), ncol = 1) designPlan$.setParameterType("futilityBoundsPValueScale", C_PARAM_GENERATED) } designPlan$.accrualTime <- accrualSetup designPlan$totalAccrualTime <- accrualSetup$accrualTime[length(accrualSetup$accrualTime)] if (length(accrualSetup$accrualTime) > 2) { designPlan$.setParameterType("totalAccrualTime", C_PARAM_GENERATED) } else { 
designPlan$.setParameterType("totalAccrualTime", C_PARAM_NOT_APPLICABLE) } if (is.na(maxNumberOfSubjects)) { if (!is.na(accrualSetup$maxNumberOfSubjects)) { designPlan$maxNumberOfSubjects <- accrualSetup$maxNumberOfSubjects designPlan$.setParameterType("maxNumberOfSubjects", accrualSetup$.getParameterType("maxNumberOfSubjects")) } } else if (maxNumberOfSubjects == 0) { designPlan$.setParameterType("maxNumberOfSubjects", C_PARAM_GENERATED) } else { designPlan$.setParameterType("maxNumberOfSubjects", C_PARAM_USER_DEFINED) } if (identical(as.integer(accrualSetup$accrualTime), C_ACCRUAL_TIME_DEFAULT) || identical(as.integer(c(0L, accrualSetup$.getAccrualTimeWithoutLeadingZero())), C_ACCRUAL_TIME_DEFAULT)) { designPlan$.setParameterType("accrualTime", C_PARAM_DEFAULT_VALUE) } else { designPlan$.setParameterType("accrualTime", accrualSetup$.getParameterType("accrualTime")) } if (length(designPlan$accrualIntensity) == 1 && designPlan$accrualIntensity == C_ACCRUAL_INTENSITY_DEFAULT) { designPlan$.setParameterType("accrualIntensity", C_PARAM_DEFAULT_VALUE) } else { designPlan$.setParameterType("accrualIntensity", accrualSetup$.getParameterType("accrualIntensity")) } .assertIsSingleNumber(designPlan$eventTime, "eventTime") .assertIsSingleNumber(designPlan$allocationRatioPlanned, "allocationRatioPlanned") .assertIsSingleNumber(designPlan$kappa, "kappa") if (objectType == "power") { .assertIsValidMaxNumberOfSubjects(designPlan$maxNumberOfSubjects) } .assertIsSingleNumber(designPlan$dropoutRate1, "dropoutRate1") .assertIsSingleNumber(designPlan$dropoutRate2, "dropoutRate2") .assertIsSingleNumber(designPlan$dropoutTime, "dropoutTime") if (objectType == "power") { pi1Default <- C_PI_1_DEFAULT } else { pi1Default <- C_PI_1_SAMPLE_SIZE_DEFAULT } designPlan$.piecewiseSurvivalTime <- getPiecewiseSurvivalTime( piecewiseSurvivalTime = piecewiseSurvivalTime, lambda2 = lambda2, lambda1 = lambda1, median1 = median1, median2 = median2, hazardRatio = hazardRatio, pi1 = pi1, pi2 = pi2, eventTime = eventTime, kappa = kappa, .pi1Default = pi1Default) designPlan$.setParameterType("kappa", designPlan$.piecewiseSurvivalTime$.getParameterType("kappa")) if (designPlan$.piecewiseSurvivalTime$.getParameterType("pi1") == C_PARAM_DEFAULT_VALUE && length(designPlan$.piecewiseSurvivalTime$pi1) > 1 && length(accrualSetup$accrualIntensity) > 1 && all(accrualSetup$accrualIntensity < 1)) { designPlan$.piecewiseSurvivalTime$pi1 <- designPlan$.piecewiseSurvivalTime$pi1[1] warning("Only the first default 'pi1' (", designPlan$.piecewiseSurvivalTime$pi1, ") was used ", "because the accrual intensities (", .arrayToString(accrualSetup$accrualIntensity), ") ", "were defined relative (all accrual intensities are < 1)", call. 
= FALSE) } .initDesignPlanSurvival(designPlan) designPlan$.setParameterType("followUpTime", C_PARAM_NOT_APPLICABLE) if (designPlan$accountForObservationTimes) { .assertIsSingleNumber(dropoutRate1, "dropoutRate1") .assertIsSingleNumber(dropoutRate2, "dropoutRate2") .assertIsSingleNumber(dropoutTime, "dropoutTime") if (!is.na(dropoutTime) && dropoutTime <= 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dropoutTime' (", dropoutTime, ") must be > 0") } if (dropoutRate1 < 0 || dropoutRate1 >= 1) { stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'dropoutRate1' (", dropoutRate1, ") is out of bounds [0; 1)") } if (dropoutRate2 < 0 || dropoutRate2 >= 1) { stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'dropoutRate2' (", dropoutRate2, ") is out of bounds [0; 1)") } if (!is.na(eventTime) && eventTime <= 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'eventTime' (", eventTime, ") must be > 0") } .assertIsValidAccrualTime(accrualSetup$.getAccrualTimeWithoutLeadingZero()) .assertIsValidFollowUpTime(followUpTime) .setValueAndParameterType(designPlan, "followUpTime", followUpTime, C_FOLLOW_UP_TIME_DEFAULT) if (.isUserDefinedMaxNumberOfSubjects(designPlan) && !is.null(followUpTime) && length(followUpTime) == 1 && !is.na(followUpTime)) { warning("Follow-up time will be calculated, value entered (", followUpTime, ") is not taken into account", call. = FALSE) } else if (is.na(followUpTime)) { designPlan$followUpTime <- C_FOLLOW_UP_TIME_DEFAULT designPlan$.setParameterType("followUpTime", C_PARAM_DEFAULT_VALUE) } if (objectType == "power") { designPlan$followUpTime <- NA_real_ designPlan$.setParameterType("followUpTime", C_PARAM_NOT_APPLICABLE) } } else { for (p in c("accrualTime", "accrualIntensity", "eventTime", "dropoutRate1", "dropoutRate2", "dropoutTime", "followUpTime", "analysisTime", "studyDuration")) { designPlan$.setParameterType(p, C_PARAM_NOT_APPLICABLE) } if (designPlan$.getParameterType("accrualTime") == C_PARAM_USER_DEFINED || !identical(accrualTime, C_ACCRUAL_TIME_DEFAULT)) { designPlan$.warnInCaseArgumentExists(accrualSetup$accrualTime, "accrualTime") } if (!identical(eventTime, C_EVENT_TIME_DEFAULT)) { designPlan$.warnInCaseArgumentExists(eventTime, "eventTime") } designPlan$.warnInCaseArgumentExists(dropoutRate1, "dropoutRate1") designPlan$.warnInCaseArgumentExists(dropoutRate2, "dropoutRate2") if (!identical(dropoutTime, C_DROP_OUT_TIME_DEFAULT)) { designPlan$.warnInCaseArgumentExists(dropoutTime, "dropoutTime") } designPlan$.warnInCaseArgumentExists(maxNumberOfSubjects, "maxNumberOfSubjects") if (!identical(followUpTime, C_FOLLOW_UP_TIME_DEFAULT)) { designPlan$.warnInCaseArgumentExists(followUpTime, "followUpTime") } } .setValueAndParameterType(designPlan, "directionUpper", directionUpper, TRUE) if (objectType == "power") { .setValueAndParameterType(designPlan, "maxNumberOfEvents", maxNumberOfEvents, NA_real_) designPlan$.setParameterType("accountForObservationTimes", C_PARAM_NOT_APPLICABLE) } return(designPlan) } .isUserDefinedMaxNumberOfSubjects <- function(designPlan) { if (!is.null(designPlan) && length(designPlan$.getParameterType("maxNumberOfSubjects")) > 0) { type <- designPlan$.getParameterType("maxNumberOfSubjects") if (type == C_PARAM_USER_DEFINED) { return(TRUE) } } return(!is.null(designPlan$maxNumberOfSubjects) && length(designPlan$maxNumberOfSubjects) == 1 && !is.na(designPlan$maxNumberOfSubjects) && designPlan$maxNumberOfSubjects > 0) } .initDesignPlanSurvivalByPiecewiseSurvivalTimeObject <- function(designPlan, pwstSetup) { designPlan$pi1 <- pwstSetup$pi1 
designPlan$.setParameterType("pi1", pwstSetup$.getParameterType("pi1")) designPlan$pi2 <- pwstSetup$pi2 designPlan$.setParameterType("pi2", pwstSetup$.getParameterType("pi2")) designPlan$hazardRatio <- pwstSetup$hazardRatio designPlan$.setParameterType("hazardRatio", pwstSetup$.getParameterType("hazardRatio")) designPlan$lambda1 <- pwstSetup$lambda1 designPlan$.setParameterType("lambda1", pwstSetup$.getParameterType("lambda1")) designPlan$lambda2 <- pwstSetup$lambda2 designPlan$.setParameterType("lambda2", pwstSetup$.getParameterType("lambda2")) designPlan$median1 <- pwstSetup$median1 designPlan$.setParameterType("median1", pwstSetup$.getParameterType("median1")) designPlan$median2 <- pwstSetup$median2 designPlan$.setParameterType("median2", pwstSetup$.getParameterType("median2")) designPlan$piecewiseSurvivalTime <- pwstSetup$piecewiseSurvivalTime designPlan$.setParameterType("piecewiseSurvivalTime", pwstSetup$.getParameterType("piecewiseSurvivalTime")) designPlan$eventTime <- pwstSetup$eventTime designPlan$.setParameterType("eventTime", pwstSetup$.getParameterType("eventTime")) if (pwstSetup$.isLambdaBased()) { return(length(designPlan$hazardRatio)) } return(length(designPlan$pi1)) } .initDesignPlanSurvival <- function(designPlan) { numberOfResults <- .initDesignPlanSurvivalByPiecewiseSurvivalTimeObject(designPlan, designPlan$.piecewiseSurvivalTime) if (designPlan$.piecewiseSurvivalTime$.isLambdaBased()) { if (length(designPlan$accountForObservationTimes) == 0 || is.na(designPlan$accountForObservationTimes) || !designPlan$accountForObservationTimes) { designPlan$accountForObservationTimes <- TRUE designPlan$.setParameterType("accountForObservationTimes", C_PARAM_DEFAULT_VALUE) } if (!designPlan$accountForObservationTimes) { designPlan$accountForObservationTimes <- TRUE warning("'accountForObservationTimes' was set to TRUE ", "because piecewise exponential survival function is enabled", call. = FALSE) } } else { if (.isUserDefinedMaxNumberOfSubjects(designPlan)) { if (length(designPlan$accountForObservationTimes) != 0 && !is.na(designPlan$accountForObservationTimes) && !designPlan$accountForObservationTimes) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'accountForObservationTimes' must be TRUE because 'maxNumberOfSubjects' is > 0") } designPlan$.setParameterType("accountForObservationTimes", C_PARAM_GENERATED) designPlan$accountForObservationTimes <- TRUE } else { if (length(designPlan$accountForObservationTimes) == 0 || is.na(designPlan$accountForObservationTimes)) { designPlan$accountForObservationTimes <- FALSE designPlan$.setParameterType("accountForObservationTimes", C_PARAM_DEFAULT_VALUE) } else { designPlan$.setParameterType("accountForObservationTimes", ifelse(designPlan$accountForObservationTimes, C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) } } } designPlan$.setParameterType("omega", C_PARAM_NOT_APPLICABLE) if (designPlan$.isSampleSizeObject()) { designPlan$.setParameterType("directionUpper", C_PARAM_NOT_APPLICABLE) designPlan$.setParameterType("maxNumberOfEvents", C_PARAM_NOT_APPLICABLE) } return(numberOfResults = numberOfResults) } .warnInCaseOfDefinedPiValue <- function(designPlan, piValueName) { piValue <- designPlan[[piValueName]] if (!is.null(piValue) && !is.na(piValue) && length(piValue) > 0) { designPlan$.setParameterType(piValueName, C_PARAM_NOT_APPLICABLE) warning("'pi2' (", .arrayToString(piValue), ") will be ignored ", "because piecewise exponential survival function is enabled", call. 
= FALSE) designPlan[[piValueName]] <- NA_real_ } } .getSampleSize <- function(designPlan) { if (.isTrialDesignPlanMeans(designPlan) || .isTrialDesignPlanRates(designPlan)) { if (identical(designPlan$allocationRatioPlanned, 0)) { designPlan$optimumAllocationRatio <- TRUE designPlan$.setParameterType("optimumAllocationRatio", C_PARAM_USER_DEFINED) } if (.isTrialDesignPlanMeans(designPlan)) { sampleSizeFixed <- .getSampleSizeFixedMeans( alpha = designPlan$getAlpha(), beta = designPlan$getBeta(), sided = designPlan$getSided(), twoSidedPower = designPlan$getTwoSidedPower(), normalApproximation = designPlan$normalApproximation, meanRatio = designPlan$meanRatio, thetaH0 = designPlan$thetaH0, alternative = designPlan$alternative, stDev = designPlan$stDev, groups = designPlan$groups, allocationRatioPlanned = designPlan$allocationRatioPlanned) } else { sampleSizeFixed <- .getSampleSizeFixedRates( alpha = designPlan$getAlpha(), beta = designPlan$getBeta(), sided = designPlan$getSided(), normalApproximation = designPlan$normalApproximation, riskRatio = designPlan$riskRatio, thetaH0 = designPlan$thetaH0, pi1 = designPlan$pi1, pi2 = designPlan$pi2, groups = designPlan$groups, allocationRatioPlanned = designPlan$allocationRatioPlanned) } # Fixed designPlan$nFixed <- sampleSizeFixed$nFixed designPlan$.setParameterType("nFixed", C_PARAM_GENERATED) if (designPlan$groups == 2) { designPlan$nFixed1 <- sampleSizeFixed$n1Fixed designPlan$nFixed2 <- sampleSizeFixed$n2Fixed designPlan$.setParameterType("nFixed1", C_PARAM_GENERATED) designPlan$.setParameterType("nFixed2", C_PARAM_GENERATED) designPlan$numberOfSubjects1 <- matrix(designPlan$nFixed1, nrow = 1) designPlan$numberOfSubjects2 <- matrix(designPlan$nFixed2, nrow = 1) } designPlan$numberOfSubjects <- matrix(designPlan$nFixed, nrow = 1) if (!is.null(sampleSizeFixed$allocationRatioPlanned) && (length(designPlan$allocationRatioPlanned) != length(sampleSizeFixed$allocationRatioPlanned) || sum(designPlan$allocationRatioPlanned == sampleSizeFixed$allocationRatioPlanned) != length(designPlan$allocationRatioPlanned))) { designPlan$allocationRatioPlanned = sampleSizeFixed$allocationRatioPlanned designPlan$.setParameterType("allocationRatioPlanned", C_PARAM_GENERATED) } # Sequential if (designPlan$.design$kMax > 1) { designCharacteristics <- getDesignCharacteristics(designPlan$.design) if (.isTrialDesignPlanMeans(designPlan)) { sampleSizeSequential <- .getSampleSizeSequentialMeans( sampleSizeFixed, designCharacteristics) } else { sampleSizeSequential <- .getSampleSizeSequentialRates( sampleSizeFixed, designCharacteristics) } designPlan$informationRates <- sampleSizeSequential$informationRates designPlan$.setParameterType("informationRates", C_PARAM_GENERATED) designPlan$maxNumberOfSubjects <- sampleSizeSequential$maxNumberOfSubjects designPlan$.setParameterType("maxNumberOfSubjects", C_PARAM_GENERATED) if (designPlan$groups == 2) { designPlan$maxNumberOfSubjects1 <- .getNumberOfSubjects1( designPlan$maxNumberOfSubjects, designPlan$allocationRatioPlanned) designPlan$maxNumberOfSubjects2 <- .getNumberOfSubjects2( designPlan$maxNumberOfSubjects, designPlan$allocationRatioPlanned) designPlan$.setParameterType("maxNumberOfSubjects1", C_PARAM_GENERATED) designPlan$.setParameterType("maxNumberOfSubjects2", C_PARAM_GENERATED) } designPlan$numberOfSubjects <- sampleSizeSequential$numberOfSubjects designPlan$.setParameterType("numberOfSubjects", C_PARAM_GENERATED) if (designPlan$groups == 2) { designPlan$numberOfSubjects1 <- sampleSizeSequential$numberOfSubjects1 
designPlan$numberOfSubjects2 <- sampleSizeSequential$numberOfSubjects2 designPlan$.setParameterType("numberOfSubjects1", C_PARAM_GENERATED) designPlan$.setParameterType("numberOfSubjects2", C_PARAM_GENERATED) } designPlan$expectedNumberOfSubjectsH0 <- sampleSizeSequential$expectedNumberOfSubjectsH0 designPlan$expectedNumberOfSubjectsH01 <- sampleSizeSequential$expectedNumberOfSubjectsH01 designPlan$expectedNumberOfSubjectsH1 <- sampleSizeSequential$expectedNumberOfSubjectsH1 designPlan$.setParameterType("expectedNumberOfSubjectsH0", C_PARAM_GENERATED) designPlan$.setParameterType("expectedNumberOfSubjectsH01", C_PARAM_GENERATED) designPlan$.setParameterType("expectedNumberOfSubjectsH1", C_PARAM_GENERATED) designPlan$.setParameterType("nFixed1", C_PARAM_NOT_APPLICABLE) designPlan$.setParameterType("nFixed2", C_PARAM_NOT_APPLICABLE) designPlan$.setParameterType("nFixed", C_PARAM_NOT_APPLICABLE) if (designPlan$allocationRatioPlanned[1] == 1) { designPlan$.setParameterType("numberOfSubjects1", C_PARAM_NOT_APPLICABLE) designPlan$.setParameterType("numberOfSubjects2", C_PARAM_NOT_APPLICABLE) } if (!is.null(sampleSizeSequential$rejectPerStage)) { designPlan$rejectPerStage <- matrix(sampleSizeSequential$rejectPerStage, nrow = designPlan$.design$kMax) designPlan$.setParameterType("rejectPerStage", C_PARAM_GENERATED) designPlan$earlyStop <- sum(designPlan$rejectPerStage[1:(designPlan$.design$kMax - 1), ]) designPlan$.setParameterType("earlyStop", C_PARAM_GENERATED) } if (!is.null(sampleSizeSequential$futilityPerStage) && any(designPlan$.design$futilityBounds != C_FUTILITY_BOUNDS_DEFAULT)) { designPlan$futilityPerStage <- matrix(sampleSizeSequential$futilityPerStage, nrow = designPlan$.design$kMax - 1) designPlan$.setParameterType("futilityPerStage", C_PARAM_GENERATED) designPlan$futilityStop <- sum(designPlan$futilityPerStage) designPlan$.setParameterType("futilityStop", C_PARAM_GENERATED) designPlan$earlyStop <- designPlan$earlyStop + sum(designPlan$futilityPerStage) } } .addEffectScaleBoundaryDataToDesignPlan(designPlan) return(designPlan) } else if (.isTrialDesignPlanSurvival(designPlan)) { # Fixed designPlan <- .getSampleSizeFixedSurvival(designPlan) # Sequential if (designPlan$.design$kMax > 1) { designCharacteristics <- getDesignCharacteristics(designPlan$.design) designPlan <- .getSampleSizeSequentialSurvival(designPlan, designCharacteristics) } if (designPlan$accountForObservationTimes && !any(is.na(designPlan$followUpTime)) && all(designPlan$followUpTime == C_FOLLOW_UP_TIME_DEFAULT)) { designPlan$.setParameterType("followUpTime", C_PARAM_DEFAULT_VALUE) } .addEffectScaleBoundaryDataToDesignPlan(designPlan) if (designPlan$.getParameterType("maxNumberOfSubjects") == C_PARAM_GENERATED && designPlan$.accrualTime$.getParameterType("maxNumberOfSubjects") != C_PARAM_GENERATED && all(designPlan$accrualIntensity < 1)) { numberOfDefinedAccrualIntensities <- length(designPlan$accrualIntensity) accrualTime <- designPlan$accrualTime if (length(accrualTime) > 0 && accrualTime[1] != 0) { accrualTime <- c(0, accrualTime) } if (any(designPlan$accrualIntensity < 0)) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'accrualIntensityRelative' (", .arrayToString(designPlan$accrualIntensity), ") must be >= 0") } designPlan$accrualIntensityRelative <- designPlan$accrualIntensity if (identical(designPlan$accrualIntensityRelative, C_ACCRUAL_INTENSITY_DEFAULT)) { designPlan$.setParameterType("accrualIntensityRelative", C_PARAM_NOT_APPLICABLE) } else { designPlan$.setParameterType("accrualIntensityRelative", 
designPlan$.getParameterType("accrualIntensity")) } accrualIntensityAbsolute <- c() for (maxNumberOfSubjects in designPlan$maxNumberOfSubjects) { accrualSetup <- getAccrualTime(accrualTime = accrualTime, accrualIntensity = designPlan$accrualIntensityRelative, maxNumberOfSubjects = maxNumberOfSubjects) accrualIntensityAbsolute <- c(accrualIntensityAbsolute, accrualSetup$accrualIntensity) designPlan$.accrualTime <- accrualSetup } designPlan$accrualIntensity <- accrualIntensityAbsolute designPlan$.setParameterType("accrualIntensity", C_PARAM_GENERATED) if (numberOfDefinedAccrualIntensities > 1) { paramName <- NULL if (designPlan$.getParameterType("pi1") == C_PARAM_USER_DEFINED || designPlan$.getParameterType("pi1") == C_PARAM_DEFAULT_VALUE || designPlan$.getParameterType("pi2") == C_PARAM_USER_DEFINED) { paramName <- "pi1" } else if (designPlan$.getParameterType("median1") == C_PARAM_USER_DEFINED || designPlan$.getParameterType("median2") == C_PARAM_USER_DEFINED) { paramName <- "median1" } if (!is.null(paramName)) { paramValue <- designPlan[[paramName]] if (!is.null(paramValue) && length(paramValue) > 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the definition of relative accrual intensities ", "(all 'accrualIntensity' values < 1) ", "is only available for a single '", paramName, "' ", "(", paramName, " = ", .arrayToString( paramValue, vectorLookAndFeelEnabled = TRUE), ")") } } } } designPlan$maxNumberOfEvents <- designPlan$eventsPerStage[designPlan$.design$kMax, ] designPlan$.setParameterType("maxNumberOfEvents", C_PARAM_GENERATED) if (!any(is.na(designPlan$followUpTime))) { if (any(designPlan$followUpTime < -1e-02)) { warning("Accrual duration longer than maximal study ", "duration (time to maximal number of events); followUpTime = ", .arrayToString(designPlan$followUpTime), call. = FALSE) } } else { indices <- which(is.na(designPlan$followUpTime)) warning("Follow-up time could not be calculated for pi1 = ", .arrayToString(designPlan$pi1[indices]), call. = FALSE) } if (designPlan$.design$kMax == 1) { designPlan$.setParameterType("kappa", C_PARAM_NOT_APPLICABLE) } if (designPlan$.getParameterType("accountForObservationTimes") != C_PARAM_USER_DEFINED) { designPlan$.setParameterType("accountForObservationTimes", C_PARAM_NOT_APPLICABLE) } designPlan$.setParameterType("omega", C_PARAM_NOT_APPLICABLE) .addStudyDurationToDesignPlan(designPlan) return(designPlan) } stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "unknown trial plan class '", class(designPlan), "'") } .checkFollowUpTime <- function(followUpTime) { if (is.null(followUpTime) || length(followUpTime) == 0) { return(invisible()) } naFollowUpTimes <- c() negativeFollowUpTimes <- c() for (i in 1:length(followUpTime)) { if (is.na(followUpTime[i])) { naFollowUpTimes <- c(naFollowUpTimes, i) } else if (followUpTime[i] < -1e-02) { negativeFollowUpTimes <- c(negativeFollowUpTimes, i) } } if (length(negativeFollowUpTimes) > 0) { warning("Accrual duration longer than maximal study ", "duration (time to maximal number of events; 'followUpTime' = ", .arrayToString(followUpTime), ")", call. = FALSE) } if (length(naFollowUpTimes) > 0) { if (length(naFollowUpTimes) == 1) { warning("Follow-up time could not be calculated", call. = FALSE) } else { warning("Follow-up time 'followUpTime[1]' could not be calculated", call. 
= FALSE) } } } .getSampleSizeFixedMeans <- function(..., alpha = 0.025, beta = 0.2, sided = 1, twoSidedPower = C_TWO_SIDED_POWER_DEFAULT, normalApproximation = FALSE, meanRatio = FALSE, thetaH0 = 0, alternative = C_ALTERNATIVE_DEFAULT, stDev = C_STDEV_DEFAULT, groups = 2, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT) { nFixed <- rep(NA_real_, length(alternative)) for (i in 1:length(alternative)) { theta <- alternative[i] if (groups == 1) { if (sided == 1 || !twoSidedPower) { if (normalApproximation == FALSE) { up <- 2 while (stats::pt(stats::qt(1 - alpha / sided, up - 1), max(0.001, up - 1), sqrt(up) * abs(theta - thetaH0) / stDev) > beta) { up <- 2*up } nFixed[i] <- .getOneDimensionalRoot( function(n) { return(stats::pt(stats::qt(1 - alpha / sided, max(0.001, n - 1)), max(0.001, n - 1), sqrt(n) * abs(theta - thetaH0) / stDev) - beta) }, lower = 0.001, upper = up, tolerance = 1e-04 ) } else { nFixed[i] <- (stats::qnorm(1 - alpha / sided) + stats::qnorm(1 - beta))^2 / ((theta - thetaH0) / stDev)^2 } } else { up <- 2 while (stats::pt(stats::qt(1 - alpha / 2, max(0.001, up - 1)), max(0.001, up - 1), sqrt(up) * (theta - thetaH0) / stDev) - stats::pt(-stats::qt(1 - alpha / 2, max(0.001, up - 1)), max(0.001, up - 1), sqrt(up) * (theta - thetaH0) / stDev) > beta) { up <- 2 * up } if (normalApproximation == FALSE) { nFixed[i] <- .getOneDimensionalRoot( function(n) { return(stats::pt(stats::qt(1 - alpha / 2, max(0.001, n - 1)), max(0.001, n - 1), sqrt(n) * (theta - thetaH0) / stDev) - stats::pt(-stats::qt(1 - alpha / 2, max(0.001, n - 1)), max(0.001, n - 1), sqrt(n) * (theta - thetaH0) / stDev) - beta) }, lower = 0.001, upper = up, tolerance = 1e-04 ) } else { nFixed[i] <- .getOneDimensionalRoot( function(n) { return(stats::pnorm(stats::qnorm(1 - alpha / 2) - sqrt(n) * (theta - thetaH0) / stDev) - stats::pnorm(-stats::qnorm(1 - alpha / 2) - sqrt(n) * (theta - thetaH0) / stDev) - beta) }, lower = 0.001, upper = up, tolerance = 1e-04 ) } } } else if (groups == 2) { if (sided == 1 || !twoSidedPower) { if (!meanRatio) { # allocationRatioPlanned = 0 provides optimum sample size if (allocationRatioPlanned == 0) { allocationRatioPlanned <- 1 } if (normalApproximation == FALSE) { up <- 2 while (stats::pt(stats::qt(1 - alpha / sided, up * (1 + allocationRatioPlanned) - 2), up * (1 + allocationRatioPlanned) - 2, sqrt(up) * sqrt(allocationRatioPlanned / (1 + allocationRatioPlanned)) * abs(theta - thetaH0) / stDev) > beta) { up <- 2 * up } n2Fixed <- .getOneDimensionalRoot( function(x) { return(stats::pt(stats::qt(1 - alpha / sided, max(0.001, x * (1 + allocationRatioPlanned) - 2)), max(0.001, x * (1 + allocationRatioPlanned) - 2), sqrt(x) * sqrt(allocationRatioPlanned / (1 + allocationRatioPlanned)) * abs(theta - thetaH0) / stDev) - beta) }, lower = 0.001, upper = up, tolerance = 1e-04 ) nFixed[i] <- n2Fixed * (1 + allocationRatioPlanned) } else { nFixed[i] <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * (stats::qnorm(1 - alpha / sided) + stats::qnorm(1 - beta))^2 / ((theta - thetaH0) / stDev)^2 } } else { # allocationRatioPlanned = 0 provides optimum sample size if (allocationRatioPlanned == 0) { allocationRatioPlanned <- 1 / thetaH0 } if (!normalApproximation) { up <- 2 while (stats::pt(stats::qt(1 - alpha / sided, up * (1 + allocationRatioPlanned) - 2), up * (1 + allocationRatioPlanned) - 2, sqrt(up * allocationRatioPlanned / (1 + allocationRatioPlanned * thetaH0^2)) * abs(theta - thetaH0) / stDev) > beta) { up <- 2 * up } n2Fixed <- .getOneDimensionalRoot( function(n2) { 
return(stats::pt(stats::qt(1 - alpha / sided, max(0.001, n2 * (1 + allocationRatioPlanned) - 2)), max(0.001, n2 * (1 + allocationRatioPlanned) - 2), sqrt(n2 * allocationRatioPlanned / (1 + allocationRatioPlanned * thetaH0^2)) * abs(theta - thetaH0) / stDev) - beta) }, lower = 0.001, upper = up, tolerance = 1e-04 ) nFixed[i] <- n2Fixed * (1 + allocationRatioPlanned) } else { nFixed[i] <- (1 + 1/allocationRatioPlanned + thetaH0^2 * (1 + allocationRatioPlanned)) * (stats::qnorm(1 - alpha / sided) + stats::qnorm(1 - beta))^2 / ((theta - thetaH0) / stDev)^2 } } } else { if (!normalApproximation) { if (allocationRatioPlanned == 0) { allocationRatioPlanned <- 1 } up <- 2 while (stats::pt(stats::qt(1 - alpha / 2, max(0.001, up * (1 + allocationRatioPlanned) - 2)), max(0.001, up * (1 + allocationRatioPlanned) - 2), sqrt(up) * sqrt(allocationRatioPlanned / (1 + allocationRatioPlanned)) * (theta - thetaH0) / stDev) - stats::pt(-stats::qt(1 - alpha / 2, up * (1 + allocationRatioPlanned) - 2), up * (1 + allocationRatioPlanned) - 2, sqrt(up) * sqrt(allocationRatioPlanned / (1 + allocationRatioPlanned)) * (theta - thetaH0) / stDev) > beta) { up <- 2*up } n2Fixed <- .getOneDimensionalRoot( function(n2) { return(stats::pt(stats::qt(1 - alpha / 2, max(0.001, n2 * (1 + allocationRatioPlanned) - 2)), max(0.001, n2*(1 + allocationRatioPlanned) - 2), sqrt(n2) * sqrt(allocationRatioPlanned / (1 + allocationRatioPlanned)) * (theta - thetaH0) / stDev) - stats::pt(-stats::qt(1 - alpha / 2, max(0.001, n2 * (1 + allocationRatioPlanned) - 2)), max(0.001, n2 * (1 + allocationRatioPlanned) - 2), sqrt(n2) * sqrt(allocationRatioPlanned/(1 + allocationRatioPlanned)) * (theta - thetaH0) / stDev) - beta) }, lower = 0.001, upper = up, tolerance = 1e-04 ) nFixed[i] <- n2Fixed * (1 + allocationRatioPlanned) } else { up <- 2 while (stats::pnorm(stats::qnorm(1 - alpha / 2) - sqrt(up / 4) * (theta - thetaH0) / stDev) - stats::pnorm(-stats::qnorm(1 - alpha / 2) - sqrt(up / 4) * (theta - thetaH0) / stDev) > beta) { up <- 2 * up } nFixed[i] <- (1 + allocationRatioPlanned)^2 / (4 * allocationRatioPlanned) * .getOneDimensionalRoot( function(n) { return(stats::pnorm(stats::qnorm(1 - alpha / 2) - sqrt(n / 4) * (theta - thetaH0) / stDev) - stats::pnorm(-stats::qnorm(1 - alpha / 2) - sqrt(n / 4) * (theta - thetaH0) / stDev) - beta) }, lower = 0.001, upper = up, tolerance = 1e-04 ) } } } } if (groups == 1) { return(list(alpha = alpha, beta = beta, sided = sided, groups = groups, thetaH0 = thetaH0, alternative = alternative, stDev = stDev, normalApproximation = normalApproximation, nFixed = nFixed) ) } else if (groups == 2) { n1Fixed <- nFixed * allocationRatioPlanned / (1 + allocationRatioPlanned) n2Fixed <- n1Fixed / allocationRatioPlanned return(list(alpha = alpha, beta = beta, sided = sided, groups = groups, allocationRatioPlanned = allocationRatioPlanned, thetaH0 = thetaH0, meanRatio = meanRatio, alternative = alternative, stDev = stDev, normalApproximation = normalApproximation, n1Fixed = n1Fixed, n2Fixed = n2Fixed, nFixed = nFixed) ) } } .getSampleSizeSequentialMeans <- function(fixedSampleSize, designCharacteristics) { kMax <- designCharacteristics$.design$kMax numberOfSubjects <- matrix(NA_real_, kMax, length(fixedSampleSize$alternative)) numberOfSubjects1 <- matrix(NA_real_, kMax, length(fixedSampleSize$alternative)) numberOfSubjects2 <- matrix(NA_real_, kMax, length(fixedSampleSize$alternative)) maxNumberOfSubjects <- rep(NA_real_, length(fixedSampleSize$alternative)) expectedNumberOfSubjectsH0 <- rep(NA_real_, 
length(fixedSampleSize$alternative)) expectedNumberOfSubjectsH01 <- rep(NA_real_, length(fixedSampleSize$alternative)) expectedNumberOfSubjectsH1 <- rep(NA_real_, length(fixedSampleSize$alternative)) informationRates <- designCharacteristics$information / designCharacteristics$shift for (i in (1:length(fixedSampleSize$alternative))) { maxNumberOfSubjects[i] <- fixedSampleSize$nFixed[i] * designCharacteristics$inflationFactor numberOfSubjects[, i] <- maxNumberOfSubjects[i] * c(informationRates[1], (informationRates[2:kMax] - informationRates[1:(kMax - 1)])) expectedNumberOfSubjectsH0[i] <- designCharacteristics$averageSampleNumber0 * fixedSampleSize$nFixed[i] expectedNumberOfSubjectsH01[i] <- designCharacteristics$averageSampleNumber01 * fixedSampleSize$nFixed[i] expectedNumberOfSubjectsH1[i] <- designCharacteristics$averageSampleNumber1 * fixedSampleSize$nFixed[i] if (fixedSampleSize$groups == 2) { if (length(fixedSampleSize$allocationRatioPlanned) > 1) { allocationRatioPlanned <- fixedSampleSize$allocationRatioPlanned[i] } else { allocationRatioPlanned <- fixedSampleSize$allocationRatioPlanned } numberOfSubjects1[, i] <- numberOfSubjects[, i] * allocationRatioPlanned / (1 + allocationRatioPlanned) numberOfSubjects2[, i] <- numberOfSubjects[, i] / (1 + allocationRatioPlanned) } } if (fixedSampleSize$groups == 1) { return(list(alpha = fixedSampleSize$alpha, beta = fixedSampleSize$beta, sided = fixedSampleSize$sided, groups = fixedSampleSize$groups, thetaH0 = fixedSampleSize$thetaH0, alternative = fixedSampleSize$alternative, stDev = fixedSampleSize$stDev, normalApproximation = fixedSampleSize$normalApproximation, informationRates = matrix(informationRates, ncol = 1), maxNumberOfSubjects = maxNumberOfSubjects, numberOfSubjects = .getColumnCumSum(numberOfSubjects), expectedNumberOfSubjectsH0 = expectedNumberOfSubjectsH0, expectedNumberOfSubjectsH01 = expectedNumberOfSubjectsH01, expectedNumberOfSubjectsH1 = expectedNumberOfSubjectsH1, rejectPerStage = designCharacteristics$rejectionProbabilities, futilityPerStage = designCharacteristics$futilityProbabilities )) } else { return(list(alpha = fixedSampleSize$alpha, beta = fixedSampleSize$beta, sided = fixedSampleSize$sided, groups = fixedSampleSize$groups, allocationRatioPlanned = fixedSampleSize$allocationRatioPlanned, thetaH0 = fixedSampleSize$thetaH0, alternative = fixedSampleSize$alternative, stDev = fixedSampleSize$stDev, normalApproximation = fixedSampleSize$normalApproximation, meanRatio = fixedSampleSize$meanRatio, informationRates = matrix(informationRates, ncol = 1), maxNumberOfSubjects = maxNumberOfSubjects, numberOfSubjects = .getColumnCumSum(numberOfSubjects), numberOfSubjects1 = .getColumnCumSum(numberOfSubjects1), numberOfSubjects2 = .getColumnCumSum(numberOfSubjects2), expectedNumberOfSubjectsH0 = expectedNumberOfSubjectsH0, expectedNumberOfSubjectsH01 = expectedNumberOfSubjectsH01, expectedNumberOfSubjectsH1 = expectedNumberOfSubjectsH1, rejectPerStage = designCharacteristics$rejectionProbabilities, futilityPerStage = designCharacteristics$futilityProbabilities )) } } .getColumnCumSum <- function(x) { if (is.matrix(x)) { result <- x for (i in 1:ncol(x)) { result[, i] <- cumsum(x[, i]) } return(result) } return(cumsum(x)) } .getFarringtonManningValuesDiff <- function(rate1, rate2, theta0, allocation) { if (theta0 == 0) { ml1 <- (allocation * rate1 + rate2) / (1 + allocation) ml2 <- ml1 return(c(ml1, ml2)) } a <- 1 + 1 / allocation b <- -(1 + 1 / allocation + rate1 + rate2 / allocation + theta0 * (1 / allocation + 2)) c <- 
theta0^2 + theta0 * (2 * rate1 + 1 / allocation + 1) + rate1 + rate2 / allocation d <- -theta0 * (1 + theta0) * rate1 v <- b^3 / (3 * a)^3 - b * c / (6 * a^2) + d / (2 * a) if (!is.na(v) && (v == 0)) { u <- sqrt(b^2 / (3 * a)^2 - c / (3 * a)) w <- acos(-1) / 2 } else { u <- sign(v) * sqrt(b^2 / (3 * a)^2 - c / (3 * a)) w <- 1 / 3 * (acos(-1) + acos(v / u^3)) } ml1 <- min(max(0, 2 * u * cos(w) - b / (3 * a)), 1) ml2 <- min(max(0, ml1 - theta0), 1) return(c(ml1, ml2)) } .getFarringtonManningValuesRatio <- function(rate1, rate2, theta0, allocation) { if (theta0 == 1) { ml1 <- (allocation * rate1 + rate2) / (1 + allocation) ml2 <- ml1 return(c(ml1, ml2)) } a <- 1 + 1 / allocation b <- -((1 + rate2 / allocation) * theta0 + 1 / allocation + rate1) c <- (rate1 + rate2 / allocation) * theta0 ml1 <- (-b - sqrt(b^2 - 4 * a * c)) / (2 * a) ml2 <- ml1 / theta0 return(c(ml1, ml2)) } # # @title # Get Farrington Manning Values # # @description # Calculates and returns the maximum likelihood estimates under H0. # # @details # Calculation of maximum likelihood estimates under H0: # pi1 - pi2 = theta0 or H0: pi1 / pi2 = theta0 # # @references # Farrington & Manning (1990) # Wassmer (2003) # # @keywords internal # .getFarringtonManningValues <- function(rate1, rate2, theta0, allocation, method = c("diff", "ratio")) { method <- match.arg(method) if (method == "diff") { ml <- .getFarringtonManningValuesDiff(rate1, rate2, theta0, allocation) } else { ml <- .getFarringtonManningValuesRatio(rate1, rate2, theta0, allocation) } return(list(theta0 = theta0, method = method, ml1 = ml[1], ml2 = ml[2])) } .getSampleSizeFixedRates <- function(..., alpha = 0.025, beta = 0.2, sided = 1, normalApproximation = TRUE, riskRatio = FALSE, thetaH0 = 0, pi1 = seq(0.4, 0.6, 0.1), pi2 = 0.2, groups = 2, allocationRatioPlanned = 1) { if (groups == 1) { nFixed <- rep(NA_real_, length(pi1)) for (i in 1:length(pi1)) { if (normalApproximation) { nFixed[i] <- (stats::qnorm(1 - alpha / sided) * sqrt(thetaH0 * (1 - thetaH0)) + stats::qnorm(1 - beta) * sqrt(pi1[i] * (1 - pi1[i])))^2 / (pi1[i] - thetaH0)^2 } else { ifelse(pi1[i] > thetaH0, lower.tail <- FALSE, lower.tail <- TRUE) iterations <- 1 if (lower.tail) { nup <- 2 while ((stats::pbinom(stats::qbinom(alpha, nup, thetaH0, lower.tail = lower.tail) - 1, nup, pi1[i], lower.tail = lower.tail) < 1 - beta) && (iterations <= 50)) { nup <- 2 * nup iterations <- iterations + 1 } if (iterations > 50) { nFixed[i] <- Inf } else { prec <- 2 nlow <- 2 while (prec > 1) { nFixed[i] <- round((nlow + nup) / 2) ifelse(stats::pbinom(stats::qbinom(alpha, nFixed[i], thetaH0, lower.tail = lower.tail) - 1, nFixed[i], pi1[i], lower.tail = lower.tail) < 1 - beta, nlow <- nFixed[i], nup <- nFixed[i]) prec <- nup - nlow } if(stats::pbinom(stats::qbinom(alpha, nFixed[i], thetaH0, lower.tail = lower.tail) - 1, nFixed[i], pi1[i], lower.tail = lower.tail) < 1 - beta) { nFixed[i] <- nFixed[i] + 1 } } } else { nup <- 2 while ((stats::pbinom(stats::qbinom(alpha, nup, thetaH0, lower.tail = lower.tail), nup, pi1[i], lower.tail = lower.tail) < 1 - beta) && (iterations <= 50)) { nup <- 2 * nup iterations <- iterations + 1 } if (iterations > 50) { nFixed[i] <- Inf } else { prec <- 2 nlow <- 2 while (prec > 1) { nFixed[i] <- round((nlow + nup) / 2) ifelse(stats::pbinom(stats::qbinom(alpha, nFixed[i], thetaH0, lower.tail = lower.tail), nFixed[i], pi1[i], lower.tail = lower.tail) < 1 - beta, nlow <- nFixed[i], nup <- nFixed[i]) prec <- nup - nlow } if(stats::pbinom(stats::qbinom(alpha, nFixed[i], thetaH0, lower.tail = 
lower.tail), nFixed[i], pi1[i], lower.tail = lower.tail) < 1 - beta) { nFixed[i] <- nFixed[i] + 1 } } } } } return(list(alpha = alpha, beta = beta, sided = sided, groups = groups, thetaH0 = thetaH0, pi1 = pi1, normalApproximation = normalApproximation, nFixed = nFixed )) } if (groups == 2) { n1Fixed <- rep(NA_real_, length(pi1)) n2Fixed <- rep(NA_real_, length(pi1)) nFixed <- rep(NA_real_, length(pi1)) if (allocationRatioPlanned == 0) { allocationRatioPlannedVec <- rep(NA_real_, length(pi1)) } for (i in 1:length(pi1)) { if (!riskRatio) { # allocationRatioPlanned = 0 provides optimum sample size if (allocationRatioPlanned == 0) { allocationRatioPlannedVec[i] <- stats::optimize(function(x) { fm <- .getFarringtonManningValues(pi1[i], pi2, thetaH0, x, method = "diff") n1 <- (stats::qnorm(1 - alpha / sided) * sqrt(fm$ml1 * (1 - fm$ml1) + fm$ml2 * (1 - fm$ml2) * x) + stats::qnorm(1 - beta) * sqrt(pi1[i] * (1 - pi1[i]) + pi2*(1 - pi2) * x))^2 / (pi1[i] - pi2 - thetaH0)^2 return((1 + x) / x * n1) }, interval = c(0, 5), tol = 0.0001)$minimum fm <- .getFarringtonManningValues(pi1[i], pi2, thetaH0, allocationRatioPlannedVec[i], method = "diff") n1Fixed[i] <- (stats::qnorm(1 - alpha / sided)*sqrt(fm$ml1 * (1 - fm$ml1) + fm$ml2*(1 - fm$ml2) * allocationRatioPlannedVec[i]) + stats::qnorm(1 - beta) * sqrt(pi1[i] * (1 - pi1[i]) + pi2 * (1 - pi2) * allocationRatioPlannedVec[i]))^2 / (pi1[i] - pi2 - thetaH0)^2 } else { fm <- .getFarringtonManningValues(pi1[i], pi2, thetaH0, allocationRatioPlanned, method = "diff") n1Fixed[i] <- (stats::qnorm(1 - alpha / sided) * sqrt(fm$ml1 * (1 - fm$ml1) + fm$ml2*(1 - fm$ml2) * allocationRatioPlanned) + stats::qnorm(1 - beta) * sqrt(pi1[i]*(1 - pi1[i]) + pi2 * (1 - pi2) * allocationRatioPlanned))^2 / (pi1[i] - pi2 - thetaH0)^2 } } else { if (allocationRatioPlanned == 0) { # allocationRatioPlanned = 0 provides optimum sample size allocationRatioPlannedVec[i] <- stats::optimize(function(x) { fm <- .getFarringtonManningValues(pi1[i], pi2 , thetaH0, x, method = "ratio") n1 <- (stats::qnorm(1 - alpha / sided) * sqrt(fm$ml1 * (1 - fm$ml1) + fm$ml2 * (1 - fm$ml2) * x * thetaH0^2) + stats::qnorm(1 - beta) * sqrt(pi1[i] * (1 - pi1[i]) + pi2 * (1 - pi2)*x*thetaH0^2))^2 / (pi1[i] - thetaH0 * pi2)^2 return((1 + x) / x * n1) }, interval = c(0, 5), tol = 0.0001)$minimum fm <- .getFarringtonManningValues(pi1[i], pi2, thetaH0, allocationRatioPlannedVec[i], method = "ratio") n1Fixed[i] <- (stats::qnorm(1 - alpha / sided)*sqrt(fm$ml1 * (1 - fm$ml1) + fm$ml2 * (1 - fm$ml2) * allocationRatioPlannedVec[i] * thetaH0^2) + stats::qnorm(1 - beta) * sqrt(pi1[i] * (1 - pi1[i]) + pi2 * (1 - pi2) * allocationRatioPlannedVec[i] * thetaH0^2))^2 / (pi1[i] - thetaH0 * pi2)^2 } else { fm <- .getFarringtonManningValues(pi1[i], pi2, thetaH0, allocationRatioPlanned, method = "ratio") n1Fixed[i] <- (stats::qnorm(1 - alpha / sided) * sqrt(fm$ml1 * (1 - fm$ml1) + fm$ml2 * (1 - fm$ml2) * allocationRatioPlanned * thetaH0^2) + stats::qnorm(1 - beta) * sqrt(pi1[i] * (1 - pi1[i]) + pi2 * (1 - pi2) * allocationRatioPlanned * thetaH0^2))^2 / (pi1[i] - thetaH0 * pi2)^2 } } } if (allocationRatioPlanned == 0) { allocationRatioPlanned <- allocationRatioPlannedVec } n2Fixed <- n1Fixed / allocationRatioPlanned nFixed <- n1Fixed + n2Fixed return(list(alpha = alpha, beta = beta, sided = sided, groups = groups, allocationRatioPlanned = allocationRatioPlanned, thetaH0 = thetaH0, pi1 = pi1, pi2 = pi2, normalApproximation = normalApproximation, riskRatio = riskRatio, n1Fixed = n1Fixed, n2Fixed = n2Fixed, nFixed = nFixed )) } } 
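#
# Editorial illustration (not part of the package logic): a minimal numeric
# sketch of the normal-approximation branch of '.getSampleSizeFixedRates'
# above for the rate difference pi1 - pi2, assuming pi1 = 0.5, pi2 = 0.3,
# thetaH0 = 0, sided = 1, alpha = 0.025, beta = 0.2, and
# allocationRatioPlanned = 1. The variance under H0 uses the Farrington &
# Manning maximum likelihood estimates (here ml1 = ml2 = 0.4):
#
#   fm <- .getFarringtonManningValues(rate1 = 0.5, rate2 = 0.3, theta0 = 0,
#       allocation = 1, method = "diff")
#   n1 <- (stats::qnorm(0.975) * sqrt(fm$ml1 * (1 - fm$ml1) + fm$ml2 * (1 - fm$ml2)) +
#       stats::qnorm(0.8) * sqrt(0.5 * 0.5 + 0.3 * 0.7))^2 / (0.5 - 0.3)^2
#
# This yields n1 of approximately 93 subjects per group, i.e., about 186 in total.
#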
.getSampleSizeSequentialRates <- function(fixedSampleSize, designCharacteristics) { kMax <- designCharacteristics$.design$kMax numberOfSubjects <- matrix(NA_real_, kMax, length(fixedSampleSize$pi1)) numberOfSubjects1 <- matrix(NA_real_, kMax, length(fixedSampleSize$pi1)) numberOfSubjects2 <- matrix(NA_real_, kMax, length(fixedSampleSize$pi1)) maxNumberOfSubjects <- rep(NA_real_, length(fixedSampleSize$pi1)) expectedNumberOfSubjectsH0 <- rep(NA_real_, length(fixedSampleSize$pi1)) expectedNumberOfSubjectsH01 <- rep(NA_real_, length(fixedSampleSize$pi1)) expectedNumberOfSubjectsH1 <- rep(NA_real_, length(fixedSampleSize$pi1)) informationRates <- designCharacteristics$information/designCharacteristics$shift for (i in 1:length(fixedSampleSize$pi1)) { maxNumberOfSubjects[i] <- fixedSampleSize$nFixed[i] * designCharacteristics$inflationFactor numberOfSubjects[, i] <- maxNumberOfSubjects[i]*c(informationRates[1], (informationRates[2:kMax] - informationRates[1:(kMax - 1)])) expectedNumberOfSubjectsH0[i] <- designCharacteristics$averageSampleNumber0 * fixedSampleSize$nFixed[i] expectedNumberOfSubjectsH01[i] <- designCharacteristics$averageSampleNumber01 * fixedSampleSize$nFixed[i] expectedNumberOfSubjectsH1[i] <- designCharacteristics$averageSampleNumber1 * fixedSampleSize$nFixed[i] if (fixedSampleSize$groups == 2) { if (length(fixedSampleSize$allocationRatioPlanned) > 1) { allocationRatioPlanned <- fixedSampleSize$allocationRatioPlanned[i] } else { allocationRatioPlanned <- fixedSampleSize$allocationRatioPlanned } numberOfSubjects1[, i] <- numberOfSubjects[, i] * allocationRatioPlanned / (1 + allocationRatioPlanned) numberOfSubjects2[, i] <- numberOfSubjects[, i] / (1 + allocationRatioPlanned) } } if (fixedSampleSize$groups == 1) { return(list(alpha = fixedSampleSize$alpha, beta = fixedSampleSize$beta, sided = fixedSampleSize$sided, groups = fixedSampleSize$groups, thetaH0 = fixedSampleSize$thetaH0, pi1 = fixedSampleSize$pi1, normalApproximation = fixedSampleSize$normalApproximation, informationRates = matrix(informationRates, ncol = 1), maxNumberOfSubjects = maxNumberOfSubjects, numberOfSubjects = .getColumnCumSum(numberOfSubjects), expectedNumberOfSubjectsH0 = expectedNumberOfSubjectsH0, expectedNumberOfSubjectsH01 = expectedNumberOfSubjectsH01, expectedNumberOfSubjectsH1 = expectedNumberOfSubjectsH1, rejectPerStage = designCharacteristics$rejectionProbabilities, futilityPerStage = designCharacteristics$futilityProbabilities )) } else { return(list(alpha = fixedSampleSize$alpha, beta = fixedSampleSize$beta, sided = fixedSampleSize$sided, groups = fixedSampleSize$groups, allocationRatioPlanned = fixedSampleSize$allocationRatioPlanned, thetaH0 = fixedSampleSize$thetaH0, pi1 = fixedSampleSize$pi1, pi2 = fixedSampleSize$pi2, normalApproximation = fixedSampleSize$normalApproximation, riskRatio = fixedSampleSize$riskRatio, informationRates = matrix(informationRates, ncol = 1), maxNumberOfSubjects = maxNumberOfSubjects, numberOfSubjects = .getColumnCumSum(numberOfSubjects), numberOfSubjects1 = .getColumnCumSum(numberOfSubjects1), numberOfSubjects2 = .getColumnCumSum(numberOfSubjects2), expectedNumberOfSubjectsH0 = expectedNumberOfSubjectsH0, expectedNumberOfSubjectsH01 = expectedNumberOfSubjectsH01, expectedNumberOfSubjectsH1 = expectedNumberOfSubjectsH1, rejectPerStage = designCharacteristics$rejectionProbabilities, futilityPerStage = designCharacteristics$futilityProbabilities )) } } .getPiecewiseExpStartTimesWithoutLeadingZero <- function(piecewiseSurvivalTime) { if (is.null(piecewiseSurvivalTime) || 
length(piecewiseSurvivalTime) == 0 || all(is.na(piecewiseSurvivalTime))) { return(NA_real_) } if (piecewiseSurvivalTime[1] != 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the first value of 'piecewiseSurvivalTime' (", .arrayToString(piecewiseSurvivalTime), ") must be 0", call. = FALSE) } if (length(piecewiseSurvivalTime) == 1) { return(numeric(0)) } if (length(piecewiseSurvivalTime) < 2) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "length of 'piecewiseSurvivalTime' (", length(piecewiseSurvivalTime), ") must be > 1") } return(piecewiseSurvivalTime[2:length(piecewiseSurvivalTime)]) } .getEventProbabilityFunction <- function(time, piecewiseLambda, piecewiseSurvivalTime = NA_real_, phi, kappa) { if (length(piecewiseLambda) == 1) { if ((kappa != 1) && (phi > 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "Weibull distribution cannot be used together with specified dropout rate (use simulation instead)", call. = FALSE) } return(piecewiseLambda / (piecewiseLambda + phi) * pweibull(time, shape = kappa, scale = 1 / (piecewiseLambda + phi), lower.tail = TRUE, log.p = FALSE)) } if (length(piecewiseSurvivalTime) != length(piecewiseLambda)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "length of 'piecewiseSurvivalTime' (", .arrayToString(piecewiseSurvivalTime), ") must be equal to length of 'piecewiseLambda' (", .arrayToString(piecewiseLambda), ")") } piecewiseSurvivalTime <- .getPiecewiseExpStartTimesWithoutLeadingZero(piecewiseSurvivalTime) if (kappa != 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "Weibull distribution cannot be used for piecewise survival definition", call. = FALSE) } len <- length(piecewiseSurvivalTime) for (i in 1:len) { if (i == 1) { if (time <= piecewiseSurvivalTime[1]) { return(piecewiseLambda[1] / (piecewiseLambda[1] + phi) * (1 - exp(-((piecewiseLambda[1] + phi) * time)))) } } if (i == 2) { cdfPart <- piecewiseLambda[1] / (piecewiseLambda[1] + phi) * (1 - exp(-((piecewiseLambda[1] + phi) * piecewiseSurvivalTime[1]))) if (time <= piecewiseSurvivalTime[2]) { cdfFactor <- piecewiseLambda[1] * piecewiseSurvivalTime[1] cdf <- cdfPart + piecewiseLambda[2] / (piecewiseLambda[2] + phi) * exp(-cdfFactor) * ( exp(-phi*piecewiseSurvivalTime[1]) - exp(-piecewiseLambda[2] * (time - piecewiseSurvivalTime[1]) - phi*time)) return(cdf) } } if (i == 3) { cdfPart <- cdfPart + piecewiseLambda[2] / (piecewiseLambda[2] + phi) * exp(-piecewiseLambda[1] * piecewiseSurvivalTime[1]) * ( exp(-phi*piecewiseSurvivalTime[1]) - exp(-piecewiseLambda[2] * (piecewiseSurvivalTime[2] - piecewiseSurvivalTime[1]) - phi*piecewiseSurvivalTime[2])) if (time <= piecewiseSurvivalTime[3]) { cdfFactor <- piecewiseLambda[1] * piecewiseSurvivalTime[1] + piecewiseLambda[2] * (piecewiseSurvivalTime[2] - piecewiseSurvivalTime[1]) cdf <- cdfPart + piecewiseLambda[3] / (piecewiseLambda[3] + phi) * exp(-cdfFactor) * ( exp(-phi*piecewiseSurvivalTime[2]) - exp(-piecewiseLambda[3] * (time - piecewiseSurvivalTime[2]) - phi*time)) return(cdf) } } if (i > 3) { cdfFactor <- piecewiseLambda[1] * piecewiseSurvivalTime[1] + sum(piecewiseLambda[2:(i - 2)] * (piecewiseSurvivalTime[2:(i - 2)] - piecewiseSurvivalTime[1:(i - 3)])) cdfPart <- cdfPart + piecewiseLambda[i - 1] / (piecewiseLambda[i - 1] + phi) * exp(-cdfFactor) * ( exp(-phi*piecewiseSurvivalTime[i - 2]) - exp(-piecewiseLambda[i - 1] * (piecewiseSurvivalTime[i - 1] - piecewiseSurvivalTime[i - 2]) - phi*piecewiseSurvivalTime[i - 1])) if (time <= piecewiseSurvivalTime[i]) { cdfFactor <- piecewiseLambda[1] * piecewiseSurvivalTime[1] + sum(piecewiseLambda[2:(i - 1)] * 
(piecewiseSurvivalTime[2:(i - 1)] - piecewiseSurvivalTime[1:(i - 2)]))
                cdf <- cdfPart + piecewiseLambda[i] / (piecewiseLambda[i] + phi) * exp(-cdfFactor) * (exp(-phi * piecewiseSurvivalTime[i - 1]) - exp(-piecewiseLambda[i] * (time - piecewiseSurvivalTime[i - 1]) - phi * time))
                return(cdf)
            }
        }
    }
    if (len == 1) {
        cdfPart <- piecewiseLambda[1] / (piecewiseLambda[1] + phi) * (1 - exp(-((piecewiseLambda[1] + phi) * piecewiseSurvivalTime[1])))
    } else if (len == 2) {
        cdfFactor <- piecewiseLambda[1] * piecewiseSurvivalTime[1]
        cdfPart <- cdfPart + piecewiseLambda[len] / (piecewiseLambda[len] + phi) * exp(-cdfFactor) * (exp(-phi * piecewiseSurvivalTime[len - 1]) - exp(-piecewiseLambda[len] * (piecewiseSurvivalTime[len] - piecewiseSurvivalTime[len - 1]) - phi * piecewiseSurvivalTime[len]))
    } else {
        cdfFactor <- piecewiseLambda[1] * piecewiseSurvivalTime[1] + sum(piecewiseLambda[2:(len - 1)] * (piecewiseSurvivalTime[2:(len - 1)] - piecewiseSurvivalTime[1:(len - 2)]))
        cdfPart <- cdfPart + piecewiseLambda[len] / (piecewiseLambda[len] + phi) * exp(-cdfFactor) * (exp(-phi * piecewiseSurvivalTime[len - 1]) - exp(-piecewiseLambda[len] * (piecewiseSurvivalTime[len] - piecewiseSurvivalTime[len - 1]) - phi * piecewiseSurvivalTime[len]))
    }
    if (len == 1) {
        cdfFactor <- piecewiseLambda[1] * piecewiseSurvivalTime[1]
    } else {
        cdfFactor <- cdfFactor + piecewiseLambda[len] * (piecewiseSurvivalTime[len] - piecewiseSurvivalTime[len - 1])
    }
    cdf <- cdfPart + piecewiseLambda[len + 1] / (piecewiseLambda[len + 1] + phi) * exp(-cdfFactor) * (exp(-phi * piecewiseSurvivalTime[len]) - exp(-piecewiseLambda[len + 1] * (time - piecewiseSurvivalTime[len]) - phi * time))
    return(cdf)
}

.getEventProbabilityFunctionVec <- function(x, piecewiseLambda, piecewiseSurvivalTime, phi, kappa) {
    # vectorize over the time values in 'x'
    vapply(x, .getEventProbabilityFunction, numeric(1), piecewiseLambda = piecewiseLambda, piecewiseSurvivalTime = piecewiseSurvivalTime, phi = phi, kappa = kappa)
}

#' @title
#' Get Event Probabilities
#'
#' @description
#' Returns the event probabilities for specified parameters at given time vector.
#'
#' @param time A numeric vector with time values.
#' @param lambda1 The assumed hazard rate in the treatment group, there is no default.
#' lambda1 can also be used to define piecewise exponentially distributed survival times
#' (see details).
#' @param lambda2 The assumed hazard rate in the reference group, there is no default.
#' lambda2 can also be used to define piecewise exponentially distributed survival times
#' (see details).
#' @param piecewiseSurvivalTime A vector that specifies the time intervals for the piecewise
#' definition of the exponential survival time cumulative distribution function (see details).
#' @param hazardRatio The vector of hazard ratios under consideration.
#' If the event or hazard rates in both treatment groups are defined, the hazard ratio
#' need not be specified as it is calculated.
#' @param kappa The shape parameter of the Weibull distribution, default is \code{1}.
#' The Weibull distribution cannot be used for the piecewise definition of the
#' survival time distribution.
#' Note that the parameters \code{shape} and \code{scale} in \code{\link[stats]{Weibull}}
#' are equivalent to \code{kappa} and \code{1 / lambda}, respectively, in rpact.
#' @param allocationRatioPlanned The planned allocation ratio, default is \code{1}.
#' If \code{allocationRatioPlanned = 0} is entered, the optimal allocation ratio yielding the
#' smallest number of subjects is determined.
#' @param accrualTime The assumed accrual time intervals for the study, default is
#' \code{c(0, 12)} (see details).
#' @param accrualIntensity A vector of accrual intensities, default is the relative
#' intensity \code{0.1} (see details).
#' @param dropoutRate1 The assumed drop-out rate in the treatment group, default is \code{0}.
#' @param dropoutRate2 The assumed drop-out rate in the control group, default is \code{0}.
#' @param dropoutTime The assumed time for drop-out rates in the control and the
#' treatment group, default is \code{12}.
#' @param maxNumberOfSubjects If \code{maxNumberOfSubjects > 0} is specified,
#' the end of accrual at the specified \code{accrualIntensity} for the specified number of
#' subjects is determined, or \code{accrualIntensity} is calculated at a fixed end of accrual.
#' @param ... Ensures that all arguments are named and
#' that a warning will be displayed if unknown arguments are passed.
#'
#' @details
#'
#' For details of the parameters see \code{\link{getSampleSizeSurvival}}.
#'
#' @return Returns an \code{\link{EventProbabilities}} object.
#'
#' @keywords internal
#'
#' @export
#'
getEventProbabilities <- function(time, ...,
        accrualTime = C_ACCRUAL_TIME_DEFAULT,
        accrualIntensity = C_ACCRUAL_INTENSITY_DEFAULT,
        kappa = 1,
        piecewiseSurvivalTime = NA_real_,
        lambda2 = NA_real_,
        lambda1 = NA_real_,
        allocationRatioPlanned = 1,
        hazardRatio = NA_real_,
        dropoutRate1 = C_DROP_OUT_RATE_1_DEFAULT,
        dropoutRate2 = C_DROP_OUT_RATE_2_DEFAULT,
        dropoutTime = C_DROP_OUT_TIME_DEFAULT,
        maxNumberOfSubjects = NA_real_) {
    .assertIsNumericVector(time, "time")
    .assertIsValidAllocationRatioPlannedSampleSize(allocationRatioPlanned, maxNumberOfSubjects)
    .assertIsValidKappa(kappa)
    if (!is.na(dropoutTime) && dropoutTime <= 0) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dropoutTime' (", dropoutTime, ") must be > 0", call.
= FALSE) } if (dropoutRate1 < 0 || dropoutRate1 >= 1) { stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'dropoutRate1' (", dropoutRate1, ") is out of bounds [0; 1)") } if (dropoutRate2 < 0 || dropoutRate2 >= 1) { stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'dropoutRate2' (", dropoutRate2, ") is out of bounds [0; 1)") } accrualSetup <- getAccrualTime(accrualTime = accrualTime, accrualIntensity = accrualIntensity, maxNumberOfSubjects = maxNumberOfSubjects) accrualTime <- accrualSetup$.getAccrualTimeWithoutLeadingZero() accrualIntensity <- accrualSetup$accrualIntensity setting <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = piecewiseSurvivalTime, lambda2 = lambda2, lambda1 = lambda1, hazardRatio = hazardRatio, kappa = kappa) #if (setting$.isLambdaBased()) { piecewiseSurvivalTime <- setting$piecewiseSurvivalTime lambda2 <- setting$lambda2 lambda1 <- setting$lambda1 hazardRatio <- setting$hazardRatio #} phi <- -log(1 - c(dropoutRate1, dropoutRate2)) / dropoutTime if (length(accrualTime) != length(accrualIntensity)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "length of 'accrualTime' (", (length(accrualTime) + 1), ") must be equal to length of 'accrualIntensity' (", length(accrualIntensity), ")") } if (any(accrualIntensity <= 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all values of 'accrualIntensity' must be > 0") } if (any(accrualTime <= 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all values of 'accrualTime' must be > 0") } if (kappa != 1 && any(phi > 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "for Weibull distribution (kappa != 1) drop-out rates (phi) cannot be specified") } if (any(phi < 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all drop-out rates (phi) must be >= 0") } .assertIsNumericVector(lambda2, "lambda2") if (any(lambda2 <= 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all rates (lambda2) must be > 0") } eventProbabilities <- EventProbabilities( .piecewiseSurvivalTime = setting, .accrualTime = accrualSetup, time = time, accrualTime = accrualTime, accrualIntensity = accrualIntensity, kappa = kappa, piecewiseSurvivalTime = piecewiseSurvivalTime, lambda1 = lambda1, lambda2 = lambda2, allocationRatioPlanned = allocationRatioPlanned, hazardRatio = hazardRatio, dropoutRate1 = dropoutRate1, dropoutRate2 = dropoutRate2, dropoutTime = dropoutTime, maxNumberOfSubjects = maxNumberOfSubjects) eventProbabilities$.setParameterType("time", C_PARAM_GENERATED) eventProbabilities$.setParameterType("accrualTime", accrualSetup$.getParameterType("accrualTime")) eventProbabilities$.setParameterType("accrualIntensity", accrualSetup$.getParameterType("accrualIntensity")) eventProbabilities$.setParameterType("kappa", setting$.getParameterType("kappa")) eventProbabilities$.setParameterType("piecewiseSurvivalTime", setting$.getParameterType("piecewiseSurvivalTime")) eventProbabilities$.setParameterType("lambda1", setting$.getParameterType("lambda1")) eventProbabilities$.setParameterType("lambda2", setting$.getParameterType("lambda2")) .setValueAndParameterType(eventProbabilities, "allocationRatioPlanned", allocationRatioPlanned, 1) eventProbabilities$.setParameterType("hazardRatio", setting$.getParameterType("hazardRatio")) .setValueAndParameterType(eventProbabilities, "dropoutRate1", dropoutRate1, C_DROP_OUT_RATE_1_DEFAULT) .setValueAndParameterType(eventProbabilities, "dropoutRate2", dropoutRate2, C_DROP_OUT_RATE_2_DEFAULT) .setValueAndParameterType(eventProbabilities, "dropoutTime", dropoutTime, C_DROP_OUT_TIME_DEFAULT) eventProbabilities$.setParameterType("maxNumberOfSubjects", 
accrualSetup$.getParameterType("maxNumberOfSubjects"))

	eventProbabilities$overallEventProbabilities <- numeric(0)
	eventProbabilities$eventProbabilities1 <- numeric(0)
	eventProbabilities$eventProbabilities2 <- numeric(0)
	for (timeValue in time) {
		eventProbs <- .getEventProbabilitiesGroupwise(timeValue,
			accrualSetup$.getAccrualTimeWithoutLeadingZero(), accrualSetup$accrualIntensity,
			lambda2, lambda1, piecewiseSurvivalTime, phi, kappa,
			allocationRatioPlanned, hazardRatio)
		eventProbabilities$overallEventProbabilities <- c(eventProbabilities$overallEventProbabilities,
			.getEventProbabilitiesOverall(eventProbs, allocationRatioPlanned))
		eventProbabilities$eventProbabilities1 <- c(eventProbabilities$eventProbabilities1, eventProbs[1])
		eventProbabilities$eventProbabilities2 <- c(eventProbabilities$eventProbabilities2, eventProbs[2])
	}
	eventProbabilities$.setParameterType("overallEventProbabilities", C_PARAM_GENERATED)
	eventProbabilities$.setParameterType("eventProbabilities1", C_PARAM_GENERATED)
	eventProbabilities$.setParameterType("eventProbabilities2", C_PARAM_GENERATED)

	return(eventProbabilities)
}

#' @title
#' Get Number Of Subjects
#'
#' @description
#' Returns the number of recruited subjects at the given time vector.
#'
#' @param time A numeric vector with time values.
#' @param accrualTime The assumed accrual time intervals for the study, default is
#' \code{c(0, 12)} (see details).
#' @param accrualIntensity A vector of accrual intensities, default is the relative
#' intensity \code{0.1} (see details).
#' @param maxNumberOfSubjects If \code{maxNumberOfSubjects > 0} is specified,
#' the end of accrual at specified \code{accrualIntensity} for the specified number of subjects is determined,
#' or \code{accrualIntensity} is calculated at fixed end of accrual.
#' @param ... Ensures that all arguments are named and
#' that a warning will be displayed if unknown arguments are passed.
#'
#' @details
#'
#' For details of the parameters \code{accrualTime} and \code{accrualIntensity}
#' see \code{\link{getSampleSizeSurvival}}.
#'
#' @return Returns a \code{\link{NumberOfSubjects}} object.
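#'
#' @examples
#' # A minimal usage sketch; the accrual values below are illustrative, not defaults:
#' # 20 subjects/month are recruited in months 0-12 and 30 subjects/month in
#' # months 12-24 (600 subjects in total); the result contains the cumulative
#' # number of recruited subjects at months 6, 12, and 24.
#' getNumberOfSubjects(time = c(6, 12, 24),
#'     accrualTime = c(0, 12, 24), accrualIntensity = c(20, 30))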
#' #' @keywords internal #' #' @export #' getNumberOfSubjects <- function(time, ..., accrualTime = C_ACCRUAL_TIME_DEFAULT, accrualIntensity = C_ACCRUAL_INTENSITY_DEFAULT, maxNumberOfSubjects = NA_real_) { .assertIsNumericVector(time, "time") accrualSetup <- getAccrualTime(accrualTime = accrualTime, accrualIntensity = accrualIntensity, maxNumberOfSubjects = maxNumberOfSubjects) accrualTime <- accrualSetup$.getAccrualTimeWithoutLeadingZero() accrualIntensity <- accrualSetup$accrualIntensity maxNumberOfSubjects <- accrualSetup$maxNumberOfSubjects if (length(accrualTime) != length(accrualIntensity)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "length of 'accrualTime' (", length(accrualTime), ") must be equal to length of 'accrualIntensity' (", length(accrualIntensity), ")") } if (any(accrualIntensity <= 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all values of 'accrualIntensity' must be > 0") } if (all(accrualIntensity < 1)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "at least one value of 'accrualIntensity' must be >= 1") } if (any(accrualTime <= 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all values of 'accrualTime' must be > 0") } numberOfSubjects <- .getNumberOfSubjects(time, accrualTime, accrualIntensity, maxNumberOfSubjects) result <- NumberOfSubjects( .accrualTime = accrualSetup, time = time, accrualTime = accrualTime, accrualIntensity = accrualIntensity, maxNumberOfSubjects = maxNumberOfSubjects, numberOfSubjects = numberOfSubjects ) result$.setParameterType("time", C_PARAM_GENERATED) result$.setParameterType("accrualTime", accrualSetup$.getParameterType("accrualTime")) result$.setParameterType("accrualIntensity", accrualSetup$.getParameterType("accrualIntensity")) result$.setParameterType("maxNumberOfSubjects", accrualSetup$.getParameterType("maxNumberOfSubjects")) result$.setParameterType("numberOfSubjects", C_PARAM_GENERATED) return(result) } .getLambda <- function(groupNumber, time, piecewiseSurvivalTime, lambda2, lambda1, hazardRatio, kappa) { if (groupNumber == 1) { if (!any(is.na(lambda1))) { return(lambda1) } lambda2 <- lambda2 * hazardRatio^(1/kappa) } return(lambda2) } .getEventProbabilitiesGroupwise <- function(time, accrualTimeVector, accrualIntensity, lambda2, lambda1, piecewiseSurvivalTime, phi, kappa, allocationRatioPlanned, hazardRatio) { .assertIsSingleNumber(time, "time") if (length(accrualTimeVector) > 1 && accrualTimeVector[1] == 0) { accrualTimeVector <- accrualTimeVector[2:length(accrualTimeVector)] } accrualTimeVectorLength <- length(accrualTimeVector) densityIntervals <- accrualTimeVector if (accrualTimeVectorLength > 1) { densityIntervals[2:accrualTimeVectorLength] <- accrualTimeVector[2:accrualTimeVectorLength] - accrualTimeVector[1:(accrualTimeVectorLength - 1)] } if (length(densityIntervals) > 1 && length(accrualIntensity) > 1 && length(densityIntervals) != length(accrualIntensity)) { stop("'densityIntervals' (", .arrayToString(densityIntervals), ") and 'accrualIntensity' (", .arrayToString(accrualIntensity), ") must have same length") } densityVector <- accrualIntensity / sum(densityIntervals * accrualIntensity) eventProbs <- rep(NA_real_, 2) for (k in 1:accrualTimeVectorLength) { if (time <= accrualTimeVector[k]) { for (groupNumber in c(1, 2)) { # two groups: 1 = treatment, 2 = control lambdaTemp <- .getLambda(groupNumber, time, piecewiseSurvivalTime, lambda2, lambda1, hazardRatio, kappa) inner <- function(x) { .getEventProbabilityFunctionVec(x, piecewiseLambda = lambdaTemp, piecewiseSurvivalTime, phi[groupNumber], kappa) } timeValue1 <- 0 if (k > 1) { 
timeValue1 <- time - accrualTimeVector[1] } eventProbs[groupNumber] <- densityVector[1] * integrate(inner, timeValue1, time)$value if (k > 2) { for (j in 2:(k - 1)) { eventProbs[groupNumber] <- eventProbs[groupNumber] + densityVector[j] * integrate(inner, time - accrualTimeVector[j], time - accrualTimeVector[j - 1])$value } } if (k > 1) { eventProbs[groupNumber] <- eventProbs[groupNumber] + densityVector[k] * integrate(inner, 0, time - accrualTimeVector[k - 1])$value } } return(eventProbs) } } for (groupNumber in c(1, 2)) { lambdaTemp <- .getLambda(groupNumber, time, piecewiseSurvivalTime, lambda2, lambda1, hazardRatio, kappa) inner <- function(x) { .getEventProbabilityFunctionVec(x, piecewiseLambda = lambdaTemp, piecewiseSurvivalTime, phi[groupNumber], kappa) } eventProbs[groupNumber] <- densityVector[1] * integrate(inner, time - accrualTimeVector[1], time)$value if (accrualTimeVectorLength > 1) { for (j in (2:accrualTimeVectorLength)) { eventProbs[groupNumber] <- eventProbs[groupNumber] + densityVector[j] * integrate(inner, time - accrualTimeVector[j], time - accrualTimeVector[j - 1])$value } } } return(eventProbs) } .getEventProbabilitiesOverall <- function(eventProbs, allocationRatioPlanned) { return((allocationRatioPlanned * eventProbs[1] + eventProbs[2]) / (1 + allocationRatioPlanned)) } .getEventProbabilities <- function(time, accrualTimeVector, accrualIntensity, lambda2, lambda1, piecewiseSurvivalTime, phi, kappa, allocationRatioPlanned, hazardRatio) { eventProbs <- .getEventProbabilitiesGroupwise(time, accrualTimeVector, accrualIntensity, lambda2, lambda1, piecewiseSurvivalTime, phi, kappa, allocationRatioPlanned, hazardRatio) return(.getEventProbabilitiesOverall(eventProbs, allocationRatioPlanned)) } .getEventsFixed <- function(typeOfComputation = c("Schoenfeld", "Freedman", "HsiehFreedman"), twoSidedPower, alpha, beta, sided, hazardRatio, thetaH0, allocationRatioPlanned) { typeOfComputation <- match.arg(typeOfComputation) if (typeOfComputation == "Schoenfeld") { eventsFixed <- (stats::qnorm(1 - alpha / sided) + stats::qnorm(1 - beta))^2 / (log(hazardRatio) - log(thetaH0))^2 * (1 + allocationRatioPlanned)^2 / allocationRatioPlanned if (twoSidedPower && (sided == 2)) { up <- 2*eventsFixed eventsFixed <- .getOneDimensionalRoot( function(n) { return(stats::pnorm(stats::qnorm(1 - alpha / 2) - sqrt(n) * (log(hazardRatio) - log(thetaH0)) * sqrt(allocationRatioPlanned) / (1 + allocationRatioPlanned)) - stats::pnorm(-stats::qnorm(1 - alpha / 2) - sqrt(n) * (log(hazardRatio) - log(thetaH0)) * sqrt(allocationRatioPlanned) / (1 + allocationRatioPlanned)) - beta) }, lower = 0.001, upper = up, tolerance = 1e-04 ) } return(eventsFixed) } if (typeOfComputation == "Freedman") { eventsFixed <- (stats::qnorm(1 - alpha / sided) + stats::qnorm(1 - beta))^2 * (1 + hazardRatio * allocationRatioPlanned)^2 / (1 - hazardRatio)^2 / allocationRatioPlanned if (twoSidedPower && (sided == 2)) { up <- 2*eventsFixed eventsFixed <- .getOneDimensionalRoot( function(n) { return(stats::pnorm(stats::qnorm(1 - alpha / 2) - sqrt(n) * sqrt(allocationRatioPlanned) * (1 - hazardRatio) / (1 + allocationRatioPlanned*hazardRatio)) - stats::pnorm(-stats::qnorm(1 - alpha / 2) - sqrt(n) * sqrt(allocationRatioPlanned) * (1 - hazardRatio) / (1 + allocationRatioPlanned*hazardRatio)) - beta) }, lower = 0.001, upper = up, tolerance = 1e-04 ) } return(eventsFixed) } if (typeOfComputation == "HsiehFreedman") { eventsFixed <- (stats::qnorm(1 - alpha / sided) + stats::qnorm(1 - beta))^2 * (1 + hazardRatio)^2 / (1 - hazardRatio)^2* (1 + 
allocationRatioPlanned)^2 / (4 * allocationRatioPlanned) if (twoSidedPower && sided == 2) { up <- 2 * eventsFixed eventsFixed <- .getOneDimensionalRoot( function(n) { return(stats::pnorm(stats::qnorm(1 - alpha / 2) - sqrt(n) * 2 * sqrt(allocationRatioPlanned) / (1 + allocationRatioPlanned) * (1 - hazardRatio) /(1 + hazardRatio)) - stats::pnorm(-stats::qnorm(1 - alpha / 2) - sqrt(n) * 2 * sqrt(allocationRatioPlanned)/(1 + allocationRatioPlanned) * (1 - hazardRatio) /(1 + hazardRatio)) - beta) }, lower = 0.001, upper = up, tolerance = 1e-04 ) } return(eventsFixed) } } .getSampleSizeFixedSurvival <- function(designPlan) { alpha <- designPlan$getAlpha() beta <- designPlan$getBeta() sided <- designPlan$getSided() twoSidedPower <- designPlan$getTwoSidedPower() typeOfComputation <- designPlan$typeOfComputation thetaH0 <- designPlan$thetaH0 pi1 <- designPlan$pi1 pi2 <- designPlan$pi2 allocationRatioPlanned <- designPlan$allocationRatioPlanned accountForObservationTimes <- designPlan$accountForObservationTimes accrualTime <- designPlan$accrualTime kappa <- designPlan$kappa piecewiseSurvivalTime <- designPlan$piecewiseSurvivalTime maxNumberOfSubjects <- designPlan$maxNumberOfSubjects hazardRatio <- designPlan$hazardRatio .assertIsValidHazardRatio(hazardRatio, thetaH0) if (designPlan$.piecewiseSurvivalTime$.isLambdaBased()) { numberOfResults <- length(hazardRatio) } else { numberOfResults <- length(pi1) } designPlan$eventsFixed <- rep(NA_real_, numberOfResults) # number of events designPlan$nFixed <- rep(NA_real_, numberOfResults) # number of subjects designPlan$omega <- rep(NA_real_, numberOfResults) # probability of an event calculateAllocationRatioPlanned <- FALSE if (allocationRatioPlanned == 0) { allocationRatioPlannedVec <- rep(NA_real_, numberOfResults) calculateAllocationRatioPlanned <- TRUE designPlan$optimumAllocationRatio <- TRUE designPlan$.setParameterType("optimumAllocationRatio", C_PARAM_USER_DEFINED) } userDefinedMaxNumberOfSubjects <- .isUserDefinedMaxNumberOfSubjects(designPlan) if (userDefinedMaxNumberOfSubjects && allocationRatioPlanned == 0) { stop(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "determination of optimum allocation ('allocationRatioPlanned' = 0) not possible ", "for given 'maxNumberOfSubjects' (", designPlan$maxNumberOfSubjects, ")") } if (userDefinedMaxNumberOfSubjects) { timeVec <- rep(NA_real_, numberOfResults) } designPlan$calculateFollowUpTime <- FALSE lambda1 <- designPlan$lambda1 if (designPlan$.piecewiseSurvivalTime$piecewiseSurvivalEnabled) { lambda1 <- rep(NA_real_, numberOfResults) } for (i in 1:numberOfResults) { phi <- -c(log(1 - designPlan$dropoutRate1), log(1 - designPlan$dropoutRate2)) / designPlan$dropoutTime if (!userDefinedMaxNumberOfSubjects) { if (calculateAllocationRatioPlanned) { # allocationRatioPlanned = 0 provides optimum sample size allocationRatioPlanned <- stats::optimize(function(x) { numberEvents <- .getEventsFixed( typeOfComputation = typeOfComputation, twoSidedPower = twoSidedPower, alpha = alpha, beta = beta, sided = sided, hazardRatio = hazardRatio[i], thetaH0 = thetaH0, allocationRatioPlanned = x) if (!accountForObservationTimes) { probEvent <- (x * pi1[i] + pi2) / (1 + x) } else { probEvent <- .getEventProbabilities( time = accrualTime[length(accrualTime)] + designPlan$followUpTime, accrualTimeVector = accrualTime, accrualIntensity = designPlan$accrualIntensity, lambda2 = designPlan$lambda2, lambda1 = lambda1[i], piecewiseSurvivalTime = piecewiseSurvivalTime, phi = phi, kappa = kappa, allocationRatioPlanned = x, hazardRatio = 
hazardRatio[i]) } return(numberEvents / probEvent) }, interval = c(0, 5), tol = 0.0001)$minimum allocationRatioPlannedVec[i] <- allocationRatioPlanned } designPlan$eventsFixed[i] <- .getEventsFixed( typeOfComputation = typeOfComputation, twoSidedPower = twoSidedPower, alpha = alpha, beta = beta, sided = sided, hazardRatio = hazardRatio[i], thetaH0 = thetaH0, allocationRatioPlanned = allocationRatioPlanned) if (!accountForObservationTimes) { designPlan$omega[i] <- (allocationRatioPlanned * pi1[i] + pi2) / (1 + allocationRatioPlanned) } else { designPlan$omega[i] <- .getEventProbabilities( accrualTime[length(accrualTime)] + designPlan$followUpTime, accrualTimeVector = accrualTime, accrualIntensity = designPlan$accrualIntensity, lambda2 = designPlan$lambda2, lambda1 = lambda1[i], piecewiseSurvivalTime = piecewiseSurvivalTime, phi = phi, kappa = kappa, allocationRatioPlanned = allocationRatioPlanned, hazardRatio = hazardRatio[i]) } designPlan$.setParameterType("omega", C_PARAM_GENERATED) designPlan$nFixed[i] <- designPlan$eventsFixed[i] / designPlan$omega[i] } else { if (length(maxNumberOfSubjects) > 1) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "length of user defined 'maxNumberOfSubjects' (", .arrayToString(maxNumberOfSubjects), ") must be 1") } designPlan$calculateFollowUpTime <- TRUE designPlan$eventsFixed[i] <- .getEventsFixed( typeOfComputation = typeOfComputation, twoSidedPower = twoSidedPower, alpha = alpha, beta = beta, sided = sided, hazardRatio = hazardRatio[i], thetaH0 = thetaH0, allocationRatioPlanned = allocationRatioPlanned) designPlan$nFixed[i] <- maxNumberOfSubjects if (designPlan$eventsFixed[i] > maxNumberOfSubjects) { if (length(hazardRatio) > 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sprintf(paste0("'maxNumberOfSubjects' (%s) is smaller than the number ", "of events (%.3f) at index %s (hazard ratio = %.3f)"), maxNumberOfSubjects, designPlan$eventsFixed[i], i, hazardRatio[i])) } else { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sprintf(paste0("'maxNumberOfSubjects' (%s) is smaller than the number ", "of events (%.3f)"), maxNumberOfSubjects, designPlan$eventsFixed[i])) } } up <- 2 iterate <- 1 while (designPlan$eventsFixed[i] / .getEventProbabilities( up, accrualTimeVector = accrualTime, accrualIntensity = designPlan$accrualIntensity, lambda2 = designPlan$lambda2, lambda1 = lambda1[i], piecewiseSurvivalTime = piecewiseSurvivalTime, phi = phi, kappa = kappa, allocationRatioPlanned = allocationRatioPlanned, hazardRatio = hazardRatio[i]) > maxNumberOfSubjects) { up <- 2 * up iterate <- iterate + 1 if (iterate > 50) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the number of subjects is too small to reach maximum number of events ", "(presumably due to drop-out rates)") } } timeVec[i] <- .getOneDimensionalRoot(function(x) { designPlan$eventsFixed[i] / .getEventProbabilities(x, accrualTimeVector = accrualTime, accrualIntensity = designPlan$accrualIntensity, lambda2 = designPlan$lambda2, lambda1 = lambda1[i], piecewiseSurvivalTime = piecewiseSurvivalTime, phi = phi, kappa = kappa, allocationRatioPlanned = allocationRatioPlanned, hazardRatio = hazardRatio[i]) - maxNumberOfSubjects }, lower = 0, upper = up, tolerance = 1E-6) if (!is.na(timeVec[i])) { designPlan$omega[i] <- .getEventProbabilities(timeVec[i], accrualTimeVector = accrualTime, accrualIntensity = designPlan$accrualIntensity, lambda2 = designPlan$lambda2, lambda1 = lambda1[i], piecewiseSurvivalTime = piecewiseSurvivalTime, phi = phi, kappa = kappa, allocationRatioPlanned = allocationRatioPlanned, hazardRatio = hazardRatio[i]) 
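				# At the solved total study time 'timeVec[i]', 'omega' is the probability that a
				# recruited subject has had an event, i.e., eventsFixed = omega * maxNumberOfSubjects;
				# the follow-up time reported further below is this total time minus the accrual duration.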
designPlan$.setParameterType("omega", C_PARAM_GENERATED) } } } if (calculateAllocationRatioPlanned) { allocationRatioPlanned <- allocationRatioPlannedVec designPlan$allocationRatioPlanned <- allocationRatioPlanned designPlan$.setParameterType("allocationRatioPlanned", C_PARAM_GENERATED) } if (userDefinedMaxNumberOfSubjects) { designPlan$followUpTime <- timeVec - accrualTime[length(accrualTime)] designPlan$.setParameterType("followUpTime", C_PARAM_GENERATED) } designPlan$nFixed2 <- designPlan$nFixed / (1 + allocationRatioPlanned) designPlan$nFixed1 <- designPlan$nFixed2 * allocationRatioPlanned designPlan$numberOfSubjects1 <- matrix(designPlan$nFixed1, nrow = 1) designPlan$numberOfSubjects2 <- matrix(designPlan$nFixed2, nrow = 1) if (!designPlan$.piecewiseSurvivalTime$piecewiseSurvivalEnabled) { eventRatio <- allocationRatioPlanned * pi1 / pi2 } else { eventRatio <- NA_real_ } # Fixed designPlan$hazardRatio <- hazardRatio designPlan$expectedEventsH1 <- designPlan$eventsFixed designPlan$maxNumberOfSubjects <- designPlan$nFixed designPlan$numberOfSubjects <- matrix(designPlan$nFixed, nrow = 1) designPlan$.setParameterType("calculateFollowUpTime", C_PARAM_GENERATED) designPlan$.setParameterType("eventsFixed", C_PARAM_NOT_APPLICABLE) designPlan$.setParameterType("nFixed1", C_PARAM_GENERATED) designPlan$.setParameterType("nFixed2", C_PARAM_GENERATED) designPlan$.setParameterType("nFixed", C_PARAM_GENERATED) if (designPlan$accountForObservationTimes) { designPlan$analysisTime <- matrix(accrualTime[length(accrualTime)] + designPlan$followUpTime, nrow = 1) designPlan$.setParameterType("analysisTime", C_PARAM_GENERATED) } return(designPlan) } # note that fixed sample size must be calculated before on 'designPlan' .getSampleSizeSequentialSurvival <- function(designPlan, designCharacteristics) { if (designPlan$.piecewiseSurvivalTime$.isLambdaBased()) { numberOfResults <- length(designPlan$hazardRatio) } else { numberOfResults <- length(designPlan$pi1) } kMax <- designCharacteristics$.design$kMax designPlan$eventsPerStage <- matrix(NA_real_, kMax, numberOfResults) analysisTime <- matrix(NA_real_, kMax, numberOfResults) numberOfSubjects <- matrix(NA_real_, kMax, numberOfResults) designPlan$expectedEventsH0 <- rep(NA_real_, numberOfResults) designPlan$expectedEventsH01 <- rep(NA_real_, numberOfResults) designPlan$expectedEventsH1 <- rep(NA_real_, numberOfResults) expectedNumberOfSubjectsH1 <- rep(NA_real_, numberOfResults) studyDurationH1 <- rep(NA_real_, numberOfResults) designPlan$omega <- rep(NA_real_, numberOfResults) informationRates <- designCharacteristics$information / designCharacteristics$shift lambda1 <- designPlan$lambda1 if (designPlan$.piecewiseSurvivalTime$piecewiseSurvivalEnabled) { lambda1 <- rep(NA_real_, numberOfResults) } if (designPlan$accountForObservationTimes && designPlan$calculateFollowUpTime) { designPlan$followUpTime <- rep(NA_real_, numberOfResults) } for (i in 1:numberOfResults) { designPlan$eventsPerStage[, i] <- designPlan$eventsFixed[i] * informationRates * designCharacteristics$inflationFactor if (!designPlan$accountForObservationTimes) { if (length(designPlan$allocationRatioPlanned) > 1) { allocationRatioPlanned <- designPlan$allocationRatioPlanned[i] } else { allocationRatioPlanned <- designPlan$allocationRatioPlanned } designPlan$omega[i] <- (allocationRatioPlanned * designPlan$pi1[i] + designPlan$pi2) / (1 + allocationRatioPlanned) designPlan$.setParameterType("omega", C_PARAM_GENERATED) numberOfSubjects[kMax, i] <- designPlan$eventsPerStage[kMax, i] / 
designPlan$omega[i] } else { phi <- -c(log(1 - designPlan$dropoutRate1), log(1 - designPlan$dropoutRate2)) / designPlan$dropoutTime if (designPlan$calculateFollowUpTime) { if (designPlan$eventsPerStage[kMax, i] > designPlan$maxNumberOfSubjects[i]) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sprintf(paste0("the number of subjects (%s) is smaller than the number ", "of events (%s) at stage %s"), designPlan$maxNumberOfSubjects[i], designPlan$eventsPerStage[kMax, i], i)) } up <- 2 iterate <- 1 while (designPlan$eventsPerStage[kMax, i] / .getEventProbabilities(up, accrualTimeVector = designPlan$accrualTime, accrualIntensity = designPlan$accrualIntensity, lambda2 = designPlan$lambda2, lambda1 = lambda1[i], piecewiseSurvivalTime = designPlan$piecewiseSurvivalTime, phi = phi, kappa = designPlan$kappa, allocationRatioPlanned = designPlan$allocationRatioPlanned, hazardRatio = designPlan$hazardRatio[i]) > designPlan$maxNumberOfSubjects[i]) { up <- 2 * up iterate <- iterate + 1 if (iterate > 50) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the number of subjects is too small to reach maximum number of events ", "(presumably due to drop-out rates)") } } totalTime <- .getOneDimensionalRoot(function(x) { designPlan$eventsPerStage[kMax, i] / designPlan$maxNumberOfSubjects[i] - .getEventProbabilities(x, accrualTimeVector = designPlan$accrualTime, accrualIntensity = designPlan$accrualIntensity, lambda2 = designPlan$lambda2, lambda1 = lambda1[i], piecewiseSurvivalTime = designPlan$piecewiseSurvivalTime, phi = phi, kappa = designPlan$kappa, allocationRatioPlanned = designPlan$allocationRatioPlanned, hazardRatio = designPlan$hazardRatio[i]) }, lower = 0, upper = up, tolerance = 1E-6) # analysis times for (j in 1:kMax) { analysisTime[j, i] <- .getOneDimensionalRoot(function(x) { designPlan$eventsPerStage[j, i] / designPlan$maxNumberOfSubjects[i] - .getEventProbabilities(x, accrualTimeVector = designPlan$accrualTime, accrualIntensity = designPlan$accrualIntensity, lambda2 = designPlan$lambda2, lambda1 = lambda1[i], piecewiseSurvivalTime = designPlan$piecewiseSurvivalTime, phi = phi, kappa = designPlan$kappa, allocationRatioPlanned = designPlan$allocationRatioPlanned, hazardRatio = designPlan$hazardRatio[i]) }, lower = 0, upper = totalTime, tolerance = 1E-6, acceptResultsOutOfTolerance = TRUE) } analysisTime[kMax, i] <- totalTime designPlan$followUpTime[i] <- totalTime - designPlan$accrualTime[length(designPlan$accrualTime)] numberOfSubjects[, i] <- .getNumberOfSubjects(analysisTime[, i], designPlan$accrualTime, designPlan$accrualIntensity, designPlan$maxNumberOfSubjects[i]) } else { if (length(designPlan$allocationRatioPlanned) > 1) { allocationRatioPlanned <- designPlan$allocationRatioPlanned[i] } else { allocationRatioPlanned <- designPlan$allocationRatioPlanned } if (is.na(designPlan$followUpTime)) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'followUpTime' must be defined because 'calculateFollowUpTime' = ", designPlan$calculateFollowUpTime) } designPlan$omega[i] <- .getEventProbabilities( designPlan$accrualTime[length(designPlan$accrualTime)] + designPlan$followUpTime, accrualTimeVector = designPlan$accrualTime, accrualIntensity = designPlan$accrualIntensity, lambda2 = designPlan$lambda2, lambda1 = lambda1[i], piecewiseSurvivalTime = designPlan$piecewiseSurvivalTime, phi = phi, kappa = designPlan$kappa, allocationRatioPlanned = allocationRatioPlanned, hazardRatio = designPlan$hazardRatio[i]) designPlan$.setParameterType("omega", C_PARAM_GENERATED) numberOfSubjects[kMax, i] <- designPlan$eventsPerStage[kMax, i] / 
designPlan$omega[i] # Analysis times for (j in 1:(kMax - 1)) { analysisTime[j, i] <- .getOneDimensionalRoot(function(x) { designPlan$eventsPerStage[j, i] / numberOfSubjects[kMax, i] - .getEventProbabilities( x, accrualTimeVector = designPlan$accrualTime, accrualIntensity = designPlan$accrualIntensity, lambda2 = designPlan$lambda2, lambda1 = lambda1[i], piecewiseSurvivalTime = designPlan$piecewiseSurvivalTime, phi = phi, kappa = designPlan$kappa, allocationRatioPlanned = allocationRatioPlanned, hazardRatio = designPlan$hazardRatio[i]) }, lower = 0, upper = designPlan$accrualTime[length(designPlan$accrualTime)] + designPlan$followUpTime, tolerance = 1E-6) } analysisTime[kMax, i] <- designPlan$accrualTime[length(designPlan$accrualTime)] + designPlan$followUpTime numberOfSubjects[, i] <- .getNumberOfSubjects(analysisTime[, i] , designPlan$accrualTime, designPlan$accrualIntensity, numberOfSubjects[kMax, i]) } stoppingProbs <- designCharacteristics$rejectionProbabilities + c(designCharacteristics$futilityProbabilities, 0) if (all(is.na(designCharacteristics$futilityProbabilities))) { warning("Expected number of subjects H1 and study duration H1 ", "cannot be calculated because the futility probabilities ", "are not applicable for the specified design", call. = FALSE) } stoppingProbs[kMax] <- 1 - sum(stoppingProbs[1:(kMax - 1)]) studyDurationH1[i] <- analysisTime[, i] %*% stoppingProbs expectedNumberOfSubjectsH1[i] <- numberOfSubjects[, i] %*% stoppingProbs } designPlan$expectedEventsH0[i] <- designCharacteristics$averageSampleNumber0 * designPlan$eventsFixed[i] designPlan$expectedEventsH01[i] <- designCharacteristics$averageSampleNumber01 * designPlan$eventsFixed[i] designPlan$expectedEventsH1[i] <- designCharacteristics$averageSampleNumber1 * designPlan$eventsFixed[i] designPlan$.setParameterType("expectedEventsH0", C_PARAM_GENERATED) designPlan$.setParameterType("expectedEventsH01", C_PARAM_GENERATED) designPlan$.setParameterType("expectedEventsH1", C_PARAM_GENERATED) designPlan$numberOfSubjects2 <- numberOfSubjects / (1 + designPlan$allocationRatioPlanned) designPlan$numberOfSubjects1 <- designPlan$numberOfSubjects2 * designPlan$allocationRatioPlanned designPlan$.setParameterType("numberOfSubjects1", C_PARAM_GENERATED) designPlan$.setParameterType("numberOfSubjects2", C_PARAM_GENERATED) } if (!is.null(designCharacteristics$rejectionProbabilities)) { designPlan$rejectPerStage <- matrix(designCharacteristics$rejectionProbabilities, nrow = designPlan$.design$kMax) designPlan$.setParameterType("rejectPerStage", C_PARAM_GENERATED) designPlan$earlyStop <- sum(designPlan$rejectPerStage[1:(designPlan$.design$kMax - 1), ]) designPlan$.setParameterType("earlyStop", C_PARAM_GENERATED) } if (!is.null(designCharacteristics$futilityProbabilities) && any(designPlan$.design$futilityBounds != C_FUTILITY_BOUNDS_DEFAULT)) { designPlan$futilityPerStage <- matrix(designCharacteristics$futilityProbabilities, nrow = designPlan$.design$kMax - 1) designPlan$.setParameterType("futilityPerStage", C_PARAM_GENERATED) designPlan$futilityStop <- sum(designPlan$futilityPerStage) designPlan$.setParameterType("futilityStop", C_PARAM_GENERATED) designPlan$earlyStop <- designPlan$earlyStop + sum(designPlan$futilityPerStage) } designPlan$informationRates = matrix(informationRates, ncol = 1) if (!is.matrix(numberOfSubjects)) { designPlan$numberOfSubjects = matrix(numberOfSubjects[kMax, ], nrow = 1) } else { designPlan$numberOfSubjects = numberOfSubjects } designPlan$maxNumberOfSubjects <- designPlan$numberOfSubjects[kMax, ] if 
(designPlan$.getParameterType("maxNumberOfSubjects") == C_PARAM_NOT_APPLICABLE) { designPlan$.setParameterType("maxNumberOfSubjects", C_PARAM_GENERATED) } designPlan$maxNumberOfSubjects1 <- .getNumberOfSubjects1( designPlan$maxNumberOfSubjects, designPlan$allocationRatioPlanned) designPlan$maxNumberOfSubjects2 <- .getNumberOfSubjects2( designPlan$maxNumberOfSubjects, designPlan$allocationRatioPlanned) designPlan$.setParameterType("maxNumberOfSubjects1", C_PARAM_GENERATED) designPlan$.setParameterType("maxNumberOfSubjects2", C_PARAM_GENERATED) designPlan$.setParameterType("informationRates", C_PARAM_GENERATED) designPlan$.setParameterType("numberOfSubjects", C_PARAM_GENERATED) designPlan$.setParameterType("eventsPerStage", C_PARAM_GENERATED) if (designPlan$accountForObservationTimes) { designPlan$analysisTime <- analysisTime designPlan$expectedNumberOfSubjectsH1 <- expectedNumberOfSubjectsH1 designPlan$studyDurationH1 <- studyDurationH1 designPlan$.setParameterType("analysisTime", C_PARAM_GENERATED) designPlan$.setParameterType("expectedNumberOfSubjectsH1", C_PARAM_GENERATED) designPlan$.setParameterType("studyDurationH1", C_PARAM_GENERATED) } designPlan$.setParameterType("nFixed1", C_PARAM_NOT_APPLICABLE) designPlan$.setParameterType("nFixed2", C_PARAM_NOT_APPLICABLE) designPlan$.setParameterType("nFixed", C_PARAM_NOT_APPLICABLE) if (designPlan$allocationRatioPlanned[1] == 1) { designPlan$.setParameterType("numberOfSubjects1", C_PARAM_NOT_APPLICABLE) designPlan$.setParameterType("numberOfSubjects2", C_PARAM_NOT_APPLICABLE) } return(designPlan) } # Note that 'directionUpper' and 'maxNumberOfSubjects' are only applicable # for 'objectType' = "sampleSize" .createDesignPlanMeans <- function(objectType = c("power", "sampleSize"), ..., design, normalApproximation = FALSE, meanRatio = FALSE, thetaH0 = ifelse(meanRatio, 1, 0), alternative = C_ALTERNATIVE_DEFAULT, stDev = C_STDEV_DEFAULT, directionUpper = NA, maxNumberOfSubjects = NA_real_, groups = 2, allocationRatioPlanned = NA_real_) { objectType <- match.arg(objectType) .assertIsTrialDesignInverseNormalOrGroupSequential(design) .assertIsValidAlphaAndBeta(design$alpha, design$beta) .assertIsValidSidedParameter(design$sided) .assertIsValidStandardDeviation(stDev) .assertIsValidGroupsParameter(groups) .assertIsSingleNumber(thetaH0, "thetaH0") .assertIsSingleLogical(meanRatio, "meanRatio") .assertIsValidThetaH0(thetaH0, endpoint = "means", groups = groups, ratioEnabled = meanRatio) .assertIsSingleLogical(normalApproximation, "normalApproximation") directionUpper <- .assertIsValidDirectionUpper(directionUpper, design$sided, objectType) if (objectType == "sampleSize" && !any(is.na(alternative))) { if (design$sided == 1 && any(alternative - thetaH0 <= 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "any 'alternative' (", .arrayToString(alternative), ") must be > 'thetaH0' (", thetaH0, ")") } if (any(alternative - thetaH0 == 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "any 'alternative' (", .arrayToString(alternative), ") must be != 'thetaH0' (", thetaH0, ")") } } designPlan <- TrialDesignPlanMeans(design = design, meanRatio = meanRatio) designPlan$.setSampleSizeObject(objectType) designPlan$criticalValuesPValueScale <- matrix(design$stageLevels, ncol = 1) designPlan$.setParameterType("criticalValuesPValueScale", C_PARAM_GENERATED) if (design$sided == 2) { designPlan$criticalValuesPValueScale <- designPlan$criticalValuesPValueScale * 2 } if (any(design$futilityBounds > -6)) { designPlan$futilityBoundsPValueScale <- matrix(1 - 
stats::pnorm(design$futilityBounds), ncol = 1)
		designPlan$.setParameterType("futilityBoundsPValueScale", C_PARAM_GENERATED)
	}

	if (groups == 2) {
		if (design$sided == 2 && ((thetaH0 != 0 && !meanRatio) || (thetaH0 != 1 && meanRatio))) {
			stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
				"two-sided case is implemented only for superiority testing (i.e., thetaH0 = ",
				ifelse(meanRatio, 1, 0), ")")
		}
		if (is.na(allocationRatioPlanned)) {
			allocationRatioPlanned <- C_ALLOCATION_RATIO_DEFAULT
		}
		if (allocationRatioPlanned < 0) {
			stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
				"'allocationRatioPlanned' (", allocationRatioPlanned, ") must be >= 0")
		}
		.setValueAndParameterType(designPlan, "allocationRatioPlanned", allocationRatioPlanned, 1)
		if (meanRatio && thetaH0 <= 0) {
			stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
				"null hypothesis mean ratio is not allowed to be negative or zero, ",
				"i.e. 'thetaH0' must be > 0 if 'meanRatio' = TRUE")
		}
	}
	.setValueAndParameterType(designPlan, "normalApproximation", normalApproximation, FALSE)
	.setValueAndParameterType(designPlan, "meanRatio", meanRatio, FALSE)
	.setValueAndParameterType(designPlan, "thetaH0", thetaH0, 0)
	if (objectType == "power") {
		.setValueAndParameterType(designPlan, "alternative", alternative, C_ALTERNATIVE_POWER_SIMULATION_DEFAULT)
	} else {
		.setValueAndParameterType(designPlan, "alternative", alternative, C_ALTERNATIVE_DEFAULT)
	}
	.setValueAndParameterType(designPlan, "stDev", stDev, C_STDEV_DEFAULT)
	if (objectType == "power") {
		.assertIsValidMaxNumberOfSubjects(maxNumberOfSubjects)
		.setValueAndParameterType(designPlan, "maxNumberOfSubjects", maxNumberOfSubjects, NA_real_)
		.setValueAndParameterType(designPlan, "directionUpper", directionUpper, TRUE)
		designPlan$.setParameterType("effect", C_PARAM_GENERATED)
	}
	.setValueAndParameterType(designPlan, "groups", groups, 2)
	if (groups == 1) {
		if (isTRUE(meanRatio)) {
			warning("'meanRatio' (", meanRatio,
				") will be ignored because it is not applicable for 'groups' = 1", call. = FALSE)
		}
		designPlan$.setParameterType("meanRatio", C_PARAM_NOT_APPLICABLE)
		if (length(allocationRatioPlanned) == 1 && !is.na(allocationRatioPlanned)) {
			warning("'allocationRatioPlanned' (", allocationRatioPlanned,
				") will be ignored because it is not applicable for 'groups' = 1", call.
= FALSE) }
		designPlan$.setParameterType("allocationRatioPlanned", C_PARAM_NOT_APPLICABLE)
	}

	return(designPlan)
}

# Note that 'directionUpper' and 'maxNumberOfSubjects' are only applicable for 'objectType' = "sampleSize"
.createDesignPlanRates <- function(objectType = c("power", "sampleSize"), ..., design,
		normalApproximation = TRUE, riskRatio = FALSE, thetaH0 = ifelse(riskRatio, 1, 0),
		pi1 = C_PI_1_SAMPLE_SIZE_DEFAULT, pi2 = C_PI_2_DEFAULT, directionUpper = NA,
		maxNumberOfSubjects = NA_real_, groups = 2, allocationRatioPlanned = NA_real_) {

	objectType <- match.arg(objectType)
	.assertIsTrialDesignInverseNormalOrGroupSequential(design)
	.assertIsValidAlphaAndBeta(design$alpha, design$beta)
	.assertIsValidSidedParameter(design$sided)
	.assertIsValidGroupsParameter(groups)
	.assertIsSingleLogical(normalApproximation, "normalApproximation")
	.assertIsSingleLogical(riskRatio, "riskRatio")
	directionUpper <- .assertIsValidDirectionUpper(directionUpper, design$sided, objectType)
	if (groups == 1) {
		if (!any(is.na(pi1)) && any(pi1 == thetaH0) && (objectType == "sampleSize")) {
			stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
				"any 'pi1' (", .arrayToString(pi1), ") must be != 'thetaH0' (", thetaH0, ")")
		}
		if (any(is.na(pi1)) || any(pi1 <= 0) || any(pi1 >= 1)) {
			stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS,
				"probability 'pi1' (", .arrayToString(pi1), ") is out of bounds (0; 1)")
		}
		if (thetaH0 >= 1 || thetaH0 <= 0) {
			stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS,
				"'thetaH0' (", thetaH0, ") is out of bounds (0; 1)")
		}
		if (!normalApproximation && design$sided == 2) {
			stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE,
				"exact sample size calculation not available for two-sided testing")
		}
	} else if (groups == 2) {
		if (!any(is.na(c(pi1, pi2))) && any(pi1 - pi2 == thetaH0) &&
				(objectType == "sampleSize") && !riskRatio) {
			stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
				"any 'pi1 - pi2' (", .arrayToString(pi1 - pi2), ") must be != 'thetaH0' (", thetaH0, ")")
		}
		if (!any(is.na(c(pi1, pi2))) && any(pi1 / pi2 == thetaH0) &&
				(objectType == "sampleSize") && riskRatio) {
			stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
				"any 'pi1 / pi2' (", .arrayToString(pi1 / pi2), ") must be != 'thetaH0' (", thetaH0, ")")
		}
		if (any(is.na(pi1)) || any(pi1 <= 0) || any(pi1 >= 1)) {
			stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS,
				"probability 'pi1' (", .arrayToString(pi1), ") is out of bounds (0; 1)")
		}
		if (any(is.na(pi2)) || any(pi2 <= 0) || any(pi2 >= 1)) {
			stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS,
				"probability 'pi2' (", .arrayToString(pi2), ") is out of bounds (0; 1)")
		}
		if (design$sided == 2 && ((thetaH0 != 0 && !riskRatio) || (thetaH0 != 1 && riskRatio))) {
			stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE,
				"two-sided case is implemented only for superiority testing")
		}
		if (!normalApproximation) {
			stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE,
				"only normal approximation case is implemented for two groups")
		}
		if (is.na(allocationRatioPlanned)) {
			allocationRatioPlanned <- C_ALLOCATION_RATIO_DEFAULT
		}
		if (allocationRatioPlanned < 0) {
			stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
				"'allocationRatioPlanned' (", allocationRatioPlanned, ") must be >= 0")
		}
		if (riskRatio && thetaH0 <= 0) {
			stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
				"null hypothesis risk ratio is not allowed to be negative or zero, ", "i.e.
'thetaH0' must be > 0 if 'riskRatio' = TRUE") } } designPlan <- TrialDesignPlanRates(design = design) designPlan$.setSampleSizeObject(objectType) designPlan$criticalValuesPValueScale <- matrix(design$stageLevels, ncol = 1) designPlan$.setParameterType("criticalValuesPValueScale", C_PARAM_GENERATED) if (design$sided == 2) { designPlan$criticalValuesPValueScale <- designPlan$criticalValuesPValueScale * 2 } if (any(design$futilityBounds > -6)) { designPlan$futilityBoundsPValueScale <- matrix(1 - stats::pnorm(design$futilityBounds), ncol = 1) designPlan$.setParameterType("futilityBoundsPValueScale", C_PARAM_GENERATED) } if (objectType == "power") { .assertIsValidMaxNumberOfSubjects(maxNumberOfSubjects) .setValueAndParameterType(designPlan, "maxNumberOfSubjects", maxNumberOfSubjects, NA_real_) .setValueAndParameterType(designPlan, "directionUpper", directionUpper, TRUE) designPlan$.setParameterType("effect", C_PARAM_GENERATED) } .setValueAndParameterType(designPlan, "normalApproximation", normalApproximation, TRUE) .setValueAndParameterType(designPlan, "thetaH0", thetaH0, ifelse(riskRatio, 1, 0)) .assertIsValidThetaH0(thetaH0, endpoint = "rates", groups = groups, ratioEnabled = riskRatio) if (objectType == "power") { .setValueAndParameterType(designPlan, "pi1", pi1, C_PI_1_DEFAULT) } else { .setValueAndParameterType(designPlan, "pi1", pi1, C_PI_1_SAMPLE_SIZE_DEFAULT) } .setValueAndParameterType(designPlan, "pi2", pi2, 0.2) if (groups == 1) { if (designPlan$.getParameterType("pi2") == C_PARAM_USER_DEFINED) { warning("'pi2' (", pi2, ") will be ignored because it is not applicable for 'groups' = 1", call. = FALSE) } designPlan$.setParameterType("pi2", C_PARAM_NOT_APPLICABLE) if (isTRUE(riskRatio)) { warning("'riskRatio' (", riskRatio, ") will be ignored because it is not applicable for 'groups' = 1", call. = FALSE) } designPlan$.setParameterType("riskRatio", C_PARAM_NOT_APPLICABLE) if (length(allocationRatioPlanned) == 1 && !is.na(allocationRatioPlanned)) { warning("'allocationRatioPlanned' (", allocationRatioPlanned, ") will be ignored because it is not applicable for 'groups' = 1", call. = FALSE) } designPlan$.setParameterType("allocationRatioPlanned", C_PARAM_NOT_APPLICABLE) } else { .setValueAndParameterType(designPlan, "riskRatio", riskRatio, FALSE) .setValueAndParameterType(designPlan, "allocationRatioPlanned", allocationRatioPlanned, C_ALLOCATION_RATIO_DEFAULT) } .setValueAndParameterType(designPlan, "groups", groups, 2) return(designPlan) } #' @title #' Get Power Means #' #' @description #' Returns the power, stopping probabilities, and expected sample size for testing means in one or two samples at given sample size. #' #' @param design The trial design. If no trial design is specified, a fixed sample size design is used. #' In this case, \code{alpha}, \code{beta}, and \code{sided} can be directly entered as argument. #' @param groups The number of treatment groups (1 or 2), default is \code{2}. #' @param normalApproximation If \code{normalApproximation = TRUE} is specified, the variance is #' assumed to be known, default is FALSE, i.e., the calculations are performed with the t distribution. #' @param meanRatio If \code{meanRatio = TRUE} is specified, the power for #' one-sided testing of H0: mu1/mu2 = thetaH0 is calculated, default is \code{FALSE}. #' @param thetaH0 The null hypothesis value. For one-sided testing, a value != 0 #' (or a value != 1 for testing the mean ratio) can be specified, default is \code{0} or \code{1} for difference and ratio testing, respectively. 
#' @param alternative The alternative hypothesis value. This can be a vector of assumed
#' alternatives, default is \code{seq(0,1,0.2)}.
#' @param stDev The standard deviation, default is \code{1}. If \code{meanRatio = TRUE}
#' is specified, \code{stDev} defines the coefficient of variation sigma/mu2.
#' @param directionUpper Specifies the direction of the alternative,
#' only applicable for one-sided testing, default is \code{TRUE}.
#' @param maxNumberOfSubjects \code{maxNumberOfSubjects > 0} needs to be specified for power calculations.
#' @param allocationRatioPlanned The planned allocation ratio for a two treatment groups
#' design, default is \code{1}.
#' @param ... Ensures that all arguments are named and
#' that a warning will be displayed if unknown arguments are passed.
#'
#' @details
#' For a given design, the function calculates the power, stopping probabilities, and expected sample size for testing means at a given sample size.
#' In a two treatment groups design, additionally, an allocation ratio = n1/n2 can be specified.
#' A null hypothesis value thetaH0 != 0 for testing the difference of two means or
#' thetaH0 != 1 for testing the ratio of two means can be specified.
#' For the specified sample size, critical bounds and stopping for futility bounds are provided at the effect scale (mean, mean difference, or mean ratio, respectively).
#'
#' @return Returns a \code{\link{TrialDesignPlanMeans}} object.
#'
#' @export
#'
#' @examples
#'
#' # Calculate the power, stopping probabilities, and expected sample size for testing H0:
#' # mu1 - mu2 = 0 in a two-armed design
#' # against a range of alternatives H1: mu1 - mu2 = delta, delta = (0, 1, 2, 3, 4, 5),
#' # standard deviation sigma = 8, maximum sample size N = 80 (both treatment arms),
#' # and an allocation ratio n1/n2 = 2. The design is a three-stage O'Brien & Fleming design
#' # with non-binding futility bounds (-0.5, 0.5) for the two interims.
#' # The computation takes into account that the t test is used (normalApproximation = FALSE).
#' getPowerMeans(getDesignGroupSequential(alpha = 0.025,
#'     sided = 1, futilityBounds = c(-0.5, 0.5)),
#'     groups = 2, alternative = c(0:5), stDev = 8,
#'     normalApproximation = FALSE, maxNumberOfSubjects = 80,
#'     allocationRatioPlanned = 2)
#'
getPowerMeans <- function(design = NULL, ...,
		groups = 2,
		normalApproximation = FALSE,
		meanRatio = FALSE,
		thetaH0 = ifelse(meanRatio, 1, 0),
		alternative = C_ALTERNATIVE_POWER_SIMULATION_DEFAULT,
		stDev = C_STDEV_DEFAULT,
		directionUpper = NA,
		maxNumberOfSubjects = NA_real_,
		allocationRatioPlanned = NA_real_) {

	.assertIsValidMaxNumberOfSubjects(maxNumberOfSubjects)
	if (is.null(design)) {
		design <- .getDefaultDesignForSampleSizeCalculations(..., powerEnabled = TRUE)
		.warnInCaseOfUnknownArguments(functionName = "getPowerMeans",
			ignore = c("alpha", "beta", "sided"), ...)
	} else {
		.warnInCaseOfUnknownArguments(functionName = "getPowerMeans", ...)
		.assertIsTrialDesign(design)
		.warnInCaseOfTwoSidedPowerArgument(...)
		.warnInCaseOfTwoSidedPowerIsDisabled(design)
	}

	designPlan <- .createDesignPlanMeans(objectType = "power", design = design,
		normalApproximation = normalApproximation, meanRatio = meanRatio,
		thetaH0 = thetaH0, alternative = alternative, stDev = stDev,
		directionUpper = directionUpper, maxNumberOfSubjects = maxNumberOfSubjects,
		groups = groups, allocationRatioPlanned = allocationRatioPlanned, ...)
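	# Sketch of the drift parameter computed below (explanatory note, not original code):
	# getPowerAndAverageSampleNumber() expects a standardized effect theta such that the
	# test statistic has drift theta * sqrt(n). For one group, theta =
	# (alternative - thetaH0) / stDev; for two groups with allocation ratio r = n1/n2
	# and a mean difference, theta = sqrt(r) / (1 + r) * (alternative - thetaH0) / stDev.
	# Without normal approximation, theta is additionally adjusted so that the
	# normal-based rejection probability matches the t distribution at the given
	# maxNumberOfSubjects.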
if (designPlan$groups == 1) { theta <- (designPlan$alternative - designPlan$thetaH0) / designPlan$stDev if (!is.na(designPlan$directionUpper) && !designPlan$directionUpper) { theta <- -theta } if (designPlan$normalApproximation) { powerAndAverageSampleNumber <- getPowerAndAverageSampleNumber( design, theta, maxNumberOfSubjects) } else { thetaAdj <- (sign(theta) * qnorm(1 - design$alpha / design$sided) - qnorm(pt(sign(theta) * qt(1- design$alpha / design$sided, maxNumberOfSubjects - 1), maxNumberOfSubjects - 1, theta * sqrt(maxNumberOfSubjects)))) / sqrt(maxNumberOfSubjects) powerAndAverageSampleNumber <- getPowerAndAverageSampleNumber( design, thetaAdj, maxNumberOfSubjects) } } else { if (!designPlan$meanRatio) { theta <- sqrt(designPlan$allocationRatioPlanned) / (1 + designPlan$allocationRatioPlanned) * (designPlan$alternative - designPlan$thetaH0) / designPlan$stDev } else { theta <- sqrt(designPlan$allocationRatioPlanned) / sqrt((1 + designPlan$allocationRatioPlanned * designPlan$thetaH0^2) * (1 + designPlan$allocationRatioPlanned)) * (designPlan$alternative - designPlan$thetaH0) / designPlan$stDev } if (!is.na(designPlan$directionUpper) && !designPlan$directionUpper) { theta <- -theta } if (designPlan$normalApproximation) { powerAndAverageSampleNumber <- getPowerAndAverageSampleNumber( design, theta, maxNumberOfSubjects) } else { thetaAdj <- (sign(theta) * qnorm(1 - design$alpha / design$sided) - qnorm(pt(sign(theta) * qt(1- design$alpha / design$sided, maxNumberOfSubjects - 2), maxNumberOfSubjects - 2, theta * sqrt(maxNumberOfSubjects)))) / sqrt(maxNumberOfSubjects) powerAndAverageSampleNumber <- getPowerAndAverageSampleNumber( design, thetaAdj, maxNumberOfSubjects) } } designPlan$effect <- designPlan$alternative - designPlan$thetaH0 designPlan$expectedNumberOfSubjects <- powerAndAverageSampleNumber$averageSampleNumber designPlan$overallReject <- powerAndAverageSampleNumber$overallReject designPlan$rejectPerStage <- powerAndAverageSampleNumber$rejectPerStage designPlan$futilityStop <- powerAndAverageSampleNumber$overallFutility designPlan$futilityPerStage <- powerAndAverageSampleNumber$futilityPerStage designPlan$earlyStop <- powerAndAverageSampleNumber$overallEarlyStop parameterNames <- c("overallReject") if (design$kMax > 1) { parameterNames <- c(parameterNames, "expectedNumberOfSubjects", "rejectPerStage", "futilityStop", "futilityPerStage", "earlyStop") } for (parameterName in parameterNames) { designPlan$.setParameterType(parameterName, C_PARAM_GENERATED) } .addNumberOfSubjectsToPowerResult(designPlan) .addEffectScaleBoundaryDataToDesignPlan(designPlan) .hideFutilityStopsIfNotApplicable(designPlan) return(designPlan) } #' @title #' Get Power Rates #' #' @description #' Returns the power, stopping probabilities, and expected sample size for testing rates in one or two samples at given sample sizes. #' #' @param design The trial design. If no trial design is specified, a fixed sample size design is used. #' In this case, \code{alpha}, \code{beta}, and \code{sided} can be directly entered as argument #' @param groups The number of treatment groups (1 or 2), default is \code{2}. #' @param riskRatio If \code{riskRatio = TRUE} is specified, the power for one-sided #' testing of H0: \code{pi1/pi2 = thetaH0} is calculated, default is \code{FALSE}. #' @param thetaH0 The null hypothesis value. For one-sided testing, a value != 0 #' (or != 1 for testing the risk ratio \code{pi1/pi2}) can be specified, default is \code{0} or \code{1} for difference and ratio testing, respectively. 
#' @param pi1 The assumed probability in the active treatment group if two treatment groups
#' are considered, or the alternative probability for a one treatment group design,
#' default is \code{seq(0.2,0.5,0.1)}.
#' @param pi2 The assumed probability in the reference group if two treatment groups are considered, default is \code{0.2}.
#' @param directionUpper Specifies the direction of the alternative, only applicable for one-sided testing, default is \code{TRUE}.
#' @param maxNumberOfSubjects \code{maxNumberOfSubjects > 0} needs to be specified.
#' @param allocationRatioPlanned The planned allocation ratio for a two treatment groups design, default is \code{1}.
#' @param ... Ensures that all arguments are named and
#' that a warning will be displayed if unknown arguments are passed.
#'
#' @details
#' For a given design, the function calculates the power, stopping probabilities, and expected sample size
#' for testing rates at a given maximum sample size.
#' The sample sizes over the stages are calculated according to the specified information rate in the design.
#' In a two treatment groups design, additionally, an allocation ratio = n1/n2 can be specified.
#' If a null hypothesis value thetaH0 != 0 for testing the difference of two rates
#' or thetaH0 != 1 for testing the risk ratio is specified, the
#' formulas according to Farrington & Manning (Statistics in Medicine, 1990) are used (only one-sided testing).
#' Critical bounds and stopping for futility bounds are provided at the effect scale (rate, rate difference, or rate ratio, respectively).
#' For the two-sample case, the calculation here is performed at the fixed pi2 given as argument in the function.
#' Note that the power calculation for rates is always based on the normal approximation.
#'
#' @return Returns a \code{\link{TrialDesignPlanRates}} object.
#'
#' @export
#'
#' @examples
#'
#' # Calculate the power, stopping probabilities, and expected sample size in a two-armed
#' # design at given maximum sample size N = 200
#' # in a three-stage O'Brien & Fleming design with information rate vector (0.2,0.5,1),
#' # non-binding futility boundaries (0,0), i.e.,
#' # the study stops for futility if the p-value exceeds 0.5 at interim, and
#' # allocation ratio = 2 for a range of pi1 values when testing H0: pi1 - pi2 = -0.1:
#' getPowerRates(getDesignGroupSequential(informationRates = c(0.2,0.5,1),
#'     futilityBounds = c(0,0)), groups = 2, thetaH0 = -0.1,
#'     pi1 = seq(0.3, 0.6, 0.1), directionUpper = FALSE,
#'     pi2 = 0.7, allocationRatioPlanned = 2, maxNumberOfSubjects = 200)
#'
#' # Calculate the power, stopping probabilities, and expected sample size in a single
#' # arm design at given maximum sample size N = 60 in a three-stage two-sided
#' # O'Brien & Fleming design with information rate vector (0.2,0.5,1)
#' # for a range of pi1 values when testing H0: pi = 0.3:
#' getPowerRates(getDesignGroupSequential(informationRates = c(0.2,0.5,1),
#'     sided = 2), groups = 1, thetaH0 = 0.3, pi1 = seq(0.3, 0.5, 0.05),
#'     maxNumberOfSubjects = 60)
#'
getPowerRates <- function(design = NULL, ...,
		groups = 2,
		riskRatio = FALSE,
		thetaH0 = ifelse(riskRatio, 1, 0),
		pi1 = C_PI_1_DEFAULT,
		pi2 = 0.2,
		directionUpper = NA,
		maxNumberOfSubjects = NA_real_,
		allocationRatioPlanned = NA_real_) {

	if (is.null(design)) {
		design <- .getDefaultDesignForSampleSizeCalculations(..., powerEnabled = TRUE)
		.warnInCaseOfUnknownArguments(functionName = "getPowerRates",
			ignore = c("alpha", "beta", "sided", "twoSidedPower"), ...)
} else { .warnInCaseOfUnknownArguments(functionName = "getPowerRates", ...) .assertIsTrialDesign(design) .warnInCaseOfTwoSidedPowerArgument(...) .warnInCaseOfTwoSidedPowerIsDisabled(design) } designPlan <- .createDesignPlanRates(objectType = "power", design = design, riskRatio = riskRatio, thetaH0 = thetaH0, pi1 = pi1, pi2 = pi2, directionUpper = directionUpper, maxNumberOfSubjects = maxNumberOfSubjects, groups = groups, allocationRatioPlanned = allocationRatioPlanned, ...) if (!is.na(allocationRatioPlanned) && allocationRatioPlanned <= 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "allocation ratio must be positive") } allocationRatioPlanned <- designPlan$allocationRatioPlanned theta <- rep(NA_real_, length(pi1)) if (groups == 1) { designPlan$effect <- pi1 - thetaH0 theta <- (pi1 - thetaH0) / sqrt(pi1 * (1 - pi1)) + sign(pi1 - thetaH0) * qnorm(1 - design$alpha / design$sided) * (1 - sqrt(thetaH0 * (1 - thetaH0) / (pi1 * (1 - pi1)))) / sqrt(maxNumberOfSubjects) } else { if (!riskRatio) { designPlan$effect <- pi1 - pi2 - thetaH0 for (i in (1:length(pi1))) { fm <- .getFarringtonManningValues(pi1[i], pi2, thetaH0, allocationRatioPlanned, method = "diff") theta[i] <- sqrt(allocationRatioPlanned) / (1 + allocationRatioPlanned) * ( (pi1[i] - pi2 - thetaH0)*sqrt(1 + allocationRatioPlanned)/ sqrt(pi1[i] * (1 - pi1[i]) + allocationRatioPlanned * pi2 * (1 - pi2)) + sign(pi1[i] - pi2 - thetaH0) * qnorm(1 - design$alpha / design$sided) * (1 - sqrt(fm$ml1 * (1 - fm$ml1) + allocationRatioPlanned * fm$ml2 * (1 - fm$ml2)) / sqrt(pi1[i] * (1 - pi1[i]) + allocationRatioPlanned * pi2 * (1 - pi2))) * (1 + allocationRatioPlanned) / sqrt(allocationRatioPlanned * maxNumberOfSubjects)) } } else { designPlan$effect <- pi1 / pi2 - thetaH0 for (i in (1:length(pi1))) { fm <- .getFarringtonManningValues(pi1[i], pi2, thetaH0, allocationRatioPlanned, method = "ratio") theta[i] <- sqrt(allocationRatioPlanned) / (1 + allocationRatioPlanned) * ( (pi1[i] - thetaH0 * pi2) * sqrt(1 + allocationRatioPlanned) / sqrt(pi1[i]*(1 - pi1[i]) + allocationRatioPlanned*thetaH0^2 * pi2 * (1 - pi2)) + sign(pi1[i] - thetaH0 * pi2) * qnorm(1 - design$alpha / design$sided)* (1 - sqrt(fm$ml1 * (1 - fm$ml1) + allocationRatioPlanned * thetaH0^2 * fm$ml2 * (1 - fm$ml2)) / sqrt(pi1[i] * (1 - pi1[i]) + allocationRatioPlanned * thetaH0^2 * pi2 * (1 - pi2))) * (1 + allocationRatioPlanned) / sqrt(allocationRatioPlanned * maxNumberOfSubjects)) } } } if (!is.na(designPlan$directionUpper) && !designPlan$directionUpper) { theta <- -theta } powerAndAverageSampleNumber <- getPowerAndAverageSampleNumber(design, theta, maxNumberOfSubjects) designPlan$expectedNumberOfSubjects <- powerAndAverageSampleNumber$averageSampleNumber designPlan$overallReject <- powerAndAverageSampleNumber$overallReject designPlan$rejectPerStage <- powerAndAverageSampleNumber$rejectPerStage designPlan$futilityStop <- powerAndAverageSampleNumber$overallFutility designPlan$futilityPerStage <- powerAndAverageSampleNumber$futilityPerStage designPlan$earlyStop <- powerAndAverageSampleNumber$overallEarlyStop parameterNames <- c("overallReject") if (design$kMax > 1) { parameterNames <- c(parameterNames, "expectedNumberOfSubjects", "rejectPerStage", "futilityStop", "futilityPerStage", "earlyStop") } for (parameterName in parameterNames) { designPlan$.setParameterType(parameterName, C_PARAM_GENERATED) } .addNumberOfSubjectsToPowerResult(designPlan) .addEffectScaleBoundaryDataToDesignPlan(designPlan) .hideFutilityStopsIfNotApplicable(designPlan) return(designPlan) } .getNumberOfSubjectsInner <- 
function(timeValue, accrualTime, accrualIntensity, maxNumberOfSubjects) {
	.assertIsSingleNumber(timeValue, "timeValue")
	if (length(accrualTime) != length(accrualIntensity)) {
		stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "length of 'accrualTime' (", length(accrualTime), ") ",
			"must be equal to length of 'accrualIntensity' (", length(accrualIntensity), ")")
	}
	densityIntervals <- accrualTime
	if (length(accrualTime) > 1) {
		densityIntervals[2:length(accrualTime)] <- accrualTime[2:length(accrualTime)] -
			accrualTime[1:(length(accrualTime) - 1)]
	}
	densityVector <- accrualIntensity / sum(densityIntervals * accrualIntensity)
	for (l in 1:length(densityVector)) {
		if (timeValue <= accrualTime[l]) {
			if (l == 1) {
				return(timeValue * densityVector[l] * maxNumberOfSubjects)
			} else {
				return((sum(densityVector[1:(l - 1)] * densityIntervals[1:(l - 1)]) +
					(timeValue - accrualTime[l - 1]) * densityVector[l]) * maxNumberOfSubjects)
			}
		}
	}
	return(maxNumberOfSubjects)
}

.getNumberOfSubjects <- function(time, accrualTime, accrualIntensity, maxNumberOfSubjects) {
	subjectNumbers <- c()
	for (timeValue in time) {
		if (is.na(timeValue)) {
			return(NA_real_)
		}
		subjectNumbers <- c(subjectNumbers, .getNumberOfSubjectsInner(timeValue,
			accrualTime, accrualIntensity, maxNumberOfSubjects))
	}
	return(subjectNumbers)
}

#' @title
#' Get Power Survival
#'
#' @description
#' Returns the power, stopping probabilities, and expected sample size for testing
#' the hazard ratio in a survival design with two treatment groups.
#'
#' @param design The trial design. If no trial design is specified, a fixed sample size design is used.
#' In this case, \code{alpha}, \code{beta}, and \code{sided} can be directly entered as arguments.
#' @param typeOfComputation Three options are available: "Schoenfeld", "Freedman", "HsiehFreedman",
#' the default is "Schoenfeld". For details, see Hsieh
#' (Statistics in Medicine, 1992). For non-inferiority testing (i.e., thetaH0 != 1),
#' only Schoenfeld's formula can be used.
#' @param thetaH0 The null hypothesis value. The default value is 1. For one-sided testing,
#' a bound for testing H0: hazard ratio = thetaH0 != 1 can be specified.
#' @param directionUpper Specifies the direction of the alternative, only applicable
#' for one-sided testing, default is \code{TRUE}.
#' @param pi1 The assumed event rate in the treatment group, default is \code{seq(0.2, 0.5, 0.1)}.
#' @param pi2 The assumed event rate in the control group, default is \code{0.2}.
#' @param lambda1 The assumed hazard rate in the treatment group, there is no default.
#' lambda1 can also be used to define piecewise exponentially distributed survival times
#' (see details).
#' @param lambda2 The assumed hazard rate in the reference group, there is no default.
#' lambda2 can also be used to define piecewise exponentially distributed survival times
#' (see details).
#' @param median1 The assumed median survival time in the treatment group, there is no default.
#' @param median2 The assumed median survival time in the reference group, there is no default.
#' @param piecewiseSurvivalTime A vector that specifies the time intervals for the piecewise
#' definition of the exponential survival time cumulative distribution function (see details).
#' @param hazardRatio The vector of hazard ratios under consideration.
#' If the event or hazard rates in both treatment groups are defined, the hazard ratio need
#' not be specified as it is calculated.
#' @param kappa The shape parameter of the Weibull distribution, default is \code{1}.
#' The Weibull distribution cannot be used for the piecewise definition of the survival time distribution.
#' Note that the parameters \code{shape} and \code{scale} in \code{\link[stats]{Weibull}}
#' are equivalent to \code{kappa} and \code{1 / lambda}, respectively, in rpact.
#' @param allocationRatioPlanned The planned allocation ratio, default is \code{1}.
#' @param eventTime The assumed time under which the event rates are calculated, default is \code{12}.
#' @param accrualTime The assumed accrual time intervals for the study, default is \code{c(0, 12)} (see details).
#' @param accrualIntensity A vector of accrual intensities, default is \code{1} (see details).
#' @param dropoutRate1 The assumed drop-out rate in the treatment group, default is \code{0}.
#' @param dropoutRate2 The assumed drop-out rate in the control group, default is \code{0}.
#' @param dropoutTime The assumed time for drop-out rates in the control and the
#' treatment group, default is \code{12}.
#' @param maxNumberOfEvents \code{maxNumberOfEvents > 0} is the maximum number of events; it determines
#' the power of the test and needs to be specified.
#' @param maxNumberOfSubjects \code{maxNumberOfSubjects > 0} needs to be specified.
#' If accrual time and accrual intensity are specified, it will be calculated.
#' @param ... Ensures that all arguments are named and
#' that a warning will be displayed if unknown arguments are passed.
#'
#' @details
#' For a given design, the function calculates the power, stopping probabilities, and expected
#' sample size for a given number of events and number of subjects.
#' It also calculates the time when the required events are expected under the given
#' assumptions (exponentially, piecewise exponentially, or Weibull distributed survival times
#' and constant or non-constant piecewise accrual).
#' Additionally, an allocation ratio = n1/n2 can be specified where n1 and n2 are the number
#' of subjects in the two treatment groups.
#'
#' The formula of Kim & Tsiatis (Biometrics, 1990)
#' is used to calculate the expected number of events under the alternative
#' (see also Lakatos & Lan, Statistics in Medicine, 1992). These formulas are generalized
#' to piecewise survival times and non-constant piecewise accrual over time.\cr
#'
#' \code{piecewiseSurvivalTime}
#' The first element of this vector must be equal to \code{0}. \code{piecewiseSurvivalTime} can also
#' be a list that combines the definition of the time intervals and hazard rates in the reference group.
#' The definition of the survival time in the treatment group is obtained by the specification
#' of the hazard ratio (see examples for details).
#'
#' \code{accrualTime} can also be used to define a non-constant accrual over time.
#' For this, \code{accrualTime} needs to be a vector that defines the accrual intervals and
#' \code{accrualIntensity} needs to be specified. The first element of \code{accrualTime} must be equal to \code{0}.\cr
#' \code{accrualTime} can also be a list that combines the definition of the accrual time and
#' accrual intensity \code{accrualIntensity} (see below and examples for details).
#' If the length of \code{accrualTime} and the length of \code{accrualIntensity} are
#' the same (i.e., the end of accrual is undefined), \code{maxNumberOfSubjects > 0} needs to
#' be specified and the end of accrual is calculated.
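#' For illustration (a hypothetical specification, not a package default): with
#' \code{accrualTime = c(0, 6)}, \code{accrualIntensity = c(20, 30)}, and
#' \code{maxNumberOfSubjects = 200}, recruitment proceeds at 20 subjects per time unit up to
#' time 6 (120 subjects) and at 30 subjects per time unit thereafter, so the end of accrual
#' is calculated as 6 + (200 - 120) / 30 = 8.67 time units.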
#'
#' \code{accrualIntensity} needs to be defined if a vector of \code{accrualTime} is specified.\cr
#' If the length of \code{accrualTime} and the length of \code{accrualIntensity} are the same
#' (i.e., the end of accrual is undefined), \code{maxNumberOfSubjects > 0} needs to be specified
#' and the end of accrual is calculated.
#' In that case, \code{accrualIntensity} is given by the number of subjects per time unit.\cr
#' If the length of \code{accrualTime} equals the length of \code{accrualIntensity} + 1
#' (i.e., the end of accrual is defined), \code{maxNumberOfSubjects} is calculated.\cr
#' If all elements in \code{accrualIntensity} are smaller than 1, \code{accrualIntensity} defines
#' the *relative* intensity with which subjects enter the trial.
#' For example, \code{accrualIntensity = c(0.1, 0.2)} specifies that in the second accrual interval
#' the intensity is doubled as compared to the first accrual interval. The actual accrual intensity
#' is calculated for the given \code{maxNumberOfSubjects}.
#' Note that the default is \code{accrualIntensity = 0.1}, meaning that the *absolute* accrual intensity
#' will be calculated.
#'
#' @return Returns a \code{\link{TrialDesignPlanSurvival}} object.
#'
#' @export
#'
#' @examples
#'
#' # Fixed sample size with minimum required definitions, pi1 = c(0.4, 0.5, 0.5) and
#' # pi2 = 0.2 at event time 12, accrual time 12 and follow-up time 6 as default
#' getPowerSurvival(maxNumberOfEvents = 40, maxNumberOfSubjects = 200)
#'
#' \donttest{
#'
#' # Four-stage O'Brien & Fleming group sequential design with minimum required
#' # definitions, pi1 = c(0.4, 0.5, 0.5) and pi2 = 0.2 at event time 12,
#' # accrual time 12 and follow-up time 6 as default
#' getPowerSurvival(design = getDesignGroupSequential(kMax = 4),
#'     maxNumberOfEvents = 40, maxNumberOfSubjects = 200)
#'
#' # For a fixed sample design, determine the necessary accrual time if 200 subjects
#' # are enrolled and 30 subjects per time unit can be recruited
#' getPowerSurvival(maxNumberOfEvents = 40, accrualTime = c(0),
#'     accrualIntensity = 30, maxNumberOfSubjects = 200)
#'
#' # Determine the necessary accrual time if 200 subjects are enrolled and, for the first
#' # 6 time units, 20 subjects per time unit can be recruited, then 30 subjects per time unit
#' getPowerSurvival(maxNumberOfEvents = 40, accrualTime = c(0, 6),
#'     accrualIntensity = c(20, 30), maxNumberOfSubjects = 200)
#'
#' # Determine the maximum number of subjects if, for the first 6 time units, 20 subjects
#' # per time unit can be recruited, and from time 6 to 10, 30 subjects per time unit
#' getPowerSurvival(maxNumberOfEvents = 40, accrualTime = c(0, 6, 10), accrualIntensity = c(20, 30))
#'
#' # Specify accrual time as a list
#' at <- list(
#'     "0 - <6" = 20,
#'     "6 - Inf" = 30)
#' getPowerSurvival(maxNumberOfEvents = 40, accrualTime = at, maxNumberOfSubjects = 200)
#'
#' # Specify accrual time as a list, if the maximum number of subjects needs to be calculated
#' at <- list(
#'     "0 - <6" = 20,
#'     "6 - <=10" = 30)
#' getPowerSurvival(maxNumberOfEvents = 40, accrualTime = at)
#'
#' # Specify effect size for a two-stage group design with O'Brien & Fleming boundaries
#' # Effect size is based on event rates at specified event time, directionUpper = FALSE
#' # needs to be specified because it should be shown that hazard ratio < 1
#' getPowerSurvival(design = getDesignGroupSequential(kMax = 2), pi1 = 0.2, pi2 = 0.3,
#'     eventTime = 24, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE)
#'
#' # Effect size is based on event rate at specified event time for the reference group
#' # and hazard ratio, directionUpper = FALSE needs to be specified
#' # because it should be shown that hazard ratio < 1
#' getPowerSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, pi2 = 0.3,
#'     eventTime = 24, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE)
#'
#' # Effect size is based on hazard rate for the reference group and hazard ratio,
#' # directionUpper = FALSE needs to be specified because it should be shown that hazard ratio < 1
#' getPowerSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5,
#'     lambda2 = 0.02, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE)
#'
#' # Specification of piecewise exponential survival time and hazard ratios
#' getPowerSurvival(design = getDesignGroupSequential(kMax = 2),
#'     piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04),
#'     hazardRatio = c(1.5, 1.8, 2), maxNumberOfEvents = 40, maxNumberOfSubjects = 200)
#'
#' # Specification of piecewise exponential survival time as a list and hazard ratios
#' pws <- list(
#'     "0 - <5" = 0.01,
#'     "5 - <10" = 0.02,
#'     ">=10" = 0.04)
#' getPowerSurvival(design = getDesignGroupSequential(kMax = 2),
#'     piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2),
#'     maxNumberOfEvents = 40, maxNumberOfSubjects = 200)
#'
#' # Specification of piecewise exponential survival time for both treatment arms
#' getPowerSurvival(design = getDesignGroupSequential(kMax = 2),
#'     piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04),
#'     lambda1 = c(0.015, 0.03, 0.06), maxNumberOfEvents = 40, maxNumberOfSubjects = 200)
#'
#' # Specify effect size based on median survival times
#' getPowerSurvival(median1 = 5, median2 = 3,
#'     maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE)
#'
#' # Specify effect size based on median survival times of a Weibull distribution with kappa = 2
#' getPowerSurvival(median1 = 5, median2 = 3, kappa = 2,
#'     maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE)
#'
#' }
#'
getPowerSurvival <- function(design = NULL, ...,
		typeOfComputation = c("Schoenfeld", "Freedman", "HsiehFreedman"),
		thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT,
		directionUpper = NA,
		pi1 = NA_real_, pi2 = NA_real_,
		lambda1 = NA_real_, lambda2 = NA_real_,
		median1 = NA_real_, median2 = NA_real_,
		kappa = 1,
		hazardRatio = NA_real_,
		piecewiseSurvivalTime = NA_real_,
		allocationRatioPlanned = 1,
		eventTime = C_EVENT_TIME_DEFAULT,
		accrualTime = C_ACCRUAL_TIME_DEFAULT,
		accrualIntensity = C_ACCRUAL_INTENSITY_DEFAULT,
		maxNumberOfSubjects = NA_real_,
		maxNumberOfEvents = NA_real_,
		dropoutRate1 = C_DROP_OUT_RATE_1_DEFAULT,
		dropoutRate2 = C_DROP_OUT_RATE_2_DEFAULT,
		dropoutTime = C_DROP_OUT_TIME_DEFAULT) {

	typeOfComputation <- match.arg(typeOfComputation)
	if (is.null(design)) {
		design <- .getDefaultDesignForSampleSizeCalculations(..., powerEnabled = TRUE)
		.warnInCaseOfUnknownArguments(functionName = "getPowerSurvival",
			ignore = c("alpha", "beta", "sided", "twoSidedPower"), ...)
	} else {
		.assertIsTrialDesign(design)
		.warnInCaseOfUnknownArguments(functionName = "getPowerSurvival", ...)
		.warnInCaseOfTwoSidedPowerArgument(...)
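		# The effect measure theta used below is defined on the z-scale
		# (r denotes the planned allocation ratio, HR the hazard ratio):
		#   Schoenfeld:     sqrt(r) / (1 + r) * log(HR / thetaH0)
		#   Freedman:       sqrt(r) * (HR - 1) / (r * HR + 1)
		#   Hsieh-Freedman: 2 * sqrt(r) / (1 + r) * (HR - 1) / (HR + 1)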
.warnInCaseOfTwoSidedPowerIsDisabled(design) } designPlan <- .createDesignPlanSurvival(objectType = "power", design = design, typeOfComputation = typeOfComputation, thetaH0 = thetaH0, pi2 = pi2, pi1 = pi1, allocationRatioPlanned = allocationRatioPlanned, accountForObservationTimes = TRUE, eventTime = eventTime, accrualTime = accrualTime, accrualIntensity = accrualIntensity, kappa = kappa, piecewiseSurvivalTime = piecewiseSurvivalTime, lambda2 = lambda2, lambda1 = lambda1, median1 = median1, median2 = median2, directionUpper = directionUpper, maxNumberOfEvents = maxNumberOfEvents, maxNumberOfSubjects = maxNumberOfSubjects, dropoutRate1 = dropoutRate1, dropoutRate2 = dropoutRate2, dropoutTime = dropoutTime, hazardRatio = hazardRatio) if (!is.na(allocationRatioPlanned) && allocationRatioPlanned <= 0) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "allocation ratio must be positive", call. = FALSE) } if (typeOfComputation == "Schoenfeld") { theta <- sqrt(allocationRatioPlanned) / (1 + allocationRatioPlanned) * (log(designPlan$hazardRatio / thetaH0)) } if (typeOfComputation == "Freedman") { theta <- sqrt(allocationRatioPlanned) * (designPlan$hazardRatio - 1) / (allocationRatioPlanned * designPlan$hazardRatio + 1) } if (typeOfComputation == "HsiehFreedman") { theta <- sqrt(4 * allocationRatioPlanned) / (1 + allocationRatioPlanned) * (designPlan$hazardRatio - 1) / (designPlan$hazardRatio + 1) } if (!is.na(designPlan$directionUpper) && !designPlan$directionUpper) { theta <- -theta } powerAndAverageSampleNumber <- getPowerAndAverageSampleNumber( design = design, theta = theta, nMax = maxNumberOfEvents) kMax <- design$kMax sided <- design$sided if (designPlan$.piecewiseSurvivalTime$.isLambdaBased()) { numberOfResults <- length(designPlan$hazardRatio) } else { numberOfResults <- length(designPlan$pi1) } stoppingProbs <- matrix(NA_real_, kMax, numberOfResults) designPlan$analysisTime <- matrix(NA_real_, kMax, numberOfResults) designPlan$numberOfSubjects <- matrix(NA_real_, kMax, numberOfResults) designPlan$studyDuration <- rep(NA_real_, numberOfResults) designPlan$expectedNumberOfSubjects <- rep(NA_real_, numberOfResults) eventsPerStage <- maxNumberOfEvents * design$informationRates parameterNames <- c( "analysisTime", "numberOfSubjects", "studyDuration", "expectedNumberOfSubjects", "eventsPerStage") phi <- -c(log(1 - dropoutRate1), log(1 - dropoutRate2)) / dropoutTime lambda1 <- designPlan$lambda1 if (designPlan$.piecewiseSurvivalTime$piecewiseSurvivalEnabled) { lambda1 <- rep(NA_real_, numberOfResults) } for (i in 1:numberOfResults) { # Analysis times up <- 2 iterate <- 1 while (eventsPerStage[kMax] / designPlan$maxNumberOfSubjects > .getEventProbabilities( up, accrualTimeVector = designPlan$accrualTime, accrualIntensity = designPlan$accrualIntensity, lambda2 = designPlan$lambda2, lambda1 = lambda1[i], phi = phi, piecewiseSurvivalTime = designPlan$piecewiseSurvivalTime, kappa = kappa, allocationRatioPlanned = allocationRatioPlanned, hazardRatio = designPlan$hazardRatio[i])) { up <- 2 * up iterate <- iterate + 1 if (iterate > 50) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'maxNumberOfSubjects' (", designPlan$maxNumberOfSubjects, ") ", "is too small to reach maximum number of events ", "(presumably due to drop-out rates)") } } for (j in 1:kMax) { designPlan$analysisTime[j, i] <- .getOneDimensionalRoot(function(x) { eventsPerStage[j] / designPlan$maxNumberOfSubjects - .getEventProbabilities(x, accrualTimeVector = designPlan$accrualTime, accrualIntensity = designPlan$accrualIntensity, lambda2 = 
designPlan$lambda2, lambda1 = lambda1[i], phi = phi, piecewiseSurvivalTime = designPlan$piecewiseSurvivalTime, kappa = kappa, allocationRatioPlanned = allocationRatioPlanned, hazardRatio = designPlan$hazardRatio[i]) }, lower = 0, upper = up, tolerance = 1e-06) # , acceptResultsOutOfTolerance = TRUE if (is.na(designPlan$analysisTime[j, i])) { warning("Cannot calculate analysis time at stage ", j, ": ", "'maxNumberOfSubjects' (", designPlan$maxNumberOfSubjects, ") is too ", "small to reach maximum number of events", call. = FALSE) } } if (kMax > 1) { designPlan$numberOfSubjects[, i] <- .getNumberOfSubjects( designPlan$analysisTime[, i], designPlan$accrualTime, designPlan$accrualIntensity, designPlan$maxNumberOfSubjects) powerAndAverageSampleNumber$futilityPerStage[is.na( powerAndAverageSampleNumber$futilityPerStage[,i]), i] <- 0 stoppingProbs[, i] <- powerAndAverageSampleNumber$rejectPerStage[, i] + c(powerAndAverageSampleNumber$futilityPerStage[, i], 0) stoppingProbs[kMax, i] <- 1 - sum(stoppingProbs[1:(kMax - 1), i]) designPlan$studyDuration[i] <- designPlan$analysisTime[, i] %*% stoppingProbs[, i] designPlan$.setParameterType("studyDuration", C_PARAM_GENERATED) designPlan$expectedNumberOfSubjects[i] <- designPlan$numberOfSubjects[, i] %*% stoppingProbs[, i] } } if (kMax == 1) { designPlan$expectedNumberOfSubjects <- .getNumberOfSubjects( designPlan$analysisTime[1, ], designPlan$accrualTime, designPlan$accrualIntensity, designPlan$maxNumberOfSubjects) designPlan$numberOfSubjects <- matrix(designPlan$expectedNumberOfSubjects, nrow = 1) } designPlan$eventsPerStage <- matrix(eventsPerStage, ncol = 1) designPlan$.setParameterType("eventsPerStage", C_PARAM_GENERATED) designPlan$expectedNumberOfEvents <- powerAndAverageSampleNumber$averageSampleNumber designPlan$overallReject <- powerAndAverageSampleNumber$overallReject designPlan$rejectPerStage <- powerAndAverageSampleNumber$rejectPerStage designPlan$futilityStop <- powerAndAverageSampleNumber$overallFutility designPlan$futilityPerStage <- powerAndAverageSampleNumber$futilityPerStage designPlan$earlyStop <- powerAndAverageSampleNumber$overallEarlyStop parameterNames <- c(parameterNames, "expectedNumberOfEvents", "overallReject", "rejectPerStage", "futilityStop", "futilityPerStage", "earlyStop") for (parameterName in parameterNames) { designPlan$.setParameterType(parameterName, C_PARAM_GENERATED) } if (kMax == 1L) { designPlan$.setParameterType("numberOfSubjects", C_PARAM_NOT_APPLICABLE) designPlan$.setParameterType("eventsPerStage", C_PARAM_NOT_APPLICABLE) designPlan$.setParameterType("futilityStop", C_PARAM_NOT_APPLICABLE) designPlan$.setParameterType("earlyStop", C_PARAM_NOT_APPLICABLE) designPlan$.setParameterType("rejectPerStage", C_PARAM_NOT_APPLICABLE) } if (!any(is.na(designPlan$analysisTime)) && !any(is.na(designPlan$accrualTime))) { designPlan$followUpTime <- designPlan$analysisTime[kMax, ] - designPlan$accrualTime[length(designPlan$accrualTime)] designPlan$.setParameterType("followUpTime", C_PARAM_GENERATED) } .addEffectScaleBoundaryDataToDesignPlan(designPlan) .addStudyDurationToDesignPlan(designPlan) .hideFutilityStopsIfNotApplicable(designPlan) return(designPlan) } .hideFutilityStopsIfNotApplicable <- function(designPlan) { if (all(designPlan$.design$futilityBounds == C_FUTILITY_BOUNDS_DEFAULT)) { designPlan$.setParameterType("futilityStop", C_PARAM_NOT_APPLICABLE) designPlan$.setParameterType("futilityPerStage", C_PARAM_NOT_APPLICABLE) } } .addStudyDurationToDesignPlan <- function(designPlan) { if 
(!designPlan$accountForObservationTimes) { return(invisible()) } kMax <- designPlan$.design$kMax if (kMax == 1) { designPlan$studyDuration <- designPlan$analysisTime[1, ] designPlan$.setParameterType("studyDuration", C_PARAM_GENERATED) designPlan$maxStudyDuration <- designPlan$studyDuration } else { designPlan$maxStudyDuration <- designPlan$analysisTime[kMax, ] designPlan$.setParameterType("maxStudyDuration", C_PARAM_GENERATED) } } .addNumberOfSubjectsToPowerResult <- function(designPlan) { design <- designPlan$.design designPlan$numberOfSubjects <- matrix(rep(NA_real_, design$kMax), ncol = 1) designPlan$numberOfSubjects[1, 1] <- design$informationRates[1] * designPlan$maxNumberOfSubjects if (design$kMax > 1) { designPlan$numberOfSubjects[2:design$kMax, 1] <- (design$informationRates[2:design$kMax] - design$informationRates[1:(design$kMax - 1)]) * designPlan$maxNumberOfSubjects } designPlan$numberOfSubjects <- .getColumnCumSum(designPlan$numberOfSubjects) designPlan$numberOfSubjects1 <- .getNumberOfSubjects1( designPlan$numberOfSubjects, designPlan$allocationRatioPlanned) designPlan$numberOfSubjects2 <- .getNumberOfSubjects2( designPlan$numberOfSubjects, designPlan$allocationRatioPlanned) if (designPlan$.design$kMax == 1) { designPlan$nFixed <- as.numeric(designPlan$numberOfSubjects) designPlan$.setParameterType("nFixed", C_PARAM_GENERATED) if (designPlan$groups == 2) { designPlan$nFixed1 <- as.numeric(designPlan$numberOfSubjects1) designPlan$nFixed2 <- as.numeric(designPlan$numberOfSubjects2) designPlan$.setParameterType("nFixed1", C_PARAM_GENERATED) designPlan$.setParameterType("nFixed2", C_PARAM_GENERATED) } } else { designPlan$.setParameterType("numberOfSubjects", C_PARAM_GENERATED) if ((designPlan$groups == 1) || designPlan$allocationRatioPlanned == 1) { designPlan$.setParameterType("numberOfSubjects1", C_PARAM_NOT_APPLICABLE) designPlan$.setParameterType("numberOfSubjects2", C_PARAM_NOT_APPLICABLE) } else { designPlan$.setParameterType("numberOfSubjects1", C_PARAM_GENERATED) designPlan$.setParameterType("numberOfSubjects2", C_PARAM_GENERATED) } } } rpact/R/class_event_probabilities.R0000644000176200001440000001540613556061055017141 0ustar liggesusers###################################################################################### # # # -- Event probabilities classes -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 06-05-2019 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### #' #' @name EventProbabilities #' #' @title #' Event Probabilities #' #' @description #' Class for definition of event probabilities. #' #' @details #' \code{EventProbabilities} is a class for definition of event probabilities. 
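#' Objects of this class are typically created by \code{\link{getEventProbabilities}} and are
#' not intended to be instantiated directly by the user.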
#' #' @importFrom methods new #' #' @include f_core_constants.R #' @include class_core_parameter_set.R #' @include class_time.R #' #' @keywords internal #' EventProbabilities <- setRefClass("EventProbabilities", contains = "ParameterSet", fields = list( .piecewiseSurvivalTime = "PiecewiseSurvivalTime", .accrualTime = "AccrualTime", time = "numeric", accrualTime = "numeric", accrualIntensity = "numeric", kappa = "numeric", piecewiseSurvivalTime = "numeric", lambda1 = "numeric", lambda2 = "numeric", allocationRatioPlanned = "numeric", hazardRatio = "numeric", dropoutRate1 = "numeric", dropoutRate2 = "numeric", dropoutTime = "numeric", maxNumberOfSubjects = "numeric", overallEventProbabilities = "numeric", eventProbabilities1 = "numeric", eventProbabilities2 = "numeric" ), methods = list( initialize = function(...) { callSuper(...) .parameterNames <<- C_PARAMETER_NAMES .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS }, show = function(showType = 1, digits = NA_integer_) { .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE) }, .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) { 'Method for automatically printing event probabilities objects' .resetCat() if (showType == 2) { .cat("Technical summary of the event probabilities object of class ", methods::classLabel(class(.self)), ":\n\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showAllParameters(consoleOutputEnabled = consoleOutputEnabled) .showParameterTypeDescription(consoleOutputEnabled = consoleOutputEnabled) } else { .cat("Event probabilities at given time:\n\n", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getUserDefinedParameters(), "User defined parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getDefaultParameters(), "Default parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getGeneratedParameters(), "Time and output", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled) .cat("Legend:\n", heading = 2, consoleOutputEnabled = consoleOutputEnabled) .cat(" (i): values of treatment arm i\n", consoleOutputEnabled = consoleOutputEnabled) .cat("\n", consoleOutputEnabled = consoleOutputEnabled) } } ) ) #' #' @name NumberOfSubjects #' #' @title #' Number Of Subjects #' #' @description #' Class for definition of number of subjects results. #' #' @details #' \code{NumberOfSubjects} is a class for definition of number of subjects results. #' #' @importFrom methods new #' #' @include f_core_constants.R #' @include class_core_parameter_set.R #' @include class_time.R #' #' @keywords internal #' NumberOfSubjects <- setRefClass("NumberOfSubjects", contains = "ParameterSet", fields = list( .accrualTime = "AccrualTime", time = "numeric", accrualTime = "numeric", accrualIntensity = "numeric", maxNumberOfSubjects = "numeric", numberOfSubjects = "numeric" ), methods = list( initialize = function(...) { callSuper(...) 
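			# Register the display names and output format functions used when the object is printed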
.parameterNames <<- C_PARAMETER_NAMES .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS }, show = function(showType = 1, digits = NA_integer_) { .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE) }, .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) { 'Method for automatically printing number of subjects objects' .resetCat() if (showType == 2) { .cat("Technical summary of the number of subjects object of class ", methods::classLabel(class(.self)), ":\n\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showAllParameters(consoleOutputEnabled = consoleOutputEnabled) .showParameterTypeDescription(consoleOutputEnabled = consoleOutputEnabled) } else { .cat("Number of recruited subjects at given time:\n\n", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getUserDefinedParameters(), "User defined parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getDefaultParameters(), "Default parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getGeneratedParameters(), "Time and output", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled) .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled) .cat("\n", consoleOutputEnabled = consoleOutputEnabled) } } ) ) rpact/R/f_analysis_base_survival.R0000644000176200001440000013513313574407504017003 0ustar liggesusers###################################################################################### # # # -- Analysis of survival data with group sequential and combination test -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.1 # # Date: 25-11-2019 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### # @title # Get Analysis Results Survival # # @description # Returns an analysis result object. # # @param design the trial design. # # @return Returns a \code{AnalysisResultsSurvival} object. # # @keywords internal # .getAnalysisResultsSurvival <- function(design, dataInput, ...) { if (.isTrialDesignGroupSequential(design)) { return(.getAnalysisResultsSurvivalGroupSequential(design, dataInput = dataInput, ...)) } if (.isTrialDesignInverseNormal(design)) { return(.getAnalysisResultsSurvivalInverseNormal(design, dataInput = dataInput, ...)) } if (.isTrialDesignFisher(design)) { return(.getAnalysisResultsSurvivalFisher(design, dataInput = dataInput, ...)) } .stopWithWrongDesignMessage(design) } .getAnalysisResultsSurvivalInverseNormal <- function(design, ..., dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, thetaH1 = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .assertIsTrialDesignInverseNormal(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) .warnInCaseOfUnknownArguments(functionName = ".getAnalysisResultsSurvivalInverseNormal", ignore = c("stage"), ...) 
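	# Create the results container; .getAnalysisResultsSurvivalAll fills in the stage results,
	# conditional power, conditional rejection probabilities, repeated confidence intervals,
	# repeated p-values, and the final confidence interval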
results <- AnalysisResultsInverseNormal(design = design, dataInput = dataInput) .getAnalysisResultsSurvivalAll(results = results, design = design, dataInput = dataInput, stage = stage, directionUpper = directionUpper, thetaH0 = thetaH0, thetaH1 = thetaH1, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance) return(results) } .getAnalysisResultsSurvivalGroupSequential <- function(design, ..., dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, thetaH1 = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .assertIsTrialDesignGroupSequential(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) .warnInCaseOfUnknownArguments(functionName = ".getAnalysisResultsSurvivalGroupSequential", ignore = c("stage"), ...) results <- AnalysisResultsGroupSequential(design = design, dataInput = dataInput) .getAnalysisResultsSurvivalAll(results = results, design = design, dataInput = dataInput, stage = stage, directionUpper = directionUpper, thetaH0 = thetaH0, thetaH1 = thetaH1, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance) return(results) } .getAnalysisResultsSurvivalFisher <- function(design, ..., dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, thetaH1 = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) { .assertIsTrialDesignFisher(design) .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) .warnInCaseOfUnknownArguments(functionName = ".getAnalysisResultsSurvivalFisher", ignore = c("stage"), ...) 
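	# For Fisher's combination test, the conditional power is partly obtained by simulation,
	# so 'iterations' and 'seed' are passed through to the calculation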
results <- AnalysisResultsFisher(design = design, dataInput = dataInput) .getAnalysisResultsSurvivalAll(results = results, design = design, dataInput = dataInput, stage = stage, directionUpper = directionUpper, thetaH0 = thetaH0, thetaH1 = thetaH1, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance, iterations = iterations, seed = seed) return(results) } # # The following parameters will be taken from 'design': # stages, informationRate, criticalValues, futilityBounds, alphaSpent, stageLevels # .getAnalysisResultsSurvivalAll <- function(results, design, dataInput, stage, directionUpper, thetaH0, thetaH1, nPlanned, allocationRatioPlanned, tolerance, iterations, seed) { startTime <- Sys.time() stageResults <- .getStageResultsSurvival(design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper) results$.stageResults <- stageResults .logProgress("Stage results calculated", startTime = startTime) thetaH1 <- .assertIsValidThetaH1(thetaH1, stageResults, stage) .assertIsInOpenInterval(thetaH1, "thetaH1",0,Inf) results$directionUpper <- directionUpper results$normalApproximation <- TRUE results$allocationRatioPlanned <- allocationRatioPlanned results$thetaH0 <- thetaH0 results$thetaH1 <- thetaH1 results$nPlanned <- nPlanned while (length(results$nPlanned) < design$kMax) { results$nPlanned <- c(NA_real_, results$nPlanned) } # effect size results$effectSizes <- stageResults$effectSizes # test statistic results$testStatistics <- stageResults$logRanks # p-value results$pValues <- stageResults$pValues # combined test statistic and test action if (.isTrialDesignInverseNormal(design)) { results$combinationTestStatistics <- stageResults$combInverseNormal } else if (.isTrialDesignGroupSequential(design)) { results$overallTestStatistics <- stageResults$overallLogRanks results$overallPValues <- stageResults$overallPValues } else if (.isTrialDesignFisher(design)) { results$combinationTestStatistics <- stageResults$combFisher } # test actions results$testActions <- getTestActions(design = design, stageResults = stageResults, stage = stage) # conditional power startTime <- Sys.time() if (.isTrialDesignFisher(design)) { conditionalPowerList <- .getConditionalPowerSurvival(design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, iterations = iterations, seed = seed) if (conditionalPowerList$simulated) { results$conditionalPowerSimulated <- conditionalPowerList$conditionalPower } else { results$conditionalPower <- conditionalPowerList$conditionalPower results$conditionalPowerSimulated <- numeric(0) } } else { results$conditionalPower <- .getConditionalPowerSurvival(design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1)$conditionalPower } .logProgress("Conditional power calculated", startTime = startTime) # CRP - conditional rejection probabilities startTime <- Sys.time() results$conditionalRejectionProbabilities <- getConditionalRejectionProbabilities( design = design, stageResults = stageResults, stage = stage) .logProgress("Conditional rejection probabilities (CRP) calculated", startTime = startTime) # RCI - repeated confidence interval startTime <- Sys.time() repeatedConfidenceIntervals <- .getRepeatedConfidenceIntervalsSurvival( design = design, dataInput = dataInput, stage = stage, tolerance = tolerance) 
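	# The repeated confidence intervals are returned as a 2 x kMax matrix:
	# row 1 holds the lower and row 2 the upper confidence limits per stage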
results$repeatedConfidenceIntervalLowerBounds <- repeatedConfidenceIntervals[1, ] results$repeatedConfidenceIntervalUpperBounds <- repeatedConfidenceIntervals[2, ] .logProgress("Repeated confidence interval calculated", startTime = startTime) # repeated p-value startTime <- Sys.time() results$repeatedPValues <- getRepeatedPValues(design = design, stageResults = stageResults, stage = stage, tolerance = tolerance) .logProgress("Repeated p-values calculated", startTime = startTime) # final p-value startTime <- Sys.time() finalPValue <- getFinalPValue(design = design, stageResults = stageResults, stage = stage) results$finalPValues <- .getVectorWithFinalValueAtFinalStage(kMax = design$kMax, finalValue = finalPValue$pFinal, finalStage = finalPValue$finalStage) results$finalStage <- finalPValue$finalStage .logProgress("Final p-value calculated", startTime = startTime) # final confidence interval & median unbiased estimate startTime <- Sys.time() finalConfidenceIntervals <- .getFinalConfidenceIntervalSurvival(design = design, dataInput = dataInput, thetaH0 = thetaH0, stage = stage, directionUpper = directionUpper, tolerance = tolerance) .logProgress("Final confidence interval calculated", startTime = startTime) if (!is.null(finalConfidenceIntervals)) { finalStage <- finalConfidenceIntervals$finalStage results$finalConfidenceIntervalLowerBounds <- .getVectorWithFinalValueAtFinalStage(kMax = design$kMax, finalValue = finalConfidenceIntervals$finalConfidenceInterval[1], finalStage = finalStage) results$finalConfidenceIntervalUpperBounds <- .getVectorWithFinalValueAtFinalStage(kMax = design$kMax, finalValue = finalConfidenceIntervals$finalConfidenceInterval[2], finalStage = finalStage) results$medianUnbiasedEstimates <- .getVectorWithFinalValueAtFinalStage(kMax = design$kMax, finalValue = finalConfidenceIntervals$medianUnbiased, finalStage = finalStage) } return(results) } # @title # Get Stage Results Survival # # @description # Returns a stage results object # # @param design the trial design. # # @return Returns a \code{StageResultsSurvival} object. # # @keywords internal # .getStageResultsSurvival <- function(..., design, dataInput, thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT) { .assertIsDatasetSurvival(dataInput) .assertIsValidThetaH0DataInput(thetaH0, dataInput) .warnInCaseOfUnknownArguments(functionName = "getStageResultsSurvival", ignore = c("stage"), ...) 
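	# Under H0: hazard ratio = thetaH0, the (overall) log-rank statistics are shifted by
	# sqrt(events) * sqrt(r) / (1 + r) * log(thetaH0), where r is the allocation ratio;
	# the overall hazard ratio estimates are recovered via exp(z * (1 + r) / sqrt(r * events))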
stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) overallEvents <- dataInput$getOverallEvents(group = 1) overallAllocationRatios <- dataInput$getOverallAllocationRatios(group = 1) # Calculation of overall logRanks for specified hypothesis overallLogRanks <- dataInput$getOverallLogRanks() - sqrt(overallEvents) * sqrt(overallAllocationRatios) / (1 + overallAllocationRatios) * log(thetaH0) effectSizes <- exp(dataInput$getOverallLogRanks()[1:stage] * (1 + overallAllocationRatios[1:stage]) / sqrt(overallAllocationRatios[1:stage] * overallEvents[1:stage])) events <- dataInput$getEvents(group = 1) allocationRatios <- dataInput$getAllocationRatios(group = 1) # Calculation of logRanks for specified hypothesis logRanks <- dataInput$getLogRanks(group = 1) - sqrt(events) * sqrt(allocationRatios) / (1 + allocationRatios) * log(thetaH0) # Calculation of stagewise test statistics and combination tests pValues <- rep(NA_real_, design$kMax) combInverseNormal <- rep(NA_real_, design$kMax) combFisher <- rep(NA_real_, design$kMax) weightsInverseNormal <- .getWeightsInverseNormal(design) weightsFisher <- .getWeightsFisher(design) if (directionUpper) { pValues <- 1 - stats::pnorm(logRanks) overallPValues <- 1 - stats::pnorm(overallLogRanks) } else { pValues <- stats::pnorm(logRanks) overallPValues <- stats::pnorm(overallLogRanks) } for (k in 1:stage) { # Inverse normal test combInverseNormal[k] <- (weightsInverseNormal[1:k] %*% stats::qnorm(1 - pValues[1:k])) / sqrt(sum(weightsInverseNormal[1:k]^2)) # Fisher combination test combFisher[k] <- prod(pValues[1:k]^weightsFisher[1:k]) } stageResults <- StageResultsSurvival( design = design, dataInput = dataInput, overallLogRanks = .fillWithNAs(overallLogRanks, design$kMax), overallPValues = .fillWithNAs(overallPValues, design$kMax), overallEvents = .fillWithNAs(overallEvents, design$kMax), overallAllocationRatios = .fillWithNAs(overallAllocationRatios, design$kMax), events = .fillWithNAs(events, design$kMax), allocationRatios = .fillWithNAs(allocationRatios, design$kMax), logRanks = .fillWithNAs(logRanks, design$kMax), pValues = .fillWithNAs(pValues, design$kMax), effectSizes = .fillWithNAs(effectSizes, design$kMax), combInverseNormal = combInverseNormal, combFisher = combFisher, weightsFisher = weightsFisher, weightsInverseNormal = weightsInverseNormal, thetaH0 = thetaH0, direction = ifelse(directionUpper, C_DIRECTION_UPPER, C_DIRECTION_LOWER) ) if (.isTrialDesignFisher(design)) { stageResults$.setParameterType("combFisher", C_PARAM_GENERATED) stageResults$.setParameterType("weightsFisher", C_PARAM_GENERATED) } else if (.isTrialDesignInverseNormal(design)) { stageResults$.setParameterType("combInverseNormal", C_PARAM_GENERATED) stageResults$.setParameterType("weightsInverseNormal", C_PARAM_GENERATED) } return(stageResults) } # # Calculation of lower and upper limits of repeated confidence intervals (RCIs) for Survival # .getRepeatedConfidenceIntervalsSurvival <- function(design, ...) 
{ if (.isTrialDesignGroupSequential(design)) { return(.getRepeatedConfidenceIntervalsSurvivalGroupSequential(design = design, ...)) } if (.isTrialDesignInverseNormal(design)) { return(.getRepeatedConfidenceIntervalsSurvivalInverseNormal(design = design, ...)) } if (.isTrialDesignFisher(design)) { return(.getRepeatedConfidenceIntervalsSurvivalFisher(design = design, ...)) } .stopWithWrongDesignMessage(design) } .getRootThetaSurvival <- function(..., design, dataInput, stage, directionUpper, thetaLow, thetaUp, firstParameterName, secondValue, tolerance) { result <- .getOneDimensionalRoot( function(theta) { stageResults <- .getStageResultsSurvival(design = design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = directionUpper) firstValue <- stageResults[[firstParameterName]][stage] if (.isTrialDesignGroupSequential(design)) { firstValue <- stats::qnorm(1 - firstValue) } return(firstValue - secondValue) }, lower = thetaLow, upper = thetaUp, tolerance = tolerance) return(result) } .getUpperLowerThetaSurvival <- function(..., design, dataInput, theta, stage, directionUpper, conditionFunction, firstParameterName, secondValue) { stageResults <- .getStageResultsSurvival(design = design, dataInput = dataInput, stage = stage, thetaH0 = exp(theta), directionUpper = directionUpper) firstValue <- stageResults[[firstParameterName]][stage] if (.isTrialDesignGroupSequential(design)) { firstValue <- stats::qnorm(1 - firstValue) } maxSearchIterations <- 30 while (conditionFunction(secondValue, firstValue)) { theta <- 2 * theta stageResults <- .getStageResultsSurvival(design = design, dataInput = dataInput, stage = stage, thetaH0 = exp(theta), directionUpper = directionUpper) firstValue <- stageResults[[firstParameterName]][stage] if (.isTrialDesignGroupSequential(design)) { firstValue <- stats::qnorm(1 - firstValue) } maxSearchIterations <- maxSearchIterations - 1 if (maxSearchIterations < 0) { stop(sprintf(paste0("Failed to find theta (k = %s, firstValue = %s, ", "secondValue = %s, levels(firstValue) = %s, theta = %s)"), stage, stageResults[[firstParameterName]][stage], secondValue, firstValue, theta)) } } return(theta) } .getRepeatedConfidenceIntervalsSurvivalAll <- function(..., design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, firstParameterName) { stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) .assertIsValidStage(stage, design$kMax) futilityCorr <- rep(NA_real_, design$kMax) # necessary for adjustment for binding futility boundaries criticalValues <- design$criticalValues if (.isTrialDesignFisher(design)) { bounds <- design$alpha0Vec border <- C_ALPHA_0_VEC_DEFAULT conditionFunction <- .isFirstValueSmallerThanSecondValue } else { bounds <- design$futilityBounds border <- C_FUTILITY_BOUNDS_DEFAULT conditionFunction <- .isFirstValueGreaterThanSecondValue } repeatedConfidenceIntervals <- matrix(NA_real_, 2, design$kMax) for (k in (1:stage)) { startTime <- Sys.time() # Finding maximum upper and minimum lower bounds for RCIs thetaLow <- exp(.getUpperLowerThetaSurvival(design = design, dataInput = dataInput, theta = -1, stage = k, directionUpper = TRUE, conditionFunction = conditionFunction, firstParameterName = firstParameterName, secondValue = criticalValues[k])) thetaUp <- exp(.getUpperLowerThetaSurvival(design = design, dataInput= dataInput, theta = 1, stage = k, directionUpper = FALSE, conditionFunction = conditionFunction, firstParameterName = firstParameterName, secondValue = criticalValues[k])) # 
Finding upper and lower RCI limits through root function repeatedConfidenceIntervals[1, k] <- .getRootThetaSurvival( design = design, dataInput = dataInput, stage = k, directionUpper = C_DIRECTION_UPPER_DEFAULT, thetaLow = thetaLow, thetaUp = thetaUp, firstParameterName = firstParameterName, secondValue = criticalValues[k], tolerance = tolerance) repeatedConfidenceIntervals[2, k] <- .getRootThetaSurvival( design = design, dataInput = dataInput, stage = k, directionUpper = FALSE, thetaLow = thetaLow, thetaUp = thetaUp, firstParameterName = firstParameterName, secondValue = criticalValues[k], tolerance = tolerance) # Adjustment for binding futility bounds if (k > 1 && conditionFunction(bounds[k - 1], border) & design$bindingFutility) { parameterName <- ifelse(.isTrialDesignFisher(design), "pValues", firstParameterName) futilityCorr[k] <- .getRootThetaSurvival( design = design, dataInput = dataInput, stage = k - 1, directionUpper = directionUpper, thetaLow = thetaLow, thetaUp = thetaUp, firstParameterName = parameterName, secondValue = bounds[k - 1], tolerance = tolerance) if (directionUpper) { repeatedConfidenceIntervals[1, k] <- min(min(futilityCorr[2:k]), repeatedConfidenceIntervals[1, k]) } else { repeatedConfidenceIntervals[2, k] <- max(max(futilityCorr[2:k]), repeatedConfidenceIntervals[2, k]) } } .logProgress("Repeated confidence interval of stage %s calculated", startTime = startTime, k) if (!is.na(repeatedConfidenceIntervals[1, k]) && !is.na(repeatedConfidenceIntervals[2, k]) && repeatedConfidenceIntervals[1, k] > repeatedConfidenceIntervals[2, k]) { repeatedConfidenceIntervals[, k] <- rep(NA_real_, 2) } } return(repeatedConfidenceIntervals) } # # RCIs based on group sequential method # .getRepeatedConfidenceIntervalsSurvivalGroupSequential <- function(..., design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .warnInCaseOfUnknownArguments(functionName = ".getRepeatedConfidenceIntervalsSurvivalGroupSequential", ignore = c("stage"), ...) return(.getRepeatedConfidenceIntervalsSurvivalAll(design = design, dataInput = dataInput, firstParameterName = "overallPValues", directionUpper = directionUpper, tolerance = tolerance, ...)) } # # RCIs based on inverse normal combination test # .getRepeatedConfidenceIntervalsSurvivalInverseNormal <- function(..., design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .warnInCaseOfUnknownArguments(functionName = ".getRepeatedConfidenceIntervalsSurvivalInverseNormal", ignore = c("stage"), ...) return(.getRepeatedConfidenceIntervalsSurvivalAll(design = design, dataInput = dataInput, firstParameterName = "combInverseNormal", directionUpper = directionUpper, tolerance = tolerance, ...)) } # # RCIs based on Fisher's combination test # .getRepeatedConfidenceIntervalsSurvivalFisher <- function(..., design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .warnInCaseOfUnknownArguments(functionName = ".getRepeatedConfidenceIntervalsSurvivalFisher", ignore = c("stage"), ...) 
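	# The RCI limits are obtained by inverting the combination test: a root search over
	# thetaH0 finds the values at which the Fisher combination statistic meets the critical value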
return(.getRepeatedConfidenceIntervalsSurvivalAll(design = design, dataInput = dataInput, firstParameterName = "combFisher", directionUpper = directionUpper, tolerance = tolerance, ...)) } # # Calculation of conditional power based on group sequential method # .getConditionalPowerSurvivalGroupSequential <- function(design, stageResults, ..., allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, nPlanned = NA_real_, thetaH1 = NA_real_) { .assertIsTrialDesignGroupSequential(design) stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput()) .assertIsValidStage(stage, design$kMax) .warnInCaseOfUnknownArguments(functionName = ".getConditionalPowerSurvivalGroupSequential", ignore = c("stage"), ...) kMax <- design$kMax conditionalPower <- rep(NA_real_, kMax) weights <- stageResults$weightsInverseNormal informationRates <- design$informationRates nPlanned <- c(rep(NA, stage), nPlanned) if (stage == kMax) { .logDebug("Conditional power will be calculated only for subsequent stages ", "(stage = ", stage, ", kMax = ", design$kMax, ")") return(list( nPlanned = nPlanned, conditionalPower = conditionalPower )) } criticalValuesInverseNormal <- design$criticalValues .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned if (stageResults$direction == "upper") { thetaH1 <- log(thetaH1/stageResults$thetaH0) } else { thetaH1 <- -log(thetaH1/stageResults$thetaH0) } # Shifted decision region for use in getGroupSeqProbs # Group sequential method shiftedDecisionRegionUpper <- criticalValuesInverseNormal[(stage + 1):kMax] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - c(weights[1:stage] %*% stats::qnorm(1 - stageResults$pValues[1:stage])) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) if (design$sided == 2){ shiftedDecisionRegionLower <- -criticalValuesInverseNormal[(stage + 1):kMax] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - stats::qnorm(1 - stageResults$overallPValues[stage]) * sqrt(sum(weights[1:stage]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) } if (stage == kMax - 1) { shiftedFutilityBounds <- c() } else { shiftedFutilityBounds <- design$futilityBounds[(stage + 1):(kMax - 1)] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):(kMax - 1)]^2)) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) - c(weights[1:stage] %*% stats::qnorm(1 - stageResults$pValues[1:stage])) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):(kMax - 1)]) * weights[(stage + 1):(kMax - 1)]) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) } # Scaled information for use in getGroupSeqProbs scaledInformation <- (informationRates[(stage + 1):kMax] - informationRates[stage]) / (1 - informationRates[stage]) if (design$sided == 2) { decisionMatrix <- matrix(c(shiftedDecisionRegionLower, shiftedDecisionRegionUpper), nrow = 2, byrow = TRUE) } else { decisionMatrix <- matrix(c(shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT, shiftedDecisionRegionUpper), nrow = 2, byrow = TRUE) } probs <- .getGroupSequentialProbabilities(decisionMatrix = decisionMatrix, informationRates = 
scaledInformation) if (design$twoSidedPower){ conditionalPower[(stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ] + probs[1, ]) } else { conditionalPower[(stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ]) } nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned return(list(nPlanned = nPlanned, conditionalPower = conditionalPower )) } # # Calculation of conditional power based on inverse normal method # .getConditionalPowerSurvivalInverseNormal <- function(design, stageResults, ..., allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, nPlanned = NA_real_, thetaH1 = NA_real_) { .assertIsTrialDesignInverseNormal(design) stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput()) .assertIsValidStage(stage, design$kMax) .warnInCaseOfUnknownArguments(functionName = ".getConditionalPowerSurvivalInverseNormal", ignore = c("stage"), ...) kMax <- design$kMax conditionalPower <- rep(NA_real_, kMax) weights <- stageResults$weightsInverseNormal informationRates <- design$informationRates nPlanned <- c(rep(NA,stage), nPlanned) if (stage == kMax) { .logDebug("Conditional power will be calculated only for subsequent stages ", "(stage = ", stage, ", kMax = ", design$kMax, ")") return(list( nPlanned = nPlanned, conditionalPower = conditionalPower )) } criticalValuesInverseNormal <- design$criticalValues .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned if (stageResults$direction == "upper") { thetaH1 <- log(thetaH1/stageResults$thetaH0) } else { thetaH1 <- -log(thetaH1/stageResults$thetaH0) } # Shifted decision region for use in getGroupSeqProbs # Inverse normal method shiftedDecisionRegionUpper <- criticalValuesInverseNormal[(stage + 1):kMax] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - c(weights[1:stage] %*% stats::qnorm(1 - stageResults$pValues[1:stage])) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) if (design$sided == 2){ shiftedDecisionRegionLower <- -criticalValuesInverseNormal[(stage + 1):kMax] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - stats::qnorm(1 - stageResults$overallPValues[stage]) * sqrt(sum(weights[1:stage]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) } if (stage == kMax - 1) { shiftedFutilityBounds <- c() } else { shiftedFutilityBounds <- design$futilityBounds[(stage + 1):(kMax - 1)] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):(kMax - 1)]^2)) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) - c(weights[1:stage] %*% stats::qnorm(1 - stageResults$pValues[1:stage])) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):(kMax - 1)]) * weights[(stage + 1):(kMax - 1)]) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) } # Scaled information for use in getGroupSeqProbs scaledInformation <- (informationRates[(stage + 1):kMax] - informationRates[stage]) / (1 - informationRates[stage]) if (design$sided == 2) { decisionMatrix <- matrix(c(shiftedDecisionRegionLower, shiftedDecisionRegionUpper), nrow = 2, byrow = TRUE) } else { decisionMatrix <- matrix(c(shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT, 
shiftedDecisionRegionUpper), nrow = 2, byrow = TRUE) } probs <- .getGroupSequentialProbabilities(decisionMatrix = decisionMatrix, informationRates = scaledInformation) if (design$twoSidedPower){ conditionalPower[(stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ] + probs[1, ]) } else { conditionalPower[(stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ]) } nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned return(list( nPlanned = nPlanned, conditionalPower = conditionalPower )) } # # Calculation of conditional power based on Fisher combination test # .getConditionalPowerSurvivalFisher <- function(design, stageResults, ..., allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, nPlanned = NA_real_, thetaH1 = NA_real_, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) { .assertIsTrialDesignFisher(design) stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput()) .assertIsValidStage(stage, design$kMax) .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE) .warnInCaseOfUnknownArguments(functionName = ".getConditionalPowerSurvivalFisher", ignore = c("stage"), ...) kMax <- design$kMax conditionalPower <- rep(NA_real_, kMax) seed <- .setSeed(seed) simulated <- FALSE nPlanned <- c(rep(NA, stage), nPlanned) .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned if (stageResults$direction == "upper") { thetaH1 <- log(thetaH1/stageResults$thetaH0) } else { thetaH1 <- -log(thetaH1/stageResults$thetaH0) } criticalValues <- design$criticalValues weightsFisher <- stageResults$weightsFisher pValues <- stageResults$pValues if (stage < kMax - 1) { for (k in (stage + 1):kMax) { reject <- 0 for (i in 1:iterations) { reject <- reject + .getRejectValueConditionalPowerFisher( kMax = kMax, alpha0Vec = design$alpha0Vec, criticalValues = criticalValues, weightsFisher = weightsFisher, pValues = pValues, currentKMax = k, thetaH1 = thetaH1, stage = stage, nPlanned = nPlanned) } conditionalPower[k] <- reject / iterations } simulated <- TRUE } if (stage == kMax - 1) { divisor <- prod(pValues[1:(kMax - 1)]^weightsFisher[1:(kMax - 1)]) result <- 1 - (criticalValues[kMax] / divisor)^(1/weightsFisher[kMax]) if (result <= 0 || result >= 1) { warning("Could not calculate conditional power for stage ", kMax, call. 
= FALSE) conditionalPower[kMax] <- NA_real_ } else { conditionalPower[kMax] <- 1 - stats::pnorm(stats::qnorm(result) - thetaH1 * sqrt(nPlanned[kMax])) } } nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned return(list( nPlanned = nPlanned, conditionalPower = conditionalPower, iterations = iterations, seed = seed, simulated = simulated )) } .getConditionalPowerSurvival <- function(..., design, stageResults, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, thetaH1 = NA_real_) { if (any(is.na(nPlanned))) { return(list(conditionalPower = rep(NA_real_, design$kMax), simulated = FALSE)) } stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput()) .assertIsValidStage(stage, design$kMax) thetaH1 <- .assertIsValidThetaH1(thetaH1, stageResults, stage) .assertIsInOpenInterval(thetaH1, "thetaH1", 0, Inf) if (!.isValidNPlanned(nPlanned = nPlanned, kMax = design$kMax, stage = stage)) { return(list(conditionalPower = rep(NA_real_, design$kMax), simulated = FALSE)) } if (.isTrialDesignGroupSequential(design)) { return(.getConditionalPowerSurvivalGroupSequential( design = design, stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, ...)) } if (.isTrialDesignInverseNormal(design)) { return(.getConditionalPowerSurvivalInverseNormal( design = design, stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, ...)) } if (.isTrialDesignFisher(design)) { return(.getConditionalPowerSurvivalFisher( design = design, stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, ...)) } .stopWithWrongDesignMessage(design) } .getConditionalPowerPlotSurvival <- function(..., design, stageResults, stage, nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, thetaRange) { .assertIsValidAllocationRatioPlanned(allocationRatioPlanned, 2) .warnInCaseOfUnknownArguments(functionName = ".getConditionalPowerPlotSurvival", ...) if (!.associatedArgumentsAreDefined(nPlanned = nPlanned, thetaRange = thetaRange)) { warning("You must specify a planned sample size (nPlanned) and ", "a range of effect sizes (thetaRange)", call. 
= FALSE) } if (!.isValidNPlanned(nPlanned = nPlanned, kMax = design$kMax, stage = stage)) { return(list( xValues = 0, condPowerValues = 0, likelihoodValues = 0, main = "Conditional Power Plot with Likelihood", xlab = "Hazard ratio", ylab = "Conditional power / Likelihood", sub = "" )) } thetaRange <- .assertIsValidThetaRange(thetaRange = thetaRange, survivalDataEnabled = TRUE) condPowerValues <- rep(NA, length(thetaRange)) likelihoodValues <- rep(NA, length(thetaRange)) for (i in seq(along = thetaRange)) { if (.isTrialDesignGroupSequential(design)) { condPowerValues[i] <- .getConditionalPowerSurvivalGroupSequential(..., design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaRange[i])$conditionalPower[design$kMax] } if (.isTrialDesignInverseNormal(design)) { condPowerValues[i] <- .getConditionalPowerSurvivalInverseNormal(..., design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaRange[i])$conditionalPower[design$kMax] } if (.isTrialDesignFisher(design)) { condPowerValues[i] <- .getConditionalPowerSurvivalFisher(..., design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaRange[i])$conditionalPower[design$kMax] } likelihoodValues[i] <- stats::dnorm(log(thetaRange[i]), log(stageResults$effectSizes[stage]), 2 / sqrt(stageResults$overallEvents[stage])) / stats::dnorm(0, 0, 2 / sqrt(stageResults$overallEvents[stage])) } subTitle <- paste0("Stage = ", stage, ", maximum number of remaining events = ", sum(nPlanned), ", allocation ratio = ", allocationRatioPlanned) return(list( xValues = thetaRange, condPowerValues = condPowerValues, likelihoodValues = likelihoodValues, main = "Conditional Power Plot with Likelihood", xlab = "Hazard ratio", ylab = "Conditional power / Likelihood", sub = subTitle )) } # # Calculation of final confidence interval # based on group sequential test without SSR (general case). 
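#
# A minimal usage sketch (all values hypothetical, and assuming a survival
# dataset created via getDataset(events, logRanks, allocationRatios)); this
# internal function is typically reached via the exported
# getFinalConfidenceInterval():
#   dataSurvival <- getDataset(events = c(15, 28), logRanks = c(1.8, 2.2),
#       allocationRatios = c(1, 1))
#   getFinalConfidenceInterval(design = getDesignGroupSequential(kMax = 2),
#       dataInput = dataSurvival)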
# .getFinalConfidenceIntervalSurvivalGroupSequential <- function(design, dataInput, stage, thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { stageResults <- .getStageResultsSurvival(design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper) finalConfidenceIntervalGeneral <- rep(NA_real_, 2) medianUnbiasedGeneral <- NA_real_ stageGroupSeq <- .getStageGroupSeq(design, stageResults, stage) finalStage <- min(stageGroupSeq, design$kMax) # Early stopping or at end of study if (stageGroupSeq < design$kMax || stage == design$kMax) { if (stageGroupSeq == 1) { finalConfidenceIntervalGeneral[1] <- stageResults$logRanks[1] - stats::qnorm(1 - design$alpha / design$sided) finalConfidenceIntervalGeneral[2] <- stageResults$logRanks[1] + stats::qnorm(1 - design$alpha / design$sided) medianUnbiasedGeneral <- stageResults$logRanks[1] } else { finalConfidenceIntervalGeneral[1] <- .getDecisionMatrixRoot(design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = "overallPValues", case = "finalConfidenceIntervalGeneralLower") finalConfidenceIntervalGeneral[2] <- .getDecisionMatrixRoot(design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = "overallPValues", case = "finalConfidenceIntervalGeneralUpper") medianUnbiasedGeneral <- .getDecisionMatrixRoot(design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = "overallPValues", case = "medianUnbiasedGeneral") } } if (is.na(finalConfidenceIntervalGeneral[1]) && (stageGroupSeq > 1)) { finalStage <- NA_integer_ } finalConfidenceInterval <- rep(NA_real_, 2) medianUnbiased <- NA_real_ if (!is.na(finalStage)) { # Retransformation y <- .getStageResultsSurvival(design = design, dataInput = dataInput, stage = finalStage, thetaH0 = thetaH0, directionUpper = directionUpper) stderr <- (1 + y$overallAllocationRatios[finalStage]) / sqrt(y$overallAllocationRatios[finalStage])/ sqrt(stageResults$overallEvents[finalStage]) directionUpperSign <- ifelse(directionUpper, 1, -1) if (stageGroupSeq == 1) { finalConfidenceInterval <- exp(stderr*finalConfidenceIntervalGeneral) medianUnbiased <- exp(stderr*medianUnbiasedGeneral) } else { finalConfidenceInterval[1] <- exp(finalConfidenceIntervalGeneral[1] * (1 + y$overallAllocationRatios[finalStage]) / sqrt(y$overallAllocationRatios[finalStage]) + directionUpperSign * log(thetaH0)) finalConfidenceInterval[2] <- exp(finalConfidenceIntervalGeneral[2] * (1 + y$overallAllocationRatios[finalStage]) / sqrt(y$overallAllocationRatios[finalStage]) + directionUpperSign * log(thetaH0)) medianUnbiased <- exp(medianUnbiasedGeneral * (1 + y$overallAllocationRatios[finalStage]) / sqrt(y$overallAllocationRatios[finalStage]) + directionUpperSign * log(thetaH0)) } } if (!directionUpper) { medianUnbiasedGeneral = 1/medianUnbiasedGeneral finalConfidenceIntervalGeneral = 1/finalConfidenceIntervalGeneral if (stageGroupSeq > 1) { medianUnbiased = 1/medianUnbiased finalConfidenceInterval = 1/finalConfidenceInterval } } return(list( finalStage = finalStage, medianUnbiasedGeneral = medianUnbiasedGeneral, finalConfidenceIntervalGeneral = sort(finalConfidenceIntervalGeneral), medianUnbiased = medianUnbiased, finalConfidenceInterval = sort(finalConfidenceInterval) )) } # # Calculation of final confidence interval # based on inverse normal method, only valid for kMax <= 2 or no SSR. 
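# For kMax > 2 combined with a sample size recalculation the interval is not
# strictly valid; the function below emits a corresponding warning in that case.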
# .getFinalConfidenceIntervalSurvivalInverseNormal <- function(design, dataInput, stage, thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { stageResults <- .getStageResultsSurvival(design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper) finalConfidenceIntervalGeneral <- rep(NA_real_, 2) medianUnbiasedGeneral <- NA_real_ stageInvNormal <- .getStageInverseNormal(design, stageResults, stage) finalStage <- min(stageInvNormal, design$kMax) # Early stopping or at end of study if (stageInvNormal < design$kMax || stage == design$kMax) { if (stageInvNormal == 1) { finalConfidenceIntervalGeneral[1] <- stageResults$logRanks[1] - stats::qnorm(1 - design$alpha / design$sided) finalConfidenceIntervalGeneral[2] <- stageResults$logRanks[1] + stats::qnorm(1 - design$alpha / design$sided) medianUnbiasedGeneral <- stageResults$logRanks[1] } else { if (design$kMax > 2) { warning("Calculation of final confidence interval performed for kMax = ", design$kMax, " (for kMax > 2, it is theoretically shown that it is valid only ", "if no sample size change was performed)", call. = FALSE) } finalConfidenceIntervalGeneral[1] <- .getDecisionMatrixRoot(design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = "combInverseNormal", case = "finalConfidenceIntervalGeneralLower") finalConfidenceIntervalGeneral[2] <- .getDecisionMatrixRoot(design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = "combInverseNormal", case = "finalConfidenceIntervalGeneralUpper") medianUnbiasedGeneral <- .getDecisionMatrixRoot(design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = "combInverseNormal", case = "medianUnbiasedGeneral") } } if (is.na(finalConfidenceIntervalGeneral[1]) && (stageInvNormal > 1)) { finalStage <- NA_integer_ } finalConfidenceInterval <- rep(NA_real_, 2) medianUnbiased <- NA_real_ if (!is.na(finalStage)) { # Retransformation y <- .getStageResultsSurvival(design = design, dataInput = dataInput, stage = finalStage, thetaH0 = thetaH0, directionUpper = directionUpper) stderr <- (1 + y$overallAllocationRatios[finalStage]) / sqrt(y$overallAllocationRatios[finalStage])/ sqrt(stageResults$overallEvents[finalStage]) directionUpperSign <- ifelse(directionUpper, 1, -1) if (stageInvNormal == 1) { finalConfidenceInterval <- exp(stderr*finalConfidenceIntervalGeneral) medianUnbiased <- exp(stderr*medianUnbiasedGeneral) } else { finalConfidenceInterval[1] <- exp(finalConfidenceIntervalGeneral[1] * (1 + y$overallAllocationRatios[finalStage]) / sqrt(y$overallAllocationRatios[finalStage]) + directionUpperSign * log(thetaH0)) finalConfidenceInterval[2] <- exp(finalConfidenceIntervalGeneral[2] * (1 + y$overallAllocationRatios[finalStage]) / sqrt(y$overallAllocationRatios[finalStage]) + directionUpperSign * log(thetaH0)) medianUnbiased <- exp(medianUnbiasedGeneral * (1 + y$overallAllocationRatios[finalStage]) / sqrt(y$overallAllocationRatios[finalStage]) + directionUpperSign * log(thetaH0)) } } if (!directionUpper) { medianUnbiasedGeneral = 1/medianUnbiasedGeneral finalConfidenceIntervalGeneral = 1/finalConfidenceIntervalGeneral if (stageInvNormal > 1) { medianUnbiased = 1/medianUnbiased finalConfidenceInterval = 1/finalConfidenceInterval } } return(list( finalStage = finalStage, medianUnbiasedGeneral = medianUnbiasedGeneral, finalConfidenceIntervalGeneral = 
sort(finalConfidenceIntervalGeneral), medianUnbiased = medianUnbiased, finalConfidenceInterval = sort(finalConfidenceInterval) )) } .getQFunctionResultBasedOnDataInputSurvival <- function(design, dataInput, theta, stage, infRate, directionUpper = directionUpper) { stageResults <- .getStageResultsSurvival(design = design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = directionUpper) return(.getQFunctionResult(design = design, stageResults = stageResults, theta = theta, infRate = infRate)) } # # Calculation of final confidence interval # based on Fisher combination test, only valid for kMax <= 2. # .getFinalConfidenceIntervalSurvivalFisher <- function(design, dataInput, stage, thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { stageResults <- .getStageResultsSurvival(design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper) stageFisher <- .getStageFisher(design, stageResults, stage) finalStage <- min(stageFisher, design$kMax) finalConfidenceInterval <- rep(NA_real_, 2) medianUnbiased <- NA_real_ # early stopping or at end of study if (stageFisher < design$kMax || stage == design$kMax) { warning("Calculation of final confidence interval for Fisher's ", "design not implemented yet.", call. = FALSE) return(list(finalStage = NA_integer_ , medianUnbiased = NA_real_, finalConfidenceInterval = rep(NA_real_, design$kMax))) } return(list( finalStage = finalStage, medianUnbiased = medianUnbiased, finalConfidenceInterval = finalConfidenceInterval )) } .getFinalConfidenceIntervalSurvival <- function(..., design, dataInput, thetaH0 = NA_real_, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) .assertIsValidStage(stage, design$kMax) .assertIsValidThetaH0DataInput(thetaH0, dataInput) .warnInCaseOfUnknownArguments(functionName = "getFinalConfidenceIntervalSurvival", ignore = c("stage"), ...) if (is.na(thetaH0)) { thetaH0 <- C_THETA_H0_SURVIVAL_DEFAULT } if (.isTrialDesignGroupSequential(design)) { return(.getFinalConfidenceIntervalSurvivalGroupSequential( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, tolerance = tolerance)) } if (.isTrialDesignInverseNormal(design)) { return(.getFinalConfidenceIntervalSurvivalInverseNormal( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, tolerance = tolerance)) } if (.isTrialDesignFisher(design)) { return(.getFinalConfidenceIntervalSurvivalFisher( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, tolerance = tolerance)) } .stopWithWrongDesignMessage(design) } rpact/R/f_analysis_base_rates.R0000644000176200001440000020526113574405020016235 0ustar liggesusers###################################################################################### # # # -- Analysis of rates with group sequential and combination test -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.1 # # Date: 25-11-2019 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### # @title # Get Analysis Results Rates # # @description # Returns an analysis result object. # # @param design The trial design. # # @return Returns a \code{AnalysisResultsRates} object. # # @keywords internal # .getAnalysisResultsRates <- function(design, dataInput, ...) { if (.isTrialDesignGroupSequential(design)) { return(.getAnalysisResultsRatesGroupSequential(design, dataInput = dataInput, ...)) } if (.isTrialDesignInverseNormal(design)) { return(.getAnalysisResultsRatesInverseNormal(design, dataInput = dataInput, ...)) } if (.isTrialDesignFisher(design)) { return(.getAnalysisResultsRatesFisher(design, dataInput = dataInput, ...)) } .stopWithWrongDesignMessage(design) } .getAnalysisResultsRatesInverseNormal <- function(design, ..., dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, thetaH0 = C_THETA_H0_RATES_DEFAULT, pi1 = NA_real_, pi2 = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .assertIsTrialDesignInverseNormal(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) .warnInCaseOfUnknownArguments(functionName = ".getAnalysisResultsRatesInverseNormal", ignore = c("stage"), ...) results <- AnalysisResultsInverseNormal(design = design, dataInput = dataInput) .getAnalysisResultsRatesAll(results = results, design = design, dataInput = dataInput, stage = stage, directionUpper = directionUpper, normalApproximation = normalApproximation, thetaH0 = thetaH0, pi1 = pi1, pi2 = pi2, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance) return(results) } .getAnalysisResultsRatesGroupSequential <- function(design, ..., dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, thetaH0 = C_THETA_H0_RATES_DEFAULT, pi1 = NA_real_, pi2 = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .assertIsTrialDesignGroupSequential(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) .warnInCaseOfUnknownArguments(functionName = ".getAnalysisResultsRatesGroupSequential", ignore = c("stage"), ...) 
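# Create the results container and populate it with all stage-wise and final
# quantities (effect sizes, test actions, conditional power, repeated
# confidence intervals, repeated/final p-values, and final confidence intervals).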
results <- AnalysisResultsGroupSequential(design = design, dataInput = dataInput) .getAnalysisResultsRatesAll(results = results, design = design, dataInput = dataInput, stage = stage, directionUpper = directionUpper, normalApproximation = normalApproximation, thetaH0 = thetaH0, pi1 = pi1, pi2 = pi2, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance) return(results) } .getAnalysisResultsRatesFisher <- function(design, ..., dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, thetaH0 = C_THETA_H0_RATES_DEFAULT, pi1 = NA_real_, pi2 = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) { .assertIsTrialDesignFisher(design) .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) .warnInCaseOfUnknownArguments(functionName = ".getAnalysisResultsRatesFisher", ignore = c("stage"), ...) results <- AnalysisResultsFisher(design = design, dataInput = dataInput) .getAnalysisResultsRatesAll(results = results, design = design, dataInput = dataInput, stage = stage, directionUpper = directionUpper, normalApproximation = normalApproximation, thetaH0 = thetaH0, pi1 = pi1, pi2 = pi2, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance, iterations = iterations, seed = seed) return(results) } # # The following parameters will be taken from 'design': # stages, informationRates, criticalValues, futilityBounds, alphaSpent, stageLevels # .getAnalysisResultsRatesAll <- function(results, design, dataInput, stage, directionUpper, normalApproximation, thetaH0, pi1, pi2, nPlanned, allocationRatioPlanned, tolerance, iterations, seed) { startTime <- Sys.time() stageResults <- .getStageResultsRates(design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation) results$.stageResults <- stageResults .logProgress("Stage results calculated", startTime = startTime) .assertIsValidAllocationRatioPlanned(allocationRatioPlanned, dataInput$getNumberOfGroups()) pi1 <- .assertIsValidPi1(pi1, stageResults, stage) if (dataInput$getNumberOfGroups() == 2) { pi2 <- .assertIsValidPi2(pi2, stageResults, stage) } results$directionUpper <- directionUpper results$normalApproximation <- normalApproximation results$allocationRatioPlanned <- allocationRatioPlanned results$thetaH0 <- thetaH0 results$pi1 <- pi1 results$pi2 <- pi2 results$nPlanned <- nPlanned while (length(results$nPlanned) < design$kMax) { results$nPlanned <- c(NA_real_, results$nPlanned) } # effect size results$effectSizes <- stageResults$effectSizes # test statistic results$testStatistics <- stageResults$testStatistics # p-value results$pValues <- stageResults$pValues # combined test statistic and test action if (.isTrialDesignInverseNormal(design)) { results$combinationTestStatistics <- stageResults$combInverseNormal } else if (.isTrialDesignGroupSequential(design)) { results$overallTestStatistics <- stageResults$overallTestStatistics results$overallPValues <- stageResults$overallPValues } else if (.isTrialDesignFisher(design)) { results$combinationTestStatistics <- stageResults$combFisher } # test actions results$testActions <- getTestActions(design = design, stageResults = stageResults, stage = stage) # conditional power startTime <- 
Sys.time() if (.isTrialDesignFisher(design)) { conditionalPowerList <- .getConditionalPowerRates(design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, pi1 = pi1, pi2 = pi2, iterations = iterations, seed = seed) if (conditionalPowerList$simulated) { results$conditionalPowerSimulated <- conditionalPowerList$conditionalPower } else { results$conditionalPower <- conditionalPowerList$conditionalPower results$conditionalPowerSimulated <- numeric(0) } } else { results$conditionalPower <- .getConditionalPowerRates(design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, pi1 = pi1, pi2 = pi2)$conditionalPower } .logProgress("Conditional power calculated", startTime = startTime) # CRP - conditional rejection probabilities startTime <- Sys.time() results$conditionalRejectionProbabilities <- getConditionalRejectionProbabilities( design = design, stageResults = stageResults, stage = stage) .logProgress("Conditional rejection probabilities (CRP) calculated", startTime = startTime) # RCI - repeated confidence interval startTime <- Sys.time() repeatedConfidenceIntervals <- .getRepeatedConfidenceIntervalsRates( design = design, dataInput = dataInput, stage = stage, normalApproximation = normalApproximation, tolerance = tolerance) results$repeatedConfidenceIntervalLowerBounds <- repeatedConfidenceIntervals[1, ] results$repeatedConfidenceIntervalUpperBounds <- repeatedConfidenceIntervals[2, ] .logProgress("Repeated confidence interval calculated", startTime = startTime) # repeated p-value startTime <- Sys.time() results$repeatedPValues <- getRepeatedPValues(design = design, stageResults = stageResults, stage = stage, tolerance = tolerance) .logProgress("Repeated p-values calculated", startTime = startTime) # final p-value startTime <- Sys.time() finalPValue <- getFinalPValue(design = design, stageResults = stageResults, stage = stage) results$finalPValues <- .getVectorWithFinalValueAtFinalStage(kMax = design$kMax, finalValue = finalPValue$pFinal, finalStage = finalPValue$finalStage) results$finalStage <- finalPValue$finalStage .logProgress("Final p-value calculated", startTime = startTime) # final confidence interval & median unbiased estimate startTime <- Sys.time() finalConfidenceIntervals <- .getFinalConfidenceIntervalRates(design = design, dataInput = dataInput, thetaH0 = thetaH0, stage = stage, directionUpper = directionUpper, normalApproximation = normalApproximation, tolerance = tolerance) .logProgress("Final confidence interval calculated", startTime = startTime) if (!is.null(finalConfidenceIntervals)) { finalStage <- finalConfidenceIntervals$finalStage results$finalConfidenceIntervalLowerBounds <- .getVectorWithFinalValueAtFinalStage(kMax = design$kMax, finalValue = finalConfidenceIntervals$finalConfidenceInterval[1], finalStage = finalStage) results$finalConfidenceIntervalUpperBounds <- .getVectorWithFinalValueAtFinalStage(kMax = design$kMax, finalValue = finalConfidenceIntervals$finalConfidenceInterval[2], finalStage = finalStage) results$medianUnbiasedEstimates <- .getVectorWithFinalValueAtFinalStage(kMax = design$kMax, finalValue = finalConfidenceIntervals$medianUnbiased, finalStage = finalStage) } return(results) } # @title # Get Stage Results Rates # # @description # Returns a stage results object. # # @param design the trial design. # # @return Returns a \code{StageResultsRates} object. 
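#
# @details
# For one treatment group, stage-wise p-values are based on the normal
# approximation or on the exact binomial test; for two groups, on the normal
# approximation (using Farrington-Manning maximum likelihood estimates if
# thetaH0 != 0) or on Fisher's exact test.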
# # @keywords internal # .getStageResultsRates <- function(..., design, dataInput, thetaH0 = NA_real_, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT) { .assertIsDatasetRates(dataInput) .assertIsValidThetaH0DataInput(thetaH0, dataInput) .warnInCaseOfUnknownArguments(functionName = "getStageResultsRates", ignore = c("stage"), ...) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) effectSizes <- rep(NA_real_, design$kMax) if (dataInput$getNumberOfGroups() == 1) { if (is.na(thetaH0)) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'thetaH0' must be defined") } if (normalApproximation) { overallTestStatistics <- c((dataInput$getOverallEventsUpTo(stage) / dataInput$getOverallSampleSizesUpTo(stage) - thetaH0) / sqrt(thetaH0 * (1 - thetaH0)) * sqrt(dataInput$getOverallSampleSizesUpTo(stage)), rep(NA_real_, design$kMax - stage)) if (directionUpper) { overallPValues <- 1 - stats::pnorm(overallTestStatistics) } else { overallPValues <- stats::pnorm(overallTestStatistics) } } else { overallTestStatistics <- rep(NA_real_, design$kMax) if (directionUpper) { overallPValues <- stats::pbinom(dataInput$getOverallEventsUpTo(stage) - 1, dataInput$getOverallSampleSizesUpTo(stage), thetaH0, lower.tail = FALSE) } else { overallPValues <- stats::pbinom(dataInput$getOverallEventsUpTo(stage), dataInput$getOverallSampleSizesUpTo(stage), thetaH0, lower.tail = TRUE) } overallTestStatistics <- stats::qnorm(1 - overallPValues) } effectSizes[1:stage] <- dataInput$getOverallEventsUpTo(stage) / dataInput$getOverallSampleSizesUpTo(stage) } if (dataInput$getNumberOfGroups() == 2) { if (is.na(thetaH0)) { thetaH0 <- C_THETA_H0_RATES_DEFAULT } overallEvents1 <- dataInput$getOverallEvents(group = 1) overallEvents2 <- dataInput$getOverallEvents(group = 2) overallTestStatistics <- rep(NA_real_, design$kMax) overallPValues <- rep(NA_real_, design$kMax) for (k in 1:stage) { if (normalApproximation) { if (thetaH0 == 0) { if ((overallEvents1[k] + overallEvents2[k] == 0) || (overallEvents1[k] + overallEvents2[k] == sum(dataInput$getSampleSizesUpTo(k, 1)) + sum(dataInput$getSampleSizesUpTo(k, 2)))) { overallTestStatistics[k] <- 0 } else { overallRateH0 <- (overallEvents1[k] + overallEvents2[k]) / (sum(dataInput$getSampleSizesUpTo(k, 1)) + sum(dataInput$getSampleSizesUpTo(k, 2))) overallTestStatistics[k] <- (overallEvents1[k]/sum(dataInput$getSampleSizesUpTo(k, 1)) - overallEvents2[k]/sum(dataInput$getSampleSizesUpTo(k, 2)) - thetaH0) / sqrt(overallRateH0 * (1 - overallRateH0) * (1 / sum(dataInput$getSampleSizesUpTo(k, 1)) + 1 / sum(dataInput$getSampleSizesUpTo(k, 2)))) } } else { y <- .getFarringtonManningValues(overallEvents1[k] / sum(dataInput$getSampleSizesUpTo(k, 1)), overallEvents2[k] / sum(dataInput$getSampleSizesUpTo(k, 2)), thetaH0, sum(dataInput$getSampleSizesUpTo(k, 1)) / sum(dataInput$getSampleSizesUpTo(k, 2)), "diff") overallTestStatistics[k] <- (overallEvents1[k] / sum(dataInput$getSampleSizesUpTo(k, 1)) - overallEvents2[k]/sum(dataInput$getSampleSizesUpTo(k, 2)) - thetaH0) / sqrt(y$ml1 * (1 - y$ml1) / sum(dataInput$getSampleSizesUpTo(k, 1)) + y$ml2 * (1 - y$ml2) / sum(dataInput$getSampleSizesUpTo(k, 2))) } if (directionUpper) { overallPValues[k] <- 1 - stats::pnorm(overallTestStatistics[k]) } else { overallPValues[k] <- stats::pnorm(overallTestStatistics[k]) } } else { if (thetaH0 != 0) { stop(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "thetaH0 must be equal 0 for performing Fisher's exact test") } if (directionUpper) { overallPValues[k] <- 
stats::phyper(overallEvents1[k] - 1, overallEvents1[k] + overallEvents2[k], sum(dataInput$getSampleSizesUpTo(k, 1)) + sum(dataInput$getSampleSizesUpTo(k, 2)) - overallEvents1[k] - overallEvents2[k], sum(dataInput$getSampleSizesUpTo(k, 1)), lower.tail = FALSE) } else { overallPValues[k] <- stats::phyper(overallEvents1[k], overallEvents1[k] + overallEvents2[k], sum(dataInput$getSampleSizesUpTo(k, 1)) + sum(dataInput$getSampleSizesUpTo(k, 2)) - overallEvents1[k] - overallEvents2[k], sum(dataInput$getSampleSizesUpTo(k, 1)), lower.tail = TRUE) } overallTestStatistics <- stats::qnorm(1 - overallPValues) } } effectSizes[1:stage] <- overallEvents1[1:stage]/cumsum(dataInput$getSampleSizesUpTo(stage,1)) - overallEvents2[1:stage]/cumsum(dataInput$getSampleSizesUpTo(stage,2)) } # calculation of stagewise test statistics and combination tests testStatistics <- rep(NA_real_, design$kMax) pValues <- rep(NA_real_, design$kMax) combInverseNormal <- rep(NA_real_, design$kMax) combFisher <- rep(NA_real_, design$kMax) weightsInverseNormal <- .getWeightsInverseNormal(design) weightsFisher <- .getWeightsFisher(design) for (k in 1:stage) { if (dataInput$getNumberOfGroups() == 1) { if (normalApproximation) { # stage-wise test statistics testStatistics[k] <- (dataInput$getEvent(k) / dataInput$getSampleSize(k) - thetaH0) / sqrt(thetaH0 * (1 - thetaH0)) * sqrt(dataInput$getSampleSize(k)) pValues[k] <- 1 - stats::pnorm(testStatistics[k]) if (!directionUpper) { pValues[k] <- 1 - pValues[k] } } else { testStatistics[k] <- NA_real_ if (directionUpper) { pValues[k] <- stats::pbinom(dataInput$getEvent(k) - 1, dataInput$getSampleSize(k), thetaH0, lower.tail = FALSE) } else { pValues[k] <- stats::pbinom(dataInput$getEvent(k), dataInput$getSampleSize(k), thetaH0, lower.tail = TRUE) } } } if (dataInput$getNumberOfGroups() == 2) { if (normalApproximation) { # stage-wise test statistics if (thetaH0 == 0) { if ((dataInput$getEvent(k, 1) + dataInput$getEvent(k, 2) == 0) || (dataInput$getEvent(k, 1) + dataInput$getEvent(k, 2) == dataInput$getSampleSize(k, 1) + dataInput$getSampleSize(k, 2))) { testStatistics[k] <- 0 } else { rateH0 <- (dataInput$getEvent(k, 1) + dataInput$getEvent(k, 2)) / (dataInput$getSampleSize(k, 1) + dataInput$getSampleSize(k, 2)) testStatistics[k] <- (dataInput$getEvent(k, 1)/dataInput$getSampleSize(k, 1) - dataInput$getEvent(k, 2)/dataInput$getSampleSize(k, 2) - thetaH0) / sqrt(rateH0 * (1 - rateH0) * (1 / dataInput$getSampleSize(k, 1) + 1 / dataInput$getSampleSize(k, 2))) } } else { y <- .getFarringtonManningValues(dataInput$getEvent(k, 1)/dataInput$getSampleSize(k, 1), dataInput$getEvent(k, 2)/dataInput$getSampleSize(k, 2), thetaH0, dataInput$getSampleSize(k, 1)/dataInput$getSampleSize(k, 2), "diff") testStatistics[k] <- (dataInput$getEvent(k, 1)/dataInput$getSampleSize(k, 1) - dataInput$getEvent(k, 2)/dataInput$getSampleSize(k, 2) - thetaH0) / sqrt(y$ml1 * (1 - y$ml1) / dataInput$getSampleSize(k, 1) + y$ml2 * (1 - y$ml2) / dataInput$getSampleSize(k, 2)) } if (directionUpper) { pValues[k] <- 1 - stats::pnorm(testStatistics[k]) } else { pValues[k] <- stats::pnorm(testStatistics[k]) } } else { testStatistics[k] <- NA_real_ if (directionUpper) { pValues[k] <- stats::phyper(dataInput$getEvent(k, 1) - 1, dataInput$getEvent(k, 1) + dataInput$getEvent(k, 2), dataInput$getSampleSize(k, 1) + dataInput$getSampleSize(k, 2) - dataInput$getEvent(k, 1) - dataInput$getEvent(k, 2), dataInput$getSampleSize(k, 1), lower.tail = FALSE) } else { pValues[k] <- stats::phyper(dataInput$getEvent(k, 1), dataInput$getEvent(k, 1) + 
dataInput$getEvent(k, 2), dataInput$getSampleSize(k, 1) + dataInput$getSampleSize(k, 2) - dataInput$getEvent(k, 1) - dataInput$getEvent(k, 2), dataInput$getSampleSize(k, 1), lower.tail = TRUE) } } } # inverse normal test combInverseNormal[k] <- (weightsInverseNormal[1:k] %*% stats::qnorm(1 - pValues[1:k])) / sqrt(sum(weightsInverseNormal[1:k]^2)) # Fisher combination test combFisher[k] <- prod(pValues[1:k]^weightsFisher[1:k]) } direction <- ifelse(directionUpper, C_DIRECTION_UPPER, C_DIRECTION_LOWER) if (dataInput$getNumberOfGroups() == 1) { stageResults <- StageResultsRates( design = design, dataInput = dataInput, overallTestStatistics = overallTestStatistics, overallPValues = overallPValues, effectSizes = effectSizes, overallEvents = dataInput$getOverallEvents(group = 1), overallSampleSizes = dataInput$getOverallSampleSizesUpTo(stage, 1), testStatistics = testStatistics, pValues = pValues, combInverseNormal = combInverseNormal, combFisher = combFisher, weightsInverseNormal = weightsInverseNormal, weightsFisher = weightsFisher, thetaH0 = thetaH0, direction = ifelse(directionUpper, C_DIRECTION_UPPER, C_DIRECTION_LOWER), normalApproximation = normalApproximation ) } else if (dataInput$getNumberOfGroups() == 2) { stageResults <- StageResultsRates( design = design, dataInput = dataInput, overallTestStatistics = overallTestStatistics, overallPValues = overallPValues, effectSizes = effectSizes, overallEvents1 = dataInput$getOverallEvents(group = 1), overallEvents2 = dataInput$getOverallEvents(group = 2), overallSampleSizes1 = dataInput$getOverallSampleSizesUpTo(stage, 1), overallSampleSizes2 = dataInput$getOverallSampleSizesUpTo(stage, 2), testStatistics = testStatistics, pValues = pValues, combInverseNormal = combInverseNormal, combFisher = combFisher, weightsInverseNormal = weightsInverseNormal, weightsFisher = weightsFisher, thetaH0 = thetaH0, direction = ifelse(directionUpper, C_DIRECTION_UPPER, C_DIRECTION_LOWER), normalApproximation = normalApproximation ) } if (.isTrialDesignFisher(design)) { stageResults$.setParameterType("combFisher", C_PARAM_GENERATED) stageResults$.setParameterType("weightsFisher", C_PARAM_GENERATED) } else if (.isTrialDesignInverseNormal(design)) { stageResults$.setParameterType("combInverseNormal", C_PARAM_GENERATED) stageResults$.setParameterType("weightsInverseNormal", C_PARAM_GENERATED) } return(stageResults) } # # Calculation of lower and upper limits of repeated confidence intervals (RCIs) for Rates # .getRepeatedConfidenceIntervalsRates <- function(design, ...) 
{ if (.isTrialDesignGroupSequential(design)) { return(.getRepeatedConfidenceIntervalsRatesGroupSequential(design = design, ...)) } if (.isTrialDesignInverseNormal(design)) { return(.getRepeatedConfidenceIntervalsRatesInverseNormal(design = design, ...)) } if (.isTrialDesignFisher(design)) { return(.getRepeatedConfidenceIntervalsRatesFisher(design = design, ...)) } .stopWithWrongDesignMessage(design) } .getRootThetaRates <- function(..., design, dataInput, stage, directionUpper, normalApproximation, firstParameterName, secondValue, tolerance, acceptResultsOutOfTolerance) { if (dataInput$getNumberOfGroups() == 2) { thetaLow <- -1 + tolerance } else { thetaLow <- tolerance } thetaUp <- 1 - tolerance if (dataInput$getNumberOfGroups() == 1 && !normalApproximation) { acceptResultsOutOfTolerance <- FALSE } result <- .getOneDimensionalRoot( function(theta) { stageResults <- .getStageResultsRates(design = design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = directionUpper, normalApproximation = normalApproximation) firstValue <- stageResults[[firstParameterName]][stage] if (.isTrialDesignGroupSequential(design)) { firstValue <- stats::qnorm(1 - firstValue) } return(firstValue - secondValue) }, lower = thetaLow, upper = thetaUp, tolerance = tolerance, acceptResultsOutOfTolerance = acceptResultsOutOfTolerance) return(result) } .getRepeatedConfidenceIntervalsRatesAll <- function(..., design, dataInput, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, firstParameterName) { stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) .assertIsValidStage(stage, design$kMax) if (!normalApproximation && dataInput$getNumberOfGroups() == 2) { normalApproximation <- TRUE warning("Repeated confidence intervals will be calculated under the normal approximation", call. 
= FALSE) } futilityCorr <- rep(NA_real_, design$kMax) # necessary for adjustment for binding futility boundaries criticalValues <- design$criticalValues if (.isTrialDesignFisher(design)) { bounds <- design$alpha0Vec border <- C_ALPHA_0_VEC_DEFAULT conditionFunction <- .isFirstValueSmallerThanSecondValue } else { bounds <- design$futilityBounds border <- C_FUTILITY_BOUNDS_DEFAULT conditionFunction <- .isFirstValueGreaterThanSecondValue } repeatedConfidenceIntervals <- matrix(NA_real_, 2, design$kMax) for (k in (1:stage)) { startTime <- Sys.time() # finding upper and lower RCI limits through root function if (dataInput$getNumberOfGroups() == 1) { if (dataInput$overallEvents[k] == 0){ repeatedConfidenceIntervals[1, k] <- 0 } else { repeatedConfidenceIntervals[1, k] <- .getRootThetaRates( design = design, dataInput = dataInput, stage = k, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = normalApproximation, firstParameterName = firstParameterName, secondValue = criticalValues[k], tolerance = tolerance, acceptResultsOutOfTolerance = TRUE) } if (dataInput$overallEvents[k] == dataInput$overallSampleSizes[k]){ repeatedConfidenceIntervals[2, k] <- 1 } else { repeatedConfidenceIntervals[2, k] <- .getRootThetaRates( design = design, dataInput = dataInput, stage = k, directionUpper = FALSE, normalApproximation = normalApproximation, firstParameterName = firstParameterName, secondValue = criticalValues[k], tolerance = tolerance, acceptResultsOutOfTolerance = TRUE) } } if (dataInput$getNumberOfGroups() == 2) { repeatedConfidenceIntervals[1, k] <- .getRootThetaRates( design = design, dataInput = dataInput, stage = k, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = normalApproximation, firstParameterName = firstParameterName, secondValue = criticalValues[k], tolerance = tolerance, acceptResultsOutOfTolerance = TRUE) repeatedConfidenceIntervals[2, k] <- .getRootThetaRates( design = design, dataInput = dataInput, stage = k, directionUpper = FALSE, normalApproximation = normalApproximation, firstParameterName = firstParameterName, secondValue = criticalValues[k], tolerance = tolerance, acceptResultsOutOfTolerance = TRUE) } # adjustment for binding futility bounds if (k > 1 && conditionFunction(bounds[k - 1], border) && design$bindingFutility) { parameterName <- ifelse(.isTrialDesignFisher(design), "pValues", firstParameterName) futilityCorr[k] <- .getRootThetaRates( design = design, dataInput = dataInput, stage = k - 1, directionUpper = directionUpper, normalApproximation = normalApproximation, firstParameterName = parameterName, secondValue = bounds[k - 1], tolerance = tolerance, acceptResultsOutOfTolerance = TRUE) if (directionUpper) { repeatedConfidenceIntervals[1, k] <- min(min(futilityCorr[2:k]), repeatedConfidenceIntervals[1, k]) } else { repeatedConfidenceIntervals[2, k] <- max(max(futilityCorr[2:k]), repeatedConfidenceIntervals[2, k]) } } .logProgress("Repeated confidence interval of stage %s calculated", startTime = startTime, k) } if (!is.na(repeatedConfidenceIntervals[1, k]) && !is.na(repeatedConfidenceIntervals[2, k]) && repeatedConfidenceIntervals[1, k] > repeatedConfidenceIntervals[2, k]){ repeatedConfidenceIntervals[, k] <- rep(NA_real_, 2) } return(repeatedConfidenceIntervals) } # # RCIs based on group sequential method # .getRepeatedConfidenceIntervalsRatesGroupSequential <- function(..., design, dataInput, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { 
.warnInCaseOfUnknownArguments(functionName = ".getRepeatedConfidenceIntervalsRatesGroupSequential", ignore = c("stage"), ...) return(.getRepeatedConfidenceIntervalsRatesAll(design = design, dataInput = dataInput, normalApproximation = normalApproximation, directionUpper = directionUpper, firstParameterName = "overallPValues", tolerance = tolerance, ...)) } # # RCIs based on inverse normal combination test # .getRepeatedConfidenceIntervalsRatesInverseNormal <- function(..., design, dataInput, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .warnInCaseOfUnknownArguments(functionName = ".getRepeatedConfidenceIntervalsRatesInverseNormal", ignore = c("stage"), ...) return(.getRepeatedConfidenceIntervalsRatesAll(design = design, dataInput = dataInput, normalApproximation = normalApproximation, directionUpper = directionUpper, firstParameterName = "combInverseNormal", tolerance = tolerance, ...)) } # # RCIs based on Fisher's combination test # .getRepeatedConfidenceIntervalsRatesFisher <- function(..., design, dataInput, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .warnInCaseOfUnknownArguments(functionName = ".getRepeatedConfidenceIntervalsRatesFisher", ignore = c("stage"), ...) return(.getRepeatedConfidenceIntervalsRatesAll(design = design, dataInput = dataInput, normalApproximation = normalApproximation, directionUpper = directionUpper, firstParameterName = "combFisher", tolerance = tolerance, ...)) } # # Calculation of conditional power based on group sequential method # .getConditionalPowerRatesGroupSequential <- function(..., design, stageResults, nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, pi1, pi2) { .assertIsTrialDesignGroupSequential(design) stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput()) .assertIsValidStage(stage, design$kMax) .warnInCaseOfUnknownArguments(functionName = ".getConditionalPowerRatesGroupSequential", ignore = c("stage"), ...) 
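	# The effect is measured on a standardized scale; the 'adjustment' term
	# computed below corrects for the difference between the H0-based and the
	# H1-based (pi1, pi2) variance estimates of the test statistic.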
kMax <- design$kMax conditionalPower <- rep(NA_real_, kMax) weights <- stageResults$weightsInverseNormal informationRates <- design$informationRates nPlanned <- c(rep(NA, stage), nPlanned) if (stage == kMax) { .logDebug("Conditional power will be calculated only for subsequent stages ", "(stage = ", stage, ", kMax = ", design$kMax, ")") return(list( nPlanned = nPlanned, conditionalPower = conditionalPower )) } criticalValuesInverseNormal <- design$criticalValues condError <- getConditionalRejectionProbabilities(design = design, stageResults = stageResults, stage = stage)[stage] if (stageResults$isOneSampleDataset()) { adjustment <- stats::qnorm(1 - condError) * (1 - sqrt(stageResults$thetaH0 * (1 - stageResults$thetaH0)) / sqrt(pi1*(1 - pi1))) / sqrt(sum(nPlanned[(stage + 1) : kMax])) if (stageResults$direction == "upper") { thetaH1 <- (pi1 - stageResults$thetaH0) / sqrt(pi1*(1 - pi1)) + adjustment } else { thetaH1 <- -(pi1 - stageResults$thetaH0) / sqrt(pi1*(1 - pi1)) + adjustment } } if (stageResults$isTwoSampleDataset()) { x <- .getFarringtonManningValues(pi1, pi2, stageResults$thetaH0, allocationRatioPlanned) adjustment <- stats::qnorm(1 - condError) * (1 - sqrt(x$ml1 * (1 - x$ml1) + allocationRatioPlanned * x$ml2 * (1 - x$ml2)) / sqrt(pi1 * (1 - pi1) + allocationRatioPlanned * pi2 * (1 - pi2))) * (1 + allocationRatioPlanned) / sqrt(allocationRatioPlanned * sum(nPlanned[(stage + 1) : kMax])) if (stageResults$direction == "upper") { thetaH1 <- (pi1 - pi2 - stageResults$thetaH0) / sqrt(pi1 * (1 - pi1) + allocationRatioPlanned * pi2 * (1 - pi2)) * sqrt(1 + allocationRatioPlanned) + adjustment } else { thetaH1 <- -(pi1 - pi2 - stageResults$thetaH0) / sqrt(pi1 * (1 - pi1) + allocationRatioPlanned * pi2 * (1 - pi2)) * sqrt(1 + allocationRatioPlanned) + adjustment } } if (stageResults$isTwoSampleDataset()) { .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned } shiftedDecisionRegionUpper <- criticalValuesInverseNormal[(stage + 1):kMax] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - c(weights[1:stage] %*% stats::qnorm(1 - stageResults$pValues[1:stage])) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) if (design$sided == 2) { shiftedDecisionRegionLower <- -criticalValuesInverseNormal[(stage + 1):kMax] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - c(weights[1:stage] %*% stats::qnorm(1 - stageResults$pValues[1:stage])) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) } if (stage == kMax - 1) { shiftedFutilityBounds <- c() } else { shiftedFutilityBounds <- design$futilityBounds[(stage + 1):(kMax - 1)] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):(kMax - 1)]^2)) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) - c(weights[1:stage] %*% stats::qnorm(1 - stageResults$pValues[1:stage])) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):(kMax - 1)]) * weights[(stage + 1):(kMax - 1)]) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) } # Scaled information for use in getGroupSeqProbs scaledInformation <- (informationRates[(stage + 1):kMax] - informationRates[stage]) / (1 - 
informationRates[stage]) if (design$sided == 2) { decisionMatrix <- matrix(c(shiftedDecisionRegionLower, shiftedDecisionRegionUpper), nrow = 2, byrow = TRUE) } else { decisionMatrix <- matrix(c(shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT, shiftedDecisionRegionUpper), nrow = 2, byrow = TRUE) } probs <- .getGroupSequentialProbabilities(decisionMatrix = decisionMatrix, informationRates = scaledInformation) if (design$twoSidedPower){ conditionalPower[(stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ] + probs[1, ]) } else { conditionalPower[(stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ]) } if (stageResults$isTwoSampleDataset()) { nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned } return(list(nPlanned = nPlanned, conditionalPower = conditionalPower )) } # # Calculation of conditional power based on inverse normal method # .getConditionalPowerRatesInverseNormal <- function(..., design, stageResults, nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, pi1, pi2) { .assertIsTrialDesignInverseNormal(design) stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput()) .assertIsValidStage(stage, design$kMax) .warnInCaseOfUnknownArguments(functionName = ".getConditionalPowerRatesInverseNormal", ignore = c("stage"), ...) kMax <- design$kMax conditionalPower <- rep(NA_real_, kMax) weights <- stageResults$weightsInverseNormal informationRates <- design$informationRates nPlanned <- c(rep(NA,stage), nPlanned) if (stage == kMax) { .logDebug("Conditional power will be calculated only for subsequent stages ", "(stage = ", stage, ", kMax = ", design$kMax, ")") return(list( nPlanned = nPlanned, conditionalPower = conditionalPower )) } criticalValuesInverseNormal <- design$criticalValues # Shifted decision region for use in getGroupSeqProbs # Inverse normal method condError <- getConditionalRejectionProbabilities(design = design, stageResults = stageResults, stage = stage)[stage] if (stageResults$isOneSampleDataset()) { adjustment <- stats::qnorm(1 - condError) * (1 - sqrt(stageResults$thetaH0 * (1 - stageResults$thetaH0)) / sqrt(pi1*(1 - pi1))) / sqrt(sum(nPlanned[(stage + 1) : kMax])) if (stageResults$direction == "upper") { thetaH1 <- (pi1 - stageResults$thetaH0) / sqrt(pi1*(1 - pi1)) + adjustment } else { thetaH1 <- -(pi1 - stageResults$thetaH0) / sqrt(pi1*(1 - pi1)) + adjustment } } if (stageResults$isTwoSampleDataset()) { x <- .getFarringtonManningValues(pi1, pi2, stageResults$thetaH0, allocationRatioPlanned) adjustment <- stats::qnorm(1 - condError) * (1 - sqrt(x$ml1 * (1 - x$ml1) + allocationRatioPlanned * x$ml2 * (1 - x$ml2)) / sqrt(pi1 * (1 - pi1) + allocationRatioPlanned * pi2 * (1 - pi2))) * (1 + allocationRatioPlanned) / sqrt(allocationRatioPlanned * sum(nPlanned[(stage + 1) : kMax])) if (stageResults$direction == "upper") { thetaH1 <- (pi1 - pi2 - stageResults$thetaH0) / sqrt(pi1 * (1 - pi1) + allocationRatioPlanned * pi2 * (1 - pi2)) * sqrt(1 + allocationRatioPlanned) + adjustment } else { thetaH1 <- -(pi1 - pi2 - stageResults$thetaH0) / sqrt(pi1 * (1 - pi1) + allocationRatioPlanned * pi2 * (1 - pi2)) * sqrt(1 + allocationRatioPlanned) + adjustment } } if (stageResults$isTwoSampleDataset()) { .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned } shiftedDecisionRegionUpper <- criticalValuesInverseNormal[(stage + 1):kMax] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) / sqrt(cumsum(weights[(stage + 
1):kMax]^2)) - c(weights[1:stage] %*% stats::qnorm(1 - stageResults$pValues[1:stage])) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) if (design$sided == 2) { shiftedDecisionRegionLower <- -criticalValuesInverseNormal[(stage + 1):kMax] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - c(weights[1:stage] %*% stats::qnorm(1 - stageResults$pValues[1:stage])) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) } if (stage == kMax - 1) { shiftedFutilityBounds <- c() } else { shiftedFutilityBounds <- design$futilityBounds[(stage + 1):(kMax - 1)] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):(kMax - 1)]^2)) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) - c(weights[1:stage] %*% stats::qnorm(1 - stageResults$pValues[1:stage])) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):(kMax - 1)]) * weights[(stage + 1):(kMax - 1)]) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) } # Scaled information for use in getGroupSeqProbs scaledInformation <- (informationRates[(stage + 1):kMax] - informationRates[stage]) / (1 - informationRates[stage]) if (design$sided == 2) { decisionMatrix <- matrix(c(shiftedDecisionRegionLower, shiftedDecisionRegionUpper), nrow = 2, byrow = TRUE) } else { decisionMatrix <- matrix(c(shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT, shiftedDecisionRegionUpper), nrow = 2, byrow = TRUE) } probs <- .getGroupSequentialProbabilities(decisionMatrix = decisionMatrix, informationRates = scaledInformation) if (design$twoSidedPower){ conditionalPower[(stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ] + probs[1, ]) } else { conditionalPower[(stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ]) } if (stageResults$isTwoSampleDataset()) { nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned } return(list( nPlanned = nPlanned, conditionalPower = conditionalPower )) } # # Calculation of conditional power based on Fisher combination test # .getConditionalPowerRatesFisher <- function(..., design, stageResults, nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, pi1, pi2, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) { .assertIsTrialDesignFisher(design) stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput()) .assertIsValidStage(stage, design$kMax) .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE) .warnInCaseOfUnknownArguments(functionName = ".getConditionalPowerRatesFisher", ignore = c("stage"), ...) 
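	# For Fisher's combination test, conditional power with more than one
	# remaining stage has no closed form and is estimated by simulation
	# ('iterations', 'seed'); with exactly one remaining stage it is
	# computed analytically.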
kMax <- design$kMax conditionalPower <- rep(NA_real_, kMax) seed <- .setSeed(seed) simulated <- FALSE nPlanned <- c(rep(NA,stage), nPlanned) condError <- getConditionalRejectionProbabilities(design = design, stageResults = stageResults, stage = stage)[stage] if (stageResults$isOneSampleDataset()) { adjustment <- stats::qnorm(1 - condError) * (1 - sqrt(stageResults$thetaH0 * (1 - stageResults$thetaH0)) / sqrt(pi1*(1 - pi1))) / sqrt(sum(nPlanned[(stage + 1) : kMax])) if (stageResults$direction == "upper") { thetaH1 <- (pi1 - stageResults$thetaH0) / sqrt(pi1*(1 - pi1)) + adjustment } else { thetaH1 <- -(pi1 - stageResults$thetaH0) / sqrt(pi1*(1 - pi1)) + adjustment } } if (stageResults$isTwoSampleDataset()) { x <- .getFarringtonManningValues(pi1, pi2, stageResults$thetaH0, allocationRatioPlanned) adjustment <- stats::qnorm(1 - condError) * (1 - sqrt(x$ml1 * (1 - x$ml1) + allocationRatioPlanned * x$ml2 * (1 - x$ml2)) / sqrt(pi1 * (1 - pi1) + allocationRatioPlanned * pi2 * (1 - pi2))) * (1 + allocationRatioPlanned) / sqrt(allocationRatioPlanned * sum(nPlanned[(stage + 1) : kMax])) if (stageResults$direction == "upper") { thetaH1 <- (pi1 - pi2 - stageResults$thetaH0) / sqrt(pi1 * (1 - pi1) + allocationRatioPlanned * pi2 * (1 - pi2)) * sqrt(1 + allocationRatioPlanned) + adjustment } else { thetaH1 <- -(pi1 - pi2 - stageResults$thetaH0) / sqrt(pi1 * (1 - pi1) + allocationRatioPlanned * pi2 * (1 - pi2)) * sqrt(1 + allocationRatioPlanned) + adjustment } } if (stageResults$isTwoSampleDataset()) { .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned } criticalValues <- design$criticalValues weightsFisher <- stageResults$weightsFisher pValues <- stageResults$pValues if (stage < kMax - 1) { for (k in (stage + 1):kMax) { reject <- 0 for (i in 1:iterations) { reject <- reject + .getRejectValueConditionalPowerFisher( kMax = kMax, alpha0Vec = design$alpha0Vec, criticalValues = criticalValues, weightsFisher = weightsFisher, pValues = pValues, currentKMax = k, thetaH1 = thetaH1, stage = stage, nPlanned = nPlanned) } conditionalPower[k] <- reject / iterations } simulated <- TRUE } if (stage == kMax - 1) { divisor <- prod(pValues[1:(kMax - 1)]^weightsFisher[1:(kMax - 1)]) result <- 1 - (criticalValues[kMax] / divisor)^(1/weightsFisher[kMax]) if (result <= 0 || result >= 1) { warning("Could not calculate conditional power for stage ", kMax, call. 
= FALSE) conditionalPower[kMax] <- NA_real_ } else { conditionalPower[kMax] <- 1 - stats::pnorm(stats::qnorm(result) - thetaH1 * sqrt(nPlanned[kMax])) } } if (stageResults$isTwoSampleDataset()) { nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned } return(list( nPlanned = nPlanned, conditionalPower = conditionalPower, iterations = iterations, seed = seed, simulated = simulated )) } .getConditionalPowerRates <- function(..., design, stageResults, nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, pi1 = NA_real_, pi2 = NA_real_) { stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput()) .assertIsValidStage(stage, design$kMax) pi1 <- .assertIsValidPi1(pi1, stageResults, stage) if (!stageResults$isOneSampleDataset()){ pi2 <- .assertIsValidPi2(pi2, stageResults, stage) } if (any(is.na(nPlanned))) { return(list(conditionalPower = rep(NA_real_, design$kMax), simulated = FALSE)) } if (!.isValidNPlanned(nPlanned = nPlanned, kMax = design$kMax, stage = stage)) { return(list(conditionalPower = rep(NA_real_, design$kMax), simulated = FALSE)) } if (.isTrialDesignGroupSequential(design)) { return(.getConditionalPowerRatesGroupSequential(..., design = design, stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, pi1 = pi1, pi2 = pi2)) } if (.isTrialDesignInverseNormal(design)) { return(.getConditionalPowerRatesInverseNormal(..., design = design, stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, pi1 = pi1, pi2 = pi2)) } if (.isTrialDesignFisher(design)) { return(.getConditionalPowerRatesFisher(..., design = design, stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, pi1 = pi1, pi2 = pi2)) } .stopWithWrongDesignMessage(design) } .getConditionalPowerPlotRates <- function(..., design, stageResults, stage, nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, piRange, pi2) { if (stageResults$isOneSampleDataset()) { if (!.associatedArgumentsAreDefined(nPlanned = nPlanned, piRange = piRange)) { warning("You must specify a planned sample size (nPlanned) and ", "a range of rates (piRange)", call. = FALSE) } pi2 <- NA_real_ } else { if (!.associatedArgumentsAreDefined(nPlanned = nPlanned, pi2 = pi2, piRange = piRange)) { warning("You must specify a planned sample size (nPlanned), ", "a control rate (pi2), and a range of treatment rates (piRange)", call. 
= FALSE) } } .assertIsValidAllocationRatioPlanned(allocationRatioPlanned, stageResults$getDataInput()$getNumberOfGroups()) .assertIsValidPi(pi2, "pi2") piRange <- .assertIsValidPiRange(piRange = piRange) condPowerValues <- rep(NA, length(piRange)) likelihoodValues <- rep(NA, length(piRange)) if (stageResults$isOneSampleDataset()) { mu <- stageResults$effectSizes[stage] stdErr <- sqrt(stageResults$effectSizes[stage] * (1 - stageResults$effectSizes[stage]) / stageResults$overallSampleSizes[stage]) for (i in seq(along = piRange)) { if (.isTrialDesignGroupSequential(design)) { condPowerValues[i] <- .getConditionalPowerRatesGroupSequential(..., design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, pi1 = piRange[i], pi2 = pi2)$conditionalPower[design$kMax] } if (.isTrialDesignInverseNormal(design)) { condPowerValues[i] <- .getConditionalPowerRatesInverseNormal(..., design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, pi1 = piRange[i], pi2 = pi2)$conditionalPower[design$kMax] } if (.isTrialDesignFisher(design)) { condPowerValues[i] <- .getConditionalPowerRatesFisher(..., design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, pi1 = piRange[i], pi2 = pi2)$conditionalPower[design$kMax] } likelihoodValues[i] <- stats::dnorm(piRange[i], mu, stdErr) / stats::dnorm(0, 0, stdErr) } } if (stageResults$isTwoSampleDataset()) { mu <- stageResults$overallEvents1[stage] / stageResults$overallSampleSizes1[stage] stdErr <- sqrt(stageResults$overallEvents1[stage] / stageResults$overallSampleSizes1[stage] * (1 - stageResults$overallEvents1[stage] / stageResults$overallSampleSizes1[stage]) / stageResults$overallSampleSizes1[stage]) for (i in seq(along = piRange)) { if (.isTrialDesignGroupSequential(design)) { condPowerValues[i] <- .getConditionalPowerRatesGroupSequential(..., design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, pi1 = piRange[i], pi2 = pi2)$conditionalPower[design$kMax] } if (.isTrialDesignInverseNormal(design)) { condPowerValues[i] <- .getConditionalPowerRatesInverseNormal(..., design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, pi1 = piRange[i], pi2 = pi2)$conditionalPower[design$kMax] } if (.isTrialDesignFisher(design)) { condPowerValues[i] <- .getConditionalPowerRatesFisher(..., design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, pi1 = piRange[i], pi2 = pi2)$conditionalPower[design$kMax] } likelihoodValues[i] <- stats::dnorm(piRange[i], mu, stdErr) / stats::dnorm(0, 0, stdErr) } } if (stageResults$isOneSampleDataset()) { subTitle <- paste0("Stage = ", stage, ", # of remaining subjects = ", sum(nPlanned)) } else { subTitle <- paste0("Stage = ", stage, ", # of remaining subjects = ", sum(nPlanned), ", pi2 = ", pi2, ", allocation ratio = ", allocationRatioPlanned) } return(list( xValues = piRange, condPowerValues = condPowerValues, likelihoodValues = likelihoodValues, main = "Conditional Power Plot with Likelihood", xlab = "pi1", ylab = "Conditional power / Likelihood", sub = subTitle )) } # # Calculation of final confidence interval # based on group sequential test without SSR (general case). 
# .getFinalConfidenceIntervalRatesGroupSequential <- function(design, dataInput, stage, thetaH0 = C_THETA_H0_RATES_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { stageResults <- .getStageResultsRates(design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation) finalConfidenceIntervalGeneral <- rep(NA_real_, 2) medianUnbiasedGeneral <- NA_real_ stageGroupSeq <- .getStageGroupSeq(design, stageResults, stage) finalStage <- min(stageGroupSeq, design$kMax) # early stopping or at end of study if (stageGroupSeq < design$kMax || stage == design$kMax) { if (stageGroupSeq == 1) { finalConfidenceIntervalGeneral[1] <- stageResults$overallTestStatistics[1] - stats::qnorm(1 - design$alpha / design$sided) finalConfidenceIntervalGeneral[2] <- stageResults$overallTestStatistics[1] + stats::qnorm(1 - design$alpha / design$sided) medianUnbiasedGeneral <- stageResults$overallTestStatistics[1] if (dataInput$getNumberOfGroups() == 1) { finalConfidenceIntervalGeneral <- finalConfidenceIntervalGeneral / sqrt(stageResults$overallSampleSizes[1]) medianUnbiasedGeneral <- medianUnbiasedGeneral / sqrt(stageResults$overallSampleSizes[1]) } else { finalConfidenceIntervalGeneral <- finalConfidenceIntervalGeneral * sqrt(1/stageResults$overallSampleSizes1[finalStage] + 1/stageResults$overallSampleSizes2[finalStage]) medianUnbiasedGeneral <- medianUnbiasedGeneral * sqrt(1/stageResults$overallSampleSizes1[finalStage] + 1/stageResults$overallSampleSizes2[finalStage]) } } else { finalConfidenceIntervalGeneral[1] <- .getDecisionMatrixRoot(design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = "overallPValues", case = "finalConfidenceIntervalGeneralLower") finalConfidenceIntervalGeneral[2] <- .getDecisionMatrixRoot(design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = "overallPValues", case = "finalConfidenceIntervalGeneralUpper") medianUnbiasedGeneral <- .getDecisionMatrixRoot(design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = "overallPValues", case = "medianUnbiasedGeneral") } } if (is.na(finalConfidenceIntervalGeneral[1]) && (stageGroupSeq > 1)) { finalStage <- NA_integer_ } finalConfidenceInterval <- rep(NA_real_, 2) medianUnbiased <- NA_real_ if (!is.na(finalStage)) { # Retransformation if (dataInput$getNumberOfGroups() == 1) { stErrRates <- sqrt(stageResults$overallEvents[finalStage]/stageResults$overallSampleSizes[finalStage] * (1 - stageResults$overallEvents[finalStage]/stageResults$overallSampleSizes[finalStage])) / sqrt(stageResults$overallSampleSizes[finalStage]) } else { stErrRates <- sqrt(stageResults$overallEvents1[finalStage]/stageResults$overallSampleSizes1[finalStage] * (1 - stageResults$overallEvents1[finalStage]/stageResults$overallSampleSizes1[finalStage])/ stageResults$overallSampleSizes1[finalStage] + stageResults$overallEvents2[finalStage]/stageResults$overallSampleSizes2[finalStage] * (1 - stageResults$overallEvents2[finalStage]/stageResults$overallSampleSizes2[finalStage])/ stageResults$overallSampleSizes2[finalStage]) } directionUpperSign <- ifelse(directionUpper, 1, -1) if (stageGroupSeq == 1) { #finalConfidenceInterval[1] <- stageResults$effectSizes[1] - stats::qnorm(1 - design$alpha / design$sided) * stErrRates #finalConfidenceInterval[2] 
<- stageResults$effectSizes[1] + stats::qnorm(1 - design$alpha / design$sided) * stErrRates finalConfidenceInterval[1] <- .getRootThetaRates( design = design, dataInput = dataInput, stage = 1, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, firstParameterName = "overallPValues", secondValue = stats::qnorm(1 - design$alpha/design$sided), tolerance = tolerance, acceptResultsOutOfTolerance = TRUE) finalConfidenceInterval[2] <- .getRootThetaRates( design = design, dataInput = dataInput, stage = 1, directionUpper = FALSE, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, firstParameterName = "overallPValues", secondValue = stats::qnorm(1 - design$alpha/design$sided), tolerance = tolerance, acceptResultsOutOfTolerance = TRUE) medianUnbiased <- stageResults$effectSizes[1] } else { if (dataInput$getNumberOfGroups() == 1) { finalConfidenceInterval[1] <- finalConfidenceIntervalGeneral[1] * sqrt(stageResults$overallSampleSizes[finalStage]) * stErrRates + directionUpperSign * thetaH0 finalConfidenceInterval[2] <- finalConfidenceIntervalGeneral[2] * sqrt(stageResults$overallSampleSizes[finalStage]) * stErrRates + directionUpperSign * thetaH0 medianUnbiased <- medianUnbiasedGeneral * sqrt(stageResults$overallSampleSizes[finalStage]) * stErrRates + directionUpperSign * thetaH0 } else { finalConfidenceInterval[1] <- finalConfidenceIntervalGeneral[1] / sqrt(1/stageResults$overallSampleSizes1[finalStage] + 1/stageResults$overallSampleSizes2[finalStage]) * stErrRates + directionUpperSign * thetaH0 finalConfidenceInterval[2] <- finalConfidenceIntervalGeneral[2] / sqrt(1/stageResults$overallSampleSizes1[finalStage] + 1/stageResults$overallSampleSizes2[finalStage]) * stErrRates + directionUpperSign * thetaH0 medianUnbiased <- medianUnbiasedGeneral / sqrt(1/stageResults$overallSampleSizes1[finalStage] + 1/stageResults$overallSampleSizes2[finalStage]) * stErrRates + directionUpperSign * thetaH0 } } } if (!directionUpper) { medianUnbiasedGeneral = -medianUnbiasedGeneral finalConfidenceIntervalGeneral = -finalConfidenceIntervalGeneral if (stageGroupSeq > 1){ medianUnbiased = -medianUnbiased finalConfidenceInterval = -finalConfidenceInterval } } finalConfidenceIntervalGeneral = sort(finalConfidenceIntervalGeneral) finalConfidenceInterval = sort(finalConfidenceInterval) if (dataInput$getNumberOfGroups() == 1) { finalConfidenceInterval[1] <- max(0, finalConfidenceInterval[1]) finalConfidenceInterval[2] <- min(1, finalConfidenceInterval[2]) } else { finalConfidenceInterval[1] <- max(-1, finalConfidenceInterval[1]) finalConfidenceInterval[2] <- min(1, finalConfidenceInterval[2]) } return(list( finalStage = finalStage, medianUnbiasedGeneral = medianUnbiasedGeneral, finalConfidenceIntervalGeneral = finalConfidenceIntervalGeneral, medianUnbiased = medianUnbiased, finalConfidenceInterval = finalConfidenceInterval )) } # # Calculation of final confidence interval # based on inverse normal method, only valid for kMax <= 2 or no SSR. 
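#
# As the banner above states, for the inverse normal method the final
# confidence interval is only valid for kMax <= 2 or if no sample size
# recalculation was performed. A hypothetical sketch via the exported API
# (inputs invented for illustration):
#
#   design <- getDesignInverseNormal(kMax = 2, alpha = 0.025)
#   data <- getDataset(n1 = c(30, 30), n2 = c(30, 30),
#       events1 = c(16, 19), events2 = c(10, 12))
#   getFinalConfidenceInterval(design, dataInput = data)
#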
# .getFinalConfidenceIntervalRatesInverseNormal <- function(design, dataInput, stage, thetaH0 = C_THETA_H0_RATES_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { stageResults <- .getStageResultsRates(design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation) finalConfidenceIntervalGeneral <- rep(NA_real_, 2) medianUnbiasedGeneral <- NA_real_ stageInverseNormal <- .getStageInverseNormal(design, stageResults, stage) finalStage <- min(stageInverseNormal, design$kMax) # Early stopping or at end of study if (stageInverseNormal < design$kMax || stage == design$kMax) { if (stageInverseNormal == 1) { finalConfidenceIntervalGeneral[1] <- stageResults$combInverseNormal[1] - stats::qnorm(1 - design$alpha / design$sided) finalConfidenceIntervalGeneral[2] <- stageResults$combInverseNormal[1] + stats::qnorm(1 - design$alpha / design$sided) medianUnbiasedGeneral <- stageResults$combInverseNormal[1] if (dataInput$getNumberOfGroups() == 1) { finalConfidenceIntervalGeneral <- finalConfidenceIntervalGeneral / sqrt(stageResults$overallSampleSizes[1]) medianUnbiasedGeneral <- medianUnbiasedGeneral / sqrt(stageResults$overallSampleSizes[1]) } else { finalConfidenceIntervalGeneral <- finalConfidenceIntervalGeneral * sqrt(1/stageResults$overallSampleSizes1[finalStage] + 1/stageResults$overallSampleSizes2[finalStage]) medianUnbiasedGeneral <- medianUnbiasedGeneral * sqrt(1/stageResults$overallSampleSizes1[finalStage] + 1/stageResults$overallSampleSizes2[finalStage]) } } else { if (design$kMax > 2) { warning("Calculation of final confidence interval performed for kMax = ", design$kMax, " (for kMax > 2, it is theoretically shown that it is valid only ", "if no sample size change was performed)", call. 
= FALSE) } finalConfidenceIntervalGeneral[1] <- .getDecisionMatrixRoot(design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = "combInverseNormal", case = "finalConfidenceIntervalGeneralLower") finalConfidenceIntervalGeneral[2] <- .getDecisionMatrixRoot(design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = "combInverseNormal", case = "finalConfidenceIntervalGeneralUpper") medianUnbiasedGeneral <- .getDecisionMatrixRoot(design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = "combInverseNormal", case = "medianUnbiasedGeneral") } } if (is.na(finalConfidenceIntervalGeneral[1]) && (stageInverseNormal > 1)) { finalStage <- NA_integer_ } finalConfidenceInterval <- rep(NA_real_, 2) medianUnbiased <- NA_real_ if (!is.na(finalStage)) { # Retransformation if (dataInput$getNumberOfGroups() == 1) { stErrRates <- sqrt(stageResults$overallEvents[finalStage]/stageResults$overallSampleSizes[finalStage] * (1 - stageResults$overallEvents[finalStage]/stageResults$overallSampleSizes[finalStage])) / sqrt(stageResults$overallSampleSizes[finalStage]) } else { stErrRates <- sqrt(stageResults$overallEvents1[finalStage]/stageResults$overallSampleSizes1[finalStage] * (1 - stageResults$overallEvents1[finalStage]/stageResults$overallSampleSizes1[finalStage])/ stageResults$overallSampleSizes1[finalStage] + stageResults$overallEvents2[finalStage]/stageResults$overallSampleSizes2[finalStage] * (1 - stageResults$overallEvents2[finalStage]/stageResults$overallSampleSizes2[finalStage])/ stageResults$overallSampleSizes2[finalStage]) } directionUpperSign <- ifelse(directionUpper, 1, -1) if (stageInverseNormal == 1) { #finalConfidenceInterval[1] <- stageResults$effectSizes[1] - stats::qnorm(1 - design$alpha / design$sided) * stErrRates #finalConfidenceInterval[2] <- stageResults$effectSizes[1] + stats::qnorm(1 - design$alpha / design$sided) * stErrRates finalConfidenceInterval[1] <- .getRootThetaRates( design = design, dataInput = dataInput, stage = 1, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = T, firstParameterName = "combInverseNormal", secondValue = stats::qnorm(1 - design$alpha/design$sided), tolerance = tolerance, acceptResultsOutOfTolerance = TRUE) finalConfidenceInterval[2] <- .getRootThetaRates( design = design, dataInput = dataInput, stage = 1, directionUpper = FALSE, normalApproximation = T, firstParameterName = "combInverseNormal", secondValue = stats::qnorm(1 - design$alpha/design$sided), tolerance = tolerance, acceptResultsOutOfTolerance = TRUE) medianUnbiased <- stageResults$effectSizes[1] } else { if (dataInput$getNumberOfGroups() == 1) { finalConfidenceInterval[1] <- finalConfidenceIntervalGeneral[1] * sqrt(stageResults$overallSampleSizes[finalStage]) * stErrRates + directionUpperSign * thetaH0 finalConfidenceInterval[2] <- finalConfidenceIntervalGeneral[2] * sqrt(stageResults$overallSampleSizes[finalStage]) * stErrRates + directionUpperSign * thetaH0 medianUnbiased <- medianUnbiasedGeneral * sqrt(stageResults$overallSampleSizes[finalStage]) * stErrRates + directionUpperSign * thetaH0 } else { finalConfidenceInterval[1] <- finalConfidenceIntervalGeneral[1] / sqrt(1/stageResults$overallSampleSizes1[finalStage] + 1/stageResults$overallSampleSizes2[finalStage]) * stErrRates + directionUpperSign * thetaH0 finalConfidenceInterval[2] <- finalConfidenceIntervalGeneral[2] / sqrt(1/stageResults$overallSampleSizes1[finalStage] + 
1/stageResults$overallSampleSizes2[finalStage]) * stErrRates + directionUpperSign * thetaH0 medianUnbiased <- medianUnbiasedGeneral / sqrt(1/stageResults$overallSampleSizes1[finalStage] + 1/stageResults$overallSampleSizes2[finalStage]) * stErrRates + directionUpperSign * thetaH0 } } } if (!directionUpper){ medianUnbiasedGeneral = -medianUnbiasedGeneral finalConfidenceIntervalGeneral = -finalConfidenceIntervalGeneral if (stageInverseNormal > 1){ medianUnbiased = -medianUnbiased finalConfidenceInterval = -finalConfidenceInterval } } finalConfidenceIntervalGeneral = sort(finalConfidenceIntervalGeneral) finalConfidenceInterval = sort(finalConfidenceInterval) if (dataInput$getNumberOfGroups() == 1) { finalConfidenceInterval[1] <- max(0, finalConfidenceInterval[1]) finalConfidenceInterval[2] <- min(1, finalConfidenceInterval[2]) } else { finalConfidenceInterval[1] <- max(-1, finalConfidenceInterval[1]) finalConfidenceInterval[2] <- min(1, finalConfidenceInterval[2]) } return(list( finalStage = finalStage, medianUnbiasedGeneral = medianUnbiasedGeneral, finalConfidenceIntervalGeneral = finalConfidenceIntervalGeneral, medianUnbiased = medianUnbiased, finalConfidenceInterval = finalConfidenceInterval )) } # # Calculation of final confidence interval # based on Fisher combination test, only valid for kMax <= 2. # .getFinalConfidenceIntervalRatesFisher <- function(design, dataInput, stage, thetaH0 = C_THETA_H0_RATES_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { stageResults <- .getStageResultsRates(design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation) finalConfidenceInterval <- rep(NA_real_, 2) medianUnbiased <- NA_real_ stageFisher <- .getStageFisher(design, stageResults, stage) finalStage <- min(stageFisher, design$kMax) # Early stopping or at end of study if (stageFisher < design$kMax || stage == design$kMax) { warning("Calculation of final confidence interval for Fisher's ", "design not implemented yet.", call. = FALSE) return(list(finalStage = NA_integer_ , medianUnbiased = NA_real_, finalConfidenceInterval = rep(NA_real_, design$kMax))) } return(list( finalStage = finalStage, medianUnbiased = medianUnbiased, finalConfidenceInterval = finalConfidenceInterval )) } .getFinalConfidenceIntervalRates <- function(..., design, dataInput, thetaH0 = NA_real_, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) .assertIsValidStage(stage, design$kMax) .assertIsValidThetaH0DataInput(thetaH0, dataInput) .warnInCaseOfUnknownArguments(functionName = "getFinalConfidenceIntervalRates", ignore = c("stage"), ...) 
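	# Fall back to the default null hypothesis value for rates if 'thetaH0' is
	# undefined, then dispatch to the implementation matching the design class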
if (is.na(thetaH0)) { thetaH0 <- C_THETA_H0_RATES_DEFAULT } if (.isTrialDesignGroupSequential(design)) { return(.getFinalConfidenceIntervalRatesGroupSequential( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, tolerance = tolerance)) } if (.isTrialDesignInverseNormal(design)) { return(.getFinalConfidenceIntervalRatesInverseNormal( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, tolerance = tolerance)) } if (.isTrialDesignFisher(design)) { return(.getFinalConfidenceIntervalRatesFisher( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, tolerance = tolerance)) } .stopWithWrongDesignMessage(design) } rpact/R/f_design_fisher_combination_test.R0000644000176200001440000007761413541110635020464 0ustar liggesusers###################################################################################### # # # -- Fisher combination test -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 28-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### #' @include f_core_constants.R #' @include f_core_utilities.R NULL .isEqual <- function(x, y) { return(abs(x - y) < 1e-10) } .getFisherCombinationCaseKmax2 <- function(tVec) { return(ifelse(.isEqual(tVec[1], 1), 1L, 2L)) } .getFisherCombinationSizeKmax2 <- function(alpha0Vec, criticalValues, tVec, piValue, case = .getFisherCombinationCaseKmax2(tVec)) { a1 <- alpha0Vec[1] c1 <- criticalValues[1] c2 <- criticalValues[2] t2 <- tVec[1] .assertIsValidForLogarithmization(list(a1 = a1, c1 = c1)) if (case == 1) { return(piValue + c2 * (log(a1) - log(c1))) } else { return(piValue + c2^(1/t2) * t2/(t2 - 1) * (a1^(1 - 1/t2) - c1^(1 - 1/t2))) } } .getFisherCombinationCaseKmax3 <- function(tVec) { t2 <- tVec[1] t3 <- tVec[2] if (.isEqual(t2, 1) && .isEqual(t3, 1)) { return(1L) } else if (!.isEqual(t2, t3) && !.isEqual(t2, 1) && !.isEqual(t3, 1)) { return(2L) } else if (.isEqual(t2, t3) && !.isEqual(t2, 1)) { return(3L) } else if (.isEqual(t2, 1) && !.isEqual(t3, 1)) { return(4L) } else if (!.isEqual(t2, 1) && .isEqual(t3, 1)) { return(5L) } } .getFisherCombinationSizeKmax3 <- function(alpha0Vec, criticalValues, tVec, piValue, case = .getFisherCombinationCaseKmax3(tVec)) { a1 <- alpha0Vec[1] a2 <- alpha0Vec[2] c1 <- criticalValues[1] c2 <- criticalValues[2] c3 <- criticalValues[3] t2 <- tVec[1] t3 <- tVec[2] .assertIsValidForLogarithmization(list(a1=a1, a2=a2, c1=c1, c2=c2)) if (case == 1) { ## Wassmer 1999, recursive formula return(piValue + c3*(log(a2)*log(a1) - log(a2)*log(c1) + 0.5*(log(a1/c2))^2 - 0.5*(log(c1/c2))^2)) } else if (case == 2) { return(piValue + c3^(1/t3)*t3/(t3 - t2)*( a2^(1 - t2/t3)*t3/(t3 - 1)*(a1^(1 - 1/t3) - c1^(1 - 1/t3)) - c2^(1/t2 - 1/t3)*t2/(t2 - 1)*(a1^(1 - 1/t2) - c1^(1 - 1/t2)))) } else if (case == 3) { return(piValue + c3^(1/t3)*t3/(t3 - 1)*( a1^(1 - 
1/t3)*(log(a2) - 1/t2*(log(c2) - log(a1) + t3/(t3 - 1))) - c1^(1 - 1/t3)*(log(a2) - 1/t2*(log(c2) - log(c1) + t3/(t3 - 1))))) } else if (case == 4) { return(piValue + c3^(1/t3)*t3/(t3 - 1)* (a2^(1 - 1/t3)*t3/(t3 - 1)*(a1^(1 - 1/t3) - c1^(1 - 1/t3)) - c2^(1 - 1/t3)*(log(a1) - log(c1)))) } else if (case == 5) { return(piValue + c3 /(1 - t2)*(a2^(1 - t2)*(log(a1) - log(c1)) - c2^(1/t2 - 1)*t2/(t2 - 1)*(a1^(1 - 1/t2) - c1^(1 - 1/t2)))) } } .getFisherCombinationCaseKmax4 <- function(tVec) { t2 <- tVec[1] t3 <- tVec[2] t4 <- tVec[3] return(ifelse( .isEqual(t2, 1) && .isEqual(t3, 1) && .isEqual(t4, 1), 1L, 2L )) } .getFisherCombinationSizeApproximatelyKmax4 <- function(alpha0Vec, criticalValues, tVec, piValue, case = .getFisherCombinationCaseKmax4(tVec)) { a1 <- alpha0Vec[1] a2 <- alpha0Vec[2] a3 <- alpha0Vec[3] c1 <- criticalValues[1] c2 <- criticalValues[2] c3 <- criticalValues[3] c4 <- criticalValues[4] t2 <- tVec[1] t3 <- tVec[2] t4 <- tVec[3] .assertIsValidForLogarithmization(list(a1=a1, a2=a2, a3=a3, c1=c1, c2=c2, c3=c3)) ## Wassmer 1999, recursive formula if (case == 1) { return(piValue + c4*(1/6*log(a1*a2/c3)^3 - 1/6*log(c1*a2/c3)^3 + 0.5*log(c2/c3)^2*log(c1) - 0.5*log(c2/c3)^2*log(a1) + 0.5*log(a1/c2)^2*log(a3) - 0.5*log(c1/c2)^2*log(a3) + log(a3)*log(a2)*log(a1) - log(c1)*log(a2)*log(a3))) } ## general case for K = 4 else { eps <- 1E-5 if (.isEqual(t2, 1)) t2 <- t2 + eps if (.isEqual(t3, 1)) t3 <- t3 + eps if (.isEqual(t4, 1)) t4 <- t4 + eps if (.isEqual(t2, t3)) t3 <- t2 + eps if (.isEqual(t2, t4)) t4 <- t2 + eps if (.isEqual(t3, t4)) t4 <- t3 + eps return(piValue + c4^(1/t4)*t4/(t4 - t3)*( t4/(t4 - t2)*t4/(t4 - 1)*a3^(1 - t3/t4)*a2^(1 - t2/t4)*(a1^(1 - 1/t4) - c1^(1 - 1/t4)) - t4/(t4 - t2)*t2/(t2 - 1)*a3^(1 - t3/t4)*c2^(1/t2 - 1/t4)*(a1^(1 - 1/t2) - c1^(1 - 1/t2)) - t3/(t3 - t2)*t3/(t3 - 1)*c3^(1/t3 - 1/t4)*a2^(1 - t2/t3)*(a1^(1 - 1/t3) - c1^(1 - 1/t3)) + t3/(t3 - t2)*t2/(t2 - 1)*c3^(1/t3 - 1/t4)*c2^(1/t2 - 1/t3)*(a1^(1 - 1 /t2) - c1^(1 - 1/t2)))) } } .getFisherCombinationCaseKmax5 <- function(tVec) { t2 <- tVec[1] t3 <- tVec[2] t4 <- tVec[3] t5 <- tVec[4] return(ifelse( .isEqual(t2, 1) && .isEqual(t3, 1) && .isEqual(t4, 1) && .isEqual(t5, 1), 1L, 2L )) } .getFisherCombinationSizeApproximatelyKmax5 <- function(alpha0Vec, criticalValues, tVec, piValue, case = .getFisherCombinationCaseKmax5(tVec)) { a1 <- alpha0Vec[1] a2 <- alpha0Vec[2] a3 <- alpha0Vec[3] a4 <- alpha0Vec[4] c1 <- criticalValues[1] c2 <- criticalValues[2] c3 <- criticalValues[3] c4 <- criticalValues[4] c5 <- criticalValues[5] t2 <- tVec[1] t3 <- tVec[2] t4 <- tVec[3] t5 <- tVec[4] .assertIsValidForLogarithmization(list(a1=a1, a2=a2, a3=a3, a4=a4, c1=c1, c2=c2, c3=c3, c4=c4)) ## Wassmer 1999, recursive formula if (case == 1) { return(piValue + c5 *(1/24*log(a1*a2*a3/c4)^4 - 1/24*log(c1*a2*a3/c4)^4 + 1/6*log(c2*a3/c4)^3*log(c1) - 1/6*log(c2*a3/c4)^3*log(a1) + 1/4*log(c3/c4)^2*log(c1/c2)^2 - 1/4*log(c3/c4)^2*log(a1/c2)^2 + 0.5*log(c3/c4)^2*log(a2)*log(c1) - 0.5*log(c3/c4)^2*log(a2)*log(a1) + 1/6*log(a1*a2/c3)^3*log(a4) - 1/6*log(c1*a2/c3)^3*log(a4) + 0.5*log(c2/c3)^2*log(a4)*log(c1) - 0.5*log(c2/c3)^2*log(a4)*log(a1) + 0.5*log(a1/c2)^2*log(a3)*log(a4) - 0.5*log(c1/c2)^2*log(a3)*log(a4) + log(a4)*log(a3)*log(a2)*log(a1) - log(c1)*log(a2)*log(a3)*log(a4))) } ## general case for K = 5 else { eps <- 1E-5 if (.isEqual(t2, 1)) t2 <- t2 + eps if (.isEqual(t3, 1)) t3 <- t3 + eps if (.isEqual(t4, 1)) t4 <- t4 + eps if (.isEqual(t5, 1)) t5 <- t5 + eps if (.isEqual(t2, t3)) t3 <- t2 + eps if (.isEqual(t2, t4)) t4 <- t2 + eps if 
(.isEqual(t2, t5)) t5 <- t2 + eps if (.isEqual(t3, t4)) t4 <- t3 + eps if (.isEqual(t3, t5)) t5 <- t3 + eps if (.isEqual(t4, t5)) t5 <- t4 + eps return(piValue + c5^(1/t5)*t5/(t5 - t4)*( t5/(t5 - t3)*t5/(t5 - t2)*t5/(t5 - 1)*a4^(1 - t4/t5)*a3^(1 - t3/t5)*a2^(1 - t2/t5)*(a1^(1 - 1/t5) - c1^(1 - 1/t5)) - t5/(t5 - t3)*t5/(t5 - t2)*t2/(t2 - 1)*a4^(1 - t4/t5)*a3^(1 - t3/t5)*c2^(1/t2 - 1/t5)*(a1^(1 - 1/t2) - c1^(1 - 1/t2)) - t5/(t5 - t3)*t3/(t3 - t2)*t3/(t3 - 1)*a4^(1 - t4/t5)*c3^(1/t3 - 1/t5)*a2^(1 - t2/t3)*(a1^(1 - 1/t3) - c1^(1 - 1/t3)) + t5/(t5 - t3)*t3/(t3 - t2)*t2/(t2 - 1)*a4^(1 - t4/t5)*c3^(1/t3 - 1/t5)*c2^(1/t2 - 1/t3)*(a1^(1 - 1/t2) - c1^(1 - 1/t2)) - t4/(t4 - t3)*t4/(t4 - t2)*t4/(t4 - 1)*c4^(1/t4 - 1/t5)*a3^(1 - t3/t4)*a2^(1 - t2/t4)*(a1^(1 - 1/t4) - c1^(1 - 1/t4)) + t4/(t4 - t3)*t4/(t4 - t2)*t2/(t2 - 1)*c4^(1/t4 - 1/t5)*a3^(1 - t3/t4)*c2^(1/t2 - 1/t4)*(a1^(1 - 1/t2) - c1^(1 - 1/t2)) + t4/(t4 - t3)*t3/(t3 - t2)*t3/(t3 - 1)*c4^(1/t4 - 1/t5)*c3^(1/t3 - 1/t4)*a2^(1 - t2/t3)*(a1^(1 - 1/t3) - c1^(1 - 1/t3)) - t4/(t4 - t3)*t3/(t3 - t2)*t2/(t2 - 1)*c4^(1/t4 - 1/t5)*c3^(1/t3 - 1/t4)*c2^(1/t2 - 1/t3)*(a1^(1 - 1/t2) - c1^(1 - 1/t2)))) } } .getFisherCombinationCaseKmax6 <- function(tVec) { t2 <- tVec[1] t3 <- tVec[2] t4 <- tVec[3] t5 <- tVec[4] t6 <- tVec[5] return(ifelse( .isEqual(t2, 1) && .isEqual(t3, 1) && .isEqual(t4, 1) && .isEqual(t5, 1) && .isEqual(t6, 1), 1L, 2L )) } .getFisherCombinationSizeApproximatelyKmax6 <- function(alpha0Vec, criticalValues, tVec, piValue, case = .getFisherCombinationCaseKmax6(tVec)) { a1 <- alpha0Vec[1] a2 <- alpha0Vec[2] a3 <- alpha0Vec[3] a4 <- alpha0Vec[4] a5 <- alpha0Vec[5] c1 <- criticalValues[1] c2 <- criticalValues[2] c3 <- criticalValues[3] c4 <- criticalValues[4] c5 <- criticalValues[5] c6 <- criticalValues[6] t2 <- tVec[1] t3 <- tVec[2] t4 <- tVec[3] t5 <- tVec[4] t6 <- tVec[5] .assertIsValidForLogarithmization(list(a1=a1, a2=a2, a3=a3, a4=a4, a5=a5, c1=c1, c2=c2, c3=c3, c4=c4, c5=c5)) ## Wassmer 1999, recursive formula if (case == 1) { return(piValue + c6*( log(a1)*log(a2)*log(a3)*log(a4)*log(a5) + 1/24*log(a1*a2*a3/c4)^4*log(a5) + 1/120*log(a1*a2*a3*a4/c5)^5 - 0.5*log(c4/c5)^2*log(a3)*log(a2)*log(a1) + 1/6*log(a1*a2/c3)^3*log(a4)*log(a5) - 0.5*log(c3/c4)^2*log(a5)*log(a2)*log(a1) - 1/6*log(c3*a4/c5)^3*log(a2)*log(a1) - 1/12*log(a1*a2/c3)^3*log(c4/c5)^2 + 0.5*log(a1/c2)^2*log(a3)*log(a4)*log(a5) - 1/6*log(c2*a3/c4)^3*log(a5)*log(a1) - 1/24*log(c2*a3*a4/c5)^4*log(a1) - 1/4*log(c4/c5)^2*log(a3)*log(a1/c2)^2 - 0.5*log(c2/c3)^2*log(a4)*log(a5)*log(a1) - 1/4*log(c3/c4)^2*log(a5)*log(a1/c2)^2 - 1/12*log(c3*a4/c5)^3*log(a1/c2)^2 + 1/4*log(c2/c3)^2*log(c4 /c5)^2*log(a1) - log(c1)*log(a2)*log(a3)*log(a4)*log(a5) - 1/24*log(c1*a2*a3/c4)^4*log(a5) - 1/120*log(c1*a2*a3*a4/c5)^5 + 0.5*log(c4/c5)^2*log(a3)*log(a2) * log(c1) - 1/6*log(c1*a2/c3)^3*log(a4)*log(a5) + 0.5*log(c3/c4)^2*log(a5)*log(a2)*log(c1) + 1/6*log(c3*a4/c5)^3*log(a2)*log(c1) + 1/12*log(c1*a2/c3)^3*log(c4/c5)^2 - 0.5*log(c1/c2)^2*log(a3)*log(a4)*log(a5) + 1/6*log(c2*a3/c4)^3*log(a5)*log(c1) + 1/24*log(c2*a3*a4/c5)^4*log(c1) + 1/4*log(c4/c5)^2*log(a3)*log(c1/c2)^2 + 0.5*log(c2/c3)^2*log(a4)*log(a5)*log(c1) + 1/4*log(c3/c4)^2*log(a5)*log(c1/c2)^2 + 1/12*log(c3*a4/c5)^3*log(c1/c2)^2 - 1/4*log(c2/c3)^2*log(c4/c5)^2*log(c1))) } ## general case for K = 6 else { eps <- 1E-4 if (.isEqual(t2, 1)) t2 <- t2 + eps if (.isEqual(t3, 1)) t3 <- t3 + eps if (.isEqual(t4, 1)) t4 <- t4 + eps if (.isEqual(t5, 1)) t5 <- t5 + eps if (.isEqual(t6, 1)) t6 <- t6 + eps if (.isEqual(t2, t3)) t3 <- t2 + eps if (.isEqual(t2, t4)) 
t4 <- t2 + eps if (.isEqual(t2, t5)) t5 <- t2 + eps if (.isEqual(t2, t6)) t6 <- t2 + eps if (.isEqual(t3, t4)) t4 <- t3 + eps if (.isEqual(t3, t5)) t5 <- t3 + eps if (.isEqual(t3, t6)) t6 <- t3 + eps if (.isEqual(t4, t5)) t5 <- t4 + eps if (.isEqual(t4, t6)) t6 <- t4 + eps if (.isEqual(t5, t6)) t6 <- t5 + eps return(piValue + c6^(1/t6)*t6/(t6 - t5)*( t6/(t6 - t4)*t6/(t6 - t3)*t6/(t6 - t2)*t6/(t6 - 1)*a5^(1 - t5/t6)*a4^(1 - t4/t6)* a3^(1 - t3/t6)* a2^(1 - t2/t6)* (a1^(1 - 1/t6) - c1^(1 - 1/t6)) - t6/(t6 - t4)*t6/(t6 - t3)*t6/(t6 - t2)*t2/(t2 - 1)*a5^(1 - t5/t6)*a4^(1 - t4/t6)* a3^(1 - t3/t6)* c2^(1/t2 - 1/t6)*(a1^(1 - 1/t2) - c1^(1 - 1/t2)) - t6/(t6 - t4)*t6/(t6 - t3)*t3/(t3 - t2)*t3/(t3 - 1)*a5^(1 - t5/t6)*a4^(1 - t4/t6)* c3^(1/t3 - 1/t6)*a2^(1 - t2/t3)* (a1^(1 - 1/t3) - c1^(1 - 1/t3)) + t6/(t6 - t4)*t6/(t6 - t3)*t3/(t3 - t2)*t2/(t2 - 1)*a5^(1 - t5/t6)*a4^(1 - t4/t6)* c3^(1/t3 - 1/t6)*c2^(1/t2 - 1/t3)*(a1^(1 - 1/t2) - c1^(1 - 1/t2)) - t6/(t6 - t4)*t4/(t4 - t3)*t4/(t4 - t2)*t4/(t4 - 1)*a5^(1 - t5/t6)*c4^(1/t4 - 1/t6)*a3^(1 - t3/t4)* a2^(1 - t2/t4)* (a1^(1 - 1/t4) - c1^(1 - 1/t4)) + t6/(t6 - t4)*t4/(t4 - t3)*t4/(t4 - t2)*t2/(t2 - 1)*a5^(1 - t5/t6)*c4^(1/t4 - 1/t6)*a3^(1 - t3/t4)* c2^(1/t2 - 1/t4)*(a1^(1 - 1/t2) - c1^(1 - 1/t2)) + t6/(t6 - t4)*t4/(t4 - t3)*t3/(t3 - t2)*t3/(t3 - 1)*a5^(1 - t5/t6)*c4^(1/t4 - 1/t6)*c3^(1/t3 - 1/t4)*a2^(1 - t2/t3)* (a1^(1 - 1/t3) - c1^(1 - 1/t3)) - t6/(t6 - t4)*t4/(t4 - t3)*t3/(t3 - t2)*t2/(t2 - 1)*a5^(1 - t5/t6)*c4^(1/t4 - 1/t6)*c3^(1/t3 - 1/t4)*c2^(1/t2 - 1/t3)*(a1^(1 - 1/t2) - c1^(1 - 1/t2)) - t5/(t5 - t4)*t5/(t5 - t3)*t5/(t5 - t2)*t5/(t5 - 1)*c5^(1/t5 - 1/t6)*a4^(1 - t4/t5)* a3^(1 - t3/t5)* a2^(1 - t2/t5)* (a1^(1 - 1/t5) - c1^(1 - 1/t5)) + t5/(t5 - t4)*t5/(t5 - t3)*t5/(t5 - t2)*t2/(t2 - 1)*c5^(1/t5 - 1/t6)*a4^(1 - t4/t5)* a3^(1 - t3/t5)* c2^(1/t2 - 1/t5)*(a1^(1 - 1/t2) - c1^(1 - 1/t2)) + t5/(t5 - t4)*t5/(t5 - t3)*t3/(t3 - t2)*t3/(t3 - 1)*c5^(1/t5 - 1/t6)*a4^(1 - t4/t5)* c3^(1/t3 - 1/t5)*a2^(1 - t2/t3)* (a1^(1 - 1/t3) - c1^(1 - 1/t3)) - t5/(t5 - t4)*t5/(t5 - t3)*t3/(t3 - t2)*t2/(t2 - 1)*c5^(1/t5 - 1/t6)*a4^(1 - t4/t5)* c3^(1/t3 - 1/t5)*c2^(1/t2 - 1/t3)*(a1^(1 - 1/t2) - c1^(1 - 1/t2)) + t5/(t5 - t4)*t4/(t4 - t3)*t4/(t4 - t2)*t4/(t4 - 1)*c5^(1/t5 - 1/t6)*c4^(1/t4 - 1/t5)*a3^(1 - t3/t4)* a2^(1 - t2/t4)* (a1^(1 - 1/t4) - c1^(1 - 1/t4)) - t5/(t5 - t4)*t4/(t4 - t3)*t4/(t4 - t2)*t2/(t2 - 1)*c5^(1/t5 - 1/t6)*c4^(1/t4 - 1/t5)*a3^(1 - t3/t4)* c2^(1/t2 - 1/t4)*(a1^(1 - 1/t2) - c1^(1 - 1/t2)) - t5/(t5 - t4)*t4/(t4 - t3)*t3/(t3 - t2)*t3/(t3 - 1)*c5^(1/t5 - 1/t6)*c4^(1/t4 - 1/t5)*c3^(1/t3 - 1/t4)*a2^(1 - t2/t3)* (a1^(1 - 1/t3) - c1^(1 - 1/t3)) + t5/(t5 - t4)*t4/(t4 - t3)*t3/(t3 - t2)*t2/(t2 - 1)*c5^(1/t5 - 1/t6)*c4^(1/t4 - 1/t5)*c3^(1/t3 - 1/t4)*c2^(1/t2 - 1/t3)*(a1^(1 - 1/t2) - c1^(1 - 1/t2)))) } } .getFisherCombinationSize <- function(kMax, alpha0Vec, criticalValues, tVec, cases = .getFisherCombinationCases(kMax = kMax, tVec = tVec)) { if (length(criticalValues) < 1 || length(criticalValues) > C_KMAX_UPPER_BOUND_FISHER) { stop(C_EXCEPTION_TYPE_ARGUMENT_LENGTH_OUT_OF_BOUNDS, "length of 'criticalValues' (", length(criticalValues), ") is out of bounds [1; ", C_KMAX_UPPER_BOUND_FISHER, "]") } piValue <- criticalValues[1] if (kMax > 1) { piValue <- .getFisherCombinationSizeKmax2(alpha0Vec, criticalValues, tVec, piValue, case = cases[1]) } if (kMax > 2) { piValue <- .getFisherCombinationSizeKmax3(alpha0Vec, criticalValues, tVec, piValue, case = cases[2]) } if (kMax > 3) { piValue <- .getFisherCombinationSizeApproximatelyKmax4(alpha0Vec, criticalValues, tVec, piValue, case = 
cases[3])
	}
	if (kMax > 4) {
		piValue <- .getFisherCombinationSizeApproximatelyKmax5(alpha0Vec, criticalValues, tVec, 
			piValue, case = cases[4])
	}
	if (kMax > 5) {
		piValue <- .getFisherCombinationSizeApproximatelyKmax6(alpha0Vec, criticalValues, tVec, 
			piValue, case = cases[5])
	}
	return(piValue)
}

.getRejectValueForOneTrial <- function(kMax, alpha0, criticalValues, weightsFisher, stage, pValues) {
	if (stage < kMax && pValues[stage] >= alpha0[stage]) {
		return(0)
	}
	p <- prod(pValues[1:stage] ^ weightsFisher[1:stage])
	if (p < criticalValues[stage]) {
		return(1)
	}
	return(-1)
}

.getRejectValueRejectionProbability <- function(settings) {
	pValues <- stats::runif(settings$kMax)
	for (stage in 1:settings$kMax) {
		rejectValue <- .getRejectValueForOneTrial(settings$kMax, settings$alpha0, 
			settings$criticalValues, settings$weightsFisher, stage, pValues)
		if (rejectValue >= 0) {
			return(rejectValue)
		}
	}
	return(0)
}

.getSimulatedAlpha <- function(kMax, alpha, alpha0, criticalValues, tVec, iterations, seed) {
	weightsFisher <- c(1, tVec)
	settings <- list(
		kMax = kMax, 
		alpha = alpha, 
		alpha0 = alpha0, 
		criticalValues = criticalValues, 
		weightsFisher = weightsFisher, 
		iterations = iterations, 
		seed = seed
	)
	cases <- rep(list(settings), iterations)
	
	# 'mclapply' requires package 'parallel'
	# Improvement: implement as cluster-based routine
	if (requireNamespace("parallel", quietly = TRUE)) {
		simResults <- parallel::mclapply(cases, .getRejectValueRejectionProbability, mc.preschedule = TRUE)
	} else {
		simResults <- base::lapply(cases, .getRejectValueRejectionProbability)
	}
	settings$alphaSimulated <- do.call(sum, simResults) / iterations
	return(settings)
}

.setKMaxToDesign <- function(design, kMax) {
	if (.isUndefinedArgument(design$kMax)) {
		design$kMax <- as.integer(kMax)
		design$.setParameterType("kMax", C_PARAM_GENERATED)
	} else {
		design$.setParameterType("kMax", ifelse(design$kMax == C_KMAX_DEFAULT, 
			C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED))
	}
}

#' @title
#' Get Design Fisher
#'
#' @description
#' Performs Fisher's combination test and returns critical values for this design.
#'
#' @param kMax The maximum number of stages K. K = 1, 2, 3, ..., 6, default is 3.
#' @param alpha The significance level alpha, default is 0.025.
#' @param sided Is the alternative one-sided (1) or two-sided (2), default is 1.
#' @param method "equalAlpha", "fullAlpha", "noInteraction", or "userDefinedAlpha", default is "equalAlpha".
#' @param userAlphaSpending A vector of levels 0 < alpha_1 < ... < alpha_K < alpha
#'        specifying the cumulative Type I error rate.
#' @param alpha0Vec Stopping for futility bounds for stage-wise p-values.
#' @param bindingFutility If \code{bindingFutility = FALSE} is specified, the calculation of
#'        the critical values is not affected by the futility bounds (default is \code{TRUE}).
#' @param informationRates Information rates that must be fixed prior to the trial,
#'        default is \code{(1 : kMax) / kMax}.
#' @param tolerance The tolerance, default is 1E-14.
#' @param iterations The number of simulation iterations, e.g.,
#'        getDesignFisher(iterations = 100000) checks the validity of the critical values for the default design.
#'        The default value of \code{iterations} is 0, i.e., no simulation will be executed.
#' @param seed Seed for simulating the power for Fisher's combination test. See above, default is a random seed.
#' @param ... Ensures that all arguments are named and
#'        that a warning will be displayed if unknown arguments are passed.
#' #' @details #' \code{getDesignFisher} calculates the critical values and stage levels for Fisher's combination test as described in Bauer (1989), Bauer and Koehne (1994), #' Bauer and Roehmel (1995), and Wassmer (1999) for equally and unequally sized stages. #' #' @return Returns a \code{\link{TrialDesignFisher}} object #' #' @export #' #' @seealso \code{\link{getDesignSet}} for creating a set of designs to compare. #' #' @examples #' # Run with default values #' getDesignFisher() #' #' # The output is: #' # #' # Design parameters and output of Fisher design: #' # User defined parameters: not available #' # #' # Derived from user defined parameters: not available #' # #' # Default parameters: #' # Method : equalAlpha #' # Maximum number of stages : 3 #' # Stages : 1, 2, 3 #' # Information rates : 0.333, 0.667, 1.000 #' # Significance level : 0.0250 #' # Alpha_0 : 1.0000, 1.0000 #' # Binding futility : TRUE #' # Test : one-sided #' # Tolerance : 1e-14 #' # #' # Output: #' # Cumulative alpha spending : 0.01231, 0.01962, 0.02500 #' # Critical values : 0.0123085, 0.0016636, 0.0002911 #' # Stage levels : 0.01231, 0.01231, 0.01231 #' # Scale : 1, 1 #' # Non stochastic curtailment : FALSE #' getDesignFisher <- function(..., kMax = NA_integer_, alpha = NA_real_, method = C_FISHER_METHOD_DEFAULT, userAlphaSpending = NA_real_, alpha0Vec = NA_real_, informationRates = NA_real_, sided = 1, bindingFutility = NA, tolerance = C_ANALYSIS_TOLERANCE_FISHER_DEFAULT, iterations = 0, seed = NA_real_) { .assertIsValidIterationsAndSeed(iterations, seed) .warnInCaseOfUnknownArguments(functionName = "getDesignFisher", ...) return(.getDesignFisher( kMax = kMax, alpha = alpha, method = method, userAlphaSpending = userAlphaSpending, alpha0Vec = alpha0Vec, informationRates = informationRates, sided = sided, bindingFutility = bindingFutility, tolerance = tolerance, iterations = iterations, seed = seed, userFunctionCallEnabled = TRUE) ) } .getDesignFisherDefaultValues <- function() { return(list( kMax = NA_integer_, alpha = NA_real_, method = C_FISHER_METHOD_DEFAULT, userAlphaSpending = NA_real_, alpha0Vec = NA_real_, informationRates = NA_real_, sided = 1, bindingFutility = C_BINDING_FUTILITY_FISHER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_FISHER_DEFAULT, iterations = 0, seed = NA_real_ )) } .getFisherCombinationCases <- function(kMax, tVec) { if (kMax == 1) { return(c()) } cases <- c() if (kMax > 1) { cases <- c(cases, .getFisherCombinationCaseKmax2(tVec)) } if (kMax > 2) { cases <- c(cases, .getFisherCombinationCaseKmax3(tVec)) } if (kMax > 3) { cases <- c(cases, .getFisherCombinationCaseKmax4(tVec)) } if (kMax > 4) { cases <- c(cases, .getFisherCombinationCaseKmax5(tVec)) } if (kMax > 5) { cases <- c(cases, .getFisherCombinationCaseKmax6(tVec)) } return(cases) } # # @param userFunctionCallEnabled if \code{TRUE}, additional parameter validation methods will be called. 
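#
# Additional usage sketches for the exported getDesignFisher() (the parameter
# values below are illustrative assumptions, not recommendations):
#
#   # spend the full alpha at the final stage of a four-stage design:
#   getDesignFisher(method = "fullAlpha", kMax = 4, alpha = 0.025)
#
#   # user defined cumulative alpha spending (kMax is derived from its length):
#   getDesignFisher(method = "userDefinedAlpha",
#       userAlphaSpending = c(0.01, 0.02, 0.025))
#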
# .getDesignFisher <- function( kMax = NA_integer_, alpha = NA_real_, method = C_FISHER_METHOD_DEFAULT, userAlphaSpending = NA_real_, alpha0Vec = NA_real_, informationRates = NA_real_, sided = 1, bindingFutility = C_BINDING_FUTILITY_FISHER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_FISHER_DEFAULT, iterations = 0, seed = NA_real_, userFunctionCallEnabled = FALSE) { if (.isDefinedArgument(kMax, argumentExistsValidationEnabled = userFunctionCallEnabled)) { .assertIsValidKMax(kMax, kMaxUpperBound = C_KMAX_UPPER_BOUND_FISHER) if (!is.integer(kMax)) { kMax <- as.integer(kMax) } } if (!is.integer(sided) && sided %in% c(1, 2)) { sided <- as.integer(sided) } if (is.na(bindingFutility)) { bindingFutility <- C_BINDING_FUTILITY_FISHER_DEFAULT } else if (userFunctionCallEnabled && ((!is.na(kMax) && kMax == 1) || (!any(is.na(alpha0Vec)) && all(alpha0Vec == C_ALPHA_0_VEC_DEFAULT)))) { warning("'bindingFutility' (", bindingFutility, ") will be ignored", call. = FALSE) } design <- TrialDesignFisher( kMax = kMax, alpha = alpha, method = method, sided = sided, userAlphaSpending = userAlphaSpending, alpha0Vec = alpha0Vec, informationRates = informationRates, bindingFutility = bindingFutility, tolerance = tolerance, iterations = iterations, seed = seed ) .assertDesignParameterExists(design, "sided", 1) .assertIsValidSidedParameter(design$sided) .assertDesignParameterExists(design, "method", C_FISHER_METHOD_DEFAULT) .assertIsCharacter(design$method, "method") if (!.isFisherMethod(design$method)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'method' must be one of the following: ", .printFisherMethods()) } .assertDesignParameterExists(design, "bindingFutility", C_BINDING_FUTILITY_FISHER_DEFAULT) .assertDesignParameterExists(design, "tolerance", C_ANALYSIS_TOLERANCE_FISHER_DEFAULT) .setKmaxBasedOnAlphaSpendingDefintion(design) design$informationRates <- .getValidatedInformationRates(design) design$alpha0Vec <- .getValidatedAlpha0Vec(design) if (design$sided == 2 && design$bindingFutility && any(design$alpha0Vec < 1)) { warning("Binding futility will be ignored because the test is defined as two-sided", call. = FALSE) } if (design$method == C_FISHER_METHOD_USER_DEFINED_ALPHA) { .validateUserAlphaSpending(design) } else { design$.setParameterType("userAlphaSpending", C_PARAM_NOT_APPLICABLE) if (.isDefinedArgument(design$userAlphaSpending)) { warning("'userAlphaSpending' will be ignored because 'method' is not '", C_FISHER_METHOD_USER_DEFINED_ALPHA, "'", call. 
= FALSE) } } if (.isUndefinedArgument(design$alpha)) { design$alpha = C_ALPHA_DEFAULT } .assertDesignParameterExists(design, "alpha", C_ALPHA_DEFAULT) .assertIsSingleNumber(design$alpha, "alpha") .assertIsValidSidedParameter(sided) if (sided != 1) { design$alpha <- design$alpha / sided } if (userFunctionCallEnabled) { .assertIsValidAlpha(design$alpha) } .assertDesignParameterExists(design, "kMax", 3) .assertIsSingleInteger(design$kMax, "kMax") .assertIsValidKMax(design$kMax, kMaxUpperBound = C_KMAX_UPPER_BOUND_FISHER) if (design$method == C_FISHER_METHOD_NO_INTERACTION && design$kMax < 3) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "method '", C_FISHER_METHOD_NO_INTERACTION, "' is only allowed for kMax > 2 (kMax is ", design$kMax, ")") } if (design$kMax > 1) { design$scale <- round(sqrt((design$informationRates[2:design$kMax] - design$informationRates[1:(design$kMax - 1)]) / design$informationRates[1]), 10) } design$criticalValues <- rep(NA_real_, design$kMax) design$.setParameterType("scale", C_PARAM_GENERATED) design$.setParameterType("criticalValues", C_PARAM_GENERATED) if (design$bindingFutility) { alpha0Vec <- design$alpha0Vec } else { alpha0Vec <- rep(1, design$kMax - 1) } design$.setParameterType("stageLevels", C_PARAM_GENERATED) design$.setParameterType("alphaSpent", C_PARAM_GENERATED) design$.setParameterType("nonStochasticCurtailment", C_PARAM_GENERATED) tryCatch({ cases <- .getFisherCombinationCases(kMax = design$kMax, tVec = design$scale) if (design$method == C_FISHER_METHOD_USER_DEFINED_ALPHA) { design$criticalValues[1] <- design$userAlphaSpending[1] design$alphaSpent <- design$criticalValues if (design$kMax > 1) { for (k in 2:design$kMax) { cLower <- 0 cUpper <- design$alpha prec <- 1 while (prec > design$tolerance) { alpha1 <- (cLower + cUpper) * 0.5 design$criticalValues[k] <- alpha1 size <- .getFisherCombinationSize(k, alpha0Vec[1:(k - 1)], design$criticalValues, design$scale, cases = cases) ifelse(size < design$userAlphaSpending[k], cLower <- alpha1, cUpper <- alpha1) prec <- cUpper - cLower } } } } else { prec <- 1 cLower <- 0 cUpper <- design$alpha maxIter <- 100 while (prec > design$tolerance && maxIter >= 0) { # no use of uniroot because there might be no positive solution # (f(cl) and f(cu) might not have opposite signs) alpha1 <- (cLower + cUpper) * 0.5 if (design$method == C_FISHER_METHOD_EQUAL_ALPHA) { design$criticalValues <- sapply(1:design$kMax, function(k) .getOneDimensionalRoot(function(c) { .getFisherCombinationSize(k, rep(1, k - 1), rep(c, k), design$scale, cases = cases) - alpha1 }, lower = design$tolerance, upper = design$alpha, tolerance = design$tolerance) ) } else if (design$method == C_FISHER_METHOD_FULL_ALPHA) { design$criticalValues[1:(design$kMax - 1)] <- sapply(1:(design$kMax - 1), function(k) { prec2 <- 1 cLower2 <- 0 cUpper2 <- design$alpha while (prec2 > design$tolerance) { c <- (cLower2 + cUpper2) * 0.5 y <- .getFisherCombinationSize(k, rep(1, k - 1), rep(c, k), design$scale, cases = cases) ifelse(y < alpha1, cLower2 <- c, cUpper2 <- c) prec2 <- cUpper2 - cLower2 } return(c) }) design$criticalValues[design$kMax] <- .getOneDimensionalRoot( function(c) { .getFisherCombinationSize(design$kMax, rep(1, design$kMax - 1), rep(c, design$kMax), design$scale, cases = cases) - design$alpha }, lower = design$tolerance, upper = design$alpha, tolerance = design$tolerance ) } else if (design$method == C_FISHER_METHOD_NO_INTERACTION) { design$criticalValues[design$kMax] <- .getOneDimensionalRoot( function(c) { .getFisherCombinationSize(design$kMax, rep(1, 
design$kMax - 1), rep(c, design$kMax), design$scale, cases = cases) - design$alpha
					}, lower = design$tolerance, upper = design$alpha, tolerance = design$tolerance
				)
				design$criticalValues[1] <- alpha1
				for (k in ((design$kMax - 1):2)) {
					design$criticalValues[k] <- design$criticalValues[k + 1] / design$alpha0Vec[k]^(1/design$scale[k])
				}
			}
			size <- .getFisherCombinationSize(design$kMax, alpha0Vec, 
				design$criticalValues, design$scale, cases = cases)
			ifelse(size < design$alpha, cLower <- alpha1, cUpper <- alpha1)
			prec <- cUpper - cLower
			maxIter <- maxIter - 1
		}
	}
	
	design$stageLevels <- sapply(1:design$kMax, function(k) {
		.getFisherCombinationSize(k, rep(1, k - 1), rep(design$criticalValues[k], k), 
			design$scale, cases = cases)
	})
	
	design$alphaSpent <- sapply(1:design$kMax, function(k) {
		.getFisherCombinationSize(k, alpha0Vec[1:(k - 1)], design$criticalValues[1:k], 
			design$scale, cases = cases)
	})
	
	design$nonStochasticCurtailment <- FALSE
	if (design$stageLevels[1] < 1e-10) {
		design$criticalValues[1:(design$kMax - 1)] <- design$criticalValues[design$kMax]
		design$stageLevels <- sapply(1:design$kMax, function(k) {
			.getFisherCombinationSize(k, rep(1, k - 1), rep(design$criticalValues[k], k), 
				design$scale, cases = cases)
		})
		design$alphaSpent <- sapply(1:design$kMax, function(k) {
			.getFisherCombinationSize(k, alpha0Vec[1:(k - 1)], design$criticalValues[1:k], 
				design$scale, cases = cases)
		})
		design$nonStochasticCurtailment <- TRUE
	}
	
	if (userFunctionCallEnabled) {
		if (design$method == C_FISHER_METHOD_NO_INTERACTION && abs(size - design$alpha) > 1e-03) {
			stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "numerical overflow in computation routine")
		}
		if (design$method == C_FISHER_METHOD_EQUAL_ALPHA && 
				abs(mean(design$stageLevels) - design$stageLevels[1]) > 1e-03) {
			stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "numerical overflow in computation routine")
		}
		if (design$kMax > 1) {
			if (any(design$criticalValues[2:design$kMax] - design$criticalValues[1:(design$kMax - 1)] > 1e-12)) {
				stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "no calculation possible")
			}
			if (any(design$stageLevels[1:(design$kMax - 1)] > design$alpha)) {
				stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'alpha' (", design$alpha, ") not correctly specified")
			}
		}
		if (design$method == C_FISHER_METHOD_USER_DEFINED_ALPHA) {
			if (any(abs(design$alphaSpent - design$userAlphaSpending) > 1e-05)) {
				stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "no calculation possible or ", 
					"'alpha' (", design$alpha, ") not correctly specified")
			}
		}
	}
	}, error = function(e) {
		warning("Output may be wrong because an error occurred: ", e$message, call.
= FALSE) }) design$.setParameterType("simAlpha", C_PARAM_NOT_APPLICABLE) design$simAlpha <- NA_real_ if (!is.null(design$iterations) && design$iterations > 0) { design$seed <- .setSeed(design$seed) simResult <- .getSimulatedAlpha( kMax = design$kMax, alpha = design$alpha, alpha0 = design$alpha0Vec, criticalValues = design$criticalValues, tVec = design$scale, iterations = iterations, seed = seed) design$simAlpha <- simResult$alphaSimulated design$.setParameterType("simAlpha", C_PARAM_GENERATED) } if (design$kMax == 1) { design$.setParameterType("alpha0Vec", C_PARAM_NOT_APPLICABLE) } if (length(design$alpha0Vec) == 0 || all(design$alpha0Vec == C_ALPHA_0_VEC_DEFAULT)) { # design$bindingFutility <- NA design$.setParameterType("bindingFutility", C_PARAM_NOT_APPLICABLE) } return(design) } rpact/R/pkgname.R0000644000176200001440000000667513540361137013352 0ustar liggesusers #' #' @title #' rpact - Confirmatory Adaptive Clinical Trial Design and Analysis #' #' @description #' rpact (R Package for Adaptive Clinical Trials) is a comprehensive package that enables #' the design and analysis of confirmatory adaptive group sequential designs. #' Particularly, the methods described in the recent #' \href{http://monograph.wassmer.brannath.rpact.com}{monograph by Wassmer and Brannath} #' (published by Springer, 2016) are implemented. It also comprises advanced methods for sample #' size calculations for fixed sample size designs incl., e.g., sample size calculation for survival #' trials with piecewise exponentially distributed survival times and staggered patients entry. #' #' @details #' rpact includes the classical group sequential designs (incl. user spending function approaches) #' where the sample sizes per stage (or the time points of interim analysis) cannot be changed #' in a data-driven way. #' Confirmatory adaptive designs explicitly allow for this under control of the Type I error rate. #' They are either based on the combination testing or the conditional rejection #' probability (CRP) principle. #' Both are available, for the former the inverse normal combination test and #' Fisher's combination test can be used. #' #' Specific techniques of the adaptive methodology are also available, e.g., #' overall confidence intervals, overall p-values, and conditional and predictive power assessments. #' Simulations can be performed to assess the design characteristics of a (user-defined) sample size #' recalculation strategy. Designs are available for trials with continuous, binary, and survival endpoint. #' #' For more information please visit \href{https://www.rpact.org}{www.rpact.org}. #' If you are interested in professional services round about the package or need #' a comprehensive validation documentation to fulfill regulatory requirements #' please visit \href{https://www.rpact.com}{www.rpact.com}. #' #' rpact is developed by #' \itemize{ #' \item Gernot Wassmer (\href{mailto:gernot.wassmer@rpact.com}{gernot.wassmer@rpact.com}) and #' \item Friedrich Pahlke (\href{mailto:friedrich.pahlke@rpact.com}{friedrich.pahlke@rpact.com}). #' } #' #' @references #' Wassmer, G., Brannath, W. 
(2016) Group Sequential and Confirmatory Adaptive Designs #' in Clinical Trials (Springer Series in Pharmaceutical Statistics) <\href{https://doi.org/10.1007/978-3-319-32562-0}{doi:10.1007/978-3-319-32562-0}> #' #' @docType package #' @author Gernot Wassmer, Friedrich Pahlke #' @importFrom Rcpp evalCpp #' @useDynLib rpact, .registration = TRUE #' @name rpact #' #' @import methods #' @import stats #' @import utils #' @import graphics #' @import tools #' "_PACKAGE" #> [1] "_PACKAGE" .onLoad <- function(libname, pkgname) { } .onAttach <- function(libname, pkgname) { #packageStartupMessage("Thank you for using rpact! Need help or more information? Visit www.rpact.com") } .onUnload <- function(libpath) { if (!is.null(.parallelComputingCluster)) { tryCatch({ parallel::stopCluster(.parallelComputingCluster) }, error = function(e) { .logWarn("Failed to stop parallel computing cluster", e) }) } tryCatch({ library.dynam.unload("rpact", libpath) }, error = function(e) { .logWarn("Failed to unload dynamic C library", e) }) } .onDetach <- function(libpath) { packageStartupMessage("rpact successfully unloaded\n") } rpact/R/class_core_plot_settings.R0000644000176200001440000003161013556061055017011 0ustar liggesusers###################################################################################### # # # -- Plot setting classes -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### #' #' @name PlotSettings #' #' @title #' Plot Settings #' #' @description #' Class for plot settings. #' #' @field lineSize The line size. #' @field pointSize The point size. #' @field mainTitleFontSize The main tile font size. #' @field axesTextFontSize The text font size. #' @field legendFontSize The legend font size. #' #' @details #' Collects typical plot settings in an object. #' #' @keywords internal #' #' @include class_core_parameter_set.R #' #' @importFrom methods new #' PlotSettings <- setRefClass("PlotSettings", contains = "ParameterSet", fields = list( .legendLineBreakIndex = "numeric", .pointSize = "numeric", .legendFontSize = "numeric", lineSize = "numeric", pointSize = "numeric", mainTitleFontSize = "numeric", axesTextFontSize = "numeric", legendFontSize = "numeric" ), methods = list( initialize = function( lineSize = 0.8, pointSize = 3, mainTitleFontSize = 14, axesTextFontSize = 10, legendFontSize = 11, ...) { callSuper( lineSize = lineSize, pointSize = pointSize, mainTitleFontSize = mainTitleFontSize, axesTextFontSize = axesTextFontSize, legendFontSize = legendFontSize, ...) 
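			# cache the construction-time values so that adjustPointSize() and
			# adjustLegendFontSize() can compute adjusted sizes from the originals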
			.legendLineBreakIndex <<- 15
			.pointSize <<- pointSize
			.legendFontSize <<- legendFontSize
			.parameterNames <<- list(
				"lineSize" = "Line size",
				"pointSize" = "Point size",
				"mainTitleFontSize" = "Main title font size",
				"axesTextFontSize" = "Axes text font size",
				"legendFontSize" = "Legend font size"
			)
		},
		
		show = function(showType = 1, digits = NA_integer_) {
			.show(showType = showType, digits = digits, consoleOutputEnabled = TRUE)
		},
		
		.show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) {
			'Method for automatically printing plot setting objects'
			.resetCat()
			.showParametersOfOneGroup(parameters = .getVisibleFieldNames(), 
				title = "Plot settings", orderByParameterName = FALSE, 
				consoleOutputEnabled = consoleOutputEnabled)
		},
		
		setColorPalette = function(p, palette, mode = c("colour", "fill", "all")) {
			"Sets the color palette"
			mode <- match.arg(mode)
			# l = 45: make colors slightly darker
			if (is.null(palette) || is.na(palette)) {
				if (mode %in% c("colour", "all")) {
					p <- p + ggplot2::scale_colour_hue(l = 45)
				}
				if (mode %in% c("fill", "all")) {
					p <- p + ggplot2::scale_fill_hue(l = 45)
				}
			} else if (is.character(palette)) {
				if (mode %in% c("colour", "all")) {
					p <- p + ggplot2::scale_colour_brewer(palette = palette)
				}
				if (mode %in% c("fill", "all")) {
					p <- p + ggplot2::scale_fill_brewer(palette = palette)
				}
			} else if (palette == 0) {
				if (mode %in% c("colour", "all")) {
					p <- p + ggplot2::scale_colour_grey()
				}
				if (mode %in% c("fill", "all")) {
					p <- p + ggplot2::scale_fill_grey()
				}
			} else {
				if (mode %in% c("colour", "all")) {
					p <- p + ggplot2::scale_colour_hue(l = 45)
				}
				if (mode %in% c("fill", "all")) {
					p <- p + ggplot2::scale_fill_hue(l = 45)
				}
			}
			return(p)
		},
		
		enlargeAxisTicks = function(p) {
			"Enlarges the axis ticks"
			p <- p + ggplot2::theme(axis.ticks.length = ggplot2::unit(0.3, "cm"))
			return(p)
		},
		
		setAxesAppearance = function(p) {
			"Sets the font size and face of the axes titles and texts"
			p <- p + ggplot2::theme(axis.title.x = ggplot2::element_text(size = axesTextFontSize + 1, face = "bold"))
			p <- p + ggplot2::theme(axis.title.y = ggplot2::element_text(size = axesTextFontSize + 1, face = "bold"))
			p <- p + ggplot2::theme(axis.text.x = ggplot2::element_text(size = axesTextFontSize))
			p <- p + ggplot2::theme(axis.text.y = ggplot2::element_text(size = axesTextFontSize))
			return(p)
		},
		
		# Sets the axes labels
		setAxesLabels = function(p, xAxisLabel = NULL, yAxisLabel1 = NULL, yAxisLabel2 = NULL, 
				xlab = NA_character_, ylab = NA_character_, scalingFactor1 = 1, scalingFactor2 = 1) {
			if (is.call(xlab) || !is.na(xlab)) {
				xAxisLabel <- xlab
			} else if (xAxisLabel == "Theta") {
				xAxisLabel <- bquote(bold("Theta"~Theta))
			}
			if (xAxisLabel == "pi1") {
				xAxisLabel <- bquote(bold('pi'['1']))
			} else if (xAxisLabel == "pi2") {
				xAxisLabel <- bquote(bold('pi'['2']))
			} else if (xAxisLabel == "Theta") {
				xAxisLabel <- bquote(bold("Theta"~Theta))
			}
			p <- p + ggplot2::xlab(xAxisLabel)
			if (sum(is.na(ylab)) == 0) {
				yAxisLabel1 <- ylab[1]
				if (length(ylab) == 2) {
					yAxisLabel2 <- ylab[2]
				}
			}
			p <- p + ggplot2::ylab(yAxisLabel1)
			p <- setSecondYAxisOnRightSide(p, yAxisLabel1, yAxisLabel2, scalingFactor1, scalingFactor2)
			return(p)
		},
		
		setSecondYAxisOnRightSide = function(p, yAxisLabel1, yAxisLabel2, scalingFactor1 = 1, scalingFactor2 = 1) {
			if (!is.null(yAxisLabel2) && scalingFactor1 != scalingFactor2) {
				p <- p + ggplot2::scale_y_continuous(yAxisLabel1, sec.axis = ggplot2::sec_axis(
					~ . * scalingFactor1 / scalingFactor2, name = yAxisLabel2))
			}
			return(p)
		},
		
		setLegendTitle = function(p, legendTitle, mode = c("colour", "fill")) {
			mode <- match.arg(mode)
			if (!is.null(legendTitle) && !is.na(legendTitle) && trimws(legendTitle) != "") {
				if (mode == "colour") {
					p <- p + ggplot2::labs(colour = .getTextLineWithLineBreak(legendTitle, 
						lineBreakIndex = .legendLineBreakIndex))
				} else {
					p <- p + ggplot2::labs(fill = .getTextLineWithLineBreak(legendTitle, 
						lineBreakIndex = .legendLineBreakIndex))
				}
				p <- p + ggplot2::theme(legend.title = ggplot2::element_text(
					colour = "black", size = legendFontSize + 1, face = "bold"))
			} else {
				p <- p + ggplot2::labs(colour = NULL)
			}
			return(p)
		},
		
		setLegendLabelSize = function(p) {
			p <- p + ggplot2::theme(legend.text = ggplot2::element_text(size = legendFontSize))
			return(p)
		},
		
		setLegendPosition = function(p, legendPosition) {
			.assertIsValidLegendPosition(legendPosition)
			switch(as.character(legendPosition),
				'-1' = {
					p <- p + ggplot2::theme(legend.position = "none")
				},
				'0' = {
					p <- p + ggplot2::theme(aspect.ratio = 1)
				},
				'1' = {
					p <- p + ggplot2::theme(legend.position = c(0.05, 1), legend.justification = c(0, 1))
				},
				'2' = {
					p <- p + ggplot2::theme(legend.position = c(0.05, 0.5), legend.justification = c(0, 0.5))
				},
				'3' = {
					p <- p + ggplot2::theme(legend.position = c(0.05, 0.05), legend.justification = c(0, 0))
				},
				'4' = {
					p <- p + ggplot2::theme(legend.position = c(0.95, 1), legend.justification = c(1, 1))
				},
				'5' = {
					p <- p + ggplot2::theme(legend.position = c(0.95, 0.5), legend.justification = c(1, 0.5))
				},
				'6' = {
					p <- p + ggplot2::theme(legend.position = c(0.95, 0.05), legend.justification = c(1, 0))
				}
			)
			return(p)
		},
		
		setLegendBorder = function(p) {
			"Sets the legend border"
			p <- p + ggplot2::theme(legend.background = ggplot2::element_rect(fill = "white", 
				colour = "black", size = 0.4))
			return(p)
		},
		
		adjustPointSize = function(adjustingValue) {
			pointSize <<- .pointSize + adjustingValue
		},
		
		adjustLegendFontSize = function(adjustingValue) {
			"Adjusts the legend font size, e.g., run \\cr \\code{adjustLegendFontSize(-2)} # makes the font size 2 points smaller"
			legendFontSize <<- .legendFontSize + adjustingValue
		},
		
		setMainTitle = function(p, mainTitle, subtitle = NA_character_) {
			"Sets the main title"
			p <- p + ggplot2::ggtitle(mainTitle)
			p <- p + ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5, 
				size = mainTitleFontSize, face = "bold"))
			if (!is.na(subtitle)) {
				p <- p + ggplot2::ggtitle(mainTitle, subtitle = subtitle)
				subtitleFontSize <- nchar(subtitle) / 14.5
				if (subtitleFontSize > mainTitleFontSize - 2) {
					subtitleFontSize <- mainTitleFontSize - 2
				}
				p <- p + ggplot2::theme(
					plot.title = ggplot2::element_text(hjust = 0.5, size = mainTitleFontSize, face = "bold"), 
					plot.subtitle = ggplot2::element_text(hjust = 0.5, size = subtitleFontSize))
			} else {
				p <- p + ggplot2::ggtitle(mainTitle)
				p <- p + ggplot2::theme(plot.title = ggplot2::element_text(
					hjust = 0.5, size = mainTitleFontSize, face = "bold"))
			}
			return(p)
		},
		
		setMarginAroundPlot = function(p, margin = 0.2) {
			"Sets the margin around the plot, e.g., run \\cr \\code{setMarginAroundPlot(p, .2)} or \\cr \\code{setMarginAroundPlot(p, c(.1, .2, .1, .2))}"
			if (length(margin) == 1) {
				margin <- base::rep(margin, 4)
			}
			if (!(length(margin) %in% c(1, 4))) {
				stop("'margin' must be a numeric vector with length 1 or 4")
			}
			p <- p + ggplot2::theme(plot.margin = ggplot2::unit(margin, "cm"))
			return(p)
		},
		
		expandAxesRange = function(p, x = NA_real_, y = NA_real_) {
			"Expands the axes range"
			if (!is.na(x)) {
				p <- p + 
ggplot2::expand_limits(x = x) } if (!is.na(y)) { p <- p + ggplot2::expand_limits(y = y) } return(p) }, hideGridLines = function(p) { "Hides the grid lines" p <- p + ggplot2::theme(panel.grid.major.x = ggplot2::element_blank()) p <- p + ggplot2::theme(panel.grid.minor.x = ggplot2::element_blank()) p <- p + ggplot2::theme(panel.grid.major.y = ggplot2::element_blank()) p <- p + ggplot2::theme(panel.grid.minor.y = ggplot2::element_blank()) return(p) }, setTheme = function(p) { "Sets the theme" p <- p + ggplot2::theme_bw() p <- p + ggplot2::theme(panel.border = ggplot2::element_blank(), axis.line = ggplot2::element_line(colour = "black")) return(p) }, plotValues = function(p, ..., plotLineEnabled = TRUE, plotPointsEnabled = TRUE, pointBorder = 4) { if (plotLineEnabled) { p <- p + ggplot2::geom_line(size = lineSize) } if (plotPointsEnabled) { # plot white border around the points if (pointBorder > 0) { p <- p + ggplot2::geom_point(color = "white", size = pointSize, alpha = 1, shape = 21, stroke = pointBorder / 2.25) } p <- p + ggplot2::geom_point(size = pointSize) } return(p) }, mirrorYValues = function(p, yValues, plotLineEnabled = TRUE, plotPointsEnabled = TRUE, pointBorder = 4) { if (plotLineEnabled) { p <- p + ggplot2::geom_line(ggplot2::aes(y = -yValues), size = lineSize) } if (plotPointsEnabled) { # plot white border around the points if (pointBorder > 0) { p <- p + ggplot2::geom_point(ggplot2::aes(y = -yValues), color = "white", size = pointSize, alpha = 1, shape = 21, stroke = pointBorder / 2.25) } p <- p + ggplot2::geom_point(ggplot2::aes(y = -yValues), size = pointSize) } return(p) }, addCompanyAnnotation = function(p, enabled = TRUE) { if (!enabled) { return(p) } label <- "www.rpact.org" p <- p + ggplot2::annotate("label", x = -Inf, y = Inf, hjust = -0.1, vjust=1, label = label, size = 2.8, colour = "white", fill = "white") p <- p + ggplot2::annotate("text", x = -Inf, y = Inf, label = label, hjust=-.12, vjust=1, colour = "lightgray", size = 2.7) return(p) } ) ) rpact/R/f_core_utilities.R0000644000176200001440000012410613574437312015254 0ustar liggesusers###################################################################################### # # # -- RPACT utilities -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### #' @include f_core_constants.R NULL utils::globalVariables(".parallelComputingCluster") utils::globalVariables(".parallelComputingCaseNumbers") utils::globalVariables(".parallelComputingArguments") .parallelComputingCluster <- NULL .parallelComputingCaseNumbers <- NULL .parallelComputingArguments <- NULL #' #' @title #' Set Log Level #' #' @description #' Sets the \code{rpact} log level. #' #' @param logLevel The new log level to set. Can be one of #' "PROGRESS", "ERROR", "WARN", "INFO", "DEBUG", "TRACE", "DISABLED". #' #' @details #' This function is intended for debugging purposes only. 
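#' The chosen level is stored in the environment variable \code{RPACT_LOG_LEVEL}
#' and therefore applies to the current R session.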
#'
#' @export
#'
#' @keywords internal
#'
#' @examples
#'
#' \dontrun{
#' setLogLevel("DEBUG")
#' }
#'
setLogLevel <- function(logLevel = c("PROGRESS", "ERROR", "WARN", "INFO", "DEBUG", "TRACE", "DISABLED")) {
    logLevel <- match.arg(logLevel)
    if (!is.character(logLevel) || !(logLevel %in% c(
            C_LOG_LEVEL_TRACE, C_LOG_LEVEL_DEBUG, C_LOG_LEVEL_INFO, C_LOG_LEVEL_WARN,
            C_LOG_LEVEL_ERROR, C_LOG_LEVEL_PROGRESS, C_LOG_LEVEL_DISABLED))) {
        stop("Illegal argument: 'logLevel' must be one of ", "c(",
            paste(paste0("'", c(C_LOG_LEVEL_TRACE, C_LOG_LEVEL_DEBUG, C_LOG_LEVEL_INFO,
                C_LOG_LEVEL_WARN, C_LOG_LEVEL_ERROR, C_LOG_LEVEL_PROGRESS,
                C_LOG_LEVEL_DISABLED), "'"), collapse = ", "), ")")
    }

    Sys.setenv("RPACT_LOG_LEVEL" = logLevel)
}

#'
#' @title
#' Get Log Level
#'
#' @description
#' Returns the current \code{rpact} log level.
#'
#' @details
#' This function is intended for debugging purposes only.
#'
#' @export
#'
#' @keywords internal
#'
#' @examples
#'
#' \dontrun{
#' getLogLevel()
#' }
#'
getLogLevel <- function() {
    logLevel <- Sys.getenv("RPACT_LOG_LEVEL")
    if (logLevel == "") {
        logLevel <- C_LOG_LEVEL_PROGRESS
        Sys.setenv("RPACT_LOG_LEVEL" = logLevel)
    }
    return(logLevel)
}

#'
#' @title
#' Reset Log Level
#'
#' @description
#' Resets the \code{rpact} log level.
#'
#' @details
#' This function is intended for debugging purposes only.
#'
#' @export
#'
#' @keywords internal
#'
#' @examples
#'
#' \dontrun{
#' resetLogLevel()
#' }
#'
resetLogLevel <- function() {
    setLogLevel(C_LOG_LEVEL_PROGRESS)
}

.createParallelComputingCluster <- function() {
    if (!is.null(.parallelComputingCluster)) {
        return(TRUE)
    }

    if (requireNamespace("parallel", quietly = TRUE)) {
        startTime <- Sys.time()
        cores <- parallel::detectCores(logical = FALSE)
        if (is.na(cores) || cores < 2) {
            return(FALSE)
        }
        tryCatch({
            .parallelComputingCluster <<- parallel::makeCluster(cores)
            .logProgress("Parallel computing cluster created with %s cores", cores, startTime = startTime)
            return(TRUE)
        }, error = function(e) {
            .logWarn("Failed to create parallel computing cluster", e)
        })
    }
    return(FALSE)
}

.areEqualVectors <- function(v1, v2, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    if (is.null(v1) || is.null(v2)) {
        return(FALSE)
    }
    if (length(v1) != length(v2)) {
        return(FALSE)
    }
    if (length(v1) == 0) {
        return(TRUE)
    }

    vec1 <- v1
    vec2 <- v2
    vec1[is.na(vec1)] <- -99999999999999
    vec2[is.na(vec2)] <- -99999999999999
    d <- nchar(as.character(1 / tolerance)) - 1
    vec1 <- round(vec1, d)
    vec2 <- round(vec2, d)
    return(sum(vec1 == vec2) == length(vec1))
}
#.areEqualVectors(c(0.152206629, 0.165328755, 0.002777922, NA), c(0.152206631, 0.165328753, 0.002777917, NA), tolerance = 1e-08)

.toCapitalized <- function(x, ignoreBlackList = FALSE) {
    if (is.null(x) || is.na(x) || !is.character(x)) {
        return(x)
    }

    if (!ignoreBlackList) {
        if (x %in% c("pi1", "pi2")) {
            return(x)
        }
    }

    s <- strsplit(x, " ")[[1]]
    s <- paste0(toupper(substring(s, 1, 1)), substring(s, 2))
    wordsToExclude <- c("And", "The", "Of", "Or")
    s[s %in% wordsToExclude] <- tolower(s[s %in% wordsToExclude])
    s <- paste(s, collapse = " ")
    return(s)
}

.firstCharacterToUpperCase <- function(x) {
    substr(x, 1, 1) <- toupper(substr(x, 1, 1))
    return(x)
}

.equalsRegexpIgnoreCase <- function(x, pattern) {
    x <- tolower(x)
    pattern <- tolower(pattern)
    result <- grep(pattern, x)
    return(sum(result) > 0)
}

#
# @title
# Get Optional Argument
#
# @description
# Returns the value of an optional argument if it exists.
#
# @param optionalArgumentName the name of the optional argument.
#
# @details
# Internal function.
# # @return the value of the optional argument if it exists; NULL otherwise. # # @examples # # f = function(...) { # print(.getOptionalArgument("x", ...)) # } # # > f(x = 1) # [1] 1 # # > f(y = 1) # NULL # # @keywords internal # .getOptionalArgument <- function(optionalArgumentName, ...) { args <- list(...) if (optionalArgumentName %in% names(args)) { return(args[[optionalArgumentName]]) } return(NULL) } .isUndefinedArgument <- function(arg) { if (missing(arg) || is.null(arg)) { return(TRUE) } tryCatch({ if (length(arg) == 0) { return(TRUE) } if (length(arg) > 1) { return(FALSE) } }, error = function(e) { paramName <- deparse(substitute(arg)) .logWarn("Failed to execute '.isUndefinedArgument(%s)' ('%s' is an instance of class '%s'): %s", paramName, paramName, class(arg), e) }) return(is.na(arg)) } .isDefinedArgument <- function(arg, argumentExistsValidationEnabled = TRUE) { paramName <- deparse(substitute(arg)) if (argumentExistsValidationEnabled && length(grep("\\$|\\[|\\]", paramName)) == 0 && !exists(paramName)) { tryCatch({ if (missing(arg) || is.null(arg)) { return(FALSE) } }, error = function(e) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "the object '", paramName, "' has not been defined anywhere. ", "Please define it first, e.g., run '", paramName, " <- 1'") }) } if (missing(arg) || is.null(arg)) { return(FALSE) } tryCatch({ if (length(arg) == 0) { return(FALSE) } if (length(arg) > 1) { return(TRUE) } }, error = function(e) { paramName <- deparse(substitute(arg)) .logWarn("Failed to execute '.isDefinedArgument(%s)' ('%s' is an instance of class '%s'): %s", paramName, paramName, class(arg), e) }) return(!is.na(arg)) } .arrayToString <- function(x, separator = ", ", vectorLookAndFeelEnabled = FALSE, encapsulate = FALSE, digits = 3) { if (digits < 0) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'digits' (", digits, ") must be >= 0") } if (missing(x) || is.null(x) || length(x) == 0) { return("NULL") } if (length(x) == 1 && is.na(x)) { return("NA") } if (!is.numeric(x) && !is.character(x) && !is.logical(x) && !is.integer(x)) { return(class(x)) } if (is.numeric(x)) { if (digits > 0) { indices <- which(!is.na(x) & abs(x) >= 10^-digits) } else { indices <- which(!is.na(x)) } x[indices] <- round(x[indices], digits) } if (encapsulate) { x <- paste0("'", x, "'") } if (!vectorLookAndFeelEnabled) { return(paste(x, collapse = separator)) } return(paste0("c(", paste(x, collapse = separator), ")")) } .listToString <- function(a, separator = ", ", listLookAndFeelEnabled = FALSE, encapsulate = FALSE) { if (missing(a) || is.null(a) || length(a) == 0) { return("NULL") } if (length(a) == 1 && is.na(a)) { return("NA") } result <- "" for (name in names(a)) { value <- a[[name]] if (encapsulate) { value <- paste0("'", value, "'") } entry <- paste(name, "=", value) if (nchar(result) > 0) { result <- paste(result, entry, sep = ", ") } else { result <- entry } } if (!listLookAndFeelEnabled) { return(result) } return(paste0("list(", result, ")")) } # # @title # Set Seed # # @description # Sets the seed, generates it if \code{is.na(seed) == TRUE} and returns it. # # @param seed the seed to set. # # @details # Internal function. # # @return the (generated) seed. 
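#
# If 'seed' is NA or NULL, a seed is taken from the last element of '.Random.seed'
# (if available) or generated via 'stats::runif'; in both cases the seed is set
# with kind "Mersenne-Twister" and normal.kind "Inversion".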
# # @examples # # .setSeed(12345) # # mySeed <- .setSeed() # # @keywords internal # .setSeed <- function(seed = NA_real_) { if (!is.null(seed) && !is.na(seed)) { if (is.na(as.integer(seed))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'seed' must be a valid integer") } set.seed(seed = seed, kind = "Mersenne-Twister", normal.kind = "Inversion") return(seed) } if (exists(".Random.seed") && length(.Random.seed) > 0) { seed <- .Random.seed[length(.Random.seed)] } else { seed <- round(stats::runif(1) * 1e8) } .logDebug("Set seed to %s", seed) tryCatch({ set.seed(seed, kind = "Mersenne-Twister", normal.kind = "Inversion") }, error = function(e) { .logError("Failed to set seed to '%s' (%s): %s", seed, class(seed), e) seed <- NA_real_ traceback() }) invisible(seed) } .getInputForZeroOutputInsideTolerance <- function(input, output, tolerance = .Machine$double.eps^0.25) { if (is.null(tolerance) || is.na(tolerance)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'tolerance' must be a valid double") } if (tolerance < 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'tolerance' (", tolerance, ") must be >= 0") } if (is.null(input)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'input' must be a valid double or NA") } if (is.null(output) || is.na(output)) { return(NA_real_) } if (abs(output) <= tolerance) { return(input) } return(NA_real_) } .getInputProducingZeroOutput <- function(input1, output1, input2, output2, tolerance = .Machine$double.eps^0.25) { if ((is.na(output1) || is.null(output1)) && (is.na(output2) || is.null(output2))) { return(NA_real_) } if (is.na(output1) || is.null(output1)) { return(.getInputForZeroOutputInsideTolerance(input2, output2, tolerance)) } if (is.na(output2) || is.null(output2)) { return(.getInputForZeroOutputInsideTolerance(input1, output1, tolerance)) } if (abs(output1) <= abs(output2) && !is.na(input1)) { return(.getInputForZeroOutputInsideTolerance(input1, output1, tolerance)) } return(.getInputForZeroOutputInsideTolerance(input2, output2, tolerance)) } # # @title # Get One Dimensional Root # # @description # Searches and returns the one dimensional root of a function using \code{uniroot}. # # @param acceptResultsOutOfTolerance if \code{TRUE}, results will be accepted in any case; # if \code{FALSE}, \code{NA_real_} will be returned in case of tolerance discrepancy # # @details # Internal function. # # @return the root. # # @keywords internal # .getOneDimensionalRoot <- function( f, ..., lower, upper, tolerance = .Machine$double.eps^0.25, acceptResultsOutOfTolerance = FALSE, suppressWarnings = FALSE) { .assertIsSingleNumber(lower, "lower") .assertIsSingleNumber(upper, "upper") .assertIsSingleNumber(tolerance, "tolerance") resultLower <- f(lower, ...) resultUpper <- f(upper, ...) result <- .getInputProducingZeroOutput(lower, resultLower, upper, resultUpper, tolerance) if (!is.na(result)) { return(result) } unirootResult <- NULL tryCatch({ unirootResult <- stats::uniroot(f = f, lower = lower, upper = upper, tol = tolerance, trace = 2, extendInt = "no", ...) 
}, warning = function(w) {
        .logWarn("uniroot(f, lower = %s, upper = %s, tol = %s) produced a warning: %s",
            lower, upper, tolerance, w)
    }, error = function(e) {
        msg <- "Failed to run uniroot(f, lower = %s, upper = %s, tol = %s): %s"
        if (getLogLevel() == C_LOG_LEVEL_DEBUG) {
            .logError(msg, lower, upper, tolerance, e)
        } else {
            .logWarn(msg, lower, upper, tolerance, e)
        }
    })

    if (is.null(unirootResult)) {
        # 'uniroot' failed: fall back to the bisection method below;
        # reuse the boundary evaluations computed above instead of calling f() again
        direction <- ifelse(resultLower < resultUpper, 1, -1)
        if (is.na(direction)) {
            return(NA_real_)
        }
        return(.getOneDimensionalRootBisectionMethod(f = f, lower = lower, upper = upper,
            tolerance = tolerance, acceptResultsOutOfTolerance = acceptResultsOutOfTolerance,
            direction = direction, suppressWarnings = suppressWarnings))
    }

    if (is.infinite(unirootResult$f.root) || abs(unirootResult$f.root) > max(tolerance * 100, 1e-07)) {
        if (!acceptResultsOutOfTolerance) {
            if (!suppressWarnings) {
                warning("NA returned because root search by 'uniroot' produced a function result (",
                    unirootResult$f.root, ") that differs from target 0 ",
                    "(lower = ", lower, ", upper = ", upper, ", tolerance = ", tolerance,
                    ", last function argument was ", unirootResult$root, ")", call. = FALSE)
            }
            return(NA_real_)
        } else if (!suppressWarnings) {
            warning("Root search by 'uniroot' produced a function result (", unirootResult$f.root, ") ",
                "that differs from target 0 ",
                "(lower = ", lower, ", upper = ", upper, ", tolerance = ", tolerance,
                ", last function argument was ", unirootResult$root, ")", call. = FALSE)
        }
    }

    return(unirootResult$root)
}

#
# @title
# Get One Dimensional Root Bisection Method
#
# @description
# Searches and returns the one dimensional root of a function using the bisection method.
#
# @param acceptResultsOutOfTolerance if \code{TRUE}, results will be accepted in any case;
#        if \code{FALSE}, \code{NA_real_} will be returned in case of tolerance discrepancy
#
# @details
# Internal function.
#
# @keywords internal
#
.getOneDimensionalRootBisectionMethod <- function(
        f, ..., lower, upper, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT,
        acceptResultsOutOfTolerance = FALSE,
        maxSearchIterations = 50, direction = 0, suppressWarnings = FALSE) {
    lowerStart <- lower
    upperStart <- upper
    if (direction == 0) {
        direction <- ifelse(f(lower) < f(upper), 1, -1)
    }

    .logTrace("Start special root search: lower = %s, upper = %s, tolerance = %s, direction = %s",
        lower, upper, tolerance, direction)

    precision <- 1
    while (!is.na(precision) && precision > tolerance) {
        argument <- (lower + upper) / 2
        result <- f(argument)
        .logTrace("Root search step: f(%s, lower = %s, upper = %s, direction = %s) = %s",
            argument, lower, upper, direction, result)
        if (result * direction < 0) {
            lower <- argument
        } else {
            upper <- argument
        }
        maxSearchIterations <- maxSearchIterations - 1
        if (maxSearchIterations < 0) {
            if (!suppressWarnings) {
                warning("Root search via 'bisection' stopped: maximum number of search iterations reached. ",
                    "Check if lower and upper search bounds were calculated correctly", call. = FALSE)
            }
            .plotMonotoneFunctionRootSearch(f, lowerStart, upperStart)
            return(NA_real_)
        }
        precision <- upper - lower
    }

    if (is.infinite(result) || abs(result) > max(tolerance * 100, 1e-07)) {
        .plotMonotoneFunctionRootSearch(f, lowerStart, upperStart)
        if (!acceptResultsOutOfTolerance) {
            if (!suppressWarnings) {
                warning("NA returned because root search via 'bisection' produced a function result (",
                    result, ") that differs from target 0 ",
                    "(tolerance is ", tolerance, ", last function argument was ", argument, ")", call.
= FALSE) } return(NA_real_) } else if (!suppressWarnings) { warning("Root search via 'bisection' produced a function result (", result, ") ", "that differs from target 0 ", "(tolerance is ", tolerance, ", last function argument was ", argument, ")", call. = FALSE) } } return(argument) } .plotMonotoneFunctionRootSearch <- function(f, lowerStart, upperStart) { if (getLogLevel() != C_LOG_LEVEL_TRACE) { return(invisible()) } values <- c() params <- seq(from = lowerStart, to = upperStart, by = (upperStart - lowerStart) / 20) for (i in params) { values <- c(values, f(i)) } graphics::plot(params, values) } .getTextLineWithLineBreak <- function(line, lineBreakIndex) { index <- .getSpaceIndex(line, lineBreakIndex) if (index == -1) { return(line) } a <- substr(line, 0, index - 1) b <- substr(line, index + 1, nchar(line)) return(paste0(a, "\n", b)) } .getSpaceIndex <- function(line, lineBreakIndex) { if (nchar(line) <= lineBreakIndex) { return(-1) } if (regexpr('\\n', line) > 0) { return(-1) } len <- nchar(line) lineSplit <- strsplit(line, "")[[1]] for (i in (len/2):length(lineSplit)) { char <- lineSplit[i] if (char == " ") { return(i) } } return(-1) } .getRelativeFigureOutputPath <- function(subDir = NULL) { if (is.null(subDir)) { subDir <- format(Sys.Date(), format="%Y-%m-%d") } figPath <- file.path(getwd(), "_examples", "output", "figures", subDir) if (!dir.exists(figPath)) { dir.create(figPath, showWarnings = FALSE, recursive = TRUE) } return(figPath) } # @title # Save Last Plot # # @description # Saves the last plot to a PNG file located in # '[getwd()]/_examples/output/figures/[current date]/[filename].png'. # # @param filename The filename (without extension!). # # @details # This is a wrapper function that creates a output path and uses \code{ggsave} to save the last plot. # # @examples # # # saveLastPlot('my_plot') # # @keywords internal # saveLastPlot <- function(filename, outputPath = .getRelativeFigureOutputPath()) { .assertGgplotIsInstalled() if (!grepl("\\.png$", filename)) { filename <- paste0(filename, ".png") } path <- file.path(outputPath, filename) ggplot2::ggsave(filename = path, plot = ggplot2::last_plot(), device = NULL, path = NULL, scale = 1.2, width = 16, height = 15, units = "cm", dpi = 600, limitsize = TRUE) cat("Last plot was saved to '", path, "'\n") } .isFirstValueGreaterThanSecondValue <- function(firstValue, secondValue) { if (is.null(firstValue) || length(firstValue) != 1 || is.na(firstValue)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'firstValue' (", firstValue, ") must be a valid numeric value") } if (is.null(secondValue) || length(secondValue) != 1 || is.na(secondValue)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'secondValue' (", secondValue, ") must be a valid numeric value") } return(firstValue > secondValue) } .isFirstValueSmallerThanSecondValue <- function(firstValue, secondValue) { if (is.null(firstValue) || length(firstValue) != 1 || is.na(firstValue)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'firstValue' (", firstValue, ") must be a valid numeric value") } if (is.null(secondValue) || length(secondValue) != 1 || is.na(secondValue)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'secondValue' (", secondValue, ") must be a valid numeric value") } return(firstValue < secondValue) } .logBase <- function(s, ..., logLevel) { if (length(list(...)) > 0) { cat(paste0("[", logLevel, "]"), sprintf(s, ...), "\n") } else { cat(paste0("[", logLevel, "]"), s, "\n") } } .logTrace <- function(s, ...) 
{ if (getLogLevel() == C_LOG_LEVEL_TRACE) { .logBase(s, ..., logLevel = C_LOG_LEVEL_TRACE) } } .logDebug <- function(s, ...) { if (getLogLevel() %in% c(C_LOG_LEVEL_TRACE, C_LOG_LEVEL_DEBUG)) { .logBase(s, ..., logLevel = C_LOG_LEVEL_DEBUG) } } .logInfo <- function(s, ...) { if (getLogLevel() %in% c(C_LOG_LEVEL_TRACE, C_LOG_LEVEL_DEBUG, C_LOG_LEVEL_INFO)) { .logBase(s, ..., logLevel = C_LOG_LEVEL_INFO) } } .logWarn <- function(s, ...) { if (getLogLevel() %in% c(C_LOG_LEVEL_TRACE, C_LOG_LEVEL_DEBUG, C_LOG_LEVEL_INFO, C_LOG_LEVEL_WARN)) { .logBase(s, ..., logLevel = C_LOG_LEVEL_WARN) } } .logError <- function(s, ...) { if (getLogLevel() %in% c(C_LOG_LEVEL_TRACE, C_LOG_LEVEL_DEBUG, C_LOG_LEVEL_INFO, C_LOG_LEVEL_WARN, C_LOG_LEVEL_ERROR)) { .logBase(s, ..., logLevel = C_LOG_LEVEL_ERROR) } } .logProgress <- function(s, ..., startTime) { if (!(getLogLevel() %in% c(C_LOG_LEVEL_TRACE, C_LOG_LEVEL_DEBUG, C_LOG_LEVEL_INFO, C_LOG_LEVEL_WARN, C_LOG_LEVEL_ERROR, C_LOG_LEVEL_PROGRESS))) { return(invisible()) } #return(invisible()) time <- Sys.time() - startTime timeStr <- paste0("[", round(as.numeric(time), 4), " secs]") if (length(list(...)) > 0) { cat(paste0("[", C_LOG_LEVEL_PROGRESS, "]"), sprintf(s, ...), timeStr, "\n") } else { cat(paste0("[", C_LOG_LEVEL_PROGRESS, "]"), s, timeStr, "\n") } } ## ## -- Design utilities ## .getValidatedFutilityBounds <- function(design, kMaxLowerBound = 1, writeToDesign = TRUE) { .assertIsTrialDesignInverseNormalOrGroupSequential(design) return(.getValidatedFutilityBoundsOrAlpha0Vec(design = design, parameterName = "futilityBounds", defaultValue = C_FUTILITY_BOUNDS_DEFAULT, kMaxLowerBound = kMaxLowerBound, writeToDesign = writeToDesign)) } .getValidatedAlpha0Vec <- function(design, kMaxLowerBound = 1, writeToDesign = TRUE) { .assertIsTrialDesignFisher(design) return(.getValidatedFutilityBoundsOrAlpha0Vec(design = design, parameterName = "alpha0Vec", defaultValue = C_ALPHA_0_VEC_DEFAULT, kMaxLowerBound = kMaxLowerBound, writeToDesign = writeToDesign)) } .getValidatedFutilityBoundsOrAlpha0Vec <- function(design, parameterName, defaultValue, kMaxLowerBound, writeToDesign) { parameterValues <- design[[parameterName]] kMaxUpperBound <- ifelse(.isTrialDesignFisher(design), C_KMAX_UPPER_BOUND_FISHER, C_KMAX_UPPER_BOUND) if (.isDefinedArgument(parameterValues) && .isDefinedArgument(design$kMax)) { if (.isTrialDesignFisher(design)) { .assertIsValidAlpha0Vec(parameterValues, kMax = design$kMax, kMaxLowerBound = kMaxLowerBound, kMaxUpperBound = kMaxUpperBound) } else { .assertAreValidFutilityBounds(parameterValues, kMax = design$kMax, kMaxLowerBound = kMaxLowerBound, kMaxUpperBound = kMaxUpperBound) } } if (design$sided == 2 && .isDefinedArgument(parameterValues) && any(na.omit(parameterValues) != defaultValue)) { warning("'", parameterName, "' (", .arrayToString(parameterValues), ") will be ignored because the design is two-sided", call. 
= FALSE) } if (writeToDesign) { .setParameterType(design, parameterName, C_PARAM_USER_DEFINED) } if (.isUndefinedArgument(design$informationRates) && .isUndefinedArgument(parameterValues)) { if (writeToDesign) { if (.setKMaxToDefaultIfUndefined(design, writeToDesign) || design$kMax == C_KMAX_DEFAULT) { .setParameterType(design, parameterName, C_PARAM_DEFAULT_VALUE) } else { .setParameterType(design, parameterName, C_PARAM_DERIVED) } } return(rep(defaultValue, design$kMax - 1)) } if (.isDefinedArgument(design$informationRates) && .isUndefinedArgument(parameterValues)) { if (writeToDesign) { if (.isUndefinedArgument(design$kMax)) { .setKMax(design, kMax = length(design$informationRates)) } .setParameterType(design, parameterName, ifelse(design$kMax == C_KMAX_DEFAULT, C_PARAM_DEFAULT_VALUE, C_PARAM_DERIVED)) } return(rep(defaultValue, design$kMax - 1)) } if (.isUndefinedArgument(design$informationRates) && .isDefinedArgument(parameterValues, argumentExistsValidationEnabled = FALSE)) { if (writeToDesign) { .setKMax(design, kMax = length(parameterValues) + 1) if (.isDefaultVector(parameterValues, rep(defaultValue, design$kMax - 1))) { .setParameterType(design, parameterName, C_PARAM_DEFAULT_VALUE) } } if (.isBetaSpendingDesignWithDefinedFutilityBounds(design, parameterName, writeToDesign)) { return(rep(defaultValue, design$kMax - 1)) } return(parameterValues) } if (writeToDesign) { .setKMax(design, kMax = length(parameterValues) + 1) if (.isDefaultVector(parameterValues, rep(defaultValue, design$kMax - 1))) { .setParameterType(design, parameterName, C_PARAM_DEFAULT_VALUE) } } if (.isTrialDesignFisher(design)) { .assertIsValidAlpha0Vec(parameterValues, kMax = design$kMax, kMaxLowerBound = kMaxLowerBound, kMaxUpperBound = kMaxUpperBound) } else { .assertAreValidFutilityBounds(parameterValues, kMax = design$kMax, kMaxLowerBound = kMaxLowerBound, kMaxUpperBound = kMaxUpperBound) } if (.isBetaSpendingDesignWithDefinedFutilityBounds(design, parameterName, writeToDesign)) { return(rep(defaultValue, design$kMax - 1)) } return(parameterValues) } .isBetaSpendingDesignWithDefinedFutilityBounds <- function(design, parameterName, writeToDesign) { if (.isTrialDesignFisher(design) || !.isBetaSpendingDesignType(design$typeBetaSpending)) { return(FALSE) } if (design$.getParameterType(parameterName) == C_PARAM_USER_DEFINED) { warning("'", parameterName, "' (", .arrayToString(design[[parameterName]]), ") will be ignored because it will be calculated", call. 
= FALSE) } else if (design$.getParameterType(parameterName) == C_PARAM_GENERATED) { return(FALSE) } if (writeToDesign) { .setParameterType(design, parameterName, C_PARAM_DEFAULT_VALUE) } return(TRUE) } .setParameterType <- function(parameterSet, parameterName, parameterType) { if (is.null(parameterSet)) { return(invisible()) } parameterSet$.setParameterType(parameterName, parameterType) } .setValueAndParameterType <- function(parameterSet, parameterName, value, defaultValue, notApplicableIfNA = FALSE) { .assertIsParameterSetClass(parameterSet, "parameterSet") if (is.null(parameterSet)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'parameterSet' must be not null") } if (!(parameterName %in% names(parameterSet$getRefClass()$fields()))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", class(parameterSet), "' does not contain a field with name '", parameterName, "'") } parameterSet[[parameterName]] <- value if (notApplicableIfNA && all(is.na(value))) { parameterSet$.setParameterType(parameterName, C_PARAM_NOT_APPLICABLE) } else if (!is.null(value) && length(value) == length(defaultValue) && ( (all(is.na(value)) && all(is.na(value) == is.na(defaultValue))) || (!is.na(all(value == defaultValue)) && all(value == defaultValue)) )) { parameterSet$.setParameterType(parameterName, C_PARAM_DEFAULT_VALUE) } else { parameterSet$.setParameterType(parameterName, C_PARAM_USER_DEFINED) } } .setKMax <- function(design, kMax) { design$kMax <- as.integer(kMax) .setParameterType(design, "kMax", C_PARAM_DERIVED) invisible(kMax) } .getValidatedInformationRates <- function(design, kMaxLowerBound = 1, writeToDesign = TRUE) { kMaxUpperBound <- ifelse(.isTrialDesignFisher(design), C_KMAX_UPPER_BOUND_FISHER, C_KMAX_UPPER_BOUND) if (.isDefinedArgument(design$informationRates) && .isDefinedArgument(design$kMax)) { .assertAreValidInformationRates(informationRates = design$informationRates, kMax = design$kMax, kMaxLowerBound = kMaxLowerBound, kMaxUpperBound = kMaxUpperBound) } .setParameterType(design, "informationRates", C_PARAM_USER_DEFINED) if (.isTrialDesignFisher(design)) { futilityBounds <- design$alpha0Vec } else { futilityBounds <- design$futilityBounds } if (.isUndefinedArgument(design$informationRates) && .isUndefinedArgument(futilityBounds)) { if (writeToDesign) { if (.setKMaxToDefaultIfUndefined(design, writeToDesign) || design$kMax == C_KMAX_DEFAULT) { .setParameterType(design, "informationRates", C_PARAM_DEFAULT_VALUE) } else { .setParameterType(design, "informationRates", C_PARAM_DERIVED) } } return((1:design$kMax) / design$kMax) } if (.isDefinedArgument(design$informationRates) && .isUndefinedArgument(futilityBounds)) { if (writeToDesign) { .setKMax(design, kMax = length(design$informationRates)) if (.isDefaultVector(design$informationRates, (1:design$kMax) / design$kMax)) { .setParameterType(design, "informationRates", C_PARAM_DEFAULT_VALUE) } } .assertAreValidInformationRates(informationRates = design$informationRates, kMax = design$kMax, kMaxLowerBound = kMaxLowerBound, kMaxUpperBound = kMaxUpperBound) return(design$informationRates) } if (.isUndefinedArgument(design$informationRates) && .isDefinedArgument(futilityBounds, argumentExistsValidationEnabled = FALSE)) { if (writeToDesign) { if (.isUndefinedArgument(design$kMax)) { .setKMax(design, kMax = length(futilityBounds) + 1) } .setParameterType(design, "informationRates", ifelse(design$kMax == C_KMAX_DEFAULT, C_PARAM_DEFAULT_VALUE, C_PARAM_DERIVED)) } return((1:design$kMax) / design$kMax) } if (writeToDesign) { .setKMax(design, kMax = 
length(design$informationRates)) if (.isDefaultVector(design$informationRates, (1:design$kMax) / design$kMax)) { .setParameterType(design, "informationRates", C_PARAM_DEFAULT_VALUE) } } .assertAreValidInformationRates(informationRates = design$informationRates, kMax = design$kMax, kMaxLowerBound = kMaxLowerBound, kMaxUpperBound = kMaxUpperBound) return(design$informationRates) } .setKMaxToDefaultIfUndefined <- function(design, writeToDesign = TRUE) { if (writeToDesign && .isUndefinedArgument(design$kMax)) { design$kMax <- C_KMAX_DEFAULT design$.setParameterType("kMax", C_PARAM_DEFAULT_VALUE) return(TRUE) } return(FALSE) } .isDefaultVector <- function(x, default) { if (length(x) != length(default)) { return(FALSE) } return(sum(x == default) == length(x)) } .validateAlphaAndBeta <- function(design) { .assertDesignParameterExists(design, "alpha", C_ALPHA_DEFAULT) .assertDesignParameterExists(design, "beta", C_BETA_DEFAULT) .assertIsValidAlphaAndBeta(alpha = design$alpha, beta = design$beta) } .validateUserAlphaSpending <- function(design) { .assertIsTrialDesign(design) .assertDesignParameterExists(design, "userAlphaSpending", NA_real_) design$.setParameterType("userAlphaSpending", C_PARAM_USER_DEFINED) if ((design$isUserDefinedParameter("informationRates") || (design$isDefaultParameter("informationRates") && !design$isUserDefinedParameter("kMax"))) && length(design$informationRates) != length(design$userAlphaSpending)) { stop(sprintf(paste0(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "length of 'userAlphaSpending' (%s) must be equal to length of 'informationRates' (%s)"), length(design$userAlphaSpending), length(design$informationRates))) } if (length(design$userAlphaSpending) != design$kMax) { stop(sprintf(paste0(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "length of 'userAlphaSpending' (%s) must be equal to 'kMax' (%s)"), length(design$userAlphaSpending), design$kMax)) } .validateUserAlphaSpendingLength(design) if (.isUndefinedArgument(design$alpha)) { design$alpha <- design$userAlphaSpending[design$kMax] design$.setParameterType("alpha", ifelse(design$alpha == C_ALPHA_DEFAULT, C_PARAM_DEFAULT_VALUE, C_PARAM_DERIVED)) } .assertIsValidAlpha(design$alpha) if (design$kMax > 1 && (design$userAlphaSpending[1] < 0 || design$userAlphaSpending[design$kMax] > design$alpha || any(design$userAlphaSpending[2:design$kMax] - design$userAlphaSpending[1:(design$kMax - 1)] < 0))) { stop(sprintf(paste0(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'userAlphaSpending' = %s must be a vector that satisfies the following condition: ", "0 <= alpha_1 <= .. 
<= alpha_%s <= alpha = %s"), .arrayToString(design$userAlphaSpending, vectorLookAndFeelEnabled = TRUE), design$kMax, design$alpha)) } } .validateUserBetaSpending <- function(design) { .assertIsTrialDesign(design) .assertDesignParameterExists(design, "userBetaSpending", NA_real_) design$.setParameterType("userBetaSpending", C_PARAM_USER_DEFINED) if ((design$isUserDefinedParameter("informationRates") || (design$isDefaultParameter("informationRates") && !design$isUserDefinedParameter("kMax"))) && length(design$informationRates) != length(design$userBetaSpending)) { stop(sprintf(paste0(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "length of 'userBetaSpending' (%s) must be equal to length of 'informationRates' (%s)"), length(design$userBetaSpending), length(design$informationRates))) } if (length(design$userBetaSpending) != design$kMax) { stop(sprintf(paste0(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "length of 'userBetaSpending' (%s) must be equal to 'kMax' (%s)"), length(design$userBetaSpending), design$kMax)) } if (length(design$userBetaSpending) < 2 || length(design$userBetaSpending) > C_KMAX_UPPER_BOUND) { stop(sprintf(paste0(C_EXCEPTION_TYPE_ARGUMENT_LENGTH_OUT_OF_BOUNDS, "length of 'userBetaSpending' (%s) is out of bounds [2; %s]"), length(design$userBetaSpending), C_KMAX_UPPER_BOUND)) } if (.isUndefinedArgument(design$beta)) { design$beta <- design$userBetaSpending[design$kMax] design$.setParameterType("beta", ifelse(design$beta == C_BETA_DEFAULT, C_PARAM_DEFAULT_VALUE, C_PARAM_DERIVED)) } .assertIsValidBeta(beta = design$beta, alpha = design$alpha) if (design$kMax > 1 && (design$userBetaSpending[1] < 0 || design$userBetaSpending[design$kMax] > design$beta || any(design$userBetaSpending[2:design$kMax] - design$userBetaSpending[1:(design$kMax - 1)] < 0))) { stop(sprintf(paste0(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'userBetaSpending' = %s must be a vector that satisfies the following condition: ", "0 <= beta_1 <= .. 
<= beta_%s <= beta = %s"), .arrayToString(design$userBetaSpending, vectorLookAndFeelEnabled = TRUE), design$kMax, design$beta)) } } .validateUserAlphaSpendingLength <- function(design) { if (length(design$userAlphaSpending) < 1 || length(design$userAlphaSpending) > C_KMAX_UPPER_BOUND) { stop(sprintf(paste0(C_EXCEPTION_TYPE_ARGUMENT_LENGTH_OUT_OF_BOUNDS, "length of 'userAlphaSpending' (%s) is out of bounds [1; %s]"), length(design$userAlphaSpending), C_KMAX_UPPER_BOUND)) } } .setKmaxBasedOnAlphaSpendingDefintion <- function(design) { if (.isTrialDesignFisher(design)) { if (design$method != C_FISHER_METHOD_USER_DEFINED_ALPHA) { return(invisible()) } } else { if (design$typeOfDesign != C_TYPE_OF_DESIGN_AS_USER) { return(invisible()) } } if (.isDefinedArgument(design$kMax)) { return(invisible()) } if (.isUndefinedArgument(design$userAlphaSpending)) { return(invisible()) } if (.isDefinedArgument(design$informationRates)) { return(invisible()) } if (.isTrialDesignFisher(design)) { if (.isDefinedArgument(design$alpha0Vec)) { return(invisible()) } } else { if (.isDefinedArgument(design$futilityBounds)) { return(invisible()) } } .validateUserAlphaSpendingLength(design) .setKMax(design, kMax = length(design$userAlphaSpending)) } .skipTestifDisabled <- function() { if (!isTRUE(.isCompleteUnitTestSetEnabled()) && base::requireNamespace("testthat", quietly = TRUE)) { testthat::skip("Test is disabled") } } .getNumberOfZeroesDirectlyAfterDecimalSeparator <- function(x) { zeroCounter <- 0 startEnabled <- FALSE x <- round(x, 15) x <- sprintf("%.15f", x) for (i in 1:nchar(x)) { num <- substring(x, i, i) if (num == ".") { startEnabled <- TRUE } else if (startEnabled) { if (num == "0") { zeroCounter <- zeroCounter + 1 } else { return(zeroCounter) } } } return(zeroCounter) } .getNextHigherValue <- function(x) { .assertIsNumericVector(x, "x") values <- c() for (value in x) { value <- round(value, 15) values <- c(values, 1 / 10^.getNumberOfZeroesDirectlyAfterDecimalSeparator(value)) } return(values) } #' @title #' Test Package # #' @description #' This function allows the installed package \code{rpact} to be tested. #' #' @param outDir The output directory where all test results shall be saved. #' By default the current working directory is used. #' @param completeUnitTestSetEnabled If \code{TRUE} (default) all existing unit tests will #' be executed; a subset of all unit tests will be used otherwise. #' @param types The type(s) of tests to be done. Can be one or more of #' \code{c("tests", "examples", "vignettes")}. Default is "tests" only. #' @param sourceDirectory An optional directory to look for \code{.save} files. #' #' @details #' This function creates the subdirectory \code{rpact-tests} in the specified output directory #' and copies all unit test files of the package to this newly created directory. #' Then the function runs all tests (or a subset of all tests if #' \code{completeUnitTestSetEnabled} is \code{FALSE}) using #' \code{\link[tools]{testInstalledPackage}}. #' The test results will be saved to the text file \code{testthat.Rout} that can be found #' in the subdirectory \code{rpact-tests}. 
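#' Note that running the complete unit test set takes 5 minutes or more, whereas a
#' reduced run, e.g., \code{testPackage(completeUnitTestSetEnabled = FALSE)},
#' finishes in about a minute.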
#'
#' @keywords internal
#'
#' @export
#'
#' @examples
#'
#' \dontrun{
#' testPackage()
#' }
#'
testPackage <- function(outDir = ".", ..., completeUnitTestSetEnabled = TRUE,
        types = "tests", sourceDirectory = NULL) {
    .assertTestthatIsInstalled()

    if (!dir.exists(outDir)) {
        stop("Test output directory '", outDir, "' does not exist")
    }

    startTime <- Sys.time()

    Sys.setenv("LANGUAGE" = "EN")
    on.exit(Sys.unsetenv("LANGUAGE"))

    temp <- .isCompleteUnitTestSetEnabled()
    on.exit(Sys.setenv("RPACT_COMPLETE_UNIT_TEST_SET_ENABLED" = temp), add = TRUE)
    Sys.setenv("RPACT_COMPLETE_UNIT_TEST_SET_ENABLED" = completeUnitTestSetEnabled)

    setLogLevel(C_LOG_LEVEL_DISABLED)
    on.exit(resetLogLevel(), add = TRUE)

    if (.isCompleteUnitTestSetEnabled()) {
        cat("Run all tests. Please wait...\n")
        cat("Have a big coffee - it will take 5 minutes or more.\n")
    } else {
        cat("Run a subset of all tests. Please wait...\n")
        cat("Have a little coffee - it will take a minute or more.\n")
    }

    tools::testInstalledPackage(pkg = "rpact", outDir = outDir, types = types, srcdir = sourceDirectory)

    if (outDir == ".") {
        outDir <- getwd()
    }
    outDir <- file.path(outDir, "rpact-tests")

    endTime <- Sys.time()
    # compute the elapsed time in seconds, independent of the units
    # that 'endTime - startTime' would choose automatically
    timeTotal <- as.numeric(difftime(endTime, startTime, units = "secs"))
    cat("Total runtime for testing:", floor(timeTotal / 60), "minutes and",
        round(timeTotal %% 60), "seconds\n")
    cat("Test results were written to directory '", outDir, "' (see file 'testthat.Rout')\n", sep = "")

    invisible(.isCompleteUnitTestSetEnabled())
}

.isCompleteUnitTestSetEnabled <- function() {
    completeUnitTestSetEnabled <- as.logical(Sys.getenv("RPACT_COMPLETE_UNIT_TEST_SET_ENABLED"))
    if (is.na(completeUnitTestSetEnabled)) {
        completeUnitTestSetEnabled <- FALSE
        Sys.setenv("RPACT_COMPLETE_UNIT_TEST_SET_ENABLED" = completeUnitTestSetEnabled)
    }
    return(isTRUE(completeUnitTestSetEnabled))
}

.getVariedParameterVectorByValue <- function(variedParameter) {
    return((variedParameter[2] - variedParameter[1]) / C_VARIED_PARAMETER_SEQUENCE_LENGTH_DEFAULT)
}

.getVariedParameterVector <- function(variedParameter, variedParameterName) {
    if (is.null(variedParameter) || length(variedParameter) != 2 || any(is.na(variedParameter))) {
        return(variedParameter)
    }

    minValue <- variedParameter[1]
    maxValue <- variedParameter[2]
    if (minValue == maxValue) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", variedParameterName,
            "' with length 2 must contain minimum != maximum (", minValue, " == ", maxValue, ")")
    }
    by <- .getVariedParameterVectorByValue(variedParameter)
    variedParameter <- seq(minValue, maxValue, by)
    return(variedParameter)
}

.getVariedParameterVectorSeqCommand <- function(variedParameter) {
    return(paste0("seq(", round(variedParameter[1], 4), ", ", round(variedParameter[2], 4),
        ", ", round(.getVariedParameterVectorByValue(variedParameter), 6), ")"))
}

.getNumberOfSubjects1 <- function(numberOfSubjects, allocationRatioPlanned) {
    return((numberOfSubjects * allocationRatioPlanned) / (allocationRatioPlanned + 1))
}

.getNumberOfSubjects2 <- function(numberOfSubjects, allocationRatioPlanned) {
    return(numberOfSubjects / (allocationRatioPlanned + 1))
}

.fillWithNAs <- function(x, kMax) {
    if (length(x) >= kMax) {
        return(x)
    }

    x[(length(x) + 1):kMax] <- NA_real_
    return(x)
}

#' @title
#' Print Citation
#'
#' @description
#' How to cite \code{rpact} and \code{R} in publications.
#'
#' @param inclusiveR If \code{TRUE} (default), the information on how to cite the base \code{R} system in publications will be added.
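#' If \code{inclusiveR = FALSE}, only the citation of \code{rpact} itself is printed,
#' e.g., run \code{printCitation(inclusiveR = FALSE)}.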
#' #' @details #' This function shows how to cite \code{rpact} and \code{R} (\code{inclusiveR = TRUE}) in publications. #' #' @keywords internal #' #' @export #' #' @examples #' #' printCitation() #' printCitation <- function(inclusiveR = TRUE) { if (inclusiveR) { citR <- capture.output(print(citation("base"), bibtex = FALSE)) indices <- which(citR == "") indices <- indices[indices != 1 & indices != length(citR)] if (length(indices) > 1) { index <- indices[length(indices)] citR <- citR[1:min(index, length(citR))] } cat("\n", trimws(paste(citR, collapse = "\n")), "\n", sep = "") } print(citation("rpact"), bibtex = FALSE) } rpact/R/f_simulation_rates.R0000644000176200001440000012236613537652241015620 0ustar liggesusers################################################################################################## # # # -- Simulation of binary data with group sequential and combination test -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 09-05-2019 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ################################################################################################## .getTestStatisticsRates <- function(..., designNumber, informationRates, groups, riskRatio, thetaH0, directionUpper, eventsPerStage, sampleSizesPerStage, testStatisticsPerStage) { stage <- ncol(sampleSizesPerStage) if (groups == 1) { stagewiseRates <- eventsPerStage[, stage] / sampleSizesPerStage[, stage] overallRates <- sum(eventsPerStage[, 1:stage]) / sum(sampleSizesPerStage[, 1:stage]) } else { if (stage == 1) { stagewiseRates <- eventsPerStage / sampleSizesPerStage overallRates <- eventsPerStage / sampleSizesPerStage } else { stagewiseRates <- eventsPerStage[, stage] / sampleSizesPerStage[, stage] overallRates <- rowSums(eventsPerStage[, 1:stage]) / rowSums(sampleSizesPerStage[, 1:stage]) } } if (designNumber == 1L) { n1 <- sum(sampleSizesPerStage[1, ]) r1 <- sum(eventsPerStage[1, ]) / n1 if (groups == 1) { value <- (r1 - thetaH0) / sqrt(thetaH0 * (1 - thetaH0)) * sqrt(n1) } else { n2 <- sum(sampleSizesPerStage[2, ]) r2 <- sum(eventsPerStage[2, ]) / n2 if (!riskRatio) { if (r1 - r2 - thetaH0 == 0) { value <- 0 } else { fm <- .getFarringtonManningValuesDiff(r1, r2, thetaH0, n1 / n2) value <- (r1 - r2 - thetaH0) / sqrt( fm[1] * (1 - fm[1]) / n1 + fm[2] * (1 - fm[2]) / n2) } } else { if (r1 - r2 * thetaH0 == 0) { value <- 0 } else { fm <- .getFarringtonManningValuesRatio(r1, r2, thetaH0, n1 / n2) value <- (r1 - r2 * thetaH0) / sqrt( fm[1] * (1 - fm[1]) / n1 + thetaH0^2 * fm[2] * (1 - fm[2]) / n2) } } } value <- (2 * directionUpper - 1) * value pValuesSeparate <- NA_real_ testStatisticsPerStage <- NA_real_ } else { if (stage == 1) { n1 <- sampleSizesPerStage[1, 1] r1 <- eventsPerStage[1, 1] / n1 if (groups == 1) { testStatisticsPerStage <- (2 * directionUpper - 1) * (r1 - thetaH0) / sqrt(thetaH0 * (1 - thetaH0)) * sqrt(n1) } else { n2 <- sampleSizesPerStage[2, 1] r2 <- eventsPerStage[2, 1] / n2 if (!riskRatio) { if (r1 - r2 - thetaH0 == 0) { testStatisticsPerStage <- 0 } else { fm <- .getFarringtonManningValuesDiff(r1, r2, thetaH0, n1 / n2) testStatisticsPerStage <- (2 * directionUpper - 1) * 
(r1 - r2 - thetaH0) / sqrt( fm[1] * (1 - fm[1]) / n1 + fm[2] * (1 - fm[2]) / n2) } } else { if (r1 - r2 * thetaH0 == 0) { testStatisticsPerStage <- 0 } else { fm <- .getFarringtonManningValuesRatio(r1, r2, thetaH0, n1 / n2) testStatisticsPerStage <- (2 * directionUpper - 1) * (r1 - r2 * thetaH0) / sqrt( fm[1] * (1 - fm[1]) / n1 + thetaH0^2 * fm[2] * (1 - fm[2]) / n2) } } } } else { n1 <- sampleSizesPerStage[1, stage] r1 <- eventsPerStage[1, stage] / n1 if (groups == 1) { testStatisticsPerStage <- c(testStatisticsPerStage, (2 * directionUpper - 1) * (r1 - thetaH0) / sqrt(thetaH0 * (1 - thetaH0)) * sqrt(n1)) } else { n2 <- sampleSizesPerStage[2, stage] r2 <- eventsPerStage[2, stage] / n2 if (!riskRatio) { if (r1 - r2 - thetaH0 == 0) { testStatisticsPerStage <- c(testStatisticsPerStage, 0) } else { fm <- .getFarringtonManningValuesDiff(r1, r2, thetaH0, n1 / n2) testStatisticsPerStage <- c(testStatisticsPerStage, (2 * directionUpper - 1) * (r1 - r2 - thetaH0) / sqrt( fm[1] * (1 - fm[1]) / n1 + fm[2] * (1 - fm[2]) / n2)) } } else { if (r1 - r2 * thetaH0 == 0) { testStatisticsPerStage <- c(testStatisticsPerStage, 0) } else { fm <- .getFarringtonManningValuesRatio(r1, r2, thetaH0, n1 / n2) testStatisticsPerStage <- c(testStatisticsPerStage, (2 * directionUpper - 1) * (r1 - r2 * thetaH0) / sqrt( fm[1] * (1 - fm[1]) / n1 + thetaH0^2 * fm[2] * (1 - fm[2]) / n2)) } } } } if (designNumber == 2L) { if (stage == 1) { value <- testStatisticsPerStage } else { value <- (sqrt(informationRates[1]) * testStatisticsPerStage[1] + sqrt(informationRates[2:stage] - informationRates[1:(stage - 1)]) %*% testStatisticsPerStage[2:stage]) / sqrt(informationRates[stage]) } } else if (designNumber == 3L) { if (stage == 1) { value <- 1 - pnorm(testStatisticsPerStage) } else { weightFisher <- rep(NA_real_, stage) weightFisher[1] <- 1 weightFisher[2:stage] <- sqrt(informationRates[2:stage] - informationRates[1:(stage-1)]) / sqrt(informationRates[1]) value <- prod((1 - pnorm(testStatisticsPerStage[1:stage]))^weightFisher[1:stage]) } } pValuesSeparate <- 1 - pnorm(testStatisticsPerStage) } return(list(value = value, stagewiseRates = stagewiseRates, overallRates = overallRates, sampleSizesPerStage = sampleSizesPerStage, testStatisticsPerStage = testStatisticsPerStage, pValuesSeparate = pValuesSeparate)) } .getSimulationRatesStageSubjects <- function(..., stage, riskRatio, thetaH0, groups, plannedSubjects, directionUpper, allocationRatioPlanned, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, sampleSizesPerStage, conditionalPower, conditionalCriticalValue, overallRate, farringtonManningValue1, farringtonManningValue2) { if (is.na(conditionalPower)) { return(plannedSubjects[stage] - plannedSubjects[stage - 1]) } if (groups == 1) { stageSubjects <- (max(0, conditionalCriticalValue * sqrt(thetaH0 * (1 - thetaH0)) + stats::qnorm(conditionalPower) * sqrt(overallRate[1] * (1 - overallRate[1]))))^2 / (max(1e-12, (2 * directionUpper - 1) * (overallRate[1] - thetaH0)))^2 } else { mult <- 1 corr <- thetaH0 if (riskRatio) { mult <- thetaH0 corr <- 0 } stageSubjects <- (1 + 1 / allocationRatioPlanned) * (max(0, conditionalCriticalValue * sqrt(farringtonManningValue1 * (1 - farringtonManningValue1) + farringtonManningValue2 * (1 - farringtonManningValue2) * allocationRatioPlanned * mult^2) + stats::qnorm(conditionalPower) * sqrt(overallRate[1] * (1 - overallRate[1]) + overallRate[2] * (1 - overallRate[2]) * allocationRatioPlanned * mult^2)))^2 / (max(1e-12, (2 * directionUpper - 1) * (overallRate[1] - mult * overallRate[2] - 
corr)))^2 } stageSubjects <- ceiling(min(max(minNumberOfSubjectsPerStage[stage], stageSubjects), maxNumberOfSubjectsPerStage[stage])) return(stageSubjects) } .getSimulationStepRates <- function(..., k, kMax, designNumber, informationRates, futilityBounds, alpha0Vec, criticalValues, riskRatio, thetaH0, pi1, pi2, groups, plannedSubjects, directionUpper, allocationRatioPlanned, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, conditionalPower, pi1H1, pi2H1, sampleSizesPerStage, eventsPerStage, testStatisticsPerStage, testStatistic, calcSubjectsFunction) { stageSubjects <- plannedSubjects[k] # perform event size recalculation for stages 2,..., kMax simulatedConditionalPower <- 0 if (k > 1) { # used effect size is either estimated from test statistic or pre-fixed if (is.na(pi1H1)) { overallRate <- testStatistic$overallRate } else { if (groups == 1) { overallRate <- pi1H1 } else { overallRate <- c(pi1H1, pi2H1) } } # conditional critical value to reject the null hypotheses at the next stage of the trial if (designNumber == 3L) { conditionalCriticalValue <- stats::qnorm(1 - (criticalValues[k] / testStatistic$value)^(1 / sqrt((informationRates[k] - informationRates[k - 1]) / informationRates[1]))) } else { conditionalCriticalValue <- (criticalValues[k] * sqrt(informationRates[k]) - testStatistic$value * sqrt(informationRates[k - 1])) / sqrt(informationRates[k] - informationRates[k - 1]) } if (groups == 2) { if (!riskRatio) { fm <- .getFarringtonManningValuesDiff(overallRate[1], overallRate[2], thetaH0, allocationRatioPlanned) } else { fm <- .getFarringtonManningValuesRatio(overallRate[1], overallRate[2], thetaH0, allocationRatioPlanned) } } stageSubjects <- calcSubjectsFunction( stage = k, riskRatio = riskRatio, thetaH0 = thetaH0, groups = groups, plannedSubjects = plannedSubjects, directionUpper = directionUpper, allocationRatioPlanned = allocationRatioPlanned, minNumberOfSubjectsPerStage = minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage = maxNumberOfSubjectsPerStage, sampleSizesPerStage = sampleSizesPerStage, conditionalPower = conditionalPower, overallRate = overallRate, conditionalCriticalValue = conditionalCriticalValue, farringtonManningValue1 = fm[1], farringtonManningValue2 = fm[2]) # calculate conditional power for selected stageSubjects if (groups == 1) { if (overallRate[1] == 0) overallRate[1] <- 1e-6 if (overallRate[1] == 1) overallRate[1] <- 1 - 1e-6 if (overallRate[1] * (1 - overallRate[1]) == 0) { theta <- 0 } else { theta <- (overallRate[1] - thetaH0) / sqrt(overallRate[1] * (1 - overallRate[1])) + sign(overallRate[1] - thetaH0) * conditionalCriticalValue * (1 - sqrt(thetaH0 * (1 - thetaH0) / (overallRate[1] * (1 - overallRate[1])))) / sqrt(stageSubjects) } } else { if (overallRate[1] * (1 - overallRate[1]) + overallRate[2] * (1 - overallRate[2]) == 0) { theta <- 0 } else { if (!riskRatio) { theta <- sqrt(allocationRatioPlanned) / (1 + allocationRatioPlanned) * ( (overallRate[1] - overallRate[2] - thetaH0) * sqrt(1 + allocationRatioPlanned) / sqrt(overallRate[1] * (1 - overallRate[1]) + allocationRatioPlanned * overallRate[2]*(1 - overallRate[2])) + sign(overallRate[1] - overallRate[2] - thetaH0) * conditionalCriticalValue * (1 - sqrt(fm[1] * (1 - fm[1]) + allocationRatioPlanned * fm[2] * (1 - fm[2])) / sqrt(overallRate[1] * (1 - overallRate[1]) + allocationRatioPlanned * overallRate[2] * (1 - overallRate[2]))) * (1 + allocationRatioPlanned) / sqrt(allocationRatioPlanned * stageSubjects)) } else { theta <- sqrt(allocationRatioPlanned) / (1 + 
allocationRatioPlanned) * ( (overallRate[1] - thetaH0 * overallRate[2]) * sqrt(1 + allocationRatioPlanned) / sqrt(overallRate[1] * (1 - overallRate[1]) + allocationRatioPlanned * thetaH0^2 * overallRate[2] * (1 - overallRate[2])) + sign(overallRate[1] - thetaH0 * overallRate[2]) * conditionalCriticalValue * (1 - sqrt(fm[1] * (1 - fm[1]) + allocationRatioPlanned * thetaH0^2 * fm[2] * (1 - fm[2])) / sqrt(overallRate[1] * (1 - overallRate[1]) + allocationRatioPlanned * thetaH0^2 * overallRate[2] * (1 - overallRate[2]))) * (1 + allocationRatioPlanned) / sqrt(allocationRatioPlanned * stageSubjects)) } } } if (!directionUpper) { theta <- -theta } simulatedConditionalPower <- 1 - stats::pnorm(conditionalCriticalValue - theta * sqrt(stageSubjects)) } if (groups == 1) { n1 <- stageSubjects eventsPerStage <- cbind(eventsPerStage, matrix(c(stats::rbinom(1, n1, pi1)), nrow = 1)) sampleSizesPerStage <- cbind(sampleSizesPerStage, matrix(n1, nrow = 1)) } else { n1 <- ceiling(allocationRatioPlanned * stageSubjects / (1 + allocationRatioPlanned)) n2 <- stageSubjects - n1 eventsPerStage <- cbind(eventsPerStage, matrix(c(stats::rbinom(1, n1, pi1), stats::rbinom(1, n2, pi2)), nrow = 2)) sampleSizesPerStage <- cbind(sampleSizesPerStage, matrix(c(n1, n2), nrow = 2)) } testStatistic <- .getTestStatisticsRates(designNumber = designNumber, informationRates = informationRates, groups = groups, riskRatio = riskRatio, thetaH0 = thetaH0, directionUpper = directionUpper, eventsPerStage = eventsPerStage, sampleSizesPerStage = sampleSizesPerStage, testStatisticsPerStage = testStatisticsPerStage) testStatisticsPerStage <- c(testStatisticsPerStage, testStatistic$testStatisticsPerStage[k]) simulatedRejections <- 0 simulatedFutilityStop <- 0 trialStop <- FALSE if (k == kMax) { trialStop <- TRUE } if (designNumber <= 2) { if (testStatistic$value >= criticalValues[k]) { simulatedRejections <- 1 trialStop <- TRUE } # add small number to avoid ties if (k < kMax && testStatistic$value <= futilityBounds[k]) { simulatedFutilityStop <- 1 trialStop <- TRUE } } else { if (testStatistic$value <= criticalValues[k]) { simulatedRejections <- 1 trialStop <- TRUE } if (k < kMax && testStatistic$pValuesSeparate >= alpha0Vec[k]) { simulatedFutilityStop <- 1 trialStop <- TRUE } } return(list( trialStop = trialStop, sampleSizesPerStage = sampleSizesPerStage, eventsPerStage = eventsPerStage, testStatisticsPerStage = testStatisticsPerStage, testStatistic = testStatistic, simulatedSubjects = stageSubjects, simulatedRejections = simulatedRejections, simulatedFutilityStop = simulatedFutilityStop, simulatedConditionalPower = simulatedConditionalPower )) } #' @title #' Get Simulation Rates #' #' @description #' Returns the simulated power, stopping probabilities, conditional power, and expected sample size for #' testing rates in a one or two treatment groups testing situation. #' #' @param design The trial design. If no trial design is specified, a fixed sample size design is used. #' In this case, \code{alpha}, \code{beta}, and \code{sided} can be directly entered as argument. #' @param groups The number of treatment groups (1 or 2), default is \code{2}. #' @param riskRatio If \code{riskRatio = TRUE} is specified, the design characteristics for #' one-sided testing of H0: pi1/pi2 = thetaH0 are simulated, default is \code{FALSE}. #' @param thetaH0 The null hypothesis value. 
For one-sided testing, a value != 0 #' (or a value != 1 for testing the mean ratio) can be specified, default is #' \code{0} or \code{1} for difference and ratio testing, respectively. #' @param pi1 The assumed probability in the active treatment group if two treatment groups #' are considered, or the alternative probability for a one treatment group design, #' default is \code{seq(0.2,0.5,0.1)}. #' @param pi2 The assumed probability in the reference group if two treatment groups are considered, default is \code{0.2}. #' @param directionUpper Specifies the direction of the alternative, only applicable #' for one-sided testing, default is \code{TRUE}. #' @param allocationRatioPlanned The planned allocation ratio for a two treatment groups #' design, default is \code{1}. #' @param plannedSubjects \code{plannedSubjects} is a vector of length kMax (the number of stages of the design) #' that determines the number of cumulated (overall) subjects when the interim stages are planned. #' @param minNumberOfSubjectsPerStage When performing a data driven sample size recalculation, #' the vector with length kMax \code{minNumberOfSubjectsPerStage} determines the #' minimum number of subjects per stage (i.e., not cumulated), the first element #' is not taken into account. #' @param maxNumberOfSubjectsPerStage When performing a data driven sample size recalculation, #' the vector with length kMax \code{maxNumberOfSubjectsPerStage} determines the maximum number #' of subjects per stage (i.e., not cumulated), the first element is not taken into account. #' @param conditionalPower The conditional power for the subsequent stage under which the sample size recalculation is performed. #' @param pi1H1 If specified, the assumed probability in the active treatment group if two treatment groups #' are considered, or the assumed probability for a one treatment group design, for which the conditional #' power was calculated. #' @param pi2H1 If specified, the assumed probability in the reference group if two treatment groups #' are considered, for which the conditional power was calculated, default is \code{0.2}. #' @param maxNumberOfIterations The number of simulation iterations. #' @param calcSubjectsFunction Optionally, a function can be entered that defines the way of performing the sample size #' recalculation. By default, sample size recalulation is performed with conditional power and specified #' \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (see details #' and examples). #' @param seed The seed to reproduce the simulation, default is a random seed. #' @param ... Ensures that all arguments are be named and #' that a warning will be displayed if unknown arguments are passed. #' #' @details #' At given design the function simulates the power, stopping probabilities, conditional power, and expected #' sample size at given number of subjects and parameter configuration. #' Additionally, an allocation ratio = n1/n2 can be specified where n1 and n2 are the number #' of subjects in the two treatment groups. #' #' calcSubjectsFunction\cr #' This function returns the number of subjects at given conditional power and conditional Type I error rate for specified #' testing situation. 
The function might depend on the variables \code{stage}, \code{riskRatio}, \code{thetaH0}, \code{groups}, #' \code{plannedSubjects}, \code{directionUpper}, \code{allocationRatioPlanned}, \code{minNumberOfSubjectsPerStage}, #' \code{maxNumberOfSubjectsPerStage}, \code{sampleSizesPerStage}, \code{conditionalPower}, #' \code{conditionalCriticalValue}, \code{overallRate}, \code{farringtonManningValue1}, and \code{farringtonManningValue2}. #' The function has to contain the three-dots argument '...' (see examples). #' #' #' @section Simulation Data: #' The summary statistics "Simulated data" contain the following parameters: median [range]; mean +/- sd\cr #' #' \code{$show(showStatistics = FALSE)} or \code{$setShowStatistics(FALSE)} can be used to disable #' the output of the aggregated simulated data.\cr #' #' Example 1: \cr #' \code{simulationResults <- getSimulationRates(plannedSubjects = 40)} \cr #' \code{simulationResults$show(showStatistics = FALSE)}\cr #' #' Example 2: \cr #' \code{simulationResults <- getSimulationRates(plannedSubjects = 40)} \cr #' \code{simulationResults$setShowStatistics(FALSE)}\cr #' \code{simulationResults}\cr #' #' \code{\link{getData}} can be used to get the aggregated simulated data from the #' object as \code{\link[base]{data.frame}}. The data frame contains the following columns: #' \enumerate{ #' \item \code{iterationNumber}: The number of the simulation iteration. #' \item \code{stageNumber}: The stage. #' \item \code{pi1}: The assumed or derived event rate in the treatment group (if available). #' \item \code{pi2}: The assumed or derived event rate in the control group (if available). #' \item \code{numberOfSubjects}: The number of subjects under consideration when the #' (interim) analysis takes place. #' \item \code{rejectPerStage}: 1 if null hypothesis can be rejected, 0 otherwise. #' \item \code{futilityPerStage}: 1 if study should be stopped for futility, 0 otherwise. #' \item \code{testStatistic}: The test statistic that is used for the test decision, #' depends on which design was chosen (group sequential, inverse normal, #' or Fisher combination test). #' \item \code{testStatisticsPerStage}: The test statistic for each stage if only data from #' the considered stage is taken into account. #' \item \code{overallRates1}: The overall rate in treatment group 1. #' \item \code{overallRates2}: The overall rate in treatment group 2. #' \item \code{stagewiseRates1}: The stagewise rate in treatment group 1. #' \item \code{stagewiseRates2}: The stagewise rate in treatment group 2. #' \item \code{sampleSizesPerStage1}: The stagewise sample size in treatment group 1. #' \item \code{sampleSizesPerStage2}: The stagewise sample size in treatment group 2. #' \item \code{trialStop}: \code{TRUE} if study should be stopped for efficacy or futility or final stage, \code{FALSE} otherwise. #' \item \code{conditionalPowerAchieved}: The conditional power for the subsequent stage of the trial for #' selected sample size and effect. The effect is either estimated from the data or can be #' user defined with \code{pi1H1} and \code{pi2H1}. #' } #' #' @return Returns a \code{\link{SimulationResultsRates}} object.
#' #' @export #' #' @examples #' #' # Fixed sample size with minimum required definitions, pi1 = (0.3,0.4,0.5, 0.6) and pi2 = 0.3 #' getSimulationRates(pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, #' plannedSubjects = 120, maxNumberOfIterations = 50) #' #' \donttest{ #' #' # Increase number of simulation iterations and compare results with power calculator #' getSimulationRates(pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, #' plannedSubjects = 120, maxNumberOfIterations = 50) #' getPowerRates(pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, maxNumberOfSubjects = 120) #' #' # Do the same for a two-stage Pocock inverse normal group sequential #' # design with non-binding futility stops #' designIN <- getDesignInverseNormal(typeOfDesign = "P", futilityBounds = c(0)) #' getSimulationRates(designIN, pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, #' plannedSubjects = c(40, 80), maxNumberOfIterations = 50) #' getPowerRates(designIN, pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, maxNumberOfSubjects = 80) #' #' # Assess power and average sample size if a sample size reassessment is #' # foreseen at conditional power 80% for the subsequent stage (decrease and increase) #' # based on observed overall rates and specified minNumberOfSubjectsPerStage #' # and maxNumberOfSubjectsPerStage #' #' # Do the same under the assumption that a sample size increase only takes place #' # if the rate difference exceeds the value 0.1 at interim. For this, the sample #' # size recalculation method needs to be redefined: #' mySampleSizeCalculationFunction <- function(..., stage, #' plannedSubjects, #' minNumberOfSubjectsPerStage, #' maxNumberOfSubjectsPerStage, #' conditionalPower, #' conditionalCriticalValue, #' overallRate) { #' if (overallRate[1] - overallRate[2] < 0.1) { #' return(plannedSubjects[stage] - plannedSubjects[stage - 1]) #' } else { #' rateUnderH0 <- (overallRate[1] + overallRate[2]) / 2 #' stageSubjects <- 2 * (max(0, conditionalCriticalValue * #' sqrt(2 * rateUnderH0 * (1 - rateUnderH0)) + #' stats::qnorm(conditionalPower) * sqrt(overallRate[1] * #' (1 - overallRate[1]) + overallRate[2] * (1 - overallRate[2]))))^2 / #' (max(1e-12, (overallRate[1] - overallRate[2])))^2 #' stageSubjects <- ceiling(min(max( #' minNumberOfSubjectsPerStage[stage], #' stageSubjects), maxNumberOfSubjectsPerStage[stage])) #' return(stageSubjects) #' } #' } #' getSimulationRates(designIN, pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, #' plannedSubjects = c(40, 80), minNumberOfSubjectsPerStage = c(40, 20), #' maxNumberOfSubjectsPerStage = c(40, 160), conditionalPower = 0.8, #' calcSubjectsFunction = mySampleSizeCalculationFunction, maxNumberOfIterations = 50) #' #' } #' getSimulationRates <- function(design = NULL, ..., groups = 2L, riskRatio = FALSE, thetaH0 = ifelse(riskRatio, 1, 0), pi1 = C_PI_1_DEFAULT, pi2 = NA_real_, plannedSubjects = NA_real_, directionUpper = C_DIRECTION_UPPER_DEFAULT, allocationRatioPlanned = NA_real_, minNumberOfSubjectsPerStage = NA_real_, maxNumberOfSubjectsPerStage = NA_real_, conditionalPower = NA_real_, pi1H1 = NA_real_, pi2H1 = 0.2, maxNumberOfIterations = C_MAX_SIMULATION_ITERATIONS_DEFAULT, seed = NA_real_, calcSubjectsFunction = NULL) { if (is.null(design)) { design <- .getDefaultDesignForSampleSizeCalculations(...) .warnInCaseOfUnknownArguments(functionName = "getSimulationRates", ignore = c("alpha", "beta", "sided", "twoSidedPower"), ...) } else { .assertIsTrialDesign(design) .warnInCaseOfUnknownArguments(functionName = "getSimulationRates", ...) .warnInCaseOfTwoSidedPowerArgument(...) 
} .assertIsSingleLogical(directionUpper, "directionUpper") .assertIsSingleNumber(thetaH0, "thetaH0") if (groups == 1) { .assertIsInOpenInterval(thetaH0, "thetaH0", 0, 1, naAllowed = FALSE) } else { if (riskRatio) { .assertIsInOpenInterval(thetaH0, "thetaH0", 0, NULL, naAllowed = TRUE) } else { .assertIsInOpenInterval(thetaH0, "thetaH0", -1, 1, naAllowed = TRUE) } } .assertIsNumericVector(pi1, "pi1", naAllowed = FALSE) .assertIsInOpenInterval(pi1, "pi1", 0, 1, naAllowed = FALSE) .assertIsNumericVector(pi2, "pi2", naAllowed = TRUE) .assertIsInOpenInterval(pi2, "pi2", 0, 1, naAllowed = TRUE) .assertIsNumericVector(minNumberOfSubjectsPerStage, "minNumberOfSubjectsPerStage", naAllowed = TRUE) .assertIsNumericVector(maxNumberOfSubjectsPerStage, "maxNumberOfSubjectsPerStage", naAllowed = TRUE) .assertIsSingleNumber(conditionalPower, "conditionalPower", naAllowed = TRUE) .assertIsInOpenInterval(conditionalPower, "conditionalPower", 0, 1, naAllowed = TRUE) .assertIsSingleNumber(pi1H1, "pi1H1", naAllowed = TRUE) .assertIsInOpenInterval(pi1H1, "pi1H1", 0, 1, naAllowed = TRUE) .assertIsSingleNumber(pi2H1, "pi2H1", naAllowed = TRUE) .assertIsInOpenInterval(pi2H1, "pi2H1", 0, 1, naAllowed = TRUE) .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned", naAllowed = TRUE) .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, NULL, naAllowed = TRUE) .assertIsSinglePositiveInteger(maxNumberOfIterations, "maxNumberOfIterations", validateType = FALSE) .assertIsSingleNumber(seed, "seed", naAllowed = TRUE) if (is.null(calcSubjectsFunction)) { calcSubjectsFunction <- .getSimulationRatesStageSubjects } .assertIsValidFunction(fun = calcSubjectsFunction, funArgName = "calcSubjectsFunction", expectedFunction = .getSimulationRatesStageSubjects) if (design$sided == 2) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "only one-sided case is implemented for the simulation design") } seed <- .setSeed(seed) simulationResults <- SimulationResultsRates(design) .setValueAndParameterType(simulationResults, "pi2", pi2, NA_real_) .setValueAndParameterType(simulationResults, "allocationRatioPlanned", allocationRatioPlanned, NA_real_) if (groups == 1) { if (isTRUE(riskRatio)) { warning("'riskRatio' (", riskRatio, ") will be ignored ", "because it is not applicable for 'groups' = 1", call. = FALSE) } if (!is.na(allocationRatioPlanned)) { warning("'allocationRatioPlanned' (", allocationRatioPlanned, ") will be ignored because it is not applicable for 'groups' = 1", call. = FALSE) simulationResults$allocationRatioPlanned <- NA_real_ } simulationResults$.setParameterType("allocationRatioPlanned", C_PARAM_NOT_APPLICABLE) if (!is.na(pi2)) { warning("'pi2' (", pi2, ") will be ignored because it is not applicable for 'groups' = 1", call. 
= FALSE) simulationResults$pi2 <- NA_real_ } simulationResults$.setParameterType("pi2", C_PARAM_NOT_APPLICABLE) } else { if (is.na(allocationRatioPlanned)) { allocationRatioPlanned <- C_ALLOCATION_RATIO_DEFAULT simulationResults$allocationRatioPlanned <- allocationRatioPlanned simulationResults$.setParameterType("allocationRatioPlanned", C_PARAM_DEFAULT_VALUE) } if (is.na(pi2)) { pi2 <- C_PI_2_DEFAULT simulationResults$pi2 <- pi2 simulationResults$.setParameterType("pi2", C_PARAM_DEFAULT_VALUE) } } if (groups == 1) { effect <- pi1 - thetaH0 } else { if (riskRatio) { effect <- pi1 / pi2 - thetaH0 } else { effect <- pi1 - pi2 - thetaH0 } } simulationResults$effect <- effect minNumberOfSubjectsPerStage <- .assertIsValidMinNumberOfSubjectsPerStage(minNumberOfSubjectsPerStage, "minNumberOfSubjectsPerStage", plannedSubjects, conditionalPower, design$kMax) maxNumberOfSubjectsPerStage <- .assertIsValidMinNumberOfSubjectsPerStage(maxNumberOfSubjectsPerStage, "maxNumberOfSubjectsPerStage", plannedSubjects, conditionalPower, design$kMax) if (!is.na(conditionalPower)) { if (design$kMax > 1) { if (any(maxNumberOfSubjectsPerStage - minNumberOfSubjectsPerStage < 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'maxNumberOfSubjectsPerStage' (", .arrayToString(maxNumberOfSubjectsPerStage), ") must not be smaller than 'minNumberOfSubjectsPerStage' (", .arrayToString(minNumberOfSubjectsPerStage), ")") } .setValueAndParameterType(simulationResults, "minNumberOfSubjectsPerStage", minNumberOfSubjectsPerStage, NA_real_) .setValueAndParameterType(simulationResults, "maxNumberOfSubjectsPerStage", maxNumberOfSubjectsPerStage, NA_real_) } else { warning("'conditionalPower' will be ignored for fixed sample design", call. = FALSE) } } else { if (length(minNumberOfSubjectsPerStage) != 1 || !is.na(minNumberOfSubjectsPerStage)) { warning("'minNumberOfSubjectsPerStage' (", .arrayToString(minNumberOfSubjectsPerStage), ") ", "will be ignored because no 'conditionalPower' is defined", call. = FALSE) simulationResults$minNumberOfSubjectsPerStage <- NA_real_ } if (length(maxNumberOfSubjectsPerStage) != 1 || !is.na(maxNumberOfSubjectsPerStage)) { warning("'maxNumberOfSubjectsPerStage' (", .arrayToString(maxNumberOfSubjectsPerStage), ") ", "will be ignored because no 'conditionalPower' is defined", call.
= FALSE) simulationResults$maxNumberOfSubjectsPerStage <- NA_real_ } } .assertIsIntegerVector(plannedSubjects, "plannedSubjects", validateType = FALSE) if (length(plannedSubjects) != design$kMax) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'plannedSubjects' (", .arrayToString(plannedSubjects), ") must have length ", design$kMax) } .assertIsInClosedInterval(plannedSubjects, "plannedSubjects", lower = 1, upper = NULL) .assertValuesAreStrictlyIncreasing(plannedSubjects, "plannedSubjects") .setValueAndParameterType(simulationResults, "riskRatio", riskRatio, FALSE) .setValueAndParameterType(simulationResults, "thetaH0", thetaH0, ifelse(riskRatio, 1, 0)) .setValueAndParameterType(simulationResults, "pi1", pi1, C_PI_1_DEFAULT) .setValueAndParameterType(simulationResults, "groups", as.integer(groups), 2L) .setValueAndParameterType(simulationResults, "plannedSubjects", plannedSubjects, NA_real_) .setValueAndParameterType(simulationResults, "directionUpper", directionUpper, C_DIRECTION_UPPER_DEFAULT) .setValueAndParameterType(simulationResults, "minNumberOfSubjectsPerStage", minNumberOfSubjectsPerStage, NA_real_, notApplicableIfNA = TRUE) .setValueAndParameterType(simulationResults, "maxNumberOfSubjectsPerStage", maxNumberOfSubjectsPerStage, NA_real_, notApplicableIfNA = TRUE) .setValueAndParameterType(simulationResults, "conditionalPower", conditionalPower, NA_real_, notApplicableIfNA = TRUE) .setValueAndParameterType(simulationResults, "pi1H1", pi1H1, NA_real_, notApplicableIfNA = TRUE) .setValueAndParameterType(simulationResults, "pi2H1", pi2H1, 0.2, notApplicableIfNA = TRUE) .setValueAndParameterType(simulationResults, "maxNumberOfIterations", as.integer(maxNumberOfIterations), C_MAX_SIMULATION_ITERATIONS_DEFAULT) .setValueAndParameterType(simulationResults, "seed", seed, NA_real_) if (.isTrialDesignGroupSequential(design)) { designNumber <- 1L } else if (.isTrialDesignInverseNormal(design)) { designNumber <- 2L } else if (.isTrialDesignFisher(design)) { designNumber <- 3L } if (.isTrialDesignFisher(design)) { alpha0Vec <- design$alpha0Vec futilityBounds <- rep(NA_real_, design$kMax - 1) } else { alpha0Vec <- rep(NA_real_, design$kMax - 1) futilityBounds <- design$futilityBounds } informationRates <- design$informationRates criticalValues <- design$criticalValues kMax <- design$kMax cols <- length(pi1) sampleSizes <- matrix(0, kMax, cols) rejectPerStage <- matrix(0, kMax, cols) overallReject <- rep(0, cols) futilityPerStage <- matrix(0, kMax - 1, cols) futilityStop <- rep(0, cols) iterations <- matrix(0, kMax, cols) expectedNumberOfSubjects <- rep(0, cols) conditionalPowerAchieved <- matrix(NA_real_, kMax, cols) len <- length(pi1) * maxNumberOfIterations * kMax dataIterationNumber <- rep(NA_real_, len) dataStageNumber <- rep(NA_real_, len) dataPi1 <- rep(NA_real_, len) dataPi2 <- rep(pi2, len) dataNumberOfSubjects <- rep(NA_real_, len) dataRejectPerStage <- rep(NA_real_, len) dataFutilityPerStage <- rep(NA_real_, len) dataTestStatistic <- rep(NA_real_, len) dataTestStatisticsPerStage <- rep(NA_real_, len) dataOverallRates1 <- rep(NA_real_, len) dataOverallRates2 <- rep(NA_real_, len) dataStagewiseRates1 <- rep(NA_real_, len) dataStagewiseRates2 <- rep(NA_real_, len) dataSampleSizesPerStage1 <- rep(NA_real_, len) dataSampleSizesPerStage2 <- rep(NA_real_, len) dataTrialStop <- rep(NA, len) dataConditionalPowerAchieved <- rep(NA_real_, len) if (designNumber != 1L) { dataPValuesSeparate <- rep(NA_real_, len) } index <- 1 for (i in 1:length(pi1)) { simulatedSubjects <- rep(0, kMax)
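# Per-scenario accumulators: stage-wise subject numbers, rejections, futility stops, and conditional power are summed over all iterations here and averaged after the iteration loop below.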
simulatedOverallSubjects <- rep(0, kMax) simulatedRejections <- rep(0, kMax) simulatedFutilityStop <- rep(0, kMax - 1) simulatedOverallSubjects <- 0 simulatedConditionalPower <- rep(0, kMax) for (j in 1:maxNumberOfIterations) { trialStop <- FALSE sampleSizesPerStage <- matrix(rep(numeric(0), 2), nrow = groups) eventsPerStage <- matrix(rep(numeric(0), 2), nrow = groups) testStatisticsPerStage <- c() testStatistic <- NULL for (k in 1:kMax) { if (!trialStop) { stepResult <- .getSimulationStepRates( k = k, kMax = kMax, designNumber = designNumber, informationRates = informationRates, futilityBounds = futilityBounds, alpha0Vec = alpha0Vec, criticalValues = criticalValues, riskRatio = riskRatio, thetaH0 = thetaH0, pi1 = pi1[i], pi2 = pi2, groups = groups, plannedSubjects = plannedSubjects, directionUpper = directionUpper, allocationRatioPlanned = allocationRatioPlanned, minNumberOfSubjectsPerStage = minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage = maxNumberOfSubjectsPerStage, conditionalPower = conditionalPower, pi1H1 = pi1H1, pi2H1 = pi2H1, sampleSizesPerStage = sampleSizesPerStage, eventsPerStage = eventsPerStage, testStatisticsPerStage = testStatisticsPerStage, testStatistic = testStatistic, calcSubjectsFunction = calcSubjectsFunction) trialStop <- stepResult$trialStop sampleSizesPerStage <- stepResult$sampleSizesPerStage eventsPerStage <- stepResult$eventsPerStage testStatisticsPerStage <- stepResult$testStatisticsPerStage testStatistic <- stepResult$testStatistic simulatedSubjectsStep <- stepResult$simulatedSubjects simulatedRejectionsStep <- stepResult$simulatedRejections simulatedFutilityStopStep <- stepResult$simulatedFutilityStop simulatedConditionalPowerStep <- NA_real_ if (k > 1) { simulatedConditionalPowerStep <- stepResult$simulatedConditionalPower } iterations[k, i] <- iterations[k, i] + 1 simulatedSubjects[k] <- simulatedSubjects[k] + simulatedSubjectsStep simulatedRejections[k] <- simulatedRejections[k] + simulatedRejectionsStep if (k < kMax) { simulatedFutilityStop[k] <- simulatedFutilityStop[k] + simulatedFutilityStopStep } simulatedConditionalPower[k] <- simulatedConditionalPower[k] + simulatedConditionalPowerStep dataIterationNumber[index] <- j dataStageNumber[index] <- k dataPi1[index] <- pi1[i] dataNumberOfSubjects[index] <- simulatedSubjectsStep dataRejectPerStage[index] <- simulatedRejectionsStep dataFutilityPerStage[index] <- simulatedFutilityStopStep dataTestStatistic[index] <- testStatistic$value dataTestStatisticsPerStage[index] <- testStatisticsPerStage[k] dataOverallRates1[index] <- testStatistic$overallRates[1] dataStagewiseRates1[index] <- testStatistic$stagewiseRates[1] dataSampleSizesPerStage1[index] <- testStatistic$sampleSizesPerStage[1] if (length(testStatistic$stagewiseRates) > 1) { dataOverallRates2[index] <- testStatistic$overallRates[2] dataStagewiseRates2[index] <- testStatistic$stagewiseRates[2] dataSampleSizesPerStage2[index] <- testStatistic$sampleSizesPerStage[2] } else { dataStagewiseRates2[index] <- NA_real_ dataOverallRates2[index] <- NA_real_ dataSampleSizesPerStage2[index] <- NA_real_ } dataTrialStop[index] <- trialStop dataConditionalPowerAchieved[index] <- simulatedConditionalPowerStep if (designNumber != 1L) { dataPValuesSeparate[index] <- testStatistic$pValuesSeparate[k] } index <- index + 1 } } } simulatedOverallSubjects <- sum(simulatedSubjects[1:k]) sampleSizes[, i] <- simulatedSubjects / iterations[, i] rejectPerStage[, i] <- simulatedRejections / maxNumberOfIterations overallReject[i] <- sum(simulatedRejections / 
maxNumberOfIterations) futilityPerStage[, i] <- simulatedFutilityStop / maxNumberOfIterations futilityStop[i] <- sum(simulatedFutilityStop / maxNumberOfIterations) expectedNumberOfSubjects[i] <- simulatedOverallSubjects / maxNumberOfIterations if (kMax > 1) { conditionalPowerAchieved[2:kMax, i] <- simulatedConditionalPower[2:kMax] / iterations[2:kMax, i] } } simulationResults$iterations <- iterations simulationResults$sampleSizes <- sampleSizes simulationResults$rejectPerStage <- rejectPerStage simulationResults$overallReject <- overallReject simulationResults$futilityPerStage <- futilityPerStage simulationResults$futilityStop <- futilityStop if (kMax > 1) { if (length(pi1) == 1) { simulationResults$earlyStop <- sum(futilityPerStage) + sum(rejectPerStage[1:(kMax - 1)]) } else { if (kMax > 2) { rejectPerStageColSum <- colSums(rejectPerStage[1:(kMax - 1), ]) } else { rejectPerStageColSum <- rejectPerStage[1, ] } simulationResults$earlyStop <- colSums(futilityPerStage) + rejectPerStageColSum } } else { simulationResults$earlyStop <- rep(0, length(pi1)) } simulationResults$expectedNumberOfSubjects <- expectedNumberOfSubjects simulationResults$conditionalPowerAchieved <- conditionalPowerAchieved if (!all(is.na(simulationResults$conditionalPowerAchieved))) { simulationResults$.setParameterType("conditionalPowerAchieved", C_PARAM_GENERATED) } data <- data.frame( iterationNumber = dataIterationNumber, stageNumber = dataStageNumber, pi1 = dataPi1, pi2 = dataPi2, numberOfSubjects = dataNumberOfSubjects, rejectPerStage = dataRejectPerStage, futilityPerStage = dataFutilityPerStage, testStatistic = dataTestStatistic, testStatisticsPerStage = dataTestStatisticsPerStage, overallRates1 = dataOverallRates1, overallRates2 = dataOverallRates2, stagewiseRates1 = dataStagewiseRates1, stagewiseRates2 = dataStagewiseRates2, sampleSizesPerStage1 = dataSampleSizesPerStage1, sampleSizesPerStage2 = dataSampleSizesPerStage2, trialStop = dataTrialStop, conditionalPowerAchieved = round(dataConditionalPowerAchieved, 6) ) if (designNumber == 3L) { data$pValue <- dataPValuesSeparate } data <- data[!is.na(data$pi1), ] simulationResults$.data <- data return(simulationResults) } rpact/R/f_core_output_formats.R0000644000176200001440000002501613556013540016325 0ustar liggesusers###################################################################################### # # # -- RPACT output formats -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### .getFormattedValue <- function(value, digits, nsmall = NA_integer_, futilityProbabilities = FALSE) { if (missing(value)) { return("NA") } if (is.null(value) || length(value) == 0) { return(value) } if (!is.numeric(value)) { stop("'value' must be a numeric vector") } if (futilityProbabilities) { value[value >= 0 & value < 1e-09] <- 0 # only futility probabilities } if (is.na(nsmall)) { formattedValue <- format(value, digits = digits, justify = "left", trim = TRUE) } else { formattedValue <- format(value, digits = digits, nsmall = nsmall, justify = "left", trim = TRUE) } if (futilityProbabilities) { formattedValue[value == 0] <- "0" } #formattedValue <- trimws(formattedValue) return(formattedValue) } .getZeroCorrectedValue <- function(value) { if (is.numeric(value)) { value[abs(value) < 1e-08] <- 0 } return(value) } .getPValueDecimalPlaces <- function(value) { value <- stats::na.omit(value) if (length(value) == 0) { return(4) } fv <- .getFormattedValue(value[value >= 1e-4], digits = 4, nsmall = 4) fv <- fv[!((1:length(fv)) %in% grep("e", fv))] numberOfCharacters <- ifelse(length(fv) > 0, nchar(fv[1]), 6) numberOfCharacters <- ifelse(numberOfCharacters < 6, 6, numberOfCharacters) decimalPlaces <- numberOfCharacters - 2 return(decimalPlaces) } # # @title # Format P Values # # @description # Formats the output of p-values. # # @details # Digits = 4, nsmall = 4. # Replaces p-values in scientific format (e.g., 1e-07) by a non-scientific format (e.g., <0.00001). # # @param value a vector of p-values. # # @keywords internal # formatPValues <- function(value) { if (sum(is.na(value)) == length(value)) { return(value) } decimalPlaces <- .getPValueDecimalPlaces(value) if (is.na(decimalPlaces) || is.nan(decimalPlaces)) { decimalPlaces <- 4 } else if (decimalPlaces > 4) { decimalPlaces <- decimalPlaces - 1 } threshold <- 10^-decimalPlaces text <- "<0." for (i in 1:(decimalPlaces - 1)) { text <- paste0(text, "0") } text <- paste0(text, "1") indices <- (value < threshold) value[indices] <- threshold formattedValue <- .getFormattedValue(value, digits = 4, nsmall = 4) formattedValue[indices] <- text return(formattedValue) } # # @title # Format Repeated P Values # # @description # Formats the output of repeated p-values. # # @details # If p-value > 0.4999 then ">0.5" will be returned. # # @param value a vector of p-values. # # @keywords internal # formatRepeatedPValues <- function(value) { pValues <- formatPValues(value) pValues[value > 0.4999] <- ">0.5" #pValues[value < 1e-05] <- "<1e-05" return(pValues) } # # @title # Format Probabilities # # @description # Formats the output of probabilities. # # @details # Digits = 4, nsmall = 4 # # @keywords internal # formatProbabilities <- function(value) { return(.getFormattedValue(value, digits = 4, nsmall = 4)) } # # @title # Format Sample Sizes # # @description # Formats the output of sample sizes.
# # @details # Digits = 1, nsmall = 1 # # @keywords internal # formatSampleSizes <- function(value) { return(.getFormattedValue(value, digits = 1, nsmall = 1)) } # # @title # Format Conditional Power # # @description # Formats the output of conditional power. # # @details # Digits = 4 # # @keywords internal # formatConditionalPower <- function(value) { value <- round(value, digits = 4) conditionalPower <- .getFormattedValue(value, digits = 4) conditionalPower[value == 0] <- "0" #conditionalPower <- sprintf("%.4f", value) return(conditionalPower) } # # @title # Format Futility Probabilities # # @description # Formats the output of futility probabilities. # # @details # Digits = 4, nsmall = 4 # # @keywords internal # formatFutilityProbabilities <- function(value) { return(.getFormattedValue(value, digits = 4, nsmall = 4, futilityProbabilities = TRUE)) } # # @title # Format Group Sequential Critical Values # # @description # Formats the output of group sequential critical values. # # @details # Digits = 3, nsmall = 3 # # @keywords internal # formatGroupSequentialCriticalValues <- function(value) { value[value == C_FUTILITY_BOUNDS_DEFAULT] <- -Inf return(.getFormattedValue(value, digits = 3, nsmall = 3)) } # # @title # Format Fisher Critical Values # # @description # Formats the output of Fisher's combination critical values. # # @details # Digits = 4 # # @keywords internal # formatFisherCriticalValues <- function(value) { return(.getFormattedValue(value, digits = 4)) } # # @title # Format Fisher Test Statistics # # @description # Formats the output of Fisher's combination test statistics. # # @details # Digits = 4 # # @keywords internal # formatFisherTestStatistics <- function(value) { return(.getFormattedValue(value, digits = 4)) } # # @title # Format Test Statistics # # @description # Formats the output of test statistics (e.g., inverse normal). # # @details # Digits = 3, nsmall = 3 # # @keywords internal # formatTestStatistics <- function(value) { return(.getFormattedValue(value, digits = 3, nsmall = 3)) } # # @title # Format Rates # # @description # Formats the output of rates. # # @details # Digits = 3, nsmall = 3 # # @keywords internal # formatRates <- function(value) { return(.getFormattedValue(value, digits = 3, nsmall = 3)) } # # @title # Format Rates Dynamic # # @description # Formats the output of rates. # # @details # Digits = 1, nsmall = 1 if all values >= 1; digits = 3, nsmall = 3 otherwise # # @keywords internal # formatRatesDynamic <- function(value) { if (!any(is.na(value)) && all(value >= 1)) { return(.getFormattedValue(value, digits = 1, nsmall = 1)) } return(.getFormattedValue(value, digits = 3, nsmall = 3)) } # # @title # Format Accrual Intensities # # @description # Formats the output of accrual intensities. # # @details # Digits = 2, nsmall = 1 # # @keywords internal # formatAccrualIntensities <- function(value) { return(.getFormattedValue(value, digits = 2, nsmall = 1)) } # # @title # Format Means # # @description # Formats the output of means. # # @details # Digits = 4 # # @keywords internal # formatMeans <- function(value) { return(.getFormattedValue(value, digits = 4)) } # # @title # Format Ratios # # @description # Formats the output of ratios. # # @details # Digits = 3 # # @keywords internal # formatRatios <- function(value) { return(.getFormattedValue(value, digits = 3)) } # # @title # Format StDevs # # @description # Formats the output of standard deviations.
# # @details # Digits = 4 # # @keywords internal # formatStDevs <- function(value) { return(.getFormattedValue(value, digits = 4)) } # # @title # Format Double # # @description # Formats the output of double values. # # @details # Digits = 3 # # @keywords internal # formatDouble <- function(value) { return(.getFormattedValue(.getZeroCorrectedValue(value), digits = 3)) } # # @title # Format Durations # # @description # Formats the output of study durations. # # @details # Digits = 2, nsmall = 2 # # @keywords internal # formatDurations <- function(value) { #return(sprintf("%.2f", value)) return(.getFormattedValue(value, digits = 2, nsmall = 2)) } # # @title # Format Time # # @description # Formats the output of time values, e.g., months. # # @details # Digits = 2, nsmall = 2 # # @keywords internal # formatTime <- function(value) { #return(sprintf("%.2f", value)) return(.getFormattedValue(value, digits = 2, nsmall = 2)) } # # @title # Format Simulation Output # # @description # Formats the output of simulations. # # @details # Digits = 3 # # @keywords internal # formatSimulationOutput <- function(value) { return(.getFormattedValue(.getZeroCorrectedValue(value), digits = 3)) } # # @title # Format Variable Name # # @description # Formats a variable name. # # @details # An optional prefix and postfix can be added. # # @keywords internal # formatVariableName <- function(name, n, prefix = "", postfix = "") { if (!is.character(name)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'name' must be of type 'character' (is '", class(name), "')") } if (!is.numeric(n)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'n' must be of type 'numeric' (is '", class(n), "')") } if (n < 1 || n > 300) { stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'n' (", n, ") is out of bounds [1; 300]") } if (nchar(prefix) > 0) { name <- paste(prefix, name) } if (nchar(postfix) > 0) { name <- paste(name, postfix) } while (nchar(name) < n) { name <- paste0(name, " ") } name <- paste0(" ", name, " :") return(name) } formatHowItIs <- function(value) { return(format(value, scientific = FALSE)) } rpact/R/f_analysis_base.R0000644000176200001440000017355113574406666015060 0ustar liggesusers###################################################################################### # # # -- Analysis functions -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.1 # # Date: 03-12-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### #' @title #' Get Analysis Results #' #' @description #' Calculates and returns the analysis results for the specified design and data. #' @param design The trial design. #' @param dataInput The summary data used for calculating the test results. #' This is either an element of \code{DatasetMeans}, of \code{DatasetRates}, or of \code{DatasetSurvival}. #' For more information see \code{details} below. #' @param directionUpper The direction of one-sided testing. #' Default is \code{directionUpper = TRUE} which means that larger values of the #' test statistics yield smaller p-values.
#' @param thetaH0 The null hypothesis value, default is 0 for the normal and the binary case, #' it is 1 for the survival case. #' For testing a rate in one sample, a value thetaH0 in (0, 1) has to be specified for #' defining the null hypothesis H0: pi = thetaH0.\cr #' For non-inferiority designs, this is the non-inferiority bound. #' @param nPlanned The sample size planned for the subsequent stages. #' It should be a vector with length equal to the remaining stages and is the #' overall sample size in the two treatment groups if two groups are considered. #' @param ... Further arguments to be passed to methods (cp. separate functions in See Also), e.g., #' \describe{ #' \item{stage}{The stage number (optional). Default: total number of existing stages in the data input.} #' \item{allocationRatioPlanned}{The allocation ratio n1/n2 for two treatment groups planned for #' the subsequent stages, the default value is 1.} #' \item{thetaH1 and assumedStDev or pi1, pi2}{The assumed effect size or assumed rates to calculate the #' conditional power. Depending on the type of dataset, either thetaH1 (means and survival) #' or pi1, pi2 (rates) can be specified. Additionally, if testing means is specified, #' an assumed standard deviation can be specified, default is 1.} #' \item{normalApproximation}{The type of computation of the p-values. Default is FALSE for #' testing means (i.e., the t test is used) and TRUE for testing rates and the hazard ratio. #' For testing rates, if \cr #' \code{normalApproximation = FALSE} is specified, the binomial test #' (one sample) or the test of Fisher (two samples) is used for calculating the p-values. #' In the survival setting, \cr #' \code{normalApproximation = FALSE} has no effect.} #' \item{equalVariances}{The type of t test. For testing means in two treatment groups, either #' the t test assuming that the variances are equal or the t test without assuming this, #' i.e., the test of Welch-Satterthwaite is calculated, default is \code{equalVariances = TRUE}.} #' \item{iterations}{Iterations for simulating the power for Fisher's combination test. #' If the power for more than one remaining stage is to be determined for #' Fisher's combination test, it is estimated via simulation with specified \cr #' \code{iterations}, the default value is 10000.} #' \item{seed}{Seed for simulating the power for Fisher's combination test. #' See above, default is a random seed.} #' } #' #' @details #' Given a design and a dataset, at a given stage the function calculates the test results #' (effect sizes, stage-wise test statistics and p-values, overall p-values and test statistics, #' conditional rejection probability (CRP), conditional power, Repeated Confidence Intervals (RCIs), #' repeated overall p-values, final stage p-values, median unbiased effect estimates, #' and final confidence intervals). \cr #' #' \code{dataInput} is either an element of \code{DatasetMeans}, of \code{DatasetRates}, or of #' \code{DatasetSurvival} and should be created with the function \code{\link{getDataset}}. #' #' @section Note: #' The conditional power is calculated only if effect size and sample size #' are specified. Median unbiased effect estimates and confidence intervals are calculated if #' a group sequential design or an inverse normal combination test design was chosen, i.e., they are not available #' for Fisher's p-value combination test design. #' #' A final stage p-value for Fisher's combination test is calculated only if a two-stage design was chosen.
#' For Fisher's combination test, the conditional power for more than one remaining stages is estimated via simulation. #' #' @return Returns an \code{\link{AnalysisResults}} object. #' #' @export #' #' @seealso #' Alternatively the analysis results can be calculated separately using one of the following functions: #' \itemize{ #' \item \code{\link{getTestActions}}, #' \item \code{\link{getConditionalPower}}, #' \item \code{\link{getConditionalRejectionProbabilities}}, #' \item \code{\link{getRepeatedConfidenceIntervals}}, #' \item \code{\link{getRepeatedPValues}}, #' \item \code{\link{getFinalConfidenceInterval}}, #' \item \code{\link{getFinalPValue}}. #' } #' #' @examples #' #' \donttest{ #' #' design <- getDesignGroupSequential() #' dataMeans <- getDataset( #' n = c(10,10), #' means = c(1.96,1.76), #' stDevs = c(1.92,2.01)) #' getAnalysisResults(design, dataMeans) #' #' } #' getAnalysisResults <- function( design, dataInput, ..., directionUpper = C_DIRECTION_UPPER_DEFAULT, thetaH0 = NA_real_, nPlanned = NA_real_) { .assertIsTrialDesign(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) .assertIsValidDataInput(dataInput = dataInput, design = design, stage = stage) .assertIsValidStage(stage, design$kMax) .assertIsValidThetaH0DataInput(thetaH0, dataInput) .assertAreSuitableInformationRates(design, dataInput, stage = stage) if (design$kMax < 2) stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "getAnalysisResults only available for design with interim stage(s)") if (dataInput$isDatasetMeans()) { if (is.na(thetaH0)) { thetaH0 = C_THETA_H0_MEANS_DEFAULT } return(.getAnalysisResultsMeans(design = design, dataInput = dataInput, directionUpper = directionUpper, thetaH0 = thetaH0, nPlanned = nPlanned, ...)) } if (dataInput$isDatasetRates()) { if (is.na(thetaH0)) { thetaH0 = C_THETA_H0_RATES_DEFAULT } return(.getAnalysisResultsRates(design = design, dataInput = dataInput, directionUpper = directionUpper, thetaH0 = thetaH0, nPlanned = nPlanned, ...)) } if (dataInput$isDatasetSurvival()) { if (is.na(thetaH0)) { thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT } return(.getAnalysisResultsSurvival(design = design, dataInput = dataInput, directionUpper = directionUpper, thetaH0 = thetaH0, nPlanned = nPlanned, ...)) } stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' type '", class(dataInput), "' is not implemented yet") } #' @title #' Get Stage Results #' #' @description #' Returns summary statistics and p-values for a given data set and a given design. #' #' @param design The trial design. #' @param dataInput The summary data used for calculating the test results. #' This is either an element of \code{DatasetMeans}, of \code{DatasetRates}, or of \code{DatasetSurvival}. #' See \code{\link{getDataset}}. #' @param ... Further (optional) arguments to be passed: #' \describe{ #' \item{stage}{The stage number (optional). Default: total number of existing stages in the data input.} #' \item{thetaH0}{The null hypothesis value, default is 0 for the normal and the binary case, #' it is 1 for the survival case. #' For testing a rate in one sample, a value thetaH0 in (0, 1) has to be specified for #' defining the null hypothesis H0: pi = thetaH0. \cr #' For non-inferiority designs, this is the non-inferiority bound. } #' \item{thetaH1 and assumedStDev or pi1, pi2}{The assumed effect size or assumed rates to calculate the #' conditional power. Depending on the type of dataset, either thetaH1 (means and survival) #' or pi1, pi2 (rates) can be specified. 
Additionally, if testing means is specified, #' an assumed standard deviation can be specified, default is 1.} #' \item{normalApproximation}{The type of computation of the p-values. Default is FALSE for #' testing means (i.e., the t test is used) and TRUE for testing rates and the hazard ratio. #' For testing rates, if \cr #' \code{normalApproximation = FALSE} is specified, the binomial test #' (one sample) or the test of Fisher (two samples) is used for calculating the p-values. #' In the survival setting, \cr #' \code{normalApproximation = FALSE} has no effect.} #' \item{equalVariances}{The type of t test. For testing means in two treatment groups, either #' the t test assuming that the variances are equal or the t test without assuming this, #' i.e., the test of Welch-Satterthwaite is calculated, default is \code{equalVariances = TRUE}.} #' \item{directionUpper}{The direction of one-sided testing. #' Default is \code{directionUpper = TRUE} which means that larger values of the #' test statistics yield smaller p-values.} #' } #' #' @details #' Calculates and returns the stage results of the specified design and data input at the specified stage. #' #' @return Returns a \code{\link{StageResults}} object. #' #' @export #' #' @examples #' #' design <- getDesignInverseNormal() #' dataRates <- getDataset( #' n1 = c(10,10), #' n2 = c(20,20), #' events1 = c(8,10), #' events2 = c(10,16)) #' getStageResults(design, dataRates) #' getStageResults <- function(design, dataInput, ...) { .assertIsTrialDesign(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) .assertIsValidDataInput(dataInput = dataInput, design = design, stage = stage) .assertIsValidStage(stage, design$kMax) if (dataInput$isDatasetMeans()) { return(.getStageResultsMeans(design = design, dataInput = dataInput, ...)) } if (dataInput$isDatasetRates()) { return(.getStageResultsRates(design = design, dataInput = dataInput, ...)) } if (dataInput$isDatasetSurvival()) { return(.getStageResultsSurvival(design = design, dataInput = dataInput, ...)) } stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' type '", class(dataInput), "' is not supported") } .getStageFromOptionalArguments <- function(..., dataInput) { stage <- .getOptionalArgument("stage", ...) if (is.null(stage)) { .assertIsDataset(dataInput) stage <- dataInput$getNumberOfStages() } return(as.integer(stage)) } #' #' @title #' Get Test Actions #' #' @description #' Returns test actions. #' #' @param design The trial design. #' @param stageResults The results at given stage, obtained from \code{\link{getStageResults}}. #' @param stage The stage number (optional). Default: total number of existing stages in the data input. #' #' @details #' Returns the test actions of the specified design and stage results at the specified stage. #' #' @export #' #' @keywords internal #' getTestActions <- function(design, stageResults, ...) { .assertIsTrialDesign(design) .assertIsStageResults(stageResults) stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput()) .assertIsValidStage(stage, design$kMax) .warnInCaseOfUnknownArguments(functionName = "getTestActions", ignore = c("stage"), ...) 
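# One test action per stage: at interim stages the options are "reject and stop", "accept and stop" (one-sided designs with futility bounds only), or "continue"; at the final stage, "reject" or "accept". The decision scale depends on the design type below: inverse normal combination statistic, z-score of the overall p-value (group sequential), or Fisher's p-value combination.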
testActions <- rep(NA_character_, design$kMax) if (.isTrialDesignInverseNormal(design)) { for (k in 1 : stage) { if (design$sided == 1) { if (k < design$kMax) { if (stageResults$combInverseNormal[k] > design$criticalValues[k]) { testActions[k] <- "reject and stop" } else if (stageResults$combInverseNormal[k] < design$futilityBounds[k]) { testActions[k] <- "accept and stop" } else { testActions[k] <- "continue" } } else { if (stageResults$combInverseNormal[k] > design$criticalValues[k]) { testActions[k] <- "reject" } else { testActions[k] <- "accept" } } } if (design$sided == 2) { if (k < design$kMax) { if (abs(stageResults$combInverseNormal[k]) > design$criticalValues[k]) { testActions[k] <- "reject and stop" } else { testActions[k] <- "continue" } } else { if (abs(stageResults$combInverseNormal[k]) > design$criticalValues[k]) { testActions[k] <- "reject" } else { testActions[k] <- "accept" } } } } } else if (.isTrialDesignGroupSequential(design)) { for (k in 1 : stage) { if (design$sided == 1) { if (k < design$kMax) { if (stats::qnorm(1 - stageResults$overallPValues[k]) > design$criticalValues[k]) { testActions[k] <- "reject and stop" } else if (stats::qnorm(1 - stageResults$overallPValues[k]) < design$futilityBounds[k]) { testActions[k] <- "accept and stop" } else { testActions[k] <- "continue" } } else { if (stats::qnorm(1 - stageResults$overallPValues[k]) > design$criticalValues[k]) { testActions[k] <- "reject" } else { testActions[k] <- "accept" } } } if (design$sided == 2) { if (k < design$kMax) { if (abs(stats::qnorm(1 - stageResults$overallPValues[k])) > design$criticalValues[k]) { testActions[k] <- "reject and stop" } else { testActions[k] <- "continue" } } else { if (abs(stats::qnorm(1 - stageResults$overallPValues[k])) > design$criticalValues[k]) { testActions[k] <- "reject" } else { testActions[k] <- "accept" } } } } } else if (.isTrialDesignFisher(design)) { for (k in 1 : stage) { if (design$sided == 1) { if (k < design$kMax) { if (stageResults$combFisher[k] < design$criticalValues[k]) { testActions[k] <- "reject and stop" } else if (stageResults$pValues[k] > design$alpha0Vec[k]) { testActions[k] <- "accept and stop" } else { testActions[k] <- "continue" } } else { if (stageResults$combFisher[k] < design$criticalValues[k]) { testActions[k] <- "reject" } else { testActions[k] <- "accept" } } } if (design$sided == 2) { if (k < design$kMax) { if (min(stageResults$combFisher[k], 1 - stageResults$combFisher[k]) < design$criticalValues[k]) { testActions[k] <- "reject and stop" } else { testActions[k] <- "continue" } } else { if (min(stageResults$combFisher[k], 1 - stageResults$combFisher[k]) < design$criticalValues[k]) { testActions[k] <- "reject" } else { testActions[k] <- "accept" } } } } } return(testActions) } #' #' @title #' Get Repeated Confidence Intervals #' #' @description #' Calculates and returns the lower and upper limit of the repeated confidence intervals of the trial. #' #' @param design The trial design. #' @param dataInput The summary data used for calculating the test results. #' This is either an element of \code{DatasetMeans}, of \code{DatasetRates}, or of \code{DatasetSurvival}. #' See \code{\link{getDataset}}. #' @param stage The stage number (optional). Default: total number of existing stages in the data input. #' #' @details #' The repeated confidence interval at a given stage of the trial contains the parameter values that are not rejected using the specified sequential design. 
#' It can be calculated at each stage of the trial and can thus be used as a monitoring tool. #' #' The repeated confidence intervals are provided up to the specified stage. #' #' @export #' #' @keywords internal #' getRepeatedConfidenceIntervals <- function(design, dataInput, ...) { .assertIsTrialDesign(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput) .assertIsValidDataInput(dataInput = dataInput, design = design, stage = stage) .assertIsValidStage(stage, design$kMax) if (dataInput$isDatasetMeans()) { return(.getRepeatedConfidenceIntervalsMeans( design = design, dataInput = dataInput, ...)) } if (dataInput$isDatasetRates()) { return(.getRepeatedConfidenceIntervalsRates( design = design, dataInput = dataInput, ...)) } if (dataInput$isDatasetSurvival()) { return(.getRepeatedConfidenceIntervalsSurvival( design = design, dataInput = dataInput, ...)) } stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' type '", class(dataInput), "' is not implemented yet") } #' #' @title #' Get Conditional Power #' #' @description #' Calculates and returns the conditional power. #' #' @param design The trial design. #' @param stageResults The results at given stage, obtained from \code{\link{getStageResults}}. #' @param nPlanned The sample size planned for the subsequent stages. #' It should be a vector with length equal to the remaining stages and is the #' overall sample size in the two treatment groups if two groups are considered. #' @param stage The stage number (optional). Default: total number of existing stages in the data input. #' @param allocationRatioPlanned The allocation ratio for two treatment groups planned for #' the subsequent stages, the default value is 1. #' @param thetaH1 or pi1, pi2 Assumed effect sizes or assumed rates pi1 to calculate the #' conditional power. Depending on the type of dataset, either thetaH1 (means and survival) #' or pi1, pi2 (rates) needs to be specified. #' Additionally, if testing means is specified, an assumed standard deviation (\code{assumedStDev}) #' can be specified, default is 1. #' @param iterations Iterations for simulating the power for Fisher's combination test. #' If the power for more than one remaining stage is to be determined for Fisher's combination test, #' it is estimated via simulation with specified \code{iterations}, the default value is 10000. #' @param seed Seed for simulating the power for Fisher's combination test. See above, default is a random seed. #' #' @details #' The conditional power is calculated only if effect size and sample size are specified. #' #' For Fisher's combination test, the conditional power for more than one remaining stage is #' estimated via simulation. #' #' @export #' #' @seealso #' \code{\link{plot.StageResults}} or \code{\link{plot.AnalysisResults}} for plotting the conditional power.
#' #' @keywords internal #' getConditionalPower <- function(design, stageResults, ..., nPlanned) { .assertIsTrialDesign(design) .assertIsStageResults(stageResults) if (stageResults$isDatasetMeans()) { return(.getConditionalPowerMeans(design = design, stageResults = stageResults, nPlanned = nPlanned, ...)) } if (stageResults$isDatasetRates()) { return(.getConditionalPowerRates(design = design, stageResults = stageResults, nPlanned = nPlanned, ...)) } if (stageResults$isDatasetSurvival()) { return(.getConditionalPowerSurvival(design = design, stageResults = stageResults, nPlanned = nPlanned, ...)) } stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' type '", class(stageResults$.dataInput), "' is not implemented yet") } .getConditionalPowerPlot <- function(..., stageResults, nPlanned, stage = stageResults$getDataInput()$getNumberOfStages(), allocationRatioPlanned = NA_real_) { .assertIsStageResults(stageResults) design <- stageResults$.design .assertIsValidStage(stage, design$kMax) .assertIsValidNPlanned(nPlanned, design$kMax, stage) if (is.na(allocationRatioPlanned)) { allocationRatioPlanned <- C_ALLOCATION_RATIO_DEFAULT } if (stageResults$isDatasetMeans()) { return(.getConditionalPowerPlotMeans(design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, ...)) } if (stageResults$isDatasetRates()) { return(.getConditionalPowerPlotRates(design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, ...)) } if (stageResults$isDatasetSurvival()) { return(.getConditionalPowerPlotSurvival(design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, ...)) } stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' type '", class(stageResults$.dataInput), "' is not implemented yet") } #' #' @title #' Get Repeated P Values #' #' @description #' Calculates the repeated p-values for given test results. #' #' @param design The trial design. #' @param stageResults The results at given stage, obtained from \code{\link{getStageResults}}. #' @param stage The stage number (optional). Default: total number of existing stages in the data input. #' #' @details #' The repeated p-value at a given stage of the trial is defined as the smallest #' significance level under which at given test design the test results #' obtain rejection of the null hypothesis. It can be calculated at each #' stage of the trial and can thus be used as a monitoring tool. #' #' The repeated p-values are provided up to the specified stage. #' #' @export #' #' @keywords internal #' getRepeatedPValues <- function(design, stageResults, ...) { .assertIsTrialDesign(design) .assertIsStageResults(stageResults) stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput()) .assertIsValidStage(stage, design$kMax) if (.isTrialDesignInverseNormalOrGroupSequential(design)) { if (design$typeOfDesign == C_TYPE_OF_DESIGN_AS_USER) { warning("Repeated p-values not available for 'typeOfDesign' = '", C_TYPE_OF_DESIGN_AS_USER, "'", call. = FALSE) return(rep(NA_real_, design$kMax)) } if (design$typeOfDesign == C_TYPE_OF_DESIGN_WT_OPTIMUM) { warning("Repeated p-values not available for 'typeOfDesign' = '", C_TYPE_OF_DESIGN_WT_OPTIMUM, "'", call. 
= FALSE) return(rep(NA_real_, design$kMax)) } } if (.isTrialDesignFisher(design)) { if (design$method == C_FISHER_METHOD_USER_DEFINED_ALPHA) { warning("Repeated p-values not available for 'method' = '", C_FISHER_METHOD_USER_DEFINED_ALPHA, "'", call. = FALSE) return(rep(NA_real_, design$kMax)) } } if (.isTrialDesignInverseNormal(design)) { return(.getRepeatedPValuesInverseNormal(design = design, stageResults = stageResults, ...)) } if (.isTrialDesignGroupSequential(design)) { return(.getRepeatedPValuesGroupSequential(design = design, stageResults = stageResults, ...)) } if (.isTrialDesignFisher(design)) { return(.getRepeatedPValuesFisher(design = design, stageResults = stageResults, ...)) } .stopWithWrongDesignMessage(design) } # # Get final p-value based on inverse normal method # .getFinalPValueInverseNormalOrGroupSequential <- function(..., design, stageResults) { .assertIsTrialDesignInverseNormalOrGroupSequential(design) stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput()) .assertIsValidStage(stage, design$kMax) .warnInCaseOfUnknownArguments(functionName = ".getFinalPValueInverseNormalOrGroupSequential", ignore = c("stage"), ...) if (.isTrialDesignInverseNormal(design)) { stageInverseNormalOrGroupSequential <- .getStageInverseNormal(design, stageResults, stage) } else { stageInverseNormalOrGroupSequential <- .getStageGroupSeq(design, stageResults, stage) } finalStage <- min(stageInverseNormalOrGroupSequential, design$kMax) # Early stopping or at end of study if (stageInverseNormalOrGroupSequential < design$kMax || stage == design$kMax) { if (stageInverseNormalOrGroupSequential == 1) { pFinal <- stageResults$pValues[1] } else { if (design$bindingFutility){ if (.isTrialDesignInverseNormal(design)) { decisionMatrix <- matrix(c(design$futilityBounds[1:(finalStage - 1)], C_FUTILITY_BOUNDS_DEFAULT, c(design$criticalValues[1:(finalStage - 1)], stageResults$combInverseNormal[finalStage])), nrow = 2, byrow = TRUE) } else { decisionMatrix <- matrix(c(design$futilityBounds[1:(finalStage - 1)], C_FUTILITY_BOUNDS_DEFAULT, c(design$criticalValues[1:(finalStage - 1)], stats::qnorm(1 - stageResults$overallPValues[finalStage]))), nrow = 2, byrow = TRUE) } } else { if (.isTrialDesignInverseNormal(design)) { decisionMatrix <- matrix(c(rep(C_FUTILITY_BOUNDS_DEFAULT,finalStage), c(design$criticalValues[1:(finalStage - 1)], stageResults$combInverseNormal[finalStage])), nrow = 2, byrow = TRUE) } else { decisionMatrix <- matrix(c(rep(C_FUTILITY_BOUNDS_DEFAULT,finalStage), c(design$criticalValues[1:(finalStage - 1)], stats::qnorm(1 - stageResults$overallPValues[finalStage]))), nrow = 2, byrow = TRUE) } } probs <- .getGroupSequentialProbabilities(decisionMatrix = decisionMatrix, informationRates = design$informationRates[1:finalStage]) pFinal <- sum(probs[3, ] - probs[2, ]) if (design$sided == 2){ if (stageInverseNormalOrGroupSequential == 1) { pFinalOtherDirection <- 1 - stageResults$pValues[1] } else { if (.isTrialDesignInverseNormal(design)) { decisionMatrix <- matrix(c(rep(C_FUTILITY_BOUNDS_DEFAULT,finalStage), c(design$criticalValues[1:(finalStage - 1)], -stageResults$combInverseNormal[finalStage])), nrow = 2, byrow = TRUE) } else { decisionMatrix <- matrix(c(rep(C_FUTILITY_BOUNDS_DEFAULT,finalStage), c(design$criticalValues[1:(finalStage - 1)], -stats::qnorm(1 - stageResults$overallPValues[finalStage]))), nrow = 2, byrow = TRUE) } probs <- .getGroupSequentialProbabilities(decisionMatrix = decisionMatrix, informationRates = design$informationRates[1:finalStage]) 
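# Stagewise exit probability in the opposite direction; for two-sided testing, the final p-value below is twice the smaller of the two one-sided values (stagewise ordering of the sample space).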
pFinalOtherDirection <- sum(probs[3, ] - probs[2, ]) } pFinal <- 2*min(pFinal, pFinalOtherDirection) } } return(list(finalStage = finalStage, pFinal = pFinal)) } return(list(finalStage = NA_integer_, pFinal = NA_real_)) } # # Returns the weights for inverse normal statistic # .getWeightsInverseNormal <- function(design) { weights <- rep(NA, design$kMax) weights[1] <- sqrt(design$informationRates[1]) if (design$kMax == 1) { return(weights) } weights[2:design$kMax] <- sqrt(design$informationRates[2:design$kMax] - design$informationRates[1:(design$kMax - 1)]) return(weights) } # # Returns the weights for Fisher's combination test statistic # .getWeightsFisher <- function(design) { weights <- rep(NA, design$kMax) weights[1] <- 1 if (design$kMax == 1) { return(weights) } weights[2:design$kMax] <- sqrt((design$informationRates[2:design$kMax] - design$informationRates[1:(design$kMax - 1)]) / design$informationRates[1]) return(weights) } # # Returns the stage when using the inverse normal combination test # .getStageInverseNormal <- function(design, stageResults, stage) { for (k in 1:stage) { if (stageResults$combInverseNormal[k] >= design$criticalValues[k]) { return(k) } if (design$sided == 2){ if (stageResults$combInverseNormal[k] <= -design$criticalValues[k]) { return(k) } } if (design$bindingFutility && k < design$kMax && stageResults$combInverseNormal[k] <= design$futilityBounds[k]) { return(k) } } # no early stopping return(as.integer(stage + design$kMax)) } # # Returns the stage when using the group sequential test # .getStageGroupSeq <- function(design, stageResults, stage) { for (k in 1:stage) { if (stats::qnorm(1 - stageResults$overallPValues[k]) >= design$criticalValues[k]) { return(k) } if (design$sided == 2){ if (stats::qnorm(1 - stageResults$overallPValues[k]) <= -design$criticalValues[k]) { return(k) } } if (design$bindingFutility && k < design$kMax && stats::qnorm(max(1e-8,1 - stageResults$overallPValues[k])) <= design$futilityBounds[k]) { return(k) } } # no early stopping return(as.integer(stage + design$kMax)) } # # Returns the stage when using Fisher's combination test # .getStageFisher <- function(design, stageResults, stage) { for (k in 1:stage) { if (stageResults$combFisher[k] <= design$criticalValues[k]) { return(k) } if (design$sided == 2){ if (1 - stageResults$combFisher[k] <= design$criticalValues[k]) { return(k) } } if (design$bindingFutility && k < design$kMax && stageResults$pValues[k] >= design$alpha0Vec[k]) { return(k) } } # no early stopping return(as.integer(stage + design$kMax)) } # @title # q function # # @description # Function for calculating the final p-value for two-stage design with Fisher's combination test # and its use for calculating confidence intervals, see Wassmer & Brannath, p. 192 and Brannath et al. (2002), p. 241. # Formula generalized for arbitrary weight in combination test. 
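# Note: for theta != 0, the first-stage efficacy and futility bounds alpha1 and alpha0 are shifted accordingly (alpha1Adj and alpha0Adj in the function body) before the q function is evaluated.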
# .getQFunctionResult <- function(..., design, stageResults, theta, infRate) { alpha1 <- design$criticalValues[1] alpha0 <- design$alpha0Vec[1] if (!design$bindingFutility || (design$sided == 2)) alpha0 <- 1 weightForFisher <- stageResults$weightsFisher[2] if (theta != 0) { alpha1Adj <- ifelse(alpha1 <= 0, 0, 1 - stats::pnorm(stats::qnorm(1 - alpha1) - theta / stageResults$overallStDevs[1] * infRate[1])) } else { alpha1Adj <- alpha1 } if (is.na(alpha1Adj)) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "failed to calculate 'alpha1Adj'") } if (theta != 0) { alpha0Adj <- ifelse(alpha0 >= 1, 1, 1 - stats::pnorm(stats::qnorm(1 - alpha0) - theta / stageResults$overallStDevs[1] * infRate[1])) } else { alpha0Adj <- alpha0 } if (is.na(alpha0Adj)) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "failed to calculate 'alpha0Adj'") } if (stageResults$pValues[1] <= alpha1Adj || stageResults$pValues[1] >= alpha0Adj) { return(stageResults$pValues[1]) } if (weightForFisher == 1) { return(max(alpha1Adj, stageResults$pValues[1] * stageResults$pValues[2]) + stageResults$pValues[1] * stageResults$pValues[2] * (log(alpha0Adj) - log(max(alpha1Adj, stageResults$pValues[1] * stageResults$pValues[2])))) } return(max(alpha1Adj, stageResults$pValues[1] * stageResults$pValues[2]^weightForFisher) + weightForFisher / (weightForFisher - 1) * stageResults$pValues[1]^(1 / weightForFisher) * stageResults$pValues[2] * (alpha0Adj^(1 - 1 / weightForFisher) - max(alpha1Adj, stageResults$pValues[1] * stageResults$pValues[2]^weightForFisher)^(1 - 1 / weightForFisher))) } # # Get final p-value based on Fisher combination test # .getFinalPValueFisher <- function(..., design, stageResults) { .assertIsTrialDesignFisher(design) stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput()) .assertIsValidStage(stage, design$kMax) .warnInCaseOfUnknownArguments(functionName = ".getFinalPValueFisher", ignore = c("stage"), ...) stageFisher <- .getStageFisher(design, stageResults, stage) finalStage <- min(stageFisher, design$kMax) # Early stopping or at end of study if (stageFisher < design$kMax || stage == design$kMax) { if (stageFisher == 1) { pFinal <- stageResults$pValues[1] } else { if (design$kMax > 2) { warning("Final p-value cannot be calculated for kMax = ", design$kMax, " ", "because the function for Fisher's design is implemented only for kMax <= 2", call. = FALSE) return(list(finalStage = NA_integer_, pFinal = NA_real_)) } # Final p-value for kMax = 2 pFinal <- .getQFunctionResult(design = design, stageResults = stageResults, theta = 0, infRate = 0) } if (design$sided == 2){ if (stageFisher == 1) { pFinalOtherDirection <- 1 - stageResults$pValues[1] } else { stageResults$pValues <- 1 - stageResults$pValues pFinalOtherDirection <- .getQFunctionResult(design = design, stageResults = stageResults, theta = 0, infRate = 0) stageResults$pValues <- 1 - stageResults$pValues } # Final p-value for kMax = 2 pFinal <- 2*min(pFinal, pFinalOtherDirection) } return(list(finalStage = finalStage, pFinal = pFinal)) } return(list(finalStage = NA_integer_, pFinal = NA_real_)) } #' #' @title #' Get Final P Value #' #' @description #' Returns the final p-value for given stage results. #' #' @param design The trial design. #' @param stageResults The results at given stage, obtained from \code{\link{getStageResults}}. #' @param stage The stage number (optional). Default: total number of existing stages in the data input. #' #' @details #' The calculation of the final p-value is based on the stagewise ordering of the sample space. 
#' This enables the calculation for both the non-adaptive and the adaptive case.
#' For Fisher's combination test, it is available for \code{kMax = 2} only.
#'
#' @export
#'
#' @keywords internal
#'
getFinalPValue <- function(design, stageResults, ...) {
	.assertIsTrialDesign(design)
	.assertIsStageResults(stageResults)

	if (.isTrialDesignInverseNormalOrGroupSequential(design)) {
		return(.getFinalPValueInverseNormalOrGroupSequential(design = design, stageResults = stageResults, ...))
	}

	if (.isTrialDesignFisher(design)) {
		return(.getFinalPValueFisher(design = design, stageResults = stageResults, ...))
	}

	.stopWithWrongDesignMessage(design)
}

.getVectorWithFinalValueAtFinalStage <- function(kMax, finalValue, finalStage) {
	v <- rep(NA_real_, kMax)
	if (is.null(finalValue) || is.na(finalValue) || is.null(finalStage) ||
			is.na(finalStage) || finalStage < 1 || finalStage > kMax) {
		return(v)
	}

	v[finalStage] <- finalValue
	return(v)
}

#' @title
#' Get Final Confidence Interval
#'
#' @description
#' Returns the final confidence interval for the parameter of interest.
#' It is based on the prototype case, i.e., the test for a mean of
#' normally distributed variables.
#'
#' @param design The trial design.
#' @param dataInput The data input.
#' @param stage The stage number.
#' @param thetaH0 The null hypothesis value, default is 0 for the normal and the binary case;
#' it is 1 for the survival case.
#' For testing a rate in one sample, a value \code{thetaH0} in (0, 1) has to be specified for
#' defining the null hypothesis H0: pi = thetaH0. \cr
#' For non-inferiority designs, this is the non-inferiority bound.
#' @param directionUpper The direction of one-sided testing.
#' Default is \code{directionUpper = TRUE}, which means that larger values of the
#' test statistics yield smaller p-values.
#' @param normalApproximation The type of computation of the p-values. Default is FALSE for
#' testing means (i.e., the t test is used) and TRUE for testing rates and the hazard ratio.
#' For testing rates, if \code{normalApproximation = FALSE} is specified, the binomial test
#' (one sample) or the test of Fisher (two samples) is used for calculating the p-values.
#' In the survival setting, \code{normalApproximation = FALSE} has no effect.
#' @param equalVariances The type of t test. For testing means in two treatment groups, either
#' the t test assuming that the variances are equal or the t test without assuming this,
#' i.e., the test of Welch-Satterthwaite, is calculated; default is \code{equalVariances = TRUE}.
#'
#' @details
#' Depending on \code{design} and \code{dataInput}, the final confidence interval and the
#' median unbiased estimate, both based on the stagewise ordering of the sample space,
#' are calculated and returned.
#' Additionally, a non-standardized ("general") version is provided; use the standard
#' deviation to obtain the confidence interval for the parameter of interest.
#'
#' @return Returns a \code{list} containing
#' \itemize{
#' \item \code{finalStage},
#' \item \code{medianUnbiased},
#' \item \code{finalConfidenceInterval},
#' \item \code{medianUnbiasedGeneral}, and
#' \item \code{finalConfidenceIntervalGeneral}.
#' }
#'
#' @export
#'
#' @examples
#'
#' design <- getDesignInverseNormal(kMax = 2)
#' data <- getDataset(
#'     n = c(20, 30),
#'     means = c(50, 51),
#'     stDevs = c(130, 140)
#' )
#' getFinalConfidenceInterval(design, dataInput = data)
#'
#' # Results in:
#' #
#' # $finalStage
#' # [1] 2
#' #
#' # $medianUnbiasedGeneral
#' # [1] 0.3546145
#' #
#' # $finalConfidenceIntervalGeneral
#' # [1] 0.06967801 0.63468553
#' #
#' # $medianUnbiased
#' # [1] 47.7787
#' #
#' # $finalConfidenceInterval
#' # [1] 9.388012 85.513851
#'
#' @keywords internal
#'
getFinalConfidenceInterval <- function(design, dataInput, ...) {
	.assertIsTrialDesign(design)
	stage <- .getStageFromOptionalArguments(..., dataInput = dataInput)
	.assertIsValidDataInput(dataInput = dataInput, design = design, stage = stage)
	.assertIsValidStage(stage, design$kMax)
	if (design$bindingFutility) {
		warning("Two-sided final confidence bounds are not appropriate; ",
			"use the one-sided version (i.e., one bound) only.", call. = FALSE)
	}

	if (dataInput$isDatasetMeans()) {
		return(.getFinalConfidenceIntervalMeans(
			design = design, dataInput = dataInput, ...))
	}

	if (dataInput$isDatasetRates()) {
		return(.getFinalConfidenceIntervalRates(
			design = design, dataInput = dataInput, ...))
	}

	if (dataInput$isDatasetSurvival()) {
		return(.getFinalConfidenceIntervalSurvival(
			design = design, dataInput = dataInput, ...))
	}

	stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' type '", class(dataInput), "' is not implemented yet")
}

#
# Get repeated p-values based on the group sequential test
#
.getRepeatedPValuesGroupSequential <- function(..., design, stageResults, tolerance = NA_real_) {
	.assertIsTrialDesignInverseNormalOrGroupSequential(design)
	stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput())
	.assertIsValidStage(stage, design$kMax)
	.warnInCaseOfUnknownArguments(functionName = ".getRepeatedPValuesGroupSequential", ignore = c("stage"), ...)
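	# The repeated p-value at stage k is the smallest significance level
	# 'alpha' for which the observed test statistic would lie exactly on the
	# efficacy boundary of the design recomputed with that 'alpha' (all other
	# design parameters kept fixed). It is found numerically below with a
	# bisection search, i.e., as the root in 'level' of
	#   f(level) = criticalValue_k(level) - qnorm(1 - overallPValue_k)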
if (is.na(tolerance)) { tolerance <- C_ANALYSIS_TOLERANCE_DEFAULT } repeatedPValues <- rep(NA_real_, design$kMax) if (design$typeOfDesign == C_TYPE_OF_DESIGN_HP && stage == design$kMax) { if (!is.na(stageResults$overallPValues[design$kMax]) && stats::qnorm(1 - stageResults$overallPValues[design$kMax]) == Inf) { repeatedPValues[design$kMax] <- tolerance } else { startTime <- Sys.time() lower <- .getDesignGroupSequential(kMax = design$kMax, sided = design$sided, informationRates = design$informationRates, typeOfDesign = C_TYPE_OF_DESIGN_HP, futilityBounds = design$futilityBounds, bindingFutility = design$bindingFutility)$alphaSpent[design$kMax - 1] + tolerance if (design$bindingFutility){ upper <- min(0.5, 1 - stats::pnorm(max(design$futilityBounds))) } else { upper <- 0.5 } repeatedPValues[design$kMax] <- .getOneDimensionalRootBisectionMethod( f = function(level) { y <- .getDesignGroupSequential(kMax = design$kMax, alpha = level, sided = design$sided, informationRates = design$informationRates, typeOfDesign = C_TYPE_OF_DESIGN_HP, futilityBounds = design$futilityBounds, bindingFutility = design$bindingFutility) if (design$sided == 2) { return(y$criticalValues[design$kMax] - abs(stats::qnorm(1 - stageResults$overallPValues[design$kMax]))) } return(y$criticalValues[design$kMax] - stats::qnorm(1 - stageResults$overallPValues[design$kMax])) }, lower = lower, upper = upper, tolerance = tolerance, direction = -1, acceptResultsOutOfTolerance = TRUE, suppressWarnings = TRUE ) } } else { for (k in 1:stage) { if (!is.na(stageResults$overallPValues[k]) && stats::qnorm(1 - stageResults$overallPValues[k]) == Inf) { repeatedPValues[k] <- tolerance } else { startTime <- Sys.time() if (design$bindingFutility){ upper <- min(0.5, 1 - stats::pnorm(max(design$futilityBounds))) } else { upper <- 0.5 } repeatedPValues[k] <- .getOneDimensionalRootBisectionMethod( f = function(level) { y <- .getDesignGroupSequential(kMax = design$kMax, alpha = level, sided = design$sided, informationRates = design$informationRates, typeOfDesign = design$typeOfDesign, deltaWT = design$deltaWT, gammaA = design$gammaA, futilityBounds = design$futilityBounds, bindingFutility = design$bindingFutility) if (design$sided == 2) { return(y$criticalValues[k] - abs(stats::qnorm(1 - stageResults$overallPValues[k]))) } return(y$criticalValues[k] - stats::qnorm(1 - stageResults$overallPValues[k])) }, lower = tolerance, upper = upper, tolerance = tolerance, direction = -1, acceptResultsOutOfTolerance = TRUE, suppressWarnings = TRUE ) .logProgress("Overall repeated p-values of stage %s calculated", startTime = startTime, k) } } } return(repeatedPValues) } # # Get repeated p-values based on inverse normal method # .getRepeatedPValuesInverseNormal <- function(..., design, stageResults, tolerance = NA_real_) { .assertIsTrialDesignInverseNormalOrGroupSequential(design) stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput()) .assertIsValidStage(stage, design$kMax) .warnInCaseOfUnknownArguments(functionName = ".getRepeatedPValuesInverseNormal", ignore = c("stage"), ...) 
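	# Same bisection principle as in .getRepeatedPValuesGroupSequential, but
	# the observed statistic is the inverse normal combination statistic
	# stageResults$combInverseNormal[k] instead of qnorm(1 - overallPValues[k]).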
if (is.na(tolerance)) { tolerance <- C_ANALYSIS_TOLERANCE_DEFAULT } repeatedPValues <- rep(NA_real_, design$kMax) if (design$typeOfDesign == C_TYPE_OF_DESIGN_HP && stage == design$kMax) { if (!is.na(stageResults$combInverseNormal[design$kMax]) && stageResults$combInverseNormal[design$kMax] == Inf) { repeatedPValues[design$kMax] <- tolerance } else { startTime <- Sys.time() lower <- .getDesignGroupSequential(kMax = design$kMax, sided = design$sided, informationRates = design$informationRates, typeOfDesign = C_TYPE_OF_DESIGN_HP, futilityBounds = design$futilityBounds, bindingFutility = design$bindingFutility)$alphaSpent[design$kMax - 1] + tolerance if (design$bindingFutility){ upper <- min(0.5, 1 - stats::pnorm(max(design$futilityBounds))) } else { upper <- 0.5 } repeatedPValues[design$kMax] <- .getOneDimensionalRootBisectionMethod( f = function(level) { y <- .getDesignGroupSequential(kMax = design$kMax, alpha = level, sided = design$sided, informationRates = design$informationRates, typeOfDesign = C_TYPE_OF_DESIGN_HP, futilityBounds = design$futilityBounds, bindingFutility = design$bindingFutility) if (design$sided == 2) { return(y$criticalValues[design$kMax] - abs(stageResults$combInverseNormal[design$kMax])) } return(y$criticalValues[design$kMax] - stageResults$combInverseNormal[design$kMax]) }, lower = lower, upper = upper, tolerance = tolerance, direction = -1, acceptResultsOutOfTolerance = TRUE, suppressWarnings = TRUE ) } } else { for (k in 1:stage) { if (!is.na(stageResults$combInverseNormal[k]) && (stageResults$combInverseNormal[k] == Inf)) { repeatedPValues[k] <- tolerance } else { startTime <- Sys.time() if (design$bindingFutility){ upper <- min(0.5, 1 - stats::pnorm(max(design$futilityBounds))) } else { upper <- 0.5 } repeatedPValues[k] <- .getOneDimensionalRootBisectionMethod( f = function(level) { y <- .getDesignGroupSequential(kMax = design$kMax, alpha = level, sided = design$sided, informationRates = design$informationRates, typeOfDesign = design$typeOfDesign, deltaWT = design$deltaWT, gammaA = design$gammaA, futilityBounds = design$futilityBounds, bindingFutility = design$bindingFutility) if (design$sided == 2) { return(y$criticalValues[k] - abs(stageResults$combInverseNormal[k])) } return(y$criticalValues[k] - stageResults$combInverseNormal[k]) }, lower = tolerance, upper = upper, tolerance = tolerance, direction = -1, acceptResultsOutOfTolerance = TRUE, suppressWarnings = TRUE ) .logProgress("Overall repeated p-values of stage %s calculated", startTime = startTime, k) } } } return(repeatedPValues) } # # Get repeated p-values based on Fisher combination test # .getRepeatedPValuesFisher <- function(..., design, stageResults, tolerance = NA_real_) { .assertIsTrialDesignFisher(design) stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput()) .assertIsValidStage(stage, design$kMax) .warnInCaseOfUnknownArguments(functionName = ".getRepeatedPValuesFisher", ignore = c("stage"), ...) 
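	# For Fisher's combination test the comparison is made on the p-value
	# product scale, where smaller products are more extreme; the critical
	# values increase with the significance level, hence the bisection below
	# searches with direction = 1. For two-sided testing, the more extreme of
	# the two directions is used, with the opposite direction computed as
	#   combFisherNeg_k = prod((1 - p[1:k])^w[1:k])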
if (is.na(tolerance)) { tolerance <- C_ANALYSIS_TOLERANCE_DEFAULT } repeatedPValues <- rep(NA_real_, design$kMax) for (k in 1:stage) { if (!is.na(stageResults$combFisher[k]) && (stageResults$combFisher[k] == 0)) { repeatedPValues[k] <- tolerance } else { startTime <- Sys.time() repeatedPValues[k] <- .getOneDimensionalRootBisectionMethod( f = function(level) { y <- .getDesignFisher(kMax = design$kMax, alpha = level, sided = design$sided, informationRates = design$informationRates, alpha0Vec = design$alpha0Vec, bindingFutility = design$bindingFutility, method = design$method) if (design$sided == 2){ combFisherNegStagek <- prod((1 - stageResults$pValues[1:k])^stageResults$weightsFisher[1:k]) return(y$criticalValues[k] - min(stageResults$combFisher[k],combFisherNegStagek)) } return(y$criticalValues[k] - stageResults$combFisher[k]) }, lower = tolerance, upper = 0.5, tolerance = tolerance, direction = 1, acceptResultsOutOfTolerance = TRUE, suppressWarnings = TRUE ) .logProgress("Overall repeated p-values of stage %s calculated", startTime = startTime, k) } } return(repeatedPValues) } .getRejectValueConditionalPowerFisher <- function(kMax, alpha0Vec, criticalValues, weightsFisher, pValues, currentKMax, thetaH1, stage, nPlanned) { pValues <- c(pValues[1:stage], 1 - stats::pnorm(stats::rnorm(kMax - stage, thetaH1 * sqrt(nPlanned[(stage + 1):currentKMax])))) for (j in 1:currentKMax) { reject <- .getRejectValueFisherForOneStage(kMax = currentKMax, alpha0Vec, criticalValues, weightsFisher, stage = j, pValues) if (reject >= 0) { return(reject) } } return(0) } .getRejectValueFisherForOneStage <- function(kMax, alpha0Vec, criticalValues, weightsFisher, stage, pValues) { if (stage < kMax && pValues[stage] >= alpha0Vec[stage]) { return(0) } p <- prod(pValues[1:stage]^weightsFisher[1:stage]) if (is.na(p)) { stop("Calculation of 'p' failed for stage ", stage, " ('pValues' = ", .arrayToString(pValues), ", 'weightsFisher' = ", .arrayToString(weightsFisher), ")") } if (is.na(criticalValues[stage])) { stop("No critical value available for stage ", stage, " ('criticalValues' = ", .arrayToString(criticalValues), ")") } if (p < criticalValues[stage]) { return(1) } return(-1) } .getRejectValueCrpFisher <- function(kMax, alpha0Vec, criticalValues, weightsFisher, k, stageResults) { pValues <- c(stageResults$pValues[1:k], stats::runif(kMax - k)) for (stage in 1:kMax) { reject <- .getRejectValueFisherForOneStage(kMax = kMax, alpha0Vec = alpha0Vec, criticalValues = criticalValues, weightsFisher = weightsFisher, stage = stage, pValues = pValues) if (reject >= 0) { return(reject) } } return(0) } # # Get CRP based on inverse normal or group sequential method # .getConditionalRejectionProbabilitiesInverseNormalorGroupSequential <- function( ..., design, stageResults) { .assertIsTrialDesignInverseNormalOrGroupSequential(design) stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput()) .assertIsValidStage(stage, design$kMax) .warnInCaseOfUnknownArguments(functionName = ".getConditionalRejectionProbabilitiesInverseNormalorGroupSequential", ignore = c("stage"), ...) 
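	# The conditional rejection probability (CRP) at stage k is the
	# probability, computed under H0 and given the data observed up to
	# stage k, of crossing an efficacy boundary at one of the remaining
	# stages. It is obtained by shifting the remaining decision boundaries
	# by the observed (weighted) test statistic and applying the group
	# sequential recursion to the rescaled information
	#   t_j' = (I_j - I_k) / (1 - I_k), j = k + 1, ..., kMax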
kMax <- design$kMax criticalValues <- design$criticalValues informationRates <- design$informationRates weights <- stageResults$weightsInverseNormal futilityBounds <- design$futilityBounds conditionalRejectionProbabilities <- rep(NA_real_, kMax) for (k in 1:min(kMax - 1, stage)) { if (.isTrialDesignInverseNormal(design)) { # Shifted decision region for use in getGroupSeqProbs shiftedDecision <- criticalValues[(k + 1):kMax] * sqrt(sum(weights[1:k]^2) + cumsum(weights[(k + 1):kMax]^2)) / sqrt(cumsum(weights[(k + 1):kMax]^2)) - as.vector(weights[1:k] %*% stats::qnorm(1 - stageResults$pValues[1:k])) / sqrt(cumsum(weights[(k + 1):kMax]^2)) if (k == kMax - 1) { shiftedFutilityBounds <- c() } else { shiftedFutilityBounds <- futilityBounds[(k + 1):(kMax - 1)] * sqrt(sum(weights[1:k]^2) + cumsum(weights[(k + 1):(kMax - 1)]^2)) / sqrt(cumsum(weights[(k + 1):(kMax - 1)]^2)) - as.vector(weights[1:k] %*% stats::qnorm(1 - stageResults$pValues[1:k])) / sqrt(cumsum(weights[(k + 1):(kMax - 1)]^2)) } } else { # Shifted decision region for use in getGroupSeqProbs shiftedDecision <- criticalValues[(k + 1):kMax] * sqrt(sum(weights[1:k]^2) + cumsum(weights[(k + 1):kMax]^2)) / sqrt(cumsum(weights[(k + 1):kMax]^2)) - stats::qnorm(1 - stageResults$overallPValues[k]) * sqrt(sum(weights[1:k]^2)) / sqrt(cumsum(weights[(k + 1):kMax]^2)) if (k == kMax - 1) { shiftedFutilityBounds <- c() } else { shiftedFutilityBounds <- futilityBounds[(k + 1):(kMax - 1)] * sqrt(sum(weights[1:k]^2) + cumsum(weights[(k + 1):(kMax - 1)]^2)) / sqrt(cumsum(weights[(k + 1):(kMax - 1)]^2)) - stats::qnorm(1 - stageResults$overallPValues[k]) * sqrt(sum(weights[1:k]^2)) / sqrt(cumsum(weights[(k + 1):(kMax - 1)]^2)) } } # Scaled information for use in getGroupSeqProbs scaledInformation <- (informationRates[(k + 1):kMax] - informationRates[k]) / (1 - informationRates[k]) if (design$sided == 2) { decisionMatrix <- matrix(c(-shiftedDecision, shiftedDecision), nrow = 2, byrow = TRUE) probs <- .getGroupSequentialProbabilities(decisionMatrix = decisionMatrix, informationRates = scaledInformation) crp <- sum(probs[3, ] - probs[2, ] + probs[1, ]) } else { if (design$bindingFutility){ decisionMatrix <- matrix(c(shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT, shiftedDecision), nrow = 2, byrow = TRUE) } else { decisionMatrix <- matrix(c(rep(C_FUTILITY_BOUNDS_DEFAULT, kMax - k), shiftedDecision), nrow = 2, byrow = TRUE) } probs <- .getGroupSequentialProbabilities(decisionMatrix = decisionMatrix, informationRates = scaledInformation) crp <- sum(probs[3, ] - probs[2, ]) } conditionalRejectionProbabilities[k] <- crp } if (design$bindingFutility){ for (k in (1:min(kMax - 1, stage))){ if (.isTrialDesignInverseNormal(design)) { if (stageResults$combInverseNormal[k] <= futilityBounds[k]) conditionalRejectionProbabilities[k:stage] <- 0 } else { if (stats::qnorm(1 - stageResults$overallPValues[k]) <= futilityBounds[k]) conditionalRejectionProbabilities[k:stage] <- 0 } } } return(conditionalRejectionProbabilities) } # # Get CRP based on Fisher combination test # .getConditionalRejectionProbabilitiesFisher <- function(..., design, stageResults) { .assertIsTrialDesignFisher(design) stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput()) .assertIsValidStage(stage, design$kMax) .warnInCaseOfUnknownArguments(functionName = ".getConditionalRejectionProbabilitiesFisher", ignore = c("stage"), ...) 
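	# For Fisher's combination test the CRP has a closed form: if the
	# combination statistic is already below the stage-k critical value,
	# the CRP is 1; otherwise it equals the size of the remaining
	# (kMax - k)-stage Fisher combination test with rescaled critical
	# values, computed via .getFisherCombinationSize().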
kMax <- design$kMax if (kMax == 1) { return(NA_real_) } criticalValues <- design$criticalValues weights <- stageResults$weightsFisher if (design$bindingFutility){ alpha0Vec <- design$alpha0Vec } else { alpha0Vec <- rep(1, kMax - 1) } conditionalRejectionProbabilities <- rep(NA_real_, kMax) for (k in (1:min(kMax - 1, stage))) { if (prod(stageResults$pValues[1:k]^weights[1:k]) <= criticalValues[k]){ conditionalRejectionProbabilities[k] <- 1 } else { if (k < kMax - 1){ conditionalRejectionProbabilities[k] <- .getFisherCombinationSize(kMax - k, alpha0Vec[(k + 1):(kMax - 1)], (criticalValues[(k + 1):kMax] / prod(stageResults$pValues[1:k]^weights[1:k]))^(1 / weights[k + 1]), weights[(k + 2):kMax] / weights[k + 1]) } else { conditionalRejectionProbabilities[k] <- (criticalValues[kMax]/ prod(stageResults$pValues[1:k]^weights[1:k]))^(1 / weights[kMax]) } } } if (design$bindingFutility){ for (k in (1:min(kMax - 1, stage))){ if (stageResults$pValues[k] > alpha0Vec[k]) conditionalRejectionProbabilities[k:stage] <- 0 } } conditionalRejectionProbabilities[conditionalRejectionProbabilities >= 1] <- 1 conditionalRejectionProbabilities[conditionalRejectionProbabilities < 0] <- NA_real_ return(conditionalRejectionProbabilities) } # # Get CRP based on Fisher combination test, tested through simulation # .getConditionalRejectionProbabilitiesFisherSimulated <- function( ..., design, stageResults, iterations = 0, seed = NA_real_) { .assertIsTrialDesignFisher(design) stage <- .getStageFromOptionalArguments(..., dataInput = stageResults$getDataInput()) .assertIsValidStage(stage, design$kMax) .assertIsValidIterationsAndSeed(iterations, seed) .warnInCaseOfUnknownArguments(functionName = ".getConditionalRejectionProbabilitiesFisherSimulated", ignore = c("stage"), ...) kMax <- design$kMax criticalValues <- design$criticalValues alpha0Vec <- design$alpha0Vec weightsFisher <- stageResults$weightsFisher crpFisherSimulated <- rep(NA_real_, kMax) if (iterations > 0) { seed = .setSeed(seed) if (kMax >= 2) { for (k in 1:min(kMax - 1, stage)) { reject <- 0 for (i in 1:iterations) { reject <- reject + .getRejectValueCrpFisher(kMax = kMax, alpha0Vec = alpha0Vec, criticalValues = criticalValues, weightsFisher = weightsFisher, k = k, stageResults = stageResults) } crpFisherSimulated[k] <- reject / iterations } } else { warning("Simulation of CRP Fisher stopped: 'kMax' must be >= 2", call. = FALSE) } } return(list( crpFisherSimulated = crpFisherSimulated, iterations = iterations, seed = seed )) } #' #' @title #' Get Conditional Rejection Probabilities #' #' @description #' Calculates the conditional rejection probabilities (CRP) for given test results. #' #' @param design The trial design. #' @param stageResults The results at given stage, obtained from \code{\link{getStageResults}}. #' @param stage The stage number (optional). Default: total number of existing stages in the data input. #' #' @details #' The conditional rejection probability is the probability, under H0, to reject H0 #' in one of the subsequent (remaining) stages. #' The probability is calculated using the specified design. For testing rates and the #' survival design, the normal approximation is used, i.e., it is calculated with the #' use of the prototype case testing a mean for normally distributed data with known variance. #' #' The conditional rejection probabilities are provided up to the specified stage. #' #' For Fisher's combination test, you can check the validity of the CRP calculation via simulation. 
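#'
#' For an inverse normal design, a call could look like this (a minimal
#' sketch; the dataset values are purely illustrative):
#' \preformatted{design <- getDesignInverseNormal(kMax = 2)
#' data <- getDataset(n = c(20, 30), means = c(50, 51), stDevs = c(130, 140))
#' getConditionalRejectionProbabilities(design, getStageResults(design, data))}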
#' #' @export #' #' @keywords internal #' #' @examples #' #' x <- getDesignFisher(kMax = 3, informationRates = c(0.1,0.8,1)) #' y <- getDataset(n = c(40,40), events = c(20,22)) #' getConditionalRejectionProbabilities(x, getStageResults(x, y, thetaH0 = 0.4)) #' # provides #' # [1] 0.0216417 0.1068607 NA #' getConditionalRejectionProbabilities <- function(design, stageResults, ...) { .assertIsTrialDesign(design) .assertIsStageResults(stageResults) if (.isTrialDesignInverseNormalOrGroupSequential(design)) { return(.getConditionalRejectionProbabilitiesInverseNormalorGroupSequential( design = design, stageResults = stageResults, ...)) } if (.isTrialDesignFisher(design)) { iterations <- .getOptionalArgument("iterations", ...) if (!is.null(iterations) && iterations > 0) { return(.getConditionalRejectionProbabilitiesFisherSimulated( design = design, stageResults = stageResults, ...)) } return(.getConditionalRejectionProbabilitiesFisher( design = design, stageResults = stageResults, ...)) } .stopWithWrongDesignMessage(design) } .getDecisionMatrixRoot <- function(design, stage, stageResults, tolerance, firstParameterName, case) { firstValue <- stageResults[[firstParameterName]][stage] if (.isTrialDesignGroupSequential(design)) { firstValue <- stats::qnorm(1 - firstValue) } if (firstValue >= 8){ return(NA_real_) } result <- .getOneDimensionalRoot( function(theta) { if (design$bindingFutility){ row1part1 <- design$futilityBounds[1:(stage - 1)] } else { row1part1 <- rep(C_FUTILITY_BOUNDS_DEFAULT, stage - 1) } row1part2 <- C_FUTILITY_BOUNDS_DEFAULT row2part1 <- design$criticalValues[1:(stage - 1)] row2part2 <- firstValue if (.isTrialDesignGroupSequential(design)){ if (stageResults$isDatasetSurvival()) { row1part3 <- theta * sqrt(design$informationRates[1:stage] / design$informationRates[stage]) * sqrt(stageResults$overallEvents[stage]) } else { if (stageResults$isOneSampleDataset()){ row1part3 <- theta * sqrt(design$informationRates[1:stage] / design$informationRates[stage]) * sqrt(stageResults$overallSampleSizes[stage]) } if (stageResults$isTwoSampleDataset()){ row1part3 <- theta * sqrt(design$informationRates[1:stage] / design$informationRates[stage]) / sqrt(1/stageResults$overallSampleSizes1[stage] + 1 / stageResults$overallSampleSizes2[stage]) } } } if (.isTrialDesignInverseNormal(design)){ if (stageResults$isDatasetSurvival()) { events <- stageResults$getDataInput()$getEventsUpTo(stage) adjInfRate <- cumsum(stageResults$weightsInverseNormal[1:stage] * sqrt(events[1:stage])) / sqrt(cumsum(stageResults$weightsInverseNormal[1:stage]^2)) } else { if (stageResults$isOneSampleDataset()){ sampleSizes <- stageResults$getDataInput()$getSampleSizesUpTo(stage) adjInfRate <- cumsum(stageResults$weightsInverseNormal[1:stage] * sqrt(sampleSizes[1:stage])) / sqrt(cumsum(stageResults$weightsInverseNormal[1:stage]^2)) } if (stageResults$isTwoSampleDataset()){ sampleSizes1 <- stageResults$getDataInput()$getSampleSizesUpTo(stage, 1) sampleSizes2 <- stageResults$getDataInput()$getSampleSizesUpTo(stage, 2) adjInfRate <- cumsum(stageResults$weightsInverseNormal[1:stage] / sqrt(1 / sampleSizes1[1:stage] + 1 / sampleSizes2[1:stage])) / sqrt(cumsum(stageResults$weightsInverseNormal[1:stage]^2)) } } row1part3 <- theta * adjInfRate } row2part3 <- row1part3 row1 <- c(row1part1, row1part2) - row1part3 row2 <- c(row2part1, row2part2) - row2part3 decisionMatrix <- matrix(c(row1, row2), nrow = 2, byrow = TRUE) probs <- .getGroupSequentialProbabilities(decisionMatrix = decisionMatrix, informationRates = 
				design$informationRates[1:stage])

			if (case == "finalConfidenceIntervalGeneralLower") {
				return(sum(probs[3, ] - probs[2, ]) - design$alpha / design$sided)
			} else if (case == "finalConfidenceIntervalGeneralUpper") {
				return(1 - sum(probs[3, ] - probs[2, ]) - design$alpha / design$sided)
			} else if (case == "medianUnbiasedGeneral") {
				return(sum(probs[3, ] - probs[2, ]) - 0.50)
			} else {
				stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'case' = '", case, "' is not implemented")
			}
		},
		lower = -8, upper = 8, tolerance = tolerance
	)
}
rpact/R/class_summary.R0000644000176200001440000010072313567754477014615 0ustar liggesusers
######################################################################################
#                                                                                    #
# -- RPACT design utilities --                                                       #
#                                                                                    #
# This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. #
#                                                                                    #
# File version: 1.0.0                                                                #
# Date: 29-10-2019                                                                   #
# Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD                             #
# Licensed under "GNU Lesser General Public License" version 3                       #
# License text can be found here: https://www.r-project.org/Licenses/LGPL-3         #
#                                                                                    #
# RPACT company website: https://www.rpact.com                                       #
# RPACT package website: https://www.rpact.org                                       #
#                                                                                    #
# Contact us for information about our services: info@rpact.com                      #
#                                                                                    #
######################################################################################

SummaryItem <- setRefClass("SummaryItem",
	fields = list(
		title = "character",
		values = "character",
		legendEntry = "list"
	),
	methods = list(
		initialize = function(...) {
			callSuper(...)
		},
		show = function() {
			cat(title, "=", values, "\n")
		},
		toList = function() {
			result <- list()
			result[[title]] <- values
			return(result)
		}
	)
)

SummaryFactory <- setRefClass("SummaryFactory",
	contains = "ParameterSet",
	fields = list(
		object = "ParameterSet",
		summaryItems = "list",
		intervalFormat = "character",
		justify = "character"
	),
	methods = list(
		initialize = function(..., intervalFormat = "[%s; %s]") {
			callSuper(..., intervalFormat = intervalFormat)
			summaryItems <<- list()
			justify <<- getOption("rpact.summary.justify", "right")
		},
		show = function(showType = 1, digits = NA_integer_) {
			.show(showType = showType, digits = digits, consoleOutputEnabled = TRUE)
		},
		.show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) {
			.cat(.createSummaryObjectTitle(object), "\n\n", heading = 1, consoleOutputEnabled = consoleOutputEnabled)
			header <- .createSummaryObjectHeader(object)
			if (header != "") {
				.cat(header, "\n\n", heading = 2, consoleOutputEnabled = consoleOutputEnabled)
			}

			legendEntries <- c()
			legendEntriesUnique <- c()
			summaryItemNames <- c()
			for (summaryItem in summaryItems) {
				summaryItemNames <- c(summaryItemNames, summaryItem$title)
				if (length(summaryItem$legendEntry) > 0) {
					a <- names(summaryItem$legendEntry)
					for (aa in a) {
						if (!(aa %in% legendEntriesUnique)) {
							legendEntriesUnique <- c(legendEntriesUnique, aa)
							b <- summaryItem$legendEntry[[aa]]
							legendEntries <- c(legendEntries, paste0(" ", aa, ": ", b))
						}
					}
				}
			}
			summaryItemNames <- paste0(format(summaryItemNames), " ")

			tableColumns <- 0
			maxValueWidth <- 1
			for (i in 1:length(summaryItems)) {
				validValues <- na.omit(summaryItems[[i]]$values)
				if (length(validValues) > 0) {
					w <- max(nchar(validValues))
					maxValueWidth <- max(maxValueWidth, w)
					tableColumns <- max(tableColumns, 1 + length(validValues))
				}
			}
			spaceString <- paste0(rep(" ", maxValueWidth + 1), collapse = "")

			for (i in 1:length(summaryItems)) {
				summaryItemName <- summaryItemNames[i]
				values <- summaryItems[[i]]$values
				values <- trimws(values)
				indices <- !grepl("(\\])$", values)
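				# Align the table columns: values that do not end in "]" (i.e.,
				# are not interval strings) get a trailing pad blank, and all
				# values are then justified (right-aligned by default, see the
				# option "rpact.summary.justify") against a spacer of maximal
				# column width so that the rows line up.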
values[indices] <- paste0(values[indices], " ") values <- format(c(spaceString, values), justify = justify)[2:(length(values) + 1)] .cat(summaryItemName, values, "\n", tableColumns = tableColumns, consoleOutputEnabled = consoleOutputEnabled) if (!consoleOutputEnabled && trimws(summaryItemName) == "Stage") { .cat(rep("----- ", tableColumns), "\n", tableColumns = tableColumns, consoleOutputEnabled = consoleOutputEnabled) } } if (length(legendEntries) > 0) { .cat("\n", consoleOutputEnabled = consoleOutputEnabled) .cat("Legend:\n", consoleOutputEnabled = consoleOutputEnabled) for (legendEntry in legendEntries) { .cat(legendEntry, "\n", consoleOutputEnabled = consoleOutputEnabled) } .cat("\n", consoleOutputEnabled = consoleOutputEnabled) } }, addItem = function(title, values, legendEntry = list()) { if (!is.character(values)) { values <- as.character(values) } tryCatch({ addSummaryItem(SummaryItem(title = title, values = values, legendEntry = legendEntry)) }, error = function(e) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "failed to add summary item '", title, "' = ", .arrayToString(values), " (class: ", class(values), "): ", e$message) }) }, addSummaryItem = function(summaryItem) { if (!inherits(summaryItem, "SummaryItem")) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'summaryItem' must be an instance of class 'SummaryItem' (was '", class(summaryItem), "')") } summaryItems <<- c(summaryItems, summaryItem) }, addParameter = function(parameterSet, ..., parameterName = NULL, values = NULL, parameterCaption, roundDigits = NA_integer_, ceilingEnabeld = FALSE, cumsumEnabled = FALSE, twoSided = FALSE, parameterCaptionSingle = parameterCaption, legendEntry = list()) { parameterName1 <- parameterName[1] if (is.character(parameterName1)) { if (!is.null(values)) { warning("'values' (", .arrayToString(values), ") will be ignored because 'parameterName' (", parameterName1, ") is defined") } values <- parameterSet[[parameterName1]] } parameterName2 <- NA_character_ values2 <- NA_real_ if (length(parameterName) > 1) { parameterName2 <- parameterName[2] values2 <- parameterSet[[parameterName2]] } if (is.null(values) && is.null(parameterName1)) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'parameterName' or 'values' must be defined") } parameterNames <- "" numberOfVariants <- 1 numberOfStages <- length(values) if (inherits(parameterSet, "ParameterSet")) { parameterNames <- parameterSet$.getVisibleFieldNamesOrdered() numberOfVariants <- parameterSet$.getMultidimensionalNumberOfVariants(parameterNames) numberOfStages <- parameterSet$.getMultidimensionalNumberOfStages(parameterNames) } if (twoSided) { values <- 2 * values } if ((!is.matrix(values) || ncol(values) == 1) && (.isTrialDesign(parameterSet) || (numberOfStages > 1 && numberOfStages == length(values)) || length(values) != numberOfVariants || length(values) == 1) || (parameterName == "futilityBoundsEffectScale" && ncol(values) == 1)) { valuesToShow <- .getSummaryValuesFormatted(parameterSet, parameterName1, values, roundDigits = roundDigits, ceilingEnabeld = ceilingEnabeld, cumsumEnabled = cumsumEnabled) valuesToShow <- .getInnerValues(valuesToShow) if (!all(is.na(values2))) { valuesToShow2 <- .getSummaryValuesFormatted(parameterSet, parameterName1, values2, roundDigits = roundDigits, ceilingEnabeld = ceilingEnabeld, cumsumEnabled = cumsumEnabled) valuesToShow2 <- .getInnerValues(valuesToShow2) if (length(valuesToShow) == length(valuesToShow2) && !all(is.na(valuesToShow2))) { for (variantIndex in 1:length(valuesToShow)) { valuesToShow[variantIndex] <- sprintf( 
intervalFormat, valuesToShow[variantIndex], valuesToShow2[variantIndex]) } } } addItem(parameterCaptionSingle, valuesToShow, legendEntry) } else { if (!inherits(parameterSet, "ParameterSet")) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "for varied values 'parameterSet' must be an instance of ", "class 'ParameterSet' (was '", class(parameterSet), "')") } variedParameter <- parameterSet$.getVariedParameter(parameterNames, numberOfVariants) if (length(variedParameter) == 0 || variedParameter == "") { return(invisible()) } variedParameterCaption <- parameterSet$.getDataFrameColumnCaption(variedParameter, tableColumnNames = C_TABLE_COLUMN_NAMES, niceColumnNamesEnabled = TRUE) variedParameterCaption <- tolower(variedParameterCaption) if (variedParameterCaption == "alternative") { legendEntry[["alt."]] <- "alternative" variedParameterCaption <- "alt." } else if (variedParameterCaption == "hazard ratio") { legendEntry[["HR"]] <- "hazard ratio" variedParameterCaption <- "HR" } else if (grepl(" \\(1\\)$", variedParameterCaption)) { groups <- parameterSet[["groups"]] if (!is.null(groups) && length(groups) == 1 && groups == 1) { variedParameterCaption = sub(" \\(1\\)$", "", variedParameterCaption) } else { legendEntry[["(1)"]] <- "values of treatment arm 1" } } variedParameterValues <- round(parameterSet[[variedParameter]], 3) for (variantIndex in 1:numberOfVariants) { colValues <- .getColumnValues(values, variantIndex) colValues <- .getSummaryValuesFormatted(parameterSet, parameterName1, colValues, roundDigits = roundDigits, ceilingEnabeld = ceilingEnabeld, cumsumEnabled = cumsumEnabled) colValues2 <- NA_real_ if (!all(is.na(values2))) { colValues2 <- .getColumnValues(values2, variantIndex) colValues2 <- .getSummaryValuesFormatted(parameterSet, parameterName2, colValues2, roundDigits = roundDigits, ceilingEnabeld = ceilingEnabeld, cumsumEnabled = cumsumEnabled) } if (length(colValues) == length(colValues2) && !all(is.na(colValues2))) { for (valueIndex in 1:length(colValues)) { colValues[valueIndex] <- sprintf(intervalFormat, colValues[valueIndex], colValues2[valueIndex]) } } addItem(paste0(parameterCaption, ", ", variedParameterCaption," = ", variedParameterValues[variantIndex]), colValues, legendEntry) } } }, .getInnerValues = function(values) { if (!is.matrix(values)) { return(values) } if (nrow(values) == 1 && ncol(values) == 1) { return(values[1, 1]) } return(values[, 1]) }, .getColumnValues = function(values, variantIndex) { if (length(values) <= 1 && !is.matrix(values)) { colValues <- values } else if (is.matrix(values)) { if (nrow(values) == 1 && ncol(values) == 1) { colValues <- values[1, 1] } else if (ncol(values) == 1) { colValues <- values[variantIndex, 1] } else { colValues <- values[, variantIndex] } } else { colValues <- values[variantIndex] } return(colValues) } ) ) .formatSummaryValues <- function(values, digits) { if (sum(is.na(values)) == length(values)) { return(values) } threshold <- 10^-digits text <- "<0." 
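	# Build the "below threshold" label for values smaller than 10^-digits:
	# e.g., digits = 3 gives threshold 0.001 and the label "<0.001", which is
	# displayed instead of rounding such values down to zero.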
for (i in 1:(digits - 1)) { text <- paste0(text, "0") } text <- paste0(text, "1") indices <- (!is.na(values) & values > 1e-24 & abs(values) < threshold) values[!is.na(values) & !indices] <- round(values[!is.na(values) & !indices], digits) if (sum(indices) > 0) { values[indices] <- threshold formattedValue <- .getFormattedValue(values, digits = digits, nsmall = digits) formattedValue[indices] <- text } else { formattedValue <- .getFormattedValue(values, digits = digits, nsmall = digits) formattedValue <- format(formattedValue) } if (as.logical(getOption("rpact.summary.trim.zeroes", TRUE))) { zeroes <- grepl("^0\\.0*$", formattedValue) if (sum(zeroes) > 0) { formattedValue[zeroes] <- "0" } } return(formattedValue) } .getFormattedSimpleBoundarySummaryValues <- function(values, digits = NA_integer_) { if (is.na(digits) || digits < 1) { digits <- 3 } return(.formatSummaryValues(values = values, digits = digits)) } .getSummaryValuesFormatted <- function(fieldSet, parameterName, values, roundDigits = NA_integer_, ceilingEnabeld = FALSE, cumsumEnabled = FALSE) { if (!is.numeric(values)) { return(values) } if (cumsumEnabled) { values <- cumsum(values) } if (ceilingEnabeld) { values <- ceiling(values) } else { tryCatch({ formatFunctionName <- NULL if (!is.null(parameterName) && length(parameterName) == 1 && !is.na(parameterName) && !is.na(roundDigits) && roundDigits < 1 && inherits(fieldSet, "FieldSet")) { formatFunctionName <- fieldSet$.parameterFormatFunctions[[parameterName]] } if (!is.null(formatFunctionName)) { values <- eval(call(formatFunctionName, values)) } else { values <- .getFormattedSimpleBoundarySummaryValues(values, digits = roundDigits) } }, error = function(e) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "failed to show parameter '", parameterName, "': ", e$message) }) } return(format(values)) } .createSummaryObjectTitle <- function(object) { design <- NULL designPlan <- NULL if (.isTrialDesignPlan(object) || inherits(object, "SimulationResults")) { design <- object$.design designPlan <- object } else if (.isTrialDesign(object)) { design <- object } if (!is.null(design)) { return(.createSummaryDesignTitle(design, designPlan)) } return("") } .createSummaryDesignTitle <- function(design, designPlan) { kMax <- design$kMax title <- "" if (kMax == 1) { title <- paste0(title, "Fixed sample analysis") } else { title <- paste0(title, "Sequential analysis with a maximum of ", kMax, " looks") } if (!is.null(designPlan)) { if (inherits(designPlan, "SimulationResults")) { title <- "Simulation of a " } else if (designPlan$.isSampleSizeObject()) { title <- "Sample size calculation for a " } else if (designPlan$.isPowerObject()) { title <- "Power calculation for a " } if (grepl("Means", class(designPlan))) { title <- paste0(title, "continuous endpoint") } else if (grepl("Rates", class(designPlan))) { title <- paste0(title, "binary endpoint") } else if (grepl("Survival", class(designPlan))) { title <- paste0(title, "survival endpoint") } } else if (kMax > 1) { title <- paste0(title, " (", design$.toString(startWithUpperCase = FALSE), ")") } return(title) } .createSummaryObjectHeader <- function(object) { design <- NULL designPlan <- NULL if (.isTrialDesignPlan(object) || inherits(object, "SimulationResults")) { design <- object$.design designPlan <- object } else if (.isTrialDesign(object)) { design <- object } if (!is.null(design)) { return(.createSummaryDesignHeader(design, designPlan)) } return("") } .addOptimumAllocationRatioToHeader <- function(designPlan, header) { if 
(!.isTrialDesignPlanSurvival(designPlan) && !grepl("Simulation", class(designPlan)) && designPlan$groups == 1) { return(header) } prefix <- "" if (!is.null(designPlan[["optimumAllocationRatio"]]) && length(designPlan$optimumAllocationRatio) == 1 && designPlan$optimumAllocationRatio) { if (length(unique(designPlan$allocationRatioPlanned)) > 1) { return(paste0(header, ", optimum allocation ratio")) } prefix <- "optimum " } return(paste0(header, ", ", prefix, "allocation ratio = ", round(unique(designPlan$allocationRatioPlanned), 3))) } .createSummaryDesignHeader <- function(design, designPlan) { if (is.null(designPlan)) { return("") } header <- "" if (design$kMax == 1) { header <- paste0(header, "Fixed sample analysis.\n") } else { header <- paste0(header, "Sequential analysis with a maximum of ", design$kMax, " looks") header <- paste0(header, " (", design$.toString(startWithUpperCase = FALSE), ").\n") } header <- paste0(header, "The ", ifelse(inherits(designPlan, "SimulationResults") || designPlan$.isPowerObject(), "results were ", "sample size was ")) header <- paste0(header, ifelse(inherits(designPlan, "SimulationResults"), "simulated", "calculated")) header <- paste0(header, " for a ") if (grepl("Means", class(designPlan))) { if (designPlan$groups == 1) { header <- paste0(header, "one-sample t-test") } else if (designPlan$groups == 2) { header <- paste0(header, "two-sample t-test") } } else if (grepl("Rates", class(designPlan))) { if (designPlan$groups == 1) { header <- paste0(header, "one-sample test for rates") } else if (designPlan$groups == 2) { header <- paste0(header, "two-sample test for rates") } } else if (grepl("Survival", class(designPlan))) { header <- paste0(header, "two-sample logrank test") } if (nchar(header) > 0) { # header <- paste0(header, " with ") } if (design$sided == 1) { header <- paste0(header, " (one-sided)") } else { header <- paste0(header, " (two-sided)") } if (grepl("Means", class(designPlan)) && (.isTrialDesignInverseNormalOrGroupSequential(design) || inherits(designPlan, "SimulationResults"))) { alternativeText <- "alternative as specified" if (length(designPlan$alternative) == 1) { alternativeText <- paste0("alternative = ", round(designPlan$alternative, 3)) } header <- paste0(header, ",", "\n", alternativeText, ", standard deviation = ", round(designPlan$stDev, 3)) header <- .addOptimumAllocationRatioToHeader(designPlan, header) } else if (grepl("Rates", class(designPlan)) && (.isTrialDesignInverseNormalOrGroupSequential(design) || inherits(designPlan, "SimulationResults"))) { if (designPlan$groups == 1) { if (length(designPlan$pi1) == 1) { treatmentRateText <- paste0("treatment rate pi = ", round(designPlan$pi1, 3)) } else { treatmentRateText <- "treatment rate (pi) as specified" } header <- paste0(header, ",", "\n", treatmentRateText, ", H0: pi = ", designPlan$thetaH0) } else { if (length(designPlan$pi1) == 1) { treatmentRateText <- paste0("treatment rate pi (1) = ", round(designPlan$pi1, 3)) } else { treatmentRateText <- "treatment rate pi (1) as specified" } header <- paste0(header, ",", "\n", treatmentRateText, ", control rate pi (2) = ", round(designPlan$pi2, 3)) header <- .addOptimumAllocationRatioToHeader(designPlan, header) } } else if (grepl("Survival", class(designPlan)) && (.isTrialDesignInverseNormalOrGroupSequential(design) || inherits(designPlan, "SimulationResults"))) { parameterNames <- designPlan$.getVisibleFieldNamesOrdered() numberOfVariants <- designPlan$.getMultidimensionalNumberOfVariants(parameterNames) userDefinedParam <- "pi1" 
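	# Determine which effect-scale parameter the user actually specified
	# (pi1, lambda1, median1, or hazardRatio); the last user-defined match
	# in the loop below wins, with "pi1" as the fallback.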
for (param in c("pi1", "lambda1", "median1", "hazardRatio")) { if (designPlan$.getParameterType(param) == C_PARAM_USER_DEFINED && length(designPlan[[param]]) == numberOfVariants) { userDefinedParam <- param } } paramValue <- designPlan[[userDefinedParam]] if (is.null(paramValue) || length(paramValue) == 0 || all(is.na(paramValue))) { userDefinedParam <- "hazardRatio" } paramName <- "treatment pi (1)" if (userDefinedParam == "lambda1") { paramName <- "treatment lambda (1)" } else if (userDefinedParam == "median1") { paramName <- "treatment median (1)" } else if (userDefinedParam == "hazardRatio") { paramName <- "hazard ratio" } if (length(designPlan[[userDefinedParam]]) == 1) { s <- ifelse(grepl("\\)$", paramName), " = ", " ") header <- paste0(header, ",", "\n", paramName, s, round(designPlan[[userDefinedParam]], 3)) } else { header <- paste0(header, ",", "\n", paramName, " as specified") } if (userDefinedParam %in% c("hazardRatio", "pi1") && (designPlan$.getParameterType("pi2") == C_PARAM_USER_DEFINED || designPlan$.getParameterType("pi2") == C_PARAM_DEFAULT_VALUE) && length(designPlan$pi2) == 1) { header <- paste0(header, ", control pi (2) = ", round(designPlan$pi2, 3)) } else if (userDefinedParam %in% c("hazardRatio", "lambda1") && (designPlan$.getParameterType("lambda2") == C_PARAM_USER_DEFINED || designPlan$.getParameterType("lambda2") == C_PARAM_DEFAULT_VALUE) && length(designPlan$lambda2) == 1) { header <- paste0(header, ", control lambda (2) = ", round(designPlan$lambda2, 3)) } else if (userDefinedParam %in% c("hazardRatio", "median1") && (designPlan$.getParameterType("median2") == C_PARAM_USER_DEFINED || designPlan$.getParameterType("median2") == C_PARAM_GENERATED) && length(designPlan$median2) == 1) { header <- paste0(header, ", control median (2) = ", round(designPlan$median2, 3)) } else if (designPlan$.piecewiseSurvivalTime$piecewiseSurvivalEnabled) { header <- paste0(header, ", piecewise survival distribution") } header <- .addOptimumAllocationRatioToHeader(designPlan, header) } if (!inherits(designPlan, "SimulationResults") && designPlan$.isSampleSizeObject()) { header <- paste0(header, ", and power ", paste0(round(100 * (1 - design$beta), 1), "%")) } header <- paste0(header, ".") return(header) } .createSummary <- function(object, digits = NA_integer_) { if (.isTrialDesign(object) || .isTrialDesignPlan(object) || inherits(object, "SimulationResults")) { return(.createDesignPlanSummary(object, digits = digits)) } stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "function 'summary' not implemented yet for class ", class(object)) } .createDesignPlanSummary <- function(object, digits = NA_integer_) { designPlan <- NULL if (.isTrialDesignPlan(object) || inherits(object, "SimulationResults")) { design <- object$.design designPlan <- object } else if (.isTrialDesign(object)) { design <- object } else { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'object' must be a valid design, design plan, ", "or simulation result object (is class ", class(object), ")") } if (is.na(digits)) { digits <- as.numeric(getOption("rpact.summary.digits", 3)) } .assertIsSingleInteger(digits, "digits", validateType = FALSE, naAllowed = TRUE) .assertIsInClosedInterval(digits, "digits", lower = -1, upper = 12, naAllowed = TRUE) intervalFormat <- getOption("rpact.summary.intervalFormat", "[%s; %s]") digitsSampleSize <- 1 if (!is.na(digits)) { if (digits > 0) { digitsGeneral <- digits digitsProbabilities <- digits if (!as.logical(getOption("rpact.summary.digits.fixed", FALSE))) { digitsProbabilities <- digitsProbabilities + 1 
} } else { digitsGeneral <- -1 digitsProbabilities <- -1 } } else { digitsGeneral <- NA_integer_ digitsProbabilities <- NA_integer_ } designCharacteristics <- NULL probsH0 <- NULL probsH1 <- NULL if (.isTrialDesignInverseNormalOrGroupSequential(design) && design$kMax > 1) { designCharacteristics <- getDesignCharacteristics(design) probsH0 <- getPowerAndAverageSampleNumber(design, theta = 0, nMax = designCharacteristics$shift) probsH1 <- getPowerAndAverageSampleNumber(design, theta = 1, nMax = designCharacteristics$shift) } if (!is.null(designPlan) && design$kMax > 1) { probsH1 <- list( earlyStop = designPlan$rejectPerStage[1:(design$kMax - 1), ] + designPlan$futilityPerStage, rejectPerStage = designPlan$rejectPerStage, futilityPerStage = designPlan$futilityPerStage ) numberOfVariants <- 1 if (inherits(designPlan, "ParameterSet")) { parameterNames <- designPlan$.getVisibleFieldNamesOrdered() numberOfVariants <- designPlan$.getMultidimensionalNumberOfVariants(parameterNames) } if (numberOfVariants > 1 && is.matrix(probsH1$earlyStop) && ncol(probsH1$earlyStop) == 1) { probsH1$earlyStop <- matrix(rep(probsH1$earlyStop, numberOfVariants), ncol = numberOfVariants) probsH1$rejectPerStage <- matrix(rep(probsH1$rejectPerStage, numberOfVariants), ncol = numberOfVariants) probsH1$futilityPerStage <- matrix(rep(probsH1$futilityPerStage, numberOfVariants), ncol = numberOfVariants) } } summaryFactory <- SummaryFactory(object = object, intervalFormat = intervalFormat) if (design$kMax > 1) { summaryFactory$addItem("Stage", c(1:design$kMax)) summaryFactory$addItem("Information rate", paste0(round(100 * design$informationRates, 1), "%")) } else { summaryFactory$addItem("Stage", "Fixed") } summaryFactory$addParameter(design, parameterName = "criticalValues", parameterCaption = ifelse(.isTrialDesignFisher(design), "Efficacy boundary (p product scale)", "Efficacy boundary (z-value scale)"), roundDigits = digitsGeneral) if (.isTrialDesignFisher(design)) { if (any(design$alpha0Vec < 1)) summaryFactory$addParameter(design, parameterName = "alpha0Vec", parameterCaption = "Futility boundary (separate p-value scale)", roundDigits = digitsGeneral) } else { if (any(design$futilityBounds > - 6)) summaryFactory$addParameter(design, parameterName = "futilityBounds", parameterCaption = "Futility boundary (z-value scale)", roundDigits = digitsGeneral) } if (!is.null(designPlan)) { if (inherits(designPlan, "SimulationResults")) { parameterName1 <- ifelse(grepl("Survival", class(designPlan)), "numberOfSubjects", "sampleSizes") parameterName2 <- "eventsPerStage" } else { if (design$kMax == 1 && designPlan$.isSampleSizeObject()) { parameterName1 <- "nFixed" parameterName2 <- "eventsFixed" } else { parameterName1 <- "numberOfSubjects" parameterName2 <- "eventsPerStage" } } subjectsCaption <- ifelse(design$kMax > 1 && inherits(designPlan, "SimulationResults") && !grepl("Survival", class(designPlan)), "Stagewise number of subjects", "Number of subjects") summaryFactory$addParameter(designPlan, parameterName = parameterName1, parameterCaption = subjectsCaption, ceilingEnabeld = TRUE, cumsumEnabled = FALSE) if (grepl("Survival", class(designPlan))) { summaryFactory$addParameter(designPlan, parameterName = parameterName2, parameterCaption = ifelse(design$kMax == 1, "Number of events", "Cumulative number of events"), roundDigits = digitsSampleSize) summaryFactory$addParameter(designPlan, parameterName = "analysisTime", parameterCaption = "Analysis time", roundDigits = digitsSampleSize) } } if (!is.null(designPlan) && 
!is.null(designPlan[["allocationRatioPlanned"]]) && length(unique(designPlan$allocationRatioPlanned)) > 1) { summaryFactory$addParameter(designPlan, parameterName = "allocationRatioPlanned", parameterCaption = "Optimum allocation ratio", roundDigits = digitsGeneral) } if (design$kMax > 1 && !inherits(designPlan, "SimulationResults")) { summaryFactory$addParameter(design, parameterName = "alphaSpent", parameterCaption = "Cumulative alpha spent", roundDigits = digitsProbabilities) } if (!is.null(designPlan)) { parameterName <- "rejectPerStage" if (design$kMax == 1) { parameterName <- "overallReject" } if (any(!is.na(designPlan[[parameterName]]))) { powerText <- ifelse(design$kMax == 1, "Power", "Cumulative power") if (inherits(designPlan, "SimulationResults")) { powerText <- ifelse(design$kMax == 1, "Simulated power", "Simulated cumulative power") } summaryFactory$addParameter(designPlan, parameterName = parameterName, parameterCaption = powerText, roundDigits = digitsProbabilities, cumsumEnabled = TRUE) } } else if (!is.null(designCharacteristics)) { summaryFactory$addParameter(designCharacteristics, parameterName = "power", parameterCaption = ifelse(design$kMax == 1, "Power", "Cumulative power"), roundDigits = digitsProbabilities) } if (!inherits(designPlan, "SimulationResults")) { summaryFactory$addParameter(design, parameterName = "stageLevels", twoSided = design$sided == 2, parameterCaption = paste0(ifelse(design$sided == 2, "Two", "One"), "-sided local significance level"), roundDigits = digitsProbabilities) } if (!is.null(designPlan) && .isTrialDesignPlan(designPlan)) { if (ncol(designPlan$criticalValuesEffectScale) > 0) { summaryFactory$addParameter(designPlan, parameterName = "criticalValuesEffectScale", parameterCaption = "Efficacy boundary (t)", roundDigits = digitsGeneral, legendEntry = list("(t)" = "approximate treatment effect scale")) } else if (ncol(designPlan$criticalValuesEffectScaleUpper) > 0) { if (as.logical(getOption("rpact.summary.enforceIntervalView", FALSE))) { summaryFactory$addParameter(designPlan, parameterName = c("criticalValuesEffectScaleLower", "criticalValuesEffectScaleUpper"), parameterCaption = "Efficacy boundary (t)", roundDigits = digitsGeneral, legendEntry = list("(t)" = "approximate treatment effect scale")) } else { summaryFactory$addParameter(designPlan, parameterName = "criticalValuesEffectScaleLower", parameterCaption = "Lower efficacy boundary (t)", roundDigits = digitsGeneral, legendEntry = list("(t)" = "approximate treatment effect scale")) summaryFactory$addParameter(designPlan, parameterName = "criticalValuesEffectScaleUpper", parameterCaption = "Upper efficacy boundary (t)", roundDigits = digitsGeneral, legendEntry = list("(t)" = "approximate treatment effect scale")) } } if (ncol(designPlan$futilityBoundsEffectScale) > 0 && !all(is.na(designPlan$futilityBoundsEffectScale))) { summaryFactory$addParameter(designPlan, parameterName = "futilityBoundsEffectScale", parameterCaption = "Futility boundary (t)", roundDigits = digitsGeneral, legendEntry = list("(t)" = "approximate treatment effect scale")) } } if (!is.null(designPlan) && !inherits(designPlan, "SimulationResults") && !is.null(probsH1) && !is.null(probsH0) && design$kMax > 1) { probsH0$earlyStop <- matrix(probsH0$earlyStop[1:(design$kMax - 1), 1], ncol = 1) probsH0$rejectPerStage <- matrix(probsH0$rejectPerStage[1:(design$kMax - 1), 1], ncol = 1) if (is.matrix(probsH1$rejectPerStage)) { if (!is.null(designPlan) && design$kMax > 1 && .isTrialDesignPlan(designPlan) && 
				designPlan$.isSampleSizeObject()) {
				probsH1$rejectPerStage <- probsH1$rejectPerStage[1:(design$kMax - 1), 1]
			} else {
				probsH1$rejectPerStage <- matrix(probsH1$rejectPerStage[1:(design$kMax - 1), ],
					ncol = ncol(probsH1$rejectPerStage))
			}
		} else {
			probsH1$rejectPerStage <- probsH1$rejectPerStage[1:(design$kMax - 1)]
		}

		if (any(design$futilityBounds > -6)) {
			if (is.matrix(probsH1$earlyStop)) {
				probsH1$earlyStop <- matrix(probsH1$earlyStop[1:(design$kMax - 1), ],
					ncol = ncol(probsH1$earlyStop))
			} else {
				probsH1$earlyStop <- probsH1$earlyStop[1:(design$kMax - 1)]
			}
			summaryFactory$addParameter(probsH0, parameterName = "earlyStop",
				parameterCaption = "Overall exit probability (under H0)", roundDigits = digitsProbabilities)
			x <- designPlan
			if (is.null(x)) {
				x <- design
			}
			summaryFactory$addParameter(x, values = probsH1$earlyStop,
				parameterCaption = "Overall exit probability (under H1)", roundDigits = digitsProbabilities)
		}

		summaryFactory$addParameter(probsH0, parameterName = "rejectPerStage",
			parameterCaption = "Exit probability for efficacy (under H0)", roundDigits = digitsProbabilities)
		if (!is.null(designPlan) && (inherits(designPlan, "SimulationResults") ||
				.isTrialDesignPlan(designPlan) && designPlan$.isPowerObject())) {
			summaryFactory$addParameter(designPlan, values = probsH1$rejectPerStage,
				parameterCaption = "Exit probability for efficacy (under H1)", roundDigits = digitsProbabilities)
		} else {
			summaryFactory$addParameter(probsH1, parameterName = "rejectPerStage",
				parameterCaption = "Exit probability for efficacy (under H1)", roundDigits = digitsProbabilities)
		}

		if (any(design$futilityBounds > -6)) {
			summaryFactory$addParameter(probsH0, parameterName = "futilityPerStage",
				parameterCaption = "Exit probability for futility (under H0)", roundDigits = digitsProbabilities)
			x <- designPlan
			if (is.null(x)) {
				x <- design
			}
			summaryFactory$addParameter(x, values = probsH1$futilityPerStage,
				parameterCaption = "Exit probability for futility (under H1)", roundDigits = digitsProbabilities)
		}
	}

	return(summaryFactory)
}
rpact/NEWS.md0000644000176200001440000001476613574437143012502 0ustar liggesusers
# rpact 2.06

* Boundaries on effect scale for testing means now account for the unknown variance case
* getAnalysisSurvival: calculation of stagewise results is no longer performed in getStageResults
* getStageResults: the calculation of 'effectSizes' for survival data and thetaH0 != 1 was corrected
* getDataset of survival data: issue with the internal storage of log-rank values fixed
* Sample size plot: issue for kMax = 1 fixed
* getSampleSizeSurvival with piecewise survival time: issue with calculation of 'maxNumberOfSubjects' for given 'followUpTime' fixed
* Internal Shiny app interface improved
* Minor improvements

# rpact 2.05

* Assumed median survival time: get[SampleSize/Power/Simulation]Survival now support direct input of arguments 'median1' and 'median2'
* Output of generic function 'summary' improved
* Plot type 5 of getPower[...] and getSimulation[...] objects improved
* Output of getSampleSizeSurvival with given maxNumberOfSubjects improved
* Output of get[SampleSize/Power]Survival for kappa != 1 improved
* Assert function for minNumberOfSubjectsPerStage corrected for undefined conditionalPower
* Two-sided boundaries on effect scale in survival design improved
* Error in 'summary' for getDesign[...] fixed
* Other minor improvements

# rpact 2.04

* Incorrect output of function 'summary' fixed for getSampleSize[...] and getPower[...]
# rpact 2.03 ## New features * Plot function for Fisher design implemented * Generic function 'summary' implemented for getDesign[...], getSampleSize[...], getPower[...], and getSimulation[...] results: a simple boundary summary will be displayed ## Improvements, issues and changes * Generic function as.data.frame improved for getDesign[...], getSampleSize[...], getPower[...], and getSimulation[...] results * Output of getStageResults() improved * Improvements for Shiny app compatibility and better Shiny app performance * Repeated p-values are no longer calculated for typeOfDesign = "WToptimum" * Piecewise survival time improved for numeric definition: median and pi are no longer calculated and displayed * Plot: legend title and tick mark positioning improved; optional arguments xlim and ylim implemented * Sample size/power: usage of argument 'twoSidedPower' optimized * Performance of function rpwexp/getPiecewiseExponentialRandomNumbers improved (special thanks to Marcel Wolbers for his example code) * For group sequential designs a warning will be displayed if the information rates of the design do not match the information observed in the data * Format for output of standard deviation optimized # rpact 2.02 * Minor corrections in the inline help * Labeling of lower and upper critical values (effect scale) reverted * Simulation for Fisher's combination test corrected * Parameter minNumberOfAdditionalEventsPerStage renamed to minNumberOfEventsPerStage * Parameter maxNumberOfAdditionalEventsPerStage renamed to maxNumberOfEventsPerStage * Parameter minNumberOfAdditionalSubjectsPerStage renamed to minNumberOfSubjectsPerStage * Parameter maxNumberOfAdditionalSubjectsPerStage renamed to maxNumberOfSubjectsPerStage * Output of function getAccrualTime() improved * Validation of arguments maxNumberOfIterations, allocation1, and allocation2 added: check for positive integer * Function getSampleSizeSurvival improved: numeric search for accrualTime if followUpTime is given * Default value improved for analysis tools: if no effect was specified for the conditional power calculation, the observed effect is selected * Fixed: function getDataset produced an error if only one log-rank value and one event was defined * Numbers of subjects per treatment arm are provided in the output of simulation survival if allocation ratio != 1 * Function getSimulationSurvival improved: first value of minNumberOfEventsPerStage and maxNumberOfEventsPerStage must be NA or equal to the first value of plannedEvents # rpact 2.0.1 * Function base::isFALSE replaced to guarantee R 3.4.x compatibility * C++ compiler warning on r-devel-linux-x86_64-debian-clang system removed * C++ compiler error on r-patched-solaris-x86 system fixed # rpact 2.0.0 ## New features * Power calculation at given or adapted sample size for means, rates, and survival data * Sample size and power calculation for survival trials with piecewise accrual time and intensity * Sample size and power calculation for survival trials with exponential survival time, piecewise exponential survival time, and survival times that follow a Weibull distribution * Simulation tool for survival trials; our simulator is very fast because it was implemented in C++. Adaptive event number recalculations based on conditional power can be assessed * Simulation tool for designs with continuous and binary endpoints; adaptive sample size recalculations based on conditional power can be assessed (see the sketch after this list) * Comprehensive and unified tool for performing sample size calculation for the fixed sample size design * Enhanced plot functionalities
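A minimal sketch of such a conditional-power-based sample size recalculation (all argument values below are illustrative assumptions, not recommendations):

``` r
design <- getDesignInverseNormal(kMax = 3)

# continuous endpoint: stages 2 and 3 are re-sized between 50 and 200
# subjects per stage, aiming at a conditional power of 0.8
simulationResults <- getSimulationMeans(design,
    stDev = 4, alternative = 1,
    plannedSubjects = c(100, 200, 300),
    conditionalPower = 0.8,
    minNumberOfSubjectsPerStage = c(NA, 50, 50),
    maxNumberOfSubjectsPerStage = c(NA, 200, 200),
    maxNumberOfIterations = 1000)
summary(simulationResults)
```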
## Improvements, issues and changes * Fisher design, analysis of means or rates, conditional rejection probabilities (CRP): calculation issue fixed for stage > 2 * Call of getSampleSize[Means/Rates/Survival] without design argument implemented * For all 'set.seed' calls 'kind' and 'normal.kind' were specified as follows: kind = "Mersenne-Twister", normal.kind = "Inversion" * Minor code optimizations, e.g., 'return()' replaced by 'return(invisible())' where reasonable * Bug in 'readDatasets' fixed: variable names 'group' and 'groups' are now accepted * "Overall reject per stage" and "Overall futility per stage" renamed to "Overall reject" and "Overall futility", respectively (also variable names). * Labels "events.." and "..patients.." consistently changed to "# events.." and "# patients...", respectively. * Output format for 'allocationRatioPlanned' specified * Method 'show' of class 'ParameterSet' expanded: R Markdown output features implemented * getSampleSizeSurvival(): argument 'maxNumberOfPatients' was renamed to 'maxNumberOfSubjects' * Result output, inline help, and documentation: the word 'patient' was replaced by 'subject' * Variables 'numberOfSubjectsGroup1' and 'numberOfSubjectsGroup2' were renamed to 'numberOfSubjects1' and 'numberOfSubjects2' * Final p-values for two-sided tests (group sequential, inverse normal, and Fisher combination test) available * Upper and lower boundaries on effect scale for testing rates in two samples available # rpact 1.0.0 * First release of rpact rpact/MD50000644000176200001440000003061613574450502011706 0ustar liggesusersd0195bae113d278217bb48640761cdbb *DESCRIPTION 3b53b1254822c3f8de25d7280f7a1fe7 *NAMESPACE 8e328dec32defed13aa387f3b816f993 *NEWS.md f677e734dd51a48939833f17aa144afc *R/RcppExports.R e0ea6baeb3dc938dd6a3c964450f5b71 *R/class_analysis_dataset.R 183f5e191c1a6f76ba30c3f5ab853c7d *R/class_analysis_results.R 99a6ed59aac58b8bb1dd28a3f1c84eb0 *R/class_analysis_stage_results.R f1e013e25092f0988767d55718ec2b5e *R/class_core_parameter_set.R 3a6a3cd095824f1740331289709a90d0 *R/class_core_plot_settings.R 819c2f8740beeb7d22569fec88e7e07a *R/class_design.R 7d7a6e418af54acc459c9daedbfbb0c6 *R/class_design_plan.R 3d449c7227edd0ed6eda45b903d0e663 *R/class_design_power_and_asn.R 6ba3144f20122ea29f9d4ab0470112d1 *R/class_design_set.R 15deda755c3a19fffe543f24cc5115b8 *R/class_event_probabilities.R f8ca1395c530f162421593876ea21a7f *R/class_simulation_results.R 104bc4f4502506acc39cd4ac9da27e96 *R/class_summary.R db022616dace267f448e9bbc50330611 *R/class_time.R b6a7298ee1e1effacfe6360c403ad018 *R/f_analysis_base.R ffb91cfb7949fb0e96b29e22c2c7518b *R/f_analysis_base_means.R ebcd6585cf9e04282cdc65d03d06af89 *R/f_analysis_base_rates.R 341db964c12f1eca1e400e4f55f52a87 *R/f_analysis_base_survival.R e3dfc2eb17ef1eef165102a156879cf0 *R/f_core_assertions.R eaaa6de774ea6bb02165791fab0424d7 *R/f_core_constants.R 54d02bb8f2a653f8645082f5fee284a9 *R/f_core_output_formats.R a60b280f70ed90434da10d34a7fc2000 *R/f_core_plot.R 877c909026ef0e01aae981748b488be2 *R/f_core_utilities.R 04e156970e9f7d6d7d4c5aed5046a9be *R/f_design_fisher_combination_test.R b0bdfdee5f90beb5c8131eb2a4fa0eaa *R/f_design_group_sequential.R 82cd0e3eaf94d930c63c23b7fe425b66 *R/f_design_sample_size_calculator.R 0e6d431d9d4436f622688bc8e09201ef *R/f_design_utilities.R 7a0771cb77393f14e776edbfde828e9a
*R/f_simulation_means.R b55c79b8ecdfa76fd0a9c055baf1786a *R/f_simulation_rates.R 068dce92e3bd15b760d0fa2322b2f1e3 *R/f_simulation_survival.R 7f0532c29dfdd42a9f97853b243309a2 *R/pkgname.R 9e3de4c622a1b5cb0bacb70ccafbcc53 *README.md d2ea13b6edd5fe8985bbd0c2171be172 *inst/tests/testthat.R 58e6e055dc87dfd06e8953b61996fbd2 *inst/tests/testthat/helper-class_analysis_dataset.R 85e0dfb828b5e2353f033ab170f7458b *inst/tests/testthat/helper-f_analysis.R c31e2b69b15258bf6a7184344a86d71e *inst/tests/testthat/helper-f_analysis_means.R e62d24ca1469072a7e0058dc2275a90c *inst/tests/testthat/helper-f_analysis_rates.R e62d24ca1469072a7e0058dc2275a90c *inst/tests/testthat/helper-f_analysis_survival.R 0771297647c0dca37b9e54359e59b82a *inst/tests/testthat/helper-f_core_assertions.R dfd2071e95a69137180164c7d092138a *inst/tests/testthat/helper-f_core_utilities.R f24649388664d883529c97fde49fe58a *inst/tests/testthat/test-class_analysis_dataset.R abb184ce9f277738401302868811b19c *inst/tests/testthat/test-class_summary.R f28e4b9073285be29342014f375c2df4 *inst/tests/testthat/test-class_time.R 5b2c63a293b78374ea1727dda26c57e3 *inst/tests/testthat/test-f_analysis_base_means.R 388bab2924f307e59633744b02f569f3 *inst/tests/testthat/test-f_analysis_base_rates.R eade81934dd84d41e91228b0b66e9f2a *inst/tests/testthat/test-f_analysis_base_survival.R abc71a0bc2364f270e4812c6ff93ba8b *inst/tests/testthat/test-f_core_assertions.R b7c81a0e04e7d635b5301b047a3d1584 *inst/tests/testthat/test-f_core_output_formats.R 09f14f1c2ceed13873fe0704be8c300a *inst/tests/testthat/test-f_core_utilities.R 503e7e05a3767a0ac71ab34e8dc8bdcb *inst/tests/testthat/test-f_design_fisher_combination_test.R 3ff9363aa052e5fa7d1267aeb42b7545 *inst/tests/testthat/test-f_design_group_sequential_design.R b7665d9b804c22e01c60d7a631386018 *inst/tests/testthat/test-f_design_power_calculator.R c0a5db3ae70501a63d0a6126b054f300 *inst/tests/testthat/test-f_design_sample_size_calculator.R a74fd80abf2cf3cb053b8f8c6ae3f6ce *inst/tests/testthat/test-f_design_utilities.R f39c2c8b5fce769e4cc892b9d2b86a35 *inst/tests/testthat/test-f_simulation_means.R 497b3839c2e1976a13a68381895f9bab *inst/tests/testthat/test-f_simulation_rates.R 828207cf9e24d5a20a7ec9ba19206823 *inst/tests/testthat/test-f_simulation_survival.R 5cd0e734a6a619ecc5c3dea0166ec8e8 *inst/tests/testthat/test_generic_functions.R b7be45e60f17007b2ab2299a26fdf8cc *man/AccrualTime.Rd acd0ffcc62b8ab9bcda550a694f982ac *man/AnalysisResults.Rd 09161bbe4c7c0e9ea2a23aa452c1b43e *man/AnalysisResultsFisher.Rd 66d8704e54b64b84aec08e2dbfd666b3 *man/AnalysisResultsGroupSequential.Rd 8e96c049c572b76b5f8b7530bea980a1 *man/AnalysisResultsInverseNormal.Rd c2c8b097976d9ddba83c7be6f4bb3b54 *man/AnalysisResults_as.data.frame.Rd 0f73faf546f2516b39715869578e64e6 *man/AnalysisResults_names.Rd 82492fe9084a26be9a55ebaa71e6525e *man/Dataset.Rd 72c29fc5d537c414b16f456560aeddd7 *man/DatasetMeans.Rd 4814552cb0318e6a24a6bdf7453c4a61 *man/DatasetRates.Rd db672be50fe39c2cef2d72c16ea43490 *man/DatasetSurvival.Rd e41fe8d25e9dbf3bd2ac08a9612e6bb8 *man/EventProbabilities.Rd a4fb5825a8a5cd343dd8c2fe215a3372 *man/FieldSet.Rd 212cb7dd44c35d84e50d5be8e9ecba3f *man/FieldSet_names.Rd 820cad48b81d918ed2c379d7181f9440 *man/FieldSet_print.Rd a20828bab026f532d6575b62f6b567ea *man/FrameSet_as.matrix.Rd e19086d7a14ea5ce9b55b83680c870df *man/NumberOfSubjects.Rd 03c7794df397aa64dce86a15bb14015c *man/ParameterSet.Rd 99923e883f4667ad89f8437b33127ae8 *man/ParameterSet_as.data.frame.Rd b66e9e88b70a879a46f1516c96bbc3ae *man/ParameterSet_print.Rd 
85c940b4f31f4e60121b83eb34e0e296 *man/ParameterSet_summary.Rd e9730a006637d5bb724d1f4c2711ec96 *man/PiecewiseSurvivalTime.Rd 7fb5f64e075c225cf27be9c77be46700 *man/PlotSettings.Rd 30aa900e780bf29cabe4fd0c6604bcff *man/PowerAndAverageSampleNumberResult.Rd 32ac68777da9a42d37ffcc6f7f2b50a1 *man/PowerAndAverageSampleNumberResult_as.data.frame.Rd dc75b99370bbf3ef44ea08de88e758be *man/SimulationResults.Rd 35d050ed605fd5004e3a30bd422760c3 *man/SimulationResultsMeans.Rd 9364cbf7cf49f6ce333e771c69132d4a *man/SimulationResultsRates.Rd 3b9ae74373dbeec3823b30ca3a96da8f *man/SimulationResultsSurvival.Rd 4254fa55a185a6ffe85c595e9aa5c8fa *man/StageResults.Rd 1bb3cf79f26b59f08b7a6903fe09fc55 *man/StageResultsMeans.Rd 7ed9e21851eabf82d6e12ba776b25584 *man/StageResultsRates.Rd 5972f7382e17c1828735b29f55355da5 *man/StageResultsSurvival.Rd 727e52428b23d0d1d26edc155f8bba89 *man/StageResults_as.data.frame.Rd 4cd94b52bab595000d3e5f1ddbd3d685 *man/StageResults_names.Rd c5d142c88ecdef7827fce8fe13557fab *man/TrialDesign.Rd 81755f7c9aa5b37a3571e5a257fe02b4 *man/TrialDesignCharacteristics.Rd b1c8a269d89cc451ec5db92c8f6c3eb1 *man/TrialDesignCharacteristics_as.data.frame.Rd 4b3f8f928f02ebf84298521b1bb2a5db *man/TrialDesignConditionalDunnett.Rd c899767f48c25910722cf5d9e3a01c3c *man/TrialDesignFisher.Rd 6bb146b2651f61b25dfd058935c7123d *man/TrialDesignGroupSequential.Rd 4839c20c1d24146b1844704a5260e487 *man/TrialDesignInverseNormal.Rd d28374d3f1fd4727b70ae5a75b418755 *man/TrialDesignPlan.Rd c8d653faafc2de9ff0d0888920466dd1 *man/TrialDesignPlanMeans.Rd a35645753674c9920f4ee7d23c32776d *man/TrialDesignPlanRates.Rd 75c337de6784312eca9b7e9d2e089a6f *man/TrialDesignPlanSurvival.Rd 273b91aa452e6229b4275b6572bfb7b9 *man/TrialDesignPlanSurvival_summary.Rd 0f7f87a6eee3397045943fc72fcba47d *man/TrialDesignPlan_as.data.frame.Rd f807d3b373204f64ab21051199cf0d86 *man/TrialDesignSet.Rd 052b6648b44bd56f874d669b91d972ff *man/TrialDesignSet_as.data.frame.Rd 189a78f007cbaa4adec52d18b7265864 *man/TrialDesignSet_length.Rd 1326d38f4901efbd9dca34e46497b5ec *man/TrialDesignSet_names.Rd d24eb6f559638a95126a0ceae1900eb8 *man/TrialDesign_as.data.frame.Rd 24fdbd35266cc2c1503cfc9752c215bf *man/getAccrualTime.Rd c7487cb151db1472b3630e31f8b754d2 *man/getAnalysisResults.Rd 9d35a0dcea14213c7f0cc4c97433f798 *man/getAvailablePlotTypes.Rd ba399bc99c85860c146da7afa3ef854d *man/getConditionalPower.Rd 49760789cb31dcbb945c3536e251a673 *man/getConditionalRejectionProbabilities.Rd 7baea7b5b91b55681c09c05e77505613 *man/getData.Rd 2112f8b21da36db839850c84b13374f3 *man/getDataset.Rd 1909f00e9d3f49853996c13b757fdac9 *man/getDesignCharacteristics.Rd 734b2c3ba8f3fa9eec2691f190f177df *man/getDesignFisher.Rd c634afd4ab1c0eec5b6311b1148c05e4 *man/getDesignGroupSequential.Rd 80585c45965c40c225d90d0967c5f81e *man/getDesignInverseNormal.Rd b53a6aaeed22ef6f79e29f555466b76c *man/getDesignSet.Rd f4203979b5725fd4ea223cf96a4a717a *man/getEventProbabilities.Rd 7ddde9a164b97dc78c8180b58b5d7095 *man/getFinalConfidenceInterval.Rd fa24bc461c2bdef7e51841bcac7cad68 *man/getFinalPValue.Rd 093d1e62f227accb0a56a55a27a9d599 *man/getLogLevel.Rd db1da9cea92260a5ae9be624fd12b9f5 *man/getNumberOfSubjects.Rd eb036f381512c39c3706b42a78b2ec01 *man/getPiecewiseSurvivalTime.Rd 33fc4f8250305842acb834fe540c17f5 *man/getPowerAndAverageSampleNumber.Rd 09dd40770104e336341f0bf5de028c01 *man/getPowerMeans.Rd f20ff4e0cd6cad7f690f481a90a23abe *man/getPowerRates.Rd c0e42d1c8f41530d01e24ff3eede59fd *man/getPowerSurvival.Rd ed6e8d8495dd64033b939b5e72606c2c *man/getRawData.Rd fea4aaacb8359c758693244964bbd870 
*man/getRepeatedConfidenceIntervals.Rd 65e674e99c6f31efa81412832e67ed66 *man/getRepeatedPValues.Rd e400a33d208d4e719d05761d34a6536a *man/getSampleSizeMeans.Rd eafc5737abdd16d4877fa5f23e03fd7c *man/getSampleSizeRates.Rd d0abe6397bd4289a8df64b74d3d1f16a *man/getSampleSizeSurvival.Rd d4b463bf6f6279a8905bc8f6f2f185e1 *man/getSimulationMeans.Rd 83b7f98291d811ea43b578ac261bebbc *man/getSimulationRates.Rd 98f9a0787fae43744ad220afaae6304b *man/getSimulationSurvival.Rd b32765bf5c90f97272d27876d516c601 *man/getStageResults.Rd 39a46ff5f9421bea8c5ec845a9df32cb *man/getTestActions.Rd 674305a03b20ef2ca6a5c845ac7223eb *man/plot.AnalysisResults.Rd a0605bf05181385cac84ec61910d3d0e *man/plot.Dataset.Rd f9e17026e9ce12d5643aa520c6305584 *man/plot.SimulationResults.Rd ab4e20305e2f42d1f3b9f826c5a38487 *man/plot.StageResults.Rd 290c2a74ba213420d13ca0ea95fb2409 *man/plot.TrialDesign.Rd 041388d00a24865a38c1ba59cb5560dc *man/plot.TrialDesignPlan.Rd 7dd4ea0ea37a402584a3ddf439b871da *man/plot.TrialDesignSet.Rd 0532d80b5a6c32145c9b2f60a7445c04 *man/printCitation.Rd 48d3d4efc55c3a548524c722b9590ca2 *man/readDataset.Rd a861efbb3ede48abf73016afa2f6d636 *man/readDatasets.Rd 522c78729549bfb592ec50badd0d998b *man/resetLogLevel.Rd f45a75320d5be78f4175e0379b75bdf8 *man/rpact.Rd 9b1877683a6eba89e58764f7bc709d28 *man/setLogLevel.Rd 352eaa8d53312211b541944045afff77 *man/sub-TrialDesignSet-method.Rd 049ed7846528b4bc228e996e3e06fd58 *man/testPackage.Rd ec09ecfede43705c9d84cc4275fbe5bb *man/utilitiesForPiecewiseExponentialDistribution.Rd c9c148042062f0562604cb25a2c9e0ee *man/utilitiesForSurvivalTrials.Rd 069aa9104936374c82060bc4117bc5ef *man/writeDataset.Rd 4095cfbf246462cb01121a5ce203d887 *man/writeDatasets.Rd e4483e9fdd98b1d4ecf1ce05147cb7b5 *src/RcppExports.cpp 27142034fab3a3012eaa237e666ed52b *src/f_simulation_survival.cpp df11f41943b8dd906ee9b75d1f01cb00 *src/rpact.c d2ea13b6edd5fe8985bbd0c2171be172 *tests/testthat.R 58e6e055dc87dfd06e8953b61996fbd2 *tests/testthat/helper-class_analysis_dataset.R 85e0dfb828b5e2353f033ab170f7458b *tests/testthat/helper-f_analysis.R c31e2b69b15258bf6a7184344a86d71e *tests/testthat/helper-f_analysis_means.R e62d24ca1469072a7e0058dc2275a90c *tests/testthat/helper-f_analysis_rates.R e62d24ca1469072a7e0058dc2275a90c *tests/testthat/helper-f_analysis_survival.R 0771297647c0dca37b9e54359e59b82a *tests/testthat/helper-f_core_assertions.R dfd2071e95a69137180164c7d092138a *tests/testthat/helper-f_core_utilities.R f24649388664d883529c97fde49fe58a *tests/testthat/test-class_analysis_dataset.R abb184ce9f277738401302868811b19c *tests/testthat/test-class_summary.R f28e4b9073285be29342014f375c2df4 *tests/testthat/test-class_time.R 5b2c63a293b78374ea1727dda26c57e3 *tests/testthat/test-f_analysis_base_means.R 388bab2924f307e59633744b02f569f3 *tests/testthat/test-f_analysis_base_rates.R eade81934dd84d41e91228b0b66e9f2a *tests/testthat/test-f_analysis_base_survival.R abc71a0bc2364f270e4812c6ff93ba8b *tests/testthat/test-f_core_assertions.R b7c81a0e04e7d635b5301b047a3d1584 *tests/testthat/test-f_core_output_formats.R 09f14f1c2ceed13873fe0704be8c300a *tests/testthat/test-f_core_utilities.R 503e7e05a3767a0ac71ab34e8dc8bdcb *tests/testthat/test-f_design_fisher_combination_test.R 3ff9363aa052e5fa7d1267aeb42b7545 *tests/testthat/test-f_design_group_sequential_design.R b7665d9b804c22e01c60d7a631386018 *tests/testthat/test-f_design_power_calculator.R c0a5db3ae70501a63d0a6126b054f300 *tests/testthat/test-f_design_sample_size_calculator.R a74fd80abf2cf3cb053b8f8c6ae3f6ce *tests/testthat/test-f_design_utilities.R 
f39c2c8b5fce769e4cc892b9d2b86a35 *tests/testthat/test-f_simulation_means.R 497b3839c2e1976a13a68381895f9bab *tests/testthat/test-f_simulation_rates.R 828207cf9e24d5a20a7ec9ba19206823 *tests/testthat/test-f_simulation_survival.R 5cd0e734a6a619ecc5c3dea0166ec8e8 *tests/testthat/test_generic_functions.R rpact/inst/0000755000176200001440000000000013574442443012352 5ustar liggesusersrpact/inst/tests/0000755000176200001440000000000013574442443013514 5ustar liggesusersrpact/inst/tests/testthat/0000755000176200001440000000000013574450502015347 5ustar liggesusersrpact/inst/tests/testthat/test-class_summary.R0000644000176200001440000005576113574411563021353 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 06 November 2019, 17:08:57 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing class 'SummaryFactory'") test_that("Testing 'summary.ParameterSet': no errors occur", { ## test designs invisible(capture.output(expect_error(summary(getDesignGroupSequential(beta = 0.05, typeOfDesign = "asKD", gammaA = 1, typeBetaSpending = "bsOF")), NA))) invisible(capture.output(expect_error(summary(getDesignGroupSequential(kMax = 1)), NA))) invisible(capture.output(expect_error(summary(getDesignGroupSequential(kMax = 4, sided = 2)), NA))) invisible(capture.output(expect_error(summary(getDesignGroupSequential(kMax = 4, sided = 2), digits = 0), NA))) invisible(capture.output(expect_error(summary(getDesignGroupSequential(kMax = 1, sided = 2)), NA))) invisible(capture.output(expect_error(summary(getDesignGroupSequential(futilityBounds = c(-6, 0))), NA))) invisible(capture.output(expect_error(summary(getDesignGroupSequential(futilityBounds = c(-6, 0)), digits = 5), NA))) .skipTestifDisabled() invisible(capture.output(expect_error(summary(getDesignInverseNormal(kMax = 1)), NA))) invisible(capture.output(expect_error(summary(getDesignInverseNormal(futilityBounds = c(0, 1))), NA))) invisible(capture.output(expect_error(summary(getDesignInverseNormal(kMax = 1)), NA))) invisible(capture.output(expect_error(summary(getDesignInverseNormal(kMax = 4, sided = 2)), NA))) invisible(capture.output(expect_error(summary(getDesignInverseNormal(kMax = 4, sided = 2), digits = 0), NA))) invisible(capture.output(expect_error(summary(getDesignInverseNormal(kMax = 1, sided = 2)), NA))) invisible(capture.output(expect_error(summary(getDesignFisher()), NA))) invisible(capture.output(expect_error(summary(getDesignFisher(alpha0Vec = c(0.1, 0.2))), NA))) invisible(capture.output(expect_error(summary(getDesignFisher(kMax = 1)), NA))) invisible(capture.output(expect_error(summary(getDesignFisher(kMax = 4, sided = 2), digits = 5), NA))) invisible(capture.output(expect_error(summary(getDesignFisher(kMax = 4, sided = 2), digits = 0), NA))) invisible(capture.output(expect_error(summary(getDesignFisher(kMax = 1, sided = 2)), NA))) ## test design plans - means 
invisible(capture.output(expect_error(summary(getSampleSizeMeans(sided = 2, alternative = -0.5)), NA))) invisible(capture.output(expect_warning(summary(getSampleSizeMeans(sided = 2), alternative = -0.5)))) # warning expected invisible(capture.output(expect_error(summary(getPowerMeans(sided = 1, alternative = c(-0.5,-0.3), maxNumberOfSubjects = 100, directionUpper = FALSE)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 0.5, sided = 1, stDev = 2.5)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 0.5, sided = 1, stDev = 1, groups = 1)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(thetaH0 = 0, sided = 2, stDev = 1, groups = 1)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 1.2, sided = 2, stDev = 5)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 1.2, sided = 2, stDev = 5, allocationRatioPlanned = 0)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 1.2, sided = 2, stDev = 5, groups = 1)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2)))), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = 0), NA))) invisible(capture.output(expect_error(summary(getPowerMeans(getDesignGroupSequential(futilityBounds = c(1, 2)), maxNumberOfSubjects = 100, alternative = 1)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(getDesignGroupSequential(kMax = 4, sided = 2))), NA))) invisible(capture.output(expect_error(summary(getPowerMeans(getDesignGroupSequential(kMax = 4, sided = 2), maxNumberOfSubjects = 100)), NA))) invisible(capture.output(expect_error(summary(getPowerMeans(getDesignGroupSequential(kMax = 1, sided = 2), maxNumberOfSubjects = 100)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2)))), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = 4), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = 3), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = 2), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = -1), NA))) ## test design plans - rates invisible(capture.output(expect_error(summary(getSampleSizeRates(pi2 = 0.3)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates(groups = 1, thetaH0 = 0.3)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates(groups = 1, thetaH0 = 0.45)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates(groups = 2, thetaH0 = 0.45, allocationRatioPlanned = 0)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates(getDesignGroupSequential(futilityBounds = c(1, 2)))), NA))) invisible(capture.output(expect_error(summary(getPowerRates(getDesignGroupSequential(futilityBounds = c(1, 2)), maxNumberOfSubjects = 100)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 4, sided = 2))), NA))) 
invisible(capture.output(expect_error(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 4, sided = 2), groups = 1, thetaH0 = 0.3)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 1, sided = 2), groups = 1, thetaH0 = 0.2, pi1 = c(0.4,0.5))), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 1, sided = 2), groups = 1, thetaH0 = 0.2, pi1 = 0.4)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 1, sided = 2), groups = 2, thetaH0 = 0, pi1 = 0.25)), NA))) invisible(capture.output(expect_error(summary(getPowerRates(getDesignGroupSequential(kMax = 4, sided = 2), maxNumberOfSubjects = 100)), NA))) ## test design plans - survival invisible(capture.output(expect_error(summary(getSampleSizeSurvival()), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(lambda2 = 0.3, hazardRatio = 1.2)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(lambda2 = 0.3, hazardRatio = c(1.2, 2))), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(pi2 = 0.3, hazardRatio = 1.2)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(pi1 = 0.1, pi2 = 0.3)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(lambda2 = 0.03, lambda1 = c(0.040))), NA))) piecewiseSurvivalTime <- list( "0 - <6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, "15 - <21" = 0.01, ">=21" = 0.007) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(piecewiseSurvivalTime = piecewiseSurvivalTime, hazardRatio = 1.2)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(getDesignGroupSequential(futilityBounds = c(1, 2)))), NA))) invisible(capture.output(expect_error(summary(getPowerSurvival(getDesignGroupSequential(futilityBounds = c(1, 2)), maxNumberOfSubjects = 100, maxNumberOfEvents = 60)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(getDesignGroupSequential(kMax = 4, sided = 2))), NA))) invisible(capture.output(expect_error(summary(getPowerSurvival(getDesignGroupSequential(kMax = 4, sided = 2), maxNumberOfSubjects = 100, maxNumberOfEvents = 60)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(sided = 2, lambda2 = log(2)/6, lambda1 = log(2)/8)), NA))) invisible(capture.output(expect_error(summary(getPowerSurvival(sided = 2, maxNumberOfSubjects = 200, maxNumberOfEvents = 40, lambda2 = log(2)/6, lambda1 = log(2)/8)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(getDesignGroupSequential(sided = 2), lambda2 = log(2)/6, hazardRatio = c(0.55), accrualTime = c(0,10), accrualIntensity = 60)), NA))) invisible(capture.output(expect_error(summary(getPowerSurvival(getDesignGroupSequential(kMax = 2), maxNumberOfEvents = 200, maxNumberOfSubjects = 400, lambda2 = log(2) / 60, lambda1 = log(2) / 50, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30)), NA))) invisible(capture.output(expect_error(summary(getPowerSurvival(getDesignGroupSequential(kMax = 3), maxNumberOfEvents = 200, maxNumberOfSubjects = 400, lambda2 = log(2) / 60, lambda1 = c(log(2) / 50, log(2) / 60), dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30)), NA))) invisible(capture.output(expect_error(summary(getPowerSurvival(getDesignGroupSequential(kMax = 3), maxNumberOfEvents = 200, maxNumberOfSubjects = 
400, lambda2 = log(2) / 60, hazardRatio = c(0.7, 0.8),directionUpper = FALSE, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30)), NA))) design <- getDesignGroupSequential( sided = 2, alpha = 0.05, beta = 0.2, informationRates = c(0.6, 1), typeOfDesign = "asOF", twoSidedPower = FALSE) invisible(capture.output(expect_error(summary(getSampleSizeSurvival( design, lambda2 = log(2) / 60, hazardRatio = 0.74, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30, followUpTime = 12)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival( design, lambda2 = log(2) / 60, lambda1 = log(2) / 50, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30, followUpTime = 12)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(getDesignGroupSequential(kMax = 4, sided = 2))), NA))) ## simulations design <- getDesignInverseNormal(alpha = 0.05, kMax = 4, futilityBounds = c(0,0,0), sided = 1, typeOfDesign = "WT", deltaWT = 0.1) invisible(capture.output(expect_error(summary(getSimulationSurvival(design,lambda2 = log(2) / 60, lambda1 = c(log(2) / 80), maxNumberOfSubjects = 1000, plannedEvents = c(50, 100, 150, 200), seed = 12345, directionUpper = FALSE)), NA))) invisible(capture.output(expect_error(summary(getSimulationSurvival(design,lambda2 = log(2) / 60, hazardRatio = c(1.2, 1.4), maxNumberOfSubjects = 1000, plannedEvents = c(50, 100, 150, 200), seed = 12345)), NA))) design <- getDesignGroupSequential(typeOfDesign = "P", futilityBounds = c(1,1)) invisible(capture.output(expect_error(summary(getSampleSizeMeans(design)), NA))) invisible(capture.output(expect_error(summary(getSimulationMeans(design, stDev = 4, plannedSubjects = (1:3)*200, alternative = c(1,2))), NA))) invisible(capture.output(expect_error(summary(getSimulationRates(design, plannedSubjects = (1:3)*200, pi1 = c(0.3,0.4), maxNumberOfIterations = 1000, minNumberOfSubjectsPerStage = c(NA, 40, 40), maxNumberOfSubjectsPerStage = c(NA, 40, 400), conditionalPower = 0.8)), NA))) invisible(capture.output(expect_error(summary(getSimulationMeans(getDesignGroupSequential(kMax = 1), stDev = 4, plannedSubjects = 200, alternative = c(1))), NA))) }) test_that("Testing 'summary.ParameterSet': output will be produced", { ## test designs expect_output(summary(getDesignGroupSequential(beta = 0.05, typeOfDesign = "asKD", gammaA = 1, typeBetaSpending = "bsOF"))$show()) expect_output(summary(getDesignGroupSequential(kMax = 1))$show()) expect_output(summary(getDesignGroupSequential(kMax = 4, sided = 2))$show()) expect_output(summary(getDesignGroupSequential(kMax = 4, sided = 2), digits = 0)$show()) expect_output(summary(getDesignGroupSequential(kMax = 1, sided = 2))$show()) expect_output(summary(getDesignGroupSequential(futilityBounds = c(-6, 0)))$show()) expect_output(summary(getDesignGroupSequential(futilityBounds = c(-6, 0)), digits = 5)$show()) .skipTestifDisabled() expect_output(summary(getDesignInverseNormal(kMax = 1))$show()) expect_output(summary(getDesignInverseNormal(futilityBounds = c(0, 1)))$show()) expect_output(summary(getDesignInverseNormal(kMax = 1))$show()) expect_output(summary(getDesignInverseNormal(kMax = 4, sided = 2))$show()) expect_output(summary(getDesignInverseNormal(kMax = 4, sided = 2), digits = 0)$show()) expect_output(summary(getDesignInverseNormal(kMax = 1, sided = 2))$show()) expect_output(summary(getDesignFisher())$show()) 
expect_output(summary(getDesignFisher(alpha0Vec = c(0.1, 0.2)))$show()) expect_output(summary(getDesignFisher(kMax = 1))$show()) expect_output(summary(getDesignFisher(kMax = 4, sided = 2), digits = 5)$show()) expect_output(summary(getDesignFisher(kMax = 4, sided = 2), digits = 0)$show()) expect_output(summary(getDesignFisher(kMax = 1, sided = 2))$show()) ## test design plans - means expect_output(summary(getSampleSizeMeans(sided = 2, alternative = -0.5))$show()) expect_output(summary(getPowerMeans(sided = 1, alternative = c(-0.5,-0.3), maxNumberOfSubjects = 100, directionUpper = FALSE))$show()) expect_output(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 0.5, sided = 1, stDev = 2.5))$show()) expect_output(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 0.5, sided = 1, stDev = 1, groups = 1))$show()) expect_output(summary(getSampleSizeMeans(thetaH0 = 0, sided = 2, stDev = 1, groups = 1))$show()) expect_output(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 1.2, sided = 2, stDev = 5))$show()) expect_output(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 1.2, sided = 2, stDev = 5, allocationRatioPlanned = 0))$show()) expect_output(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 1.2, sided = 2, stDev = 5, groups = 1))$show()) expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))))$show()) expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = 0)$show()) expect_output(summary(getPowerMeans(getDesignGroupSequential(futilityBounds = c(1, 2)), maxNumberOfSubjects = 100, alternative = 1))$show()) expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(kMax = 4, sided = 2)))$show()) expect_output(summary(getPowerMeans(getDesignGroupSequential(kMax = 4, sided = 2), maxNumberOfSubjects = 100))$show()) expect_output(summary(getPowerMeans(getDesignGroupSequential(kMax = 1, sided = 2), maxNumberOfSubjects = 100))$show()) expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))))$show()) expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = 4)$show()) expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = 3)$show()) expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = 2)$show()) expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = -1)$show()) ## test design plans - rates expect_output(summary(getSampleSizeRates(pi2 = 0.3))$show()) expect_output(summary(getSampleSizeRates(groups = 1, thetaH0 = 0.3))$show()) expect_output(summary(getSampleSizeRates(groups = 1, thetaH0 = 0.45))$show()) expect_output(summary(getSampleSizeRates(groups = 2, thetaH0 = 0.45, allocationRatioPlanned = 0))$show()) expect_output(summary(getSampleSizeRates(getDesignGroupSequential(futilityBounds = c(1, 2))))$show()) expect_output(summary(getPowerRates(getDesignGroupSequential(futilityBounds = c(1, 2)), maxNumberOfSubjects = 100))$show()) expect_output(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 4, sided = 2)))$show()) expect_output(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 4, sided = 2), groups = 1, thetaH0 = 0.3))$show()) expect_output(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 1, sided = 2), groups = 1, thetaH0 = 0.2, pi1 = c(0.4,0.5)))$show()) expect_output(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 1, sided = 2), 
groups = 1, thetaH0 = 0.2, pi1 = 0.4))$show()) expect_output(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 1, sided = 2), groups = 2, thetaH0 = 0, pi1 = 0.25))$show()) expect_output(summary(getPowerRates(getDesignGroupSequential(kMax = 4, sided = 2), maxNumberOfSubjects = 100))$show()) ## test design plans - survival expect_output(summary(getSampleSizeSurvival())$show()) expect_output(summary(getSampleSizeSurvival(lambda2 = 0.3, hazardRatio = 1.2))$show()) expect_output(summary(getSampleSizeSurvival(lambda2 = 0.3, hazardRatio = c(1.2, 2)))$show()) expect_output(summary(getSampleSizeSurvival(pi2 = 0.3, hazardRatio = 1.2))$show()) expect_output(summary(getSampleSizeSurvival(pi1 = 0.1, pi2 = 0.3))$show()) expect_output(summary(getSampleSizeSurvival(lambda2 = 0.03, lambda1 = c(0.040)))$show()) piecewiseSurvivalTime <- list( "0 - <6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, "15 - <21" = 0.01, ">=21" = 0.007) expect_output(summary(getSampleSizeSurvival(piecewiseSurvivalTime = piecewiseSurvivalTime, hazardRatio = 1.2))$show()) expect_output(summary(getSampleSizeSurvival(getDesignGroupSequential(futilityBounds = c(1, 2))))$show()) expect_output(summary(getPowerSurvival(getDesignGroupSequential(futilityBounds = c(1, 2)), maxNumberOfSubjects = 100, maxNumberOfEvents = 60))$show()) expect_output(summary(getSampleSizeSurvival(getDesignGroupSequential(kMax = 4, sided = 2)))$show()) expect_output(summary(getPowerSurvival(getDesignGroupSequential(kMax = 4, sided = 2), maxNumberOfSubjects = 100, maxNumberOfEvents = 60))$show()) expect_output(summary(getSampleSizeSurvival(sided = 2, lambda2 = log(2)/6, lambda1 = log(2)/8))$show()) expect_output(summary(getPowerSurvival(sided = 2, maxNumberOfSubjects = 200, maxNumberOfEvents = 40, lambda2 = log(2)/6, lambda1 = log(2)/8))$show()) expect_output(summary(getSampleSizeSurvival(getDesignGroupSequential(sided = 2), lambda2 = log(2)/6, hazardRatio = c(0.55), accrualTime = c(0,10), accrualIntensity = 60))$show()) expect_output(summary(getPowerSurvival(getDesignGroupSequential(kMax = 2), maxNumberOfEvents = 200, maxNumberOfSubjects = 400, lambda2 = log(2) / 60, lambda1 = log(2) / 50, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30))$show()) expect_output(summary(getPowerSurvival(getDesignGroupSequential(kMax = 3), maxNumberOfEvents = 200, maxNumberOfSubjects = 400, lambda2 = log(2) / 60, lambda1 = c(log(2) / 50, log(2) / 60), dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30))$show()) expect_output(summary(getPowerSurvival(getDesignGroupSequential(kMax = 3), maxNumberOfEvents = 200, maxNumberOfSubjects = 400, lambda2 = log(2) / 60, hazardRatio = c(0.7, 0.8),directionUpper = FALSE, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30))$show()) design <- getDesignGroupSequential( sided = 2, alpha = 0.05, beta = 0.2, informationRates = c(0.6, 1), typeOfDesign = "asOF", twoSidedPower = FALSE) expect_output(summary(getSampleSizeSurvival( design, lambda2 = log(2) / 60, hazardRatio = 0.74, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30, followUpTime = 12))$show()) expect_output(summary(getSampleSizeSurvival( design, lambda2 = log(2) / 60, lambda1 = log(2) / 50, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30, followUpTime = 12))$show()) 
expect_output(summary(getSampleSizeSurvival(getDesignGroupSequential(kMax = 4, sided = 2)))$show()) ## simulations design <- getDesignInverseNormal(alpha = 0.05, kMax = 4, futilityBounds = c(0,0,0), sided = 1, typeOfDesign = "WT", deltaWT = 0.1) expect_output(summary(getSimulationSurvival(design,lambda2 = log(2) / 60, lambda1 = c(log(2) / 80), maxNumberOfSubjects = 1000, plannedEvents = c(50, 100, 150, 200), seed = 12345, directionUpper = FALSE))$show()) expect_output(summary(getSimulationSurvival(design,lambda2 = log(2) / 60, hazardRatio = c(1.2, 1.4), maxNumberOfSubjects = 1000, plannedEvents = c(50, 100, 150, 200), seed = 12345))$show()) design <- getDesignGroupSequential(typeOfDesign = "P", futilityBounds = c(1,1)) expect_output(summary(getSampleSizeMeans(design))$show()) expect_output(summary(getSimulationMeans(design, stDev = 4, plannedSubjects = (1:3)*200, alternative = c(1,2)))$show()) expect_output(summary(getSimulationRates(design, plannedSubjects = (1:3)*200, pi1 = c(0.3,0.4), maxNumberOfIterations = 1000, minNumberOfSubjectsPerStage = c(NA, 40, 40), maxNumberOfSubjectsPerStage = c(NA, 40, 400), conditionalPower = 0.8))$show()) expect_output(summary(getSimulationMeans(getDesignGroupSequential(kMax = 1), stDev = 4, plannedSubjects = 200, alternative = 1))$show()) }) rpact/inst/tests/testthat/test_generic_functions.R0000644000176200001440000001636113573723560022252 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 06 November 2019, 17:08:57 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing class 'SummaryFactory'") test_that("Testing 'summary.ParameterSet': no errors occur", { design <- getDesignGroupSequential(alpha = 0.05, kMax = 4, sided = 1, typeOfDesign = "WT", deltaWT = 0.1) designFisher <- getDesignFisher(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.5, 0.8, 1), alpha0Vec = rep(0.4, 3)) designCharacteristics <- getDesignCharacteristics(design) powerAndASN <- getPowerAndAverageSampleNumber(design, theta = 1, nMax = 100) designSet <- getDesignSet(design = design, deltaWT = c(0.3, 0.4)) dataset <- getDataset( n1 = c(22, 11, 22, 11), n2 = c(22, 13, 22, 13), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 3, 2.5), stDevs1 = c(1, 2, 2, 1.3), stDevs2 = c(1, 2, 2, 1.3) ) stageResults <- getStageResults(design, dataset) analysisResults <- getAnalysisResults(design, dataset) designPlan <- getSampleSizeMeans(design) simulationResults <- getSimulationSurvival(design, maxNumberOfSubjects = 100, plannedEvents = c(50, 100, 150, 200), seed = 12345) piecewiseSurvivalTime <- getPiecewiseSurvivalTime(list( "0 - <6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, "15 - <21" = 0.01, ">=21" = 0.007), hazardRatio = 0.8) accrualTime <- getAccrualTime(list( "0 - <12" = 15, "12 - <13" = 21, "13 - <14" = 27, "14 - <15" = 33, "15 - <16" = 39, ">=16" = 45), maxNumberOfSubjects = 1400) invisible(capture.output(names(design))) invisible(capture.output(names(designFisher))) 
invisible(capture.output(names(designCharacteristics))) invisible(capture.output(names(powerAndASN))) invisible(capture.output(names(designSet))) invisible(capture.output(names(dataset))) invisible(capture.output(names(stageResults))) invisible(capture.output(names(analysisResults))) invisible(capture.output(names(designPlan))) invisible(capture.output(names(simulationResults))) invisible(capture.output(names(piecewiseSurvivalTime))) invisible(capture.output(names(accrualTime))) invisible(capture.output(design$criticalValues)) invisible(capture.output(design[["criticalValues"]])) invisible(capture.output(print(design))) invisible(capture.output(print(designFisher))) invisible(capture.output(print(designCharacteristics))) invisible(capture.output(print(powerAndASN))) invisible(capture.output(print(designSet))) invisible(capture.output(print(dataset))) invisible(capture.output(print(stageResults))) invisible(capture.output(print(analysisResults))) invisible(capture.output(print(designPlan))) invisible(capture.output(print(simulationResults))) invisible(capture.output(print(piecewiseSurvivalTime))) invisible(capture.output(print(accrualTime))) invisible(capture.output(summary(design))) invisible(capture.output(summary(designFisher))) invisible(capture.output(summary(designCharacteristics))) invisible(capture.output(summary(powerAndASN))) invisible(capture.output(summary(designSet))) invisible(capture.output(summary(dataset))) invisible(capture.output(summary(stageResults))) invisible(capture.output(summary(analysisResults))) invisible(capture.output(summary(designPlan))) invisible(capture.output(summary(simulationResults))) invisible(capture.output(summary(piecewiseSurvivalTime))) invisible(capture.output(summary(accrualTime))) invisible(capture.output(as.data.frame(design))) invisible(capture.output(as.data.frame(designFisher))) invisible(capture.output(as.data.frame(designCharacteristics))) invisible(capture.output(as.data.frame(powerAndASN))) invisible(capture.output(as.data.frame(designSet))) invisible(capture.output(as.data.frame(dataset))) invisible(capture.output(as.data.frame(stageResults))) invisible(capture.output(as.data.frame(analysisResults))) invisible(capture.output(as.data.frame(designPlan))) invisible(capture.output(as.data.frame(simulationResults))) invisible(capture.output(as.data.frame(piecewiseSurvivalTime))) invisible(capture.output(as.data.frame(accrualTime))) invisible(capture.output(as.data.frame(design, niceColumnNamesEnabled = FALSE))) invisible(capture.output(as.data.frame(designFisher, niceColumnNamesEnabled = FALSE))) invisible(capture.output(as.data.frame(designCharacteristics, niceColumnNamesEnabled = FALSE))) invisible(capture.output(as.data.frame(powerAndASN, niceColumnNamesEnabled = FALSE))) invisible(capture.output(as.data.frame(designSet, niceColumnNamesEnabled = FALSE))) invisible(capture.output(as.data.frame(dataset, niceColumnNamesEnabled = FALSE))) invisible(capture.output(as.data.frame(stageResults, niceColumnNamesEnabled = FALSE))) invisible(capture.output(as.data.frame(analysisResults, niceColumnNamesEnabled = FALSE))) invisible(capture.output(as.data.frame(designPlan, niceColumnNamesEnabled = FALSE))) invisible(capture.output(as.data.frame(simulationResults, niceColumnNamesEnabled = FALSE))) invisible(capture.output(as.data.frame(piecewiseSurvivalTime, niceColumnNamesEnabled = FALSE))) invisible(capture.output(as.data.frame(accrualTime, niceColumnNamesEnabled = FALSE))) invisible(capture.output(as.matrix(design))) 
invisible(capture.output(as.matrix(designFisher))) invisible(capture.output(as.matrix(designCharacteristics))) invisible(capture.output(as.matrix(powerAndASN))) invisible(capture.output(as.matrix(designSet))) invisible(capture.output(as.matrix(dataset))) invisible(capture.output(as.matrix(stageResults))) invisible(capture.output(as.matrix(analysisResults))) invisible(capture.output(as.matrix(designPlan))) invisible(capture.output(as.matrix(simulationResults))) invisible(capture.output(as.matrix(piecewiseSurvivalTime))) invisible(capture.output(as.matrix(accrualTime))) }) rpact/inst/tests/testthat/test-f_simulation_survival.R0000644000176200001440000026527013567165663023124 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 06 November 2019, 17:12:59 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing simulation survival function") test_that("'getSimulationSurvival': configuration 1", { .skipTestifDisabled() # @refFS[Sec.]{fs:subsec:seed} simulationResults <- getSimulationSurvival(maxNumberOfSubjects = 200, plannedEvents = 50, accrualTime = c(0, 3, 6, 12), accrualIntensity = c(0.1, 0.2, 0.2) , maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results ## expect_equal(simulationResults$accrualIntensity, c(9.5238095, 19.047619, 19.047619), tolerance = 1e-07) expect_equal(simulationResults$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(simulationResults$median2, 37.275405, tolerance = 1e-07) expect_equal(simulationResults$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(simulationResults$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(simulationResults$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(simulationResults$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResults$analysisTime[1, ], c(22.223223, 18.818775, 16.321595, 14.790808), tolerance = 1e-07) expect_equal(simulationResults$studyDuration, c(22.223223, 18.818775, 16.321595, 14.790808), tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfEvents, c(50, 50, 50, 50)) expect_equal(simulationResults$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResults$expectedNumberOfSubjects, c(200, 200, 200, 200)) expect_equal(simulationResults$overallReject, c(0.01, 0.41, 0.81, 1), tolerance = 1e-07) expect_equal(simulationResults$futilityStop, c(0, 0, 0, 0)) expect_equal(simulationResults$earlyStop, c(0, 0, 0, 0)) }) test_that("'getSimulationSurvival': configuration 2", { .skipTestifDisabled() design <- getDesignFisher(kMax = 3, alpha0Vec = c(0.5, 0.5)) simulationResults <- getSimulationSurvival(design = design, pi2 = 0.6, pi1 = seq(0.3, 0.45, 0.05), directionUpper = FALSE, maxNumberOfSubjects = 500, plannedEvents = (1:design$kMax) * 20, allocation1 = 1, 
allocation2 = 1, accrualTime = c(0, 3, 6, 12), accrualIntensity = c(0.1, 0.2, 0.2) , dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA_real_, 10, 10), maxNumberOfEventsPerStage = c(NA_real_, 100, 200), maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results ## expect_equal(simulationResults$accrualIntensity, c(23.809524, 47.619048, 47.619048), tolerance = 1e-07) expect_equal(simulationResults$median1, c(23.320299, 19.308487, 16.282985, 13.9131), tolerance = 1e-07) expect_equal(simulationResults$median2, 9.0776496, tolerance = 1e-07) expect_equal(simulationResults$lambda1, c(0.029722912, 0.035898576, 0.042568802, 0.04981975), tolerance = 1e-07) expect_equal(simulationResults$lambda2, 0.076357561, tolerance = 1e-07) expect_equal(simulationResults$hazardRatio, c(0.38925958, 0.47013781, 0.55749295, 0.6524534), tolerance = 1e-07) expect_equal(simulationResults$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResults$iterations[2, ], c(60, 73, 78, 65)) expect_equal(simulationResults$iterations[3, ], c(5, 9, 28, 46)) expect_equal(simulationResults$analysisTime[1, ], c(5.4183926, 5.2945044, 5.1495619, 5.0392001), tolerance = 1e-07) expect_equal(simulationResults$analysisTime[2, ], c(10.130549, 10.39649, 10.458778, 9.7641943), tolerance = 1e-07) expect_equal(simulationResults$analysisTime[3, ], c(13.506679, 14.455396, 18.382917, 18.866629), tolerance = 1e-07) expect_equal(simulationResults$studyDuration, c(8.500396, 9.4448778, 11.628285, 12.227203), tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[1, ], c(20, 20, 20, 20)) expect_equal(simulationResults$eventsPerStage[2, ], c(84.483333, 93.054795, 98.884615, 92.015385), tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[3, ], c(125.8, 159.44444, 229.53571, 250.93478), tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfEvents, c(60.755833, 79.305068, 118.11231, 139.91292), tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResults$eventsNotAchieved[2, ], c(0, 0, 0, 0)) expect_equal(simulationResults$eventsNotAchieved[3, ], c(0, 0, 0, 0)) expect_equal(simulationResults$numberOfSubjects[1, ], c(186.51, 180.63, 173.73, 168.48), tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[2, ], c(406.05, 420.67123, 424.60256, 393.44615), tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[3, ], c(428.4, 466.33333, 480.96429, 488.78261), tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfSubjects, c(319.3515, 359.96969, 385.19188, 358.56277), tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[1, ], c(0.4, 0.21, 0.2, 0.13), tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[2, ], c(0.55, 0.63, 0.5, 0.15), tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[3, ], c(0.05, 0.09, 0.26, 0.41), tolerance = 1e-07) expect_equal(simulationResults$overallReject, c(1, 0.93, 0.96, 0.69), tolerance = 1e-07) expect_equal(simulationResults$futilityPerStage[1, ], c(0, 0.06, 0.02, 0.22), tolerance = 1e-07) expect_equal(simulationResults$futilityPerStage[2, ], c(0, 0.01, 0, 0.04), tolerance = 1e-07) expect_equal(simulationResults$futilityStop, c(0, 0.07, 0.02, 0.26), tolerance = 1e-07) expect_equal(simulationResults$earlyStop, c(0.95, 0.91, 0.72, 0.54), tolerance = 1e-07) 
expect_equal(simulationResults$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simulationResults$conditionalPowerAchieved[2, ], c(0.61612368, 0.57564124, 0.49458667, 0.52832804), tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[3, ], c(0.78816558, 0.77803263, 0.64572713, 0.66129837), tolerance = 1e-07) }) test_that("'getSimulationSurvival': configuration 3", { .skipTestifDisabled() design <- getDesignFisher(kMax = 3, alpha0Vec = c(0.5, 0.5)) simulationResults <- getSimulationSurvival(design = design, pi2 = 0.2, pi1 = seq(0.3, 0.45, 0.05), directionUpper = TRUE, maxNumberOfSubjects = 500, plannedEvents = (1:design$kMax) * 20, allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12), accrualIntensity = c(0.1, 0.2, 0.2) , dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA_real_, 10, 10), maxNumberOfEventsPerStage = c(NA_real_, 100, 200), maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results ## expect_equal(simulationResults$accrualIntensity, c(23.809524, 47.619048, 47.619048), tolerance = 1e-07) expect_equal(simulationResults$median1, c(23.320299, 19.308487, 16.282985, 13.9131), tolerance = 1e-07) expect_equal(simulationResults$median2, 37.275405, tolerance = 1e-07) expect_equal(simulationResults$lambda1, c(0.029722912, 0.035898576, 0.042568802, 0.04981975), tolerance = 1e-07) expect_equal(simulationResults$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(simulationResults$hazardRatio, c(1.5984103, 1.9305192, 2.2892242, 2.6791588), tolerance = 1e-07) expect_equal(simulationResults$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResults$iterations[2, ], c(69, 72, 59, 50)) expect_equal(simulationResults$iterations[3, ], c(37, 12, 7, 2)) expect_equal(simulationResults$analysisTime[1, ], c(7.2763799, 7.0838561, 6.7193502, 6.3616317), tolerance = 1e-07) expect_equal(simulationResults$analysisTime[2, ], c(16.764021, 14.756285, 13.821816, 12.988284), tolerance = 1e-07) expect_equal(simulationResults$analysisTime[3, ], c(38.977945, 24.200748, 26.934721, 11.875967), tolerance = 1e-07) expect_equal(simulationResults$studyDuration, c(22.098154, 13.978342, 11.899449, 9.7796143), tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[1, ], c(20, 20, 20, 20)) expect_equal(simulationResults$eventsPerStage[2, ], c(106.50725, 94.541667, 94.677966, 94.48), tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[3, ], c(259.13514, 175, 204, 84.5), tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfEvents, c(136.16232, 83.325, 71.712542, 57.0404), tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResults$eventsNotAchieved[2, ], c(0, 0, 0, 0)) expect_equal(simulationResults$eventsNotAchieved[3, ], c(0, 0, 0, 0)) expect_equal(simulationResults$numberOfSubjects[1, ], c(275.04, 265.86, 248.46, 231.45), tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[2, ], c(496.07246, 481.84722, 476, 463.84), tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[3, ], c(500, 500, 500, 494)) expect_equal(simulationResults$expectedNumberOfSubjects, c(429.00559, 423.54913, 384.3886, 348.2482), tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[1, ], c(0.18, 0.22, 0.39, 0.49), tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[2, ], 
c(0.31, 0.59, 0.52, 0.48), tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[3, ], c(0.35, 0.11, 0.07, 0.02), tolerance = 1e-07) expect_equal(simulationResults$overallReject, c(0.84, 0.92, 0.98, 0.99), tolerance = 1e-07) expect_equal(simulationResults$futilityPerStage[1, ], c(0.13, 0.06, 0.02, 0.01), tolerance = 1e-07) expect_equal(simulationResults$futilityPerStage[2, ], c(0.01, 0.01, 0, 0), tolerance = 1e-07) expect_equal(simulationResults$futilityStop, c(0.14, 0.07, 0.02, 0.01), tolerance = 1e-07) expect_equal(simulationResults$earlyStop, c(0.63, 0.88, 0.93, 0.98), tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simulationResults$conditionalPowerAchieved[2, ], c(0.46273079, 0.58305775, 0.61313502, 0.59484117), tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[3, ], c(0.66165116, 0.75066235, 0.71981679, 0.8), tolerance = 1e-07) }) test_that("'getSimulationSurvival': configuration 4", { .skipTestifDisabled() design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25) piecewiseSurvivalTime <- list( "<6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, "15 - <21" = 0.01, ">=21" = 0.007) simulationResults <- getSimulationSurvival(design = design, directionUpper = TRUE, maxNumberOfSubjects = 500, plannedEvents = (1:design$kMax) * 20, allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12), piecewiseSurvivalTime = piecewiseSurvivalTime, hazardRatio = 1.7, accrualIntensity = c(0.1, 0.2, 0.2) , dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA_real_, 10, 10), maxNumberOfEventsPerStage = c(NA_real_, 100, 200), maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results ## expect_equal(simulationResults$accrualIntensity, c(23.809524, 47.619048, 47.619048), tolerance = 1e-07) expect_equal(simulationResults$lambda1, c(0.0425, 0.068, 0.0255, 0.017, 0.0119), tolerance = 1e-07) expect_equal(simulationResults$iterations[1, ], 100) expect_equal(simulationResults$iterations[2, ], 95) expect_equal(simulationResults$iterations[3, ], 30) expect_equal(simulationResults$analysisTime[1, ], 6.3619038, tolerance = 1e-07) expect_equal(simulationResults$analysisTime[2, ], 12.345684, tolerance = 1e-07) expect_equal(simulationResults$analysisTime[3, ], 36.687962, tolerance = 1e-07) expect_equal(simulationResults$studyDuration, 19.26207, tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[1, ], 20) expect_equal(simulationResults$eventsPerStage[2, ], 91.694737, tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[3, ], 207.83333, tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfEvents, 122.95158, tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[1, ], 0) expect_equal(simulationResults$eventsNotAchieved[2, ], 0) expect_equal(simulationResults$eventsNotAchieved[3, ], 0) expect_equal(simulationResults$numberOfSubjects[1, ], 231.41, tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[2, ], 448.23158, tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[3, ], 491.66667, tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfSubjects, 450.42103, tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[1, ], 0.05, tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[2, ], 0.65, tolerance = 1e-07) 
expect_equal(simulationResults$rejectPerStage[3, ], 0.29, tolerance = 1e-07) expect_equal(simulationResults$overallReject, 0.99, tolerance = 1e-07) expect_equal(simulationResults$futilityPerStage[1, ], 0) expect_equal(simulationResults$futilityPerStage[2, ], 0) expect_equal(simulationResults$futilityStop, 0) expect_equal(simulationResults$earlyStop, 0.7, tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResults$conditionalPowerAchieved[2, ], 0.49425129, tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[3, ], 0.73157546, tolerance = 1e-07) }) test_that("'getSimulationSurvival': configuration 5", { .skipTestifDisabled() design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25) simulationResults <- getSimulationSurvival(design = design, pi2 = 0.6, pi1 = seq(0.3, 0.45, 0.05), directionUpper = FALSE, maxNumberOfSubjects = 200, plannedEvents = (1:design$kMax) * 20, allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12), accrualIntensity = c(0.1, 0.2, 0.2) , dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA_real_, 10, 10), maxNumberOfEventsPerStage = c(NA_real_, 40, 40), maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results ## expect_equal(simulationResults$accrualIntensity, c(9.5238095, 19.047619, 19.047619), tolerance = 1e-07) expect_equal(simulationResults$median1, c(23.320299, 19.308487, 16.282985, 13.9131), tolerance = 1e-07) expect_equal(simulationResults$median2, 9.0776496, tolerance = 1e-07) expect_equal(simulationResults$lambda1, c(0.029722912, 0.035898576, 0.042568802, 0.04981975), tolerance = 1e-07) expect_equal(simulationResults$lambda2, 0.076357561, tolerance = 1e-07) expect_equal(simulationResults$hazardRatio, c(0.38925958, 0.47013781, 0.55749295, 0.6524534), tolerance = 1e-07) expect_equal(simulationResults$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResults$iterations[2, ], c(87, 89, 92, 100)) expect_equal(simulationResults$iterations[3, ], c(18, 34, 58, 77)) expect_equal(simulationResults$analysisTime[1, ], c(8.1674426, 7.9228743, 7.6045868, 7.4881493), tolerance = 1e-07) expect_equal(simulationResults$analysisTime[2, ], c(12.354338, 12.56529, 12.380125, 12.254955), tolerance = 1e-07) expect_equal(simulationResults$analysisTime[3, ], c(16.473595, 17.9949, 17.847597, 17.390492), tolerance = 1e-07) expect_equal(simulationResults$studyDuration, c(12.562909, 13.818364, 15.044701, 16.144285), tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[1, ], c(20, 20, 20, 20)) expect_equal(simulationResults$eventsPerStage[2, ], c(48.54023, 51.561798, 55.130435, 55.79), tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[3, ], c(75.277778, 87.176471, 94.103448, 94.545455), tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfEvents, c(49.642759, 60.198989, 74.924348, 85.6317), tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResults$eventsNotAchieved[2, ], c(0, 0, 0, 0)) expect_equal(simulationResults$eventsNotAchieved[3, ], c(0, 0, 0, 0)) expect_equal(simulationResults$numberOfSubjects[1, ], c(126.03, 121.42, 115.37, 113.16), tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[2, ], c(187.50575, 190.98876, 193.16304, 192.33), tolerance = 1e-07) 
expect_equal(simulationResults$numberOfSubjects[3, ], c(199.11111, 200, 199.39655, 199.28571), tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfSubjects, c(181.60287, 186.40002, 190.55503, 197.6859), tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[1, ], c(0.13, 0.11, 0.08, 0), tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[2, ], c(0.69, 0.55, 0.34, 0.23), tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[3, ], c(0.17, 0.31, 0.26, 0.25), tolerance = 1e-07) expect_equal(simulationResults$overallReject, c(0.99, 0.97, 0.68, 0.48), tolerance = 1e-07) expect_equal(simulationResults$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(simulationResults$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(simulationResults$futilityStop, c(0, 0, 0, 0)) expect_equal(simulationResults$earlyStop, c(0.82, 0.66, 0.42, 0.23), tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simulationResults$conditionalPowerAchieved[2, ], c(0.56161185, 0.47418383, 0.31608317, 0.29578133), tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[3, ], c(0.71394365, 0.57778506, 0.37448609, 0.32265113), tolerance = 1e-07) }) test_that("'getSimulationSurvival': configuration 6", { .skipTestifDisabled() design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25) piecewiseSurvivalTime <- list( "0 - <6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, "15 - <21" = 0.01, ">=21" = 0.007) simulationResults <- getSimulationSurvival(design = design, directionUpper = FALSE, maxNumberOfSubjects = 200, plannedEvents = (1:design$kMax) * 20, allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12), piecewiseSurvivalTime = piecewiseSurvivalTime, hazardRatio = c(0.8, 0.9), accrualIntensity = c(0.1, 0.2, 0.2) , dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA_real_, 10, 10), maxNumberOfEventsPerStage = c(NA_real_, 400, 200), maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results ## expect_equal(simulationResults$accrualIntensity, c(9.5238095, 19.047619, 19.047619), tolerance = 1e-07) expect_equal(simulationResults$lambda1, c(0.02, 0.032, 0.012, 0.008, 0.0056), tolerance = 1e-07) expect_equal(simulationResults$iterations[1, ], 100) expect_equal(simulationResults$iterations[2, ], 29) expect_equal(simulationResults$iterations[3, ], 8) expect_equal(simulationResults$analysisTime[1, ], 11.099103, tolerance = 1e-07) expect_equal(simulationResults$analysisTime[2, ], 137.1048, tolerance = 1e-07) expect_equal(simulationResults$analysisTime[3, ], 83.267347, tolerance = 1e-07) expect_equal(simulationResults$studyDuration, 10.437225, tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[1, ], 20) expect_equal(simulationResults$eventsPerStage[2, ], 102.10345, tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[3, ], 96.375, tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfEvents, 96.661422, tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[1, ], 0) expect_equal(simulationResults$eventsNotAchieved[2, ], 0.71, tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[3, ], 0.16, tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[1, ], 179.93, tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[2, ], 200) 
expect_equal(simulationResults$numberOfSubjects[3, ], 200) expect_equal(simulationResults$expectedNumberOfSubjects, 200) expect_equal(simulationResults$rejectPerStage[1, ], 0) expect_equal(simulationResults$rejectPerStage[2, ], 0.05, tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[3, ], 0.03, tolerance = 1e-07) expect_equal(simulationResults$overallReject, 0.08, tolerance = 1e-07) expect_equal(simulationResults$futilityPerStage[1, ], 0) expect_equal(simulationResults$futilityPerStage[2, ], 0) expect_equal(simulationResults$futilityStop, 0) expect_equal(simulationResults$earlyStop, 0.05, tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResults$conditionalPowerAchieved[2, ], 0.80243482, tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[3, ], 0.8, tolerance = 1e-07) }) test_that("'getSimulationSurvival': configuration 7", { .skipTestifDisabled() design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25) piecewiseSurvivalTime <- list("<6" = 0.025) simulationResults <- getSimulationSurvival(design = design, directionUpper = FALSE, maxNumberOfSubjects = 200, plannedEvents = (1:design$kMax) * 20, allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12), piecewiseSurvivalTime = piecewiseSurvivalTime, hazardRatio = 0.8, accrualIntensity = c(0.1, 0.2, 0.2) , dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA_real_, 10, 10), maxNumberOfEventsPerStage = c(NA_real_, 400, 200), maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results ## expect_equal(simulationResults$accrualIntensity, c(9.5238095, 19.047619, 19.047619), tolerance = 1e-07) expect_equal(simulationResults$pi1, 0.21337214, tolerance = 1e-07) expect_equal(simulationResults$pi2, 0.25918178, tolerance = 1e-07) expect_equal(simulationResults$median1, 34.657359, tolerance = 1e-07) expect_equal(simulationResults$median2, 27.725887, tolerance = 1e-07) expect_equal(simulationResults$lambda1, 0.02, tolerance = 1e-07) expect_equal(simulationResults$iterations[1, ], 100) expect_equal(simulationResults$iterations[2, ], 26) expect_equal(simulationResults$iterations[3, ], 12) expect_equal(simulationResults$analysisTime[1, ], 11.419107, tolerance = 1e-07) expect_equal(simulationResults$analysisTime[2, ], 43.00709, tolerance = 1e-07) expect_equal(simulationResults$analysisTime[3, ], 62.010907, tolerance = 1e-07) expect_equal(simulationResults$studyDuration, 8.0301114, tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[1, ], 20) expect_equal(simulationResults$eventsPerStage[2, ], 87.076923, tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[3, ], 122.5, tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfEvents, 121.79154, tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[1, ], 0) expect_equal(simulationResults$eventsNotAchieved[2, ], 0.74, tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[3, ], 0.12, tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[1, ], 183.49, tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[2, ], 200) expect_equal(simulationResults$numberOfSubjects[3, ], 200) expect_equal(simulationResults$expectedNumberOfSubjects, 200) expect_equal(simulationResults$rejectPerStage[1, ], 0) expect_equal(simulationResults$rejectPerStage[2, ], 0.02, 
tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[3, ], 0.04, tolerance = 1e-07) expect_equal(simulationResults$overallReject, 0.06, tolerance = 1e-07) expect_equal(simulationResults$futilityPerStage[1, ], 0) expect_equal(simulationResults$futilityPerStage[2, ], 0) expect_equal(simulationResults$futilityStop, 0) expect_equal(simulationResults$earlyStop, 0.02, tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResults$conditionalPowerAchieved[2, ], 0.80311744, tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[3, ], 0.8, tolerance = 1e-07) }) test_that("'getSimulationSurvival': configuration 8", { .skipTestifDisabled() design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25) piecewiseSurvivalTime <- list("<6" = 0.025) simulationResults <- getSimulationSurvival(design = design, directionUpper = FALSE, maxNumberOfSubjects = 200, plannedEvents = (1:design$kMax) * 20, allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12), piecewiseSurvivalTime = c(0, 6), lambda2 = c(0.01, 0.03), hazardRatio = 0.8, accrualIntensity = c(0.1, 0.2, 0.2) , dropoutRate1 = 0.04, dropoutRate2 = 0.08, dropoutTime = 12, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results ## expect_equal(simulationResults$accrualIntensity, c(9.5238095, 19.047619, 19.047619), tolerance = 1e-07) expect_equal(simulationResults$lambda1, c(0.008, 0.024), tolerance = 1e-07) expect_equal(simulationResults$iterations[1, ], 100) expect_equal(simulationResults$iterations[2, ], 99) expect_equal(simulationResults$iterations[3, ], 95) expect_equal(simulationResults$analysisTime[1, ], 14.155697, tolerance = 1e-07) expect_equal(simulationResults$analysisTime[2, ], 19.508242, tolerance = 1e-07) expect_equal(simulationResults$analysisTime[3, ], 25.008056, tolerance = 1e-07) expect_equal(simulationResults$studyDuration, 24.627971, tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[1, ], 20) expect_equal(simulationResults$eventsPerStage[2, ], 40) expect_equal(simulationResults$eventsPerStage[3, ], 60) expect_equal(simulationResults$expectedNumberOfEvents, 58.8, tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[1, ], 0) expect_equal(simulationResults$eventsNotAchieved[2, ], 0) expect_equal(simulationResults$eventsNotAchieved[3, ], 0) expect_equal(simulationResults$numberOfSubjects[1, ], 199.73, tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[2, ], 200) expect_equal(simulationResults$numberOfSubjects[3, ], 200) expect_equal(simulationResults$expectedNumberOfSubjects, 199.9973, tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[1, ], 0.01, tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[2, ], 0.04, tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[3, ], 0.06, tolerance = 1e-07) expect_equal(simulationResults$overallReject, 0.11, tolerance = 1e-07) expect_equal(simulationResults$futilityPerStage[1, ], 0) expect_equal(simulationResults$futilityPerStage[2, ], 0) expect_equal(simulationResults$futilityStop, 0) expect_equal(simulationResults$earlyStop, 0.05, tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResults$conditionalPowerAchieved[2, ], 0.13387917, tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[3, ], 0.12806393, tolerance = 1e-07) }) 
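# Note on configurations 8-10: with the default exponential survival (kappa = 1),
# the treatment-group hazard derived from 'lambda2' and 'hazardRatio' is
# lambda1 = hazardRatio * lambda2, which is what the 'lambda1' expectations check
# (e.g., 0.8 * c(0.01, 0.03) = c(0.008, 0.024) in configuration 8 and
# 0.75 * c(0.01, 0.03) = c(0.0075, 0.0225) in configuration 9).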
test_that("'getSimulationSurvival': configuration 9; ", { .skipTestifDisabled() design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25) piecewiseSurvivalTime <- list("<6" = 0.025) simulationResults <- getSimulationSurvival(design = design, directionUpper = FALSE, maxNumberOfSubjects = 200, plannedEvents = (1:design$kMax) * 20, allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12), piecewiseSurvivalTime = c(0, 6), lambda2 = c(0.01, 0.03), hazardRatio = c(0.75), accrualIntensity = c(0.1, 0.2, 0.2) , dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA_real_, 10, 10), maxNumberOfEventsPerStage = c(NA_real_, 400, 200), maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results ## expect_equal(simulationResults$accrualIntensity, c(9.5238095, 19.047619, 19.047619), tolerance = 1e-07) expect_equal(simulationResults$lambda1, c(0.0075, 0.0225), tolerance = 1e-07) expect_equal(simulationResults$iterations[1, ], 100) expect_equal(simulationResults$iterations[2, ], 27) expect_equal(simulationResults$iterations[3, ], 2) expect_equal(simulationResults$analysisTime[1, ], 14.263292, tolerance = 1e-07) expect_equal(simulationResults$analysisTime[2, ], 43.719076, tolerance = 1e-07) expect_equal(simulationResults$analysisTime[3, ], 38.174522, tolerance = 1e-07) expect_equal(simulationResults$studyDuration, 6.7834542, tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[1, ], 20) expect_equal(simulationResults$eventsPerStage[2, ], 90.037037, tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[3, ], 84.5, tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfEvents, 84.519444, tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[1, ], 0) expect_equal(simulationResults$eventsNotAchieved[2, ], 0.72, tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[3, ], 0.13, tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[1, ], 199.79, tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[2, ], 200) expect_equal(simulationResults$numberOfSubjects[3, ], 200) expect_equal(simulationResults$expectedNumberOfSubjects, 199.9979, tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[1, ], 0.01, tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[2, ], 0.12, tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[3, ], 0.02, tolerance = 1e-07) expect_equal(simulationResults$overallReject, 0.15, tolerance = 1e-07) expect_equal(simulationResults$futilityPerStage[1, ], 0) expect_equal(simulationResults$futilityPerStage[2, ], 0) expect_equal(simulationResults$futilityStop, 0) expect_equal(simulationResults$earlyStop, 0.13, tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResults$conditionalPowerAchieved[2, ], 0.80261071, tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[3, ], 0.8, tolerance = 1e-07) }) test_that("'getSimulationSurvival': configuration 10; ", { .skipTestifDisabled() design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25) piecewiseSurvivalTime <- list("<6" = 0.025) simulationResults <- getSimulationSurvival(design = design, directionUpper = FALSE, maxNumberOfSubjects = 200, plannedEvents = (1:design$kMax) * 20, allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12), lambda2 = 0.03, 
hazardRatio = c(0.75, 0.8, 0.9), accrualIntensity = c(0.1, 0.2, 0.2) , dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA_real_, 10, 10), maxNumberOfEventsPerStage = c(NA_real_, 400, 200), maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results ## expect_equal(simulationResults$accrualIntensity, c(9.5238095, 19.047619, 19.047619), tolerance = 1e-07) expect_equal(simulationResults$pi1, c(0.23662051, 0.25023841, 0.27674976), tolerance = 1e-07) expect_equal(simulationResults$pi2, 0.30232367, tolerance = 1e-07) expect_equal(simulationResults$median1, c(30.806541, 28.881133, 25.672118), tolerance = 1e-07) expect_equal(simulationResults$median2, 23.104906, tolerance = 1e-07) expect_equal(simulationResults$lambda1, c(0.0225, 0.024, 0.027), tolerance = 1e-07) expect_equal(simulationResults$iterations[1, ], c(100, 100, 100)) expect_equal(simulationResults$iterations[2, ], c(31, 24, 16)) expect_equal(simulationResults$iterations[3, ], c(5, 5, 7)) expect_equal(simulationResults$analysisTime[1, ], c(10.701574, 10.513732, 10.265089), tolerance = 1e-07) expect_equal(simulationResults$analysisTime[2, ], c(41.567704, 37.665074, 37.552932), tolerance = 1e-07) expect_equal(simulationResults$analysisTime[3, ], c(33.732435, 53.513221, 36.546609), tolerance = 1e-07) expect_equal(simulationResults$studyDuration, c(6.1071436, 6.3055402, 2.5582627), tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[1, ], c(20, 20, 20)) expect_equal(simulationResults$eventsPerStage[2, ], c(95.290323, 89.541667, 93.9375), tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[3, ], c(93.8, 124, 109.71429), tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfEvents, c(93.949032, 120.19708, 109.71429), tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[1, ], c(0, 0, 0)) expect_equal(simulationResults$eventsNotAchieved[2, ], c(0.69, 0.74, 0.84), tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[3, ], c(0.16, 0.14, 0.09), tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[1, ], c(173.61, 170.39, 165.9), tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[2, ], c(199.6129, 200, 200), tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[3, ], c(200, 200, 200)) expect_equal(simulationResults$expectedNumberOfSubjects, c(199.96129, 199.4078, 200), tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[1, ], c(0, 0.02, 0), tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[2, ], c(0.1, 0.05, 0), tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[3, ], c(0.05, 0.01, 0.03), tolerance = 1e-07) expect_equal(simulationResults$overallReject, c(0.15, 0.08, 0.03), tolerance = 1e-07) expect_equal(simulationResults$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(simulationResults$futilityPerStage[2, ], c(0, 0, 0)) expect_equal(simulationResults$futilityStop, c(0, 0, 0)) expect_equal(simulationResults$earlyStop, c(0.1, 0.07, 0), tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(simulationResults$conditionalPowerAchieved[2, ], c(0.80326129, 0.80161244, 0.8), tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[3, ], c(0.8, 0.8, 0.8), tolerance = 1e-07) }) test_that("'getSimulationSurvival': test accrual time and intensity definition", { 
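    # Two ways of defining accrual are exercised below (values taken from the
    # expectations): absolute intensities (>= 1, here c(22, 33) over the accrual
    # grid c(0, 6, 12)) determine the maximum number of subjects,
    # 6 * 22 + 6 * 33 = 330; relative intensities (< 1, here c(0.2, 0.3))
    # require 'maxNumberOfSubjects' and are scaled back to the absolute
    # intensities c(22, 33).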
    .skipTestifDisabled()

    maxNumberOfSubjects <- getSimulationSurvival(plannedEvents = 100,
        accrualTime = c(0, 6, 12), accrualIntensity = c(22, 33),
        maxNumberOfIterations = 100)$maxNumberOfSubjects
    expect_equal(maxNumberOfSubjects, 330)

    accrualIntensity <- getSimulationSurvival(plannedEvents = 100,
        accrualTime = c(0, 6, 12), accrualIntensity = c(0.2, 0.3),
        maxNumberOfSubjects = 330, maxNumberOfIterations = 100,
        seed = 1234567890)$accrualIntensity
    expect_equal(accrualIntensity, c(22, 33))
})

test_that("'getSimulationSurvival': test expected warnings and errors", {
    dIN <- getDesignInverseNormal(informationRates = c(0.4, 0.7, 1))

    expect_warning(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1),
        pi2 = 0.3, plannedEvents = c(58, 102, 146),
        minNumberOfEventsPerStage = c(NA_real_, 44, 44),
        maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44),
        maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890),
        "'minNumberOfEventsPerStage' (NA, 44, 44) will be ignored because no 'conditionalPower' is defined",
        fixed = TRUE)

    expect_warning(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1),
        pi2 = 0.3, plannedEvents = c(58, 102, 146),
        minNumberOfEventsPerStage = c(NA_real_, 44, 44),
        maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44),
        maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890),
        "'maxNumberOfEventsPerStage' (NA, 176, 176) will be ignored because no 'conditionalPower' is defined",
        fixed = TRUE)

    expect_warning(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1),
        pi2 = 0.3, plannedEvents = c(58, 102, 146),
        minNumberOfEventsPerStage = c(NA_real_, 44, 44),
        maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890),
        "'minNumberOfEventsPerStage' (NA, 44, 44) will be ignored because no 'conditionalPower' is defined",
        fixed = TRUE)

    expect_warning(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1),
        pi2 = 0.3, plannedEvents = c(58, 102, 146),
        maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44),
        maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890),
        "'maxNumberOfEventsPerStage' (NA, 176, 176) will be ignored because no 'conditionalPower' is defined",
        fixed = TRUE)

    expect_error(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1),
        pi2 = 0.3, conditionalPower = 0.8, plannedEvents = c(58, 102, 146),
        maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890),
        "Missing argument: 'minNumberOfEventsPerStage' must be defined because 'conditionalPower' is defined",
        fixed = TRUE)

    expect_error(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1),
        pi2 = 0.3, conditionalPower = 0.8, plannedEvents = c(58, 102, 146),
        minNumberOfEventsPerStage = c(NA_real_, 44, 44),
        maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890),
        "Missing argument: 'maxNumberOfEventsPerStage' must be defined because 'conditionalPower' is defined",
        fixed = TRUE)

    expect_error(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1),
        pi2 = 0.3, conditionalPower = 0.8, plannedEvents = c(58, 102, 146),
        maxNumberOfEventsPerStage = 4 * c(58, 44, 44),
        maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890),
        "Missing argument: 'minNumberOfEventsPerStage' must be defined because 'conditionalPower' is defined",
        fixed = TRUE)

    expect_error(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1),
        pi2 = 0.3, conditionalPower = -0.1, plannedEvents = c(58, 102, 146),
        minNumberOfEventsPerStage = c(NA_real_, 44, 44),
maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44), maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890), "Argument out of bounds: 'conditionalPower' (-0.1) is out of bounds (0; 1)", fixed = TRUE) expect_error(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3, conditionalPower = 1.1, plannedEvents = c(58, 102, 146), minNumberOfEventsPerStage = c(NA_real_, 44, 44), maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44), maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890), "Argument out of bounds: 'conditionalPower' (1.1) is out of bounds (0; 1)", fixed = TRUE) expect_error(getSimulationSurvival(plannedEvents = -100, accrualTime = c(0, 6, 12), accrualIntensity = c(22, 33), maxNumberOfIterations = 100, seed = 1234567890), "Argument out of bounds: 'plannedEvents' (-100) must be >= 1", fixed = TRUE) expect_error(getSimulationSurvival(design = dIN, plannedEvents = c(100,100, 150), accrualTime = c(0, 6, 12), accrualIntensity = c(22, 33), maxNumberOfIterations = 100, seed = 1234567890), "Illegal argument: 'plannedEvents' (100, 100, 150) must be strictly increasing: x_1 < .. < x_3", fixed = TRUE) expect_error(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3, conditionalPower = 0.8, plannedEvents = c(58, 102, 146), minNumberOfEventsPerStage = c(NA_real_, 44, -44), maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44), maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890), "Argument out of bounds: each value of 'minNumberOfEventsPerStage' (58, 44, -44) must be >= 1", fixed = TRUE) expect_error(getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3, conditionalPower = 0.8, plannedEvents = c(58, 102, 146), minNumberOfEventsPerStage = c(NA_real_, 44, 44), maxNumberOfEventsPerStage = 4 * c(NA_real_, 10, 44), maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890), "Illegal argument: 'maxNumberOfEventsPerStage' (58, 40, 176) must be not smaller than minNumberOfEventsPerStage' (58, 44, 44)", fixed = TRUE) expect_error(getSimulationSurvival(plannedEvents = 100, maxNumberOfIterations = 100, seed = 1234567890), "Illegal argument: 'maxNumberOfSubjects' must be defined", fixed = TRUE) expect_error(getSimulationSurvival(plannedEvents = 100, accrualTime = c(0, 12), accrualIntensity = 20, thetaH1 = 0, maxNumberOfIterations = 100, seed = 1234567890), "Argument out of bounds: 'thetaH1' (0) must be > 0", fixed = TRUE) expect_error(getSimulationSurvival(plannedEvents = 100, accrualTime = c(0, 12), accrualIntensity = 20, conditionalPower = 0, maxNumberOfIterations = 100, seed = 1234567890), "Argument out of bounds: 'conditionalPower' (0) is out of bounds (0; 1)", fixed = TRUE) expect_error(getSimulationSurvival(plannedEvents = 100, accrualTime = c(0, 12), accrualIntensity = 20, conditionalPower = 1, maxNumberOfIterations = 100, seed = 1234567890), "Argument out of bounds: 'conditionalPower' (1) is out of bounds (0; 1)", fixed = TRUE) expect_error(getSimulationSurvival(plannedEvents = 100, accrualTime = c(0, 12), accrualIntensity = 20, conditionalPower = c(0.5, 0.8), maxNumberOfIterations = 100, seed = 1234567890), "Illegal argument: 'conditionalPower' c(0.5, 0.8) must be a single numerical value", fixed = TRUE) }) context("Testing the simulation of survival data for different parameter variants") test_that("'getSimulationSurvival': Fixed sample size with minimum required definitions, pi1 = c(0.4, 0.5, 0.6) and pi2 = 0.2 at event time 12, accrual time 12 and 
follow-up time 6 as default ", { .skipTestifDisabled() simulationResult <- getSimulationSurvival(plannedEvents = 40, maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(simulationResult$median2, 37.275405, tolerance = 1e-07) expect_equal(simulationResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(simulationResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(simulationResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResult$analysisTime[1, ], c(17.941133, 15.499503, 13.535749, 12.34), tolerance = 1e-07) expect_equal(simulationResult$studyDuration, c(17.941133, 15.499503, 13.535749, 12.34), tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfEvents, c(40, 40, 40, 40)) expect_equal(simulationResult$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResult$expectedNumberOfSubjects, c(200, 200, 199.71, 196.74), tolerance = 1e-07) expect_equal(simulationResult$overallReject, c(0.01, 0.3, 0.68, 0.95), tolerance = 1e-07) expect_equal(simulationResult$futilityStop, c(0, 0, 0, 0)) expect_equal(simulationResult$earlyStop, c(0, 0, 0, 0)) }) test_that("'getSimulationSurvival': Increase number of simulation iterations ", { .skipTestifDisabled() simulationResult <- getSimulationSurvival(plannedEvents = 40, maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(simulationResult$median2, 37.275405, tolerance = 1e-07) expect_equal(simulationResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(simulationResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(simulationResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResult$analysisTime[1, ], c(17.941133, 15.499503, 13.535749, 12.34), tolerance = 1e-07) expect_equal(simulationResult$studyDuration, c(17.941133, 15.499503, 13.535749, 12.34), tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfEvents, c(40, 40, 40, 40)) expect_equal(simulationResult$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResult$expectedNumberOfSubjects, c(200, 200, 199.71, 196.74), tolerance = 1e-07) expect_equal(simulationResult$overallReject, c(0.01, 0.3, 0.68, 0.95), tolerance = 1e-07) expect_equal(simulationResult$futilityStop, c(0, 0, 0, 0)) expect_equal(simulationResult$earlyStop, c(0, 0, 0, 0)) }) test_that("'getSimulationSurvival': Determine necessary accrual time if 200 subjects and 30 subjects per time unit can be recruited ", { .skipTestifDisabled() simulationResult <- getSimulationSurvival(plannedEvents = 40, accrualTime = 0, accrualIntensity = 30, maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison 
of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualTime, 6.6666667, tolerance = 1e-07) expect_equal(simulationResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(simulationResult$median2, 37.275405, tolerance = 1e-07) expect_equal(simulationResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(simulationResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(simulationResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResult$analysisTime[1, ], c(15.255121, 12.685136, 10.656532, 9.4294312), tolerance = 1e-07) expect_equal(simulationResult$studyDuration, c(15.255121, 12.685136, 10.656532, 9.4294312), tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfEvents, c(40, 40, 40, 40)) expect_equal(simulationResult$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResult$expectedNumberOfSubjects, c(200, 200, 200, 200)) expect_equal(simulationResult$overallReject, c(0.02, 0.28, 0.77, 0.96), tolerance = 1e-07) expect_equal(simulationResult$futilityStop, c(0, 0, 0, 0)) expect_equal(simulationResult$earlyStop, c(0, 0, 0, 0)) }) test_that("'getSimulationSurvival': Determine necessary accrual time if 200 subjects and if the first 6 time units 20 subjects per time unit can be recruited, then 30 subjects per time unit ", { .skipTestifDisabled() simulationResult <- getSimulationSurvival(plannedEvents = 40, accrualTime = c(0, 6), accrualIntensity = c(20, 30), maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualTime, c(6, 8.6666667), tolerance = 1e-07) expect_equal(simulationResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(simulationResult$median2, 37.275405, tolerance = 1e-07) expect_equal(simulationResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(simulationResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(simulationResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResult$analysisTime[1, ], c(16.683961, 14.141068, 12.109744, 10.96314), tolerance = 1e-07) expect_equal(simulationResult$studyDuration, c(16.683961, 14.141068, 12.109744, 10.96314), tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfEvents, c(40, 40, 40, 40)) expect_equal(simulationResult$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResult$expectedNumberOfSubjects, c(200, 200, 200, 200)) expect_equal(simulationResult$overallReject, c(0.01, 0.28, 0.72, 0.96), tolerance = 1e-07) expect_equal(simulationResult$futilityStop, c(0, 0, 0, 0)) expect_equal(simulationResult$earlyStop, c(0, 0, 0, 0)) }) test_that("'getSimulationSurvival': Determine maximum number of Subjects if the first 6 time units 20 subjects per time unit can be recruited, and after 10 time units 30 subjects per time unit", { .skipTestifDisabled() simulationResult <- getSimulationSurvival(plannedEvents = 40, accrualTime = c(0, 6, 10), accrualIntensity = c(20, 30), maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of 
SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$maxNumberOfSubjects, 240) expect_equal(simulationResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(simulationResult$median2, 37.275405, tolerance = 1e-07) expect_equal(simulationResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(simulationResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(simulationResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResult$analysisTime[1, ], c(15.210978, 13.172199, 11.59631, 10.698373), tolerance = 1e-07) expect_equal(simulationResult$studyDuration, c(15.210978, 13.172199, 11.59631, 10.698373), tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfEvents, c(40, 40, 40, 40)) expect_equal(simulationResult$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResult$expectedNumberOfSubjects, c(240, 240, 239.63, 237.56), tolerance = 1e-07) expect_equal(simulationResult$overallReject, c(0.04, 0.33, 0.75, 0.95), tolerance = 1e-07) expect_equal(simulationResult$futilityStop, c(0, 0, 0, 0)) expect_equal(simulationResult$earlyStop, c(0, 0, 0, 0)) }) test_that("'getSimulationSurvival': Specify accrual time as a list", { .skipTestifDisabled() at <- list("0 - <6" = 20, "6 - Inf" = 30) simulationResult <- getSimulationSurvival(plannedEvents = 40, accrualTime = at, maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualTime, c(6, 8.6666667), tolerance = 1e-07) expect_equal(simulationResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(simulationResult$median2, 37.275405, tolerance = 1e-07) expect_equal(simulationResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(simulationResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(simulationResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResult$analysisTime[1, ], c(16.683961, 14.141068, 12.109744, 10.96314), tolerance = 1e-07) expect_equal(simulationResult$studyDuration, c(16.683961, 14.141068, 12.109744, 10.96314), tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfEvents, c(40, 40, 40, 40)) expect_equal(simulationResult$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResult$expectedNumberOfSubjects, c(200, 200, 200, 200)) expect_equal(simulationResult$overallReject, c(0.01, 0.28, 0.72, 0.96), tolerance = 1e-07) expect_equal(simulationResult$futilityStop, c(0, 0, 0, 0)) expect_equal(simulationResult$earlyStop, c(0, 0, 0, 0)) }) test_that("'getSimulationSurvival': Specify accrual time as a list, if maximum number of subjects need to be calculated", { .skipTestifDisabled() at <- list("0 - <6" = 20, "6 - <=10" = 30) simulationResult <- getSimulationSurvival(plannedEvents = 40, accrualTime = at, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$maxNumberOfSubjects, 240) expect_equal(simulationResult$median1, c(37.275405, 23.320299, 16.282985, 12), 
tolerance = 1e-07) expect_equal(simulationResult$median2, 37.275405, tolerance = 1e-07) expect_equal(simulationResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(simulationResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(simulationResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResult$analysisTime[1, ], c(15.210978, 13.172199, 11.59631, 10.698373), tolerance = 1e-07) expect_equal(simulationResult$studyDuration, c(15.210978, 13.172199, 11.59631, 10.698373), tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfEvents, c(40, 40, 40, 40)) expect_equal(simulationResult$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResult$expectedNumberOfSubjects, c(240, 240, 239.63, 237.56), tolerance = 1e-07) expect_equal(simulationResult$overallReject, c(0.04, 0.33, 0.75, 0.95), tolerance = 1e-07) expect_equal(simulationResult$futilityStop, c(0, 0, 0, 0)) expect_equal(simulationResult$earlyStop, c(0, 0, 0, 0)) }) test_that("'getSimulationSurvival': Specify effect size for a two-stage group design with O'Brien & Fleming boundaries Effect size is based on event rates at specified event time, directionUpper = FALSE needs to be specified because it should be shown that hazard ratio < 1", { .skipTestifDisabled() simulationResult <- getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), pi1 = 0.2, pi2 = 0.3, eventTime = 24, plannedEvents = c(20, 40), maxNumberOfSubjects = 200, directionUpper = FALSE, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$median1, 74.550809, tolerance = 1e-07) expect_equal(simulationResult$median2, 46.640597, tolerance = 1e-07) expect_equal(simulationResult$lambda1, 0.009297648, tolerance = 1e-07) expect_equal(simulationResult$lambda2, 0.014861456, tolerance = 1e-07) expect_equal(simulationResult$hazardRatio, 0.62562161, tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], 100) expect_equal(simulationResult$iterations[2, ], 97) expect_equal(simulationResult$analysisTime[1, ], 14.769473, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[2, ], 24.499634, tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 24.198958, tolerance = 1e-07) expect_equal(simulationResult$eventsPerStage[1, ], 20) expect_equal(simulationResult$eventsPerStage[2, ], 40) expect_equal(simulationResult$expectedNumberOfEvents, 39.4, tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$eventsNotAchieved[2, ], 0) expect_equal(simulationResult$numberOfSubjects[1, ], 199.47, tolerance = 1e-07) expect_equal(simulationResult$numberOfSubjects[2, ], 200) expect_equal(simulationResult$expectedNumberOfSubjects, 199.9841, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], 0.03, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[2, ], 0.24, tolerance = 1e-07) expect_equal(simulationResult$overallReject, 0.27, tolerance = 1e-07) expect_equal(simulationResult$futilityPerStage[1, ], 0) expect_equal(simulationResult$futilityStop, 0) expect_equal(simulationResult$earlyStop, 0.03, tolerance = 1e-07) expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_) 
    expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.29516222, tolerance = 1e-07)
})

test_that("'getSimulationSurvival': As above, but with a three-stage O'Brien & Fleming design with specified information rates; note that the planned events consist of integer values", {
    .skipTestifDisabled()

    d3 <- getDesignGroupSequential(informationRates = c(0.4, 0.7, 1))
    simulationResult <- getSimulationSurvival(design = d3, pi1 = 0.2, pi2 = 0.3,
        eventTime = 24, plannedEvents = round(d3$informationRates * 40),
        maxNumberOfSubjects = 200, directionUpper = FALSE, seed = 1234567890)

    ##
    ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results
    ##
    expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(simulationResult$median1, 74.550809, tolerance = 1e-07)
    expect_equal(simulationResult$median2, 46.640597, tolerance = 1e-07)
    expect_equal(simulationResult$lambda1, 0.009297648, tolerance = 1e-07)
    expect_equal(simulationResult$lambda2, 0.014861456, tolerance = 1e-07)
    expect_equal(simulationResult$hazardRatio, 0.62562161, tolerance = 1e-07)
    expect_equal(simulationResult$iterations[1, ], 1000)
    expect_equal(simulationResult$iterations[2, ], 985)
    expect_equal(simulationResult$iterations[3, ], 861)
    expect_equal(simulationResult$analysisTime[1, ], 13.073331, tolerance = 1e-07)
    expect_equal(simulationResult$analysisTime[2, ], 18.748105, tolerance = 1e-07)
    expect_equal(simulationResult$analysisTime[3, ], 24.810251, tolerance = 1e-07)
    expect_equal(simulationResult$studyDuration, 23.877826, tolerance = 1e-07)
    expect_equal(simulationResult$eventsPerStage[1, ], 16)
    expect_equal(simulationResult$eventsPerStage[2, ], 28)
    expect_equal(simulationResult$eventsPerStage[3, ], 40)
    expect_equal(simulationResult$expectedNumberOfEvents, 38.152, tolerance = 1e-07)
    expect_equal(simulationResult$eventsNotAchieved[1, ], 0)
    expect_equal(simulationResult$eventsNotAchieved[2, ], 0)
    expect_equal(simulationResult$eventsNotAchieved[3, ], 0)
    expect_equal(simulationResult$numberOfSubjects[1, ], 195.313, tolerance = 1e-07)
    expect_equal(simulationResult$numberOfSubjects[2, ], 200)
    expect_equal(simulationResult$numberOfSubjects[3, ], 200)
    expect_equal(simulationResult$expectedNumberOfSubjects, 199.92969, tolerance = 1e-07)
    expect_equal(simulationResult$rejectPerStage[1, ], 0.015, tolerance = 1e-07)
    expect_equal(simulationResult$rejectPerStage[2, ], 0.124, tolerance = 1e-07)
    expect_equal(simulationResult$rejectPerStage[3, ], 0.183, tolerance = 1e-07)
    expect_equal(simulationResult$overallReject, 0.322, tolerance = 1e-07)
    expect_equal(simulationResult$futilityPerStage[1, ], 0)
    expect_equal(simulationResult$futilityPerStage[2, ], 0)
    expect_equal(simulationResult$futilityStop, 0)
    expect_equal(simulationResult$earlyStop, 0.139, tolerance = 1e-07)
    expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_)
    expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.19637573, tolerance = 1e-07)
    expect_equal(simulationResult$conditionalPowerAchieved[3, ], 0.23542216, tolerance = 1e-07)
})

test_that("'getSimulationSurvival': Effect size is based on event rate at specified event time for the reference group and hazard ratio, directionUpper = FALSE needs to be specified because it should be shown that hazard ratio < 1", {
    .skipTestifDisabled()

    simulationResult <- getSimulationSurvival(design = getDesignGroupSequential(kMax = 2),
        hazardRatio = 0.5, pi2 = 0.3, eventTime = 24, plannedEvents = c(20, 40),
        maxNumberOfSubjects = 200, directionUpper = FALSE,
maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$pi1, 0.16333997, tolerance = 1e-07) expect_equal(simulationResult$median1, 93.281194, tolerance = 1e-07) expect_equal(simulationResult$median2, 46.640597, tolerance = 1e-07) expect_equal(simulationResult$lambda1, 0.007430728, tolerance = 1e-07) expect_equal(simulationResult$lambda2, 0.014861456, tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], 100) expect_equal(simulationResult$iterations[2, ], 92) expect_equal(simulationResult$analysisTime[1, ], 15.596955, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[2, ], 26.310745, tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 25.440402, tolerance = 1e-07) expect_equal(simulationResult$eventsPerStage[1, ], 20) expect_equal(simulationResult$eventsPerStage[2, ], 40) expect_equal(simulationResult$expectedNumberOfEvents, 38.4, tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$eventsNotAchieved[2, ], 0) expect_equal(simulationResult$numberOfSubjects[1, ], 199.69, tolerance = 1e-07) expect_equal(simulationResult$numberOfSubjects[2, ], 200) expect_equal(simulationResult$expectedNumberOfSubjects, 199.9752, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], 0.08, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[2, ], 0.44, tolerance = 1e-07) expect_equal(simulationResult$overallReject, 0.52, tolerance = 1e-07) expect_equal(simulationResult$futilityPerStage[1, ], 0) expect_equal(simulationResult$futilityStop, 0) expect_equal(simulationResult$earlyStop, 0.08, tolerance = 1e-07) expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.43087375, tolerance = 1e-07) }) test_that("'getSimulationSurvival': Effect size is based on hazard rate for the reference group and hazard ratio, directionUpper = FALSE needs to be specified because it should be shown that hazard ratio < 1", { .skipTestifDisabled() simulationResult <- getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, lambda2 = 0.02, plannedEvents = c(20, 40), maxNumberOfSubjects = 200, directionUpper = FALSE, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$pi1, 0.11307956, tolerance = 1e-07) expect_equal(simulationResult$pi2, 0.21337214, tolerance = 1e-07) expect_equal(simulationResult$median1, 69.314718, tolerance = 1e-07) expect_equal(simulationResult$median2, 34.657359, tolerance = 1e-07) expect_equal(simulationResult$lambda1, 0.01, tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], 100) expect_equal(simulationResult$iterations[2, ], 94) expect_equal(simulationResult$analysisTime[1, ], 13.132525, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[2, ], 21.186744, tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 20.690944, tolerance = 1e-07) expect_equal(simulationResult$eventsPerStage[1, ], 20) expect_equal(simulationResult$eventsPerStage[2, ], 40) expect_equal(simulationResult$expectedNumberOfEvents, 38.8, tolerance = 1e-07) 
expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$eventsNotAchieved[2, ], 0) expect_equal(simulationResult$numberOfSubjects[1, ], 195.5, tolerance = 1e-07) expect_equal(simulationResult$numberOfSubjects[2, ], 200) expect_equal(simulationResult$expectedNumberOfSubjects, 199.73, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], 0.06, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[2, ], 0.43, tolerance = 1e-07) expect_equal(simulationResult$overallReject, 0.49, tolerance = 1e-07) expect_equal(simulationResult$futilityPerStage[1, ], 0) expect_equal(simulationResult$futilityStop, 0) expect_equal(simulationResult$earlyStop, 0.06, tolerance = 1e-07) expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.48014443, tolerance = 1e-07) }) test_that("'getSimulationSurvival': Specification of piecewise exponential survival time and hazard ratios, note that in getSimulationSurvival only one hazard ratio is used in the case that the survival time is piecewise exponential", { .skipTestifDisabled() simulationResult <- getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), hazardRatio = 1.5, plannedEvents = c(20, 40), maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$lambda1, c(0.015, 0.03, 0.06), tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], 100) expect_equal(simulationResult$iterations[2, ], 96) expect_equal(simulationResult$analysisTime[1, ], 12.106711, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[2, ], 16.150578, tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 16.020702, tolerance = 1e-07) expect_equal(simulationResult$eventsPerStage[1, ], 20) expect_equal(simulationResult$eventsPerStage[2, ], 40) expect_equal(simulationResult$expectedNumberOfEvents, 39.2, tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$eventsNotAchieved[2, ], 0) expect_equal(simulationResult$numberOfSubjects[1, ], 193.51, tolerance = 1e-07) expect_equal(simulationResult$numberOfSubjects[2, ], 200) expect_equal(simulationResult$expectedNumberOfSubjects, 199.7404, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], 0.04, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[2, ], 0.28, tolerance = 1e-07) expect_equal(simulationResult$overallReject, 0.32, tolerance = 1e-07) expect_equal(simulationResult$futilityPerStage[1, ], 0) expect_equal(simulationResult$futilityStop, 0) expect_equal(simulationResult$earlyStop, 0.04, tolerance = 1e-07) expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.33404702, tolerance = 1e-07) pws <- list("0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) simulationResult <- getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2), plannedEvents = c(20, 40), maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## 
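    # Note: the expectations below match the hazardRatio = 1.5 run above
    # (same seed). As the test description states, only one hazard ratio is
    # used when the survival time is piecewise exponential, so the additional
    # values 1.8 and 2 apparently do not affect the result.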
expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$lambda1, c(0.015, 0.03, 0.06), tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], 100) expect_equal(simulationResult$iterations[2, ], 96) expect_equal(simulationResult$analysisTime[1, ], 12.106711, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[2, ], 16.150578, tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 16.020702, tolerance = 1e-07) expect_equal(simulationResult$eventsPerStage[1, ], 20) expect_equal(simulationResult$eventsPerStage[2, ], 40) expect_equal(simulationResult$expectedNumberOfEvents, 39.2, tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$eventsNotAchieved[2, ], 0) expect_equal(simulationResult$numberOfSubjects[1, ], 193.51, tolerance = 1e-07) expect_equal(simulationResult$numberOfSubjects[2, ], 200) expect_equal(simulationResult$expectedNumberOfSubjects, 199.7404, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], 0.04, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[2, ], 0.28, tolerance = 1e-07) expect_equal(simulationResult$overallReject, 0.32, tolerance = 1e-07) expect_equal(simulationResult$futilityPerStage[1, ], 0) expect_equal(simulationResult$futilityStop, 0) expect_equal(simulationResult$earlyStop, 0.04, tolerance = 1e-07) expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.33404702, tolerance = 1e-07) }) test_that("'getSimulationSurvival': Specification of piecewise exponential survival time for both treatment arms ", { .skipTestifDisabled() simulationResult <- getSimulationSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), lambda1 = c(0.015, 0.03, 0.06), plannedEvents = c(20, 40), maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$lambda1, c(0.015, 0.03, 0.06), tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], 100) expect_equal(simulationResult$iterations[2, ], 96) expect_equal(simulationResult$analysisTime[1, ], 12.106711, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[2, ], 16.150578, tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 16.020702, tolerance = 1e-07) expect_equal(simulationResult$eventsPerStage[1, ], 20) expect_equal(simulationResult$eventsPerStage[2, ], 40) expect_equal(simulationResult$expectedNumberOfEvents, 39.2, tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$eventsNotAchieved[2, ], 0) expect_equal(simulationResult$numberOfSubjects[1, ], 193.51, tolerance = 1e-07) expect_equal(simulationResult$numberOfSubjects[2, ], 200) expect_equal(simulationResult$expectedNumberOfSubjects, 199.7404, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], 0.04, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[2, ], 0.28, tolerance = 1e-07) expect_equal(simulationResult$overallReject, 0.32, tolerance = 1e-07) expect_equal(simulationResult$futilityPerStage[1, ], 0) expect_equal(simulationResult$futilityStop, 0) expect_equal(simulationResult$earlyStop, 0.04, tolerance = 1e-07) 
    expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_)
    expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.33404702, tolerance = 1e-07)
})

test_that("'getSimulationSurvival': Specification of piecewise exponential survival time as a list, note that in getSimulationSurvival only one hazard ratio (not a vector) can be used", {
    .skipTestifDisabled()

    pws <- list("0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04)
    simulationResult <- getSimulationSurvival(design = getDesignGroupSequential(kMax = 2),
        piecewiseSurvivalTime = pws, hazardRatio = 1.5, plannedEvents = c(20, 40),
        maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890)

    ##
    ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results
    ##
    expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(simulationResult$lambda1, c(0.015, 0.03, 0.06), tolerance = 1e-07)
    expect_equal(simulationResult$iterations[1, ], 100)
    expect_equal(simulationResult$iterations[2, ], 96)
    expect_equal(simulationResult$analysisTime[1, ], 12.106711, tolerance = 1e-07)
    expect_equal(simulationResult$analysisTime[2, ], 16.150578, tolerance = 1e-07)
    expect_equal(simulationResult$studyDuration, 16.020702, tolerance = 1e-07)
    expect_equal(simulationResult$eventsPerStage[1, ], 20)
    expect_equal(simulationResult$eventsPerStage[2, ], 40)
    expect_equal(simulationResult$expectedNumberOfEvents, 39.2, tolerance = 1e-07)
    expect_equal(simulationResult$eventsNotAchieved[1, ], 0)
    expect_equal(simulationResult$eventsNotAchieved[2, ], 0)
    expect_equal(simulationResult$numberOfSubjects[1, ], 193.51, tolerance = 1e-07)
    expect_equal(simulationResult$numberOfSubjects[2, ], 200)
    expect_equal(simulationResult$expectedNumberOfSubjects, 199.7404, tolerance = 1e-07)
    expect_equal(simulationResult$rejectPerStage[1, ], 0.04, tolerance = 1e-07)
    expect_equal(simulationResult$rejectPerStage[2, ], 0.28, tolerance = 1e-07)
    expect_equal(simulationResult$overallReject, 0.32, tolerance = 1e-07)
    expect_equal(simulationResult$futilityPerStage[1, ], 0)
    expect_equal(simulationResult$futilityStop, 0)
    expect_equal(simulationResult$earlyStop, 0.04, tolerance = 1e-07)
    expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_)
    expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.33404702, tolerance = 1e-07)
})

test_that("'getSimulationSurvival': Specification of piecewise exponential survival time and delayed effect (response after 5 time units)", {
    .skipTestifDisabled()

    simulationResult <- getSimulationSurvival(design = getDesignGroupSequential(kMax = 2),
        piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04),
        lambda1 = c(0.01, 0.02, 0.06), plannedEvents = c(20, 40),
        maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890)

    ##
    ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results
    ##
    expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(simulationResult$iterations[1, ], 100)
    expect_equal(simulationResult$iterations[2, ], 100)
    expect_equal(simulationResult$analysisTime[1, ], 12.973056, tolerance = 1e-07)
    expect_equal(simulationResult$analysisTime[2, ], 17.030809, tolerance = 1e-07)
    expect_equal(simulationResult$studyDuration, 17.030809, tolerance = 1e-07)
    expect_equal(simulationResult$eventsPerStage[1, ], 20)
    expect_equal(simulationResult$eventsPerStage[2, ], 40)
    expect_equal(simulationResult$expectedNumberOfEvents, 40)
expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$eventsNotAchieved[2, ], 0) expect_equal(simulationResult$numberOfSubjects[1, ], 197.81, tolerance = 1e-07) expect_equal(simulationResult$numberOfSubjects[2, ], 200) expect_equal(simulationResult$expectedNumberOfSubjects, 200) expect_equal(simulationResult$rejectPerStage[1, ], 0) expect_equal(simulationResult$rejectPerStage[2, ], 0.06, tolerance = 1e-07) expect_equal(simulationResult$overallReject, 0.06, tolerance = 1e-07) expect_equal(simulationResult$futilityPerStage[1, ], 0) expect_equal(simulationResult$futilityStop, 0) expect_equal(simulationResult$earlyStop, 0) expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.1789388, tolerance = 1e-07) }) test_that("'getSimulationSurvival': Specify effect size based on median survival times (median1 = 5, median2 = 3)", { .skipTestifDisabled() simulationResult <- getSimulationSurvival(lambda1 = log(2) / 5, lambda2 = log(2) / 3, plannedEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$pi1, 0.81053543, tolerance = 1e-07) expect_equal(simulationResult$pi2, 0.9375, tolerance = 1e-07) expect_equal(simulationResult$median1, 5, tolerance = 1e-07) expect_equal(simulationResult$median2, 3) expect_equal(simulationResult$hazardRatio, 0.6, tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], 100) expect_equal(simulationResult$analysisTime[1, ], 6.1552733, tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 6.1552733, tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfEvents, 40) expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$expectedNumberOfSubjects, 102.09, tolerance = 1e-07) expect_equal(simulationResult$overallReject, 0.29, tolerance = 1e-07) expect_equal(simulationResult$futilityStop, 0) expect_equal(simulationResult$earlyStop, 0) }) test_that("'getSimulationSurvival': Specify effect size based on median survival times of Weibull distribution with kappa = 2 (median1 = 5, median2 = 3)", { .skipTestifDisabled() simulationResult <- getSimulationSurvival(lambda1 = getLambdaByMedian(median = 5, kappa = 2), lambda2 = getLambdaByMedian(median = 3, kappa = 2), kappa = 2, plannedEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results ## expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$pi1, 0.98154699, tolerance = 1e-07) expect_equal(simulationResult$pi2, 0.99998474, tolerance = 1e-07) expect_equal(simulationResult$median1, 5, tolerance = 1e-07) expect_equal(simulationResult$median2, 3) expect_equal(simulationResult$hazardRatio, 0.36, tolerance = 1e-07) expect_equal(simulationResult$iterations[1, ], 100) expect_equal(simulationResult$analysisTime[1, ], 6.3123397, tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 6.3123397, tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfEvents, 40) expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$expectedNumberOfSubjects, 
104.7, tolerance = 1e-07) expect_equal(simulationResult$overallReject, 0.9, tolerance = 1e-07) expect_equal(simulationResult$futilityStop, 0) expect_equal(simulationResult$earlyStop, 0) }) test_that("'getSimulationSurvival': Perform recalculation of number of events based on conditional power", { .skipTestifDisabled() # Perform recalculation of number of events based on conditional power for a # three-stage design with inverse normal combination test, where the conditional power # is calculated under the specified effect size thetaH1 = 1.3 and up to a four-fold # increase in originally planned sample size (number of events) is allowed # Note that the first value in \code{minNumberOfEventsPerStage} and # \code{maxNumberOfEventsPerStage} is arbitrary, i.e., it has no effect. dIN <- getDesignInverseNormal(informationRates = c(0.4, 0.7, 1)) resultsWithSSR1 <- getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3, conditionalPower = 0.8, thetaH1 = 1.3, plannedEvents = c(58, 102, 146), minNumberOfEventsPerStage = c(NA_real_, 44, 44), maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44), maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'resultsWithSSR1' with expected results ## expect_equal(resultsWithSSR1$accrualIntensity, 66.666667, tolerance = 1e-07) expect_equal(resultsWithSSR1$pi1, c(0.3, 0.32452723, 0.34819506, 0.37103359, 0.39307188, 0.41433798, 0.43485894), tolerance = 1e-07) expect_equal(resultsWithSSR1$median1, c(23.320299, 21.200271, 19.433582, 17.938691, 16.657356, 15.546866, 14.575187), tolerance = 1e-07) expect_equal(resultsWithSSR1$median2, 23.320299, tolerance = 1e-07) expect_equal(resultsWithSSR1$lambda1, c(0.029722912, 0.032695203, 0.035667494, 0.038639786, 0.041612077, 0.044584368, 0.047556659), tolerance = 1e-07) expect_equal(resultsWithSSR1$lambda2, 0.029722912, tolerance = 1e-07) expect_equal(resultsWithSSR1$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100)) expect_equal(resultsWithSSR1$iterations[2, ], c(100, 100, 100, 98, 96, 99, 92)) expect_equal(resultsWithSSR1$iterations[3, ], c(96, 96, 88, 67, 50, 35, 11)) expect_equal(resultsWithSSR1$analysisTime[1, ], c(7.9761501, 7.8239889, 7.5191849, 7.4832292, 7.3291066, 7.1091953, 6.9737455), tolerance = 1e-07) expect_equal(resultsWithSSR1$analysisTime[2, ], c(17.76189, 17.229038, 16.567328, 16.175906, 15.668575, 15.328143, 14.604753), tolerance = 1e-07) expect_equal(resultsWithSSR1$analysisTime[3, ], c(30.192276, 28.615009, 26.463502, 25.657109, 23.821118, 23.34898, 22.534023), tolerance = 1e-07) expect_equal(resultsWithSSR1$studyDuration, c(29.683899, 28.160756, 25.20615, 22.190278, 19.319577, 18.030286, 14.789904), tolerance = 1e-07) expect_equal(resultsWithSSR1$eventsPerStage[1, ], c(58, 58, 58, 58, 58, 58, 58)) expect_equal(resultsWithSSR1$eventsPerStage[2, ], c(233.65, 231.27, 229.84, 229.43878, 228.57292, 227.67677, 219.44565), tolerance = 1e-07) expect_equal(resultsWithSSR1$eventsPerStage[3, ], c(409.28125, 401.01042, 385.875, 382.38806, 371.3, 374.14286, 367.72727), tolerance = 1e-07) expect_equal(resultsWithSSR1$expectedNumberOfEvents, c(402.256, 394.2208, 367.1508, 328.48602, 293.11354, 277.24313, 222.84098), tolerance = 1e-07) expect_equal(resultsWithSSR1$eventsNotAchieved[1, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR1$eventsNotAchieved[2, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR1$eventsNotAchieved[3, ], c(0, 0, 0, 0, 0, 0, 0)) 
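# Side note (a minimal sketch for orientation, assuming Schoenfeld's
# approximation and 1:1 randomization; not necessarily rpact's exact internal
# computation): the fixed-design number of events for power 1 - beta at
# one-sided level alpha under hazard ratio thetaH1 is
# 4 * (qnorm(1 - alpha) + qnorm(1 - beta))^2 / log(thetaH1)^2, e.g.
4 * (qnorm(0.975) + qnorm(0.8))^2 / log(1.3)^2 # ~ 456 events for 80% power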
expect_equal(resultsWithSSR1$numberOfSubjects[1, ], c(531.25, 521.07, 500.8, 498.42, 488.13, 473.47, 464.37), tolerance = 1e-07) expect_equal(resultsWithSSR1$numberOfSubjects[2, ], c(800, 800, 799.45, 798.66327, 796.55208, 797.06061, 793.47826), tolerance = 1e-07) expect_equal(resultsWithSSR1$numberOfSubjects[3, ], c(800, 800, 800, 800, 800, 800, 800)) expect_equal(resultsWithSSR1$expectedNumberOfSubjects, c(800, 800, 799.934, 793.55401, 785.93916, 794.85349, 767.86699), tolerance = 1e-07) expect_equal(resultsWithSSR1$rejectPerStage[1, ], c(0, 0, 0, 0.02, 0.04, 0.01, 0.08), tolerance = 1e-07) expect_equal(resultsWithSSR1$rejectPerStage[2, ], c(0.04, 0.04, 0.12, 0.31, 0.46, 0.64, 0.81), tolerance = 1e-07) expect_equal(resultsWithSSR1$rejectPerStage[3, ], c(0, 0.12, 0.26, 0.42, 0.41, 0.3, 0.11), tolerance = 1e-07) expect_equal(resultsWithSSR1$overallReject, c(0.04, 0.16, 0.38, 0.75, 0.91, 0.95, 1), tolerance = 1e-07) expect_equal(resultsWithSSR1$futilityPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR1$futilityPerStage[2, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR1$futilityStop, c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR1$earlyStop, c(0.04, 0.04, 0.12, 0.33, 0.5, 0.65, 0.89), tolerance = 1e-07) expect_equal(resultsWithSSR1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(resultsWithSSR1$conditionalPowerAchieved[2, ], c(0.12165751, 0.15502837, 0.23497758, 0.29890789, 0.33886493, 0.41286728, 0.49916888), tolerance = 1e-07) expect_equal(resultsWithSSR1$conditionalPowerAchieved[3, ], c(0.14749827, 0.23857933, 0.44868993, 0.59763371, 0.65378645, 0.66059558, 0.69812096), tolerance = 1e-07) # If thetaH1 is unspecified, the observed hazard ratio estimate # (calculated from the log-rank statistic) is used for performing the # recalculation of the number of events resultsWithSSR2 <- getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3, conditionalPower = 0.8, plannedEvents = c(58, 102, 146), minNumberOfEventsPerStage = c(NA_real_, 44, 44), maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44), maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of SimulationResultsSurvival object 'resultsWithSSR2' with expected results ## expect_equal(resultsWithSSR2$accrualIntensity, 66.666667, tolerance = 1e-07) expect_equal(resultsWithSSR2$pi1, c(0.3, 0.32452723, 0.34819506, 0.37103359, 0.39307188, 0.41433798, 0.43485894), tolerance = 1e-07) expect_equal(resultsWithSSR2$median1, c(23.320299, 21.200271, 19.433582, 17.938691, 16.657356, 15.546866, 14.575187), tolerance = 1e-07) expect_equal(resultsWithSSR2$median2, 23.320299, tolerance = 1e-07) expect_equal(resultsWithSSR2$lambda1, c(0.029722912, 0.032695203, 0.035667494, 0.038639786, 0.041612077, 0.044584368, 0.047556659), tolerance = 1e-07) expect_equal(resultsWithSSR2$lambda2, 0.029722912, tolerance = 1e-07) expect_equal(resultsWithSSR2$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100)) expect_equal(resultsWithSSR2$iterations[2, ], c(100, 100, 100, 98, 96, 99, 92)) expect_equal(resultsWithSSR2$iterations[3, ], c(99, 95, 92, 71, 60, 45, 21)) expect_equal(resultsWithSSR2$analysisTime[1, ], c(7.9761501, 7.8239889, 7.5191849, 7.4832292, 7.3291066, 7.1091953, 6.9737455), tolerance = 1e-07) expect_equal(resultsWithSSR2$analysisTime[2, ], c(17.532866, 16.792737, 15.753436, 15.242772, 14.414526, 13.395253, 12.536642), tolerance = 1e-07) expect_equal(resultsWithSSR2$analysisTime[3, ], 
c(29.782185, 28.27297, 25.249508, 24.235039, 21.407797, 20.846814, 17.625231), tolerance = 1e-07) expect_equal(resultsWithSSR2$studyDuration, c(29.663096, 27.530562, 24.305604, 21.136576, 18.176787, 16.398878, 13.170673), tolerance = 1e-07) expect_equal(resultsWithSSR2$eventsPerStage[1, ], c(58, 58, 58, 58, 58, 58, 58)) expect_equal(resultsWithSSR2$eventsPerStage[2, ], c(229.71, 222.76, 213.91, 210.63265, 201.21875, 185.82828, 171.84783), tolerance = 1e-07) expect_equal(resultsWithSSR2$eventsPerStage[3, ], c(403.55556, 395.78947, 365.25, 358.80282, 327.35, 327.17778, 272.14286), tolerance = 1e-07) expect_equal(resultsWithSSR2$expectedNumberOfEvents, c(401.8171, 387.138, 353.1428, 312.78082, 271.16875, 248.15727, 183.80196), tolerance = 1e-07) expect_equal(resultsWithSSR2$eventsNotAchieved[1, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR2$eventsNotAchieved[2, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR2$eventsNotAchieved[3, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR2$numberOfSubjects[1, ], c(531.25, 521.07, 500.8, 498.42, 488.13, 473.47, 464.37), tolerance = 1e-07) expect_equal(resultsWithSSR2$numberOfSubjects[2, ], c(798.3, 792.67, 784.71, 785.72449, 774.40625, 754.47475, 731), tolerance = 1e-07) expect_equal(resultsWithSSR2$numberOfSubjects[3, ], c(800, 800, 800, 800, 799.08333, 797.51111, 794.95238), tolerance = 1e-07) expect_equal(resultsWithSSR2$expectedNumberOfSubjects, c(799.983, 799.6335, 798.7768, 790.11401, 777.76145, 771.03106, 723.0996), tolerance = 1e-07) expect_equal(resultsWithSSR2$rejectPerStage[1, ], c(0, 0, 0, 0.02, 0.04, 0.01, 0.08), tolerance = 1e-07) expect_equal(resultsWithSSR2$rejectPerStage[2, ], c(0.01, 0.05, 0.08, 0.27, 0.36, 0.54, 0.71), tolerance = 1e-07) expect_equal(resultsWithSSR2$rejectPerStage[3, ], c(0.03, 0.11, 0.29, 0.39, 0.48, 0.37, 0.19), tolerance = 1e-07) expect_equal(resultsWithSSR2$overallReject, c(0.04, 0.16, 0.37, 0.68, 0.88, 0.92, 0.98), tolerance = 1e-07) expect_equal(resultsWithSSR2$futilityPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR2$futilityPerStage[2, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR2$futilityStop, c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR2$earlyStop, c(0.01, 0.05, 0.08, 0.29, 0.4, 0.55, 0.79), tolerance = 1e-07) expect_equal(resultsWithSSR2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(resultsWithSSR2$conditionalPowerAchieved[2, ], c(0.13442705, 0.17515425, 0.27216274, 0.37121019, 0.42163288, 0.51345413, 0.62679958), tolerance = 1e-07) expect_equal(resultsWithSSR2$conditionalPowerAchieved[3, ], c(0.088787205, 0.13342075, 0.37806621, 0.51790868, 0.64116584, 0.64220287, 0.73456911), tolerance = 1e-07) # Compare it with design without event size recalculation resultsWithoutSSR <- getSimulationSurvival(design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3, plannedEvents = c(58,102,145), maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of numeric object 'resultsWithoutSSR$overallReject' with expected results ## expect_equal(resultsWithoutSSR$overallReject, c(0.06, 0.09, 0.26, 0.36, 0.5, 0.62, 0.8), tolerance = 1e-07) ## ## Comparison of the results of numeric object 'resultsWithSSR1$overallReject' with expected results ## expect_equal(resultsWithSSR1$overallReject, c(0.04, 0.16, 0.38, 0.75, 0.91, 0.95, 1), tolerance = 1e-07) ## ## Comparison of the results of numeric object 'resultsWithSSR2$overallReject' with expected results ## 
expect_equal(resultsWithSSR2$overallReject, c(0.04, 0.16, 0.37, 0.68, 0.88, 0.92, 0.98), tolerance = 1e-07) }) test_that("'getSimulationSurvival': Confirm that event size recalculation increases the Type I error rate, i.e., you have to use the combination test ", { .skipTestifDisabled() dGS <- getDesignGroupSequential(informationRates = c(0.4, 0.7, 1)) resultsWithSSRGS <- getSimulationSurvival(design = dGS, hazardRatio = seq(1), pi2 = 0.3, conditionalPower = 0.8, plannedEvents = c(58, 102, 145), minNumberOfEventsPerStage = c(NA_real_, 44, 44), maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44), maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890) ## ## Comparison of the results of numeric object 'resultsWithSSRGS$overallReject' with expected results ## expect_equal(resultsWithSSRGS$overallReject, 0.05, tolerance = 1e-07) }) rpact/inst/tests/testthat/helper-class_analysis_dataset.R0000644000176200001440000000442413353343260023465 0ustar liggesusers###################################################################################### # # # -- Unit tests helper functions -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### getMultipleStageResultsForDataset <- function(dataset, thetaH0 = NA_real_) { stage <- dataset$getNumberOfStages() kMax <- stage + 1 design1 <- getDesignGroupSequential(kMax = kMax) design2 <- getDesignInverseNormal(kMax = kMax) design3 <- getDesignFisher(kMax = kMax) stageResults1 <- getStageResults(design = design1, dataInput = dataset, stage = stage, thetaH0 = thetaH0) stageResults2 <- getStageResults(design = design2, dataInput = dataset, stage = stage, thetaH0 = thetaH0) stageResults3 <- getStageResults(design = design3, dataInput = dataset, stage = stage, thetaH0 = thetaH0) return(list( stageResults1 = stageResults1, stageResults2 = stageResults2, stageResults3 = stageResults3 )) }rpact/inst/tests/testthat/test-f_design_power_calculator.R0000644000176200001440000045761413574422572023665 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 12 December 2019, 12:31:23 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing the power calculation of means for different designs and arguments") test_that("'getPowerMeans': power calculation of means in one sample for one-sided group sequential design", { designGS1 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), sided = 1, alpha = 0.07, beta = 0.1, futilityBounds = c(-0.5,0.5), typeOfDesign = "WT", deltaWT = 0.22) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterOneSampleMean} powerResult <- getPowerMeans(designGS1, groups = 1, thetaH0 = 0.5, stDev = 2, normalApproximation = FALSE, alternative = c(-1,1.2,1.4), directionUpper = TRUE, maxNumberOfSubjects = 50) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$effect, c(-1.5, 0.7, 0.9), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(15.177049, 35.61826, 31.576281), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(1.2596734e-07, 0.17254516, 0.28730882), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(2.7189456e-10, 0.43368823, 0.5145435), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(1.9550874e-12, 0.19182608, 0.13120557), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(1.2624119e-07, 0.79805947, 0.93305789), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.99114779, 0.032857727, 0.013099441), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.008851635, 0.045821034, 0.01275185), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.99999942, 0.078678761, 0.02585129), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.99999955, 0.68491215, 0.82770361), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8259013, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.1288256, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.97002208, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.2359398, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.67059547, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterOneSampleMean} powerResult <- getPowerMeans(designGS1, groups = 1, thetaH0 = -0.5, stDev = 2, normalApproximation = FALSE, 
alternative = c(-1.2, -1), directionUpper = FALSE, maxNumberOfSubjects = 50) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$effect, c(-0.7, -0.5), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(35.61826, 38.108498), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.17254516, 0.092241599), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.43368823, 0.28692789), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.19182608, 0.18609918), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.79805947, 0.56526867), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.032857727, 0.072497778), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.045821034, 0.12144703), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.078678761, 0.19394481), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.68491215, 0.5731143), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], -1.8259013, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], -1.1288256, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], -0.97002208, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], -0.2359398, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], -0.67059547, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterOneSampleMean} powerResult <- getPowerMeans(designGS1, groups = 1, thetaH0 = 0.5, stDev = 2, normalApproximation = TRUE, alternative = 1.2, maxNumberOfSubjects = 50) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$effect, 0.7, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 35.476828, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.17645213), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.43857394), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.19041646), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.80544254, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.031759279), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.04381091), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.075570189, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.69059627, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.6797184, tolerance = 1e-07) 
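# Side note (added illustration under the assumption of a known variance; not
# part of the original test): the one-sample boundary on the effect scale is
# thetaH0 + c_k * stDev / sqrt(n_k), where c_k is the z-scale critical value;
# reproducing the stage-1 value from the p-value scale boundary:
0.5 + qnorm(1 - 0.011170644) * 2 / sqrt(15) # ~ 1.6797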
expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.1091952, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.96124634, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.24180111, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.66903085, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterOneSampleMean} powerResult <- getPowerMeans(designGS1, groups = 2, thetaH0 = -0.5, stDev = 2, normalApproximation = TRUE, alternative = -1.2, directionUpper = FALSE, maxNumberOfSubjects = 50) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$effect, -0.7, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 38.152327, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.0540554), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.17942496), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.13908306), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.37256342, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.11944374), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.20558857), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.32503231, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.55851267, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], -2.8594368, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], -1.7183904, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], -1.4224927, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.01639778, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], -0.8380617, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) getSampleSizeMeans(groups = 1, thetaH0 = -0.2, allocationRatioPlanned = 1) }) test_that("'getPowerMeans': power calculation of means in one sample for two-sided group sequential design", { designGS2 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), alpha = 0.2, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.22) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:AdjShiftParameterOneSampleMean} powerResult <- getPowerMeans(designGS2, groups = 1, thetaH0 = 0.5, stDev = 2, 
normalApproximation = FALSE, alternative = 1.2, maxNumberOfSubjects = 50) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$effect, 0.7, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 34.898263, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.24200246), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.44211004), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.17580597), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.85991847, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.6841125, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -0.6632814, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.060186214, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.080390401, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 1.6632814, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 1.0601862, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.9196096, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.040846565, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.10670956, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.14433417, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:AdjShiftParameterOneSampleMean} powerResult <- getPowerMeans(designGS2, groups = 1, thetaH0 = -0.5, stDev = 2, normalApproximation = FALSE, alternative = -1.2, maxNumberOfSubjects = 50) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$effect, -0.7, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 34.898263, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.24200246), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.44211004), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.17580597), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.85991847, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.6841125, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -1.6632814, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -1.0601862, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.9196096, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 0.6632814, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.060186214, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], -0.080390401, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.040846565, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.10670956, tolerance = 1e-07) 
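# Side note (added illustration, not an original expectation): for a two-sided
# design the p-value scale boundary is 2 * (1 - pnorm(c_k)), so inverting the
# stage-1 value recovers the z-scale critical value:
qnorm(1 - 0.040846565 / 2) # ~ 2.045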
expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.14433417, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterOneSampleMean} powerResult <- getPowerMeans(designGS2, groups = 1, thetaH0 = 0.5, stDev = 2, normalApproximation = TRUE, alternative = 1.2, maxNumberOfSubjects = 50) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$effect, 0.7, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 34.735156, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.24557792), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.44464112), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.17420245), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.8644215, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.69021905, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -0.5560769, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.045347909, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.087095017, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 1.5560769, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 1.0453479, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.91290498, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.040846565, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.10670956, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.14433417, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterOneSampleMean} powerResult <- getPowerMeans(designGS2, groups = 1, thetaH0 = -0.5, stDev = 2, normalApproximation = TRUE, alternative = -1.2, maxNumberOfSubjects = 50) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$effect, -0.7, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 34.735156, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.24557792), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.44464112), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.17420245), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.8644215, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.69021905, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -1.5560769, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -1.0453479, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.91290498, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 0.5560769, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.045347909, tolerance = 1e-07) 
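# Side note (added illustration under the known-variance assumption): the
# two-sided boundaries on the effect scale are symmetric around thetaH0,
# i.e. thetaH0 -/+ c_k * stDev / sqrt(n_k); reproducing both stage-1 values:
-0.5 + c(-1, 1) * qnorm(1 - 0.040846565 / 2) * 2 / sqrt(15) # ~ -1.556 and 0.556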
expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], -0.087095017, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.040846565, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.10670956, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.14433417, tolerance = 1e-07) }) test_that("'getPowerMeans': power calculation of mean difference in two samples for one-sided group sequential design", { designGS1 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), sided = 1, alpha = 0.07, beta = 0.1, futilityBounds = c(-0.5,0.5), typeOfDesign = "WT", deltaWT = 0.22) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesMeanDiff} powerResult <- getPowerMeans(designGS1, groups = 2, thetaH0 = 0.5, stDev = 1.5, meanRatio = F, normalApproximation = FALSE, alternative = 1.8, directionUpper = TRUE, maxNumberOfSubjects = 50, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 11.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 26.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 37.5, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 3.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 8.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 12.5, tolerance = 1e-07) expect_equal(powerResult$effect, 1.3, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 34.682897, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.19830007), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.46269628), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.18105899), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.84205533, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.026384529), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.034179878), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.060564406, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.72156075, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 2.8183805, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.5902217, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.3144249, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.04183972, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.79556274, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesMeanDiff} powerResult <- getPowerMeans(designGS1, groups = 2, 
thetaH0 = -0.5, stDev = 1.5, meanRatio = F, normalApproximation = FALSE, alternative = -1.8, directionUpper = FALSE, maxNumberOfSubjects = 50, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 11.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 26.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 37.5, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 3.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 8.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 12.5, tolerance = 1e-07) expect_equal(powerResult$effect, -1.3, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 34.682897, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.19830007), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.46269628), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.18105899), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.84205533, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.026384529), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.034179878), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.060564406, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.72156075, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], -2.8183805, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], -1.5902217, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], -1.3144249, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], -0.04183972, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], -0.79556274, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterTwoSamplesMean} powerResult <- getPowerMeans(designGS1, groups = 2, thetaH0 = 0.5, stDev = 1.5, meanRatio = F, normalApproximation = TRUE, alternative = 1.8, maxNumberOfSubjects = 50, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 11.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 26.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 37.5, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 3.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 8.75, tolerance = 1e-07) 
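# Side note (allocation arithmetic, added for illustration): with
# r = allocationRatioPlanned = 3, the group sizes are n1_k = r / (1 + r) * n_k
# and n2_k = n_k / (1 + r), e.g. at stage 2:
c(3, 1) / (1 + 3) * 35 # -> 26.25 and 8.75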
expect_equal(powerResult$numberOfSubjects2[3, ], 12.5, tolerance = 1e-07) expect_equal(powerResult$effect, 1.3, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 34.513558, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.20296684), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.46718133), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.17879617), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.84894434, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.025383492), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.032430719), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.057814211, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.72796238, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 2.5433322, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.555157, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.2989021, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.0527864, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.79277002, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterTwoSamplesMean} powerResult <- getPowerMeans(designGS1, groups = 2, thetaH0 = -0.5, stDev = 1.5, meanRatio = F, normalApproximation = TRUE, alternative = -1.8, directionUpper = FALSE, maxNumberOfSubjects = 50, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 11.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 26.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 37.5, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 3.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 8.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 12.5, tolerance = 1e-07) expect_equal(powerResult$effect, -1.3, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 34.513558, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.20296684), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.46718133), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.17879617), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.84894434, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.025383492), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.032430719), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.057814211, tolerance = 
1e-07) expect_equal(powerResult$earlyStop, 0.72796238, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], -2.5433322, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], -1.555157, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], -1.2989021, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], -0.0527864, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], -0.79277002, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesMeanRatio} powerResult <- getPowerMeans(designGS1, groups = 2, thetaH0 = 0.8, stDev = 1.5, meanRatio = T, normalApproximation = FALSE, alternative = 1.8, directionUpper = TRUE, maxNumberOfSubjects = 50, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 11.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 26.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 37.5, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 3.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 8.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 12.5, tolerance = 1e-07) expect_equal(powerResult$effect, 1) expect_equal(powerResult$expectedNumberOfSubjects, 36.038015, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.16086364), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.41797637), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.19543795), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.77427796, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.036438496), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.052451014), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.08888951, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.66772952, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 2.7808252, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.7314858, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.495845, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.40854768, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 1.0525289, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 
0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterTwoSamplesMean} powerResult <- getPowerMeans(designGS1, groups = 2, thetaH0 = 0.8, stDev = 1.5, meanRatio = T, normalApproximation = TRUE, alternative = 1.8, maxNumberOfSubjects = 50, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 11.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 26.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 37.5, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 3.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 8.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 12.5, tolerance = 1e-07) expect_equal(powerResult$effect, 1) expect_equal(powerResult$expectedNumberOfSubjects, 35.906427, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.16454336), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.42310788), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.19440486), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.7820561, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.035259709), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.050256465), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.085516174, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.67316741, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 2.5458238, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.7015266, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.4825823, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.41790054, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 1.0501428, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) }) test_that("'getPowerMeans': power calculation of mean difference in two samples for two-sided group sequential design", { designGS2 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), alpha = 0.4, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.22) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesMeanDiff} powerResult <- getPowerMeans(designGS2, groups = 2, stDev = 2, normalApproximation = FALSE, alternative = 1.2, maxNumberOfSubjects = 50, allocationRatioPlanned = 0.7) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) 
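# Side note (same allocation arithmetic as above, added for illustration):
# with r = allocationRatioPlanned = 0.7, stage 1 splits as
c(0.7, 1) / (1 + 0.7) * 15 # -> 6.1764706 and 8.8235294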
expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 6.1764706, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 14.411765, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 20.588235, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 8.8235294, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 20.588235, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 29.411765, tolerance = 1e-07) expect_equal(powerResult$effect, 1.2, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 31.808737, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.35754296), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.37848399), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.13839393), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.87442088, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.73602695, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -1.6972761, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.83631454, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.62866109, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 1.6972761, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.83631454, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.62866109, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesMeanDiff} powerResult <- getPowerMeans(designGS2, groups = 2, stDev = 2, normalApproximation = FALSE, alternative = -1.2, maxNumberOfSubjects = 50, allocationRatioPlanned = 0.7) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 6.1764706, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 14.411765, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 20.588235, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 8.8235294, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 20.588235, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 29.411765, tolerance = 1e-07) expect_equal(powerResult$effect, -1.2, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 31.808737, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.35754296), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.37848399), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.13839393), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.87442088, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.73602695, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -1.6972761, 
tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.83631454, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.62866109, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 1.6972761, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.83631454, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.62866109, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterTwoSamplesMean} powerResult <- getPowerMeans(designGS2, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = TRUE, alternative = 1.2, maxNumberOfSubjects = 50, allocationRatioPlanned = 0.7) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 6.1764706, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 14.411765, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 20.588235, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 8.8235294, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 20.588235, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 29.411765, tolerance = 1e-07) expect_equal(powerResult$effect, 1.2, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 31.74783, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.35907583), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.37896773), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.13788231), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.87592587, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.73804356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -1.5897396, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.82092617, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.62155644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 1.5897396, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.82092617, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.62155644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterTwoSamplesMean} powerResult <- getPowerMeans(designGS2, groups = 2, stDev = 2, normalApproximation = TRUE, alternative = -1.2, maxNumberOfSubjects = 50, allocationRatioPlanned = 0.7) ## ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results ## 
expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 6.1764706, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 14.411765, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 20.588235, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 8.8235294, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 20.588235, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 29.411765, tolerance = 1e-07) expect_equal(powerResult$effect, -1.2, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 31.74783, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.35907583), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.37896773), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.13788231), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.87592587, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.73804356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -1.5897396, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.82092617, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.62155644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 1.5897396, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.82092617, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.62155644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07) }) context("Testing the power calculation of rates for different designs and arguments") test_that("'getPowerRates': power calculation of rate in one sample for one-sided group sequential design", { designGS1 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), sided = 1, alpha = 0.07, beta = 0.1, futilityBounds = c(-0.5,0.5), typeOfDesign = "WT", deltaWT = 0.22) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterOneSampleRate} powerResult <- getPowerRates(designGS1, groups = 1, thetaH0 = 0.4, pi1 = c(0.2,0.3,0.4), directionUpper = FALSE, maxNumberOfSubjects = 40) ## ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 12) expect_equal(powerResult$numberOfSubjects[2, ], 28) expect_equal(powerResult$numberOfSubjects[3, ], 40) expect_equal(powerResult$effect, c(-0.2, -0.1, 0), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(26.793099, 30.568926, 25.859698), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.23143452, 0.056551742, 0.011170644), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.48990786, 0.18729986, 0.030436001), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.16366541, 0.14357447, 0.025842077), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.8850078, 0.38742607, 0.067448723), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.020163481, 0.11504671, 
0.30853754), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.023605223, 0.1982266, 0.40193671), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.043768704, 0.31327331, 0.71047424), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.76511109, 0.55712491, 0.75208089), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.076920806, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.23316503, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.27368249, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.47071068, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.353709, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterOneSampleRate} powerResult <- getPowerRates(designGS1, groups = 1, thetaH0 = 0.4, pi1 = c(0.4,0.5,0.6), directionUpper = TRUE, maxNumberOfSubjects = 40) ## ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 12) expect_equal(powerResult$numberOfSubjects[2, ], 28) expect_equal(powerResult$numberOfSubjects[3, ], 40) expect_equal(powerResult$effect, c(0, 0.1, 0.2), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(25.859698, 30.585503, 27.927522), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.011170644, 0.057586328, 0.19206788), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.030436001, 0.19052871, 0.45635017), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.025842077, 0.14536961, 0.1839518), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.067448723, 0.39348465, 0.83236985), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.30853754, 0.11330227, 0.027796437), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.40193671, 0.19527267, 0.03667294), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.71047424, 0.30857493, 0.064469377), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.75208089, 0.55668998, 0.71288743), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.72307919, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.56683497, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.52631751, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.32928932, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.446291, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ],
0.30853754, tolerance = 1e-07) }) test_that("'getPowerRates': power calculation of rate in one sample for two-sided group sequential design", { designGS2 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), alpha = 0.4, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.22) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:AdjShiftParameterOneSampleRate} powerResult <- getPowerRates(designGS2, groups = 1, thetaH0 = 0.4, pi1 = seq(0.2,0.6,0.1), maxNumberOfSubjects = 40) ## ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 12) expect_equal(powerResult$numberOfSubjects[2, ], 28) expect_equal(powerResult$numberOfSubjects[3, ], 40) expect_equal(powerResult$effect, c(-0.2, -0.1, 0, 0.1, 0.2), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(20.319274, 30.129425, 34.422159, 30.357182, 22.419855), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.54595705, 0.22704321, 0.1297467, 0.22142183, 0.46151826), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.36616073, 0.29278043, 0.16207777, 0.28691724, 0.38813612), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.065351333, 0.15710154, 0.10817552, 0.15623302, 0.098356497), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.97746912, 0.67692518, 0.4, 0.66457209, 0.94801088), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.91211779, 0.51982364, 0.29182448, 0.50833906, 0.84965439), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.18573229, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.28935423, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.3162256, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 0.61426771, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.51064577, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.4837744, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07) }) test_that("'getPowerRates': power calculation of rate in two samples for one-sided group sequential design, riskRatio = FALSE ", { # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateDiff} # @refFS[Formula]{fs:EstimatesDiffFarringtonManning} designGS1 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), sided = 1, alpha = 0.07, beta = 0.1, futilityBounds = c(-0.5,0.5), typeOfDesign = "WT", deltaWT = 0.22) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateDiff} # @refFS[Formula]{fs:EstimatesDiffFarringtonManning} powerResult <- getPowerRates(designGS1, groups = 2, thetaH0 = 0.1, pi2 = 0.4, pi1 = c(0.1,0.2,0.3), directionUpper = FALSE, maxNumberOfSubjects = 40, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 12) expect_equal(powerResult$numberOfSubjects[2, ], 28) expect_equal(powerResult$numberOfSubjects[3, ], 40) expect_equal(powerResult$numberOfSubjects1[1, ], 9) 
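# Illustrative sketch (hypothetical helper 'nTotalSketch', not part of the
# test suite): with allocationRatioPlanned = 3 the cumulative totals split as
# n1 = n * r / (1 + r) and n2 = n / (1 + r), reproducing the group-size
# checks around this point:
nTotalSketch <- c(12, 28, 40)
nTotalSketch * 3 / (1 + 3)  # 9 21 30 (numberOfSubjects1)
nTotalSketch / (1 + 3)      # 3  7 10 (numberOfSubjects2)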
expect_equal(powerResult$numberOfSubjects1[2, ], 21) expect_equal(powerResult$numberOfSubjects1[3, ], 30) expect_equal(powerResult$numberOfSubjects2[1, ], 3) expect_equal(powerResult$numberOfSubjects2[2, ], 7) expect_equal(powerResult$numberOfSubjects2[3, ], 10) expect_equal(powerResult$effect, c(-0.4, -0.3, -0.2), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(27.333747, 30.142404, 30.525807), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.21254585, 0.11056737, 0.054245237), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.47569558, 0.32910884, 0.18002797), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.17392941, 0.19557908, 0.13943265), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.86217083, 0.63525529, 0.37370586), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.023466961, 0.059262043, 0.11909962), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.029128919, 0.096088854, 0.20501677), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.05259588, 0.1553509, 0.32411639), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.74083731, 0.59502711, 0.5583896), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], -0.3905544, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], -0.21681979, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], -0.15504053, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.26517501, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], -0.00361566, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateDiff} # @refFS[Formula]{fs:EstimatesDiffFarringtonManning} powerResult <- getPowerRates(designGS1, groups = 2, thetaH0 = -0.1, pi2 = 0.4, pi1 = c(0.2, 0.3, 0.4, 0.5), directionUpper = TRUE, maxNumberOfSubjects = 80, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 24) expect_equal(powerResult$numberOfSubjects[2, ], 56) expect_equal(powerResult$numberOfSubjects[3, ], 80) expect_equal(powerResult$numberOfSubjects1[1, ], 18) expect_equal(powerResult$numberOfSubjects1[2, ], 42) expect_equal(powerResult$numberOfSubjects1[3, ], 60) expect_equal(powerResult$numberOfSubjects2[1, ], 6) expect_equal(powerResult$numberOfSubjects2[2, ], 14) expect_equal(powerResult$numberOfSubjects2[3, ], 20) expect_equal(powerResult$effect, c(-0.1, -2.7755576e-17, 0.1, 0.2), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(42.4454, 51.719397, 58.823585, 61.315141), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.0028716829, 0.011170644, 0.031364648, 0.076178456), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.0049229598, 0.030436001, 0.1027412, 0.24505539), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], 
c(0.0033586921, 0.025842077, 0.087149125, 0.17152942), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.011153335, 0.067448723, 0.22125497, 0.49276327), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.49105221, 0.30853754, 0.17789692, 0.08798644), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.40736296, 0.40193671, 0.29133241, 0.150429), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.89841517, 0.71047424, 0.46922933, 0.23841544), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.90620981, 0.75208089, 0.60333518, 0.55964928), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.38186802, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.17360028, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.10931124, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], -0.20652185, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], -0.02383242, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) }) test_that("'getPowerRates': power calculation of rate in two samples for one-sided group sequential design, riskRatio = TRUE ", { designGS1 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), sided = 1, alpha = 0.07, beta = 0.1, futilityBounds = c(-0.5,0.5), typeOfDesign = "WT", deltaWT = 0.22) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateRatio} # @refFS[Formula]{fs:EstimatesRatioFarringtonManning} powerResult <- getPowerRates(designGS1, groups = 2, thetaH0 = 0.8, pi2 = 0.5, pi1 = c(0.1,0.2,0.3), riskRatio = T, directionUpper = FALSE, maxNumberOfSubjects = 40, allocationRatioPlanned = 5) ## ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 12) expect_equal(powerResult$numberOfSubjects[2, ], 28) expect_equal(powerResult$numberOfSubjects[3, ], 40) expect_equal(powerResult$numberOfSubjects1[1, ], 10) expect_equal(powerResult$numberOfSubjects1[2, ], 23.333333, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 33.333333, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 2) expect_equal(powerResult$numberOfSubjects2[2, ], 4.6666667, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 6.6666667, tolerance = 1e-07) expect_equal(powerResult$effect, c(-0.6, -0.4, -0.2), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(29.869153, 30.545915, 28.722194), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.12233203, 0.055263055, 0.02493902), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.35325438, 0.1832494, 0.079687483), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.19845995, 0.14128433, 0.068746287), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.67404635, 0.37979679, 0.17337279), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.052497346, 0.11728241, 0.20511002), tolerance = 
1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.083047698, 0.20198492, 0.32334859), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.13554504, 0.31926733, 0.52845861), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.61113145, 0.55777979, 0.63308512), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], NA_real_) expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.19789883, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.30397209, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 1.1132916, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.59448494, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateRatio} # @refFS[Formula]{fs:EstimatesRatioFarringtonManning} powerResult <- getPowerRates(designGS1, groups = 2, thetaH0 = 0.8, pi2 = 0.4, pi1 = c(0.4, 0.5, 0.6), riskRatio = T, directionUpper = TRUE, maxNumberOfSubjects = 80, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 24) expect_equal(powerResult$numberOfSubjects[2, ], 56) expect_equal(powerResult$numberOfSubjects[3, ], 80) expect_equal(powerResult$numberOfSubjects1[1, ], 18) expect_equal(powerResult$numberOfSubjects1[2, ], 42) expect_equal(powerResult$numberOfSubjects1[3, ], 60) expect_equal(powerResult$numberOfSubjects2[1, ], 6) expect_equal(powerResult$numberOfSubjects2[2, ], 14) expect_equal(powerResult$numberOfSubjects2[3, ], 20) expect_equal(powerResult$effect, c(0.2, 0.45, 0.7), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(58.50994, 61.208415, 55.770675), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.029681783, 0.083038809, 0.19351805), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.096741134, 0.26351903, 0.45786385), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.082477726, 0.17856321, 0.18329277), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.20890064, 0.52512104, 0.83467468), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.18431999, 0.080816996, 0.027459911), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.29934054, 0.13713348, 0.036076093), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.48366053, 0.21795048, 0.063536004), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.61008345, 0.56450831, 0.71491791), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8651141, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.3871263, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.2471692, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) 
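# Note (illustrative): the boundaries on the p-value scale are a property of
# designGS1 alone and do not depend on pi1, pi2, riskRatio, or the allocation
# ratio, which is why 0.011170644, 0.03577084, and 0.05147132 recur across
# the tests in this file.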
expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.57000905, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.96223105, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) }) test_that("'getPowerRates': power calculation of rate in two samples for two-sided group sequential design ", { designGS2 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), alpha = 0.4, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.22) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateDiff} # @refFS[Formula]{fs:EstimatesDiffFarringtonManning} powerResult <- getPowerRates(designGS2, groups = 2, pi2 = 0.5, pi1 = c(0.1,0.2,0.3), riskRatio = F, maxNumberOfSubjects = 40, allocationRatioPlanned = 0.5) ## ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 12) expect_equal(powerResult$numberOfSubjects[2, ], 28) expect_equal(powerResult$numberOfSubjects[3, ], 40) expect_equal(powerResult$numberOfSubjects1[1, ], 4) expect_equal(powerResult$numberOfSubjects1[2, ], 9.3333333, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 13.333333, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 8) expect_equal(powerResult$numberOfSubjects2[2, ], 18.666667, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 26.666667, tolerance = 1e-07) expect_equal(powerResult$effect, c(-0.4, -0.3, -0.2), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(20.586564, 26.282925, 30.696455), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.53456929, 0.33187612, 0.2131539), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.37045799, 0.36871195, 0.27793629), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.06955493, 0.14629915, 0.1545979), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.9745822, 0.84688722, 0.64568809), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.90502727, 0.70058807, 0.49109019), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -0.44319209, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.2365574, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.18006528, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 0.44319209, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.2365574, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.18006528, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateRatio} # @refFS[Formula]{fs:EstimatesRatioFarringtonManning} powerResult <- getPowerRates(designGS2, groups = 2, pi2 = 0.4, pi1 = c(0.4, 0.5, 0.6), riskRatio = T, maxNumberOfSubjects = 80, allocationRatioPlanned = 7) ## ## Comparison of the results of 
TrialDesignPlanRates object 'powerResult' with expected results ## expect_equal(powerResult$numberOfSubjects[1, ], 24) expect_equal(powerResult$numberOfSubjects[2, ], 56) expect_equal(powerResult$numberOfSubjects[3, ], 80) expect_equal(powerResult$numberOfSubjects1[1, ], 21) expect_equal(powerResult$numberOfSubjects1[2, ], 49) expect_equal(powerResult$numberOfSubjects1[3, ], 70) expect_equal(powerResult$numberOfSubjects2[1, ], 3) expect_equal(powerResult$numberOfSubjects2[2, ], 7) expect_equal(powerResult$numberOfSubjects2[3, ], 10) expect_equal(powerResult$effect, c(0, 0.25, 0.5), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(68.844318, 66.97762, 61.620959), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.1297467, 0.14947843, 0.21040306), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.16207777, 0.19381617, 0.27485292), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.10817552, 0.12487952, 0.15395566), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.4, 0.46817413, 0.63921164), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.29182448, 0.3432946, 0.48525598), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.22081341, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.49677588, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5992042, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 2.0083461, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 1.5897897, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.4538504, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07) }) context("Testing the power calculation for survival design for different designs and arguments") test_that("'getPowerSurvival': power calculation for survival design for one-sided group sequential design", { designGS1 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), sided = 1, alpha = 0.07, beta = 0.1, futilityBounds = c(-0.5,0.5), typeOfDesign = "WT", deltaWT = 0.22) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS1, pi2 = 0.4, pi1 = c(0.4, 0.5, 0.6), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 80, maxNumberOfEvents = 45, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(32.565971, 24, 18.155299), tolerance = 1e-07) expect_equal(powerResult$median2, 32.565971, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.021284401, 0.028881133, 0.03817878), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.021284401, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.3569154, 1.7937447), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 6.6666667, tolerance = 
1e-07) expect_equal(powerResult$followUpTime, c(41.628872, 30.417026, 22.638977), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(15.123713, 13.244904, 11.839868), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(32.118539, 26.401459, 22.217088), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(53.628872, 42.417026, 34.638977), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(32.017976, 30.394846, 25.872188), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(53.628872, 42.417026, 34.638977), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 13.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 31.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 45) expect_equal(powerResult$expectedNumberOfEvents, c(29.092161, 33.496718, 34.368969), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(80, 80, 78.932452), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(80, 80, 80)) expect_equal(powerResult$numberOfSubjects[3, ], c(80, 80, 80)) expect_equal(powerResult$expectedNumberOfSubjects, c(80, 80, 79.824774), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.011170644, 0.036015488, 0.087726198), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.030436001, 0.11913846, 0.27563412), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.025842077, 0.099477436, 0.1826593), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.067448723, 0.25463139, 0.54601962), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.30853754, 0.16216653, 0.076412449), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.40193671, 0.27053178, 0.12885945), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.71047424, 0.43269831, 0.2052719), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.75208089, 0.58785226, 0.56863222), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 4.203458, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 2.0990582, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.7531447, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.73032205, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 1.2284311, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS1, typeOfComputation = "Freedman", pi2 = 0.4, pi1 = c(0.4, 0.5, 0.6), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 80, maxNumberOfEvents = 45, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanSurvival 
object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(32.565971, 24, 18.155299), tolerance = 1e-07) expect_equal(powerResult$median2, 32.565971, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.021284401, 0.028881133, 0.03817878), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.021284401, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.3569154, 1.7937447), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 6.6666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(41.628872, 30.417026, 22.638977), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(15.123713, 13.244904, 11.839868), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(32.118539, 26.401459, 22.217088), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(53.628872, 42.417026, 34.638977), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(32.017976, 30.163653, 26.008714), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(53.628872, 42.417026, 34.638977), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 13.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 31.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 45) expect_equal(powerResult$expectedNumberOfEvents, c(29.092161, 33.256688, 34.504982), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(80, 80, 78.932452), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(80, 80, 80)) expect_equal(powerResult$numberOfSubjects[3, ], c(80, 80, 80)) expect_equal(powerResult$expectedNumberOfSubjects, c(80, 80, 79.822811), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.011170644, 0.033136424, 0.067729226), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.030436001, 0.10902189, 0.22109606), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.025842077, 0.091947627, 0.16101101), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.067448723, 0.23410594, 0.44983629), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.30853754, 0.1715797, 0.098248524), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.40193671, 0.28318207, 0.16903127), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.71047424, 0.45476178, 0.26727979), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.75208089, 0.59692009, 0.55610508), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 4.203458, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 2.0990582, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.7531447, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.73032205, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 1.2284311, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterSurvivalHsieh} # 
@refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS1, typeOfComputation = "Hsieh", pi2 = 0.4, pi1 = c(0.4, 0.5, 0.6), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 80, maxNumberOfEvents = 45, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(32.565971, 24, 18.155299), tolerance = 1e-07) expect_equal(powerResult$median2, 32.565971, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.021284401, 0.028881133, 0.03817878), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.021284401, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.3569154, 1.7937447), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 6.6666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(41.628872, 30.417026, 22.638977), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(15.123713, 13.244904, 11.839868), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(32.118539, 26.401459, 22.217088), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(53.628872, 42.417026, 34.638977), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(32.017976, 30.372933, 25.919163), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(53.628872, 42.417026, 34.638977), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 13.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 31.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 45) expect_equal(powerResult$expectedNumberOfEvents, c(29.092161, 33.473935, 34.421802), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(80, 80, 78.932452), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(80, 80, 80)) expect_equal(powerResult$numberOfSubjects[3, ], c(80, 80, 80)) expect_equal(powerResult$expectedNumberOfSubjects, c(80, 80, 79.825057), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.011170644, 0.03572104, 0.083721511), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.030436001, 0.11810922, 0.2653086), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.025842077, 0.098722701, 0.17919441), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.067448723, 0.25255296, 0.52822452), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.30853754, 0.16308496, 0.080152238), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.40193671, 0.27179271, 0.13588956), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.71047424, 0.43487767, 0.2160418), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.75208089, 0.58870793, 0.56507191), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 4.203458, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 2.0990582, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.7531447, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) 
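# Illustrative sketch (assumption: the hazard-ratio boundaries checked above,
# 4.203458 / 2.0990582 / 1.7531447, arise from the z-scale bounds via the
# Schoenfeld variance with allocation ratio 3 and cumulative events
# c(13.5, 31.5, 45); 'zSketch' is a hypothetical helper, not part of the
# test suite):
zSketch <- qnorm(1 - c(0.011170644, 0.03577084, 0.05147132))
exp(zSketch / sqrt(c(13.5, 31.5, 45) * 3 / (1 + 3)^2))  # ~ 4.2035 2.0991 1.7531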
expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.73032205, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 1.2284311, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS1, lambda2 = 0.04, thetaH0 = 1.25, hazardRatio = 0.8, directionUpper = FALSE, maxNumberOfSubjects = 200, maxNumberOfEvents = 65, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$pi1, 0.31886857, tolerance = 1e-07) expect_equal(powerResult$pi2, 0.38121661, tolerance = 1e-07) expect_equal(powerResult$median1, 21.660849, tolerance = 1e-07) expect_equal(powerResult$median2, 17.32868, tolerance = 1e-07) expect_equal(powerResult$lambda1, 0.032, tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, 5.7883102, tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], 8.7091306, tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], 13.807185, tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], 17.78831, tolerance = 1e-07) expect_equal(powerResult$studyDuration, 14.723329, tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, 17.78831, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 19.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 45.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 65) expect_equal(powerResult$expectedNumberOfEvents, 49.818428, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 145.15218, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], 200) expect_equal(powerResult$numberOfSubjects[3, ], 200) expect_equal(powerResult$expectedNumberOfSubjects, 190.996, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.076192913), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.24509523), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.17154561), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.49283375, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.087970326), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.15039938), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.2383697, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.55965784, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.37847558, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.67448058, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.78350426, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 
0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 1.623577, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 1.0533329, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS1, lambda2 = 0.04, thetaH0 = 0.8, hazardRatio = seq(0.8,1.4,0.2), directionUpper = TRUE, maxNumberOfSubjects = 200, maxNumberOfEvents = 65, allocationRatioPlanned = 1) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$pi1, c(0.31886857, 0.38121661, 0.43785755, 0.48931382), tolerance = 1e-07) expect_equal(powerResult$pi2, 0.38121661, tolerance = 1e-07) expect_equal(powerResult$median1, c(21.660849, 17.32868, 14.440566, 12.377628), tolerance = 1e-07) expect_equal(powerResult$median2, 17.32868, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.032, 0.04, 0.048, 0.056), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(5.1617391, 4.0656056, 3.2120436, 2.5256004), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(8.4767885, 8.0592408, 7.7076518, 7.4060255), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(13.399188, 12.692623, 12.137705, 11.68467), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(17.161739, 16.065606, 15.212044, 14.5256), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(12.758265, 13.175351, 12.752351, 11.880451), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(17.161739, 16.065606, 15.212044, 14.5256), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 19.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 45.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 65) expect_equal(powerResult$expectedNumberOfEvents, c(42.02201, 48.445748, 49.742518, 47.47852), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(141.27981, 134.32068, 128.46086, 123.43376), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(200, 200, 200, 194.7445), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[3, ], c(200, 200, 200, 200)) expect_equal(powerResult$expectedNumberOfSubjects, c(181.22667, 187.06042, 188.27858, 183.16132), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.011170644, 0.03658032, 0.082375002, 0.14710823), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.030436001, 0.12110923, 0.26177073, 0.39724295), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.025842077, 0.10091538, 0.17793787, 0.19830932), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.067448723, 0.25860493, 0.52208361, 0.74266051), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.30853754, 0.1604311, 0.08147133, 0.041317452), tolerance = 1e-07) 
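# Illustrative sketch (assumption: with a shifted null thetaH0 = 0.8 and 1:1
# allocation, the hazard-ratio bounds checked below scale as
# thetaH0 * exp(z_k / sqrt(d_k / 4)) with cumulative events d_k;
# 'zSketch2' is a hypothetical helper, not part of the test suite):
zSketch2 <- qnorm(1 - c(0.011170644, 0.03577084, 0.05147132))
0.8 * exp(zSketch2 / sqrt(c(19.5, 45.5, 65) / 4))  # ~ 2.2514 1.3650 1.1989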
expect_equal(powerResult$futilityPerStage[2, ], c(0.40193671, 0.26813346, 0.13835614, 0.061634556), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.71047424, 0.42856456, 0.21982747, 0.10295201), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.75208089, 0.58625412, 0.5639732, 0.6473032), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 2.2513678, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.3650021, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.1988902, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.63788392, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.92784212, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS1, eventTime = 120, pi2 = 0.4, thetaH0 = 0.8, hazardRatio = seq(0.8,1.4,0.2), directionUpper = TRUE, maxNumberOfSubjects = 200, maxNumberOfEvents = 65, allocationRatioPlanned = 1) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$pi1, c(0.33546019, 0.4, 0.45827173, 0.51088413), tolerance = 1e-07) expect_equal(powerResult$median1, c(203.53732, 162.82985, 135.69154, 116.30704), tolerance = 1e-07) expect_equal(powerResult$median2, 162.82985, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.0034055042, 0.0042568802, 0.0051082562, 0.0059596323), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.0042568802, tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(96.86335, 86.356678, 78.102375, 71.398147), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(32.816894, 30.124548, 27.945787, 26.142615), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(73.505015, 66.662265, 61.211479, 56.744296), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(108.86335, 98.356678, 90.102375, 83.398147), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(69.262697, 72.57735, 68.358222, 60.378881), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(108.86335, 98.356678, 90.102375, 83.398147), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 19.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 45.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 65) expect_equal(powerResult$expectedNumberOfEvents, c(42.02201, 48.445748, 49.742518, 47.47852), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(200, 200, 200, 200)) expect_equal(powerResult$numberOfSubjects[2, ], c(200, 200, 200, 200)) expect_equal(powerResult$numberOfSubjects[3, ], 
c(200, 200, 200, 200)) expect_equal(powerResult$expectedNumberOfSubjects, c(200, 200, 200, 200)) expect_equal(powerResult$rejectPerStage[1, ], c(0.011170644, 0.03658032, 0.082375002, 0.14710823), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.030436001, 0.12110923, 0.26177073, 0.39724295), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.025842077, 0.10091538, 0.17793787, 0.19830932), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.067448723, 0.25860493, 0.52208361, 0.74266051), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.30853754, 0.1604311, 0.08147133, 0.041317452), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.40193671, 0.26813346, 0.13835614, 0.061634556), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.71047424, 0.42856456, 0.21982747, 0.10295201), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.75208089, 0.58625412, 0.5639732, 0.6473032), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 2.2513678, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.3650021, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.1988902, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.011170644, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.03577084, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.05147132, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.63788392, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.92784212, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) }) test_that("'getPowerSurvival': power calculation for survival design for two-sided group sequential design ", { designGS2 <- getDesignGroupSequential(informationRates = c(0.3,0.7,1), alpha = 0.11, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.32) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS2, pi2 = 0.4, pi1 = c(0.2, 0.4, 0.6), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 180, maxNumberOfEvents = 55, allocationRatioPlanned = 0.3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(74.550809, 32.565971, 18.155299), tolerance = 1e-07) expect_equal(powerResult$median2, 32.565971, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.009297648, 0.021284401, 0.03817878), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.021284401, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(0.43682921, 1, 1.7937447), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 15) expect_equal(powerResult$followUpTime, c(14.361102, 11.603566, 9.1966475), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(11.329861, 10.584003, 9.8033045), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(19.345216, 17.58497, 15.96575), 
tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(26.361102, 23.603566, 21.196648), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(20.378955, 22.994709, 18.586202), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(26.361102, 23.603566, 21.196648), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 16.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 38.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 55) expect_equal(powerResult$expectedNumberOfEvents, c(40.275667, 53.258703, 46.484493), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(169.94792, 158.76005, 147.04957), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(180, 180, 180)) expect_equal(powerResult$numberOfSubjects[3, ], c(180, 180, 180)) expect_equal(powerResult$expectedNumberOfSubjects, c(177.90788, 179.45429, 176.38168), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.20812766, 0.025692757, 0.10981107), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.4067526, 0.045583354, 0.25986553), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.19467465, 0.038723888, 0.1839545), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.80955491, 0.11, 0.5536311), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.61488026, 0.071276112, 0.3696766), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.27158358, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.48064547, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5627937, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 3.6821078, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 2.0805356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.77685, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025692757, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.055458318, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.072467622, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS2, typeOfComputation = "Freedman", pi2 = 0.4, pi1 = c(0.2, 0.4, 0.6), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 180, maxNumberOfEvents = 55, allocationRatioPlanned = 0.3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(74.550809, 32.565971, 18.155299), tolerance = 1e-07) expect_equal(powerResult$median2, 32.565971, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.009297648, 0.021284401, 0.03817878), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.021284401, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(0.43682921, 1, 1.7937447), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 15) expect_equal(powerResult$followUpTime, c(14.361102, 11.603566, 9.1966475), tolerance = 1e-07) 
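# Note (illustrative): the event-time projections (median, lambda,
# followUpTime, analysisTime) are identical to the Schoenfeld run above;
# typeOfComputation only changes the power approximation, so only the
# rejection/stopping probabilities and studyDuration differ.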
expect_equal(powerResult$analysisTime[1, ], c(11.329861, 10.584003, 9.8033045), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(19.345216, 17.58497, 15.96575), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(26.361102, 23.603566, 21.196648), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(22.281639, 22.994709, 17.952578), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(26.361102, 23.603566, 21.196648), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 16.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 38.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 55) expect_equal(powerResult$expectedNumberOfEvents, c(44.992896, 53.258703, 44.408918), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(169.94792, 158.76005, 147.04957), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(180, 180, 180)) expect_equal(powerResult$numberOfSubjects[3, ], c(180, 180, 180)) expect_equal(powerResult$expectedNumberOfSubjects, c(178.68182, 179.45429, 175.39233), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.13113454, 0.025692757, 0.13983652), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.30051056, 0.045583354, 0.31559857), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.19586767, 0.038723888, 0.19878897), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.62751278, 0.11, 0.65422406), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.4316451, 0.071276112, 0.45543509), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.27158358, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.48064547, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5627937, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 3.6821078, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 2.0805356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.77685, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025692757, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.055458318, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.072467622, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterSurvivalHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS2, typeOfComputation = "Hsieh", pi2 = 0.4, pi1 = c(0.2, 0.4, 0.6), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 180, maxNumberOfEvents = 55, allocationRatioPlanned = 0.3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(74.550809, 32.565971, 18.155299), tolerance = 1e-07) expect_equal(powerResult$median2, 32.565971, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.009297648, 0.021284401, 0.03817878), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.021284401, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(0.43682921, 1, 1.7937447), tolerance = 1e-07) 
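# Illustrative sketch (assumption: exponential event times, so that
# lambda = -log(1 - pi) / eventTime; 'lambdaSketch' is a hypothetical helper,
# not part of the test suite):
lambdaSketch <- -log(1 - c(0.2, 0.4, 0.6)) / 24
lambdaSketch                    # lambda1: 0.009297648 0.021284401 0.038178780
lambdaSketch / lambdaSketch[2]  # hazardRatio: 0.43682921 1 1.79374466
log(2) / lambdaSketch           # median1: 74.550809 32.565971 18.155299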
expect_equal(powerResult$accrualIntensity, 15) expect_equal(powerResult$followUpTime, c(14.361102, 11.603566, 9.1966475), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(11.329861, 10.584003, 9.8033045), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(19.345216, 17.58497, 15.96575), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(26.361102, 23.603566, 21.196648), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(20.85758, 22.994709, 18.697033), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(26.361102, 23.603566, 21.196648), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 16.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 38.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 55) expect_equal(powerResult$expectedNumberOfEvents, c(41.467466, 53.258703, 46.846888), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(169.94792, 158.76005, 147.04957), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(180, 180, 180)) expect_equal(powerResult$numberOfSubjects[3, ], c(180, 180, 180)) expect_equal(powerResult$expectedNumberOfSubjects, c(178.11906, 179.45429, 176.54633), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.18711904, 0.025692757, 0.10481397), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.38354247, 0.045583354, 0.24956205), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.19996364, 0.038723888, 0.18005389), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.77062516, 0.11, 0.53442991), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.57066151, 0.071276112, 0.35437602), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.27158358, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.48064547, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5627937, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 3.6821078, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 2.0805356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.77685, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025692757, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.055458318, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.072467622, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS2, lambda2 = 0.04, hazardRatio = c(0.4,1,1.8), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 180, maxNumberOfEvents = 55, allocationRatioPlanned = 0.3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$pi1, c(0.31886857, 0.61710711, 0.82236067), tolerance = 1e-07) expect_equal(powerResult$pi2, 0.61710711, tolerance = 1e-07) expect_equal(powerResult$median1, c(43.321699, 17.32868, 9.6270442), tolerance = 1e-07) expect_equal(powerResult$median2, 
17.32868, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.016, 0.04, 0.072), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 15) expect_equal(powerResult$followUpTime, c(5.0603074, 3.4618446, 2.2113432), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(8.4193774, 7.8220347, 7.2555625), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(13.316286, 12.307189, 11.466641), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(17.060307, 15.461845, 14.211343), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(13.20331, 15.121757, 12.72043), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(17.060307, 15.461845, 14.211343), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 16.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 38.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 55) expect_equal(powerResult$expectedNumberOfEvents, c(37.895698, 53.258703, 46.404972), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(126.29066, 117.33052, 108.83344), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(180, 180, 171.99961), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[3, ], c(180, 180, 180)) expect_equal(powerResult$expectedNumberOfSubjects, c(166.366, 178.38985, 170.00949), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.25384788, 0.025692757, 0.11091682), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.44431262, 0.045583354, 0.26210486), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.17592811, 0.038723888, 0.18475659), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.8740886, 0.11, 0.55777827), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.6981605, 0.071276112, 0.37302168), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.27158358, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.48064547, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5627937, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 3.6821078, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 2.0805356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.77685, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025692757, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.055458318, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.072467622, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS2, typeOfComputation = "Freedman", lambda2 = 0.04, hazardRatio = c(0.4,1,1.8), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 180, maxNumberOfEvents = 55, allocationRatioPlanned = 0.3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$pi1, c(0.31886857, 0.61710711, 0.82236067), tolerance = 1e-07) expect_equal(powerResult$pi2, 
0.61710711, tolerance = 1e-07) expect_equal(powerResult$median1, c(43.321699, 17.32868, 9.6270442), tolerance = 1e-07) expect_equal(powerResult$median2, 17.32868, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.016, 0.04, 0.072), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 15) expect_equal(powerResult$followUpTime, c(5.0603074, 3.4618446, 2.2113432), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(8.4193774, 7.8220347, 7.2555625), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(13.316286, 12.307189, 11.466641), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(17.060307, 15.461845, 14.211343), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(14.524507, 15.121757, 12.352885), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(17.060307, 15.461845, 14.211343), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 16.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 38.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 55) expect_equal(powerResult$expectedNumberOfEvents, c(43.761896, 53.258703, 44.296935), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(126.29066, 117.33052, 108.83344), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(180, 180, 171.99961), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[3, ], c(180, 180, 180)) expect_equal(powerResult$expectedNumberOfSubjects, c(171.95824, 178.38985, 167.38024), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.14972738, 0.025692757, 0.14152926), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.33173334, 0.045583354, 0.31843565), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.20093576, 0.038723888, 0.19924141), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.68239647, 0.11, 0.65920633), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.48146072, 0.071276112, 0.45996492), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.27158358, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.48064547, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5627937, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 3.6821078, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 2.0805356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.77685, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025692757, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.055458318, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.072467622, tolerance = 1e-07) # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterSurvivalHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS2, typeOfComputation = "Hsieh", lambda2 = 0.04, hazardRatio = c(0.4,1,1.8), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 180, maxNumberOfEvents = 55, allocationRatioPlanned = 0.3) ## ## Comparison of the results of TrialDesignPlanSurvival object 
'powerResult' with expected results ## expect_equal(powerResult$pi1, c(0.31886857, 0.61710711, 0.82236067), tolerance = 1e-07) expect_equal(powerResult$pi2, 0.61710711, tolerance = 1e-07) expect_equal(powerResult$median1, c(43.321699, 17.32868, 9.6270442), tolerance = 1e-07) expect_equal(powerResult$median2, 17.32868, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.016, 0.04, 0.072), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 15) expect_equal(powerResult$followUpTime, c(5.0603074, 3.4618446, 2.2113432), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(8.4193774, 7.8220347, 7.2555625), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(13.316286, 12.307189, 11.466641), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(17.060307, 15.461845, 14.211343), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(13.562832, 15.121757, 12.784878), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(17.060307, 15.461845, 14.211343), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 16.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 38.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 55) expect_equal(powerResult$expectedNumberOfEvents, c(39.493229, 53.258703, 46.77542), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(126.29066, 117.33052, 108.83344), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(180, 180, 171.99961), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[3, ], c(180, 180, 180)) expect_equal(powerResult$expectedNumberOfSubjects, c(168.04554, 178.38985, 170.45805), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.2225769, 0.025692757, 0.10579404), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.42045819, 0.045583354, 0.25160664), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.18963039, 0.038723888, 0.18085515), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.83266548, 0.11, 0.53825584), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.64303509, 0.071276112, 0.35740069), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.27158358, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.48064547, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5627937, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 3.6821078, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 2.0805356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.77685, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025692757, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.055458318, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.072467622, tolerance = 1e-07) }) context("Testing the power calculation of survival data for other parameter variants") test_that("'getPowerSurvival': Fixed sample size with minimum required definitions, pi1 = c(0.4, 0.5, 0.6) and pi2 = 0.2 at event time 12, accrual time 12 and follow-up time 6 as default ", { powerResult <- getPowerSurvival(maxNumberOfEvents = 40, maxNumberOfSubjects = 200) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(37.275405, 23.320299, 
16.282985, 12), tolerance = 1e-07) expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(6.1115255, 3.442577, 1.6316894, 0.30440109), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(18.111525, 15.442577, 13.631689, 12.304401), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(18.111525, 15.442577, 13.631689, 12.304401), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(40, 40, 40, 40)) expect_equal(powerResult$expectedNumberOfSubjects, c(200, 200, 200, 200)) expect_equal(powerResult$overallReject, c(0.025, 0.31674317, 0.74507635, 0.94783846), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8585471, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) }) test_that("'getPowerSurvival': Four stage O'Brien and Fleming group sequential design with minimum required definitions, pi1 = c(0.4, 0.5, 0.6) and pi2 = 0.2 at event time 12, accrual time 12 and follow-up time 6 as default ", { powerResult <- getPowerSurvival(design = getDesignGroupSequential(kMax = 4), maxNumberOfEvents = 40, maxNumberOfSubjects = 200) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(6.1115255, 3.442577, 1.6316894, 0.30440109), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(8.2382885, 7.2643376, 6.5021817, 5.8683997), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(11.775158, 10.405299, 9.3411982, 8.4606249), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(14.851313, 12.90759, 11.580651, 10.517763), tolerance = 1e-07) expect_equal(powerResult$analysisTime[4, ], c(18.111525, 15.442577, 13.631689, 12.304401), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(18.070854, 14.972567, 12.292784, 10.112156), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(18.111525, 15.442577, 13.631689, 12.304401), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 10) expect_equal(powerResult$eventsPerStage[2, ], 20) expect_equal(powerResult$eventsPerStage[3, ], 30) expect_equal(powerResult$eventsPerStage[4, ], 40) expect_equal(powerResult$expectedNumberOfEvents, c(39.87408, 38.142534, 33.62741, 28.346513), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(137.30481, 121.07229, 108.36969, 97.806661), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(196.25264, 173.42164, 155.68664, 141.01041), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[3, ], c(200, 200, 193.01085, 175.29605), tolerance = 1e-07) 
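    # Illustrative cross-check (assuming constant recruitment at accrualIntensity until
    # maxNumberOfSubjects is reached): the per-stage number of subjects equals
    # min(accrualIntensity * analysisTime, maxNumberOfSubjects), e.g.
    16.666667 * 8.2382885            # ~137.30481 (stage 1, first column)
    min(16.666667 * 14.851313, 200)  # 200 (stage 3, first column: accrual is capped)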
    expect_equal(powerResult$numberOfSubjects[4, ], c(200, 200, 200, 200))
    expect_equal(powerResult$expectedNumberOfSubjects, c(199.99057, 199.0474, 190.68267, 167.42879), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c(2.5763449e-05, 0.00047146778, 0.0030806507, 0.012020122), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c(0.0020845834, 0.034441261, 0.15314753, 0.35953485), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[3, ], c(0.0083455469, 0.11544971, 0.32172195, 0.41021864), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[4, ], c(0.014544106, 0.15846685, 0.25680093, 0.16196846), tolerance = 1e-07)
    expect_equal(powerResult$overallReject, c(0.025, 0.30882929, 0.73475105, 0.94374207), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, c(0.010455897, 0.15036244, 0.47795013, 0.78177362), tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 12.942983, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[2, ], 3.5976357, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[3, ], 2.3478921, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[4, ], 1.8967435, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 2.5763449e-05, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.0020996694, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.0097077663, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[4, ], 0.021469878, tolerance = 1e-07)
})

test_that("'getPowerSurvival': For a fixed sample design, determine the necessary accrual time if 200 subjects are required and 30 subjects per time unit can be recruited", {
    powerResult <- getPowerSurvival(maxNumberOfEvents = 40, accrualTime = c(0), accrualIntensity = 30, maxNumberOfSubjects = 200)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    ##
    expect_equal(powerResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07)
    expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07)
    expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07)
    expect_equal(powerResult$accrualTime, 6.6666667, tolerance = 1e-07)
    expect_equal(powerResult$followUpTime, c(8.7010979, 6.004962, 4.1561659, 2.779256), tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ], c(15.367765, 12.671629, 10.822833, 9.4459226), tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, c(15.367765, 12.671629, 10.822833, 9.4459226), tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfEvents, c(40, 40, 40, 40))
    expect_equal(powerResult$expectedNumberOfSubjects, c(200, 200, 200, 200))
    expect_equal(powerResult$overallReject, c(0.025, 0.31674317, 0.74507635, 0.94783846), tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8585471, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07)
})

test_that("'getPowerSurvival': Determine the necessary accrual time if 200 subjects are required and if in the first 6 time units 20 subjects per time unit can be recruited, thereafter 30 subjects per time unit", {
    powerResult <- getPowerSurvival(maxNumberOfEvents = 40, accrualTime = c(0, 6), accrualIntensity = c(20, 30), maxNumberOfSubjects = 200)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    ##
    expect_equal(powerResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07)
    expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07)
    expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07)
    expect_equal(powerResult$accrualTime, c(6, 8.6666667), tolerance = 1e-07)
    expect_equal(powerResult$totalAccrualTime, 8.6666667, tolerance = 1e-07)
    expect_equal(powerResult$followUpTime, c(8.127286, 5.4402735, 3.6040872, 2.2435211), tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ], c(16.793953, 14.10694, 12.270754, 10.910188), tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, c(16.793953, 14.10694, 12.270754, 10.910188), tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfEvents, c(40, 40, 40, 40))
    expect_equal(powerResult$expectedNumberOfSubjects, c(200, 200, 200, 200))
    expect_equal(powerResult$overallReject, c(0.025, 0.31674317, 0.74507635, 0.94783846), tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8585471, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07)
})

test_that("'getPowerSurvival': Determine the maximum number of subjects if in the first 6 time units 20 subjects per time unit can be recruited and 30 subjects per time unit between time 6 and 10", {
    powerResult <- getPowerSurvival(maxNumberOfEvents = 40, accrualTime = c(0, 6, 10), accrualIntensity = c(20, 30))

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    ##
    expect_equal(powerResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07)
    expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07)
    expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07)
    expect_equal(powerResult$maxNumberOfSubjects, 240)
    expect_equal(powerResult$totalAccrualTime, 10)
    expect_equal(powerResult$followUpTime, c(5.3825871, 3.1889048, 1.691326, 0.58951828), tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ], c(15.382587, 13.188905, 11.691326, 10.589518), tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, c(15.382587, 13.188905, 11.691326, 10.589518), tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfEvents, c(40, 40, 40, 40))
    expect_equal(powerResult$expectedNumberOfSubjects, c(240, 240, 240, 240))
    expect_equal(powerResult$overallReject, c(0.025, 0.31674317, 0.74507635, 0.94783846), tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8585471, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07)
})

test_that("'getPowerSurvival': Specify accrual time as a list", {
    at <- list("0 - <6" = 20, "6 - Inf" = 30)
    powerResult <- getPowerSurvival(maxNumberOfEvents = 40, accrualTime = at, maxNumberOfSubjects = 200)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    ##
    expect_equal(powerResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07)
    expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07)
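    # Note (added for clarity): the list notation above is just a compact way of
    # writing accrualTime = c(0, 6) with accrualIntensity = c(20, 30); the expected
    # results of this test therefore coincide with those of the vector-based
    # accrual test above.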
    expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07)
    expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07)
    expect_equal(powerResult$accrualTime, c(6, 8.6666667), tolerance = 1e-07)
    expect_equal(powerResult$totalAccrualTime, 8.6666667, tolerance = 1e-07)
    expect_equal(powerResult$followUpTime, c(8.127286, 5.4402735, 3.6040872, 2.2435211), tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ], c(16.793953, 14.10694, 12.270754, 10.910188), tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, c(16.793953, 14.10694, 12.270754, 10.910188), tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfEvents, c(40, 40, 40, 40))
    expect_equal(powerResult$expectedNumberOfSubjects, c(200, 200, 200, 200))
    expect_equal(powerResult$overallReject, c(0.025, 0.31674317, 0.74507635, 0.94783846), tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8585471, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07)
})

test_that("'getPowerSurvival': Specify accrual time as a list if the maximum number of subjects needs to be calculated", {
    at <- list("0 - <6" = 20, "6 - <=10" = 30)
    powerResult <- getPowerSurvival(maxNumberOfEvents = 40, accrualTime = at)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    ##
    expect_equal(powerResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07)
    expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07)
    expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07)
    expect_equal(powerResult$maxNumberOfSubjects, 240)
    expect_equal(powerResult$totalAccrualTime, 10)
    expect_equal(powerResult$followUpTime, c(5.3825871, 3.1889048, 1.691326, 0.58951828), tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ], c(15.382587, 13.188905, 11.691326, 10.589518), tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, c(15.382587, 13.188905, 11.691326, 10.589518), tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfEvents, c(40, 40, 40, 40))
    expect_equal(powerResult$expectedNumberOfSubjects, c(240, 240, 240, 240))
    expect_equal(powerResult$overallReject, c(0.025, 0.31674317, 0.74507635, 0.94783846), tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8585471, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07)
})

test_that("'getPowerSurvival': Specify effect size for a two-stage group design with O'Brien & Fleming boundaries. Effect size is based on event rates at specified event time; directionUpper = FALSE needs to be specified because it should be shown that the hazard ratio is < 1", {
    powerResult <- getPowerSurvival(design = getDesignGroupSequential(kMax = 2), pi1 = 0.2, pi2 = 0.3, eventTime = 24, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    ##
    expect_equal(powerResult$median1, 74.550809, tolerance = 1e-07)
    expect_equal(powerResult$median2, 46.640597, tolerance = 1e-07)
    expect_equal(powerResult$lambda1, 0.009297648, tolerance = 1e-07)
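    # Illustrative cross-check (added note): lambda1 and median1 again follow from the
    # event probability via lambda = -log(1 - pi) / eventTime and median = log(2) / lambda:
    -log(1 - 0.2) / 24    # ~0.009297648 (lambda1 for pi1 = 0.2 at eventTime = 24)
    log(2) / 0.009297648  # ~74.550809 (median1)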
    expect_equal(powerResult$lambda2, 0.014861456, tolerance = 1e-07)
    expect_equal(powerResult$hazardRatio, 0.62562161, tolerance = 1e-07)
    expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(powerResult$followUpTime, 12.65889, tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ], 14.822645, tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[2, ], 24.65889, tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, 24.262964, tolerance = 1e-07)
    expect_equal(powerResult$maxStudyDuration, 24.65889, tolerance = 1e-07)
    expect_equal(powerResult$eventsPerStage[1, ], 20)
    expect_equal(powerResult$eventsPerStage[2, ], 40)
    expect_equal(powerResult$expectedNumberOfEvents, 39.194966, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects[1, ], 200)
    expect_equal(powerResult$numberOfSubjects[2, ], 200)
    expect_equal(powerResult$expectedNumberOfSubjects, 200)
    expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.04025172), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.27369279), tolerance = 1e-07)
    expect_equal(powerResult$overallReject, 0.31394451, tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, 0.040251721, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.28632231, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.53509093, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07)
})

test_that("'getPowerSurvival': Effect size is based on the event rate at specified event time for the reference group and on the hazard ratio; directionUpper = FALSE needs to be specified because it should be shown that the hazard ratio is < 1", {
    powerResult <- getPowerSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, pi2 = 0.3, eventTime = 24, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    ##
    expect_equal(powerResult$pi1, 0.16333997, tolerance = 1e-07)
    expect_equal(powerResult$median1, 93.281194, tolerance = 1e-07)
    expect_equal(powerResult$median2, 46.640597, tolerance = 1e-07)
    expect_equal(powerResult$lambda1, 0.007430728, tolerance = 1e-07)
    expect_equal(powerResult$lambda2, 0.014861456, tolerance = 1e-07)
    expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(powerResult$followUpTime, 14.346945, tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ], 15.582247, tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[2, ], 26.346945, tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, 25.202929, tolerance = 1e-07)
    expect_equal(powerResult$maxStudyDuration, 26.346945, tolerance = 1e-07)
    expect_equal(powerResult$eventsPerStage[1, ], 20)
    expect_equal(powerResult$eventsPerStage[2, ], 40)
    expect_equal(powerResult$expectedNumberOfEvents, 37.874505, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects[1, ], 200)
    expect_equal(powerResult$numberOfSubjects[2, ], 200)
    expect_equal(powerResult$expectedNumberOfSubjects, 200, tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.10627477), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.48165803), tolerance = 1e-07)
    expect_equal(powerResult$overallReject, 0.5879328, tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, 0.10627477, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.28632231, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.53509093, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07)
})

test_that("'getPowerSurvival': Effect size is based on the hazard rate for the reference group and on the hazard ratio; directionUpper = FALSE needs to be specified because it should be shown that the hazard ratio is < 1", {
    powerResult <- getPowerSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, lambda2 = 0.02, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    ##
    expect_equal(powerResult$pi1, 0.11307956, tolerance = 1e-07)
    expect_equal(powerResult$pi2, 0.21337214, tolerance = 1e-07)
    expect_equal(powerResult$median1, 69.314718, tolerance = 1e-07)
    expect_equal(powerResult$median2, 34.657359, tolerance = 1e-07)
    expect_equal(powerResult$lambda1, 0.01, tolerance = 1e-07)
    expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(powerResult$followUpTime, 9.1631017, tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ], 13.164641, tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[2, ], 21.163102, tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, 20.313067, tolerance = 1e-07)
    expect_equal(powerResult$maxStudyDuration, 21.163102, tolerance = 1e-07)
    expect_equal(powerResult$eventsPerStage[1, ], 20)
    expect_equal(powerResult$eventsPerStage[2, ], 40)
    expect_equal(powerResult$expectedNumberOfEvents, 37.874505, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects[1, ], 200)
    expect_equal(powerResult$numberOfSubjects[2, ], 200)
    expect_equal(powerResult$expectedNumberOfSubjects, 200, tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.10627477), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.48165803), tolerance = 1e-07)
    expect_equal(powerResult$overallReject, 0.5879328, tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, 0.10627477, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.28632231, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.53509093, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07)
})

test_that("'getPowerSurvival': Specification of piecewise exponential survival time and hazard ratios", {
    powerResult <- getPowerSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), hazardRatio = c(1.5, 1.8, 2), maxNumberOfEvents = 40, maxNumberOfSubjects = 200)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    ##
    expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(powerResult$followUpTime, c(4.2070411, 3.5734432, 3.2068918), tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ], c(12.173669, 11.705285, 11.428161), tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[2, ], c(16.207041, 15.573443, 15.206892), tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, c(16.088508, 15.305974, 14.805308), tolerance = 1e-07)
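    # A minimal sketch (hypothetical helper, not an rpact function) of the piecewise
    # exponential survival function assumed by these tests: the hazard is constant on
    # [0, 5), [5, 10), and [10, Inf), and S(t) is the exponential of minus the
    # integrated step hazard, e.g. for the control arm:
    pwSurv <- function(t, times = c(0, 5, 10), lambda = c(0.01, 0.02, 0.04)) {
        # integrate the step hazard from 0 to t, then exponentiate
        exp(-sum(lambda * pmax(0, pmin(t, c(times[-1], Inf)) - times)))
    }
    pwSurv(12)  # exp(-(0.01 * 5 + 0.02 * 5 + 0.04 * 2))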
expect_equal(powerResult$maxStudyDuration, c(16.207041, 15.573443, 15.206892), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 20) expect_equal(powerResult$eventsPerStage[2, ], 40) expect_equal(powerResult$expectedNumberOfEvents, c(39.412236, 38.617073, 37.874505), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(200, 195.08808, 190.46935), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(200, 200, 200)) expect_equal(powerResult$expectedNumberOfSubjects, c(200, 199.66036, 198.98713), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.0293882, 0.069146371, 0.10627477), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.21729291, 0.38699311, 0.48165803), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.24668111, 0.45613948, 0.5879328), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.029388201, 0.069146372, 0.10627477), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 3.4925675, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.8688412, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07) }) test_that("'getPowerSurvival': Specification of piecewise exponential survival time as list and hazard ratios ", { pws <- list("0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) powerResult <- getPowerSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2), maxNumberOfEvents = 40, maxNumberOfSubjects = 200) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(4.2070411, 3.5734432, 3.2068918), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(12.173669, 11.705285, 11.428161), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(16.207041, 15.573443, 15.206892), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(16.088508, 15.305974, 14.805308), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(16.207041, 15.573443, 15.206892), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 20) expect_equal(powerResult$eventsPerStage[2, ], 40) expect_equal(powerResult$expectedNumberOfEvents, c(39.412236, 38.617073, 37.874505), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(200, 195.08808, 190.46935), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(200, 200, 200)) expect_equal(powerResult$expectedNumberOfSubjects, c(200, 199.66036, 198.98713), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.0293882, 0.069146371, 0.10627477), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.21729291, 0.38699311, 0.48165803), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.24668111, 0.45613948, 0.5879328), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.029388201, 0.069146372, 0.10627477), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 3.4925675, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.8688412, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.023996469, 
tolerance = 1e-07) }) test_that("'getPowerSurvival': Specification of piecewise exponential survival time for both treatment arms ", { powerResult <- getPowerSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), lambda1 = c(0.015,0.03,0.06), maxNumberOfEvents = 40, maxNumberOfSubjects = 200) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$lambda1, c(0.015, 0.03, 0.06), tolerance = 1e-07) expect_equal(powerResult$hazardRatio, 1.5, tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, 4.2070411, tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], 12.173669, tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], 16.207041, tolerance = 1e-07) expect_equal(powerResult$studyDuration, 16.088508, tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, 16.207041, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 20) expect_equal(powerResult$eventsPerStage[2, ], 40) expect_equal(powerResult$expectedNumberOfEvents, 39.412236, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 200) expect_equal(powerResult$numberOfSubjects[2, ], 200) expect_equal(powerResult$expectedNumberOfSubjects, 200) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.0293882), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.21729291), tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.24668111, tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.029388201, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 3.4925675, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.8688412, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07) }) test_that("'getPowerSurvival': Specification of piecewise exponential survival time as a list", { pws <- list("0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) powerResult <- getPowerSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2), maxNumberOfEvents = 40, maxNumberOfSubjects = 200) ## ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results ## expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(4.2070411, 3.5734432, 3.2068918), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(12.173669, 11.705285, 11.428161), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(16.207041, 15.573443, 15.206892), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(16.088508, 15.305974, 14.805308), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(16.207041, 15.573443, 15.206892), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 20) expect_equal(powerResult$eventsPerStage[2, ], 40) expect_equal(powerResult$expectedNumberOfEvents, c(39.412236, 38.617073, 37.874505), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], c(200, 195.08808, 190.46935), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(200, 200, 200)) expect_equal(powerResult$expectedNumberOfSubjects, c(200, 199.66036, 198.98713), tolerance = 1e-07) 
    expect_equal(powerResult$rejectPerStage[1, ], c(0.0293882, 0.069146371, 0.10627477), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c(0.21729291, 0.38699311, 0.48165803), tolerance = 1e-07)
    expect_equal(powerResult$overallReject, c(0.24668111, 0.45613948, 0.5879328), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, c(0.029388201, 0.069146372, 0.10627477), tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 3.4925675, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.8688412, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07)
})

test_that("'getPowerSurvival': Specify effect size based on median survival times (median1 = 5, median2 = 3)", {
    powerResult <- getPowerSurvival(lambda1 = log(2) / 5, lambda2 = log(2) / 3, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    ##
    expect_equal(powerResult$pi1, 0.81053543, tolerance = 1e-07)
    expect_equal(powerResult$pi2, 0.9375, tolerance = 1e-07)
    expect_equal(powerResult$median1, 5, tolerance = 1e-07)
    expect_equal(powerResult$median2, 3)
    expect_equal(powerResult$hazardRatio, 0.6, tolerance = 1e-07)
    expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(powerResult$followUpTime, -5.9093279, tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ], 6.0906721, tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, 6.0906721, tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfEvents, 40)
    expect_equal(powerResult$expectedNumberOfSubjects, 101.5112, tolerance = 1e-07)
    expect_equal(powerResult$overallReject, 0.36520074, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.53805471, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07)
})

test_that("'getPowerSurvival': Specify effect size based on median survival times of a Weibull distribution with kappa = 2 (median1 = 5, median2 = 3)", {
    powerResult <- getPowerSurvival(
        lambda1 = getLambdaByMedian(median = 5, kappa = 2),
        lambda2 = getLambdaByMedian(median = 3, kappa = 2),
        kappa = 2, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    ##
    expect_equal(powerResult$pi1, 0.98154699, tolerance = 1e-07)
    expect_equal(powerResult$pi2, 0.99998474, tolerance = 1e-07)
    expect_equal(powerResult$median1, 5, tolerance = 1e-07)
    expect_equal(powerResult$median2, 3)
    expect_equal(powerResult$hazardRatio, 0.36, tolerance = 1e-07)
    expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(powerResult$followUpTime, -5.7378582, tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ], 6.2621418, tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, 6.2621418, tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfEvents, 40)
    expect_equal(powerResult$expectedNumberOfSubjects, 104.36903, tolerance = 1e-07)
    expect_equal(powerResult$overallReject, 0.8980967, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.53805471, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07)
})

context("Testing the follow-up time calculation")
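# The test below exercises the identity analysisTime[kMax] = accrualTime + followUpTime,
# i.e., the final analysis takes place once the required maxNumberOfEvents has been
# observed; followUpTime is the (possibly negative, see the tests above) time between
# the end of accrual and the final analysis.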
test_that("'getPowerSurvival': analysis time at last stage equals accrual time + follow-up time", { x1 <- getPowerSurvival(getDesignGroupSequential(typeOfDesign = "P"), accrualTime = 12, lambda2 = 0.005, lambda1 = 0.01, maxNumberOfSubjects = 766, maxNumberOfEvents = 76) expect_equal(x1$overallReject, 1 - x1$.design$beta, tolerance = 0.01) expect_equal(x1$analysisTime[3], x1$accrualTime + x1$followUpTime) x2 <- getPowerSurvival(getDesignGroupSequential(typeOfDesign = "P"), accrualTime = 12, maxNumberOfEvents = 76, maxNumberOfSubjects = 766, lambda2 = 0.005, lambda1 = 0.01) expect_equal(x2$analysisTime[3], x2$accrualTime + x2$followUpTime) x3 <- getPowerSurvival(getDesignGroupSequential(typeOfDesign = "WT"), accrualTime = c(0, 12, 15), accrualIntensity = c(20, 30), lambda2 = 0.005, lambda1 = 0.01, maxNumberOfEvents = 76) expect_equal(x3$analysisTime[length(x3$analysisTime)], x3$accrualTime[length(x3$accrualTime)] + x3$followUpTime) x4 <- getPowerSurvival(getDesignGroupSequential(typeOfDesign = "WT"), accrualTime = c(0, 12, 15), accrualIntensity = c(40, 60), maxNumberOfEvents = 76, piecewiseSurvivalTime = c(0, 5), lambda2 = c(0.005, 0.01), hazardRatio = 0.8) expect_equal(x4$analysisTime[length(x4$analysisTime)], x4$accrualTime[length(x4$accrualTime)] + x4$followUpTime) }) rpact/inst/tests/testthat/test-f_analysis_base_rates.R0000644000176200001440000035647213574164753023022 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 26 November 2019, 08:45:16 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing the analysis rates functionality for one treatment") test_that("'getAnalysisResults' for a group sequential design and one treatment", { design1 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.8, 1), futilityBounds = c(-0.5, 0, 0.5), typeOfDesign = "asKD", gammaA = 2.8) dataExample1 <- getDataset( n = c(8, 10, 9, 11), events = c(4, 5, 5, 6) ) x1 <- getAnalysisResults(design = design1, dataInput = dataExample1, stage = 3, thetaH0 = 0.75, normalApproximation = FALSE, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x1$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x1$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$pValues, c(0.11381531, 0.078126907, 0.16572571, NA_real_), tolerance = 1e-07) 
expect_equal(x1$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x1$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities, c(0.055828724, 0.21032099, 0.60291694, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x1$pi2, NA_real_) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.04626695, 0.1766668, 0.29544407, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.95373305, 0.8233332, 0.73635572, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.49999905, 0.26917981, 0.015800491, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, 3) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, 0.0089457853, NA_real_), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.32991006, NA_real_), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.70969307, NA_real_), tolerance = 1e-07) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.51904357, NA_real_), tolerance = 1e-07) expect_equal(x1$normalApproximation, FALSE) expect_equal(x1$directionUpper, FALSE) expect_equal(x1$overallTestStatistics, c(1.2064848, 2.0674098, 2.4192811), tolerance = 1e-07) expect_equal(x1$overallPValues, c(0.11381531, 0.01934778, 0.0077756083), tolerance = 1e-07) .skipTestifDisabled() x2 <- getAnalysisResults(design = design1, dataInput = dataExample1, stage = 3, thetaH0 = 0.75, normalApproximation = TRUE, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2, 3, 4)) expect_equal(x2$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x2$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x2$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x2$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(-1.6329932, -1.8257419, -1.3471506, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.051235217, 0.033944577, 0.088965863, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x2$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities, c(0.088079629, 0.32476642, 0.83593758, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$allocationRatioPlanned, 1) expect_equal(x2$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x2$pi2, NA_real_) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.11314483, 0.21639861, 0.31742335, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.88685517, 0.78360139, 0.71378821, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.49999905, 0.10104164, 0.0056362503, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, 
3) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, 0.0042246203, NA_real_), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.29660132, NA_real_), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.68026724, NA_real_), tolerance = 1e-07) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.48659273, NA_real_), tolerance = 1e-07) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, FALSE) expect_equal(x2$overallTestStatistics, c(-1.6329932, -2.4494897, -2.7777778, NA_real_), tolerance = 1e-07) expect_equal(x2$overallPValues, c(0.051235217, 0.0071529392, 0.0027366018, NA_real_), tolerance = 1e-07) x3 <- getAnalysisResults(design = design1, dataInput = dataExample1, stage = 2, thetaH0 = 0.75, nPlanned = c(12,6), pi1 = 0.5, normalApproximation = FALSE, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x3' with expected results ## expect_equal(x3$stages, c(1, 2, 3, 4)) expect_equal(x3$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x3$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x3$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x3$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x3$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x3$effectSizes, c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$pValues, c(0.11381531, 0.078126907, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x3$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x3$conditionalRejectionProbabilities, c(0.055828724, 0.21032099, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$nPlanned, c(NA_real_, NA_real_, 12, 6)) expect_equal(x3$allocationRatioPlanned, 1) expect_equal(x3$pi1, 0.5, tolerance = 1e-07) expect_equal(x3$pi2, NA_real_) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_, 0.6918414, 0.87964625), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(0.04626695, 0.1766668, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(0.95373305, 0.8233332, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.49999905, 0.26917981, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$finalStage, NA_integer_) expect_equal(x3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$normalApproximation, FALSE) expect_equal(x3$directionUpper, FALSE) expect_equal(x3$overallTestStatistics, c(1.2064848, 2.0674098), tolerance = 1e-07) expect_equal(x3$overallPValues, c(0.11381531, 0.01934778), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x3, piRange = seq(0.45, 0.75, 0.05)) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.94525609, 
0.87964625, 0.77671901, 0.6376454, 0.47357888, 0.30528352, 0.15917802), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.91393119, 1, 0.91393119, 0.69767633, 0.44485807, 0.23692776, 0.10539922), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 18") x4 <- getAnalysisResults(design = design1, dataInput = dataExample1, stage = 2, thetaH0 = 0.75, nPlanned = c(12,6), pi1 = 0.5, normalApproximation = TRUE, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x4' with expected results ## expect_equal(x4$stages, c(1, 2, 3, 4)) expect_equal(x4$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x4$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x4$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x4$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x4$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x4$effectSizes, c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$testStatistics, c(-1.6329932, -1.8257419, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$pValues, c(0.051235217, 0.033944577, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x4$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x4$conditionalRejectionProbabilities, c(0.088079629, 0.32476642, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$nPlanned, c(NA_real_, NA_real_, 12, 6)) expect_equal(x4$allocationRatioPlanned, 1) expect_equal(x4$pi1, 0.5, tolerance = 1e-07) expect_equal(x4$pi2, NA_real_) expect_equal(x4$conditionalPower, c(NA_real_, NA_real_, 0.85377193, 0.95011174), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalLowerBounds, c(0.11314483, 0.21639861, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds, c(0.88685517, 0.78360139, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues, c(0.49999905, 0.10104164, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$finalStage, NA_integer_) expect_equal(x4$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$normalApproximation, TRUE) expect_equal(x4$directionUpper, FALSE) expect_equal(x4$overallTestStatistics, c(-1.6329932, -2.4494897, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$overallPValues, c(0.051235217, 0.0071529392, NA_real_, NA_real_), tolerance = 1e-07) plotData2 <- testGetAnalysisResultsPlotData(x4, piRange = seq(0.45, 0.75, 0.05)) ## ## Comparison of the results of list object 'plotData2' with expected results ## expect_equal(plotData2$xValues, c(0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(0.98086022, 0.95011174, 0.89224094, 0.79890753, 0.66697034, 0.50241609, 0.32350374), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(0.91393119, 1, 0.91393119, 0.69767633, 0.44485807, 
0.23692776, 0.10539922), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power Plot with Likelihood") expect_equal(plotData2$xlab, "pi1") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 18") x5 <- getAnalysisResults(design = design1, dataInput = dataExample1, stage = 3, thetaH0 = 0.25, normalApproximation = FALSE, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x5' with expected results ## expect_equal(x5$stages, c(1, 2, 3, 4)) expect_equal(x5$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x5$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x5$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x5$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x5$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x5$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x5$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$pValues, c(0.11381531, 0.078126907, 0.048927307, NA_real_), tolerance = 1e-07) expect_equal(x5$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x5$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x5$conditionalRejectionProbabilities, c(0.055828724, 0.21032099, 0.85300796, NA_real_), tolerance = 1e-07) expect_equal(x5$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$allocationRatioPlanned, 1) expect_equal(x5$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x5$pi2, NA_real_) expect_equal(x5$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$repeatedConfidenceIntervalLowerBounds, c(0.04626695, 0.1766668, 0.29544407, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedConfidenceIntervalUpperBounds, c(0.95373305, 0.8233332, 0.73635572, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedPValues, c(0.49999905, 0.26917981, 0.0050506954, NA_real_), tolerance = 1e-07) expect_equal(x5$finalStage, 3) expect_equal(x5$finalPValues, c(NA_real_, NA_real_, 0.003964958, NA_real_), tolerance = 1e-07) expect_equal(x5$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.32244641, NA_real_), tolerance = 1e-07) expect_equal(x5$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.70667629, NA_real_), tolerance = 1e-07) expect_equal(x5$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.51656189, NA_real_), tolerance = 1e-07) expect_equal(x5$normalApproximation, FALSE) expect_equal(x5$directionUpper, TRUE) expect_equal(x5$overallTestStatistics, c(1.2064848, 2.0674098, 2.8135397), tolerance = 1e-07) expect_equal(x5$overallPValues, c(0.11381531, 0.01934778, 0.0024499668), tolerance = 1e-07) x6 <- getAnalysisResults(design = design1, dataInput = dataExample1, stage = 3, thetaH0 = 0.25, normalApproximation = TRUE, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x6' with expected results ## expect_equal(x6$stages, c(1, 2, 3, 4)) expect_equal(x6$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x6$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x6$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x6$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x6$stageLevels, c(0.00027594593, 
0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x6$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x6$testStatistics, c(1.6329932, 1.8257419, 2.116951, NA_real_), tolerance = 1e-07) expect_equal(x6$pValues, c(0.051235217, 0.033944577, 0.017132004, NA_real_), tolerance = 1e-07) expect_equal(x6$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x6$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x6$conditionalRejectionProbabilities, c(0.088079629, 0.32476642, 0.96903431, NA_real_), tolerance = 1e-07) expect_equal(x6$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$allocationRatioPlanned, 1) expect_equal(x6$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x6$pi2, NA_real_) expect_equal(x6$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$repeatedConfidenceIntervalLowerBounds, c(0.11314483, 0.21639861, 0.31742335, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedConfidenceIntervalUpperBounds, c(0.88685517, 0.78360139, 0.71378821, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedPValues, c(0.49999905, 0.10104164, 0.0013294657, NA_real_), tolerance = 1e-07) expect_equal(x6$finalStage, 3) expect_equal(x6$finalPValues, c(NA_real_, NA_real_, 0.0023857966, NA_real_), tolerance = 1e-07) expect_equal(x6$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.34941079, NA_real_), tolerance = 1e-07) expect_equal(x6$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.74332995, NA_real_), tolerance = 1e-07) expect_equal(x6$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.55110366, NA_real_), tolerance = 1e-07) expect_equal(x6$normalApproximation, TRUE) expect_equal(x6$directionUpper, TRUE) expect_equal(x6$overallTestStatistics, c(1.6329932, 2.4494897, 3.2222222, NA_real_), tolerance = 1e-07) expect_equal(x6$overallPValues, c(0.051235217, 0.0071529392, 0.00063600219, NA_real_), tolerance = 1e-07) x7 <- getAnalysisResults(design = design1, dataInput = dataExample1, stage = 2, thetaH0 = 0.25, nPlanned = c(12,6), pi1 = 0.5, normalApproximation = FALSE, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x7' with expected results ## expect_equal(x7$stages, c(1, 2, 3, 4)) expect_equal(x7$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x7$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x7$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x7$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x7$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x7$effectSizes, c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$pValues, c(0.11381531, 0.078126907, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x7$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x7$conditionalRejectionProbabilities, c(0.055828724, 0.21032099, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$nPlanned, c(NA_real_, NA_real_, 12, 6)) expect_equal(x7$allocationRatioPlanned, 1) expect_equal(x7$pi1, 0.5, tolerance = 1e-07) expect_equal(x7$pi2, NA_real_) expect_equal(x7$conditionalPower, c(NA_real_, NA_real_, 0.6918414, 0.87964625), tolerance = 1e-07) 
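# With nPlanned supplied for stages 3 and 4, conditional power is available for
# the remaining stages; the repeated confidence intervals and p-values checked
# next exist only for the two observed stages: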
expect_equal(x7$repeatedConfidenceIntervalLowerBounds, c(0.04626695, 0.1766668, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$repeatedConfidenceIntervalUpperBounds, c(0.95373305, 0.8233332, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$repeatedPValues, c(0.49999905, 0.26917981, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$finalStage, NA_integer_) expect_equal(x7$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$normalApproximation, FALSE) expect_equal(x7$directionUpper, TRUE) expect_equal(x7$overallTestStatistics, c(1.2064848, 2.0674098), tolerance = 1e-07) expect_equal(x7$overallPValues, c(0.11381531, 0.01934778), tolerance = 1e-07) plotData3 <- testGetAnalysisResultsPlotData(x7, piRange = seq(0.25, 0.55, 0.05)) ## ## Comparison of the results of list object 'plotData3' with expected results ## expect_equal(plotData3$xValues, c(0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55), tolerance = 1e-07) expect_equal(plotData3$condPowerValues, c(0.15917802, 0.30528352, 0.47357888, 0.6376454, 0.77671901, 0.87964625, 0.94525609), tolerance = 1e-07) expect_equal(plotData3$likelihoodValues, c(0.10539922, 0.23692776, 0.44485807, 0.69767633, 0.91393119, 1, 0.91393119), tolerance = 1e-07) expect_equal(plotData3$main, "Conditional Power Plot with Likelihood") expect_equal(plotData3$xlab, "pi1") expect_equal(plotData3$ylab, "Conditional power / Likelihood") expect_equal(plotData3$sub, "Stage = 2, # of remaining subjects = 18") x8 <- getAnalysisResults(design = design1, dataInput = dataExample1, stage = 2, thetaH0 = 0.25, nPlanned = c(12,6), pi1 = 0.5, normalApproximation = TRUE, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x8' with expected results ## expect_equal(x8$stages, c(1, 2, 3, 4)) expect_equal(x8$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x8$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x8$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x8$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x8$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x8$effectSizes, c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$testStatistics, c(1.6329932, 1.8257419, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$pValues, c(0.051235217, 0.033944577, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x8$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x8$conditionalRejectionProbabilities, c(0.088079629, 0.32476642, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$nPlanned, c(NA_real_, NA_real_, 12, 6)) expect_equal(x8$allocationRatioPlanned, 1) expect_equal(x8$pi1, 0.5, tolerance = 1e-07) expect_equal(x8$pi2, NA_real_) expect_equal(x8$conditionalPower, c(NA_real_, NA_real_, 0.85377193, 0.95011174), tolerance = 1e-07) expect_equal(x8$repeatedConfidenceIntervalLowerBounds, c(0.11314483, 0.21639861, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$repeatedConfidenceIntervalUpperBounds, c(0.88685517, 0.78360139, NA_real_, NA_real_), tolerance = 
1e-07) expect_equal(x8$repeatedPValues, c(0.49999905, 0.10104164, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$finalStage, NA_integer_) expect_equal(x8$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$normalApproximation, TRUE) expect_equal(x8$directionUpper, TRUE) expect_equal(x8$overallTestStatistics, c(1.6329932, 2.4494897, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$overallPValues, c(0.051235217, 0.0071529392, NA_real_, NA_real_), tolerance = 1e-07) plotData4 <- testGetAnalysisResultsPlotData(x8, piRange = seq(0.25, 0.55, 0.05)) ## ## Comparison of the results of list object 'plotData4' with expected results ## expect_equal(plotData4$xValues, c(0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55), tolerance = 1e-07) expect_equal(plotData4$condPowerValues, c(0.32350374, 0.50241609, 0.66697034, 0.79890753, 0.89224094, 0.95011174, 0.98086022), tolerance = 1e-07) expect_equal(plotData4$likelihoodValues, c(0.10539922, 0.23692776, 0.44485807, 0.69767633, 0.91393119, 1, 0.91393119), tolerance = 1e-07) expect_equal(plotData4$main, "Conditional Power Plot with Likelihood") expect_equal(plotData4$xlab, "pi1") expect_equal(plotData4$ylab, "Conditional power / Likelihood") expect_equal(plotData4$sub, "Stage = 2, # of remaining subjects = 18") }) test_that("'getAnalysisResults' for an inverse normal design and one treatment", { .skipTestifDisabled() design2 <- getDesignInverseNormal(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.8, 1), futilityBounds = c(-0.5, 0, 0.5), typeOfDesign = "asKD", gammaA = 2.8) dataExample2 <- getDataset( n = c(8, 10, 9, 11), # cumsum, overall n = (8, 18, 27, 38) events = c(4, 5, 5, 6) # cumsum, overall events = (4, 9, 14, 20) ) x1 <- getAnalysisResults(design = design2, dataInput = dataExample2, stage = 3, thetaH0 = 0.75, normalApproximation = FALSE, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x1$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x1$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$pValues, c(0.11381531, 0.078126907, 0.16572571, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(x1$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities, c(0.055828724, 0.15918316, 0.28098687, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x1$pi2, NA_real_) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) 
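# The stage-wise p-values asserted above come from the exact binomial test;
# e.g. the stage-3 value can be reproduced as pbinom(5, 9, 0.75) = 0.16572571
# (5 events in 9 subjects, thetaH0 = 0.75, directionUpper = FALSE). The
# repeated confidence intervals checked next narrow as information accrues: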
expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.04626695, 0.16132367, 0.26858957, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.95373305, 0.83867633, 0.76870152, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.49999905, 0.43799317, 0.045574143, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, NA_integer_) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$normalApproximation, FALSE) expect_equal(x1$directionUpper, FALSE) expect_equal(x1$combinationTestStatistics, c(1.2064848, 1.8556383, 1.9988727, NA_real_), tolerance = 1e-07) .skipTestifDisabled() x2 <- getAnalysisResults(design = design2, dataInput = dataExample2, stage = 3, thetaH0 = 0.75, normalApproximation = TRUE, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2, 3, 4)) expect_equal(x2$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x2$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x2$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x2$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(-1.6329932, -1.8257419, -1.3471506, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.051235217, 0.033944577, 0.088965863, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x2$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities, c(0.088079629, 0.32350577, 0.78413538, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$allocationRatioPlanned, 1) expect_equal(x2$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x2$pi2, NA_real_) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.11314483, 0.21610036, 0.31861038, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.88685517, 0.78389964, 0.72001941, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.49999905, 0.1020964, 0.0075111702, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, 3) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, 0.0050707339, NA_real_), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.30413229, NA_real_), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.68870859, NA_real_), tolerance = 1e-07) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.49547717, NA_real_), tolerance = 1e-07) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, FALSE) expect_equal(x2$combinationTestStatistics, c(1.6329932, 2.445695, 2.6819469, NA_real_), tolerance = 1e-07) x3 <- getAnalysisResults(design = design2, dataInput = 
dataExample2, stage = 2, thetaH0 = 0.75, nPlanned = c(12,6), pi1 = 0.5, normalApproximation = FALSE, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x3' with expected results ## expect_equal(x3$stages, c(1, 2, 3, 4)) expect_equal(x3$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x3$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x3$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x3$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x3$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x3$effectSizes, c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$pValues, c(0.11381531, 0.078126907, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x3$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x3$conditionalRejectionProbabilities, c(0.055828724, 0.15918316, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$nPlanned, c(NA_real_, NA_real_, 12, 6)) expect_equal(x3$allocationRatioPlanned, 1) expect_equal(x3$pi1, 0.5, tolerance = 1e-07) expect_equal(x3$pi2, NA_real_) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_, 0.69921202, 0.88465983), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(0.04626695, 0.16132367, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(0.95373305, 0.83867633, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.49999905, 0.43799317, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$finalStage, NA_integer_) expect_equal(x3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$normalApproximation, FALSE) expect_equal(x3$directionUpper, FALSE) expect_equal(x3$combinationTestStatistics, c(1.2064848, 1.8556383, NA_real_, NA_real_), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x3, piRange = seq(0.45, 0.75, 0.05)) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.94793138, 0.88465983, 0.78396384, 0.64581102, 0.48045808, 0.30888816, 0.15917802), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.91393119, 1, 0.91393119, 0.69767633, 0.44485807, 0.23692776, 0.10539922), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 18") x4 <- getAnalysisResults(design = design2, dataInput = dataExample2, stage = 2, thetaH0 = 0.75, nPlanned = c(12,6), pi1 = 0.5, normalApproximation = TRUE, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x4' with expected results ## expect_equal(x4$stages, c(1, 2, 3, 4)) expect_equal(x4$informationRates, c(0.2, 0.4, 
0.8, 1), tolerance = 1e-07) expect_equal(x4$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x4$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x4$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x4$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x4$effectSizes, c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$testStatistics, c(-1.6329932, -1.8257419, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$pValues, c(0.051235217, 0.033944577, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x4$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x4$conditionalRejectionProbabilities, c(0.088079629, 0.32350577, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$nPlanned, c(NA_real_, NA_real_, 12, 6)) expect_equal(x4$allocationRatioPlanned, 1) expect_equal(x4$pi1, 0.5, tolerance = 1e-07) expect_equal(x4$pi2, NA_real_) expect_equal(x4$conditionalPower, c(NA_real_, NA_real_, 0.85385983, 0.95015898), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalLowerBounds, c(0.11314483, 0.21610036, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds, c(0.88685517, 0.78389964, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues, c(0.49999905, 0.1020964, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$finalStage, NA_integer_) expect_equal(x4$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$normalApproximation, TRUE) expect_equal(x4$directionUpper, FALSE) expect_equal(x4$combinationTestStatistics, c(1.6329932, 2.445695, NA_real_, NA_real_), tolerance = 1e-07) plotData2 <- testGetAnalysisResultsPlotData(x4, piRange = seq(0.45, 0.75, 0.05)) ## ## Comparison of the results of list object 'plotData2' with expected results ## expect_equal(plotData2$xValues, c(0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(0.98088099, 0.95015898, 0.89232288, 0.79901831, 0.66708346, 0.50248974, 0.32350374), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(0.91393119, 1, 0.91393119, 0.69767633, 0.44485807, 0.23692776, 0.10539922), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power Plot with Likelihood") expect_equal(plotData2$xlab, "pi1") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 18") x5 <- getAnalysisResults(design = design2, dataInput = dataExample2, stage = 3, thetaH0 = 0.25, normalApproximation = FALSE, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x5' with expected results ## expect_equal(x5$stages, c(1, 2, 3, 4)) expect_equal(x5$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x5$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x5$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x5$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x5$stageLevels, 
c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x5$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x5$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$pValues, c(0.11381531, 0.078126907, 0.048927307, NA_real_), tolerance = 1e-07) expect_equal(x5$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x5$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x5$conditionalRejectionProbabilities, c(0.055828724, 0.15918316, 0.6508521, NA_real_), tolerance = 1e-07) expect_equal(x5$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$allocationRatioPlanned, 1) expect_equal(x5$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x5$pi2, NA_real_) expect_equal(x5$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$repeatedConfidenceIntervalLowerBounds, c(0.04626695, 0.16132367, 0.26858957, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedConfidenceIntervalUpperBounds, c(0.95373305, 0.83867633, 0.76870152, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedPValues, c(0.49999905, 0.43799317, 0.013282796, NA_real_), tolerance = 1e-07) expect_equal(x5$finalStage, 3) expect_equal(x5$finalPValues, c(NA_real_, NA_real_, 0.007752129, NA_real_), tolerance = 1e-07) expect_equal(x5$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.29554194, NA_real_), tolerance = 1e-07) expect_equal(x5$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.67875285, NA_real_), tolerance = 1e-07) expect_equal(x5$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.48769645, NA_real_), tolerance = 1e-07) expect_equal(x5$normalApproximation, FALSE) expect_equal(x5$directionUpper, TRUE) expect_equal(x5$combinationTestStatistics, c(1.2064848, 1.8556383, 2.4826398, NA_real_), tolerance = 1e-07) x6 <- getAnalysisResults(design = design2, dataInput = dataExample2, stage = 3, thetaH0 = 0.25, normalApproximation = TRUE, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x6' with expected results ## expect_equal(x6$stages, c(1, 2, 3, 4)) expect_equal(x6$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x6$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x6$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x6$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x6$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x6$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x6$testStatistics, c(1.6329932, 1.8257419, 2.116951, NA_real_), tolerance = 1e-07) expect_equal(x6$pValues, c(0.051235217, 0.033944577, 0.017132004, NA_real_), tolerance = 1e-07) expect_equal(x6$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x6$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x6$conditionalRejectionProbabilities, c(0.088079629, 0.32350577, 0.96959663, NA_real_), tolerance = 1e-07) expect_equal(x6$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$allocationRatioPlanned, 1) expect_equal(x6$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x6$pi2, NA_real_) expect_equal(x6$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$repeatedConfidenceIntervalLowerBounds, c(0.11314483, 0.21610036, 0.31861038, NA_real_), tolerance = 1e-07) 
expect_equal(x6$repeatedConfidenceIntervalUpperBounds, c(0.88685517, 0.78389964, 0.72001941, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedPValues, c(0.49999905, 0.1020964, 0.0013103922, NA_real_), tolerance = 1e-07) expect_equal(x6$finalStage, 3) expect_equal(x6$finalPValues, c(NA_real_, NA_real_, 0.002378519, NA_real_), tolerance = 1e-07) expect_equal(x6$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.3437363, NA_real_), tolerance = 1e-07) expect_equal(x6$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.73847376, NA_real_), tolerance = 1e-07) expect_equal(x6$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.54446903, NA_real_), tolerance = 1e-07) expect_equal(x6$normalApproximation, TRUE) expect_equal(x6$directionUpper, TRUE) expect_equal(x6$combinationTestStatistics, c(1.6329932, 2.445695, 3.2262779, NA_real_), tolerance = 1e-07) x7 <- getAnalysisResults(design = design2, dataInput = dataExample2, stage = 2, thetaH0 = 0.25, nPlanned = c(12,6), pi1 = 0.5, normalApproximation = FALSE, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x7' with expected results ## expect_equal(x7$stages, c(1, 2, 3, 4)) expect_equal(x7$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x7$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x7$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x7$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x7$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x7$effectSizes, c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$pValues, c(0.11381531, 0.078126907, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x7$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x7$conditionalRejectionProbabilities, c(0.055828724, 0.15918316, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$nPlanned, c(NA_real_, NA_real_, 12, 6)) expect_equal(x7$allocationRatioPlanned, 1) expect_equal(x7$pi1, 0.5, tolerance = 1e-07) expect_equal(x7$pi2, NA_real_) expect_equal(x7$conditionalPower, c(NA_real_, NA_real_, 0.69921202, 0.88465983), tolerance = 1e-07) expect_equal(x7$repeatedConfidenceIntervalLowerBounds, c(0.04626695, 0.16132367, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$repeatedConfidenceIntervalUpperBounds, c(0.95373305, 0.83867633, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$repeatedPValues, c(0.49999905, 0.43799317, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$finalStage, NA_integer_) expect_equal(x7$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$normalApproximation, FALSE) expect_equal(x7$directionUpper, TRUE) expect_equal(x7$combinationTestStatistics, c(1.2064848, 1.8556383, NA_real_, NA_real_), tolerance = 1e-07) plotData3 <- testGetAnalysisResultsPlotData(x7, piRange = seq(0.25, 0.55, 0.05)) ## ## Comparison of the results of list object 'plotData3' with expected results ## expect_equal(plotData3$xValues, c(0.25, 0.3, 0.35, 0.4, 
0.45, 0.5, 0.55), tolerance = 1e-07) expect_equal(plotData3$condPowerValues, c(0.15917802, 0.30888816, 0.48045808, 0.64581102, 0.78396384, 0.88465983, 0.94793138), tolerance = 1e-07) expect_equal(plotData3$likelihoodValues, c(0.10539922, 0.23692776, 0.44485807, 0.69767633, 0.91393119, 1, 0.91393119), tolerance = 1e-07) expect_equal(plotData3$main, "Conditional Power Plot with Likelihood") expect_equal(plotData3$xlab, "pi1") expect_equal(plotData3$ylab, "Conditional power / Likelihood") expect_equal(plotData3$sub, "Stage = 2, # of remaining subjects = 18") x8 <- getAnalysisResults(design = design2, dataInput = dataExample2, stage = 2, thetaH0 = 0.25, nPlanned = c(12,6), pi1 = 0.5, normalApproximation = TRUE, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x8' with expected results ## expect_equal(x8$stages, c(1, 2, 3, 4)) expect_equal(x8$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x8$criticalValues, c(3.4542176, 2.9219298, 2.2448554, 2.0471908), tolerance = 1e-07) expect_equal(x8$futilityBounds, c(-0.5, 0, 0.5), tolerance = 1e-07) expect_equal(x8$alphaSpent, c(0.00027594593, 0.0019217991, 0.013384186, 0.025), tolerance = 1e-07) expect_equal(x8$stageLevels, c(0.00027594593, 0.00173935, 0.012388708, 0.020319679), tolerance = 1e-07) expect_equal(x8$effectSizes, c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$testStatistics, c(1.6329932, 1.8257419, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$pValues, c(0.051235217, 0.033944577, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x8$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x8$conditionalRejectionProbabilities, c(0.088079629, 0.32350577, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$nPlanned, c(NA_real_, NA_real_, 12, 6)) expect_equal(x8$allocationRatioPlanned, 1) expect_equal(x8$pi1, 0.5, tolerance = 1e-07) expect_equal(x8$pi2, NA_real_) expect_equal(x8$conditionalPower, c(NA_real_, NA_real_, 0.85385983, 0.95015898), tolerance = 1e-07) expect_equal(x8$repeatedConfidenceIntervalLowerBounds, c(0.11314483, 0.21610036, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$repeatedConfidenceIntervalUpperBounds, c(0.88685517, 0.78389964, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$repeatedPValues, c(0.49999905, 0.1020964, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$finalStage, NA_integer_) expect_equal(x8$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$normalApproximation, TRUE) expect_equal(x8$directionUpper, TRUE) expect_equal(x8$combinationTestStatistics, c(1.6329932, 2.445695, NA_real_, NA_real_), tolerance = 1e-07) plotData4 <- testGetAnalysisResultsPlotData(x8, piRange = seq(0.25, 0.55, 0.05)) ## ## Comparison of the results of list object 'plotData4' with expected results ## expect_equal(plotData4$xValues, c(0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55), tolerance = 1e-07) expect_equal(plotData4$condPowerValues, c(0.32350374, 0.50248974, 0.66708346, 0.79901831, 0.89232288, 0.95015898, 0.98088099), tolerance = 1e-07) expect_equal(plotData4$likelihoodValues, c(0.10539922, 0.23692776, 0.44485807, 0.69767633, 0.91393119, 1, 
0.91393119), tolerance = 1e-07) expect_equal(plotData4$main, "Conditional Power Plot with Likelihood") expect_equal(plotData4$xlab, "pi1") expect_equal(plotData4$ylab, "Conditional power / Likelihood") expect_equal(plotData4$sub, "Stage = 2, # of remaining subjects = 18") }) test_that("'getAnalysisResults' for a Fisher design and one treatment", { design3 <- getDesignFisher(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.8, 1)) dataExample3 <- getDataset( n = c(8, 10, 9, 11), # cumsum, overall n = (8, 18, 27, 38) events = c(4, 5, 5, 6) # cumsum, overall events = (4, 9, 14, 20) ) x1 <- getAnalysisResults(design = design3, dataInput = dataExample3, stage = 3, thetaH0 = 0.75, normalApproximation = FALSE, directionUpper = FALSE, iterations = 1000, seed = 123) ## ## Comparison of the results of AnalysisResultsFisher object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(0.00979594, 0.001278498, 5.8066063e-05, 1.276231e-05), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(1, 1, 1)) expect_equal(x1$alphaSpent, c(0.00979594, 0.01571, 0.022184266, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.00979594, 0.00979594, 0.00979594, 0.00979594), tolerance = 1e-07) expect_equal(x1$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$pValues, c(0.11381531, 0.078126907, 0.16572571, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(x1$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities, c(0.027980027, 0.040164764, 0.018233808, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x1$pi2, NA_real_) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.12025548, 0.19023888, 0.23521677, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.87974452, 0.80976112, 0.79971589, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.23393398, 0.11483365, 0.11050779, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, NA_integer_) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$normalApproximation, FALSE) expect_equal(x1$directionUpper, FALSE) expect_equal(x1$combinationTestStatistics, c(0.11381531, 0.008892038, 0.00069992563, NA_real_), tolerance = 1e-07) .skipTestifDisabled() x2 <- getAnalysisResults(design = design3, dataInput = dataExample3, stage = 3, thetaH0 = 0.75, normalApproximation = TRUE, directionUpper = FALSE, iterations = 1000, seed = 123) ## ## Comparison of the results of AnalysisResultsFisher object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2, 3, 4)) expect_equal(x2$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(0.00979594, 0.001278498, 5.8066063e-05, 1.276231e-05), tolerance = 1e-07) 
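# Note on the scale: for a Fisher combination design the critical values bound
# the product of stage-wise p-values, so smaller values are more extreme; at
# stage 3 the combination test statistic 5.6795832e-05 falls below the critical
# value 5.8066063e-05, matching the "reject and stop" action asserted below.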
expect_equal(x2$futilityBounds, c(1, 1, 1)) expect_equal(x2$alphaSpent, c(0.00979594, 0.01571, 0.022184266, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.00979594, 0.00979594, 0.00979594, 0.00979594), tolerance = 1e-07) expect_equal(x2$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(-1.6329932, -1.8257419, -1.3471506, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.051235217, 0.033944577, 0.088965863, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x2$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities, c(0.051264101, 0.1206033, 1, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$allocationRatioPlanned, 1) expect_equal(x2$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x2$pi2, NA_real_) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.18175814, 0.2424364, 0.28642867, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.81824186, 0.7575636, 0.7496417, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.11554509, 0.032131177, 0.024656293, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, FALSE) expect_equal(x2$combinationTestStatistics, c(0.051235217, 0.0017391578, 5.6795832e-05, NA_real_), tolerance = 1e-07) x3 <- getAnalysisResults(design = design3, dataInput = dataExample3, stage = 2, thetaH0 = 0.75, nPlanned = c(12, 6), pi1 = 0.5, normalApproximation = FALSE, directionUpper = FALSE, iterations = 1000, seed = 123) ## ## Comparison of the results of AnalysisResultsFisher object 'x3' with expected results ## expect_equal(x3$stages, c(1, 2, 3, 4)) expect_equal(x3$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x3$criticalValues, c(0.00979594, 0.001278498, 5.8066063e-05, 1.276231e-05), tolerance = 1e-07) expect_equal(x3$futilityBounds, c(1, 1, 1)) expect_equal(x3$alphaSpent, c(0.00979594, 0.01571, 0.022184266, 0.025), tolerance = 1e-07) expect_equal(x3$stageLevels, c(0.00979594, 0.00979594, 0.00979594, 0.00979594), tolerance = 1e-07) expect_equal(x3$effectSizes, c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$pValues, c(0.11381531, 0.078126907, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x3$thetaH0, 0.75, tolerance = 1e-07) expect_equal(x3$conditionalRejectionProbabilities, c(0.027980027, 0.040164764, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$nPlanned, c(NA_real_, NA_real_, 12, 6)) expect_equal(x3$allocationRatioPlanned, 1) expect_equal(x3$pi1, 0.5, tolerance = 1e-07) expect_equal(x3$pi2, NA_real_) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(0.12025548, 0.19023888, NA_real_, NA_real_), tolerance = 1e-07) 
expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(0.87974452, 0.80976112, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.23393398, 0.11483365, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$finalStage, NA_integer_) expect_equal(x3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$normalApproximation, FALSE) expect_equal(x3$directionUpper, FALSE) expect_equal(x3$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.522, 0.672), tolerance = 1e-07) expect_equal(x3$combinationTestStatistics, c(0.11381531, 0.008892038, NA_real_, NA_real_), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x3, piRange = seq(0.25, 0.55, 0.05)) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.997, 0.99, 0.967, 0.9, 0.822, 0.659, 0.534), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.10539922, 0.23692776, 0.44485807, 0.69767633, 0.91393119, 1, 0.91393119), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 18") x4 <- getAnalysisResults(design = design3, dataInput = dataExample3, stage = 3, thetaH0 = 0.25, normalApproximation = FALSE, directionUpper = TRUE, iterations = 1000, seed = 123) ## ## Comparison of the results of AnalysisResultsFisher object 'x4' with expected results ## expect_equal(x4$stages, c(1, 2, 3, 4)) expect_equal(x4$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x4$criticalValues, c(0.00979594, 0.001278498, 5.8066063e-05, 1.276231e-05), tolerance = 1e-07) expect_equal(x4$futilityBounds, c(1, 1, 1)) expect_equal(x4$alphaSpent, c(0.00979594, 0.01571, 0.022184266, 0.025), tolerance = 1e-07) expect_equal(x4$stageLevels, c(0.00979594, 0.00979594, 0.00979594, 0.00979594), tolerance = 1e-07) expect_equal(x4$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x4$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$pValues, c(0.11381531, 0.078126907, 0.048927307, NA_real_), tolerance = 1e-07) expect_equal(x4$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(x4$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x4$conditionalRejectionProbabilities, c(0.027980027, 0.040164764, 0.10237226, NA_real_), tolerance = 1e-07) expect_equal(x4$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$allocationRatioPlanned, 1) expect_equal(x4$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x4$pi2, NA_real_) expect_equal(x4$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$repeatedConfidenceIntervalLowerBounds, c(0.12025548, 0.19023888, 0.23521677, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds, c(0.87974452, 0.80976112, 0.79971589, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues, c(0.23393398, 0.11483365, 0.040061917, NA_real_), tolerance = 1e-07) expect_equal(x4$finalStage, NA_integer_) 
expect_equal(x4$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$normalApproximation, FALSE) expect_equal(x4$directionUpper, TRUE) expect_equal(x4$combinationTestStatistics, c(0.11381531, 0.008892038, 0.00012466571, NA_real_), tolerance = 1e-07) x5 <- getAnalysisResults(design = design3, dataInput = dataExample3, stage = 3, thetaH0 = 0.25, normalApproximation = TRUE, directionUpper = TRUE, iterations = 1000, seed = 123) ## ## Comparison of the results of AnalysisResultsFisher object 'x5' with expected results ## expect_equal(x5$stages, c(1, 2, 3, 4)) expect_equal(x5$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x5$criticalValues, c(0.00979594, 0.001278498, 5.8066063e-05, 1.276231e-05), tolerance = 1e-07) expect_equal(x5$futilityBounds, c(1, 1, 1)) expect_equal(x5$alphaSpent, c(0.00979594, 0.01571, 0.022184266, 0.025), tolerance = 1e-07) expect_equal(x5$stageLevels, c(0.00979594, 0.00979594, 0.00979594, 0.00979594), tolerance = 1e-07) expect_equal(x5$effectSizes, c(0.5, 0.5, 0.51851852, NA_real_), tolerance = 1e-07) expect_equal(x5$testStatistics, c(1.6329932, 1.8257419, 2.116951, NA_real_), tolerance = 1e-07) expect_equal(x5$pValues, c(0.051235217, 0.033944577, 0.017132004, NA_real_), tolerance = 1e-07) expect_equal(x5$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x5$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x5$conditionalRejectionProbabilities, c(0.051264101, 0.1206033, 1, NA_real_), tolerance = 1e-07) expect_equal(x5$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$allocationRatioPlanned, 1) expect_equal(x5$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x5$pi2, NA_real_) expect_equal(x5$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$repeatedConfidenceIntervalLowerBounds, c(0.18175814, 0.2424364, 0.28642867, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedConfidenceIntervalUpperBounds, c(0.81824186, 0.7575636, 0.7496417, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedPValues, c(0.11554509, 0.032131177, 0.0055275316, NA_real_), tolerance = 1e-07) expect_equal(x5$finalStage, NA_integer_) expect_equal(x5$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$normalApproximation, TRUE) expect_equal(x5$directionUpper, TRUE) expect_equal(x5$combinationTestStatistics, c(0.051235217, 0.0017391578, 5.527981e-06, NA_real_), tolerance = 1e-07) x6 <- getAnalysisResults(design = design3, dataInput = dataExample3, stage = 2, thetaH0 = 0.25, nPlanned = c(12,6), pi1 = 0.5, normalApproximation = FALSE, directionUpper = TRUE, iterations = 1000, seed = 123) ## ## Comparison of the results of AnalysisResultsFisher object 'x6' with expected results ## expect_equal(x6$stages, c(1, 2, 3, 4)) expect_equal(x6$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x6$criticalValues, c(0.00979594, 0.001278498, 5.8066063e-05, 1.276231e-05), tolerance = 1e-07) 
expect_equal(x6$futilityBounds, c(1, 1, 1)) expect_equal(x6$alphaSpent, c(0.00979594, 0.01571, 0.022184266, 0.025), tolerance = 1e-07) expect_equal(x6$stageLevels, c(0.00979594, 0.00979594, 0.00979594, 0.00979594), tolerance = 1e-07) expect_equal(x6$effectSizes, c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x6$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$pValues, c(0.11381531, 0.078126907, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x6$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x6$thetaH0, 0.25, tolerance = 1e-07) expect_equal(x6$conditionalRejectionProbabilities, c(0.027980027, 0.040164764, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x6$nPlanned, c(NA_real_, NA_real_, 12, 6)) expect_equal(x6$allocationRatioPlanned, 1) expect_equal(x6$pi1, 0.5, tolerance = 1e-07) expect_equal(x6$pi2, NA_real_) expect_equal(x6$repeatedConfidenceIntervalLowerBounds, c(0.12025548, 0.19023888, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedConfidenceIntervalUpperBounds, c(0.87974452, 0.80976112, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedPValues, c(0.23393398, 0.11483365, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x6$finalStage, NA_integer_) expect_equal(x6$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$normalApproximation, FALSE) expect_equal(x6$directionUpper, TRUE) expect_equal(x6$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.522, 0.672), tolerance = 1e-07) expect_equal(x6$combinationTestStatistics, c(0.11381531, 0.008892038, NA_real_, NA_real_), tolerance = 1e-07) plotData2 <- testGetAnalysisResultsPlotData(x6, piRange = seq(0.25, 0.55, 0.05)) ## ## Comparison of the results of list object 'plotData2' with expected results ## expect_equal(plotData2$xValues, c(0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(0.039, 0.114, 0.208, 0.363, 0.54, 0.659, 0.817), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(0.10539922, 0.23692776, 0.44485807, 0.69767633, 0.91393119, 1, 0.91393119), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power Plot with Likelihood") expect_equal(plotData2$xlab, "pi1") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 18") }) test_that("'getAnalysisResults' produces the correct exact tests and final CIs", { .skipTestifDisabled() dataExample4 <- getDataset( n1 = c(10, 80), n2 = c(15, 100), events1 = c(8, 54), events2 = c(6, 45) ) design4 <- getDesignGroupSequential(kMax = 2, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.1, informationRates = c(0.3, 1)) x1 <- getAnalysisResults(design4, dataExample4, thetaH0 = 0, stage = 2, directionUpper = TRUE, normalApproximation = FALSE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2)) expect_equal(x1$informationRates, c(0.3, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(3.1833546, 1.9666792), tolerance = 1e-07) expect_equal(x1$futilityBounds, -6) expect_equal(x1$alphaSpent, c(0.00072789603, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, 
c(0.00072789603, 0.024610104), tolerance = 1e-07) expect_equal(x1$effectSizes, c(0.4, 0.24541063), tolerance = 1e-07) expect_equal(x1$testStatistics, c(NA_real_, NA_real_)) expect_equal(x1$pValues, c(0.057571679, 0.0019934481), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "reject")) expect_equal(x1$thetaH0, 0) expect_equal(x1$conditionalRejectionProbabilities, c(0.093545976, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$pi1, 0.68888889, tolerance = 1e-07) expect_equal(x1$pi2, 0.44347826, tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(-0.23265736, 0.10906229), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.78187776, 0.37165341), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.19076123, 0.00035290512), tolerance = 1e-07) expect_equal(x1$finalStage, 2) expect_equal(x1$finalPValues, c(NA_real_, 0.0010527587), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, 0.08946214), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, 0.35871085), tolerance = 1e-07) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, 0.2258854), tolerance = 1e-07) expect_equal(x1$normalApproximation, FALSE) expect_equal(x1$directionUpper, TRUE) expect_equal(x1$overallTestStatistics, c(1.5754901, 3.3869257), tolerance = 1e-07) expect_equal(x1$overallPValues, c(0.057571679, 0.00035340268), tolerance = 1e-07) design5 <- getDesignInverseNormal(kMax = 2, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.1, informationRates = c(0.3, 1)) x2 <- getAnalysisResults(design5, dataExample4, thetaH0 = 0, stage = 2, directionUpper = TRUE, normalApproximation = FALSE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2)) expect_equal(x2$informationRates, c(0.3, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(3.1833546, 1.9666792), tolerance = 1e-07) expect_equal(x2$futilityBounds, -6) expect_equal(x2$alphaSpent, c(0.00072789603, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.00072789603, 0.024610104), tolerance = 1e-07) expect_equal(x2$effectSizes, c(0.4, 0.24541063), tolerance = 1e-07) expect_equal(x2$testStatistics, c(NA_real_, NA_real_)) expect_equal(x2$pValues, c(0.057571679, 0.0019934481), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "reject")) expect_equal(x2$thetaH0, 0) expect_equal(x2$conditionalRejectionProbabilities, c(0.093545976, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_)) expect_equal(x2$allocationRatioPlanned, 1) expect_equal(x2$pi1, 0.68888889, tolerance = 1e-07) expect_equal(x2$pi2, 0.44347826, tolerance = 1e-07) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(-0.23265736, 0.11944223), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.78187776, 0.38794979), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.19076123, 0.00053410288), tolerance = 1e-07) expect_equal(x2$finalStage, 2) expect_equal(x2$finalPValues, c(NA_real_, 0.0012242304), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, 0.088125224), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, 0.36146576), tolerance = 1e-07) 
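# Consistency check: the median unbiased estimate asserted next lies between
# the final confidence bounds verified above: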
expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, 0.2258396), tolerance = 1e-07) expect_equal(x2$normalApproximation, FALSE) expect_equal(x2$directionUpper, TRUE) expect_equal(x2$combinationTestStatistics, c(1.5754901, 3.2718402), tolerance = 1e-07) design6 <- getDesignFisher(kMax = 2, alpha = 0.025, method = "fullAlpha", informationRates = c(0.3, 1)) x3 <- getAnalysisResults(design6, dataExample4, thetaH0 = 0, stage = 2, directionUpper = TRUE, normalApproximation = FALSE, seed = 123) ## ## Comparison of the results of AnalysisResultsFisher object 'x3' with expected results ## expect_equal(x3$stages, c(1, 2)) expect_equal(x3$informationRates, c(0.3, 1), tolerance = 1e-07) expect_equal(x3$criticalValues, c(0.00076737019, 0.00076737019), tolerance = 1e-07) expect_equal(x3$futilityBounds, 1) expect_equal(x3$alphaSpent, c(0.00076737019, 0.025), tolerance = 1e-07) expect_equal(x3$stageLevels, c(0.00076737019, 0.025), tolerance = 1e-07) expect_equal(x3$effectSizes, c(0.4, 0.24541063), tolerance = 1e-07) expect_equal(x3$testStatistics, c(NA_real_, NA_real_)) expect_equal(x3$pValues, c(0.057571679, 0.0019934481), tolerance = 1e-07) expect_equal(x3$testActions, c("continue", "reject")) expect_equal(x3$thetaH0, 0) expect_equal(x3$conditionalRejectionProbabilities, c(0.059209424, NA_real_), tolerance = 1e-07) expect_equal(x3$nPlanned, c(NA_real_, NA_real_)) expect_equal(x3$allocationRatioPlanned, 1) expect_equal(x3$pi1, 0.68888889, tolerance = 1e-07) expect_equal(x3$pi2, 0.44347826, tolerance = 1e-07) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(-0.22999653, 0.10478651), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(0.78083174, 0.37635443), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.33766302, 0.00088314698), tolerance = 1e-07) expect_equal(x3$finalStage, 2) expect_equal(x3$finalPValues, c(NA_real_, 0.0015832283), tolerance = 1e-07) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_)) expect_equal(x3$normalApproximation, FALSE) expect_equal(x3$directionUpper, TRUE) expect_equal(x3$combinationTestStatistics, c(0.057571679, 4.3180464e-06), tolerance = 1e-07) }) context("Testing the analysis rates functionality for two treatments") test_that("'getAnalysisResults' for a group sequential design and two treatments", { .skipTestifDisabled() design7 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.25, informationRates = c(0.2, 0.4, 0.8, 1), futilityBounds = c(0, 0.5, 0.8), bindingFutility = TRUE) dataExample5 <- getDataset( n1 = c(17, 23, 22), n2 = c(18, 20, 19), events1 = c(11, 12, 17), events2 = c(5, 10, 7) ) x1 <- getAnalysisResults(design7, dataExample5, thetaH0 = 0, stage = 2, nPlanned = c(60,30), pi2 = 0.4, pi1 = 0.8, directionUpper = TRUE, allocationRatioPlanned = 2) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(3.0522063, 2.5665893, 2.1582358, 2.0411334), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(0, 0.5, 0.8), tolerance = 1e-07) expect_equal(x1$alphaSpent, c(0.0011358297, 0.0058207373, 0.017395547, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.0011358297, 0.0051352086, 
0.015454753, 0.020618787), tolerance = 1e-07) expect_equal(x1$effectSizes, c(0.36928105, 0.18026316, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(2.1918708, 0.14224412, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$pValues, c(0.014194417, 0.44344359, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x1$thetaH0, 0) expect_equal(x1$conditionalRejectionProbabilities, c(0.19002543, 0.11795654, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, 60, 30)) expect_equal(x1$allocationRatioPlanned, 2) expect_equal(x1$pi1, 0.8, tolerance = 1e-07) expect_equal(x1$pi2, 0.4, tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, 0.95912173, 0.99561789), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(-0.14000098, -0.10866984, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.72492405, 0.44094175, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.083297284, 0.1166436, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, NA_integer_) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$normalApproximation, TRUE) expect_equal(x1$directionUpper, TRUE) expect_equal(x1$overallTestStatistics, c(2.1918708, 1.5920411, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$overallPValues, c(0.014194417, 0.055687735, NA_real_, NA_real_), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x1, piRange = seq(0.5, 0.8, 0.05), allocationRatioPlanned = 2) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.39134298, 0.56404834, 0.72892392, 0.85852169, 0.94050627, 0.98087239, 0.99561789), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.63105765, 0.95013529, 0.95013529, 0.63105765, 0.27837883, 0.081561833, 0.015871623), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 90, pi2 = 0.4, allocation ratio = 2") # reversed "directionUpper" x2 <- getAnalysisResults(design7, dataExample5, thetaH0 = 0, stage = 2, nPlanned = c(60,30), pi2 = 0.8, pi1 = 0.4, directionUpper = FALSE, allocationRatioPlanned = 0.5) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2, 3, 4)) expect_equal(x2$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(3.0522063, 2.5665893, 2.1582358, 2.0411334), tolerance = 1e-07) expect_equal(x2$futilityBounds, c(0, 0.5, 0.8), tolerance = 1e-07) expect_equal(x2$alphaSpent, c(0.0011358297, 0.0058207373, 0.017395547, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.0011358297, 0.0051352086, 0.015454753, 0.020618787), tolerance = 1e-07) expect_equal(x2$effectSizes, c(0.36928105, 0.18026316, 
NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(2.1918708, 0.14224412, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.98580558, 0.55655641, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("accept and stop", "accept and stop", NA_character_, NA_character_)) expect_equal(x2$thetaH0, 0) expect_equal(x2$conditionalRejectionProbabilities, c(0, 0, NA_real_, NA_real_)) expect_equal(x2$nPlanned, c(NA_real_, NA_real_, 60, 30)) expect_equal(x2$allocationRatioPlanned, 0.5, tolerance = 1e-07) expect_equal(x2$pi1, 0.4, tolerance = 1e-07) expect_equal(x2$pi2, 0.8, tolerance = 1e-07) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, 6.6613381e-16, 6.6613381e-16), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(-0.14000098, -0.10866984, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.72492405, 0.44094175, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.21185459, 0.21185459, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, 1) expect_equal(x2$finalPValues, c(0.98580558, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(0.03932898, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(0.62730993, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$medianUnbiasedEstimates, c(0.36928105, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, FALSE) expect_equal(x2$overallTestStatistics, c(2.1918708, 1.5920411, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$overallPValues, c(0.98580558, 0.94431227, NA_real_, NA_real_), tolerance = 1e-07) plotData2 <- testGetAnalysisResultsPlotData(x2, piRange = seq(0.2, 0.5, 0.05), allocationRatioPlanned = 0.5) ## ## Comparison of the results of list object 'plotData2' with expected results ## expect_equal(plotData2$xValues, c(0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(6.6613381e-16, 6.6613381e-16, 6.6613381e-16, 6.6613381e-16, 6.6613381e-16, 1, 1), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(1.003982e-05, 0.00017609247, 0.0020513476, 0.015871623, 0.081561833, 0.27837883, 0.63105765), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power Plot with Likelihood") expect_equal(plotData2$xlab, "pi1") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 90, pi2 = 0.8, allocation ratio = 0.5") }) test_that("'getAnalysisResults' for an inverse design and two treatments", { .skipTestifDisabled() design8 <- getDesignInverseNormal(kMax = 4, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.25, informationRates = c(0.2, 0.4, 0.8, 1), futilityBounds = c(0, 0.5, 0.8), bindingFutility = T) dataExample6 <- getDataset( n1 = c(17, 23, 22), n2 = c(18, 20, 19), events1 = c(11, 12, 17), events2 = c(5, 10, 7) ) x1 <- getAnalysisResults(design8, dataExample6, thetaH0 = 0.0, stage = 2, nPlanned = c(30,30), pi2 = 0.2, pi1 = 0.4, directionUpper = T) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(3.0522063, 2.5665893, 2.1582358, 2.0411334), tolerance = 
1e-07) expect_equal(x1$futilityBounds, c(0, 0.5, 0.8), tolerance = 1e-07) expect_equal(x1$alphaSpent, c(0.0011358297, 0.0058207373, 0.017395547, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.0011358297, 0.0051352086, 0.015454753, 0.020618787), tolerance = 1e-07) expect_equal(x1$effectSizes, c(0.36928105, 0.18026316, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(2.1918708, 0.14224412, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$pValues, c(0.014194417, 0.44344359, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x1$thetaH0, 0) expect_equal(x1$conditionalRejectionProbabilities, c(0.19002543, 0.12887611, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, 30, 30)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$pi1, 0.4, tolerance = 1e-07) expect_equal(x1$pi2, 0.2, tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, 0.42200962, 0.67359244), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(-0.14000098, -0.10230475, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.72492405, 0.44728185, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.083297284, 0.10825489, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, NA_integer_) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$normalApproximation, TRUE) expect_equal(x1$directionUpper, TRUE) expect_equal(x1$combinationTestStatistics, c(2.1918708, 1.6504685, NA_real_, NA_real_), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x1, piRange = seq(0.4, 0.7, 0.05), nPlanned = c(30,30)) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.67359244, 0.79683049, 0.88832834, 0.9471853, 0.97926376, 0.99357391, 0.99853781), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.081561833, 0.27837883, 0.63105765, 0.95013529, 0.95013529, 0.63105765, 0.27837883), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 60, pi2 = 0.2, allocation ratio = 1") x2 <- getAnalysisResults(design8, dataExample6, thetaH0 = 0.0, stage = 2, nPlanned = c(30,30), pi2 = 0.2, pi1 = 0.4, directionUpper = T) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2, 3, 4)) expect_equal(x2$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(3.0522063, 2.5665893, 2.1582358, 2.0411334), tolerance = 1e-07) expect_equal(x2$futilityBounds, c(0, 0.5, 0.8), tolerance = 1e-07) expect_equal(x2$alphaSpent, c(0.0011358297, 0.0058207373, 0.017395547, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.0011358297, 0.0051352086, 0.015454753, 0.020618787), tolerance 
= 1e-07) expect_equal(x2$effectSizes, c(0.36928105, 0.18026316, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(2.1918708, 0.14224412, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.014194417, 0.44344359, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x2$thetaH0, 0) expect_equal(x2$conditionalRejectionProbabilities, c(0.19002543, 0.12887611, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_, 30, 30)) expect_equal(x2$allocationRatioPlanned, 1) expect_equal(x2$pi1, 0.4, tolerance = 1e-07) expect_equal(x2$pi2, 0.2, tolerance = 1e-07) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, 0.42200962, 0.67359244), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(-0.14000098, -0.10230475, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.72492405, 0.44728185, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.083297284, 0.10825489, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, TRUE) expect_equal(x2$combinationTestStatistics, c(2.1918708, 1.6504685, NA_real_, NA_real_), tolerance = 1e-07) plotData2 <- testGetAnalysisResultsPlotData(x2, piRange = seq(0.4, 0.7, 0.05), nPlanned = c(30, 30)) ## ## Comparison of the results of list object 'plotData2' with expected results ## expect_equal(plotData2$xValues, c(0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(0.67359244, 0.79683049, 0.88832834, 0.9471853, 0.97926376, 0.99357391, 0.99853781), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(0.081561833, 0.27837883, 0.63105765, 0.95013529, 0.95013529, 0.63105765, 0.27837883), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power Plot with Likelihood") expect_equal(plotData2$xlab, "pi1") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 60, pi2 = 0.2, allocation ratio = 1") plotData3 <- testGetAnalysisResultsPlotData(x2, piRange = seq(0.4, 0.7, 0.05)) ## ## Comparison of the results of list object 'plotData3' with expected results ## expect_equal(plotData3$xValues, c(0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7), tolerance = 1e-07) expect_equal(plotData3$condPowerValues, c(0.67359244, 0.79683049, 0.88832834, 0.9471853, 0.97926376, 0.99357391, 0.99853781), tolerance = 1e-07) expect_equal(plotData3$likelihoodValues, c(0.081561833, 0.27837883, 0.63105765, 0.95013529, 0.95013529, 0.63105765, 0.27837883), tolerance = 1e-07) expect_equal(plotData3$main, "Conditional Power Plot with Likelihood") expect_equal(plotData3$xlab, "pi1") expect_equal(plotData3$ylab, "Conditional power / Likelihood") expect_equal(plotData3$sub, "Stage = 2, # of remaining subjects = 60, pi2 = 0.2, allocation ratio = 1") x3 <- getAnalysisResults(design8, dataExample6, thetaH0 = 0, stage = 2, nPlanned = c(60,30), pi2 = 0.4, pi1 = 0.8, directionUpper = TRUE, 
allocationRatioPlanned = 2) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x3' with expected results ## expect_equal(x3$stages, c(1, 2, 3, 4)) expect_equal(x3$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x3$criticalValues, c(3.0522063, 2.5665893, 2.1582358, 2.0411334), tolerance = 1e-07) expect_equal(x3$futilityBounds, c(0, 0.5, 0.8), tolerance = 1e-07) expect_equal(x3$alphaSpent, c(0.0011358297, 0.0058207373, 0.017395547, 0.025), tolerance = 1e-07) expect_equal(x3$stageLevels, c(0.0011358297, 0.0051352086, 0.015454753, 0.020618787), tolerance = 1e-07) expect_equal(x3$effectSizes, c(0.36928105, 0.18026316, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$testStatistics, c(2.1918708, 0.14224412, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$pValues, c(0.014194417, 0.44344359, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x3$thetaH0, 0) expect_equal(x3$conditionalRejectionProbabilities, c(0.19002543, 0.12887611, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$nPlanned, c(NA_real_, NA_real_, 60, 30)) expect_equal(x3$allocationRatioPlanned, 2) expect_equal(x3$pi1, 0.8, tolerance = 1e-07) expect_equal(x3$pi2, 0.4, tolerance = 1e-07) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_, 0.95920074, 0.9956319), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(-0.14000098, -0.10230475, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(0.72492405, 0.44728185, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.083297284, 0.10825489, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$finalStage, NA_integer_) expect_equal(x3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$normalApproximation, TRUE) expect_equal(x3$directionUpper, TRUE) expect_equal(x3$combinationTestStatistics, c(2.1918708, 1.6504685, NA_real_, NA_real_), tolerance = 1e-07) plotData4 <- testGetAnalysisResultsPlotData(x3, piRange = seq(0.5, 0.8, 0.05), allocationRatioPlanned = 2) ## ## Comparison of the results of list object 'plotData4' with expected results ## expect_equal(plotData4$xValues, c(0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8), tolerance = 1e-07) expect_equal(plotData4$condPowerValues, c(0.39156853, 0.5643629, 0.72924187, 0.85876179, 0.94064025, 0.98092555, 0.9956319), tolerance = 1e-07) expect_equal(plotData4$likelihoodValues, c(0.63105765, 0.95013529, 0.95013529, 0.63105765, 0.27837883, 0.081561833, 0.015871623), tolerance = 1e-07) expect_equal(plotData4$main, "Conditional Power Plot with Likelihood") expect_equal(plotData4$xlab, "pi1") expect_equal(plotData4$ylab, "Conditional power / Likelihood") expect_equal(plotData4$sub, "Stage = 2, # of remaining subjects = 90, pi2 = 0.4, allocation ratio = 2") # reversed "directionUpper" x4 <- getAnalysisResults(design8, dataExample6, thetaH0 = 0, stage = 2, nPlanned = c(60,30), pi2 = 0.8, pi1 = 0.4, directionUpper = FALSE, allocationRatioPlanned = 0.5) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x4' with expected results ## expect_equal(x4$stages, c(1, 2, 3, 4)) expect_equal(x4$informationRates, 
c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x4$criticalValues, c(3.0522063, 2.5665893, 2.1582358, 2.0411334), tolerance = 1e-07) expect_equal(x4$futilityBounds, c(0, 0.5, 0.8), tolerance = 1e-07) expect_equal(x4$alphaSpent, c(0.0011358297, 0.0058207373, 0.017395547, 0.025), tolerance = 1e-07) expect_equal(x4$stageLevels, c(0.0011358297, 0.0051352086, 0.015454753, 0.020618787), tolerance = 1e-07) expect_equal(x4$effectSizes, c(0.36928105, 0.18026316, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$testStatistics, c(2.1918708, 0.14224412, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$pValues, c(0.98580558, 0.55655641, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$testActions, c("accept and stop", "accept and stop", NA_character_, NA_character_)) expect_equal(x4$thetaH0, 0) expect_equal(x4$conditionalRejectionProbabilities, c(0, 0, NA_real_, NA_real_)) expect_equal(x4$nPlanned, c(NA_real_, NA_real_, 60, 30)) expect_equal(x4$allocationRatioPlanned, 0.5, tolerance = 1e-07) expect_equal(x4$pi1, 0.4, tolerance = 1e-07) expect_equal(x4$pi2, 0.8, tolerance = 1e-07) expect_equal(x4$conditionalPower, c(NA_real_, NA_real_, 6.6613381e-16, 6.6613381e-16), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalLowerBounds, c(-0.14000098, -0.10230475, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds, c(0.72492405, 0.44728185, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues, c(0.21185459, 0.21185459, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$finalStage, 1) expect_equal(x4$finalPValues, c(0.98580558, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$finalConfidenceIntervalLowerBounds, c(0.03932898, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$finalConfidenceIntervalUpperBounds, c(0.62730993, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$medianUnbiasedEstimates, c(0.36928105, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$normalApproximation, TRUE) expect_equal(x4$directionUpper, FALSE) expect_equal(x4$combinationTestStatistics, c(-2.1918708, -1.6504685, NA_real_, NA_real_), tolerance = 1e-07) plotData5 <- testGetAnalysisResultsPlotData(x4, piRange = seq(0.2, 0.5, 0.05), allocationRatioPlanned = 0.5) ## ## Comparison of the results of list object 'plotData5' with expected results ## expect_equal(plotData5$xValues, c(0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5), tolerance = 1e-07) expect_equal(plotData5$condPowerValues, c(6.6613381e-16, 6.6613381e-16, 6.6613381e-16, 6.6613381e-16, 6.6613381e-16, 1, 1), tolerance = 1e-07) expect_equal(plotData5$likelihoodValues, c(1.003982e-05, 0.00017609247, 0.0020513476, 0.015871623, 0.081561833, 0.27837883, 0.63105765), tolerance = 1e-07) expect_equal(plotData5$main, "Conditional Power Plot with Likelihood") expect_equal(plotData5$xlab, "pi1") expect_equal(plotData5$ylab, "Conditional power / Likelihood") expect_equal(plotData5$sub, "Stage = 2, # of remaining subjects = 90, pi2 = 0.8, allocation ratio = 0.5") }) test_that("'getAnalysisResults' for a Fisher design and two treatments", { .skipTestifDisabled() design9 <- getDesignFisher(kMax = 4, alpha = 0.025, method = "equalAlpha", informationRates = c(0.2, 0.4, 0.8, 1)) dataExample7 <- getDataset( n1 = c(17, 23, 22), n2 = c(18, 20, 19), events1 = c(11, 12, 17), events2 = c(5, 10, 7) ) x1 <- getAnalysisResults(design9, dataExample7, thetaH0 = 0, stage = 2, nPlanned = c(60,30), pi2 = 0.4, pi1 = 0.8, directionUpper = TRUE, 
allocationRatioPlanned = 2, seed = 123) ## ## Comparison of the results of AnalysisResultsFisher object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(0.00979594, 0.001278498, 5.8066063e-05, 1.276231e-05), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(1, 1, 1)) expect_equal(x1$alphaSpent, c(0.00979594, 0.01571, 0.022184266, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.00979594, 0.00979594, 0.00979594, 0.00979594), tolerance = 1e-07) expect_equal(x1$effectSizes, c(0.36928105, 0.18026316, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(2.1918708, 0.14224412, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$pValues, c(0.014194417, 0.44344359, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x1$thetaH0, 0) expect_equal(x1$conditionalRejectionProbabilities, c(0.13898608, 0.050808351, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, 60, 30)) expect_equal(x1$allocationRatioPlanned, 2) expect_equal(x1$pi1, 0.8, tolerance = 1e-07) expect_equal(x1$pi2, 0.4, tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(-0.023853561, -0.068378457, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.66428984, 0.40418869, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.035427069, 0.088523734, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, NA_integer_) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$normalApproximation, TRUE) expect_equal(x1$directionUpper, TRUE) expect_equal(x1$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.925, 0.972), tolerance = 1e-07) expect_equal(x1$combinationTestStatistics, c(0.014194417, 0.0062944232, NA_real_, NA_real_), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x1, piRange = seq(0.5, 0.8, 0.05), allocationRatioPlanned = 2) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.199, 0.364, 0.506, 0.686, 0.839, 0.927, 0.979), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.63105765, 0.95013529, 0.95013529, 0.63105765, 0.27837883, 0.081561833, 0.015871623), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 90, pi2 = 0.4, allocation ratio = 2") # reversed "directionUpper" x2 <- getAnalysisResults(design9, dataExample7, thetaH0 = 0, stage = 2, nPlanned = c(60,30), pi2 = 0.8, pi1 = 0.4, directionUpper = FALSE, allocationRatioPlanned = 0.5, seed = 123) ## ## Comparison of the results of AnalysisResultsFisher object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2, 3, 4)) expect_equal(x2$informationRates, c(0.2, 0.4, 0.8, 1), tolerance = 1e-07) 
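# Editor's sketch (assumption, not original test code): for a Fisher combination design the later stages are assumed to enter with weights sqrt((t_k - t_(k-1)) / t_1); with informationRates c(0.2, 0.4, 0.8, 1) the stage-2 weight is sqrt(0.2 / 0.2) = 1, so the stage-2 combination statistic reduces to the plain product of the stage-wise p-values:
fisherCombinationStage2 <- 0.98580558 * 0.55655641  # stage-wise p-values of 'x2' as expected below
# fisherCombinationStage2 is approximately 0.54865641, matching x2$combinationTestStatistics[2] checked below.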
expect_equal(x2$criticalValues, c(0.00979594, 0.001278498, 5.8066063e-05, 1.276231e-05), tolerance = 1e-07) expect_equal(x2$futilityBounds, c(1, 1, 1)) expect_equal(x2$alphaSpent, c(0.00979594, 0.01571, 0.022184266, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.00979594, 0.00979594, 0.00979594, 0.00979594), tolerance = 1e-07) expect_equal(x2$effectSizes, c(0.36928105, 0.18026316, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(2.1918708, 0.14224412, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.98580558, 0.55655641, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x2$thetaH0, 0) expect_equal(x2$conditionalRejectionProbabilities, c(0.0056634595, 0.0023089469, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_, 60, 30)) expect_equal(x2$allocationRatioPlanned, 0.5, tolerance = 1e-07) expect_equal(x2$pi1, 0.4, tolerance = 1e-07) expect_equal(x2$pi2, 0.8, tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(-0.023853561, -0.068378457, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.66428984, 0.40418869, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.49999905, 0.49999905, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, FALSE) expect_equal(x2$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.591, 0.788), tolerance = 1e-07) expect_equal(x2$combinationTestStatistics, c(0.98580558, 0.54865641, NA_real_, NA_real_), tolerance = 1e-07) plotData2 <- testGetAnalysisResultsPlotData(x2,piRange = seq(0.2, 0.5, 0.05), allocationRatioPlanned = 0.5) ## ## Comparison of the results of list object 'plotData2' with expected results ## expect_equal(plotData2$xValues, c(0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(0.998, 0.992, 0.967, 0.892, 0.807, 0.623, 0.493), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(1.003982e-05, 0.00017609247, 0.0020513476, 0.015871623, 0.081561833, 0.27837883, 0.63105765), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power Plot with Likelihood") expect_equal(plotData2$xlab, "pi1") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 90, pi2 = 0.8, allocation ratio = 0.5") }) test_that("'getAnalysisResults' produces the correct exact tests and final CIs", { .skipTestifDisabled() dataExample8 <- getDataset( n2 = c(10, 80), n1 = c(15, 100), events2 = c(8, 54), events1 = c(6, 45) ) design10 <- getDesignGroupSequential(kMax = 2, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.1, informationRates = c(0.3, 1)) x1 <- getAnalysisResults(design10, dataExample8, thetaH0 = 0, stage = 2, directionUpper = FALSE, normalApproximation = FALSE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2)) expect_equal(x1$informationRates, 
c(0.3, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(3.1833546, 1.9666792), tolerance = 1e-07) expect_equal(x1$futilityBounds, -6) expect_equal(x1$alphaSpent, c(0.00072789603, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.00072789603, 0.024610104), tolerance = 1e-07) expect_equal(x1$effectSizes, c(-0.4, -0.24541063), tolerance = 1e-07) expect_equal(x1$testStatistics, c(NA_real_, NA_real_)) expect_equal(x1$pValues, c(0.057571679, 0.0019934481), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "reject")) expect_equal(x1$thetaH0, 0) expect_equal(x1$conditionalRejectionProbabilities, c(0.093545976, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$pi1, 0.44347826, tolerance = 1e-07) expect_equal(x1$pi2, 0.68888889, tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(-0.78187776, -0.37165341), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.23265736, -0.10906229), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.19076123, 0.00035290512), tolerance = 1e-07) expect_equal(x1$finalStage, 2) expect_equal(x1$finalPValues, c(NA_real_, 0.0010527587), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, -0.35871085), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, -0.08946214), tolerance = 1e-07) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, -0.2258854), tolerance = 1e-07) expect_equal(x1$normalApproximation, FALSE) expect_equal(x1$directionUpper, FALSE) expect_equal(x1$overallTestStatistics, c(1.5754901, 3.3869257), tolerance = 1e-07) expect_equal(x1$overallPValues, c(0.057571679, 0.00035340268), tolerance = 1e-07) design11 <- getDesignInverseNormal(kMax = 2, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.1, informationRates = c(0.3, 1)) x2 <- getAnalysisResults(design11, dataExample8, thetaH0 = 0, stage = 2, directionUpper = FALSE, normalApproximation = FALSE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2)) expect_equal(x2$informationRates, c(0.3, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(3.1833546, 1.9666792), tolerance = 1e-07) expect_equal(x2$futilityBounds, -6) expect_equal(x2$alphaSpent, c(0.00072789603, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.00072789603, 0.024610104), tolerance = 1e-07) expect_equal(x2$effectSizes, c(-0.4, -0.24541063), tolerance = 1e-07) expect_equal(x2$testStatistics, c(NA_real_, NA_real_)) expect_equal(x2$pValues, c(0.057571679, 0.0019934481), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "reject")) expect_equal(x2$thetaH0, 0) expect_equal(x2$conditionalRejectionProbabilities, c(0.093545976, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_)) expect_equal(x2$allocationRatioPlanned, 1) expect_equal(x2$pi1, 0.44347826, tolerance = 1e-07) expect_equal(x2$pi2, 0.68888889, tolerance = 1e-07) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(-0.78187776, -0.38794979), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.23265736, -0.11944223), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.19076123, 0.00053410288), tolerance = 1e-07) expect_equal(x2$finalStage, 2) expect_equal(x2$finalPValues, c(NA_real_, 
0.0012242304), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, -0.36146576), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, -0.088125224), tolerance = 1e-07) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, -0.2258396), tolerance = 1e-07) expect_equal(x2$normalApproximation, FALSE) expect_equal(x2$directionUpper, FALSE) expect_equal(x2$combinationTestStatistics, c(1.5754901, 3.2718402), tolerance = 1e-07) design12 <- getDesignFisher(kMax = 2, alpha = 0.025, method = "fullAlpha", informationRates = c(0.3, 1)) x3 <- getAnalysisResults(design12, dataExample8, thetaH0 = 0, stage = 2, directionUpper = FALSE, normalApproximation = FALSE, seed = 123) ## ## Comparison of the results of AnalysisResultsFisher object 'x3' with expected results ## expect_equal(x3$stages, c(1, 2)) expect_equal(x3$informationRates, c(0.3, 1), tolerance = 1e-07) expect_equal(x3$criticalValues, c(0.00076737019, 0.00076737019), tolerance = 1e-07) expect_equal(x3$futilityBounds, 1) expect_equal(x3$alphaSpent, c(0.00076737019, 0.025), tolerance = 1e-07) expect_equal(x3$stageLevels, c(0.00076737019, 0.025), tolerance = 1e-07) expect_equal(x3$effectSizes, c(-0.4, -0.24541063), tolerance = 1e-07) expect_equal(x3$testStatistics, c(NA_real_, NA_real_)) expect_equal(x3$pValues, c(0.057571679, 0.0019934481), tolerance = 1e-07) expect_equal(x3$testActions, c("continue", "reject")) expect_equal(x3$thetaH0, 0) expect_equal(x3$conditionalRejectionProbabilities, c(0.059209424, NA_real_), tolerance = 1e-07) expect_equal(x3$nPlanned, c(NA_real_, NA_real_)) expect_equal(x3$allocationRatioPlanned, 1) expect_equal(x3$pi1, 0.44347826, tolerance = 1e-07) expect_equal(x3$pi2, 0.68888889, tolerance = 1e-07) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(-0.78083174, -0.37635443), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(0.22999653, -0.10478651), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.33766302, 0.00088314698), tolerance = 1e-07) expect_equal(x3$finalStage, 2) expect_equal(x3$finalPValues, c(NA_real_, 0.0015832283), tolerance = 1e-07) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_)) expect_equal(x3$normalApproximation, FALSE) expect_equal(x3$directionUpper, FALSE) expect_equal(x3$combinationTestStatistics, c(0.057571679, 4.3180464e-06), tolerance = 1e-07) }) test_that("'getAnalysisResults' produces the correct non-inferiority results for a group sequential design", { .skipTestifDisabled() design13 <- getDesignGroupSequential(kMax = 2, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.1, informationRates = c(0.3, 1)) dataExample9 <- getDataset( n1 = c(10, 80), n2 = c(15, 100), events1 = c(8, 54), events2 = c(6, 45) ) x1 <- getAnalysisResults(design13, dataExample9, thetaH0 = -0.1, stage = 2, directionUpper = TRUE, normalApproximation = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2)) expect_equal(x1$informationRates, c(0.3, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(3.1833546, 1.9666792), tolerance = 1e-07) expect_equal(x1$futilityBounds, -6) expect_equal(x1$alphaSpent, c(0.00072789603, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.00072789603, 0.024610104), tolerance = 1e-07) 
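# Editor's note (hedged): with thetaH0 = -0.1 the tested null hypothesis is H0: pi1 - pi2 <= -0.1, i.e., a non-inferiority margin of 0.1. The stage-wise test statistics checked below therefore differ from the superiority (thetaH0 = 0) case; with normalApproximation = TRUE, rpact is assumed to use a Farrington-Manning type restricted ML variance estimate under this shifted null.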
expect_equal(x1$effectSizes, c(0.4, 0.24541063), tolerance = 1e-07) expect_equal(x1$testStatistics, c(2.4676423, 4.366446), tolerance = 1e-07) expect_equal(x1$pValues, c(0.0068003075, 6.3142236e-06), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "reject")) expect_equal(x1$thetaH0, -0.1, tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities, c(0.2311149, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$pi1, 0.68888889, tolerance = 1e-07) expect_equal(x1$pi2, 0.44347826, tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(-0.23265736, 0.10906229), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.78187776, 0.37165341), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.067077549, 1.9536724e-06), tolerance = 1e-07) expect_equal(x1$finalStage, 2) expect_equal(x1$finalPValues, c(NA_real_, 0.00072814991), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, 0.046389254), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, 0.3577016), tolerance = 1e-07) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, 0.2183453), tolerance = 1e-07) expect_equal(x1$normalApproximation, TRUE) expect_equal(x1$directionUpper, TRUE) expect_equal(x1$overallTestStatistics, c(2.4676423, 4.9460155), tolerance = 1e-07) expect_equal(x1$overallPValues, c(0.0068003075, 3.7873958e-07), tolerance = 1e-07) x2 <- getAnalysisResults(design13, dataExample9, thetaH0 = -0.1, stage = 1, nPlanned = 40, pi1 = 0.45, pi2 = 0.4, directionUpper = TRUE, normalApproximation = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2)) expect_equal(x2$informationRates, c(0.3, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(3.1833546, 1.9666792), tolerance = 1e-07) expect_equal(x2$futilityBounds, -6) expect_equal(x2$alphaSpent, c(0.00072789603, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.00072789603, 0.024610104), tolerance = 1e-07) expect_equal(x2$effectSizes, c(0.4, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(2.4676423, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.0068003075, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", NA_character_)) expect_equal(x2$thetaH0, -0.1, tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities, c(0.2311149, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, 40)) expect_equal(x2$allocationRatioPlanned, 1) expect_equal(x2$pi1, 0.45, tolerance = 1e-07) expect_equal(x2$pi2, 0.4, tolerance = 1e-07) expect_equal(x2$conditionalPower, c(NA_real_, 0.59014508), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(-0.23265736, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.78187776, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.067077549, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_)) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, TRUE) 
expect_equal(x2$overallTestStatistics, c(2.4676423, NA_real_), tolerance = 1e-07) expect_equal(x2$overallPValues, c(0.0068003075, NA_real_), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x2, piRange = seq(0.25, 0.7, 0.05)) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.13978279, 0.2311149, 0.34247666, 0.46554605, 0.59014508, 0.70618885, 0.80546789, 0.88295965, 0.937434, 0.97121381), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(7.8444044e-05, 0.00040464517, 0.0017853782, 0.006737947, 0.021750359, 0.060054668, 0.14183016, 0.2865048, 0.4950359, 0.73161563), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 1, # of remaining subjects = 40, pi2 = 0.4, allocation ratio = 1") # non-inferiority, reversed "directionUpper" x3 <- getAnalysisResults(design13, dataExample9, thetaH0 = 0.1, stage = 2, directionUpper = FALSE, normalApproximation = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x3' with expected results ## expect_equal(x3$stages, c(1, 2)) expect_equal(x3$informationRates, c(0.3, 1), tolerance = 1e-07) expect_equal(x3$criticalValues, c(3.1833546, 1.9666792), tolerance = 1e-07) expect_equal(x3$futilityBounds, -6) expect_equal(x3$alphaSpent, c(0.00072789603, 0.025), tolerance = 1e-07) expect_equal(x3$stageLevels, c(0.00072789603, 0.024610104), tolerance = 1e-07) expect_equal(x3$effectSizes, c(0.4, 0.24541063), tolerance = 1e-07) expect_equal(x3$testStatistics, c(1.4985437, 1.6883572), tolerance = 1e-07) expect_equal(x3$pValues, c(0.93300397, 0.95432866), tolerance = 1e-07) expect_equal(x3$testActions, c("continue", "accept")) expect_equal(x3$thetaH0, 0.1, tolerance = 1e-07) expect_equal(x3$conditionalRejectionProbabilities, c(0.00043165085, NA_real_), tolerance = 1e-07) expect_equal(x3$nPlanned, c(NA_real_, NA_real_)) expect_equal(x3$allocationRatioPlanned, 1) expect_equal(x3$pi1, 0.68888889, tolerance = 1e-07) expect_equal(x3$pi2, 0.44347826, tolerance = 1e-07) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(-0.23265736, 0.10906229), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(0.78187776, 0.37165341), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.49999905, 0.49999905), tolerance = 1e-07) expect_equal(x3$finalStage, 2) expect_equal(x3$finalPValues, c(NA_real_, 0.9819019), tolerance = 1e-07) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, 0.10906703), tolerance = 1e-07) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, 0.37282539), tolerance = 1e-07) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, 0.24094623), tolerance = 1e-07) expect_equal(x3$normalApproximation, TRUE) expect_equal(x3$directionUpper, FALSE) expect_equal(x3$overallTestStatistics, c(1.4985437, 2.0947166), tolerance = 1e-07) expect_equal(x3$overallPValues, c(0.93300397, 0.9819019), tolerance = 1e-07) x4 <- getAnalysisResults(design13, dataExample9, thetaH0 = 0.1, stage = 1, nPlanned = 40, pi1 = 0.4, pi2 = 0.45, directionUpper = FALSE, normalApproximation = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x4' with expected results ## 
expect_equal(x4$stages, c(1, 2)) expect_equal(x4$informationRates, c(0.3, 1), tolerance = 1e-07) expect_equal(x4$criticalValues, c(3.1833546, 1.9666792), tolerance = 1e-07) expect_equal(x4$futilityBounds, -6) expect_equal(x4$alphaSpent, c(0.00072789603, 0.025), tolerance = 1e-07) expect_equal(x4$stageLevels, c(0.00072789603, 0.024610104), tolerance = 1e-07) expect_equal(x4$effectSizes, c(0.4, NA_real_), tolerance = 1e-07) expect_equal(x4$testStatistics, c(1.4985437, NA_real_), tolerance = 1e-07) expect_equal(x4$pValues, c(0.93300397, NA_real_), tolerance = 1e-07) expect_equal(x4$testActions, c("continue", NA_character_)) expect_equal(x4$thetaH0, 0.1, tolerance = 1e-07) expect_equal(x4$conditionalRejectionProbabilities, c(0.00043165085, NA_real_), tolerance = 1e-07) expect_equal(x4$nPlanned, c(NA_real_, 40)) expect_equal(x4$allocationRatioPlanned, 1) expect_equal(x4$pi1, 0.4, tolerance = 1e-07) expect_equal(x4$pi2, 0.45, tolerance = 1e-07) expect_equal(x4$conditionalPower, c(NA_real_, 0.009129264), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalLowerBounds, c(-0.23265736, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds, c(0.78187776, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues, c(0.49999905, NA_real_), tolerance = 1e-07) expect_equal(x4$finalStage, NA_integer_) expect_equal(x4$finalPValues, c(NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_)) expect_equal(x4$medianUnbiasedEstimates, c(NA_real_, NA_real_)) expect_equal(x4$normalApproximation, TRUE) expect_equal(x4$directionUpper, FALSE) expect_equal(x4$overallTestStatistics, c(1.4985437, NA_real_), tolerance = 1e-07) expect_equal(x4$overallPValues, c(0.93300397, NA_real_), tolerance = 1e-07) }) rpact/inst/tests/testthat/test-f_design_utilities.R0000644000176200001440000017431613567165663022351 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 06 November 2019, 17:12:56 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing design utility functions") test_that("'getPiByLambda' and 'getLambdaByPi' produce corresponding results", { expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 1, kappa = 2), eventTime = 1, kappa = 2), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 1, kappa = 2), eventTime = 1, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 1, kappa = 2), eventTime = 1, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 1, kappa = 2), eventTime = 1, kappa = 2), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 1, kappa = 2), eventTime = 1, kappa = 2), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 1, kappa = 2), eventTime = 1, kappa = 2), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 1, kappa = 2), eventTime = 1, kappa = 2), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 1, kappa = 2), eventTime = 1, kappa = 2), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, 
eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 1, kappa = 4), eventTime = 1, kappa = 4), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 1, kappa = 4), eventTime = 1, kappa = 4), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 1, kappa = 4), eventTime = 1, kappa = 4), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 1, kappa = 4), eventTime = 1, kappa = 4), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 1, kappa = 4), eventTime = 1, kappa = 4), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 1, kappa = 4), eventTime = 1, kappa = 4), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 1, kappa = 4), eventTime = 1, kappa = 4), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 1, kappa = 4), eventTime = 1, kappa = 4), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 2, kappa = 1), eventTime = 2, kappa = 1), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 2, kappa = 1), eventTime = 2, kappa = 1), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 2, kappa = 1), eventTime = 2, kappa = 1), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 2, kappa = 1), eventTime = 2, kappa = 1), 0.4, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.5, eventTime = 2, kappa = 1), eventTime = 2, kappa = 1), 0.5, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.6, eventTime = 2, kappa = 1), eventTime = 2, kappa = 1), 0.6, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.7, eventTime = 2, kappa = 1), eventTime = 2, kappa = 1), 0.7, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.8, eventTime = 2, kappa = 1), eventTime = 2, kappa = 1), 0.8, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.1, eventTime = 2, kappa = 2), eventTime = 2, kappa = 2), 0.1, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.2, eventTime = 2, kappa = 2), eventTime = 2, kappa = 2), 0.2, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.3, eventTime = 2, kappa = 2), eventTime = 2, kappa = 2), 0.3, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.4, eventTime = 2, kappa = 2), eventTime = 2, kappa = 2), 0.4, tolerance = 1e-04) 
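# Editor's sketch (assumption, not original test code): these round-trip checks are consistent with the Weibull event-probability relation pi = 1 - exp(-(lambda * eventTime)^kappa), under which getLambdaByPi inverts getPiByLambda via lambda = (-log(1 - pi))^(1 / kappa) / eventTime. For example, with pi = 0.3, eventTime = 2, kappa = 2:
weibullLambdaCheck <- (-log(1 - 0.3))^(1 / 2) / 2
# getPiByLambda(weibullLambdaCheck, eventTime = 2, kappa = 2) should then return approximately 0.3 if the assumed relation holds.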
	# Round-trip identity: getLambdaByPi(getPiByLambda(pi)) must return pi.
	# The exhaustive case list is expressed as loops; the largest pi tested per
	# (eventTime, kappa) combination is taken over unchanged from the original cases.
	for (piValue in seq(0.5, 0.8, 0.1)) {
		expect_equal(getLambdaByPi(getPiByLambda(piValue, eventTime = 2, kappa = 2),
			eventTime = 2, kappa = 2), piValue, tolerance = 1e-04)
	}
	for (kappa in 3:5) {
		for (piValue in seq(0.1, 0.8, 0.1)) {
			expect_equal(getLambdaByPi(getPiByLambda(piValue, eventTime = 2, kappa = kappa),
				eventTime = 2, kappa = kappa), piValue, tolerance = 1e-04)
		}
	}
	# maximum tested pi (in multiples of 0.1) for eventTime = 3, ..., 20 (rows)
	# and kappa = 1, ..., 5 (columns)
	piMaxSteps <- matrix(c(
		8, 8, 8, 8, 6,
		8, 8, 8, 6, 5,
		8, 8, 6, 4, 4,
		8, 8, 5, 4, 3,
		8, 8, 4, 3, 2,
		8, 7, 4, 3, 2,
		8, 6, 3, 2, 2,
		8, 5, 3, 2, 2,
		8, 5, 2, 2, 1,
		8, 4, 2, 2, 1,
		8, 4, 2, 1, 1,
		8, 4, 2, 1, 1,
		8, 3, 2, 1, 1,
		8, 3, 2, 1, 1,
		8, 3, 1, 1, 1,
		8, 3, 1, 1, 1,
		8, 3, 1, 1, 1,
		8, 2, 1, 1, 1), nrow = 18, byrow = TRUE)
	for (eventTime in 3:20) {
		for (kappa in 1:5) {
			for (piValue in seq(0.1, piMaxSteps[eventTime - 2, kappa] / 10, by = 0.1)) {
				expect_equal(getLambdaByPi(
					getPiByLambda(piValue, eventTime = eventTime, kappa = kappa),
					eventTime = eventTime, kappa = kappa), piValue, tolerance = 1e-04)
			}
		}
	}
})
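	# The round-trip identity checked above relies on the Weibull-type relation used
	# by these conversion utilities (an illustrative note; the formulas are assumed
	# from the package documentation, not taken from this test file):
	#   pi     = 1 - exp(-(lambda * eventTime)^kappa)
	#   lambda = (-log(1 - pi))^(1 / kappa) / eventTime
	# e.g., for pi = 0.3, eventTime = 2, kappa = 2: lambda = sqrt(-log(0.7)) / 2 ~ 0.2986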
test_that("'getPiecewiseExponentialDistribution' and 'getPiecewiseExponentialQuantile' produce corresponding results", {
	piecewiseLambda <- c(0.03, 0.05, 0.08)
	piecewiseSurvivalTime <- c(0, 16, 22)
	time <- seq(2, 50, 4)
	quantile <- getPiecewiseExponentialDistribution(time,
		piecewiseSurvivalTime = piecewiseSurvivalTime, piecewiseLambda = piecewiseLambda)
	y <- getPiecewiseExponentialQuantile(quantile,
		piecewiseSurvivalTime = piecewiseSurvivalTime, piecewiseLambda = piecewiseLambda)
	expect_equal(y, time, tolerance = 1e-06)
})

test_that("'ppwexp' and 'qpwexp' produce corresponding results", {
	piecewiseLambda <- c(0.03, 0.05, 0.08)
	piecewiseSurvivalTime <- c(0, 16, 22)
	time <- seq(2, 50, 4)
	quantile <- ppwexp(time, s = piecewiseSurvivalTime, lambda = piecewiseLambda)
	y <- qpwexp(quantile, s = piecewiseSurvivalTime, lambda = piecewiseLambda)
	expect_equal(y, time, tolerance = 1e-06)
})

test_that("'getPiecewiseExponentialDistribution' and 'getPiecewiseExponentialQuantile' produce corresponding results ('piecewiseSurvivalTime' defined as list)", {
	piecewiseSurvivalTime <- list(
		"<16" = 0.03,
		"16 - <22" = 0.05,
		">=22" = 0.08)
	time <- seq(2, 50, 4)
	quantile <- getPiecewiseExponentialDistribution(time,
		piecewiseSurvivalTime = piecewiseSurvivalTime)
	y <- getPiecewiseExponentialQuantile(quantile,
		piecewiseSurvivalTime = piecewiseSurvivalTime)
	expect_equal(y, time, tolerance = 1e-06)
})

test_that("'ppwexp' and 'qpwexp' produce corresponding results ('piecewiseSurvivalTime' defined as list)", {
	piecewiseSurvivalTime <- list(
		"<16" = 0.03,
		"16 - <22" = 0.05,
		">=22" = 0.08)
	time <- seq(2, 50, 4)
	quantile <- ppwexp(time, s = piecewiseSurvivalTime)
	y <- qpwexp(quantile, s = piecewiseSurvivalTime)
	expect_equal(y, time, tolerance = 1e-06)
})

test_that("'getPiecewiseExponentialRandomNumbers': test that mean random numbers are as expected", {
	piecewiseSurvivalTime <- c(0, 16, 22)
	piecewiseLambda <- c(0.003, 0.003, 0.003)
	y <- 1 / mean(getPiecewiseExponentialRandomNumbers(5000,
		piecewiseSurvivalTime = piecewiseSurvivalTime,
		piecewiseLambda = piecewiseLambda, kappa = 1))
	expect_equal(y, piecewiseLambda[1], tolerance = 5e-04)
})

test_that("'rpwexp': test that mean random numbers are as expected", {
	piecewiseSurvivalTime <- c(0, 16, 22)
	piecewiseLambda <- c(0.003, 0.003, 0.003)
	y <- 1 / mean(rpwexp(5000, s = piecewiseSurvivalTime, lambda = piecewiseLambda, kappa = 1))
	expect_equal(y, piecewiseLambda[1], tolerance = 5e-04)
})

test_that("'getPiecewiseExponentialRandomNumbers': test that mean random numbers are as expected ('piecewiseSurvivalTime' defined as list)", {
	piecewiseSurvivalTime <- list(
		"<16" = 0.003,
		"16 - <22" = 0.003,
		">=22" = 0.003)
	y <- 1 / mean(getPiecewiseExponentialRandomNumbers(5000,
		piecewiseSurvivalTime = piecewiseSurvivalTime, kappa = 1))
	expect_equal(y, 0.003, tolerance = 5e-04)
})

test_that("'rpwexp': test that mean random numbers are as expected ('piecewiseSurvivalTime' defined as list)", {
	piecewiseSurvivalTime <- list(
		"<16" = 0.003,
		"16 - <22" = 0.003,
		">=22" = 0.003)
	y <- 1 / mean(rpwexp(5000, s = piecewiseSurvivalTime, kappa = 1))
	expect_equal(y, 0.003, tolerance = 5e-04)
})
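# Sketch of the distribution function behind 'ppwexp' and
# 'getPiecewiseExponentialDistribution' (assumed piecewise exponential form for
# kappa = 1; an illustrative note, not original test code):
#   F(t) = 1 - exp(-Lambda(t)), where Lambda(t) sums lambda_i over the part of
#   [0, t] that falls into the i-th piece of 'piecewiseSurvivalTime'.
# With a single lambda this reduces to the exponential cdf, e.g.
#   1 - exp(-0.003 * 4)  # = 0.01192829..., cf. the single-lambda test below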
defined as list)", { piecewiseSurvivalTime <- list( "<16" = 0.003, "16 - <22" = 0.003, ">=22" = 0.003) y <- 1 / mean(rpwexp(5000, s = piecewiseSurvivalTime, kappa = 1)) expect_equal(y, 0.003, tolerance = 5e-04) }) test_that("'getPiecewiseExponentialDistribution': test that function call with singel lambda is working", { expect_equal(getPiecewiseExponentialDistribution(4, piecewiseLambda = 0.003), 0.01192829, tolerance = 5e-05) }) rpact/inst/tests/testthat/helper-f_analysis_rates.R0000644000176200001440000000546013567153535022312 0ustar liggesusers###################################################################################### # # # -- Unit test helper functions -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### testGetAnalysisResultsPlotData <- function(x, ..., nPlanned = NA_real_, stage = x$getNumberOfStages(), allocationRatioPlanned = NA_real_) { plotArgs <- .getAnalysisResultsPlotArguments(x = x, nPlanned = nPlanned, stage = stage, allocationRatioPlanned = allocationRatioPlanned) if (x$getDataInput()$isDatasetMeans()) { assumedStDev <- .getOptionalArgument("assumedStDev", ...) if (is.null(assumedStDev)) { assumedStDev <- x$assumedStDev return(.getConditionalPowerPlot(stageResults = plotArgs$stageResults, nPlanned = plotArgs$nPlanned, stage = plotArgs$stage, allocationRatioPlanned = plotArgs$allocationRatioPlanned, assumedStDev = assumedStDev, ...)) } } else if (x$getDataInput()$isDatasetRates()) { pi2 <- .getOptionalArgument("pi2", ...) if (is.null(pi2)) { pi2 <- x$pi2 return(.getConditionalPowerPlot(stageResults = plotArgs$stageResults, nPlanned = plotArgs$nPlanned, stage = plotArgs$stage, allocationRatioPlanned = plotArgs$allocationRatioPlanned, pi2 = pi2, ...)) } } return(.getConditionalPowerPlot(stageResults = plotArgs$stageResults, nPlanned = plotArgs$nPlanned, stage = plotArgs$stage, allocationRatioPlanned = plotArgs$allocationRatioPlanned, ...)) } rpact/inst/tests/testthat/test-f_design_group_sequential_design.R0000644000176200001440000007061613567165663025253 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
rpact/inst/tests/testthat/test-f_design_group_sequential_design.R0000644000176200001440000007061613567165663025243 0ustar liggesusers
######################################################################################
#                                                                                    #
# -- Unit tests --                                                                   #
#                                                                                    #
# This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. #
#                                                                                    #
# File version: 1.0.0                                                                #
# Date: 06 November 2019, 17:12:28                                                   #
# Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD                             #
# Licensed under "GNU Lesser General Public License" version 3                       #
# License text can be found here: https://www.r-project.org/Licenses/LGPL-3          #
#                                                                                    #
# RPACT company website: https://www.rpact.com                                       #
# RPACT package website: https://www.rpact.org                                       #
#                                                                                    #
# Contact us for information about our services: info@rpact.com                      #
#                                                                                    #
######################################################################################

context("Testing the group sequential and inverse normal design functionality")

test_that("'getDesignInverseNormal' with default parameters: parameters and results are as expected", {
	# @refFS[Formula]{fs:criticalValuesOBrienFleming}
	x1 <- getDesignInverseNormal()

	##
	## Comparison of the results of TrialDesignInverseNormal object 'x1' with expected results
	##
	expect_equal(x1$alphaSpent, c(0.00025917372, 0.0071600594, 0.025), tolerance = 1e-07)
	expect_equal(x1$criticalValues, c(3.4710914, 2.4544323, 2.0040356), tolerance = 1e-07)
	expect_equal(x1$stageLevels, c(0.00025917372, 0.0070553616, 0.022533125), tolerance = 1e-07)
})
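# Side note (illustrative, not part of the original tests): for a one-sided design
# the stage levels are the upper-tail normal probabilities of the critical values,
# e.g. 1 - pnorm(c(3.4710914, 2.4544323, 2.0040356)) reproduces the 'stageLevels'
# c(0.00025917372, 0.0070553616, 0.022533125) checked above.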
"asHSD",gammaA = -1, typeBetaSpending = C_TYPE_OF_DESIGN_BS_HSD, gammaB = -2) ## ## Comparison of the results of TrialDesignInverseNormal object 'x3' with expected results ## expect_equal(x3$power, c(0.12840586, 0.34869365, 0.76424148, 0.86), tolerance = 1e-07) expect_equal(x3$futilityBounds, c(-1.0672796, -0.30464832, 1.028624), tolerance = 1e-07) expect_equal(x3$alphaSpent, c(0.0090195874, 0.020036136, 0.049926539, 0.07), tolerance = 1e-07) expect_equal(x3$betaSpent, c(0.010777094, 0.026854629, 0.086620705, 0.14), tolerance = 1e-07) expect_equal(x3$criticalValues, c(2.364813, 2.1928805, 1.7718975, 1.6682985), tolerance = 1e-07) expect_equal(x3$stageLevels, c(0.0090195874, 0.014157994, 0.038205784, 0.047628242), tolerance = 1e-07) # @refFS[Formula]{fs:inflationFactor} # @refFS[Formula]{fs:expectedReduction} y2 <- getDesignCharacteristics(x3) ## ## Comparison of the results of TrialDesignCharacteristics object 'y2' with expected results ## expect_equal(y2$nFixed, 6.5337002, tolerance = 1e-07) expect_equal(y2$shift, 7.5750078, tolerance = 1e-07) expect_equal(y2$inflationFactor, 1.1593749, tolerance = 1e-07) expect_equal(y2$information, c(1.5150016, 3.0300031, 6.0600063, 7.5750078), tolerance = 1e-07) expect_equal(y2$power, c(0.12840586, 0.34869365, 0.76424148, 0.86), tolerance = 1e-07) expect_equal(y2$rejectionProbabilities, c(0.12840586, 0.22028779, 0.41554783, 0.095758523), tolerance = 1e-07) expect_equal(y2$futilityProbabilities, c(0.010777094, 0.016077535, 0.059766076), tolerance = 1e-07) expect_equal(y2$averageSampleNumber1, 0.75564768, tolerance = 1e-07) expect_equal(y2$averageSampleNumber01, 0.85242855, tolerance = 1e-07) expect_equal(y2$averageSampleNumber0, 0.720263, tolerance = 1e-07) }) test_that("'getDesignInverseNormal' with binding futility bounds", { # @refFS[Formula]{fs:criticalValuesWithFutility} # @refFS[Formula]{fs:criticalValuesWangTiatis} x4 <- getDesignInverseNormal(kMax = 4, alpha = 0.025, futilityBounds = rep(0.5244, 3), bindingFutility = TRUE, typeOfDesign = "WT", deltaWT = 0.4) ## ## Comparison of the results of TrialDesignInverseNormal object 'x4' with expected results ## expect_equal(x4$alphaSpent, c(0.0062828133, 0.013876673, 0.02015684, 0.025), tolerance = 1e-07) expect_equal(x4$criticalValues, c(2.4958485, 2.328709, 2.2361766, 2.1727623), tolerance = 1e-07) expect_equal(x4$stageLevels, c(0.0062828133, 0.0099372444, 0.012670104, 0.014899106), tolerance = 1e-07) }) test_that("'getDesignGroupSequential' with type of design = 'asUser'", { # @refFS[Formula]{fs:alphaSpendingConcept} x5 <- getDesignGroupSequential(typeOfDesign = "asUser", userAlphaSpending = c(0.01, 0.02, 0.03, 0.05)) ## ## Comparison of the results of TrialDesignGroupSequential object 'x5' with expected results ## expect_equal(x5$alphaSpent, c(0.01, 0.02, 0.03, 0.05), tolerance = 1e-07) expect_equal(x5$criticalValues, c(2.3263479, 2.2192994, 2.1201347, 1.8189562), tolerance = 1e-07) expect_equal(x5$stageLevels, c(0.01, 0.01323318, 0.016997342, 0.034459058), tolerance = 1e-07) }) test_that("'getDesignGroupSequential' with type of design = 'asOF' and 'bsUser'", { # @refFS[Formula]{fs:alphaSpendingConcept} # @refFS[Formula]{fs:alphaSpendingOBrienFleming} # @refFS[Formula]{fs:betaSpendingApproach} x6 <- getDesignGroupSequential(kMax = 3, alpha = 0.03, typeOfDesign = "asOF", typeBetaSpending = "bsUser", userBetaSpending = c(0.01, 0.05, 0.3)) ## ## Comparison of the results of TrialDesignGroupSequential object 'x6' with expected results ## expect_equal(x6$power, c(0.014685829, 0.33275272, 0.7), tolerance 
test_that("'getDesignGroupSequential' with type of design = 'asOF' and 'bsUser'", {
	# @refFS[Formula]{fs:alphaSpendingConcept}
	# @refFS[Formula]{fs:alphaSpendingOBrienFleming}
	# @refFS[Formula]{fs:betaSpendingApproach}
	x6 <- getDesignGroupSequential(kMax = 3, alpha = 0.03, typeOfDesign = "asOF",
		typeBetaSpending = "bsUser", userBetaSpending = c(0.01, 0.05, 0.3))

	##
	## Comparison of the results of TrialDesignGroupSequential object 'x6' with expected results
	##
	expect_equal(x6$power, c(0.014685829, 0.33275272, 0.7), tolerance = 1e-07)
	expect_equal(x6$futilityBounds, c(-0.92327973, 0.29975473), tolerance = 1e-07)
	expect_equal(x6$alphaSpent, c(0.00017079385, 0.0078650906, 0.03), tolerance = 1e-07)
	expect_equal(x6$betaSpent, c(0.01, 0.05, 0.3), tolerance = 1e-07)
	expect_equal(x6$criticalValues, c(3.5815302, 2.417863, 1.9175839), tolerance = 1e-07)
	expect_equal(x6$stageLevels, c(0.00017079385, 0.0078059773, 0.027581894), tolerance = 1e-07)
})

test_that("'getDesignGroupSequential' with type of design = 'asOF' and 'bsP'", {
	# @refFS[Formula]{fs:alphaSpendingConcept}
	# @refFS[Formula]{fs:alphaSpendingOBrienFleming}
	# @refFS[Formula]{fs:betaSpendingApproach}
	# @refFS[Formula]{fs:betaSpendingPocock}
	x7 <- getDesignGroupSequential(kMax = 3, alpha = 0.03, typeOfDesign = "asOF",
		typeBetaSpending = "bsP", userBetaSpending = c(0.01, 0.05, 0.3))

	##
	## Comparison of the results of TrialDesignGroupSequential object 'x7' with expected results
	##
	expect_equal(x7$power, c(0.03410434, 0.52267986, 0.8), tolerance = 1e-07)
	expect_equal(x7$futilityBounds, c(0.42062972, 1.2539286), tolerance = 1e-07)
	expect_equal(x7$alphaSpent, c(0.00017079385, 0.0078650906, 0.03), tolerance = 1e-07)
	expect_equal(x7$betaSpent, c(0.090566485, 0.1526765, 0.2), tolerance = 1e-07)
	expect_equal(x7$criticalValues, c(3.5815302, 2.417863, 1.9175839), tolerance = 1e-07)
	expect_equal(x7$stageLevels, c(0.00017079385, 0.0078059773, 0.027581894), tolerance = 1e-07)
})

test_that("'getDesignGroupSequential' with binding futility bounds", {
	# @refFS[Formula]{fs:criticalValuesWithFutility}
	# @refFS[Formula]{fs:criticalValuesWangTiatis}
	x8 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, futilityBounds = rep(0.5244, 3),
		bindingFutility = TRUE, typeOfDesign = "WT", deltaWT = 0.4)

	##
	## Comparison of the results of TrialDesignGroupSequential object 'x8' with expected results
	##
	expect_equal(x8$alphaSpent, c(0.0062828133, 0.013876673, 0.02015684, 0.025), tolerance = 1e-07)
	expect_equal(x8$criticalValues, c(2.4958485, 2.328709, 2.2361766, 2.1727623), tolerance = 1e-07)
	expect_equal(x8$stageLevels, c(0.0062828133, 0.0099372444, 0.012670104, 0.014899106), tolerance = 1e-07)
})

test_that("'getDesignGroupSequential' with Haybittle Peto boundaries", {
	# @refFS[Formula]{fs:criticalValuesHaybittlePeto}
	x9 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, typeOfDesign = "HP")

	##
	## Comparison of the results of TrialDesignGroupSequential object 'x9' with expected results
	##
	expect_equal(x9$alphaSpent, c(0.001349898, 0.0024617416, 0.0033695882, 0.025), tolerance = 1e-07)
	expect_equal(x9$criticalValues, c(3, 3, 3, 1.9827514), tolerance = 1e-07)
	expect_equal(x9$stageLevels, c(0.001349898, 0.001349898, 0.001349898, 0.023697604), tolerance = 1e-07)
})
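# Side note (illustrative): the Haybittle & Peto design uses the fixed interim
# bound 3, so the first three stage levels equal 1 - pnorm(3) = 0.001349898; only
# the final critical value (1.9827514) is adjusted to meet the overall alpha.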
<= alpha_5 <= alpha = 0.021"), fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = "asUser", userAlphaSpending = c(0.01, 0.02, 0.023), alpha = 0.02), paste0("'userAlphaSpending' = c(0.01, 0.02, 0.023) must be a vector that ", "satisfies the following condition: 0 <= alpha_1 <= .. <= alpha_3 <= alpha = 0.02"), fixed = TRUE) expect_equal(getDesignInverseNormal(typeOfDesign = "asUser", userAlphaSpending = c(0.01, 0.02, 0.023))$alpha, 0.023) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_WT, deltaWT = NA_real_), "Missing argument: parameter 'deltaWT' must be specified in design", fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_WT_OPTIMUM, optimizationCriterion = NA_character_), "Missing argument: parameter 'optimizationCriterion' must be specified in design", fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_KD, gammaA = NA_real_), "Missing argument: parameter 'gammaA' must be specified in design", fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_HSD, gammaA = NA_real_), "Missing argument: parameter 'gammaA' must be specified in design", fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER), "Missing argument: parameter 'userAlphaSpending' must be specified in design", fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = NA_character_), "Missing argument: parameter 'typeBetaSpending' must be specified in design", fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER), "Missing argument: parameter 'userBetaSpending' must be specified in design", fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER, userBetaSpending = c(0.1, 0.2)), paste0("Conflicting arguments: length of 'userBetaSpending' (2) must ", "be equal to length of 'informationRates' (3)"), fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER, userBetaSpending = c(0.2, 0.1, 0.05)), paste0("'userBetaSpending' = c(0.2, 0.1, 0.05) must be a vector that satisfies the ", "following condition: 0 <= beta_1 <= .. <= beta_3 <= beta = 0.05"), fixed = TRUE) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER, userBetaSpending = c(0.1, 0.2, 0.3), beta = 0.2), paste0("'userBetaSpending' = c(0.1, 0.2, 0.3) must be a vector that satisfies the ", "following condition: 0 <= beta_1 <= .. 
<= beta_3 <= beta = 0.2"), fixed = TRUE) expect_error(getDesignInverseNormal(kMax = Inf), paste0("Argument out of bounds: 'kMax' (Inf) is out of bounds [1; ", C_KMAX_UPPER_BOUND, "]"), fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -Inf), paste0("Argument out of bounds: 'kMax' (-Inf) is out of bounds [1; ", C_KMAX_UPPER_BOUND, "]"), fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -Inf), "Argument out of bounds: 'kMax' (-Inf) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -10), "Argument out of bounds: 'kMax' (-10) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -9), "Argument out of bounds: 'kMax' (-9) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -8), "Argument out of bounds: 'kMax' (-8) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -7), "Argument out of bounds: 'kMax' (-7) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -6), "Argument out of bounds: 'kMax' (-6) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -5), "Argument out of bounds: 'kMax' (-5) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -4), "Argument out of bounds: 'kMax' (-4) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -3), "Argument out of bounds: 'kMax' (-3) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -2), "Argument out of bounds: 'kMax' (-2) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -1), "Argument out of bounds: 'kMax' (-1) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 0), "Argument out of bounds: 'kMax' (0) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 11), "Argument out of bounds: 'kMax' (11) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 12), "Argument out of bounds: 'kMax' (12) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 13), "Argument out of bounds: 'kMax' (13) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 14), "Argument out of bounds: 'kMax' (14) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 15), "Argument out of bounds: 'kMax' (15) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 16), "Argument out of bounds: 'kMax' (16) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 17), "Argument out of bounds: 'kMax' (17) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 18), "Argument out of bounds: 'kMax' (18) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 19), "Argument out of bounds: 'kMax' (19) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 20), "Argument out of bounds: 'kMax' (20) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = Inf), "Argument out of bounds: 'kMax' (Inf) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 2, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (2) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 3, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length 
of 'futilityBounds' (4) must be equal to 'kMax' (3) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 4, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (4) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 6, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (6) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 7, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (7) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 8, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (8) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 9, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (9) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 10, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (10) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(futilityBounds = c(-7, 5)), "Argument out of bounds: 'futilityBounds' (-7, 5) is out of bounds [-6; 6]", fixed = TRUE) expect_error(getDesignInverseNormal(futilityBounds = c(1, 7)), "Argument out of bounds: 'futilityBounds' (1, 7) is out of bounds [-6; 6]", fixed = TRUE) }) test_that("'getDesignGroupSequential': illegal arguments throw exceptions as expected", { expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.023, 0.023, 0.025), kMax = 4), paste0("Conflicting arguments: length of 'userAlphaSpending' (5) ", "must be equal to 'kMax' (4)"), fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.023, 0.023, 0.021)), paste0("'userAlphaSpending' = c(0.01, 0.02, 0.023, 0.023, 0.021) must be a vector that ", "satisfies the following condition: 0 <= alpha_1 <= .. <= alpha_5 <= alpha = 0.021"), fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = "asUser", userAlphaSpending = c(0.01, 0.02, 0.023), alpha = 0.02), paste0("'userAlphaSpending' = c(0.01, 0.02, 0.023) must be a vector that ", "satisfies the following condition: 0 <= alpha_1 <= .. 
<= alpha_3 <= alpha = 0.02"), fixed = TRUE) expect_equal(getDesignGroupSequential(typeOfDesign = "asUser", userAlphaSpending = c(0.01, 0.02, 0.023))$alpha, 0.023) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_WT, deltaWT = NA_real_), "Missing argument: parameter 'deltaWT' must be specified in design", fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_WT_OPTIMUM, optimizationCriterion = NA_character_), "Missing argument: parameter 'optimizationCriterion' must be specified in design", fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_KD, gammaA = NA_real_), "Missing argument: parameter 'gammaA' must be specified in design", fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_HSD, gammaA = NA_real_), "Missing argument: parameter 'gammaA' must be specified in design", fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER), "Missing argument: parameter 'userAlphaSpending' must be specified in design", fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = NA_character_), "Missing argument: parameter 'typeBetaSpending' must be specified in design", fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER), "Missing argument: parameter 'userBetaSpending' must be specified in design", fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER, userBetaSpending = c(0.1, 0.2)), paste0("Conflicting arguments: length of 'userBetaSpending' (2) must ", "be equal to length of 'informationRates' (3)"), fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER, userBetaSpending = c(0.2, 0.1, 0.05)), paste0("'userBetaSpending' = c(0.2, 0.1, 0.05) must be a vector that satisfies the ", "following condition: 0 <= beta_1 <= .. <= beta_3 <= beta = 0.05"), fixed = TRUE) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER, userBetaSpending = c(0.1, 0.2, 0.3), beta = 0.2), paste0("'userBetaSpending' = c(0.1, 0.2, 0.3) must be a vector that satisfies the ", "following condition: 0 <= beta_1 <= .. 
<= beta_3 <= beta = 0.2"), fixed = TRUE) expect_error(getDesignGroupSequential(kMax = Inf), paste0("Argument out of bounds: 'kMax' (Inf) is out of bounds [1; ", C_KMAX_UPPER_BOUND, "]"), fixed = TRUE) expect_error(getDesignGroupSequential(kMax = -Inf), paste0("Argument out of bounds: 'kMax' (-Inf) is out of bounds [1; ", C_KMAX_UPPER_BOUND, "]"), fixed = TRUE) expect_error(getDesignGroupSequential(kMax = -Inf), "Argument out of bounds: 'kMax' (-Inf) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = -10), "Argument out of bounds: 'kMax' (-10) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = -9), "Argument out of bounds: 'kMax' (-9) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = -8), "Argument out of bounds: 'kMax' (-8) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = -7), "Argument out of bounds: 'kMax' (-7) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = -6), "Argument out of bounds: 'kMax' (-6) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = -5), "Argument out of bounds: 'kMax' (-5) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = -4), "Argument out of bounds: 'kMax' (-4) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = -3), "Argument out of bounds: 'kMax' (-3) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = -2), "Argument out of bounds: 'kMax' (-2) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = -1), "Argument out of bounds: 'kMax' (-1) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = 0), "Argument out of bounds: 'kMax' (0) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = 11), "Argument out of bounds: 'kMax' (11) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = 12), "Argument out of bounds: 'kMax' (12) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = 13), "Argument out of bounds: 'kMax' (13) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = 14), "Argument out of bounds: 'kMax' (14) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = 15), "Argument out of bounds: 'kMax' (15) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = 16), "Argument out of bounds: 'kMax' (16) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = 17), "Argument out of bounds: 'kMax' (17) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = 18), "Argument out of bounds: 'kMax' (18) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = 19), "Argument out of bounds: 'kMax' (19) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = 20), "Argument out of bounds: 'kMax' (20) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = Inf), "Argument out of bounds: 'kMax' (Inf) is out of bounds [1; 10]", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = 2, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (2) - 1", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = 3, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (3) - 1", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = 4, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (4) - 1", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = 6, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (6) - 1", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = 7, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (7) - 1", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = 8, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (8) - 1", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = 9, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (9) - 1", fixed = TRUE) expect_error(getDesignGroupSequential(kMax = 10, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (10) - 1", fixed = TRUE) expect_error(getDesignGroupSequential(futilityBounds = c(-7, 5)), "Argument out of bounds: 'futilityBounds' (-7, 5) is out of bounds [-6; 6]", fixed = TRUE) expect_error(getDesignGroupSequential(futilityBounds = c(1, 7)), "Argument out of bounds: 'futilityBounds' (1, 7) is out of bounds [-6; 6]", fixed = TRUE) }) rpact/inst/tests/testthat/helper-f_core_utilities.R0000644000176200001440000000710613375253362022307 0ustar liggesusers###################################################################################### # # # -- Unit test helper functions -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### getTestInformationRatesDefault <- function(kMax) { return((1:kMax) / kMax) } getTestFutilityBoundsDefault <- function(kMax) { return(rep(C_FUTILITY_BOUNDS_DEFAULT, kMax - 1)) } getTestAlpha0VecDefault <- function(kMax) { return(rep(C_ALPHA_0_VEC_DEFAULT, kMax - 1)) } getTestInformationRates <- function(kMax) { if (kMax == 1L) { return(1) } a <- 0.8 / kMax b <- c() for (i in 1:(kMax - 1)) { b <- c(b, a * i) } return(c(b, 1)) } getTestFutilityBounds <- function(kMax) { if (kMax < 2) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'kMax' must be >= 2") } k <- kMax - 1 futilityBounds <- c(2) k <- k - 1 if (k > 0) { futilityBounds <- c(1, futilityBounds) k <- k - 1 } if (k > 0) { futilityBounds <- c(rep(0, k), futilityBounds) } return(futilityBounds) } getTestDesign <- function(kMax = NA_real_, informationRates = NA_real_, futilityBounds = NA_real_, designClass = C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL) { if (designClass == C_CLASS_NAME_TRIAL_DESIGN_FISHER) { return(TrialDesignFisher( kMax = as.integer(kMax), alpha = C_ALPHA_DEFAULT, method = C_FISHER_METHOD_DEFAULT, alpha0Vec = futilityBounds, informationRates = informationRates, tolerance = C_ANALYSIS_TOLERANCE_FISHER_DEFAULT )) } return(.createDesign( designClass = designClass, kMax = as.integer(kMax), alpha = C_ALPHA_DEFAULT, beta = C_BETA_DEFAULT, sided = 1, informationRates = informationRates, futilityBounds = futilityBounds, typeOfDesign = C_DEFAULT_TYPE_OF_DESIGN, deltaWT = 0, optimizationCriterion = C_OPTIMIZATION_CRITERION_DEFAULT, gammaA = 1, typeBetaSpending = C_TYPE_OF_DESIGN_BS_NONE, userAlphaSpending = NA_real_, userBetaSpending = NA_real_, gammaB = 1, tolerance = 1e-06)) } rpact/inst/tests/testthat/helper-f_analysis_survival.R0000644000176200001440000000546013566731557023054 0ustar liggesusers###################################################################################### # # # -- Unit test helper functions -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### testGetAnalysisResultsPlotData <- function(x, ..., nPlanned = NA_real_, stage = x$getNumberOfStages(), allocationRatioPlanned = NA_real_) { plotArgs <- .getAnalysisResultsPlotArguments(x = x, nPlanned = nPlanned, stage = stage, allocationRatioPlanned = allocationRatioPlanned) if (x$getDataInput()$isDatasetMeans()) { assumedStDev <- .getOptionalArgument("assumedStDev", ...) 
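# If 'assumedStDev' was not supplied via '...', take it from the analysis
# results object 'x' and forward it explicitly; otherwise it simply travels
# through '...' into the final .getConditionalPowerPlot() call below.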
if (is.null(assumedStDev)) { assumedStDev <- x$assumedStDev return(.getConditionalPowerPlot(stageResults = plotArgs$stageResults, nPlanned = plotArgs$nPlanned, stage = plotArgs$stage, allocationRatioPlanned = plotArgs$allocationRatioPlanned, assumedStDev = assumedStDev, ...)) } } else if (x$getDataInput()$isDatasetRates()) { pi2 <- .getOptionalArgument("pi2", ...) if (is.null(pi2)) { pi2 <- x$pi2 return(.getConditionalPowerPlot(stageResults = plotArgs$stageResults, nPlanned = plotArgs$nPlanned, stage = plotArgs$stage, allocationRatioPlanned = plotArgs$allocationRatioPlanned, pi2 = pi2, ...)) } } return(.getConditionalPowerPlot(stageResults = plotArgs$stageResults, nPlanned = plotArgs$nPlanned, stage = plotArgs$stage, allocationRatioPlanned = plotArgs$allocationRatioPlanned, ...)) } rpact/inst/tests/testthat/helper-f_analysis.R0000644000176200001440000000737513357572731021123 0ustar liggesusers###################################################################################### # # # -- Unit test helper functions -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### # Example in book p. 207f, one-treatment case, variance known getTestSettings1 = function() { kMax = 2 nActual = c(20, 60) return(list( alpha = 0.025, kMax = kMax, typeOfDesign = C_TYPE_OF_DESIGN_WT, deltaWT = 0.25, fisherMethod = "equalAlpha", informationRates = (1 : kMax) / kMax, futilityBounds = c(stats::qnorm(0.7)), alpha0Vec = c(1), sided = 1, nActual = nActual, dataExample = DatasetMeans(dataFrame = data.frame(stage = (1 : kMax), n1 = c(20, 20), n2 = nActual, means1 = c(0.32, 0.35), means2 = c(1.92, 0.56), stds1 = c(1, 1), stds2 = c(1, 1))), stage = kMax )) } getTestSettings2 = function() { kMax = 4 return(list( alpha = 0.025, kMax = kMax, typeOfDesign = C_TYPE_OF_DESIGN_WT, deltaWT = 0.25, fisherMethod = "equalAlpha", informationRates = (1 : kMax) / kMax, sided = 1, futilityBoundsForPower = c(-0.5, 0, 0.5), futilityBounds = rep(C_FUTILITY_BOUNDS_DEFAULT, kMax - 1), alpha0Vec = c(0.7, 0.6, 0.5), alpha0Vec = rep(1, kMax - 1), nPlanned = rep(11, kMax), nActual = rep(11, kMax), dataExample = DatasetMeans(dataFrame = data.frame(stage = (1 : kMax), n1 = c(8, 10, 9, 11), n2 = c(11, 13, 12, 13), means1 = c(323, 514, 511, 611), means2 = c(452, 561, 635, 698), stds1 = c(111, 131, 145, 111), stds2 = c(118, 117, 104, 119))), stage = kMax )) } getTestSettings3 = function() { kMax = 4 return(list( alpha = 0.025, kMax = kMax, typeOfDesign = C_TYPE_OF_DESIGN_WT_OPTIMUM, deltaWT = 0.25, fisherMethod = "equalAlpha", informationRates = (1 : kMax) / kMax, sided = 1, futilityBoundsForPower = c(-0.5, 0, 0.5), futilityBounds = rep(C_FUTILITY_BOUNDS_DEFAULT, kMax - 1), alpha0Vec = c(0.7, 0.6, 0.5), alpha0Vec = rep(1, kMax - 1), nPlanned = rep(11, kMax), nActual = rep(11, kMax), dataExample = DatasetMeans(dataFrame = data.frame(stage = (1 : kMax), n1 = c(8, 10, 9, 11), n2 = c(11, 13, 12, 13), means1 = c(323, 514, 511, 611), means2 = c(452, 561, 635, 698), stds1 = c(111, 131, 145, 111), stds2 = c(118, 
117, 104, 119))), stage = kMax )) } rpact/inst/tests/testthat/test-f_analysis_base_means.R0000644000176200001440000027110613574374172022773 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 25 November 2019, 11:23:25 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing the analysis means functionality for one treatment") test_that("'getAnalysisResults' for group sequential design and a dataset of one mean per stage (bindingFutility = TRUE)", { dataExample1 <- getDataset( n = c(120, 130, 130), means = c(0.45, 0.51, 0.45) * 100, stDevs = c(1.3, 1.4, 1.2) * 100 ) design1 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, futilityBounds = rep(0.5244, 3), bindingFutility = TRUE, typeOfDesign = "WT", deltaWT = 0.4) # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} # @refFS[Formula]{fs:testStatisticsGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCIOneMean} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneMeanEffect} result1 <- getAnalysisResults(design = design1, dataInput = dataExample1, nPlanned = 130, thetaH1 = 50, assumedStDev = 100, thetaH0 = 10) result1 result1$.design$alphaSpent #plot(result1, thetaRange = c(0, 100)) dataExample1b <- rpact::getDataset( n = c(120, 130, 130), means = c(0.45, 0.51, 0.45) * 100, stDevs = c(1.3, 1.4, 1.2) * 100 ) design1b <- rpact::getDesignGroupSequential(kMax = 4, alpha = 0.025, futilityBounds = rep(0.5244, 3), bindingFutility = TRUE, typeOfDesign = "WT", deltaWT = 0.4) # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} # @refFS[Formula]{fs:testStatisticsGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCIOneMean} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneMeanEffect} result1b <- rpact::getAnalysisResults(design = design1b, dataInput = dataExample1b, nPlanned = 130, thetaH1 = 50, assumedStDev = 100, thetaH0 = 10) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'result1' with expected results ## expect_equal(result1$.design$stages, c(1, 2, 3, 4)) expect_equal(result1$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result1$.design$criticalValues, c(2.4958485, 2.328709, 2.2361766, 2.1727623), tolerance = 1e-07) expect_equal(result1$.design$futilityBounds, c(0.5244, 0.5244, 0.5244), tolerance = 1e-07) expect_equal(result1$.design$alphaSpent, c(0.0062828133, 
0.013876673, 0.02015684, 0.025), tolerance = 1e-07) expect_equal(result1$.design$stageLevels, c(0.0062828133, 0.0099372444, 0.012670104, 0.014899106), tolerance = 1e-07) expect_equal(result1$.stageResults$effectSizes, c(45, 48.12, 47.052632, NA_real_), tolerance = 1e-07) expect_equal(result1$.stageResults$testStatistics, c(2.9492753, 3.3390852, 3.3255117, NA_real_), tolerance = 1e-07) expect_equal(result1$.stageResults$pValues, c(0.0019178249, 0.00054956317, 0.00057478599, NA_real_), tolerance = 1e-07) expect_equal(result1$testActions, c("reject and stop", "reject and stop", "reject and stop", NA_character_)) expect_equal(result1$thetaH0, 10) expect_equal(result1$thetaH1, 50) expect_equal(result1$assumedStDev, 100) expect_equal(result1$conditionalRejectionProbabilities, c(0.4390924, 0.99186839, 0.99999982, NA_real_), tolerance = 1e-07) expect_equal(result1$nPlanned, c(NA_real_, NA_real_, NA_real_, 130)) expect_equal(result1$allocationRatioPlanned, 1) expect_equal(result1$conditionalPower, c(NA_real_, NA_real_, NA_real_, 1), tolerance = 1e-07) expect_equal(result1$repeatedConfidenceIntervalLowerBounds, c(14.924587, 28.099918, 32.086386, NA_real_), tolerance = 1e-07) expect_equal(result1$repeatedConfidenceIntervalUpperBounds, c(75.075413, 68.140082, 62.018878, NA_real_), tolerance = 1e-07) expect_equal(result1$repeatedPValues, c(0.010254306, 3.704879e-05, 1.572203e-06, NA_real_), tolerance = 1e-07) expect_equal(result1$finalStage, 1) expect_equal(result1$finalPValues, c(0.0019178249, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$finalConfidenceIntervalLowerBounds, c(21.740476, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$finalConfidenceIntervalUpperBounds, c(68.259524, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$medianUnbiasedEstimates, c(45, NA_real_, NA_real_, NA_real_)) expect_equal(result1$normalApproximation, FALSE) expect_equal(result1$equalVariances, TRUE) expect_equal(result1$directionUpper, TRUE) expect_equal(result1$overallTestStatistics, c(2.9492753, 4.4628381, 5.558203, NA_real_), tolerance = 1e-07) expect_equal(result1$overallPValues, c(0.0019178249, 6.1303223e-06, 2.5741923e-08, NA_real_), tolerance = 1e-07) }) test_that("'getStageResults' for group sequential design and a dataset of one mean per stage (bindingFutility = TRUE)", { .skipTestifDisabled() dataExample1 <- getDataset( n = c(20, 30, 30), means = c(0.45, 0.51, 0.45) * 100, stDevs = c(1.3, 1.4, 1.2) * 100 ) design1 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, futilityBounds = rep(0.5244, 3), bindingFutility = TRUE, typeOfDesign = "WT", deltaWT = 0.4) # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} stageResults1 <- getStageResults(design1, dataExample1, thetaH0 = 10, stage = 2) ## ## Comparison of the results of StageResultsMeans object 'stageResults1' with expected results ## expect_equal(stageResults1$overallTestStatistics, c(1.2040366, 2.025312, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$overallPValues, c(0.12168078, 0.02415027, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$overallMeans, c(45, 48.6, 47.25), tolerance = 1e-07) expect_equal(stageResults1$overallStDevs, c(130, 134.76601, 128.66279), tolerance = 1e-07) expect_equal(stageResults1$overallSampleSizes, c(20, 50)) expect_equal(stageResults1$testStatistics, c(1.2040366, 1.6040446, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$pValues, c(0.12168078, 0.059770605, 
NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$effectSizes, c(45, 48.6, NA_real_, NA_real_), tolerance = 1e-07) # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneMeanEffect} plotData1 <- testGetStageResultsPlotData(stageResults1, stage = 2, nPlanned = c(30, 20), thetaRange = seq(10, 80, 5), assumedStDev = 100) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80)) expect_equal(plotData1$condPowerValues, c(0.20492816, 0.31007642, 0.43512091, 0.5683138, 0.6950205, 0.80243295, 0.88343665, 0.93770927, 0.96998259, 0.98700232, 0.99495733, 0.99825113, 0.99945881, 0.9998508, 0.9999634), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.12861339, 0.21139553, 0.32435073, 0.46456173, 0.62112851, 0.77522713, 0.90320416, 0.98231862, 0.99730568, 0.94517816, 0.83619688, 0.69057821, 0.53238607, 0.38313335, 0.25738469), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "Effect size") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 50, std = 100") }) test_that("'getAnalysisResults' for inverse normal and Fisher designs and a dataset of one mean per stage (bindingFutility = TRUE)", { .skipTestifDisabled() dataExample1 <- getDataset( n = c(20, 30, 30), means = c(0.45, 0.51, 0.45) * 100, stDevs = c(1.3, 1.4, 1.2) * 100 ) design2 <- getDesignInverseNormal(kMax = 4, alpha = 0.025, futilityBounds = rep(0.5244, 3), bindingFutility = TRUE, typeOfDesign = "WT", deltaWT = 0.4) # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} stageResults2 <- getStageResults(design2, dataExample1, thetaH0 = 10, stage = 2) ## ## Comparison of the results of StageResultsMeans object 'stageResults2' with expected results ## expect_equal(stageResults2$overallTestStatistics, c(1.2040366, 2.025312, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$overallPValues, c(0.12168078, 0.02415027, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$overallMeans, c(45, 48.6, 47.25), tolerance = 1e-07) expect_equal(stageResults2$overallStDevs, c(130, 134.76601, 128.66279), tolerance = 1e-07) expect_equal(stageResults2$overallSampleSizes, c(20, 50)) expect_equal(stageResults2$testStatistics, c(1.2040366, 1.6040446, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$pValues, c(0.12168078, 0.059770605, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$effectSizes, c(45, 48.6, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$combInverseNormal, c(1.1666257, 1.9256836, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$weightsInverseNormal, c(0.5, 0.5, 0.5, 0.5), tolerance = 1e-07) # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneMeanEffect} plotData2 <- testGetStageResultsPlotData(stageResults2, stage = 2, nPlanned = c(30, 20), thetaRange = seq(10, 80, 5), assumedStDev = 100) ## ## Comparison of the results of list object 'plotData2' with expected results ## expect_equal(plotData2$xValues, c(10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80)) 
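# Note: the likelihood values checked below peak near the observed overall
# effect (48.6), whereas the conditional power values increase monotonically
# with the assumed effect size.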
expect_equal(plotData2$condPowerValues, c(0.18776792, 0.28883478, 0.41147918, 0.5447391, 0.67401995, 0.78575942, 0.87165951, 0.93031941, 0.96586805, 0.98497137, 0.99406923, 0.99790729, 0.999341, 0.99981509, 0.99995383), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(0.12861339, 0.21139553, 0.32435073, 0.46456173, 0.62112851, 0.77522713, 0.90320416, 0.98231862, 0.99730568, 0.94517816, 0.83619688, 0.69057821, 0.53238607, 0.38313335, 0.25738469), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power Plot with Likelihood") expect_equal(plotData2$xlab, "Effect size") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 50, std = 100") # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCIOneMean} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneMeanEffect} result2 <- getAnalysisResults(design = design2, dataInput = dataExample1, nPlanned = 30, thetaH1 = 50, assumedStDev = 100, thetaH0 = 10) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'result2' with expected results ## expect_equal(result2$.design$stages, c(1, 2, 3, 4)) expect_equal(result2$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result2$.design$criticalValues, c(2.4958485, 2.328709, 2.2361766, 2.1727623), tolerance = 1e-07) expect_equal(result2$.design$futilityBounds, c(0.5244, 0.5244, 0.5244), tolerance = 1e-07) expect_equal(result2$.design$alphaSpent, c(0.0062828133, 0.013876673, 0.02015684, 0.025), tolerance = 1e-07) expect_equal(result2$.design$stageLevels, c(0.0062828133, 0.0099372444, 0.012670104, 0.014899106), tolerance = 1e-07) expect_equal(result2$.stageResults$effectSizes, c(45, 48.6, 47.25, NA_real_), tolerance = 1e-07) expect_equal(result2$.stageResults$testStatistics, c(1.2040366, 1.6040446, 1.5975241, NA_real_), tolerance = 1e-07) expect_equal(result2$.stageResults$pValues, c(0.12168078, 0.059770605, 0.060494785, NA_real_), tolerance = 1e-07) expect_equal(result2$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(result2$thetaH0, 10) expect_equal(result2$thetaH1, 50) expect_equal(result2$assumedStDev, 100) expect_equal(result2$conditionalRejectionProbabilities, c(0.054544013, 0.18776792, 0.47147471, NA_real_), tolerance = 1e-07) expect_equal(result2$nPlanned, c(NA_real_, NA_real_, NA_real_, 30)) expect_equal(result2$allocationRatioPlanned, 1) expect_equal(result2$conditionalPower, c(NA_real_, NA_real_, NA_real_, 0.98296857), tolerance = 1e-07) expect_equal(result2$repeatedConfidenceIntervalLowerBounds, c(-35.118855, 1.5735511, 13.58964, NA_real_), tolerance = 1e-07) expect_equal(result2$repeatedConfidenceIntervalUpperBounds, c(125.11886, 94.865725, 80.385626, NA_real_), tolerance = 1e-07) expect_equal(result2$repeatedPValues, c(0.18164628, 0.056608473, 0.014183052, NA_real_), tolerance = 1e-07) expect_equal(result2$finalStage, 3) expect_equal(result2$finalPValues, c(NA_real_, NA_real_, 0.016754234, NA_real_), tolerance = 1e-07) expect_equal(result2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 13.011822, NA_real_), tolerance 
= 1e-07) expect_equal(result2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 82.848073, NA_real_), tolerance = 1e-07) expect_equal(result2$medianUnbiasedEstimates, c(NA_real_, NA_real_, 45.714272, NA_real_), tolerance = 1e-07) expect_equal(result2$normalApproximation, FALSE) expect_equal(result2$equalVariances, TRUE) expect_equal(result2$directionUpper, TRUE) expect_equal(result2$combinationTestStatistics, c(1.1666257, 1.9256836, 2.4675727, NA_real_), tolerance = 1e-07) design3 <- getDesignFisher(kMax = 4, alpha = 0.025, alpha0Vec = rep(0.4, 3), bindingFutility = TRUE) # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} stageResults3 <- getStageResults(design3, dataExample1, thetaH0 = 10, stage = 2) ## ## Comparison of the results of StageResultsMeans object 'stageResults3' with expected results ## expect_equal(stageResults3$overallTestStatistics, c(1.2040366, 2.025312, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$overallPValues, c(0.12168078, 0.02415027, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$overallMeans, c(45, 48.6, 47.25), tolerance = 1e-07) expect_equal(stageResults3$overallStDevs, c(130, 134.76601, 128.66279), tolerance = 1e-07) expect_equal(stageResults3$overallSampleSizes, c(20, 50)) expect_equal(stageResults3$testStatistics, c(1.2040366, 1.6040446, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$pValues, c(0.12168078, 0.059770605, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$effectSizes, c(45, 48.6, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$combFisher, c(0.12168078, 0.007272934, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$weightsFisher, c(1, 1, 1, 1)) # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} # @refFS[Formula]{fs:definitionRCIFisherCombination} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:finalPValueFisherCombinationTest} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} result3 <- getAnalysisResults(design = design3, dataInput = dataExample1, thetaH0 = 10, nPlanned = 30, thetaH1 = 50, assumedStDev = 100, seed = 123456789) ## ## Comparison of the results of AnalysisResultsFisher object 'result3' with expected results ## expect_equal(result3$.design$stages, c(1, 2, 3, 4)) expect_equal(result3$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result3$.design$criticalValues, c(0.013928445, 0.0019196833, 0.00034092609, 6.8425459e-05), tolerance = 1e-07) expect_equal(result3$.design$alpha0Vec, c(0.4, 0.4, 0.4), tolerance = 1e-07) expect_equal(result3$.design$alphaSpent, c(0.013928445, 0.020373842, 0.0235151, 0.025), tolerance = 1e-07) expect_equal(result3$.design$stageLevels, c(0.013928445, 0.013928445, 0.013928445, 0.013928445), tolerance = 1e-07) expect_equal(result3$.stageResults$effectSizes, c(45, 48.6, 47.25, NA_real_), tolerance = 1e-07) expect_equal(result3$.stageResults$testStatistics, c(1.2040366, 1.6040446, 1.5975241, NA_real_), tolerance = 1e-07) expect_equal(result3$.stageResults$pValues, c(0.12168078, 0.059770605, 0.060494785, NA_real_), tolerance = 1e-07) expect_equal(result3$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(result3$thetaH0, 10) expect_equal(result3$thetaH1, 50) expect_equal(result3$assumedStDev, 100) 
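# For the Fisher design, the combination test statistic is the product of the
# stage-wise p-values, e.g. 0.12168078 * 0.059770605 = 0.007272934 at stage 2
# (cf. 'combFisher' above and 'combinationTestStatistics' below).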
expect_equal(result3$conditionalRejectionProbabilities, c(0.029249394, 0.067046868, 0.15552139, NA_real_), tolerance = 1e-07) expect_equal(result3$nPlanned, c(NA_real_, NA_real_, NA_real_, 30)) expect_equal(result3$allocationRatioPlanned, 1) expect_equal(result3$conditionalPower, c(NA_real_, NA_real_, NA_real_, 0.88057256), tolerance = 1e-07) expect_equal(result3$repeatedConfidenceIntervalLowerBounds, c(-24.226675, 0.014834887, 8.7947814, NA_real_), tolerance = 1e-07) expect_equal(result3$repeatedConfidenceIntervalUpperBounds, c(114.22668, 96.713521, 85.125684, NA_real_), tolerance = 1e-07) expect_equal(result3$repeatedPValues, c(0.165096, 0.068572907, 0.029926287, NA_real_), tolerance = 1e-07) expect_equal(result3$finalStage, NA_integer_) expect_equal(result3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$normalApproximation, FALSE) expect_equal(result3$equalVariances, TRUE) expect_equal(result3$directionUpper, TRUE) expect_equal(result3$combinationTestStatistics, c(0.12168078, 0.007272934, 0.00043997458, NA_real_), tolerance = 1e-07) }) test_that("'getAnalysisResults' for different designs and a dataset of one mean per stage (bindingFutility = FALSE)", { .skipTestifDisabled() dataExample2 <- getDataset( n = c(20, 30, 30), means = c(0.45, 0.51, 0.45) * 100, stDevs = c(1.3, 1.4, 1.2) * 100 ) design4 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.4) stageResults1 <- getStageResults(design4, dataExample2, thetaH0 = 10, stage = 2) ## ## Comparison of the results of StageResultsMeans object 'stageResults1' with expected results ## expect_equal(stageResults1$overallTestStatistics, c(1.2040366, 2.025312, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$overallPValues, c(0.12168078, 0.02415027, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$overallMeans, c(45, 48.6, 47.25), tolerance = 1e-07) expect_equal(stageResults1$overallStDevs, c(130, 134.76601, 128.66279), tolerance = 1e-07) expect_equal(stageResults1$overallSampleSizes, c(20, 50)) expect_equal(stageResults1$testStatistics, c(1.2040366, 1.6040446, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$pValues, c(0.12168078, 0.059770605, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$effectSizes, c(45, 48.6, NA_real_, NA_real_), tolerance = 1e-07) plotData1 <- testGetStageResultsPlotData(stageResults1, stage = 2, nPlanned = c(30, 20), thetaRange = seq(10, 80, 5), assumedStDev = 100) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80)) expect_equal(plotData1$condPowerValues, c(0.17749108, 0.27572975, 0.39647686, 0.52937537, 0.65998377, 0.77434444, 0.86340967, 0.9250277, 0.96285863, 0.98345513, 0.99339288, 0.99764031, 0.99924778, 0.9997863, 0.99994597), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.12861339, 0.21139553, 0.32435073, 0.46456173, 0.62112851, 0.77522713, 0.90320416, 0.98231862, 0.99730568, 0.94517816, 0.83619688, 0.69057821, 0.53238607, 0.38313335, 0.25738469), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") 
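# The subtitle checked below reflects the planned remaining sample size:
# nPlanned = c(30, 20) yields 50 remaining subjects at stage 2.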
expect_equal(plotData1$xlab, "Effect size") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 50, std = 100") # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} # @refFS[Formula]{fs:testStatisticsGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCIOneMean} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneMeanEffect} result1 <- getAnalysisResults(design = design4, dataInput = dataExample2, thetaH0 = 10) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'result1' with expected results ## expect_equal(result1$.design$stages, c(1, 2, 3, 4)) expect_equal(result1$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result1$.design$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(result1$.design$futilityBounds, c(-6, -6, -6)) expect_equal(result1$.design$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(result1$.design$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) expect_equal(result1$.stageResults$effectSizes, c(45, 48.6, 47.25, NA_real_), tolerance = 1e-07) expect_equal(result1$.stageResults$testStatistics, c(1.2040366, 1.6040446, 1.5975241, NA_real_), tolerance = 1e-07) expect_equal(result1$.stageResults$pValues, c(0.12168078, 0.059770605, 0.060494785, NA_real_), tolerance = 1e-07) expect_equal(result1$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(result1$thetaH0, 10) expect_equal(result1$thetaH1, 47.25, tolerance = 1e-07) expect_equal(result1$assumedStDev, 128.66279, tolerance = 1e-07) expect_equal(result1$conditionalRejectionProbabilities, c(0.046837862, 0.17749108, 0.46585158, NA_real_), tolerance = 1e-07) expect_equal(result1$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$allocationRatioPlanned, 1) expect_equal(result1$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$repeatedConfidenceIntervalLowerBounds, c(-37.7517, 1.3684534, 13.520683, NA_real_), tolerance = 1e-07) expect_equal(result1$repeatedConfidenceIntervalUpperBounds, c(127.7517, 95.831547, 80.979317, NA_real_), tolerance = 1e-07) expect_equal(result1$repeatedPValues, c(0.28074785, 0.063917079, 0.013597508, NA_real_), tolerance = 1e-07) expect_equal(result1$finalStage, 3) expect_equal(result1$finalPValues, c(NA_real_, NA_real_, 0.014875116, NA_real_), tolerance = 1e-07) expect_equal(result1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 13.605112, NA_real_), tolerance = 1e-07) expect_equal(result1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 73.306215, NA_real_), tolerance = 1e-07) expect_equal(result1$medianUnbiasedEstimates, c(NA_real_, NA_real_, 44.286284, NA_real_), tolerance = 1e-07) expect_equal(result1$normalApproximation, FALSE) expect_equal(result1$equalVariances, TRUE) expect_equal(result1$directionUpper, TRUE) expect_equal(result1$overallTestStatistics, c(1.2040366, 2.025312, 2.5895142, NA_real_), tolerance = 1e-07) expect_equal(result1$overallPValues, c(0.12168078, 0.02415027, 0.0057194973, NA_real_), tolerance = 1e-07) design5 <- 
getDesignInverseNormal(kMax = 4, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.4) ## ## Comparison of the results of TrialDesignInverseNormal object 'design5' with expected results ## expect_equal(design5$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(design5$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(design5$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) stageResults2 <- getStageResults(design5, dataExample2, thetaH0 = 10, stage = 2) ## ## Comparison of the results of StageResultsMeans object 'stageResults2' with expected results ## expect_equal(stageResults2$overallTestStatistics, c(1.2040366, 2.025312, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$overallPValues, c(0.12168078, 0.02415027, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$overallMeans, c(45, 48.6, 47.25), tolerance = 1e-07) expect_equal(stageResults2$overallStDevs, c(130, 134.76601, 128.66279), tolerance = 1e-07) expect_equal(stageResults2$overallSampleSizes, c(20, 50)) expect_equal(stageResults2$testStatistics, c(1.2040366, 1.6040446, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$pValues, c(0.12168078, 0.059770605, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$effectSizes, c(45, 48.6, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$combInverseNormal, c(1.1666257, 1.9256836, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$weightsInverseNormal, c(0.5, 0.5, 0.5, 0.5), tolerance = 1e-07) plotData2 <- testGetStageResultsPlotData(stageResults2, stage = 2, nPlanned = c(30, 20), thetaRange = seq(10, 80, 5), assumedStDev = 100) ## ## Comparison of the results of list object 'plotData2' with expected results ## expect_equal(plotData2$xValues, c(10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80)) expect_equal(plotData2$condPowerValues, c(0.16190673, 0.25578292, 0.37352456, 0.50571691, 0.63820191, 0.75647342, 0.85036726, 0.91657302, 0.95799593, 0.98097594, 0.99227321, 0.99719262, 0.99908938, 0.99973673, 0.99993225), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(0.12861339, 0.21139553, 0.32435073, 0.46456173, 0.62112851, 0.77522713, 0.90320416, 0.98231862, 0.99730568, 0.94517816, 0.83619688, 0.69057821, 0.53238607, 0.38313335, 0.25738469), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power Plot with Likelihood") expect_equal(plotData2$xlab, "Effect size") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 50, std = 100") # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCIOneMean} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneMeanEffect} result2 <- getAnalysisResults(design = design5, dataInput = dataExample2, thetaH0 = 10) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'result2' with expected results ## expect_equal(result2$.design$stages, c(1, 2, 3, 4)) expect_equal(result2$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) 
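# With the equally spaced information rates checked above, each inverse normal
# stage weight equals sqrt(1/4) = 0.5; this matches the 'weightsInverseNormal'
# expectation for stageResults2.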
expect_equal(result2$.design$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(result2$.design$futilityBounds, c(-6, -6, -6)) expect_equal(result2$.design$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(result2$.design$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) expect_equal(result2$.stageResults$effectSizes, c(45, 48.6, 47.25, NA_real_), tolerance = 1e-07) expect_equal(result2$.stageResults$testStatistics, c(1.2040366, 1.6040446, 1.5975241, NA_real_), tolerance = 1e-07) expect_equal(result2$.stageResults$pValues, c(0.12168078, 0.059770605, 0.060494785, NA_real_), tolerance = 1e-07) expect_equal(result2$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(result2$thetaH0, 10) expect_equal(result2$thetaH1, 47.25, tolerance = 1e-07) expect_equal(result2$assumedStDev, 128.66279, tolerance = 1e-07) expect_equal(result2$conditionalRejectionProbabilities, c(0.046837862, 0.16190673, 0.42383694, NA_real_), tolerance = 1e-07) expect_equal(result2$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$allocationRatioPlanned, 1) expect_equal(result2$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$repeatedConfidenceIntervalLowerBounds, c(-37.7517, 0.20066782, 12.631309, NA_real_), tolerance = 1e-07) expect_equal(result2$repeatedConfidenceIntervalUpperBounds, c(127.7517, 96.240714, 81.345632, NA_real_), tolerance = 1e-07) expect_equal(result2$repeatedPValues, c(0.28074785, 0.070627118, 0.016069426, NA_real_), tolerance = 1e-07) expect_equal(result2$finalStage, 3) expect_equal(result2$finalPValues, c(NA_real_, NA_real_, 0.015631623, NA_real_), tolerance = 1e-07) expect_equal(result2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 13.353451, NA_real_), tolerance = 1e-07) expect_equal(result2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 73.21831, NA_real_), tolerance = 1e-07) expect_equal(result2$medianUnbiasedEstimates, c(NA_real_, NA_real_, 44.191393, NA_real_), tolerance = 1e-07) expect_equal(result2$normalApproximation, FALSE) expect_equal(result2$equalVariances, TRUE) expect_equal(result2$directionUpper, TRUE) expect_equal(result2$combinationTestStatistics, c(1.1666257, 1.9256836, 2.4675727, NA_real_), tolerance = 1e-07) design6 <- getDesignFisher(kMax = 4, alpha = 0.025) ## ## Comparison of the results of TrialDesignFisher object 'design6' with expected results ## expect_equal(design6$alphaSpent, c(0.010404785, 0.016661203, 0.021286477, 0.025), tolerance = 1e-07) expect_equal(design6$criticalValues, c(0.010404785, 0.0013703718, 0.00023506069, 4.5812899e-05), tolerance = 1e-07) expect_equal(design6$stageLevels, c(0.010404785, 0.010404785, 0.010404785, 0.010404785), tolerance = 1e-07) expect_equal(design6$scale, c(1, 1, 1)) expect_equal(design6$nonStochasticCurtailment, FALSE) stageResults3 <- getStageResults(design6, dataExample2, thetaH0 = 10, stage = 2) ## ## Comparison of the results of StageResultsMeans object 'stageResults3' with expected results ## expect_equal(stageResults3$overallTestStatistics, c(1.2040366, 2.025312, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$overallPValues, c(0.12168078, 0.02415027, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$overallMeans, c(45, 48.6, 47.25), tolerance = 1e-07) expect_equal(stageResults3$overallStDevs, c(130, 134.76601, 128.66279), tolerance = 1e-07) 
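# 'overallStDevs' are the cumulative pooled standard deviations: 134.76601
# pools the stage 1 data (n = 20, sd = 130) with the stage 2 data
# (n = 30, sd = 140) of dataExample2.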
expect_equal(stageResults3$overallSampleSizes, c(20, 50)) expect_equal(stageResults3$testStatistics, c(1.2040366, 1.6040446, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$pValues, c(0.12168078, 0.059770605, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$effectSizes, c(45, 48.6, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$combFisher, c(0.12168078, 0.007272934, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults3$weightsFisher, c(1, 1, 1, 1)) # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} # @refFS[Formula]{fs:definitionRCIFisherCombination} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:finalPValueFisherCombinationTest} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} result3 <- getAnalysisResults(design = design6, dataInput = dataExample2, stage = 2, thetaH0 = 10, nPlanned = c(30, 20), thetaH1 = 50, assumedStDev = 100, iterations = 800, seed = 31082018) ## ## Comparison of the results of AnalysisResultsFisher object 'result3' with expected results ## expect_equal(result3$.design$stages, c(1, 2, 3, 4)) expect_equal(result3$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result3$.design$criticalValues, c(0.010404785, 0.0013703718, 0.00023506069, 4.5812899e-05), tolerance = 1e-07) expect_equal(result3$.design$alpha0Vec, c(1, 1, 1)) expect_equal(result3$.design$alphaSpent, c(0.010404785, 0.016661203, 0.021286477, 0.025), tolerance = 1e-07) expect_equal(result3$.design$stageLevels, c(0.010404785, 0.010404785, 0.010404785, 0.010404785), tolerance = 1e-07) expect_equal(result3$.stageResults$effectSizes, c(45, 48.6, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$.stageResults$testStatistics, c(1.2040366, 1.6040446, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$.stageResults$pValues, c(0.12168078, 0.059770605, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(result3$thetaH0, 10) expect_equal(result3$thetaH1, 50) expect_equal(result3$assumedStDev, 100) expect_equal(result3$conditionalRejectionProbabilities, c(0.026695414, 0.053938868, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$nPlanned, c(NA_real_, NA_real_, 30, 20)) expect_equal(result3$allocationRatioPlanned, 1) expect_equal(result3$repeatedConfidenceIntervalLowerBounds, c(-28.274837, -2.3519587, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$repeatedConfidenceIntervalUpperBounds, c(118.27484, 99.090567, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$repeatedPValues, c(0.23830752, 0.094039775, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$finalStage, NA_integer_) expect_equal(result3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$normalApproximation, FALSE) expect_equal(result3$equalVariances, TRUE) expect_equal(result3$directionUpper, TRUE) expect_equal(result3$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.6175, 0.84875), tolerance = 1e-07) expect_equal(result3$combinationTestStatistics, 
c(0.12168078, 0.007272934, NA_real_, NA_real_), tolerance = 1e-07) }) context("Testing the analysis means functionality for two treatments") test_that("'getAnalysisResults' for a Fisher design and a dataset of two means per stage", { .skipTestifDisabled() # note: if third stage value of means1 (4.5) increases, lower bound of RCI does not increase design7 <- getDesignFisher(kMax = 4, alpha = 0.025) ## ## Comparison of the results of TrialDesignFisher object 'design7' with expected results ## expect_equal(design7$alphaSpent, c(0.010404785, 0.016661203, 0.021286477, 0.025), tolerance = 1e-07) expect_equal(design7$criticalValues, c(0.010404785, 0.0013703718, 0.00023506069, 4.5812899e-05), tolerance = 1e-07) expect_equal(design7$stageLevels, c(0.010404785, 0.010404785, 0.010404785, 0.010404785), tolerance = 1e-07) expect_equal(design7$scale, c(1, 1, 1)) expect_equal(design7$nonStochasticCurtailment, FALSE) dataExample3 <- getDataset( n1 = c(23, 13, 22, 13), n2 = c(22, 11, 22, 11), means1 = c(2.7, 1.5, 4.5, 2.5) * 100, means2 = c(1, 1.1, 1.3, 1) * 100, stds1 = c(1.3, 2.4, 2.2, 1.3) * 100, stds2 = c(1.2, 2.2, 2.1, 1.3) * 100 ) # @refFS[Formula]{fs:testStatisticTwoMeansEqualVariances} # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} # @refFS[Formula]{fs:definitionRCIFisherCombination} # @refFS[Formula]{fs:definitionRCIwithFutilityFisherCombination} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:finalPValueFisherCombinationTest} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} result <- getAnalysisResults(design = design7, dataInput = dataExample3, equalVariances = TRUE, directionUpper = TRUE, seed = 123456789) ## ## Comparison of the results of AnalysisResultsFisher object 'result' with expected results ## expect_equal(result$.design$stages, c(1, 2, 3, 4)) expect_equal(result$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result$.design$criticalValues, c(0.010404785, 0.0013703718, 0.00023506069, 4.5812899e-05), tolerance = 1e-07) expect_equal(result$.design$alpha0Vec, c(1, 1, 1)) expect_equal(result$.design$alphaSpent, c(0.010404785, 0.016661203, 0.021286477, 0.025), tolerance = 1e-07) expect_equal(result$.design$stageLevels, c(0.010404785, 0.010404785, 0.010404785, 0.010404785), tolerance = 1e-07) expect_equal(result$.stageResults$effectSizes, c(170, 123.33333, 197.37931, 188.47418), tolerance = 1e-07) expect_equal(result$.stageResults$testStatistics, c(4.552582, 0.42245245, 4.9350374, 2.8165036), tolerance = 1e-07) expect_equal(result$.stageResults$pValues, c(2.1583718e-05, 0.33839752, 6.5708867e-06, 0.0050256902), tolerance = 1e-07) expect_equal(result$testActions, c("reject and stop", "reject and stop", "reject and stop", "reject")) expect_equal(result$thetaH0, 0) expect_equal(result$thetaH1, 188.47418, tolerance = 1e-07) expect_equal(result$assumedStDev, 192.76382, tolerance = 1e-07) expect_equal(result$conditionalRejectionProbabilities, c(1, 1, 1, NA_real_)) expect_equal(result$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result$allocationRatioPlanned, 1) expect_equal(result$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result$repeatedConfidenceIntervalLowerBounds, c(80.389809, 58.773337, 126.21876, 121.44462), tolerance = 1e-07) expect_equal(result$repeatedConfidenceIntervalUpperBounds, c(259.61019, 232.56315, 252.86796, 238.01813), tolerance = 1e-07) expect_equal(result$repeatedPValues, 
c(6.2988707e-05, 0.00026325991, 1.9536724e-06, 1.9536724e-06), tolerance = 1e-07) expect_equal(result$finalStage, 1) expect_equal(result$finalPValues, c(2.1583718e-05, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result$finalConfidenceIntervalLowerBounds, c(96.812108, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result$finalConfidenceIntervalUpperBounds, c(243.18789, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result$medianUnbiasedEstimates, c(170, NA_real_, NA_real_, NA_real_)) expect_equal(result$normalApproximation, FALSE) expect_equal(result$equalVariances, TRUE) expect_equal(result$directionUpper, TRUE) expect_equal(result$combinationTestStatistics, c(2.1583718e-05, 7.3038765e-06, 4.7992944e-11, 2.4119767e-13), tolerance = 1e-07) }) test_that("'getAnalysisResults' for a group sequential design and a dataset of two means per stage, stages: default, 2, 3, and 4", { .skipTestifDisabled() dataExample4 <- getDataset( n1 = c(23, 13, 22, 13), n2 = c(22, 11, 22, 11), means1 = c(1.7, 1.5, 1.8, 2.5) * 100, means2 = c(1, 1.1, 1.3, 1) * 100, stds1 = c(1.3, 2.4, 2.2, 1.3) * 100, stds2 = c(1.2, 2.2, 2.1, 1.3) * 100 ) design8 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.4) ## ## Comparison of the results of TrialDesignGroupSequential object 'design8' with expected results ## expect_equal(design8$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(design8$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(design8$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) # @refFS[Formula]{fs:testStatisticTwoMeansEqualVariances} # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances} # @refFS[Formula]{fs:testStatisticsGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCITwoMeans} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoMeansEffect} result1 <- getAnalysisResults(design = design8, dataInput = dataExample4, equalVariances = TRUE, stage = 2, nPlanned = c(15, 15), thetaH0 = 0, thetaH1 = 130, assumedStDev = 100, allocationRatioPlanned = 2) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'result1' with expected results ## expect_equal(result1$.design$stages, c(1, 2, 3, 4)) expect_equal(result1$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result1$.design$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(result1$.design$futilityBounds, c(-6, -6, -6)) expect_equal(result1$.design$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(result1$.design$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) expect_equal(result1$.stageResults$effectSizes, c(70, 59.444444), tolerance = 1e-07) expect_equal(result1$.stageResults$testStatistics, c(1.8745926, 0.42245245, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$.stageResults$pValues, c(0.033826026, 0.33839752, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(result1$thetaH0, 0) 
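# Sketch of the conditional power logic checked below (hedged, under the
# usual normal approximation): given the interim z-value, rpact evaluates the
# rejection probability of the remaining stages at theta = thetaH1 relative to
# assumedStDev, where the information added per stage is roughly
# nPlanned_k * r / (1 + r)^2 for allocation ratio r (here r = 2 and
# nPlanned = c(15, 15)).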
expect_equal(result1$thetaH1, 130) expect_equal(result1$assumedStDev, 100) expect_equal(result1$conditionalRejectionProbabilities, c(0.12319684, 0.060559169, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$nPlanned, c(NA_real_, NA_real_, 15, 15)) expect_equal(result1$allocationRatioPlanned, 2) expect_equal(result1$conditionalPower, c(NA_real_, NA_real_, 0.67921715, 0.95627008), tolerance = 1e-07) expect_equal(result1$repeatedConfidenceIntervalLowerBounds, c(-30.185323, -38.955154, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$repeatedConfidenceIntervalUpperBounds, c(170.18532, 157.84404, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$repeatedPValues, c(0.10782416, 0.16254779, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$finalStage, NA_integer_) expect_equal(result1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$normalApproximation, FALSE) expect_equal(result1$equalVariances, TRUE) expect_equal(result1$directionUpper, TRUE) expect_equal(result1$overallTestStatistics, c(1.8745926, 1.4830004, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$overallPValues, c(0.033826026, 0.071381585, NA_real_, NA_real_), tolerance = 1e-07) # @refFS[Formula]{fs:testStatisticTwoMeansEqualVariances} # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances} # @refFS[Formula]{fs:testStatisticsGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCITwoMeans} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoMeansEffect} result4 <- getAnalysisResults(design = design8, dataInput = dataExample4, equalVariances = TRUE, stage = 3, nPlanned = 15, thetaH0 = 0, thetaH1 = 130, assumedStDev = 100, allocationRatioPlanned = 2) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'result4' with expected results ## expect_equal(result4$.design$stages, c(1, 2, 3, 4)) expect_equal(result4$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result4$.design$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(result4$.design$futilityBounds, c(-6, -6, -6)) expect_equal(result4$.design$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(result4$.design$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) expect_equal(result4$.stageResults$effectSizes, c(70, 59.444444, 55.310345), tolerance = 1e-07) expect_equal(result4$.stageResults$testStatistics, c(1.8745926, 0.42245245, 0.7710996, NA_real_), tolerance = 1e-07) expect_equal(result4$.stageResults$pValues, c(0.033826026, 0.33839752, 0.22248223, NA_real_), tolerance = 1e-07) expect_equal(result4$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(result4$thetaH0, 0) expect_equal(result4$thetaH1, 130) expect_equal(result4$assumedStDev, 100) expect_equal(result4$conditionalRejectionProbabilities, c(0.12319684, 0.060559169, 0.040934114, NA_real_), 
tolerance = 1e-07) expect_equal(result4$nPlanned, c(NA_real_, NA_real_, NA_real_, 15)) expect_equal(result4$allocationRatioPlanned, 2) expect_equal(result4$conditionalPower, c(NA_real_, NA_real_, NA_real_, 0.73680191), tolerance = 1e-07) expect_equal(result4$repeatedConfidenceIntervalLowerBounds, c(-30.185323, -38.955154, -25.969325, NA_real_), tolerance = 1e-07) expect_equal(result4$repeatedConfidenceIntervalUpperBounds, c(170.18532, 157.84404, 136.59001, NA_real_), tolerance = 1e-07) expect_equal(result4$repeatedPValues, c(0.10782416, 0.16254779, 0.12132816, NA_real_), tolerance = 1e-07) expect_equal(result4$finalStage, NA_integer_) expect_equal(result4$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result4$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result4$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result4$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result4$normalApproximation, FALSE) expect_equal(result4$equalVariances, TRUE) expect_equal(result4$directionUpper, TRUE) expect_equal(result4$overallTestStatistics, c(1.8745926, 1.4830004, 1.5863394, NA_real_), tolerance = 1e-07) expect_equal(result4$overallPValues, c(0.033826026, 0.071381585, 0.057753539, NA_real_), tolerance = 1e-07) # @refFS[Formula]{fs:testStatisticTwoMeansEqualVariances} # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances} # @refFS[Formula]{fs:testStatisticsGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCITwoMeans} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoMeansEffect} result7 <- getAnalysisResults(design = design8, dataInput = dataExample4, equalVariances = TRUE, stage = 4, nPlanned = numeric(0), thetaH0 = 0, thetaH1 = 130, assumedStDev = 100, allocationRatioPlanned = 2) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'result7' with expected results ## expect_equal(result7$.design$stages, c(1, 2, 3, 4)) expect_equal(result7$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result7$.design$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(result7$.design$futilityBounds, c(-6, -6, -6)) expect_equal(result7$.design$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(result7$.design$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) expect_equal(result7$.stageResults$effectSizes, c(70, 59.444444, 55.310345, 72.41784), tolerance = 1e-07) expect_equal(result7$.stageResults$testStatistics, c(1.8745926, 0.42245245, 0.7710996, 2.8165036), tolerance = 1e-07) expect_equal(result7$.stageResults$pValues, c(0.033826026, 0.33839752, 0.22248223, 0.0050256902), tolerance = 1e-07) expect_equal(result7$testActions, c("continue", "continue", "continue", "reject")) expect_equal(result7$thetaH0, 0) expect_equal(result7$thetaH1, 130) expect_equal(result7$assumedStDev, 100) expect_equal(result7$conditionalRejectionProbabilities, c(0.12319684, 0.060559169, 0.040934114, NA_real_), tolerance = 1e-07) expect_equal(result7$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result7$allocationRatioPlanned, 2) 
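# At the final stage (stage = 4 with nPlanned = numeric(0)) no conditional
# power is computed, so the conditionalPower entries below are NA; instead
# finalStage, the final p-value, and the final confidence interval based on
# the stage-wise ordering become available.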
expect_equal(result7$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result7$repeatedConfidenceIntervalLowerBounds, c(-30.185323, -38.955154, -25.969325, 3.8960985), tolerance = 1e-07) expect_equal(result7$repeatedConfidenceIntervalUpperBounds, c(170.18532, 157.84404, 136.59001, 140.93958), tolerance = 1e-07) expect_equal(result7$repeatedPValues, c(0.10782416, 0.16254779, 0.12132816, 0.017942439), tolerance = 1e-07) expect_equal(result7$finalStage, 4) expect_equal(result7$finalPValues, c(NA_real_, NA_real_, NA_real_, 0.022610692), tolerance = 1e-07) expect_equal(result7$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, 1.5235285), tolerance = 1e-07) expect_equal(result7$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, 127.93924), tolerance = 1e-07) expect_equal(result7$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, 66.58768), tolerance = 1e-07) expect_equal(result7$normalApproximation, FALSE) expect_equal(result7$equalVariances, TRUE) expect_equal(result7$directionUpper, TRUE) expect_equal(result7$overallTestStatistics, c(1.8745926, 1.4830004, 1.5863394, 2.3864368), tolerance = 1e-07) expect_equal(result7$overallPValues, c(0.033826026, 0.071381585, 0.057753539, 0.0091998951), tolerance = 1e-07) }) test_that("'getAnalysisResults' for an inverse normal design and a dataset of two means per stage, stages: default, 2, 3, and 4", { .skipTestifDisabled() dataExample5 <- getDataset( n1 = c(23, 13, 22, 13), n2 = c(22, 11, 22, 11), means1 = c(1.7, 1.5, 1.8, 2.5) * 100, means2 = c(1, 1.1, 1.3, 1) * 100, stds1 = c(1.3, 2.4, 2.2, 1.3) * 100, stds2 = c(1.2, 2.2, 2.1, 1.3) * 100 ) design9 <- getDesignInverseNormal(kMax = 4, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.4) ## ## Comparison of the results of TrialDesignInverseNormal object 'design9' with expected results ## expect_equal(design9$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(design9$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(design9$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterUnequalVariances} # @refFS[Formula]{fs:testStatisticsGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCITwoMeans} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoMeansEffect} result2 <- getAnalysisResults(design = design9, dataInput = dataExample5, equalVariances = FALSE, stage = 2, nPlanned = c(15, 15), thetaH0 = 0, thetaH1 = 130, assumedStDev = 100, allocationRatioPlanned = 2) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'result2' with expected results ## expect_equal(result2$.design$stages, c(1, 2, 3, 4)) expect_equal(result2$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result2$.design$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(result2$.design$futilityBounds, c(-6, -6, -6)) expect_equal(result2$.design$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(result2$.design$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) 
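# Hedged arithmetic check (illustrative, not an original assertion): with the
# equal default information rates of design9, the inverse normal combination
# is sum(w_i * qnorm(1 - p_i)) / sqrt(sum(w_i^2)) with equal weights, so the
# second value reduces to
# (qnorm(1 - 0.033590771) + qnorm(1 - 0.33726198)) / sqrt(2) ~= 1.5912766,
# the combinationTestStatistics[2] asserted below.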
expect_equal(result2$.stageResults$effectSizes, c(70, 59.444444), tolerance = 1e-07) expect_equal(result2$.stageResults$testStatistics, c(1.8780002, 0.42565792, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$.stageResults$pValues, c(0.033590771, 0.33726198, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(result2$thetaH0, 0) expect_equal(result2$thetaH1, 130) expect_equal(result2$assumedStDev, 100) expect_equal(result2$conditionalRejectionProbabilities, c(0.12372016, 0.08089089, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$nPlanned, c(NA_real_, NA_real_, 15, 15)) expect_equal(result2$allocationRatioPlanned, 2) expect_equal(result2$conditionalPower, c(NA_real_, NA_real_, 0.7399771, 0.96741599), tolerance = 1e-07) expect_equal(result2$repeatedConfidenceIntervalLowerBounds, c(-30.008991, -32.585516, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$repeatedConfidenceIntervalUpperBounds, c(170.00899, 154.76457, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$repeatedPValues, c(0.10725005, 0.13184907, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$finalStage, NA_integer_) expect_equal(result2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$normalApproximation, FALSE) expect_equal(result2$equalVariances, FALSE) expect_equal(result2$directionUpper, TRUE) expect_equal(result2$combinationTestStatistics, c(1.8304576, 1.5912766, NA_real_, NA_real_), tolerance = 1e-07) # @refFS[Formula]{fs:testStatisticDifferenceMeansUnequalVariances} # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterUnequalVariances} # @refFS[Formula]{fs:testStatisticsGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCITwoMeans} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoMeansEffect} result5 <- getAnalysisResults(design = design9, dataInput = dataExample5, equalVariances = FALSE, stage = 3, nPlanned = 15, thetaH0 = 0, thetaH1 = 130, assumedStDev = 100, allocationRatioPlanned = 2) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'result5' with expected results ## expect_equal(result5$.design$stages, c(1, 2, 3, 4)) expect_equal(result5$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result5$.design$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(result5$.design$futilityBounds, c(-6, -6, -6)) expect_equal(result5$.design$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(result5$.design$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) expect_equal(result5$.stageResults$effectSizes, c(70, 59.444444, 55.310345), tolerance = 1e-07) expect_equal(result5$.stageResults$testStatistics, c(1.8780002, 0.42565792, 0.7710996, NA_real_), tolerance = 1e-07) expect_equal(result5$.stageResults$pValues, c(0.033590771, 0.33726198, 
0.22248687, NA_real_), tolerance = 1e-07) expect_equal(result5$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(result5$thetaH0, 0) expect_equal(result5$thetaH1, 130) expect_equal(result5$assumedStDev, 100) expect_equal(result5$conditionalRejectionProbabilities, c(0.12372016, 0.08089089, 0.073275512, NA_real_), tolerance = 1e-07) expect_equal(result5$nPlanned, c(NA_real_, NA_real_, NA_real_, 15)) expect_equal(result5$allocationRatioPlanned, 2) expect_equal(result5$conditionalPower, c(NA_real_, NA_real_, NA_real_, 0.82164236), tolerance = 1e-07) expect_equal(result5$repeatedConfidenceIntervalLowerBounds, c(-30.008991, -32.585516, -19.230333, NA_real_), tolerance = 1e-07) expect_equal(result5$repeatedConfidenceIntervalUpperBounds, c(170.00899, 154.76457, 134.96564, NA_real_), tolerance = 1e-07) expect_equal(result5$repeatedPValues, c(0.10725005, 0.13184907, 0.088247169, NA_real_), tolerance = 1e-07) expect_equal(result5$finalStage, NA_integer_) expect_equal(result5$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result5$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result5$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result5$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result5$normalApproximation, FALSE) expect_equal(result5$equalVariances, FALSE) expect_equal(result5$directionUpper, TRUE) expect_equal(result5$combinationTestStatistics, c(1.8304576, 1.5912766, 1.7402643, NA_real_), tolerance = 1e-07) # @refFS[Formula]{fs:testStatisticDifferenceMeansUnequalVariances} # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterUnequalVariances} # @refFS[Formula]{fs:testStatisticsGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCITwoMeans} result8 <- getAnalysisResults(design = design9, dataInput = dataExample5, equalVariances = FALSE, stage = 4, nPlanned = numeric(0), thetaH0 = 0, thetaH1 = 130, assumedStDev = 100, allocationRatioPlanned = 2) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'result8' with expected results ## expect_equal(result8$.design$stages, c(1, 2, 3, 4)) expect_equal(result8$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result8$.design$criticalValues, c(2.5650713, 2.3932961, 2.2981973, 2.2330242), tolerance = 1e-07) expect_equal(result8$.design$futilityBounds, c(-6, -6, -6)) expect_equal(result8$.design$alphaSpent, c(0.0051577307, 0.011892822, 0.018620498, 0.025), tolerance = 1e-07) expect_equal(result8$.design$stageLevels, c(0.0051577307, 0.0083488792, 0.010775281, 0.012773673), tolerance = 1e-07) expect_equal(result8$.stageResults$effectSizes, c(70, 59.444444, 55.310345, 72.41784), tolerance = 1e-07) expect_equal(result8$.stageResults$testStatistics, c(1.8780002, 0.42565792, 0.7710996, 2.8165036), tolerance = 1e-07) expect_equal(result8$.stageResults$pValues, c(0.033590771, 0.33726198, 0.22248687, 0.0051181248), tolerance = 1e-07) expect_equal(result8$testActions, c("continue", "continue", "continue", "reject")) expect_equal(result8$thetaH0, 0) expect_equal(result8$thetaH1, 130) expect_equal(result8$assumedStDev, 100) expect_equal(result8$conditionalRejectionProbabilities, c(0.12372016, 0.08089089, 0.073275512, NA_real_), tolerance = 1e-07) expect_equal(result8$nPlanned, c(NA_real_, NA_real_, NA_real_, 
NA_real_)) expect_equal(result8$allocationRatioPlanned, 2) expect_equal(result8$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result8$repeatedConfidenceIntervalLowerBounds, c(-30.008991, -32.585516, -19.230333, 16.862491), tolerance = 1e-07) expect_equal(result8$repeatedConfidenceIntervalUpperBounds, c(170.00899, 154.76457, 134.96564, 146.10543), tolerance = 1e-07) expect_equal(result8$repeatedPValues, c(0.10725005, 0.13184907, 0.088247169, 0.0050030118), tolerance = 1e-07) expect_equal(result8$finalStage, 4) expect_equal(result8$finalPValues, c(NA_real_, NA_real_, NA_real_, 0.019192988), tolerance = 1e-07) expect_equal(result8$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, 4.0866333), tolerance = 1e-07) expect_equal(result8$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, 135.35067), tolerance = 1e-07) expect_equal(result8$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, 71.819795), tolerance = 1e-07) expect_equal(result8$normalApproximation, FALSE) expect_equal(result8$equalVariances, FALSE) expect_equal(result8$directionUpper, TRUE) expect_equal(result8$combinationTestStatistics, c(1.8304576, 1.5912766, 1.7402643, 2.7909855), tolerance = 1e-07) }) test_that("'getAnalysisResults' for a Fisher design and a dataset of two means per stage, stages: default, 2, 3, and 4", { .skipTestifDisabled() dataExample6 <- getDataset( n1 = c(23, 13, 22, 13), n2 = c(22, 11, 22, 11), means1 = c(1.7, 1.5, 1.8, 2.5) * 100, means2 = c(1, 1.1, 1.3, 1) * 100, stds1 = c(1.3, 2.4, 2.2, 1.3) * 100, stds2 = c(1.2, 2.2, 2.1, 1.3) * 100 ) design10 <- getDesignFisher(kMax = 4, alpha = 0.025) ## ## Comparison of the results of TrialDesignFisher object 'design10' with expected results ## expect_equal(design10$alphaSpent, c(0.010404785, 0.016661203, 0.021286477, 0.025), tolerance = 1e-07) expect_equal(design10$criticalValues, c(0.010404785, 0.0013703718, 0.00023506069, 4.5812899e-05), tolerance = 1e-07) expect_equal(design10$stageLevels, c(0.010404785, 0.010404785, 0.010404785, 0.010404785), tolerance = 1e-07) expect_equal(design10$scale, c(1, 1, 1)) expect_equal(design10$nonStochasticCurtailment, FALSE) result3 <- getAnalysisResults(design = design10, dataInput = dataExample6, equalVariances = TRUE, stage = 2, nPlanned = c(15, 15), thetaH0 = 0, thetaH1 = 130, assumedStDev = 100, allocationRatioPlanned = 2, seed = 123456789) ## ## Comparison of the results of AnalysisResultsFisher object 'result3' with expected results ## expect_equal(result3$.design$stages, c(1, 2, 3, 4)) expect_equal(result3$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result3$.design$criticalValues, c(0.010404785, 0.0013703718, 0.00023506069, 4.5812899e-05), tolerance = 1e-07) expect_equal(result3$.design$alpha0Vec, c(1, 1, 1)) expect_equal(result3$.design$alphaSpent, c(0.010404785, 0.016661203, 0.021286477, 0.025), tolerance = 1e-07) expect_equal(result3$.design$stageLevels, c(0.010404785, 0.010404785, 0.010404785, 0.010404785), tolerance = 1e-07) expect_equal(result3$.stageResults$effectSizes, c(70, 59.444444), tolerance = 1e-07) expect_equal(result3$.stageResults$testStatistics, c(1.8745926, 0.42245245, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$.stageResults$pValues, c(0.033826026, 0.33839752, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(result3$thetaH0, 0) expect_equal(result3$thetaH1, 130) 
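# For Fisher combination designs the conditional power is obtained by
# simulation (hence the seed argument above), so the expectations further
# below check conditionalPowerSimulated rather than conditionalPower.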
expect_equal(result3$assumedStDev, 100) expect_equal(result3$conditionalRejectionProbabilities, c(0.077408717, 0.036086707, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$nPlanned, c(NA_real_, NA_real_, 15, 15)) expect_equal(result3$allocationRatioPlanned, 2) expect_equal(result3$repeatedConfidenceIntervalLowerBounds, c(-19.610191, -28.583726, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$repeatedConfidenceIntervalUpperBounds, c(159.61019, 157.36315, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$repeatedPValues, c(0.07529439, 0.13212373, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$finalStage, NA_integer_) expect_equal(result3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$normalApproximation, FALSE) expect_equal(result3$equalVariances, TRUE) expect_equal(result3$directionUpper, TRUE) expect_equal(result3$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.599, 0.917), tolerance = 1e-07) expect_equal(result3$combinationTestStatistics, c(0.033826026, 0.011446643, NA_real_, NA_real_), tolerance = 1e-07) result6 <- getAnalysisResults(design = design10, dataInput = dataExample6, equalVariances = TRUE, stage = 3, nPlanned = 15, thetaH0 = 0, thetaH1 = 130, assumedStDev = 100, allocationRatioPlanned = 2, seed = 123456789) ## ## Comparison of the results of AnalysisResultsFisher object 'result6' with expected results ## expect_equal(result6$.design$stages, c(1, 2, 3, 4)) expect_equal(result6$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result6$.design$criticalValues, c(0.010404785, 0.0013703718, 0.00023506069, 4.5812899e-05), tolerance = 1e-07) expect_equal(result6$.design$alpha0Vec, c(1, 1, 1)) expect_equal(result6$.design$alphaSpent, c(0.010404785, 0.016661203, 0.021286477, 0.025), tolerance = 1e-07) expect_equal(result6$.design$stageLevels, c(0.010404785, 0.010404785, 0.010404785, 0.010404785), tolerance = 1e-07) expect_equal(result6$.stageResults$effectSizes, c(70, 59.444444, 55.310345), tolerance = 1e-07) expect_equal(result6$.stageResults$testStatistics, c(1.8745926, 0.42245245, 0.7710996, NA_real_), tolerance = 1e-07) expect_equal(result6$.stageResults$pValues, c(0.033826026, 0.33839752, 0.22248223, NA_real_), tolerance = 1e-07) expect_equal(result6$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(result6$thetaH0, 0) expect_equal(result6$thetaH1, 130) expect_equal(result6$assumedStDev, 100) expect_equal(result6$conditionalRejectionProbabilities, c(0.077408717, 0.036086707, 0.017989301, NA_real_), tolerance = 1e-07) expect_equal(result6$nPlanned, c(NA_real_, NA_real_, NA_real_, 15)) expect_equal(result6$allocationRatioPlanned, 2) expect_equal(result6$conditionalPower, c(NA_real_, NA_real_, NA_real_, 0.60883935), tolerance = 1e-07) expect_equal(result6$repeatedConfidenceIntervalLowerBounds, c(-19.610191, -28.583726, -24.875191, NA_real_), tolerance = 1e-07) expect_equal(result6$repeatedConfidenceIntervalUpperBounds, c(159.61019, 157.36315, 146.25589, NA_real_), tolerance = 1e-07) expect_equal(result6$repeatedPValues, c(0.07529439, 0.13212373, 0.13321282, NA_real_), tolerance = 1e-07) expect_equal(result6$finalStage, NA_integer_) 
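# Because the trial is still ongoing at stage 3 (finalStage is NA above), all
# final-analysis quantities checked below -- final p-values, final confidence
# bounds, and median unbiased estimates -- are expected to be NA.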
expect_equal(result6$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result6$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result6$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result6$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result6$normalApproximation, FALSE) expect_equal(result6$equalVariances, TRUE) expect_equal(result6$directionUpper, TRUE) expect_equal(result6$combinationTestStatistics, c(0.033826026, 0.011446643, 0.0025466747, NA_real_), tolerance = 1e-07) result9 <- getAnalysisResults(design = design10, dataInput = dataExample6, equalVariances = TRUE, stage = 4, nPlanned = numeric(0), thetaH0 = 0, thetaH1 = 130, assumedStDev = 100, allocationRatioPlanned = 2, seed = 123456789) ## ## Comparison of the results of AnalysisResultsFisher object 'result9' with expected results ## expect_equal(result9$.design$stages, c(1, 2, 3, 4)) expect_equal(result9$.design$informationRates, c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) expect_equal(result9$.design$criticalValues, c(0.010404785, 0.0013703718, 0.00023506069, 4.5812899e-05), tolerance = 1e-07) expect_equal(result9$.design$alpha0Vec, c(1, 1, 1)) expect_equal(result9$.design$alphaSpent, c(0.010404785, 0.016661203, 0.021286477, 0.025), tolerance = 1e-07) expect_equal(result9$.design$stageLevels, c(0.010404785, 0.010404785, 0.010404785, 0.010404785), tolerance = 1e-07) expect_equal(result9$.stageResults$effectSizes, c(70, 59.444444, 55.310345, 72.41784), tolerance = 1e-07) expect_equal(result9$.stageResults$testStatistics, c(1.8745926, 0.42245245, 0.7710996, 2.8165036), tolerance = 1e-07) expect_equal(result9$.stageResults$pValues, c(0.033826026, 0.33839752, 0.22248223, 0.0050256902), tolerance = 1e-07) expect_equal(result9$testActions, c("continue", "continue", "continue", "reject")) expect_equal(result9$thetaH0, 0) expect_equal(result9$thetaH1, 130) expect_equal(result9$assumedStDev, 100) expect_equal(result9$conditionalRejectionProbabilities, c(0.077408717, 0.036086707, 0.017989301, NA_real_), tolerance = 1e-07) expect_equal(result9$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result9$allocationRatioPlanned, 2) expect_equal(result9$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result9$repeatedConfidenceIntervalLowerBounds, c(-19.610191, -28.583726, -24.875191, 10.125544), tolerance = 1e-07) expect_equal(result9$repeatedConfidenceIntervalUpperBounds, c(159.61019, 157.36315, 146.25589, 154.53063), tolerance = 1e-07) expect_equal(result9$repeatedPValues, c(0.07529439, 0.13212373, 0.13321282, 0.010110881), tolerance = 1e-07) expect_equal(result9$finalStage, NA_integer_) expect_equal(result9$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result9$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result9$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result9$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result9$normalApproximation, FALSE) expect_equal(result9$equalVariances, TRUE) expect_equal(result9$directionUpper, TRUE) expect_equal(result9$combinationTestStatistics, c(0.033826026, 0.011446643, 0.0025466747, 1.2798798e-05), tolerance = 1e-07) }) test_that("Check that the conditional power is as expected for different designs and datasets", { .skipTestifDisabled() informationRates <- c(0.2, 0.5, 0.8, 1) 
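# Note (assumption made explicit): with these unequal information rates the
# inverse normal weights are the square roots of the information increments,
# sqrt(c(0.2, 0.3, 0.3, 0.2)) = c(0.4472136, 0.54772256, 0.54772256,
# 0.4472136), exactly the weightsInverseNormal asserted for 'stageResults'
# further below.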
dataExample7 <- getDataset( n1 = c(22, 13, 22, 13), n2 = c(22, 11, 22, 11), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 1, 2.5), stds1 = c(1, 2, 2, 1.3), stds2 = c(1, 2, 2, 1.3)) design11 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, informationRates = informationRates, futilityBounds = rep(0.5244, 3), bindingFutility = TRUE, typeOfDesign = "WT", deltaWT = 0.45) ## ## Comparison of the results of TrialDesignGroupSequential object 'design11' with expected results ## expect_equal(design11$alphaSpent, c(0.008066711, 0.01611168, 0.021671928, 0.025), tolerance = 1e-07) expect_equal(design11$criticalValues, c(2.4058832, 2.2981456, 2.2447684, 2.2198623), tolerance = 1e-07) expect_equal(design11$stageLevels, c(0.008066711, 0.010776752, 0.012391502, 0.013214058), tolerance = 1e-07) result1 <- getAnalysisResults(design = design11, dataInput = dataExample7, equalVariances = TRUE, directionUpper = FALSE, stage = 2, thetaH0 = 0.2, thetaH1 = -0.2, nPlanned = c(96, 64), allocationRatioPlanned = 3, normalApproximation = FALSE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'result1' with expected results ## expect_equal(result1$.design$stages, c(1, 2, 3, 4)) expect_equal(result1$.design$informationRates, c(0.2, 0.5, 0.8, 1), tolerance = 1e-07) expect_equal(result1$.design$criticalValues, c(2.4058832, 2.2981456, 2.2447684, 2.2198623), tolerance = 1e-07) expect_equal(result1$.design$futilityBounds, c(0.5244, 0.5244, 0.5244), tolerance = 1e-07) expect_equal(result1$.design$alphaSpent, c(0.008066711, 0.01611168, 0.021671928, 0.025), tolerance = 1e-07) expect_equal(result1$.design$stageLevels, c(0.008066711, 0.010776752, 0.012391502, 0.013214058), tolerance = 1e-07) expect_equal(result1$.stageResults$effectSizes, c(-0.4, -0.39619048), tolerance = 1e-07) expect_equal(result1$.stageResults$testStatistics, c(-1.9899749, -0.73229093, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$.stageResults$pValues, c(0.026564837, 0.23586057, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(result1$thetaH0, 0.2, tolerance = 1e-07) expect_equal(result1$thetaH1, -0.2, tolerance = 1e-07) expect_equal(result1$assumedStDev, 1.4042956, tolerance = 1e-07) expect_equal(result1$conditionalRejectionProbabilities, c(0.13790633, 0.11434101, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$nPlanned, c(NA_real_, NA_real_, 96, 64)) expect_equal(result1$allocationRatioPlanned, 3) expect_equal(result1$conditionalPower, c(NA_real_, NA_real_, 0.4081395, 0.60690858), tolerance = 1e-07) expect_equal(result1$repeatedConfidenceIntervalLowerBounds, c(-1.1558731, -1.198323, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$repeatedConfidenceIntervalUpperBounds, c(0.35587299, 0.40594209, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$repeatedPValues, c(0.06267268, 0.077641512, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$finalStage, NA_integer_) expect_equal(result1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$normalApproximation, FALSE) expect_equal(result1$equalVariances, TRUE) expect_equal(result1$directionUpper, FALSE) 
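# Hedged arithmetic check (illustrative, not an original assertion): with
# directionUpper = FALSE and the shifted null thetaH0 = 0.2, the first-stage
# statistic is (meanDiff - thetaH0) / stdError; with a first-stage pooled SD
# of 1 this is (-0.4 - 0.2) / sqrt(1/22 + 1/22) ~= -1.99, matching
# testStatistics[1] = -1.9899749 asserted above.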
expect_equal(result1$overallTestStatistics, c(-1.9899749, -1.7496977, NA_real_, NA_real_), tolerance = 1e-07)
expect_equal(result1$overallPValues, c(0.026564837, 0.042409297, NA_real_, NA_real_), tolerance = 1e-07)

design12 <- getDesignInverseNormal(kMax = 4, alpha = 0.025, informationRates = informationRates, typeOfDesign = "WT", deltaWT = 0.45)

##
## Comparison of the results of TrialDesignInverseNormal object 'design12' with expected results
##
expect_equal(design12$alphaSpent, c(0.0064937119, 0.013848609, 0.020340933, 0.025), tolerance = 1e-07)
expect_equal(design12$criticalValues, c(2.484114, 2.3728731, 2.3177603, 2.2920443), tolerance = 1e-07)
expect_equal(design12$stageLevels, c(0.0064937119, 0.0088251631, 0.010231176, 0.010951542), tolerance = 1e-07)

stageResults <- getStageResults(design = design12, dataInput = dataExample7, equalVariances = TRUE, directionUpper = TRUE, stage = 2, thetaH0 = -1)

##
## Comparison of the results of StageResultsMeans object 'stageResults' with expected results
##
expect_equal(stageResults$overallTestStatistics, c(1.9899749, 1.7720581, NA_real_, NA_real_), tolerance = 1e-07)
expect_equal(stageResults$overallPValues, c(0.026564837, 0.040500218, NA_real_, NA_real_), tolerance = 1e-07)
expect_equal(stageResults$overallMeans1, c(1, 1.0371429, 1.022807, 1.0185714), tolerance = 1e-07)
expect_equal(stageResults$overallMeans2, c(1.4, 1.4333333, 1.26, 1.4666667), tolerance = 1e-07)
expect_equal(stageResults$overallStDevs1, c(1, 1.4254175, 1.6534615, 1.5851935), tolerance = 1e-07)
expect_equal(stageResults$overallStDevs2, c(1, 1.3814998, 1.6530107, 1.6573689), tolerance = 1e-07)
expect_equal(stageResults$overallSampleSizes1, c(22, 35))
expect_equal(stageResults$overallSampleSizes2, c(22, 33))
expect_equal(stageResults$testStatistics, c(1.9899749, 0.73229093, NA_real_, NA_real_), tolerance = 1e-07)
expect_equal(stageResults$pValues, c(0.026564837, 0.23586057, NA_real_, NA_real_), tolerance = 1e-07)
expect_equal(stageResults$effectSizes, c(-0.4, -0.39619048), tolerance = 1e-07)
expect_equal(stageResults$combInverseNormal, c(1.9338654, 1.7805468, NA_real_, NA_real_), tolerance = 1e-07)
expect_equal(stageResults$weightsInverseNormal, c(0.4472136, 0.54772256, 0.54772256, 0.4472136), tolerance = 1e-07)

conditionalPower <- getConditionalPower(design = design12, stageResults = stageResults, stage = 2, thetaH1 = 0.840, nPlanned = c(96, 64), assumedStDev = 2)

##
## Comparison of the results of list object 'conditionalPower' with expected results
##
expect_equal(conditionalPower$nPlanned, c(NA_real_, NA_real_, 96, 64))
expect_equal(conditionalPower$conditionalPower, c(NA_real_, NA_real_, 0.99873967, 0.99999483), tolerance = 1e-07)

conditionalPowerPlot <- .getConditionalPowerPlot(stageResults = stageResults, stage = 2, thetaRange = seq(-0.8, 0.5, 0.1), nPlanned = c(96, 64), assumedStDev = 2, allocationRatioPlanned = 3)

##
## Comparison of the results of list object 'conditionalPowerPlot' with expected results
##
expect_equal(conditionalPowerPlot$xValues, c(-0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5), tolerance = 1e-07)
expect_equal(conditionalPowerPlot$condPowerValues, c(0.22956329, 0.31502432, 0.41251256, 0.51641352, 0.6197496, 0.71556407, 0.79832397, 0.86487377, 0.91466948, 0.94932539, 0.97175172, 0.98524183, 0.99278324, 0.99670055), tolerance = 1e-07)
expect_equal(conditionalPowerPlot$likelihoodValues, c(0.49547937, 0.67200309, 0.83620171, 0.95465162, 0.9999375, 0.96093693, 0.84724887, 0.68536385, 0.50865752, 0.34635689, 0.21637958, 0.12402316,
0.065220394, 0.031467201), tolerance = 1e-07) expect_equal(conditionalPowerPlot$main, "Conditional Power Plot with Likelihood") expect_equal(conditionalPowerPlot$xlab, "Effect size") expect_equal(conditionalPowerPlot$ylab, "Conditional power / Likelihood") expect_equal(conditionalPowerPlot$sub, "Stage = 2, # of remaining subjects = 160, std = 2, allocation ratio = 3") result2 <- getAnalysisResults(design = design12, dataInput = dataExample7, equalVariances = TRUE, directionUpper = FALSE, stage = 2, thetaH0 = 0.2, thetaH1 = -0.2, nPlanned = c(96, 64), allocationRatioPlanned = 3, normalApproximation = FALSE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'result2' with expected results ## expect_equal(result2$.design$stages, c(1, 2, 3, 4)) expect_equal(result2$.design$informationRates, c(0.2, 0.5, 0.8, 1), tolerance = 1e-07) expect_equal(result2$.design$criticalValues, c(2.484114, 2.3728731, 2.3177603, 2.2920443), tolerance = 1e-07) expect_equal(result2$.design$futilityBounds, c(-6, -6, -6)) expect_equal(result2$.design$alphaSpent, c(0.0064937119, 0.013848609, 0.020340933, 0.025), tolerance = 1e-07) expect_equal(result2$.design$stageLevels, c(0.0064937119, 0.0088251631, 0.010231176, 0.010951542), tolerance = 1e-07) expect_equal(result2$.stageResults$effectSizes, c(-0.4, -0.39619048), tolerance = 1e-07) expect_equal(result2$.stageResults$testStatistics, c(-1.9899749, -0.73229093, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$.stageResults$pValues, c(0.026564837, 0.23586057, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(result2$thetaH0, 0.2, tolerance = 1e-07) expect_equal(result2$thetaH1, -0.2, tolerance = 1e-07) expect_equal(result2$assumedStDev, 1.4042956, tolerance = 1e-07) expect_equal(result2$conditionalRejectionProbabilities, c(0.11857307, 0.10556981, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$nPlanned, c(NA_real_, NA_real_, 96, 64)) expect_equal(result2$allocationRatioPlanned, 3) expect_equal(result2$conditionalPower, c(NA_real_, NA_real_, 0.39060766, 0.5889102), tolerance = 1e-07) expect_equal(result2$repeatedConfidenceIntervalLowerBounds, c(-1.182291, -1.2104795, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$repeatedConfidenceIntervalUpperBounds, c(0.3822909, 0.41047947, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$repeatedPValues, c(0.081445577, 0.092870573, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$finalStage, NA_integer_) expect_equal(result2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$normalApproximation, FALSE) expect_equal(result2$equalVariances, TRUE) expect_equal(result2$directionUpper, FALSE) expect_equal(result2$combinationTestStatistics, c(1.9338654, 1.7805468, NA_real_, NA_real_), tolerance = 1e-07) design13 <- getDesignFisher(kMax = 4, alpha = 0.025, informationRates = informationRates) ## ## Comparison of the results of TrialDesignFisher object 'design13' with expected results ## expect_equal(design13$alphaSpent, c(0.0099747046, 0.017168497, 0.022142404, 0.025), tolerance = 1e-07) expect_equal(design13$criticalValues, c(0.0099747046, 0.00059134153, 
6.046221e-05, 1.3203687e-05), tolerance = 1e-07)
expect_equal(design13$stageLevels, c(0.0099747046, 0.0099747046, 0.0099747046, 0.0099747046), tolerance = 1e-07)
expect_equal(design13$scale, c(1.2247449, 1.2247449, 1), tolerance = 1e-07)
expect_equal(design13$nonStochasticCurtailment, FALSE)

result3 <- getAnalysisResults(design = design13, dataInput = dataExample7, equalVariances = TRUE, directionUpper = FALSE, stage = 2, nPlanned = c(96, 64), thetaH1 = -0.4, allocationRatioPlanned = 2, normalApproximation = FALSE, iterations = 10000, seed = 442018)

##
## Comparison of the results of AnalysisResultsFisher object 'result3' with expected results
##
expect_equal(result3$.design$stages, c(1, 2, 3, 4))
expect_equal(result3$.design$informationRates, c(0.2, 0.5, 0.8, 1), tolerance = 1e-07)
expect_equal(result3$.design$criticalValues, c(0.0099747046, 0.00059134153, 6.046221e-05, 1.3203687e-05), tolerance = 1e-07)
expect_equal(result3$.design$alpha0Vec, c(1, 1, 1))
expect_equal(result3$.design$alphaSpent, c(0.0099747046, 0.017168497, 0.022142404, 0.025), tolerance = 1e-07)
expect_equal(result3$.design$stageLevels, c(0.0099747046, 0.0099747046, 0.0099747046, 0.0099747046), tolerance = 1e-07)
expect_equal(result3$.stageResults$effectSizes, c(-0.4, -0.39619048), tolerance = 1e-07)
expect_equal(result3$.stageResults$testStatistics, c(-1.3266499, -0.48819395, NA_real_, NA_real_), tolerance = 1e-07)
expect_equal(result3$.stageResults$pValues, c(0.095896458, 0.31512146, NA_real_, NA_real_), tolerance = 1e-07)
expect_equal(result3$testActions, c("continue", "continue", NA_character_, NA_character_))
expect_equal(result3$thetaH0, 0)
expect_equal(result3$thetaH1, -0.4, tolerance = 1e-07)
expect_equal(result3$assumedStDev, 1.4042956, tolerance = 1e-07)
expect_equal(result3$conditionalRejectionProbabilities, c(0.031447357, 0.012731128, NA_real_, NA_real_), tolerance = 1e-07)
expect_equal(result3$nPlanned, c(NA_real_, NA_real_, 96, 64))
expect_equal(result3$allocationRatioPlanned, 2)
expect_equal(result3$repeatedConfidenceIntervalLowerBounds, c(-1.1295139, -1.2072533, NA_real_, NA_real_), tolerance = 1e-07)
expect_equal(result3$repeatedConfidenceIntervalUpperBounds, c(0.32951385, 0.40725333, NA_real_, NA_real_), tolerance = 1e-07)
expect_equal(result3$repeatedPValues, c(0.19930232, 0.29225486, NA_real_, NA_real_), tolerance = 1e-07)
expect_equal(result3$finalStage, NA_integer_)
expect_equal(result3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_))
expect_equal(result3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
expect_equal(result3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
expect_equal(result3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_))
expect_equal(result3$normalApproximation, FALSE)
expect_equal(result3$equalVariances, TRUE)
expect_equal(result3$directionUpper, FALSE)
expect_equal(result3$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.1353, 0.2436), tolerance = 1e-07)
expect_equal(result3$combinationTestStatistics, c(0.095896458, 0.023311276, NA_real_, NA_real_), tolerance = 1e-07)
})

context("Testing 'getStageResultsMeans'")

test_that("'getStageResultsMeans' for an inverse normal design and one or two treatments", {
.skipTestifDisabled()
designInverseNormal <- getDesignInverseNormal(kMax = 2, alpha = 0.025, sided = 1, informationRates = c(0.5, 1), typeOfDesign = "WT", deltaWT = 0.25, futilityBounds = qnorm(0.7))
dataExample8 <- getDataset( n = c(10, 10), means = c(2, 3), stDevs = c(1, 1.5))
stageResults1 <-
getStageResults(design = designInverseNormal, dataInput = dataExample8, stage = 2, thetaH0 = C_THETA_H0_MEANS_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT) ## ## Comparison of the results of StageResultsMeans object 'stageResults1' with expected results ## expect_equal(stageResults1$overallTestStatistics, c(6.3245553, 8.3272484), tolerance = 1e-07) expect_equal(stageResults1$overallPValues, c(6.846828e-05, 4.5964001e-08), tolerance = 1e-07) expect_equal(stageResults1$overallMeans, c(2, 2.5), tolerance = 1e-07) expect_equal(stageResults1$overallStDevs, c(1, 1.3426212), tolerance = 1e-07) expect_equal(stageResults1$overallSampleSizes, c(10, 20)) expect_equal(stageResults1$testStatistics, c(6.3245553, 6.3245553), tolerance = 1e-07) expect_equal(stageResults1$pValues, c(6.846828e-05, 6.846828e-05), tolerance = 1e-07) expect_equal(stageResults1$effectSizes, c(2, 2.5), tolerance = 1e-07) expect_equal(stageResults1$combInverseNormal, c(3.813637, 5.3932972), tolerance = 1e-07) expect_equal(stageResults1$weightsInverseNormal, c(0.70710678, 0.70710678), tolerance = 1e-07) dataExample9 <- getDataset( n1 = c(22, 11, 22, 11), n2 = c(22, 13, 22, 13), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 3, 2.5), stDevs1 = c(1, 2, 2, 1.3), stDevs2 = c(1, 2, 2, 1.3) ) stageResults2 <- getStageResults(design = designInverseNormal, dataInput = dataExample9, stage = 2, thetaH0 = C_THETA_H0_MEANS_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT) ## ## Comparison of the results of StageResultsMeans object 'stageResults2' with expected results ## expect_equal(stageResults2$overallTestStatistics, c(-1.3266499, -1.1850988), tolerance = 1e-07) expect_equal(stageResults2$overallPValues, c(0.90410354, 0.87988596), tolerance = 1e-07) expect_equal(stageResults2$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(stageResults2$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143), tolerance = 1e-07) expect_equal(stageResults2$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(stageResults2$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056), tolerance = 1e-07) expect_equal(stageResults2$overallSampleSizes1, c(22, 33)) expect_equal(stageResults2$overallSampleSizes2, c(22, 35)) expect_equal(stageResults2$testStatistics, c(-1.3266499, -0.48819395), tolerance = 1e-07) expect_equal(stageResults2$pValues, c(0.90410354, 0.68487854), tolerance = 1e-07) expect_equal(stageResults2$effectSizes, c(-0.4, -0.40380952), tolerance = 1e-07) expect_equal(stageResults2$combInverseNormal, c(-1.3052935, -1.2633725), tolerance = 1e-07) expect_equal(stageResults2$weightsInverseNormal, c(0.70710678, 0.70710678), tolerance = 1e-07) }) test_that("'getStageResultsMeans' for a Fisher design and one or two treatments", { .skipTestifDisabled() designFisher <- getDesignFisher(kMax = 2, alpha = 0.025, alpha0Vec = 1, informationRates = c(0.5, 1), method = "equalAlpha") dataExample10 <- getDataset( n = c(10, 10), means = c(2, 3), stDevs = c(1, 1.5)) stageResults3 <- getStageResults(design = designFisher, dataInput = dataExample10, stage = 2, thetaH0 = C_THETA_H0_MEANS_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT) ## ## Comparison of the results of StageResultsMeans 
object 'stageResults3' with expected results ## expect_equal(stageResults3$overallTestStatistics, c(6.3245553, 8.3272484), tolerance = 1e-07) expect_equal(stageResults3$overallPValues, c(6.846828e-05, 4.5964001e-08), tolerance = 1e-07) expect_equal(stageResults3$overallMeans, c(2, 2.5), tolerance = 1e-07) expect_equal(stageResults3$overallStDevs, c(1, 1.3426212), tolerance = 1e-07) expect_equal(stageResults3$overallSampleSizes, c(10, 20)) expect_equal(stageResults3$testStatistics, c(6.3245553, 6.3245553), tolerance = 1e-07) expect_equal(stageResults3$pValues, c(6.846828e-05, 6.846828e-05), tolerance = 1e-07) expect_equal(stageResults3$effectSizes, c(2, 2.5), tolerance = 1e-07) expect_equal(stageResults3$combFisher, c(6.846828e-05, 4.6879053e-09), tolerance = 1e-07) expect_equal(stageResults3$weightsFisher, c(1, 1)) dataExample11 <- getDataset( n1 = c(22, 11, 22, 11), n2 = c(22, 13, 22, 13), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 3, 2.5), stDevs1 = c(1, 2, 2, 1.3), stDevs2 = c(1, 2, 2, 1.3) ) stageResults4 <- getStageResults(design = designFisher, dataInput = dataExample11, stage = 2, thetaH0 = C_THETA_H0_MEANS_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT) ## ## Comparison of the results of StageResultsMeans object 'stageResults4' with expected results ## expect_equal(stageResults4$overallTestStatistics, c(-1.3266499, -1.1850988), tolerance = 1e-07) expect_equal(stageResults4$overallPValues, c(0.90410354, 0.87988596), tolerance = 1e-07) expect_equal(stageResults4$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(stageResults4$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143), tolerance = 1e-07) expect_equal(stageResults4$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(stageResults4$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056), tolerance = 1e-07) expect_equal(stageResults4$overallSampleSizes1, c(22, 33)) expect_equal(stageResults4$overallSampleSizes2, c(22, 35)) expect_equal(stageResults4$testStatistics, c(-1.3266499, -0.48819395), tolerance = 1e-07) expect_equal(stageResults4$pValues, c(0.90410354, 0.68487854), tolerance = 1e-07) expect_equal(stageResults4$effectSizes, c(-0.4, -0.40380952), tolerance = 1e-07) expect_equal(stageResults4$combFisher, c(0.90410354, 0.61920111), tolerance = 1e-07) expect_equal(stageResults4$weightsFisher, c(1, 1)) }) rpact/inst/tests/testthat/test-class_time.R0000644000176200001440000017061213567165663020616 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 07 November 2019, 10:22:23 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing class 'PiecewiseSurvivalTime'") test_that("Testing 'getPiecewiseSurvivalTime': isPiecewiseSurvivalEnabled()", { expect_false(getPiecewiseSurvivalTime()$isPiecewiseSurvivalEnabled()) expect_false(getPiecewiseSurvivalTime(piecewiseSurvivalTime = NA)$isPiecewiseSurvivalEnabled()) }) test_that("Testing 'getPiecewiseSurvivalTime': simple vector based definition", { pwSurvivalTime1 <- getPiecewiseSurvivalTime(lambda2 = 0.5, hazardRatio = 0.8) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime1' with expected results ## expect_equal(pwSurvivalTime1$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime1$lambda1, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime1$lambda2, 0.5, tolerance = 1e-07) expect_equal(pwSurvivalTime1$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime1$pi1, 0.99177025, tolerance = 1e-07) expect_equal(pwSurvivalTime1$pi2, 0.99752125, tolerance = 1e-07) expect_equal(pwSurvivalTime1$median1, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime1$median2, 1.3862944, tolerance = 1e-07) expect_equal(pwSurvivalTime1$eventTime, 12) expect_equal(pwSurvivalTime1$kappa, 1) expect_equal(pwSurvivalTime1$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime1$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime1$delayedResponseEnabled, FALSE) pwSurvivalTime2 <- getPiecewiseSurvivalTime(lambda2 = 0.5, lambda1 = 0.4) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime2' with expected results ## expect_equal(pwSurvivalTime2$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime2$lambda1, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime2$lambda2, 0.5, tolerance = 1e-07) expect_equal(pwSurvivalTime2$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime2$pi1, 0.99177025, tolerance = 1e-07) expect_equal(pwSurvivalTime2$pi2, 0.99752125, tolerance = 1e-07) expect_equal(pwSurvivalTime2$median1, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime2$median2, 1.3862944, tolerance = 1e-07) expect_equal(pwSurvivalTime2$eventTime, 12) expect_equal(pwSurvivalTime2$kappa, 1) expect_equal(pwSurvivalTime2$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime2$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime2$delayedResponseEnabled, FALSE) pwSurvivalTime2 <- getPiecewiseSurvivalTime(pi2 = 0.5, hazardRatio = 0.8) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime2' with expected results ## expect_equal(pwSurvivalTime2$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime2$lambda1, 0.046209812, tolerance = 1e-07) expect_equal(pwSurvivalTime2$lambda2, 0.057762265, tolerance = 1e-07) expect_equal(pwSurvivalTime2$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime2$pi1, 0.42565082, tolerance = 1e-07) expect_equal(pwSurvivalTime2$pi2, 0.5, tolerance = 1e-07) expect_equal(pwSurvivalTime2$median1, 15) expect_equal(pwSurvivalTime2$median2, 12) expect_equal(pwSurvivalTime2$eventTime, 12) 
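# Sketch of the exponential relations exercised here (hedged, for kappa = 1):
# lambda = -log(1 - pi) / eventTime, median = log(2) / lambda, and
# hazardRatio = lambda1 / lambda2. For pi2 = 0.5 and eventTime = 12 this gives
# lambda2 = -log(0.5) / 12 ~= 0.057762265 and median2 = 12, and with
# hazardRatio = 0.8, lambda1 = 0.8 * lambda2 ~= 0.046209812 and
# pi1 = 1 - exp(-lambda1 * 12) ~= 0.42565082, as asserted above.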
expect_equal(pwSurvivalTime2$kappa, 1) expect_equal(pwSurvivalTime2$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime2$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime2$delayedResponseEnabled, FALSE) pwSurvivalTime2 <- getPiecewiseSurvivalTime(pi2 = 0.5, pi1 = 0.4) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime2' with expected results ## expect_equal(pwSurvivalTime2$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime2$lambda1, 0.042568802, tolerance = 1e-07) expect_equal(pwSurvivalTime2$lambda2, 0.057762265, tolerance = 1e-07) expect_equal(pwSurvivalTime2$hazardRatio, 0.73696559, tolerance = 1e-07) expect_equal(pwSurvivalTime2$pi1, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime2$pi2, 0.5, tolerance = 1e-07) expect_equal(pwSurvivalTime2$median1, 16.282985, tolerance = 1e-07) expect_equal(pwSurvivalTime2$median2, 12) expect_equal(pwSurvivalTime2$eventTime, 12) expect_equal(pwSurvivalTime2$kappa, 1) expect_equal(pwSurvivalTime2$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime2$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime2$delayedResponseEnabled, FALSE) pwSurvivalTime3 <- getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), lambda2 = 0.4, pi2 = 0.4) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime3' with expected results ## expect_equal(pwSurvivalTime3$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime3$lambda1, c(0.24, 0.32), tolerance = 1e-07) expect_equal(pwSurvivalTime3$lambda2, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime3$hazardRatio, c(0.6, 0.8), tolerance = 1e-07) expect_equal(pwSurvivalTime3$pi1, c(0.94386524, 0.9785064), tolerance = 1e-07) expect_equal(pwSurvivalTime3$pi2, 0.99177025, tolerance = 1e-07) expect_equal(pwSurvivalTime3$median1, c(2.8881133, 2.1660849), tolerance = 1e-07) expect_equal(pwSurvivalTime3$median2, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime3$eventTime, 12) expect_equal(pwSurvivalTime3$kappa, 1) expect_equal(pwSurvivalTime3$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime3$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime3$delayedResponseEnabled, FALSE) pwSurvivalTime4 <- getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), lambda2 = 0.4, pi2 = 0.4, pi1 = 0.3) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime4' with expected results ## expect_equal(pwSurvivalTime4$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime4$lambda1, c(0.24, 0.32), tolerance = 1e-07) expect_equal(pwSurvivalTime4$lambda2, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime4$hazardRatio, c(0.6, 0.8), tolerance = 1e-07) expect_equal(pwSurvivalTime4$pi1, c(0.94386524, 0.9785064), tolerance = 1e-07) expect_equal(pwSurvivalTime4$pi2, 0.99177025, tolerance = 1e-07) expect_equal(pwSurvivalTime4$median1, c(2.8881133, 2.1660849), tolerance = 1e-07) expect_equal(pwSurvivalTime4$median2, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime4$eventTime, 12) expect_equal(pwSurvivalTime4$kappa, 1) expect_equal(pwSurvivalTime4$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime4$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime4$delayedResponseEnabled, FALSE) pwSurvivalTime5 <- getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), lambda2 = 0.4, pi2 = 0.4, pi1 = 0.3) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime5' with expected results ## expect_equal(pwSurvivalTime5$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime5$lambda1, c(0.24, 0.32), tolerance 
= 1e-07) expect_equal(pwSurvivalTime5$lambda2, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime5$hazardRatio, c(0.6, 0.8), tolerance = 1e-07) expect_equal(pwSurvivalTime5$pi1, c(0.94386524, 0.9785064), tolerance = 1e-07) expect_equal(pwSurvivalTime5$pi2, 0.99177025, tolerance = 1e-07) expect_equal(pwSurvivalTime5$median1, c(2.8881133, 2.1660849), tolerance = 1e-07) expect_equal(pwSurvivalTime5$median2, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime5$eventTime, 12) expect_equal(pwSurvivalTime5$kappa, 1) expect_equal(pwSurvivalTime5$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime5$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime5$delayedResponseEnabled, FALSE) pwSurvivalTime6 <- getPiecewiseSurvivalTime(lambda2 = 0.4, lambda1 = 0.3, pi2 = 0.4, pi1 = 0.3) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime6' with expected results ## expect_equal(pwSurvivalTime6$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime6$lambda1, 0.3, tolerance = 1e-07) expect_equal(pwSurvivalTime6$lambda2, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime6$hazardRatio, 0.75, tolerance = 1e-07) expect_equal(pwSurvivalTime6$pi1, 0.97267628, tolerance = 1e-07) expect_equal(pwSurvivalTime6$pi2, 0.99177025, tolerance = 1e-07) expect_equal(pwSurvivalTime6$median1, 2.3104906, tolerance = 1e-07) expect_equal(pwSurvivalTime6$median2, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime6$eventTime, 12) expect_equal(pwSurvivalTime6$kappa, 1) expect_equal(pwSurvivalTime6$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime6$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime6$delayedResponseEnabled, FALSE) pwSurvivalTime7 <- getPiecewiseSurvivalTime(lambda2 = 0.4, lambda1 = 0.3, pi2 = 0.4, pi1 = 0.3) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime7' with expected results ## expect_equal(pwSurvivalTime7$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime7$lambda1, 0.3, tolerance = 1e-07) expect_equal(pwSurvivalTime7$lambda2, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime7$hazardRatio, 0.75, tolerance = 1e-07) expect_equal(pwSurvivalTime7$pi1, 0.97267628, tolerance = 1e-07) expect_equal(pwSurvivalTime7$pi2, 0.99177025, tolerance = 1e-07) expect_equal(pwSurvivalTime7$median1, 2.3104906, tolerance = 1e-07) expect_equal(pwSurvivalTime7$median2, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime7$eventTime, 12) expect_equal(pwSurvivalTime7$kappa, 1) expect_equal(pwSurvivalTime7$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime7$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime7$delayedResponseEnabled, FALSE) pwSurvivalTime8 <- getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), pi2 = 0.4, pi1 = 0.3) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime8' with expected results ## expect_equal(pwSurvivalTime8$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime8$lambda1, 0.029722912, tolerance = 1e-07) expect_equal(pwSurvivalTime8$lambda2, 0.042568802, tolerance = 1e-07) expect_equal(pwSurvivalTime8$hazardRatio, 0.69823229, tolerance = 1e-07) expect_equal(pwSurvivalTime8$pi1, 0.3, tolerance = 1e-07) expect_equal(pwSurvivalTime8$pi2, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime8$median1, 23.320299, tolerance = 1e-07) expect_equal(pwSurvivalTime8$median2, 16.282985, tolerance = 1e-07) expect_equal(pwSurvivalTime8$eventTime, 12) expect_equal(pwSurvivalTime8$kappa, 1) expect_equal(pwSurvivalTime8$piecewiseSurvivalEnabled, FALSE) 
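# If both pi1 and pi2 are specified, the hazard ratio is evidently derived
# from them and any user-supplied value is ignored (see the warning tests
# further below); under the exponential model:
log(1 - 0.3) / log(1 - 0.4) # = 0.69823229, the expected hazardRatio above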
expect_equal(pwSurvivalTime8$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime8$delayedResponseEnabled, FALSE) pwSurvivalTime9 <- getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), pi1 = 0.3) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime9' with expected results ## expect_equal(pwSurvivalTime9$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime9$lambda1, 0.029722912, tolerance = 1e-07) expect_equal(pwSurvivalTime9$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(pwSurvivalTime9$hazardRatio, 1.5984103, tolerance = 1e-07) expect_equal(pwSurvivalTime9$pi1, 0.3, tolerance = 1e-07) expect_equal(pwSurvivalTime9$pi2, 0.2, tolerance = 1e-07) expect_equal(pwSurvivalTime9$median1, 23.320299, tolerance = 1e-07) expect_equal(pwSurvivalTime9$median2, 37.275405, tolerance = 1e-07) expect_equal(pwSurvivalTime9$eventTime, 12) expect_equal(pwSurvivalTime9$kappa, 1) expect_equal(pwSurvivalTime9$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime9$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime9$delayedResponseEnabled, FALSE) pwSurvivalTime10 <- getPiecewiseSurvivalTime(median2 = 1.386294, hazardRatio = 0.8) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime10' with expected results ## expect_equal(pwSurvivalTime10$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime10$lambda1, 0.4000001, tolerance = 1e-07) expect_equal(pwSurvivalTime10$lambda2, 0.50000013, tolerance = 1e-07) expect_equal(pwSurvivalTime10$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime10$pi1, 0.99177026, tolerance = 1e-07) expect_equal(pwSurvivalTime10$pi2, 0.99752125, tolerance = 1e-07) expect_equal(pwSurvivalTime10$median1, 1.7328675, tolerance = 1e-07) expect_equal(pwSurvivalTime10$median2, 1.386294, tolerance = 1e-07) expect_equal(pwSurvivalTime10$eventTime, 12) expect_equal(pwSurvivalTime10$kappa, 1) expect_equal(pwSurvivalTime10$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime10$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime10$delayedResponseEnabled, FALSE) pwSurvivalTime11 <- getPiecewiseSurvivalTime(median2 = 1.386294, lambda1 = 0.4) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime11' with expected results ## expect_equal(pwSurvivalTime11$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime11$lambda1, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime11$lambda2, 0.50000013, tolerance = 1e-07) expect_equal(pwSurvivalTime11$hazardRatio, 0.79999979, tolerance = 1e-07) expect_equal(pwSurvivalTime11$pi1, 0.99177025, tolerance = 1e-07) expect_equal(pwSurvivalTime11$pi2, 0.99752125, tolerance = 1e-07) expect_equal(pwSurvivalTime11$median1, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime11$median2, 1.386294, tolerance = 1e-07) expect_equal(pwSurvivalTime11$eventTime, 12) expect_equal(pwSurvivalTime11$kappa, 1) expect_equal(pwSurvivalTime11$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime11$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime11$delayedResponseEnabled, FALSE) pwSurvivalTime12 <- getPiecewiseSurvivalTime(median2 = 5, median1 = 6) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime12' with expected results ## expect_equal(pwSurvivalTime12$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime12$lambda1, 0.11552453, tolerance = 1e-07) expect_equal(pwSurvivalTime12$lambda2, 0.13862944, tolerance = 1e-07) expect_equal(pwSurvivalTime12$hazardRatio, 0.83333333, tolerance = 1e-07) 
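# With only medians specified, the event probabilities at eventTime = 12
# appear to follow pi = 1 - 2^(-eventTime / median):
1 - 2^(-12 / 6) # = 0.75 (pi1 below)
1 - 2^(-12 / 5) # = 0.81053543 (pi2 below)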
expect_equal(pwSurvivalTime12$pi1, 0.75, tolerance = 1e-07) expect_equal(pwSurvivalTime12$pi2, 0.81053543, tolerance = 1e-07) expect_equal(pwSurvivalTime12$median1, 6) expect_equal(pwSurvivalTime12$median2, 5) expect_equal(pwSurvivalTime12$eventTime, 12) expect_equal(pwSurvivalTime12$kappa, 1) expect_equal(pwSurvivalTime12$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime12$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime12$delayedResponseEnabled, FALSE) pwSurvivalTime13 <- getPiecewiseSurvivalTime(median2 = 1.386294, lambda1 = c(0.3, 0.4)) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime13' with expected results ## expect_equal(pwSurvivalTime13$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime13$lambda1, c(0.3, 0.4), tolerance = 1e-07) expect_equal(pwSurvivalTime13$lambda2, 0.50000013, tolerance = 1e-07) expect_equal(pwSurvivalTime13$hazardRatio, c(0.59999984, 0.79999979), tolerance = 1e-07) expect_equal(pwSurvivalTime13$pi1, c(0.97267628, 0.99177025), tolerance = 1e-07) expect_equal(pwSurvivalTime13$pi2, 0.99752125, tolerance = 1e-07) expect_equal(pwSurvivalTime13$median1, c(2.3104906, 1.732868), tolerance = 1e-07) expect_equal(pwSurvivalTime13$median2, 1.386294, tolerance = 1e-07) expect_equal(pwSurvivalTime13$eventTime, 12) expect_equal(pwSurvivalTime13$kappa, 1) expect_equal(pwSurvivalTime13$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime13$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime13$delayedResponseEnabled, FALSE) pwSurvivalTime14 <- getPiecewiseSurvivalTime(median2 = 5, median1 = c(6:8)) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime14' with expected results ## expect_equal(pwSurvivalTime14$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime14$lambda1, c(0.11552453, 0.099021026, 0.086643398), tolerance = 1e-07) expect_equal(pwSurvivalTime14$lambda2, 0.13862944, tolerance = 1e-07) expect_equal(pwSurvivalTime14$hazardRatio, c(0.83333333, 0.71428571, 0.625), tolerance = 1e-07) expect_equal(pwSurvivalTime14$pi1, c(0.75, 0.69524659, 0.64644661), tolerance = 1e-07) expect_equal(pwSurvivalTime14$pi2, 0.81053543, tolerance = 1e-07) expect_equal(pwSurvivalTime14$median1, c(6, 7, 8)) expect_equal(pwSurvivalTime14$median2, 5) expect_equal(pwSurvivalTime14$eventTime, 12) expect_equal(pwSurvivalTime14$kappa, 1) expect_equal(pwSurvivalTime14$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime14$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime14$delayedResponseEnabled, FALSE) pwSurvivalTime15 <- getPiecewiseSurvivalTime(median2 = 2, hazardRatio = 0.8) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime15' with expected results ## expect_equal(pwSurvivalTime15$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime15$lambda1, 0.27725887, tolerance = 1e-07) expect_equal(pwSurvivalTime15$lambda2, 0.34657359, tolerance = 1e-07) expect_equal(pwSurvivalTime15$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime15$pi1, 0.96410318, tolerance = 1e-07) expect_equal(pwSurvivalTime15$pi2, 0.984375, tolerance = 1e-07) expect_equal(pwSurvivalTime15$median1, 2.5, tolerance = 1e-07) expect_equal(pwSurvivalTime15$median2, 2) expect_equal(pwSurvivalTime15$eventTime, 12) expect_equal(pwSurvivalTime15$kappa, 1) expect_equal(pwSurvivalTime15$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime15$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime15$delayedResponseEnabled, FALSE) pwSurvivalTime16 <- 
getPiecewiseSurvivalTime(median1 = c(2, 3), hazardRatio = 0.8) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime16' with expected results ## expect_equal(pwSurvivalTime16$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime16$lambda1, c(0.34657359, 0.23104906), tolerance = 1e-07) expect_equal(pwSurvivalTime16$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(pwSurvivalTime16$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime16$pi1, c(0.984375, 0.9375), tolerance = 1e-07) expect_equal(pwSurvivalTime16$pi2, 0.2, tolerance = 1e-07) expect_equal(pwSurvivalTime16$median1, c(2, 3)) expect_equal(pwSurvivalTime16$median2, 37.275405, tolerance = 1e-07) expect_equal(pwSurvivalTime16$eventTime, 12) expect_equal(pwSurvivalTime16$kappa, 1) expect_equal(pwSurvivalTime16$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime16$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime16$delayedResponseEnabled, FALSE) pwSurvivalTime17 <- getPiecewiseSurvivalTime(median1 = c(2, 3), median2 = 4) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime17' with expected results ## expect_equal(pwSurvivalTime17$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime17$lambda1, c(0.34657359, 0.23104906), tolerance = 1e-07) expect_equal(pwSurvivalTime17$lambda2, 0.1732868, tolerance = 1e-07) expect_equal(pwSurvivalTime17$hazardRatio, c(2, 1.3333333), tolerance = 1e-07) expect_equal(pwSurvivalTime17$pi1, c(0.984375, 0.9375), tolerance = 1e-07) expect_equal(pwSurvivalTime17$pi2, 0.875, tolerance = 1e-07) expect_equal(pwSurvivalTime17$median1, c(2, 3)) expect_equal(pwSurvivalTime17$median2, 4) expect_equal(pwSurvivalTime17$eventTime, 12) expect_equal(pwSurvivalTime17$kappa, 1) expect_equal(pwSurvivalTime17$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime17$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime17$delayedResponseEnabled, FALSE) pwSurvivalTime18 <- getPiecewiseSurvivalTime(median1 = c(2, 3), lambda2 = 0.4) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime18' with expected results ## expect_equal(pwSurvivalTime18$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime18$lambda1, c(0.34657359, 0.23104906), tolerance = 1e-07) expect_equal(pwSurvivalTime18$lambda2, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime18$hazardRatio, c(0.86643398, 0.57762265), tolerance = 1e-07) expect_equal(pwSurvivalTime18$pi1, c(0.984375, 0.9375), tolerance = 1e-07) expect_equal(pwSurvivalTime18$pi2, 0.99177025, tolerance = 1e-07) expect_equal(pwSurvivalTime18$median1, c(2, 3)) expect_equal(pwSurvivalTime18$median2, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime18$eventTime, 12) expect_equal(pwSurvivalTime18$kappa, 1) expect_equal(pwSurvivalTime18$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime18$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime18$delayedResponseEnabled, FALSE) pwSurvivalTime19 <- getPiecewiseSurvivalTime(median1 = c(2, 3), pi2 = 0.4) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime19' with expected results ## expect_equal(pwSurvivalTime19$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime19$lambda1, c(0.34657359, 0.23104906), tolerance = 1e-07) expect_equal(pwSurvivalTime19$lambda2, 0.042568802, tolerance = 1e-07) expect_equal(pwSurvivalTime19$hazardRatio, c(8.1414927, 5.4276618), tolerance = 1e-07) expect_equal(pwSurvivalTime19$pi1, c(0.984375, 0.9375), tolerance = 1e-07) 
expect_equal(pwSurvivalTime19$pi2, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime19$median1, c(2, 3)) expect_equal(pwSurvivalTime19$median2, 16.282985, tolerance = 1e-07) expect_equal(pwSurvivalTime19$eventTime, 12) expect_equal(pwSurvivalTime19$kappa, 1) expect_equal(pwSurvivalTime19$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime19$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime19$delayedResponseEnabled, FALSE) pwSurvivalTime20 <- getPiecewiseSurvivalTime(median1 = c(2, 3), hazardRatio = 0.8) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime20' with expected results ## expect_equal(pwSurvivalTime20$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime20$lambda1, c(0.34657359, 0.23104906), tolerance = 1e-07) expect_equal(pwSurvivalTime20$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(pwSurvivalTime20$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime20$pi1, c(0.984375, 0.9375), tolerance = 1e-07) expect_equal(pwSurvivalTime20$pi2, 0.2, tolerance = 1e-07) expect_equal(pwSurvivalTime20$median1, c(2, 3)) expect_equal(pwSurvivalTime20$median2, 37.275405, tolerance = 1e-07) expect_equal(pwSurvivalTime20$eventTime, 12) expect_equal(pwSurvivalTime20$kappa, 1) expect_equal(pwSurvivalTime20$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime20$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime20$delayedResponseEnabled, FALSE) pwSurvivalTime21 <- getPiecewiseSurvivalTime(median1 = 3, hazardRatio = 0.8) ## ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime21' with expected results ## expect_equal(pwSurvivalTime21$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime21$lambda1, 0.23104906, tolerance = 1e-07) expect_equal(pwSurvivalTime21$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(pwSurvivalTime21$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime21$pi1, 0.9375, tolerance = 1e-07) expect_equal(pwSurvivalTime21$pi2, 0.2, tolerance = 1e-07) expect_equal(pwSurvivalTime21$median1, 3) expect_equal(pwSurvivalTime21$median2, 37.275405, tolerance = 1e-07) expect_equal(pwSurvivalTime21$eventTime, 12) expect_equal(pwSurvivalTime21$kappa, 1) expect_equal(pwSurvivalTime21$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime21$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime21$delayedResponseEnabled, FALSE) getPiecewiseSurvivalTime(median1 = c(2, 3), lambda2 = 0.8) expect_error(getPiecewiseSurvivalTime(median2 = 1.386294, lambda2 = 0.4, hazardRatio = 0.8)) expect_error(getPiecewiseSurvivalTime(median2 = c(1.5, 1.7), lambda1 = c(0.3, 0.4))) }) test_that("Testing 'getPiecewiseSurvivalTime': vector based definition", { pwSurvivalTime1 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 6, 9), lambda2 = c(0.025, 0.04, 0.015), hazardRatio = 0.8) expect_equal(pwSurvivalTime1$hazardRatio, 0.8) expect_equal(pwSurvivalTime1$lambda1, c(0.025, 0.04, 0.015) * 0.8) expect_false(pwSurvivalTime1$isDelayedResponseEnabled()) pwSurvivalTime2 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.1, 0.2, 0.8), hazardRatio = 0.8) expect_true(pwSurvivalTime2$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime2$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime2$hazardRatio, 0.8) expect_equal(pwSurvivalTime2$piecewiseSurvivalTime, c(0, 5, 10)) expect_equal(pwSurvivalTime2$lambda2, c(0.1, 0.2, 0.8)) pwSurvivalTime3 <- getPiecewiseSurvivalTime(c(0, 6), lambda2 = c(0.01, 0.03), hazardRatio = 0.8) 
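# As for pwSurvivalTime1 above, a scalar hazardRatio scales the control
# hazards piecewise (lambda1 = hazardRatio * lambda2, elementwise), e.g.:
0.8 * c(0.01, 0.03) # = c(0.008, 0.024), the implied lambda1 here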
expect_true(pwSurvivalTime3$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime3$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime3$hazardRatio, 0.8) expect_equal(pwSurvivalTime3$piecewiseSurvivalTime, c(0, 6)) expect_equal(pwSurvivalTime3$lambda2, c(0.01, 0.03)) pwSurvivalTime4 <- getPiecewiseSurvivalTime(0, lambda2 = 0.01, hazardRatio = 0.8) expect_true(pwSurvivalTime4$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime4$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime4$hazardRatio, 0.8) expect_equal(pwSurvivalTime4$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime4$lambda2, 0.01) expect_equal(pwSurvivalTime4$lambda1, 0.01 * 0.8) pwSurvivalTime5 <- getPiecewiseSurvivalTime(NA_real_, lambda2 = 0.01, hazardRatio = 0.8) expect_true(pwSurvivalTime5$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime5$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime5$hazardRatio, 0.8) expect_equal(pwSurvivalTime5$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime5$lambda2, 0.01) expect_equal(pwSurvivalTime5$lambda1, 0.01 * 0.8) pwSurvivalTime6 <- getPiecewiseSurvivalTime(0, lambda2 = 0.01, lambda1 = 0.008) expect_true(pwSurvivalTime6$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime6$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime6$hazardRatio, 0.8) expect_equal(pwSurvivalTime6$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime6$lambda2, 0.01) expect_equal(pwSurvivalTime6$lambda1, 0.008) pwSurvivalTime7 <- getPiecewiseSurvivalTime(NA_real_, lambda2 = 0.01, lambda1 = 0.008) expect_true(pwSurvivalTime7$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime7$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime7$hazardRatio, 0.8) expect_equal(pwSurvivalTime7$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime7$lambda2, 0.01) expect_equal(pwSurvivalTime7$lambda1, 0.008) # case 2.2 pwSurvivalTime9 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 6, 9), lambda2 = c(0.025, 0.04, 0.015), lambda1 = c(0.025, 0.04, 0.015) * 0.8) expect_true(pwSurvivalTime9$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime9$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime9$hazardRatio, 0.8) # case 2.2: error expected expect_error(getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 6, 9), lambda2 = c(0.025, 0.04, 0.015), lambda1 = c(0.03, 0.04, 0.025)), paste0("Illegal argument: 'hazardRatio' can only be calculated if ", "'unique(lambda1 / lambda2)' result in a single value; ", "current result = c(1.2, 1, 1.667) (delayed response is not allowed)"), fixed = TRUE) # case 3 expect_false(getPiecewiseSurvivalTime(delayedResponseAllowed = TRUE)$isPiecewiseSurvivalEnabled()) expect_false(getPiecewiseSurvivalTime(piecewiseSurvivalTime = NA, delayedResponseAllowed = TRUE)$isPiecewiseSurvivalEnabled()) # case 3.1 pwSurvivalTimeSim1 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 6, 9), lambda2 = c(0.025, 0.04, 0.015), hazardRatio = 0.8, delayedResponseAllowed = TRUE) expect_equal(pwSurvivalTimeSim1$hazardRatio, 0.8) expect_equal(pwSurvivalTimeSim1$lambda1, c(0.025, 0.04, 0.015) * 0.8) expect_false(pwSurvivalTimeSim1$isDelayedResponseEnabled()) # case 3.2 pwSurvivalTimeSim2 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 6, 9), lambda2 = c(0.025, 0.04, 0.015), lambda1 = c(0.03, 0.04, 0.025), delayedResponseAllowed = TRUE) expect_true(pwSurvivalTimeSim2$isPiecewiseSurvivalEnabled()) expect_true(pwSurvivalTimeSim2$isDelayedResponseEnabled()) expect_equal(pwSurvivalTimeSim2$hazardRatio, c(1.2, 1, 5/3)) pwsTime1 <- 
getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), lambda2 = 0.4) expect_equal(pwsTime1$.isLambdaBased(minNumberOfLambdas = 1), TRUE) }) test_that("Testing 'getPiecewiseSurvivalTime': check warnings", { expect_warning(getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), lambda2 = 0.4, pi2 = 0.4), "'pi2' (0.4) will be ignored", fixed = TRUE) expect_warning(getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), lambda2 = 0.4, pi2 = 0.4, pi1 = 0.3), "'pi1' (0.3) will be ignored", fixed = TRUE) expect_warning(getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), lambda2 = 0.4, pi2 = 0.4, pi1 = 0.3), "'pi2' (0.4) will be ignored", fixed = TRUE) expect_warning(getPiecewiseSurvivalTime(lambda2 = 0.4, lambda1 = 0.3, pi2 = 0.4, pi1 = 0.3), "'pi1' (0.3) will be ignored", fixed = TRUE) expect_warning(getPiecewiseSurvivalTime(lambda2 = 0.4, lambda1 = 0.3, pi2 = 0.4, pi1 = 0.3), "'pi2' (0.4) will be ignored", fixed = TRUE) expect_equal(getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), pi2 = 0.4)$.isPiBased(), TRUE) expect_warning(getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), pi2 = 0.4, pi1 = 0.3), "'hazardRatio' (0.6, 0.8) will be ignored because it will be calculated", fixed = TRUE) expect_warning(getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), pi1 = 0.3), "'hazardRatio' (0.6, 0.8) will be ignored because it will be calculated", fixed = TRUE) }) test_that("Testing 'getPiecewiseSurvivalTime': list-wise definition", { pwSurvivalTime8 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = list( "<6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, "15 - <21" = 0.01, ">=21" = 0.007), hazardRatio = 0.6) expect_true(pwSurvivalTime8$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime8$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime8$hazardRatio, 0.6) expect_equal(pwSurvivalTime8$piecewiseSurvivalTime, c(0, 6, 9, 15, 21)) expect_equal(pwSurvivalTime8$lambda2, c(0.025, 0.040, 0.015, 0.010, 0.007)) expect_equal(pwSurvivalTime8$lambda1, c(0.0150, 0.0240, 0.0090, 0.0060, 0.0042)) result1 <- getPiecewiseSurvivalTime(list( "<5" = 0.1, "5 - <10" = 0.2, ">=10" = 0.8), hazardRatio = 0.8) expect_equal(result1$piecewiseSurvivalTime, c(0, 5, 10)) expect_equal(result1$lambda2, c(0.1, 0.2, 0.8)) result2 <- getPiecewiseSurvivalTime(list( "0 - <5" = 0.1, "5 - <10" = 0.2, "10 - Inf" = 0.8), hazardRatio = 0.8) expect_equal(result2$piecewiseSurvivalTime, c(0, 5, 10)) expect_equal(result2$lambda2, c(0.1, 0.2, 0.8)) pwSurvivalTime2 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.1, 0.2, 0.8), hazardRatio = 0.8) expect_equal(pwSurvivalTime2$piecewiseSurvivalTime, c(0, 5, 10)) expect_equal(pwSurvivalTime2$lambda2, c(0.1, 0.2, 0.8)) pwSurvivalTime3 <- getPiecewiseSurvivalTime(c(0, 6), lambda2 = c(0.01, 0.03), hazardRatio = 0.8) expect_equal(pwSurvivalTime3$piecewiseSurvivalTime, c(0, 6)) expect_equal(pwSurvivalTime3$lambda2, c(0.01, 0.03)) }) context("Testing class 'AccrualTime'") test_that("Testing 'getAccrualTime': isAccrualTimeEnabled()", { expect_true(getAccrualTime()$isAccrualTimeEnabled()) expect_true(getAccrualTime(maxNumberOfSubjects = 100)$isAccrualTimeEnabled()) }) test_that("Testing 'getAccrualTime': vector based definition", { accrualTime1 <- getAccrualTime(accrualTime = c(0, 6, 9, 15), accrualIntensity = c(15, 21, 27), maxNumberOfSubjects = 315) expect_equal(accrualTime1$accrualTime, c(0, 6, 9, 15)) expect_equal(accrualTime1$accrualIntensity, c(15, 21, 27)) expect_equal(accrualTime1$remainingTime, 6) accrualTime2 <- getAccrualTime(accrualTime = c(0, 6, 9), accrualIntensity =
c(15, 21, 27), maxNumberOfSubjects = 1000) expect_equal(accrualTime2$accrualTime, c(0, 6, 9, 40.37037)) expect_equal(accrualTime2$accrualIntensity, c(15, 21, 27)) expect_equal(accrualTime2$remainingTime, 31.37037) accrualTime3 <- getAccrualTime(accrualTime = c(0, 12, 13, 14, 15, 16), accrualIntensity = c(15, 21, 27, 33, 39, 45), maxNumberOfSubjects = 1405) expect_equal(accrualTime3$accrualTime, c( 0, 12, 13, 14, 15, 16, 40.55555556)) expect_equal(accrualTime3$accrualIntensity, c(15, 21, 27, 33, 39, 45)) expect_equal(accrualTime3$remainingTime, 24.55555556) accrualTime4 <- getAccrualTime(accrualTime = c(0, 24), accrualIntensity = c(30, 45), maxNumberOfSubjects = 720) ## ## Comparison of the results of AccrualTime object 'accrualTime4' with expected results ## expect_equal(accrualTime4$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime4$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime4$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime4$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime4$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime4$accrualTime, c(0, 24)) expect_equal(accrualTime4$accrualIntensity, 30) expect_equal(accrualTime4$accrualIntensityRelative, NA_real_) expect_equal(accrualTime4$maxNumberOfSubjects, 720) expect_equal(accrualTime4$remainingTime, 24) expect_equal(accrualTime4$piecewiseAccrualEnabled, FALSE) accrualTime5 <- getAccrualTime(accrualTime = c(0, 24, 30), accrualIntensity = c(30, 45)) ## ## Comparison of the results of AccrualTime object 'accrualTime5' with expected results ## expect_equal(accrualTime5$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime5$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime5$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime5$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime5$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime5$accrualTime, c(0, 24, 30)) expect_equal(accrualTime5$accrualIntensity, c(30, 45)) expect_equal(accrualTime5$accrualIntensityRelative, NA_real_) expect_equal(accrualTime5$maxNumberOfSubjects, 990) expect_equal(accrualTime5$remainingTime, 6) expect_equal(accrualTime5$piecewiseAccrualEnabled, TRUE) accrualTime6 <- getAccrualTime(accrualTime = c(0, 24, 30), accrualIntensity = c(30, 45, 55), maxNumberOfSubjects = 720) ## ## Comparison of the results of AccrualTime object 'accrualTime6' with expected results ## expect_equal(accrualTime6$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime6$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime6$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime6$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime6$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime6$accrualTime, c(0, 24)) expect_equal(accrualTime6$accrualIntensity, 30) expect_equal(accrualTime6$accrualIntensityRelative, NA_real_) expect_equal(accrualTime6$maxNumberOfSubjects, 720) expect_equal(accrualTime6$remainingTime, 24) expect_equal(accrualTime6$piecewiseAccrualEnabled, FALSE) accrualTime7 <- getAccrualTime(accrualTime = c(0, 24, 30, 40), accrualIntensity = c(30, 45, 55, 66), maxNumberOfSubjects = 720) ## ## Comparison of the results of AccrualTime object 'accrualTime7' with expected results ## expect_equal(accrualTime7$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime7$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime7$maxNumberOfSubjectsIsUserDefined, TRUE) 
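# accrualTime6 and accrualTime7 show the truncation behavior: at an absolute
# intensity of 30, the 720 subjects are already accrued after 24 months
# (24 * 30 = 720), so the later accrual pieces are dropped (the corresponding
# messages are checked in the warning tests near the end of this file):
24 * 30 # = 720 = maxNumberOfSubjects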
expect_equal(accrualTime7$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime7$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime7$accrualTime, c(0, 24)) expect_equal(accrualTime7$accrualIntensity, 30) expect_equal(accrualTime7$accrualIntensityRelative, NA_real_) expect_equal(accrualTime7$maxNumberOfSubjects, 720) expect_equal(accrualTime7$remainingTime, 24) expect_equal(accrualTime7$piecewiseAccrualEnabled, FALSE) accrualTime8 <- getAccrualTime(accrualTime = 0, accrualIntensity = 15, maxNumberOfSubjects = 1000) ## ## Comparison of the results of AccrualTime object 'accrualTime8' with expected results ## expect_equal(accrualTime8$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime8$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime8$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime8$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime8$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime8$accrualTime, c(0, 66.666667), tolerance = 1e-07) expect_equal(accrualTime8$accrualIntensity, 15) expect_equal(accrualTime8$accrualIntensityRelative, NA_real_) expect_equal(accrualTime8$maxNumberOfSubjects, 1000) expect_equal(accrualTime8$remainingTime, 66.666667, tolerance = 1e-07) expect_equal(accrualTime8$piecewiseAccrualEnabled, FALSE) accrualTime9 <- getAccrualTime(accrualTime = c(0, 5), accrualIntensity = 15) ## ## Comparison of the results of AccrualTime object 'accrualTime9' with expected results ## expect_equal(accrualTime9$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime9$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime9$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime9$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime9$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime9$accrualTime, c(0, 5)) expect_equal(accrualTime9$accrualIntensity, 15) expect_equal(accrualTime9$accrualIntensityRelative, NA_real_) expect_equal(accrualTime9$maxNumberOfSubjects, 75) expect_equal(accrualTime9$remainingTime, 5) expect_equal(accrualTime9$piecewiseAccrualEnabled, FALSE) accrualTime10 <- getAccrualTime(accrualTime = 0, accrualIntensity = 15, maxNumberOfSubjects = 10) ## ## Comparison of the results of AccrualTime object 'accrualTime10' with expected results ## expect_equal(accrualTime10$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime10$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime10$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime10$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime10$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime10$accrualTime, c(0, 0.66666667), tolerance = 1e-07) expect_equal(accrualTime10$accrualIntensity, 15) expect_equal(accrualTime10$accrualIntensityRelative, NA_real_) expect_equal(accrualTime10$maxNumberOfSubjects, 10) expect_equal(accrualTime10$remainingTime, 0.66666667, tolerance = 1e-07) expect_equal(accrualTime10$piecewiseAccrualEnabled, FALSE) accrualTime11 <- getAccrualTime(accrualTime = c(0, 5), accrualIntensity = 15, maxNumberOfSubjects = 10) ## ## Comparison of the results of AccrualTime object 'accrualTime11' with expected results ## expect_equal(accrualTime11$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime11$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime11$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime11$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) 
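# If maxNumberOfSubjects is reached before the specified end of accrual, the
# accrual period is apparently shortened accordingly, as in accrualTime10 and
# accrualTime11:
10 / 15 # = 0.66666667, the expected end of accrual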
expect_equal(accrualTime11$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime11$accrualTime, c(0, 0.66666667), tolerance = 1e-07) expect_equal(accrualTime11$accrualIntensity, 15) expect_equal(accrualTime11$accrualIntensityRelative, NA_real_) expect_equal(accrualTime11$maxNumberOfSubjects, 10) expect_equal(accrualTime11$remainingTime, 0.66666667, tolerance = 1e-07) expect_equal(accrualTime11$piecewiseAccrualEnabled, FALSE) accrualTime12 <- getAccrualTime(accrualTime = c(0, 6, 15, 25), accrualIntensity = c(22, 0, 33)) ## ## Comparison of the results of AccrualTime object 'accrualTime12' with expected results ## expect_equal(accrualTime12$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime12$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime12$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime12$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime12$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime12$accrualTime, c(0, 6, 15, 25)) expect_equal(accrualTime12$accrualIntensity, c(22, 0, 33)) expect_equal(accrualTime12$accrualIntensityRelative, NA_real_) expect_equal(accrualTime12$maxNumberOfSubjects, 462) expect_equal(accrualTime12$remainingTime, 10) expect_equal(accrualTime12$piecewiseAccrualEnabled, TRUE) accrualTime13 <- getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(22, 33), maxNumberOfSubjects = 1000) ## ## Comparison of the results of AccrualTime object 'accrualTime13' with expected results ## expect_equal(accrualTime13$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime13$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime13$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime13$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime13$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime13$accrualTime, c(0, 6, 32.30303), tolerance = 1e-07) expect_equal(accrualTime13$accrualIntensity, c(22, 33)) expect_equal(accrualTime13$accrualIntensityRelative, NA_real_) expect_equal(accrualTime13$maxNumberOfSubjects, 1000) expect_equal(accrualTime13$remainingTime, 26.30303, tolerance = 1e-07) expect_equal(accrualTime13$piecewiseAccrualEnabled, TRUE) }) test_that("Testing 'getAccrualTime': test absolute and relative definition", { accrualTime1 <- getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(22, 33), maxNumberOfSubjects = 924) ## ## Comparison of the results of AccrualTime object 'accrualTime1' with expected results ## expect_equal(accrualTime1$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime1$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime1$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime1$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime1$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime1$accrualTime, c(0, 6, 30)) expect_equal(accrualTime1$accrualIntensity, c(22, 33)) expect_equal(accrualTime1$accrualIntensityRelative, NA_real_) expect_equal(accrualTime1$maxNumberOfSubjects, 924) expect_equal(accrualTime1$remainingTime, 24) expect_equal(accrualTime1$piecewiseAccrualEnabled, TRUE) accrualTime2 <- getAccrualTime(list( "0 - <6" = 22, "6 - <=30" = 33), maxNumberOfSubjects = 924) ## ## Comparison of the results of AccrualTime object 'accrualTime2' with expected results ## expect_equal(accrualTime2$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime2$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime2$maxNumberOfSubjectsIsUserDefined, TRUE) 
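# The next definitions (accrualTime3 and accrualTime4 below) use intensities
# below 1, which are interpreted as relative weights; they appear to be
# rescaled so that the accrued total equals maxNumberOfSubjects, i.e.
# absolute = relative * maxNumberOfSubjects / sum(relative * interval width):
0.22 * 1000 / (0.22 * 6 + 0.33 * 24) # = 23.809524 (first absolute intensity)
0.33 * 1000 / (0.22 * 6 + 0.33 * 24) # = 35.714286 (second absolute intensity)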
expect_equal(accrualTime2$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime2$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime2$accrualTime, c(0, 6, 30)) expect_equal(accrualTime2$accrualIntensity, c(22, 33)) expect_equal(accrualTime2$accrualIntensityRelative, NA_real_) expect_equal(accrualTime2$maxNumberOfSubjects, 924) expect_equal(accrualTime2$remainingTime, 24) expect_equal(accrualTime2$piecewiseAccrualEnabled, TRUE) accrualTime3 <- getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(0.22, 0.33), maxNumberOfSubjects = 1000) ## ## Comparison of the results of AccrualTime object 'accrualTime3' with expected results ## expect_equal(accrualTime3$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime3$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime3$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime3$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime3$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime3$accrualTime, c(0, 6, 30)) expect_equal(accrualTime3$accrualIntensity, c(23.809524, 35.714286), tolerance = 1e-07) expect_equal(accrualTime3$accrualIntensityRelative, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime3$maxNumberOfSubjects, 1000) expect_equal(accrualTime3$remainingTime, 24) expect_equal(accrualTime3$piecewiseAccrualEnabled, TRUE) accrualTime4 <- getAccrualTime(list( "0 - <6" = 0.22, "6 - <=30" = 0.33), maxNumberOfSubjects = 1000) ## ## Comparison of the results of AccrualTime object 'accrualTime4' with expected results ## expect_equal(accrualTime4$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime4$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime4$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime4$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime4$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime4$accrualTime, c(0, 6, 30)) expect_equal(accrualTime4$accrualIntensity, c(23.809524, 35.714286), tolerance = 1e-07) expect_equal(accrualTime4$accrualIntensityRelative, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime4$maxNumberOfSubjects, 1000) expect_equal(accrualTime4$remainingTime, 24) expect_equal(accrualTime4$piecewiseAccrualEnabled, TRUE) accrualTime5 <- getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(22, 33)) ## ## Comparison of the results of AccrualTime object 'accrualTime5' with expected results ## expect_equal(accrualTime5$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime5$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime5$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime5$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime5$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime5$accrualTime, c(0, 6, 30)) expect_equal(accrualTime5$accrualIntensity, c(22, 33)) expect_equal(accrualTime5$accrualIntensityRelative, NA_real_) expect_equal(accrualTime5$maxNumberOfSubjects, 924) expect_equal(accrualTime5$remainingTime, 24) expect_equal(accrualTime5$piecewiseAccrualEnabled, TRUE) accrualTime6 <- getAccrualTime(list( "0 - <6" = 22, "6 - <=30" = 33)) ## ## Comparison of the results of AccrualTime object 'accrualTime6' with expected results ## expect_equal(accrualTime6$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime6$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime6$maxNumberOfSubjectsIsUserDefined, FALSE) 
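# Without a given maxNumberOfSubjects, absolute intensities determine the
# total accrual directly from the piecewise areas, as for accrualTime5 above
# and accrualTime6 below:
sum(c(22, 33) * diff(c(0, 6, 30))) # = 22 * 6 + 33 * 24 = 924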
expect_equal(accrualTime6$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime6$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime6$accrualTime, c(0, 6, 30)) expect_equal(accrualTime6$accrualIntensity, c(22, 33)) expect_equal(accrualTime6$accrualIntensityRelative, NA_real_) expect_equal(accrualTime6$maxNumberOfSubjects, 924) expect_equal(accrualTime6$remainingTime, 24) expect_equal(accrualTime6$piecewiseAccrualEnabled, TRUE) accrualTime7 <- getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(0.22, 0.33)) ## ## Comparison of the results of AccrualTime object 'accrualTime7' with expected results ## expect_equal(accrualTime7$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime7$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime7$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime7$maxNumberOfSubjectsCanBeCalculatedDirectly, FALSE) expect_equal(accrualTime7$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime7$accrualTime, c(0, 6, 30)) expect_equal(accrualTime7$accrualIntensity, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime7$accrualIntensityRelative, NA_real_) expect_equal(accrualTime7$maxNumberOfSubjects, NA_real_) expect_equal(accrualTime7$remainingTime, NA_real_) expect_equal(accrualTime7$piecewiseAccrualEnabled, TRUE) accrualTime8 <- getAccrualTime(list( "0 - <6" = 0.22, "6 - <=30" = 0.33)) ## ## Comparison of the results of AccrualTime object 'accrualTime8' with expected results ## expect_equal(accrualTime8$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime8$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime8$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime8$maxNumberOfSubjectsCanBeCalculatedDirectly, FALSE) expect_equal(accrualTime8$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime8$accrualTime, c(0, 6, 30)) expect_equal(accrualTime8$accrualIntensity, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime8$accrualIntensityRelative, NA_real_) expect_equal(accrualTime8$maxNumberOfSubjects, NA_real_) expect_equal(accrualTime8$remainingTime, NA_real_) expect_equal(accrualTime8$piecewiseAccrualEnabled, TRUE) accrualTime9 <- getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(22, 33), maxNumberOfSubjects = 1000) ## ## Comparison of the results of AccrualTime object 'accrualTime9' with expected results ## expect_equal(accrualTime9$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime9$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime9$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime9$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime9$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime9$accrualTime, c(0, 6, 32.30303), tolerance = 1e-07) expect_equal(accrualTime9$accrualIntensity, c(22, 33)) expect_equal(accrualTime9$accrualIntensityRelative, NA_real_) expect_equal(accrualTime9$maxNumberOfSubjects, 1000) expect_equal(accrualTime9$remainingTime, 26.30303, tolerance = 1e-07) expect_equal(accrualTime9$piecewiseAccrualEnabled, TRUE) accrualTime10 <- getAccrualTime(list( "0 - <6" = 22, "6" = 33), maxNumberOfSubjects = 1000) ## ## Comparison of the results of AccrualTime object 'accrualTime10' with expected results ## expect_equal(accrualTime10$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime10$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime10$maxNumberOfSubjectsIsUserDefined, TRUE) 
expect_equal(accrualTime10$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime10$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime10$accrualTime, c(0, 6, 32.30303), tolerance = 1e-07) expect_equal(accrualTime10$accrualIntensity, c(22, 33)) expect_equal(accrualTime10$accrualIntensityRelative, NA_real_) expect_equal(accrualTime10$maxNumberOfSubjects, 1000) expect_equal(accrualTime10$remainingTime, 26.30303, tolerance = 1e-07) expect_equal(accrualTime10$piecewiseAccrualEnabled, TRUE) accrualTime11 <- getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(0.22, 0.33), maxNumberOfSubjects = 1000) ## ## Comparison of the results of AccrualTime object 'accrualTime11' with expected results ## expect_equal(accrualTime11$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime11$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime11$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime11$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime11$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime11$accrualTime, c(0, 6)) expect_equal(accrualTime11$accrualIntensity, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime11$accrualIntensityRelative, NA_real_) expect_equal(accrualTime11$maxNumberOfSubjects, 1000) expect_equal(accrualTime11$remainingTime, NA_real_) expect_equal(accrualTime11$piecewiseAccrualEnabled, TRUE) accrualTime12 <- getAccrualTime(list( "0 - <6" = 0.22, "6" = 0.33), maxNumberOfSubjects = 1000) ## ## Comparison of the results of AccrualTime object 'accrualTime12' with expected results ## expect_equal(accrualTime12$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime12$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime12$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime12$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime12$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime12$accrualTime, c(0, 6)) expect_equal(accrualTime12$accrualIntensity, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime12$accrualIntensityRelative, NA_real_) expect_equal(accrualTime12$maxNumberOfSubjects, 1000) expect_equal(accrualTime12$remainingTime, NA_real_) expect_equal(accrualTime12$piecewiseAccrualEnabled, TRUE) accrualTime13 <- getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(22, 33)) ## ## Comparison of the results of AccrualTime object 'accrualTime13' with expected results ## expect_equal(accrualTime13$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime13$followUpTimeMustBeUserDefined, TRUE) expect_equal(accrualTime13$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime13$maxNumberOfSubjectsCanBeCalculatedDirectly, FALSE) expect_equal(accrualTime13$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime13$accrualTime, c(0, 6)) expect_equal(accrualTime13$accrualIntensity, c(22, 33)) expect_equal(accrualTime13$accrualIntensityRelative, NA_real_) expect_equal(accrualTime13$maxNumberOfSubjects, NA_real_) expect_equal(accrualTime13$remainingTime, NA_real_) expect_equal(accrualTime13$piecewiseAccrualEnabled, FALSE) accrualTime14 <- getAccrualTime(list( "0 - <6" = 22, "6" = 33)) ## ## Comparison of the results of AccrualTime object 'accrualTime14' with expected results ## expect_equal(accrualTime14$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime14$followUpTimeMustBeUserDefined, TRUE) expect_equal(accrualTime14$maxNumberOfSubjectsIsUserDefined, FALSE) 
expect_equal(accrualTime14$maxNumberOfSubjectsCanBeCalculatedDirectly, FALSE) expect_equal(accrualTime14$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime14$accrualTime, c(0, 6)) expect_equal(accrualTime14$accrualIntensity, c(22, 33)) expect_equal(accrualTime14$accrualIntensityRelative, NA_real_) expect_equal(accrualTime14$maxNumberOfSubjects, NA_real_) expect_equal(accrualTime14$remainingTime, NA_real_) expect_equal(accrualTime14$piecewiseAccrualEnabled, FALSE) accrualTime15 <- getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(0.22, 0.33)) ## ## Comparison of the results of AccrualTime object 'accrualTime15' with expected results ## expect_equal(accrualTime15$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime15$followUpTimeMustBeUserDefined, TRUE) expect_equal(accrualTime15$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime15$maxNumberOfSubjectsCanBeCalculatedDirectly, FALSE) expect_equal(accrualTime15$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime15$accrualTime, c(0, 6)) expect_equal(accrualTime15$accrualIntensity, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime15$accrualIntensityRelative, NA_real_) expect_equal(accrualTime15$maxNumberOfSubjects, NA_real_) expect_equal(accrualTime15$remainingTime, NA_real_) expect_equal(accrualTime15$piecewiseAccrualEnabled, FALSE) accrualTime16 <- getAccrualTime(list( "0 - <6" = 0.22, "6" = 0.33)) ## ## Comparison of the results of AccrualTime object 'accrualTime16' with expected results ## expect_equal(accrualTime16$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime16$followUpTimeMustBeUserDefined, TRUE) expect_equal(accrualTime16$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime16$maxNumberOfSubjectsCanBeCalculatedDirectly, FALSE) expect_equal(accrualTime16$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime16$accrualTime, c(0, 6)) expect_equal(accrualTime16$accrualIntensity, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime16$accrualIntensityRelative, NA_real_) expect_equal(accrualTime16$maxNumberOfSubjects, NA_real_) expect_equal(accrualTime16$remainingTime, NA_real_) expect_equal(accrualTime16$piecewiseAccrualEnabled, FALSE) }) test_that("Testing 'getAccrualTime': check expected warnings and errors", { expect_warning(getAccrualTime(accrualTime = c(0, 24), accrualIntensity = c(30, 45), maxNumberOfSubjects = 720), "Last accrual intensity value (45) ignored", fixed = TRUE) expect_warning(getAccrualTime(accrualTime = c(0, 24, 30), accrualIntensity = c(30, 45, 55), maxNumberOfSubjects = 720), "Last 2 accrual intensity values (45, 55) ignored", fixed = TRUE) expect_warning(getAccrualTime(accrualTime = c(0, 24, 30, 40), accrualIntensity = c(30, 45, 55, 66), maxNumberOfSubjects = 720), "Last 2 accrual time values (30, 40) ignored", fixed = TRUE) expect_warning(getAccrualTime(accrualTime = c(0, 24, 30, 40), accrualIntensity = c(30, 45, 55, 66), maxNumberOfSubjects = 720), "Last 3 accrual intensity values (45, 55, 66) ignored", fixed = TRUE) expect_warning(getAccrualTime(accrualTime = c(0, 6, 15, 25), accrualIntensity = c(0, 22, 33)), "It makes no sense to start 'accrualIntensity' (0, 22, 33) with 0", fixed = TRUE) expect_error(getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(0)), "Illegal argument: at least one 'accrualIntensity' value must be > 0", fixed = TRUE) expect_error(getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(22, 33), maxNumberOfSubjects = 1000), paste0("Conflicting arguments: 'maxNumberOfSubjects' 
(1000) disagrees with the defined ", "accrual time and intensity: 6 * 22 + 24 * 33 = 924"), fixed = TRUE) }) test_that("Testing 'getAccrualTime': list-wise definition", { accrualTime <- list( "0 - <12" = 15, "12 - <13" = 21, "13 - <14" = 27, "14 - <15" = 33, "15 - <16" = 39, ">=16" = 45) accrualTime4 <- getAccrualTime(accrualTime = accrualTime, maxNumberOfSubjects = 1405) expect_equal(accrualTime4$accrualTime, c( 0, 12, 13, 14, 15, 16, 40.55555556)) expect_equal(accrualTime4$accrualIntensity, c(15, 21, 27, 33, 39, 45)) expect_equal(accrualTime4$remainingTime, 24.55555556) accrualTime <- list( "0 - <12" = 15, "12 - <13" = 21, "13 - <14" = 27, "14 - <15" = 33, "15 - <16" = 39, "16 - ?" = 45) accrualTime5 <- getAccrualTime(accrualTime = accrualTime, maxNumberOfSubjects = 1405) expect_equal(accrualTime5$accrualTime, c( 0, 12, 13, 14, 15, 16, 40.55555556)) expect_equal(accrualTime5$accrualIntensity, c(15, 21, 27, 33, 39, 45)) expect_equal(accrualTime5$remainingTime, 24.55555556) accrualTime <- list( "0 - <11" = 20, "11 - <16" = 40, ">=16" = 60) accrualTime6 <- getAccrualTime(accrualTime = accrualTime, maxNumberOfSubjects = 800) expect_equal(accrualTime6$accrualTime, c(0, 11, 16, 22.3333333)) expect_equal(accrualTime6$accrualIntensity, c(20, 40, 60)) expect_equal(accrualTime6$remainingTime, 6.33333333) accrualTime <- list( "0 - <11" = 20, "11 - <16" = 40, "16 - ?" = 60) accrualTime7 <- getAccrualTime(accrualTime = accrualTime, maxNumberOfSubjects = 800) expect_equal(accrualTime7$accrualTime, c(0, 11, 16, 22.3333333)) expect_equal(accrualTime7$accrualIntensity, c(20, 40, 60)) expect_equal(accrualTime7$remainingTime, 6.33333333) }) rpact/inst/tests/testthat/test-f_core_utilities.R0000644000176200001440000015064613567165663022030 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 06 November 2019, 17:12:24 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing core utility functions") test_that("'getValidatedInformationRates': 'informationRates' must be generated correctly based on specified 'kMax'", { design1 <- getTestDesign(kMax = 1L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design1), 1, tolerance = 1e-08) design2 <- getTestDesign(kMax = 2L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design2), c(0.5, 1), tolerance = 1e-08) design3 <- getTestDesign(kMax = 3L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design3), c(0.33333333, 0.66666667, 1), tolerance = 1e-08) design4 <- getTestDesign(kMax = 4L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design4), c(0.25, 0.5, 0.75, 1), tolerance = 1e-08) design5 <- getTestDesign(kMax = 5L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design5), c(0.2, 0.4, 0.6, 0.8, 1), tolerance = 1e-08) design6 <- getTestDesign(kMax = 6L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design6), c(0.16666667, 0.33333333, 0.5, 0.66666667, 0.83333333, 1), tolerance = 1e-08) design7 <- getTestDesign(kMax = 1L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design7), 1, tolerance = 1e-08) design8 <- getTestDesign(kMax = 2L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design8), c(0.5, 1), tolerance = 1e-08) design9 <- getTestDesign(kMax = 3L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design9), c(0.33333333, 0.66666667, 1), tolerance = 1e-08) design10 <- getTestDesign(kMax = 4L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design10), c(0.25, 0.5, 0.75, 1), tolerance = 1e-08) design11 <- getTestDesign(kMax = 5L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design11), c(0.2, 0.4, 0.6, 0.8, 1), tolerance = 1e-08) design12 <- getTestDesign(kMax = 6L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design12), c(0.16666667, 0.33333333, 0.5, 0.66666667, 0.83333333, 1), tolerance = 1e-08) design13 <- getTestDesign(kMax = 1L, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design13), 1, tolerance = 1e-08) design14 <- getTestDesign(kMax = 2L, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design14), c(0.5, 1), tolerance = 1e-08) design15 <- getTestDesign(kMax = 3L, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design15), c(0.33333333, 0.66666667, 1), tolerance = 1e-08) design16 <- getTestDesign(kMax = 4L, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design16), c(0.25, 0.5, 0.75, 1), tolerance = 1e-08) design17 <- getTestDesign(kMax = 5L, designClass = "TrialDesignFisher") 
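# The expected information rates are the equally spaced defaults
# (1:kMax) / kMax, e.g. for design17 with kMax = 5:
(1:5) / 5 # = c(0.2, 0.4, 0.6, 0.8, 1)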
expect_equal(.getValidatedInformationRates(design17), c(0.2, 0.4, 0.6, 0.8, 1), tolerance = 1e-08) design18 <- getTestDesign(kMax = 6L, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design18), c(0.16666667, 0.33333333, 0.5, 0.66666667, 0.83333333, 1), tolerance = 1e-08) }) test_that("'getValidatedInformationRates': 'informationRates' must be set correctly based on specified 'informationRates'", { design19 <- getTestDesign(informationRates = 1, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design19), 1, tolerance = 1e-07) design20 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design20), c(0.4, 1), tolerance = 1e-07) design21 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design21), c(0.26666667, 0.53333333, 1), tolerance = 1e-07) design22 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design22), c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) design23 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design23), c(0.16, 0.32, 0.48, 0.64, 1), tolerance = 1e-07) design24 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design24), c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), tolerance = 1e-07) design25 <- getTestDesign(informationRates = 1, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design25), 1, tolerance = 1e-07) design26 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design26), c(0.4, 1), tolerance = 1e-07) design27 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design27), c(0.26666667, 0.53333333, 1), tolerance = 1e-07) design28 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design28), c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) design29 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design29), c(0.16, 0.32, 0.48, 0.64, 1), tolerance = 1e-07) design30 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design30), c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), tolerance = 1e-07) design31 <- getTestDesign(informationRates = 1, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design31), 1, tolerance = 1e-07) design32 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design32), c(0.4, 1), tolerance = 1e-07) design33 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design33), c(0.26666667, 0.53333333, 1), tolerance = 1e-07) design34 <- 
getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design34), c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) design35 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design35), c(0.16, 0.32, 0.48, 0.64, 1), tolerance = 1e-07) design36 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design36), c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), tolerance = 1e-07) design37 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design37), c(0.5, 1), tolerance = 1e-07) design38 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design38), c(0.33333333, 0.66666667, 1), tolerance = 1e-07) design39 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design39), c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) design40 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design40), c(0.2, 0.4, 0.6, 0.8, 1), tolerance = 1e-07) design41 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design41), c(0.16666667, 0.33333333, 0.5, 0.66666667, 0.83333333, 1), tolerance = 1e-07) design42 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design42), c(0.5, 1), tolerance = 1e-07) design43 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design43), c(0.33333333, 0.66666667, 1), tolerance = 1e-07) design44 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design44), c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) design45 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design45), c(0.2, 0.4, 0.6, 0.8, 1), tolerance = 1e-07) design46 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design46), c(0.16666667, 0.33333333, 0.5, 0.66666667, 0.83333333, 1), tolerance = 1e-07) design47 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design47), c(0.5, 1), tolerance = 1e-07) design48 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design48), c(0.33333333, 0.66666667, 1), tolerance = 1e-07) design49 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design49), c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) design50 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design50), c(0.2, 0.4, 0.6, 0.8, 1), tolerance = 1e-07) design51 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design51), 
c(0.16666667, 0.33333333, 0.5, 0.66666667, 0.83333333, 1), tolerance = 1e-07) }) test_that("'getValidatedInformationRates': 'kMax' must be set correctly based on specified 'informationRates'", { design52 <- getTestDesign(informationRates = 1, designClass = "TrialDesignGroupSequential") .getValidatedInformationRates(design52) expect_equal(design52$kMax, 1, tolerance = 1e-07) design53 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignGroupSequential") .getValidatedInformationRates(design53) expect_equal(design53$kMax, 2, tolerance = 1e-07) design54 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignGroupSequential") .getValidatedInformationRates(design54) expect_equal(design54$kMax, 3, tolerance = 1e-07) design55 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignGroupSequential") .getValidatedInformationRates(design55) expect_equal(design55$kMax, 4, tolerance = 1e-07) design56 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignGroupSequential") .getValidatedInformationRates(design56) expect_equal(design56$kMax, 5, tolerance = 1e-07) design57 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignGroupSequential") .getValidatedInformationRates(design57) expect_equal(design57$kMax, 6, tolerance = 1e-07) design58 <- getTestDesign(informationRates = 1, designClass = "TrialDesignInverseNormal") .getValidatedInformationRates(design58) expect_equal(design58$kMax, 1, tolerance = 1e-07) design59 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignInverseNormal") .getValidatedInformationRates(design59) expect_equal(design59$kMax, 2, tolerance = 1e-07) design60 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignInverseNormal") .getValidatedInformationRates(design60) expect_equal(design60$kMax, 3, tolerance = 1e-07) design61 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignInverseNormal") .getValidatedInformationRates(design61) expect_equal(design61$kMax, 4, tolerance = 1e-07) design62 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignInverseNormal") .getValidatedInformationRates(design62) expect_equal(design62$kMax, 5, tolerance = 1e-07) design63 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignInverseNormal") .getValidatedInformationRates(design63) expect_equal(design63$kMax, 6, tolerance = 1e-07) design64 <- getTestDesign(informationRates = 1, designClass = "TrialDesignFisher") .getValidatedInformationRates(design64) expect_equal(design64$kMax, 1, tolerance = 1e-07) design65 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignFisher") .getValidatedInformationRates(design65) expect_equal(design65$kMax, 2, tolerance = 1e-07) design66 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignFisher") .getValidatedInformationRates(design66) expect_equal(design66$kMax, 3, tolerance = 1e-07) design67 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignFisher") .getValidatedInformationRates(design67) expect_equal(design67$kMax, 4, tolerance = 1e-07) design68 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignFisher") .getValidatedInformationRates(design68) 
expect_equal(design68$kMax, 5, tolerance = 1e-07) design69 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignFisher") .getValidatedInformationRates(design69) expect_equal(design69$kMax, 6, tolerance = 1e-07) design70 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignGroupSequential") .getValidatedInformationRates(design70) expect_equal(design70$kMax, 2, tolerance = 1e-07) design71 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignGroupSequential") .getValidatedInformationRates(design71) expect_equal(design71$kMax, 3, tolerance = 1e-07) design72 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedInformationRates(design72) expect_equal(design72$kMax, 4, tolerance = 1e-07) design73 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedInformationRates(design73) expect_equal(design73$kMax, 5, tolerance = 1e-07) design74 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedInformationRates(design74) expect_equal(design74$kMax, 6, tolerance = 1e-07) design75 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignInverseNormal") .getValidatedInformationRates(design75) expect_equal(design75$kMax, 2, tolerance = 1e-07) design76 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignInverseNormal") .getValidatedInformationRates(design76) expect_equal(design76$kMax, 3, tolerance = 1e-07) design77 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedInformationRates(design77) expect_equal(design77$kMax, 4, tolerance = 1e-07) design78 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedInformationRates(design78) expect_equal(design78$kMax, 5, tolerance = 1e-07) design79 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedInformationRates(design79) expect_equal(design79$kMax, 6, tolerance = 1e-07) design80 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignFisher") .getValidatedInformationRates(design80) expect_equal(design80$kMax, 2, tolerance = 1e-07) design81 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignFisher") .getValidatedInformationRates(design81) expect_equal(design81$kMax, 3, tolerance = 1e-07) design82 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignFisher") .getValidatedInformationRates(design82) expect_equal(design82$kMax, 4, tolerance = 1e-07) design83 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignFisher") .getValidatedInformationRates(design83) expect_equal(design83$kMax, 5, tolerance = 1e-07) design84 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignFisher") .getValidatedInformationRates(design84) expect_equal(design84$kMax, 6, tolerance = 1e-07) }) test_that("'getValidatedInformationRates': 'futilityBounds' must be generated correctly based on specified 'kMax'", { design85 <- getTestDesign(kMax = 1L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design85), numeric(0), tolerance = 1e-08) design86 <- getTestDesign(kMax = 2L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design86), -6, tolerance = 1e-08) design87 <- getTestDesign(kMax = 3L, designClass = 
"TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design87), c(-6, -6), tolerance = 1e-08) design88 <- getTestDesign(kMax = 4L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design88), c(-6, -6, -6), tolerance = 1e-08) design89 <- getTestDesign(kMax = 5L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design89), c(-6, -6, -6, -6), tolerance = 1e-08) design90 <- getTestDesign(kMax = 6L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design90), c(-6, -6, -6, -6, -6), tolerance = 1e-08) design91 <- getTestDesign(kMax = 7L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design91), c(-6, -6, -6, -6, -6, -6), tolerance = 1e-08) design92 <- getTestDesign(kMax = 8L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design92), c(-6, -6, -6, -6, -6, -6, -6), tolerance = 1e-08) design93 <- getTestDesign(kMax = 9L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design93), c(-6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-08) design94 <- getTestDesign(kMax = 10L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design94), c(-6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-08) design95 <- getTestDesign(kMax = 1L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design95), numeric(0), tolerance = 1e-08) design96 <- getTestDesign(kMax = 2L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design96), -6, tolerance = 1e-08) design97 <- getTestDesign(kMax = 3L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design97), c(-6, -6), tolerance = 1e-08) design98 <- getTestDesign(kMax = 4L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design98), c(-6, -6, -6), tolerance = 1e-08) design99 <- getTestDesign(kMax = 5L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design99), c(-6, -6, -6, -6), tolerance = 1e-08) design100 <- getTestDesign(kMax = 6L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design100), c(-6, -6, -6, -6, -6), tolerance = 1e-08) design101 <- getTestDesign(kMax = 7L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design101), c(-6, -6, -6, -6, -6, -6), tolerance = 1e-08) design102 <- getTestDesign(kMax = 8L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design102), c(-6, -6, -6, -6, -6, -6, -6), tolerance = 1e-08) design103 <- getTestDesign(kMax = 9L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design103), c(-6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-08) design104 <- getTestDesign(kMax = 10L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design104), c(-6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-08) design105 <- getTestDesign(kMax = 1L, designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design105), numeric(0), tolerance = 1e-08) design106 <- getTestDesign(kMax = 2L, designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design106), 1, tolerance = 1e-08) design107 <- getTestDesign(kMax = 3L, designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design107), c(1, 1), tolerance = 1e-08) design108 <- 
getTestDesign(kMax = 4L, designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design108), c(1, 1, 1), tolerance = 1e-08) design109 <- getTestDesign(kMax = 5L, designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design109), c(1, 1, 1, 1), tolerance = 1e-08) design110 <- getTestDesign(kMax = 6L, designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design110), c(1, 1, 1, 1, 1), tolerance = 1e-08) }) test_that("'getValidatedInformationRates': 'futilityBounds' must be set correctly based on specified 'futilityBounds'", { design111 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design111), 2, tolerance = 1e-07) design112 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design112), c(1, 2), tolerance = 1e-07) design113 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design113), c(0, 1, 2), tolerance = 1e-07) design114 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design114), c(0, 0, 1, 2), tolerance = 1e-07) design115 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design115), c(0, 0, 0, 1, 2), tolerance = 1e-07) design116 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design116), c(0, 0, 0, 0, 1, 2), tolerance = 1e-07) design117 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design117), c(0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design118 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design118), c(0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design119 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design119), c(0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design120 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design120), 2, tolerance = 1e-07) design121 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design121), c(1, 2), tolerance = 1e-07) design122 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design122), c(0, 1, 2), tolerance = 1e-07) design123 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design123), c(0, 0, 1, 2), tolerance = 1e-07) design124 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design124), c(0, 0, 0, 1, 2), tolerance = 1e-07) design125 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design125), c(0, 0, 0, 0, 1, 2), tolerance = 1e-07) design126 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") 
expect_equal(.getValidatedFutilityBounds(design126), c(0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design127 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design127), c(0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design128 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design128), c(0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design129 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design129), 2, tolerance = 1e-07) design130 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design130), c(1, 2), tolerance = 1e-07) design131 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design131), c(0, 1, 2), tolerance = 1e-07) design132 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design132), c(0, 0, 1, 2), tolerance = 1e-07) design133 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design133), c(0, 0, 0, 1, 2), tolerance = 1e-07) design134 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design134), -6, tolerance = 1e-07) design135 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design135), c(-6, -6), tolerance = 1e-07) design136 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design136), c(-6, -6, -6), tolerance = 1e-07) design137 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design137), c(-6, -6, -6, -6), tolerance = 1e-07) design138 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design138), c(-6, -6, -6, -6, -6), tolerance = 1e-07) design139 <- getTestDesign(informationRates = c(0.11428571, 0.22857143, 0.34285714, 0.45714286, 0.57142857, 0.68571429, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design139), c(-6, -6, -6, -6, -6, -6), tolerance = 1e-07) design140 <- getTestDesign(informationRates = c(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design140), c(-6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design141 <- getTestDesign(informationRates = c(0.088888889, 0.17777778, 0.26666667, 0.35555556, 0.44444444, 0.53333333, 0.62222222, 0.71111111, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design141), c(-6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design142 <- getTestDesign(informationRates = c(0.08, 0.16, 0.24, 0.32, 0.4, 0.48, 0.56, 0.64, 0.72, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design142), c(-6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design143 <- getTestDesign(informationRates = c(0.4, 1), designClass = 
"TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design143), -6, tolerance = 1e-07) design144 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design144), c(-6, -6), tolerance = 1e-07) design145 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design145), c(-6, -6, -6), tolerance = 1e-07) design146 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design146), c(-6, -6, -6, -6), tolerance = 1e-07) design147 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design147), c(-6, -6, -6, -6, -6), tolerance = 1e-07) design148 <- getTestDesign(informationRates = c(0.11428571, 0.22857143, 0.34285714, 0.45714286, 0.57142857, 0.68571429, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design148), c(-6, -6, -6, -6, -6, -6), tolerance = 1e-07) design149 <- getTestDesign(informationRates = c(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design149), c(-6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design150 <- getTestDesign(informationRates = c(0.088888889, 0.17777778, 0.26666667, 0.35555556, 0.44444444, 0.53333333, 0.62222222, 0.71111111, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design150), c(-6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design151 <- getTestDesign(informationRates = c(0.08, 0.16, 0.24, 0.32, 0.4, 0.48, 0.56, 0.64, 0.72, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design151), c(-6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design152 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design152), 1, tolerance = 1e-07) design153 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design153), c(1, 1), tolerance = 1e-07) design154 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design154), c(1, 1, 1), tolerance = 1e-07) design155 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design155), c(1, 1, 1, 1), tolerance = 1e-07) design156 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design156), c(1, 1, 1, 1, 1), tolerance = 1e-07) }) test_that("'getValidatedInformationRates': 'kMax' must be set correctly based on specified 'futilityBounds'", { design157 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design157) expect_equal(design157$kMax, 2, tolerance = 1e-07) design158 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design158) expect_equal(design158$kMax, 3, tolerance = 1e-07) design159 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignGroupSequential") 
.getValidatedFutilityBounds(design159) expect_equal(design159$kMax, 4, tolerance = 1e-07) design160 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design160) expect_equal(design160$kMax, 5, tolerance = 1e-07) design161 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design161) expect_equal(design161$kMax, 6, tolerance = 1e-07) design162 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design162) expect_equal(design162$kMax, 7, tolerance = 1e-07) design163 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design163) expect_equal(design163$kMax, 8, tolerance = 1e-07) design164 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design164) expect_equal(design164$kMax, 9, tolerance = 1e-07) design165 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design165) expect_equal(design165$kMax, 10, tolerance = 1e-07) design166 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design166) expect_equal(design166$kMax, 2, tolerance = 1e-07) design167 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design167) expect_equal(design167$kMax, 3, tolerance = 1e-07) design168 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design168) expect_equal(design168$kMax, 4, tolerance = 1e-07) design169 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design169) expect_equal(design169$kMax, 5, tolerance = 1e-07) design170 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design170) expect_equal(design170$kMax, 6, tolerance = 1e-07) design171 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design171) expect_equal(design171$kMax, 7, tolerance = 1e-07) design172 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design172) expect_equal(design172$kMax, 8, tolerance = 1e-07) design173 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design173) expect_equal(design173$kMax, 9, tolerance = 1e-07) design174 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design174) expect_equal(design174$kMax, 10, tolerance = 1e-07) design175 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design175) expect_equal(design175$kMax, 2, tolerance = 1e-07) design176 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design176) expect_equal(design176$kMax, 3, tolerance = 1e-07) design177 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design177) expect_equal(design177$kMax, 4, tolerance = 1e-07) 
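# Note on the pattern in this test: the designs are created without an explicit 'kMax',
# and the validation call is expected to derive it from the other arguments, apparently as
# length(futilityBounds) + 1 when futility bounds are given and as
# length(informationRates) when information rates are given (see the checks below).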
design178 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design178) expect_equal(design178$kMax, 5, tolerance = 1e-07) design179 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design179) expect_equal(design179$kMax, 6, tolerance = 1e-07) design180 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design180) expect_equal(design180$kMax, 2, tolerance = 1e-07) design181 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design181) expect_equal(design181$kMax, 3, tolerance = 1e-07) design182 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design182) expect_equal(design182$kMax, 4, tolerance = 1e-07) design183 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design183) expect_equal(design183$kMax, 5, tolerance = 1e-07) design184 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design184) expect_equal(design184$kMax, 6, tolerance = 1e-07) design185 <- getTestDesign(informationRates = c(0.11428571, 0.22857143, 0.34285714, 0.45714286, 0.57142857, 0.68571429, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design185) expect_equal(design185$kMax, 7, tolerance = 1e-07) design186 <- getTestDesign(informationRates = c(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design186) expect_equal(design186$kMax, 8, tolerance = 1e-07) design187 <- getTestDesign(informationRates = c(0.088888889, 0.17777778, 0.26666667, 0.35555556, 0.44444444, 0.53333333, 0.62222222, 0.71111111, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design187) expect_equal(design187$kMax, 9, tolerance = 1e-07) design188 <- getTestDesign(informationRates = c(0.08, 0.16, 0.24, 0.32, 0.4, 0.48, 0.56, 0.64, 0.72, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design188) expect_equal(design188$kMax, 10, tolerance = 1e-07) design189 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design189) expect_equal(design189$kMax, 2, tolerance = 1e-07) design190 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design190) expect_equal(design190$kMax, 3, tolerance = 1e-07) design191 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design191) expect_equal(design191$kMax, 4, tolerance = 1e-07) design192 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design192) expect_equal(design192$kMax, 5, tolerance = 1e-07) design193 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design193) expect_equal(design193$kMax, 6, tolerance = 1e-07) design194 <- getTestDesign(informationRates = c(0.11428571, 0.22857143, 0.34285714, 0.45714286, 
0.57142857, 0.68571429, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design194) expect_equal(design194$kMax, 7, tolerance = 1e-07) design195 <- getTestDesign(informationRates = c(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design195) expect_equal(design195$kMax, 8, tolerance = 1e-07) design196 <- getTestDesign(informationRates = c(0.088888889, 0.17777778, 0.26666667, 0.35555556, 0.44444444, 0.53333333, 0.62222222, 0.71111111, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design196) expect_equal(design196$kMax, 9, tolerance = 1e-07) design197 <- getTestDesign(informationRates = c(0.08, 0.16, 0.24, 0.32, 0.4, 0.48, 0.56, 0.64, 0.72, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design197) expect_equal(design197$kMax, 10, tolerance = 1e-07) design198 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design198) expect_equal(design198$kMax, 2, tolerance = 1e-07) design199 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design199) expect_equal(design199$kMax, 3, tolerance = 1e-07) design200 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design200) expect_equal(design200$kMax, 4, tolerance = 1e-07) design201 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design201) expect_equal(design201$kMax, 5, tolerance = 1e-07) design202 <- getTestDesign(informationRates = c(0.13333333, 0.26666667, 0.4, 0.53333333, 0.66666667, 1), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design202) expect_equal(design202$kMax, 6, tolerance = 1e-07) }) context("Testing utilities") test_that("Testing '.toCapitalized'", { expect_equal(.toCapitalized("zip code"), "Zip Code") expect_equal(.toCapitalized("state of the art"), "State of the Art") expect_equal(.toCapitalized("final and count"), "Final and Count") }) test_that("Testing '.equalsRegexpIgnoreCase'", { expect_equal(.equalsRegexpIgnoreCase("stage2", "^stages?$"), FALSE) expect_equal(.equalsRegexpIgnoreCase("stage", "^stages?$"), TRUE) expect_equal(.equalsRegexpIgnoreCase("stages", "^stages?$"), TRUE) expect_equal(.equalsRegexpIgnoreCase("Stage", "^stages?$"), TRUE) expect_equal(.equalsRegexpIgnoreCase("STAGES", "^stages?$"), TRUE) expect_equal(.equalsRegexpIgnoreCase("stages2", "^stages?$"), FALSE) expect_equal(.equalsRegexpIgnoreCase(" stages", "^stages?$"), FALSE) expect_equal(.equalsRegexpIgnoreCase("stages2", "stages?"), TRUE) expect_equal(.equalsRegexpIgnoreCase("1stage2", "stages?"), TRUE) }) test_that("Testing '.isUndefinedArgument' and '.isDefinedArgument'", { expect_equal(.isUndefinedArgument(NULL), TRUE) expect_equal(.isUndefinedArgument(numeric(0)), TRUE) expect_equal(.isUndefinedArgument(NA), TRUE) expect_equal(.isUndefinedArgument(NA_integer_), TRUE) expect_equal(.isUndefinedArgument(NA_real_), TRUE) expect_equal(.isUndefinedArgument(NA_complex_), TRUE) expect_equal(.isUndefinedArgument(NA_character_), TRUE) expect_equal(.isUndefinedArgument(c(NA, NA)), FALSE) expect_equal(.isUndefinedArgument(c(1, NA, NA)), FALSE) expect_equal(.isUndefinedArgument(c(NA, NA, 1)), FALSE) expect_equal(.isUndefinedArgument(1), FALSE) expect_equal(.isDefinedArgument(NULL), FALSE) expect_equal(.isDefinedArgument(numeric(0)), FALSE)
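# The assertions above and below pin down the rule these helpers implement: an argument
# counts as "undefined" iff it is NULL, has length 0, or is a single NA of any type;
# vectors of length > 1 are "defined" even if all entries are NA. A minimal sketch of an
# equivalent predicate (illustration only, not the package-internal implementation):
isUndefinedArgumentSketch <- function(x) {
    is.null(x) || length(x) == 0 || (length(x) == 1 && is.na(x))
}
stopifnot(isUndefinedArgumentSketch(NA_character_), !isUndefinedArgumentSketch(c(NA, NA)))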
expect_equal(.isDefinedArgument(NA), FALSE) expect_equal(.isDefinedArgument(NA_integer_), FALSE) expect_equal(.isDefinedArgument(NA_real_), FALSE) expect_equal(.isDefinedArgument(NA_complex_), FALSE) expect_equal(.isDefinedArgument(NA_character_), FALSE) expect_equal(.isDefinedArgument(c(NA, NA)), TRUE) expect_equal(.isDefinedArgument(c(1, NA, NA)), TRUE) expect_equal(.isDefinedArgument(c(NA, NA, 1)), TRUE) expect_equal(.isDefinedArgument(1), TRUE) skip_if_translated() expect_error(.isDefinedArgument(notExistingTestVariable, argumentExistsValidationEnabled = FALSE), "object 'notExistingTestVariable' not found", fixed = TRUE) expect_error(.isDefinedArgument(notExistingTestVariable), "Missing argument: the object 'notExistingTestVariable' has not been defined anywhere. Please define it first, e.g., run 'notExistingTestVariable <- 1'", fixed = TRUE) }) test_that("'.setSeed(seed)' works for different arguments, incl. NULL and NA", { # @refFS[Sec.]{fs:subsec:reproducibilityOfSimulationResults} expect_false(is.null(.setSeed())) expect_false(is.na(.setSeed())) expect_true(is.numeric(.setSeed())) expect_false(is.null(.setSeed(NULL))) expect_false(is.na(.setSeed(NULL))) expect_true(is.numeric(.setSeed(NULL))) expect_false(is.null(.setSeed(NA))) expect_false(is.na(.setSeed(NA))) expect_true(is.numeric(.setSeed(NA))) expect_true(.setSeed() != .setSeed()) expect_equal(.setSeed(123), 123) expect_equal(.setSeed(0), 0) expect_equal(.setSeed(5e-5), 5e-5) }) test_that("Testing '.getInputForZeroOutputInsideTolerance'", { input <- 99 tolerance <- 1e-05 epsilon <- 1e-08 expect_equal(.getInputForZeroOutputInsideTolerance(input, tolerance, tolerance), input) expect_equal(.getInputForZeroOutputInsideTolerance(input, tolerance + epsilon, tolerance), NA_real_) expect_equal(.getInputForZeroOutputInsideTolerance(input, tolerance - epsilon, tolerance), input) }) test_that("Testing '.arrayToString'", { expect_equal(.arrayToString(NA, vectorLookAndFeelEnabled = TRUE), "NA") expect_equal(.arrayToString(NULL, vectorLookAndFeelEnabled = TRUE), "NULL") expect_equal(.arrayToString(c(1, 2, 3), vectorLookAndFeelEnabled = TRUE), "c(1, 2, 3)") expect_equal(.arrayToString(c(NA, 2, 3), vectorLookAndFeelEnabled = TRUE), "c(NA, 2, 3)") expect_equal(.arrayToString(c(1, 2, NA), vectorLookAndFeelEnabled = TRUE), "c(1, 2, NA)") expect_equal(.arrayToString(c(NA, NA, NA), vectorLookAndFeelEnabled = TRUE), "c(NA, NA, NA)") expect_equal(.arrayToString(c(1, NULL, 3), vectorLookAndFeelEnabled = TRUE), "c(1, 3)") }) test_that("Testing '.getInputProducingZeroOutput'", { tolerance <- 1e-05 epsilon <- 1e-08 expect_equal(.getInputProducingZeroOutput(1, 0, 2, 99, tolerance), 1) expect_equal(.getInputProducingZeroOutput(1, 99, 2, 0, tolerance), 2) expect_equal(.getInputProducingZeroOutput(1, 0, NA, 0, tolerance), 1) expect_equal(.getInputProducingZeroOutput(NA, 0, 2, 0, tolerance), 2) expect_equal(.getInputProducingZeroOutput(1, 0, NA, NA, tolerance), 1) expect_equal(.getInputProducingZeroOutput(NA, NA, 2, 0, tolerance), 2) expect_equal(.getInputProducingZeroOutput(1, 0, 2, NA, tolerance), 1) expect_equal(.getInputProducingZeroOutput(1, NA, 2, 0, tolerance), 2) expect_equal(.getInputProducingZeroOutput(1, tolerance, 2, 99, tolerance), 1) expect_equal(.getInputProducingZeroOutput(1, 99, 2, tolerance, tolerance), 2) expect_equal(.getInputProducingZeroOutput(1, tolerance, 2, tolerance + epsilon, tolerance), 1) expect_equal(.getInputProducingZeroOutput(1, tolerance + epsilon, 2, tolerance, tolerance), 2)
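# Inferred behaviour of '.getInputProducingZeroOutput' from the cases above and below
# (a reading of the tests, not a statement about the internals): of the two candidate
# inputs, the one whose output is closer to zero is returned, and a candidate with an
# NA input or NA output is disqualified; an exact tie between two valid outputs is not
# pinned down by these tests.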
expect_equal(.getInputProducingZeroOutput(1, tolerance, 2, tolerance - epsilon, tolerance), 2) expect_equal(.getInputProducingZeroOutput(1, tolerance - epsilon, 2, tolerance, tolerance), 1) expect_equal(.getInputProducingZeroOutput(1, tolerance - epsilon, 2, tolerance, tolerance), 1) expect_equal(.getInputProducingZeroOutput(1, tolerance, 2, tolerance - epsilon, tolerance), 2) }) test_that("Testing '.getOneDimensionalRoot'", { tolerance <- 1e-08 expect_equal(.getOneDimensionalRoot(f = function(x) {x - 2}, lower = -1, upper = 1, tolerance = tolerance), NA_real_) expect_equal(.getOneDimensionalRoot(f = function(x) {x + 2}, lower = -1, upper = 1, tolerance = tolerance), NA_real_) expect_equal(.getOneDimensionalRoot(f = function(x) {x - 1 - tolerance}, lower = -1, upper = 1, tolerance = tolerance), 1) expect_equal(.getOneDimensionalRoot(f = function(x) {x + 1 + tolerance}, lower = -1, upper = 1, tolerance = tolerance), -1) expect_equal(.getOneDimensionalRoot(f = function(x) {x - 1}, lower = -1, upper = 1, tolerance = tolerance), 1) expect_equal(.getOneDimensionalRoot(f = function(x) {x + 1}, lower = -1, upper = 1, tolerance = tolerance), -1) expect_equal(.getOneDimensionalRoot(f = function(x) {x - 1}, lower = 0, upper = 1, tolerance = tolerance), 1) expect_equal(.getOneDimensionalRoot(f = function(x) {x - 1}, lower = tolerance, upper = 1, tolerance = tolerance), 1) expect_equal(.getOneDimensionalRoot(f = function(x) {x + 1}, lower = -1, upper = 0, tolerance = tolerance), -1) expect_equal(.getOneDimensionalRoot(f = function(x) {x + 1}, lower = -1, upper = 1 - tolerance, tolerance = tolerance), -1) expect_equal(.getOneDimensionalRoot(f = function(x) {x - 3}, lower = 1, upper = 5, tolerance = tolerance), 3) expect_equal(.getOneDimensionalRoot(f = function(x) {x + 3}, lower = -5, upper = -1, tolerance = tolerance), -3) expect_equal(.getOneDimensionalRoot(f = function(x) {3 * x - 700}, lower = 100, upper = 1000, tolerance = tolerance), 233.33333333) expect_equal(.getOneDimensionalRoot(f = function(x) {3 * x + 700}, lower = -1000, upper = -100, tolerance = tolerance), -233.33333333) expect_equal(.getOneDimensionalRoot(f = function(x) {x - 4}, lower = -10, upper = 10), 4, tolerance = tolerance) expect_equal(.getOneDimensionalRoot(f = function(x) {x + 4}, lower = -10, upper = 10), -4, tolerance = tolerance) dataExample1 <- getDataset( overallEvents = c(33, 55, 129), overallAllocationRatios = c(1, 1, 4), overallLogRanks = c(1.02, 1.38, 2.2) ) design1 <- getDesignGroupSequential(kMax = 3, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.25) result1 <- getRepeatedConfidenceIntervals(design1, dataExample1, stage = 3) ## ## Comparison of the results of matrix object 'result1' with expected results ## expect_equal(result1[1, ], c(0.54923831, 0.77922365, 1.0261298), tolerance = 1e-07) expect_equal(result1[2, ], c(3.7041718, 2.7014099, 2.5669073), tolerance = 1e-07) design2 <- getDesignGroupSequential(kMax = 3, alpha = 0.025, informationRates = c(0.4, 0.7, 1), typeOfDesign = "WT", deltaWT = 0.35) dataExample2 <- getDataset( overallN2 = c(30,80,100), overallN1 = c(30,80,100), overallEvents2 = c(10,25,36), overallEvents1 = c(14,35,53)) result2 <- getRepeatedConfidenceIntervals(design = design2, dataInput = dataExample2, stage = 3, normalApproximation = TRUE, directionUpper = TRUE) ## ## Comparison of the results of matrix object 'result2' with expected results ## expect_equal(result2[1, ], c(-0.17491836, -0.048575353, 0.018957992), tolerance = 1e-07) expect_equal(result2[2, ], c(0.41834422, 0.29168781, 0.31353692),
tolerance = 1e-07) design3 <- getDesignInverseNormal(kMax = 2, alpha = 0.025, informationRates = c(0.5, 1), typeOfDesign = "WT", deltaWT = 0.25) dataExample3 <- getDataset( events1 = c(7,57), events2 = c(7,57), n1 = c(30,300), n2 = c(30,300) ) result3 <- getRepeatedConfidenceIntervals(design3, dataExample3) ## ## Comparison of the results of matrix object 'result3' with expected results ## expect_equal(result3[1, ], c(-0.26729325, -0.071745801), tolerance = 1e-07) expect_equal(result3[2, ], c(0.26729325, 0.071745801), tolerance = 1e-07) design4 <- getDesignInverseNormal(kMax = 2, alpha = 0.025, informationRates = c(0.5, 1), typeOfDesign = "WT", deltaWT = 0.25) dataExample4 <- getDataset( events1 = c(4,55), events2 = c(4,46), n1 = c(30,300), n2 = c(30,300)) result4 <- getRepeatedConfidenceIntervals(design4, dataExample4) ## ## Comparison of the results of matrix object 'result4' with expected results ## expect_equal(result4[1, ], c(-0.23589449, -0.043528513), tolerance = 1e-07) expect_equal(result4[2, ], c(0.23589449, 0.088471324), tolerance = 1e-07) }) rpact/inst/tests/testthat/test-f_simulation_rates.R0000644000176200001440000006352613567165663022367 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 06 November 2019, 17:12:58 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing simulation rates function") test_that("'getSimulationRates': check several configurations", { .skipTestifDisabled() # @refFS[Sec.]{fs:subsec:seed} seed <- 99123 maxNumberOfIterations <- 100 options(width = 180) maxNumberOfSubjects <- 90 informationRates <- (1:3) / 3 plannedSubjects <- round(informationRates * maxNumberOfSubjects) x1 <- getSimulationRates(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, riskRatio = TRUE, thetaH0 = 0.8, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, seed = seed) ## ## Comparison of the results of SimulationResultsRates object 'x1' with expected results ## expect_equal(x1$effect, c(0.2, 0.7, 1.2, 1.7), tolerance = 1e-07) expect_equal(x1$iterations[1, ], c(100, 100, 100, 100)) expect_equal(x1$iterations[2, ], c(78, 93, 99, 96)) expect_equal(x1$iterations[3, ], c(41, 68, 56, 40)) expect_equal(x1$sampleSizes[1, ], c(30, 30, 30, 30)) expect_equal(x1$sampleSizes[2, ], c(30, 30, 30, 30)) expect_equal(x1$sampleSizes[3, ], c(30, 30, 30, 30)) expect_equal(x1$rejectPerStage[1, ], c(0, 0, 0, 0.04), tolerance = 1e-07) expect_equal(x1$rejectPerStage[2, ], c(0.02, 0.04, 0.34, 0.54), tolerance = 1e-07) expect_equal(x1$rejectPerStage[3, ], c(0.03, 0.19, 0.4, 0.3), tolerance = 1e-07) expect_equal(x1$overallReject, c(0.05, 0.23, 0.74, 0.88), tolerance = 1e-07) expect_equal(x1$futilityPerStage[1, ], c(0.22, 0.07, 0.01, 0), tolerance = 1e-07) expect_equal(x1$futilityPerStage[2, ], c(0.35, 0.21, 0.09, 0.02), tolerance = 1e-07) expect_equal(x1$futilityStop, 
c(0.57, 0.28, 0.1, 0.02), tolerance = 1e-07) expect_equal(x1$earlyStop, c(0.59, 0.32, 0.44, 0.6), tolerance = 1e-07) expect_equal(x1$expectedNumberOfSubjects, c(65.7, 78.3, 76.5, 70.8), tolerance = 1e-07) expect_equal(x1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$conditionalPowerAchieved[2, ], c(0.090943215, 0.15808459, 0.48521663, 0.52642331), tolerance = 1e-07) expect_equal(x1$conditionalPowerAchieved[3, ], c(0.22475932, 0.38294099, 0.60961381, 0.67377136), tolerance = 1e-07) x2 <- getSimulationRates(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, riskRatio = FALSE, thetaH0 = -0.1, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, seed = seed) ## ## Comparison of the results of SimulationResultsRates object 'x2' with expected results ## expect_equal(x2$effect, c(0.1, 0.2, 0.3, 0.4), tolerance = 1e-07) expect_equal(x2$iterations[1, ], c(100, 100, 100, 100)) expect_equal(x2$iterations[2, ], c(84, 95, 100, 97)) expect_equal(x2$iterations[3, ], c(55, 73, 64, 42)) expect_equal(x2$sampleSizes[1, ], c(30, 30, 30, 30)) expect_equal(x2$sampleSizes[2, ], c(30, 30, 30, 30)) expect_equal(x2$sampleSizes[3, ], c(30, 30, 30, 30)) expect_equal(x2$rejectPerStage[1, ], c(0, 0, 0, 0.03), tolerance = 1e-07) expect_equal(x2$rejectPerStage[2, ], c(0.02, 0.09, 0.33, 0.53), tolerance = 1e-07) expect_equal(x2$rejectPerStage[3, ], c(0.06, 0.3, 0.48, 0.32), tolerance = 1e-07) expect_equal(x2$overallReject, c(0.08, 0.39, 0.81, 0.88), tolerance = 1e-07) expect_equal(x2$futilityPerStage[1, ], c(0.16, 0.05, 0, 0), tolerance = 1e-07) expect_equal(x2$futilityPerStage[2, ], c(0.27, 0.13, 0.03, 0.02), tolerance = 1e-07) expect_equal(x2$futilityStop, c(0.43, 0.18, 0.03, 0.02), tolerance = 1e-07) expect_equal(x2$earlyStop, c(0.45, 0.27, 0.36, 0.58), tolerance = 1e-07) expect_equal(x2$expectedNumberOfSubjects, c(71.7, 80.4, 79.2, 71.7), tolerance = 1e-07) expect_equal(x2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$conditionalPowerAchieved[2, ], c(0.10237911, 0.25306891, 0.43740091, 0.54067879), tolerance = 1e-07) expect_equal(x2$conditionalPowerAchieved[3, ], c(0.30171473, 0.4623858, 0.59071853, 0.68245332), tolerance = 1e-07) x3 <- getSimulationRates(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 1, thetaH0 = 0.2, pi1 = seq(0.2, 0.4, 0.05), plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, seed = seed) ## ## Comparison of the results of SimulationResultsRates object 'x3' with expected results ## expect_equal(x3$effect, c(0, 0.05, 0.1, 0.15, 0.2), tolerance = 1e-07) expect_equal(x3$iterations[1, ], c(100, 100, 100, 100, 100)) expect_equal(x3$iterations[2, ], c(78, 91, 96, 90, 72)) expect_equal(x3$iterations[3, ], c(32, 65, 62, 37, 6)) expect_equal(x3$sampleSizes[1, ], c(30, 30, 30, 30, 30)) expect_equal(x3$sampleSizes[2, ], c(30, 30, 30, 30, 30)) expect_equal(x3$sampleSizes[3, ], c(30, 30, 30, 30, 30)) expect_equal(x3$rejectPerStage[1, ], c(0, 0.02, 0.04, 0.1, 0.28), tolerance = 1e-07) expect_equal(x3$rejectPerStage[2, ], c(0.01, 0.06, 0.28, 0.53, 0.66), tolerance = 1e-07) expect_equal(x3$rejectPerStage[3, ], c(0.02, 0.22, 0.28, 0.3, 0.05), tolerance = 1e-07) expect_equal(x3$overallReject, c(0.03, 0.3, 0.6, 0.93, 0.99), tolerance = 1e-07) expect_equal(x3$futilityPerStage[1, ], c(0.22, 0.07, 0, 0, 0), tolerance = 1e-07) 
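# Sanity arithmetic for the x3 expectations around this point (first column, pi1 = 0.2):
# futilityStop = 0.22 + 0.45 = 0.67, earlyStop = 0 + 0.01 + 0.67 = 0.68, and
# expectedNumberOfSubjects = 30 + 0.78 * 30 + 0.32 * 30 = 63, i.e. the stage-wise sample
# sizes weighted by the fraction of iterations that reach each stage.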
expect_equal(x3$futilityPerStage[2, ], c(0.45, 0.2, 0.06, 0, 0), tolerance = 1e-07) expect_equal(x3$futilityStop, c(0.67, 0.27, 0.06, 0, 0), tolerance = 1e-07) expect_equal(x3$earlyStop, c(0.68, 0.35, 0.38, 0.63, 0.94), tolerance = 1e-07) expect_equal(x3$expectedNumberOfSubjects, c(63, 76.8, 77.4, 68.1, 53.4), tolerance = 1e-07) expect_equal(x3$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$conditionalPowerAchieved[2, ], c(0.12773913, 0.18983473, 0.36146118, 0.53982038, 0.7268178), tolerance = 1e-07) expect_equal(x3$conditionalPowerAchieved[3, ], c(0.32676971, 0.35596086, 0.46114911, 0.56126649, 0.75350644), tolerance = 1e-07) x4 <- getSimulationRates(design = getDesignInverseNormal(futilityBounds = c(0.5,0.5), informationRates = informationRates), groups = 2, riskRatio = TRUE, thetaH0 = 1.5, pi1 = seq(0.05,0.25,0.05), plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, directionUpper = FALSE, allocationRatioPlanned = 3, seed = seed) ## ## Comparison of the results of SimulationResultsRates object 'x4' with expected results ## expect_equal(x4$effect, c(-1.25, -1, -0.75, -0.5, -0.25), tolerance = 1e-07) expect_equal(x4$iterations[1, ], c(100, 100, 100, 100, 100)) expect_equal(x4$iterations[2, ], c(74, 64, 47, 36, 39)) expect_equal(x4$iterations[3, ], c(28, 28, 30, 20, 25)) expect_equal(x4$sampleSizes[1, ], c(30, 30, 30, 30, 30)) expect_equal(x4$sampleSizes[2, ], c(30, 30, 30, 30, 30)) expect_equal(x4$sampleSizes[3, ], c(30, 30, 30, 30, 30)) expect_equal(x4$rejectPerStage[1, ], c(0.06, 0.05, 0.02, 0, 0), tolerance = 1e-07) expect_equal(x4$rejectPerStage[2, ], c(0.43, 0.29, 0.09, 0.04, 0.04), tolerance = 1e-07) expect_equal(x4$rejectPerStage[3, ], c(0.17, 0.17, 0.08, 0.04, 0.06), tolerance = 1e-07) expect_equal(x4$overallReject, c(0.66, 0.51, 0.19, 0.08, 0.1), tolerance = 1e-07) expect_equal(x4$futilityPerStage[1, ], c(0.2, 0.31, 0.51, 0.64, 0.61), tolerance = 1e-07) expect_equal(x4$futilityPerStage[2, ], c(0.03, 0.07, 0.08, 0.12, 0.1), tolerance = 1e-07) expect_equal(x4$futilityStop, c(0.23, 0.38, 0.59, 0.76, 0.71), tolerance = 1e-07) expect_equal(x4$earlyStop, c(0.72, 0.72, 0.7, 0.8, 0.75), tolerance = 1e-07) expect_equal(x4$expectedNumberOfSubjects, c(60.6, 57.6, 53.1, 46.8, 49.2), tolerance = 1e-07) expect_equal(x4$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$conditionalPowerAchieved[2, ], c(0.65569733, 0.50411153, 0.40992455, 0.37112776, 0.28877148), tolerance = 1e-07) expect_equal(x4$conditionalPowerAchieved[3, ], c(0.52876953, 0.55375049, 0.46252843, 0.37280654, 0.34687207), tolerance = 1e-07) x5 <- getSimulationRates(design = getDesignInverseNormal(futilityBounds = c(0.5,0.5), informationRates = informationRates), groups = 2, riskRatio = FALSE, thetaH0 = 0.1, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, directionUpper = FALSE, seed = seed) ## ## Comparison of the results of SimulationResultsRates object 'x5' with expected results ## expect_equal(x5$effect, c(-0.1, 2.7755576e-17, 0.1, 0.2), tolerance = 1e-07) expect_equal(x5$iterations[1, ], c(100, 100, 100, 100)) expect_equal(x5$iterations[2, ], c(50, 41, 12, 2)) expect_equal(x5$iterations[3, ], c(34, 29, 3, 0)) expect_equal(x5$sampleSizes[1, ], c(30, 30, 30, 30)) expect_equal(x5$sampleSizes[2, ], c(30, 30, 30, 30)) expect_equal(x5$sampleSizes[3, ], c(30, 30, 30, NaN)) expect_equal(x5$rejectPerStage[1, ], c(0.01, 0, 0, 0), tolerance = 
1e-07) expect_equal(x5$rejectPerStage[2, ], c(0.09, 0.02, 0, 0), tolerance = 1e-07) expect_equal(x5$rejectPerStage[3, ], c(0.12, 0.01, 0, 0), tolerance = 1e-07) expect_equal(x5$overallReject, c(0.22, 0.03, 0, 0), tolerance = 1e-07) expect_equal(x5$futilityPerStage[1, ], c(0.49, 0.59, 0.88, 0.98), tolerance = 1e-07) expect_equal(x5$futilityPerStage[2, ], c(0.07, 0.1, 0.09, 0.02), tolerance = 1e-07) expect_equal(x5$futilityStop, c(0.56, 0.69, 0.97, 1), tolerance = 1e-07) expect_equal(x5$earlyStop, c(0.66, 0.71, 0.97, 1), tolerance = 1e-07) expect_equal(x5$expectedNumberOfSubjects, c(55.2, 51, 34.5, 30.6), tolerance = 1e-07) expect_equal(x5$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$conditionalPowerAchieved[2, ], c(0.36523014, 0.20927326, 0.16995311, 0.25129054), tolerance = 1e-07) expect_equal(x5$conditionalPowerAchieved[3, ], c(0.43064609, 0.32068397, 0.041565592, NaN), tolerance = 1e-07) x6 <- getSimulationRates(design = getDesignInverseNormal(futilityBounds = c(0.5,0.5), informationRates = informationRates), groups = 1, thetaH0 = 0.4, pi1 = seq(0.2, 0.4, 0.05), plannedSubjects = plannedSubjects, directionUpper = FALSE, maxNumberOfIterations = maxNumberOfIterations, seed = seed) ## ## Comparison of the results of SimulationResultsRates object 'x6' with expected results ## expect_equal(x6$effect, c(-0.2, -0.15, -0.1, -0.05, 0), tolerance = 1e-07) expect_equal(x6$iterations[1, ], c(100, 100, 100, 100, 100)) expect_equal(x6$iterations[2, ], c(91, 89, 66, 56, 39)) expect_equal(x6$iterations[3, ], c(19, 49, 51, 48, 24)) expect_equal(x6$sampleSizes[1, ], c(30, 30, 30, 30, 30)) expect_equal(x6$sampleSizes[2, ], c(30, 30, 30, 30, 30)) expect_equal(x6$sampleSizes[3, ], c(30, 30, 30, 30, 30)) expect_equal(x6$rejectPerStage[1, ], c(0.03, 0.01, 0, 0, 0), tolerance = 1e-07) expect_equal(x6$rejectPerStage[2, ], c(0.72, 0.4, 0.14, 0.01, 0.01), tolerance = 1e-07) expect_equal(x6$rejectPerStage[3, ], c(0.17, 0.37, 0.26, 0.14, 0.02), tolerance = 1e-07) expect_equal(x6$overallReject, c(0.92, 0.78, 0.4, 0.15, 0.03), tolerance = 1e-07) expect_equal(x6$futilityPerStage[1, ], c(0.06, 0.1, 0.34, 0.44, 0.61), tolerance = 1e-07) expect_equal(x6$futilityPerStage[2, ], c(0, 0, 0.01, 0.07, 0.14), tolerance = 1e-07) expect_equal(x6$futilityStop, c(0.06, 0.1, 0.35, 0.51, 0.75), tolerance = 1e-07) expect_equal(x6$earlyStop, c(0.81, 0.51, 0.49, 0.52, 0.76), tolerance = 1e-07) expect_equal(x6$expectedNumberOfSubjects, c(63, 71.4, 65.1, 61.2, 48.9), tolerance = 1e-07) expect_equal(x6$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$conditionalPowerAchieved[2, ], c(0.72335875, 0.55247274, 0.3843863, 0.29482523, 0.18598438), tolerance = 1e-07) expect_equal(x6$conditionalPowerAchieved[3, ], c(0.71459365, 0.68392316, 0.54740245, 0.39208559, 0.15519282), tolerance = 1e-07) x7 <- getSimulationRates(design = getDesignInverseNormal(futilityBounds = c(0.5), typeOfDesign = "P"), thetaH0 = 0.3, groups = 1, plannedSubjects = c(30,60), pi1 = seq(0.3,0.5,0.05),maxNumberOfIterations = maxNumberOfIterations, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(30, 30), maxNumberOfSubjectsPerStage = 5 * c(30, 30), directionUpper = TRUE, seed = seed) ## ## Comparison of the results of SimulationResultsRates object 'x7' with expected results ## expect_equal(x7$effect, c(0, 0.05, 0.1, 0.15, 0.2), tolerance = 1e-07) expect_equal(x7$iterations[1, ], c(100, 100, 100, 100, 100)) expect_equal(x7$iterations[2, ], c(25, 41, 53, 50, 35)) 
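# In x7 the second-stage sample size is recalculated in each iteration to reach
# conditionalPower = 0.8, truncated to [30, 150] as given by minNumberOfSubjectsPerStage
# and maxNumberOfSubjectsPerStage = 5 * c(30, 30); the non-integer stage-2 values checked
# below are therefore averages over the iterations that continued to stage 2.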
expect_equal(x7$sampleSizes[1, ], c(30, 30, 30, 30, 30)) expect_equal(x7$sampleSizes[2, ], c(114.24, 115.68293, 100.39623, 101.92, 82.371429), tolerance = 1e-07) expect_equal(x7$rejectPerStage[1, ], c(0.02, 0.06, 0.15, 0.36, 0.59), tolerance = 1e-07) expect_equal(x7$rejectPerStage[2, ], c(0.03, 0.12, 0.32, 0.41, 0.32), tolerance = 1e-07) expect_equal(x7$overallReject, c(0.05, 0.18, 0.47, 0.77, 0.91), tolerance = 1e-07) expect_equal(x7$futilityPerStage[1, ], c(0.73, 0.53, 0.32, 0.14, 0.06), tolerance = 1e-07) expect_equal(x7$futilityStop, c(0.73, 0.53, 0.32, 0.14, 0.06), tolerance = 1e-07) expect_equal(x7$earlyStop, c(0.75, 0.59, 0.47, 0.5, 0.65), tolerance = 1e-07) expect_equal(x7$expectedNumberOfSubjects, c(58.56, 77.43, 83.21, 80.96, 58.83), tolerance = 1e-07) expect_equal(x7$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$conditionalPowerAchieved[2, ], c(0.60107965, 0.60407724, 0.68409402, 0.68536207, 0.68807468), tolerance = 1e-07) x8 <- getSimulationRates(design = getDesignGroupSequential( futilityBounds = c(0.5,0.5), typeOfDesign = "P"), thetaH0 = 0.3, groups = 2, allocationRatioPlanned = 3, plannedSubjects = (1:3) * 100, pi1 = seq(0.2, 0.4, 0.05), pi2 = 0.2, maxNumberOfIterations = maxNumberOfIterations, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(100,100,100), maxNumberOfSubjectsPerStage = 5*c(100,100,100), directionUpper = FALSE, seed = seed) ## ## Comparison of the results of SimulationResultsRates object 'x8' with expected results ## expect_equal(x8$effect, c(-0.3, -0.25, -0.2, -0.15, -0.1), tolerance = 1e-07) expect_equal(x8$iterations[1, ], c(100, 100, 100, 100, 100)) expect_equal(x8$iterations[2, ], c(7, 23, 41, 52, 59)) expect_equal(x8$iterations[3, ], c(0, 1, 1, 11, 20)) expect_equal(x8$sampleSizes[1, ], c(100, 100, 100, 100, 100)) expect_equal(x8$sampleSizes[2, ], c(225.57143, 148.73913, 239.7561, 361.73077, 405.05085), tolerance = 1e-07) expect_equal(x8$sampleSizes[3, ], c(NaN, 112, 316, 398, 405.85), tolerance = 1e-07) expect_equal(x8$rejectPerStage[1, ], c(0.93, 0.75, 0.54, 0.29, 0.1), tolerance = 1e-07) expect_equal(x8$rejectPerStage[2, ], c(0.07, 0.22, 0.4, 0.41, 0.37), tolerance = 1e-07) expect_equal(x8$rejectPerStage[3, ], c(0, 0.01, 0.01, 0.11, 0.14), tolerance = 1e-07) expect_equal(x8$overallReject, c(1, 0.98, 0.95, 0.81, 0.61), tolerance = 1e-07) expect_equal(x8$futilityPerStage[1, ], c(0, 0.02, 0.05, 0.19, 0.31), tolerance = 1e-07) expect_equal(x8$futilityPerStage[2, ], c(0, 0, 0, 0, 0.02), tolerance = 1e-07) expect_equal(x8$futilityStop, c(0, 0.02, 0.05, 0.19, 0.33), tolerance = 1e-07) expect_equal(x8$earlyStop, c(1, 0.99, 0.99, 0.89, 0.8), tolerance = 1e-07) expect_equal(x8$expectedNumberOfSubjects, c(115.79, 135.33, 201.46, 331.88, 420.15), tolerance = 1e-07) expect_equal(x8$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$conditionalPowerAchieved[2, ], c(0.79294349, 0.80728899, 0.77763316, 0.64160567, 0.53147513), tolerance = 1e-07) expect_equal(x8$conditionalPowerAchieved[3, ], c(NaN, 0.80069037, 0.80071364, 0.56677072, 0.57523679), tolerance = 1e-07) x9 <- getSimulationRates(design = getDesignGroupSequential( futilityBounds = c(0), typeOfDesign = "P"), thetaH0 = 0.8, groups = 2, riskRatio = TRUE, allocationRatioPlanned = 3, maxNumberOfIterations = maxNumberOfIterations, plannedSubjects = c(100,200), pi1 = seq(0.15, 0.4, 0.05), pi2 = 0.2, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(100,100), maxNumberOfSubjectsPerStage = 5*c(100, 
100), directionUpper = TRUE, seed = seed) ## ## Comparison of the results of SimulationResultsRates object 'x9' with expected results ## expect_equal(x9$effect, c(-0.05, 0.2, 0.45, 0.7, 0.95, 1.2), tolerance = 1e-07) expect_equal(x9$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x9$iterations[2, ], c(48, 66, 75, 74, 57, 35)) expect_equal(x9$sampleSizes[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x9$sampleSizes[2, ], c(466.29167, 407.39394, 382.84, 357.2973, 256.61404, 268.45714), tolerance = 1e-07) expect_equal(x9$rejectPerStage[1, ], c(0.01, 0.02, 0.11, 0.24, 0.41, 0.65), tolerance = 1e-07) expect_equal(x9$rejectPerStage[2, ], c(0, 0.05, 0.34, 0.62, 0.51, 0.35), tolerance = 1e-07) expect_equal(x9$overallReject, c(0.01, 0.07, 0.45, 0.86, 0.92, 1), tolerance = 1e-07) expect_equal(x9$futilityPerStage[1, ], c(0.51, 0.32, 0.14, 0.02, 0.02, 0), tolerance = 1e-07) expect_equal(x9$futilityStop, c(0.51, 0.32, 0.14, 0.02, 0.02, 0), tolerance = 1e-07) expect_equal(x9$earlyStop, c(0.52, 0.34, 0.25, 0.26, 0.43, 0.65), tolerance = 1e-07) expect_equal(x9$expectedNumberOfSubjects, c(323.82, 368.88, 387.13, 364.4, 246.27, 193.96), tolerance = 1e-07) expect_equal(x9$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x9$conditionalPowerAchieved[2, ], c(0.32248415, 0.49314797, 0.522945, 0.55888112, 0.72047998, 0.75410423), tolerance = 1e-07) mySampleSizeCalculationFunction <- function(...,stage, plannedSubjects, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, conditionalPower, conditionalCriticalValue, overallRate) { if (overallRate[1] - overallRate[2] < 0.1){ return(plannedSubjects[stage] - plannedSubjects[stage - 1]) } else { rateUnderH0 <- (overallRate[1] + overallRate[2])/2 stageSubjects <- 2 * (max(0, conditionalCriticalValue * sqrt(2 * rateUnderH0 * (1 - rateUnderH0)) + stats::qnorm(conditionalPower) * sqrt(overallRate[1] * (1 - overallRate[1]) + overallRate[2] * (1 - overallRate[2]))))^2 / (max(1e-12, (overallRate[1] - overallRate[2])))^2 stageSubjects <- ceiling(min(max(minNumberOfSubjectsPerStage[stage], stageSubjects), maxNumberOfSubjectsPerStage[stage])) return(stageSubjects) } } x10 <- getSimulationRates(design = getDesignInverseNormal(kMax = 2), pi1 = seq(0.3,0.6,0.1), pi2 = 0.3, plannedSubjects = c(40, 80), minNumberOfSubjectsPerStage = c(40, 20), maxNumberOfSubjectsPerStage = c(40, 160), conditionalPower = 0.8, calcSubjectsFunction = mySampleSizeCalculationFunction, maxNumberOfIterations = maxNumberOfIterations, seed = seed) ## ## Comparison of the results of SimulationResultsRates object 'x10' with expected results ## expect_equal(x10$effect, c(0, 0.1, 0.2, 0.3), tolerance = 1e-07) expect_equal(x10$iterations[1, ], c(100, 100, 100, 100)) expect_equal(x10$iterations[2, ], c(100, 99, 95, 75)) expect_equal(x10$sampleSizes[1, ], c(40, 40, 40, 40)) expect_equal(x10$sampleSizes[2, ], c(64.34, 74.444444, 65.126316, 58.253333), tolerance = 1e-07) expect_equal(x10$rejectPerStage[1, ], c(0, 0.01, 0.05, 0.25), tolerance = 1e-07) expect_equal(x10$rejectPerStage[2, ], c(0.02, 0.19, 0.47, 0.64), tolerance = 1e-07) expect_equal(x10$overallReject, c(0.02, 0.2, 0.52, 0.89), tolerance = 1e-07) expect_equal(x10$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x10$futilityStop, c(0, 0, 0, 0)) expect_equal(x10$earlyStop, c(0, 0.01, 0.05, 0.25), tolerance = 1e-07) expect_equal(x10$expectedNumberOfSubjects, c(104.34, 113.7, 101.87, 83.69), tolerance = 1e-07) expect_equal(x10$conditionalPowerAchieved[1, ], c(NA_real_, 
NA_real_, NA_real_, NA_real_)) expect_equal(x10$conditionalPowerAchieved[2, ], c(0.20349537, 0.39194633, 0.57556995, 0.71162895), tolerance = 1e-07) #options(width = 180) #maxNumberOfSubjects <- 300 #informationRates <- (1:2)/2 #plannedSubjects <- round(informationRates*maxNumberOfSubjects) #maxNumberOfIterations <- 10000 # #x <- getSimulationRates(design = getDesignInverseNormal(futilityBounds = c(-1), informationRates = informationRates,typeOfDesign = "P"), # thetaH0 = 0.4, groups = 1, plannedSubjects = plannedSubjects, pi1 = seq(0.3,0.4,0.02),maxNumberOfIterations = maxNumberOfIterations, # conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(100,100), maxNumberOfSubjectsPerStage = 5*c(100,100), directionUpper = FALSE) #x$overallReject #x$expectedNumberOfSubjects #x$conditionalPowerAchieved #x$sampleSizes #x$rejectPerStage #x$futilityStop #y <- getPowerRates(design = getDesignGroupSequential(futilityBounds = c(-1,1), informationRates = informationRates,typeOfDesign = "P"), # thetaH0 = 0.4, groups = 1, pi1 = seq(0.3,0.4,0.02), directionUpper = FALSE, maxNumberOfSubjects = maxNumberOfSubjects) #round((x$expectedNumberOfSubjects - y$expectedNumberOfSubjects)/maxNumberOfSubjects,4) #x$expectedNumberOfSubjects #y$expectedNumberOfSubjects #x$overallReject #round(x$overallReject - y$overallReject,4) #round(x$rejectPerStage - y$rejectPerStage,4) #round(x$futilityPerStage - y$futilityPerStage,4) # #x <- getSimulationRates(design = getDesignGroupSequential(futilityBounds = c(-1,1), typeOfDesign = "P"), # thetaH0 = 0.3, groups = 2, allocationRatioPlanned = 2, plannedSubjects = (1:3)*100, pi1 = seq(0.2, 0.4, 0.05), pi2 = 0.1, # conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(100,100,100), maxNumberOfSubjectsPerStage = 1*c(100,100,100), directionUpper = FALSE) # #y <- getPowerRates(design = getDesignGroupSequential(futilityBounds = c(-1,1), typeOfDesign = "P"), # thetaH0 = 0.3, groups = 2, allocationRatioPlanned = 2, pi1 = seq(0.2, 0.4, 0.05), pi2 = 0.1, directionUpper = FALSE, maxNumberOfSubjects = 300) # #round((x$expectedNumberOfSubjects - y$expectedNumberOfSubjects)/maxNumberOfSubjects,4) #x$expectedNumberOfSubjects #y$expectedNumberOfSubjects #x$overallReject #round(x$overallReject - y$overallReject,4) #round(x$rejectPerStage - y$rejectPerStage,4) #round(x$futilityPerStage - y$futilityPerStage,4) # #x <- getSimulationRates(design = getDesignGroupSequential(futilityBounds = c(-1,1), informationRates = informationRates,typeOfDesign = "P"), # thetaH0 = 0.8, groups = 2, riskRatio = TRUE, allocationRatioPlanned = 2, plannedSubjects = plannedSubjects, pi1 = seq(0.15,0.4,0.05), pi2 = 0.2, # conditionalPower = 0.8, minNumberOfSubjectsPerStage = plannedSubjects, maxNumberOfSubjectsPerStage = c(100,200,300), directionUpper = TRUE) # #y <- getPowerRates(design = getDesignGroupSequential(futilityBounds = c(-1,1), informationRates = informationRates,typeOfDesign = "P"), # thetaH0 = 0.8, groups = 2, riskRatio = TRUE, allocationRatioPlanned = 2, pi1 = seq(0.15,0.4,0.05), pi2 = 0.2, maxNumberOfSubjects = maxNumberOfSubjects, # directionUpper = TRUE) # #round((x$expectedNumberOfSubjects - y$expectedNumberOfSubjects)/maxNumberOfSubjects,4) #x$expectedNumberOfSubjects #y$expectedNumberOfSubjects #x$overallReject #round(x$overallReject - y$overallReject,4) #round(x$rejectPerStage - y$rejectPerStage,4) #round(x$futilityPerStage - y$futilityPerStage,4) # ############################################################################################################################## # #x <- 
getSimulationSurvival(design = getDesignInverseNormal(typeOfDesign = "P", futilityBounds = c(0,0)), # pi1 = seq(0.2, 0.4, 0.05), maxNumberOfIterations = 10000, accrualTime = 24, plannedEvents = c(67,134,201), maxNumberOfSubjects = 396, allocation1 = 1, allocation2 = 1) #toc() #y <- getPowerSurvival(design = getDesignInverseNormal(typeOfDesign = "P", futilityBounds = c(0,0)), # pi1 = seq(0.2, 0.4, 0.05), maxNumberOfEvents = 201, accrualTime = 24, maxNumberOfSubjects = 396, allocationRatioPlanned = 1) # #round(x$expectedNumberOfEvents - y$expectedNumberOfEvents,1) #round(x$expectedNumberOfSubjects - y$expectedNumberOfSubjects,1) #round(x$numberOfSubjects - y$numberOfSubjects,1) #round(x$rejectPerStage - y$rejectPerStage,4) #round(x$overallReject - y$overallReject,4) #round(x$earlyStop - y$earlyStop,4) #round(x$futilityPerStage - y$futilityPerStage,4) #round(x$futilityStop - y$futilityStop,4) #round(x$analysisTime - y$analysisTime,4) #round(x$studyDuration - y$studyDuration,4) #x$conditionalPowerAchieved }) rpact/inst/tests/testthat/test-f_core_output_formats.R0000644000176200001440000001073613574412501023063 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 06 November 2019, 17:12:23 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing the output format functions") # @refFS[Sec.]{fs:sec:outputFormats} test_that("'formatPValues'", { x <- formatPValues(0.0000234) ## ## Comparison of the results of character object 'x' with expected results ## expect_equal(x, "<0.0001") x <- formatPValues(c(0.0000234, 0.0000134, 0.1234)) ## ## Comparison of the results of character object 'x' with expected results ## expect_equal(x, c("<0.0001", "<0.0001", "0.1234")) x <- formatPValues(c(0.0002345678, 0.0000134, 0.1234, 0.000000000001, .00000009999)) ## ## Comparison of the results of character object 'x' with expected results ## expect_equal(x, c("0.0002346", "0.0000134", "0.1234000", "<0.000001", "<0.000001")) x <- formatPValues(c(0.00234, 0.000013, 0.1234, 0.000000000001, .00000009999)) ## ## Comparison of the results of character object 'x' with expected results ## expect_equal(x, c("0.00234", "<0.0001", "0.12340", "<0.0001", "<0.0001")) x <- formatPValues(c(6.244e-05, 4.906e-02, 1.446e-02, NA_real_)) ## ## Comparison of the results of character object 'x' with expected results ## expect_equal(x, c("<0.0001", "0.04906", "0.01446", "NA")) x <- formatPValues(c(6.24408201934656e-05, 7.55449751868031e-05, 1.23207030919836e-05, NA_real_)) ## ## Comparison of the results of character object 'x' with expected results ## expect_equal(x, c("<0.0001", "<0.0001", "<0.0001", "NA")) # @refFS[Sec.]{fs:sec:outputFormats} }) test_that("'formatRepeatedPValues'", { x <- formatRepeatedPValues(c(0.0000234, 0.0000134, 0.1234)) ## ## Comparison of the results of character object 'x' with expected results ## expect_equal(x, c("<0.0001", "<0.0001", "0.1234")) x <- formatRepeatedPValues(c(0.0000234, 
0.0000134, 0.5234)) ## ## Comparison of the results of character object 'x' with expected results ## expect_equal(x, c("<0.0001", "<0.0001", ">0.5")) x <- formatRepeatedPValues(c(0.0000234, 0.0000134, 0.5234, NA_real_)) ## ## Comparison of the results of character object 'x' with expected results ## expect_equal(x, c("<0.0001", "<0.0001", ">0.5", "NA")) # @refFS[Sec.]{fs:sec:outputFormats} }) test_that("'formatConditionalPower'", { x <- formatConditionalPower(c(0.0000234, 0.0000134, 0.5234, NA_real_)) ## ## Comparison of the results of character object 'x' with expected results ## expect_equal(x, c("0", "0", "0.5234", "NA")) x <- formatConditionalPower(c(0.234, 0.123456, 0.6, 0.000001)) ## ## Comparison of the results of character object 'x' with expected results ## expect_equal(x, c("0.2340", "0.1235", "0.6000", "0")) # @refFS[Sec.]{fs:sec:outputFormats} }) test_that("'formatProbabilities'", { x <- formatProbabilities(c(NA_real_, NA_real_, 0.4536623, 0.7713048)) ## ## Comparison of the results of character object 'x' with expected results ## expect_equal(x, c("NA", "NA", "0.4537", "0.7713")) }) rpact/inst/tests/testthat/test-f_design_sample_size_calculator.R0000644000176200001440000134066113574422572025052 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 12 December 2019, 12:31:28 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing internal functions of the sample size calculator") test_that("'.getLambdaStepFunctionByTime': return correct lambda for specified time and piecewise exponential bounds", { lambda1 <- .getLambdaStepFunctionByTime(time = 1, c(6, 9, 15, 21), c(0.025, 0.04, 0.015, 0.01, 0.007)) ## ## Comparison of the results of numeric object 'lambda1' with expected results ## expect_equal(lambda1, 0.025, tolerance = 1e-07) lambda2 <- .getLambdaStepFunctionByTime(time = 6, c(6, 9, 15, 21), c(0.025, 0.04, 0.015, 0.01, 0.007)) ## ## Comparison of the results of numeric object 'lambda2' with expected results ## expect_equal(lambda2, 0.025, tolerance = 1e-07) lambda3 <- .getLambdaStepFunctionByTime(time = 7, c(6, 9, 15, 21), c(0.025, 0.04, 0.015, 0.01, 0.007)) ## ## Comparison of the results of numeric object 'lambda3' with expected results ## expect_equal(lambda3, 0.04, tolerance = 1e-07) lambda4 <- .getLambdaStepFunctionByTime(time = 9, c(6, 9, 15, 21), c(0.025, 0.04, 0.015, 0.01, 0.007)) ## ## Comparison of the results of numeric object 'lambda4' with expected results ## expect_equal(lambda4, 0.04, tolerance = 1e-07) lambda5 <- .getLambdaStepFunctionByTime(time = 14, c(6, 9, 15, 21), c(0.025, 0.04, 0.015, 0.01, 0.007)) ## ## Comparison of the results of numeric object 'lambda5' with expected results ## expect_equal(lambda5, 0.015, tolerance = 1e-07) lambda6 <- .getLambdaStepFunctionByTime(time = 15, c(6, 9, 15, 21), c(0.025, 0.04, 0.015, 0.01, 0.007)) ## ## Comparison of the results of numeric object 'lambda6' with expected results ## 
expect_equal(lambda6, 0.015, tolerance = 1e-07) lambda7 <- .getLambdaStepFunctionByTime(time = 16, c(6, 9, 15, 21), c(0.025, 0.04, 0.015, 0.01, 0.007)) ## ## Comparison of the results of numeric object 'lambda7' with expected results ## expect_equal(lambda7, 0.01, tolerance = 1e-07) lambda8 <- .getLambdaStepFunctionByTime(time = 21, c(6, 9, 15, 21), c(0.025, 0.04, 0.015, 0.01, 0.007)) ## ## Comparison of the results of numeric object 'lambda8' with expected results ## expect_equal(lambda8, 0.01, tolerance = 1e-07) lambda9 <- .getLambdaStepFunctionByTime(time = 50, c(6, 9, 15, 21), c(0.025, 0.04, 0.015, 0.01, 0.007)) ## ## Comparison of the results of numeric object 'lambda9' with expected results ## expect_equal(lambda9, 0.007, tolerance = 1e-07) }) context("Testing the sample size calculation of means for different designs and arguments") test_that("'getSampleSizeMeans': sample size calculation of means for one sided group sequential design", { # @refFS[Formula]{fs:criticalValuesWangTiatis} # @refFS[Formula]{fs:inflationFactor} # @refFS[Formula]{fs:expectedReduction} designGS1pretest <- getDesignGroupSequential(informationRates = c(0.2,0.5,1), sided = 1, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3) ## ## Comparison of the results of TrialDesignGroupSequential object 'designGS1pretest' with expected results ## expect_equal(designGS1pretest$alphaSpent, c(0.0020595603, 0.0098772988, 0.024999999), tolerance = 1e-07) expect_equal(designGS1pretest$criticalValues, c(2.8688923, 2.3885055, 2.0793148), tolerance = 1e-07) expect_equal(designGS1pretest$stageLevels, c(0.0020595603, 0.0084585282, 0.018794214), tolerance = 1e-07) designGS1 <- getDesignGroupSequential(informationRates = c(0.2,0.5,1), sided = 1, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3) # @refFS[Formula]{fs:sampleSizeOneMeanVarianceUnknownOnesided} # @refFS[Formula]{fs:sampleSizePerStageOneMean} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 1, thetaH0 = 0.5, stDev = 2, normalApproximation = FALSE, alternative = 0.8) ## ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 494.6455, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 98.929099, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 247.32275, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 494.6455, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 491.89699, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 462.87248, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 360.24062, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 1.090771, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.80583608, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.68748891, tolerance = 1e-07) 
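# Added cross-check sketch (assumed shortcut, not an original expectation):
# the fixed-sample one-mean z-test size for effect = alternative - thetaH0 =
# 0.3 with stDev = 2, one-sided alpha = 0.025 and beta = 0.1 is
nFixedOneMeanSketch <- 2^2 * (qnorm(0.975) + qnorm(0.9))^2 / (0.8 - 0.5)^2  # approx. 467
# The maxNumberOfSubjects values in this test exceed this by the inflation
# factor of the Wang & Tsiatis (deltaWT = 0.3) design, approx. 1.055: about
# 492.6 under the normal approximation and slightly more (494.6) for the
# exact t test checked above.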
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeOneMeanVarianceKnownOnesided} # @refFS[Formula]{fs:sampleSizePerStageOneMean} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 1, thetaH0 = 0.5, stDev = 2, normalApproximation = TRUE, alternative = 0.8) ## ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 492.61495, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 98.522991, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 246.30748, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 492.61495, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 489.87773, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 460.97237, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 358.76182, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 1.0780634, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.80438093, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.68736844, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceUnknownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = FALSE, alternative = 1.3, allocationRatioPlanned = 1) ## ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 107.00299, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 53.501497, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 53.501497, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 21.400599, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 53.501497, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 107.00299, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 106.40843, tolerance = 1e-07) 
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 100.12977, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 77.928183, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.8110917, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.3500437, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.81436669, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceKnownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = TRUE, alternative = 1.3, allocationRatioPlanned = 1) ## ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 104.93573, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 52.467865, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 52.467865, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 20.987146, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 52.467865, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 104.93573, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 104.35265, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 98.195298, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 76.422636, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.5049412, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.318984, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.81192991, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceUnknownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = FALSE, alternative = 1.3, 
allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 141.97133, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 106.4785, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 35.492832, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 28.394266, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 70.985664, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 141.97133, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 21.295699, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 53.239248, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 106.4785, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 7.0985664, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 17.746416, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 35.492832, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 141.18246, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 132.85195, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 103.39494, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.7228801, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.3419598, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.81376184, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceKnownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = TRUE, alternative = 1.3, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 139.91431, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 104.93573, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 34.978577, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 27.982861, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 69.957153, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 139.91431, 
tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 20.987146, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 52.467865, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 104.93573, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 6.9957153, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 17.489288, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 34.978577, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 139.13687, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 130.92706, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 101.89685, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.5049412, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.318984, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.81192991, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceUnknownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0.5, stDev = 2, normalApproximation = FALSE, alternative = 2.1, allocationRatioPlanned = 1) ## ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 71.36231, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 35.681155, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 35.681155, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 14.272462, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 35.681155, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 71.36231, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 70.965784, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 66.77843, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 51.971772, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.222748, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.1829515, tolerance = 1e-07) 
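# Added sketch (assumed shortcut, not an original expectation): with a
# non-zero margin the difference entering the sample size formula is
# alternative - thetaH0 = 2.1 - 0.5 = 1.6, not the alternative itself:
nFixedMarginSketch <- 4 * 2^2 *
	(qnorm(0.975) + qnorm(0.9))^2 / (2.1 - 0.5)^2  # approx. 65.7
# Times the inflation factor of approx. 1.055 this gives the roughly 69.3
# subjects of the normal approximation case below; the t-test case checked
# above is slightly larger (approx. 71.4).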
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5038177, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceKnownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0.5, stDev = 2, normalApproximation = TRUE, alternative = 2.1, allocationRatioPlanned = 1) ## ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 69.273978, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 34.636989, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 34.636989, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 13.854796, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 34.636989, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 69.273978, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 68.889056, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 64.824239, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 50.450881, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.5830046, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.123365, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.4992983, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceUnknownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0.5, stDev = 2, normalApproximation = FALSE, alternative = 2.1, allocationRatioPlanned = 0.4) ## ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 86.937573, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 24.839307, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 62.098267, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 17.387515, tolerance = 1e-07) 
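# Added sketch (assumed shortcut, not an original expectation): for unequal
# allocation r = n1/n2 the fixed-design total sample size scales with
# (1 + r)^2 / r, which is minimal at r = 1. For this block (r = 0.4,
# stDev = 2, effect = 2.1 - 0.5 = 1.6):
rSketch <- 0.4
nFixedAllocSketch <- (1 + rSketch)^2 / rSketch * 2^2 *
	(qnorm(0.975) + qnorm(0.9))^2 / 1.6^2  # approx. 80.4
# Times the inflation factor of approx. 1.055 this reproduces the
# maxNumberOfSubjects of about 84.9 in the normal approximation case below.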
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 43.468787, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 86.937573, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 4.9678613, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 12.419653, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 24.839307, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 12.419653, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 31.049133, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 62.098267, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 86.454503, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 81.353233, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 63.314931, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.0734522, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.1712593, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5029983, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceKnownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0.5, stDev = 2, normalApproximation = TRUE, alternative = 2.1, allocationRatioPlanned = 0.4) ## ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 84.860623, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 24.245892, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 60.614731, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 16.972125, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 42.430311, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 84.860623, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 4.8491785, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 12.122946, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 24.245892, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 12.122946, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 30.307365, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 60.614731, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 84.389093, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 
79.409693, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 61.802329, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.5830046, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.123365, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.4992983, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeTwoMeansRatioVarianceUnknownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, meanRatio = TRUE, thetaH0 = 0.9, stDev = 3, normalApproximation = FALSE, alternative = 1.9, allocationRatioPlanned = 1) ## ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 363.14949, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 181.57474, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 181.57474, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 72.629897, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 181.57474, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 363.14949, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 361.13164, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 339.82298, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 264.47466, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.8861856, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9212807, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5251098, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeTwoMeansRatioVarianceKnownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, meanRatio = TRUE, thetaH0 = 0.9, stDev = 3, normalApproximation = TRUE, alternative = 1.9, allocationRatioPlanned = 1) ## ## 
Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 361.11139, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 180.5557, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 180.5557, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 72.222278, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 180.5557, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 361.11139, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 359.10487, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 337.9158, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 262.99035, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.8268779, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9146031, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5245615, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeTwoMeansRatioVarianceUnknownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, meanRatio = TRUE, thetaH0 = 0.9, stDev = 3, normalApproximation = FALSE, alternative = 1.9, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 458.2463, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 343.68473, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 114.56158, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 91.64926, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 229.12315, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 458.2463, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 68.736945, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 171.84236, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 343.68473, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 22.912315, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 57.280788, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 114.56158, tolerance = 
1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 455.70005, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 428.81135, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 333.7318, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.8732837, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9198713, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5249957, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeTwoMeansRatioVarianceKnownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, meanRatio = TRUE, thetaH0 = 0.9, stDev = 3, normalApproximation = TRUE, alternative = 1.9, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 456.21071, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 342.15803, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 114.05268, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 91.242142, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 228.10535, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 456.21071, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 68.431606, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 171.07902, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 342.15803, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 22.810535, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 57.026339, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 114.05268, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 453.67577, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 426.90651, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 332.24932, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.8268779, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9146031, tolerance = 1e-07) 
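# Added sketch (assumed interpretation, not an original expectation): with
# meanRatio = TRUE the hypothesis H0: mu1 / mu2 = thetaH0 is tested via the
# contrast x1 - thetaH0 * x2, and stDev enters as a coefficient of variation.
# Under equal allocation the fixed-design total size is then
nFixedRatioSketch <- 2 * (1 + 0.9^2) * 3^2 *
	(qnorm(0.975) + qnorm(0.9))^2 / (1.9 - 0.9)^2  # approx. 342.3
# which, times the inflation factor of approx. 1.055, matches the
# maxNumberOfSubjects of about 361.1 in the allocationRatioPlanned = 1,
# normalApproximation = TRUE case above. Note also that
# allocationRatioPlanned = 0 (further below) requests the optimal ratio,
# which for this ratio test is 1 / thetaH0 = 1.1111, in line with the
# expectation checked there.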
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5245615, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeTwoMeansRatioVarianceKnownOnesided} # @refFS[Formula]{fs:sampleSizeRatioMeansOptimumAllocationRatio} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, meanRatio = TRUE, thetaH0 = 0.9, stDev = 3, normalApproximation = TRUE, alternative = 1.9, allocationRatioPlanned = 0) ## ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$allocationRatioPlanned, 1.1111111, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 360.11385, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 189.5336, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 170.58024, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 72.022769, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 180.05692, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 360.11385, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 37.906721, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 94.766802, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 189.5336, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 34.116049, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 85.290122, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 170.58024, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 358.11287, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 336.98233, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 262.26386, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.8268779, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9146031, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5245615, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) }) test_that("'getSampleSizeMeans': sample size calculation of means for two sided group sequential design", { # @refFS[Formula]{fs:criticalValuesWangTiatis} # @refFS[Formula]{fs:inflationFactor} # @refFS[Formula]{fs:expectedReduction} designGS2pretest <- 
getDesignGroupSequential(informationRates = c(0.2,0.5,1), alpha = 0.4, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3) ## ## Comparison of the results of TrialDesignGroupSequential object 'designGS2pretest' with expected results ## expect_equal(designGS2pretest$alphaSpent, c(0.12265406, 0.26238998, 0.4), tolerance = 1e-07) expect_equal(designGS2pretest$criticalValues, c(1.5437287, 1.2852363, 1.1188632), tolerance = 1e-07) expect_equal(designGS2pretest$stageLevels, c(0.06132703, 0.099354859, 0.13159925), tolerance = 1e-07) designGS2 <- getDesignGroupSequential(informationRates = c(0.2,0.5,1), alpha = 0.4, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3) # @refFS[Formula]{fs:sampleSizeOneMeanVarianceUnknownOnesided} # @refFS[Formula]{fs:sampleSizePerStageOneMean} sampleSizeResult <- getSampleSizeMeans(designGS2, groups = 1, thetaH0 = 0.5, stDev = 2, normalApproximation = FALSE, alternative = 0.8) ## ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$maxNumberOfSubjects, 234.92433, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 46.984866, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 117.46217, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 234.92433, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 195.45911, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 176.81177, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 134.60888, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.041134725, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.26146972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.3536511, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 0.95886527, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 0.73853028, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.6463489, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeOneMeanVarianceKnownOnesided} # @refFS[Formula]{fs:sampleSizePerStageOneMean} sampleSizeResult <- getSampleSizeMeans(designGS2, groups = 1, thetaH0 = 0.5, stDev = 2, normalApproximation = TRUE, alternative = 0.8) ## ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) 
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 234.50706, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 46.901412, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 117.25353, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 234.50706, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 195.11194, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 176.49772, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 134.36979, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.049174965, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.26261678, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.35387349, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 0.95082503, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 0.73738322, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.64612651, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceUnknownOnesided}
    # @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
    sampleSizeResult <- getSampleSizeMeans(designGS2, groups = 2, thetaH0 = 0, stDev = 2,
        normalApproximation = FALSE, alternative = 1.3, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 50.39219, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 25.196095, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 25.196095, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 10.078438, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 25.196095, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 50.39219, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 41.926745, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 37.926818, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 28.874132, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
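    # With thetaH0 = 0 and a two-sided design, the effect-scale bounds below are
    # symmetric about zero.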
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -2.1720469, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], -1.0543228, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], -0.63787834, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 2.1720469, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.0543228, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.63787834, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceKnownOnesided}
    # @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
    sampleSizeResult <- getSampleSizeMeans(designGS2, groups = 2, thetaH0 = 0, stDev = 2,
        normalApproximation = TRUE, alternative = 1.3, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 49.954167, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 24.977083, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 24.977083, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 9.9908334, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 24.977083, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 49.954167, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 41.562306, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 37.597148, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 28.62315, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -1.9535752, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], -1.0286606, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], -0.63321489, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 1.9535752, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.0286606, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.63321489, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceUnknownOnesided}
    # @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
    sampleSizeResult <- getSampleSizeMeans(designGS2, groups = 2, thetaH0 = 0, stDev = 2,
        normalApproximation = FALSE, alternative = 1.3, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 67.037534, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 50.27815, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 16.759383, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 13.407507, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 33.518767, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 67.037534, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 10.05563, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 25.139075, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 50.27815, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 3.3518767, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 8.3796917, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 16.759383, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 55.775818, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 50.454651, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 38.411718, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -2.1030977, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], -1.0473776, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], -0.63668307, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 2.1030977, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.0473776, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.63668307, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceKnownOnesided}
    # @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
    sampleSizeResult <- getSampleSizeMeans(designGS2, groups = 2, thetaH0 = 0, stDev = 2,
        normalApproximation = TRUE, alternative = 1.3, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 66.605556, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 49.954167, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 16.651389, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 13.321111, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 33.302778, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 66.605556, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 9.9908334, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 24.977083, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 49.954167, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 3.3302778, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 8.3256945, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 16.651389, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 55.416408, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 50.12953, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 38.164199, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -1.9535752, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], -1.0286606, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], -0.63321489, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 1.9535752, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.0286606, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.63321489, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)
})

context("Testing the sample size calculation of rates for different designs and arguments")

test_that("'getSampleSizeRates': sample size calculation of rates for one sided group sequential design", {
    designGS1 <- getDesignGroupSequential(informationRates = c(0.2, 0.5, 1), sided = 1,
        beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3)

    # @refFS[Formula]{fs:sampleSizeOneRateExactOnesidedLargerpi1}
    # @refFS[Formula]{fs:sampleSizePerStageOneRate}
    sampleSizeResult <- getSampleSizeRates(designGS1, groups = 1, thetaH0 = 0.5, pi1 = 0.8,
        normalApproximation = FALSE)

    ##
    ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 29.536017, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 5.9072033, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 14.768008, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 29.536017, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 29.371899, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 27.638803, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 21.510502, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 1.090192, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.81076728, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.6912997, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeOneRateExactOnesidedSmallerpi1}
    # @refFS[Formula]{fs:sampleSizePerStageOneRate}
    sampleSizeResult <- getSampleSizeRates(designGS1, groups = 1, thetaH0 = 0.5, pi1 = 0.2,
        normalApproximation = FALSE)

    ##
    ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 29.536017, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 5.9072033, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 14.768008, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 29.536017, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 29.371899, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 27.638803, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 21.510502, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], -0.090191958, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.18923272, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.3087003, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeOneRateApproximation}
    # @refFS[Formula]{fs:sampleSizePerStageOneRate}
    sampleSizeResult <- getSampleSizeRates(designGS1, groups = 1, thetaH0 = 0.5, pi1 = 0.8,
        normalApproximation = TRUE)

    ##
    ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 26.111979, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 5.2223957, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 13.055989, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 26.111979, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 25.966887, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 24.434704, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 19.016842, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 1.127696, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.83051514, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.70345593, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeOneRateExactOnesidedSmallerpi1}
    # @refFS[Formula]{fs:sampleSizePerStageTwoRates}
    sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, thetaH0 = 0, pi1 = 0.5,
        pi2 = 0.3, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 261.60183, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 130.80091, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 130.80091, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 52.320365, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 130.80091, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 261.60183, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 260.14823, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 244.79812, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 190.51949, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
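    # The effect-scale boundaries below are expressed as rate differences pi1 - pi2.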
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.39662162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.20482715, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.12354802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeTwoRatesDiff}
    # @refFS[Formula]{fs:sampleSizePerStageTwoRates}
    sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, thetaH0 = 0, pi1 = 0.5,
        pi2 = 0.3, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 349.41307, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 262.0598, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 87.353268, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 69.882614, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 174.70654, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 349.41307, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 52.411961, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 131.0299, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 262.0598, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 17.470654, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 43.676634, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 87.353268, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 347.47155, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 326.9689, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 254.47069, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.38949339, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.20784714, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.12553463, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:EstimatesDiffFarringtonManning}
    # @refFS[Formula]{fs:sampleSizeTwoRatesDiff}
    # @refFS[Formula]{fs:sampleSizePerStageTwoRates}
    sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, thetaH0 = 0.2, pi1 = 0.5,
        pi2 = 0.1, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 201.70565, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 100.85283, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 100.85283, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 40.341131, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 100.85283, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 201.70565, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 200.58487, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 188.74931, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 146.89828, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.6326463, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.40827798, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.32212934, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:EstimatesDiffFarringtonManning}
    # @refFS[Formula]{fs:sampleSizeTwoRatesDiff}
    # @refFS[Formula]{fs:sampleSizePerStageTwoRates}
    sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, thetaH0 = 0.2, pi1 = 0.5,
        pi2 = 0.1, allocationRatioPlanned = 0.4)

    ##
    ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 267.48868, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 76.425337, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 191.06334, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 53.497736, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 133.74434, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 267.48868, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 15.285067, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 38.212668, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 76.425337, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 38.212668, tolerance = 1e-07)
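    # allocationRatioPlanned = 0.4 allocates 2.5 times as many subjects to
    # group 2 as to group 1 (38.21 vs. 15.29 at the first stage).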
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 95.531671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 191.06334, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 266.00237, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 250.30683, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 194.80676, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.59822838, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.40051537, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.32119139, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:EstimatesDiffFarringtonManning}
    # @refFS[Formula]{fs:sampleSizeTwoRatesDiff}
    # @refFS[Formula]{fs:sampleSizeRatesDiffOptimumAllocationRatio}
    # @refFS[Formula]{fs:sampleSizePerStageTwoRates}
    sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, thetaH0 = 0.2, pi1 = 0.5,
        pi2 = 0.1, allocationRatioPlanned = 0)

    ##
    ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$allocationRatioPlanned, 1.1669392, tolerance = 1e-07)
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 200.45189, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 107.94727, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 92.504622, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 40.090378, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 100.22594, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 200.45189, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 21.589453, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 53.973634, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 107.94727, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 18.500924, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 46.252311, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 92.504622, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 199.33807, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 187.57608, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 145.98518, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
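    # The optimum allocation ratio (1.1669) yields a slightly smaller maximum
    # sample size than 1:1 allocation (200.45 vs. 201.71 above).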
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.63834776, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.41018483, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.32243267, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:EstimatesRatioFarringtonManning}
    # @refFS[Formula]{fs:sampleSizeTwoRatesRatio}
    # @refFS[Formula]{fs:sampleSizePerStageTwoRates}
    sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, riskRatio = TRUE,
        thetaH0 = 0.9, pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 171.20812, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 85.604059, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 85.604059, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 34.241624, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 85.604059, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 171.20812, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 170.2568, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 160.21075, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 124.68752, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.1899424, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.0225352, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5569402, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:EstimatesRatioFarringtonManning}
    # @refFS[Formula]{fs:sampleSizeTwoRatesRatio}
    # @refFS[Formula]{fs:sampleSizePerStageTwoRates}
    sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, riskRatio = TRUE,
        thetaH0 = 0.9, pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
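    # With riskRatio = TRUE, the effect-scale boundaries above are risk ratios
    # rather than rate differences.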
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 221.72371, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 166.29278, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 55.430927, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 44.344741, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 110.86185, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 221.72371, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 33.258556, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 83.14639, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 166.29278, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 11.086185, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 27.715463, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 55.430927, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 220.4917, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 207.48153, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 161.47703, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.1917697, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.0740853, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5843199, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:EstimatesRatioFarringtonManning}
    # @refFS[Formula]{fs:sampleSizePerStageTwoRates}
    # @refFS[Formula]{fs:sampleSizeTwoRatesRatioOptimumAllocationRatio}
    sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, riskRatio = TRUE,
        thetaH0 = 0.9, pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 0)

    ##
    ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$allocationRatioPlanned, 1.0304199, tolerance = 1e-07)
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 171.17189, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 86.868201, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 84.303693, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 34.234379, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 85.585947, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 171.17189, tolerance = 1e-07)
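    # The optimum allocation ratio (1.0304) is close to 1 here, so the maximum
    # sample size barely changes compared with 1:1 allocation (171.17 vs. 171.21).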
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 17.37364, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 43.434101, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 86.868201, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 16.860739, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 42.151846, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 84.303693, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 170.22077, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 160.17685, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 124.66114, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.1919838, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.0241846, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5576701, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)
})

test_that("'getSampleSizeRates': sample size calculation of rates for two sided group sequential design", {
    designGS2 <- getDesignGroupSequential(informationRates = c(0.2, 0.5, 1), alpha = 0.4,
        sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3)

    # @refFS[Formula]{fs:sampleSizeOneRateApproximation}
    # @refFS[Formula]{fs:sampleSizePerStageOneRate}
    sampleSizeResult <- getSampleSizeRates(designGS2, groups = 1, thetaH0 = 0.5, pi1 = 0.8,
        normalApproximation = TRUE)

    ##
    ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 11.331566, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 2.2663131, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 5.6657828, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 11.331566, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 9.4279622, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 8.5285086, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 6.4928537, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -0.01272092, tolerance = 1e-07)
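    # Note: with only about 2.3 subjects at the first stage, the normal
    # approximation is very rough; the first-stage lower bound above even falls
    # slightly below 0.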
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.23002532, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.33381109, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 1.0127209, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 0.76997468, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.66618891, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeTwoRatesDiff}
    # @refFS[Formula]{fs:sampleSizePerStageTwoRates}
    sampleSizeResult <- getSampleSizeRates(designGS2, groups = 2, thetaH0 = 0, pi1 = 0.5,
        pi2 = 0.3, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 123.43553, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 61.717765, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 61.717765, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 24.687106, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 61.717765, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 123.43553, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 102.69945, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 92.901636, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 70.727105, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -0.23899172, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], -0.13791313, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], -0.087906186, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 0.30941892, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 0.15876644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.095938144, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeTwoRatesDiff}
    # @refFS[Formula]{fs:sampleSizePerStageTwoRates}
    sampleSizeResult <- getSampleSizeRates(designGS2, groups = 2, thetaH0 = 0, pi1 = 0.5,
        pi2 = 0.3, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 162.30744, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 121.73058, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 40.576859, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 32.461488, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 81.153719, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 162.30744, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 24.346116, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 60.865289, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 121.73058, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 8.1153719, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 20.28843, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 40.576859, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 135.04122, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 122.15791, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 93.000251, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -0.21587527, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], -0.13203224, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], -0.086052993, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 0.31213587, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 0.16272503, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.09811449, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)
})

context("Testing the sample size calculation of survival data for different designs and arguments")

test_that("'getSampleSizeSurvival': sample size calculation of survival data for one sided group sequential design and typeOfComputation = 'Schoenfeld'", {
    designGS1 <- getDesignGroupSequential(informationRates = c(0.2, 0.5, 1), sided = 1,
        beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld",
        pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 218.43651, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 109.21825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 109.21825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 64.634742, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 18.203042, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0974672, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.495939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 14.423875, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 12.926948, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 32.317371, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 64.634742, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 64.275598, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 60.483, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 47.072216, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 129.1955, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 209.26106, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 218.43651, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 207.20268, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.9325954, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3170793, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6774418, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld",
        pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 250.03082, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 187.52311, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 62.507704, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 86.179656, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 20.835901, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0363139, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.430238, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 14.391854, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 17.235931, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 43.089828, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 86.179656, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 85.700797, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 80.644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 62.762955, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 146.60794, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 238.15931, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 250.03082, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 109.95596, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 178.61948, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 187.52311, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 36.651986, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 59.539826, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 62.507704, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 236.50497, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.9325954, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3170793, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6774418, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld",
        thetaH0 = 1.2, pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 359.16189, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 179.58095, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 179.58095, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 106.27498, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 29.930158, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0974672, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.495939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 14.423875, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 21.254997, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 53.137492, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 106.27498, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 105.68446, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 99.448526, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 77.397988, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 212.42831, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 344.07526, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 359.16189, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 340.69079, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07)
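    # Testing against thetaH0 = 1.2 shifts the hazard-ratio boundaries upward
    # relative to the thetaH0 = 1 case (e.g., 1.796 vs. 1.677 at the final stage).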
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1.2, pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 411.1105, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 308.33287, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 102.77762, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 141.69998, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 34.259208, tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 7.0363139, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 11.430238, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDurationH1, 14.391854, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 28.339996, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 70.849989, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 141.69998, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 140.91262, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 132.59803, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 103.19732, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 241.05854, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 391.59089, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 411.1105, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 180.79391, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 293.69317, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 308.33287, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 60.264635, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 97.897723, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 102.77762, tolerance = 1e-07) 
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 388.87078, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 216.4138, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 108.2069, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 108.2069, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 64.634742, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 27.051725, tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDurationH1, 13.872604, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 12.926948, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 32.317371, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 64.634742, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 64.275598, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 60.483, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 47.072216, tolerance = 1e-07) 
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 168.44491, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 216.4138, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 216.4138, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 212.39441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.9325954, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3170793, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6774418, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)
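    # NOTE (added sketch, not part of the original test suite; the helper name
    # is hypothetical): with accountForObservationTimes = TRUE, the subject
    # number follows from the event number via the probability of observing an
    # event. For exponential survival with rate lambda, uniform accrual over
    # [0, accrualTime], additional followUpTime, and no dropout, this is
    #     P(event) = 1 - (exp(-lambda * followUpTime)
    #         - exp(-lambda * (accrualTime + followUpTime))) / (lambda * accrualTime).
    eventProbability <- function(lambda, accrualTime, followUpTime) {
        totalTime <- accrualTime + followUpTime
        1 - (exp(-lambda * followUpTime) - exp(-lambda * totalTime)) / (lambda * accrualTime)
    }
    # Here lambda1 = -log(1 - pi1) / eventTime = -log(0.6) / 14; averaging
    # eventProbability(0.036487545, 8, 10) and eventProbability(0.015938825, 8, 10)
    # gives about 0.2987, and 64.634742 / 0.2987 reproduces the
    # maxNumberOfSubjects of about 216.4 checked above.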
    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 247.45413, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 185.5906, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 61.863534, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 86.179656, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 30.931767, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1694167, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 13.832344, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 17.235931, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 43.089828, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 86.179656, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 85.700797, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 80.644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 62.762955, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 190.83096, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 247.45413, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 247.45413, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 143.12322, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 185.5906, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 185.5906, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 47.70774, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 61.863534, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 61.863534, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 242.70959, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.9325954, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3170793, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6774418, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld",
        thetaH0 = 1.2, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 355.83608, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 177.91804, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 177.91804, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 106.27498, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 44.47951, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 13.872604, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 21.254997, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 53.137492, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 106.27498, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 105.68446, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 99.448526, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 77.397988, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 276.96374, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 355.83608, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 355.83608, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 349.22724, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld",
        thetaH0 = 1.2, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 406.87381, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 305.15536, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 101.71845, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 141.69998, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 50.859227, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1694167, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 13.832344, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 28.339996, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 70.849989, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 141.69998, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 140.91262, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 132.59803, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 103.19732, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 313.77176, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 406.87381, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 406.87381, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 235.32882, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 305.15536, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 305.15536, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 78.442941, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 101.71845, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 101.71845, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 399.07264, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 224.258, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 112.129, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 112.129, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 64.634742, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 28.03225, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1480342, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.21033, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 13.818027, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 12.926948, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 32.317371, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 64.634742, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 64.275598, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 60.483, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 47.072216, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 172.34323, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 224.258, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 224.258, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 219.90797, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.9325954, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3170793, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6774418, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)
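    # NOTE (added sketch, not part of the original test suite): the dropout
    # arguments are assumed here to describe an exponential dropout process,
    # i.e. a dropout rate of 0.1 reached by dropoutTime = 16 corresponds to a
    # dropout hazard of about
    dropoutHazardExample <- -log(1 - 0.1) / 16  # roughly 0.0066 per time unit
    # Dropout censors some subjects before their event, which is why this case
    # needs more subjects (224.258) than the otherwise identical no-dropout
    # case above (216.4138) for the same 64.63 events.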
    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 257.43359, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 193.07519, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 64.358398, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 86.179656, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 32.179199, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.0820828, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.109775, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 13.771337, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 17.235931, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 43.089828, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 86.179656, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 85.700797, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 80.644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 62.762955, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 195.71655, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 257.43359, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 257.43359, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 146.78741, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 193.07519, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 193.07519, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 48.929138, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 64.358398, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 64.358398, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 252.26222, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.9325954, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3170793, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6774418, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld",
        thetaH0 = 1.2, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 368.73381, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 184.36691, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 184.36691, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 106.27498, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 46.091727, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1480342, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.21033, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 13.818027, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 21.254997, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 53.137492, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 106.27498, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 105.68446, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 99.448526, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 77.397988, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 283.37351, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 368.73381, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 368.73381, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 361.58134, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld",
        thetaH0 = 1.2, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 423.28243, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 317.46182, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 105.82061, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 141.69998, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 52.910303, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.0820828, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.109775, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 13.771337, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 28.339996, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 70.849989, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 141.69998, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 140.91262, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 132.59803, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 103.19732, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 321.80485, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 423.28243, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 423.28243, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 241.35364, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 317.46182, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 317.46182, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 80.451212, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 105.82061, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 105.82061, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 414.77946, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    # @refFS[Formula]{fs:sampleSizeSurvivalOptimumAllocationRatio}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld",
        thetaH0 = 1.2, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 0)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 359.78876, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 206.97289, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 152.81587, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 108.73874, tolerance = 1e-07)
    expect_equal(sampleSizeResult$allocationRatioPlanned, 1.3543939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 44.973595, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1258711, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.176695, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 13.802401, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 21.747749, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 54.369372, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 108.73874, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 108.13454, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 101.75403, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 79.192297, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 275.50245, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 359.78876, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 359.78876, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 158.48615, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 206.97289, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 206.97289, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 117.01629, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 152.81587, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 152.81587, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 352.72627, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)
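    # NOTE (added sketch, not part of the original test suite):
    # allocationRatioPlanned = 0 requests the optimum allocation ratio, i.e. the
    # ratio n1/n2 minimizing the number of subjects for the given power; the
    # test above expects 1.3543939 for this configuration. Ignoring dropout,
    # Schoenfeld's event count scales with (1 + r)^2 / r and the average event
    # probability with (r * p1 + p2) / (1 + r), so up to a constant the subject
    # number is (1 + r)^3 / (r * (r * p1 + p2)), which can be minimized
    # numerically:
    probEvent1 <- 1 - (exp(-0.036487545 * 10) - exp(-0.036487545 * 18)) / (0.036487545 * 8)
    probEvent2 <- 1 - (exp(-0.015938825 * 10) - exp(-0.015938825 * 18)) / (0.015938825 * 8)
    subjectsUpToConstant <- function(r) (1 + r)^3 / (r * (r * probEvent1 + probEvent2))
    optimize(subjectsUpToConstant, interval = c(0.5, 5))$minimum
    # gives about 1.36; the small difference from 1.3543939 comes from the
    # dropout adjustment that this sketch ignores.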
})

test_that("'getSampleSizeSurvival': sample size calculation of survival data for one sided group sequential design and typeOfComputation = 'Freedman'", {
    designGS1 <- getDesignGroupSequential(informationRates = c(0.2, 0.5, 1),
        sided = 1, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Freedman",
        pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 243.82563, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 121.91282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 121.91282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 72.147312, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 20.318803, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0974672, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.495939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 14.423875, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.429462, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.073656, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 72.147312, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 71.746424, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 67.513008, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 52.543474, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 144.21204, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 233.58371, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 243.82563, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 231.28609, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)
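    # NOTE (added sketch, not part of the original test suite; the helper name
    # is hypothetical): Freedman's method works with the hazard ratio itself
    # rather than its logarithm. For allocation ratio r, the fixed-design event
    # number is
    #     d = ((1 + r * hr)^2 / (r * (1 - hr)^2)) * (qnorm(1 - alpha) + qnorm(1 - beta))^2.
    freedmanFixedDesignEvents <- function(alpha, beta, hr, r = 1) {
        (1 + r * hr)^2 / (r * (1 - hr)^2) * (qnorm(1 - alpha) + qnorm(1 - beta))^2
    }
    # freedmanFixedDesignEvents(0.025, 0.1, 2.2892242) gives about 68.4 events;
    # with the design's inflation factor (about 1.055) this reproduces the
    # maxNumberOfEvents of about 72.15 expected above, explaining why Freedman
    # needs slightly more events than Schoenfeld for the same scenario.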
    # @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Freedman",
        pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 399.20253, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 299.4019, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 99.800633, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 137.59559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 33.266878, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0363139, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.430238, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 14.391854, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 27.519117, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 68.797794, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 137.59559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 136.83103, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 128.75728, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 100.20817, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 234.07619, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 380.24832, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 399.20253, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 175.55715, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 285.18624, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 299.4019, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 58.519049, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 95.06208, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 99.800633, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 377.60699, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.5359417, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9445403, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5058707, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Freedman",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 241.56782, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 120.78391, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 120.78391, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 72.147312, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 30.195978, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 13.872604, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.429462, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.073656, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 72.147312, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 71.746424, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 67.513008, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 52.543474, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 188.02345, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 241.56782, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 241.56782, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 237.08125, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Freedman",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 395.08857, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 296.31643, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 98.772142, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 137.59559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 49.386071, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1694167, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 13.832344, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 27.519117, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 68.797794, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 137.59559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 136.83103, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 128.75728, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 100.20817, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 304.68325, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 395.08857, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 395.08857, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 228.51244, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 296.31643, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 296.31643, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 76.170813, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 98.772142, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 98.772142, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 387.51336, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.5359417, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9445403, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5058707, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Freedman",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 250.32376, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 125.16188, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 125.16188, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 72.147312, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 31.29047, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1480342, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.21033, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 13.818027, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.429462, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.073656, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 72.147312, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 71.746424, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 67.513008, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 52.543474, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 192.37488, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 250.32376, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 250.32376, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 245.46813, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    # @refFS[Formula]{fs:sampleSizeSurvivalOptimumAllocationRatio}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Freedman",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10,
        dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 0)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 236.31869, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 89.479288, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 146.8394, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 62.770758, tolerance = 1e-07)
    expect_equal(sampleSizeResult$allocationRatioPlanned, 0.60936839, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 29.539836, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1891498, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.272294, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 13.846839, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 12.554152, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 31.385379, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 62.770758, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 62.421971, tolerance =
1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 58.738747, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 45.714713, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 182.82647, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 236.31869, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 236.31869, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 69.22509, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 89.479288, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 89.479288, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 113.60138, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 146.8394, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 146.8394, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 231.83649, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 5.3084847, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.4084373, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7178517, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': sample size calculation of survival data for one sided group sequential design and typeOfComputation = 'HsiehFreedman'", { designGS1 <- getDesignGroupSequential(informationRates = c(0.2,0.5,1), sided = 1, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3) # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "HsiehFreedman", pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 243.82563, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 121.91282, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 121.91282, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 72.147312, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 20.318803, tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) 
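
# Illustrative sketch (added; not part of the original test suite): the tests
# above exercise Freedman's formula for the required number of events. For a
# fixed-sample design with one-sided level 'alpha', power '1 - beta', and
# allocation ratio n1/n2 = 'allocationRatio', a common statement of the formula
# is the hypothetical helper below; the group sequential designs used in these
# tests additionally scale the event number by the design's inflation factor.
freedmanEventNumber <- function(hazardRatio, alpha = 0.025, beta = 0.1, allocationRatio = 1) {
    # (z_{1-alpha} + z_{1-beta})^2 * (1 + r * HR)^2 / (r * (1 - HR)^2)
    (qnorm(1 - alpha) + qnorm(1 - beta))^2 *
        (1 + allocationRatio * hazardRatio)^2 /
        (allocationRatio * (1 - hazardRatio)^2)
}
# freedmanEventNumber(hazardRatio = 2.2892242) is roughly 68.4; multiplied by
# the inflation factor of the Wang & Tsiatis design used above (about 1.055)
# this reproduces the maxNumberOfEvents of 72.147312 checked in the tests.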

test_that("'getSampleSizeSurvival': sample size calculation of survival data for one sided group sequential design and typeOfComputation = 'HsiehFreedman'", {
    designGS1 <- getDesignGroupSequential(informationRates = c(0.2, 0.5, 1), sided = 1,
        beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "HsiehFreedman",
        pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 243.82563, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 121.91282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 121.91282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 72.147312, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 20.318803, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0974672, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.495939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 14.423875, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.429462, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.073656, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 72.147312, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 71.746424, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 67.513008, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 52.543474, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 144.21204, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 233.58371, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 243.82563, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 231.28609, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "HsiehFreedman",
        pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 279.09218, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 209.31914, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 69.773046, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 96.196416, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 23.257682, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0363139, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.430238, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 14.391854, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 19.239283, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 48.098208, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 96.196416, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 95.661899, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 90.017344, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 70.057965, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 163.64835, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 265.84083, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 279.09218, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 122.73626, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 199.38062, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 209.31914, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 40.912088, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 66.460208, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 69.773046, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 263.99422, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)
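
    # Added commentary (not an original test comment): the next scenarios set
    # 'accountForObservationTimes = TRUE' together with 'eventTime',
    # 'accrualTime', and 'followUpTime'. This changes how the event
    # probabilities, and hence the subject numbers, accrual intensity, and
    # analysis times, are computed, while the maximum number of events
    # (72.147312 for 1:1 allocation) stays the same as in the scenarios above,
    # because it depends only on the design, the hazard ratio, and the
    # allocation ratio.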

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "HsiehFreedman",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8,
        followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 241.56782, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 120.78391, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 120.78391, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 72.147312, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 30.195978, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 13.872604, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.429462, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.073656, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 72.147312, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 71.746424, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 67.513008, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 52.543474, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 188.02345, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 241.56782, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 241.56782, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 237.08125, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "HsiehFreedman",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8,
        followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 276.21601, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 207.16201, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 69.054003, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 96.196416, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 34.527001, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1694167, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 13.832344, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 19.239283, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 48.098208, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 96.196416, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 95.661899, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 90.017344, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 70.057965, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 213.01146, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 276.21601, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 276.21601, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 159.75859, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 207.16201, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 207.16201, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 53.252865, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 69.054003, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 69.054003, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 270.92, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "HsiehFreedman",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8,
        followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 250.32376, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 125.16188, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 125.16188, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 72.147312, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 31.29047, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1480342, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.21033, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 13.818027, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.429462, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.073656, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 72.147312, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 71.746424, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 67.513008, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 52.543474, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 192.37488, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 250.32376, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 250.32376, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 245.46813, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    # @refFS[Formula]{fs:sampleSizeSurvivalOptimumAllocationRatio}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "HsiehFreedman",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8,
        followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 0)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 244.2512, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 140.50849, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 103.74271, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 73.819895, tolerance = 1e-07)
    expect_equal(sampleSizeResult$allocationRatioPlanned, 1.3543939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 30.5314, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1258711, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.176695, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 13.802401, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.763979, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.909947, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 73.819895, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 73.409713, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 69.078154, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 53.761583, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 187.03142, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 244.2512, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 244.2512, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 107.59211, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 140.50849, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 140.50849, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 79.439306, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 103.74271, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 103.74271, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 239.45666, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0020595603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0084585282, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.018794214, tolerance = 1e-07)
})
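
# Worked check (added; not part of the original test suite): within every
# scenario the staged event numbers equal the information rates times the
# maximum number of events. For the optimum-allocation scenario above,
# 0.2 * 73.819895 = 14.763979, which is exactly the eventsPerStage[1, ]
# value checked there; likewise
# c(0.2, 0.5, 1) * 73.819895  # reproduces all three stages (up to rounding)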

test_that("'getSampleSizeSurvival': sample size calculation of survival data for two sided group sequential design and typeOfComputation = 'Schoenfeld'", {
    designGS2 <- getDesignGroupSequential(informationRates = c(0.2, 0.5, 1), alpha = 0.4,
        sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Schoenfeld",
        pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 103.98569, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 51.992843, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 51.992843, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 30.769069, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 8.6654738, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0974672, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.495939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 12.356848, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.1538138, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 15.384534, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 30.769069, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 25.600136, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 23.157812, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 17.630314, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 61.502916, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 99.617756, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 103.98569, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 89.550349, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.28805692, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.51926225, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.66803619, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.4715361, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.9258092, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4969249, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Schoenfeld",
        pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 119.02601, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 89.269507, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 29.756502, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 41.025425, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 9.9188341, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0363139, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.430238, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 12.314673, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 8.205085, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 20.512713, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 41.025425, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 34.133514, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 30.877083, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 23.507086, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 69.79203, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 113.37463, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 119.02601, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 52.344023, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 85.030974, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 89.269507, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 17.448008, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 28.343658, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 29.756502, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 102.08444, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.28805692, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.51926225, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.66803619, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.4715361, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.9258092, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4969249, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)
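
    # Added commentary (not an original test comment): with
    # 'allocationRatioPlanned = 3' the planned group sizes keep the 3:1 ratio
    # at every stage; up to the rounding of the reference values,
    # numberOfSubjects1 equals 3 * numberOfSubjects2
    # (e.g. 52.344023 = 3 * 17.448008 at the first stage).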

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Schoenfeld",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8,
        followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 103.02279, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 51.511393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 51.511393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 30.769069, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 12.877848, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 11.672467, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.1538138, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 15.384534, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 30.769069, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 25.600136, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 23.157812, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 17.630314, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 80.187417, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 103.02279, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 103.02279, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 96.108996, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.28805692, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.51926225, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.66803619, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.4715361, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.9258092, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4969249, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Schoenfeld",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8,
        followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 117.79939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 88.349544, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 29.449848, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 41.025425, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 14.724924, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1694167, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 11.623913, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 8.205085, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 20.512713, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 41.025425, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 34.133514, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 30.877083, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 23.507086, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 90.844192, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 117.79939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 117.79939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 68.133144, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 88.349544, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 88.349544, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 22.711048, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 29.449848, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 29.449848, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 109.63825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.28805692, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.51926225, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.66803619, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.4715361, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.9258092, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4969249, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Schoenfeld",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8,
        followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 106.75698, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 53.378489, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 53.378489, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 30.769069, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 13.344622, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1480342, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.21033, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 11.606421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.1538138, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 15.384534, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 30.769069, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 25.600136, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 23.157812, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 17.630314, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 82.043195, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 106.75698, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 106.75698, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 99.274467, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.28805692, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.51926225, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.66803619, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.4715361, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.9258092, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4969249, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    # @refFS[Formula]{fs:sampleSizeSurvivalOptimumAllocationRatio}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Schoenfeld",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8,
        followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 0)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 104.16718, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 59.923444, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 44.243734, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 31.482385, tolerance = 1e-07)
    expect_equal(sampleSizeResult$allocationRatioPlanned, 1.3543939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 13.020897, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1258711, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.176695, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 11.587598, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.2964769, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 15.741192, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 31.482385, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 26.193621, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 23.694677, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 18.039036, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 79.764338, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 104.16718, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 104.16718, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 45.885412, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 59.923444, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 59.923444, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 33.878926, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 44.243734, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 44.243734, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 96.778811, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.28805692, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.51926225, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.66803619, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.4715361, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.9258092, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4969249, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)
})
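
# Illustrative cross-check (added; a sketch assuming the usual Schoenfeld
# relation between the z scale and the hazard ratio scale under 1:1
# allocation): the effect-scale boundary at stage k is exp(2 * z_k / sqrt(d_k)),
# where z_k is the critical value on the z scale and d_k the cumulative number
# of events; the lower two-sided boundary is its reciprocal. For the first
# stage of the Schoenfeld test above:
zScale <- qnorm(1 - 0.12265406 / 2)  # critical value recovered from the p-value scale
exp(2 * zScale / sqrt(6.1538138))    # ~ 3.4715361, cf. criticalValuesEffectScaleUpper[1, ]
exp(-2 * zScale / sqrt(6.1538138))   # ~ 0.28805692, cf. criticalValuesEffectScaleLower[1, ]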
    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 116.07206, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 58.03603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 58.03603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 34.345393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 9.6726717, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0974672, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.495939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 12.356848, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.8690786, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 17.172696, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 34.345393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 28.575669, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 25.849471, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 19.679506, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 68.65147, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 111.19644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 116.07206, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 99.958888, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.30788852, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.53778926, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.68260947, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.2479288, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.8594644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4649665, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Freedman",
        pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 190.03851, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 142.52888, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 47.509628, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 65.50174, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 15.836543, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0363139, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.430238, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 12.314673, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 13.100348, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 32.75087, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 65.50174, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 54.498024, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 49.298762, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 37.531726, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 111.43089, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 181.01545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 190.03851, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 83.573164, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 135.76159, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 142.52888, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 27.857721, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 45.253862, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 47.509628, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 162.98937, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.37344541, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.59532615, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.72668369, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 2.6777676, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.6797515, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.3761146, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Freedman",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8,
        followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 1)
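    # Added note: with accountForObservationTimes = TRUE the event probabilities
    # are derived from the exponential event-time model together with the given
    # accrual and follow-up times rather than from pi1/pi2 alone; the required
    # number of events (34.345393) is unchanged relative to the first call, only
    # the subject numbers differ.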
    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 114.99724, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 57.49862, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 57.49862, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 34.345393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 14.374655, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 11.672467, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.8690786, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 17.172696, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 34.345393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 28.575669, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 25.849471, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 19.679506, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 89.507692, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 114.99724, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 114.99724, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 107.27985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.30788852, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.53778926, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.68260947, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.2479288, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.8594644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4649665, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Freedman",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8,
        followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 188.08008, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 141.06006, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 47.02002, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 65.50174, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 23.51001, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1694167, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 11.623913, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 13.100348, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 32.75087, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 65.50174, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 54.498024, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 49.298762, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 37.531726, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 145.04305, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 188.08008, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 188.08008, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 108.78229, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 141.06006, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 141.06006, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 36.260762, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 47.02002, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 47.02002, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 175.0499, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.37344541, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.59532615, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.72668369, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 2.6777676, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.6797515, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.3761146, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Freedman",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8,
        followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 1)
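    # Added note: specifying dropoutRate1/dropoutRate2 (with dropoutTime = 16)
    # inflates the required number of subjects (119.16546 below vs. 114.99724
    # without dropout) while the required number of events stays at 34.345393.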
    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 119.16546, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 59.582732, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 59.582732, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 34.345393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 14.895683, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1480342, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.21033, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 11.606421, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.8690786, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 17.172696, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 34.345393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 28.575669, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 25.849471, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 19.679506, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 91.579168, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 119.16546, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 119.16546, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 110.81325, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.30788852, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.53778926, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.68260947, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.2479288, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.8594644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4649665, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedman}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    # @refFS[Formula]{fs:sampleSizeSurvivalOptimumAllocationRatio}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Freedman",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8,
        followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 0)
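    # Added note: allocationRatioPlanned = 0 requests the optimum allocation
    # ratio, i.e. the ratio yielding the smallest overall sample size; the
    # computed optimum (allocationRatioPlanned = 0.60936839) is part of the
    # expected results below. Conceptually this is a one-dimensional
    # minimization, e.g. (hypothetical helper, sketch only):
    #     optimize(function(r) subjectsNeededFor(r), interval = c(0.1, 10))$minimum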
    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 112.49841, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 42.596199, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 69.902213, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 29.881728, tolerance = 1e-07)
    expect_equal(sampleSizeResult$allocationRatioPlanned, 0.60936839, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 14.062302, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1891498, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.272294, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 11.641184, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 5.9763456, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 14.940864, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 29.881728, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 24.86186, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 22.48997, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 17.121878, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 87.033692, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 112.49841, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 112.49841, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 32.954283, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 42.596199, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 42.596199, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 54.079409, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 69.902213, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 69.902213, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 104.78854, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.2720218, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.50383572, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.65574857, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.676176, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.9847739, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.5249747, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)
})

test_that("'getSampleSizeSurvival': sample size calculation of survival data for two sided group sequential design and typeOfComputation = 'HsiehFreedman'", {
    designGS2 <- getDesignGroupSequential(informationRates = c(0.2, 0.5, 1),
        alpha = 0.4, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "HsiehFreedman",
        pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 116.07206, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 58.03603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 58.03603, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 34.345393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 9.6726717, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0974672, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.495939, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 12.356848, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.8690786, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 17.172696, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 34.345393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 28.575669, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 25.849471, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 19.679506, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 68.65147, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 111.19644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 116.07206, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 99.958888, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.30788852, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.53778926, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.68260947, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.2479288, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.8594644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4649665, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "HsiehFreedman",
        pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3)
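    # Added note: for a 1:1 allocation the 'HsiehFreedman' expectations above
    # coincide with the plain 'Freedman' results. For unequal allocation the
    # required number of events scales, numerically, by (1 + r)^2 / (4 * r)
    # relative to the 1:1 case; e.g. below, 45.793857 = 34.345393 * (1 + 3)^2 / (4 * 3).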
    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 132.86054, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 99.645403, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 33.215134, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 45.793857, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 11.071711, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 7.0363139, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 11.430238, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 12.314673, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 9.1587714, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 22.896929, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 45.793857, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 38.100892, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 34.465962, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 26.239341, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 77.904037, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 126.55229, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 132.86054, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 58.428028, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 94.914221, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 99.645403, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 19.476009, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 31.638074, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 33.215134, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 113.94983, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.30788852, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.53778926, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.68260947, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.2479288, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.8594644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4649665, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "HsiehFreedman",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8,
        followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 1)
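    # Added note: with allocationRatioPlanned = 1 this call mirrors the
    # corresponding 'Freedman' call with accountForObservationTimes = TRUE; the
    # expected values below (e.g. maxNumberOfSubjects = 114.99724) are identical
    # to that case.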
    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 114.99724, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 57.49862, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 57.49862, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 34.345393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 14.374655, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 11.672467, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.8690786, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 17.172696, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 34.345393, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 28.575669, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 25.849471, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 19.679506, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 89.507692, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 114.99724, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 114.99724, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 107.27985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.30788852, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.53778926, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.68260947, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.2479288, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.8594644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4649665, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "HsiehFreedman",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8,
        followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 131.49135, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 98.618512, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 32.872837, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 45.793857, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 16.436419, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.1694167, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 11.623913, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 9.1587714, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 22.896929, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 45.793857, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 38.100892, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 34.465962, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 26.239341, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 101.40312, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 131.49135, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 131.49135, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 76.052337, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 98.618512, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 98.618512, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 25.350779, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 32.872837, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 32.872837, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 122.38163, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.30788852, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.53778926, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.68260947, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.2479288, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.8594644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4649665, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "HsiehFreedman",
        thetaH0 = 1, pi1 = c(0.3, 0.4), pi2 = 0.2, eventTime = 14, accrualTime = 8,
        followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 3)
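    # Added note: pi1 may be a vector; the results are then computed
    # element-wise, one scenario per entry of pi1 = c(0.3, 0.4), which is why
    # the expected values below are length-two vectors.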
    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE))
    expect_equal(sampleSizeResult$median1, c(27.207015, 18.996816), tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, c(0.025476782, 0.036487545), tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, c(1.5984103, 2.2892242), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, c(503.85206, 136.7942), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, c(377.88904, 102.59565), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, c(125.96301, 34.19855), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, c(132.64612, 45.793857), tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, c(62.981507, 17.099275), tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], c(6.2267383, 6.0820828), tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], c(10.326085, 10.109775), tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], c(18, 18))
    expect_equal(sampleSizeResult$studyDurationH1, c(11.671936, 11.550242), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, c(18, 18))
    expect_equal(sampleSizeResult$eventsPerStage[1, ], c(26.529224, 9.1587714), tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], c(66.32306, 22.896929), tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], c(132.64612, 45.793857), tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, c(110.36274, 38.100892), tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, c(99.833829, 34.465962), tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, c(76.004665, 26.239341), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], c(392.16937, 103.99921), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], c(503.85206, 136.7942), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], c(503.85206, 136.7942), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], c(294.12702, 77.999405), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], c(377.88904, 102.59565), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], c(377.88904, 102.59565), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], c(98.042341, 25.999802), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], c(125.96301, 34.19855), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], c(125.96301, 34.19855), tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, c(470.03826, 126.86497), tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], c(0.50049257, 0.30788852), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], c(0.6945715, 0.53778926), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], c(0.79903418, 0.68260947), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], c(1.9980317, 3.2479288), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], c(1.4397366, 1.8594644), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], c(1.2515109, 1.4649665), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "HsiehFreedman",
        thetaH0 = 1, pi1 = 0.3, pi2 = 0.2, eventTime = 14, accrualTime = 8,
        followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 3)
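    # Added note: the scalar call with pi1 = 0.3 reproduces the first components
    # of the vectorized results above (e.g. maxNumberOfSubjects = 503.85206).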
    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 27.207015, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.025476782, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 1.5984103, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 503.85206, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 377.88904, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 125.96301, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 132.64612, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 62.981507, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267383, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.326085, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 11.671936, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 26.529224, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 66.32306, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 132.64612, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 110.36274, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 99.833829, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 76.004665, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 392.16937, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 503.85206, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 503.85206, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 294.12702, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 377.88904, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 377.88904, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 98.042341, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 125.96301, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 125.96301, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 470.03826, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.50049257, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.6945715, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.79903418, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 1.9980317, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.4397366, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.2515109, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "HsiehFreedman",
        thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8,
        followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16,
        accountForObservationTimes = TRUE, allocationRatioPlanned = 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 136.7942, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 102.59565, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 34.19855, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 45.793857, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 17.099275, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 6.0820828, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 10.109775, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 11.550242, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 9.1587714, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 22.896929, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], 45.793857, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 38.100892, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 34.465962, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 26.239341, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 103.99921, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 136.7942, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], 136.7942, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 77.999405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 102.59565, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 102.59565, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 25.999802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 34.19855, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 34.19855, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 126.86497, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.30788852, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.53778926, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.68260947, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.2479288, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.8594644, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4649665, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    sampleSizeResult <- getSampleSizeSurvival(designGS2, maxNumberOfSubjects = 0,
        typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = c(0.3, 0.4), pi2 = 0.2,
        eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1,
        dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE,
        allocationRatioPlanned = 3)
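    # Added note: maxNumberOfSubjects = 0 is treated as "not fixed", so the
    # number of subjects is calculated; the expectations below are identical to
    # those of the earlier call without the maxNumberOfSubjects argument.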
    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE))
    expect_equal(sampleSizeResult$median1, c(27.207015, 18.996816), tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, c(0.025476782, 0.036487545), tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, c(1.5984103, 2.2892242), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, c(503.85206, 136.7942), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, c(377.88904, 102.59565), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, c(125.96301, 34.19855), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, c(132.64612, 45.793857), tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, c(62.981507, 17.099275), tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[3, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], c(6.2267383, 6.0820828), tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], c(10.326085, 10.109775), tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], c(18, 18))
    expect_equal(sampleSizeResult$studyDurationH1, c(11.671936, 11.550242), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, c(18, 18))
    expect_equal(sampleSizeResult$eventsPerStage[1, ], c(26.529224, 9.1587714), tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], c(66.32306, 22.896929), tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], c(132.64612, 45.793857), tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, c(110.36274, 38.100892), tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, c(99.833829, 34.465962), tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, c(76.004665, 26.239341), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], c(392.16937, 103.99921), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], c(503.85206, 136.7942), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], c(503.85206, 136.7942), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[1, ], c(294.12702, 77.999405), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[2, ], c(377.88904, 102.59565), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects1[3, ], c(377.88904, 102.59565), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[1, ], c(98.042341, 25.999802), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[2, ], c(125.96301, 34.19855), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects2[3, ], c(125.96301, 34.19855), tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, c(470.03826, 126.86497), tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], c(0.50049257, 0.30788852), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], c(0.6945715, 0.53778926), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], c(0.79903418, 0.68260947), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], c(1.9980317, 3.2479288), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], c(1.4397366, 1.8594644), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], c(1.2515109, 1.4649665), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07)

    # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
    # @refFS[Formula]{fs:sampleSizeSurvivalOptimumAllocationRatio}
    sampleSizeResult <- getSampleSizeSurvival(maxNumberOfSubjects = 194, designGS2,
        typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = c(0.3, 0.4), pi2 = 0.2,
        eventTime = 14, accrualTime = 8, dropoutRate1 = 0.1, dropoutRate2 = 0.05,
        dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 3)
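    # Added note: here maxNumberOfSubjects is fixed at 194 and followUpTime is
    # omitted, so the follow-up time is calculated instead
    # (calculateFollowUpTime = TRUE below), yielding one follow-up time per pi1
    # scenario: c(65.207473, 5.0567417).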
tolerance = 1e-07) expect_equal(sampleSizeResult$followUpTime, c(65.207473, 5.0567417), tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], c(10.593965, 5.0743995), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], c(23.577694, 8.185751), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], c(73.207473, 13.056742), tolerance = 1e-07) expect_equal(sampleSizeResult$studyDurationH1, c(36.377625, 8.8858243), tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, c(73.207473, 13.056742), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[1, ], c(26.529224, 9.1587714), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], c(66.32306, 22.896929), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], c(132.64612, 45.793857), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, c(110.36274, 38.100892), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, c(99.833829, 34.465962), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, c(76.004665, 26.239341), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], c(194, 123.05419), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], c(194, 194)) expect_equal(sampleSizeResult$numberOfSubjects[3, ], c(194, 194)) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], c(145.5, 92.290642), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], c(145.5, 145.5), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], c(145.5, 145.5), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], c(48.5, 30.763547), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], c(48.5, 48.5), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], c(48.5, 48.5), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, c(194, 172.51997), tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], c(0.50049257, 0.30788852), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], c(0.6945715, 0.53778926), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], c(0.79903418, 0.68260947), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], c(1.9980317, 3.2479288), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], c(1.4397366, 1.8594644), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], c(1.2515109, 1.4649665), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) }) context("Testing the 
sample size calculation of survival data for other parameter variants") test_that("'getSampleSizeSurvival': Fixed sample size with minimum required definitions, pi1 = c(0.4, 0.5, 0.6) and pi2 = 0.2 at event time 12, accrual time 12 and follow-up time 6 as default, only alpha = 0.01 is specified ", { sampleSizeResult <- getSampleSizeSurvival(alpha = 0.01) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, c(58.52451, 31.248898, 20.120262), tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$nFixed, c(197.78666, 90.804254, 51.314209), tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed1, c(98.893329, 45.402127, 25.657105), tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed2, c(98.893329, 45.402127, 25.657105), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 18) expect_equal(sampleSizeResult$studyDuration, 18) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.8370942, 2.2986321, 2.821477), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.01, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Four stage O'Brien and Fleming group sequential design with minimum required definitions, pi1 = c(0.4, 0.5, 0.6) and pi2 = 0.2 at event time 12, accrual time 12 and follow-up time 6 as default", { sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 4)) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, c(158.37172, 72.708775, 41.088309), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, c(79.185858, 36.354387, 20.544155), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, c(79.185858, 36.354387, 20.544155), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, c(46.861741, 25.021615, 16.110694), tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, c(13.197643, 6.0590646, 3.4240258), tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.25, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 0.75, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[4, ], 1) expect_equal(sampleSizeResult$analysisTime[1, 
], c(7.9739331, 7.8101434, 7.6105076), tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], c(11.495939, 11.330412, 11.125901), tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[3, ], c(14.574435, 14.425585, 14.235444), tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[4, ], c(18, 18, 18))
    expect_equal(sampleSizeResult$studyDurationH1, c(15.491732, 15.406299, 15.298535), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, c(18, 18, 18))
    expect_equal(sampleSizeResult$eventsPerStage[1, ], c(11.715435, 6.2554038, 4.0276736), tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], c(23.43087, 12.510808, 8.0553472), tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[3, ], c(35.146306, 18.766211, 12.083021), tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[4, ], c(46.861741, 25.021615, 16.110694), tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, c(46.71422, 24.942847, 16.059978), tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, c(44.876904, 23.961821, 15.428323), tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, c(38.052731, 20.318084, 13.082227), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], c(105.23712, 47.322163, 26.058574), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], c(151.7193, 68.651695, 38.095372), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[3, ], c(158.37172, 72.708775, 41.088309), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[4, ], c(158.37172, 72.708775, 41.088309), tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, c(156.87296, 71.824595, 40.451776), tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.0042542622, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.19131467, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.35652274, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[4, ], 0.24790832, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.55209168, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(10.651203, 25.469293, 56.523607), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], c(3.2636181, 5.0467111, 7.5182183), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], c(2.2002206, 2.9422007, 3.8377495), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[4, ], c(1.8065487, 2.2464886, 2.741937), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 2.5763449e-05, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.0020996694, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.0097077663, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[4, ], 0.021469878, tolerance = 1e-07)
})

test_that("'getSampleSizeSurvival': For a fixed sample design, determine the necessary accrual time if 200 subjects are required and 30 subjects per time unit can be recruited", {
    sampleSizeResult <- getSampleSizeSurvival(accrualTime = c(0), accrualIntensity = c(30), maxNumberOfSubjects = 200)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE))
    expect_equal(sampleSizeResult$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualTime, 6.6666667, tolerance = 1e-07)
    expect_equal(sampleSizeResult$followUpTime, c(5.4039758, 0.22825781, -1.7164516), tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, TRUE)
    expect_equal(sampleSizeResult$nFixed, c(200, 200, 200))
    expect_equal(sampleSizeResult$nFixed1, c(100, 100, 100))
    expect_equal(sampleSizeResult$nFixed2, c(100, 100, 100))
    expect_equal(sampleSizeResult$analysisTime[1, ], c(12.070642, 6.8949245, 4.950215), tolerance = 1e-07)
    expect_equal(sampleSizeResult$studyDuration, c(12.070642, 6.8949245, 4.950215), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.7849857, 2.2098739, 2.686355), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07)
})

test_that("'getSampleSizeSurvival': Determine the necessary accrual time if 200 subjects are required and 20 subjects per time unit can be recruited during the first 6 time units, then 30 subjects per time unit", {
    sampleSizeResult <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(20, 30), maxNumberOfSubjects = 200)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE))
    expect_equal(sampleSizeResult$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualTime, c(6, 8.6666667), tolerance = 1e-07)
    expect_equal(sampleSizeResult$totalAccrualTime, 8.6666667, tolerance = 1e-07)
    expect_equal(sampleSizeResult$followUpTime, c(4.8516734, -0.31523272, -2.5326655), tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, TRUE)
    expect_equal(sampleSizeResult$nFixed, c(200, 200, 200))
    expect_equal(sampleSizeResult$nFixed1, c(100, 100, 100))
    expect_equal(sampleSizeResult$nFixed2, c(100, 100, 100))
    expect_equal(sampleSizeResult$analysisTime[1, ], c(13.51834, 8.3514339, 6.1340011), tolerance = 1e-07)
    expect_equal(sampleSizeResult$studyDuration, c(13.51834, 8.3514339, 6.1340011), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.7849857, 2.2098739, 2.686355), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07)
})

test_that("'getSampleSizeSurvival': Determine the maximum number of subjects if 20 subjects per time unit can be recruited during the first 6 time units and 30 subjects per time unit between time units 6 and 10", {
    sampleSizeResult <- getSampleSizeSurvival(accrualTime = c(0, 6, 10), accrualIntensity = c(20, 30))

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE))
    expect_equal(sampleSizeResult$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, c(240, 240, 240))
    expect_equal(sampleSizeResult$maxNumberOfEvents, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07)
    expect_equal(sampleSizeResult$totalAccrualTime, 10)
    expect_equal(sampleSizeResult$followUpTime, c(2.6783764, -1.6485661, -3.8659989), tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, TRUE)
    expect_equal(sampleSizeResult$nFixed, c(240, 240, 240))
    expect_equal(sampleSizeResult$nFixed1, c(120, 120, 120))
    expect_equal(sampleSizeResult$nFixed2, c(120, 120, 120))
    expect_equal(sampleSizeResult$analysisTime[1, ], c(12.678376, 8.3514339, 6.1340011), tolerance = 1e-07)
    expect_equal(sampleSizeResult$studyDuration, c(12.678376, 8.3514339, 6.1340011), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.7849857, 2.2098739, 2.686355), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07)
})

test_that("'getSampleSizeSurvival': Specify accrual time as a list", {
    at <- list("0 - <6" = 20, "6 - Inf" = 30)
    sampleSizeResult <- getSampleSizeSurvival(accrualTime = at, maxNumberOfSubjects = 200)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE))
    expect_equal(sampleSizeResult$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualTime, c(6, 8.6666667), tolerance = 1e-07)
    expect_equal(sampleSizeResult$totalAccrualTime, 8.6666667, tolerance = 1e-07)
    expect_equal(sampleSizeResult$followUpTime, c(4.8516734, -0.31523272, -2.5326655), tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, TRUE)
    expect_equal(sampleSizeResult$nFixed, c(200, 200, 200))
    expect_equal(sampleSizeResult$nFixed1, c(100, 100, 100))
    expect_equal(sampleSizeResult$nFixed2, c(100, 100, 100))
    expect_equal(sampleSizeResult$analysisTime[1, ], c(13.51834, 8.3514339, 6.1340011), tolerance = 1e-07)
    expect_equal(sampleSizeResult$studyDuration, c(13.51834, 8.3514339, 6.1340011), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.7849857, 2.2098739, 2.686355), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07)
})

test_that("'getSampleSizeSurvival': Specify accrual time as a list if the maximum number of subjects needs to be calculated", {
    at <- list("0 - <6" = 20, "6 - <=10" = 30)
    sampleSizeResult <- getSampleSizeSurvival(accrualTime = at)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE))
    expect_equal(sampleSizeResult$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, c(240, 240, 240))
    expect_equal(sampleSizeResult$maxNumberOfEvents, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07)
    expect_equal(sampleSizeResult$totalAccrualTime, 10)
    expect_equal(sampleSizeResult$followUpTime, c(2.6783764, -1.6485661, -3.8659989), tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, TRUE)
    expect_equal(sampleSizeResult$nFixed, c(240, 240, 240))
    expect_equal(sampleSizeResult$nFixed1, c(120, 120, 120))
    expect_equal(sampleSizeResult$nFixed2, c(120, 120, 120))
    expect_equal(sampleSizeResult$analysisTime[1, ], c(12.678376, 8.3514339, 6.1340011), tolerance = 1e-07)
    expect_equal(sampleSizeResult$studyDuration, c(12.678376, 8.3514339, 6.1340011), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.7849857, 2.2098739, 2.686355), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07)
})

test_that("'getSampleSizeSurvival': Specify effect size for a two-stage group sequential design with O'Brien & Fleming boundaries; the effect size is based on event rates at a specified event time and is chosen such that the hazard ratio is smaller than 1", {
    sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), pi1 = 0.2, pi2 = 0.3, eventTime = 24)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, FALSE)
    expect_equal(sampleSizeResult$median1, 74.550809, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 46.640597, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.009297648, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.014861456, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 0.62562161, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 1076.0672, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 538.03358, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 538.03358, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 143.8377, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 89.672263, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$informationRates[1, ], 0.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$informationRates[2, ], 1)
    expect_equal(sampleSizeResult$analysisTime[1, ], 11.811468, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 18)
    expect_equal(sampleSizeResult$studyDurationH1, 16.702852, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 71.918848, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 143.8377, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 143.65194, tolerance = 1e-07)
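    # Illustrative consistency check (derivation, not part of the expected-results dump):
    # with event probabilities pi1, pi2 given at eventTime, the exponential hazards are
    # lambda = -log(1 - pi) / eventTime, so the hazard ratio equals
    # log(1 - pi1) / log(1 - pi2) = log(0.8) / log(0.7) < 1, which is why directionUpper is FALSE.
    expect_equal(sampleSizeResult$hazardRatio, log(1 - 0.2) / log(1 - 0.3), tolerance = 1e-07)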
expect_equal(sampleSizeResult$expectedEventsH01, 141.26582, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 128.76314, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 1059.161, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 1076.0672, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 1072.5235, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.59039494, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.51710185, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.71909794, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Effect size is based on event rate at specified event time for the reference group and hazard ratio ", { sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, pi2 = 0.3, eventTime = 24) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, FALSE) expect_equal(sampleSizeResult$pi1, 0.16333997, tolerance = 1e-07) expect_equal(sampleSizeResult$median1, 93.281194, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 46.640597, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.007430728, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.014861456, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 532.72433, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 266.36217, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 266.36217, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 65.854457, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 44.393694, tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 11.816947, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 18) expect_equal(sampleSizeResult$studyDurationH1, 16.704001, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 32.927229, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 65.854457, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 65.76941, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 64.676952, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 58.952743, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 524.59793, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 532.72433, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 531.021, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.59039494, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.20960506, tolerance = 1e-07) 
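    # Illustrative consistency check (derivation): under proportional hazards, pi1 follows
    # from pi2 and the hazard ratio as pi1 = 1 - (1 - pi2)^hazardRatio, i.e., 1 - 0.7^0.5 here.
    expect_equal(sampleSizeResult$pi1, 1 - (1 - 0.3)^0.5, tolerance = 1e-07)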
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.37730742, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.61425355, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Effect size is based on hazard rate for the reference group and hazard ratio", { sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, lambda2 = 0.02) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, FALSE) expect_equal(sampleSizeResult$pi1, 0.11307956, tolerance = 1e-07) expect_equal(sampleSizeResult$pi2, 0.21337214, tolerance = 1e-07) expect_equal(sampleSizeResult$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.01, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 406.47112, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 203.23556, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 203.23556, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 65.854457, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 33.872594, tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 11.754955, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 18) expect_equal(sampleSizeResult$studyDurationH1, 16.691007, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 32.927229, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 65.854457, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 65.76941, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 64.676952, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 58.952743, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 398.17083, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 406.47112, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 404.73134, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.59039494, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.37730742, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.61425355, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Specification of piecewise exponential survival time and hazard ratios ", { sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), hazardRatio = c(1.5, 1.8, 2)) ## ## Comparison of the 
results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$maxNumberOfSubjects, c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, c(381.35099, 166.59922, 113.99774), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, c(381.35099, 166.59922, 113.99774), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, c(192.45497, 91.579156, 65.854457), tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, c(63.558499, 27.766537, 18.999624), tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], c(13.350554, 13.286013, 13.241069), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], c(18, 18, 18)) expect_equal(sampleSizeResult$studyDurationH1, c(17.025453, 17.011925, 17.002504), tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, c(18, 18, 18)) expect_equal(sampleSizeResult$eventsPerStage[1, ], c(96.227483, 45.789578, 32.927229), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], c(192.45497, 91.579156, 65.854457), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, c(192.20642, 91.460887, 65.76941), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, c(189.01379, 89.941683, 64.676952), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, c(172.2852, 81.981429, 58.952743), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.59039494, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.76855, 2.2853938, 2.6503587), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], c(1.3298684, 1.5117519, 1.6279922), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Specification of piecewise exponential survival time as a list and hazard ratios ", { pws <- list("0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2)) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$maxNumberOfSubjects, c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, c(381.35099, 166.59922, 113.99774), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, c(381.35099, 166.59922, 113.99774), tolerance = 1e-07) 
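    # Note: the named-list notation for 'piecewiseSurvivalTime' used here is an alternative
    # way of specifying piecewiseSurvivalTime = c(0, 5, 10) together with
    # lambda2 = c(0.01, 0.02, 0.04); the expected results of this test are therefore
    # identical to those of the preceding vector-based test.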
expect_equal(sampleSizeResult$maxNumberOfEvents, c(192.45497, 91.579156, 65.854457), tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, c(63.558499, 27.766537, 18.999624), tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], c(13.350554, 13.286013, 13.241069), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], c(18, 18, 18)) expect_equal(sampleSizeResult$studyDurationH1, c(17.025453, 17.011925, 17.002504), tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, c(18, 18, 18)) expect_equal(sampleSizeResult$eventsPerStage[1, ], c(96.227483, 45.789578, 32.927229), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], c(192.45497, 91.579156, 65.854457), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, c(192.20642, 91.460887, 65.76941), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, c(189.01379, 89.941683, 64.676952), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, c(172.2852, 81.981429, 58.952743), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.59039494, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.76855, 2.2853938, 2.6503587), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], c(1.3298684, 1.5117519, 1.6279922), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Specification of piecewise exponential survival time for both treatment arms", { sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), lambda1 = c(0.015, 0.03, 0.06)) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$lambda1, c(0.015, 0.03, 0.06), tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 1.5, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 762.70199, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 381.35099, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 381.35099, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 192.45497, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 63.558499, tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 13.350554, tolerance = 1e-07) 
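    # Illustrative consistency check: the stage-wise hazards are proportional
    # (0.015 / 0.01 = 0.03 / 0.02 = 0.06 / 0.04 = 1.5), so a single overall hazard
    # ratio of 1.5 results despite the piecewise definition.
    expect_equal(sampleSizeResult$lambda1, 1.5 * c(0.01, 0.02, 0.04), tolerance = 1e-07)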
expect_equal(sampleSizeResult$analysisTime[2, ], 18) expect_equal(sampleSizeResult$studyDurationH1, 17.025453, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 96.227483, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 192.45497, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 192.20642, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 189.01379, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 172.2852, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 762.70199, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 762.70199, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 762.70199, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.59039494, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 1.76855, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.3298684, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Specification of piecewise exponential survival time as a list", { pws <- list("0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2)) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$maxNumberOfSubjects, c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, c(381.35099, 166.59922, 113.99774), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, c(381.35099, 166.59922, 113.99774), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, c(192.45497, 91.579156, 65.854457), tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, c(63.558499, 27.766537, 18.999624), tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE) expect_equal(sampleSizeResult$informationRates[1, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], c(13.350554, 13.286013, 13.241069), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], c(18, 18, 18)) expect_equal(sampleSizeResult$studyDurationH1, c(17.025453, 17.011925, 17.002504), tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, c(18, 18, 18)) expect_equal(sampleSizeResult$eventsPerStage[1, ], c(96.227483, 45.789578, 32.927229), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], c(192.45497, 91.579156, 65.854457), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, c(192.20642, 91.460887, 65.76941), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, c(189.01379, 89.941683, 64.676952), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, c(172.2852, 81.981429, 58.952743), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, 
], c(762.70199, 333.19844, 227.99548), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], c(762.70199, 333.19844, 227.99548), tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, c(762.70199, 333.19844, 227.99548), tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.20960506, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.59039494, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.20960506, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.76855, 2.2853938, 2.6503587), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], c(1.3298684, 1.5117519, 1.6279922), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.0025828932, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.023996469, tolerance = 1e-07)
})

test_that("'getSampleSizeSurvival': Specify effect size based on median survival times (median1 = 5, median2 = 3)", {
    sampleSizeResult <- getSampleSizeSurvival(lambda1 = log(2) / 5, lambda2 = log(2) / 3)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, FALSE)
    expect_equal(sampleSizeResult$pi1, 0.81053543, tolerance = 1e-07)
    expect_equal(sampleSizeResult$pi2, 0.9375, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median1, 5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 3)
    expect_equal(sampleSizeResult$hazardRatio, 0.6, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 120.3157, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$nFixed, 141.26641, tolerance = 1e-07)
    expect_equal(sampleSizeResult$nFixed1, 70.633206, tolerance = 1e-07)
    expect_equal(sampleSizeResult$nFixed2, 70.633206, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[1, ], 18)
    expect_equal(sampleSizeResult$studyDuration, 18)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.6995143, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07)
})

test_that("'getSampleSizeSurvival': Specify effect size based on median survival times of a Weibull distribution with kappa = 2 (median1 = 5, median2 = 3)", {
    sampleSizeResult <- getSampleSizeSurvival(
        lambda1 = getLambdaByMedian(median = 5, kappa = 2),
        lambda2 = getLambdaByMedian(median = 3, kappa = 2), kappa = 2)

    ##
    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    ##
    expect_equal(sampleSizeResult$directionUpper, FALSE)
    expect_equal(sampleSizeResult$pi1, 0.98154699, tolerance = 1e-07)
    expect_equal(sampleSizeResult$pi2, 0.99998474, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median1, 5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 3)
    expect_equal(sampleSizeResult$hazardRatio, 0.36, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 30.078926, tolerance = 1e-07)
    expect_equal(sampleSizeResult$calculateFollowUpTime, FALSE)
    expect_equal(sampleSizeResult$nFixed, 31.248566, tolerance = 1e-07)
    expect_equal(sampleSizeResult$nFixed1, 15.624283, tolerance = 1e-07)
    expect_equal(sampleSizeResult$nFixed2, 15.624283, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[1, ], 18)
    expect_equal(sampleSizeResult$studyDuration, 18)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.48932026, tolerance = 1e-07)
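    # Illustrative consistency check (relation inferred from the median/lambda tests below):
    # for the Weibull parametrization with shape parameter kappa, the median satisfies
    # median = log(2)^(1 / kappa) / lambda, which getLambdaByMedian() inverts.
    expect_equal(getLambdaByMedian(median = 5, kappa = 2), log(2)^(1 / 2) / 5, tolerance = 1e-07)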
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Calculation of maximum number of subjects for given follow-up time", { sampleSizeResult <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, lambda1 = c(0.01), followUpTime = 5) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results ## expect_equal(sampleSizeResult$directionUpper, FALSE) expect_equal(sampleSizeResult$pi1, 0.11307956, tolerance = 1e-07) expect_equal(sampleSizeResult$pi2, 0.21337214, tolerance = 1e-07) expect_equal(sampleSizeResult$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 477.30924, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualTime, c(6, 12.515269), tolerance = 1e-07) expect_equal(sampleSizeResult$totalAccrualTime, 12.515269, tolerance = 1e-07) expect_equal(sampleSizeResult$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult$nFixed, 477.30924, tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed1, 238.65462, tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed2, 238.65462, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 17.515269, tolerance = 1e-07) expect_equal(sampleSizeResult$studyDuration, 17.515269, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeResult2 <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, lambda1 = c(0.01), followUpTime = -1) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult2' with expected results ## expect_equal(sampleSizeResult2$directionUpper, FALSE) expect_equal(sampleSizeResult2$pi1, 0.11307956, tolerance = 1e-07) expect_equal(sampleSizeResult2$pi2, 0.21337214, tolerance = 1e-07) expect_equal(sampleSizeResult2$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult2$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult2$hazardRatio, 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult2$maxNumberOfSubjects, 741.77932, tolerance = 1e-07) expect_equal(sampleSizeResult2$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult2$accrualTime, c(6, 17.50527), tolerance = 1e-07) expect_equal(sampleSizeResult2$totalAccrualTime, 17.50527, tolerance = 1e-07) expect_equal(sampleSizeResult2$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult2$nFixed, 741.77932, tolerance = 1e-07) expect_equal(sampleSizeResult2$nFixed1, 370.88966, tolerance = 1e-07) expect_equal(sampleSizeResult2$nFixed2, 370.88966, tolerance = 1e-07) expect_equal(sampleSizeResult2$analysisTime[1, ], 16.50527, tolerance = 1e-07) expect_equal(sampleSizeResult2$studyDuration, 16.50527, tolerance = 1e-07) expect_equal(sampleSizeResult2$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) expect_equal(sampleSizeResult2$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) .skipTestifDisabled() sampleSizeResult3 <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, lambda1 = c(0.01), followUpTime = 200) ## ## Comparison of the results of 
TrialDesignPlanSurvival object 'sampleSizeResult3' with expected results ## expect_equal(sampleSizeResult3$directionUpper, FALSE) expect_equal(sampleSizeResult3$pi1, 0.11307956, tolerance = 1e-07) expect_equal(sampleSizeResult3$pi2, 0.21337214, tolerance = 1e-07) expect_equal(sampleSizeResult3$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult3$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult3$hazardRatio, 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult3$maxNumberOfSubjects, 70.679258, tolerance = 1e-07) expect_equal(sampleSizeResult3$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult3$accrualTime, 3.2126936, tolerance = 1e-07) expect_equal(sampleSizeResult3$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult3$nFixed, 70.679258, tolerance = 1e-07) expect_equal(sampleSizeResult3$nFixed1, 35.339629, tolerance = 1e-07) expect_equal(sampleSizeResult3$nFixed2, 35.339629, tolerance = 1e-07) expect_equal(sampleSizeResult3$analysisTime[1, ], 203.2127, tolerance = 1e-07) expect_equal(sampleSizeResult3$studyDuration, 203.2127, tolerance = 1e-07) expect_equal(sampleSizeResult3$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) expect_equal(sampleSizeResult3$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeResult4 <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, lambda1 = c(0.01), followUpTime = -200) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult4' with expected results ## expect_equal(sampleSizeResult4$directionUpper, FALSE) expect_equal(sampleSizeResult4$pi1, 0.11307956, tolerance = 1e-07) expect_equal(sampleSizeResult4$pi2, 0.21337214, tolerance = 1e-07) expect_equal(sampleSizeResult4$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult4$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult4$hazardRatio, 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult4$maxNumberOfSubjects, 11288.779, tolerance = 1e-07) expect_equal(sampleSizeResult4$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult4$accrualTime, c(6, 216.50527), tolerance = 1e-07) expect_equal(sampleSizeResult4$totalAccrualTime, 216.50527, tolerance = 1e-07) expect_equal(sampleSizeResult4$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult4$nFixed, 11288.779, tolerance = 1e-07) expect_equal(sampleSizeResult4$nFixed1, 5644.3897, tolerance = 1e-07) expect_equal(sampleSizeResult4$nFixed2, 5644.3897, tolerance = 1e-07) expect_equal(sampleSizeResult4$analysisTime[1, ], 16.50527, tolerance = 1e-07) expect_equal(sampleSizeResult4$studyDuration, 16.50527, tolerance = 1e-07) expect_equal(sampleSizeResult4$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) expect_equal(sampleSizeResult4$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeResult5 <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, lambda1 = c(0.01), followUpTime = 44.43107095) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult5' with expected results ## expect_equal(sampleSizeResult5$directionUpper, FALSE) expect_equal(sampleSizeResult5$pi1, 0.11307956, tolerance = 1e-07) expect_equal(sampleSizeResult5$pi2, 0.21337214, tolerance = 1e-07) expect_equal(sampleSizeResult5$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult5$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult5$hazardRatio, 0.5, 
tolerance = 1e-07) expect_equal(sampleSizeResult5$maxNumberOfSubjects, 131.99999, tolerance = 1e-07) expect_equal(sampleSizeResult5$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult5$accrualTime, 5.9999996, tolerance = 1e-07) expect_equal(sampleSizeResult5$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult5$nFixed, 131.99999, tolerance = 1e-07) expect_equal(sampleSizeResult5$nFixed1, 65.999995, tolerance = 1e-07) expect_equal(sampleSizeResult5$nFixed2, 65.999995, tolerance = 1e-07) expect_equal(sampleSizeResult5$analysisTime[1, ], 50.43107, tolerance = 1e-07) expect_equal(sampleSizeResult5$studyDuration, 50.43107, tolerance = 1e-07) expect_equal(sampleSizeResult5$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) expect_equal(sampleSizeResult5$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeResult6 <- getSampleSizeSurvival(accrualTime = c(0, 60), accrualIntensity = c(22, 53), lambda2 = 0.02, lambda1 = c(0.01), maxNumberOfSubjects = 500000) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult6' with expected results ## expect_equal(sampleSizeResult6$directionUpper, FALSE) expect_equal(sampleSizeResult6$pi1, 0.11307956, tolerance = 1e-07) expect_equal(sampleSizeResult6$pi2, 0.21337214, tolerance = 1e-07) expect_equal(sampleSizeResult6$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult6$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult6$hazardRatio, 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult6$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult6$accrualTime, c(60, 9469.0566), tolerance = 1e-07) expect_equal(sampleSizeResult6$totalAccrualTime, 9469.0566, tolerance = 1e-07) expect_equal(sampleSizeResult6$followUpTime, -9448.0008, tolerance = 1e-07) expect_equal(sampleSizeResult6$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult6$nFixed, 5e+05) expect_equal(sampleSizeResult6$nFixed1, 250000) expect_equal(sampleSizeResult6$nFixed2, 250000) expect_equal(sampleSizeResult6$analysisTime[1, ], 21.055818, tolerance = 1e-07) expect_equal(sampleSizeResult6$studyDuration, 21.055818, tolerance = 1e-07) expect_equal(sampleSizeResult6$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) expect_equal(sampleSizeResult6$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeResult7 <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, lambda1 = c(0.01), followUpTime = 44) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult7' with expected results ## expect_equal(sampleSizeResult7$directionUpper, FALSE) expect_equal(sampleSizeResult7$pi1, 0.11307956, tolerance = 1e-07) expect_equal(sampleSizeResult7$pi2, 0.21337214, tolerance = 1e-07) expect_equal(sampleSizeResult7$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult7$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult7$hazardRatio, 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult7$maxNumberOfSubjects, 132.8172, tolerance = 1e-07) expect_equal(sampleSizeResult7$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult7$accrualTime, c(6, 6.0154188), tolerance = 1e-07) expect_equal(sampleSizeResult7$totalAccrualTime, 6.0154188, tolerance = 1e-07) expect_equal(sampleSizeResult7$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult7$nFixed, 132.8172, tolerance = 1e-07) expect_equal(sampleSizeResult7$nFixed1, 66.408599, tolerance = 
1e-07) expect_equal(sampleSizeResult7$nFixed2, 66.408599, tolerance = 1e-07) expect_equal(sampleSizeResult7$analysisTime[1, ], 50.015396, tolerance = 1e-07) expect_equal(sampleSizeResult7$studyDuration, 50.015396, tolerance = 1e-07) expect_equal(sampleSizeResult7$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) expect_equal(sampleSizeResult7$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeResult8 <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, lambda1 = c(0.01), followUpTime = 45) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult8' with expected results ## expect_equal(sampleSizeResult8$directionUpper, FALSE) expect_equal(sampleSizeResult8$pi1, 0.11307956, tolerance = 1e-07) expect_equal(sampleSizeResult8$pi2, 0.21337214, tolerance = 1e-07) expect_equal(sampleSizeResult8$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult8$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult8$hazardRatio, 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult8$maxNumberOfSubjects, 130.99398, tolerance = 1e-07) expect_equal(sampleSizeResult8$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult8$accrualTime, 5.9542719, tolerance = 1e-07) expect_equal(sampleSizeResult8$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult8$nFixed, 130.99398, tolerance = 1e-07) expect_equal(sampleSizeResult8$nFixed1, 65.496991, tolerance = 1e-07) expect_equal(sampleSizeResult8$nFixed2, 65.496991, tolerance = 1e-07) expect_equal(sampleSizeResult8$analysisTime[1, ], 50.954287, tolerance = 1e-07) expect_equal(sampleSizeResult8$studyDuration, 50.954287, tolerance = 1e-07) expect_equal(sampleSizeResult8$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) expect_equal(sampleSizeResult8$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) }) test_that("'getSampleSizeSurvival': Calculation of median1 and median2", { sampleSizeResult <- getSampleSizeSurvival(lambda1 = log(2) / 3, lambda2 = log(2) / 5) expect_equal(sampleSizeResult$median1, 3) expect_equal(sampleSizeResult$median2, 5) kappa <- 2 sampleSizeResult2 <- getSampleSizeSurvival(lambda1 = log(2)^(1 / kappa) / 3, lambda2 = log(2)^(1 / kappa) / 5, kappa = kappa) expect_equal(sampleSizeResult2$median1, 3) expect_equal(sampleSizeResult2$median2, 5) sampleSizeResult1 <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), median2 = 4, median1 = c(5), followUpTime = 5) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult1' with expected results ## expect_equal(sampleSizeResult1$directionUpper, FALSE) expect_equal(sampleSizeResult1$pi1, 0.81053543, tolerance = 1e-07) expect_equal(sampleSizeResult1$pi2, 0.875, tolerance = 1e-07) expect_equal(sampleSizeResult1$lambda1, 0.13862944, tolerance = 1e-07) expect_equal(sampleSizeResult1$lambda2, 0.1732868, tolerance = 1e-07) expect_equal(sampleSizeResult1$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(sampleSizeResult1$maxNumberOfSubjects, 770.8069, tolerance = 1e-07) expect_equal(sampleSizeResult1$maxNumberOfEvents, 630.52017, tolerance = 1e-07) expect_equal(sampleSizeResult1$accrualTime, c(6, 18.05296), tolerance = 1e-07) expect_equal(sampleSizeResult1$totalAccrualTime, 18.05296, tolerance = 1e-07) expect_equal(sampleSizeResult1$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult1$nFixed, 770.8069, tolerance = 1e-07) expect_equal(sampleSizeResult1$nFixed1, 385.40345, tolerance = 1e-07) 
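    # Illustrative consistency check: with the default eventTime of 12, the event
    # probabilities follow from the exponential hazards as pi = 1 - exp(-lambda * 12),
    # e.g., pi1 = 1 - exp(-log(2) / 5 * 12) for median1 = 5.
    expect_equal(sampleSizeResult1$pi1, 1 - exp(-log(2) / 5 * 12), tolerance = 1e-07)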
expect_equal(sampleSizeResult1$nFixed2, 385.40345, tolerance = 1e-07) expect_equal(sampleSizeResult1$analysisTime[1, ], 23.052959, tolerance = 1e-07) expect_equal(sampleSizeResult1$studyDuration, 23.052959, tolerance = 1e-07) expect_equal(sampleSizeResult1$criticalValuesEffectScale[1, ], 0.85546574, tolerance = 1e-07) expect_equal(sampleSizeResult1$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeResult2 <- getSampleSizeSurvival(median2 = 25, lambda1 = c(0.021, 0.023), maxNumberOfSubjects = 2280) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult2' with expected results ## expect_equal(sampleSizeResult2$directionUpper, c(FALSE, FALSE)) expect_equal(sampleSizeResult2$pi1, c(0.22275526, 0.24118707), tolerance = 1e-07) expect_equal(sampleSizeResult2$pi2, 0.28302238, tolerance = 1e-07) expect_equal(sampleSizeResult2$median1, c(33.007009, 30.136834), tolerance = 1e-07) expect_equal(sampleSizeResult2$lambda2, 0.027725887, tolerance = 1e-07) expect_equal(sampleSizeResult2$hazardRatio, c(0.7574149, 0.82954965), tolerance = 1e-07) expect_equal(sampleSizeResult2$maxNumberOfEvents, c(406.69171, 899.03732), tolerance = 1e-07) expect_equal(sampleSizeResult2$accrualIntensity, 190) expect_equal(sampleSizeResult2$followUpTime, c(2.2277357, 13.964693), tolerance = 1e-07) expect_equal(sampleSizeResult2$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult2$nFixed, c(2280, 2280)) expect_equal(sampleSizeResult2$nFixed1, c(1140, 1140)) expect_equal(sampleSizeResult2$nFixed2, c(1140, 1140)) expect_equal(sampleSizeResult2$analysisTime[1, ], c(14.227736, 25.964693), tolerance = 1e-07) expect_equal(sampleSizeResult2$studyDuration, c(14.227736, 25.964693), tolerance = 1e-07) expect_equal(sampleSizeResult2$criticalValuesEffectScale[1, ], c(0.82334724, 0.87745097), tolerance = 1e-07) expect_equal(sampleSizeResult2$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeResult3 <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), median2 = 50, lambda1 = 0.01, followUpTime = 5) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult3' with expected results ## expect_equal(sampleSizeResult3$directionUpper, FALSE) expect_equal(sampleSizeResult3$pi1, 0.11307956, tolerance = 1e-07) expect_equal(sampleSizeResult3$pi2, 0.15325469, tolerance = 1e-07) expect_equal(sampleSizeResult3$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult3$lambda2, 0.013862944, tolerance = 1e-07) expect_equal(sampleSizeResult3$hazardRatio, 0.72134752, tolerance = 1e-07) expect_equal(sampleSizeResult3$maxNumberOfSubjects, 1477.2065, tolerance = 1e-07) expect_equal(sampleSizeResult3$maxNumberOfEvents, 294.26878, tolerance = 1e-07) expect_equal(sampleSizeResult3$accrualTime, c(6, 31.381254), tolerance = 1e-07) expect_equal(sampleSizeResult3$totalAccrualTime, 31.381254, tolerance = 1e-07) expect_equal(sampleSizeResult3$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult3$nFixed, 1477.2065, tolerance = 1e-07) expect_equal(sampleSizeResult3$nFixed1, 738.60324, tolerance = 1e-07) expect_equal(sampleSizeResult3$nFixed2, 738.60324, tolerance = 1e-07) expect_equal(sampleSizeResult3$analysisTime[1, ], 36.381254, tolerance = 1e-07) expect_equal(sampleSizeResult3$studyDuration, 36.381254, tolerance = 1e-07) expect_equal(sampleSizeResult3$criticalValuesEffectScale[1, ], 0.79571801, tolerance = 1e-07) expect_equal(sampleSizeResult3$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeResult4 <- 
getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, median1 = 32, followUpTime = 5) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult4' with expected results ## expect_equal(sampleSizeResult4$directionUpper, TRUE) expect_equal(sampleSizeResult4$pi1, 0.22889459, tolerance = 1e-07) expect_equal(sampleSizeResult4$pi2, 0.21337214, tolerance = 1e-07) expect_equal(sampleSizeResult4$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult4$lambda1, 0.021660849, tolerance = 1e-07) expect_equal(sampleSizeResult4$hazardRatio, 1.0830425, tolerance = 1e-07) expect_equal(sampleSizeResult4$maxNumberOfSubjects, 7086.5152, tolerance = 1e-07) expect_equal(sampleSizeResult4$maxNumberOfEvents, 4933.3616, tolerance = 1e-07) expect_equal(sampleSizeResult4$accrualTime, c(6, 137.21727), tolerance = 1e-07) expect_equal(sampleSizeResult4$totalAccrualTime, 137.21727, tolerance = 1e-07) expect_equal(sampleSizeResult4$calculateFollowUpTime, TRUE) expect_equal(sampleSizeResult4$nFixed, 7086.5152, tolerance = 1e-07) expect_equal(sampleSizeResult4$nFixed1, 3543.2576, tolerance = 1e-07) expect_equal(sampleSizeResult4$nFixed2, 3543.2576, tolerance = 1e-07) expect_equal(sampleSizeResult4$analysisTime[1, ], 142.21727, tolerance = 1e-07) expect_equal(sampleSizeResult4$studyDuration, 142.21727, tolerance = 1e-07) expect_equal(sampleSizeResult4$criticalValuesEffectScale[1, ], 1.057396, tolerance = 1e-07) expect_equal(sampleSizeResult4$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) }) context("Testing the follow-up time calculation") test_that("'getSampleSizeSurvival': analysis time at last stage equals accrual time + follow-up time", { x1 <- getSampleSizeSurvival(getDesignGroupSequential(typeOfDesign = "P"), accrualTime = 12, maxNumberOfSubjects = 766, pi2 = 0.05, pi1 = 0.1) expect_equal(x1$analysisTime[3], x1$accrualTime + x1$followUpTime) x2 <- getSampleSizeSurvival(getDesignGroupSequential(typeOfDesign = "P"), accrualTime = 12, maxNumberOfSubjects = 766, lambda2 = 0.005, lambda1 = 0.01) expect_equal(x2$analysisTime[3], x2$accrualTime + x2$followUpTime) x3 <- getSampleSizeSurvival(getDesignGroupSequential(typeOfDesign = "WT"), accrualTime = c(0, 12, 15), accrualIntensity = c(20, 30), lambda2 = 0.005, lambda1 = 0.01) expect_equal(x3$analysisTime[length(x3$analysisTime)], x3$accrualTime[length(x3$accrualTime)] + x3$followUpTime) x4 <- getSampleSizeSurvival(getDesignGroupSequential(typeOfDesign = "WT"), accrualTime = c(0, 12, 15), accrualIntensity = c(40, 60), piecewiseSurvivalTime = c(0, 5), lambda2 = c(0.005, 0.01), hazardRatio = 0.8) expect_equal(x4$analysisTime[length(x4$analysisTime)], x4$accrualTime[length(x4$accrualTime)] + x4$followUpTime) }) test_that("'getSampleSizeSurvival': follow-up time is equal for different argument-target constellations", { designGS1 <- getDesignGroupSequential(informationRates = c(0.2,0.5,1), sided = 1, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3) x5 <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) x6 <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, maxNumberOfSubjects = x5$maxNumberOfSubjects, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) expect_equal(x5$followUpTime, x6$followUpTime) x7 <- getSampleSizeSurvival(designGS1, 
typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 6, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) x8 <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 6, accrualTime = 8, maxNumberOfSubjects = x7$maxNumberOfSubjects, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) expect_equal(x7$followUpTime, x8$followUpTime) x9 <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 16, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) x10 <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 16, accrualTime = 8, maxNumberOfSubjects = x9$maxNumberOfSubjects, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) expect_equal(x9$followUpTime, x10$followUpTime) }) context("Testing expected warnings and errors") test_that("'getSampleSizeSurvival': illegal arguments", { expect_error(getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, lambda1 = c(0.01, 0.015), followUpTime = 5), paste0("Illegal argument: the calulation of 'maxNumberOfSubjects' for given 'followUpTime' ", "is only available for a single 'lambda1'; lambda1 = c(0.01, 0.015)"), fixed = TRUE) expect_error(getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, median1 = c(5, 6), followUpTime = 5), paste0("Illegal argument: the calulation of 'maxNumberOfSubjects' for given 'followUpTime' ", "is only available for a single 'lambda1'; lambda1 = c(0.139, 0.116)"), fixed = TRUE) expect_error(getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), median2 = 4, median1 = c(5, 6), followUpTime = 5), paste0("Illegal argument: the calulation of 'maxNumberOfSubjects' for given 'followUpTime' ", "is only available for a single 'median1'; median1 = c(5, 6)"), fixed = TRUE) expect_error(getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), pi2 = 0.213, pi1 = c(0.113, 0.165), followUpTime = 5), paste0("Illegal argument: the calulation of 'maxNumberOfSubjects' for given 'followUpTime' ", "is only available for a single 'pi1'; pi1 = c(0.113, 0.165)"), fixed = TRUE) expect_error(getSampleSizeSurvival(accrualTime = c(0), pi1 = c(0.4, 0.5), accrualIntensity = c(22), followUpTime = 6), paste0("Illegal argument: the calulation of 'maxNumberOfSubjects' for given 'followUpTime' ", "is only available for a single 'pi1'; pi1 = c(0.4, 0.5)"), fixed = TRUE) expect_warning(getSampleSizeSurvival(accrualTime = c(0, 6, 30), pi1 = 0.4, accrualIntensity = c(0.22, 0.53), maxNumberOfSubjects = 1000), "Accrual duration longer than maximal study duration (time to maximal number of events); followUpTime = -17.501", fixed = TRUE) expect_error(getSampleSizeSurvival(accrualTime = c(0, 6), pi1 = 0.4, accrualIntensity = c(0.22, 0.53), maxNumberOfSubjects = 1000), paste0("Illegal argument: the calulation of 'followUpTime' for given 'maxNumberOfSubjects' and ", "relative accrual intensities (< 1) can only be done if end of accrual is defined"), fixed = TRUE) expect_error(getSampleSizeSurvival(lambda2 = -1, hazardRatio = 2), "Argument out of bounds: 'lambda2' (-1) must be >= 0", fixed = TRUE) expect_error(getSampleSizeSurvival(lambda2 = 0, hazardRatio = 2), "Illegal argument: 'lambda2' (0) not allowed: at least one lambda value must be > 
0", fixed = TRUE) expect_error(getSampleSizeSurvival(lambda2 = 0.9, hazardRatio = 0.8, kappa = 0), "Argument out of bounds: 'kappa' (0) must be > 0", fixed = TRUE) }) context("Testing the calculation of event probabilities and number of subjects") test_that("'getEventProbabilities': check expected events over time for overall survival (case 1)", { design <- getDesignGroupSequential( sided = 1, alpha = 0.025, beta = 0.2, informationRates = c(0.33, 0.7, 1), futilityBounds = c(0, 0), bindingFutility = FALSE) piecewiseSurvivalTime <- list( "0 - <6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, "15 - <21" = 0.01, ">=21" = 0.007) accrualTime <- list( "0 - <12" = 15, "12 - <13" = 21, "13 - <14" = 27, "14 - <15" = 33, "15 - <16" = 39, ">=16" = 45) powerResults <- getPowerSurvival( design = design, typeOfComputation = "Schoenfeld", thetaH0 = 1, directionUpper = FALSE, dropoutRate1 = 0.05, dropoutRate2 = 0.05, dropoutTime = 12, allocationRatioPlanned = 1, accrualTime = accrualTime, piecewiseSurvivalTime = piecewiseSurvivalTime, hazardRatio = seq(0.6, 1, 0.05), maxNumberOfEvents = 404, maxNumberOfSubjects = 1405) piecewiseSurvivalTimeOS <- list( "0 - <14" = 0.015, "14 - <24" = 0.01, "24 - <44" = 0.005, ">=44" = 0.0025 ) timeOS <- c(powerResults$analysisTime[2:3, 4], 17 + 3.5 * 12) eventsOS <- getEventProbabilities( timeOS, accrualTime = accrualTime, piecewiseSurvivalTime = piecewiseSurvivalTimeOS, kappa = 1, allocationRatioPlanned = 1, hazardRatio = 0.8, dropoutRate1 = 0.05, dropoutRate2 = 0.05, dropoutTime = 12, maxNumberOfSubjects = 1405)$overallEventProbabilities eventsOS <- eventsOS * 1405 expect_equal(round(timeOS, 2), c(37.60, 46.72, 59.00)) expect_equal(round(eventsOS, 1), c(194.1, 288.7, 365.1)) }) test_that("'getEventProbabilities': check expected events over time for overall survival (case 2)", { accrualTime <- list( "0 - <12" = 15, "12 - <13" = 21, "13 - <14" = 27, "14 - <15" = 33, "15 - <16" = 39, ">=16" = 45) piecewiseSurvivalTimeOS <- list( "0 - <14" = 0.015, "14 - <24" = 0.01, "24 - <44" = 0.005, ">=44" = 0.0025 ) timeOS <- c(37.59823, 46.71658, 59) eventsOS <- getEventProbabilities( timeOS, accrualTime = accrualTime, piecewiseSurvivalTime = piecewiseSurvivalTimeOS, kappa = 1, allocationRatioPlanned = 1, hazardRatio = 0.8, dropoutRate1 = 0.05, dropoutRate2 = 0.05, dropoutTime = 12, maxNumberOfSubjects = 1405) ## ## Comparison of the results of EventProbabilities object 'eventsOS' with expected results ## expect_equal(eventsOS$time, c(37.59823, 46.71658, 59), tolerance = 1e-07) expect_equal(eventsOS$accrualTime, c(12, 13, 14, 15, 16, 40.555556), tolerance = 1e-07) expect_equal(eventsOS$lambda1, c(0.012, 0.008, 0.004, 0.002), tolerance = 1e-07) expect_equal(eventsOS$overallEventProbabilities, c(0.13811859, 0.20546928, 0.2598385), tolerance = 1e-07) expect_equal(eventsOS$eventProbabilities1, c(0.12437783, 0.18544801, 0.23527681), tolerance = 1e-07) expect_equal(eventsOS$eventProbabilities2, c(0.15185935, 0.22549055, 0.28440019), tolerance = 1e-07) }) test_that("'getNumberOfSubjects': check the number of recruited subjects at given time vector", { accrualTime1 <- list( "0 - <12" = 12, "12 - <13" = 21, "13 - <14" = 27, "14 - <15" = 33, "15 - <16" = 39, ">=16" = 45) numberOfSubjects1 <- getNumberOfSubjects(time = 1:3, accrualTime = getAccrualTime(accrualTime1, maxNumberOfSubjects = 1405)) ## ## Comparison of the results of NumberOfSubjects object 'numberOfSubjects1' with expected results ## expect_equal(numberOfSubjects1$time, c(1, 2, 3)) expect_equal(numberOfSubjects1$accrualTime, c(12, 13, 
14, 15, 16, 41.355556), tolerance = 1e-07) expect_equal(numberOfSubjects1$numberOfSubjects, c(12, 24, 36), tolerance = 1e-07) accrualTime2 <- list( "0 - <12" = 12, "12 - <13" = 21, "13 - <14" = 27, "14 - <15" = 33, "15 - <16" = 39) numberOfSubjects2 <- getNumberOfSubjects(time = 1:3, accrualTime = getAccrualTime(accrualTime2)) ## ## Comparison of the results of NumberOfSubjects object 'numberOfSubjects2' with expected results ## expect_equal(numberOfSubjects2$time, c(1, 2, 3)) expect_equal(numberOfSubjects2$maxNumberOfSubjects, 264) expect_equal(numberOfSubjects2$numberOfSubjects, c(12, 24, 36)) }) test_that("'getSampleSizeSurvival': check the calculation of 'maxNumberOfSubjects' for given 'followUpTime'", { sampleSizeSurvival1 <- getSampleSizeSurvival(lambda2 = c(0.02, 0.03), piecewiseSurvivalTime = c(0, 12), hazardRatio = 0.6, followUpTime = 8, accrualIntensity = 30, accrualTime = 0) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeSurvival1' with expected results ## expect_equal(sampleSizeSurvival1$directionUpper, FALSE) expect_equal(sampleSizeSurvival1$lambda1, c(0.012, 0.018), tolerance = 1e-07) expect_equal(sampleSizeSurvival1$maxNumberOfSubjects, 484.65038, tolerance = 1e-07) expect_equal(sampleSizeSurvival1$maxNumberOfEvents, 120.3157, tolerance = 1e-07) expect_equal(sampleSizeSurvival1$accrualTime, 16.155013, tolerance = 1e-07) expect_equal(sampleSizeSurvival1$calculateFollowUpTime, TRUE) expect_equal(sampleSizeSurvival1$nFixed, 484.65038, tolerance = 1e-07) expect_equal(sampleSizeSurvival1$nFixed1, 242.32519, tolerance = 1e-07) expect_equal(sampleSizeSurvival1$nFixed2, 242.32519, tolerance = 1e-07) expect_equal(sampleSizeSurvival1$analysisTime[1, ], 24.155014, tolerance = 1e-07) expect_equal(sampleSizeSurvival1$studyDuration, 24.155014, tolerance = 1e-07) expect_equal(sampleSizeSurvival1$criticalValuesEffectScale[1, ], 0.6995143, tolerance = 1e-07) expect_equal(sampleSizeSurvival1$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeSurvival2 <- getSampleSizeSurvival(piecewiseSurvivalTime = list( "<12" = 0.02, ">=12" = 0.03), hazardRatio = 0.6, followUpTime = 8, accrualIntensity = 30, accrualTime = 0) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeSurvival2' with expected results ## expect_equal(sampleSizeSurvival2$directionUpper, FALSE) expect_equal(sampleSizeSurvival2$lambda1, c(0.012, 0.018), tolerance = 1e-07) expect_equal(sampleSizeSurvival2$maxNumberOfSubjects, 484.65038, tolerance = 1e-07) expect_equal(sampleSizeSurvival2$maxNumberOfEvents, 120.3157, tolerance = 1e-07) expect_equal(sampleSizeSurvival2$accrualTime, 16.155013, tolerance = 1e-07) expect_equal(sampleSizeSurvival2$calculateFollowUpTime, TRUE) expect_equal(sampleSizeSurvival2$nFixed, 484.65038, tolerance = 1e-07) expect_equal(sampleSizeSurvival2$nFixed1, 242.32519, tolerance = 1e-07) expect_equal(sampleSizeSurvival2$nFixed2, 242.32519, tolerance = 1e-07) expect_equal(sampleSizeSurvival2$analysisTime[1, ], 24.155014, tolerance = 1e-07) expect_equal(sampleSizeSurvival2$studyDuration, 24.155014, tolerance = 1e-07) expect_equal(sampleSizeSurvival2$criticalValuesEffectScale[1, ], 0.6995143, tolerance = 1e-07) expect_equal(sampleSizeSurvival2$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeSurvival3 <- getSampleSizeSurvival(lambda2 = c(0.02, 0.03), piecewiseSurvivalTime = c(0, 12), hazardRatio = 0.6, followUpTime = 8, accrualIntensity = 30, accrualTime = 0) ## ## Comparison of the results of TrialDesignPlanSurvival object 
'sampleSizeSurvival3' with expected results ## expect_equal(sampleSizeSurvival3$directionUpper, FALSE) expect_equal(sampleSizeSurvival3$lambda1, c(0.012, 0.018), tolerance = 1e-07) expect_equal(sampleSizeSurvival3$maxNumberOfSubjects, 484.65038, tolerance = 1e-07) expect_equal(sampleSizeSurvival3$maxNumberOfEvents, 120.3157, tolerance = 1e-07) expect_equal(sampleSizeSurvival3$accrualTime, 16.155013, tolerance = 1e-07) expect_equal(sampleSizeSurvival3$calculateFollowUpTime, TRUE) expect_equal(sampleSizeSurvival3$nFixed, 484.65038, tolerance = 1e-07) expect_equal(sampleSizeSurvival3$nFixed1, 242.32519, tolerance = 1e-07) expect_equal(sampleSizeSurvival3$nFixed2, 242.32519, tolerance = 1e-07) expect_equal(sampleSizeSurvival3$analysisTime[1, ], 24.155014, tolerance = 1e-07) expect_equal(sampleSizeSurvival3$studyDuration, 24.155014, tolerance = 1e-07) expect_equal(sampleSizeSurvival3$criticalValuesEffectScale[1, ], 0.6995143, tolerance = 1e-07) expect_equal(sampleSizeSurvival3$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeSurvival4 <- getSampleSizeSurvival(lambda2 = c(0.02, 0.03), piecewiseSurvivalTime = c(0, 12), hazardRatio = 0.8, followUpTime = 8, accrualIntensity = 30, accrualTime = 0) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeSurvival4' with expected results ## expect_equal(sampleSizeSurvival4$directionUpper, FALSE) expect_equal(sampleSizeSurvival4$lambda1, c(0.016, 0.024), tolerance = 1e-07) expect_equal(sampleSizeSurvival4$maxNumberOfSubjects, 1325.4661, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$maxNumberOfEvents, 630.52017, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$accrualTime, 44.182203, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$calculateFollowUpTime, TRUE) expect_equal(sampleSizeSurvival4$nFixed, 1325.4661, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$nFixed1, 662.73305, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$nFixed2, 662.73305, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$analysisTime[1, ], 52.182201, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$studyDuration, 52.182201, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$criticalValuesEffectScale[1, ], 0.85546574, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeSurvival5 <- getSampleSizeSurvival(lambda1 = 0.03, lambda2 = 0.2, hazardRatio = 0.6, followUpTime = 8, accrualIntensity = 30, accrualTime = 0) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeSurvival5' with expected results ## expect_equal(sampleSizeSurvival5$directionUpper, FALSE) expect_equal(sampleSizeSurvival5$pi1, 0.30232367, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$pi2, 0.90928205, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$median1, 23.104906, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$median2, 3.4657359, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$hazardRatio, 0.15, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$maxNumberOfSubjects, 16.953283, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$maxNumberOfEvents, 8.723245, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$accrualTime, 0.56510944, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$calculateFollowUpTime, TRUE) expect_equal(sampleSizeSurvival5$nFixed, 16.953283, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$nFixed1, 8.4766417, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$nFixed2, 8.4766417, tolerance = 1e-07) 
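# Note on the expectations above: although hazardRatio = 0.6 is passed to
# getSampleSizeSurvival, the expected hazardRatio is 0.15. When lambda1 and lambda2 are
# both given explicitly, the hazard ratio is evidently derived from them (with the
# default kappa = 1) as lambda1 / lambda2, superseding the hazardRatio argument.
# A quick check of that arithmetic for this scenario:
expect_equal(0.03 / 0.2, 0.15)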
expect_equal(sampleSizeSurvival5$analysisTime[1, ], 8.5650223, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$studyDuration, 8.5650223, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$criticalValuesEffectScale[1, ], 0.26521666, tolerance = 1e-07) expect_equal(sampleSizeSurvival5$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) sampleSizeSurvival6 <- getSampleSizeSurvival(lambda1 = 0.03, lambda2 = 0.2, hazardRatio = c(0.6, 0.7), followUpTime = 8, accrualIntensity = 30, accrualTime = 0) ## ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeSurvival6' with expected results ## expect_equal(sampleSizeSurvival6$directionUpper, FALSE) expect_equal(sampleSizeSurvival6$pi1, 0.30232367, tolerance = 1e-07) expect_equal(sampleSizeSurvival6$pi2, 0.90928205, tolerance = 1e-07) expect_equal(sampleSizeSurvival6$median1, 23.104906, tolerance = 1e-07) expect_equal(sampleSizeSurvival6$median2, 3.4657359, tolerance = 1e-07) expect_equal(sampleSizeSurvival6$hazardRatio, 0.15, tolerance = 1e-07) expect_equal(sampleSizeSurvival6$maxNumberOfSubjects, 16.953283, tolerance = 1e-07) expect_equal(sampleSizeSurvival6$maxNumberOfEvents, 8.723245, tolerance = 1e-07) expect_equal(sampleSizeSurvival6$accrualTime, 0.56510944, tolerance = 1e-07) expect_equal(sampleSizeSurvival6$calculateFollowUpTime, TRUE) expect_equal(sampleSizeSurvival6$nFixed, 16.953283, tolerance = 1e-07) expect_equal(sampleSizeSurvival6$nFixed1, 8.4766417, tolerance = 1e-07) expect_equal(sampleSizeSurvival6$nFixed2, 8.4766417, tolerance = 1e-07) expect_equal(sampleSizeSurvival6$analysisTime[1, ], 8.5650223, tolerance = 1e-07) expect_equal(sampleSizeSurvival6$studyDuration, 8.5650223, tolerance = 1e-07) expect_equal(sampleSizeSurvival6$criticalValuesEffectScale[1, ], 0.26521666, tolerance = 1e-07) expect_equal(sampleSizeSurvival6$criticalValuesPValueScale[1, ], 0.025, tolerance = 1e-07) expect_error(getSampleSizeSurvival(lambda2 = 0.2, hazardRatio = c(0.6, 0.7), followUpTime = 8, accrualIntensity = 30, accrualTime = 0)) expect_error(getSampleSizeSurvival(lambda1 = c(0.02, 0.03), lambda2 = 0.2, hazardRatio = 0.6, followUpTime = 8, accrualIntensity = 30, accrualTime = 0)) expect_error(getSampleSizeSurvival(lambda2 = c(0.02, 0.03), piecewiseSurvivalTime = c(0, 12), hazardRatio = c(0.6, 0.8), followUpTime = 8, accrualIntensity = 30, accrualTime = 0)) }) rpact/inst/tests/testthat/helper-f_analysis_means.R0000644000176200001440000000477513363325361022277 0ustar liggesusers###################################################################################### # # # -- Unit test helper functions -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 25-09-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### testGetStageResultsPlotData <- function(x, ..., nPlanned, stage = x$getNumberOfStages(), allocationRatioPlanned = 1) { if (x$getDataInput()$isDatasetMeans()) { assumedStDev <- .getOptionalArgument("assumedStDev", ...) 
if (is.null(assumedStDev)) { assumedStDev <- x$assumedStDev return(.getConditionalPowerPlot(stageResults = x, nPlanned = nPlanned, stage = stage, allocationRatioPlanned = allocationRatioPlanned, assumedStDev = assumedStDev, ...)) } } else if (x$getDataInput()$isDatasetRates()) { pi2 <- .getOptionalArgument("pi2", ...) if (is.null(pi2)) { pi2 <- x$pi2 return(.getConditionalPowerPlot(stageResults = x, nPlanned = nPlanned, stage = stage, allocationRatioPlanned = allocationRatioPlanned, pi2 = pi2, ...)) } } return(.getConditionalPowerPlot(stageResults = x, nPlanned = nPlanned, stage = stage, allocationRatioPlanned = allocationRatioPlanned, ...)) }rpact/inst/tests/testthat/test-class_analysis_dataset.R0000644000176200001440000015732013567165663023211 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 26 November 2019, 10:07:41 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing the class 'Dataset'") test_that("Usage of 'getDataset'", { datasetOfMeans1 <- getDataset( n1 = c(22, 11, 22, 11), n2 = c(22, 13, 22, 13), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 3, 2.5), stDevs1 = c(1, 2, 2, 1.3), stDevs2 = c(1, 2, 2, 1.3) ) ## ## Comparison of the results of DatasetMeans object 'datasetOfMeans1' with expected results ## expect_equal(datasetOfMeans1$stages, c(1, 1, 2, 2, 3, 3, 4, 4)) expect_equal(datasetOfMeans1$groups, c(1, 2, 1, 2, 1, 2, 1, 2)) expect_equal(datasetOfMeans1$sampleSizes, c(22, 22, 11, 13, 22, 22, 11, 13)) expect_equal(datasetOfMeans1$means, c(1, 1.4, 1.1, 1.5, 1, 3, 1, 2.5), tolerance = 1e-07) expect_equal(datasetOfMeans1$stDevs, c(1, 1, 2, 2, 2, 2, 1.3, 1.3), tolerance = 1e-07) expect_equal(datasetOfMeans1$overallSampleSizes, c(22, 22, 33, 35, 55, 57, 66, 70)) expect_equal(datasetOfMeans1$overallMeans, c(1, 1.4, 1.0333333, 1.4371429, 1.02, 2.0403509, 1.0166667, 2.1257143), tolerance = 1e-07) expect_equal(datasetOfMeans1$overallStDevs, c(1, 1, 1.3814998, 1.4254175, 1.6391506, 1.8228568, 1.5786638, 1.7387056), tolerance = 1e-07) ## ## Comparison of the results of data.frame object 'datasetOfMeans1$.data' with expected results ## expect_equal(datasetOfMeans1$.data$stage, c(1, 1, 2, 2, 3, 3, 4, 4)) expect_equal(datasetOfMeans1$.data$group, c(1, 2, 1, 2, 1, 2, 1, 2)) expect_equal(datasetOfMeans1$.data$sampleSize, c(22, 22, 11, 13, 22, 22, 11, 13)) expect_equal(datasetOfMeans1$.data$mean, c(1, 1.4, 1.1, 1.5, 1, 3, 1, 2.5), tolerance = 1e-07) expect_equal(datasetOfMeans1$.data$stDev, c(1, 1, 2, 2, 2, 2, 1.3, 1.3), tolerance = 1e-07) expect_equal(datasetOfMeans1$.data$overallSampleSize, c(22, 22, 33, 35, 55, 57, 66, 70)) expect_equal(datasetOfMeans1$.data$overallMean, c(1, 1.4, 1.0333333, 1.4371429, 1.02, 2.0403509, 1.0166667, 2.1257143), tolerance = 1e-07) expect_equal(datasetOfMeans1$.data$overallStDev, c(1, 1, 1.3814998, 1.4254175, 1.6391506, 1.8228568, 1.5786638, 1.7387056), tolerance = 1e-07) expect_equal(datasetOfMeans1$stages, datasetOfMeans1$.data$stage, 
tolerance = 1e-07) expect_equal(datasetOfMeans1$groups, datasetOfMeans1$.data$group, tolerance = 1e-07) expect_equal(datasetOfMeans1$sampleSizes, datasetOfMeans1$.data$sampleSize, tolerance = 1e-07) expect_equal(datasetOfMeans1$means, datasetOfMeans1$.data$mean, tolerance = 1e-07) expect_equal(datasetOfMeans1$stDevs, datasetOfMeans1$.data$stDev, tolerance = 1e-07) expect_equal(datasetOfMeans1$overallSampleSizes, datasetOfMeans1$.data$overallSampleSize, tolerance = 1e-07) expect_equal(datasetOfMeans1$overallMeans, datasetOfMeans1$.data$overallMean, tolerance = 1e-07) expect_equal(datasetOfMeans1$overallStDevs, datasetOfMeans1$.data$overallStDev, tolerance = 1e-07) x <- getMultipleStageResultsForDataset(datasetOfMeans1) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults1' with expected results ## expect_equal(x$stageResults1$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(x$stageResults1$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056), tolerance = 1e-07) expect_equal(x$stageResults1$overallSampleSizes1, c(22, 33, 55, 66)) expect_equal(x$stageResults1$overallSampleSizes2, c(22, 35, 57, 70)) expect_equal(x$stageResults1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(-0.4, -0.40380952, -1.0203509, -1.1090476), tolerance = 1e-07) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults2' with expected results ## expect_equal(x$stageResults2$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(x$stageResults2$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056), tolerance = 1e-07) expect_equal(x$stageResults2$overallSampleSizes1, c(22, 33, 55, 66)) expect_equal(x$stageResults2$overallSampleSizes2, c(22, 35, 57, 70)) expect_equal(x$stageResults2$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(-0.4, -0.40380952, -1.0203509, -1.1090476), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults3' with expected results ## expect_equal(x$stageResults3$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, 
NA_real_)) expect_equal(x$stageResults3$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(x$stageResults3$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056), tolerance = 1e-07) expect_equal(x$stageResults3$overallSampleSizes1, c(22, 33, 55, 66)) expect_equal(x$stageResults3$overallSampleSizes2, c(22, 35, 57, 70)) expect_equal(x$stageResults3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(-0.4, -0.40380952, -1.0203509, -1.1090476), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) datasetOfMeans2 <- getDataset(data.frame( stages = 1:4, n1 = c(22, 11, 22, 11), n2 = c(22, 13, 22, 13), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 3, 2.5), stDevs1 = c(1, 2, 2, 1.3), stDevs2 = c(1, 2, 2, 1.3) )) x <- getMultipleStageResultsForDataset(datasetOfMeans2) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults1' with expected results ## expect_equal(x$stageResults1$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(x$stageResults1$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056), tolerance = 1e-07) expect_equal(x$stageResults1$overallSampleSizes1, c(22, 33, 55, 66)) expect_equal(x$stageResults1$overallSampleSizes2, c(22, 35, 57, 70)) expect_equal(x$stageResults1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(-0.4, -0.40380952, -1.0203509, -1.1090476), tolerance = 1e-07) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults2' with expected results ## expect_equal(x$stageResults2$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(x$stageResults2$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056), tolerance = 1e-07) expect_equal(x$stageResults2$overallSampleSizes1, c(22, 33, 55, 66)) expect_equal(x$stageResults2$overallSampleSizes2, c(22, 35, 57, 70)) expect_equal(x$stageResults2$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(-0.4, -0.40380952, 
-1.0203509, -1.1090476), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults3' with expected results ## expect_equal(x$stageResults3$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(x$stageResults3$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056), tolerance = 1e-07) expect_equal(x$stageResults3$overallSampleSizes1, c(22, 33, 55, 66)) expect_equal(x$stageResults3$overallSampleSizes2, c(22, 35, 57, 70)) expect_equal(x$stageResults3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(-0.4, -0.40380952, -1.0203509, -1.1090476), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) datasetOfMeans3 <- getDataset( overallSampleSizes1 = c(22, 33, 55, 66), overallSampleSizes2 = c(22, 35, 57, 70), overallMeans1 = c(1, 1.033333, 1.02, 1.016667), overallMeans2 = c(1.4, 1.437143, 2.040351, 2.125714), overallStDevs1 = c(1, 1.381500, 1.639151, 1.578664), overallStDevs2 = c(1, 1.425418, 1.822857, 1.738706) ) ## ## Comparison of the results of DatasetMeans object 'datasetOfMeans3' with expected results ## expect_equal(datasetOfMeans3$stages, c(1, 1, 2, 2, 3, 3, 4, 4)) expect_equal(datasetOfMeans3$groups, c(1, 2, 1, 2, 1, 2, 1, 2)) expect_equal(datasetOfMeans3$sampleSizes, c(22, 22, 11, 13, 22, 22, 11, 13)) expect_equal(datasetOfMeans3$means, c(1, 1.4, 1.099999, 1.5000004, 1.0000005, 3.0000001, 1.000002, 2.4999979), tolerance = 1e-07) expect_equal(datasetOfMeans3$stDevs, c(1, 1, 2.0000005, 2.0000009, 2.0000005, 1.9999999, 1.2999989, 1.3000023), tolerance = 1e-07) expect_equal(datasetOfMeans3$overallSampleSizes, c(22, 22, 33, 35, 55, 57, 66, 70)) expect_equal(datasetOfMeans3$overallMeans, c(1, 1.4, 1.033333, 1.437143, 1.02, 2.040351, 1.016667, 2.125714), tolerance = 1e-07) expect_equal(datasetOfMeans3$overallStDevs, c(1, 1, 1.3815, 1.425418, 1.639151, 1.822857, 1.578664, 1.738706), tolerance = 1e-07) ## ## Comparison of the results of data.frame object 'datasetOfMeans3$.data' with expected results ## expect_equal(datasetOfMeans3$.data$stage, c(1, 1, 2, 2, 3, 3, 4, 4)) expect_equal(datasetOfMeans3$.data$group, c(1, 2, 1, 2, 1, 2, 1, 2)) expect_equal(datasetOfMeans3$.data$sampleSize, c(22, 22, 11, 13, 22, 22, 11, 13)) expect_equal(datasetOfMeans3$.data$mean, c(1, 1.4, 1.099999, 1.5000004, 1.0000005, 3.0000001, 1.000002, 2.4999979), tolerance = 1e-07) expect_equal(datasetOfMeans3$.data$stDev, c(1, 1, 2.0000005, 2.0000009, 2.0000005, 1.9999999, 1.2999989, 1.3000023), tolerance = 1e-07) expect_equal(datasetOfMeans3$.data$overallSampleSize, c(22, 22, 33, 35, 55, 57, 66, 70)) expect_equal(datasetOfMeans3$.data$overallMean, 
c(1, 1.4, 1.033333, 1.437143, 1.02, 2.040351, 1.016667, 2.125714), tolerance = 1e-07) expect_equal(datasetOfMeans3$.data$overallStDev, c(1, 1, 1.3815, 1.425418, 1.639151, 1.822857, 1.578664, 1.738706), tolerance = 1e-07) x <- getMultipleStageResultsForDataset(datasetOfMeans3) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults1' with expected results ## expect_equal(x$stageResults1$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallMeans1, c(1, 1.033333, 1.02, 1.016667), tolerance = 1e-07) expect_equal(x$stageResults1$overallMeans2, c(1.4, 1.437143, 2.040351, 2.125714), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs1, c(1, 1.3815, 1.639151, 1.578664), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs2, c(1, 1.425418, 1.822857, 1.738706), tolerance = 1e-07) expect_equal(x$stageResults1$overallSampleSizes1, c(22, 33, 55, 66)) expect_equal(x$stageResults1$overallSampleSizes2, c(22, 35, 57, 70)) expect_equal(x$stageResults1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(-0.4, -0.40381, -1.020351, -1.109047), tolerance = 1e-07) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults2' with expected results ## expect_equal(x$stageResults2$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallMeans1, c(1, 1.033333, 1.02, 1.016667), tolerance = 1e-07) expect_equal(x$stageResults2$overallMeans2, c(1.4, 1.437143, 2.040351, 2.125714), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs1, c(1, 1.3815, 1.639151, 1.578664), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs2, c(1, 1.425418, 1.822857, 1.738706), tolerance = 1e-07) expect_equal(x$stageResults2$overallSampleSizes1, c(22, 33, 55, 66)) expect_equal(x$stageResults2$overallSampleSizes2, c(22, 35, 57, 70)) expect_equal(x$stageResults2$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(-0.4, -0.40381, -1.020351, -1.109047), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults3' with expected results ## expect_equal(x$stageResults3$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallMeans1, c(1, 1.033333, 1.02, 1.016667), tolerance = 1e-07) expect_equal(x$stageResults3$overallMeans2, c(1.4, 1.437143, 2.040351, 2.125714), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs1, c(1, 1.3815, 1.639151, 1.578664), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs2, c(1, 1.425418, 1.822857, 1.738706), tolerance = 1e-07) expect_equal(x$stageResults3$overallSampleSizes1, c(22, 33, 55, 66)) 
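# Background for the stage-wise values recovered from cumulative input above (the usual
# inversion, sketched here; not a statement about rpact internals): with cumulative
# sample sizes N_k and cumulative means M_k, the stage-wise quantities follow from
# n_k = N_k - N_(k-1) and mean_k = (N_k * M_k - N_(k-1) * M_(k-1)) / n_k.
# Reproducing the expected second-stage mean of group 1 of 'datasetOfMeans3' that way:
expect_equal((33 * 1.033333 - 22 * 1) / (33 - 22), 1.099999, tolerance = 1e-06)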
expect_equal(x$stageResults3$overallSampleSizes2, c(22, 35, 57, 70)) expect_equal(x$stageResults3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(-0.4, -0.40381, -1.020351, -1.109047), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) }) test_that("Creation of a dataset of means using stage wise data (one group)", { datasetOfMeans4 <- getDataset( n = c(22, 11, 22, 11), means = c(1, 1.1, 1, 1), stDevs = c(1, 2, 2, 1.3) ) ## ## Comparison of the results of DatasetMeans object 'datasetOfMeans4' with expected results ## expect_equal(datasetOfMeans4$stages, c(1, 2, 3, 4)) expect_equal(datasetOfMeans4$groups, c(1, 1, 1, 1)) expect_equal(datasetOfMeans4$sampleSizes, c(22, 11, 22, 11)) expect_equal(datasetOfMeans4$means, c(1, 1.1, 1, 1), tolerance = 1e-07) expect_equal(datasetOfMeans4$stDevs, c(1, 2, 2, 1.3), tolerance = 1e-07) expect_equal(datasetOfMeans4$overallSampleSizes, c(22, 33, 55, 66)) expect_equal(datasetOfMeans4$overallMeans, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(datasetOfMeans4$overallStDevs, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) ## ## Comparison of the results of data.frame object 'datasetOfMeans4$.data' with expected results ## expect_equal(datasetOfMeans4$.data$stage, c(1, 2, 3, 4)) expect_equal(datasetOfMeans4$.data$group, c(1, 1, 1, 1)) expect_equal(datasetOfMeans4$.data$sampleSize, c(22, 11, 22, 11)) expect_equal(datasetOfMeans4$.data$mean, c(1, 1.1, 1, 1), tolerance = 1e-07) expect_equal(datasetOfMeans4$.data$stDev, c(1, 2, 2, 1.3), tolerance = 1e-07) expect_equal(datasetOfMeans4$.data$overallSampleSize, c(22, 33, 55, 66)) expect_equal(datasetOfMeans4$.data$overallMean, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(datasetOfMeans4$.data$overallStDev, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) x <- getMultipleStageResultsForDataset(datasetOfMeans4) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults1' with expected results ## expect_equal(x$stageResults1$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallMeans, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(x$stageResults1$overallSampleSizes, c(22, 33, 55, 66)) expect_equal(x$stageResults1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(1, 1.0333333, 1.02, 1.0166667, NA_real_), tolerance = 1e-07) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults2' with expected results ## expect_equal(x$stageResults2$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallMeans, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) 
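# Note on the repeated weight 0.4472136 expected just below (and throughout this file)
# for weightsInverseNormal: it is consistent with equally weighted stages, i.e.
# sqrt(1 / kMax) with kMax = 5. That reading of the default design is an assumption,
# not something the test states; the arithmetic itself is checked here:
expect_equal(1 / sqrt(5), 0.4472136, tolerance = 1e-07)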
expect_equal(x$stageResults2$overallSampleSizes, c(22, 33, 55, 66)) expect_equal(x$stageResults2$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(1, 1.0333333, 1.02, 1.0166667, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults3' with expected results ## expect_equal(x$stageResults3$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallMeans, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(x$stageResults3$overallSampleSizes, c(22, 33, 55, 66)) expect_equal(x$stageResults3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(1, 1.0333333, 1.02, 1.0166667, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) }) test_that("Creation of a dataset of means using overall data (one group)", { datasetOfMeans5 <- getDataset( overallSampleSizes = c(22, 33, 55, 66), overallMeans = c(1.000, 1.033, 1.020, 1.017 ), overallStDevs = c(1.00, 1.38, 1.64, 1.58) ) ## ## Comparison of the results of DatasetMeans object 'datasetOfMeans5' with expected results ## expect_equal(datasetOfMeans5$stages, c(1, 2, 3, 4)) expect_equal(datasetOfMeans5$groups, c(1, 1, 1, 1)) expect_equal(datasetOfMeans5$sampleSizes, c(22, 11, 22, 11)) expect_equal(datasetOfMeans5$means, c(1, 1.099, 1.0005, 1.002), tolerance = 1e-07) expect_equal(datasetOfMeans5$stDevs, c(1, 1.9967205, 2.003374, 1.3047847), tolerance = 1e-07) expect_equal(datasetOfMeans5$overallSampleSizes, c(22, 33, 55, 66)) expect_equal(datasetOfMeans5$overallMeans, c(1, 1.033, 1.02, 1.017), tolerance = 1e-07) expect_equal(datasetOfMeans5$overallStDevs, c(1, 1.38, 1.64, 1.58), tolerance = 1e-07) ## ## Comparison of the results of data.frame object 'datasetOfMeans5$.data' with expected results ## expect_equal(datasetOfMeans5$.data$stage, c(1, 2, 3, 4)) expect_equal(datasetOfMeans5$.data$group, c(1, 1, 1, 1)) expect_equal(datasetOfMeans5$.data$sampleSize, c(22, 11, 22, 11)) expect_equal(datasetOfMeans5$.data$mean, c(1, 1.099, 1.0005, 1.002), tolerance = 1e-07) expect_equal(datasetOfMeans5$.data$stDev, c(1, 1.9967205, 2.003374, 1.3047847), tolerance = 1e-07) expect_equal(datasetOfMeans5$.data$overallSampleSize, c(22, 33, 55, 66)) expect_equal(datasetOfMeans5$.data$overallMean, c(1, 1.033, 1.02, 1.017), tolerance = 1e-07) expect_equal(datasetOfMeans5$.data$overallStDev, c(1, 1.38, 1.64, 1.58), tolerance = 1e-07) x <- getMultipleStageResultsForDataset(datasetOfMeans5) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults1' with expected results ## expect_equal(x$stageResults1$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) 
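# Background for the stage-wise standard deviations recovered from overall data earlier
# in this test (a sketch of the standard pooled-variance decomposition; not a statement
# about rpact internals): with cumulative (N_k, M_k, S_k) and stage-wise (n_k, m_k, s_k),
#   (N_k - 1) * S_k^2 = (N_(k-1) - 1) * S_(k-1)^2 + (n_k - 1) * s_k^2
#                       + (N_(k-1) * n_k / N_k) * (M_(k-1) - m_k)^2,
# which can be solved for s_k. Reproducing the expected second-stage value 1.9967205:
s2 <- sqrt(((33 - 1) * 1.38^2 - (22 - 1) * 1^2 - (22 * 11 / 33) * (1 - 1.099)^2) / (11 - 1))
expect_equal(s2, 1.9967205, tolerance = 1e-06)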
expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallMeans, c(1, 1.033, 1.02, 1.017), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs, c(1, 1.38, 1.64, 1.58), tolerance = 1e-07) expect_equal(x$stageResults1$overallSampleSizes, c(22, 33, 55, 66)) expect_equal(x$stageResults1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(1, 1.033, 1.02, 1.017, NA_real_), tolerance = 1e-07) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults2' with expected results ## expect_equal(x$stageResults2$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallMeans, c(1, 1.033, 1.02, 1.017), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs, c(1, 1.38, 1.64, 1.58), tolerance = 1e-07) expect_equal(x$stageResults2$overallSampleSizes, c(22, 33, 55, 66)) expect_equal(x$stageResults2$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(1, 1.033, 1.02, 1.017, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) ## ## Comparison of the results of StageResultsMeans object 'x$stageResults3' with expected results ## expect_equal(x$stageResults3$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallMeans, c(1, 1.033, 1.02, 1.017), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs, c(1, 1.38, 1.64, 1.58), tolerance = 1e-07) expect_equal(x$stageResults3$overallSampleSizes, c(22, 33, 55, 66)) expect_equal(x$stageResults3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(1, 1.033, 1.02, 1.017, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) }) test_that("Creation of a dataset of rates using stage wise data (one group)", { datasetOfRates1 <- getDataset( n = c(8, 10, 9, 11), events = c(4, 5, 5, 6) ) ## ## Comparison of the results of DatasetRates object 'datasetOfRates1' with expected results ## expect_equal(datasetOfRates1$stages, c(1, 2, 3, 4)) expect_equal(datasetOfRates1$groups, c(1, 1, 1, 1)) expect_equal(datasetOfRates1$sampleSizes, c(8, 10, 9, 11)) expect_equal(datasetOfRates1$events, c(4, 5, 5, 6)) expect_equal(datasetOfRates1$overallSampleSizes, c(8, 18, 27, 38)) expect_equal(datasetOfRates1$overallEvents, c(4, 9, 14, 20)) ## ## Comparison of the results of data.frame object 'datasetOfRates1$.data' with expected results ## expect_equal(datasetOfRates1$.data$stage, c(1, 2, 3, 4)) expect_equal(datasetOfRates1$.data$group, c(1, 1, 1, 1)) 
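# The overall columns compared in this test are running totals of the stage-wise
# inputs, i.e. overallSampleSizes = cumsum(n) and overallEvents = cumsum(events).
# Verifying that relation directly:
expect_equal(cumsum(c(8, 10, 9, 11)), c(8, 18, 27, 38))
expect_equal(cumsum(c(4, 5, 5, 6)), c(4, 9, 14, 20))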
expect_equal(datasetOfRates1$.data$sampleSize, c(8, 10, 9, 11)) expect_equal(datasetOfRates1$.data$event, c(4, 5, 5, 6)) expect_equal(datasetOfRates1$.data$overallSampleSize, c(8, 18, 27, 38)) expect_equal(datasetOfRates1$.data$overallEvent, c(4, 9, 14, 20)) x <- getMultipleStageResultsForDataset(datasetOfRates1, thetaH0 = 0.99) ## ## Comparison of the results of StageResultsRates object 'x$stageResults1' with expected results ## expect_equal(x$stageResults1$overallTestStatistics, c(-13.929113, -20.89367, -24.622317, -28.727412, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$overallPValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults1$overallEvents, c(4, 9, 14, 20)) expect_equal(x$stageResults1$overallSampleSizes, c(8, 18, 27, 38)) expect_equal(x$stageResults1$testStatistics, c(-13.929113, -15.573222, -13.098993, -14.818182, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$pValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(0.5, 0.5, 0.51851852, 0.52631579, NA_real_), tolerance = 1e-07) ## ## Comparison of the results of StageResultsRates object 'x$stageResults2' with expected results ## expect_equal(x$stageResults2$overallTestStatistics, c(-13.929113, -20.89367, -24.622317, -28.727412, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$overallPValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults2$overallEvents, c(4, 9, 14, 20)) expect_equal(x$stageResults2$overallSampleSizes, c(8, 18, 27, 38)) expect_equal(x$stageResults2$testStatistics, c(-13.929113, -15.573222, -13.098993, -14.818182, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$pValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(0.5, 0.5, 0.51851852, 0.52631579, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(-Inf, -Inf, -Inf, -Inf, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) ## ## Comparison of the results of StageResultsRates object 'x$stageResults3' with expected results ## expect_equal(x$stageResults3$overallTestStatistics, c(-13.929113, -20.89367, -24.622317, -28.727412, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$overallPValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults3$overallEvents, c(4, 9, 14, 20)) expect_equal(x$stageResults3$overallSampleSizes, c(8, 18, 27, 38)) expect_equal(x$stageResults3$testStatistics, c(-13.929113, -15.573222, -13.098993, -14.818182, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$pValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(0.5, 0.5, 0.51851852, 0.52631579, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) }) test_that("Creation of a dataset of rates using stage wise data (two groups)", { datasetOfRates2 <- getDataset( n2 = c(8, 10, 9, 11), n1 = c(11, 13, 12, 13), events2 = c(3, 5, 5, 6), events1 = c(10, 10, 12, 12) ) ## ## Comparison of the results of DatasetRates object 'datasetOfRates2' with expected results ## expect_equal(datasetOfRates2$stages, c(1, 1, 2, 2, 3, 3, 4, 4)) expect_equal(datasetOfRates2$groups, c(1, 2, 1, 2, 1, 2, 1, 2)) expect_equal(datasetOfRates2$sampleSizes, c(11, 8, 13, 10, 12, 9, 13, 11)) expect_equal(datasetOfRates2$events, c(10, 3, 10, 5, 12, 5, 12, 6)) expect_equal(datasetOfRates2$overallSampleSizes, c(11, 8, 24, 18, 36, 27, 49, 38)) 
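# For two-group rate data, the effect sizes expected further below for the stage
# results are differences of the cumulative observed rates, e.g. at stage 1:
# 10/11 - 3/8 = 0.53409091, and at stage 2: 20/24 - 8/18 = 0.38888889.
# Checking that arithmetic:
expect_equal(10 / 11 - 3 / 8, 0.53409091, tolerance = 1e-07)
expect_equal(20 / 24 - 8 / 18, 0.38888889, tolerance = 1e-07)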
expect_equal(datasetOfRates2$overallEvents, c(10, 3, 20, 8, 32, 13, 44, 19)) ## ## Comparison of the results of data.frame object 'datasetOfRates2$.data' with expected results ## expect_equal(datasetOfRates2$.data$stage, c(1, 1, 2, 2, 3, 3, 4, 4)) expect_equal(datasetOfRates2$.data$group, c(1, 2, 1, 2, 1, 2, 1, 2)) expect_equal(datasetOfRates2$.data$sampleSize, c(11, 8, 13, 10, 12, 9, 13, 11)) expect_equal(datasetOfRates2$.data$event, c(10, 3, 10, 5, 12, 5, 12, 6)) expect_equal(datasetOfRates2$.data$overallSampleSize, c(11, 8, 24, 18, 36, 27, 49, 38)) expect_equal(datasetOfRates2$.data$overallEvent, c(10, 3, 20, 8, 32, 13, 44, 19)) x <- getMultipleStageResultsForDataset(datasetOfRates2, thetaH0 = 0.99) ## ## Comparison of the results of StageResultsRates object 'x$stageResults1' with expected results ## expect_equal(x$stageResults1$overallTestStatistics, c(-13.397899, -26.707477, -31.300879, -37.503444, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$overallPValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults1$overallEvents1, c(10, 20, 32, 44)) expect_equal(x$stageResults1$overallEvents2, c(3, 8, 13, 19)) expect_equal(x$stageResults1$overallSampleSizes1, c(11, 24, 36, 49)) expect_equal(x$stageResults1$overallSampleSizes2, c(8, 18, 27, 38)) expect_equal(x$stageResults1$testStatistics, c(-13.397899, -23.909016, -16.449119, -20.614826, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$pValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(0.53409091, 0.38888889, 0.40740741, 0.39795918, NA_real_), tolerance = 1e-07) ## ## Comparison of the results of StageResultsRates object 'x$stageResults2' with expected results ## expect_equal(x$stageResults2$overallTestStatistics, c(-13.397899, -26.707477, -31.300879, -37.503444, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$overallPValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults2$overallEvents1, c(10, 20, 32, 44)) expect_equal(x$stageResults2$overallEvents2, c(3, 8, 13, 19)) expect_equal(x$stageResults2$overallSampleSizes1, c(11, 24, 36, 49)) expect_equal(x$stageResults2$overallSampleSizes2, c(8, 18, 27, 38)) expect_equal(x$stageResults2$testStatistics, c(-13.397899, -23.909016, -16.449119, -20.614826, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$pValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(0.53409091, 0.38888889, 0.40740741, 0.39795918, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(-Inf, -Inf, -Inf, -Inf, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) ## ## Comparison of the results of StageResultsRates object 'x$stageResults3' with expected results ## expect_equal(x$stageResults3$overallTestStatistics, c(-13.397899, -26.707477, -31.300879, -37.503444, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$overallPValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults3$overallEvents1, c(10, 20, 32, 44)) expect_equal(x$stageResults3$overallEvents2, c(3, 8, 13, 19)) expect_equal(x$stageResults3$overallSampleSizes1, c(11, 24, 36, 49)) expect_equal(x$stageResults3$overallSampleSizes2, c(8, 18, 27, 38)) expect_equal(x$stageResults3$testStatistics, c(-13.397899, -23.909016, -16.449119, -20.614826, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$pValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(0.53409091, 0.38888889, 0.40740741, 0.39795918, NA_real_), tolerance = 
1e-07) expect_equal(x$stageResults3$combFisher, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) }) test_that("Creation of a dataset of rates using stage wise data (three groups)", { datasetOfRates3 <- getDataset( n1 = c(11, 13, 12, 13), n2 = c(8, 10, 9, 11), n3 = c(7, 10, 8, 9), events1 = c(10, 10, 12, 12), events2 = c(3, 5, 5, 6), events3 = c(2, 4, 3, 5) ) ## ## Comparison of the results of DatasetRates object 'datasetOfRates3' with expected results ## expect_equal(datasetOfRates3$stages, c(1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4)) expect_equal(datasetOfRates3$groups, c(1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3)) expect_equal(datasetOfRates3$sampleSizes, c(11, 8, 7, 13, 10, 10, 12, 9, 8, 13, 11, 9)) expect_equal(datasetOfRates3$events, c(10, 3, 2, 10, 5, 4, 12, 5, 3, 12, 6, 5)) expect_equal(datasetOfRates3$overallSampleSizes, c(11, 8, 7, 24, 18, 17, 36, 27, 25, 49, 38, 34)) expect_equal(datasetOfRates3$overallEvents, c(10, 3, 2, 20, 8, 6, 32, 13, 9, 44, 19, 14)) ## ## Comparison of the results of data.frame object 'datasetOfRates3$.data' with expected results ## expect_equal(datasetOfRates3$.data$stage, c(1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4)) expect_equal(datasetOfRates3$.data$group, c(1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3)) expect_equal(datasetOfRates3$.data$sampleSize, c(11, 8, 7, 13, 10, 10, 12, 9, 8, 13, 11, 9)) expect_equal(datasetOfRates3$.data$event, c(10, 3, 2, 10, 5, 4, 12, 5, 3, 12, 6, 5)) expect_equal(datasetOfRates3$.data$overallSampleSize, c(11, 8, 7, 24, 18, 17, 36, 27, 25, 49, 38, 34)) expect_equal(datasetOfRates3$.data$overallEvent, c(10, 3, 2, 20, 8, 6, 32, 13, 9, 44, 19, 14)) }) test_that("Creation of a dataset of rates using overall data (two groups)", { datasetOfRates4 <- getDataset( overallSampleSizes1 = c(11, 24, 36, 49), overallSampleSizes2 = c(8, 18, 27, 38), overallEvents1 = c(10, 20, 32, 44), overallEvents2 = c(3, 8, 13, 19) ) ## ## Comparison of the results of DatasetRates object 'datasetOfRates4' with expected results ## expect_equal(datasetOfRates4$stages, c(1, 1, 2, 2, 3, 3, 4, 4)) expect_equal(datasetOfRates4$groups, c(1, 2, 1, 2, 1, 2, 1, 2)) expect_equal(datasetOfRates4$sampleSizes, c(11, 8, 13, 10, 12, 9, 13, 11)) expect_equal(datasetOfRates4$events, c(10, 3, 10, 5, 12, 5, 12, 6)) expect_equal(datasetOfRates4$overallSampleSizes, c(11, 8, 24, 18, 36, 27, 49, 38)) expect_equal(datasetOfRates4$overallEvents, c(10, 3, 20, 8, 32, 13, 44, 19)) ## ## Comparison of the results of data.frame object 'datasetOfRates4$.data' with expected results ## expect_equal(datasetOfRates4$.data$stage, c(1, 1, 2, 2, 3, 3, 4, 4)) expect_equal(datasetOfRates4$.data$group, c(1, 2, 1, 2, 1, 2, 1, 2)) expect_equal(datasetOfRates4$.data$sampleSize, c(11, 8, 13, 10, 12, 9, 13, 11)) expect_equal(datasetOfRates4$.data$event, c(10, 3, 10, 5, 12, 5, 12, 6)) expect_equal(datasetOfRates4$.data$overallSampleSize, c(11, 8, 24, 18, 36, 27, 49, 38)) expect_equal(datasetOfRates4$.data$overallEvent, c(10, 3, 20, 8, 32, 13, 44, 19)) x <- getMultipleStageResultsForDataset(datasetOfRates4, thetaH0 = 0.99) ## ## Comparison of the results of StageResultsRates object 'x$stageResults1' with expected results ## expect_equal(x$stageResults1$overallTestStatistics, c(-13.397899, -26.707477, -31.300879, -37.503444, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$overallPValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults1$overallEvents1, c(10, 20, 32, 44)) expect_equal(x$stageResults1$overallEvents2, c(3, 8, 13, 19)) 
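# Conversely to the stage-wise case, 'getDataset' is here fed cumulative counts, and
# the stage-wise values checked earlier in this test are their first differences,
# e.g. for group 1:
expect_equal(diff(c(11, 24, 36, 49)), c(13, 12, 13))
expect_equal(diff(c(10, 20, 32, 44)), c(10, 12, 12))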
expect_equal(x$stageResults1$overallSampleSizes1, c(11, 24, 36, 49)) expect_equal(x$stageResults1$overallSampleSizes2, c(8, 18, 27, 38)) expect_equal(x$stageResults1$testStatistics, c(-13.397899, -23.909016, -16.449119, -20.614826, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$pValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(0.53409091, 0.38888889, 0.40740741, 0.39795918, NA_real_), tolerance = 1e-07) ## ## Comparison of the results of StageResultsRates object 'x$stageResults2' with expected results ## expect_equal(x$stageResults2$overallTestStatistics, c(-13.397899, -26.707477, -31.300879, -37.503444, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$overallPValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults2$overallEvents1, c(10, 20, 32, 44)) expect_equal(x$stageResults2$overallEvents2, c(3, 8, 13, 19)) expect_equal(x$stageResults2$overallSampleSizes1, c(11, 24, 36, 49)) expect_equal(x$stageResults2$overallSampleSizes2, c(8, 18, 27, 38)) expect_equal(x$stageResults2$testStatistics, c(-13.397899, -23.909016, -16.449119, -20.614826, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$pValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(0.53409091, 0.38888889, 0.40740741, 0.39795918, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(-Inf, -Inf, -Inf, -Inf, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) ## ## Comparison of the results of StageResultsRates object 'x$stageResults3' with expected results ## expect_equal(x$stageResults3$overallTestStatistics, c(-13.397899, -26.707477, -31.300879, -37.503444, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$overallPValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults3$overallEvents1, c(10, 20, 32, 44)) expect_equal(x$stageResults3$overallEvents2, c(3, 8, 13, 19)) expect_equal(x$stageResults3$overallSampleSizes1, c(11, 24, 36, 49)) expect_equal(x$stageResults3$overallSampleSizes2, c(8, 18, 27, 38)) expect_equal(x$stageResults3$testStatistics, c(-13.397899, -23.909016, -16.449119, -20.614826, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$pValues, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(0.53409091, 0.38888889, 0.40740741, 0.39795918, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(1, 1, 1, 1, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) }) test_that("Creation of a dataset of rates using overall data (three groups)", { datasetOfRates5 <- getDataset( overallSampleSizes1 = c(11, 24, 36, 49), overallSampleSizes2 = c(8, 18, 27, 38), overallSampleSizes3 = c(8, 18, 27, 38), overallEvents1 = c(10, 20, 32, 44), overallEvents2 = c(3, 8, 13, 19), overallEvents3 = c(3, 7, 12, 20) ) ## ## Comparison of the results of DatasetRates object 'datasetOfRates5' with expected results ## expect_equal(datasetOfRates5$stages, c(1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4)) expect_equal(datasetOfRates5$groups, c(1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3)) expect_equal(datasetOfRates5$sampleSizes, c(11, 8, 8, 13, 10, 10, 12, 9, 9, 13, 11, 11)) expect_equal(datasetOfRates5$events, c(10, 3, 3, 10, 5, 4, 12, 5, 5, 12, 6, 8)) expect_equal(datasetOfRates5$overallSampleSizes, c(11, 8, 8, 24, 18, 18, 36, 27, 27, 49, 38, 38)) expect_equal(datasetOfRates5$overallEvents, c(10, 3, 3, 20, 8, 7, 32, 13, 12, 44, 19, 20)) ## ## Comparison of the results of 
data.frame object 'datasetOfRates5$.data' with expected results ## expect_equal(datasetOfRates5$.data$stage, c(1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4)) expect_equal(datasetOfRates5$.data$group, c(1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3)) expect_equal(datasetOfRates5$.data$sampleSize, c(11, 8, 8, 13, 10, 10, 12, 9, 9, 13, 11, 11)) expect_equal(datasetOfRates5$.data$event, c(10, 3, 3, 10, 5, 4, 12, 5, 5, 12, 6, 8)) expect_equal(datasetOfRates5$.data$overallSampleSize, c(11, 8, 8, 24, 18, 18, 36, 27, 27, 49, 38, 38)) expect_equal(datasetOfRates5$.data$overallEvent, c(10, 3, 3, 20, 8, 7, 32, 13, 12, 44, 19, 20)) }) test_that("Creation of a dataset of survival data using stage wise data", { datasetSurvival1 <- getDataset( events = c(8, 7, 4, 12), allocationRatios = c(1, 1, 1, 3.58333333333333), logRanks = c(1.520, 1.273, 0.503, 0.887) ) ## ## Comparison of the results of DatasetSurvival object 'datasetSurvival1' with expected results ## expect_equal(datasetSurvival1$stages, c(1, 2, 3, 4)) expect_equal(datasetSurvival1$groups, c(1, 1, 1, 1)) expect_equal(datasetSurvival1$overallEvents, c(8, 15, 19, 31)) expect_equal(datasetSurvival1$overallAllocationRatios, c(1, 1, 1, 2), tolerance = 1e-07) expect_equal(datasetSurvival1$overallLogRanks, c(1.52, 1.9796756, 1.9897802, 2.1096275), tolerance = 1e-07) expect_equal(datasetSurvival1$events, c(8, 7, 4, 12)) expect_equal(datasetSurvival1$allocationRatios, c(1, 1, 1, 3.5833333), tolerance = 1e-07) expect_equal(datasetSurvival1$logRanks, c(1.52, 1.273, 0.503, 0.887), tolerance = 1e-07) ## ## Comparison of the results of data.frame object 'datasetSurvival1$.data' with expected results ## expect_equal(datasetSurvival1$.data$stage, c(1, 2, 3, 4)) expect_equal(datasetSurvival1$.data$group, c(1, 1, 1, 1)) expect_equal(datasetSurvival1$.data$overallEvent, c(8, 15, 19, 31)) expect_equal(datasetSurvival1$.data$overallAllocationRatio, c(1, 1, 1, 2), tolerance = 1e-07) expect_equal(datasetSurvival1$.data$overallLogRank, c(1.52, 1.9796756, 1.9897802, 2.1096275), tolerance = 1e-07) expect_equal(datasetSurvival1$.data$event, c(8, 7, 4, 12)) expect_equal(datasetSurvival1$.data$allocationRatio, c(1, 1, 1, 3.5833333), tolerance = 1e-07) expect_equal(datasetSurvival1$.data$logRanks, c(1.52, 1.273, 0.503, 0.887), tolerance = 1e-07) x <- getMultipleStageResultsForDataset(datasetSurvival1) ## ## Comparison of the results of StageResultsSurvival object 'x$stageResults1' with expected results ## expect_equal(x$stageResults1$overallLogRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallEvents, c(8, 15, 19, 31, NA_real_)) expect_equal(x$stageResults1$overallAllocationRatios, c(1, 1, 1, 2, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$events, c(8, 7, 4, 12, NA_real_)) expect_equal(x$stageResults1$allocationRatios, c(1, 1, 1, 3.5833333, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$logRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(2.9294137, 2.7795807, 2.4917213, 2.2339445, NA_real_), tolerance = 1e-07) ## ## Comparison of the results of StageResultsSurvival object 'x$stageResults2' with expected results ## expect_equal(x$stageResults2$overallLogRanks, c(NA_real_, NA_real_, NA_real_, 
NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallEvents, c(8, 15, 19, 31, NA_real_)) expect_equal(x$stageResults2$overallAllocationRatios, c(1, 1, 1, 2, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$events, c(8, 7, 4, 12, NA_real_)) expect_equal(x$stageResults2$allocationRatios, c(1, 1, 1, 3.5833333, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$logRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(2.9294137, 2.7795807, 2.4917213, 2.2339445, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) ## ## Comparison of the results of StageResultsSurvival object 'x$stageResults3' with expected results ## expect_equal(x$stageResults3$overallLogRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallEvents, c(8, 15, 19, 31, NA_real_)) expect_equal(x$stageResults3$overallAllocationRatios, c(1, 1, 1, 2, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$events, c(8, 7, 4, 12, NA_real_)) expect_equal(x$stageResults3$allocationRatios, c(1, 1, 1, 3.5833333, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$logRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(2.9294137, 2.7795807, 2.4917213, 2.2339445, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) expect_equal(datasetSurvival1$stages, datasetSurvival1$.data$stage, tolerance = 1e-07) expect_equal(datasetSurvival1$groups, datasetSurvival1$.data$group, tolerance = 1e-07) expect_equal(datasetSurvival1$events, datasetSurvival1$.data$event, tolerance = 1e-07) expect_equal(datasetSurvival1$allocationRatios, datasetSurvival1$.data$allocationRatio, tolerance = 1e-07) expect_equal(datasetSurvival1$logRanks, datasetSurvival1$.data$logRank, tolerance = 1e-07) expect_equal(datasetSurvival1$overallEvents, datasetSurvival1$.data$overallEvent, tolerance = 1e-07) expect_equal(datasetSurvival1$overallAllocationRatios, datasetSurvival1$.data$overallAllocationRatio, tolerance = 1e-07) expect_equal(datasetSurvival1$overallLogRanks, datasetSurvival1$.data$overallLogRank, tolerance = 1e-07) }) test_that("Creation of a dataset of survival data using overall data", { datasetSurvival2 <- getDataset( overallEvents = c(8, 15, 19, 31), overallAllocationRatios = c(1, 1, 1, 2), overallLogRanks = c(1.52, 1.98, 1.99, 2.11) ) ## ## Comparison of the results of DatasetSurvival object 'datasetSurvival2' with expected results ## expect_equal(datasetSurvival2$stages, c(1, 2, 3, 4)) expect_equal(datasetSurvival2$groups, c(1, 1, 1, 1)) 
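# Editorial sketch (added; not part of the original test suite): when a
# survival dataset is created from cumulative ("overall") values,
# getDataset() recovers the stage-wise event counts by first differences of
# the cumulative counts. The helper below is hypothetical and only
# illustrates the relationship that the expectations around this point
# encode (overallEvents c(8, 15, 19, 31) versus events c(8, 7, 4, 12)):
stageWiseEvents <- function(overallEvents) diff(c(0, overallEvents))
stopifnot(identical(stageWiseEvents(c(8, 15, 19, 31)), c(8, 7, 4, 12)))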
expect_equal(datasetSurvival2$overallEvents, c(8, 15, 19, 31)) expect_equal(datasetSurvival2$overallAllocationRatios, c(1, 1, 1, 2)) expect_equal(datasetSurvival2$overallLogRanks, c(1.52, 1.98, 1.99, 2.11), tolerance = 1e-07) expect_equal(datasetSurvival2$events, c(8, 7, 4, 12)) expect_equal(datasetSurvival2$allocationRatios, c(1, 1, 1, 3.5833333), tolerance = 1e-07) expect_equal(datasetSurvival2$logRanks, c(1.52, 1.2734749, 0.50285094, 0.8873221), tolerance = 1e-07) ## ## Comparison of the results of data.frame object 'datasetSurvival2$.data' with expected results ## expect_equal(datasetSurvival2$.data$stage, c(1, 2, 3, 4)) expect_equal(datasetSurvival2$.data$group, c(1, 1, 1, 1)) expect_equal(datasetSurvival2$.data$overallEvent, c(8, 15, 19, 31)) expect_equal(datasetSurvival2$.data$overallAllocationRatio, c(1, 1, 1, 2)) expect_equal(datasetSurvival2$.data$overallLogRank, c(1.52, 1.98, 1.99, 2.11), tolerance = 1e-07) expect_equal(datasetSurvival2$.data$event, c(8, 7, 4, 12)) expect_equal(datasetSurvival2$.data$allocationRatio, c(1, 1, 1, 3.5833333), tolerance = 1e-07) expect_equal(datasetSurvival2$.data$logRanks, c(1.52, 1.2734749, 0.50285094, 0.8873221), tolerance = 1e-07) x <- getMultipleStageResultsForDataset(datasetSurvival2) ## ## Comparison of the results of StageResultsSurvival object 'x$stageResults1' with expected results ## expect_equal(x$stageResults1$overallLogRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallEvents, c(8, 15, 19, 31, NA_real_)) expect_equal(x$stageResults1$overallAllocationRatios, c(1, 1, 1, 2, NA_real_)) expect_equal(x$stageResults1$events, c(8, 7, 4, 12, NA_real_)) expect_equal(x$stageResults1$allocationRatios, c(1, 1, 1, 3.5833333, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$logRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(2.9294137, 2.7800464, 2.4919726, 2.2342616, NA_real_), tolerance = 1e-07) ## ## Comparison of the results of StageResultsSurvival object 'x$stageResults2' with expected results ## expect_equal(x$stageResults2$overallLogRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallEvents, c(8, 15, 19, 31, NA_real_)) expect_equal(x$stageResults2$overallAllocationRatios, c(1, 1, 1, 2, NA_real_)) expect_equal(x$stageResults2$events, c(8, 7, 4, 12, NA_real_)) expect_equal(x$stageResults2$allocationRatios, c(1, 1, 1, 3.5833333, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$logRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(2.9294137, 2.7800464, 2.4919726, 2.2342616, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) ## ## Comparison of the results of 
StageResultsSurvival object 'x$stageResults3' with expected results ## expect_equal(x$stageResults3$overallLogRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallEvents, c(8, 15, 19, 31, NA_real_)) expect_equal(x$stageResults3$overallAllocationRatios, c(1, 1, 1, 2, NA_real_)) expect_equal(x$stageResults3$events, c(8, 7, 4, 12, NA_real_)) expect_equal(x$stageResults3$allocationRatios, c(1, 1, 1, 3.5833333, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$logRanks, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(2.9294137, 2.7800464, 2.4919726, 2.2342616, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) }) context("Testing that 'getDataset' throws exceptions as expected") test_that("Wrong parameter usage of 'getDataset'", { expect_error(getDataset(), "Missing argument: data.frame or data vectors expected", fixed = TRUE) expect_error(getDataset(1), "Illegal argument: all parameters must be named", fixed = TRUE) expect_error(getDataset(n = 1), "Illegal argument: failed to identify dataset type", fixed = TRUE) expect_error(getDataset(1, x = 2), "Illegal argument: all parameters must be named", fixed = TRUE) expect_error(getDataset( overallSampleSizes1 = c(11, 24, 36, 49), overallSampleSizes2 = c(8, 18, 27, 38), overallSampleSizes3 = c(8, 18, 27, 38), overallEvents1 = c(10, 20, 32, 44), overallEvents2 = c(3, 8, 13, 19), overallEvents3 = c(3, 8, 13, 19), overallEvents1 = c(3, 8, 13, 19) ), "Illegal argument: the parameter names must be unique", fixed = TRUE) }) rpact/inst/tests/testthat/test-f_analysis_base_survival.R0000644000176200001440000012407413574172040023532 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 26 November 2019, 10:08:38 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing the analysis survival functionality for the group sequential design") test_that("'getAnalysisResults' for a group sequential design and survival data", { design1 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.6, 1), bindingFutility = F, typeOfDesign = "WT", deltaWT = 0.45, futilityBounds = c(0, 0, 0)) dataExample1 <- getDataset( overallEvents = c(8, 15, 29), overallAllocationRatios = c(1, 1, 1), overallLogRanks = c(1.52, 1.38, 2.9) ) x1 <- getAnalysisResults(design1, dataExample1, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(2.4878815, 2.4031352, 2.3549063, 2.2955206), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(0, 0, 0)) expect_equal(x1$alphaSpent, c(0.0064253267, 0.012720859, 0.01826361, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.0064253267, 0.0081275893, 0.0092636882, 0.010851653), tolerance = 1e-07) expect_equal(x1$effectSizes, c(2.9294137, 2.0393455, 2.9359555, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(1.52, 0.3951648, 2.7453772, NA_real_), tolerance = 1e-07) expect_equal(x1$pValues, c(0.064255488, 0.34636063, 0.003022069, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x1$thetaH0, 1) expect_equal(x1$thetaH1, 2.9359555, tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities, c(0.07432319, 0.044563047, 0.46900287, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.50439514, 0.58958009, 1.2243899, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(17.013382, 7.0540547, 7.0401059, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.16918725, 0.19438137, 0.0054226276, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, 3) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, 0.013371274, NA_real_), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 1.1294538, NA_real_), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 5.6956842, NA_real_), tolerance = 1e-07) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, 2.632639, NA_real_), tolerance = 1e-07) expect_equal(x1$normalApproximation, TRUE) expect_equal(x1$directionUpper, TRUE) expect_equal(x1$overallTestStatistics, c(1.52, 1.38, 2.9, NA_real_), tolerance = 1e-07) expect_equal(x1$overallPValues, c(0.064255488, 0.083793322, 0.0018658133, NA_real_), tolerance = 1e-07) .skipTestifDisabled() x2 <- getAnalysisResults(design1, 
dataExample1, stage = 2, nPlanned = c(20,40), allocationRatioPlanned = 2, thetaH1 = 2, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2, 3, 4)) expect_equal(x2$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(2.4878815, 2.4031352, 2.3549063, 2.2955206), tolerance = 1e-07) expect_equal(x2$futilityBounds, c(0, 0, 0)) expect_equal(x2$alphaSpent, c(0.0064253267, 0.012720859, 0.01826361, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.0064253267, 0.0081275893, 0.0092636882, 0.010851653), tolerance = 1e-07) expect_equal(x2$effectSizes, c(2.9294137, 2.0393455, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(1.52, 0.3951648, 2.7453772, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.064255488, 0.34636063, 0.003022069, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x2$thetaH0, 1) expect_equal(x2$thetaH1, 2) expect_equal(x2$conditionalRejectionProbabilities, c(0.07432319, 0.044563047, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_, 20, 40)) expect_equal(x2$allocationRatioPlanned, 2) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, 0.24122422, 0.76137238), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.50439514, 0.58958009, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(17.013382, 7.0540547, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.16918725, 0.19438137, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, TRUE) expect_equal(x2$overallTestStatistics, c(1.52, 1.38, 2.9, NA_real_), tolerance = 1e-07) expect_equal(x2$overallPValues, c(0.064255488, 0.083793322, 0.0018658133, NA_real_), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x2, thetaRange = seq(1, 2.5, 0.05)) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9, 1.95, 2, 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.042054884, 0.058920703, 0.079860688, 0.10500347, 0.13429067, 0.16748187, 0.20417526, 0.24383962, 0.28585263, 0.32954089, 0.37421781, 0.41921675, 0.46391757, 0.50776612, 0.55028679, 0.59108872, 0.62986668, 0.6663978, 0.70053535, 0.73220037, 0.76137238, 0.78807962, 0.81238956, 0.83439998, 0.85423087, 0.87201737, 0.88790373, 0.90203829, 0.91456948, 0.92564264, 0.93539766), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.38589113, 0.43767503, 0.48942229, 0.54046191, 0.59019718, 0.63811141, 0.6837696, 0.72681695, 0.76697506, 0.8040366, 0.83785893, 0.86835727, 0.89549763, 0.91928996, 0.93978142, 0.95705021, 0.97119988, 0.98235405, 0.99065189, 0.99624395, 0.99928862, 0.99994909, 0.9983907, 
0.99477877, 0.98927675, 0.98204476, 0.97323838, 0.96300773, 0.95149676, 0.93884271, 0.92517582), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "Hazard ratio") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, maximum number of remaining events = 60, allocation ratio = 2") x3 <- getAnalysisResults(design1, dataExample1, thetaH0 = 0.95, stage = 2, nPlanned = c(20, 40), allocationRatioPlanned = 2, thetaH1 = 2, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x3' with expected results ## expect_equal(x3$stages, c(1, 2, 3, 4)) expect_equal(x3$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x3$criticalValues, c(2.4878815, 2.4031352, 2.3549063, 2.2955206), tolerance = 1e-07) expect_equal(x3$futilityBounds, c(0, 0, 0)) expect_equal(x3$alphaSpent, c(0.0064253267, 0.012720859, 0.01826361, 0.025), tolerance = 1e-07) expect_equal(x3$stageLevels, c(0.0064253267, 0.0081275893, 0.0092636882, 0.010851653), tolerance = 1e-07) expect_equal(x3$effectSizes, c(2.9294137, 2.0393455, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$testStatistics, c(1.5925397, 0.46301945, 2.8413382, NA_real_), tolerance = 1e-07) expect_equal(x3$pValues, c(0.055631748, 0.32167521, 0.0022462323, NA_real_), tolerance = 1e-07) expect_equal(x3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x3$thetaH0, 0.95, tolerance = 1e-07) expect_equal(x3$thetaH1, 2) expect_equal(x3$conditionalRejectionProbabilities, c(0.082607165, 0.055558825, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$nPlanned, c(NA_real_, NA_real_, 20, 40)) expect_equal(x3$allocationRatioPlanned, 2) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_, 0.32497202, 0.83762717), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(0.50439514, 0.58958009, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(17.013382, 7.0540547, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.15076802, 0.16617365, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$finalStage, NA_integer_) expect_equal(x3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$normalApproximation, TRUE) expect_equal(x3$directionUpper, TRUE) expect_equal(x3$overallTestStatistics, c(1.5925397, 1.479329, 3.0381114, NA_real_), tolerance = 1e-07) expect_equal(x3$overallPValues, c(0.055631748, 0.069526198, 0.0011903296, NA_real_), tolerance = 1e-07) plotData2 <- testGetAnalysisResultsPlotData(x3, thetaRange = seq(1, 2.5, 0.05)) ## ## Comparison of the results of list object 'plotData2' with expected results ## expect_equal(plotData2$xValues, c(1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9, 1.95, 2, 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(0.073284723, 0.098867221, 0.12923781, 0.16416019, 0.20317699, 0.24565174, 0.29082221, 0.33785714, 0.3859101, 0.43416582, 0.48187662, 0.52838782, 0.57315275, 0.61573859, 0.65582473, 0.69319563, 0.72772995, 0.75938755, 
0.7881956, 0.81423493, 0.83762717, 0.85852313, 0.87709283, 0.89351704, 0.9079804, 0.92066609, 0.93175171, 0.94140636, 0.94978864, 0.95704547, 0.96331147), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(0.38589113, 0.43767503, 0.48942229, 0.54046191, 0.59019718, 0.63811141, 0.6837696, 0.72681695, 0.76697506, 0.8040366, 0.83785893, 0.86835727, 0.89549763, 0.91928996, 0.93978142, 0.95705021, 0.97119988, 0.98235405, 0.99065189, 0.99624395, 0.99928862, 0.99994909, 0.9983907, 0.99477877, 0.98927675, 0.98204476, 0.97323838, 0.96300773, 0.95149676, 0.93884271, 0.92517582), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power Plot with Likelihood") expect_equal(plotData2$xlab, "Hazard ratio") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, maximum number of remaining events = 60, allocation ratio = 2") }) test_that("'getAnalysisResults' for a group sequential design and survival data ('directionUpper' reversed)", { .skipTestifDisabled() design2 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.6, 1), bindingFutility = F, typeOfDesign = "WT", deltaWT = 0.45, futilityBounds = c(0, 0, 0)) dataExample2 <- getDataset( overallEvents = c(8, 15, 29), overallAllocationRatios = c(1, 1, 1), overallLogRanks = -c(1.52, 1.38, 2.9) ) x1 <- getAnalysisResults(design2, dataExample2, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(2.4878815, 2.4031352, 2.3549063, 2.2955206), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(0, 0, 0)) expect_equal(x1$alphaSpent, c(0.0064253267, 0.012720859, 0.01826361, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.0064253267, 0.0081275893, 0.0092636882, 0.010851653), tolerance = 1e-07) expect_equal(x1$effectSizes, c(0.34136523, 0.49035339, 0.34060461, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(-1.52, -0.3951648, -2.7453772, NA_real_), tolerance = 1e-07) expect_equal(x1$pValues, c(0.064255488, 0.34636063, 0.003022069, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x1$thetaH0, 1) expect_equal(x1$thetaH1, 0.34060461, tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities, c(0.07432319, 0.044563047, 0.46900287, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.058777136, 0.14176244, 0.14204332, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(1.9825732, 1.6961224, 0.81673327, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.16918725, 0.19438137, 0.0054226276, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, 3) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, 0.013371274, NA_real_), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.17557153, NA_real_), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.88538374, NA_real_), tolerance = 1e-07) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.379847, NA_real_), tolerance = 
1e-07) expect_equal(x1$normalApproximation, TRUE) expect_equal(x1$directionUpper, FALSE) expect_equal(x1$overallTestStatistics, c(-1.52, -1.38, -2.9, NA_real_), tolerance = 1e-07) expect_equal(x1$overallPValues, c(0.064255488, 0.083793322, 0.0018658133, NA_real_), tolerance = 1e-07) x2 <- getAnalysisResults(design2, dataExample2, thetaH0 = 1.1, stage = 2, nPlanned = c(20, 40), allocationRatioPlanned = 0.5, thetaH1 = 0.5, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsGroupSequential object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2, 3, 4)) expect_equal(x2$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(2.4878815, 2.4031352, 2.3549063, 2.2955206), tolerance = 1e-07) expect_equal(x2$futilityBounds, c(0, 0, 0)) expect_equal(x2$alphaSpent, c(0.0064253267, 0.012720859, 0.01826361, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.0064253267, 0.0081275893, 0.0092636882, 0.010851653), tolerance = 1e-07) expect_equal(x2$effectSizes, c(0.34136523, 0.49035339, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(-1.6547889, -0.52124832, -2.9236862, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.048983658, 0.3010969, 0.0017295662, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x2$thetaH0, 1.1, tolerance = 1e-07) expect_equal(x2$thetaH1, 0.5, tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities, c(0.090339948, 0.066890003, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_, 20, 40)) expect_equal(x2$allocationRatioPlanned, 0.5, tolerance = 1e-07) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, 0.40494574, 0.88883511), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.058777136, 0.14176244, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(1.9825732, 1.6961224, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.13608528, 0.14422583, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, FALSE) expect_equal(x2$overallTestStatistics, c(-1.6547889, -1.5645674, -3.1566305, NA_real_), tolerance = 1e-07) expect_equal(x2$overallPValues, c(0.048983658, 0.058842192, 0.00079801722, NA_real_), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x2, thetaRange = seq(0.4, 1, 0.05)) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.97858552, 0.94519604, 0.88883511, 0.81002306, 0.71447863, 0.61071863, 0.50731205, 0.41100476, 0.32600179, 0.25411912, 0.19536859, 0.14863199, 0.11223419), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.92517582, 0.98626675, 0.99928862, 0.9755955, 0.92648393, 0.86161675, 0.78854281, 0.71277663, 0.63811141, 0.56698955, 0.50084781, 0.44040564, 0.38589113), 
tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "Hazard ratio") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, maximum number of remaining events = 60, allocation ratio = 0.5") }) context("Testing the analysis survival functionality for the inverse normal design") test_that("'getAnalysisResults' for an inverse normal design and survival data", { design3 <- getDesignInverseNormal(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.6, 1), bindingFutility = F, typeOfDesign = "WT", deltaWT = 0.45, futilityBounds = c(0, 0, 0)) dataExample3 <- getDataset( overallEvents = c(8, 15, 29), overallAllocationRatios = c(1, 1, 1), overallLogRanks = c(1.52, 1.38, 2.9) ) x1 <- getAnalysisResults(design3, dataExample3, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(2.4878815, 2.4031352, 2.3549063, 2.2955206), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(0, 0, 0)) expect_equal(x1$alphaSpent, c(0.0064253267, 0.012720859, 0.01826361, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.0064253267, 0.0081275893, 0.0092636882, 0.010851653), tolerance = 1e-07) expect_equal(x1$effectSizes, c(2.9294137, 2.0393455, 2.9359555, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(1.52, 0.3951648, 2.7453772, NA_real_), tolerance = 1e-07) expect_equal(x1$pValues, c(0.064255488, 0.34636063, 0.003022069, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x1$thetaH0, 1) expect_equal(x1$thetaH1, 2.9359555, tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities, c(0.07432319, 0.042056716, 0.36917623, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.50439514, 0.5816096, 1.1345596, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(17.013382, 6.9683119, 6.6631754, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.16918725, 0.20216143, 0.010091808, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, 3) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, 0.014307783, NA_real_), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 1.121428, NA_real_), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 5.6413216, NA_real_), tolerance = 1e-07) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, 2.6253218, NA_real_), tolerance = 1e-07) expect_equal(x1$normalApproximation, TRUE) expect_equal(x1$directionUpper, TRUE) expect_equal(x1$combinationTestStatistics, c(1.52, 1.354226, 2.6907652, NA_real_), tolerance = 1e-07) x2 <- getAnalysisResults(design3, stage = 2, nPlanned = c(20,40), allocationRatioPlanned = 2, thetaH1 = 2, dataExample3, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2, 3, 4)) expect_equal(x2$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) 
expect_equal(x2$criticalValues, c(2.4878815, 2.4031352, 2.3549063, 2.2955206), tolerance = 1e-07) expect_equal(x2$futilityBounds, c(0, 0, 0)) expect_equal(x2$alphaSpent, c(0.0064253267, 0.012720859, 0.01826361, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.0064253267, 0.0081275893, 0.0092636882, 0.010851653), tolerance = 1e-07) expect_equal(x2$effectSizes, c(2.9294137, 2.0393455, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(1.52, 0.3951648, 2.7453772, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.064255488, 0.34636063, 0.003022069, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x2$thetaH0, 1) expect_equal(x2$thetaH1, 2) expect_equal(x2$conditionalRejectionProbabilities, c(0.07432319, 0.042056716, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_, 20, 40)) expect_equal(x2$allocationRatioPlanned, 2) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, 0.24122422, 0.76137238), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.50439514, 0.5816096, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(17.013382, 6.9683119, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.16918725, 0.20216143, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, TRUE) expect_equal(x2$combinationTestStatistics, c(1.52, 1.354226, NA_real_, NA_real_), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x2, thetaRange = seq(1, 2.5, 0.05)) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9, 1.95, 2, 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.042054884, 0.058920703, 0.079860688, 0.10500347, 0.13429067, 0.16748187, 0.20417526, 0.24383962, 0.28585263, 0.32954089, 0.37421781, 0.41921675, 0.46391757, 0.50776612, 0.55028679, 0.59108872, 0.62986668, 0.6663978, 0.70053535, 0.73220037, 0.76137238, 0.78807962, 0.81238956, 0.83439998, 0.85423087, 0.87201737, 0.88790373, 0.90203829, 0.91456948, 0.92564264, 0.93539766), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.38589113, 0.43767503, 0.48942229, 0.54046191, 0.59019718, 0.63811141, 0.6837696, 0.72681695, 0.76697506, 0.8040366, 0.83785893, 0.86835727, 0.89549763, 0.91928996, 0.93978142, 0.95705021, 0.97119988, 0.98235405, 0.99065189, 0.99624395, 0.99928862, 0.99994909, 0.9983907, 0.99477877, 0.98927675, 0.98204476, 0.97323838, 0.96300773, 0.95149676, 0.93884271, 0.92517582), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "Hazard ratio") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, maximum number of remaining events = 60, allocation ratio = 2") x3 <- 
getAnalysisResults(design3, dataExample3, thetaH0 = 0.95, stage = 2, nPlanned = c(20,40), allocationRatioPlanned = 2, thetaH1 = 2, directionUpper = TRUE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x3' with expected results ## expect_equal(x3$stages, c(1, 2, 3, 4)) expect_equal(x3$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x3$criticalValues, c(2.4878815, 2.4031352, 2.3549063, 2.2955206), tolerance = 1e-07) expect_equal(x3$futilityBounds, c(0, 0, 0)) expect_equal(x3$alphaSpent, c(0.0064253267, 0.012720859, 0.01826361, 0.025), tolerance = 1e-07) expect_equal(x3$stageLevels, c(0.0064253267, 0.0081275893, 0.0092636882, 0.010851653), tolerance = 1e-07) expect_equal(x3$effectSizes, c(2.9294137, 2.0393455, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$testStatistics, c(1.5925397, 0.46301945, 2.8413382, NA_real_), tolerance = 1e-07) expect_equal(x3$pValues, c(0.055631748, 0.32167521, 0.0022462323, NA_real_), tolerance = 1e-07) expect_equal(x3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x3$thetaH0, 0.95, tolerance = 1e-07) expect_equal(x3$thetaH1, 2) expect_equal(x3$conditionalRejectionProbabilities, c(0.082607165, 0.052483916, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$nPlanned, c(NA_real_, NA_real_, 20, 40)) expect_equal(x3$allocationRatioPlanned, 2) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_, 0.32497202, 0.83762717), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(0.50439514, 0.5816096, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(17.013382, 6.9683119, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.15076802, 0.17323655, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$finalStage, NA_integer_) expect_equal(x3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$normalApproximation, TRUE) expect_equal(x3$directionUpper, TRUE) expect_equal(x3$combinationTestStatistics, c(1.5925397, 1.4534998, NA_real_, NA_real_), tolerance = 1e-07) plotData2 <- testGetAnalysisResultsPlotData(x3, thetaRange = seq(1, 2.5, 0.05)) ## ## Comparison of the results of list object 'plotData2' with expected results ## expect_equal(plotData2$xValues, c(1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9, 1.95, 2, 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(0.073284723, 0.098867221, 0.12923781, 0.16416019, 0.20317699, 0.24565174, 0.29082221, 0.33785714, 0.3859101, 0.43416582, 0.48187662, 0.52838782, 0.57315275, 0.61573859, 0.65582473, 0.69319563, 0.72772995, 0.75938755, 0.7881956, 0.81423493, 0.83762717, 0.85852313, 0.87709283, 0.89351704, 0.9079804, 0.92066609, 0.93175171, 0.94140636, 0.94978864, 0.95704547, 0.96331147), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(0.38589113, 0.43767503, 0.48942229, 0.54046191, 0.59019718, 0.63811141, 0.6837696, 0.72681695, 0.76697506, 0.8040366, 0.83785893, 0.86835727, 0.89549763, 0.91928996, 0.93978142, 0.95705021, 0.97119988, 0.98235405, 0.99065189, 0.99624395, 0.99928862, 0.99994909, 0.9983907, 0.99477877, 0.98927675, 
0.98204476, 0.97323838, 0.96300773, 0.95149676, 0.93884271, 0.92517582), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power Plot with Likelihood") expect_equal(plotData2$xlab, "Hazard ratio") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, maximum number of remaining events = 60, allocation ratio = 2") }) test_that("'getAnalysisResults' for an inverse normal design and survival data ('directionUpper' reversed)", { .skipTestifDisabled() design4 <- getDesignInverseNormal(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.6, 1), bindingFutility = F, typeOfDesign = "WT", deltaWT = 0.45, futilityBounds = c(0, 0, 0)) dataExample4 <- getDataset( overallEvents = c(8, 15, 29), overallAllocationRatios = c(1, 1, 1), overallLogRanks = -c(1.52, 1.38, 2.9) ) x1 <- getAnalysisResults(design4, dataExample4, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(2.4878815, 2.4031352, 2.3549063, 2.2955206), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(0, 0, 0)) expect_equal(x1$alphaSpent, c(0.0064253267, 0.012720859, 0.01826361, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.0064253267, 0.0081275893, 0.0092636882, 0.010851653), tolerance = 1e-07) expect_equal(x1$effectSizes, c(0.34136523, 0.49035339, 0.34060461, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(-1.52, -0.3951648, -2.7453772, NA_real_), tolerance = 1e-07) expect_equal(x1$pValues, c(0.064255488, 0.34636063, 0.003022069, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x1$thetaH0, 1) expect_equal(x1$thetaH1, 0.34060461, tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities, c(0.07432319, 0.042056716, 0.36917623, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$allocationRatioPlanned, 1) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.058777136, 0.14350678, 0.15007853, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(1.9825732, 1.7193664, 0.88139925, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.16918725, 0.20216143, 0.010091808, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, 3) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, 0.014307783, NA_real_), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.17726343, NA_real_), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.89172021, NA_real_), tolerance = 1e-07) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.38090568, NA_real_), tolerance = 1e-07) expect_equal(x1$normalApproximation, TRUE) expect_equal(x1$directionUpper, FALSE) expect_equal(x1$combinationTestStatistics, c(1.52, 1.354226, 2.6907652, NA_real_), tolerance = 1e-07) x2 <- getAnalysisResults(design4, dataExample4, thetaH0 = 1.1, stage = 2, nPlanned = c(20, 40), allocationRatioPlanned = 0.5, thetaH1 = 0.5, directionUpper = FALSE) ## ## Comparison of the results of AnalysisResultsInverseNormal object 'x2' with expected results ## expect_equal(x2$stages, c(1, 2, 3, 4)) 
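# Editorial note (added): these "reversed" tests mirror the one-sided
# analysis by flipping the sign of the log-rank statistics and setting
# directionUpper = FALSE. For the matched pair with identical thetaH0
# (x1 here versus x1 in the preceding test), the stage-wise p-values and
# conditional rejection probabilities agree exactly, while the effect sizes
# invert on the hazard ratio scale, e.g. 0.34136523 is about 1 / 2.9294137.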
expect_equal(x2$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x2$criticalValues, c(2.4878815, 2.4031352, 2.3549063, 2.2955206), tolerance = 1e-07) expect_equal(x2$futilityBounds, c(0, 0, 0)) expect_equal(x2$alphaSpent, c(0.0064253267, 0.012720859, 0.01826361, 0.025), tolerance = 1e-07) expect_equal(x2$stageLevels, c(0.0064253267, 0.0081275893, 0.0092636882, 0.010851653), tolerance = 1e-07) expect_equal(x2$effectSizes, c(0.34136523, 0.49035339, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$testStatistics, c(-1.6547889, -0.52124832, -2.9236862, NA_real_), tolerance = 1e-07) expect_equal(x2$pValues, c(0.048983658, 0.3010969, 0.0017295662, NA_real_), tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x2$thetaH0, 1.1, tolerance = 1e-07) expect_equal(x2$thetaH1, 0.5, tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities, c(0.090339948, 0.063249751, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$nPlanned, c(NA_real_, NA_real_, 20, 40)) expect_equal(x2$allocationRatioPlanned, 0.5, tolerance = 1e-07) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, 0.40494574, 0.88883511), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.058777136, 0.14350678, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(1.9825732, 1.7193664, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.13608528, 0.15066694, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$normalApproximation, TRUE) expect_equal(x2$directionUpper, FALSE) expect_equal(x2$combinationTestStatistics, c(1.6547889, 1.5386907, NA_real_, NA_real_), tolerance = 1e-07) plotData1 <- testGetAnalysisResultsPlotData(x2, thetaRange = seq(0.4, 1, 0.05)) ## ## Comparison of the results of list object 'plotData1' with expected results ## expect_equal(plotData1$xValues, c(0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.97858552, 0.94519604, 0.88883511, 0.81002306, 0.71447863, 0.61071863, 0.50731205, 0.41100476, 0.32600179, 0.25411912, 0.19536859, 0.14863199, 0.11223419), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.92517582, 0.98626675, 0.99928862, 0.9755955, 0.92648393, 0.86161675, 0.78854281, 0.71277663, 0.63811141, 0.56698955, 0.50084781, 0.44040564, 0.38589113), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power Plot with Likelihood") expect_equal(plotData1$xlab, "Hazard ratio") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, maximum number of remaining events = 60, allocation ratio = 0.5") }) context("Testing the analysis survival functionality for the Fisher design") test_that("'getAnalysisResults' for a Fisher design and 'bindingFutility = TRUE'", { .skipTestifDisabled() design5 <- getDesignFisher(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.6, 1), alpha0Vec = c(0.6,0.5,0.4), bindingFutility = TRUE) dataExample5 <- getDataset( overallEvents = c(8, 15, 29), 
overallAllocationRatios = c(1, 1, 1), overallLogRanks = c(1.52, 1.38, 2) ) x1 <- getAnalysisResults(design5, dataExample5, thetaH1 = 2, allocationRatioPlanned = 2, nPlanned = 50, directionUpper = TRUE, seed = 123456789) ## ## Comparison of the results of AnalysisResultsFisher object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(0.012419362, 0.0016809245, 0.00029441484, 1.8548902e-05), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(0.6, 0.5, 0.4), tolerance = 1e-07) expect_equal(x1$alphaSpent, c(0.012419362, 0.018937437, 0.022642761, 0.025), tolerance = 1e-07) expect_equal(x1$stageLevels, c(0.012419362, 0.012419362, 0.012419362, 0.012419362), tolerance = 1e-07) expect_equal(x1$effectSizes, c(2.9294137, 2.0393455, 2.1017732, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(1.52, 0.3951648, 1.450056, NA_real_), tolerance = 1e-07) expect_equal(x1$pValues, c(0.064255488, 0.34636063, 0.073521457, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(x1$thetaH0, 1) expect_equal(x1$thetaH1, 2) expect_equal(x1$conditionalRejectionProbabilities, c(0.046367462, 0.024190775, 0.042101664, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, NA_real_, 50)) expect_equal(x1$allocationRatioPlanned, 2) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, 0.72028527), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.59937028, 0.5945604, 0.81409304, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(14.31747, 6.9389819, 5.3768854, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.10915739, 0.16855974, 0.081195715, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, NA_integer_) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$normalApproximation, TRUE) expect_equal(x1$directionUpper, TRUE) expect_equal(x1$combinationTestStatistics, c(0.064255488, 0.022255572, 0.0016362621, NA_real_), tolerance = 1e-07) }) test_that("'getAnalysisResults' for a Fisher design and 'bindingFutility = TRUE' ('directionUpper' reversed)", { .skipTestifDisabled() design6 <- getDesignFisher(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.6, 1), alpha0Vec = c(0.6,0.5,0.4), bindingFutility = TRUE) dataExample6 <- getDataset( overallEvents = c(8, 15, 29), overallAllocationRatios = c(1, 1, 1), overallLogRanks = -c(1.52, 1.38, 2) ) x1 <- getAnalysisResults(design6, dataExample6, thetaH1 = 0.5, allocationRatioPlanned = 0.5, nPlanned = 50, directionUpper = FALSE, seed = 123456789) ## ## Comparison of the results of AnalysisResultsFisher object 'x1' with expected results ## expect_equal(x1$stages, c(1, 2, 3, 4)) expect_equal(x1$informationRates, c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) expect_equal(x1$criticalValues, c(0.012419362, 0.0016809245, 0.00029441484, 1.8548902e-05), tolerance = 1e-07) expect_equal(x1$futilityBounds, c(0.6, 0.5, 0.4), tolerance = 1e-07) expect_equal(x1$alphaSpent, c(0.012419362, 0.018937437, 0.022642761, 0.025), tolerance = 1e-07) 
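# Editorial sketch (added; assumption inferred from the values asserted in
# these Fisher design tests rather than stated in them): with weightsFisher
# all equal to 1, the Fisher combination test statistic at stage k is the
# product of the stage-wise p-values, compared against criticalValues[k] on
# the p-value product scale. The first two p-values reproduce the second
# entry of 'combinationTestStatistics' expected further below:
stopifnot(abs(prod(c(0.064255488, 0.34636063)) - 0.022255572) < 1e-06)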
expect_equal(x1$stageLevels, c(0.012419362, 0.012419362, 0.012419362, 0.012419362), tolerance = 1e-07) expect_equal(x1$effectSizes, c(0.34136523, 0.49035339, 0.47578874, NA_real_), tolerance = 1e-07) expect_equal(x1$testStatistics, c(-1.52, -0.3951648, -1.450056, NA_real_), tolerance = 1e-07) expect_equal(x1$pValues, c(0.064255488, 0.34636063, 0.073521457, NA_real_), tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(x1$thetaH0, 1) expect_equal(x1$thetaH1, 0.5, tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities, c(0.046367462, 0.024190775, 0.042101664, NA_real_), tolerance = 1e-07) expect_equal(x1$nPlanned, c(NA_real_, NA_real_, NA_real_, 50)) expect_equal(x1$allocationRatioPlanned, 0.5, tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, 0.72028527), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.069844861, 0.14411336, 0.18598127, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(1.6684179, 1.6819149, 1.2283608, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.10915739, 0.16855974, 0.081195715, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, NA_integer_) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$normalApproximation, TRUE) expect_equal(x1$directionUpper, FALSE) expect_equal(x1$combinationTestStatistics, c(0.064255488, 0.022255572, 0.0016362621, NA_real_), tolerance = 1e-07) }) rpact/inst/tests/testthat/test-f_simulation_means.R0000644000176200001440000007755013567165663022356 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 06 November 2019, 17:12:56 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing simulation means function") test_that("'getSimulationMeans': several configurations", { .skipTestifDisabled() # @refFS[Sec.]{fs:subsec:seed} maxNumberOfIterations <- 100 seed <- 99123 options(width = 180) maxNumberOfSubjects <- 90 informationRates <- c(0.2, 0.5, 1) plannedSubjects <- round(informationRates * maxNumberOfSubjects) x1 <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = TRUE, thetaH0 = 0.4, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5, seed = seed) ## ## Comparison of the results of SimulationResultsMeans object 'x1' with expected results ## expect_equal(x1$effect, c(-0.4, -0.2, 0, 0.2, 0.4, 0.6), tolerance = 1e-07) expect_equal(x1$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x1$iterations[2, ], c(42, 49, 68, 87, 94, 97)) expect_equal(x1$iterations[3, ], c(4, 9, 23, 43, 65, 68)) expect_equal(x1$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x1$sampleSizes[2, ], c(27, 27, 27, 27, 27, 27)) expect_equal(x1$sampleSizes[3, ], c(45, 45, 45, 45, 45, 45)) expect_equal(x1$rejectPerStage[1, ], c(0, 0, 0, 0, 0, 0)) expect_equal(x1$rejectPerStage[2, ], c(0, 0, 0, 0.01, 0.09, 0.23), tolerance = 1e-07) expect_equal(x1$rejectPerStage[3, ], c(0, 0, 0.01, 0.13, 0.33, 0.53), tolerance = 1e-07) expect_equal(x1$overallReject, c(0, 0, 0.01, 0.14, 0.42, 0.76), tolerance = 1e-07) expect_equal(x1$futilityPerStage[1, ], c(0.58, 0.51, 0.32, 0.13, 0.06, 0.03), tolerance = 1e-07) expect_equal(x1$futilityPerStage[2, ], c(0.38, 0.4, 0.45, 0.43, 0.2, 0.06), tolerance = 1e-07) expect_equal(x1$futilityStop, c(0.96, 0.91, 0.77, 0.56, 0.26, 0.09), tolerance = 1e-07) expect_equal(x1$earlyStop, c(0.96, 0.91, 0.77, 0.57, 0.35, 0.32), tolerance = 1e-07) expect_equal(x1$expectedNumberOfSubjects, c(31.14, 35.28, 46.71, 60.84, 72.63, 74.79), tolerance = 1e-07) expect_equal(x1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$conditionalPowerAchieved[2, ], c(0.017557086, 0.030814475, 0.058601262, 0.09027436, 0.17816715, 0.24070046), tolerance = 1e-07) expect_equal(x1$conditionalPowerAchieved[3, ], c(0.10771631, 0.32388388, 0.32415334, 0.38125404, 0.51933559, 0.59400955), tolerance = 1e-07) x2 <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = FALSE, thetaH0 = 0.2, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5, seed = seed) ## ## Comparison of the results of SimulationResultsMeans object 'x2' with expected results ## expect_equal(x2$effect, c(-0.2, 0, 0.2, 0.4, 0.6, 0.8), tolerance = 1e-07) expect_equal(x2$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x2$iterations[2, ], c(63, 73, 84, 83, 89, 96)) expect_equal(x2$iterations[3, ], c(15, 24, 
42, 53, 69, 76)) expect_equal(x2$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x2$sampleSizes[2, ], c(27, 27, 27, 27, 27, 27)) expect_equal(x2$sampleSizes[3, ], c(45, 45, 45, 45, 45, 45)) expect_equal(x2$rejectPerStage[1, ], c(0, 0, 0, 0, 0, 0)) expect_equal(x2$rejectPerStage[2, ], c(0, 0, 0.02, 0.03, 0.06, 0.1), tolerance = 1e-07) expect_equal(x2$rejectPerStage[3, ], c(0, 0.02, 0.05, 0.15, 0.27, 0.43), tolerance = 1e-07) expect_equal(x2$overallReject, c(0, 0.02, 0.07, 0.18, 0.33, 0.53), tolerance = 1e-07) expect_equal(x2$futilityPerStage[1, ], c(0.37, 0.27, 0.16, 0.17, 0.11, 0.04), tolerance = 1e-07) expect_equal(x2$futilityPerStage[2, ], c(0.48, 0.49, 0.4, 0.27, 0.14, 0.1), tolerance = 1e-07) expect_equal(x2$futilityStop, c(0.85, 0.76, 0.56, 0.44, 0.25, 0.14), tolerance = 1e-07) expect_equal(x2$earlyStop, c(0.85, 0.76, 0.58, 0.47, 0.31, 0.24), tolerance = 1e-07) expect_equal(x2$expectedNumberOfSubjects, c(41.76, 48.51, 59.58, 64.26, 73.08, 78.12), tolerance = 1e-07) expect_equal(x2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$conditionalPowerAchieved[2, ], c(0.056595809, 0.082243527, 0.1171868, 0.14183443, 0.20192022, 0.18371302), tolerance = 1e-07) expect_equal(x2$conditionalPowerAchieved[3, ], c(0.36165449, 0.31543938, 0.36771185, 0.4758946, 0.54527876, 0.61204049), tolerance = 1e-07) x3 <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 1, thetaH0 = 0.2, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, stDev = 1.5, seed = seed) ## ## Comparison of the results of SimulationResultsMeans object 'x3' with expected results ## expect_equal(x3$effect, c(-0.2, 0, 0.2, 0.4, 0.6, 0.8), tolerance = 1e-07) expect_equal(x3$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x3$iterations[2, ], c(50, 71, 87, 96, 97, 99)) expect_equal(x3$iterations[3, ], c(9, 21, 63, 67, 49, 29)) expect_equal(x3$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x3$sampleSizes[2, ], c(27, 27, 27, 27, 27, 27)) expect_equal(x3$sampleSizes[3, ], c(45, 45, 45, 45, 45, 45)) expect_equal(x3$rejectPerStage[1, ], c(0, 0, 0, 0, 0, 0.01), tolerance = 1e-07) expect_equal(x3$rejectPerStage[2, ], c(0, 0, 0.03, 0.21, 0.47, 0.7), tolerance = 1e-07) expect_equal(x3$rejectPerStage[3, ], c(0, 0.02, 0.18, 0.38, 0.47, 0.29), tolerance = 1e-07) expect_equal(x3$overallReject, c(0, 0.02, 0.21, 0.59, 0.94, 1), tolerance = 1e-07) expect_equal(x3$futilityPerStage[1, ], c(0.5, 0.29, 0.13, 0.04, 0.03, 0), tolerance = 1e-07) expect_equal(x3$futilityPerStage[2, ], c(0.41, 0.5, 0.21, 0.08, 0.01, 0), tolerance = 1e-07) expect_equal(x3$futilityStop, c(0.91, 0.79, 0.34, 0.12, 0.04, 0), tolerance = 1e-07) expect_equal(x3$earlyStop, c(0.91, 0.79, 0.37, 0.33, 0.51, 0.71), tolerance = 1e-07) expect_equal(x3$expectedNumberOfSubjects, c(35.55, 46.62, 69.84, 74.07, 66.24, 57.78), tolerance = 1e-07) expect_equal(x3$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$conditionalPowerAchieved[2, ], c(0.047252355, 0.074094582, 0.18424333, 0.30402818, 0.54078356, 0.67131653), tolerance = 1e-07) expect_equal(x3$conditionalPowerAchieved[3, ], c(0.27249296, 0.30454177, 0.45212728, 0.62638376, 0.84307565, 0.91215549), tolerance = 1e-07) x4 <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = TRUE, thetaH0 = 1.1, 
plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5, directionUpper = FALSE, seed = seed) ## ## Comparison of the results of SimulationResultsMeans object 'x4' with expected results ## expect_equal(x4$effect, c(-1.1, -0.9, -0.7, -0.5, -0.3, -0.1), tolerance = 1e-07) expect_equal(x4$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x4$iterations[2, ], c(95, 97, 88, 83, 82, 80)) expect_equal(x4$iterations[3, ], c(74, 76, 68, 55, 50, 41)) expect_equal(x4$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x4$sampleSizes[2, ], c(27, 27, 27, 27, 27, 27)) expect_equal(x4$sampleSizes[3, ], c(45, 45, 45, 45, 45, 45)) expect_equal(x4$rejectPerStage[1, ], c(0, 0, 0, 0, 0, 0)) expect_equal(x4$rejectPerStage[2, ], c(0.16, 0.12, 0.06, 0.06, 0.01, 0), tolerance = 1e-07) expect_equal(x4$rejectPerStage[3, ], c(0.56, 0.52, 0.38, 0.2, 0.11, 0.04), tolerance = 1e-07) expect_equal(x4$overallReject, c(0.72, 0.64, 0.44, 0.26, 0.12, 0.04), tolerance = 1e-07) expect_equal(x4$futilityPerStage[1, ], c(0.05, 0.03, 0.12, 0.17, 0.18, 0.2), tolerance = 1e-07) expect_equal(x4$futilityPerStage[2, ], c(0.05, 0.09, 0.14, 0.22, 0.31, 0.39), tolerance = 1e-07) expect_equal(x4$futilityStop, c(0.1, 0.12, 0.26, 0.39, 0.49, 0.59), tolerance = 1e-07) expect_equal(x4$earlyStop, c(0.26, 0.24, 0.32, 0.45, 0.5, 0.59), tolerance = 1e-07) expect_equal(x4$expectedNumberOfSubjects, c(76.95, 78.39, 72.36, 65.16, 62.64, 58.05), tolerance = 1e-07) expect_equal(x4$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$conditionalPowerAchieved[2, ], c(0.32767401, 0.23178095, 0.2071599, 0.2070829, 0.10752485, 0.096294166), tolerance = 1e-07) expect_equal(x4$conditionalPowerAchieved[3, ], c(0.75737536, 0.64651318, 0.56642877, 0.51397128, 0.44717442, 0.36357098), tolerance = 1e-07) x5 <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = FALSE, thetaH0 = 1.1, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5, directionUpper = FALSE, seed = seed) ## ## Comparison of the results of SimulationResultsMeans object 'x5' with expected results ## expect_equal(x5$effect, c(-1.1, -0.9, -0.7, -0.5, -0.3, -0.1), tolerance = 1e-07) expect_equal(x5$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x5$iterations[2, ], c(98, 96, 88, 84, 82, 79)) expect_equal(x5$iterations[3, ], c(77, 74, 69, 58, 54, 43)) expect_equal(x5$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x5$sampleSizes[2, ], c(27, 27, 27, 27, 27, 27)) expect_equal(x5$sampleSizes[3, ], c(45, 45, 45, 45, 45, 45)) expect_equal(x5$rejectPerStage[1, ], c(0, 0, 0, 0, 0, 0)) expect_equal(x5$rejectPerStage[2, ], c(0.19, 0.14, 0.08, 0.06, 0, 0), tolerance = 1e-07) expect_equal(x5$rejectPerStage[3, ], c(0.59, 0.57, 0.43, 0.21, 0.13, 0.04), tolerance = 1e-07) expect_equal(x5$overallReject, c(0.78, 0.71, 0.51, 0.27, 0.13, 0.04), tolerance = 1e-07) expect_equal(x5$futilityPerStage[1, ], c(0.02, 0.04, 0.12, 0.16, 0.18, 0.21), tolerance = 1e-07) expect_equal(x5$futilityPerStage[2, ], c(0.02, 0.08, 0.11, 0.2, 0.28, 0.36), tolerance = 1e-07) expect_equal(x5$futilityStop, c(0.04, 0.12, 0.23, 0.36, 0.46, 0.57), tolerance = 1e-07) expect_equal(x5$earlyStop, c(0.23, 0.26, 0.31, 0.42, 0.46, 0.57), tolerance = 1e-07) expect_equal(x5$expectedNumberOfSubjects, c(79.11, 77.22, 72.81, 66.78, 64.44, 
58.68), tolerance = 1e-07) expect_equal(x5$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$conditionalPowerAchieved[2, ], c(0.33588936, 0.25194744, 0.19824827, 0.19178721, 0.11444971, 0.092566355), tolerance = 1e-07) expect_equal(x5$conditionalPowerAchieved[3, ], c(0.74226501, 0.69902839, 0.55641803, 0.50033698, 0.45636572, 0.33236099), tolerance = 1e-07) x6 <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 1, thetaH0 = 0.8, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, stDev = 1.5, directionUpper = FALSE, seed = seed) ## ## Comparison of the results of SimulationResultsMeans object 'x6' with expected results ## expect_equal(x6$effect, c(-0.8, -0.6, -0.4, -0.2, 0, 0.2), tolerance = 1e-07) expect_equal(x6$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x6$iterations[2, ], c(100, 99, 96, 81, 70, 49)) expect_equal(x6$iterations[3, ], c(22, 43, 75, 57, 27, 7)) expect_equal(x6$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x6$sampleSizes[2, ], c(27, 27, 27, 27, 27, 27)) expect_equal(x6$sampleSizes[3, ], c(45, 45, 45, 45, 45, 45)) expect_equal(x6$rejectPerStage[1, ], c(0, 0, 0, 0, 0, 0)) expect_equal(x6$rejectPerStage[2, ], c(0.78, 0.56, 0.13, 0.05, 0, 0), tolerance = 1e-07) expect_equal(x6$rejectPerStage[3, ], c(0.22, 0.4, 0.53, 0.21, 0.02, 0), tolerance = 1e-07) expect_equal(x6$overallReject, c(1, 0.96, 0.66, 0.26, 0.02, 0), tolerance = 1e-07) expect_equal(x6$futilityPerStage[1, ], c(0, 0.01, 0.04, 0.19, 0.3, 0.51), tolerance = 1e-07) expect_equal(x6$futilityPerStage[2, ], c(0, 0, 0.08, 0.19, 0.43, 0.42), tolerance = 1e-07) expect_equal(x6$futilityStop, c(0, 0.01, 0.12, 0.38, 0.73, 0.93), tolerance = 1e-07) expect_equal(x6$earlyStop, c(0.78, 0.57, 0.25, 0.43, 0.73, 0.93), tolerance = 1e-07) expect_equal(x6$expectedNumberOfSubjects, c(54.9, 64.08, 77.67, 65.52, 49.05, 34.38), tolerance = 1e-07) expect_equal(x6$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$conditionalPowerAchieved[2, ], c(0.67267344, 0.52857476, 0.27194206, 0.18361852, 0.064769395, 0.04670856), tolerance = 1e-07) expect_equal(x6$conditionalPowerAchieved[3, ], c(0.81011604, 0.77276452, 0.65795757, 0.50391481, 0.35327029, 0.24591214), tolerance = 1e-07) x7 <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 1, thetaH0 = -0.2, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, stDev = 3.5, alternative = seq(-1.2,-0.2,0.2), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10,10,10), maxNumberOfSubjectsPerStage = c(100,100,100), directionUpper = FALSE, seed = seed) ## ## Comparison of the results of SimulationResultsMeans object 'x7' with expected results ## expect_equal(x7$effect, c(-1, -0.8, -0.6, -0.4, -0.2, 0), tolerance = 1e-07) expect_equal(x7$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x7$iterations[2, ], c(93, 97, 88, 78, 78, 74)) expect_equal(x7$iterations[3, ], c(52, 77, 69, 57, 51, 35)) expect_equal(x7$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x7$sampleSizes[2, ], c(74.918717, 83.151367, 90.734126, 88.517379, 94.605927, 95.502536), tolerance = 1e-07) expect_equal(x7$sampleSizes[3, ], c(34.779445, 56.130993, 68.133125, 83.503922, 92.63947, 93.575595), tolerance = 1e-07) expect_equal(x7$rejectPerStage[1, ], c(0, 
0, 0, 0, 0, 0)) expect_equal(x7$rejectPerStage[2, ], c(0.4, 0.19, 0.12, 0.07, 0, 0), tolerance = 1e-07) expect_equal(x7$rejectPerStage[3, ], c(0.41, 0.63, 0.47, 0.25, 0.12, 0.03), tolerance = 1e-07) expect_equal(x7$overallReject, c(0.81, 0.82, 0.59, 0.32, 0.12, 0.03), tolerance = 1e-07) expect_equal(x7$futilityPerStage[1, ], c(0.07, 0.03, 0.12, 0.22, 0.22, 0.26), tolerance = 1e-07) expect_equal(x7$futilityPerStage[2, ], c(0.01, 0.01, 0.07, 0.14, 0.27, 0.39), tolerance = 1e-07) expect_equal(x7$futilityStop, c(0.08, 0.04, 0.19, 0.36, 0.49, 0.65), tolerance = 1e-07) expect_equal(x7$earlyStop, c(0.48, 0.23, 0.31, 0.43, 0.49, 0.65), tolerance = 1e-07) expect_equal(x7$expectedNumberOfSubjects, c(105.75972, 141.87769, 144.85789, 134.64079, 139.03875, 121.42333), tolerance = 1e-07) expect_equal(x7$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$conditionalPowerAchieved[2, ], c(0.48960058, 0.35501907, 0.33230293, 0.3239724, 0.20164899, 0.17099815), tolerance = 1e-07) expect_equal(x7$conditionalPowerAchieved[3, ], c(0.75975737, 0.70067902, 0.61722401, 0.51061814, 0.40378864, 0.28388391), tolerance = 1e-07) x8 <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5)), groups = 2, meanRatio = FALSE, thetaH0 = -0.1, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 3.5, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10,40,40), maxNumberOfSubjectsPerStage = c(100,400,400), seed = seed) ## ## Comparison of the results of SimulationResultsMeans object 'x8' with expected results ## expect_equal(x8$effect, c(0.1, 0.3, 0.5, 0.7, 0.9, 1.1), tolerance = 1e-07) expect_equal(x8$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x8$iterations[2, ], c(75, 75, 82, 77, 85, 88)) expect_equal(x8$iterations[3, ], c(32, 45, 59, 64, 62, 66)) expect_equal(x8$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x8$sampleSizes[2, ], c(312.537, 315.47118, 298.71109, 298.99103, 273.70172, 271.45585), tolerance = 1e-07) expect_equal(x8$sampleSizes[3, ], c(337.66315, 320.33174, 340.67902, 295.14071, 245.92316, 230.91095), tolerance = 1e-07) expect_equal(x8$rejectPerStage[1, ], c(0, 0, 0, 0.01, 0, 0), tolerance = 1e-07) expect_equal(x8$rejectPerStage[2, ], c(0.02, 0, 0.05, 0.08, 0.21, 0.21), tolerance = 1e-07) expect_equal(x8$rejectPerStage[3, ], c(0.02, 0.06, 0.22, 0.27, 0.42, 0.5), tolerance = 1e-07) expect_equal(x8$overallReject, c(0.04, 0.06, 0.27, 0.36, 0.63, 0.71), tolerance = 1e-07) expect_equal(x8$futilityPerStage[1, ], c(0.25, 0.25, 0.18, 0.22, 0.15, 0.12), tolerance = 1e-07) expect_equal(x8$futilityPerStage[2, ], c(0.41, 0.3, 0.18, 0.05, 0.02, 0.01), tolerance = 1e-07) expect_equal(x8$futilityStop, c(0.66, 0.55, 0.36, 0.27, 0.17, 0.13), tolerance = 1e-07) expect_equal(x8$earlyStop, c(0.68, 0.55, 0.41, 0.36, 0.38, 0.34), tolerance = 1e-07) expect_equal(x8$expectedNumberOfSubjects, c(360.45496, 398.75267, 463.94372, 437.11315, 403.11882, 409.28238), tolerance = 1e-07) expect_equal(x8$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$conditionalPowerAchieved[2, ], c(0.31465275, 0.38687556, 0.41716705, 0.36457183, 0.46957137, 0.48650775), tolerance = 1e-07) expect_equal(x8$conditionalPowerAchieved[3, ], c(0.36402168, 0.44332107, 0.47182355, 0.52975853, 0.68482255, 0.64923586), tolerance = 1e-07) x9 <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5)), 
groups = 2, meanRatio = TRUE, thetaH0 = 1.6, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5, alternative = seq(0.8,1.6,0.2), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(50,50,50), maxNumberOfSubjectsPerStage = c(400,400,400), directionUpper = FALSE, seed = seed) ## ## Comparison of the results of SimulationResultsMeans object 'x9' with expected results ## expect_equal(x9$effect, c(-0.8, -0.6, -0.4, -0.2, 0), tolerance = 1e-07) expect_equal(x9$iterations[1, ], c(100, 100, 100, 100, 100)) expect_equal(x9$iterations[2, ], c(84, 86, 78, 67, 70)) expect_equal(x9$iterations[3, ], c(53, 62, 62, 45, 23)) expect_equal(x9$sampleSizes[1, ], c(18, 18, 18, 18, 18)) expect_equal(x9$sampleSizes[2, ], c(257.73836, 278.8361, 303.27301, 306.23977, 339.30408), tolerance = 1e-07) expect_equal(x9$sampleSizes[3, ], c(153.57289, 230.09947, 313.53643, 325.28234, 342.27563), tolerance = 1e-07) expect_equal(x9$rejectPerStage[1, ], c(0, 0, 0, 0.01, 0), tolerance = 1e-07) expect_equal(x9$rejectPerStage[2, ], c(0.3, 0.21, 0.06, 0.03, 0.02), tolerance = 1e-07) expect_equal(x9$rejectPerStage[3, ], c(0.44, 0.45, 0.39, 0.09, 0.01), tolerance = 1e-07) expect_equal(x9$overallReject, c(0.74, 0.66, 0.45, 0.13, 0.03), tolerance = 1e-07) expect_equal(x9$futilityPerStage[1, ], c(0.16, 0.14, 0.22, 0.32, 0.3), tolerance = 1e-07) expect_equal(x9$futilityPerStage[2, ], c(0.01, 0.03, 0.1, 0.19, 0.45), tolerance = 1e-07) expect_equal(x9$futilityStop, c(0.17, 0.17, 0.32, 0.51, 0.75), tolerance = 1e-07) expect_equal(x9$earlyStop, c(0.47, 0.38, 0.38, 0.55, 0.77), tolerance = 1e-07) expect_equal(x9$expectedNumberOfSubjects, c(315.89385, 400.46072, 448.94553, 369.5577, 334.23625), tolerance = 1e-07) expect_equal(x9$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x9$conditionalPowerAchieved[2, ], c(0.63427493, 0.5497222, 0.49353349, 0.49721138, 0.34872602), tolerance = 1e-07) expect_equal(x9$conditionalPowerAchieved[3, ], c(0.84712236, 0.77882668, 0.6178067, 0.51284066, 0.41825576), tolerance = 1e-07) myStageSubjects <- function(..., stage, thetaH0, allocationRatioPlanned, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, sampleSizesPerStage, thetaStandardized, conditionalPower, conditionalCriticalValue) { mult <- 1 if (stage == 2){ stageSubjects <- (1 + 1/allocationRatioPlanned + thetaH0^2 * (1 + allocationRatioPlanned))* (max(0, conditionalCriticalValue + stats::qnorm(conditionalPower)))^2 * mult / (max(1e-12, thetaStandardized))^2 stageSubjects <- min(max(minNumberOfSubjectsPerStage[stage], stageSubjects), maxNumberOfSubjectsPerStage[stage]) } else { stageSubjects <- sampleSizesPerStage[stage - 1] } return(stageSubjects) } x10 <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(0.5,0.5)), groups = 2, meanRatio = TRUE, thetaH0 = 1.6, plannedSubjects = c(80,160,240), maxNumberOfIterations = maxNumberOfIterations, stDev = 1.5, alternative = seq(0.8,1.6,0.2), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10,40,40), maxNumberOfSubjectsPerStage = c(200,400,400), allocationRatioPlanned = 3, directionUpper = FALSE, seed = seed, calcSubjectsFunction = myStageSubjects) ## ## Comparison of the results of SimulationResultsMeans object 'x10' with expected results ## expect_equal(x10$effect, c(-0.8, -0.6, -0.4, -0.2, 0), tolerance = 1e-07) expect_equal(x10$iterations[1, ], c(100, 100, 100, 100, 100)) expect_equal(x10$iterations[2, ], c(76, 73, 61, 39, 31)) 
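# 'x10' exercises the 'calcSubjectsFunction' interface: from stage 2 onwards, the
# user-defined 'myStageSubjects' above recalculates the stage-wise sample size
# from the conditional critical value and the targeted conditional power of 0.8,
# truncated to 'minNumberOfSubjectsPerStage' and 'maxNumberOfSubjectsPerStage'.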
expect_equal(x10$iterations[3, ], c(26, 36, 49, 28, 25)) expect_equal(x10$sampleSizes[1, ], c(80, 80, 80, 80, 80)) expect_equal(x10$sampleSizes[2, ], c(226.65982, 237.82641, 309.41238, 322.48967, 279.64545), tolerance = 1e-07) expect_equal(x10$sampleSizes[3, ], c(192.90106, 242.76454, 304.34981, 316.96502, 285.17103), tolerance = 1e-07) expect_equal(x10$rejectPerStage[1, ], c(0.01, 0, 0, 0, 0), tolerance = 1e-07) expect_equal(x10$rejectPerStage[2, ], c(0.5, 0.37, 0.11, 0.04, 0.02), tolerance = 1e-07) expect_equal(x10$rejectPerStage[3, ], c(0.23, 0.26, 0.27, 0.07, 0.01), tolerance = 1e-07) expect_equal(x10$overallReject, c(0.74, 0.63, 0.38, 0.11, 0.03), tolerance = 1e-07) expect_equal(x10$futilityPerStage[1, ], c(0.23, 0.27, 0.39, 0.61, 0.69), tolerance = 1e-07) expect_equal(x10$futilityPerStage[2, ], c(0, 0, 0.01, 0.07, 0.04), tolerance = 1e-07) expect_equal(x10$futilityStop, c(0.23, 0.27, 0.4, 0.68, 0.73), tolerance = 1e-07) expect_equal(x10$earlyStop, c(0.74, 0.64, 0.51, 0.72, 0.75), tolerance = 1e-07) expect_equal(x10$expectedNumberOfSubjects, c(302.41574, 341.00851, 417.87296, 294.52118, 237.98285), tolerance = 1e-07) expect_equal(x10$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x10$conditionalPowerAchieved[2, ], c(0.83285854, 0.79260753, 0.67563397, 0.60585275, 0.66737426), tolerance = 1e-07) expect_equal(x10$conditionalPowerAchieved[3, ], c(0.84091805, 0.78999444, 0.60898256, 0.53740242, 0.36130254), tolerance = 1e-07) #x <- getSimulationRates(getDesignGroupSequential(),plannedSubjects = c(33,67,100)) #y <- getPowerRates(getDesignGroupSequential(),maxNumberOfSubjects = 100) # #plot(x, type = 5) #plot(y, type = 5) #options(width = 180) #maxNumberOfSubjects <- 200 #informationRates <- c(0.2,0.5,1) #plannedSubjects <- round(informationRates*maxNumberOfSubjects) #maxNumberOfIterations <- 10000 # #x <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = TRUE, thetaH0 = 0.4, # plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5) #y <- getPowerMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = TRUE, thetaH0 = 0.4, # maxNumberOfSubjects = maxNumberOfSubjects, allocationRatioPlanned = 3, stDev = 1.5, normalApproximation = TRUE) #round((x$expectedNumberOfSubjects - y$expectedNumberOfSubjects)/maxNumberOfSubjects,4) #round(x$overallReject - y$overallReject,4) #round(x$futilityStop - y$futilityStop,4) #x$overallReject # #x <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = FALSE, thetaH0 = 0.2, # plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5) #y <- getPowerMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = FALSE, thetaH0 = 0.2, # maxNumberOfSubjects = maxNumberOfSubjects, allocationRatioPlanned = 3, stDev = 1.5, normalApproximation = TRUE) #round((x$expectedNumberOfSubjects - y$expectedNumberOfSubjects)/maxNumberOfSubjects,4) #round(x$overallReject - y$overallReject,4) #round(x$futilityStop - y$futilityStop,4) #x$overallReject # #x <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 1, 
thetaH0 = 0.2, # plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, stDev = 1.5) #y <- getPowerMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 1, thetaH0 = 0.2, # maxNumberOfSubjects = maxNumberOfSubjects, stDev = 1.5, normalApproximation = TRUE) #round((x$expectedNumberOfSubjects - y$expectedNumberOfSubjects)/maxNumberOfSubjects,4) #round(x$overallReject - y$overallReject,4) #round(x$futilityStop - y$futilityStop,4) #x$overallReject # #x <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = TRUE, thetaH0 = 1.1, # plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5, directionUpper = FALSE) #y <- getPowerMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = TRUE, thetaH0 = 1.1, # maxNumberOfSubjects = maxNumberOfSubjects, allocationRatioPlanned = 3, stDev = 1.5, normalApproximation = TRUE, directionUpper = FALSE) #round((x$expectedNumberOfSubjects - y$expectedNumberOfSubjects)/maxNumberOfSubjects,4) #round(x$overallReject - y$overallReject,4) #round(x$futilityStop - y$futilityStop,4) #x$overallReject # #x <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = FALSE, thetaH0 = 1.1, # plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5, directionUpper = FALSE) #y <- getPowerMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 2, meanRatio = FALSE, thetaH0 = 1.1, # maxNumberOfSubjects = maxNumberOfSubjects, allocationRatioPlanned = 3, stDev = 1.5, normalApproximation = TRUE, directionUpper = FALSE) #round((x$expectedNumberOfSubjects - y$expectedNumberOfSubjects)/maxNumberOfSubjects,4) #round(x$overallReject - y$overallReject,4) #round(x$futilityStop - y$futilityStop,4) #x$overallReject # #x <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 1, thetaH0 = 0.8, # plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, stDev = 1.5, directionUpper = FALSE) #y <- getPowerMeans(design = getDesignInverseNormal(futilityBounds = c(-0.5,0.5), informationRates = informationRates), groups = 1, thetaH0 = 0.8, # maxNumberOfSubjects = maxNumberOfSubjects, stDev = 1.5, normalApproximation = TRUE, directionUpper = FALSE) #round((x$expectedNumberOfSubjects - y$expectedNumberOfSubjects)/maxNumberOfSubjects,4) #round(x$overallReject - y$overallReject,4) #round(x$futilityStop - y$futilityStop,4) #x$overallReject # # #options(width = 180) #maxNumberOfSubjects <- 150 #informationRates <- (1:3)/3 #plannedSubjects <- round(informationRates*maxNumberOfSubjects) #maxNumberOfIterations <- 20000 # #x <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(0.5,0.5), informationRates = informationRates), groups = 1, thetaH0 = 0, # plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, stDev = 3.5, alternative = seq(-1,0,0.2), # conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10,10,10), maxNumberOfSubjectsPerStage = c(100,100,100), directionUpper = FALSE) #x$overallReject #x$futilityStop #x$expectedNumberOfSubjects 
#x$conditionalPowerAchieved #x$sampleSizes # #x <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(0.5, 0.5)), groups = 2, meanRatio = FALSE, thetaH0 = -0.1, # plannedSubjects = c(80,160,240), maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 3.5, # conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 40, 40), maxNumberOfSubjectsPerStage = c(100,400, 400)) #x$overallReject #x$futilityStop #x$expectedNumberOfSubjects #x$conditionalPowerAchieved #x$sampleSizes # #x <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(0.5,0.5)), groups = 2, meanRatio = TRUE, thetaH0 = 1.6, # plannedSubjects = c(80,160,240), maxNumberOfIterations = maxNumberOfIterations, stDev = 1.5, alternative = seq(0.8,1.6,0.2), # conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10,40,40), maxNumberOfSubjectsPerStage = c(200,400,400), # allocationRatioPlanned = 3, directionUpper = FALSE) # #x$overallReject #x$futilityStop #x$expectedNumberOfSubjects #x$conditionalPowerAchieved #x$sampleSizes # #myStageSubjects <- function(..., stage, thetaH0, allocationRatioPlanned, # minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, # sampleSizesPerStage, thetaStandardized, conditionalPower, conditionalCriticalValue) { # if (stage == 2){ # stageSubjects <- (1 + 1/allocationRatioPlanned + thetaH0^2 * (1 + allocationRatioPlanned))* # (max(0, conditionalCriticalValue + stats::qnorm(conditionalPower)))^2 * mult / # (max(1e-12, thetaStandardized))^2 # stageSubjects <- min(max(minNumberOfSubjectsPerStage[stage], stageSubjects), # maxNumberOfSubjectsPerStage[stage]) # } else { # stageSubjects <- sampleSizesPerStage[stage - 1] # } # return(stageSubjects) #} # #x <- getSimulationMeans(design = getDesignInverseNormal(futilityBounds = c(0.5,0.5)), groups = 2, meanRatio = TRUE, thetaH0 = 1.6, # plannedSubjects = c(80,160,240), maxNumberOfIterations = maxNumberOfIterations, stDev = 1.5, alternative = seq(0.8,1.6,0.2), # conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10,40,40), maxNumberOfSubjectsPerStage = c(200,400,400), # allocationRatioPlanned = 3, directionUpper = FALSE, calcSubjectsFunction = myStageSubjects) # # }) rpact/inst/tests/testthat/test-f_core_assertions.R0000644000176200001440000002146413567423107022170 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 06 November 2019, 17:12:23 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing assertion functions") test_that("Testing '.assertIsInClosedInterval'", { invisible(capture.output(expect_error(.assertIsInClosedInterval(x = 0, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = 1, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = 0.0001, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = 0.9999, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = NA_real_, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = NA_real_, xName = "x", lower = 0, upper = 1, naAllowed = TRUE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = c(1, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = TRUE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = c(NA_real_, 0, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = TRUE), NA))) expect_error(.assertIsInClosedInterval(x = -0.0001, xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInClosedInterval(x = -0.0001, xName = "x", lower = 0, upper = NA_real_, naAllowed = FALSE)) expect_error(.assertIsInClosedInterval(x = 1.0001, xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInClosedInterval(x = c(1, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInClosedInterval(x = c(-1, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = TRUE)) expect_error(.assertIsInClosedInterval(x = c(NA_real_, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInClosedInterval(x = c(0, -1, 1, 2), xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) }) test_that("Testing '.assertIsInOpenInterval'", { invisible(capture.output(expect_error(.assertIsInOpenInterval(x = 0.0001, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInOpenInterval(x = 0.9999, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInOpenInterval(x = NA_real_, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInOpenInterval(x = NA_real_, xName = "x", lower = 0, upper = 1, naAllowed = TRUE), NA))) invisible(capture.output(expect_error(.assertIsInOpenInterval(x = c(0.9999, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = TRUE), NA))) invisible(capture.output(expect_error(.assertIsInOpenInterval(x = c(NA_real_, 0.0001, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = TRUE), NA))) expect_error(.assertIsInOpenInterval(x = 0, xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) 
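# Boundary values themselves must be rejected for the open interval (0; 1);
# the following checks mirror the closed-interval tests above with respect to
# NA handling and vectors containing out-of-range elements.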
expect_error(.assertIsInOpenInterval(x = 0, xName = "x", lower = 0, upper = NA_real_, naAllowed = FALSE)) expect_error(.assertIsInOpenInterval(x = 1, xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInOpenInterval(x = c(1.0001, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInOpenInterval(x = c(-1, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = TRUE)) expect_error(.assertIsInOpenInterval(x = c(NA_real_, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInOpenInterval(x = c(0, -1, 1, 2), xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) }) test_that("Testing '.assertDesignParameterExists'", { expect_error(.assertDesignParameterExists(), "Missing argument: 'design' must be defined", fixed = TRUE) expect_error(.assertDesignParameterExists(design = getAssertionTestDesign()), "Missing argument: 'parameterName' must be defined", fixed = TRUE) expect_error(.assertDesignParameterExists(design = getAssertionTestDesign(), parameterName = "kMax"), "Missing argument: 'defaultValue' must be defined", fixed = TRUE) expect_error(.assertDesignParameterExists(design = getAssertionTestDesign(), parameterName = "kMax", defaultValue = C_KMAX_DEFAULT), "Missing argument: parameter 'kMax' must be specified in design", fixed = TRUE) expect_error(.assertDesignParameterExists(design = getAssertionTestDesign(kMax = NA_integer_), parameterName = "kMax", defaultValue = C_KMAX_DEFAULT), "Missing argument: parameter 'kMax' must be specified in design", fixed = TRUE) }) test_that("Testing '.assertIsValidThetaRange' ", { expect_error(.assertIsValidThetaRange(thetaRange = c()), "Illegal argument: 'thetaRange' must be a vector with two entries defining minimum and maximum or a sequence of values with length > 2", fixed = TRUE) #expect_error(.assertIsValidThetaRange(thetaRange = c(1, -2)), # "Illegal argument: 'thetaRange' with length 2 must contain minimum < maximum (1 >= -2)", fixed = TRUE) expect_equal(.assertIsValidThetaRange(thetaRange = c(1, 2, 3)), c(1, 2, 3)) expect_equal(.assertIsValidThetaRange(thetaRange = c(-1, 2)), seq(-1, 2, 3 / C_THETA_RANGE_SEQUENCE_LENGTH_DEFAULT)) }) test_that("Testing '.assertIsSingleNumber'", { expect_error(.assertIsSingleNumber(NA, "x"), "Illegal argument: 'x' (NA) must be a valid single numerical value", fixed = TRUE) expect_error(.assertIsSingleNumber(NULL, "x"), "Missing argument: 'x' must be a valid single numerical value", fixed = TRUE) expect_error(.assertIsSingleNumber(c(1, 2), "x"), "Illegal argument: 'x' c(1, 2) must be a single numerical value", fixed = TRUE) expect_error(.assertIsSingleNumber(numeric(0), "x"), "Missing argument: 'x' must be a valid single numerical value", fixed = TRUE) }) test_that("Testing '.assertAssociatedArgumentsAreDefined'", { expect_error(.assertAssociatedArgumentsAreDefined(a = NA, b = 1), "Missing argument: 'a' must be defined because 'b' is defined", fixed = TRUE) expect_error(.assertAssociatedArgumentsAreDefined(a = NA, b = 1, c = NA), "Missing argument: 'a', 'c' must be defined because 'b' is defined", fixed = TRUE) expect_error(.assertAssociatedArgumentsAreDefined(a = NA, b = 1, c = 2), "Missing argument: 'a' must be defined because 'b', 'c' are defined", fixed = TRUE) }) test_that("Testing '.associatedArgumentsAreDefined'", { expect_equal(.associatedArgumentsAreDefined(nPlanned = NA_real_, thetaH1 = NA_real_), FALSE) expect_equal(.associatedArgumentsAreDefined(nPlanned = NA_real_, thetaH1 = 1), FALSE) 
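# TRUE is expected only if both 'nPlanned' and 'thetaH1' are defined: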
expect_equal(.associatedArgumentsAreDefined(nPlanned = 1, thetaH1 = 1), TRUE) }) test_that("Testing '.isValidNPlanned'", { expect_equal(.isValidNPlanned(nPlanned = c(1, 2), kMax = 4, stage = 2), TRUE) expect_silent(.isValidNPlanned(nPlanned = NA_real_, kMax = 4, stage = 2)) expect_warning(.isValidNPlanned(nPlanned = c(1), kMax = 4, stage = 2), "'nPlanned' (1) will be ignored: length must be equal to 'kMax' (4) - 'stage' (2)", fixed = TRUE) expect_warning(.isValidNPlanned(nPlanned = c(1, 2, 3), kMax = 4, stage = 2), "'nPlanned' (1, 2, 3) will be ignored: length must be equal to 'kMax' (4) - 'stage' (2)", fixed = TRUE) }) rpact/inst/tests/testthat/helper-f_core_assertions.R0000644000176200001440000000545713370565006022467 0ustar liggesusers###################################################################################### # # # -- Unit test helper functions -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. # # # # File version: 1.0.0 # # Date: 07-11-2018 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### getAssertionTestDesign <- function(kMax = NA_integer_, informationRates = NA_real_, futilityBounds = NA_real_, designClass = C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL) { if (designClass == C_CLASS_NAME_TRIAL_DESIGN_FISHER) { return(TrialDesignFisher( kMax = kMax, alpha = C_ALPHA_DEFAULT, method = C_FISHER_METHOD_DEFAULT, alpha0Vec = futilityBounds, informationRates = informationRates, tolerance = C_ANALYSIS_TOLERANCE_FISHER_DEFAULT, iterations = 0, seed = 9498485 )) } return(.createDesign( designClass = designClass, kMax = kMax, alpha = C_ALPHA_DEFAULT, beta = C_BETA_DEFAULT, sided = 1, informationRates = informationRates, futilityBounds = futilityBounds, typeOfDesign = C_DEFAULT_TYPE_OF_DESIGN, delta = 0, optimizationCriterion = C_OPTIMIZATION_CRITERION_DEFAULT, gammaA = 1, typeBetaSpending = C_TYPE_OF_DESIGN_BS_NONE, userAlphaSpending = NA_real_, userBetaSpending = NA_real_, gammaB = 1, tolerance = 1e-06)) } rpact/inst/tests/testthat/test-f_design_fisher_combination_test.R0000644000176200001440000003427213567165663025227 0ustar liggesusers###################################################################################### # # # -- Unit tests -- # # # # This file is part of the R package RPACT - R Package for Adaptive Clinical Trials. 
# # # # File version: 1.0.0 # # Date: 06 November 2019, 17:12:27 # # Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD # # Licensed under "GNU Lesser General Public License" version 3 # # License text can be found here: https://www.r-project.org/Licenses/LGPL-3 # # # # RPACT company website: https://www.rpact.com # # RPACT package website: https://www.rpact.org # # # # Contact us for information about our services: info@rpact.com # # # ###################################################################################### context("Testing the Fisher design functionality") test_that("'getDesignFisher' with default parameters: parameters and results are as expected", { # @refFS[Formula]{fs:FisherCombinationEqualAlpha} x <- getDesignFisher() ## ## Comparison of the results of TrialDesignFisher object 'x' with expected results ## expect_equal(x$alphaSpent, c(0.012308547, 0.01962413, 0.025), tolerance = 1e-07) expect_equal(x$criticalValues, c(0.012308547, 0.0016635923, 0.00029106687), tolerance = 1e-07) expect_equal(x$stageLevels, c(0.012308547, 0.012308547, 0.012308547), tolerance = 1e-07) expect_equal(x$scale, c(1, 1)) expect_equal(x$nonStochasticCurtailment, FALSE) }) test_that("'getDesignFisher' with kMax = 4: parameters and results are as expected for different arguments", { # @refFS[Formula]{fs:FisherCombinationEqualAlpha} designFisher1 <- getDesignFisher(kMax = 4) ## ## Comparison of the results of TrialDesignFisher object 'designFisher1' with expected results ## expect_equal(designFisher1$alphaSpent, c(0.010404785, 0.016661203, 0.021286477, 0.025), tolerance = 1e-07) expect_equal(designFisher1$criticalValues, c(0.010404785, 0.0013703718, 0.00023506069, 4.5812899e-05), tolerance = 1e-07) expect_equal(designFisher1$stageLevels, c(0.010404785, 0.010404785, 0.010404785, 0.010404785), tolerance = 1e-07) expect_equal(designFisher1$scale, c(1, 1, 1)) expect_equal(designFisher1$nonStochasticCurtailment, FALSE) # @refFS[Formula]{fs:FisherCombinationEqualAlpha} designFisher2 <- getDesignFisher(kMax = 4, alpha0Vec = c(0.7,0.6,0.5), informationRates = c(0.1,0.3,0.7,1)) ## ## Comparison of the results of TrialDesignFisher object 'designFisher2' with expected results ## expect_equal(designFisher2$alphaSpent, c(0.010565317, 0.017774885, 0.022713904, 0.025), tolerance = 1e-07) expect_equal(designFisher2$criticalValues, c(0.010565317, 0.00031144789, 2.8609076e-06, 1.4533579e-07), tolerance = 1e-07) expect_equal(designFisher2$stageLevels, c(0.010565317, 0.010565317, 0.010565317, 0.010565317), tolerance = 1e-07) expect_equal(designFisher2$scale, c(1.4142136, 2, 1.7320508), tolerance = 1e-07) expect_equal(designFisher2$nonStochasticCurtailment, FALSE) # @refFS[Formula]{fs:FisherCombinationFullAlpha} designFisher3 <- getDesignFisher(kMax = 4, method = "fullAlpha") ## ## Comparison of the results of TrialDesignFisher object 'designFisher3' with expected results ## expect_equal(designFisher3$alphaSpent, c(0.00015574772, 0.0015212305, 0.0075070105, 0.025), tolerance = 1e-07) expect_equal(designFisher3$criticalValues, c(0.00015574772, 0.00015574772, 0.00015574772, 0.00015574772), tolerance = 1e-07) expect_equal(designFisher3$stageLevels, c(0.00015574772, 0.0015212305, 0.0075070105, 0.025), tolerance = 1e-07) expect_equal(designFisher3$scale, c(1, 1, 1)) expect_equal(designFisher3$nonStochasticCurtailment, TRUE) # @refFS[Formula]{fs:FisherCombinationFullAlpha} designFisher4 <- getDesignFisher(kMax = 4, alpha0Vec = c(0.7,0.6,0.5), informationRates = c(0.1,0.3,0.7,1), method = "fullAlpha") ## ## Comparison of 
the results of TrialDesignFisher object 'designFisher4' with expected results ## expect_equal(designFisher4$alphaSpent, c(0.0075234886, 0.012807964, 0.016496254, 0.025), tolerance = 1e-07) expect_equal(designFisher4$criticalValues, c(0.0075234886, 0.00019010097, 1.4149989e-06, 1.0550077e-06), tolerance = 1e-07) expect_equal(designFisher4$stageLevels, c(0.0075234886, 0.0075234886, 0.0075234886, 0.025), tolerance = 1e-07) expect_equal(designFisher4$scale, c(1.4142136, 2, 1.7320508), tolerance = 1e-07) expect_equal(designFisher4$nonStochasticCurtailment, FALSE) # @refFS[Formula]{fs:FisherCombinationNoTreatmentStageInteraction} designFisher5 <- getDesignFisher(kMax = 4, alpha0Vec = c(0.7,0.6,0.5), method = "noInteraction") ## ## Comparison of the results of TrialDesignFisher object 'designFisher5' with expected results ## expect_equal(designFisher5$alphaSpent, c(0.0098603693, 0.012073314, 0.018133935, 0.025), tolerance = 1e-07) expect_equal(designFisher5$criticalValues, c(0.0098603693, 0.00051915905, 0.00031149543, 0.00015574772), tolerance = 1e-07) expect_equal(designFisher5$stageLevels, c(0.0098603693, 0.0044457148, 0.012979977, 0.025), tolerance = 1e-07) expect_equal(designFisher5$scale, c(1, 1, 1)) expect_equal(designFisher5$nonStochasticCurtailment, FALSE) # @refFS[Formula]{fs:FisherCombinationNoTreatmentStageInteraction} designFisher6 <- getDesignFisher(kMax = 4, alpha0Vec = c(0.7,0.6,0.5), informationRates = c(0.1,0.3,0.7,1), method = "noInteraction") ## ## Comparison of the results of TrialDesignFisher object 'designFisher6' with expected results ## expect_equal(designFisher6$alphaSpent, c(0.01128689, 0.011490625, 0.016266616, 0.025), tolerance = 1e-07) expect_equal(designFisher6$criticalValues, c(0.01128689, 2.0322622e-06, 1.5741835e-06, 1.0550077e-06), tolerance = 1e-07) expect_equal(designFisher6$stageLevels, c(0.01128689, 0.0003175156, 0.0079214091, 0.025), tolerance = 1e-07) expect_equal(designFisher6$scale, c(1.4142136, 2, 1.7320508), tolerance = 1e-07) expect_equal(designFisher6$nonStochasticCurtailment, FALSE) # @refFS[Formula]{fs:FisherCombinationUserDefinedAlphaPending} designFisher7 <- getDesignFisher(kMax = 4, alpha0Vec = c(0.7,0.6,0.5), method = "userDefinedAlpha", userAlphaSpending = c(0.01,0.015,0.02,0.025)) ## ## Comparison of the results of TrialDesignFisher object 'designFisher7' with expected results ## expect_equal(designFisher7$alphaSpent, c(0.01, 0.015, 0.02, 0.025), tolerance = 1e-07) expect_equal(designFisher7$criticalValues, c(0.01, 0.0011768873, 0.00031357454, 0.00011586425), tolerance = 1e-07) expect_equal(designFisher7$stageLevels, c(0.01, 0.0091148534, 0.013047692, 0.020300118), tolerance = 1e-07) expect_equal(designFisher7$scale, c(1, 1, 1)) expect_equal(designFisher7$nonStochasticCurtailment, FALSE) # @refFS[Formula]{fs:FisherCombinationUserDefinedAlphaPending} designFisher8 <- getDesignFisher(kMax = 4, alpha0Vec = c(0.7,0.6,0.5), informationRates = c(0.1,0.3,0.7,1), method = "userDefinedAlpha", userAlphaSpending = c(0.01,0.015,0.02,0.025)) ## ## Comparison of the results of TrialDesignFisher object 'designFisher8' with expected results ## expect_equal(designFisher8$alphaSpent, c(0.01, 0.015, 0.02, 0.025), tolerance = 1e-07) expect_equal(designFisher8$criticalValues, c(0.01, 0.00018389153, 2.6484943e-06, 5.2344628e-07), tolerance = 1e-07) expect_equal(designFisher8$stageLevels, c(0.01, 0.0073532156, 0.0101804, 0.018500415), tolerance = 1e-07) expect_equal(designFisher8$scale, c(1.4142136, 2, 1.7320508), tolerance = 1e-07) 
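# The scale values above are consistent with Fisher combination weights of the
# form sqrt((t_k - t_(k-1)) / t_1) for informationRates t = c(0.1, 0.3, 0.7, 1),
# i.e., sqrt(2), sqrt(4) = 2, and sqrt(3); this formula is inferred from the
# expected values rather than taken from the package documentation.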
expect_equal(designFisher8$nonStochasticCurtailment, FALSE) }) test_that("'getDesignFisher': illegal arguments throw exceptions as expected", { expect_error(getDesignFisher(method = C_FISHER_METHOD_USER_DEFINED_ALPHA, userAlphaSpending = c(0.01, 0.02, 0.023, 0.023, 0.025), kMax = 4), paste0("Conflicting arguments: length of 'userAlphaSpending' (5) ", "must be equal to 'kMax' (4)"), fixed = TRUE) expect_error(getDesignFisher(method = C_FISHER_METHOD_USER_DEFINED_ALPHA, userAlphaSpending = c(0.01, 0.02, 0.025), informationRates = c(0.5, 1)), paste0("Conflicting arguments: length of 'userAlphaSpending' (3) ", "must be equal to length of 'informationRates' (2)"), fixed = TRUE) expect_error(getDesignFisher(method = C_FISHER_METHOD_USER_DEFINED_ALPHA, userAlphaSpending = c(0.01, 0.02, 0.025), informationRates = c(0.4, 1)), paste0("Conflicting arguments: length of 'userAlphaSpending' (3) ", "must be equal to length of 'informationRates' (2)"), fixed = TRUE) expect_error(getDesignFisher(method = C_FISHER_METHOD_USER_DEFINED_ALPHA, userAlphaSpending = c(0.01, 0.02, 0.023, 0.023, 0.021)), paste0("'userAlphaSpending' = c(0.01, 0.02, 0.023, 0.023, 0.021) must be a vector that ", "satisfies the following condition: 0 <= alpha_1 <= .. <= alpha_5 <= alpha = 0.021"), fixed = TRUE) expect_error(getDesignFisher(method = C_FISHER_METHOD_USER_DEFINED_ALPHA, userAlphaSpending = c(0.01, 0.02, 0.023), alpha = 0.02), paste0("'userAlphaSpending' = c(0.01, 0.02, 0.023) must be a vector that ", "satisfies the following condition: 0 <= alpha_1 <= .. <= alpha_3 <= alpha = 0.02"), fixed = TRUE) expect_equal(getDesignFisher(method = C_FISHER_METHOD_USER_DEFINED_ALPHA, userAlphaSpending = c(0.01, 0.02, 0.023))$alpha, 0.023) expect_error(getDesignFisher(method = C_FISHER_METHOD_USER_DEFINED_ALPHA), "Missing argument: parameter 'userAlphaSpending' must be specified in design", fixed = TRUE) expect_error(getDesignFisher(kMax = Inf), paste0("Argument out of bounds: 'kMax' (Inf) is out of bounds [1; ", C_KMAX_UPPER_BOUND_FISHER, "]"), fixed = TRUE) expect_error(getDesignFisher(kMax = -Inf), paste0("Argument out of bounds: 'kMax' (-Inf) is out of bounds [1; ", C_KMAX_UPPER_BOUND_FISHER, "]"), fixed = TRUE) expect_error(getDesignFisher(kMax = -Inf), "Argument out of bounds: 'kMax' (-Inf) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = -10), "Argument out of bounds: 'kMax' (-10) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = -9), "Argument out of bounds: 'kMax' (-9) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = -8), "Argument out of bounds: 'kMax' (-8) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = -7), "Argument out of bounds: 'kMax' (-7) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = -6), "Argument out of bounds: 'kMax' (-6) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = -5), "Argument out of bounds: 'kMax' (-5) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = -4), "Argument out of bounds: 'kMax' (-4) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = -3), "Argument out of bounds: 'kMax' (-3) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = -2), "Argument out of bounds: 'kMax' (-2) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = -1), "Argument out of bounds: 'kMax' (-1) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 0), 
"Argument out of bounds: 'kMax' (0) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 7), "Argument out of bounds: 'kMax' (7) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 8), "Argument out of bounds: 'kMax' (8) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 9), "Argument out of bounds: 'kMax' (9) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 10), "Argument out of bounds: 'kMax' (10) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 11), "Argument out of bounds: 'kMax' (11) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 12), "Argument out of bounds: 'kMax' (12) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 13), "Argument out of bounds: 'kMax' (13) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 14), "Argument out of bounds: 'kMax' (14) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 15), "Argument out of bounds: 'kMax' (15) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 16), "Argument out of bounds: 'kMax' (16) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = Inf), "Argument out of bounds: 'kMax' (Inf) is out of bounds [1; 6]", fixed = TRUE) expect_error(getDesignFisher(kMax = 2, informationRates = c(0.01, 0.02, 0.04, 0.05)), "Conflicting arguments: length of 'informationRates' (4) must be equal to 'kMax' (2)", fixed = TRUE) expect_error(getDesignFisher(kMax = 3, informationRates = c(0.01, 0.02, 0.04, 0.05)), "Conflicting arguments: length of 'informationRates' (4) must be equal to 'kMax' (3)", fixed = TRUE) expect_error(getDesignFisher(kMax = 5, informationRates = c(0.01, 0.02, 0.04, 0.05)), "Conflicting arguments: length of 'informationRates' (4) must be equal to 'kMax' (5)", fixed = TRUE) expect_error(getDesignFisher(kMax = 6, informationRates = c(0.01, 0.02, 0.04, 0.05)), "Conflicting arguments: length of 'informationRates' (4) must be equal to 'kMax' (6)", fixed = TRUE) expect_error(getDesignFisher(alpha0Vec = c(0, 1)), "Argument out of bounds: 'alpha0Vec' (0, 1) is out of bounds (0; 1]", fixed = TRUE) expect_error(getDesignFisher(alpha0Vec = c(0.1, 1.01)), "Argument out of bounds: 'alpha0Vec' (0.1, 1.01) is out of bounds (0; 1]", fixed = TRUE) }) rpact/inst/tests/testthat.R0000644000176200001440000000007413352126153015467 0ustar liggesusers library(testthat) library(rpact) test_check("rpact")