########################################################################
## File: DoseFinding/tests/testssamplMod.R
########################################################################
require("DoseFinding")

########################################################################
## test Bayesian fitting
## compare bFitMod on example data set with jags
data(biom)
## (i) fit biom data
## data to fit
model <- "sigEmax"
anMod <- lm(resp~factor(dose)-1, data=biom)
drFit <- coef(anMod);y <- drFit
vCov <- vcov(anMod)
Omega <- solve(vCov)#+diag(5)*1000
dose <- sort(unique(biom$dose))
nD <- length(dose)
prior <- list(t = c(0, sqrt(2), 3), norm = c(0, sqrt(2)),
              beta=c(0,1,1,1), lnorm=c(0, sqrt(0.5)))
res <- bFitMod(dose, drFit, vCov, model = "sigEmax", prior=prior, nSim = 100)
## ## jags code (commented out, only for "manual" testing)
## library(rjags)
## path <- "~/Projekte/DoseFindingPackage/"
## modelstr <- "
## model{
##   y[] ~ dmnorm(mu[], Omega[,])
##   for(i in 1:nD){
##     mu[i] <- E0 + (Emax*dose[i]^h)/(dose[i]^h+ED50^h)
##   }
##   E0 ~ dt(0, 0.5, 3)
##   Emax ~ dnorm(0, 0.5)
##   ED50 ~ dunif(0,1)
##   h ~ dlnorm(0, 2)
## }
## "
## file <- paste(path, "mod.txt", sep="")
## cat(modelstr, file = file)
## ## data
## jags.data <- list(y=y, nD=nD, dose=dose, Omega=Omega)
## jags.inits <- list("E0"=0,"Emax"=0,"ED50"=0.5,"h"=1)
## mod <- jags.model(file, jags.data, jags.inits, n.chains = 3)
## samp <- jags.samples(mod, c("E0","Emax","ED50", "h"), 100000)
## quantile(samp$E0, c(0.05,0.25,0.5,0.75,0.95))
## quantile(res$samples[,1], c(0.05,0.25,0.5,0.75,0.95))
## quantile(samp$Emax, c(0.05,0.25,0.5,0.75,0.95))
## quantile(res$samples[,2], c(0.05,0.25,0.5,0.75,0.95))
## quantile(samp$ED50, c(0.05,0.25,0.5,0.75,0.95))
## quantile(res$samples[,3], c(0.05,0.25,0.5,0.75,0.95))
## quantile(samp$h, c(0.05,0.25,0.5,0.75,0.95))
## quantile(res$samples[,4], c(0.05,0.25,0.5,0.75,0.95))
## cor(cbind(as.numeric(samp$E0[,,]),
##           as.numeric(samp$Emax[,,]),
##           as.numeric(samp$ED50[,,]),
##           as.numeric(samp$h[,,])))
## cor(res$samples)

## (ii) now run with inflated variance (essentially sample prior)
vCov <- vcov(anMod)*100000
Omega <- solve(vCov)#+diag(5)*1000
res <- bFitMod(dose, drFit, vCov, model = "sigEmax", prior=prior, nSim = 100)
## ## jags code (commented out, only for "manual" testing)
## jags.data <- list(y=y, nD=nD, dose=dose, Omega=Omega)
## mod <- jags.model(file, jags.data, jags.inits, n.chains = 3)
## samp <- jags.samples(mod, c("E0","Emax","ED50", "h"), 100000)
## quantile(samp$E0, c(0.05,0.25,0.5,0.75,0.95))
## quantile(res$samples[,1], c(0.05,0.25,0.5,0.75,0.95))
## quantile(samp$Emax, c(0.05,0.25,0.5,0.75,0.95))
## quantile(res$samples[,2], c(0.05,0.25,0.5,0.75,0.95))
## quantile(samp$ED50, c(0.05,0.25,0.5,0.75,0.95))
## quantile(res$samples[,3], c(0.05,0.25,0.5,0.75,0.95))
## quantile(samp$h, c(0.05,0.25,0.5,0.75,0.95))
## quantile(res$samples[,4], c(0.05,0.25,0.5,0.75,0.95))
## cor(cbind(as.numeric(samp$E0[,,]),
##           as.numeric(samp$Emax[,,]),
##           as.numeric(samp$ED50[,,]),
##           as.numeric(samp$h[,,])))
## cor(res$samples)

########################################################################
## test bootstrap fitting
vCov <- vcov(anMod)
bnds <- matrix(c(0.001, 0.5, 1.5, 10), 2, 2)
res <- bFitMod(dose, drFit, vCov, model = "sigEmax",
               nSim = 100, bnds=bnds, type = "bootstrap")
dd <- dose[-1];resp <- drFit[2:5]-drFit[1]
vc <- cbind(-1,diag(4))%*%vCov%*%t(cbind(-1,diag(4)))
res <- bFitMod(dd, resp, vc, model = "linear", nSim = 100,
               bnds=bnds, placAdj = TRUE, type = "bootstrap")

########################################################################
## test dose calculations, when model = "linInt" and placAdj=TRUE
data(IBScovars)
anovaMod <- lm(resp~factor(dose)+gender, data=IBScovars)
drFit <- coef(anovaMod)[2:5] # placebo adjusted estimates at doses
vCov <- vcov(anovaMod)[2:5,2:5]
dose <- sort(unique(IBScovars$dose))[-1]
fm <- fitMod(dose, drFit, S=vCov, model = "linInt", type = "general", placAdj=TRUE)
ED(fm, 0.25)
ED(fm, 0.5)
ED(fm, 0.75)
ED(fm, 0.95)
TD(fm, 0.2)
TD(fm, 0.3)
TD(fm, 0.4)
prior <- list(norm = c(0,1000), norm = c(0,1000), norm = c(0,1000), norm = c(0,1000))
gsample <- bFitMod(dose, drFit, vCov, model = "linInt", placAdj=TRUE,
                   start = c(1, 1, 1, 1), nSim = 1000, prior = prior)
td1 <- TD(gsample, 0.3)
td2 <- TD(gsample, 0.3, TDtype="d", doses = seq(0,4,length=101))
ed1 <- ED(gsample, 0.8)
ed2 <- ED(gsample, 0.8, EDtype="d", doses = seq(0,4,length=101))

########################################################################
## File: DoseFinding/tests/testplanMod.R
########################################################################
## ## commented out for time-reasons
## ################################################################################
## ## test 1: "validation" using fitMod and vcov.DRMod and predict.DRMod
## model <- "emax"
## sigma <- 1
## n <- c(100,50,50,50,100)
## doses <- c(0,10,20,40,50)
## cf <- c(0,1,10)
## V <- DoseFinding:::aprCov(doses, model, cf, S=diag(1/n))
## DoseFinding:::getPredVar(model, cf, V=V, pDose=50)
## ## now validation using the formulas in fitMod
## doseVec <- rep(doses, n)
## respVec <- rnorm(length(doseVec))
## dd <- fitMod(doseVec, respVec, model="emax")
## ## now change to achieve desired values
## dd$coefs <- cf
## dd$df <- dd$RSS <- sum(n)
## vcov(dd)
## predict(dd, predType = "effect-curve", doseSeq=50, se.fit=TRUE)$se.fit^2
## ################################################################################
## ## test 2: "validation" using simulation
## model <- "emax"
## sigma <- 1
## ## select very large sample size (to validate asymptotics)
## n <- c(100000, 50000, 50000, 50000, 100000)
## n <- c(100, 50, 50, 50, 100)
## doses <- c(0,10,20,40,50)
## cf <- c(0,0.4,10)
## Delta <- 0.2
## V <- DoseFinding:::aprCov(doses, model, cf, S=diag(0.3^2/n))
## tdvar <- DoseFinding:::getTDVar(model, cf, V=V, Delta=Delta, scale = "unrestricted")
## edvar <- DoseFinding:::getEDVar(model, cf, V=V, p=0.5, maxD=50, scale = "unrestricted")
## pavar <- DoseFinding:::getPredVar(model, cf, V=V, pDose=50)
## tdvart <- DoseFinding:::getTDVar(model, cf, V=V, scale = "log", Delta=Delta)
## edvart <- DoseFinding:::getEDVar(model, cf, V=V, scale = "logit", p=0.5, maxD=50)
## ## simulation
## mn <- emax(doses, cf[1], cf[2], cf[3])
## doseVec <- rep(doses, n)
## mnVec <- rep(mn, n)
## td <- pl <- ed <- numeric(10000)
## for(i in 1:10000){
##   respVec <- mnVec + rnorm(length(mnVec),0,0.3)
##   ff <- fitMod(doseVec, respVec, model="emax", bnds = c(0.05, 75))
##   ed[i] <- ED(ff, p=0.5)
##   td[i] <- TD(ff, Delta = Delta)
##   pl[i] <- predict(ff, doseSeq=50, predType = "effect-curve")
##   pb <- txtProgressBar(min=1, max=1000, char="*", width = 20, style = 3)
##   setTxtProgressBar(pb, i)
## }
## cat("\n")
## mm <- Mods(emax=cf[3], doses=doses, placEff=0, maxEff=emax(50,cf[1],cf[2],cf[3]))
## edt <- ED(mm, p=0.5)
## edt7 <- ED(mm, p=0.7)
## edt3 <- ED(mm, p=0.3)
## tdt <- TD(mm, Delta=Delta)
## hist(td[td < 100], freq=FALSE, breaks = 21)
## curve(dnorm(x, tdt, sqrt(tdvar)), add=TRUE)
## hist(ed, freq=FALSE, breaks = 101)
## curve(dnorm(x, edt, sqrt(edvar)), add=TRUE)
## hist(pl, freq=FALSE, breaks = 21)
## curve(dnorm(x, emax(50,cf[1],cf[2],cf[3]), sqrt(pavar)), add=TRUE)
## hist(td[td < 100], freq=FALSE, breaks = 101)
## curve(dlnorm(x, log(tdt), sqrt(tdvart)), add=TRUE)
## hist(ed, freq=FALSE, breaks = 101)
## ## plot against logit-normal distribution
## mean(ed < edt7 & ed > edt3)
## pnorm(edt7, edt, sqrt(edvar))-pnorm(edt3, edt, sqrt(edvar))
## ################################################################################
## ## test 3: study example
## nSim <- 100
## doses <- c(0,1,3,10,30,50,75,150,300,450)
## n <- c(100,rep(38,8),100)
## sigma <- 380
## mm <- Mods(sigEmax = rbind(c(100,6),c(170,4), c(80,3), c(290,5)),
##            emax = c(5,20,50,120), linear=NULL, doses=doses,
##            placEff=0, maxEff=150)
## model <- "sigEmax"
## pp <- planMod(model, mm, n, sigma, doses=doses,
##               simulation = TRUE, cores = 4,
##               alpha = 0.025, nSim = nSim,
##               p = 0.5, pLB = 0.25, pUB = 0.75)
## print(pp)
## summary(pp, Delta = 130, p = 0.5)
## plot(pp)
## plot(pp, type="ED", 0.5)
## plot(pp, type="TD", Delta = 130, direction = "increasing")
## model <- "emax"
## pp <- planMod(model, mm, n, sigma, doses=doses,
##               simulation = TRUE, cores = 4,
##               alpha = 0.025,
##               p = 0.5, pLB = 0.25, pUB = 0.75)
## model <- "linear"
## pp <- planMod(model, mm, n, sigma, doses=doses,
##               simulation = TRUE, cores = 4,
##               alpha = 0.025, nSim = nSim,
##               p = 0.5, pLB = 0.25, pUB = 0.75)
## ## now model selection
## model <- c("sigEmax", "emax")
## pp1 <- planMod(model, mm, n, sigma, doses=doses, asyApprox = FALSE,
##                simulation = TRUE, cores = 4,
##                alpha = 0.025, nSim = nSim,
##                p = 0.5, pLB = 0.25, pUB = 0.75)
## print(pp1)
## summary(pp1)
## plot(pp1)
## plot(pp1, type="ED", 0.5)
## plot(pp1, type="TD", Delta = 130, direction = "increasing")
##
## ################################################################################
##
## ## test 4: study example
## doses <- c(0,10,25,50,100,150)
## fmodels <- Mods(linear = NULL, emax = 25,
##                 logistic = c(50, 10.88111), exponential= 85,
##                 betaMod=rbind(c(0.33,2.31),c(1.39,1.39)),
##                 doses = doses, addArgs=list(scal = 200),
##                 placEff = 0, maxEff = 0.4)
## sigma <- 1
## n <- rep(62, 6)*2
## ## use all models not used previously
## model <- c("linear", "quadratic", "exponential", "betaMod", "logistic", "linlog")
## altb <- defBnds(200)
## pp <- planMod(model, fmodels, n, sigma, doses=doses,
##               asyApprox = FALSE, simulation = TRUE, cores = 4,
##               alpha = 0.025, nSim = nSim, bnds=altb,
##               p = 0.5, pLB = 0.25, pUB = 0.75)
## pp
## summary(pp, p = 0.5, Delta = 0.3)
## plot(pp)
## plot(pp, type = "TD", Delta=0.3, direction = "i")
## plot(pp, type = "ED", p = 0.5)

########################################################################
## File: DoseFinding/tests/testgFit.R
########################################################################
require("DoseFinding")
data(IBScovars)
lmfit <- lm(resp~factor(dose)+gender, data=IBScovars)
cf <- coef(lmfit)[-c(6)]
vcv <- vcov(lmfit)[-c(6), -c(6)]
lmfit2 <- lm(resp~as.factor(dose)-1+gender, data=IBScovars)
cf2 <- coef(lmfit2)[-c(6)]
vcv2 <- vcov(lmfit2)[-c(6), -c(6)]
dose <- c(0:4)

## test fitting all available models
fitMod(dose[-1], cf[-1], S=vcv[-1,-1], model="linear", placAdj=TRUE, type="general")
fitMod(dose, cf2, S=vcv2, model="linear", placAdj=FALSE, type="general")
fitMod(dose[-1], cf[-1], S=vcv[-1,-1], model="quadratic", placAdj=TRUE, type="general")
fitMod(dose, cf2, S=vcv2, model="quadratic", placAdj=FALSE, type="general")
fitMod(dose, cf2, S=vcv2, model="linlog", placAdj=FALSE,
       addArgs=list(off=0.01*max(dose)), type="general")
fitMod(dose[-1], cf[-1], S=vcv[-1,-1], model="emax", placAdj=TRUE,
       bnds=defBnds(max(dose))$emax, type="general")
fitMod(dose, cf2, S=vcv2, model="emax", placAdj=FALSE,
       bnds=defBnds(max(dose))$emax, type="general")
fitMod(dose[-1], cf[-1], S=vcv[-1,-1], model="sigEmax", placAdj=TRUE,
       bnds=defBnds(max(dose))$sigEmax, type="general")
fitMod(dose, cf2, S=vcv2, model="sigEmax", placAdj=FALSE,
       bnds=defBnds(max(dose))$sigEmax, type="general")
fitMod(dose[-1], cf[-1], S=vcv[-1,-1], model="exponential", placAdj=TRUE,
       bnds=defBnds(max(dose))$exponential, type="general")
fitMod(dose, cf2, S=vcv2, model="exponential", placAdj=FALSE,
       bnds=defBnds(max(dose))$exponential, type="general")
fitMod(dose, cf2, S=vcv2, model="logistic", placAdj=FALSE,
       bnds=defBnds(max(dose))$logistic, type="general")
fitMod(dose[-1], cf[-1], S=vcv[-1,-1], model="betaMod", placAdj=TRUE,
       bnds=defBnds(max(dose))$betaMod, addArgs=list(scal=1.2*4), type="general")
fitMod(dose, cf2, S=vcv2, model="betaMod", placAdj=FALSE,
       bnds=defBnds(max(dose))$betaMod, addArgs=list(scal=1.2*4), type="general")
fitMod(dose[-1], cf[-1], S=vcv[-1,-1], model="linInt", placAdj=TRUE, type="general")
fitMod(dose, cf2, S=vcv2, model="linInt", placAdj=FALSE, type="general")

## test using starting value (instead of grid search)
fitMod(dose[-1], cf[-1], S=vcv[-1,-1], model="emax", placAdj=TRUE,
       bnds=defBnds(max(dose))$emax, start = 0.5, type="general")
fitMod(dose, cf2, S=vcv2, model="emax", placAdj=FALSE,
       bnds=defBnds(max(dose))$emax, start = 0.2, type="general")
fitMod(dose[-1], cf[-1], S=vcv[-1,-1], model="betaMod", placAdj=TRUE,
       bnds=defBnds(max(dose))$betaMod, addArgs=list(scal=1.2*4), type="general")
fitMod(dose, cf2, S=vcv2, model="betaMod", placAdj=FALSE,
       bnds=defBnds(max(dose))$betaMod, start = c(1, 1),
       addArgs=list(scal=1.2*4), type="general")

## test predict, vcov, coef, intervals, plot, summary
ggI <- fitMod(dose, cf2, S=vcv2, model="betaMod", placAdj=FALSE,
              bnds=defBnds(max(dose))$betaMod, addArgs=list(scal=1.2*4), type="general")
ggNI <- fitMod(dose[-1], cf[-1], S=vcv[-1,-1], model="betaMod", placAdj=TRUE,
               bnds=defBnds(max(dose))$betaMod, addArgs=list(scal=1.2*4), type="general")
predict(ggI, se.fit=TRUE, predType = "e")
predict(ggNI, se.fit=TRUE, predType = "e")
vcov(ggI)
vcov(ggNI)
plot(ggI, CI=T, plotData = "meansCI")
plot(ggNI, CI=T, plotData = "meansCI")
ggI <- fitMod(dose, cf2, S=vcv2, model="linInt", placAdj=FALSE, type="general")
ggNI <- fitMod(dose[-1], cf[-1], S=vcv[-1,-1], model="linInt", placAdj=TRUE, type="general")
predict(ggI, se.fit=TRUE, predType = "full-model")
predict(ggI, se.fit=TRUE, predType = "effect-curve")
predict(ggNI, se.fit=TRUE, predType = "full-model")
vcov(ggI)
vcov(ggNI)
plot(ggI, CI=T, plotData = "meansCI")
plot(ggNI, CI=T, plotData = "meansCI")

## even more tests for the linInt model
data(IBScovars)
## without covariates
fit <- fitMod(dose, resp, data=IBScovars, model="linInt")
plot(fit, CI=TRUE, plotData="meansCI")
fit <- fitMod(dose, resp, data=IBScovars, model="linInt", addCovars=~gender)
plot(fit, CI=TRUE, plotData="meansCI")
vcov(fit)
fit <- lm(resp~as.factor(dose)-1, data=IBScovars)
cf <- coef(fit)
vc <- vcov(fit)
doseVec <- 0:4
fit <- fitMod(doseVec, cf, model="linInt", S=vc, type = "general")
plot(fit, CI=TRUE, plotData="meansCI")
vcov(fit)
fit <- lm(resp~as.factor(dose)+gender, data=IBScovars)
cf <- coef(fit)[2:5]
vc <- vcov(fit)[2:5,2:5]
doseVec <- 1:4
fit <- fitMod(doseVec, cf, model="linInt", S=vc, type = "general", placAdj=TRUE)
vcov(fit)
plot(fit, CI=TRUE, plotData="meansCI")
predict(fit, predType = "effect-curve", se.fit=TRUE)
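## additional consistency check (added sketch, not part of the original test
## file): the placebo-adjusted linInt fit above is saturated, so the
## generalized fit should essentially reproduce the ANOVA estimates in `cf`;
## this assumes the coefficients are returned in sorted dose order
all.equal(as.numeric(coef(fit)), as.numeric(cf))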
########################################################################
## File: DoseFinding/tests/testsFitting.R
########################################################################
require("DoseFinding")
## effect curve estimate for linear type models!!

########################################################################
#### Testing function to generate doses and sample size allocs.
genDFdats <- function(model, argsMod, doses, n, sigma, mu = NULL){
  nD <- length(doses)
  dose <- sort(doses)
  if (length(n) == 1) n <- rep(n, nD)
  dose <- rep(dose, n)
  args <- c(list(dose), argsMod)
  mu <- do.call(model, args)
  data.frame(dose = dose, resp = mu + rnorm(sum(n), sd = sigma))
}

getDosSampSiz <- function(){
  # generate dose levels
  mD <- runif(1, 0, 1500)
  nD <- max(rpois(1, 5), 4)
  p <- rgamma(nD, 3)
  p <- cumsum(p/sum(p))
  doses <- signif(c(0, mD*p), 3)
  # sample size allocations
  totSS <- rpois(1, rexp(1, 1/250))
  totSS <- max(totSS, 50)
  p <- rgamma(nD+1, 3);p <- p/sum(p)
  n <- round(p*totSS)
  n[n==0] <- rpois(sum(n==0), 1)+1
  list(doses=doses, n=n)
}

getDFdataSet <- function(doses, n){
  if(missing(doses) & missing(n)){
    ll <- getDosSampSiz()
  } else {
    ll <- list(doses = doses, n=n)
  }
  e0 <- rnorm(1, 0, 10)
  eMax <- rgamma(1, abs(e0)*0.5, 0.5)
  sig <- eMax/runif(1, 0.5, 5)
  if(runif(1)<0.3){
    aa <- genDFdats("betaMod", c(e0 = e0, eMax = eMax,
                                 delta1=runif(1, 0.5, 4), delta2=runif(1, 0.5, 4),
                                 scal=1.2*max(ll$doses)), ll$doses, ll$n, sig)
  } else {
    aa <- genDFdats("sigEmax", c(e0 = e0, eMax = eMax,
                                 ed50=runif(1, 0.05*max(ll$doses), 1.5*max(ll$doses)),
                                 h=runif(1, 0.5, 4)), ll$doses, ll$n, sig)
  }
  N <- sum(ll$n)
  center <- c("blue", "green", "red", "yellow", "silver")
  aa <- data.frame(x= aa$dose, y=aa$resp,
                   center=as.factor(sample(center, N, replace = T)),
                   age=runif(N, 1, 100))
  aa[sample(1:nrow(aa)),]
}

########################################################################
########################################################################
#### Generate data sets and compare results of fitDRModel
#### to the result of nls and lm for AIC function (if these are consistent
#### parameter estimates, residual sum of square and degrees of freedom are
#### consistent) and the vcov function (if these are consistent parameter
#### estimates, RSS, df and gradient are consistent)
########################################################################
########################################################################

#### beta Model
set.seed(2000)
ll <- getDosSampSiz()
datset <- getDFdataSet(ll$doses, ll$n)
# without covariates
bnds <- matrix(c(0.05, 0.05, 6, 6), nrow=2)
fit0 <- fitMod(x, y, datset, model = "betaMod", addCovars = ~1,
               addArgs=list(scal=1.2*max(datset$x)), bnds=bnds, start=c(0.6, 0.6))
fitnls <- nls(y~betaMod(x, e0, emax, delta1, delta2, 1.2*max(datset$x)),
              start=c(e0=15, emax=14, delta1=0.8, delta2=0.5), data=datset)
AIC(fit0)
AIC(fitnls)
summary(fit0)
summary(fitnls)
vcov(fit0)
vcov(fitnls)
predict(fit0, predType="effect-curve", se.fit=TRUE)
predict(fit0, predType="full-model", se.fit=TRUE)
TD(fit0, Delta = 1)
# with covariates
fit0 <- fitMod(x, y, datset, model="betaMod", addCovars = ~age+center,
               addArgs=list(scal=1.2*max(datset$x)), bnds=bnds)
XX <- model.matrix(~center+age, data=datset)
scl <- 1.2*max(datset$x)
fitnls <- nls(y~cbind(XX, betaMod(x, 0, 1, delta1, delta2, scl)), data=datset,
              start=c(delta1=1, delta2=0.2), algorithm = "plinear")
AIC(fit0)
AIC(fitnls)
summary(fit0)
summary(fitnls)
vcov(fit0)
vcov(fitnls)
predict(fit0, predType="effect-curve", doseSeq = c(0, 100), se.fit=T)
predict(fit0, predType="full-model", se.fit=T,
        newdata = data.frame(x = c(0,100), center = as.factor("yellow"), age = 50))
TD(fit0, Delta = 1)

########################################################################
#### emax Model
set.seed(15)
ll <- getDosSampSiz()
datset <- getDFdataSet(ll$doses, ll$n)
# without covariates
bnds <- c(1e-5, max(datset$x))
fit0 <- fitMod(x,y, datset, model="emax", addCovars = ~1, bnds=bnds)
fitnls <- nls(y~emax(x, e0, emax, ed50), start=c(e0=-1, emax=1.3, ed50=0.1), data=datset)
AIC(fit0)
AIC(fitnls)
summary(fit0)
summary(fitnls)
vcov(fit0)
vcov(fitnls)
predict(fit0, predType="effect-curve", se.fit=T)
predict(fit0, predType="full-model", se.fit=T)
TD(fit0, Delta = 0.005)
# with covariates
fit0 <- fitMod(x,y, datset, model="emax", addCovars = ~age+center, bnds=bnds)
XX <- model.matrix(~center+age, data=datset)
fitnls <- nls(y~cbind(XX, emax(x, 0, 1, ed50)), data=datset,
              start=list(ed50=1), algorithm = "plinear")
AIC(fit0)
AIC(fitnls)
summary(fit0)
summary(fitnls)
vcov(fit0)
vcov(fitnls)
predict(fit0, predType="effect-curve", doseSeq = c(0, 100), se.fit=T)
predict(fit0, predType="full-model", se.fit=T,
        newdata = data.frame(x = c(0,100), center = as.factor("silver"), age = 50))
TD(fit0, Delta = 0.005)

########################################################################
#### sigEmax Model
## set.seed(25) # example where nls and bndnls find different optimum
set.seed(13)
ll <- getDosSampSiz()
datset <- getDFdataSet(ll$doses, ll$n)
# without covariates
bnds <- matrix(c(1e-5, 1e-5, max(datset$x), 30), nrow=2)
fit0 <- fitMod(x,y, datset, model = "sigEmax", addCovars = ~1, bnds=bnds)
fitnls <- nls(y~sigEmax(x, e0, emax, ed50, h),
              start=c(e0=6, emax=17, ed50=240, h=2), data=datset)
AIC(fit0)
AIC(fitnls)
summary(fit0)
summary(fitnls)
vcov(fit0)
vcov(fitnls)
predict(fit0, predType="effect-curve", se.fit=T)
predict(fit0, predType="full-model", se.fit=T)
TD(fit0, Delta = 1)
# with covariates
fit0 <- fitMod(x,y, datset, model="sigEmax", addCovars = ~age+center, bnds=bnds)
XX <- model.matrix(~center+age, data=datset)
fitnls <- nls(y~cbind(XX, sigEmax(x, 0, 1, ed50, h)), data=datset,
              start=list(ed50=368, h=2), algorithm = "plinear")
AIC(fit0)
AIC(fitnls)
summary(fit0)
summary(fitnls)
vcov(fit0)
vcov(fitnls)
predict(fit0, predType="effect-curve", doseSeq = c(0, 100), se.fit=T)
predict(fit0, predType="full-model", se.fit=T,
        newdata = data.frame(x = c(0,100), center = as.factor("silver"), age = 50))
TD(fit0, Delta = 1)

########################################################################
#### logistic Model
set.seed(200)
ll <- getDosSampSiz()
datset <- getDFdataSet(ll$doses, ll$n)
# without covariates
bnds <- matrix(c(1e-5, 1e-5, max(datset$x), max(datset$x)/2), nrow=2)
fit0 <- fitMod(x,y, datset, model="logistic", addCovars = ~1, bnds=bnds)
fitnls <- nls(y~logistic(x, e0, emax, ed50, delta),
              start=c(e0=0, emax=16, ed50=250, delta=90), data=datset)
AIC(fit0)
AIC(fitnls)
summary(fit0)
summary(fitnls)
vcov(fit0)
vcov(fitnls)
predict(fit0, predType="effect-curve", se.fit=T)
predict(fit0, predType="full-model", se.fit=T)
TD(fit0, Delta = 0.5)
# with covariates (example where nls and bndnls find different optima)
fit0 <- fitMod(x,y, datset, model="logistic", addCovars = ~age+center, bnds=bnds)
XX <- model.matrix(~center+age, data=datset)
fitnls <- nls(y~cbind(XX, logistic(x, 0, 1, ed50, delta)), data=datset,
              start=list(ed50=220, delta=48), algorithm = "plinear")
AIC(fit0)
AIC(fitnls)
summary(fit0)
summary(fitnls)
vcov(fit0)
vcov(fitnls)
predict(fit0, predType="effect-curve", doseSeq = c(0, 100), se.fit=T)
predict(fit0, predType="full-model", se.fit=T,
        newdata = data.frame(x = c(0,100), center = as.factor("silver"), age = 5))
TD(fit0, Delta = 0.02)

########################################################################
#### exponential Model
set.seed(4)
ll <- getDosSampSiz()
datset <- getDFdataSet(ll$doses, ll$n)
# without covariates
bnds <- c(0.1, 2)*max(datset$x)
fit0 <- fitMod(x,y, datset, model = "exponential", addCovars = ~1, bnds=bnds)
fitnls <- nls(y~exponential(x, e0, e1, delta), start=coef(fit0), data=datset)
AIC(fit0)
AIC(fitnls)
summary(fit0)
summary(fitnls)
vcov(fit0)
vcov(fitnls)
predict(fit0, predType="effect-curve", se.fit=T)
predict(fit0, predType="full-model", se.fit=T)
TD(fit0, Delta = 0.1)
# with covariates
bnds <- c(0.1, 2)*max(datset$x)
fit0 <- fitMod(x,y, datset, model = "exponential", addCovars = ~age+center, bnds=bnds)
XX <- model.matrix(~center+age, data=datset)
fitnls <- nls(y~cbind(XX, exponential(x, 0, 1, delta)), data=datset,
              start=c(delta=450), algorithm = "plinear")
AIC(fit0)
AIC(fitnls)
summary(fit0)
summary(fitnls)
vcov(fit0)
vcov(fitnls)
predict(fit0, predType="effect-curve", doseSeq = c(0, 100), se.fit=T)
predict(fit0, predType="full-model", se.fit=T,
        newdata = data.frame(x = c(0,100), center = as.factor("blue"), age = 50))
TD(fit0, Delta = 0.1)

########################################################################
#### linear model
ll <- getDosSampSiz()
datset <- getDFdataSet(ll$doses, ll$n)
# without covariates
fit0 <- fitMod(x,y, datset, model = "linear", addCovars = ~1)
fitlm <- lm(y~x, data=datset)
AIC(fit0)
AIC(fitlm)
summary(fit0)
summary(fitlm)
vcov(fit0)
vcov(fitlm)
predict(fit0, predType="effect-curve", se.fit=T)
TD(fit0, Delta = 1)
# with covariates
fit0 <- fitMod(x,y, datset, model = "linear", addCovars = ~age+center)
fitlm <- lm(y~x+age+center, data=datset)
AIC(fit0)
AIC(fitlm)
summary(fit0)
summary(fitlm)
vcov(fit0)
vcov(fitlm)
predict(fit0, predType="effect-curve", se.fit=T)
predict(fit0, predType = "f", se.fit = T,
        newdata = data.frame(x=c(0,1,2,100), age = 30, center = as.factor("blue")))
predict(fitlm, se.fit = T,
        newdata = data.frame(x=c(0,1,2,100), age = 30, center = as.factor("blue")))
TD(fit0, Delta = 1)

########################################################################
#### linlog model
ll <- getDosSampSiz()
datset <- getDFdataSet(ll$doses, ll$n)
off <- 0.05*max(datset$x)
# without covariates
fit0 <- fitMod(x,y, datset, model = "linlog", addCovars = ~1, addArgs=list(off=off))
fitlm <- lm(y~log(x+off), data=datset)
AIC(fit0)
AIC(fitlm)
summary(fit0)
summary(fitlm)
vcov(fit0)
vcov(fitlm)
predict(fit0, predType="effect-curve", se.fit=T)
## bug
## TD(fit0, Delta = 1)
# with covariates
fit0 <- fitMod(x,y, datset, model = "linlog", addCovars = ~age+center, addArgs=list(off=off))
fitlm <- lm(y~log(x+off)+age+center, data=datset)
AIC(fit0)
AIC(fitlm)
summary(fit0)
summary(fitlm)
vcov(fit0)
vcov(fitlm)
predict(fit0, predType = "f", se.fit = T, ## degrees of freedom wrong ##
        newdata = data.frame(x=c(0,1,2,100), age = 35, center = as.factor("blue")))
predict(fitlm, se.fit = T,
        newdata = data.frame(x=c(0,1,2,100), age = 35, center = as.factor("blue")))
TD(fit0, Delta = 1)

########################################################################
#### quadratic model
ll <- getDosSampSiz()
datset <- getDFdataSet(ll$doses, ll$n)
# without covariates
fit0 <- fitMod(x,y, datset, model = "quadratic", addCovars = ~1)
fitlm <- lm(y~x+I(x^2), data=datset)
AIC(fit0)
AIC(fitlm)
summary(fit0)
summary(fitlm)
vcov(fit0)
vcov(fitlm)
predict(fit0, predType="effect-curve", se.fit=T)
predict(fit0, predType="full-model", se.fit=T, newdata=data.frame(x=c(0, 10, 100)))
predict(fitlm, se.fit=T, newdata=data.frame(x=c(0, 10, 100)))
TD(fit0, Delta = 1)
# with covariates
fit0 <- fitMod(x,y, datset, model = "quadratic", addCovars = ~age+center)
fitlm <- lm(y~x+I(x^2)+age+center, data=datset)
AIC(fit0)
AIC(fitlm)
summary(fit0)
summary(fitlm)
vcov(fit0)
vcov(fitlm)
predict(fit0, predType = "f", se.fit = T,
        newdata=data.frame(x=c(0, 10, 100), age = 30, center = as.factor("blue")))
predict(fitlm, se.fit = T,
        newdata=data.frame(x=c(0, 10, 100), age = 30, center = as.factor("blue")))
TD(fit0, Delta = 0.1)

########################################################################
## ensure that predict with no argument uses the original data not the
## sorted data that were used for fitting
data(IBScovars)
ff <- fitMod(dose, resp, data=IBScovars, model="quadratic", addCovars = ~gender)
## should be all zero
predict(ff, predType = "ls-means") - predict(ff, predType = "ls-means", doseSeq = IBScovars[,3])
predict(ff, predType = "full-model") - predict(ff, predType = "full-model", newdata = IBScovars[,-2])
predict(ff, predType = "effect-curve") - predict(ff, predType = "effect-curve", doseSeq = IBScovars[,3])
ff2 <- fitMod(dose, resp, data=IBScovars, model="quadratic")
## should be all zero
predict(ff2, predType = "ls-means") - predict(ff2, predType = "ls-means", doseSeq = IBScovars[,3])
predict(ff2, predType = "full-model") - predict(ff2, predType = "full-model", newdata = IBScovars[,-2])
predict(ff2, predType = "effect-curve") - predict(ff2, predType = "effect-curve", doseSeq = IBScovars[,3])
dose <- unique(IBScovars$dose)
ord <- c(2,4,1,3,5)
mns <- tapply(IBScovars$resp, IBScovars$dose, mean)[ord]
ff3 <- fitMod(dose, mns, S=diag(5), model="quadratic", type = "general")
predict(ff3, predType = "ls-means") - predict(ff3, predType = "ls-means", doseSeq = dose)
predict(ff3, predType = "effect-curve") - predict(ff3, predType = "effect-curve", doseSeq = dose)

########################################################################
## ensure that S is also sorted when the dose is not entered sorted
dose <- sort(unique(IBScovars$dose))
mns <- tapply(IBScovars$resp, IBScovars$dose, mean)
S <- c(1000,1,1,1,1)*diag(5)
ff1 <- fitMod(dose, mns, S = S, model="linear", type="general")
## fit unsorted
dose <- unique(IBScovars$dose)
ord <- c(2,4,1,3,5)
mns <- tapply(IBScovars$resp, IBScovars$dose, mean)[ord]
ff2 <- fitMod(dose, mns, S = S, model="linear", type="general")
ff3 <- fitMod(dose, mns, S = S[ord,ord], model="linear", type="general")
## coef(ff1) & coef(ff3) should be equal
coef(ff1)
coef(ff3)

########################################################################
## File: DoseFinding/tests/testsMCPMod.R
########################################################################
require("DoseFinding")

########################################################################
#### multContTest
# functions to sample random DF data
getDosSampSiz <- function(){
  # generate dose levels
  mD <- runif(1, 0, 1500)
  nD <- max(rpois(1, 5), 4)
  p <- rgamma(nD, 3)
  p <- cumsum(p/sum(p))
  doses <- signif(c(0, mD*p), 3)
  # sample size allocations
  totSS <- rpois(1, rexp(1, 1/250))
  totSS <- max(totSS, 50)
  p <- rgamma(nD+1, 3);p <- p/sum(p)
  n <- round(p*totSS)
  n[n==0] <- rpois(sum(n==0), 1)+1
  list(doses=doses, n=n)
}
getDFdataSet <- function(doses, n){
  ll <- getDosSampSiz()
  e0 <- rnorm(1, 0, 10)
  eMax <- rgamma(1, abs(e0)*0.5, 0.5)*I(runif(1)<0.25)
  if(eMax > 0){ sig <- eMax/runif(1, 0.5, 5)} else { sig <- rgamma(1, abs(e0)*0.5, 0.5) }
  dosVec <- rep(ll$doses, ll$n)
  if(runif(1)<0.3){
    mnVec <- betaMod(dosVec, e0=e0, eMax=eMax,
                     delta1=runif(1, 0.5, 5), delta2=runif(1, 0.5, 5),
                     scal=1.2*max(ll$doses))
  } else {
    mnVec <- logistic(dosVec, e0 = e0, eMax = eMax,
                      ed50=runif(1, 0.05*max(ll$doses), 1.5*max(ll$doses)),
                      delta=runif(1, 0.5, max(ll$doses)/2))
  }
  resp <- rnorm(sum(ll$n), mnVec, sig)
  N <- sum(ll$n)
  cov1 <- as.factor(rpois(N, 5))
  cov2 <- runif(N, 1, 100)
  aa <- data.frame(x= dosVec, y=resp, cov1=cov1, cov2=cov2)
  aa[sample(1:nrow(aa)),]
}

#### simulate data
set.seed(10)
dd <- getDFdataSet()
bet <- guesst(0.9*max(dd$x), p=0.8, "betaMod", scal = 1.2*max(dd$x),
              dMax = 0.7*max(dd$x), Maxd = max(dd$x))
sE <- guesst(c(0.5*max(dd$x), 0.7*max(dd$x)) , p=c(0.5, 0.9), "sigEmax")
models <- Mods(linear = NULL, betaMod = bet, sigEmax = sE,
               doses = sort(unique(dd$x)), addArgs=list(scal = 1.2*max(dd$x)))
obj <- MCPMod(x,y, dd, models=models, addCovars = ~cov1+cov2, alpha=0.05, Delta=0.5)
plot(obj, plotData="meansCI", CI=TRUE)
obj <- MCPMod(dd$x,dd$y, models=models, alpha=0.05, Delta=0.5)
plot(obj, plotData="meansCI", CI=TRUE)

#### different model set
set.seed(10)
dd <- getDFdataSet()
mD <- max(dd$x)
lg1 <- guesst(c(0.3*mD, 0.4*mD), c(0.3, 0.9), "logistic")
lg2 <- guesst(c(0.3*mD, 0.4*mD), c(0.3, 0.5), "logistic")
expo <- guesst(c(0.9*mD), c(0.7), "exponential", Maxd=mD)
quad <- guesst(c(0.6*mD), c(1), "quadratic")
models <- Mods(linlog = NULL, logistic = rbind(lg1, lg2),
               exponential = expo, quadratic = quad,
               doses = sort(unique(dd$x)), addArgs=list(off = 0.2*max(dd$x)))
obj <- MCPMod(x,y, dd, models=models, addCovars = ~cov1+cov2, alpha = 0.2, Delta=0.5)
plot(obj, plotData="meansCI", CI=TRUE)
obj <- MCPMod(dd$x,dd$y, models=models, addCovars = ~1, alpha = 0.2, Delta=0.5)
plot(obj, plotData="meansCI", CI=TRUE)

########################################################################
#### some binary test cases
getDFdataSet.bin <- function(doses, n){
  ll <- getDosSampSiz()
  ll$n <- ll$n+10
  e0 <- rnorm(1, 0, sqrt(3.28))
  eMax <- rnorm(1, 0, 5)
  dosVec <- rep(ll$doses, ll$n)
  if(runif(1)<0.3){
    mn <- betaMod(dosVec, e0 = e0, eMax = eMax, delta1=runif(1, 0.5, 5),
                  delta2=runif(1, 0.5, 5), scal=1.2*max(ll$doses))
  } else {
    mn <- logistic(dosVec, e0 = e0, eMax = eMax,
                   ed50=runif(1, 0.05*max(ll$doses), 1.5*max(ll$doses)),
                   delta=runif(1, 0.5, max(ll$doses)/2))
  }
  resp <- rbinom(length(ll$n), ll$n, 1/(1+exp(-mn)))
  aa <- data.frame(dose = ll$doses, resp = resp)
  aa <- data.frame(x= aa$dose, y=aa$resp/ll$n, n=ll$n)
  aa[sample(1:nrow(aa)),]
}

set.seed(1909)
dd <- getDFdataSet.bin()
bet <- guesst(0.9*max(dd$x), p=0.8, "betaMod", scal = 1.2*max(dd$x),
              dMax = 0.7*max(dd$x), Maxd = max(dd$x))
sE <- guesst(c(0.5*max(dd$x), 0.7*max(dd$x)) , p=c(0.5, 0.9), "sigEmax")
models <- Mods(linear = NULL, betaMod = bet, sigEmax = sE,
               doses = sort(unique(dd$x)), addArgs=list(scal = 1.2*max(dd$x)))
logReg <- glm(y~as.factor(x)-1, family=binomial, data=dd, weights = n)
dePar <- coef(logReg)
vCov <- vcov(logReg)
dose <- sort(unique(dd$x))
obj <- MCPMod(dose, dePar, S=vCov, models=models, type="general",
              df=Inf, alpha = 0.3, Delta = 0.5)
plot(obj, plotData="meansCI", CI=TRUE)

set.seed(1997)
dd <- getDFdataSet.bin()
bet <- guesst(0.9*max(dd$x), p=0.8, "betaMod", scal = 1.2*max(dd$x),
              dMax = 0.7*max(dd$x), Maxd = max(dd$x))
sE <- guesst(c(0.5*max(dd$x), 0.7*max(dd$x)) , p=c(0.5, 0.9), "sigEmax")
models <- Mods(linear = NULL, betaMod = bet, sigEmax = sE, direction = "decreasing",
               addArgs=list(scal = 1.2*max(dd$x)), doses = sort(unique(dd$x)))
logReg <- glm(y~as.factor(x)-1, family=binomial, data=dd, weights = n)
dePar <- coef(logReg)
vCov <- vcov(logReg)
dose <- sort(unique(dd$x))
obj <- MCPMod(dose, dePar, S=vCov, models=models, type = "general",
              pVal = TRUE, df=Inf, alpha = 0.2, Delta = 0.5)
plot(obj, plotData="meansCI", CI=TRUE)

set.seed(1)
dd <- getDFdataSet.bin()
bet <- guesst(0.9*max(dd$x), p=0.8, "betaMod", scal = 1.2*max(dd$x),
              dMax = 0.7*max(dd$x), Maxd = max(dd$x))
sE <- guesst(c(0.5*max(dd$x), 0.7*max(dd$x)) , p=c(0.5, 0.9), "sigEmax")
models <- Mods(linear = NULL, betaMod = bet, sigEmax = sE,
               doses = sort(unique(dd$x)), addArgs=list(scal = 1.2*max(dd$x)))
logReg <- glm(y~as.factor(x)-1, family=binomial, data=dd, weights = n)
dePar <- coef(logReg)
vCov <- vcov(logReg)
dose <- sort(unique(dd$x))
obj <- MCPMod(dose, dePar, S=vCov, models=models, type = "general",
              pVal = T, df=Inf, alpha = 0.4, Delta = 0.5)
plot(obj, plotData="meansCI", CI=TRUE)

########################################################################
## placebo-adjusted scale
## two blocks below should give equal results
data(IBScovars)
modlist <- Mods(emax = 0.05, linear = NULL, linInt = c(0, 1, 1, 1),
                doses = c(0, 1, 2, 3, 4))
ancMod <- lm(resp~factor(dose)+gender, data=IBScovars)
drEst <- coef(ancMod)[2:5]
vc <- vcov(ancMod)[2:5, 2:5]
doses <- (1:4)
obj <- MCPMod(doses, drEst, S = vc, models = modlist, placAdj = TRUE,
              type = "general", df = Inf, Delta=0.5)
plot(obj, plotData="meansCI", CI=TRUE)
## now unordered
ord <- c(3,4,1,2)
drEst2 <- drEst[ord]
vc2 <- vc[ord,ord]
doses2 <- doses[ord]
obj <- MCPMod(doses2, drEst2, S = vc2, models = modlist, placAdj = TRUE,
              type = "general", df = Inf, Delta = 0.5)
plot(obj, plotData="meansCI", CI=TRUE)
## unadjusted scale
## two blocks below should give equal results
ancMod <- lm(resp~factor(dose)-1, data=IBScovars)
drEst <- coef(ancMod)
vc <- vcov(ancMod)
doses <- 0:4
obj <- MCPMod(doses, drEst, S = vc, models = modlist,
              type = "general", df = Inf, Delta = 0.5)
plot(obj, plotData="meansCI", CI=TRUE)
ord <- c(3,4,1,2,5)
drEst2 <- drEst[ord]
vc2 <- vc[ord,ord]
doses2 <- doses[ord]
obj <- MCPMod(doses2, drEst2, S = vc2, models = modlist,
              type = "general", df = Inf, Delta = 0.5)
plot(obj, plotData="meansCI", CI=TRUE)

########################################################################
## File: DoseFinding/tests/testsplotDRMod.R
########################################################################
require("DoseFinding")
## ## commented out for time reasons
## resp <- c(1.23, 1.31, 1.32, 1.36, 1.38)
## dose <- c(0, 1.25, 2.5, 5, 10)
## sdev <- c(0.015, 0.014, 0.015, 0.016, 0.015)
## V <- diag(sdev^2)
## mods <- Mods(emax=c(2.65, 12.5), linear=NULL, linInt = c(1, 1, 1, 1),
##              logistic=c(29, 9.55), quadratic = -0.0075,
##              doses=dose)
## mmfit <- MCPMod(dose, resp, S=V, type="general", models=mods, Delta=0.12)
## efit <- mmfit$mods$emax
## plot(efit, plotData = "meansCI", CI=TRUE)
## plot(efit, plotData = "meansCI", CI=FALSE)
## ## plot(efit, plotData = "raw") # should throw an error
## plot(efit, plotData = "means", CI = TRUE)
## plot(efit, plotData = "means", CI = FALSE)
## plot(efit, plotData = "none", CI =TRUE)
## plot(efit, plotData = "none", CI =FALSE)
## plot(mmfit, plotData = "meansCI", CI=TRUE)
## plot(mmfit, plotData = "meansCI", CI=FALSE)
## ## plot(mmfit, plotData = "raw") # should throw an error
## plot(mmfit, plotData = "means", CI = TRUE)
## plot(mmfit, plotData = "means", CI = FALSE)
## plot(mmfit, plotData = "none", CI =TRUE)
## plot(mmfit, plotData = "none", CI =FALSE)

## data(IBScovars)
## models <- Mods(emax = c(0.5, 1), betaMod=c(1,1), linear = NULL, doses=c(0,4))
## mmfit <- MCPMod(dose, resp, data=IBScovars, models=models, Delta=0.12)
## efit <- mmfit$mods$emax
## plot(efit, plotData = "meansCI", CI=TRUE)
## plot(efit, plotData = "meansCI", CI=FALSE)
## plot(efit, plotData = "raw", CI=FALSE)
## plot(efit, plotData = "raw", CI=TRUE)
## plot(efit, plotData = "means", CI = TRUE)
## plot(efit, plotData = "means", CI = FALSE)
## plot(efit, plotData = "none", CI =TRUE)
## plot(efit, plotData = "none", CI =FALSE)
## plot(mmfit, plotData = "meansCI", CI=TRUE)
## plot(mmfit, plotData = "meansCI", CI=FALSE)
## plot(mmfit, plotData = "raw", CI=TRUE)
## plot(mmfit, plotData = "raw", CI=FALSE)
## plot(mmfit, plotData = "means", CI = TRUE)
## plot(mmfit, plotData = "means", CI = FALSE)
## plot(mmfit, plotData = "none", CI =TRUE)
## plot(mmfit, plotData = "none", CI =FALSE)

## data(IBScovars)
## models <- Mods(emax = c(0.5, 1), betaMod=c(1,1), linInt = c(1, 1, 1, 1),
##                linear = NULL, doses=0:4)
## anovaMod <- lm(resp~factor(dose)+gender, data=IBScovars)
## drFit <- coef(anovaMod)[2:5] # placebo adjusted estimates at doses
## vCov <- vcov(anovaMod)[2:5,2:5]
## dose <- sort(unique(IBScovars$dose))[-1]
## mmfit <- MCPMod(dose, drFit, S=vCov, type = "general", models=models, Delta=0.12, placAdj=TRUE)
## efit <- mmfit$mods$emax
## plot(efit, plotData = "meansCI", CI=TRUE)
## plot(efit, plotData = "meansCI", CI=FALSE)
## ## plot(efit, plotData = "raw", CI=FALSE) # should throw an error
## ## plot(efit, plotData = "raw", CI=TRUE) # should throw an error
## plot(efit, plotData = "means", CI = TRUE)
## plot(efit, plotData = "means", CI = FALSE)
## plot(efit, plotData = "none", CI =TRUE)
## plot(efit, plotData = "none", CI =FALSE)
## plot(mmfit, plotData = "meansCI", CI=TRUE)
## plot(mmfit, plotData = "meansCI", CI=FALSE)
## ## plot(mmfit, plotData = "raw", CI=TRUE) # should throw an error
## ## plot(mmfit, plotData = "raw", CI=FALSE) # should throw an error
## plot(mmfit, plotData = "means", CI = TRUE)
## plot(mmfit, plotData = "means", CI = FALSE)
## plot(mmfit, plotData = "none", CI =TRUE)
## plot(mmfit, plotData = "none", CI =FALSE)

## ## neurodeg example (in 0.9-6 not all means were visible)
## doses <- c(0,1,3,10,30)
## muH <- c(-5.099, -4.581, -3.22, -2.879, -3.52) # estimated slope
## covH <- structure(c(0.149, 0.009, 0.009, 0.009, 0.009, 0.009, 0.149,
##                     0.009, 0.009, 0.009, 0.009, 0.009, 0.149, 0.009,
##                     0.009, 0.009, 0.009, 0.009, 0.149, 0.009, 0.009,
##                     0.009, 0.009, 0.009, 0.149), .Dim = c(5L, 5L))
## fit <- fitMod(doses, muH, S=covH, model="emax", type = "general")
## plot(fit, plotData="meansCI", CI=TRUE)

########################################################################
## File: DoseFinding/tests/testssampSize.R
########################################################################
## require("DoseFinding")
## S <- diag(rep(1,4))/c(5,6,7,8)
## contrastModels <- Mods(emax=c(0.25,0.01),exponential=c(1.5),
##                        doses=seq(0,1,length=5))
## contMat <- optContr(contrastModels,c(0.25,0.5,0.75,1),S=S,placAdj=TRUE)$contMat
## ## power scenario 1
## models <- Mods(linear=NULL,emax=c(0.25,0.01),doses=seq(0,1,length=5),
##                placEff=c(0.5,0.6,0.7),maxEff=0.5)
## power1 <- powMCT(contMat, alpha = 0.025, altModels=models, S=S, placAdj = TRUE,
##                  alternative = c("one.sided"),df=Inf, critV = TRUE)
## ## power scenario 2: placebo Effect smaller for linear model.
## models <- Mods(linear=NULL,emax=c(0.25,0.01),
##                doses=seq(0,1,length=5),placEff=c(0.1,0.6,0.7),maxEff=0.5)
## power2 <- powMCT(contMat, alpha = 0.025, altModels=models, S=S, placAdj = TRUE,
##                  alternative = c("one.sided"),df=Inf, critV = TRUE)
## ## resulting values:
## any(abs(power1-power2) > 0.05)

## ## everything commented out here, for time reasons
## ## first define the target function
## ## first calculate the power to detect all of the models in the candidate set
## fmodels <- Mods(linear = NULL, emax = c(25),
##                 logistic = c(50, 10.88111), exponential=c(85),
##                 betaMod=matrix(c(0.33,2.31,1.39,1.39), byrow=TRUE, nrow=2),
##                 doses = c(0,10,25,50,100,150), placEff=0, maxEff=0.4,
##                 addArgs = list(scal=200))
## ## contrast matrix to use
## contMat <- optContr(fmodels, w=1)
## ## this function calculates the power under each model and then returns
## ## the average power under all models
## tFunc <- function(n){
##   powVals <- powMCT(contMat, altModels=fmodels, n=n, sigma = 1,
##                     alpha=0.05)
##   mean(powVals)
## }
## ## assume we want to achieve 80\% average power over the selected shapes
## ## and want to use a balanced allocations
## sSize <- sampSize(upperN = 80, targFunc = tFunc, target=0.8,
##                   alRatio = rep(1,6), verbose = TRUE)
## ## Now the same using the convenience sampSizeMCT function
## sampSizeMCT(upperN=80, contMat = contMat, sigma = 1, altModels=fmodels,
##             power = 0.8, alRatio = rep(1, 6), alpha = 0.05)
## ## Alternatively one can also specify an S matrix
## ## covariance matrix in one observation (6 total observation result in a
## ## variance of 1 in each group)
## S <- 6*diag(6)
## ## this uses df = Inf, hence a slightly smaller sample size results
## sampSizeMCT(upperN=500, contMat = contMat, S=S, altModels=fmodels,
##             power = 0.8, alRatio = rep(1, 6), alpha = 0.05, Ntype = "total")

## ## targN examples
## ## first calculate the power to detect all of the models in the candidate set
## fmodels <- Mods(linear = NULL, emax = c(25),
##                 logistic = c(50, 10.88111), exponential=c(85),
##                 betaMod=matrix(c(0.33,2.31,1.39,1.39), byrow=TRUE, nrow=2),
##                 doses = c(0,10,25,50,100,150), placEff=0, maxEff=0.4,
##                 addArgs = list(scal=200))
## ## corresponding contrast matrix
## contMat <- optContr(fmodels, w=1)
## ## define target function
## tFunc <- function(n){
##   powMCT(contMat, altModels=fmodels, n=n, sigma = 1, alpha=0.05)
## }
## powVsN <- targN(upperN = 100, lowerN = 10, step = 10, tFunc,
##                 alRatio = rep(1, 6))
## plot(powVsN)
## ## the same can be achieved using the convenience powN function
## ## without the need to specify a target function
## res <- powN(upperN = 100, lowerN=10, step = 10, contMat = contMat,
##             sigma = 1, altModels = fmodels, alpha = 0.05, alRatio = rep(1, 6))
## ## the same but with S (but using df=Inf)
## S <- 6*diag(6)
## res1 <- powN(upperN=80*6, lowerN=60, step=60, contMat = contMat,
##              S=S, altModels = fmodels, alRatio = rep(1, 6),
##              alpha = 0.05, sumFct = "mean", Ntype = "total")
## ## different allocation ratio
## res2 <- powN(upperN=80, lowerN=10, step=10, contMat = contMat,
##              sigma = 1, altModels=fmodels, alRatio = c(1, rep(0.5,4), 1),
##              alpha = 0.05, sumFct = "mean")
## ## powMCT(contMat, n = c(100,rep(50,4),100), sigma = 1, altModels = fmodels,
## ##        alpha = 0.05)
## ## iterating the total sample size
## res3 <- powN(upperN=600, lowerN=100, step=25, contMat = contMat,
##              sigma = 1, altModels=fmodels, alRatio = rep(1, 6),
##              alpha = 0.05, sumFct = "mean", Ntype = "total")
## ## powMCT(contMat, n = c(50,rep(50,4),50), sigma = 1, altModels = fmodels,
## ##        alpha = 0.05)
## ## iterating the total sample size, with unbalanced allocations
## res4 <- powN(upperN=600, lowerN=100, step=25, contMat = contMat,
##              sigma = 1, altModels=fmodels, alRatio = c(1, rep(0.5,4), 1),
##              alpha = 0.05, sumFct = "mean", Ntype = "total")
## ## powMCT(contMat, n = c(100,rep(50,4),100), sigma = 1, altModels = fmodels,
## ##        alpha = 0.05)

########################################################################
## File: DoseFinding/tests/testsoptContr.R
########################################################################
## ## commented out for time (and dependency reasons)
require("DoseFinding")
if(!(require("quadprog") & require("Rsolnp")))
  stop("need packages quadprog and Rsolnp to run these tests")
## ## calculation of optimal contrast by enumerating all active sets
## allActiveSets <- function(S, mu, mult){
##   k <- length(mu)
##   CC <- cbind(-1, diag(k - 1))
##   SPa <- CC %*% S %*% t(CC)
##   muPa <- as.numeric(CC %*% mu)
##   ## generate all possible active sets
##   mat <- matrix(nrow = 2^(k-1), ncol = (k-1))
##   for(i in 1:(k-1))
##     mat[,i] <- rep(rep(c(FALSE,TRUE), each=2^(i-1)), 2^((k-1)-i))
##   val <- numeric(2^(k-1))
##   feasible <- logical(2^(k-1))
##   cont <- matrix(nrow = 2^(k-1), ncol = (k-1))
##   for(i in 1:(2^(k-1))){
##     nonzero <- mat[i,]
##     if(sum(nonzero) > 0){
##       cont[i,!nonzero] <- 0
##       cont[i,nonzero] <- solve(SPa[nonzero, nonzero]) %*% muPa[nonzero]
##       feasible[i] <- all(mult*cont[i,] >= 0)
##       contrast <- c(-sum(cont[i,]), cont[i,])
##       val[i] <- as.numeric(t(contrast)%*%mu/sqrt(t(contrast)%*%S%*%contrast))
##     }
##   }
##   if(!any(feasible))
##     return(rep(NA, k))
##   mm <- max(val[which(feasible)])
##   c(-sum(cont[val == mm,]), cont[val == mm,])
## }
## ## helper functions
## getStand <- function(x)
##   x/sqrt(sum(x^2))
## getNCP <- function(cont, mu, S)
##   as.numeric(t(cont)%*%mu/sqrt(t(cont)%*%S%*%cont))
## set.seed(1)
## ncp1 <- ncp2 <- ncp3 <- ncp4 <- ncp5 <- numeric(1000)
## for(i in 1:1000){
##   ## simulate mean and covariance matrix
##   kk <- round(runif(1, 4, 10))
##   A <- matrix(runif(kk^2,-1,1),kk,kk)
##   S <- crossprod(A)+diag(kk)
##   mult <- sign(rnorm(1))
##   mu <- mult*sort(rnorm(kk, 1:kk, 1))
##   ## unconstrained solution
##   ones <- rep(1,kk)
##   unConst <- solve(S)%*%(mu - t(mu)%*%solve(S)%*%ones/(t(ones)%*%solve(S)%*%ones))
##   cont1 <- getStand(unConst)
##   ## function from DoseFinding package
##   cont2 <- DoseFinding:::constOptC(mu, solve(S), placAdj=FALSE,
##                                    ifelse(mult == 1, "increasing", "decreasing"))
##   ## alternative solution using quadratic programming
##   D <- S
##   d <- rep(0,kk)
##   tA <- rbind(rep(1, kk),
##               mu,
##               mult*diag(kk)*c(-1,rep(1,kk-1)))
##   A <- t(tA)
##   bvec <- c(0,1,rep(0,kk))
##   rr <- solve.QP(D, d, A, bvec, meq=2)
##   cont3 <- getStand(rr$solution)
##   ## using solnp
##   LB <- rep(0, kk-1)
##   UB <- rep(20, kk-1)
##   strt <- rep(1, kk-1)
##   mgetNCP <- function(x, ...){
##     cont <- c(-sum(x), x)
##     -getNCP(cont, ...)
##   }
##   res <- solnp(strt, mgetNCP, mu=mu, S=S,
##                LB=LB, UB=UB,
##                control = list(trace = 0))
##   out <- c(-sum(res$pars), res$pars)
##   cont4 <- getStand(out)
##   ## using
##   cont5 <- allActiveSets(S=S, mu=mu, mult=mult)
##   ## compare optimized non-centrality parameters
##   ncp1[i] <- getNCP(cont1, mu, S)
##   ncp2[i] <- getNCP(cont2, mu, S)
##   ncp3[i] <- getNCP(cont3, mu, S)
##   ncp4[i] <- getNCP(cont4, mu, S)
##   ncp5[i] <- getNCP(cont5, mu, S)
## }
## sapply(list(ncp1, ncp2, ncp3, ncp4, ncp5), quantile)

## ## tests whether constant shapes (possible with linInt) are handled correctly
## data(biom)
## ## define shapes for which to calculate optimal contrasts
## modlist <- Mods(emax = 0.05, linear = NULL, logistic = c(0.5, 0.1),
##                 linInt = rbind(c(0, 0, 0, 1), c(0, 1, 1, 1)),
##                 doses = c(0, 0.05, 0.2, 0.6, 1), placEff = 1)
## optContr(modlist, w=1, doses=c(0.05), placAdj=TRUE, type = "u")
## optContr(modlist, w=1, doses=c(0.05), placAdj=TRUE, type = "c")
## optContr(modlist, w=1, doses=c(0.05,0.5), placAdj=TRUE, type = "u")
## optContr(modlist, w=1, doses=c(0.05,0.5), placAdj=TRUE, type = "c")
## optContr(modlist, w=1, doses=c(0,0.05), placAdj=FALSE, type = "u")
## optContr(modlist, w=1, doses=c(0,0.05), placAdj=FALSE, type = "c")
## optContr(modlist, w=1, doses=c(0,0.05,0.5), placAdj=FALSE, type = "u")
## optContr(modlist, w=1, doses=c(0,0.05,0.5), placAdj=FALSE, type = "c")
## modlist2 <- Mods(linInt = rbind(c(0, 1, 1, 1), c(0,0,0,1)),
##                  doses = c(0, 0.05, 0.2, 0.6, 1), placEff = 1)
## ## all of these should throw an error
## optContr(modlist2, w=1, doses=c(0.05), placAdj=TRUE, type = "u")
## optContr(modlist2, w=1, doses=c(0.05), placAdj=TRUE, type = "c")
## optContr(modlist2, w=1, doses=c(0,0.05), placAdj=FALSE, type = "u")
## optContr(modlist2, w=1, doses=c(0,0.05), placAdj=FALSE, type = "c")
## ## these should work
## optContr(modlist2, w=1, doses=c(0.05,0.5), placAdj=TRUE, type = "u")
## optContr(modlist2, w=1, doses=c(0.05,0.5), placAdj=TRUE, type = "c")
## optContr(modlist2, w=1, doses=c(0,0.05,0.5), placAdj=FALSE, type = "u")
## optContr(modlist2, w=1, doses=c(0,0.05,0.5), placAdj=FALSE, type = "c")

########################################################################
## File: DoseFinding/tests/testsDesign.R
########################################################################
require("DoseFinding")

## Some examples from the JASA paper (for validation)

########################################################################
# Emax model p.1228 l. 5
fMod <- Mods(emax = 25, doses = c(0,150), placEff=0, maxEff=0.4)
fMod$emax[2] <- 0.6666667
doses <- c(0, 18.75, 150)
probs <- 1
deswgts1 <- optDesign(fMod, probs, doses, Delta=0.2, designCrit = "TD",
                      optimizer="Nelder-Mead")
deswgts2 <- optDesign(fMod, probs, doses, Delta=0.2, designCrit = "TD",
                      optimizer="nlminb")
## efficiency compared to standard design (last column)
crt <- calcCrit(rep(1/6, 6), fMod, probs, c(0, 10, 25, 50, 100, 150),
                Delta=0.2, designCrit = "TD")
exp(deswgts1$crit - crt)
# Paper p. 1228 l. 2
fMod <- Mods(emax = 25, doses = c(0,150), placEff=0, maxEff=0.4)
doses <- c(0, 18.75, 150)
probs <- 1
deswgts <- optDesign(fMod, probs, doses, Delta=0.2, designCrit = "TD")
deswgts

########################################################################
#### exponential
# Paper p.1229 2nd line
fMod <- Mods(exponential=85, doses = c(0, 150), placEff=0, maxEff=0.4)
doses <- c(0, 50, 104.52, 150)
probs <- 1
deswgts <- optDesign(fMod, probs, doses, Delta=0.2, designCrit = "TD",
                     optimizer="Nelder-Mead")
deswgts
## efficiency compared to standard design (last column)
crt <- calcCrit(rep(1/6, 6), fMod, probs, c(0, 10, 25, 50, 100, 150),
                Delta=0.2, designCrit = "TD")
exp(deswgts$crit - crt)
# Paper p.1229 1st line
fMod <- Mods(exponential=65, doses=c(0, 150), placEff=0, maxEff=0.4)
fMod$exponential[2] <- 0.08264711
doses <- c(0, 101.57, 150)
probs <- 1
deswgts <- optDesign(fMod, probs, doses, Delta=0.2, designCrit = "TD")
deswgts

########################################################################
#### Logistic
#### Paper: p.1230 7th line
fMod <- Mods(logistic=c(50, 10.881), doses = c(0, 150), placEff=0, maxEff=0.4)
doses <- c(0, 37.29, 64.44, 150)
probs <- 1
deswgts <- optDesign(fMod, probs, doses, Delta=0.05, designCrit = "TD")
deswgts
## efficiency compared to standard design (last column)
crt <- calcCrit(rep(1/6, 6), fMod, probs, c(0, 10, 25, 50, 100, 150),
                Delta=0.05, designCrit = "TD")
exp(deswgts$crit - crt)
#### Paper p.1230 line 1
fMod <- Mods(logistic=c(50, 10.881), doses = c(0, 150), placEff=0, maxEff=0.4)
doses <- c(0, 50.22)
probs <- 1
deswgts <- optDesign(fMod, probs, doses, Delta=0.2, designCrit = "TD")
deswgts

########################################################################
#### beta
# Paper p.1230 line 5
fMod <- Mods(betaMod = c(0.33, 2.31), doses = c(0,150), addArgs=list(scal=200),
             placEff=0, maxEff=0.4)
doses <- c(0, 0.49, 25.2, 108.07, 150)
probs <- 1
deswgts <- optDesign(fMod, probs, doses, Delta=0.1, control=list(maxit=1000),
                     designCrit = "TD")
deswgts
## efficiency compared to standard design (last column)
crt <- calcCrit(rep(1/6, 6), fMod, probs, c(0, 10, 25, 50, 100, 150),
                Delta=0.1, designCrit = "TD")
exp(deswgts$crit - crt)
# Paper p. 1230 line 10
fMod <- Mods(betaMod = c(1.39, 1.39), doses=c(0, 150), addArgs=list(scal=200),
             placEff=0, maxEff=0.4)
#doses <- c(0, 10, 25, 50, 100, 150)
doses <- c(0, 27, 94.89, 150)
probs <- 1
deswgts <- optDesign(fMod, probs, doses, Delta=0.1, designCrit = "TD")
deswgts
## efficiency compared to standard design (last column)
crt <- calcCrit(rep(1/6, 6), fMod, probs, c(0, 10, 25, 50, 100, 150),
                Delta=0.1, designCrit = "TD")
exp(deswgts$crit - crt)
# Paper p. 1230 line 1
fMod <- Mods(betaMod = c(0.23, 2.31), doses=c(0,150), addArgs=list(scal=200),
             placEff=0, maxEff=0.4)
doses <- c(0, 0.35, 150)
probs <- 1
deswgts <- optDesign(fMod, probs, doses, Delta=0.2, designCrit = "TD")
deswgts
## efficiency compared to standard design (last column)
crt <- calcCrit(rep(1/6, 6), fMod, probs, c(0, 10, 25, 50, 100, 150),
                Delta=0.2, designCrit = "TD")
exp(deswgts$crit - crt)

########################################################################
#### mixed Paper p. 1233, l. 2 (note the off and probably also the
#### scal parameter were treated as unknown in this example in the paper,
#### hence the results need not be consistent with the paper)
doses <- c(0, 9.9, 49.5, 115.4, 150)
fMod <- Mods(linear = NULL, emax = 25, exponential = 85, linlog = NULL,
             logistic = c(50, 10.8811), doses=doses, addArgs=list(off=1),
             placEff=0, maxEff=0.4)
probs <- rep(1/5, 5)
deswgts <- optDesign(fMod, probs, doses, Delta=0.2, designCrit = "TD")
deswgts2 <- optDesign(fMod, probs, doses, Delta=0.2, optimizer = "nlminb",
                      designCrit = "TD")

# Some other examples
########################################################################
doses <- c(0, 62.5, 125, 250, 500)
fMod <- Mods(emax = c(25, 107.14), linear = NULL, logistic = c(150, 45.51),
             betaMod = c(1,1), doses = doses, addArgs=list(scal=1.2*500),
             placEff=60, maxEff=280)
probs <- rep(0.2, length=5)
deswgts <- optDesign(fMod, probs, Delta=200, designCrit = "TD")

########################################################################
#### using already allocated patients
fMod <- Mods(betaMod = c(0.33, 2.31), doses = c(0,150), addArgs=list(scal=200),
             placEff=0, maxEff=0.4)
doses <- c(0, 0.49, 25.2, 108.07, 150)
probs <- 1
# no previously allocated patients
deswgts <- optDesign(fMod, probs, doses=doses, Delta=0.1,
                     control=list(maxit=1000), designCrit = "TD")
# now use previously allocated patients
nold <- c(45, 50, 0, 0, 0)
deswgts2 <- optDesign(fMod, probs, doses=doses, Delta=0.1, n=30,
                      control=list(maxit=1000), nold=nold, designCrit = "TD")
# the overall design
(30*deswgts2$design+nold)/(30+sum(nold))
deswgts$design

########################################################################
#### Dopt Examples
doses <- c(0, 62.5, 125, 250, 500)
fMod <- Mods(emax = c(25, 107.14), logistic = c(150, 45.51), linear = NULL,
             betaMod = c(1,1), doses=doses, addArgs=list(scal=500*1.2),
             placEff=60, maxEff=280)
probs <- rep(0.2, 5)
des1 <- optDesign(fMod, probs, doses, Delta = 200, scal = 500*1.2, designCrit = "TD")
des2 <- optDesign(fMod, probs, doses, Delta = 200, scal = 500*1.2, designCrit = "Dopt")
des3 <- optDesign(fMod, probs, doses, Delta = 200, scal = 500*1.2, designCrit = "Dopt&TD")

########################################################################
#### optimizer = "exact" and "solnp"
doses <- c(0, 62.5, 125, 250, 500)
fMod <- Mods(emax = c(25, 107.14), logistic = c(150, 45.51), linear = NULL,
             betaMod = c(1,1), doses=doses, addArgs=list(scal=500*1.2),
             placEff=60, maxEff=280)
probs <- rep(0.2, 5)
des41 <- optDesign(fMod, probs, doses=doses, Delta = 200, n = 10,
                   optimizer = "exact", lowbnd = c(0.3,0,0,0,0), designCrit = "TD")
des42 <- optDesign(fMod, probs, doses=doses, Delta = 200, optimizer = "solnp",
                   designCrit = "TD", lowbnd = c(0.1,0,0,0,0))
des51 <- optDesign(fMod, probs, doses=doses, Delta = 200, n = 10,
                   designCrit = "Dopt", optimizer = "exact", uppbnd = rep(0.5,5))
des52 <- optDesign(fMod, probs, doses=doses, Delta = 200,
                   designCrit = "Dopt", optimizer = "solnp", uppbnd = rep(0.5,5))
des61 <- optDesign(fMod, probs, doses=doses, Delta = 200, n = 10,
                   optimizer = "exact", designCrit = "Dopt&TD")
des62 <- optDesign(fMod, probs, doses=doses, Delta = 200,
                   optimizer = "solnp", designCrit = "Dopt&TD")

########################################################################
#### Example from Padmanabhan and Dragalin, Biometrical Journal 52 (2010)
#### p. 836-852
fm <- Mods(sigEmax = c(4, 5), doses = 0:8, placEff=0, maxEff=-1.65)
fm$sigEmax <- c(0, -1.70, 4, 5)
## compare to Figure 1, p. 841
desSED <- optDesign(fm, 1, designCrit="Dopt", optimizer = "solnp")
desSEM <- optDesign(fm, 1, Delta = 1.3, designCrit = "TD", optimizer = "solnp")
## designs underlying Table 2, p. 843 (from an e-mail of Vlad)
## I cannot reproduce the displayed efficiencies exactly
## (most probably due to numerical round-off)
## LDoD
## [1,] 0.246 0.141 0.123 0.000 0.000 0.240 0 0 0.250
## [2,] 0.248 0.233 0.061 0.210 0.000 0.000 0 0 0.248
## [3,] 0.246 0.000 0.000 0.223 0.081 0.204 0 0 0.246
## [4,] 0.250 0.247 0.045 0.210 0.000 0.000 0 0 0.248
## [6,] 0.250 0.249 0.192 0.062 0.000 0.000 0 0 0.246
## MEDoD
## [1,] 0.49 0.01 0.00 0.00 0.00 0.00 0.36 0.14 0
## [2,] 0.49 0.02 0.00 0.15 0.35 0.00 0.00 0.00 0
## [3,] 0.23 0.26 0.01 0.00 0.00 0.46 0.04 0.00 0
## [4,] 0.50 0.00 0.49 0.01 0.00 0.00 0.00 0.00 0
## [6,] 0.49 0.01 0.47 0.02 0.00 0.00 0.00 0.00 0
doses <- 0:8
fm <- list()
fm[[1]] <- Mods(sigEmax = c(23.07, 1.18), doses=doses, placEff=0, maxEff=-1.65);fm[[1]]$sigEmax <- c(0, -7.29, 23.07, 1.18)
fm[[2]] <- Mods(sigEmax = c(2, 2.22), doses=doses, placEff=0, maxEff=-1.65);fm[[2]]$sigEmax <- c(-0.08, -1.71, 2, 2.22)
fm[[3]] <- Mods(sigEmax = c(4, 5), doses=doses, placEff=0, maxEff=-1.65);fm[[3]]$sigEmax <- c(0, -1.70, 4, 5)
fm[[4]] <- Mods(sigEmax = c(0.79, 1), doses=doses, placEff=0, maxEff=-1.65);fm[[4]]$sigEmax <- c(0, -1.81, 0.79, 1.00)
fm[[5]] <- Mods(sigEmax = c(0.74, 1.18), doses=doses, placEff=0, maxEff=-1.65);fm[[5]]$sigEmax <- c(-0.03, -1.72, 0.74, 1.18)
desD <- desM <- matrix(ncol = 9, nrow = 5)
for(i in 1:5){
  cc1 <- optDesign(fm[[i]], 1, doses=doses, designCrit = "TD",
                   optimizer = "solnp", Delta = 1.3)
  cc2 <- optDesign(fm[[i]], 1, doses=doses, designCrit="Dopt", optimizer = "solnp")
  desM[i,] <- cc1$design
  desD[i,] <- cc2$design
}
round(desD, 3)
round(desM, 2)
## compare criterion for TD design under model 2
crDrag <- calcCrit(c(0.49,0.02,0,0.15,0.34,0,0,0,0), models=fm[[2]], probs=1,
                   doses=doses, designCrit="TD", Delta=1.3)
crDF <- optDesign(fm[[i]], 1, doses=doses, designCrit = "TD",
                  optimizer = "solnp", Delta = 1.3)$crit
exp(crDF-crDrag) ## design calculated by P and Dragalin only has 88% efficiency?

################################################################################
#### look at standardized Dopt and Dopt&TD criteria
doses <- c(0, 62.5, 125, 250, 500)
fMod1 <- Mods(sigEmax = rbind(c(25, 5), c(107.14, 2)), doses=doses,
              placEff=60, maxEff=280)
fMod2 <- Mods(sigEmax = rbind(c(25, 5), c(107.14, 2)), linear = NULL,
              doses=doses, placEff=60, maxEff=280)
w1 <- rep(0.5, 2)
w2 <- rep(1/3, 3)
## des1 and des2 should be exactly the same
des1 <- optDesign(fMod1, w1, doses, designCrit = "Dopt", standDopt = FALSE)
des2 <- optDesign(fMod1, w1, doses, designCrit = "Dopt", standDopt = TRUE)
## des1 and des2 should be different (as linear and emax have
## different number of parameters)
des1 <- optDesign(fMod2, w2, doses, designCrit = "Dopt",
                  standDopt = FALSE, optimizer = "solnp")
des2 <- optDesign(fMod2, w2, doses, designCrit = "Dopt",
                  standDopt = TRUE, optimizer = "solnp")
## same with Dopt&TD criterion
## des1 and des2 will differ (due to different scaling
## of Dopt and TD criteria)
des1 <- optDesign(fMod1, w1, doses, designCrit = "Dopt&TD", Delta = 100,
                  standDopt = FALSE, optimizer = "solnp")
des2 <- optDesign(fMod1, w1, doses, designCrit = "Dopt&TD", Delta = 100,
                  standDopt = TRUE, optimizer = "solnp")

########################################################################
#### optimal design for logistic regression
## compare this to Atkinson et al. (2007), p. 400
## theoretically the D-opt design should have weights 0.5,0.5 at points where
## the probability is 0.176 and 1-0.176 (0.3456 and 0.6544 in this case)
doses <- seq(0, 1, length = 21)
fMod <- Mods(linear = NULL, doses=doses, placEff=-5, maxEff = 10)
pp <- 1 # just one model
## by default calculates TD optimal design
mu <- as.numeric(getResp(fMod, doses=doses))
mu <- 1/(1+exp(-mu))
weights <- mu*(1-mu)
des1 <- optDesign(fMod, pp, doses, weights = weights, optimizer = "solnp")
des2 <- optDesign(fMod, pp, doses, designCrit = "TD", Delta=0.2,
                  optimizer = "solnp", weights = weights)
des3 <- optDesign(fMod, pp, doses, Delta=0.2, designCrit = "Dopt&TD",
                  optimizer = "solnp", weights = weights)

########################################################################
#### code using lower and upper bound (prior to version 0.9-6 this caused
#### problems, as the starting value for solnp rep(0.2, 5) was on the
#### boundary; now a feasible starting value is used)
doses <- seq(0, 1, length=5)
nold <- rep(0, times=5)
lowbnd <- c(0.2,0.0,0.0,0.0,0.2)
uppbnd <- c(1.0,0.3,1.0,1.0,1.0)
trueModels <- Mods(linear=NULL, doses=doses, placEff = 0, maxEff = 1)
optDesign(models=trueModels, probs=1, doses=doses, designCrit="Dopt",
          lowbnd=lowbnd, uppbnd=uppbnd)

########################################################################
## TD optimal design for beta model (previously instabilities for
## numerical gradients)
mm <- Mods(betaMod=c(1.5,0.8), doses=seq(0,1,by=0.25), placEff=0, maxEff=1)
optDesign(mm, probs=1, designCrit="TD", Delta=0.5)
## Output from GUI
## placEff=0, maxEff=1
## TD-optimality with Delta= 0.5
## Model: BetaMod with delta1=1.5, delta2=0.8
## Doses  0      0.25   0.5    0.75 1
## Design 0.4895 0.3552 0.1448 0    0.0105

########################################################################
## File: DoseFinding/tests/testsMCT.R
########################################################################
require("DoseFinding")
if(!require("multcomp"))
  stop("need multcomp package to run this test")

########################################################################
#### multContTest
# functions to sample random DF data
getDosSampSiz <- function(){
  # generate dose levels
  mD <- runif(1, 0, 1500)
  nD <- max(rpois(1, 5), 4)
  p <- rgamma(nD, 3)
  p <- cumsum(p/sum(p))
  doses <- signif(c(0, mD*p), 3)
  # sample size allocations
  totSS <- rpois(1, rexp(1, 1/250))
  totSS <- max(totSS, 50)
  p <- rgamma(nD+1, 3);p <- p/sum(p)
  n <- round(p*totSS)
  n[n==0] <- rpois(sum(n==0), 1)+1
  list(doses=doses, n=n)
}
getDFdataSet <- function(doses, n){
  ll <- getDosSampSiz()
  e0 <- rnorm(1, 0, 10)
  eMax <- rgamma(1, abs(e0)*0.5, 0.5)*I(runif(1)<0.25)
  if(eMax > 0){ sig <- eMax/runif(1, 0.5, 5)} else { sig <- rgamma(1, abs(e0)*0.5, 0.5) }
  dosVec <- rep(ll$doses, ll$n)
  if(runif(1)<0.3){
    mnVec <- betaMod(dosVec, e0=e0, eMax=eMax, delta1=runif(1, 0.5, 5),
                     delta2=runif(1, 0.5, 5), scal=1.2*max(ll$doses))
  } else {
    mnVec <- logistic(dosVec, e0 = e0, eMax = eMax,
                      ed50=runif(1, 0.05*max(ll$doses), 1.5*max(ll$doses)),
                      delta=runif(1, 0.5, max(ll$doses)/2))
  }
  resp <- rnorm(sum(ll$n), mnVec, sig)
  N <- sum(ll$n)
  cov1 <- as.factor(rpois(N, 5))
  cov2 <- runif(N, 1, 100)
  aa <- data.frame(x= dosVec, y=resp, cov1=cov1, cov2=cov2)
  aa[sample(1:nrow(aa)),]
}

#### simulate data and compare to output of glht of multcomp package and oldMCPMod function
set.seed(10)
dd <- getDFdataSet()
bet <- guesst(0.9*max(dd$x), p=0.8, "betaMod", scal = 1.2*max(dd$x),
              dMax = 0.7*max(dd$x), Maxd = max(dd$x))
sE <- guesst(c(0.5*max(dd$x), 0.7*max(dd$x)) , p=c(0.5, 0.9), "sigEmax")
models <- Mods(linear = NULL, betaMod = bet, sigEmax = sE,
               doses = sort(unique(dd$x)),
addArgs=list(scal = 1.2*max(dd$x))) obj <- MCTtest(x,y, dd, models=models, addCovars = ~cov1+cov2, pVal = T) dd2 <- dd;dd2$x <- as.factor(dd$x) fit <- lm(y~x+cov1+cov2, data=dd2) mcp <- glht(fit, linfct = mcp(x = t(obj$contMat)), alternative = "greater") summary(mcp) print(obj, digits = 3) obj <- MCTtest(x,y, dd, models=models, addCovars = ~1, pVal = T) dd2 <- dd;dd2$x <- as.factor(dd$x) fit <- lm(y~x, data=dd2) mcp <- glht(fit, linfct = mcp(x = t(obj$contMat)), alternative = "greater") summary(mcp) print(obj, digits = 3) #### different model set set.seed(10) dd <- getDFdataSet() mD <- max(dd$x) lg1 <- guesst(c(0.3*mD, 0.4*mD), c(0.3, 0.9), "logistic") lg2 <- guesst(c(0.3*mD, 0.4*mD), c(0.3, 0.5), "logistic") expo <- guesst(c(0.9*mD), c(0.7), "exponential", Maxd=mD) quad <- guesst(c(0.6*mD), c(1), "quadratic") models <- Mods(linlog = NULL, logistic = rbind(lg1, lg2), exponential = expo, quadratic = quad, doses = sort(unique(dd$x)), addArgs=list(off = 0.2*max(dd$x))) obj <- MCTtest(x,y, dd, models=models, addCovars = ~cov1+cov2, pVal = T) dd2 <- dd;dd2$x <- as.factor(dd$x) fit <- lm(y~x+cov1+cov2, data=dd2) mcp <- glht(fit, linfct = mcp(x = t(obj$contMat)), alternative = "greater") summary(mcp) print(obj, digits = 3) obj <- MCTtest(x,y, dd, models=models, addCovars = ~1, pVal = T) dd2 <- dd;dd2$x <- as.factor(dd$x) fit <- lm(y~x, data=dd2) mcp <- glht(fit, linfct = mcp(x = t(obj$contMat)), alternative = "greater") summary(mcp) print(obj, digits = 3) #### contrast matrix handed over set.seed(23) dd <- getDFdataSet() mD <- max(dd$x) lg1 <- guesst(c(0.3*mD, 0.4*mD), c(0.3, 0.9), "logistic") lg2 <- guesst(c(0.3*mD, 0.4*mD), c(0.3, 0.5), "logistic") expo <- guesst(c(0.9*mD), c(0.7), "exponential", Maxd=mD) quad <- guesst(c(0.6*mD), c(1), "quadratic") models <- Mods(linlog = NULL, logistic = rbind(lg1, lg2), exponential = expo, quadratic = quad, doses = dd$x, addArgs=list(off = 0.2*max(dd$x))) obj <- MCTtest(x,y, dd, models=models, addCovars = ~cov1+cov2, pVal = T) contMat <- obj$contMat obj <- MCTtest(x,y, dd, models=models, addCovars = ~1, pVal = T, contMat = contMat) dd2 <- dd dd2$x <- as.factor(dd2$x) fit <- lm(y~x, data=dd2) mcp <- glht(fit, linfct = mcp(x = t(obj$contMat)), alternative = "greater") summary(mcp) obj ######################################################################## #### some binary test cases getDFdataSet.bin <- function(doses, n){ ll <- getDosSampSiz() ll$n <- ll$n+10 e0 <- rnorm(1, 0, sqrt(3.28)) eMax <- rnorm(1, 0, 5) dosVec <- rep(ll$doses, ll$n) if(runif(1)<0.3){ mn <- betaMod(dosVec, e0 = e0, eMax = eMax, delta1=runif(1, 0.5, 5), delta2=runif(1, 0.5, 5), scal=1.2*max(ll$doses)) } else { mn <- logistic(dosVec, e0 = e0, eMax = eMax, ed50=runif(1, 0.05*max(ll$doses), 1.5*max(ll$doses)), delta=runif(1, 0.5, max(ll$doses)/2)) } resp <- rbinom(length(ll$n), ll$n, 1/(1+exp(-mn))) aa <- data.frame(dose = ll$doses, resp = resp) aa <- data.frame(x= aa$dose, y=aa$resp/ll$n, n=ll$n) aa[sample(1:nrow(aa)),] } set.seed(1909) dd <- getDFdataSet.bin() bet <- guesst(0.9*max(dd$x), p=0.8, "betaMod", scal = 1.2*max(dd$x), dMax = 0.7*max(dd$x), Maxd = max(dd$x)) sE <- guesst(c(0.5*max(dd$x), 0.7*max(dd$x)) , p=c(0.5, 0.9), "sigEmax") models <- Mods(linear = NULL, betaMod = bet, sigEmax = sE, doses = sort(unique(dd$x)), addArgs=list(scal = 1.2*max(dd$x))) logReg <- glm(y~as.factor(x)-1, family=binomial, data=dd, weights = n) dePar <- coef(logReg) vCov <- vcov(logReg) dose <- sort(unique(dd$x)) obj <- MCTtest(dose, dePar, S=vCov, models=models, type="general", df=Inf, pVal = T) dd2 <- 
dd;dd2$x <- as.factor(dd$x) fit <- glm(y~x-1, family = binomial, data=dd2, weights = n) mcp <- glht(fit, linfct = mcp(x = t(obj$contMat)), alternative = "greater") summary(mcp) print(obj, digits = 3) set.seed(1997) dd <- getDFdataSet.bin() bet <- guesst(0.9*max(dd$x), p=0.8, "betaMod", scal = 1.2*max(dd$x), dMax = 0.7*max(dd$x), Maxd = max(dd$x)) sE <- guesst(c(0.5*max(dd$x), 0.7*max(dd$x)) , p=c(0.5, 0.9), "sigEmax") models <- Mods(linear = NULL, betaMod = bet, sigEmax = sE,direction = "decreasing", addArgs=list(scal = 1.2*max(dd$x)), doses = sort(unique(dd$x))) logReg <- glm(y~as.factor(x)-1, family=binomial, data=dd, weights = n) dePar <- coef(logReg) vCov <- vcov(logReg) dose <- sort(unique(dd$x)) obj <- MCTtest(dose, dePar, S=vCov, models=models, type = "general", pVal = TRUE, df=Inf) dd2 <- dd;dd2$x <- as.factor(dd$x) fit <- glm(y~x-1, family = binomial, data=dd2, weights = n) mcp <- glht(fit, linfct = mcp(x = t(obj$contMat)), alternative = "greater") summary(mcp) print(obj, digits = 3) set.seed(1) dd <- getDFdataSet.bin() bet <- guesst(0.9*max(dd$x), p=0.8, "betaMod", scal = 1.2*max(dd$x), dMax = 0.7*max(dd$x), Maxd = max(dd$x)) sE <- guesst(c(0.5*max(dd$x), 0.7*max(dd$x)) , p=c(0.5, 0.9), "sigEmax") models <- Mods(linear = NULL, betaMod = bet, sigEmax = sE, doses = sort(unique(dd$x)), addArgs=list(scal = 1.2*max(dd$x))) logReg <- glm(y~as.factor(x)-1, family=binomial, data=dd, weights = n) dePar <- coef(logReg) vCov <- vcov(logReg) dose <- sort(unique(dd$x)) obj <- MCTtest(dose, dePar, S=vCov, models=models, type = "general", pVal = T, df=Inf) dd2 <- dd;dd2$x <- as.factor(dd$x) fit <- glm(y~x-1, family = binomial, data=dd2, weights = n) mcp <- glht(fit, linfct = mcp(x = t(obj$contMat)), alternative = "greater") summary(mcp) print(obj, digits = 3) ## one-dimensional test set.seed(1) dd <- getDFdataSet.bin() model <- Mods(linear = NULL, doses=sort(unique(dd$x)), addArgs=list(scal = 1.2*max(dd$x))) logReg <- glm(y~as.factor(x)-1, family=binomial, data=dd, weights = n) dePar <- coef(logReg) vCov <- vcov(logReg) dose <- sort(unique(dd$x)) obj <- MCTtest(dose, dePar, S=vCov, models=model, type = "general", pVal = T, df=Inf) dd2 <- dd;dd2$x <- as.factor(dd$x) fit <- glm(y~x-1, family = binomial, data=dd2, weights = n) mcp <- glht(fit, linfct = mcp(x = t(obj$contMat)), alternative = "greater") summary(mcp) print(obj, digits = 3) ######################################################################## ## unordered values in MCTtest ## placebo-adjusted scale ## two blocks below should give equal results data(IBScovars) modlist <- Mods(emax = 0.05, linear = NULL, logistic = c(0.5, 0.1), linInt = c(0, 1, 1, 1), doses = c(0, 1, 2, 3, 4)) ancMod <- lm(resp~factor(dose)+gender, data=IBScovars) drEst <- coef(ancMod)[2:5] vc <- vcov(ancMod)[2:5, 2:5] doses <- 1:4 fitMod(doses, drEst, S=vc, model = "sigEmax", placAdj=TRUE, type = "general") MCTtest(doses, drEst, S = vc, models = modlist, placAdj = TRUE, type = "general", df = Inf) ord <- c(3,4,1,2) drEst2 <- drEst[ord] vc2 <- vc[ord,ord] doses2 <- doses[ord] fitMod(doses2, drEst2, S=vc2, model = "sigEmax", placAdj=TRUE, type = "general") MCTtest(doses2, drEst2, S = vc2, models = modlist, placAdj = TRUE, type = "general", df = Inf) ## unadjusted scale ## two blocks below should give equal results ancMod <- lm(resp~factor(dose)-1, data=IBScovars) drEst <- coef(ancMod) vc <- vcov(ancMod) doses <- 0:4 fitMod(doses, drEst, S=vc, model = "sigEmax", type = "general") MCTtest(doses, drEst, S = vc, models = modlist, type = "general", df = Inf) ord <- 
c(3,4,1,2,5) drEst2 <- drEst[ord] vc2 <- vc[ord,ord] doses2 <- doses[ord] fitMod(doses2, drEst2, S=vc2, model = "sigEmax", type = "general") MCTtest(doses2, drEst2, S = vc2, models = modlist, type = "general", df = Inf) DoseFinding/src/0000755000176200001440000000000012620446430013233 5ustar liggesusersDoseFinding/src/combinations.c0000644000176200001440000000323712620446431016072 0ustar liggesusers/* ####################################################################### ## This program is Open Source Software: you can redistribute it ## and/or modify it under the terms of the GNU General Public License ## as published by the Free Software Foundation, either version 3 of ## the License, or (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program. If not, see http://www.gnu.org/licenses/. */ #include #include /* General idea of the algorithm: It is easier to generate all possible "border" positions (instead of the actual numbers per group). And then count the number of objects between the borders (simple differencing). Example for N=5 and M=3: oo|ooo|o this has border positions 2, 5 and number of objects per group is 2, 3, 1 (2-0, 5-2, 6-5). */ void getcomp(int *out, int *work, int *N, int *B, int *nComp){ int i,j,k,row; for(i=0;i<*nComp;i++){ row = i*(*B+1); /* calculate number of obj in each group from borders */ out[row] = work[0]; for(j=1;j<*B;j++){ out[row+j] = work[j]-work[j-1]; } out[row+*B] = *N-work[*B-1]; /* always increment rightmost number */ work[*B-1] += 1; /* set right numbers to left number */ for(j= *B-1;j>0;j--){ if(work[j] == *N+1){ work[j-1] += 1; for(k=j;k<*B;k++){ work[k] = work[j-1]; } } } } } DoseFinding/src/optDes.c0000644000176200001440000001230412620446431014636 0ustar liggesusers/* ####################################################################### ## This program is Open Source Software: you can redistribute it ## and/or modify it under the terms of the GNU General Public License ## as published by the Free Software Foundation, either version 3 of ## the License, or (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program. If not, see http://www.gnu.org/licenses/. */ #include #include #include #include #include #include #include void rank1vec(double *grad, int *nPar, double *alpha, double *A){ // calculates alpha*grad*grad'+A char uplo='U'; int inc=1; F77_CALL(dsyr)(&uplo, nPar, alpha, grad, &inc, A, nPar); } // calculate design matrix void calcMat(double *grad, int *nPar, double *design, int *nD, double *A, int *incr){ // nD - number of doses (length of design) // nPar - number of parameters = ncol(A) = nrow(A) double gradsub[4]={0.0}; int i,j=0; for(i=0;i<*nD;i++){ for(j=0;j<*nPar;j++){ gradsub[j] = grad[*incr+*nPar*i+j]; } rank1vec(gradsub, nPar, &design[i], A); } // complete symmetric matrix from upper triang. 
part for(i=0;i<*nPar;i++){ for(j=0;j 0){ incgrad+=*nD*nPar[m-1]; incb+=nPar[m-1]; } setzero(A, 16);resM = 0.0; // calulate matrix calcMat(grad, &nPar[m], design, nD, A, &incgrad); // calculate det and/or MP-Inverse calcDetGinv(A, &nPar[m], work, s, VT, U, tol, type, &resD); if(*type == 1){ // calculate quadratic form (for MED designs) calcQuadform(MEDgrad, A, &nPar[m], &resM, &incb); *res += probs[m]*log(resM); } if(*type == 2){ if(*stand == 1){ fracp = (double) nPar[m]; *res += probs[m]*(-log(resD)/fracp); } else { *res += probs[m]*(-log(resD)); } } if(*type == 3){ // calculate quadratic form (for MED designs) calcQuadform(MEDgrad, A, &nPar[m], &resM, &incb); if(*stand == 1){ fracp = (double) nPar[m]; *res += probs[m]*(-0.5*log(resD)/fracp+0.5*log(resM)); } else { *res += probs[m]*(-0.5*log(resD)+0.5*log(resM)); } } } } DoseFinding/src/Makevars0000644000176200001440000000006012620446431014724 0ustar liggesusersPKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) DoseFinding/src/bFitMod.c0000644000176200001440000002535712620446431014740 0ustar liggesusers/* ####################################################################### ## This program is Open Source Software: you can redistribute it ## and/or modify it under the terms of the GNU General Public License ## as published by the Free Software Foundation, either version 3 of ## the License, or (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program. If not, see http://www.gnu.org/licenses/. */ /* To do: - rwm - re-introduce random walk metropolis (for efficiency in linear models, optimal tuning parameter can be pre-calculated!, maybe also for linear parameters of nonlinear models, or leave it optional) - dbeta seems to be really slow in R (pre-calculate normalizing constant?) */ #include #include #include #include #include #include #include #include /* structure to store basic information on problem */ struct modpars{ double *doses; int *modelId; int *nPar; double *work; double *drEst; double *clinvCov; int *dim; double *prior; int *prnr; int *noint; }; void R_CheckUserInterrupt(void); #include #include #include #include #include #include #include /* calculates A*x with A upper triangular */ void trmatvec(double *A, int *dim, double *x){ char *uplo="U", *trans="N", *diag="N"; int incx=1; F77_CALL(dtrmv)(uplo, trans, diag, dim, A, dim, x, &incx); } /* calculates A*x for general A */ void matvec(double *A, int *nrow, int *ncol, double *x, double *y){ char *trans="N"; double alpha = 1.0, beta = 0.0; int incx=1; F77_CALL(dgemv)(trans, nrow, ncol, &alpha, A, nrow, x, &incx, &beta, y, &incx); } void crsprod(double *A, double *B, int *nrow, int *ncol, double *C){ /* calculate A'B */ /* Nrow - nrows of A*/ /* ncol - ncols of A*/ char *transa="T", *transb="N"; double alpha = 1.0, beta = 0.0; F77_CALL(dgemm)(transa, transb, ncol, nrow, nrow, &alpha, A, nrow, B, nrow, &beta, C, ncol); } /* model functions */ void linear(double *doses, const int dim, const double e0, const double delta, double *resp){ int i; for(i=0;i 0) ? (log(x)) : (0.0); } void logprior(double *par, int *npar, double *prior, int *prnr, int *noint, double *out){ /* prnr - number for prior (1- normal. 
2-t, 3-log-normal, 4-beta) prior - prior parameters noint - equals 1 if there is no intercept in the model */ *out =0.0; int i,count=0,i2=0; double p1=0.0,p2=0.0,p3=0.0,p4=0.0; for(i=0;i<(*npar-*noint);i++){ i2 = i+*noint; p1 = prior[count];p2 = prior[count+1]; if(prnr[i] == 1){ // normal-distribution *out += dnorm(par[i2],p1,p2, 1); count += 2; } if(prnr[i] == 2){ // t-distribution p3 = prior[count+2]; *out += dt((par[i2]-p1)/p2, p3, 1)-log(p2); count += 3; } if(prnr[i] == 3){ // log-normal-distribution *out += dlnorm(par[i2], p1, p2, 1); count += 2; } if(prnr[i] == 4){ // scaled-beta-distribution p3 = prior[count+2];p4 = prior[count+3]; *out += dbeta((par[i2]-p1)/(p2-p1), p3, p4, 1)-log(p2-p1); count += 4; } } } /* function to evaluate the pseudo-log-likelihood and log-prior */ double logPost(double *par, struct modpars *mp){ double out=0.0,out2=0.0; logprior(par, mp->nPar, mp->prior, mp->prnr, mp->noint, &out); if(isfinite(out)){ /* only evaluate likelihood if prior > 0 */ loglik(par, mp->doses, mp->modelId, mp->work, mp->drEst, mp->clinvCov, mp->dim, &out2); out += out2; } return out; } double logPost1d(double *actpar, int *ind, double *par, struct modpars *mp){ double out=0.0; par[*ind] = *actpar; out = logPost(par, mp); return out; } /* get parameter bounds for non-linear parameters (from info on prior density) */ void getBnds(int *npar, double *prior, int *prnr, double *lower, double *upper, int *noint){ /* prnr - number for prior (1-normal, 2-t-distribution, 3-log-normal, 4-beta) prior - prior parameters */ int i,count=0,i2=0; for(i=0;i<*npar-*noint;i++){ i2 = i+*noint; lower[i2] = -DBL_MAX;upper[i2] = DBL_MAX; if(prnr[i] == 1) // normal-distribution count += 2; if(prnr[i] == 2)// t-distribution count += 3; if(prnr[i] == 3){ // log-normal-distribution lower[i2] = 0.0; count += 2; } if(prnr[i] == 4){ // scaled-beta-distribution lower[i2] = prior[count];upper[i2] = prior[count+1]; count += 4; } } } /* slice sampler */ /* stepping out procedure */ void getIntStep(double *par, int *ind, double *L, double *R, const double z, const double w, const double lower, const double upper, struct modpars *mp){ double r,temp; r = unif_rand(); temp = par[*ind]; *L = temp - r*w; if(*L < lower) *L = lower; *R = temp + (1-r)*w; if(*R > upper) *R = upper; while(logPost1d(L, ind, par, mp) > z){ *L -= w; if(*L < lower){ *L = lower; break; } } while(logPost1d(R, ind, par, mp) > z){ *R += w; if(*R > upper){ *R = upper; break; } } par[*ind] = temp; } void slice1step(double *par, int *ind, const double w, double *lpostx, const double lower, const double upper, struct modpars *mp){ /* x - current value of chain (will contain output) ind - current dimension lpostx - logposterior evaluated at x w - tuning parameter of slice sampler */ double z,tmp,xOld,xNew,L,R; z = *lpostx - exp_rand(); xOld = par[*ind]; /* get enclosing interval */ getIntStep(par, ind, &L, &R, z, w, lower, upper, mp); while(1){ xNew = unif_rand()*(R-L) + L; tmp = logPost1d(&xNew, ind, par, mp); if(tmp >= z - DBL_EPSILON){ break; } else { // shrink interval if(xNew > xOld){ R = xNew; } else { L = xNew; } } } par[*ind] = xNew; *lpostx = tmp; } /* whole sampler */ void sample(int *nSim, int *thin, double *out, double *par, int *noint, const double *w, double *doses, int *modelId, int *nPar, double *work, double *drEst, double *clinvCov, int *dim, double *prior, int *prnr, double *lower, double *upper){ int i=0,count=0,j=0,d=0,actSimI=0; double lds,actSimD=0; /* initialize structural information */ struct modpars mp = {doses, modelId, nPar, work, 
drEst, clinvCov, dim, prior, prnr, noint}; actSimD = ((double) *nSim) / ((double) *thin); actSimI = (int) actSimD; /* calculate lower and upper bounds for parameters */ getBnds(nPar, prior, prnr, lower, upper, noint); /* initialize R random number generator */ GetRNGstate(); lds = logPost(par, &mp); /* starting likelihood value */ /* actual MCMC loop */ for(i=0;i< *nSim;i++){ for(d=*noint;d < *nPar;d++){ slice1step(par, &d, w[d], &lds, lower[d], upper[d], &mp); } /* store information when desired */ if(!(i%(*thin))){ for(j = 0; j < *nPar; j++){ out[count+j*(actSimI)] = par[j]; } count++; } } PutRNGstate(); } DoseFinding/NAMESPACE0000644000176200001440000000343012620320670013660 0ustar liggesusersimport(mvtnorm, lattice) importFrom("grDevices", "rgb") importFrom("graphics", "lines", "plot", "points") importFrom("stats", "AIC", "IQR", "acf", "approx", "as.formula", "binomial", "coef", "cov2cor", "glm", "lm", "logLik", "model.matrix", "na.fail", "nlminb", "optim", "optimize", "pnorm", "predict", "qnorm", "qt", "quantile", "quasi", "sd", "terms", "uniroot", "var", "vcov") importFrom("utils", "setTxtProgressBar", "txtProgressBar") export(fitMod, defBnds, bFitMod, MCTtest, MCPMod, betaMod, quadratic, emax, exponential,linear, linlog, logistic, sigEmax, linInt, betaModGrad, quadraticGrad, emaxGrad, exponentialGrad, linearGrad, linlogGrad, logisticGrad, sigEmaxGrad, linIntGrad, Mods, getResp, TD, ED, guesst, MCTtest, gAIC, mvtnorm.control, optContr, powMCT, sampSize, sampSizeMCT, targN, powN, planMod, optDesign, calcCrit, rndDesign) S3method(predict, MCPMod) S3method(print, MCPMod) S3method(print, sampSize) S3method(plot, MCPMod) S3method(summary, MCPMod) S3method(print, summary.MCPMod) S3method(summary, DRMod) S3method(print, summary.DRMod) S3method(print, DRMod) S3method(plot, DRMod) S3method(vcov, DRMod) S3method(coef, DRMod) S3method(predict, DRMod) S3method(print, MCTtest) S3method(print, DRdesign) S3method(plot, DRdesign) S3method(print, optContr) S3method(summary, optContr) S3method(print, summary.optContr) S3method(plot, optContr) S3method(plot, Mods) S3method(AIC, DRMod) S3method(gAIC, DRMod) S3method(logLik, DRMod) S3method(gAIC, DRMod) S3method(predict, bFitMod) S3method(plot, bFitMod) S3method(print, bFitMod) S3method(plot, targN) S3method(plot, planMod) S3method(print, planMod) S3method(summary, planMod) S3method(print, summary.planMod) useDynLib(DoseFinding) DoseFinding/data/0000755000176200001440000000000012324162715013357 5ustar liggesusersDoseFinding/data/glycobrom.rda0000644000176200001440000000037712323407770016055 0ustar liggesusers‹ r‰0âŠàb```b`b&f “… H02°0piÎôœÊäü¤¢ü\fa,ó1+8hBiK(í ¥#a* êí??þ~ýW¡¼é½þ÷í¿‚¸bíö_?ÏévžfÿMãë¡þM¸úþOOúº ïÙ÷nÒå c‘´ï{³ct¸½}ŸYüµk`4L½ƒGÄ^o(í ¥½Pi4²æ%æ¦`ÏAYRò‹Saì´Ô2C»8%µ ÊfÌCª¢ür=˜i¼ 0YøÿÿÿßèV&ç$ì„ r¥$–$ê¥õyÿ@ÑHpžDoseFinding/data/biom.rda0000644000176200001440000000175212126252274015002 0ustar liggesusers‹ÕUýSÓuÿîAQ XrœEa“!#DG ½w§à!ó:ÕjñtÙáz‡$™§(§PÐÜb‰ç }ã!Ôµc;&¬1bt$_áF|]Å/üôº{¿_¯ÏçÞ?|îs¯´ÄŒï oAˆÑ„InI&º!#[=‡Ü÷ øBò_­A_wp@tŃ«–‡68OîYņe YÅFåÿþý'v±/j7(õœ²lÿX´cGѳñ¬Ë`i'ðòeýðHÕÈö-‡Ñw%“YW5¨pÐLJ¦cÁÜžWá…˜â’zÝ è~¸ÜtÉñô¶t¾à:ð˜Ò­•gn¡_—;É;ƒþÃhëj{8J¤×Ÿ±ÛAj~Y¦?¬­XãF×Uèº7Ã02‹ˆÂÂ`näõpmhoJÒÀ ˆ–3PåŸXé®sp¯¨T“"›¼í>íÍ[aÎÔÌúûF.“Ôù˜BÝ‹šÛ‚«1uˆHÒ£ßgÕƒõà¥öä'ûÁlë5»ÊÎÁEûírƒ?,twš›{·\­óÛجU8A}PL!c;Œ3w{&Ã8N7Ëîôåpâö 0šZýkÌ+xjc*S§šÕ¯=„•¢ÜÃÅ > î Pu@bkHëJYUzÝô=0}c÷ç•ÁgAE #æ¼â„¥]œe–õüs¼»£NÚÝw‚Ke 1.¥ny4IÜá VXâ³.’1>8Å,ƒl| œ ÑÔꦗ³ø¶eô/ö G˜„¤_Ä9¢“0…ÕxÓV¨LŠ&2SZÍ$äu-–~ƒÅme)Ÿt®«üI@ç¡ÕôP^ ‘·×Ö4‚ëÊ&ÂÓ`%ö(ÏÞ;ö¤ÅÍ÷{| ¿(†Ù/F[]Œ¯Ò0‰Îܾ÷<|É3ç«Pm…5øÝQÔ„ŸvùʳÑ!Mãm»-Îã!Çtðû’¯Íy 
[binary .rda data omitted]
DoseFinding/data/neurodeg.rda0000644000176200001440000002415612126252274015667 0ustar liggesusers
[binary .rda data omitted]
!ï8cw¸¾.d/ºn5ÇÈ&^¸ÜÙ6‡œïû<ÙFÑ$ï_ðãRâÇx*ƒ]öUTí ¦~›Ñ£>:j ÒÁR„ìÚÜp)?" ¶‡XéÊÓµ#0;äô“ÊšzL9’ÝÌ ÖÓoµäë½hBÚ32Ÿk]ðy¼ãØN ‘¼g£[?2{¼¯Þ,ÿ0_JêŠé©¼R°Qsœk §’:¹­x„ú@0»Ì'L?óÂþ†}ƒ†+A8ÝO×N—ÌF¢õ?ywùIÛ†÷†Lóv¹ÉÎwàòíkN½.–ר-û¾ºãCõZÒŸ¿Yp/ï1سϣFŒØGæžç·­·~ƒ0ÙÆNÝ÷Qo.‘AHx¸÷!é'BÂÝØ·GÁ~Ë=XiŽ÷ÁQ ∻êÝ—#ÄèR¸CÚ>ŽoO\9H^¿1ÍËUL&shÚûÕ÷@P|¤øDÑDñ™â E3ÅWŠoß)Z(~Pü¤øEÑJÑå_ïÇ)nŠ›â¦¸)nŠ›âö_~÷YQQQÿÛ#:)¢"*¢"*¢"*â}´PDETDETDETÄÿú8VQQQÿÛ£Òü\Üÿ™Û þÍÿ­”þ¯›"O‘§ÈSä)òyЦ4T¼àßùïOj2âH± !þç~•þþã@¥~@¶‰DoseFinding/data/IBScovars.rda0000644000176200001440000000421312126252274015702 0ustar liggesusers‹íXWTÇûP@AAÄÆx09ȉMšªèÝå ÓP‹Å˜¢ÁU1$ùÔEðQ =ÖÔ rª¶–Ô6A­Ê¥„UQ$òµ Â.»|Z vîœáÿÖwÎ{óÞÌ;÷þîïÞ™ó‚ü7.±Ùh#‚\Û+¹B|UÊŇLP SÅvj€ßúmQ»5±q‚ p;D Å4qj镽/K$ò/m‹ÐÄ¡8S²C³->*V|ïéc[ .w,Tm2€®¾¾þ«i%Pà‘sôìShYìgŠ„2úý»ºæ»‹ Ðéïïᅩ­y -K‰ÁT¹Â)óWÐIÛ6M†ËÎãÃ`Š,®û…S:˜…ñ L‰*’Wô’+17]û»ƒ.888uÁ.qhœ>%ç×LînpðÏ‹œ¡¯.²x q)yXPð¥ÕÚ'Dï¢I¿×íÀôÖþÃ;{¥ÙŒV‡ÃGÓfá_c×mòÈTé›í2š×¼™šæ¾:,ò½Hmü~ì:Æú£Ö:FŸ¹ =¥9—ÑoŽ—o°;¢%g4®†ÁQ.ña6ô×EžµñC=¸´äxœîØ«’Õæf5d‡Ãwö+¯Z!Τ$¡5oÕA;0‰vB˜+tuuu»{hûûø“fRYü°êÔÒºu÷ëŸ.ʇ"cèl¯´OÉ1õ©y‘ï’C)3×Ö­&ÕËkKv3{ ÿï¨þÒ3Þ¯Ê!ÔÒ@ýù>Dwup©‹ÃEMÆì++Y¼Ç^q|eä*ø67ë¾y«Œá%†§®9­º}M•óoX©dÀdçsÝgñ•‚Æ[Ÿî¼1¿œ´{ä¼s¯Œ¾©ä¢½'~x’­‡q¿1õ…㦿€âÙïsc¸Õî‰ ž÷œÊä[×í²ÎÞdÆÐYÆØ‹kàvVmÞ”â$è§xQûHãrǯžºý;™ý  NˆÔü`ü‚üÆé3<ÖÊ}]¸Üé¥JÔ‹~†9”-©kÞL*³Ê+h¢qh@Þ[ð`¼Ò݉ßìã7DºÌ'“BI=µ›úG0Z©ß=–<`xÜVkç.Šö†¦¯&]ôarÉÂcÔOŒÎ¥q—bˆVäŸÛ^7R¼^¯}ÍǸõ…èJVCX¾ãø3ÇÂe#UL®âµ[ç<0YE^—,^Çð¤vPÞ“?;2§d}Ô«µî/ŠfüËFëçÛä¬Zëfý·Br.ÀãpLg4`Kë¡|d¼ê›?¾í²™,NÚÔ5oM]ò1˜}®Û½¸h|àª;ñ¥¶m §v"ïÐ?Z?Úy't–¡ýÇíÌ_ÊC¢ë܉–40ÆožýG‚zé|¨¢<ù7ÕOýb8àú´.­y{< l=Œ'­wŒ/úÁ¨£ÂÞès'ˆp?°ö#LŽÖEVo°Eš;Ëeq×à[MÆœ”}—ÉÓ¬ÚܤÕó oÇøí–x‘ö¨ß ¦¼Uú×·îvI†?•FÌ48¹¡ÿ„®Ëêúƒõ«‹Æë2­‡,:cnº¥Ø%SárÇRç* zšÅïÄòI®Zë‘wþ4èJBôÊû ›Äxÿ´Çñ¦ó½òÄ$›…¬=£8 |Þö¾o›‡ùi2€B÷× ¤ñkǧ¯øŒà~<@>Ió„ÊCu‘WЖ£’ë”_úÓo¨<ÿ.TèJ6ɺ¿Ä:Íêò˜òFn‹8°_‰û)i¶È‘êˆ?¼Ö¡ O-õW%Û-·6“šJ_sЫPe±C%§ù@ý#5Ôäß•WËÜŽ‰£ë¹7YA<†Ä6¨·„ùƒzmEÿ£³ü{*âf,‹eñÅ:Ð/ÖÙÕ'‰úGýe|xFëüeŠÝ'¿]?¿d[4LZÈ;æÏúw’†ëv>õK‡TŠ…KN¾ýÏØ~Úg±“`þá~„çŠÞžÇÞBÿ]À:ƒûV{mîyk2Ë+Š/˜333cò0 úOýSÉÄÖsÏchÔº«¯í飸/›Ë» -7«v_õÆ´£m¥}šëêÄϬÎ7Lä ǼdöZÖ'ÆS/«Ë¦Ÿ&µXï)_Pãb˜¸_±¼êI=³,á¾=îCä®õº]ûæî'˜_ÈZ¯Ø|ä÷-šÿ ‰¯XŸuOÇ|d³,5¹÷‹Ö=œßNùƒç¼ ޏßà¹ˆŽ“ï½š®;³:_6mÞÑ)^p.OíeoH'Xo±þv§ ¼R¬ý-«ÔQÿëFAakå\Ÿôö£¼Œ›#pcJ‰,/Çë’qzN‡’Êðºqž’ǹÒõø1´ex[•ÜÍ÷Ißq '´ ý˜ ;Þ\›ŸË¯…ß<æ2a¢ ra"6xñ˜(…ÿ¶—·çòºy9©¼~ÞN>¶§‡×Íc-WÞ>Ƽ-¨g2<¤6â7/#ãú…Iú±ïy6óñD=¼ÿrI¿t};žŸ²çèã9Ä牔ßRý<–¼í¨s²¸ð²ÏÓ/&Ç_&Ú ]_ªKêŸ4RîKû¥¼‘æo+?Wšÿ¼?Òœ“Îáù.}@?îç0ÊIs†×¥”|Of—4¾œÜÄ_1;5‘aø+F¿b> Û¹=,–~)cÃâ¢ñ}{T\˜ôWlÔ'Þ¨fÚØºÉâcttäEP1*LüÓc³]¯ñÞ+N,{Fþz_ÞQDoseFinding/data/migraine.rda0000644000176200001440000000031212126252274015636 0ustar liggesusers‹]= Â0EŸI[üuwp”⤃ .®Nu 6•‚m%-¸ºøŸ¬7ðÒÀÉI/7Éù¯Â8$"Ab$IH,•ÀÔ!EÜ˳›ÕYaˆä{×0tuâ±Wì){Á^³Oì ;ó'iÀ9Î.¥ Æ Ïw8f}o0K°c` 6­ww ›Š%URVÆì_¥Öø½*j[·C®w]ù_ ]ë(µÈoµ¶|Fþ^÷hñÂÔ4Íú:~ÌMpDoseFinding/R/0000755000176200001440000000000012620174661012651 5ustar liggesusersDoseFinding/R/planMod.R0000644000176200001440000005027712411760751014400 0ustar liggesusers## various functions for assessing the operating characteristics of a design ## for model-based estimation of dose-response functions ## calculates the variance of the estimated curve getPredVar <- function(model, cf, V, pDose, off, scal){ gr <- gradCalc(model, cf, pDose, off, scal) gr0 <- gradCalc(model, cf, 0, off, scal) grd <- sweep(gr, 2, gr0) out <- apply(grd, 1, function(x){ as.numeric(t(x)%*%V%*%x) }) out } ## calculates the variance of the EDp estimate getEDVar <- function(model, cf, V, scale = c("unrestricted", "logit"), p, maxD, off, scal, nodes){ grd <- calcEDgrad(model, cf, maxD, p, off, scal, nodes) if(scale == "logit"){ tmp <- calcED(model, cf, p, maxD, "continuous", off=off, scal=scal, nodes=nodes) grd <- grd*(-maxD/(tmp*(tmp-maxD))) } grd <- as.numeric(grd) 
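  ## delta method: Var(g(theta-hat)) is approximated by grad' V grad below,
  ## with the gradient taken on the scale selected via 'scale'
  ## (logit-rescaled above, otherwise on the unrestricted dose scale)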
return(as.numeric(t(grd)%*%V%*%grd)) } ## calculates the variance of the TD estimate getTDVar <- function(model, cf, V, scale = c("unrestricted", "log"), Delta, direction = c("increasing", "decreasing"), off, scal, nodes){ tmp <- calcTD(model, cf, Delta, "continuous", direction, off=off, scal=scal, nodes = nodes) grd <- calcTDgrad(model, cf, Delta, direction, off, scal, nodes) if(scale == "log") grd <- grd/tmp grd <- as.numeric(grd) return(as.numeric(t(grd)%*%V%*%grd)) } ## calculates approximate covariance matrix for parameter estimates aprCov <- function(doses, model, cf, S, off, scal){ F <- gradCalc(model, cf, doses, off, scal) V <- try(solve(t(F)%*%solve(S)%*%F)) if(inherits(V, "try-error")){ warning("Could not calculate covariance matrix; Fisher information singular.") return(NA) } V } planMod <- function(model, altModels, n, sigma, S, doses, asyApprox = TRUE, simulation = FALSE, alpha = 0.025, tau = 0, p = 0.5, pLB = 0.25, pUB = 0.75, nSim = 100, cores = 1, showSimProgress = TRUE, bnds, addArgs = NULL){ if(any(is.element(model, "linInt"))) stop("planMod works for all built-in models but not linInt") if(length(model) > 1 & asyApprox){ stop("\"asyApprox\" needs to be FALSE for multiple models") } ## off and scal off <- scal <- NULL if(any(is.element(model, c("linlog", "betaMod")))) { lst <- getAddArgs(addArgs, sort(unique(doses))) if ("betaMod" %in% model) scal <- lst$scal if ("linlog" %in% model) off <- lst$off } if(missing(doses)) doses <- attr(altModels, "doses") ## calculate mean response at doses muMat <- getResp(altModels, doses) nD <- length(doses) if(missing(S)){ if(missing(n) | missing(sigma)) stop("either S or n and sigma need to be specified") if (length(n) == 1) n <- rep(n, nD) if (length(n) != nD) stop("\"n\" and \"doses\" need to be of same length") S <- sigma^2 * diag(1/n) } ## calculate parameters, gradients and results for the asymptotic approximation if(missing(bnds)) { if(any(!is.element(model, c("linear", "linlog", "quadratic")))){ message("Message: Need bounds in \"bnds\" for nonlinear models, using default bounds from \"defBnds\".") bnds <- defBnds(max(doses)) } } nams <- colnames(muMat) covMat <- list() approx <- matrix(nrow = ncol(muMat), ncol = 3) maxdose <- apply(abs(muMat-muMat[1,]), 2, function(x) doses[which.max(x)]) EDs <- ED(altModels, p) EDsUB <- ED(altModels, pUB) EDsLB <- ED(altModels, pLB) if(!asyApprox & !simulation) stop("Need to select either \"asyApprox = TRUE\" or \"simulation = TRUE\"") if(asyApprox){ npar <- switch(model, linInt = length(doses), nPars(model)) bestPar <- matrix(nrow = ncol(muMat), ncol = npar) ## best fit by model to models in altModels for(i in 1:ncol(muMat)){ ## if other model-class approximate best fit nam <- gsub("[0-9]", "", nams[i]) # model name (number removed) if(nam == model){ pars <- attr(muMat, "parList")[[i]] if(is.element(model, c("betaMod", "linlog"))) bestPar[i,] <- pars[-length(pars)] else bestPar[i,] <- pars bias <- 0 } else { ## find the best fit fit <- fitMod(doses, muMat[,i], model=model, S=S, bnds = bnds[[model]], type="general") bias <- predict(fit, predType = "effect-curve" , doseSeq = doses[-1])-(muMat[-1,i]-muMat[1,i]) bestPar[i,] <- coef(fit) } ## now calculate approximate covariance matrix covMat[[i]] <- aprCov(doses, model, bestPar[i,], S, off, scal) if(!is.matrix(covMat[[i]])){ approx[i,] <- NA } else { ## root-mse paVar <- getPredVar(model, bestPar[i,], covMat[[i]], pDose=doses[-1], scal=scal, off=off) approx[i,1] <- sqrt(mean(paVar+bias^2)) ## Pr(eff_maxdose > 0) ind <- which(doses[-1] == 
maxdose[i]) paVar <- paVar[ind] call <- c(list(c(0,maxdose[i])), as.list(c(bestPar[i,], scal, off))) pa <- abs(diff(do.call(model, call))) LBmn <- qnorm(alpha, pa, sqrt(paVar)) approx[i,2] <- pnorm(tau, LBmn, sqrt(paVar), lower.tail = FALSE) ## Pr(eff_ED50) edvar <- getEDVar(model, bestPar[i,], covMat[[i]], "unrestricted", p, maxdose[i], off=off, scal=scal) ed <- calcED(model, bestPar[i,], p, maxdose[i], "continuous", off=off, scal=scal) edsd <- sqrt(edvar) approx[i,3] <- pnorm(EDsUB[i], ed, edsd) - pnorm(EDsLB[i], ed, edsd) } } colnames(approx) <- c("dRMSE", "Pow(maxDose)", "P(EDp)") rownames(approx) <- rownames(bestPar) <- nams colnames(bestPar) <- rownames(covMat[[1]]) attr(approx, "bestPar") <- bestPar attr(approx, "covMat") <- covMat } if(simulation){ cat("Running simulations\n") requireNamespace("parallel", quietly = TRUE) sim <- parallel::mclapply(1:ncol(muMat), function(i){ if(showSimProgress){ if(cores == 1){ cat(sprintf("Scenario %d/%d\n", i, ncol(muMat))) pb <- txtProgressBar(style=3, char="*") } else { cat(sprintf("Scenario %d/%d started\n", i, ncol(muMat))) } } dat <- rmvnorm(nSim, mean = muMat[,i], sigma = S) sims <- numeric(3) mse <- LBmn <- edpred <- resp <- numeric(nSim) coefs <- vector("list", length = nSim) modelSel <- character(nSim) for(j in 1:nSim){ if(showSimProgress & cores == 1) setTxtProgressBar(pb, j/nSim) fit <- vector("list", length = length(model)) k <- 1 for(namMod in model){ fit[[k]] <- fitMod(dose=doses, dat[j,], model=namMod, S=S, type="general", bnds=bnds[[namMod]]) k <- k+1 ## ## this would be faster ## fit <- fitMod.raw(doses, dat[j,], model=model, ## off=off, scal=scal, nodes=NULL, ## S=S, type="general", bnds=bnds, ## covarsUsed = FALSE, df = Inf, ## control = NULL, ## doseNam = "dose", respNam = "resp") } aics <- sapply(fit, gAIC) fit <- fit[[which.min(aics)]] coefs[[j]] <- coef(fit) modelSel[j] <- attr(fit, "model") ## root-MSE of plac-adj dr at doses respDoses <- predict(fit, predType = "effect-curve", doseSeq = doses[-1]) call <- c(list(doses), as.list(c(coef(fit), scal, off))) trm <- muMat[-1,i] - muMat[1,i] mse[j] <- mean((respDoses-trm)^2) ## Pr(LB_maxdose > tau) > 1-alpha respMaxD <- predict(fit, predType = "effect-curve", doseSeq = maxdose[i], se.fit=TRUE) if(is.na(respMaxD$se.fit)){ LBmn[j] <- NA } else { LBmn[j] <- qnorm(alpha, abs(respMaxD$fit), respMaxD$se.fit) } resp[j] <- respMaxD$fit ## ED estimation edpred[j] <- ED(fit, p=p) } ind <- is.na(LBmn) NAind <- sum(ind) LBmn[ind] <- qnorm(alpha, abs(resp[ind]), sd(resp, na.rm=TRUE)) sims[1] <- sqrt(mean(mse)) sims[2] <- mean(LBmn > tau) sims[3] <- mean(edpred > EDsLB[i] & edpred < EDsUB[i]) attr(sims, "NAind") <- NAind attr(sims, "coefs") <- coefs attr(sims, "model") <- modelSel if(showSimProgress){ if(cores == 1){ close(pb) } else { cat(sprintf("Scenario %d/%d finished\n", i, ncol(muMat))) } } sims }, mc.cores=cores) NAind <- sapply(sim, function(x) attr(x, "NAind")) coefs <- lapply(sim, function(x) attr(x, "coefs")) modelSel <- sapply(sim, function(x) attr(x, "model")) names(NAind) <- colnames(modelSel) <- names(coefs) <- nams rownames(modelSel) <- 1:nSim sim <- do.call("rbind", sim) colnames(sim) <- c("dRMSE", "Pow(maxDose)", "P(EDp)") rownames(sim) <- nams attr(sim, "NAind") <- NAind attr(sim, "coefs") <- coefs attr(sim, "modelSel") <- modelSel } out <- list(approx = NULL, sim = NULL) if(asyApprox) out$approx <- approx if(simulation){ out$sim <- sim attr(out$sim, "nSim") <- nSim } attr(out, "model") <- model attr(out, "altModels") <- altModels attr(out, "doses") <- doses attr(out, "off") 
<- off attr(out, "scal") <- scal attr(out, "S") <- S class(out) <- "planMod" out } tableMatch <- function(x, match){ ## like "table", but also returns categories with 0 counts out <- numeric(length(match)) for(i in 1:length(match)){ out[i] <- sum(x == match[i], na.rm=TRUE) } names(out) <- match out } print.planMod <- function(x, digits = 3,...){ model <- attr(x, "model") multiMod <- length(model) > 1 str <- ifelse(multiMod, "s", "") cat(sprintf("Fitted Model%s: %s\n\n", str, paste(model, collapse=" "))) if(!is.null(x$approx)){ attr(x$approx, "bestPar") <- NULL attr(x$approx, "NAind") <- NULL attr(x$approx, "covMat") <- NULL cat("Asymptotic Approximations\n") print(signif(x$approx, digits)) cat("\n") } if(!is.null(x$sim)){ pltsim <- x$sim attr(pltsim, "NAind") <- NULL attr(pltsim, "coefs") <- NULL attr(pltsim, "modelSel") <- NULL attr(pltsim, "nSim") <- NULL cat(sprintf("Simulation Results (nSim = %i)\n", attr(x$sim, "nSim"))) print(signif(pltsim, digits)) if(multiMod){ cat("\nSelected models\n") res <- apply(attr(x$sim, "modelSel"), 2, tableMatch, match = model) print(signif(t(res)/colSums(res), digits)) } } } summary.planMod <- function(object, digits = 3, len = 101, Delta=NULL, direction = c("increasing", "decreasing"), p=NULL, dLB = 0.05, dUB = 0.95, ...){ class(object) <- "summary.planMod" print(object, digits, len, Delta, direction, p, dLB, dUB, ...) } print.summary.planMod <- function(x, digits = 3, len = 101, Delta=NULL, direction = c("increasing", "decreasing"), p=NULL, dLB = 0.05, dUB = 0.95, ...){ ## provide more information than print method modelSel <- attr(x$sim, "modelSel") model <- attr(x, "model") coefs <- attr(x$sim, "coefs") altModels <- attr(x, "altModels") doses <- attr(x, "doses") S <- attr(x, "S") off <- attr(x, "off") scal <- attr(x, "scal") ## calculate mean response at doses doseSeq <- seq(min(doses), max(doses), length=len) muMat <- getResp(altModels, doseSeq) if(is.null(x$sim)) stop("Additional metrics only available if simulations were performed") cat("Calculating additional summary metrics, please wait.") ## calculate average mse of placebo-adjusted dose-response for ANOVA CM <- cbind(-1, diag(length(doses)-1)) mseANOVA <- mean(diag(CM%*%S%*%t(CM))) ## calculate predictions predList <- getSimEst(x, "dose-response", doseSeq=doseSeq) out <- matrix(ncol = 5, nrow = ncol(muMat)) colnames(out) <- c("Eff-vs-ANOVA", "cRMSE", "lengthTDCI", "P(no TD)", "lengthEDCI") rownames(out) <- colnames(muMat) if(!is.null(Delta)){ direction <- match.arg(direction) tds <- getSimEst(x, "TD", Delta=Delta, direction=direction) } if(!is.null(p)){ eds <- getSimEst(x, "ED", p=p) } for(i in 1:ncol(muMat)){ out[i,1] <- mseANOVA/x$sim[i,1]^2 ## calculate mse of estimating the plac-adj dose-response at fine grid ## first calculate placebo-adjusted predictions pred <- predList[[i]] pred <- (pred-pred[,1])[,-1] ## placebo-adjusted response mn <- (muMat[-1,i]-muMat[1,i]) clmn <- colMeans(sweep(pred, 2, mn)^2) out[i,2] <- sqrt(mean(clmn)) ## calculate length of CI for TD if(!is.null(Delta)){ out[i,3] <- diff(quantile(tds[[i]], c(dLB, dUB), na.rm = TRUE)) out[i,4] <- mean(is.na(tds[[i]])) } else { out[i,3] <- out[i,4] <- NA } ## calculate length of CI for ED if(!is.null(p)){ out[i,5] <- diff(quantile(eds[[i]], c(dLB, dUB))) } else { out[i,5] <- NA } } cat("\r") cat(sprintf("Additional simulation metrics (nSim=%i)\n", attr(x$sim, "nSim"))) print(signif(out, digits=digits)) } ## calculate the predictions for the fitted models getSimEst <- function(x, type = c("dose-response", "ED", "TD"), doseSeq, 
direction, p, Delta, placAdj = FALSE){ modelSel <- attr(x$sim, "modelSel") model <- attr(x, "model") coefs <- attr(x$sim, "coefs") off <- attr(x, "off") scal <- attr(x, "scal") nSim <- attr(x$sim, "nSim") altModels <- attr(x, "altModels") nAlt <- modCount(altModels, fullMod=TRUE) doses <- attr(x, "doses") maxD <- max(doses) type <- match.arg(type) if(type == "TD" & missing(direction)) stop("need direction for TD calculation") out <- vector("list", nAlt) for(i in 1:nAlt){ ind <- matrix(ncol = length(model), nrow = nSim) if(type == "dose-response"){ resMat <- matrix(nrow = nSim, ncol = length(doseSeq)) colnames(resMat) <- doseSeq rownames(resMat) <- 1:nSim for(j in 1:length(model)){ ind[,j] <- modelSel[,i] == model[j] if(any(ind[,j])){ cf <- do.call("rbind", (coefs[[i]])[ind[,j]]) resMat[ind[,j]] <- predSamples(samples=cf, placAdjfullPars = TRUE, doseSeq=doseSeq, placAdj=placAdj, model=model[j], scal=scal, off=off, nodes = NULL) } out[[i]] <- resMat } } if(is.element(type, c("TD", "ED"))){ resVec <- numeric(nSim) for(j in 1:length(model)){ ind[,j] <- modelSel[,i] == model[j] if(any(ind[,j])){ cf <- do.call("rbind", (coefs[[i]])[ind[,j]]) if(type == "TD"){ resVec[ind[,j]] <- apply(cf, 1, function(z){ calcTD(model[j], z, Delta, "continuous", direction, off=off, scal=scal) }) } if(type == "ED"){ resVec[ind[,j]] <- apply(cf, 1, function(z){ calcED(model[j], z, p, maxD, "continuous", off=off, scal=scal) }) } } } out[[i]] <- resVec } } names(out) <- colnames(getResp(attr(x, "altModels"), doses=0)) ## horrible hack need to improve! out } plotDoseSims <- function(x, type = c("ED", "TD"), p, Delta, direction, xlab){ altMods <- attr(x, "altModels") if(type == "ED"){ out <- getSimEst(x, "ED", p=p) trueDoses <- ED(altMods, p=p, EDtype="continuous") } else { out <- getSimEst(x, "TD", Delta=Delta, direction=direction) trueDoses <- TD(altMods, Delta=Delta, TDtype="continuous", direction=direction) } ## write plotting data frame nams <- names(out) group <- factor(rep(1:length(nams), each=length(out[[1]])), labels=nams) pdat <- data.frame(est = do.call("c", out), group = group) ## determine limits for x-axis rngQ <- tapply(pdat$est, pdat$group, function(x){ quantile(x, c(0.025, 0.975), na.rm=TRUE) }) rngQ <- do.call("rbind", rngQ) rng <- c(min(rngQ[,1], na.rm = TRUE), max(rngQ[,2], na.rm = TRUE)) delt <- diff(rng)*0.04 ## truncate x-axis to 2*maxdose maxdose <- max(attr(x, "doses")) xlimU <- min(2*maxdose, max(rng[2], maxdose)+delt) xlimL <- max(-0.05*maxdose, min(0, rng[1])-delt) xlim <- c(xlimL, xlimU) parVal <- ifelse(type == "ED", paste("p=", p, sep=""), paste("Delta=", Delta, sep="")) maintxt <- paste("95%, 80%, 50% intervals and median of simulated ", type, " estimates (", parVal, ")", sep = "") key <- list(text = list(maintxt, cex = 0.9)) bwplot(~est|group, data=pdat, xlab = xlab, trueDoses=trueDoses, xlim = xlim, panel = function(...){ z <- panel.number() panel.grid(v=-1, h=0, lty=2, col = "lightgrey") panel.abline(v=trueDoses[z], col = "red", lwd=2) panel.abline(v=c(0, max(attr(x, "doses"))), col = "grey", lwd=2) probs <- c(0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975) simDoseEst <- list(...)$x quants <- quantile(simDoseEst, probs, na.rm = TRUE) llines(c(quants[1], quants[7]), c(1,1), lwd=2, col=1) llines(c(quants[2], quants[6]), c(1,1), lwd=5, col=1) llines(c(quants[3], quants[5]), c(1,1), lwd=10, col=1) lpoints(quants[4], 1, cex=2, pch="|", col=1) if(type == "TD") ltext(xlim[2], 1.5, pos = 2, cex = 0.75, labels = paste("% No TD:", mean(is.na(simDoseEst))*100, "%")) }, layout = c(1,length(out)), 
as.table = TRUE, key = key) } plotDRSims <- function(x, placAdj = FALSE, xlab, ylab){ altMods <- attr(x, "altModels") rng <- range(attr(x, "doses")) doseSeq <- seq(rng[1], rng[2], length = 51) out <- getSimEst(x, type = "dose-response", doseSeq=doseSeq, placAdj = placAdj) trueMn <- getResp(altMods, doses=doseSeq) if(placAdj){ trueMn <- trueMn-trueMn[1,] } nM <- length(out) resp <- vector("list", length=nM) for(i in 1:nM){ qMat <-apply(out[[i]], 2, function(y){ quantile(y, c(0.025, 0.25, 0.5, 0.75, 0.975)) }) resp[[i]] <- c(t(qMat)) } resp <- do.call("c", resp) quant <- rep(rep(c(0.025, 0.25, 0.5, 0.75, 0.975), each = 51), nM) dose <- rep(doseSeq, nM*5) model <- factor(rep(1:nM, each = 5*51), labels = names(out)) key <- list(text = list("Pointwise 95%, 50% intervals and median of simulated dose-response estimates", cex = 0.9)) xyplot(resp~dose|model, groups = quant, xlab=xlab, ylab = ylab, panel = function(...){ ## plot grid panel.grid(v=-1, h=-1, col = "lightgrey", lty=2) ## plot estimates panel.dat <- list(...) ind <- panel.dat$subscripts LB95.x <- panel.dat$x[panel.dat$groups[ind] == 0.025] LB95 <- panel.dat$y[panel.dat$groups[ind] == 0.025] UB95.x <- panel.dat$x[panel.dat$groups[ind] == 0.975] UB95 <- panel.dat$y[panel.dat$groups[ind] == 0.975] lpolygon(c(LB95.x, rev(UB95.x)), c(LB95, rev(UB95)), col = "lightgrey", border = "lightgrey") LB50.x <- panel.dat$x[panel.dat$groups[ind] == 0.25] LB50 <- panel.dat$y[panel.dat$groups[ind] == 0.25] UB50.x <- panel.dat$x[panel.dat$groups[ind] == 0.75] UB50 <- panel.dat$y[panel.dat$groups[ind] == 0.75] lpolygon(c(LB50.x, rev(UB50.x)), c(LB50, rev(UB50)), col = "darkgrey", border = "darkgrey") MED.x <- panel.dat$x[panel.dat$groups[ind] == 0.5] MED <- panel.dat$y[panel.dat$groups[ind] == 0.5] llines(MED.x, MED, col = 1,lwd = 1.5) ## plot true curve z <- panel.number() llines(doseSeq, trueMn[,z], col=2, lwd=1.5) }, as.table = TRUE, key=key) } plot.planMod <- function(x, type = c("dose-response", "ED", "TD"), p, Delta, direction, placAdj = FALSE, xlab = "Dose", ylab = "", ...){ type <- match.arg(type) if(type == "dose-response"){ plotDRSims(x, placAdj = placAdj, xlab=xlab, ylab = ylab) } else { plotDoseSims(x, type=type, p=p, Delta=Delta, direction = direction, xlab = xlab) } } DoseFinding/R/MCPMod.R0000644000176200001440000001734012244155176014062 0ustar liggesusers## wrapper function for MCTtest and fitMod calls MCPMod <- function(dose, resp, data = NULL, models = NULL, S=NULL, type = c("normal", "general"), addCovars = ~1, placAdj = FALSE, selModel = c("AIC", "maxT", "aveAIC"), alpha = 0.025, df = NULL, critV = NULL, doseType = c("TD", "ED"), Delta, p, pVal = TRUE, alternative = c("one.sided", "two.sided"), na.action = na.fail, mvtcontrol = mvtnorm.control(), bnds, control = NULL){ direction <- attr(models, "direction") ## first perform multiple contrast test if(!is.null(data)){ callMCT <- list(deparse(substitute(dose)), deparse(substitute(resp)), data, models, S, type, addCovars, placAdj, alpha, df, critV, pVal, alternative, na.action, mvtcontrol) test <- do.call(MCTtest, callMCT) } else { test <- MCTtest(dose, resp, data, models, S, type, addCovars, placAdj, alpha, df, critV, pVal, alternative, na.action, mvtcontrol) } ## now pre-select models based on contrasts tstat <- test$tStat pvals <- attr(tstat, "pVal") if(!is.null(pvals)){ tstat <- tstat[pvals < alpha] } else { tstat <- tstat[tstat > test$critVal] } if(length(tstat) == 0) ## stop if no model significant return(list(MCTtest = test, mods = NULL, modcrit = NULL, selMod = NULL, TD = NULL)) ## 
fit models and calculate model selection criteria addArgs <- list(off=attr(models, "off"), scal=attr(models, "scal")) selModel <- match.arg(selModel) builtIn <- c("linlog", "linear", "quadratic", "linInt", "emax", "exponential", "logistic", "betaMod", "sigEmax") nams <- gsub("[0-9]", "", names(tstat)) ## remove numbers from model-names namsU <- unique(nams) mods <- vector("list", length(namsU));z <- 1 if(missing(bnds)){ if(!is.null(data)){ cal <- as.character(match.call()) doseVec <- data[, cal[2]] } else { doseVec <- dose } bnds <- defBnds(max(doseVec)) } else { if(!is.list(bnds)) stop("bnds needs to be a list") } if(selModel %in% c("AIC", "aveAIC")){ if(type[1] == "normal"){ modcrit <- AIC } else { modcrit <- gAIC } } else { modcrit <- function(x) max(tstat[attr(x, "model") == nams]) } for(i in 1:length(namsU)){ if(!is.null(data)){ callMod <- list(deparse(substitute(dose)), deparse(substitute(resp)), data, namsU[i], S, type, addCovars, placAdj, bnds[[namsU[i]]], df, NULL, na.action, control, addArgs) mods[[i]] <- do.call(fitMod, callMod) } else { mods[[i]] <- fitMod(dose, resp, data, namsU[i], S, type, addCovars, placAdj, bnds[[namsU[i]]], df, NULL, na.action, control, addArgs) } } crit <- sapply(mods, modcrit) names(crit) <- names(mods) <- namsU attr(crit, "crit") <- selModel if(selModel %in% c("maxT", "AIC")){ if(selModel == "AIC"){ ind <- which.min(crit) } if(selModel == "maxT"){ nam <- names(tstat)[which.max(tstat)] ind <- which(gsub("[0-9]", "", nam) == names(mods)) } selMod <- namsU[ind] # name of selected model } else { aic <- crit-mean(crit) selMod <- exp(-0.5*aic)/sum(exp(-0.5*aic)) # model weights names(selMod) <- namsU } ## calculate target dose estimate tds <- NULL doseType <- match.arg(doseType) if(doseType == "TD"){ if(missing(Delta)) stop("\"Delta\" needs to be specified for TD estimation") tds <- sapply(mods, TD, Delta=Delta, direction = direction) attr(tds, "addPar") <- Delta } if(doseType == "ED"){ if(missing(p)) stop("\"p\" needs to be specified for TD estimation") tds <- sapply(mods, ED, p=p) attr(tds, "addPar") <- p } out <- list(MCTtest = test, mods = mods, modcrit=crit, selMod=selMod, doseEst=tds, doseType = doseType) class(out) <- "MCPMod" out } predict.MCPMod <- function(object, predType = c("full-model", "ls-means", "effect-curve"), newdata = NULL, doseSeq = NULL, se.fit = FALSE, ...){ lapply(object$mods, function(x) predict(x, predType, newdata, doseSeq, se.fit)) } print.MCPMod <- function(x, digits=3, eps=1e-03, ...){ cat("MCPMod\n") xx <- x$MCTtest cat("\nMultiple Contrast Test:\n") ord <- rev(order(xx$tStat)) if (!any(is.null(attr(xx$tStat, "pVal")))) { pval <- format.pval(attr(xx$tStat, "pVal"), digits = digits, eps = eps) dfrm <- data.frame(round(xx$tStat, digits)[ord], pval[ord]) names(dfrm) <- c("t-Stat", "adj-p") } else { dfrm <- data.frame(round(xx$tStat, digits)[ord]) names(dfrm) <- c("t-Stat") } print(dfrm) if (!is.null(xx$critVal)) { twoSide <- xx$alternative == "two.sided" vec <- c(" one-sided)", " two-sided)") cat("\n", "Critical value: ", round(xx$critVal, digits), sep = "") if (attr(xx$critVal, "Calc")) { cat(" (alpha = ", xx$alpha, ",", vec[twoSide + 1], sep = "") } cat("\n") } cat("\n") cat("Estimated Dose Response Models:") for(i in 1:length(x$mods)){ cat("\n") cat(names(x$mods)[i], "model\n") cofList <- coef(x$mods[[i]], sep = TRUE) cof <- do.call("c", cofList) namcof <- c(names(cofList$DRpars), names(cofList$covarPars)) namcof <- gsub(" ", "", namcof) # remove white spaces for GUI names(cof) <- gsub("doseM", "dose", namcof) # use more 
obvious names print(round(cof, digits)) } if(attr(x$modcrit, "crit") != "aveAIC"){ cat("\nSelected model (",attr(x$modcrit, "crit"),"): ", x$selMod, "\n", sep="") } else { cat("\nModel weights (AIC):\n") attr(x$selMod, "crit") <- NULL print(round(x$selMod, 4)) } if(is.null(length(x$doseEst))) return() if(x$doseType == "TD") strn <- ", Delta=" if(x$doseType == "ED") strn <- ", p=" cat("\nEstimated ",x$doseType,strn,attr(x$doseEst, "addPar"),"\n", sep="") attr(x$doseEst, "addPar") <- NULL print(round(x$doseEst, 4)) } summary.MCPMod <- function(object, ...){ class(object) <- "summary.MCPMod" print(object, digits = 3) } print.summary.MCPMod <- function(x, ...){ cat("MCPMod\n\n") cat(rep("*", 39), "\n", sep="") cat("MCP part \n") cat(rep("*", 39), "\n", sep="") print(x$MCTtest) cat("\n") if(length(x$mods) == 0) return() cat(rep("*", 39), "\n", sep="") cat("Mod part \n") cat(rep("*", 39), "\n", sep="") for(i in 1:length(x$mods)){ if(i > 1) cat("\n") if(length(x$mods) > 1) cat("** Fitted model", i,"\n") summary(x$mods[[i]]) } cat("\n") cat(rep("*", 39), "\n", sep="") cat("Model selection criteria (",attr(x$modcrit, "crit"),"):\n", sep="") cat(rep("*", 39), "\n", sep="") crit <- attr(x$modcrit, "crit") attr(x$modcrit, "crit") <- NULL print(x$modcrit) if(crit != "aveAIC"){ cat("\nSelected model:", x$selMod, "\n") } else { cat("\nModel weights (AIC):\n") attr(x$selMod, "crit") <- NULL print(round(x$selMod, 4)) } if(is.null(length(x$doseEst))) return() cat("\n") cat(rep("*", 39), "\n", sep="") if(x$doseType == "TD") strn <- ", Delta=" if(x$doseType == "ED") strn <- ", p=" cat("Estimated ",x$doseType,strn,attr(x$doseEst, "addPar"),"\n", sep="") cat(rep("*", 39), "\n", sep="") attr(x$doseEst, "addPar") <- NULL print(round(x$doseEst, 4)) } plot.MCPMod <- function(x, CI = FALSE, level = 0.95, plotData = c("means", "meansCI", "raw", "none"), plotGrid = TRUE, colMn = 1, colFit = 1, ...){ if(is.null(x$mods)) stop("No models significant, nothing to plot") plotFunc(x, CI, level, plotData, plotGrid, colMn, colFit, ...) 
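  ## the actual plotting is delegated to the internal (unexported) plotFunc
  ## helper, which draws the fitted dose-response curve(s) together with the
  ## data summary requested via 'plotData' (means, means with CI, raw, or none)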
} DoseFinding/R/MCTtest.R0000644000176200001440000002012112620120667014310 0ustar liggesusers## here the multiple contrast test related functions ## performs multiple contrast test (MCP part of MCPMod) MCTtest <- function(dose, resp, data = NULL, models, S = NULL, type = c("normal", "general"), addCovars = ~1, placAdj = FALSE, alpha = 0.025, df = NULL, critV = NULL, pVal = TRUE, alternative = c("one.sided", "two.sided"), na.action = na.fail, mvtcontrol = mvtnorm.control(), contMat = NULL){ ## perform multiple contrast test type <- match.arg(type) alternative <- match.arg(alternative) ## check for valid arguments cal <- as.character(match.call()) lst <- checkAnalyArgs(dose, resp, data, S, type, addCovars, placAdj, na.action, cal) dd <- lst$dd;type <- lst$type;S <- lst$S doseNam <- lst$doseNam;respNam <- lst$respNam ## calculate optimal contrasts and test-statistics doses <- unique(dd[[doseNam]]) k <- length(doses) if(type == "normal"){ dd[, doseNam] <- as.factor(dd[, doseNam]) form <- paste(respNam, "~", doseNam, "+", addCovars[2], "-1", sep="") lm.fit <- lm(as.formula(form), data = dd) est <- coef(lm.fit)[1:k] vc <- vcov(lm.fit)[1:k, 1:k] df <- lm.fit$df.residual } else { est <- dd[[respNam]] vc <- S if(is.null(df)) df <- Inf } if(is.null(contMat)){ # calculate optimal contrasts contMat <- optContr(models, doses, S=vc, placAdj=placAdj)$contMat rownames(contMat) <- doses } else { # contrast matrix specified if(inherits(contMat, "optContr")) contMat <- contMat$contMat if(nrow(contMat) != length(est)) stop("contMat of incorrect dimensions") } ct <- as.vector(est %*% contMat) covMat <- t(contMat) %*% vc %*% contMat den <- sqrt(diag(covMat)) tStat <- ct/den if(alternative == "two.sided"){ tStat <- abs(tStat) } corMat <- cov2cor(covMat) if(is.null(critV)){ if(!pVal){ stop("either p-values or critical value need to be calculated.") } } else if(is.logical(critV) & critV == TRUE){ critV <- critVal(corMat, alpha, df, alternative, mvtcontrol) attr(critV, "Calc") <- TRUE # determines whether cVal was calculated } else { pVal <- FALSE # pvals are not calculated if critV is supplied attr(critV, "Calc") <- FALSE } if(pVal){ pVals <- pValues(contMat, corMat, alpha, df, tStat, alternative, mvtcontrol) } res <- list(contMat = contMat, corMat = corMat, tStat = tStat, alpha = alpha, alternative = alternative[1]) if(pVal) attr(res$tStat, "pVal") <- pVals res$critVal <- critV class(res) <- "MCTtest" res } print.MCTtest <- function(x, digits = 3, eps = 1e-3, ...){ cat("Multiple Contrast Test\n") cat("\n","Contrasts:","\n", sep="") print(round(x$contMat, digits)) cat("\n","Contrast Correlation:","\n", sep="") print(round(x$corMat, digits)) cat("\n","Multiple Contrast Test:","\n",sep="") ord <- rev(order(x$tStat)) if(!any(is.null(attr(x$tStat, "pVal")))){ pval <- format.pval(attr(x$tStat, "pVal"), digits = digits, eps = eps) dfrm <- data.frame(round(x$tStat, digits)[ord], pval[ord]) names(dfrm) <- c("t-Stat", "adj-p") } else { dfrm <- data.frame(round(x$tStat, digits)[ord]) names(dfrm) <- c("t-Stat") } print(dfrm) if(!is.null(x$critVal)){ twoSide <- x$alternative == "two.sided" vec <- c(" one-sided)", " two-sided)") cat("\n","Critical value: ", round(x$critVal, digits), sep="") if(attr(x$critVal, "Calc")){ cat(" (alpha = ", x$alpha,",", vec[twoSide+1], "\n", sep="") } else { cat("\n") } } } critVal <- function(corMat, alpha = 0.025, df = NULL, alternative = c("one.sided", "two.sided"), control = mvtnorm.control()){ ## calculate critical value alternative <- match.arg(alternative) if(missing(corMat)) stop("corMat 
needs to be specified") if(is.null(df)) stop("degrees of freedom need to be specified") tail <- ifelse(alternative[1] == "two.sided", "both.tails", "lower.tail") if (!missing(control)) { if(!is.list(control)) { stop("when specified, 'control' must be a list") } ctrl <- do.call("mvtnorm.control", control) } else { ctrl <- control } if(!is.finite(df)) # normal case df <- 0 ## qmvtCall <- c(list(1-alpha, tail = tail, df = df, corr = corMat, ## algorithm = ctrl, interval = ctrl$interval)) ## do.call("qmvt", qmvtCall)$quantile qmvtDF(1-alpha, tail = tail, df=df, corr=corMat, algorithm = ctrl) } checkAnalyArgs <- function(dose, resp, data, S, type, addCovars, placAdj, na.action, cal){ if(class(addCovars) != "formula") stop("addCovars argument needs to be of class formula") if(class(placAdj) != "logical") stop("placAdj argument needs to be of class logical") if(placAdj){ if(type == "normal") stop("\"placAdj == TRUE\" only allowed for type = \"general\"") if(any(dose == 0)) stop("If placAdj == TRUE there should be no placebo group") } if(!is.null(data)){ # data handed over in data frame if(!is.data.frame(data)) stop("data argument needs to be a data frame") nams <- c(cal[2], cal[3], all.vars(addCovars)) ind <- match(nams, names(data)) if (any(is.na(ind))) stop("variable(s): ", paste(nams[is.na(ind)], collapse= ", "), " not found in ", cal[4]) dd <- na.action(data[,nams]) } else { # data handed over via vectors if(addCovars != ~1) stop("need to hand over data and covariates in data frame") if(length(dose) != length(resp)) stop(cal[2], " and ", cal[3], " not of equal length") dd <- na.action(data.frame(dose, resp)) cal[2:3] <- gsub("\\$", "", cal[2:3]) cal[2:3] <- gsub("\\[|\\]", "", cal[2:3]) colnames(dd) <- cal[2:3] } doseNam <- cal[2];respNam <- cal[3] if(any(dd[[doseNam]] < -.Machine$double.eps)) stop("dose values need to be non-negative") if(!is.numeric(dd[[doseNam]])) stop("dose variable needs to be numeric") if(!is.numeric(dd[[respNam]])) stop("response variable needs to be numeric") ## check type related arguments if(type == "general" & is.null(S)) stop("S argument missing") if(type == "normal" & !is.null(S)) message("Message: S argument ignored for type == \"normal\"\n") if(type == "general" & addCovars != ~1) message("Message: addCovars argument ignored for type == \"general\"") if(!is.null(S)){ if(!is.matrix(S)) stop("S needs to be of class matrix") nD <- length(dd[[doseNam]]) if(nrow(S) != nD | ncol(S) != nD) stop("S and dose have non-confirming size") } ord <- order(dd[[doseNam]]) dd <- dd[ord, ] Sout <- NULL if(type == "general") Sout <- S[ord, ord] return(list(dd=dd, type = type, S = Sout, ord=ord, doseNam=doseNam, respNam=respNam)) } pValues <- function(contMat, corMat, alpha = 0.025, df, tStat, alternative = c("one.sided", "two.sided"), control = mvtnorm.control()){ ## function to calculate p-values nD <- nrow(contMat) nMod <- ncol(contMat) if(missing(corMat)) stop("corMat needs to be specified") if(missing(df)) stop("degrees of freedom need to be specified") if(length(tStat) != nMod) stop("tStat needs to have length equal to the number of models") alternative <- match.arg(alternative) ctrl <- mvtnorm.control() if (!missing(control)) { control <- as.list(control) ctrl[names(control)] <- control } if(!is.finite(df)) # normal case df <- 0 lower <- switch(alternative[1], one.sided = matrix(rep(-Inf, nMod^2), nrow = nMod), two.sided = matrix(rep(-tStat, each = nMod), nrow = nMod)) upper <- switch(alternative[1], one.sided = matrix(rep(tStat, each = nMod), nrow = nMod), two.sided = 
matrix(rep(tStat, each = nMod), nrow = nMod)) pVals <- numeric(nMod) for(i in 1:nMod){ pVals[i] <- 1 - pmvt(lower[,i], upper[,i], df = df, corr = corMat, algorithm = ctrl) } pVals } DoseFinding/R/bFitMod.R0000644000176200001440000003756512416034503014331 0ustar liggesusers## Bayesian and bootstrap fitting of dose-response models checkPrior <- function(prior){ z <- 1 for(z in 1:length(prior)){ prvec <- prior[[z]] nam <- names(prior)[z] if(!all(is.numeric(prvec))) stop("non-numeric entry in prior") if(nam %in% c("norm", "t", "lnorm")){ if(nam == "t"){ if(length(prvec) != 3) stop("need vector of length 3 for ", nam, " prior") if(prvec[2] <= 0|prvec[3] <= 0) stop("2nd and 3rd entry needs to be positive for ", nam, " prior") } else { if(length(prvec) != 2) stop("need vector of length 2 for ", nam, " prior") if(prvec[2] <= 0) stop("2nd entry needs to be positive for ", nam, " prior") } } else { if(length(prvec) != 4) stop("need vector of length 4 for beta prior") if(min(prvec[3:4]) <= 0) stop("entry 3 and 4 need to be positive for beta prior") if(prvec[1] >= prvec[2]) stop("entry 1 needs to be smaller than entry 2 for beta prior") } } } getPrBnds <- function(prior){ prbnds <- matrix(ncol = 2, nrow = length(prior)) for(z in 1:length(prior)){ prvec <- prior[[z]] nam <- names(prior)[z] if(nam %in% c("norm", "t")) prbnds[z,] <- c(-Inf, Inf) if(nam == "lnorm") prbnds[z,] <- c(0, Inf) if(nam == "beta") prbnds[z,] <- c(prvec[1], prvec[2]) } prbnds } projPrBnds <- function(par, lbnd, ubnd){ ## project start parameter into bounds if(par > lbnd & par < ubnd){ return(par) } else { rng <- ubnd-lbnd if(!is.finite(rng)) rng <- 5 if(par <= lbnd) return(lbnd+0.05*rng) if(par >= ubnd) return(ubnd-0.05*rng) } } bFitMod <- function(dose, resp, model, S, placAdj = FALSE, type = c("Bayes", "bootstrap"), start = NULL, prior = NULL, nSim = 1000, MCMCcontrol = list(), control = NULL, bnds, addArgs = NULL){ if(placAdj & model %in% c("linlog", "logistic")) stop("logistic and linlog models can only be fitted with placAdj") nD <- length(dose) if (length(resp) != nD) stop("dose and resp need to be of the same size") dose <- as.numeric(dose) if (any(dose < -.Machine$double.eps)) stop("dose values need to be non-negative") if (!is.numeric(dose)) stop("dose variable needs to be numeric") resp <- as.numeric(resp) ## order dose and resp increasingly ord <- order(dose) dose <- dose[ord] resp <- resp[ord] if (nrow(S) != nD | ncol(S) != nD) stop("S and dose have non-confirming size") if (missing(model)) stop("need to specify the model that should be fitted") scal <- off <- nodes <- NULL if(model %in% c("linlog", "betaMod")){ lst <- getAddArgs(addArgs, dose) if(model == "betaMod") scal <- lst$scal if(model == "linlog") off <- lst$off } if(model == "linInt") nodes <- dose ## model number builtIn <- c("linear", "linlog", "quadratic", "linInt", "emax", "logistic", "exponential", "sigEmax", "betaMod") modNr <- match(model, builtIn) if(is.na(modNr)) stop("invalid model selected") ## number of parameters nPar <- as.integer(c(2, 2, 3, length(dose), 3, 4, 3, 4, 4)[modNr]) type <- match.arg(type) if(type == "Bayes"){ res <- bFitMod.Bayes(dose, resp, S, model, placAdj, start, prior, nSim, MCMCcontrol, off, scal, nPar, modNr) res <- matrix(res, nrow = nSim, ncol = nPar) if(placAdj & model != "linInt") res <- res[,-1, drop = FALSE] } else { ## bootstrap res <- bFitMod.bootstrap(dose, resp, S, model, placAdj, nSim, control, bnds, off, scal, nodes) } out <- list(samples = res) if(model != "linInt"){ nams <- names(formals(model))[-1] } else { nams 
<- paste("d", dose, sep="") } if(modNr %in% c(2,9)) nams <- nams[-length(nams)] if(placAdj & model != "linInt") nams <- nams[-1] colnames(out$samples) <- nams attr(out, "model") <- model lst <- list(dose, resp, S) doseNam <- as.list(match.call())$dose respNam <- as.list(match.call())$resp attr(out, "doseRespNam") <- as.character(c(doseNam, respNam)) names(lst) <- c(doseNam, respNam, "S") attr(out, "data") <- lst attr(out, "type") <- type attr(out, "call") <- match.call() attr(out, "placAdj") <- placAdj attr(out, "prior") <- prior attr(out, "scal") <- scal attr(out, "off") <- off attr(out, "nodes") <- nodes class(out) <- "bFitMod" out } bFitMod.Bayes <- function(dose, resp, S, model, placAdj, start, prior, nSim, MCMCcontrol, off, scal, nPar, modNr){ ## get defaults for MCMCcontrol ctrl <- list(thin = 1, w = NULL, adapt=TRUE) if (!is.null(MCMCcontrol)) { MCMCcontrol <- as.list(MCMCcontrol) ctrl[names(MCMCcontrol)] <- MCMCcontrol } ## check prior distribution if(is.null(prior)) stop("need specification of prior in prior argument") prnr <- match(names(prior), c("norm", "t", "lnorm", "beta")) if(any(is.na(prnr))) stop("invalid prior selected") np <- nPar if(placAdj){ if(model != "linInt"){ np <- nPar - 1 } else { placAdj <- FALSE ## can proceed as if placAdj = FALSE } } if(length(prnr) != np) stop(length(prnr), " priors specified, need ", np," for selected model") checkPrior(prior) prBnds <- getPrBnds(prior) ## add some checks here (scale > 0, a > b, alpha,beta>0) prior <- as.double(do.call("c", prior)) ## calculate starting value using fitMod if needed ## and width parameter for slice sampler if(is.null(start)|is.null(ctrl$w)){ mD <- max(dose) ll <- list(emax = c(0.1, 1.5) * mD, exponential = c(0.5, 1.5) * mD, logistic = matrix(c(0.1, 0.05, 1.5, 1/2) * mD, 2), sigEmax = matrix(c(0.1 * mD, 0.5, 1.5 * mD, 5), 2), betaMod = matrix(c(0.2, 0.2, 4, 4), 2)) gfit <- fitMod(dose, resp, S=S, model=model, type = "general", bnds = ll[[model]], placAdj = placAdj, addArgs=list(off = off, scal = scal)) if(is.null(start)){ start <- coef(gfit) for(i in 1:length(start)){ start[i] <- projPrBnds(start[i], prBnds[i,1], prBnds[i,2]) } } else { for(i in 1:length(start)){ if((start[i] < prBnds[i,1]) | (start[i] > prBnds[i,2])) stop("specified start value not consistent with bounds on prior distribution") } } if(is.null(ctrl$w)) ctrl$w <- rep(1.0, nPar)#sqrt(diag(vcov(gfit))) } if(np != length(start)) stop("start of wrong length") if(placAdj){ # append 0 if(model != "linInt") start <- c(0.0, start) } if(length(ctrl$w) != length(start)) stop("w and start need to be of same size") ## add information for beta and linlog model if(model == "betaMod"){ if(is.null(scal)) stop("need scal parameter for betaMod") start <- c(start, as.double(scal)) } if(model == "linlog"){ if(is.null(off)) stop("need off parameter for betaMod") start <- c(start, as.double(off)) } ## preliminary formatting to send information to C start <- as.double(start) inS <- solve(S) if(inherits(inS, "try-error")) stop("specified S is not invertible") clinS <- as.double(chol(inS)) ## ensure that parameters are of right class nSimTot <- as.integer(nSim*ctrl$thin);thin <- as.integer(ctrl$thin) out <- double(floor(nSimTot/thin)*nPar) resp <- as.double(resp);dose <- as.double(dose) modNr <- as.integer(modNr);clinS <- as.double(clinS) nD <- as.integer(length(dose));w <- as.double(ctrl$w) noint <- as.integer(placAdj) ## call c code if(ctrl$adapt){ res <- .C("sample", as.integer(500), as.integer(1), out=double(500*nPar), start, noint, w, dose, modNr, nPar, 
double(length(dose)), resp, clinS, nD, prior, prnr, double(nPar), double(nPar)) res <- matrix(res$out, nrow = 500, ncol = nPar) w <- apply(res, 2, function(x) IQR(x)/1.3) } res <- .C("sample", nSimTot, thin, out=out, start, noint, w, dose, modNr, nPar, double(length(dose)), resp, clinS, nD, prior, prnr, double(nPar), double(nPar)) res$out } bFitMod.bootstrap <- function(dose, resp, S, model, placAdj, nSim, control, bnds, off, scal, nodes){ if(model %in% c("emax", "exponential", "betaMod", "logistic", "sigEmax")){ if(missing(bnds)){ message("Message: Need bounds in \"bnds\" for nonlinear models, using default bounds from \"defBnds\".") bnds <- defBnds(max(dose))[[model]] } } ## same arguments as in gFitDRModel function sims <- rmvnorm(nSim, resp, S) func <- function(x){ fit <- fitMod.raw(dose, x, S=S, model=model, type="general", placAdj=placAdj, bnds=bnds, control=control, off=off, scal=scal, nodes=nodes, covarsUsed = FALSE, df = Inf, doseNam = "dose", respNam = "resp") coef(fit) } out <- apply(sims, 1, func) if(is.matrix(out)){ return(t(out)) } else { return(matrix(out, nrow = nSim, ncol = 1)) } } ## to do write print, predict and summary method ess.mcmc <- function(series, lag.max = NULL){ ## initial monotone sequence estimate of effective sample size ## Geyer, 1992, Statistical Science, idea: ## sum of even and un-even autocorrelations (gamma) ## needs to be positive and monotone decreasing N <- length(series) if (length(unique(series)) == 1) return(NA) if (is.null(lag.max)) lag.max <- 10 * log10(N) ac <- acf(series, plot = FALSE, lag.max = lag.max)$acf[2:(lag.max + 1), , 1] gam <- ac[-length(ac)]+ac[-1] dgam <- -diff(gam) if (gam[1] < 0) return(N) m1 <- m2 <- lag.max ind1 <- gam < 0 if (any(ind1)) m1 <- min(which(ind1)) ind2 <- dgam < 0 if (any(ind2)) m2 <- min(which(ind2)) ind <- min(2 * min(m1, m2) + 1, lag.max) N/(1 + 2 * sum(ac[1:ind])) } print.bFitMod <- function(x, digits = 3, ...){ ## print brief summary of MCMC samples doseNam <- attr(x, "doseRespNam")[1] respNam <- attr(x, "doseRespNam")[2] resp <- attr(x, "data")[[respNam]] names(resp) <- attr(x, "data")[[doseNam]] cat("Dose Response Model\n\n") cat(paste("Model:", attr(x, "model")), "\n\n") if(attr(x, "type") == "Bayes"){ cat("Summary of posterior draws\n") func <- function(x){ c(mean=mean(x), sdev=sd(x), quantile(x, c(0.025, 0.25, 0.5, 0.75, 0.975)), n.eff=ess.mcmc(x)) } print(t(apply(x$samples, 2, func)), digits=digits) } else { cat("Summary of bootstrap draws\n") func <- function(x){ c(mean=mean(x), sdev=sd(x), quantile(x, c(0.025, 0.25, 0.5, 0.75, 0.975))) } print(t(apply(x$samples, 2, func)), digits=digits) } cat("\nFitted to:\n") print(signif(resp, digits+2)) } predict.bFitMod <- function(object, predType = c("full-model", "effect-curve"), summaryFct = function(x) quantile(x, probs = c(0.025, 0.25, 0.5, 0.75, 0.975)), doseSeq = NULL, lenSeq = 101, ...){ predType <- match.arg(predType) doseNam <- attr(object, "doseRespNam")[1] if (is.null(doseSeq)) { doseSeq <- seq(0, max(attr(object, "data")[[doseNam]]), length = lenSeq) } model <- attr(object, "model") scal <- attr(object, "scal") off <- attr(object, "off") placAdj <- attr(object, "placAdj") if(placAdj){ nodes <- c(0,attr(object, "data")[[doseNam]]) } else { nodes <- attr(object, "data")[[doseNam]] } out <- predSamples(samples = object$samples, doseSeq = doseSeq, placAdj = placAdj, model = model, scal = scal, off = off, nodes = nodes) if(predType == "effect-curve"){ out <- out - out[,1] } if(!is.null(summaryFct)){ out0 <- apply(out, 2, summaryFct) out <- matrix(out0, 
ncol = ncol(out)) } colnames(out) <- doseSeq out } predSamples <- function(samples, placAdjfullPars = FALSE, doseSeq, placAdj, model, scal, off, nodes){ ## placAdjfullPars argument only of interest if placAdj = TRUE ## it determines whether the e0 parameter is included as a row in the ## samples argument or not if(model != "betaMod") scal <- NULL if(model != "linlog") off <- NULL if(placAdj){ if(placAdjfullPars){ if(model != "linInt"){ func <- function(x){ pred <- do.call(model, c(list(doseSeq), as.list(c(x, scal, off)))) pred0 <- do.call(model, c(list(0), as.list(c(x, scal, off)))) pred-pred0 } } else { func <- function(x){ pred <- do.call(model, c(list(doseSeq), as.list(list(x, nodes)))) pred0 <- do.call(model, c(list(0), as.list(list(x, nodes)))) pred-pred0 } } } else { if(model != "linInt"){ func <- function(x) do.call(model, c(list(doseSeq), as.list(c(c(0,x), scal, off)))) } else { func <- function(x) do.call(model, c(list(doseSeq), as.list(list(c(0,x), nodes)))) } } } else { if(model != "linInt"){ func <- function(x) do.call(model, c(list(doseSeq), as.list(c(x, scal, off)))) } else { func <- function(x) do.call(model, c(list(doseSeq), as.list(list(x, nodes)))) } } out <- t(apply(samples, 1, func)) } plot.bFitMod <- function (x, plotType = c("dr-curve", "effect-curve"), quant = c(0.025, 0.5, 0.975), plotData = c("means", "meansCI", "none"), level = 0.95, lenDose = 201, ...){ addArgs <- list(...) plotType <- match.arg(plotType) doseNam <- attr(x, "doseRespNam")[1] respNam <- attr(x, "doseRespNam")[2] dose <- attr(x, "data")[[doseNam]] resp <- attr(x, "data")[[respNam]] doseSeq <- seq(0, max(dose), length = lenDose) plotData <- match.arg(plotData) placAdj <- attr(x, "placAdj") sumFct <- function(x){ quantile(x, probs = quant) } if (placAdj) plotType <- "effect-curve" if (plotType == "effect-curve") { pred <- predict(x, predType = plotType, summaryFct = sumFct, doseSeq = doseSeq) main <- "Effect Curve" if (placAdj) { if (plotData == "meansCI") { sdev <- sqrt(diag(attr(x, "data")$S)) q <- qnorm(1 - (1 - level)/2) LBm <- UBm <- numeric(length(dose)) for (i in 1:length(dose)) { LBm[i] <- resp[i] - q * sdev[i] UBm[i] <- resp[i] + q * sdev[i] } } else { LBm <- UBm <- NULL } } else { LBm <- UBm <- NULL } } if (plotType == "dr-curve") { pred <- predict(x, predType = "full-model", summaryFct = sumFct, doseSeq = doseSeq) main <- "Dose-Response Curve\n" if (plotData == "meansCI") { sdev <- sqrt(diag(attr(x, "data")$S)) q <- qnorm(1 - (1 - level)/2) LBm <- UBm <- numeric(length(dose)) for (i in 1:length(dose)) { LBm[i] <- resp[i] - q * sdev[i] UBm[i] <- resp[i] + q * sdev[i] } } else { LBm <- UBm <- NULL } } rng <- range(c(pred, resp, LBm, UBm)) dff <- diff(rng) ylim <- c(rng[1] - 0.02 * dff, rng[2] + 0.02 * dff) callList <- list(doseSeq, t(pred), type = "l", xlab = doseNam, ylim = ylim, ylab = respNam, main = main, lty=1, col=1) callList[names(addArgs)] <- addArgs do.call("matplot", callList) if (plotType == "dr-curve" | placAdj) { if (plotData == "means") points(dose, resp, pch = 19, cex = 0.75) if (plotData == "meansCI") { points(dose, resp, pch = 19, cex = 0.75) for (i in 1:length(dose)) { lines(c(dose[i], dose[i]), c(LBm[i], UBm[i]), lty = 2) } } } res <- list(doseSeq = doseSeq) attr(res, "level") <- level attr(res, "ylim") <- ylim res$mean <- pred invisible(res) } coef.bFitMod <- function (object, ...){ object$samples } DoseFinding/R/optContr.R0000644000176200001440000001563112411760734014612 0ustar liggesusers## functions for calculating optimal contrasts and critical value optC <- 
function(mu, Sinv = NULL, placAdj = FALSE){ ## calculate optimal contrast for given mu and Sinv (Sinv = proportional to inv covariance matrix) if(!placAdj){ aux <- rowSums(Sinv) # Sinv %*% 1 mn <- sum(mu * aux)/sum(aux) # formula is: S^(-1)(mu-mu*S^(-1)*1/(1*S^(-1)1)1) val <- Sinv %*% (mu - mn) ## now center so that sum is 0 ## and standardize to have norm 1 val <- val - sum(val) } else { # placAdj = TRUE val <- Sinv %*% mu } val/sqrt(sum(val^2)) } constOptC <- function(mu, Sinv = NULL, placAdj = FALSE, direction){ ## calculate optimal contrasts under the additional constraint that ## the control and the active treatment groups have a different sign ## in the contrast S <- solve(Sinv) # ugly fix, we should use S as argument if(!placAdj){ k <- length(mu) CC <- cbind(-1,diag(k-1)) SPa <- CC%*%S%*%t(CC) muPa <- as.numeric(CC%*%mu) } else { k <- length(mu)+1 SPa <- S muPa <- mu } ## determine direction of effect unContr <- solve(SPa)%*%muPa # unconstrained optimal contrast mult <- ifelse(direction == "increasing", 1, -1) # 1 increasing, -1 decreasing ## prepare call of quadprog::solve.QP D <- SPa d <- rep(0,k-1) tA <- rbind(muPa, mult*diag(k-1)) A <- t(tA) bvec <- c(1,rep(0,k-1)) contr <- quadprog::solve.QP(D, d, A, bvec, meq=1)$solution contr[abs(contr) < 1e-10] <- 0 if(!placAdj) contr <- c(-sum(contr), contr) contr/sqrt(sum(contr^2)) } modContr <- function(means, W = NULL, Sinv = NULL, placAdj = FALSE, type, direction){ ## call optC on matrix ## check whether constant shape was specified and remove (can happen for linInt model) if(!placAdj){ ind <- apply(means, 2, function(x){ length(unique(x)) > 1 }) } else { ## placAdj ind <- apply(means, 2, function(x){ any(x != 0) }) } if(all(!ind)) stop("All models correspond to a constant shapes, no optimal contrasts calculated.") if(any(!ind)){ nam <- colnames(means)[!ind] namsC <- paste(nam, collapse = ", ") if(length(nam) == 1){ message("The ", namsC, " model has a constant shape, cannot calculate optimal contrasts for this shape.") } else { message("The ", namsC, " models have a constant shape, cannot calculate optimal contrasts for these shapes.") } means <- means[,ind, drop=FALSE] } if(is.null(Sinv)) Sinv <- solve(W) if(type == "unconstrained"){ out <- apply(means, 2, optC, Sinv = Sinv, placAdj = placAdj) } else { # type == "constrained" out <- apply(means, 2, constOptC, Sinv = Sinv, placAdj = placAdj, direction = direction) } if(!is.matrix(out)){ ## can happen for placAdj=T and only 1 act dose nam <- names(out) out <- matrix(out, nrow = 1) colnames(out) <- nam } out } optContr <- function(models, doses, w, S, placAdj = FALSE, type = c("unconstrained", "constrained")){ ## calculate optimal contrasts and critical value if(!(inherits(models, "Mods"))) stop("models needs to be of class Mods") if(missing(doses)) doses <- attr(models, "doses") scal <- attr(models, "scal") off <- attr(models, "off") nodes <- attr(models, "doses") direction <- unique(attr(models, "direction")) if(length(direction) > 1) stop("need to provide either \"increasing\" or \"decreasing\" as direction to optContr") mu <- getResp(models, doses) if(placAdj){ mu0 <- getResp(models, 0) mu <- mu-matrix(mu0[1,], byrow = TRUE, nrow=nrow(mu), ncol=ncol(mu)) } type <- match.arg(type) if(type == "constrained"){ avail <- requireNamespace("quadprog", quietly = TRUE) if(!avail) stop("Need suggested package quadprog to calculate constrained contrasts") } if(any(doses == 0) & placAdj) stop("If placAdj == TRUE there should be no placebo group in \"doses\"") ## check for n and vCov arguments 
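## Minimal sketch (commented out) of the two mutually exclusive ways to supply the
## variance information checked just below; `mods` is a hypothetical Mods object and
## all numbers are arbitrary illustration values.
## doses <- c(0, 10, 25, 50)
## optContr(mods, doses, w = c(30, 30, 30, 30))                # w: per-dose weights, e.g. group sizes
## optContr(mods, doses, S = diag(0.5^2/c(30, 30, 30, 30)))    # S: covariance matrix of the dose estimates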
if(!xor(missing(w), missing(S))) stop("Need to specify exactly one of \"w\" or \"S\"") if(!missing(w)){ if(length(w) == 1){ # assume equal weights S <- Sinv <- diag(length(doses)) } else { if(length(w) != length(doses)) stop("w needs to be of length 1 or of the same length as doses") S <- diag(1/w) Sinv <- diag(w) } } else { if(!is.matrix(S)) stop("S needs to be a matrix") Sinv <- solve(S) } contMat <- modContr(mu, Sinv=Sinv, placAdj = placAdj, type = type, direction = direction) rownames(contMat) <- doses corMat <- cov2cor(t(contMat) %*% S %*% contMat) res <- list(contMat = contMat, muMat = mu, corMat = corMat) attr(res, "type") <- type attr(res, "placAdj") <- placAdj class(res) <- "optContr" res } print.optContr <- function(x, digits = 3, ...){ cat("Optimal contrasts\n") print(round(x$contMat, digits)) } summary.optContr <- function(object, digits = 3, ...){ class(object) <- "summary.optContr" print(object, digits = digits) } print.summary.optContr <- function(x, digits = 3, ...){ cat("Optimal contrasts\n") cat("\n","Optimal Contrasts:","\n", sep="") print(round(x$contMat, digits)) cat("\n","Contrast Correlation Matrix:","\n", sep="") print(round(x$corMat, digits)) cat("\n") } plot.optContr <- function (x, superpose = TRUE, xlab = "Dose", ylab = NULL, plotType = c("contrasts", "means"), ...){ plotType <- match.arg(plotType) if (is.null(ylab)) { if (plotType == "contrasts") { ylab <- "Contrast coefficients" } else { ylab <- "Normalized model means" } } cM <- x$contMat if (plotType == "means") cM <- t(t(x$muMat)/apply(x$muMat, 2, max)) nD <- nrow(cM) nM <- ncol(cM) cMtr <- data.frame(resp = as.vector(cM), dose = rep(as.numeric(dimnames(cM)[[1]]), nM), model = factor(rep(dimnames(cM)[[2]], each = nD), levels = dimnames(cM)[[2]])) if(superpose){ spL <- trellis.par.get("superpose.line") spL$lty <- rep(spL$lty, nM%/%length(spL$lty) + 1)[1:nM] spL$lwd <- rep(spL$lwd, nM%/%length(spL$lwd) + 1)[1:nM] spL$col <- rep(spL$col, nM%/%length(spL$col) + 1)[1:nM] ## number of columns in legend nCol <- ifelse(nM < 5, nM, min(4,ceiling(nM/min(ceiling(nM/4),3)))) key <- list(lines = spL, transparent = TRUE, text = list(levels(cMtr$model), cex = 0.9), columns = nCol) ltplot <- xyplot(resp ~ dose, data = cMtr, subscripts = TRUE, groups = cMtr$model, panel = panel.superpose, type = "o", xlab = xlab, ylab = ylab, key = key, ...) } else { ltplot <- xyplot(resp ~ dose | model, data = cMtr, type = "o", xlab = xlab, ylab = ylab, strip = function(...){ strip.default(..., style = 1) }, ...) 
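## Commented-out usage sketch for this plot method; `cont` stands for a hypothetical
## object returned by optContr() above.
## plot(cont)                      # all contrast coefficients in one panel
## plot(cont, superpose = FALSE)   # one panel per candidate model
## plot(cont, plotType = "means")  # normalized model means instead of contrasts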
} print(ltplot) } DoseFinding/R/optDesign.R0000644000176200001440000004414012411760664014735 0ustar liggesusers## optimal designs for model-fitting ## calculate gradient of model and gradient of TD calcGrads <- function(fmodels, doses, weights, Delta, off, scal, direction, designCrit){ modgrad <- TDgrad <- nPar <- vector("list", modCount(fmodels, fullMod=TRUE)) z <- 1 for(nam in names(fmodels)){ pars <- fmodels[[nam]] if(is.matrix(pars)){ for(i in 1:nrow(pars)){ modgrad[[z]] <- t(gradCalc(nam, pars[i,], doses, off=off, scal=scal)*sqrt(weights)) if(designCrit != "Dopt") TDgrad[[z]] <- calcTDgrad(nam, pars[i,], Delta, direction, off, scal) nPar[[z]] <- nPars(nam) z <- z+1 } } else { modgrad[[z]] <- t(gradCalc(nam, pars, doses, off=off, scal=scal)*sqrt(weights)) if(designCrit != "Dopt") TDgrad[[z]] <- calcTDgrad(nam, pars, Delta, direction, off, scal) nPar[[z]] <- nPars(nam) z <- z+1 } } modgrads <- do.call("c", modgrad) TDgrad <- do.call("c", TDgrad) nPar <- do.call("c", nPar) list(modgrads=modgrads, TDgrad=TDgrad, nPar=nPar) } ## returns the number of parameters (needed for C call) nPars <- function(mods){ builtIn <- c("linlog", "linear", "quadratic", "emax", "exponential", "logistic", "betaMod", "sigEmax") ind <- match(mods, builtIn) if(any(is.na(ind))){ stop(mods[which(is.na(ind))], " model not allowed in optDesign") } c(2,2,3,3,3,4,4,4)[ind] } ## function which calls different optimizers callOptim <- function(func, method, nD, control, lowbnd, uppbnd){ ## actual optimizer if(method == "nlminb"){ # nlminb and optim run on transformed values res <- nlminb(getStart(nD), objective=func, control = control, lower=rep(0, nD), upper=rep(pi, nD)) } else if(method == "Nelder-Mead"){ res <- optim(getStart(nD), fn=func, control = control) } else if(method == "solnp"){ # no need for transformed values for solnp avail <- requireNamespace("Rsolnp", quietly = TRUE) if(!avail) stop("Need suggested package Rsolnp for this calculation to use solnp optimizer") ## get starting value (need feasible starting value for solnp) ## try whether equal allocation is feasible eq <- rep(1/nD, nD) if(all(eq > lowbnd+0.001) & all(eq < uppbnd-0.001)){ strt <- eq } else { slb <- sum(lowbnd) sub <- sum(uppbnd) gam <- (1-slb)/(sub-slb) strt <- lowbnd+gam*(uppbnd-lowbnd) } eqfun <- function(x, ...){ sum(x) } con <- list(trace = 0) con[(namc <- names(control))] <- control res <- Rsolnp::solnp(strt, fun=func, eqfun=eqfun, eqB=1, control = con, LB = lowbnd, UB = uppbnd) } res } ## transforms from unconstrained values R^k into constrained ## values in S^k = {w|sum_i w_i=1 and w_i >= 0} transTrig <- function(y, k){ a <- numeric(k) if(k == 2){ a[1] <- sin(y[1])^2 } else { a[1:(k-1)] <- sin(y)^2 a[2:(k-1)] <- a[2:(k-1)]*cumprod(cos(y[1:(k-2)])^2) } a[k] <- prod(cos(y[1:(k-1)])^2) a } ## identity function idtrans <- function(y, k){ y } ## calculate uniform design but on R^k scale ## (inverse of transTrig at uniform design) getStart <- function(k){ y <- numeric(k-1) eq <- 1/k y[1] <- asin(sqrt(eq)) for(j in 2:(k-1)){ y[j] <- asin(sqrt(eq/prod(cos(y[(1:j)-1])^2))) } y } ## function called in the optimization (design criterion is ## implemented in C and called "critfunc") optFunc <- function(x, xvec, pvec, nD, probs, M, n, nold, bvec, designCrit, trans, standInt){ xtrans <- do.call("trans", list(x, nD)) res <- .C("critfunc", xvec, pvec, nD, probs, M, xtrans, n, nold, double(16), as.double(1e-15), bvec, designCrit, standInt, double(1), PACKAGE = "DoseFinding") res[[14]] } ## user visible function calling all others optDesign <- 
function(models, probs, doses, designCrit = c("Dopt", "TD", "Dopt&TD", "userCrit"), Delta, standDopt = TRUE, weights, nold = rep(0, length(doses)), n, control=list(), optimizer = c("solnp", "Nelder-Mead", "nlminb", "exact"), lowbnd = rep(0, length(doses)), uppbnd = rep(1, length(doses)), userCrit, ...){ if(!missing(models)){ if(!inherits(models, "Mods")) stop("\"models\" needs to be of class Mods") direction <- attr(models, "direction") off <- attr(models, "off") scal <- attr(models, "scal") if(missing(doses)) doses <- attr(models, "doses") } else { if(missing(userCrit)) stop("either \"models\" or \"userCrit\" need to be specified") if(missing(doses)) stop("For userCrit one always needs to specify doses") } ## check arguments designCrit <- match.arg(designCrit) optimizer <- match.arg(optimizer) if(missing(n)){ if(optimizer == "exact") stop("need to specify sample size via n argument") if(any(nold > 0)) stop("need to specify sample size for next cohort via n argument") n <- 1 ## value is arbitrary in this case } else { if(length(n) > 1) stop("n needs to be of length 1") } if(missing(Delta)){ if(designCrit %in% c("TD", "Dopt&TD")) stop("need to specify target difference \"Delta\"") } else { if(Delta <= 0) stop("\"Delta\" needs to be > 0, if curve decreases use \"direction = decreasing\"") } if(missing(weights)){ weights <- rep(1, length(doses)) } else { if(length(weights) != length(doses)) stop("weights and doses need to be of equal length") } if(length(lowbnd) != length(doses)) stop("lowbnd needs to be of same length as doses") if(length(uppbnd) != length(doses)) stop("uppbnd needs to be of same length as doses") if(any(lowbnd > 0) | any(uppbnd < 1)){ if(optimizer != "solnp" & optimizer != "exact") stop("only optimizers solnp or exact can handle additional constraints on allocations") } if(sum(lowbnd) > 1) stop("Infeasible lower bound specified (\"sum(lowbnd) > 1\"!)") if(sum(uppbnd) < 1) stop("Infeasible upper bound specified (\"sum(lowbnd) < 1\"!)") if(!is.logical(standDopt)) stop("standDopt needs to contain a logical value") standInt <- as.integer(standDopt) # use standardized or non-stand. 
D-optimality nD <- length(doses) if(designCrit == "TD" | designCrit == "Dopt&TD"){ # check whether TD exists in (0,max(dose)) if(length(unique(direction)) > 1) stop("need to provide either \"increasing\" or \"decreasing\" as direction to optDesign, when TD optimal designs should be calculated") direction <- unique(direction) tdMods <- TD(models, Delta, "continuous", direction) tdMods[tdMods > max(doses)] <- NA if(any(is.na(tdMods))) stop("TD does not exist for ", paste(names(tdMods)[is.na(tdMods)], collapse=", " ), " model(s)") } if(designCrit == "Dopt" | designCrit == "Dopt&TD"){ # check whether Fisher matrix can be singular np <- nPars(names(models)) if(max(np) > length(doses)) stop("need at least as many dose levels as there are parameters to calculate Dopt design.") } ## use transformation for Nelder-Mead and nlminb if(is.element(optimizer, c("Nelder-Mead", "nlminb"))){ transform <- transTrig } else { transform <- idtrans } if(designCrit != "userCrit"){ # prepare criterion function ## check arguments if(abs(sum(probs)-1) > sqrt(.Machine$double.eps)){ stop("probs need to sum to 1") } ## prepare criterion function lst <- calcGrads(models, doses, weights, Delta, off, scal, direction, designCrit) ## check for invalid values (NA, NaN and +-Inf) checkInvalid <- function(x) any(is.na(x)|(is.nan(x)|!is.finite(x))) grInv <- checkInvalid(lst$modgrads) MvInv <- ifelse(designCrit != "Dopt", checkInvalid(lst$TDgrad), FALSE) if(grInv | MvInv) stop("NA, NaN or +-Inf in gradient or bvec") ## prepare arguments before passing to C M <- as.integer(length(probs)) if(M != length(lst$nPar)) stop("probs of wrong length") if(length(lst$modgrads) != length(doses)*sum(lst$nPar)) stop("Gradient of wrong length.") if(length(nold) != nD) stop("Either nold or doses of wrong length.") nD <- as.integer(nD) p <- as.integer(lst$nPar) intdesignCrit <- match(designCrit, c("TD", "Dopt", "Dopt&TD")) objFunc <- function(par){ optFunc(par, xvec=as.double(lst$modgrads), pvec=as.integer(p), nD=nD, probs=as.double(probs), M=M, n=as.double(n), nold = as.double(nold), bvec=as.double(lst$TDgrad), trans = transform, standInt = standInt,designCrit = as.integer(intdesignCrit)) } } else { # user criterion if(missing(userCrit)) stop("need design criterion in userCrit when specified") if(!is.function(userCrit)) stop("userCrit needs to be a function") objFunc <- function(par){ par2 <- do.call("transform", list(par, nD)) userCrit((par2*n+nold)/(sum(nold)+n), doses, ...) 
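## Commented-out sketch of a user-supplied criterion as expected in this branch: a
## function of the design weights and the doses (plus any arguments passed via "..."),
## returning the value to be minimized. The criterion below (negative weighted spread
## of the doses) is purely illustrative, not a recommended design criterion; note that
## "probs" is not used on this path.
## myCrit <- function(w, doses) -sum(w * (doses - sum(w * doses))^2)
## optDesign(doses = c(0, 10, 25, 50), designCrit = "userCrit",
##           userCrit = myCrit, optimizer = "Nelder-Mead")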
} } ## perform actual optimization if(optimizer != "exact"){ # use callOptim function res <- callOptim(objFunc, optimizer, nD, control, lowbnd, uppbnd) if(optimizer == "Nelder-Mead" | optimizer == "nlminb"){ # transform results back des <- transTrig(res$par, length(doses)) if(optimizer == "Nelder-Mead"){ crit <- res$value } else { crit <- res$objective } } if(optimizer == "solnp"){ # no need to transform back des <- res$pars crit <- res$values[length(res$values)] } if(res$convergence){ message("Message: algorithm indicates no convergence, the 'optimizerResults' attribute of the returned object contains more details.") } } else { # exact criterion (enumeration of all designs) ## enumerate possible exact designs con <- list(maxvls1 = 1e6, maxvls2 = 1e5, groupSize = 1) con[(namc <- names(control))] <- control mat <- getDesMat(n, nD, lowbnd, uppbnd, con$groupSize, con$maxvls1, con$maxvls2) designmat <- sweep(mat*n, 2, nold, "+") res <- sweep(designmat, 2, n+sum(nold), "/") ## evaluate criterion function if(designCrit != "userCrit"){ critv <- calcCrit(res, models, probs, doses, designCrit, Delta, standDopt, weights, nold, n) } else { critv <- apply(res, 1, objFunc) } des <- mat[which.min(critv),] crit <- min(critv) } out <- list(crit = crit, design = des, doses = doses, n = n, nold = nold, designCrit = designCrit) attr(out, "optimizerResults") <- res class(out) <- "DRdesign" out } calcCrit <- function(design, models, probs, doses, designCrit = c("Dopt", "TD", "Dopt&TD"), Delta, standDopt = TRUE, weights, nold = rep(0, length(doses)), n){ if(!inherits(models, "Mods")) stop("\"models\" needs to be of class Mods") off <- attr(models, "off") scal <- attr(models, "scal") if(missing(doses)) doses <- attr(models, "doses") ## extract design if(inherits(design, "DRdesign")) design <- design$design if(!is.numeric(design)) stop("design needs to be numeric") if(!is.matrix(design)) design <- matrix(design, ncol = length(design)) if(ncol(design) != length(doses)) stop("design and doses should be of the same length") if(any(abs(rowSums(design)-1) > 0.001)) stop("design needs to sum to 1") if(missing(n)){ n <- 1 # value arbitrary } else { if(length(n) > 1) stop("n needs to be of length 1") } if(missing(weights)){ weights <- rep(1, length(doses)) } else { if(length(weights) != length(doses)) stop("weights and doses need to be of equal length") } designCrit <- match.arg(designCrit) if(missing(Delta) & substr(designCrit, 1, 3) == "TD") stop("need to specify clinical relevance parameter") direction <- attr(models, "direction") if(designCrit == "TD" | designCrit == "Dopt&TD"){ # check whether TD exists in (0,max(dose)) if(length(unique(direction)) > 1) stop("need to provide either \"increasing\" or \"decreasing\" as direction to optDesign, when TD optimal designs should be calculated") direction <- unique(direction) tdMods <- TD(models, Delta, "continuous", direction) tdMods[tdMods > max(doses)] <- NA if(any(is.na(tdMods))) stop("TD does not exist for ", paste(names(tdMods)[is.na(tdMods)], collapse=", " ), " model(s)") } if(designCrit == "Dopt" | designCrit == "Dopt&TD"){ # check whether Fisher matrix can be singular np <- nPars(names(models)) if(max(np) > length(doses)) stop("need more dose levels to calculate Dopt design.") } if(!is.logical(standDopt)) stop("standDopt needs to contain a logical value") standInt <- as.integer(standDopt) lst <- calcGrads(models, doses, weights, Delta, off, scal, direction, designCrit) ## check for invalid values (NA, NaN and +-Inf) checkInvalid <- function(x) 
any(is.na(x)|(is.nan(x)|!is.finite(x))) grInv <- checkInvalid(lst$modgrads) MvInv <- ifelse(designCrit != "Dopt", checkInvalid(lst$TDgrad), FALSE) if(grInv | MvInv) stop("NA, NaN or +-Inf in gradient or bvec") ## prepare for input into C M <- as.integer(length(probs)) nD <- as.integer(length(doses)) if(M != length(lst$nPar)) stop("Probs of wrong length") if(length(lst$modgrads) != length(doses)*sum(lst$nPar)) stop("Gradient of wrong length.") if(length(nold) != nD) stop("Either nold or doses of wrong length.") p <- as.integer(lst$nPar) intdesignCrit <- match(designCrit, c("TD", "Dopt", "Dopt&TD")) res <- numeric(nrow(design)) ## check for sufficient number of design points iter <- 1:nrow(design) design0 <- sweep(design, 2, nold, "+") count <- apply(design0, 1, function(x) sum(x > 0.0001)) ind <- count < max(p[probs > 0]) if(any(ind)){ iter <- iter[!ind] res[ind] <- NA if(all(is.na(res))) warning("need at least as many dose levels in the design as parameters in the model") } for(i in iter){ res[i] <- optFunc(design[i,], xvec=as.double(lst$modgrads), pvec=as.integer(p), nD=nD, probs=as.double(probs), M=M, n=as.double(n), nold = as.double(nold), bvec=as.double(lst$TDgrad), trans = idtrans, standInt = standInt, designCrit = as.integer(intdesignCrit)) } res } ## print designs print.DRdesign <- function(x, digits = 5, eps = 0.001, ...){ nam <- switch(x$designCrit, "TD" = "TD", "Dopt" = "D", "Dopt&TD" = "TD and D mixture", "userCrit" = "userCrit") cat("Calculated", nam, "- optimal design:\n") ind <- x$design > eps vec <- x$design[ind] names(vec) <- x$doses[ind] print(round(vec, digits = digits)) } ## auxiliary function for efficient rounding which.is.max <- function (x){ y <- seq_along(x)[x == max(x)] if (length(y) > 1L) sample(y, 1L) else y } ## efficient rounding (see Pukelsheim (1993), Ch. 12) rndDesign <- function(design, n, eps = 0.0001){ if(missing(n)) stop("total sample size \"n\" needs to be specified") n <- round(n) # ensure n is an integer (at least numerically) if(inherits(design, "DRdesign")){ design <- design$design } if(!inherits(design, "numeric")) stop("design needs to be a numeric vector.") zeroind <- design < eps if(any(zeroind)){ design <- design[!zeroind]/sum(design[!zeroind]) } l <- sum(!zeroind) nn <- ceiling((n-0.5*l)*design) while(sum(nn)!=n){ if(sum(nn) maxvls1) stop(combn, " (unrestricted) combinations, increase maxvls1 in control argument if this calculation should be performed") desmat <- getCompositions(nG, nDoses)/nG if(any(lowbnd > 0) | any(uppbnd < 1)){ comp <- matrix(lowbnd, byrow = TRUE, ncol = nDoses, nrow=nrow(desmat)) LindMat <- desmat >= comp comp <- matrix(uppbnd, byrow=TRUE, ncol = nDoses, nrow=nrow(desmat)) UindMat <- desmat <= comp ind <- rowSums(LindMat*UindMat) == nDoses desmat <- desmat[ind,] if(nrow(desmat) == 0) stop("no design is compatible with bounds specified in lowbnd and uppbnd") } if(nrow(desmat) > maxvls2) stop(nrow(desmat), " combinations, increase maxvls2 in control argument if this calculation should be performed") desmat } ## plot method for design objects plot.DRdesign <- function(x, models, lwdDes = 10, colDes = rgb(0,0,0,0.3), ...){ if(missing(models)) stop("need object of class Mods to produce plot") plot(models, ...) 
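## Commented-out end-to-end sketch for the design functions above; the model
## parameters, candidate doses and total sample size are arbitrary illustration
## values, not recommendations.
## mods <- Mods(emax = 25, doses = c(0, 10, 25, 50, 100), placEff = 0, maxEff = 0.4)
## des  <- optDesign(mods, probs = 1, designCrit = "Dopt", optimizer = "Nelder-Mead")
## rndDesign(des, n = 200)   # efficient rounding to integer group sizes
## plot(des, mods)           # overlay the allocation weights on the model shapes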
layoutmat <- trellis.currentLayout() nc <- ncol(layoutmat) nr <- nrow(layoutmat) total <- sum(layoutmat > 0) z <- 1 for(i in 1:nc){ for(j in 1:nr){ if(z > total) break trellis.focus("panel", i, j) args <- trellis.panelArgs() miny <- min(args$y) maxy <- max(args$y) dy <- maxy-miny for(k in 1:length(x$doses)){ yy <- c(0,(x$design*dy)[k])+miny xx <- rep(x$doses[k],2) panel.xyplot(xx, yy, type="l", col = colDes, lwd = lwdDes) } z <- z+1 trellis.unfocus() } } } DoseFinding/R/drmodels.R0000644000176200001440000000541612207430166014607 0ustar liggesusers## model functions and model gradients ## model functions linear <- function(dose, e0, delta){ e0 + delta * dose } linlog <- function(dose, e0, delta, off = 1){ linear(log(dose + off), e0, delta) } emax <- function(dose, e0, eMax, ed50){ e0 + eMax*dose/(ed50 + dose) } quadratic <- function(dose, e0, b1, b2){ e0 + b1 * dose + b2 * dose^2 } exponential <- function(dose, e0, e1, delta){ e0 + e1*(exp(dose/delta) - 1) } logistic <- function(dose, e0, eMax, ed50, delta){ e0 + eMax/(1 + exp((ed50 - dose)/delta)) } betaMod <- function(dose, e0, eMax, delta1, delta2, scal){ maxDens <- (delta1^delta1)*(delta2^delta2)/ ((delta1 + delta2)^(delta1+delta2)) dose <- dose/scal e0 + eMax/maxDens * (dose^delta1) * (1 - dose)^delta2 } sigEmax <- function(dose, e0, eMax, ed50, h){ e0 + eMax*dose^h/(ed50^h + dose^h) } linInt <- function(dose, resp, nodes){ if(length(nodes) != length(resp)) stop("\"nodes\" and \"resp\" need to be of same length in \"linInt\"") approx(x=nodes, y=resp, xout = dose)$y } ## gradients of built-in model functions linearGrad <- function(dose, ...){ cbind(e0=1, delta=dose) } linlogGrad <- function(dose, off, ...){ cbind(e0=1, delta=log(dose+off)) } quadraticGrad <- function(dose, ...){ cbind(e0=1, b1 = dose, b2 = dose^2) } emaxGrad <- function(dose, eMax, ed50, ...){ cbind(e0=1, eMax=dose/(ed50 + dose), ed50=-eMax * dose/(dose + ed50)^2) } exponentialGrad <- function(dose, e1, delta, ...){ cbind(e0=1, e1=exp(dose/delta)-1, delta=-exp(dose/delta) * dose * e1/delta^2) } logisticGrad <- function(dose, eMax, ed50, delta, ...){ den <- 1 + exp((ed50 - dose)/delta) g1 <- -eMax * (den - 1)/(delta * den^2) g2 <- eMax * (den - 1) * (ed50 - dose)/(delta^2 * den^2) cbind(e0=1, eMax=1/den, ed50=g1, delta=g2) } betaModGrad <- function(dose, eMax, delta1, delta2, scal, ...){ lg2 <- function(x) ifelse(x == 0, 0, log(x)) dose <- dose/scal if(any(dose > 1)) { stop("doses cannot be larger than scal in betaModel") } maxDens <- (delta1^delta1) * (delta2^delta2)/((delta1 + delta2)^(delta1 + delta2)) g1 <- ((dose^delta1) * (1 - dose)^delta2)/maxDens g2 <- g1 * eMax * (lg2(dose) + lg2(delta1 + delta2) - lg2(delta1)) g3 <- g1 * eMax * (lg2(1 - dose) + lg2(delta1 + delta2) - lg2(delta2)) cbind(e0=1, eMax=g1, delta1=g2, delta2=g3) } sigEmaxGrad <- function(dose, eMax, ed50, h, ...){ lg2 <- function(x) ifelse(x == 0, 0, log(x)) den <- (ed50^h + dose^h) g1 <- dose^h/den g2 <- -ed50^(h - 1) * dose^h * h * eMax/den^2 g3 <- eMax * dose^h * ed50^h * lg2(dose/ed50)/den^2 cbind(e0=1, eMax=g1, ed50=g2, h=g3) } linIntGrad <- function(dose, resp, nodes, ...){ knts <- c(nodes[1], nodes, nodes[length(nodes)]) splines::splineDesign(knots=knts, ord=2, x=dose) } DoseFinding/R/powMCT.R0000644000176200001440000000730312203245054014140 0ustar liggesusers## all design related functions for power calculations mvtnorm.control <- function(maxpts = 30000, abseps = 0.001, releps = 0, interval = NULL){ res <- list(maxpts = maxpts, abseps = abseps, releps = releps, interval = interval) class(res) 
<- "GenzBretz" res } powCalc <- function(alternative, critV, df, corMat, deltaMat, control){ nC <- nrow(corMat) # number of contrasts if(alternative[1] == "two.sided"){ lower <- rep(-critV, nC) } else { lower <- rep(-Inf, nC) } upper <- rep(critV, nC) if (!missing(control)) { if(!is.list(control)) { stop("when specified, 'control' must be a list") } ctrl <- do.call("mvtnorm.control", control) } else { ctrl <- control } ctrl$interval <- NULL # not used with pmvt nScen <- ncol(deltaMat) res <- numeric(nScen) for(i in 1:nScen){ pmvtCall <- c(list(lower, upper, df = df, corr = corMat, delta = deltaMat[,i], algorithm = ctrl)) res[i] <- as.vector(1 - do.call("pmvt", pmvtCall)) } names(res) <- colnames(deltaMat) res } powMCT <- function(contMat, alpha = 0.025, altModels, n, sigma, S, placAdj = FALSE, alternative = c("one.sided", "two.sided"), df, critV = TRUE, control = mvtnorm.control()){ alternative <- match.arg(alternative) if(inherits(contMat, "optContr")){ if(attr(contMat, "placAdj") != placAdj){ message("using \"placAdj\" specification from contMat object") placAdj <- attr(contMat, "placAdj") } contMat <- contMat$contMat } if(!is.matrix(contMat)) stop("contMat needs to be a matrix") nD <- nrow(contMat) # nr of doses nC <- ncol(contMat) # nr of contrasts ## extract covariance matrix if(missing(S)){ if(missing(n) | missing(sigma)) stop("Either S or n and sigma need to be specified") if(length(n) == 1) n <- rep(n, nD) if(length(n) != nD) stop("n needs to be of length nrow(contMat)") S <- sigma^2*diag(1/n) df <- sum(n) - nD } else { if(!missing(n)|!missing(sigma)) stop("Need to specify exactly one of \"S\" or \"n\" and \"sigma\"") if(nrow(S) != ncol(S)) stop("S needs to be a square matrix") if(nrow(S) != nD) stop("S needs to have as many rows&cols as there are doses") if(missing(df)) stop("need to specify degrees of freedom in \"df\", when specifying \"S\"") } ## extract means under the alternative if(missing(altModels)) stop("altModels argument needs to be specified") muMat <- getResp(altModels) if(placAdj){ muMat <- sweep(muMat, 2, muMat[1,], "-") # remove placebo column muMat <- muMat[-1, , drop=FALSE] } if(nrow(muMat) != nD) stop("Incompatible contMat and muMat") ## extract df if(missing(S)){ if(missing(df)) stop("degrees of freedom need to be specified in df") df <- sum(n) - nD } ## calculate non-centrality parameter deltaMat <- t(contMat) %*% muMat covMat <- t(contMat) %*% S %*% contMat den <- sqrt(diag(covMat)) deltaMat <- deltaMat/den if(alternative == "two.sided"){ deltaMat <- abs(deltaMat) } corMat <- cov2cor(covMat) if(!is.finite(df)) df <- 0 ## calculate critical value if(is.logical(critV) & critV == TRUE){ critV <- critVal(corMat, alpha, df, alternative, control) } # else assume critV already contains critical value res <- powCalc(alternative, critV, df, corMat, deltaMat, control) ## class(res) <- "powMCT" ## attr(res, "type") <- ifelse(missing(n), "S", "n&sigma") ## attr(res, "contMat") <- contMat ## attr(res, "muMat") <- muMat res } ## print.powMCT <- function(x, ...){ ## attributes(x)[2:5] <- NULL ## print(x) ## } DoseFinding/R/fitMod.R0000644000176200001440000012046612231266136014224 0ustar liggesusers## functions related to fitting dose-response models using ML or generalized approach defBnds <- function(mD, emax = c(0.001, 1.5)*mD, exponential = c(0.1, 2)*mD, logistic = matrix(c(0.001, 0.01, 1.5, 1/2)*mD, 2), sigEmax = matrix(c(0.001*mD, 0.5, 1.5*mD, 10), 2), betaMod = matrix(c(0.05,0.05,4,4), 2)){ list(emax = emax, logistic = logistic, sigEmax = sigEmax, exponential = 
exponential, betaMod = betaMod) } fit.control <- function(control){ ## get control parameters for nonlinear fitting ## default parameters res <- list(nlminbcontrol = list(), optimizetol = .Machine$double.eps^0.5, gridSize = list(dim1 = 30, dim2 = 144)) if(!is.null(control)){ ## check arguments first if(!is.null(control$nlminbcontrol)){ if(!is.list(control$nlminbcontrol)) stop("nlminbcontrol element of fitControl must be a list") } if(!is.null(control$gridSize)){ if(!is.list(control$gridSize)) stop("gridSize element of fitControl must be a list") nams <- names(control$gridSize) ind <- any(is.na(match(nams,c("dim1", "dim2")))) if(ind){ stop("gridSize list needs to have names dim1 and dim2") } else { if(!is.numeric(control$gridSize$dim1) | !is.numeric(control$gridSize$dim1)) stop("gridSize$dim1 and gridSize$dim2 need to be numeric") } } nams <- names(control) res[nams] <- control if(!all(nams %in% c("nlminbcontrol","optimizetol","gridSize"))) warning("control needs to have entries called \"nlminbcontrol\",\"optimizetol\",\"gridSize\"") res[nams] <- control } res } getGrid <- function(Ngrd, bnds, dim){ if(dim == 1){ grdnods <- (2*(1:Ngrd)-1)/(2*Ngrd) mat <- matrix(grdnods*(bnds[2]-bnds[1])+bnds[1], ncol = 1) } else { # use generalized lattice point set (glp) set (maximum size 75025) glp <- c(3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584, 4181, 6765, 10946, 17711, 28657, 46368, 75025) if(Ngrd > 75025) Ngrd <- 75025 if(Ngrd < 5) Ngrd <- 5 ind <- min((1:22)[glp >= Ngrd]) N <- glp[ind] k <- 1:N mat <- cbind((k-0.5)/N, ((glp[ind-1]*k-0.5)/N)%%1) mat[,1] <- mat[,1]*(bnds[1,2]-bnds[1,1])+bnds[1,1] mat[,2] <- mat[,2]*(bnds[2,2]-bnds[2,1])+bnds[2,1] } mat } fitMod <- function(dose, resp, data = NULL, model = NULL, S = NULL, type = c("normal", "general"), addCovars = ~1, placAdj = FALSE, bnds, df = NULL, start = NULL, na.action = na.fail, control = NULL, addArgs = NULL){ ## check for valid dose, resp and data cal <- as.character(match.call()) type <- match.arg(type) lst <- checkAnalyArgs(dose, resp, data, S, type, addCovars, placAdj, na.action, cal) doseNam <- lst$doseNam;respNam <- lst$respNam dose <- lst$dd[[doseNam]];type <- lst$type resp <- lst$dd[[respNam]];data <- lst$dd;S <- lst$S covarsUsed <- addCovars != ~1 ## check type related arguments if(type == "general"){ if(placAdj & model %in% c("linlog", "logistic")) # stop as fitting algorithm assumes f^0(0) = 0 stop("logistic and linlog models cannot be fitted to placebo adjusted data") if(covarsUsed) stop("addCovars argument ignored for type == \"general\"") if(is.null(df)) df <- Inf } ## check whether model has been specified correctly builtIn <- c("linlog", "linear", "quadratic", "linInt", "emax", "exponential", "logistic", "betaMod", "sigEmax") if(missing(model)) stop("Need to specify the model that should be fitted") modelNum <- match(model, builtIn) if(is.na(modelNum)) stop("Invalid dose-response model specified") ## check for start argument if(modelNum < 5 & !is.null(start)) message("Message: Starting values in \"start\" ignored for linear models") ## check for valid bnds if(modelNum > 4){ if(missing(bnds)){ message("Message: Need bounds in \"bnds\" for nonlinear models, using default bounds from \"defBnds\".") bnds <- defBnds(max(dose))[[model]] } else { if(is.null(bnds)){ message("Message: Need bounds in \"bnds\" for nonlinear models, using default bounds from \"defBnds\".") bnds <- defBnds(max(dose))[[model]] } } } ## addArgs argument scal <- off <- nodes <- NULL if(model %in% c("linlog", "betaMod")){ aPar <- 
getAddArgs(addArgs, sort(unique(dose))) if(model == "betaMod") scal <- aPar$scal if(model == "linlog") off <- aPar$off } if(model == "linInt"){ ## not allowed to use nodes different from used doses nodes <- sort(unique(dose)) } ## call fit-model raw! out <- fitMod.raw(dose, resp, data, model, S, type, addCovars, placAdj, bnds, df, start, na.action, control, doseNam=doseNam, respNam=respNam, off = off, scal = scal, nodes=nodes, covarsUsed) ## attach data to object reord <- order(lst$ord) if(type == "normal"){ if(covarsUsed){ attr(out, "data") <- data[reord,] } else { dat <- data.frame(dose=dose, resp=resp) colnames(dat) <- c(doseNam, respNam) attr(out, "data") <- dat[reord,] } } else { lst <- list(dose=dose[reord], resp=resp[reord], S=S[reord,reord]) names(lst) <- c(doseNam, respNam, "S") attr(out, "data") <- lst } out } fitMod.raw <- function(dose, resp, data, model, S, type, addCovars = ~1, placAdj = FALSE, bnds, df, start = NULL, na.action = na.fail, control, doseNam, respNam, off, scal, nodes, covarsUsed){ ## fit model but do not check for arguments (for use in MCPMod function)! ## differences to fitMod: ## - dose, resp need to be vectors containing the data ## - additional args: doseNam, respNam, off, scal builtIn <- c("linlog", "linear", "quadratic", "linInt", "emax", "exponential", "logistic", "betaMod", "sigEmax") modelNum <- match(model, builtIn) weights <- NULL;clinS <- NULL ## package data for model-fitting if(type == "general"){ # general approach dataFit <- data.frame(dose = dose, resp = resp) ## pre-calculate some necessary information clinS <- chol(solve(S)) } else { # normal data if(covarsUsed){ dataFit <- data ind1 <- which(names(dataFit) == doseNam) ind2 <- which(names(dataFit) == respNam) names(dataFit)[c(ind1, ind2)] <- c("dose", "resp") ord <- order(dataFit$dose) dataFit <- dataFit[ord,] ## sorting by increasing dose is needed for optGrid (specifically getZmat) } else { ## for efficiency fit on means in case of no covariates dataFit <- data.frame(dose = sort(unique(dose)), resp = as.numeric(tapply(resp, dose, mean))) ## calculate within group variance to recover full RSS later n <- as.vector(table(dose)) vars <- tapply(resp, dose, var) vars[n == 1] <- 0 S2 <- sum((n - 1) * vars) weights <- n } } ## call actual fitting algorithms if(is.element(modelNum, 1:4)){ # linear model fit <- fitModel.lin(dataFit, model, addCovars, off, type, weights, placAdj, clinS) } else { # non-linear model fit <- fitModel.bndnls(dataFit, model, addCovars, type, bnds, control, start, scal, weights, placAdj, clinS) } ## now need to post-process resid <- fit$resid if(type == "normal" & !covarsUsed) # fitted on means, need to recover full RSS resid <- fit$resid + S2 ## extract levels for factor covariates if(covarsUsed){ usedVars <- all.vars(addCovars) # variables used for fitting ind <- sapply(data, function(x) is.factor(x)) # determine factors in data ind <- is.element(names(data), usedVars) & ind # has the factor been used in fitting? 
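## Commented-out sketch of a call where this factor bookkeeping matters: a
## dose-response model with an additional covariate, here the gender variable from
## the IBScovars data set used in the package tests (assumed to be coded as a
## factor). The levels stored in "xlev" below let predict() rebuild the model matrix
## for new data later on; default bounds are used for the nonlinear parameters.
## data(IBScovars)
## fit <- fitMod(dose, resp, data = IBScovars, model = "emax", addCovars = ~ gender)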
xlev <- lapply(data[ind], levels) # extract levels } else { xlev <- NULL } df <- ifelse(is.null(fit$df), df, fit$df) res <- list(coefs = fit$coefs, resid, df=df, addCovars = addCovars) names(res)[2] <- ifelse(type == "normal", "RSS", "gRSS") attr(res, "model") <- model attr(res, "type") <- type attr(res, "placAdj") <- placAdj attr(res, "addCovars") <- addCovars attr(res, "xlev") <- xlev attr(res, "doseRespNam") <- c(doseNam, respNam) attr(res, "off") <- off attr(res, "scal") <- scal attr(res, "nodes") <- nodes class(res) <- "DRMod" res } fitModel.lin <- function(dataFit, model, addCovars, off, type, weights, placAdj, clinS){ dose <- dataFit$dose resp <- dataFit$resp ## build model matrices and fit model using QR decompositions X <- switch(model, linear = cbind(1, dose), linlog = cbind(1, log(dose + off)), quadratic = cbind(1, dose, dose^2), linInt = model.matrix(~as.factor(dose)-1, data=dataFit)) if(model == "quadratic"){ nam <- c("e0", "b1", "b2") } else { if(model == "linInt"){ nam <- paste("d", sort(unique(dose)), sep="") } else { nam <- c("e0", "delta") } } if(placAdj){ # no intercept if(model != "linInt"){ # only need to remove intercept for non-linInt mods X <- X[,-1, drop = FALSE] nam <- nam[-1] } } covarsUsed <- addCovars != ~1 if(type == "normal" & covarsUsed){ # normal with covariates form <- paste("resp ~", addCovars[2], sep="") m <- model.matrix(as.formula(form), data = dataFit) X <- cbind(X, m[,-1]) nam <- c(nam, colnames(m)[-1]) par <- as.numeric(qr.coef(qr(X),resp)) df <- nrow(X)-ncol(X) } else { # general or normal without covariates if(type == "normal"){ clinS <- diag(sqrt(weights)) df <- sum(weights) - length(nam) } else { df <- NULL } par <- as.numeric(qr.coef(qr(clinS%*%X),clinS%*%resp)) } pred <- as.numeric(X%*%par) names(par) <- nam if(covarsUsed){ out <- list(coefs=par, sum((resp-pred)^2), df = df) } else { out <- list(coefs=par, as.numeric(crossprod(clinS%*%(resp-pred))), df = df) } names(out)[2] <- "resid" out } fitModel.bndnls <- function(dataFit, model, addCovars, type, bnds, control, start, scal, weights, placAdj, clinS){ ctrl <- fit.control(control) if(model == "emax"|model == "exponential"){ dim <- 1 if(!is.matrix(bnds)) bnds <- matrix(bnds, nrow = 1) } else { dim <- 2 } dose <- dataFit$dose resp <- dataFit$resp ## preliminary calculations (need resXY, clinS and qrX) covarsUsed <- addCovars != ~1 covarNams <- NULL if(type == "general"){ # general approach if(placAdj){ # no intercept resXY <- as.numeric(clinS%*%resp) } else { X2 <- clinS%*%matrix(1, nrow = length(dose)) resp2 <- clinS%*%resp qrX <- qr(X2) resXY <- as.numeric(qr.resid(qrX, resp2)) } } else { # normal data form <- paste("resp ~", addCovars[2], sep="") m <- model.matrix(as.formula(form), dataFit) if(covarsUsed){ # covariates present covarNams <- colnames(m)[2:ncol(m)] qrX <- qr(m) resXY <- as.numeric(qr.resid(qrX, resp)) } else { # no covariates: fit on means clinS <- diag(sqrt(weights)) qrX <- qr(clinS%*%m) resXY <- as.numeric(qr.resid(qrX, sqrt(weights)*resp)) } } ## if no starting values provided use grid-search if(is.null(start)){ opt <- optGrid(model, dim, bnds, ctrl$gridSize, dose, type, qrX, resXY, clinS, placAdj, scal) strt <- opt$coefs;resid <- opt$resid if(dim == 1){ ## refine bounds N <- ctrl$gridSize$dim1 dif <- (bnds[2]-bnds[1])/N # distance between grid points bnds[1] <- max(c(strt-1.1*dif), bnds[1]) bnds[2] <- min(c(strt+1.1*dif), bnds[2]) } } else { strt <- start;resid <- Inf } ## start local optimizer at starting value opt2 <- optLoc(model, dim, bnds, dose, qrX, resXY, strt, scal, 
placAdj, type, ctrl$optimizetol, ctrl$nlminbcontrol, clinS) ## recover names nam1 <- switch(model, emax = c("eMax", "ed50"), sigEmax = c("eMax", "ed50", "h"), logistic = c("eMax", "ed50", "delta"), exponential = c("e1", "delta"), betaMod = c("eMax", "delta1", "delta2")) ## recover all parameters from nonlin parameter and return results f0 <- getStandDR(model, dose, opt2$coefs, scal) if(type == "general"){ # return "generalized" sum of squares if(placAdj){ # no intercept par0 <- sum((clinS %*% f0) * (clinS%*%resp))/sum((clinS %*% f0)^2) pred <- f0*par0 par <- c(par0, opt2$coefs) names(par) <- nam1 } else { # with intercept F <- cbind(1, f0) par0 <- qr.coef(qr(clinS %*% F), clinS %*% resp) pred <- F%*%par0 par <- c(par0, opt2$coefs) names(par) <- c("e0", nam1) } return(list(coefs=par, resid = opt2$resid)) } else { ## type == normal X <- cbind(1,f0,m[,-1]) if(covarsUsed){ par0 <- as.numeric(qr.coef(qr(X),resp)) pred <- as.numeric(X%*%par0) par <- c(par0[1:2], opt2$coefs, par0[3:length(par0)]) df <- nrow(X) - length(par) } else { # no covariates; was fitted on means par0 <- qr.coef(qr(clinS %*% X), clinS %*% resp) pred <- X%*%par0 par <- c(par0, opt2$coefs) df <- sum(weights) - length(par) } names(par) <- c("e0", nam1, covarNams) return(list(coefs=par, resid = opt2$resid, df = df)) } } optGrid <- function(model, dim, bnds, gridSize, dose, type, qrX, resXY, wMat, placAdj, scal){ ## grid optimizer for non-linear case N <- ifelse(dim==1, gridSize$dim1, gridSize$dim2) if(N < 1) stop("need N >= 1") nodes <- getGrid(N, bnds, dim) ## calculate residuals if(type == "normal" & is.null(wMat)){ # normal with covariates Zmat <- getZmat(dose, nodes, model, dim, scal) resZmat <- qr.resid(qrX, Zmat) } else { # normal without covariates or general Zmat <- getZmat.weighted(dose, nodes, model, dim, scal) Zmat <- wMat%*%Zmat if(placAdj & type == "general") # general without intercept resZmat <- Zmat else resZmat <- qr.resid(qrX, Zmat) } colsms1 <- colSums(resZmat * resXY) colsms2 <- colSums(resZmat * resZmat) RSSvec <- sum(resXY*resXY) - (colsms1*colsms1)/colsms2 indMin <- which.min(RSSvec) coefs <- nodes[indMin,] list(coefs=coefs, resid = RSSvec[indMin]) } getZmat <- function(x, nodes, model, dim, scal=NULL){ getPred <- function(vec, x, model, scal) getStandDR(model, x, vec, scal) xU <- sort(unique(x)) n <- as.numeric(table(x)) args <- nodes res0 <- apply(args, 1, getPred, x=xU, model=model, scal=scal) Zmat <- apply(res0, 2, function(x,n) rep(x,n), n=n) Zmat } getZmat.weighted <- function(x, nodes, model, dim, scal){ # does not exploit repeated observations getPred <- function(vec, x, model, scal) getStandDR(model, x, vec, scal) args <- nodes Zmat <- apply(args, 1, getPred, x=x, model=model, scal=scal) Zmat } getStandDR <- function(model, x, nl, scal){ ## calculate standardized response for nonlinear models switch(model, emax = emax(x, 0, 1, nl), sigEmax = sigEmax(x, 0, 1, nl[1], nl[2]), exponential = exponential(x, 0, 1, nl), logistic = logistic(x, 0, 1, nl[1], nl[2]), betaMod = betaMod(x, 0, 1, nl[1], nl[2], scal)) } optLoc <- function(model, dim, bnds, dose, qrX, resXY, start, scal, placAdj, type, tol, nlminbcontrol, clinS){ ## function to calculate ls residuals (to be optimized) optFunc <- function(nl, x, qrX, resXY, model, scal, clinS){ Z <- getStandDR(model, x, nl, scal) if(!is.null(clinS)){ Z <- clinS%*%Z } if(placAdj & type == "general"){ resXZ <- Z } else { resXZ <- try(qr.resid(qrX, Z)) # might be NaN if function is called on strange parameters if(inherits(resXZ, "try-error")) return(NA) } sumrsXYrsXZ 
<- sum(resXY*resXZ) sum(resXY*resXY) - sumrsXYrsXZ*sumrsXYrsXZ/sum(resXZ*resXZ) } if(dim == 1){ # one-dimensional models optobj <- optimize(optFunc, c(bnds[1], bnds[2]), x=dose, qrX=qrX, resXY=resXY, model = model, tol=tol, clinS=clinS, scal = scal) coefs <- optobj$minimum RSS <- optobj$objective } else { optobj <- try(nlminb(start, optFunc, x=dose, qrX=qrX, resXY=resXY, model = model, scal = scal, lower = bnds[,1], upper = bnds[,2], control = nlminbcontrol, clinS=clinS)) if(inherits(optobj, "try-error")){ coefs <- RSS <- NA } else { coefs <- optobj$par RSS <- optobj$objective } } list(coefs=coefs, resid=RSS) } sepCoef <- function(object){ model <- attr(object, "model") if(attr(object, "type") == "general") return(list(DRpars=object$coefs, covarPars = numeric(0))) if(attr(object, "type") == "normal" & object$addCovars == ~1) return(list(DRpars=object$coefs, covarPars = numeric(0))) ## determine the number of parameters (not counting e0 and eMax) if(model %in% c("linear","linlog")) dim <- 2 if(model %in% c("quadratic", "exponential", "emax")) dim <- 3 if(model %in% c("sigEmax", "logistic", "betaMod")) dim <- 4 if(model == "linInt") dim <- length(attr(object, "nodes")) cf <- object$coefs p <- length(cf) ## extract coefficients DRpars <- cf[1:dim] # coefs of DR model covarPars <- cf[(dim+1):p] return(list(DRpars=DRpars, covarPars=covarPars)) } print.DRMod <- function(x, digits = 4, ...){ if (length(x) == 1) { cat("NA\n") return() } cat("Dose Response Model\n\n") cat(paste("Model:", attr(x, "model")), "\n") cat(paste("Fit-type:", attr(x, "type")), "\n\n") Coefs <- sepCoef(x) cat("Coefficients dose-response model\n") print(signif(Coefs$DRpars, digits)) if(attr(x, "type") == "normal"){ if(x$addCovars != ~1){ cat("Coefficients additional covariates\n") print(signif(Coefs$covarPars, digits)) } cat("\nDegrees of freedom:", x$df, "\n") cat("Residual standard error:", signif(sqrt(x$RSS/x$df), digits),"\n") } if(attr(x, "type") == "general"){ cat("\nFitted to:\n") doseRespNam <- attr(x, "doseRespNam") resp <- attr(x, "data")[[doseRespNam[2]]] names(resp) <- attr(x, "data")[[doseRespNam[1]]] print(signif(resp, digits)) cat("\nGeneralized residual sum of squares:", signif(x$gRSS, digits),"\n") } } summary.DRMod <- function(object, digits = 3, ...){ class(object) <- "summary.DRMod" print(object, digits = digits) } print.summary.DRMod <- function(x, digits = 3, data, ...){ if(length(x) == 1){ cat("NA\n") return() } data <- attr(x, "data") cat("Dose Response Model\n\n") cat(paste("Model:", attr(x, "model")), "\n") type <- attr(x, "type") cat(paste("Fit-type:", type), "\n") if(type == "normal"){ ## residual information cat("\nResiduals:\n") nam <- c("Min", "1Q", "Median", "3Q", "Max") respNam <- attr(x, "doseRespNam")[2] resid <- predict.DRMod(x, predType = "full-model")-data[[respNam]] rq <- structure(quantile(resid), names = nam) print(rq, digits = digits, ...) } cat("\nCoefficients with approx. stand. error:\n") coefs <- x$coef sdv <- sqrt(diag(vcov.DRMod(x))) datf <- matrix(nrow = length(coefs), ncol = 2) datf[,1] <- coefs datf[,2] <- sdv colnam <- c("Estimate", "Std. 
Error") dimnames(datf) <- list(names(coefs), colnam) print(datf, digits = digits) if(type == "normal"){ cat("\nResidual standard error:", signif(sqrt(x$RSS/x$df), digits), "\n") cat("Degrees of freedom:", x$df, "\n") } if(type == "general"){ doseRespNam <- attr(x, "doseRespNam") dose <- attr(x, "data")[[doseRespNam[1]]] drEst <- attr(x, "data")[[doseRespNam[2]]] names(drEst) <- dose S <- attr(x, "data")$S dimnames(S) <- list(dose, dose) cat("\nFitted to:\n") print(signif(drEst, digits)) cat("\nwith Covariance Matrix:\n") print(signif(S, digits)) } } ## extract coefficients coef.DRMod <- function(object, sep = FALSE, ...){ if(length(object) == 1){ # object does not contain a converged fit warning("DRMod object does not contain a converged fit") return(NA) } if(sep){ return(sepCoef(object)) } object$coefs } vcov.DRMod <- function(object, ...){ ## object - DRMod object ## uGrad - function returning gradient for userModel if(length(object) == 1){ # object does not contain a converged fit warning("DRMod object does not contain a converged fit") return(NA) } type <- attr(object, "type") model <- attr(object, "model") cf <- sepCoef(object)$DRpars nams <- names(coef(object)) scal <- attr(object, "scal") off <- attr(object, "off") nodes <- attr(object, "nodes") doseNam <- attr(object, "doseRespNam")[1] if(type == "normal"){ addCovars <- object$addCovars xlev <- attr(object, "xlev") RSS <- object$RSS df <- object$df data <- attr(object, "data") dose <- attr(object, "data")[[doseNam]] m <- model.matrix(addCovars, data, xlev = xlev) } if(type == "general"){ placAdj <- attr(object, "placAdj") if(placAdj) # no intercept cf <- c(0, cf) dose <- attr(object, "data")[[doseNam]] inS <- solve(attr(object, "data")$S) } grd <- gradCalc(model, cf, dose, off, scal, nodes) if(type == "normal"){ J <- cbind(grd, m[,-1]) JtJ <- crossprod(J) covMat <- try(solve(JtJ)*RSS/df, silent=TRUE) if(!inherits(covMat, "matrix")){ covMat <- try(chol2inv(qr.R(qr(J)))*RSS/df, silent=TRUE) # more stable (a little slower) if(!inherits(covMat, "matrix")){ warning("cannot calculate covariance matrix. singular matrix in calculation of covariance matrix.") nrw <- length(grd[1,]) covMat <- matrix(NA, nrow=nrw, ncol=nrw) } dimnames(covMat) <- dimnames(JtJ) } } if(type == "general"){ if(placAdj){ if(model != "linInt") grd <- grd[,-1] } covMat <- try(solve(t(grd)%*%inS%*%grd), silent = TRUE) if(!inherits(covMat, "matrix")) { warning("cannot calculate covariance matrix. 
singular matrix in calculation of covariance matrix.") nrw <- length(grd[1,]) covMat <- matrix(NA, nrow=nrw, ncol=nrw) } } dimnames(covMat) <- list(nams, nams) covMat } gradCalc <- function(model, cf, dose, off, scal, nodes){ ## wrapper function to calculate gradient switch(model, linear = { linearGrad(dose) }, linlog = { linlogGrad(dose, off=off) }, quadratic = { quadraticGrad(dose) }, emax = { emaxGrad(dose, eMax = cf[2], ed50 = cf[3]) }, logistic = { logisticGrad(dose, eMax = cf[2], ed50 = cf[3], delta = cf[4]) }, sigEmax = { sigEmaxGrad(dose, eMax = cf[2], ed50 = cf[3], h = cf[4]) }, betaMod = { betaModGrad(dose, eMax = cf[2], delta1 = cf[3], delta2 = cf[4], scal = scal) }, exponential = { exponentialGrad(dose, e1 = cf[2], delta = cf[3]) }, linInt = { linIntGrad(dose, resp=cf, nodes=nodes) }) } predict.DRMod <- function(object, predType = c("full-model", "ls-means", "effect-curve"), newdata = NULL, doseSeq = NULL, se.fit = FALSE, ...){ ## Extract relevant information from object scal <- attr(object, "scal") off <- attr(object, "off") nodes <- attr(object, "nodes") model <- attr(object, "model") addCovars <- attr(object, "addCovars") xlev <- attr(object, "xlev") doseNam <- attr(object, "doseRespNam")[1] data <- attr(object, "data") type <- attr(object, "type") if(missing(predType)) stop("need to specify the type of prediction in \"predType\"") predType <- match.arg(predType) ## if model fitted on plac-adj. data can only produce predictions for effect-curve if(attr(object, "placAdj") & predType != "effect-curve"){ message("Message: Setting predType to \"effect-curve\" for placebo-adjusted data") predType <- "effect-curve" } if(type == "general" & predType == "full-model"){ ## there are no covariates message("Message: Setting predType to \"ls-means\" for \"type = general\"") predType <- "ls-means" } if(predType %in% c("ls-means", "full-model")){ ## create design-matrix according to the SAS predType ls-means if(predType == "ls-means"){ if(!is.null(newdata)) stop("newdata is ignored for \"predType = \"ls-means\"") if(is.null(doseSeq)){ ## use doses used for fitting if(type == "normal") doseSeq <- data[, doseNam] if(type == "general") doseSeq <- data[[doseNam]] } covarsUsed <- addCovars != ~1 if(covarsUsed){ nams <- all.vars(addCovars) out <- list() z <- 1 for(covar in nams){ varb <- data[,covar] if(is.numeric(varb)){ out[[z]] <- mean(varb) } else if(is.factor(varb)){ k <- nlevels(varb) out[[z]] <- rep(1/k, k-1) } z <- z+1 } out <- do.call("c", out) m <- matrix(rep(out, length(doseSeq)), byrow=TRUE, nrow = length(doseSeq)) } } ## create design-matrix either from newdata or data used for fitting if(predType == "full-model"){ if(!is.null(doseSeq) & predType == "full-model") stop("doseSeq should only be used when predType = \"effect-curve\" or \"ls-means\"") if(is.null(newdata)){ ## if not provided use covariates in observed data if(type == "normal"){ m <- model.matrix(addCovars, data) doseSeq <- data[, doseNam] } else { doseSeq <- data[[doseNam]] } } else { tms <- c(doseNam, attr(terms(addCovars), "term.labels")) missind <- !is.element(tms, names(newdata)) if(any(missind)){ chct <- paste("No values specified in newdata for", paste(tms[missind], collapse=", ")) stop(chct) } else { m <- model.matrix(addCovars, newdata, xlev = xlev) doseSeq <- newdata[, doseNam] if(nrow(m) != length(doseSeq)) stop("incompatible model matrix and doseSeq created from newdata") } } m <- m[,-1, drop=FALSE] # remove intercept column (is necessary) } coeflist <- sepCoef(object) # separate coefs of DR model and 
additional covars DRpars <- coeflist$DRpars covarPars <- coeflist$covarPars ## predictions if(model != "linInt"){ call <- c(list(doseSeq), as.list(c(DRpars, scal, off))) } else { call <- c(list(doseSeq), as.list(list(DRpars, nodes))) } mn <- do.call(model, call) if(addCovars != ~1) mn <- mn + as.numeric(m%*%covarPars) if(!se.fit){ return(as.numeric(mn)) } else { ## calculate standard error of predictions covMat <- vcov(object) if(any(is.na(covMat))){ seFit <- (rep(NA, length(doseSeq))) } else { grd <- gradCalc(model, DRpars, doseSeq, off, scal, nodes) if(addCovars != ~1) grd <- cbind(grd, m) cholcovMat <- try(chol(covMat), silent = TRUE) if (!inherits(cholcovMat, "matrix")) { warning("Cannot cannot calculate standard deviation for ", model, " model.\n") seFit <- rep(NA, length(doseSeq)) } else { seFit <- sqrt(rowSums((grd%*%t(cholcovMat))^2)) # t(grd)%*%covMat%*%grd } } return(list(fit = mn, se.fit = as.vector(seFit))) } } if(predType == "effect-curve") { ## predict effect curve if(!is.null(newdata)) stop("newdata is ignored for \"predType = \"effect-curve\"") if(is.null(doseSeq)){ if(type == "normal") doseSeq <- data[, doseNam] if(type == "general") doseSeq <- data[[doseNam]] } coeflist <- sepCoef(object) DRpars <- coeflist$DRpars if(attr(object, "placAdj")){ DRpars <- c(0, DRpars) if(model == "linInt") nodes <- c(0, nodes) } else { if(model != "linInt"){ DRpars[1] <- 0 } else { DRpars <- DRpars - DRpars[1] } } ## predictions if(model != "linInt"){ call <- c(list(doseSeq), as.list(c(DRpars, scal, off))) } else { call <- c(list(doseSeq), as.list(list(DRpars, nodes))) } mn <- do.call(model, call) if(is.element(model,c("logistic", "linlog"))){ # if standardized model not 0 at placebo call <- c(0, as.list(c(DRpars, scal, off))) predbase <- do.call(model, call) mn <- mn-predbase } if(!se.fit){ return(as.numeric(mn)) } else { ## calculate st. error (no need to calculate full covMat here) covMat <- vcov(object) if(addCovars != ~1) ## remove columns corresponding to covariates covMat <- covMat[1:length(DRpars), 1:length(DRpars)] if(!attr(object, "placAdj")){ ## remove intercept from cov-matrix if(model != "linInt"){ covMat <- covMat[-1,-1] } else { diffMat <- cbind(-1,diag(length(DRpars)-1)) covMat <- diffMat%*%covMat%*%t(diffMat) } } if(any(is.na(covMat))){ seFit <- (rep(NA, length(doseSeq))) } else { grd <- gradCalc(model, DRpars, doseSeq, off, scal, nodes) if(!is.matrix(grd)){ # can happen if length(doseSeq) == 1 grd <- matrix(grd, nrow = 1) } if(model == "linInt"){ grd <- grd[,-1, drop = FALSE] } else { grd0 <- gradCalc(model, DRpars, 0, off, scal, nodes) grd <- grd[, -1, drop=FALSE] grd0 <- grd0[,-1] grd <- sweep(grd, 2, grd0, "-") } cholcovMat <- try(chol(covMat), silent = TRUE) if (!inherits(cholcovMat, "matrix")) { warning("Cannot cannot calculate standard deviation for ", model, " model.\n") seFit <- rep(NA, length(doseSeq)) } else { seFit <- sqrt(rowSums((grd%*%t(cholcovMat))^2)) # t(grd)%*%covMat%*%grd } } res <- list(fit = mn, se.fit = as.vector(seFit)) return(res) } } } ## plot.DRMod <- function(x, CI = FALSE, level = 0.95, ## plotData = c("means", "meansCI", "none"), ## lenDose = 201, ...){ ## ## arguments passed to plot ## pArgs <- list(...) 
## ## Extract relevant information from object ## scal <- attr(x, "addArgs")$scal ## off <- attr(x, "addArgs")$off ## model <- attr(x, "model") ## addCovars <- attr(x, "addCovars") ## covarsUsed <- addCovars != ~1 ## xlev <- attr(x, "xlev") ## doseNam <- attr(x, "doseRespNam")[1] ## respNam <- attr(x, "doseRespNam")[2] ## data <- attr(x, "data") ## type <- attr(x, "type") ## placAdj <- attr(x, "placAdj") ## doseSeq <- seq(0, max(data[[doseNam]]), length=lenDose) ## plotData <- match.arg(plotData) ## if(type == "normal"){ ## ## first produce estimates for ANOVA type model ## if(plotData %in% c("means", "meansCI")){ ## data$doseFac <- as.factor(data[[doseNam]]) ## form <- as.formula(paste(respNam, "~ doseFac +", addCovars[2])) ## fit <- lm(form, data=data) ## ## build design matrix for prediction ## dose <- sort(unique(data[[doseNam]])) ## preddat <- data.frame(doseFac=factor(dose)) ## m <- model.matrix(~doseFac, data=preddat) ## if(covarsUsed){ ## ## get sas type ls-means ## nams <- all.vars(addCovars) ## out <- list() ## z <- 1 ## for(covar in nams){ ## varb <- data[,covar] ## if(is.numeric(varb)){ ## out[[z]] <- mean(varb) ## } else if(is.factor(varb)){ ## k <- nlevels(varb) ## out[[z]] <- rep(1/k, k-1) ## } ## z <- z+1 ## } ## out <- do.call("c", out) ## m0 <- matrix(rep(out, length(dose)), byrow=TRUE, nrow = length(dose)) ## m <- cbind(m, m0) ## } ## mns <- as.numeric(m%*%coef(fit)) ## lbndm <- ubndm <- rep(NA, length(mns)) ## if(plotData == "meansCI"){ ## sdv <- sqrt(diag(m%*%vcov(fit)%*%t(m))) ## quant <- qt(1 - (1 - level)/2, df=x$df) ## lbndm <- mns-quant*sdv ## ubndm <- mns+quant*sdv ## } ## } ## } ## if(type == "general"){ ## ## extract ANOVA estimates ## if(plotData %in% c("means", "meansCI")){ ## dose <- data[[doseNam]] ## mns <- data[[respNam]] ## sdv <- sqrt(diag(data$S)) ## lbndm <- ubndm <- rep(NA, length(dose)) ## if(plotData == "meansCI"){ ## quant <- qnorm(1 - (1 - level)/2) ## lbndm <- mns-quant*sdv ## ubndm <- mns+quant*sdv ## } ## } ## } ## ## curve produced (use "ls-means" apart when data are fitted on placAdj scale) ## predtype <- ifelse(placAdj, "effect-curve", "ls-means") ## predmn <- predict(x, doseSeq = doseSeq, predType = predtype, se.fit = CI) ## lbnd <- ubnd <- rep(NA, length(doseSeq)) ## if(CI){ ## quant <- qt(1 - (1 - level)/2, df=x$df) ## lbnd <- predmn$fit-quant*predmn$se.fit ## ubnd <- predmn$fit+quant*predmn$se.fit ## predmn <- predmn$fit ## } ## ## determine plotting range ## if(plotData %in% c("means", "meansCI")){ ## rng <- range(lbndm, ubndm, mns, predmn, ubnd, lbnd, na.rm=TRUE) ## } else { ## rng <- range(predmn, ubnd, lbnd, na.rm=TRUE) ## } ## dff <- diff(rng) ## ylim <- c(rng[1] - 0.02 * dff, rng[2] + 0.02 * dff) ## ## default title ## main <- "Dose-Response Curve" ## main2 <- ifelse(placAdj, "(placebo-adjusted)", "(ls-means)") ## main <- paste(main, main2) ## ## plot ## callList <- list(doseSeq, predmn, type = "l", col = "white", ## xlab = doseNam, ylim = ylim, ## ylab = respNam, main = main) ## callList[names(pArgs)] <- pArgs ## do.call("plot", callList) ## grid() ## if(plotData %in% c("means", "meansCI")){ ## points(dose, mns, pch = 19, cex = 0.75) ## if(plotData == "meansCI"){ ## for(i in 1:length(dose)){ ## lines(c(dose[i],dose[i]), c(lbndm[i], ubndm[i]), lty=2) ## } ## } ## } ## lines(doseSeq, predmn, lwd=1.5) ## lines(doseSeq, ubnd, lwd=1.5) ## lines(doseSeq, lbnd, lwd=1.5) ## } plot.DRMod <- function(x, CI = FALSE, level = 0.95, plotData = c("means", "meansCI", "raw", "none"), plotGrid = TRUE, colMn = 1, colFit = 1, ...){ plotFunc(x, CI, 
level, plotData, plotGrid, colMn, colFit, ...) } plotFunc <- function(x, CI = FALSE, level = 0.95, plotData = c("means", "meansCI", "raw", "none"), plotGrid = TRUE, colMn = 1, colFit = 1, ...){ ## Extract relevant information from object if(class(x) == "DRMod") obj <- x if(class(x) == "MCPMod") obj <- x$mods[[1]] addCovars <- attr(obj, "addCovars") covarsUsed <- addCovars != ~1 xlev <- attr(obj, "xlev") doseNam <- attr(obj, "doseRespNam")[1] respNam <- attr(obj, "doseRespNam")[2] data <- attr(obj, "data") type <- attr(obj, "type") placAdj <- attr(obj, "placAdj") plotData <- match.arg(plotData) if(type == "general" & plotData == "raw") stop("plotData =\"raw\" only allowed if fitted DRmod object is of type = \"normal\"") ## save anova info in pList list pList <- as.list(data) if(type == "normal"){ if(plotData %in% c("means", "meansCI")){ ## produce estimates for ANOVA type model data$doseFac <- as.factor(data[[doseNam]]) form <- as.formula(paste(respNam, "~ doseFac +", addCovars[2])) fit <- lm(form, data=data) ## build design matrix for prediction dose <- sort(unique(data[[doseNam]])) preddat <- data.frame(doseFac=factor(dose)) m <- model.matrix(~doseFac, data=preddat) if(covarsUsed){ ## get sas type ls-means nams <- all.vars(addCovars) out <- list() z <- 1 for(covar in nams){ varb <- data[,covar] if(is.numeric(varb)){ out[[z]] <- mean(varb) } else if(is.factor(varb)){ k <- nlevels(varb) out[[z]] <- rep(1/k, k-1) } z <- z+1 } out <- do.call("c", out) m0 <- matrix(rep(out, length(dose)), byrow=TRUE, nrow = length(dose)) m <- cbind(m, m0) } pList$dos <- sort(unique(data[[doseNam]])) pList$mns <- as.numeric(m%*%coef(fit)) if(plotData == "meansCI"){ sdv <- sqrt(diag(m%*%vcov(fit)%*%t(m))) quant <- qt(1 - (1 - level)/2, df=fit$df) pList$lbndm <- pList$mns-quant*sdv pList$ubndm <- pList$mns+quant*sdv } } } if(type == "general"){ ## extract ANOVA estimates if(plotData %in% c("means", "meansCI")){ pList$dos <- data[[doseNam]] pList$mns <- data[[respNam]] sdv <- sqrt(diag(data$S)) if(plotData == "meansCI"){ quant <- qnorm(1 - (1 - level)/2) pList$lbndm <- pList$mns-quant*sdv pList$ubndm <- pList$mns+quant*sdv } } } doseSeq <- seq(0, max(data[[doseNam]]), length=201) ## create data frame for plotting dr-functions predtype <- ifelse(placAdj, "effect-curve", "ls-means") if(class(x) == "MCPMod"){ nmods <- length(x$mods) lst <- vector(mode = "list", nmods) for(i in 1:nmods){ pred <- predict(x$mods[[i]], predType = predtype, doseSeq = doseSeq, se.fit = CI) lbnd <- ubnd <- rep(NA, length(doseSeq)) if(CI){ quant <- qt(1 - (1 - level)/2, df=x$mods[[i]]$df) lbnd <- pred$fit-quant*pred$se.fit ubnd <- pred$fit+quant*pred$se.fit pred <- pred$fit } lst[[i]] <- data.frame(rep(doseSeq, 3), c(pred, lbnd, ubnd), rep(c("pred", "LB", "UB"), each=length(doseSeq)), attr(x$mods[[i]], "model")) } plotdf <- do.call("rbind", lst) } if(class(x) == "DRMod"){ pred <- predict(x, predType = predtype, doseSeq = doseSeq, se.fit = CI) lbnd <- ubnd <- rep(NA, length(doseSeq)) if(CI){ quant <- qt(1 - (1 - level)/2, df=x$df) lbnd <- pred$fit-quant*pred$se.fit ubnd <- pred$fit+quant*pred$se.fit pred <- pred$fit } plotdf <- data.frame(rep(doseSeq, 3), c(pred, lbnd, ubnd), rep(c("pred", "LB", "UB"), each=length(doseSeq)), attr(x, "model")) } names(plotdf) <- c(doseNam, respNam, "group", "model") ## calculate plotting range rng <- switch(plotData, raw = range(data[[respNam]]), none = range(plotdf[[respNam]], na.rm=TRUE), range(plotdf[[respNam]], pList$mns, pList$lbndm, pList$ubndm, na.rm=TRUE)) dff <- diff(rng) ylim <- c(rng[1] - 0.05 * 
dff, rng[2] + 0.05 * dff) ## produce plot form <- as.formula(paste(respNam, "~", doseNam, "|model", sep="")) print( xyplot(form, groups = plotdf$group, data = plotdf, pList=pList, ..., ylim = ylim, panel = function(x, y, ..., pList){ if(plotGrid) panel.grid(h = -1, v = -1, col = "lightgrey", lty = 2) if(plotData != "none"){ if(type == "normal" & plotData == "raw"){ lpoints(data[[doseNam]], data[[respNam]], col = "grey45", pch=19) } else { lpoints(pList$dos, pList$mns, pch=19, col = colMn) if(plotData == "meansCI"){ quant <- qnorm(1 - (1 - level)/2) for(i in 1:length(pList$dos)){ llines(rep(pList$dos[i], 2), c(pList$lbndm[i], pList$ubndm[i]), lty=2, col = colMn, ...) } } } } panel.xyplot(x, y, col=colFit, type="l", ...) })) } logLik.DRMod <- function(object, ...){ type <- attr(object, "type") data <- attr(object, "data") if(type == "normal"){ RSS <- object$RSS n <- nrow(data) sig2 <- RSS/n val <- -n/2*(log(2*pi) + 1 + log(sig2)) attr(val, "df") <- length(object$coefs)+1 # +1 because of sigma parameter class(val) <- "logLik" return(val) } if(type == "general") stop("method glogLik only available for type == \"normal\"") } AIC.DRMod <- function(object, ..., k = 2){ type <- attr(object, "type") if(type == "general") stop("use method gAIC for type == \"general\"") logL <- logLik(object) -2*as.vector(logL) + k*(attr(logL, "df")) } gAIC <- function (object, ..., k = 2) UseMethod("gAIC") gAIC.DRMod <- function(object, ..., k = 2){ type <- attr(object, "type") if(type == "normal") stop("use method AIC for type == \"normal\"") object$gRSS+k*length(object$coefs) } DoseFinding/R/sampSize.R0000644000176200001440000002145112207430166014566 0ustar liggesusers## function for sample size calculation and functions to evaluate ## performance metrics for different sample sizes sampSize <- function (upperN, lowerN = floor(upperN/2), targFunc, target, tol = 0.001, alRatio, Ntype = c("arm", "total"), verbose = FALSE){ ## target function to iterate func <- function(n){ targFunc(n) - target } Ntype <- match.arg(Ntype) if (!missing(alRatio)) { if (any(alRatio <= 0)) { stop("all entries of alRatio need to be positive") } else { alRatio <- alRatio/sum(alRatio) } if(Ntype == "arm") { alRatio <- alRatio/min(alRatio) } } else { ## by default assume stop("allocation ratios need to be specified") } ## first call upper <- func(round(upperN*alRatio)) if(length(upper) > 1) stop("targFunc(n) to evaluate to a vector of length 1.") if(!is.numeric(upper)) stop("targFunc(n) needs to evaluate to a numeric.") ## bracket solution if (upper < 0) message("upper limit for sample size is raised") while (upper < 0) { upperN <- 2 * upperN upper <- func(round(upperN*alRatio)) } lower <- func(round(lowerN*alRatio)) if (lower > 0) message("lower limit for sample size is decreased") while (lower > 0) { lowerN <- round(lowerN/2) if (lowerN == 0) stop("cannot find lower limit on n") lower <- func(round(lowerN*alRatio)) } ## now start bisection if (verbose) { cat("Upper N:", upperN, "Upper value", round(upper+target, 4), "\n") cat("Lower N:", lowerN, "Lower value", round(lower+target, 4), "\n\n") } current <- tol+1 niter <- 0 ## bisect sample size until tolerance is achieved while (abs(current) > tol & (upperN > lowerN + 1)) { currN <- round((upperN + lowerN)/2) current <- func(round(currN * alRatio)) if (current > 0) { upperN <- currN } else { lowerN <- currN } niter <- niter + 1 if (verbose) { cat("Iter: ", niter, ", N = ", currN, ", current value = ", round(current+target, 4), "\n", sep = "") } } ## increase sample size so that the obtained 
value is larger than the target while (current < 0) { currN <- currN + 1 current <- func(round(currN * alRatio)) } res <- list(samp.size = round(currN * alRatio), target = round(current+target, 4)) attr(res, "alRatio") <- round(alRatio/min(alRatio), 4) attr(res, "target") <- target attr(res, "Ntype") <- Ntype class(res) <- "sampSize" res } print.sampSize <- function(x, ...){ cat("Sample size calculation\n\n") cat("alRatio:", attr(x, "alRatio"), "\n") cat("Total sample size:", sum(x$samp.size), "\n") cat("Sample size per arm:", x$samp.size, "\n") cat("targFunc:", x$target,"\n") } sampSizeMCT <- function(upperN, lowerN = floor(upperN/2), ..., power, sumFct = mean, tol = 0.001, alRatio, Ntype = c("arm", "total"), verbose = FALSE){ ## function to calculate sample size for multiple contrast test ## if S is specified this needs to be the (hypothetical) covariance matrix ## for a total sample size of 1 patient Ntype <- match.arg(Ntype) args <- list(...) namargs <- names(args) if(is.element("placAdj", namargs)){ if(args$placAdj) stop("placAdj needs to be FALSE for sampSizeMCT. Use sampSize directly in placebo-adjusted case.") } if(is.element("S", namargs)){ S <- args[["S"]] if(Ntype == "arm"){ Ntype <- "total" message("Only Ntype == \"total\" possible if S is specified") } if(is.element("df", namargs)){ if(is.finite(args$df)) message("df argument set to Inf, if S is specified. Use sampSize directly in case exact df are required.") } args$df <- Inf tFunc <- function(n){ N <- sum(n) Sn <- 1/N*S args$S <- Sn powVals <- do.call("powMCT", args) sumFct(powVals) } } else { if(is.element("n", namargs)) stop("n is not allowed to be specified for sample size calculation") if(!is.element("sigma", namargs)) stop("need sigma if S is not specified") tFunc <- function(n){ powVals <- powMCT(n=n, ...) sumFct(powVals) } } sampSize(upperN, lowerN, targFunc = tFunc, target = power, alRatio = alRatio, Ntype = Ntype, verbose = verbose) } targN <- function(upperN, lowerN, step, targFunc, alRatio, Ntype = c("arm", "total"), sumFct = c("min", "mean", "max")){ if(!is.character(sumFct)) stop("sumFct needs to be a character vector") Ntype <- match.arg(Ntype) if (!missing(alRatio)) { if (any(alRatio <= 0)) { stop("all entries of alRatio need to be positive") } else { alRatio <- alRatio/sum(alRatio) } if(Ntype == "arm") { alRatio <- alRatio/min(alRatio) } } else { ## by default assume stop("allocation ratios need to be specified") } nseq <- seq(lowerN, upperN, by=step) out <-t(sapply(nseq, function(x){ targFunc(round(x * alRatio)) })) if(nrow(out) == 1 & length(nseq) > 1){ out <- t(out) colnames(out) <- "" } out2 <- out for(i in 1:length(sumFct)){ out2 <- cbind(out2, apply(out, 1, sumFct[i])) } dimnames(out2) <- list(nseq, c(colnames(out), sumFct)) attr(out2, "alRatio") <- alRatio attr(out2, "sumFct") <- sumFct attr(out2, "Ntype") <- Ntype class(out2) <- "targN" out2 } powN <- function(upperN, lowerN, step, ..., alRatio, Ntype = c("arm", "total"), sumFct = c("min", "mean", "max")){ args <- list(...) namargs <- names(args) if(is.element("placAdj", namargs)){ if(args$placAdj) stop("placAdj needs to be FALSE for powN. Use targN directly in placebo-adjusted case.") } Ntype <- match.arg(Ntype) if(is.element("S", namargs)){ S <- args[["S"]] if(Ntype == "arm"){ Ntype <- "total" message("Only Ntype == \"total\" possible if S is specified") } if(is.element("df", namargs)){ if(is.finite(args$df)) message("df argument set to Inf, if S is specified. 
Use sampSize directly in case exact df are required.") } args$df <- Inf tFunc <- function(n){ N <- sum(n) Sn <- 1/N*S args$S <- Sn do.call("powMCT", args) } } else { if(is.element("n", namargs)) stop("n is not allowed to be specified for sample size calculation") if(!is.element("sigma", namargs)) stop("need sigma if S is not specified") tFunc <- function(n) powMCT(n=n, ...) } targN(upperN=upperN, lowerN=lowerN, step=step, targFunc=tFunc, alRatio=alRatio, Ntype = Ntype, sumFct = sumFct) } ## Produces Trellis plot of targN object plot.targN <- function(x, superpose = TRUE, line.at = NULL, xlab = NULL, ylab = NULL, ...){ nSeq <- as.integer(dimnames(x)[[1]]) alRatio <- attr(x, "alRatio") unbN <- (length(unique(alRatio)) > 1) if (is.null(xlab)) { if(attr(x, "Ntype") == "total" | unbN){ xlab <- "Overall sample size" nSeq <- sapply(nSeq, function(x){ sum(round(x*alRatio)) }) } else { xlab <- "Sample size per dose (balanced)" } } nams <- dimnames(x)[[2]] ## separating model data from summary data x <- as.data.frame(unclass(x)) nams <- names(x) nC <- ncol(x) pMatTr <- data.frame(targ = as.vector(unlist(x)), n = rep(nSeq, nC), type = factor(rep(nams, each = length(nSeq)), levels = nams)) if(superpose){ panelFunc1 <- function(x, y, subscripts, groups, lineAt, ...) { panel.grid(h = -1, v = -1, col = "lightgrey", lty = 2) if(!is.null(line.at)) panel.abline(h = lineAt, lty = 3, ..., col = "red") panel.superpose(x, y, subscripts, groups, ...) } trLn <- trellis.par.get("superpose.line")[c("col", "lwd", "lty")] for(i in seq(along = trLn)) { if(length(trLn[[i]]) > nC) trLn[[i]] <- trLn[[i]][1:nC] } ltplot <- xyplot(targ ~ n, pMatTr, groups = pMatTr$type, subscripts = TRUE, panel = panelFunc1, type = "l", lineAt = line.at, xlab = xlab, ylab = ylab, key = list(lines = trLn, text = list(lab = nams), transparent = TRUE, columns = ifelse(nC < 5, nC, min(4,ceiling(nC/min(ceiling(nC/4),3))))), ...) } else { # models in different panels panelFunc2 <- function(x, y, lineAt, ...) { panel.grid(h = -1, v = -1, col = "lightgrey", lty = 2) if(!is.null(line.at)) panel.abline(h = lineAt, lty = 3, ..., col = "red") ## used 2 for consistency with above panel.xyplot(x, y, ...) } ltplot <- xyplot(targ ~ n | type, pMatTr, panel = panelFunc2, type = "l", lineAt = line.at, xlab = xlab, ylab = ylab, strip = function(...) strip.default(..., style = 1), ...) 
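## note: lattice/trellis objects are not auto-printed inside a function, so the assembled object is printed explicitly below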
} print(ltplot) } DoseFinding/R/qmvtDF.R0000644000176200001440000002611612620172043014173 0ustar liggesusers## calculations of equi-coordinate quantiles for pmvt wgts <- function(typred, ttarg){ dists <- abs(typred-ttarg) if(sum(dists < 1 & dists > 0.0001) < 3){ return(rep(1, length(typred))) } (1-dists^3)^3*(dists < 1) } stop_crit <- function(xtol=NULL, ytol=NULL, fit, xest, ttarg, level=0.05){ if(!is.null(xtol)){ xvals <- c(xest-xtol, xest+xtol) pred <- predict(fit, data.frame(x=xvals), se.fit=TRUE) crit <- qt(1-level/2, df=fit$df.residual) cond1 <- pred$fit[1]+crit*pred$se.fit[1] < ttarg cond2 <- pred$fit[2]-crit*pred$se.fit[2] > ttarg return(list(stop=cond1 & cond2)) } if(!is.null(ytol)){ pred <- predict(fit, data.frame(x=xest), se.fit=TRUE) crit <- qt(1-level/2, df=fit$df.residual) cond1 <- pred$fit[1]+crit*pred$se.fit[1] < ttarg+ytol cond2 <- pred$fit[1]-crit*pred$se.fit[1] > ttarg-ytol return(list(stop=cond1 & cond2)) } } get_new_points <- function(fit, xest, targ, level=0.05){ pred <- predict(fit, data.frame(x=xest), se.fit=TRUE) cf <- coef(fit) xLB <- (pred$fit-pred$se.fit-cf[1])/cf[2] xUB <- (pred$fit+pred$se.fit-cf[1])/cf[2] c(xLB, xUB) } get_est <- function(fit, ttarg){ cf <- coef(fit) as.numeric(ttarg-cf[1])/cf[2] } fitlr <- function(x, y, weights, method, N, sterr){ ind <- weights > 0 weights <- weights[ind] x <- x[ind] if(method == "lm"){ y <- y[ind] fit <- glm(y~x, weights = weights, family=quasi("probit")) } if(method == "wlm"){ y <- y[ind] sterr <- sterr[ind] sterr <- sterr/min(sterr) wgts <- weights/sterr^2 fit <- glm(y~x, weights = wgts, family=quasi("probit")) } if(method == "glm"){ ymat <- cbind(y, 1-y)*N ymat <- ymat[ind,] fit <- glm(ymat~x, weights = weights, family=binomial("probit")) } return(fit) } get_quant_loclin <- function(func, targ, interval, ytol=NULL, xtol=NULL, maxiter = 500, method = c("lm", "wlm", "glm"), verbose = FALSE, ...){ ## input argument checks if(interval[2] <= interval[1]) stop("first entry of interval needs to be smaller than second") method <- match.arg(method) ## set up output vectors x <- y <- sterr <- numeric(4+maxiter) xest <- numeric(1+maxiter) ## first three evaluations x[1:3] <- c(interval, mean(interval)) res <- lapply(x[1:3], function(x) func(x, ...)) y[1:3] <- sapply(res, function(x) x) if(method == "wlm"){ if(is.null(attr(res[[1]], "sterr"))) stop("For method == wlm, func neets to have an attribute sterr") sterr[1:3] <- sapply(res, function(x) attr(x, "sterr")) } if(method == "glm"){ if(is.null(attr(res[[1]], "n"))) stop("For method == glm, func neets to have an attribute n") N <- attr(res[[1]], "n") } ## check for non-monotone function if(y[1] > y[2]) stop("func does not appear to be monotone") ttarg <- qnorm(targ) # transform target as well ## start iterating ycur <- y[1:3];xcur <- x[1:3] fit <- fitlr(xcur, ycur, weights=rep(1,3), method = method, N=N, sterr=sterr[1:3]) xest[1] <- get_est(fit, ttarg) x[4] <- xest[1] res <- func(x[4], ...) 
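## store the function value at the first regression-based estimate; it enters the weighted local-linear refits in the iteration loop below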
y[4] <- res if(method == "wlm") sterr[4] <- attr(res, "sterr") count <- 4 for(i in 1:maxiter){ ind <- 1:count xcur <- x[ind];ycur <- y[ind] tycurpred <- predict(fit, newdata=data.frame(x=xcur)) weights <- wgts(tycurpred, ttarg) fit <- fitlr(xcur, ycur, weights=weights, method = method, N=N, sterr=sterr[ind]) stp <- stop_crit(xtol, ytol, fit, xest[i], ttarg) if(verbose & i > 1){ txt <- sprintf("Iteration %i, Estimate: %f, LB: %f (%f), UB: %f (%f)\n", i, xest[i], x[count-1], y[count-1], x[count], y[count]) cat(txt) } if(stp$stop) break xest[i+1] <- get_est(fit, ttarg) new_x <- get_new_points(fit, xest[i+1], targ) new_y <- lapply(new_x, function(x) func(x, ...)) ind <- (count+1):(count+2) x[ind] <- new_x y[ind] <- c(new_y[[1]], new_y[[2]]) if(method == "wlm") sterr[ind] <- sapply(new_y, function(x) attr(x, "sterr")) count <- count+2 } out <- xest[i] if(i == maxiter){ attr(out, "message") <- "Maximum number of iterations reached without sufficient accuracy" } else { attr(out, "message") <- "Normal Completion" } attr(out, "iterations") <- list(xest=c(rep(NA,4), xest[1:i]), x=x[1:count], y=ycur[1:count]) out } ######################################################################## ## non-exported objects from mvtnorm (only temporarily needed) isInf <- function (x) x > 0 & is.infinite(x) dots2GenzBretz <- function (...) { addargs <- list(...) fm1 <- sapply(names(addargs), function(x) length(grep(x, names(formals(GenzBretz)))) == 1) fm2 <- sapply(names(addargs), function(x) length(grep(x, names(formals(uniroot)))) == 1) algorithm <- NULL uniroot <- NULL if (any(fm1)) algorithm <- do.call("GenzBretz", addargs[fm1]) if (any(fm2)) uniroot <- addargs[fm2] list(algorithm = algorithm, uniroot = uniroot) } checkmvArgs <- function (lower, upper, mean, corr, sigma) { UNI <- FALSE if (!is.numeric(lower) || !is.vector(lower)) stop(sQuote("lower"), " is not a numeric vector") if (!is.numeric(upper) || !is.vector(upper)) stop(sQuote("upper"), " is not a numeric vector") if (!is.numeric(mean) || !is.vector(mean)) stop(sQuote("mean"), " is not a numeric vector") if (is.null(lower) || any(is.na(lower))) stop(sQuote("lower"), " not specified or contains NA") if (is.null(upper) || any(is.na(upper))) stop(sQuote("upper"), " not specified or contains NA") rec <- cbind(lower, upper, mean) lower <- rec[, "lower"] upper <- rec[, "upper"] if (!all(lower <= upper)) stop("at least one element of ", sQuote("lower"), " is larger than ", sQuote("upper")) mean <- rec[, "mean"] if (any(is.na(mean))) stop("mean contains NA") if (is.null(corr) && is.null(sigma)) { corr <- diag(length(lower)) } if (!is.null(corr) && !is.null(sigma)) { sigma <- NULL warning("both ", sQuote("corr"), " and ", sQuote("sigma"), " specified: ignoring ", sQuote("sigma")) } if (!is.null(corr)) { if (!is.numeric(corr)) stop(sQuote("corr"), " is not numeric") if (!is.matrix(corr)) { if (length(corr) == 1) UNI <- TRUE if (length(corr) != length(lower)) stop(sQuote("diag(corr)"), " and ", sQuote("lower"), " are of different length") } else { if (length(corr) == 1) { UNI <- TRUE corr <- corr[1, 1] if (length(lower) != 1) stop(sQuote("corr"), " and ", sQuote("lower"), " are of different length") } else { if (length(diag(corr)) != length(lower)) stop(sQuote("diag(corr)"), " and ", sQuote("lower"), " are of different length") if (!chkcorr(corr)) stop(sQuote("corr"), " is not a correlation matrix") } } } if (!is.null(sigma)) { if (!is.numeric(sigma)) stop(sQuote("sigma"), " is not numeric") if (!is.matrix(sigma)) { if (length(sigma) == 1) UNI <- TRUE if 
(length(sigma) != length(lower)) stop(sQuote("diag(sigma)"), " and ", sQuote("lower"), " are of different length") } else { if (length(sigma) == 1) { UNI <- TRUE sigma <- sigma[1, 1] if (length(lower) != 1) stop(sQuote("sigma"), " and ", sQuote("lower"), " are of different length") } else { if (length(diag(sigma)) != length(lower)) stop(sQuote("diag(sigma)"), " and ", sQuote("lower"), " are of different length") if (!isTRUE(all.equal(sigma, t(sigma))) || any(diag(sigma) < 0)) stop(sQuote("sigma"), " is not a covariance matrix") } } } list(lower = lower, upper = upper, mean = mean, corr = corr, sigma = sigma, uni = UNI) } chkcorr <- function (x) { if (!is.matrix(x)) return(FALSE) rownames(x) <- colnames(x) <- NULL storage.mode(x) <- "numeric" ONE <- 1 + sqrt(.Machine$double.eps) ret <- (min(x) < -ONE || max(x) > ONE) || !isTRUE(all.equal(diag(x), rep(1, nrow(x)))) !ret } ######################################################################## ## main function qmvtDF <- function (p, tail = c("lower.tail", "upper.tail", "both.tails"), df = 1, delta = 0, corr = NULL, sigma = NULL, algorithm = GenzBretz(), type = c("Kshirsagar", "shifted"), ytol = 0.001, maxiter = 500, ...) { if (length(p) != 1 || (p <= 0 || p >= 1)) stop(sQuote("p"), " is not a double between zero and one") dots <- dots2GenzBretz(...) if (!is.null(dots$algorithm) && !is.null(algorithm)) algorithm <- dots$algorithm type <- match.arg(type) tail <- match.arg(tail) if (tail == "both.tails" && p < 0.5) stop("cannot compute two-sided quantile for p < 0.5") dim <- 1 if (!is.null(corr)) dim <- NROW(corr) if (!is.null(sigma)) dim <- NROW(sigma) lower <- upper <- rep.int(0, dim) args <- checkmvArgs(lower, upper, delta, corr, sigma) if (args$uni) { if (tail == "both.tails") p <- ifelse(p < 0.5, p/2, 1 - (1 - p)/2) if (df == 0 || isInf(df)) { q <- qnorm(p, mean = args$mean, lower.tail = (tail != "upper.tail")) } else { q <- qt(p, df = df, ncp = args$mean, lower.tail = (tail != "upper.tail")) } qroot <- list(quantile = q, f.quantile = 0) return(qroot) } dim <- length(args$mean) pfct <- function(q) { switch(tail, both.tails = { low <- rep(-abs(q), dim) upp <- rep(abs(q), dim) }, upper.tail = { low <- rep(q, dim) upp <- rep(Inf, dim) }, lower.tail = { low <- rep(-Inf, dim) upp <- rep(q, dim) }, ) ret <- pmvt(lower = low, upper = upp, df = df, delta = args$mean, corr = args$corr, sigma = args$sigma, algorithm = algorithm, type = type) return(ret) } intp <- switch(tail, both.tails = (1 - (1 - p)/2)^(c(1,1/dim)), upper.tail = 1 - p^c(1,1/dim), lower.tail = p^c(1,1/dim)) if(is.finite(df) && (df > 0)){ intq <- qt(intp, df = df) } else { intq <- qnorm(intp) } qroot <- get_quant_loclin(pfct, p, intq*c(0.8, 1.25), ytol=ytol, maxiter=maxiter, verbose=FALSE, method="lm") attr(qroot, "iterations") <- NULL qroot } DoseFinding/R/Mods.R0000644000176200001440000010011712416033253013670 0ustar liggesusers## functions related to creating, plotting candidate model sets modCount <- function(models, fullMod = FALSE){ ## counts the number of models in a candidate model-list if(!fullMod){ nr <- lapply(names(models), function(x){ xx <- models[[x]] if(is.null(xx)) return(1) if(is.element(x, c("emax", "quadratic", "exponential"))) return(length(xx)) if(is.element(x, c("sigEmax", "logistic", "betaMod"))) return(length(xx)/2) if(x == "linInt"){ if(is.vector(xx)) return(1) if(is.matrix(xx)) return(nrow(xx)) } }) } else { nr <- lapply(models, function(x){ if(is.vector(x)) return(1) if(is.matrix(x)) return(nrow(x)) }) } Reduce("+",nr) } getAddArgs <- function(addArgs, 
doses = NULL){ if(!is.null(doses)){ addArgs0 <- list(scal = 1.2*max(doses), off = 0.01*max(doses)) } else { addArgs0 <- list(scal = NULL, off = NULL) } if(!is.null(addArgs)){ if(!is.list(addArgs)) stop("addArgs needs to be of class list") namA <- names(addArgs) if(!all(namA %in% c("scal", "off"))) stop("addArgs need to have entries named scal and/or off") addArgs0[namA] <- addArgs if(length(addArgs0$scal) > 1 | length(addArgs0$off) > 1) stop("scal and/or off need to be of length 1") } list(scal=addArgs0$scal, off=addArgs0$off) } checkEntries <- function(modL, doses, fullMod){ biModels <- c("emax", "linlog", "linear", "quadratic", "exponential", "logistic", "betaMod", "sigEmax", "linInt") checkNam <- function(nam){ if(is.na(match(nam, biModels))) stop("Invalid model specified: ", nam) } checkStand <- function(nam){ pars <- modL[[nam]] ## checks for as many invalid values as possible if(!is.numeric(pars) & !is.null(pars)) stop("entries in Mods need to be of type: NULL, or numeric.\n", " invalid type specified for model ", nam) if((nam %in% c("linear", "linlog")) & !is.null(pars)) stop("For model ", nam, ", model entry needs to be equal to NULL") if((nam %in% c("emax", "sigEmax", "betaMod", "logistic", "exponential")) & any(pars <= 0)) stop("For model ", nam, " model entries need to be positive") if((nam %in% c("emax", "exponential", "quadratic")) & is.matrix(pars)) stop("For model ", nam, " parameters need to be specified in a vector") if((nam %in% c("sigEmax", "betaMod", "logistic"))){ if(is.matrix(pars)){ if(ncol(pars) != 2) stop("Matrix for ", nam, " model needs to have two columns") } if(length(pars)%%2 > 0) stop("Specified parameters need to be a multiple of two for ", nam, " model") } if(nam == "linInt"){ if(is.matrix(pars)){ len <- ncol(pars) } else { len <- length(pars) } if(len != (length(doses)-1)) stop("Need to provide guesstimates for each active dose. ", len, " specified, need ", length(doses)-1, ".") } } if(!fullMod){ lapply(names(modL), function(nam){ checkNam(nam) checkStand(nam) }) } else { lapply(names(modL), function(nam){ checkNam(nam) }) } } Mods <- function(..., doses, placEff = 0, maxEff, direction = c("increasing", "decreasing"), addArgs = NULL, fullMod = FALSE){ if(missing(doses)) stop("Need to specify dose levels") doses <- sort(doses) if(doses[1] < -.Machine$double.eps ^ 0.5) stop("Only dose-levels >= 0 allowed") if(abs(doses[1]) > .Machine$double.eps ^ 0.5) stop("Need to include placebo dose") ## check for adequate addArgs lst <- getAddArgs(addArgs, doses) if(lst$scal < max(doses)) stop("\"scal\" parameter needs to be >= max(doses)") if(lst$scal < 0) stop("\"scal\" parameter needs to be positive") if(lst$off < 0) stop("\"off\" parameter needs to be positive") ## obtain model list modL <- list(...) nams <- names(modL) ## perform some simple check for a valid standModel list if(length(nams) != length(unique(nams))) stop("only one list entry allowed for each model class") checkEntries(modL, doses, fullMod) if(!fullMod){ ## assume standardized models direction <- match.arg(direction) if (missing(maxEff)) maxEff <- ifelse(direction == "increasing", 1, -1) modL <- fullMod(modL, doses, placEff, maxEff, lst$scal, lst$off) } else { ## calculate placEff and maxEff from model pars. For unimodal ## models maxEff determination might fail if the dose with maximum ## efficacy is not among those used!
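## evaluate all models at the given doses and derive placEff/maxEff from the resulting response matrix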
resp <- calcResp(modL, doses, lst$off, lst$scal, lst$nodes) placEff <- resp[1,] maxEff <- apply(resp, 2, function(x){ difs <- x-x[1] indMax <- which.max(difs) indMin <- which.min(difs) if(difs[indMax] > 0) return(difs[indMax]) if(difs[indMin] < 0) return(difs[indMin]) }) } attr(modL, "placEff") <- placEff attr(modL, "maxEff") <- maxEff attr(modL, "direction") <- ifelse(maxEff > 0, "increasing", "decreasing") class(modL) <- "Mods" attr(modL, "doses") <- doses attr(modL, "scal") <- lst$scal attr(modL, "off") <- lst$off return(modL) } ## calculates parameters for all models in the candidate set returns a ## list with all model parameters. fullMod <- function(models, doses, placEff, maxEff, scal, off){ ## check for valid placEff and maxEff arguments nM <- modCount(models, fullMod = FALSE) if(length(placEff) > 1){ if(length(placEff) != nM) stop("placEff needs to be of length 1 or length equal to the number of models") } else { placEff <- rep(placEff, nM) } if(length(maxEff) > 1){ if(length(maxEff) != nM) stop("maxEff needs to be of length 1 or length equal to the number of models") } else { maxEff <- rep(maxEff, nM) } nodes <- doses # nodes parameter for linInt ## calculate linear parameters of models (with standardized ## parameters as in models), to achieve the specified placEff and maxEff complMod <- vector("list", length=length(models)) i <- 0;z <- 1 for(nm in names(models)){ pars <- models[[nm]] if(is.null(pars)){ ## linear and linlog Pars <- getLinPars(nm, doses, NULL, placEff[z], maxEff[z], off); i <- i+1; z <- z+1 } if(is.element(nm,c("emax", "exponential", "quadratic"))){ nmod <- length(pars) if(nmod > 1){ Pars <- matrix(ncol=3, nrow=nmod) for(j in 1:length(pars)){ Pars[j,] <- getLinPars(nm, doses, as.vector(pars[j]), placEff[z], maxEff[z]) z <- z+1 } i <- i+1 } else { Pars <- getLinPars(nm, doses, as.vector(pars), placEff[z], maxEff[z]) i <- i+1; z <- z+1 } } if(is.element(nm,c("logistic", "betaMod", "sigEmax"))){ if(is.matrix(pars)){ Pars <- matrix(ncol=4, nrow=nrow(pars)) for(j in 1:nrow(pars)){ Pars[j,] <- getLinPars(nm, doses, as.vector(pars[j,]), placEff[z], maxEff[z]) z <- z+1 } i <- i+1 } else { Pars <- getLinPars(nm, doses, as.vector(pars), placEff[z], maxEff[z]); i <- i+1; z <- z+1 } } if(nm == "linInt"){ if(is.matrix(pars)){ Pars <- matrix(ncol=length(nodes), nrow=nrow(pars)) for(j in 1:nrow(pars)){ Pars[j,] <- getLinPars(nm, doses, as.vector(pars[j,]), placEff[z], maxEff[z]) z <- z+1 } i <- i+1 } else { Pars <- getLinPars(nm, doses, as.vector(pars), placEff[z], maxEff[z]); i <- i+1; z <- z+1 } } complMod[[i]] <- Pars } names(complMod) <- names(models) complMod } plot.Mods <- function(x, nPoints = 200, superpose = FALSE, xlab = "Dose", ylab = "Model means", modNams = NULL, plotTD = FALSE, Delta, ...){ plotModels(x, nPoints = nPoints, superpose = superpose, xlab = xlab, ylab = ylab, modNams = modNams, plotTD = plotTD, Delta, ...) 
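## plot.Mods is a thin wrapper: all plotting work is done in plotModels below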
} plotModels <- function(models, nPoints = 200, superpose = FALSE, xlab = "Dose", ylab = "Model means", modNams = NULL, plotTD = FALSE, Delta, ...){ ## models is always assumed to be of class Mods doses <- nodes <- attr(models, "doses") placEff <- attr(models, "placEff") maxEff <- attr(models, "maxEff") off <- attr(models, "off") scal <- attr(models, "scal") if(!inherits(models, "Mods")) stop("\"models\" needs to be of class Mods") nM <- modCount(models, fullMod = TRUE) if(nM > 50) stop("too many models in Mods object to plot (> 50 models).") doseSeq <- sort(union(seq(min(doses), max(doses), length = nPoints), doses)) resp <- calcResp(models, doseSeq, off, scal, nodes) pdos <- NULL if(plotTD){ # also include TD in plot if(missing(Delta)) stop("need Delta, if \"plotTD = TRUE\"") ind <- maxEff > 0 if(length(unique(ind)) > 1) stop("inconsistent directions not possible, when \"plotTD = TRUE\"") direction <- ifelse(all(ind), "increasing", "decreasing") pdos <- TD(models, Delta, direction = direction) yax <- rep(ifelse(direction == "increasing", Delta, -Delta), length(pdos)) } if(length(placEff) == 1) placEff <- rep(placEff, nM) if(length(maxEff) == 1) maxEff <- rep(maxEff, nM) if(is.null(modNams)){ # use alternative model names nams <- dimnames(resp)[[2]] } else { if(length(modNams) != nM) stop("specified model-names in \"modNams\" of invalid length") nams <- modNams } modelfact <- factor(rep(nams, each = length(doseSeq)), levels = nams) if(superpose){ respdata <- data.frame(response = c(resp), dose = rep(doseSeq, ncol(resp)), model = modelfact) spL <- trellis.par.get("superpose.line") spL$lty <- rep(spL$lty, nM%/%length(spL$lty) + 1)[1:nM] spL$lwd <- rep(spL$lwd, nM%/%length(spL$lwd) + 1)[1:nM] spL$col <- rep(spL$col, nM%/%length(spL$col) + 1)[1:nM] ## data for plotting function within panel panDat <- list(placEff = placEff, maxEff = maxEff, doses = doses) ## number of columns nCol <- ifelse(nM < 5, nM, min(4,ceiling(nM/min(ceiling(nM/4),3)))) key <- list(lines = spL, transparent = TRUE, text = list(nams, cex = 0.9), columns = nCol) ltplot <- xyplot(response ~ dose, data = respdata, subscripts = TRUE, groups = respdata$model, panel.data = panDat, xlab = xlab, ylab = ylab, panel = function(x, y, subscripts, groups, ..., panel.data) { panel.grid(h=-1, v=-1, col = "lightgrey", lty=2) panel.abline(h = c(panel.data$placEff, panel.data$placEff + panel.data$maxEff), lty = 2) panel.superpose(x, y, subscripts, groups, type = "l", ...) ind <- !is.na(match(x, panel.data$doses)) panel.superpose(x[ind], y[ind], subscripts[ind], groups, ...) if(plotTD){ for(z in 1:length(pdos)){ panel.lines(c(0, pdos[z]), c(yax[z], yax[z]),lty=2, col=2) panel.lines(c(pdos[z], pdos[z]), c(0, yax[z]),lty=2, col=2) } }}, key = key, ...) } else { respdata <- data.frame(response = c(resp), dose = rep(doseSeq, ncol(resp)), model = modelfact) panDat <- list(placEff = placEff, maxEff = maxEff, doses = doses, pdos=pdos) ltplot <- xyplot(response ~ dose | model, data = respdata, panel.data = panDat, xlab = xlab, ylab = ylab, panel = function(x, y, ..., panel.data){ panel.grid(h=-1, v=-1, col = "lightgrey", lty=2) z <- panel.number() panel.abline(h = c(panel.data$placEff[z], panel.data$placEff[z] + panel.data$maxEff[z]), lty = 2) panel.xyplot(x, y, type = "l", ...) ind <- match(panel.data$doses, x) panel.xyplot(x[ind], y[ind], ...) 
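## if plotTD is TRUE, add dashed reference lines at the target effect (placEff +/- Delta) and the corresponding target dose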
if(plotTD){ if(direction == "increasing"){ delt <- Delta base <- panel.data$placEff[z] delt <- panel.data$placEff[z]+Delta } else { delt <- -Delta base <- panel.data$placEff[z]+panel.data$maxEff[z] delt <- panel.data$placEff[z]-Delta } panel.lines(c(0, pdos[z]), c(delt, delt), lty=2, col=2) panel.lines(c(pdos[z], pdos[z]), c(base, delt),lty=2, col=2) } }, strip = function(...) strip.default(..., style = 1), as.table = TRUE,...) } print(ltplot) } ## calculate target dose calcTD <- function(model, pars, Delta, TDtype = c("continuous", "discrete"), direction = c("increasing", "decreasing"), doses, off, scal, nodes){ ## calculate the smallest dose x for which ## f(x) > f(0) + Delta (increasing) or f(x) < f(0) - Delta (decreasing) ## => f0(x) > Delta (increasing) or f0(x) < - Delta (decreasing) (f0 effect-curve) ## multiplying f0(x) (= slope parameter) by -1 reduces the decreasing case ## to the increasing case TDtype <- match.arg(TDtype) direction <- match.arg(direction) if(direction == "decreasing"){ ## transform problem to "increasing" case if(model == "linInt"){ pars <- -pars } else { pars[2] <- -pars[2] if(model == "quadratic") ## also need to negate pars[3] pars[3] <- -pars[3] } } if(model == "betaMod" & missing(scal)) stop("Need \"scal\" parameter for betaMod model") if(model == "linlog" & missing(off)) stop("Need \"off\" parameter for linlog model") if(model == "linInt"){ if(missing(nodes)) stop("Need \"nodes\" parameter for linInt model") if(length(nodes) != length(pars)) stop("nodes and pars of incompatible length") } if(TDtype == "continuous"){ ## calculate target dose analytically cf <- pars if(model == "linear"){ td <- Delta/cf[2] if(td > 0) return(td) return(NA) } if(model == "linlog"){ td <- off*exp(Delta/cf[2])-off if(td > 0) return(td) return(NA) } if(model == "quadratic"){ if(4*cf[3]*Delta+cf[2]^2 < 0) return(NA) d1 <- -(sqrt(4*cf[3]*Delta+cf[2]^2)+cf[2])/(2*cf[3]) d2 <- (sqrt(4*cf[3]*Delta+cf[2]^2)-cf[2])/(2*cf[3]) ind <- c(d1, d2) > 0 if(!any(ind)) return(NA) return(min(c(d1, d2)[ind])) } if(model == "emax"){ if(Delta > cf[2]) return(NA) return(Delta*cf[3]/(cf[2]-Delta)) } if(model == "logistic"){ if(Delta > cf[2] * (1 - logistic(0, 0, 1, cf[3], cf[4]))) return(NA) .tmp1 <- exp(cf[3]/cf[4]) num <- .tmp1*cf[2]-Delta*.tmp1-Delta den <- cf[2]+Delta*.tmp1+Delta return(cf[3]-cf[4]*log(num/den)) } if(model == "sigEmax"){ if(Delta > cf[2]) return(NA) return((Delta*cf[3]^cf[4]/(cf[2]-Delta))^(1/cf[4])) } if(model == "betaMod"){ if(Delta > cf[2]) return(NA) func <- function(x, Emax, delta1, delta2, scal, Delta){ betaMod(x, 0, 1, delta1, delta2, scal)-Delta/Emax } mode <- cf[3]/(cf[3]+cf[4])*scal out <- uniroot(func, lower=0, upper=mode, delta1=cf[3], delta2=cf[4], Emax=cf[2], scal=scal, Delta=Delta)$root return(out) } if(model == "exponential"){ if(Delta/cf[2] < 0) ## wrong direction return(NA) return(cf[3]*log(Delta/cf[2]+1)) } if(model == "linInt"){ inds <- cf < cf[1] + Delta if(all(inds)) return(NA) ind <- min((1:length(cf))[!inds])-1 tmp <- (cf[1]+Delta-cf[ind])/(cf[ind+1]-cf[ind]) td <- nodes[ind] + tmp*(nodes[ind+1]-nodes[ind]) if(td > 0) return(td) else return(NA) } } if(TDtype == "discrete"){ if(missing(doses)) stop("For TDtype = \"discrete\" need the possible doses in doses argument") if(!any(doses == 0)) stop("need placebo dose for TD calculation") if(model == "betaMod") pars <- c(pars, scal) if(model == "linlog") pars <- c(pars, off) doses <- sort(doses) if(model != "linInt"){ resp <- do.call(model, c(list(doses), as.list(pars))) } else { resp <-
do.call(model, c(list(doses), as.list(list(pars, nodes)))) } ind <- resp >= resp[1] + Delta if(any(ind)){ ## TD does exist return smallest dose fulfilling threshold return(min(doses[ind])) } else { return(NA) } } } TD <- function(object, Delta, TDtype = c("continuous", "discrete"), direction = c("increasing", "decreasing"), doses){ ## calculate target doses for Mods or DRMod object, return in a numeric if(missing(Delta)) stop("need \"Delta\" to calculate TD") if(Delta <= 0) stop("\"Delta\" needs to be > 0") modNams <- tds <- NULL if(inherits(object, "Mods")){ off <- attr(object, "off") scal <- attr(object, "scal") nodes <- attr(object, "doses") ## loop through list for(nam in names(object)){ par <- object[[nam]] if(is.matrix(par)){ for(i in 1:nrow(par)){ td <- calcTD(nam, par[i,], Delta, TDtype, direction, doses, off, scal, nodes) modNams <- c(modNams, paste(nam, i, sep="")) tds <- c(tds, td) } } else { # single model td <- calcTD(nam, par, Delta, TDtype, direction, doses, off, scal, nodes) modNams <- c(modNams, nam) tds <- c(tds, td) } } names(tds) <- modNams return(tds) } if(inherits(object, "DRMod")){ # if fmodel is a DRMod object nam <- attr(object, "model") par <- sepCoef(object)$DRpars scal <- attr(object, "scal") off <- attr(object, "off") nodes <- attr(object, "nodes") if(attr(object, "placAdj")){ par <- c(0, par) if(nam == "linInt") nodes <- c(0, nodes) } td <- calcTD(nam, par, Delta, TDtype, direction, doses, off, scal, nodes) names(td) <- NULL return(td) } if(inherits(object, "bFitMod")){ # if fmodel is a bFitMod object nam <- attr(object, "model") scal <- attr(object, "scal") off <- attr(object, "off") nodes <- attr(object, "nodes") if(attr(object, "placAdj")){ if(nam == "linInt") nodes <- c(0, nodes) } td <- apply(object$samples, 1, function(x){ if(attr(object, "placAdj")){ par <- c(0, x) } else { par <- x } calcTD(nam, par, Delta, TDtype, direction, doses, off, scal, nodes) }) return(td) } } ## calculate gradient of target dose calcTDgrad <- function(model, pars, Delta, direction = c("increasing", "decreasing"), off, scal, nodes){ direction <- match.arg(direction) if(direction == "decreasing"){ ## transform problem to "increasing" case Delta <- -Delta ## TD is smallest x so that: } ## f(x) = f(0) + Delta (incr), f(x) = f(0) - Delta (decr) cf <- pars if(model == "linear") return(c(0, -Delta/cf[2]^2)) if(model == "linlog"){ ## version assuming off unknown ##c(0, -Delta*off*exp(Delta/cf[2])/cf[2]^2, exp(Delta/cf[2])-1) return(c(0, -Delta*off*exp(Delta/cf[2])/cf[2]^2)) } if(model == "quadratic"){ squrt <- sqrt(4*Delta*cf[3]+cf[2]^2) .p1 <- -(squrt-cf[2])/(2*cf[3]*squrt) .p2 <- cf[2]*squrt-2*Delta*cf[3]-cf[2]^2 .p2 <- .p2/(2*cf[3]^2*squrt) return(c(0, .p1, .p2)) } if(model == "emax"){ .p1 <- -Delta*cf[3]/(cf[2]-Delta)^2 .p2 <- -Delta/((Delta/cf[2]-1)*cf[2]) return(c(0, .p1, .p2)) } if(model == "logistic"){ et2t3 <- exp(cf[3]/cf[4]) t1 <- (1/(1+et2t3)+Delta/cf[2]) t2 <- (1/t1-1) .p1 <- -Delta*cf[4]/(cf[2]^2*t1^2*t2) .p2 <- 1-et2t3/((et2t3+1)^2*t1^2*t2) .p3 <- cf[3]*et2t3/(cf[4]*(et2t3+1)^2*t1^2*t2)-log(t2) return(c(0, .p1, .p2, .p3)) } if(model == "sigEmax"){ brack <- (-Delta*cf[3]^cf[4]/(Delta-cf[2]))^(1/cf[4]) .p1 <- brack/((Delta-cf[2])*cf[4]) .p2 <- brack/cf[3] .p3 <- brack*(log(cf[3])/cf[4]-log((-Delta*cf[3]^cf[4])/(Delta-cf[2]))/cf[4]^2) return(c(0, .p1, .p2, .p3)) } if(model == "betaMod"){ h0 <- function(cf, scal, Delta){ func <- function(x, delta1, delta2, Emax, scal, Delta){ betaMod(x, 0, 1, delta1, delta2, scal)-Delta/Emax } mode <- cf[3]/(cf[3]+cf[4])*scal uniroot(func, 
lower=0, upper=mode, delta1=cf[3], delta2=cf[4], Emax=cf[2], scal=scal, Delta=Delta)$root } td <- h0(cf, scal, Delta) ## calculate target dose .p1 <- -td*(scal-td)/(cf[2]*(cf[3]*(scal-td)-cf[4]*td)) .p2 <- .p1*cf[2]*(log(td/scal)+log(cf[3]+cf[4])-log(cf[3])) .p3 <- .p1*cf[2]*(log(1-td/scal)+log(cf[3]+cf[4])-log(cf[4])) return(c(0, .p1, .p2, .p3)) } if(model == "exponential"){ .p1 <- -Delta*cf[3]/(cf[2]*Delta+cf[2]^2) .p2 <- log(Delta/cf[2] + 1) return(c(0, .p1, .p2)) } if(model == "linInt"){ stop("linInt model not implemented") ## ## the below should be correct ## out <- numeric(length(cf)) ## indx <- 1:max(which(cf==max(cf))) ## ind <- max(indx[cf[indx] < cf[1] + Delta]) ## out[1] <- 1/(cf[ind+1]-cf[ind]) ## out[ind] <- -1/(cf[ind+1]-cf[ind]) ## out[ind+1] <- -(cf[1]+Delta-cf[ind])/(cf[ind+1]-cf[ind])^2 ## return(out*(nodes[ind+1]-nodes[ind])) } } calcED <- function(model, pars, p, maxD, EDtype = c("continuous", "discrete"), doses, off, scal, nodes){ ## calculate the smallest dose x for which ## f(x) > f(0) + p*(f(xmax)-f(0)) ## e.g. the EDp within the observed dose-range EDtype <- match.arg(EDtype) if(model == "betaMod" & missing(scal)) stop("Need \"scal\" parameter for betaMod model") if(model == "linlog" & missing(off)) stop("Need \"off\" parameter for linlog model") if(model == "linInt"){ if(missing(nodes)) stop("Need \"nodes\" parameter for linlog model") if(length(nodes) != length(pars)) stop("nodes and pars of incompatible length") } if(EDtype == "continuous"){ ## calculate target dose analytically cf <- pars if(cf[2] == 0){ return(NA) } if(model == "linear"){ return(p*maxD) } if(model == "linlog"){ return(off*(exp(p*(log(maxD+off)-log(off)))-1)) } if(model == "exponential"){ return(cf[3]*log(p*exp(maxD/cf[3])-p+1)) } if(model == "emax"){ return(p*cf[3]*maxD/((1-p)*maxD+cf[3])) } if(model == "logistic"){ res1 <- ((p-1)*exp(maxD/cf[4]+cf[3]/cf[4])-exp(2*cf[3]/cf[4])-p*exp(cf[3]/cf[4])) res2 <- ((p*exp(cf[3]/cf[4])+1)*exp(maxD/cf[4])+(1-p)*exp(cf[3]/cf[4])) return(cf[3]-cf[4]*log(-res1/res2)) } if(model == "sigEmax"){ out <- p*cf[3]^cf[4]*maxD^cf[4]/((1-p)*maxD^cf[4]+cf[3]^cf[4]) return(out^(1/cf[4])) } if(model == "quadratic"){ mode <- -pars[2]/(2*pars[3]) if(mode > maxD | mode < 0) ## maximum outside dose range mode <- maxD const <- pars[2]*mode+pars[3]*mode^2 d1 <- -(sqrt(4*pars[3]*const*p+pars[2]^2)+pars[2])/pars[3]/2.0 d2 <- (sqrt(4*pars[3]*const*p+pars[2]^2)-pars[2])/pars[3]/2.0 ind <- c(d1, d2) > 0 if(!any(ind)) return(NA) return(min(c(d1, d2)[ind])) } if(model == "betaMod"){ func <- function(x, Emax, delta1, delta2, scal, p, mode){ p - betaMod(x, 0, 1, delta1, delta2, scal)/betaMod(mode, 0, 1, delta1, delta2, scal) } mode <- cf[3]/(cf[3]+cf[4])*scal out <- uniroot(func, lower=0, upper=mode, delta1=cf[3], delta2=cf[4], Emax=cf[2], scal=scal, p=p, mode = mode)$root return(out) } if(model == "linInt"){ dif <- cf-cf[1] ind <- which.max(abs(dif)) maxEff <- abs(dif)[ind] if(dif[ind] > 0){ direc <- "increasing" } else { direc <- "decreasing" } out <- calcTD("linInt", cf, Delta=p*maxEff, TDtype="continuous", direction = direc, off=off, scal=scal, nodes=nodes) return(out) } } if(EDtype == "discrete"){ ## use calcTD function if(missing(doses)) stop("For EDtype = \"discrete\" need the possible doses in doses argument") if(!any(doses == 0)) stop("need placebo dose for ED calculation") doses <- sort(doses) if(model != "linInt"){ if(model == "betaMod") pars <- c(pars, scal) if(model == "linlog") pars <- c(pars, off) resp0 <- do.call(model, c(list(0), as.list(pars))) resp <- 
abs(do.call(model, c(list(doses), as.list(pars)))-resp0) } else { resp0 <- do.call(model, c(list(0), as.list(list(pars, nodes)))) resp <- abs(do.call(model, c(list(doses), as.list(list(pars, nodes))))-resp0) } ## calculate maximum response if(model %in% c("betaMod", "quadratic")){ func2 <- function(x){ resp0 <- do.call(model, c(list(0), as.list(pars))) abs(do.call(model, c(list(x), as.list(pars)))-resp0) } opt <- optimize(func2, range(doses), maximum=TRUE) maxResp <- opt$objective } else { maxResp <- max(resp) } } ind <- resp >= p*maxResp if(any(ind)){ ## TD does exist return smallest dose fulfilling threshold return(min(doses[ind])) } else { return(NA) } } ED <- function(object, p, EDtype = c("continuous", "discrete"), doses){ ## calculate target doses for Mods or DRMod object, return in a numeric if(missing(p)) stop("need \"p\" to calculate ED") if(p <= 0 | p >= 1) stop("\"p\" needs to be in (0,1)") modNams <- eds <- NULL if(inherits(object, "Mods")){ off <- attr(object, "off") scal <- attr(object, "scal") nodes <- attr(object, "doses") maxD <- max(attr(object, "doses")) ## loop through list for(nam in names(object)){ par <- object[[nam]] if(is.matrix(par)){ for(i in 1:nrow(par)){ ed <- calcED(nam, par[i,], p, maxD, EDtype, doses, off, scal, nodes) modNams <- c(modNams, paste(nam, i, sep="")) eds <- c(eds, ed) } } else { # single model ed <- calcED(nam, par, p, maxD, EDtype, doses, off, scal, nodes) modNams <- c(modNams, nam) eds <- c(eds, ed) } } names(eds) <- modNams return(eds) } if(inherits(object, "DRMod")){ # if fmodel is a DRMod object nam <- attr(object, "model") par <- sepCoef(object)$DRpars doseNam <- attr(object, "doseRespNam")[1] maxD <- max(attr(object,"data")[[doseNam]]) scal <- attr(object, "scal") off <- attr(object, "off") nodes <- attr(object, "nodes") if(attr(object, "placAdj")){ par <- c(0, par) if(nam == "linInt") nodes <- c(0, nodes) } ed <- calcED(nam, par, p, maxD, EDtype, doses, off, scal, nodes) names(ed) <- NULL return(ed) } if(inherits(object, "bFitMod")){ # if fmodel is a bFitMod object nam <- attr(object, "model") scal <- attr(object, "scal") off <- attr(object, "off") nodes <- attr(object, "nodes") if(attr(object, "placAdj")){ if(nam == "linInt") nodes <- c(0, nodes) } doseNam <- attr(object, "doseRespNam")[1] maxD <- max(attr(object,"data")[[doseNam]]) ed <- apply(object$samples, 1, function(x){ if(attr(object, "placAdj")){ par <- c(0, x) } else { par <- x } calcED(nam, par, p, maxD, EDtype, doses, off, scal, nodes) }) return(ed) } } calcEDgrad <- function(model, pars, maxD, p, off, scal, nodes){ cf <- pars if(model == "linear") return(c(0,0)) if(model == "linlog"){ return(c(0,0)) } if(model == "emax"){ p <- (1-p)*p*maxD^2/(p*maxD-maxD-cf[3])^2 return(c(0, 0, p)) } if(model == "exponential"){ p <- log(p*exp(maxD/cf[3])-p+1)-p*maxD*exp(maxD/cf[3])/(cf[3]*(p*exp(maxD/cf[3])-p+1)) return(c(0, 0, p)) } ## for other models calculate gradient numerically (formulas more complicated) if(model == "linInt"){ stop("linInt model not implemented") } avail <- requireNamespace("numDeriv", quietly = TRUE) if(!avail) stop("Need suggested package numDeriv for this calculation") func0 <- function(pars, model, p, maxD, off, scal){ calcED(model, pars, p, maxD, EDtype = "continuous", off=off, scal=scal) } scal0 <- off0 <- NULL if(model == "betaMod") scal0 <- scal if(model == "linlog") off0 <- off numDeriv::grad(func0, pars, model=model, p=p, maxD=maxD, off=off, scal=scal) } calcResp <- function(models, doses, off, scal, nodes){ ## generate response vectors for models and 
guesstimates in "models" ## models - candidate model list of class Mods nModels <- length(models) # number of model elements parList <- val <- vector("list", modCount(models, fullMod = TRUE)) k <- 1 nams <- character() for(nm in names(models)) { pars <- models[[nm]] if (!is.null(pars) && !is.numeric(pars)) { stop("elements of \"models\" must be NULL or numeric") } if (is.matrix(pars)) { # multiple models nmod <- nrow(pars) # number of models if(nm == "linlog") pars <- cbind(pars, off) if(nm == "betaMod") pars <- cbind(pars, scal) ind <- 1:nmod nams <- c(nams, paste(nm, ind, sep = "")) for(j in 1:nmod) { if(nm != "linInt"){ val[[k]] <- do.call(nm, c(list(doses), as.list(pars[j,]))) } else { val[[k]] <- linInt(doses, pars[j,], nodes) } parList[[k]] <- pars[j,] k <- k + 1 } } else { # single model if(nm == "linlog") pars <- c(pars, off) if(nm == "betaMod") pars <- c(pars, scal) nams <- c(nams, nm) if(nm != "linInt"){ val[[k]] <- do.call(nm, c(list(doses), as.list(pars))) } else { val[[k]] <- linInt(doses, pars, nodes) } parList[[k]] <- pars k <- k + 1 } } muMat <- do.call("cbind", val) dimnames(muMat) <- list(doses, nams) names(parList) <- nams attr(muMat, "parList") <- parList muMat } getResp <- function(fmodels, doses){ ## convenience function for getting the mean responses of ## the models in a Mods object (output in matrix) if(!inherits(fmodels, "Mods")) stop("\"fmodels\" needs to be of class Mods") if(missing(doses)) doses <- attr(fmodels, "doses") off <- attr(fmodels, "off") scal <- attr(fmodels, "scal") nodes <- attr(fmodels, "doses") calcResp(fmodels, doses, off=off, scal=scal, nodes=nodes) } ## calculates the location and scale parameters corresponding to ## given placEff, maxEff, and guesstimates getLinPars <- function(model, doses, guesstim, placEff, maxEff, off, scal){ if(model == "linear"){ e1 <- maxEff/max(doses) return(c(placEff, e1)) } if(model == "linlog"){ e1 <- maxEff/(log(max(doses) + off) - log(off)) return(c(placEff-e1*log(off), e1)) } if(model == "quadratic"){ dMax <- 1/(-2*guesstim) b1 <- maxEff/(dMax + guesstim*dMax^2) b2 <- guesstim * b1 return(c(placEff, b1, b2)) } if(model == "emax"){ emax.p <- maxEff * (guesstim + max(doses))/max(doses) return(c(placEff, emax.p, guesstim)) } if(model == "exponential"){ e1 <- maxEff/(exp(max(doses)/guesstim) - 1) e0 <- placEff return(c(e0, e1, guesstim)) } if(model == "logistic"){ emax.p <- maxEff/ (logistic(max(doses),0,1, guesstim[1], guesstim[2]) - logistic(0, 0, 1, guesstim[1], guesstim[2])) e0 <- placEff-emax.p*logistic(0,0,1,guesstim[1], guesstim[2]) return(c(e0, emax.p, guesstim[1], guesstim[2])) } if(model == "betaMod"){ return(c(placEff, maxEff, guesstim)) } if(model == "sigEmax"){ ed50 <- guesstim[1] h <- guesstim[2] dmax <- max(doses) eMax <- maxEff*(ed50^h+dmax^h)/dmax^h return(c(e0 = placEff, eMax = eMax, ed50 = ed50, h = h)) } if(model == "linInt"){ ind <- which.max(abs(guesstim)) return(c(placEff, placEff+maxEff*guesstim/guesstim[ind])) } } DoseFinding/R/guesst.R0000644000176200001440000000667612100014352014304 0ustar liggesusers## function to calculate guesstimates for nonlinear parameters of the dose-response models guesst <- function(d, p, model = c("emax", "exponential", "logistic", "quadratic", "betaMod", "sigEmax"), less = TRUE, local = FALSE, dMax, Maxd, scal){ model <- match.arg(model) if(any(p <= 0) | any(p > 1)) stop("must have 0 < p <= 1") if(model == "emax"){ if(!local){ return(c(ed50 = mean(d * (1 - p)/p))) } else { if (any(p <= d/Maxd)) stop("must have p > d/Maxd, for local version") val <- 
(d/p-d)/(1-d/(Maxd*p)) return(c(ed50=mean(val))) } } if(model == "exponential"){ if(any(p >= d/Maxd)) stop("must have p < d/Maxd") init <- d/log(1 + p) fooexp <- function(delta, d, p, Maxd){ sum((exponential(d, 0, 1, delta)/ exponential(Maxd, 0, 1, delta) - p)^2) } val <- optimize(fooexp, c(0, 2*Maxd), d=d, p=p, Maxd=Maxd)$minimum return(c(delta = mean(val))) } if(model == "logistic"){ if(length(d) == 1) { stop("logistic model needs at least two pairs (d,p)") } logit <- function(p) log(p/(1-p)) if(length(d) == 2) { ed50 <- diff(rev(d)*logit(p))/diff(logit(p)) delta <- diff(d)/diff(logit(p)) return(c(ed50 = ed50, delta = delta)) } else { m <- lm(logit(p)~d) par <- coef(m) names(par) <- NULL return(c(ed50 = -par[1]/par[2], delta = 1/par[2])) } if(local){ foolog <- function(par, d, p, Maxd) { e0 <- logistic(0,0,1,par[1],par[2]) sum(((logistic(d,0,1,par[1],par[2]) - e0)/ (logistic(Maxd,0,1,par[1],par[2])-e0)-p)^2) } res <- try(optim(par=res, fn=foolog, d=d, p=p, Maxd=Maxd)) if(res$convergence > 0) stop("cannot find guesstimates for specified values") else res <- res$par } if(res[1] < 0) message("Message: specified values lead to negative ed50, which should be positive") return(res) } if(model == "quadratic"){ aux <- sqrt(1 - p) if (less){ return(c(delta = mean(-(1 - aux)/(2 * d)))) } else { return(c(delta = mean(-(1 + aux)/(2 * d)))) } } if(model == "betaMod"){ if(scal <= dMax) stop("scal needs to be larger than dMax to calculate guesstimate") if(dMax > Maxd) stop("dose with maximum effect (dMax) needs to be smaller than maximum dose (Maxd)") k <- dMax/(scal-dMax) val <- d^k*(scal-d)/(dMax^k*(scal-dMax)) beta <- log(p)/(log(val)) return(c(delta1 = mean(k*beta), delta2 = mean(beta))) } if(model == "sigEmax"){ if(length(d) == 1) { stop("sigmoid Emax model needs at least two pairs (d,p)") } if(length(d) == 2){ num <- log((p[1]*(1-p[2]))/(p[2]*(1-p[1]))) h <- num/log(d[1]/d[2]) ed50 <- ((1-p[1])/p[1])^(1/h)*d[1] return(c(ed50=ed50, h=h)) } else { y <- log((1-p)/p) x <- log(d) par <- coef(lm(y~x)) names(par) <- NULL return(c(ed50 = exp(par[1]/-par[2]), delta = -par[2])) } if(local) { fooSE <- function(par, d, p, Maxd) { sum((sigEmax(d,0,1,par[1],par[2])/ sigEmax(Maxd,0,1,par[1],par[2])-p)^2) } res <- try(optim(par=res, fn=fooSE, d=d, p=p, Maxd=Maxd)) if(res$convergence > 0) stop("cannot find guesstimates for specified values") else res <- res$par } if(res[1] < 0) message("Message: specified values lead to negative ed50, which should be positive") return(res) } } DoseFinding/MD50000644000176200001440000000533112620475456012770 0ustar liggesusers585a7360f58e2dbf8494faee7b5dee18 *ChangeLog 473bcb326558e94940e4f8a4b8ffc6ec *DESCRIPTION d66ce8f07e721ea44ac142113f8e3874 *NAMESPACE b5248a72b64098b76efa2a9ad7967c87 *R/MCPMod.R 0066a36825434887203ceb031394a65f *R/MCTtest.R 435c213ea0f2f71c0ef20dac48cbcea1 *R/Mods.R 0f3143aa232414a7b949bc6ee57631ae *R/bFitMod.R 1b2659335d745655ba45716238695ad0 *R/drmodels.R a401c6bfa1c5e935586571d81d38d4b2 *R/fitMod.R 67fd322a12215ed9eadd8280498d910b *R/guesst.R 1438fb7f1422564f51a950bd91b964cf *R/optContr.R c66296c23da834c40e98d42edab58113 *R/optDesign.R b335402d91473c599e922d50e6a70966 *R/planMod.R 49d36ae6f084c62844dbdb7e4d622559 *R/powMCT.R de1f24ce932e3e3be780a9cf0d846a19 *R/qmvtDF.R 1386200078d09ca5a79ab0db0c1ac87d *R/sampSize.R 4eececc090f50b4ec9018ee1c745f398 *data/IBScovars.rda 3cc70c1f72990c43612c447828d99095 *data/biom.rda a946f02e98832a2e7d4280c301d9f389 *data/glycobrom.rda 33017675ed1b1a520c4635da8b1e9747 *data/migraine.rda 67d721d20a92e2c79456e51cf1c47c09 
*data/neurodeg.rda cd381fb8748f7bb72ac54a8a97534480 *man/DoseFinding-package.Rd cdce05c5b303cd6e7e0c402f8daa1ba6 *man/IBScovars.Rd 78519be51d2f0b2612f4c55447ec03e2 *man/MCPMod.Rd f343b9ec17bc7c54b70d656aad5f958d *man/MCTtest.Rd ce5e6321e42db1a16ce15563b02d8db4 *man/Mods.Rd d81ddcc7c8a97afb8860ef4ff6fb11e5 *man/bFitMod.Rd a87614435c17a1e90d4d52107af0affe *man/biom.Rd 64a0989390928044215f8d5183c76b2e *man/defBnds.Rd 704082a6257a97c9cc52ab4f1f9ca487 *man/drmodels.Rd a13bb4a56083adc6432f689d6816a9d6 *man/fitMod.Rd b3c7f092af421ac3845f19e7436d6a4f *man/glycobrom.Rd 93c4401bd957309dbcbca774f3d77782 *man/guesst.Rd cedf7b4ea00494ad51abe03662591ca2 *man/migraine.Rd 61695b83d772755fbe846c01951495c1 *man/mvtnorm-control.Rd 462934ffd93550659f19ddd18babbdd4 *man/neurodeg.Rd 039b9a4f2059a303414113166bdcb12e *man/optContr.Rd e2d72ac955bd881d583c99c398bf15ed *man/optDesign.Rd c21c5257b3db5c3f21e2f1d8c5f8f89f *man/planMod.Rd 88e0330d04925e504b0a7d1c481bf61b *man/powMCT.Rd 71f35352372d214de20b143753a71483 *man/sampSize.Rd 7865230dee75123f353f9c60377aedb3 *man/targdose.Rd 2a6f9e9e044a78154d3cfda5936d6f48 *src/Makevars 8b547a6a71b0d159ba742d49472cdb69 *src/bFitMod.c 8b06211458ee51d73920b8bde61d7db6 *src/combinations.c 2344088a61898992317743ef634c1892 *src/optDes.c 0364d3014932ea727046e1a6e8d2c495 *tests/testgFit.R 11a08146537000565b3209fc38b5f882 *tests/testplanMod.R 70df4f6ca90dcfa6c938039bad2e6c7f *tests/testsDesign.R 481dbb51f0ac9c684d0afc4310ab6e6c *tests/testsFitting.R 58aad5dadcf43e7c404a21f97fa1cbd8 *tests/testsMCPMod.R 05575c3f0f04371d0eafa9888fb412db *tests/testsMCT.R 955a875336a26e7e5a1364a6ab51e9ab *tests/testsoptContr.R f72550137d9a1f40c07ea4b3017eb3a3 *tests/testsplotDRMod.R d1bb805579d8571880983782161a1848 *tests/testssampSize.R 58554c52a7bc83f476f32ed1192f4014 *tests/testssamplMod.R DoseFinding/DESCRIPTION0000644000176200001440000000156212620475456014170 0ustar liggesusersPackage: DoseFinding Type: Package Title: Planning and Analyzing Dose Finding Experiments Version: 0.9-13 Date: 2015-11-10 Author: Bjoern Bornkamp, Jose Pinheiro, Frank Bretz Depends: lattice, mvtnorm, R (>= 2.15.0) Suggests: numDeriv, Rsolnp, quadprog, parallel, multcomp Maintainer: Bjoern Bornkamp Description: The DoseFinding package provides functions for the design and analysis of dose-finding experiments (with focus on pharmaceutical Phase II clinical trials). It provides functions for: multiple contrast tests, fitting non-linear dose-response models (using Bayesian and non-Bayesian estimation), calculating optimal designs and an implementation of the MCPMod methodology. License: GPL-3 LazyLoad: yes NeedsCompilation: yes Packaged: 2015-11-10 20:01:29 UTC; bjoern Repository: CRAN Date/Publication: 2015-11-11 00:18:06 DoseFinding/ChangeLog0000644000176200001440000003576612620172625014241 0ustar liggesusers2015-10-31 Bjoern Bornkamp (version 0.9-13) * projPrBnds now also covers the case when parameter was exactly on the bound * bFitMod doseNam changes * critVal Added self-written qmvt function qmvtDF (as mvtnorm::qmvt got instable on Windows 32bit from release 1.0-3), hopefully superfluous once mvtnorm fixes this. 2014-09-28 Bjoern Bornkamp (version 0.9-12) * glycobrom dataset: Included column for number of observations per treatment. * calcCrit now takes into account "nold" in determining whether enough design points were specified to be able to calculate the design criteria. * bFitMod documentation for plot.bFitMod and predict.bFitMod methods added. coef.bFitMod method added. 
Thanks to Lieven Nils Kennes for pointing towards the issue. 2014-02-11 Bjoern Bornkamp (version 0.9-11) * Mods Introduce fullMod argument to allow specification of full model parameters (again). * calcTDgrad now calculates the analytical gradient for TD optimal designs for the beta model. The previous numerical gradient could get unstable for particular parameter values. Thanks to Tobias Mielke for the calculations! * planMod.Rd, powMCT.Rd More description on what "sigma" is * optDesign, optContr Catch Mods objects with multiple direction attributes properly in these functions. 2013-11-25 Bjoern Bornkamp (version 0.9-10) * plot.MCPMod In case of no significant model, do not plot anything. * optContr Bugfix in function constOptC, previous algorithm selected in some situation an incorrect active set (and hence a suboptimal solution), the current implementation uses quadratic programming (hence the new suggested package quadprog). 2013-10-15 Bjoern Bornkamp (version 0.9-9) * bFitMod.Bayes Stop if starting values lie outside of bounds specified for the prior distribution * predict.bFitMod Remove incorrect "if" statement (use "effect-curve" not "EffectCurve") * fitModels.bndnls now uses narrowed bounds for 1d models again (as in 0.9-5 and earlier), thanks to Tobias Mielke for reporting the three points above. * optContr now allows for constrained contrasts, i.e. where the contrast coefficients in placebo and active treatment groups are required to have different signs. 2013-09-17 Bjoern Bornkamp (version 0.9-8) * MCPMod Major changes needed (also in fitMod and MCTtest) to allow for dose/response names different from "dose", "resp" when a data-frame is specified (the problem existed as MCTtest, fitMod were called from inside MCPMod). * bFitMod.Bayes Ensure that the starting values for the parameters are within the bounds specified by the prior (if no starting values are specified). Thanks to Tobias Mielke for reporting this. * bFitMod.bootstrap Remove bug for model = "linear" and placAdj = TRUE. Thanks to Tobias Mielke for reporting this. 2013-08-15 Bjoern Bornkamp (version 0.9-7) * fitMod ensure that the data set returned with DRMod objects is in the original order (not sorted by dose). Also ensure the right S matrix is used for fitting for type = "general" and unsorted dose, resp. * MCTtest fixed problems for type = "general" and unsorted dose, resp. * glycobrom Added glycobrom data set * planMod Added planning functions for non-linear modelling * Coded calculations of compositions to be able to remove dependency on the partitions package * man files: added reference to paper on generalized MCPMod * plot.DRMod Minor changes to ensure raw means are always inside the plotting region (for plotData = "means") 2013-04-16 Bjoern Bornkamp (version 0.9-6) * optDesign Re-named "fmodels" argument to "models". * optDesign for solnp if lowbnd and uppbnd are specified now use a feasible starting value (otherwise solnp might get into problems). * plot.DRMod, plot.MCPMod now use lattice graphics * powMCT removed bug in case of placAdj = TRUE (thanks to Tobias Mielke for reporting this) * ess.mcmc minor change to avoid occasional NA's * Mods removed class c("Mods", "standMod"), now there is only a class "Mods", this changes the API of MCTtest, optContr and MCPMod function (direction argument no longer needed, as this info is now contained in the "Mods" object). 
* neurodeg added the simulated longitudinal dose-finding data set neurodeg * targN catch incorrect matrix dimension, when in case of only one alternative model * fitModel.bndnls old version used narrowed bnds for 1-dim model, when a starting value was supplied manually (instead of calculated via optGrid); fixed. * MCTtest re-name of p-value column to "adj-p". 2013-03-06 Bjoern Bornkamp (version 0.9-5) * targN, powN added function targN to evaluate a target function (e.g. power) for different sample sizes (similar to the old powerMM function). powN is a convenience function for multiple contrast tests using the power. * sampSizeMCT added convenience function for sample size calculation for multiple contrast tests using the power. * optContr Re-named "weights" argument to "w" 2013-02-12 Bjoern Bornkamp (version 0.9-4) * TD, ED Fixed bug for model = linInt and placAdj = TRUE * powMCT Fixed bug for nr(altModels)=1 in case placAdj=TRUE * Mods Add requirement that placebo dose needs to be included * print.bFitMod Do not show n.eff for bootstrap samples * ess.mcmc return NA, if there is just one unique value in chain * fitMod, MCTtest catch situations, where type = "normal" and placAdj = TRUE * bFitMod fixed bug for column names of linear model in case of placAdj = TRUE * MCPMod: Fixed sign error in model selection, when critV was specified 2013-01-29 Bjoern Bornkamp (version 0.9-3) * fitMod Improvements of efficiency (removed calls to do.call in optLoc) * MCPMod passes direction argument now also to TD * optDesign solnp is now the default optimizer * calcCrit default for arg designCrit in calcCrit changed (to harmonize calcCrit and optDesign) * bFitMod use fitMod.raw in bFitMod.bootstrap (for efficiency) * critVal Remove contMat argument (was unused) * powMCT Allow power calculation for placebo adjusted data 2013-01-09 Bjoern Bornkamp (version 0.9-1) * Complete re-structuring and tidying up of the package. Main ideas: (i) smoother integration of g-functions (ii) focus on core functionality (iii) more general code/easier extensibility. * New features: Bayesian dose-response fitting, nicer plots, optimal designs for non-normal responses, ... * Special Thanks to Tobias Mielke for testing of the package and numerous bug reports. * Previous versions of the source are available under http://cran.r-project.org/package=DoseFinding under "Old sources", a Windows binary of the last version before the changes is available under http://goo.gl/p1UZ7. 2012-08-22 Bjoern Bornkamp (version 0.6-3) * Added PACKAGE = "DoseFinding" to ".C" calls 2012-04-04 Bjoern Bornkamp (version 0.6-2) * calcOptDesign partial rewrite of optDes.c and optDesign.R to fix segfault bug. 2012-02-20 Bjoern Bornkamp (version 0.6-1) * vcov.gDRMod is now functional, predict.gDRMod now allows calculation of confidence intervals * gFitDRModel minor changes in underlying optimizer * explicitly export the gradients of the model functions now 2012-01-24 Bjoern Bornkamp (version 0.5-7) * gFitDRModel now always returns an estimate (either the best value from nlminb or from the grid search if nlminb fails) * gMCPtest: use sigma = corMat instead of corr = corMat in p/qmvnorm calls (mvtnorm complained in 1-dimensional case) * gFitDRModel: Introduced default for bnds argument. 
* plot.MCPMod: Plot clinRel in the right place, when direction is equal to "decreasing" (thanks to Jan Rekowski) * planMM, critVal: When vCov is specified now right correlation matrix is calculated * calcOptDesign: Additional argument (standDopt) to allow for optional standardization (division of the log determinant by the number of parameters) of the D-optimal design criterion. 2011-10-19 Bjoern Bornkamp (version 0.5-6) * getGrid corrected bug for Ngrd > 75025 * calcOptDesign: For method = "exact" and n2 > 0 the function did not return the optimal incremental design but the overall optimal design 2011-08-31 Bjoern Bornkamp (version 0.5-5) * gFitDRModel can now fit dose-response models without intercept * gMCPtest minor changes to allow for user defined contrast matrix 2011-08-09 Bjoern Bornkamp (version 0.5-4) * MCPtest now uses correct degrees of freedom if addCovars != ~1 * Feedback from Andreas Krause led to a number smaller changes in the package (e.g., plot.(g)DRMod or fitDRModel). Thanks Andreas! * Print lattice plots explicitly to increase compability with Sweave. 2011-05-06 Bjoern Bornkamp (version 0.5-3) * Ensure in rndDesign that N is recognized as an integer by using N <- round(N), to avoid floating point problems. * Remove naming bug in gFitDRModel (drFit instead of drEst) 2011-04-27 Bjoern Bornkamp (version 0.5-2) * Corrected bug in b-vector for sigEmax model (calcBvec, only affected MED-type optimal designs) * Included INDEX file to order the overview help-page better * predict.DRMod now stops when type = "fullModel" and the argument newdata does not contain values for all variables specified in addCovars (thanks to Mouna). 2011-03-25 Bjoern Bornkamp (version 0.5-1) * Restructured calcOptDesign function to allow for user defined criteria functions. * The MCPMod object now always contains a estDose and fm entry (which is NA in case of non-significance or non-convergence) * Added generalized fitting code, variances and covariances of estimates are not available at the moment. * Added vCov argument to planMM, sampSize, powerMM (so it is possible to take into account covariances when calculating optimal contrasts) * Changed order in trellis plots in plotModels (order as specified in models list instead of alphanumerical order) * Restructured and summarized help pages * Removed dependency on numDeriv package (only suggested now), this is only needed for calculating optimal designs involving the beta model. 2011-02-09 Bjoern Bornkamp (version 0.4-3) * Minor change in Makevars file (so that DoseFinding works on Solaris). 2011-02-09 Bjoern Bornkamp (version 0.4-2) * calcBayesEst, getUpdDesign: Minor changes to make functions more suited for general purpose use. 2010-12-01 Bjoern Bornkamp (version 0.4-1) * Introduced new functions calcBayesEst and getUpdDesign, both were used for simulation purposes in the paper Bornkamp et al. (2011) "Response Adaptive Dose-Finding under Model Uncertainty" (to appear in Annals of Applied Statistics). 2010-11-12 Bjoern Bornkamp (version 0.3-1) * calcOptDesign now has an additional optimizer "exact". This methods calculates all possible designs for a given sample size and then selects the best. * Changed order in MakeVars as requested * calcCrit now checks whether there are not less design points than parameters. * Code now checks for positive sigma in powCalc and powerMM 2010-11-05 Bjoern Bornkamp (version 0.2-3) * MED function now checks for clinRel > 0 (thanks to Georgina). 
* Changed minor bug in output from print.MCPtest (print one-sided just once) * Code now outputs a warning, when 'models' argument is missing (in MCPMod and fullMod function); in fitDRModel it outputs a warning if 'model' is missing * Introduced a default base = 0 and maxEff = 1 for the plotModels function. * Added a summary method for DRMod objects. * Removed superfluous addCovarVals argument from predict.DRMod * Removed option method = "mult" in calcOptDesign 2010-07-08 Bjoern Bornkamp (version 0.2-2) * calcCrit and calcOptDesign now check for NA, NaN or +-Inf values in the gradient and bvector (and stop execution when these values occur) before passing these values to the C code. * Introduced a logLik method for DRMod objects * Changed mvtnorm.control default argument for "interval" to reflect recent changes in mvtnorm package. 2010-05-27 Bjoern Bornkamp (version 0.2-1) * Made the getGrad function (gradient for dose-response model), including documentation available for end-user (was previously hidden in NAMESPACE) * Changes in the plot.MCPMod function (col argument for panel.superpose was read in different order depending on lattice options, now there is a manual workaround with panel.xyplot calls for each group) 2010-05-20 Bjoern Bornkamp (version 0.1-3) * Smaller changes in calcCrit functions (the parameter p is now calculated by the nPars function as in getOptDesign) * Add further options to powerScenario function (now possible to for user-specified row and column names for output matrix) 2010-05-13 Bjoern Bornkamp (version 0.1-2) * Removed one example from sampSize to reduce check time. * modelSelect: Use first model when two models have exactly the same AIC or BIC value. * predict.DRMod: Return NA as standard deviation if code cannot calculate Cholesky transformation of covariance matrix (thanks to Setia Pramana for the hint). * calcCrit: Code now allows for specifying multiple designs in a matrix. 2010-04-14 Bjoern Bornkamp (version 0.1-1) * fitModel.nls now checks whether nls with plinear option made a positive number of iterations (as additional convergence check). In some cases (eg. when number of parameters = number of doses) plinear does not do any iteration and does *not* put out a warning message that the algorithm failed. * The calcOptDesign function now allows for upper and lower bounds on the allocation weights. * There is no longer the need to specify clinRel, when one wants to calculate a D-optimal design. * Output of bootMCPMod function in case of model averaging now also includes dose estimates under each model & corrected bug in print.bootMCPMod function (thanks to Setia Pramana) 2010-03-08 Bjoern Bornkamp (version 0.1) * 1st Release as version 0.1. Improvements over MCPMod package: - Extended and improved version of MCPMod (allowing for covariates and robustified self-developed optimizer) - Functions for MCP (MCPtest) and Modelling (fitDRModel) part now available to the user - New functions (eg. bootMCPMod, powerScenario) - Functions for calculating optimal designs DoseFinding/man/0000755000176200001440000000000012620446427013225 5ustar liggesusersDoseFinding/man/bFitMod.Rd0000644000176200001440000002161412377154523015046 0ustar liggesusers\name{bFitMod} \alias{bFitMod} \alias{coef.bFitMod} \alias{predict.bFitMod} \alias{plot.bFitMod} \title{ Fit a dose-response model using Bayesian or bootstrap methods. } \description{ For \samp{type = "Bayes"}, MCMC sampling from the posterior distribution of the dose-response model is done. 
The function assumes a multivariate normal distribution for \code{resp} with covariance matrix \code{S}, and this is taken as likelihood function and combined with the prior distributions specified in prior to form the posterior distribution. For \samp{type = "bootstrap"}, a multivariate normal distribution for \code{resp} with covariance matrix \code{S} is assumed, and a large number of samples is drawn from this distribution. For each draw the fitMod function with \samp{type = "general"} is used to fit the draws from the multivariate normal distribution. } \usage{ bFitMod(dose, resp, model, S, placAdj = FALSE, type = c("Bayes", "bootstrap"), start = NULL, prior = NULL, nSim = 1000, MCMCcontrol = list(), control = NULL, bnds, addArgs = NULL) \method{coef}{bFitMod}(object, ...) \method{predict}{bFitMod}(object, predType = c("full-model", "effect-curve"), summaryFct = function(x) quantile(x, probs = c(0.025, 0.25, 0.5, 0.75, 0.975)), doseSeq = NULL, lenSeq = 101, ...) \method{plot}{bFitMod}(x, plotType = c("dr-curve", "effect-curve"), quant = c(0.025, 0.5, 0.975), plotData = c("means", "meansCI", "none"), level = 0.95, lenDose = 201, ...) } \arguments{ \item{dose}{ Numeric specifying the dose variable. } \item{resp}{ Numeric specifying the response estimate corresponding to the doses in \code{dose}} \item{S}{ Covariance matrix associated with the dose-response estimate specified via \code{resp}} \item{model}{ Dose-response model to fit, possible models are "linlog", "linear", "quadratic", "emax", "exponential", "sigEmax", "betaMod" and "logistic", see \code{\link{drmodels}}. } \item{placAdj}{ Whether or not estimates in "placAdj" are placebo-adjusted (note that the linear in log and the logistic model cannot be fitted for placebo-adjusted data) } \item{type}{ Character with allowed values "Bayes" and "bootstrap", Determining whether samples are drawn from the posterior, or the bootstrap distribution. } \item{start}{ Optional starting values for the dose-response parameters in the MCMC algorithm. } \item{prior}{ List containing the information regarding the prior distributions for \samp{type = "Bayes"}. The list needs to have as many entries as there are model parameters. The ordering of the list entries should be the same as in the arguments list of the model see (see \code{\link{drmodels}}). For example for the Emax model the first entry determines the prior for e0, the second to eMax and the third to ed50. For each list entry the user has the choice to choose from 4 possible distributions: \itemize{ \item \code{norm}: Vector of length 2 giving mean and standard deviation. \item \code{t}: Vector of length 3 giving median, scale and degrees of freedom of the t-distribution. \item \code{lnorm}: Vector of length 2 giving mean and standard deviation on log scale. \item \code{beta}: Vector of length 4 giving lower and upper bound of the beta prior as well as the alpha and beta parameters of the beta distribution } } \item{nSim}{ Desired number of samples to produce with the algorithm } \item{MCMCcontrol}{ List of control parameters for the MCMC algorithm \itemize{ \item \code{thin} Thinning rate. Must be a positive integer. \item \code{w} Numeric of same length as number of parameters in the model, specifies the width parameters of the slice sampler. \item \code{adapt} Logical whether to adapt the \code{w} (width) parameter of the slice sampler in a short trial run. The widths are chosen as IQR/1.3 of the trial run. } } \item{control}{ Same as the control argument in \code{\link{fitMod}}. 
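As a rough, illustrative sketch (not the internal implementation), the bootstrap variant can be thought of as repeatedly drawing first-stage estimates from the assumed multivariate normal distribution and refitting each draw with \code{fitMod}; the object names, the number of draws (50) and the bounds below are arbitrary choices for illustration only:

data(biom)
anMod <- lm(resp ~ factor(dose) - 1, data = biom)
drFit <- coef(anMod)
S <- vcov(anMod)
dose <- sort(unique(biom$dose))
## draw plausible first-stage estimates and refit each draw
draws <- mvtnorm::rmvnorm(50, mean = drFit, sigma = S)
bootFit <- apply(draws, 1, function(z)
  coef(fitMod(dose, z, S = S, model = "emax", type = "general",
              bnds = defBnds(max(dose))$emax)))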
} \item{bnds}{ Bounds for non-linear parameters, in case \samp{type = "bootstrap"}. If missing the the default bounds from \code{\link{defBnds}} is used. } \item{addArgs}{ List containing two entries named "scal" and "off" for the "betaMod" and "linlog" model. When addArgs is NULL the following defaults are used \samp{list(scal = 1.2*max(doses), off = 0.01*max(doses))}} \item{x, object}{ A bFitMod object } \item{predType, summaryFct, doseSeq, lenSeq}{ Arguments for the predict method. \samp{predType}: predType determines whether predictions are returned for the dose-response curve or the effect curve (difference to placebo). \samp{summaryFct}: If equal to NULL predictions are calculated for each sampled parameter value. Otherwise a summary function is applied to the dose-response predictions for each parameter value. The default is to calculate 0.025, 0.25, 0.5, 0.75, 0.975 quantiles of the predictions for each dose. \samp{doseSeq}: Where to calculate predictions. If not specified predictions are calculated on a grid of length \samp{lenSeq} between minimum and maximum dose. \samp{lenSeq}: Length of the default grid where to calculate predictions. } \item{plotType, quant, plotData, level, lenDose}{ Arguments for plot method. \samp{plotType}: Determining whether the dose-response curve or the effect curve should be plotted. \samp{quant}: Vector of quantiles to display in plot \samp{plotData}: Determines how the original data are plotted: Either as means or as means with CI or not. The level of the CI is determined by the argument \samp{level}. \samp{level}: Level for CI, when plotData is equal to \samp{meansCI}. \samp{lenDose}: Number of grid values to use for display. } \item{...}{ Additional arguments are ignored. } } \details{ Componentwise univariate slice samplers are implemented (see Neal, 2003) to sample from the posterior distribution. } \value{ An object of class bFitMod, which is a list containing the matrix of posterior simulations plus some additional information on the fitted model. } \references{ Neal, R. M. 
(2003), Slice sampling, Annals of Statistics, 31, 705-767 } \author{ Bjoern Bornkamp } \seealso{ \code{\link{fitMod}} } \examples{ data(biom) ## produce first stage fit (using dose as factor) anMod <- lm(resp~factor(dose)-1, data=biom) drFit <- coef(anMod) S <- vcov(anMod) dose <- sort(unique(biom$dose)) ## define prior list ## normal prior for E0 (mean=0 and sdev=10) ## normal prior for Emax (mean=0 and sdev=100) ## beta prior for ED50: bounds: [0,1.5] parameters shape1=0.45, shape2=1.7 prior <- list(norm = c(0, 10), norm = c(0,100), beta=c(0,1.5,0.45,1.7)) ## now fit an emax model gsample <- bFitMod(dose, drFit, S, model = "emax", start = c(0, 1, 0.1), nSim = 1000, prior = prior) ## summary information gsample ## samples are stored in head(gsample$samples) ## predict 0.025, 0.25, 0.5, 0.75, 0.975 Quantile at 0, 0.5 and 1 predict(gsample, doseSeq = c(0, 0.5, 1)) ## simple plot function plot(gsample) ## now look at bootstrap distribution gsample <- bFitMod(dose, drFit, S, model = "emax", type = "bootstrap", nSim = 100, bnds = defBnds(1)$emax) plot(gsample) ## now fit linear interpolation prior <- list(norm = c(0,1000), norm = c(0,1000), norm = c(0,1000), norm = c(0,1000), norm = c(0,100)) gsample <- bFitMod(dose, drFit, S, model = "linInt", start = rep(1,5), nSim = 1000, prior = prior) gsample <- bFitMod(dose, drFit, S, model = "linInt", type = "bootstrap", nSim = 100) ## data fitted on placebo adjusted scale data(IBScovars) anovaMod <- lm(resp~factor(dose)+gender, data=IBScovars) drFit <- coef(anovaMod)[2:5] # placebo adjusted estimates at doses vCov <- vcov(anovaMod)[2:5,2:5] dose <- sort(unique(IBScovars$dose))[-1] prior <- list(norm = c(0,100), beta=c(0,6,0.45,1.7)) ## Bayes fit gsample <- bFitMod(dose, drFit, vCov, model = "emax", placAdj=TRUE, start = c(1, 0.1), nSim = 1000, prior = prior) ## bootstrap fit gsample <- bFitMod(dose, drFit, vCov, model = "emax", placAdj=TRUE, type = "bootstrap", start = c(1, 0.1), nSim = 100, prior = prior, bnds = c(0.01,6)) ## calculate target dose estimate TD(gsample, Delta = 0.2) ## now fit linear interpolation prior <- list(norm = c(0,1000), norm = c(0,1000), norm = c(0,1000), norm = c(0,100)) gsample <- bFitMod(dose, drFit, vCov, model = "linInt", placAdj=TRUE, start = rep(1,4), nSim = 1000, prior = prior) gsample <- bFitMod(dose, drFit, vCov, model = "linInt", type = "bootstrap", placAdj = TRUE, nSim = 100) } DoseFinding/man/glycobrom.Rd0000644000176200001440000000377212323407770015520 0ustar liggesusers\name{glycobrom} \alias{glycobrom} \docType{data} \title{ Glycopyrronium Bromide dose-response data } \description{ Data from a clinical study evaluating Efficacy and Safety of Four Doses of Glycopyrronium Bromide in Patients With Stable Chronic Obstructive Pulmonary Disease (COPD). This data set was obtained from clinicaltrials.gov (NCT00501852). The study design was a 4 period incomplete cross-over design. The primary endpoint is the trough forced expiratory volume in 1 second (FEV1) following 7 days of Treatment. The data given here are summary estimates (least-square means) for each dose. } \usage{data(glycobrom)} \format{ A data frame with 5 summary estimates (one per dose). 
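Since only dose-level summary estimates are provided (see the variables listed below), one simple illustrative analysis is a generalized model fit directly on these estimates; the sketch below assumes the per-dose standard errors can be treated as independent, which ignores the correlation induced by the cross-over design:

data(glycobrom)
S <- diag(glycobrom$sdev^2)  # diagonal covariance built from the standard errors
fit <- fitMod(glycobrom$dose, glycobrom$fev1, S = S, model = "emax",
              type = "general", bnds = defBnds(max(glycobrom$dose))$emax)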
Variables: \describe{ \item{\code{dose}}{a numeric vector containing the dose values} \item{\code{fev1}}{a numeric vector containing the least square mean per dose} \item{\code{sdev}}{a numeric vector containing the standard errors of the least square means per dose} \item{\code{n}}{Number of participants analyzed per treatment group} } } \source{ http://clinicaltrials.gov/ct2/show/results/NCT00501852 } \examples{ ## simulate a full data set with given means and sdv (here we ignore ## the original study was a cross-over design, and simulate a parallel ## group design) simData <- function(mn, sd, n, doses, fixed = TRUE){ ## simulate data with means (mns) and standard deviations (sd), for ## fixed = TRUE, the data set will have observed means and standard ## deviations as given in mns and sd resp <- numeric(sum(n)) uppind <- cumsum(n) lowind <- c(0,uppind)+1 for(i in 1:length(n)){ rv <- rnorm(n[i]) if(fixed) rv <- scale(rv) resp[lowind[i]:uppind[i]] <- mn[i] + sd[i]*rv } data.frame(doses=rep(doses, n), resp=resp) } data(glycobrom) fullDat <- simData(glycobrom$fev1, glycobrom$sdev, glycobrom$n, glycobrom$dose) } \keyword{datasets} DoseFinding/man/fitMod.Rd0000644000176200001440000002634212323407770014743 0ustar liggesusers\name{fitMod} \alias{fitMod} \alias{coef.DRMod} \alias{vcov.DRMod} \alias{predict.DRMod} \alias{plot.DRMod} \alias{logLik.DRMod} \alias{AIC.DRMod} \alias{gAIC} \alias{gAIC.DRMod} \title{ Fit non-linear dose-response model } \description{ Fits a dose-response model. Built-in dose-response models are "linlog", "linear", "quadratic", "emax", "exponential", "sigEmax", "betaMod" and "logistic" (see \code{\link{drmodels}}). When \samp{type = "normal"} ordinary least squares is used and additional additive covariates can be specified in \samp{addCovars}. The underlying assumption is hence normally distributed data and homoscedastic variance. For \samp{type = "general"} a generalized least squares criterion is used \deqn{ (f(dose,\theta)-resp)'S^{-1}(f(dose,\theta)-resp)}{(f(dose,theta)-resp)'S^{-1}(f(dose,theta)-resp)} and an inverse weighting matrix is specified in \samp{S}, \samp{type = "general"} is primarily of interest, when fitting a model to AN(C)OVA type estimates obtained in a first stage fit, then \samp{resp} contains the estimates and \samp{S} is the estimated covariance matrix for the estimates in \samp{resp}. Statistical inference (e.g. confidence intervals) rely on asymptotic normality of the first stage estimates, which makes this method of interest only for sufficiently large sample size for the first stage fit. A modified model-selection criterion can be applied to these model fits (see also Pinheiro et al. 2013 for details). For details on the implemented numerical optimizer see the Details section below. } \usage{ fitMod(dose, resp, data = NULL, model, S = NULL, type = c("normal", "general"), addCovars = ~1, placAdj = FALSE, bnds, df = NULL, start = NULL, na.action = na.fail, control = NULL, addArgs = NULL) \method{coef}{DRMod}(object, sep = FALSE, ...) \method{predict}{DRMod}(object, predType = c("full-model", "ls-means", "effect-curve"), newdata = NULL, doseSeq = NULL, se.fit = FALSE, ...) \method{vcov}{DRMod}(object, ...) \method{plot}{DRMod}(x, CI = FALSE, level = 0.95, plotData = c("means", "meansCI", "raw", "none"), plotGrid = TRUE, colMn = 1, colFit = 1, ...) \method{logLik}{DRMod}(object, ...) 
\method{AIC}{DRMod}(object, ..., k = 2) \method{gAIC}{DRMod}(object, ..., k = 2) } \arguments{ \item{dose, resp}{ Either vectors of equal length specifying dose and response values, or names of variables in the data frame specified in \samp{data}. } \item{data}{ Data frame containing the variables referenced in dose and resp if \samp{data} is not specified it is assumed that \samp{dose} and \samp{resp} are variables referenced from data (and no vectors) } \item{model}{ The dose-response model to be used for fitting the data. Built-in models are "linlog", "linear", "quadratic", "emax", "exponential", "sigEmax", "betaMod" and "logistic" (see \link{drmodels}). } \item{S}{ The inverse weighting matrix used in case, when \samp{type = "general"}, see Description. For later inference statements (vcov or predict methods) it is assumed this is the estimated covariance of the estimates in the first stage fit. } \item{type}{ Determines whether inference is based on an ANCOVA model under a homoscedastic normality assumption (when \samp{type = "normal"}), or estimates at the doses and their covariance matrix and degrees of freedom are specified directly in \samp{resp}, \samp{S} and \samp{df}. See also the Description above and Pinheiro et al. (2013). } \item{addCovars}{ Formula specifying additional additive linear covariates (only for \samp{type = "normal"}) } \item{placAdj}{ Logical, if true, it is assumed that placebo-adjusted estimates are specified in \samp{resp} (only possible for \samp{type = "general"}). } \item{bnds}{ Bounds for non-linear parameters. If missing the the default bounds from \code{\link{defBnds}} is used. When the dose-response model has only one non-linear parameter (for example Emax or exponential model), \samp{bnds} needs to be a vector containing upper and lower bound. For models with two non-linear parameters \samp{bnds} needs to be a matrix containing the bounds in the rows, see the Description section of \code{\link{defBnds}} for details on the formatting of the bounds for the individual models. } \item{df}{ Degrees of freedom to use in case of \samp{type = "general"}. If this argument is missing \samp{df = Inf} is used. For \samp{type = "normal"} this argument is ignored as the exact degrees of freedom can be deduced from the model. } \item{start}{ Vector of starting values for the nonlinear parameters (ignored for linear models). When equal to NULL, a grid optimization is performed and the best value is used as starting value for the local optimizer. } \item{na.action}{ A function which indicates what should happen when the data contain NAs. } \item{control}{ A list with entries: "nlminbcontrol", "optimizetol" and "gridSize". The entry nlminbcontrol needs to be a list and it is passed directly to control argument in the nlminb function, that is used internally for models with 2 nonlinear parameters. The entry optimizetol is passed directly to the tol argument of the optimize function, which is used for models with 1 nonlinear parameters. The entry gridSize needs to be a list with entries dim1 and dim2 giving the size of the grid for the gridsearch in 1d or 2d models. } \item{addArgs}{ List containing two entries named "scal" and "off" for the "betaMod" and "linlog" model. When addArgs is NULL the following defaults is used \samp{list(scal = 1.2*max(doses), off = 0.01*max(doses))}. } \item{object, x}{ DRMod object } \item{sep}{ Logical determining whether all coefficients should be returned in one numeric or separated in a list. 
} \item{predType, newdata, doseSeq, se.fit}{ predType determines whether predictions are returned for the full model (including potential covariates), the ls-means (SAS type) or the effect curve (difference to placebo). newdata gives the covariates to use in producing the predictions (for predType = "full-model"), if missing the covariates used for fitting are used. doseSeq dose-sequence on where to produce predictions (for predType = "effect-curve" and predType = "ls-means"). If missing the doses used for fitting are used. se.fit: logical determining, whether the standard error should be calculated. } \item{CI, level, plotData, plotGrid, colMn, colFit}{ Arguments for plot method: \samp{CI} determines whether confidence intervals should be plotted. \samp{level} determines the level of the confidence intervals. \samp{plotData} determines how the data are plotted: Either as means or as means with CI, raw data or none. In case of \samp{type = "normal"} and covariates the ls-means are displayed, when \samp{type = "general"} the option "raw" is not available. \samp{colMn} and \samp{colFit} determine the colors of fitted model and the raw means. } \item{k}{ Penalty to use for model-selection criterion (AIC uses 2, BIC uses log(n)). } \item{...}{ Additional arguments for plotting for the \samp{plot} method. For all other cases additional arguments are ignored. } } \details{ Details on numerical optimizer for model-fitting:\cr For linear models fitting is done using numerical linear algebra based on the QR decomposition. For nonlinear models numerical optimization is performed only in the nonlinear parameters in the model and optimizing over the linear parameters in each iteration (similar as the Golub-Pereyra implemented in \code{\link{nls}}). For models with 1 nonlinear parameter the \code{\link{optimize}} function is used for 2 nonlinear parameters the \code{\link{nlminb}} function is used. The starting value is generated using a grid-search (with the grid size specified via \samp{control$gridSize}), or can directly be handed over via \samp{start}. For details on the asymptotic approximation used for \samp{type = "normal"}, see Seber and Wild (2003, chapter 5). For details on the asymptotic approximation used for \samp{type = "general"}, and the gAIC, see Pinheiro et al. (2013). } \value{ An object of class DRMod. Essentially a list containing information about the fitted model coefficients, the residual sum of squares (or generalized residual sum of squares), } \references{ Pinheiro, J. C., Bornkamp, B., Glimm, E. and Bretz, F. (2014) Model-based dose finding under model uncertainty using general parametric models, \emph{Statistics in Medicine}, \bold{33}, 1646--1661 Seber, G.A.F. and Wild, C.J. (2003). Nonlinear Regression, Wiley. 
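For illustration, a minimal sketch of using \code{se.fit} to form pointwise confidence bounds (the normal quantile is used here only for simplicity; the residual degrees of freedom could be used instead):

data(IBScovars)
fit <- fitMod(dose, resp, data = IBScovars, model = "emax", bnds = c(0.01, 4))
pr <- predict(fit, predType = "ls-means", doseSeq = 0:4, se.fit = TRUE)
cbind(dose = 0:4,
      lower = pr$fit - qnorm(0.975) * pr$se.fit,
      upper = pr$fit + qnorm(0.975) * pr$se.fit)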
} \author{ Bjoern Bornkamp } \seealso{ \code{\link{defBnds}}, \code{\link{drmodels}} } \examples{ ## Fit the emax model to the IBScovars data set data(IBScovars) fitemax <- fitMod(dose, resp, data=IBScovars, model="emax", bnds = c(0.01, 4)) ## methods for DRMod objects summary(fitemax) ## extracting coefficients coef(fitemax) ## (asymptotic) covariance matrix of estimates vcov(fitemax) ## predicting newdat <- data.frame(dose = c(0,0.5,1), gender=factor(1)) predict(fitemax, newdata=newdat, predType = "full-model", se.fit = TRUE) ## plotting plot(fitemax, plotData = "meansCI", CI=TRUE) ## now include (additive) covariate gender fitemax2 <- fitMod(dose, resp, data=IBScovars, model="emax", addCovars = ~gender, bnds = c(0.01, 4)) vcov(fitemax2) plot(fitemax2) ## fitted log-likelihood logLik(fitemax2) ## extracting AIC (or BIC) AIC(fitemax2) ## Illustrating the "general" approach for a binary regression ## produce first stage fit (using dose as factor) data(migraine) PFrate <- migraine$painfree/migraine$ntrt doseVec <- migraine$dose doseVecFac <- as.factor(migraine$dose) ## fit logistic regression with dose as factor fitBin <- glm(PFrate~doseVecFac-1, family = binomial, weights = migraine$ntrt) drEst <- coef(fitBin) vCov <- vcov(fitBin) ## now fit an Emax model (on logit scale) gfit <- fitMod(doseVec, drEst, S=vCov, model = "emax", bnds = c(0,100), type = "general") ## model fit on logit scale plot(gfit, plotData = "meansCI", CI = TRUE) ## model on probability scale logitPred <- predict(gfit, predType ="ls-means", doseSeq = 0:200, se.fit=TRUE) plot(0:200, 1/(1+exp(-logitPred$fit)), type = "l", ylim = c(0, 0.5), ylab = "Probability of being painfree", xlab = "Dose") LB <- logitPred$fit-qnorm(0.975)*logitPred$se.fit UB <- logitPred$fit+qnorm(0.975)*logitPred$se.fit lines(0:200, 1/(1+exp(-LB))) lines(0:200, 1/(1+exp(-UB))) ## now illustrate "general" approach for placebo-adjusted data (on ## IBScovars) note that the estimates are identical to fitemax2 above) anovaMod <- lm(resp~factor(dose)+gender, data=IBScovars) drFit <- coef(anovaMod)[2:5] # placebo adjusted estimates at doses vCov <- vcov(anovaMod)[2:5,2:5] dose <- sort(unique(IBScovars$dose))[-1] ## now fit an emax model to these estimates gfit2 <- fitMod(dose, drFit, S=vCov, model = "emax", type = "general", placAdj = TRUE, bnds = c(0.01, 2)) ## some outputs summary(gfit2) coef(gfit2) vcov(gfit2) predict(gfit2, se.fit = TRUE, doseSeq = c(1,2,3,4), predType = "effect-curve") plot(gfit2, CI=TRUE, plotData = "meansCI") gAIC(gfit2) } DoseFinding/man/biom.Rd0000644000176200001440000000131010673511136014431 0ustar liggesusers\name{biom} \alias{biom} \docType{data} \title{ Biometrics Dose Response data } \description{ An example data set for dose response studies. This data set was used in Bretz et al. (2005) to illustrate the MCPMod methodology. } \usage{data(biom)} \format{ A data frame with 100 observations on the following 2 variables. \describe{ \item{\code{resp}}{a numeric vector containing the response values} \item{\code{dose}}{a numeric vector containing the dose values} } } \source{ Bretz, F., Pinheiro, J. C., and Branson, M. 
(2005), Combining multiple comparisons and modeling techniques in dose-response studies, \emph{Biometrics}, \bold{61}, 738--748 } \keyword{datasets} DoseFinding/man/sampSize.Rd0000644000176200001440000001702212126252274015305 0ustar liggesusers\name{sampSize} \alias{sampSize} \alias{sampSizeMCT} \alias{targN} \alias{plot.targN} \alias{powN} \title{ Sample size calculations } \description{ The \samp{sampSize} function implements a bisection search algorithm for sample size calculation. The user can hand over a general target function (via \samp{targFunc}) that is then iterated so that a certain \samp{target} is achieved. The \samp{sampSizeMCT} is a convenience wrapper of \samp{sampSize} for multiple contrast tests using the power as target function. The \samp{targN} functions calculates a general target function for different given sample sizes. The \samp{powN} function is a convenience wrapper of \samp{targN} for multiple contrast tests using the power as target function. } \usage{ sampSize(upperN, lowerN = floor(upperN/2), targFunc, target, tol = 0.001, alRatio, Ntype = c("arm", "total"), verbose = FALSE) sampSizeMCT(upperN, lowerN = floor(upperN/2), ..., power, sumFct = mean, tol = 0.001, alRatio, Ntype = c("arm", "total"), verbose = FALSE) targN(upperN, lowerN, step, targFunc, alRatio, Ntype = c("arm", "total"), sumFct = c("min", "mean", "max")) powN(upperN, lowerN, step, ..., alRatio, Ntype = c("arm", "total"), sumFct = c("min", "mean", "max")) \method{plot}{targN}(x, superpose = TRUE, line.at = NULL, xlab = NULL, ylab = NULL, ...) } \arguments{ \item{upperN, lowerN}{ Upper and lower bound for the target sample size. \code{lowerN} defaults to \code{floor(upperN/2)}. } \item{step}{ Only needed for functions \samp{targN} and \samp{powN}. Stepsize for the sample size at which the target function is calculated. The steps are calculated via \code{seq(lowerN,upperN,by=step)}. } \item{targFunc, target}{ The target function needs to take as an input the vector of sample sizes in the different dose groups. For \samp{sampSize} it needs to return a univariate number. For function \samp{targN} it should return a numerical vector.\cr \cr Example: \samp{targFunc} could be a function that calculates the power of a test, and \samp{target} the desired target power value. \cr For function \samp{sampSize} the bisection search iterates the sample size so that a specific target value is achieved (the implicit assumption is that targFunc is monotonically increasing in the sample size).\cr \cr Function \samp{targN} simply calculates \samp{targFunc} for a given set of sample sizes. } \item{tol}{ A positive numeric value specifying the tolerance level for the bisection search algorithm. Bisection is stopped if the \samp{targFunc} value is within \samp{tol} of \samp{target}. } \item{alRatio}{ Vector describing the relative patient allocations to the dose groups up to proportionality, e.g. \samp{rep(1, length(doses))} corresponds to balanced allocations. } \item{Ntype}{ One of "arm" or "total". Determines, whether the sample size in the smallest arm or the total sample size is iterated in bisection search algorithm. } \item{verbose}{ Logical value indicating if a trace of the iteration progress of the bisection search algorithm should be displayed. } \item{...}{ Arguments directly passed to the \code{\link{powMCT}} function in the \samp{sampSizeMCT} and \samp{powN} function. The \samp{placAdj} argument needs to be \samp{FALSE} (which is the default value for this argument). 
If sample size calculations are desired for a placebo-adjusted formulation use \samp{sampSize} or \samp{targN} directly. In case \code{S} is specified, the specified matrix needs to be proportional to the (hypothetical) covariance matrix of one single observation. The covariance matrix used for sample size calculation is 1/N*S, where N is the total sample size. Hence \samp{Ntype == "total"} needs to be used if \code{S} is specified. When \code{S} is specified, automatically \samp{df = Inf} is assumed in the underlying \samp{powMCT} calls. For a homoscedastic normally distributed response variable only \samp{sigma} needs to be specified, as the sample size \samp{n} is iterated in the different \samp{powMCT} calls. } \item{power, sumFct}{ power is a numeric defining the desired summary power to achieve (in \samp{sampSizeMCT}). sumFct needs to be a function that combines the power values under the different alternatives into one value (in \samp{sampSizeMCT}). } \item{x, superpose, line.at, xlab, ylab}{ arguments for the plot method of \samp{targN} and \samp{powN}, additional arguments are passed down to the low-level lattice plotting routines. } } \references{ Pinheiro, J. C., Bornkamp, B., and Bretz, F. (2006). Design and analysis of dose finding studies combining multiple comparisons and modeling procedures, \emph{Journal of Biopharmaceutical Statistics}, \bold{16}, 639--656 } \author{ Jose Pinheiro, Bjoern Bornkamp } \seealso{ \code{\link{powMCT}} } \examples{ ## sampSize examples ## first define the target function ## first calculate the power to detect all of the models in the candidate set fmodels <- Mods(linear = NULL, emax = c(25), logistic = c(50, 10.88111), exponential=c(85), betaMod=matrix(c(0.33,2.31,1.39,1.39), byrow=TRUE, nrow=2), doses = c(0,10,25,50,100,150), placEff=0, maxEff=0.4, addArgs = list(scal=200)) ## contrast matrix to use contMat <- optContr(fmodels, w=1) ## this function calculates the power under each model and then returns ## the average power under all models tFunc <- function(n){ powVals <- powMCT(contMat, altModels=fmodels, n=n, sigma = 1, alpha=0.05) mean(powVals) } ## assume we want to achieve 80\% average power over the selected shapes ## and want to use a balanced allocations \dontrun{ sSize <- sampSize(upperN = 80, targFunc = tFunc, target=0.8, alRatio = rep(1,6), verbose = TRUE) sSize ## Now the same using the convenience sampSizeMCT function sampSizeMCT(upperN=80, contMat = contMat, sigma = 1, altModels=fmodels, power = 0.8, alRatio = rep(1, 6), alpha = 0.05) ## Alternatively one can also specify an S matrix ## covariance matrix in one observation (6 total observation result in a ## variance of 1 in each group) S <- 6*diag(6) ## this uses df = Inf, hence a slightly smaller sample size results sampSizeMCT(upperN=500, contMat = contMat, S=S, altModels=fmodels, power = 0.8, alRatio = rep(1, 6), alpha = 0.05, Ntype = "total") ## targN examples ## first calculate the power to detect all of the models in the candidate set fmodels <- Mods(linear = NULL, emax = c(25), logistic = c(50, 10.88111), exponential=c(85), betaMod=matrix(c(0.33,2.31,1.39,1.39), byrow=TRUE, nrow=2), doses = c(0,10,25,50,100,150), placEff=0, maxEff=0.4, addArgs = list(scal=200)) ## corresponding contrast matrix contMat <- optContr(fmodels, w=1) ## define target function tFunc <- function(n){ powMCT(contMat, altModels=fmodels, n=n, sigma = 1, alpha=0.05) } powVsN <- targN(upperN = 100, lowerN = 10, step = 10, tFunc, alRatio = rep(1, 6)) plot(powVsN) ## the same can be achieved using the 
convenience powN function ## without the need to specify a target function powN(upperN = 100, lowerN=10, step = 10, contMat = contMat, sigma = 1, altModels = fmodels, alpha = 0.05, alRatio = rep(1, 6)) } } DoseFinding/man/DoseFinding-package.Rd0000644000176200001440000000672412620446427017307 0ustar liggesusers\name{DoseFinding-package} \alias{DoseFinding-package} \alias{DoseFinding} \docType{package} \title{ Design and Analysis of dose-finding studies } \description{ The DoseFinding package provides functions for the design and analysis of dose-finding experiments (for example pharmaceutical Phase II clinical trials). It provides functions for: multiple contrast tests (\code{MCTtest}), fitting non-linear dose-response models (\code{fitMod}), a combination of testing and dose-response modelling (\code{MCPMod}), and calculating optimal designs (\code{optDesign}), both for normal and general response variables. } \details{ \tabular{ll}{ Package: \tab DoseFinding\cr Type: \tab Package\cr Version: \tab 0.9-13\cr Date: \tab 2015-11-10\cr License: \tab GPL-3 } The main functions are:\cr \bold{MCTtest}: Implements multiple contrast tests\cr \bold{powMCT}: Power calculations for multiple contrast tests\cr \bold{fitMod}: Fits non-linear dose-response models\cr \bold{optDesign}: Calculates optimal designs for dose-response models\cr \bold{MCPMod}: Performs the MCPMod methodology\cr \bold{sampSize}: General function for sample size calculation\cr } \author{ Bjoern Bornkamp, Jose Pinheiro, Frank Bretz Maintainer: Bjoern Bornkamp } \references{ Bornkamp, B., Bretz, F., Dette, H. and Pinheiro, J. C. (2011). Response-Adaptive Dose-Finding under model uncertainty, \emph{Annals of Applied Statistics}, \bold{5}, 1611--1631 Bornkamp, B., Pinheiro, J. C., and Bretz, F. (2009). MCPMod: An R Package for the Design and Analysis of Dose-Finding Studies, \emph{Journal of Statistical Software}, \bold{29}(7), 1--23 Bretz, F., Pinheiro, J. C., and Branson, M. (2005), Combining multiple comparisons and modeling techniques in dose-response studies, \emph{Biometrics}, \bold{61}, 738--748 Dette, H., Bretz, F., Pepelyshev, A. and Pinheiro, J. C. (2008). Optimal Designs for Dose Finding Studies, \emph{Journal of the American Statistical Association}, \bold{103}, 1225--1237 Pinheiro, J. C., Bornkamp, B., and Bretz, F. (2006). Design and analysis of dose finding studies combining multiple comparisons and modeling procedures, \emph{Journal of Biopharmaceutical Statistics}, \bold{16}, 639--656 Pinheiro, J. C., Bornkamp, B., Glimm, E. and Bretz, F. (2014). Model-based dose finding under model uncertainty using general parametric models, \emph{Statistics in Medicine}, \bold{33}, 1646--1661 Seber, G.A.F. and Wild, C.J. (2003).
Nonlinear Regression, Wiley } \keyword{ package } \examples{ data(IBScovars) ## perform (model based) multiple contrast test ## define candidate dose-response shapes models <- Mods(linear = NULL, emax = 0.2, quadratic = -0.17, doses = c(0, 1, 2, 3, 4)) ## plot models plot(models) ## perform multiple contrast test test <- MCTtest(dose, resp, IBScovars, models=models, addCovars = ~ gender) ## fit non-linear emax dose-response model fitemax <- fitMod(dose, resp, data=IBScovars, model="emax", bnds = c(0.01,5)) ## display fitted dose-effect curve plot(fitemax, CI=TRUE, plotData="meansCI") ## Calculate optimal designs for target dose (TD) estimation doses <- c(0, 10, 25, 50, 100, 150) fmodels <- Mods(linear = NULL, emax = 25, exponential = 85, logistic = c(50, 10.8811), doses = doses, placEff=0, maxEff=0.4) plot(fmodels, plotTD = TRUE, Delta = 0.2) weights <- rep(1/4, 4) desTD <- optDesign(fmodels, weights, Delta=0.2, designCrit="TD") } DoseFinding/man/IBScovars.Rd0000644000176200001440000000217512100014352015331 0ustar liggesusers\name{IBScovars} \alias{IBScovars} \docType{data} \title{ Irritable Bowel Syndrome Dose Response data with covariates } \description{ A subset of the data used by Biesheuvel and Hothorn (2002). The data are part of a dose ranging trial on a compound for the treatment of the irritable bowel syndrome with four active treatment arms, corresponding to doses 1,2,3,4 and placebo. Note that the original dose levels have been blinded in this data set for confidentiality. The primary endpoint was a baseline adjusted abdominal pain score with larger values corresponding to a better treatment effect. In total 369 patients completed the study, with nearly balanced allocation across the doses. } \usage{data(IBScovars)} \format{ A data frame with 369 observations on the following 3 variables. \describe{ \item{\code{gender}}{a factor specifying the gender} \item{\code{dose}}{a numeric vector} \item{\code{resp}}{a numeric vector} } } \source{ Biesheuvel, E. and Hothorn, L. A. (2002). Many-to-one comparisons in stratified designs, \emph{Biometrical Journal}, \bold{44}, 101--116 } \keyword{datasets} DoseFinding/man/migraine.Rd0000644000176200001440000000141712203245054015300 0ustar liggesusers\name{migraine} \alias{migraine} \docType{data} \title{ Migraine Dose Response data } \description{ Data set obtained from clinicaltrials.gov (NCT00712725). This was a randomized, placebo-controlled dose-response trial for the treatment of acute migraine. The primary endpoint was "pain freedom at 2 hours postdose" (a binary measurement). } \usage{data(migraine)} \format{ A data frame summarizing the 517 patients that completed the trial, aggregated by dose group. \describe{ \item{\code{dose}}{a numeric vector containing the dose values} \item{\code{painfree}}{number of treatment responders} \item{\code{ntrt}}{number of subjects per treatment group} } } \source{ http://clinicaltrials.gov/ct2/show/results/NCT00712725 } \keyword{datasets} DoseFinding/man/neurodeg.Rd0000644000176200001440000000550612323407770015330 0ustar liggesusers\name{neurodeg} \alias{neurodeg} \docType{data} \title{ Neurodegenerative disease simulated longitudinal dose-finding data set } \description{ This simulated data set is motivated by a real Phase 2 clinical study of a new drug for a neurodegenerative disease. The state of the disease is measured through a functional scale, with smaller values corresponding to more severe neurodeterioration.
The goal of the drug is to reduce the rate of disease progression, which is measured by the linear slope of the functional scale over time. The trial design includes placebo and four doses: 1, 3, 10, and 30 mg, with balanced allocation of 50 patients per arm. Patients are followed up for one year, with measurements of the functional scale being taken at baseline and then every three months. The functional scale response is assumed to be normally distributed and, based on historical data, it is believed that the longitudinal progression of the functional scale over the one year of follow up can be modeled as a simple linear trend. See the example below on how to analyse this type of data. This data set was used in Pinheiro et al. (2013) to illustrate the generalized MCPMod methodology. } \usage{data(neurodeg)} \format{ A data frame with 100 observations on the following 4 variables. \describe{ \item{\code{resp}}{a numeric vector containing the response values} \item{\code{dose}}{a numeric vector containing the dose values} \item{\code{id}}{Patient ID} \item{\code{time}}{time of measurement} } } \source{ Pinheiro, J. C., Bornkamp, B., Glimm, E. and Bretz, F. (2014) Model-based dose finding under model uncertainty using general parametric models, \emph{Statistics in Medicine}, \bold{33}, 1646--1661 } \examples{ \dontrun{ ## reproduce analysis from Pinheiro et al. (2013) data(neurodeg) ## first fit the linear mixed effect model library(nlme) fm <- lme(resp ~ as.factor(dose):time, neurodeg, ~time|id) muH <- fixef(fm)[-1] # extract estimates covH <- vcov(fm)[-1,-1] ## derive optimal contrasts for candidate shapes doses <- c(0, 1, 3, 10, 30) mod <- Mods(emax = 1.11, quadratic= -0.022, exponential = 8.867, linear = NULL, doses = doses) contMat <- optContr(mod, S=covH) # calculate optimal contrasts ## multiple contrast test MCTtest(doses, muH, S=covH, type = "general", critV = TRUE, contMat=contMat) ## fit the emax model fitMod(doses, muH, S=covH, model="emax", type = "general", bnds=c(0.1, 10)) ## alternatively one can also fit the model using nlme nlme(resp ~ b0 + (e0 + eM * dose/(ed50 + dose))*time, neurodeg, fixed = b0 + e0 + eM + ed50 ~ 1, random = b0 + e0 ~ 1 | id, start = c(200, -4.6, 1.6, 3.2)) ## both approaches lead to rather similar results } } \keyword{datasets} DoseFinding/man/defBnds.Rd0000644000176200001440000000247012203245054015052 0ustar liggesusers\name{defBnds} \alias{defBnds} \title{ Calculates default bounds for non-linear parameters in dose-response models } \description{ Calculates reasonable bounds for non-linear parameters for the built-in non-linear regression models based on the dose range under investigation. For the logistic model the first row corresponds to the ED50 parameter and the second row to the delta parameter. For the sigmoid Emax model the first row corresponds to the ED50 parameter and the second row to the h parameter, while for the beta model the first and second rows correspond to the delta1 and delta2 parameters. See \code{\link{logistic}}, \code{\link{sigEmax}} and \code{\link{betaMod}} for details. } \usage{ defBnds(mD, emax = c(0.001, 1.5)*mD, exponential = c(0.1, 2)*mD, logistic = matrix(c(0.001, 0.01, 1.5, 1/2)*mD, 2), sigEmax = matrix(c(0.001*mD, 0.5, 1.5*mD, 10), 2), betaMod = matrix(c(0.05,0.05,4,4), 2)) } \arguments{ \item{mD}{ Maximum dose in the study. } \item{emax, exponential, logistic, sigEmax, betaMod}{ values for the nonlinear parameters for these model-functions } } \value{ List containing bounds for the model parameters.
} \author{ Bjoern Bornkamp } \seealso{ \code{\link{fitMod}} } \examples{ defBnds(mD = 1) defBnds(mD = 200) } DoseFinding/man/guesst.Rd0000644000176200001440000001213512100014352015005 0ustar liggesusers\name{guesst} \alias{guesst} \title{ Calculate guesstimates based on prior knowledge } \description{ Calculates guesstimates for standardized model parameter(s) using the general approach described in Pinheiro et al. (2006). } \usage{ guesst(d, p, model = c("emax", "exponential", "logistic", "quadratic", "betaMod", "sigEmax"), less = TRUE, local = FALSE, dMax, Maxd, scal) } \arguments{ \item{d}{ Vector containing dose value(s). } \item{p}{ Vector of expected percentages of the maximum effect achieved at d. } \item{model}{ Character string. Should be one of "emax", "exponential", "quadratic", "betaMod", "sigEmax", "logistic". } \item{less}{ Logical, only needed in case of quadratic model. Determines if d is smaller (\samp{less=TRUE}) or larger (\samp{less=FALSE}) than dopt (see Pinheiro et al. (2006) for details).} \item{local}{ Logical indicating whether local or asymptotic version of guesstimate should be derived (defaults to \samp{FALSE}). Only needed for emax, logistic and sigEmax model. When \samp{local=TRUE} the maximum dose must be provided via \samp{Maxd}.} \item{dMax}{ Dose at which maximum effect occurs, only needed for the beta model } \item{Maxd}{ Maximum dose to be administered in the trial } \item{scal}{ Scale parameter, only needed for the beta model } } \details{ Calculates guesstimates for the parameters \eqn{\theta_2}{theta2} of the standardized model function based on the prior expected percentage of the maximum effect at certain dose levels. Note that this function should be used together with the \code{\link{plot.Mods}} function to ensure that the guesstimates are reflecting the prior beliefs. For the logistic and sigmoid emax models at least two pairs (d,p) need to be specified. For the beta model the dose at which the maximum effect occurs (dMax) has to be specified in addition to the (d,p) pair. For the exponential model the maximum dose administered (Maxd) needs to be specified in addition to the (d,p) pair. For the quadratic model one (d,p) pair is needed. It is advisable to specify the location of the maximum within the dose range with this pair. For the emax, sigmoid Emax and logistic model one can choose between a local and an asymptotic version. In the local version one explicitly forces the standardized model function to pass through the specified points (d,p). For the asymptotic version it assumed that the standardized model function is equal to 1 at the largest dose (this is the approach described in Pinheiro et al. (2006)). If the local version is used, convergence problems with the underlying nonlinear optimization can occur. } \value{ Returns a numeric vector containing the guesstimates. } \references{ Bornkamp B., Pinheiro J. C., and Bretz, F. (2009). MCPMod: An R Package for the Design and Analysis of Dose-Finding Studies, \emph{Journal of Statistical Software}, \bold{29}(7), 1--23 Pinheiro, J. C., Bretz, F., and Branson, M. (2006). Analysis of dose-response studies - modeling approaches, \emph{in} N. Ting (ed.), \emph{Dose Finding in Drug Development}, Springer, New York, pp. 
146--171 } \examples{ ## Emax model ## Expected percentage of maximum effect: 0.8 is associated with ## dose 0.3 (d,p)=(0.3, 0.8), dose range [0,1] emx1 <- guesst(d=0.3, p=0.8, model="emax") ## local approach emx2 <- guesst(d=0.3, p=0.8, model="emax", local = TRUE, Maxd = 1) ## plot models models <- Mods(emax=c(emx1, emx2), doses=c(0,1)) plot(models) ## Logistic model ## Select two (d,p) pairs (0.2, 0.5) and (0.6, 0.95) lgc1 <- guesst(d = c(0.2, 0.5), p = c(0.6, 0.95), "logistic") ## local approach lgc2 <- guesst(d = c(0.2, 0.5), p = c(0.6, 0.95), "logistic", local = TRUE, Maxd = 1) ## plot models models <- Mods(logistic = rbind(lgc1, lgc2), doses=c(0,1)) plot(models) ## Beta Model ## Select one pair (d,p): (0.5,0.5) ## dose, where maximum occurs: 0.8 bta <- guesst(d=0.5, p=0.5, model="betaMod", dMax=0.8, scal=1.2, Maxd=1) ## plot models <- Mods(betaMod = bta, doses=c(0,1), addArgs = list(scal = 1.2)) plot(models) ## Sigmoid Emax model ## Select two (d,p) pairs (0.2, 0.5) and (0.6, 0.95) sgE1 <- guesst(d = c(0.2, 0.5), p = c(0.6, 0.95), "sigEmax") ## local approach sgE2 <- guesst(d = c(0.2, 0.5), p = c(0.6, 0.95), "sigEmax", local = TRUE, Maxd = 1) models <- Mods(sigEmax = rbind(sgE1, sgE2), doses=c(0,1)) plot(models) ## Quadratic model ## For the quadratic model it is assumed that the maximum effect occurs at ## dose 0.7 quad <- guesst(d = 0.7, p = 1, "quadratic") models <- Mods(quadratic = quad, doses=c(0,1)) plot(models) ## exponential model ## (d,p) = (0.8,0.5) expo <- guesst(d = 0.8, p = 0.5, "exponential", Maxd=1) models <- Mods(exponential = expo, doses=c(0,1)) plot(models) } \seealso{ \code{\link{emax}}, \code{\link{logistic}}, \code{\link{betaMod}}, \code{\link{sigEmax}}, \code{\link{quadratic}}, \code{\link{exponential}}, \code{\link{plot.Mods}} } DoseFinding/man/optContr.Rd0000644000176200001440000001236012323407770015324 0ustar liggesusers\name{optContr} \alias{optContr} \alias{plot.optContr} \title{ Calculate optimal contrasts } \description{ This function calculates contrast vectors that are optimal for detecting certain alternatives. The contrast is optimal in the sense of maximizing the non-centrality parameter of the underlying contrast test statistic: \deqn{\frac{c'\mu}{\sqrt{c'Sc}}}{c'mu/sqrt(c'Sc).} Here \eqn{\mu}{mu} is the mean vector under the alternative and \eqn{S}{S} the covariance matrix associated with the estimate of \eqn{\mu}{mu}. The optimal contrast is given by \deqn{ c^{opt} \propto S^{-1}\left(\mu - \frac{\mu^{\prime}S^{-1} 1}{1^\prime S^{-1} 1}1\right),}{c^opt propto S^(-1)(mu - (mu'S^(-1)1)/(1'S^(-1)1) 1),} see Pinheiro et al. (2013). Note that the directionality (i.e. whether an "increase" in the response variable is beneficial or a "decrease") is inferred from the specified \samp{models} object, see \code{\link{Mods}} for details. Constrained contrasts (type = "constrained") add the additional constraint in the optimization that the signs of the contrast coefficients for control and active treatments need to be different. The quadratic programming algorithm from the quadprog package is used to calculate the contrasts. } \usage{ optContr(models, doses, w, S, placAdj = FALSE, type = c("unconstrained", "constrained")) \method{plot}{optContr}(x, superpose = TRUE, xlab = "Dose", ylab = NULL, plotType = c("contrasts", "means"), ...) } \arguments{ \item{models}{ An object of class \samp{Mods} defining the dose-response shapes for which to calculate optimal contrasts. } \item{doses}{ Optional argument.
If this argument is missing the doses attribute in the \samp{Mods} object specified in \samp{models} is used. } \item{w, S}{ Arguments determining the matrix S used in the formula for the optimal contrasts. Exactly one of \samp{w} and \samp{S} has to be specified. Note that \samp{w} and \samp{S} only have to be specified up to proportionality.\cr \itemize{ \item{w}{ Vector specifying weights for the different doses, in the formula for calculation of the optimal contrasts. Specifying a weights vector is equivalent to specifying S=diag(1/w) (e.g. in a homoscedastic case with unequal sample sizes, \samp{w} should be proportional to the group sample sizes).} \item{S}{ Directly specify a matrix proportional to the covariance matrix to use.} } } \item{placAdj}{ Logical determining whether the contrasts should be applied to placebo-adjusted estimates. If yes, the returned coefficients are no longer contrasts (i.e. do not sum to 0). However, multiplying this "contrast" matrix with the placebo-adjusted estimates will give the same results as multiplying the original contrast matrix with the unadjusted estimates. } \item{type}{ For \samp{type = "constrained"} the contrast coefficients of the zero dose group are constrained to be different from the coefficients of the active treatment groups, so that a weighted sum of the active treatments is compared against the zero dose group. For an increasing trend the coefficient of the zero dose group is negative and all other coefficients have to be positive (for a decreasing trend the other way round). } \item{x, superpose, xlab, ylab, plotType}{ Arguments for the plot method for optContr objects. plotType determines whether the contrasts or the underlying (standardized) mean matrix should be plotted. } \item{...}{ Additional arguments for the plot method } } \value{ Object of class \samp{optContr}. A list containing the entries contMat and muMat (i.e. the contrast and mean matrices). } \references{ Bretz, F., Pinheiro, J. C., and Branson, M. (2005), Combining multiple comparisons and modeling techniques in dose-response studies, \emph{Biometrics}, \bold{61}, 738--748 Pinheiro, J. C., Bornkamp, B., Glimm, E. and Bretz, F. (2014) Model-based dose finding under model uncertainty using general parametric models, \emph{Statistics in Medicine}, \bold{33}, 1646--1661 } \author{ Bjoern Bornkamp } \seealso{ \code{\link{MCTtest}} } \examples{ doses <- c(0,10,25,50,100,150) models <- Mods(linear = NULL, emax = 25, logistic = c(50, 10.88111), exponential= 85, betaMod=rbind(c(0.33,2.31), c(1.39,1.39)), doses = doses, addArgs = list(scal = 200)) contMat <- optContr(models, w = rep(50,6)) plot(contMat) ## now we would like the "contrasts" for placebo adjusted estimates dosPlac <- doses[-1] ## matrix proportional to cov-matrix of plac. adj.
estimates for balanced data S <- diag(5)+matrix(1, 5,5) ## note that we explicitly hand over the doses here contMat0 <- optContr(models, doses=dosPlac, S = S, placAdj = TRUE) ## -> contMat0 is no longer a contrast matrix (columns do not sum to 0) colSums(contMat0$contMat) ## calculate contrast matrix for unadjusted estimates from this matrix ## (should be same as above) aux <- rbind(-colSums(contMat0$contMat), contMat0$contMat) t(t(aux)/sqrt(colSums(aux^2))) ## compare to contMat$contMat ## now calculate constrained contrasts optContr(models, w = rep(50,6), type = "constrained") optContr(models, doses=dosPlac, S = S, placAdj = TRUE, type = "constrained") } DoseFinding/man/planMod.Rd0000644000176200001440000001541112300143062015070 0ustar liggesusers\name{planMod} \alias{planMod} \alias{plot.planMod} \alias{summary.planMod} \title{ Evaluate performance metrics for fitting dose-response models } \description{ This function evaluates, the performance metrics for fitting dose-response models (using asymptotic approximations or simulations). Metrics are \itemize{ \item Root of the mean-squared error to estimate the placebo-adjusted dose-response averaged over the used dose-levels (\code{dRMSE}). \item Root of the mean-squared error to estimate the placebo-adjusted dose-response (\code{cRMSE}) averaged over fine (almost continuous) grid at 101 equally spaced values between placebo and the maximum dose. Available via the summary method. \item Ratio of the placebo-adjusted mean-squared error (at the observed doses) of model-based vs ANOVA approach (\code{Eff-vs-ANOVA}). This can be interpreted on the sample size scale and is available via the summary method. \item Power that the (unadjusted) one-sided \samp{1-alpha} confidence interval comparing the dose with maximum effect vs placebo is larger than \samp{tau}. By default \samp{alpha = 0.025} and \samp{tau = 0} (\code{Pow(maxDose)}). \item Probability that the EDp estimate is within the true [EDpLB, EDpUB] (by default \samp{p=0.5}, \samp{pLB=0.25} and \samp{pUB=0.75}). This metric gives an idea on the ability to characterize the increasing part of the dose-response curve (\code{P(EDp)}). \item Length of the confidence interval for a target dose (TD or EDp). This is calculated by taking the difference of the dUB and dLB quantile of the empirical distribution of the target dose estimates. (\code{lengthTDCI} and \code{lengthEDpCI}). Available via the summary method. } A plot method exists to summarize dose-response and dose estimations graphically. } \usage{ planMod(model, altModels, n, sigma, S, doses, asyApprox = TRUE, simulation = FALSE, alpha = 0.025, tau = 0, p = 0.5, pLB = 0.25, pUB = 0.75, nSim = 100, cores = 1, showSimProgress = TRUE, bnds, addArgs = NULL) \method{plot}{planMod}(x, type = c("dose-response", "ED", "TD"), p, Delta, direction, placAdj = FALSE, xlab, ylab, ...) \method{summary}{planMod}(object, digits = 3, len = 101, Delta, direction = c("increasing", "decreasing"), p, dLB = 0.05, dUB = 0.95, ...) } \arguments{ \item{model}{ Character vector determining the dose-response model(s) to be used for fitting the data. When more than one dose-response model is provided the best fitting model is chosen using the AIC. Built-in models are "linlog", "linear", "quadratic", "emax", "exponential", "sigEmax", "betaMod" and "logistic" (see \link{drmodels}). } \item{altModels}{ An object of class \samp{Mods}, defining the true mean vectors under which operating characteristics should be calculated. 
} \item{n, sigma, S}{ Either a vector \samp{n} and \samp{sigma} or \samp{S} need to be specified. When \samp{n} and \samp{sigma} are specified it is assumed computations are made for a normal homoscedastic ANOVA model with group sample sizes given by \samp{n} and residual standard deviation \samp{sigma}, i.e. the covariance matrix used for the estimates is thus \code{sigma^2*diag(1/n)} and the degrees of freedom are calculated as \code{sum(n)-nrow(contMat)}. When a single number is specified for \samp{n} it is assumed this is the sample size per group and balanced allocations are used.\cr When \samp{S} is specified this will be used as covariance matrix for the estimates. } \item{doses}{ Doses to use } \item{asyApprox, simulation}{ Logicals determining whether asymptotic approximations or simulations should be calculated. If multiple models are specified in \samp{model} asymptotic approximations are not available. } \item{alpha, tau}{ Significance level for the one-sided confidence interval for the model-based contrast of the best dose vs placebo. Tau is the threshold to compare the confidence interval limit to. CI(MaxDCont) gives the percentage that the bound of the confidence interval was larger than tau. } \item{p, pLB, pUB}{ p determines the type of EDp to estimate. pLB and pUB define the bounds for the EDp estimate. The performance metric Pr(Id-ED) gives the percentage that the estimated EDp was within the true EDpLB and EDpUB. } \item{nSim}{ Number of simulations } \item{cores}{ Number of cores to use for simulations. By default 1 core is used; note that cores > 1 will have no effect on Windows, as the mclapply function is used internally. } \item{showSimProgress}{ In case of simulations show the progress using a progress-bar. } \item{bnds}{ Bounds for non-linear parameters. This needs to be a list with list entries corresponding to the selected bounds. The names of the list entries need to correspond to the model names. The \code{\link{defBnds}} function provides the default selection. } \item{addArgs}{ See the corresponding argument in function \code{\link{fitMod}}. This argument is directly passed to fitMod. } \item{x}{ An object of class planMod } \item{type}{ Type of plot to produce } \item{Delta, direction}{ Additional arguments determining what dose estimate to plot, when \samp{type = "ED"} or \samp{type = "TD"} } \item{placAdj}{ When \samp{type = "dose-response"}, this determines whether dose-response estimates are shown on placebo-adjusted or original scale } \item{xlab, ylab}{ Labels for the plot (ylab only applies for \samp{type = "dose-response"}) } \item{len}{ Number of equally spaced points to determine the mean-squared error on a grid (cRMSE). } \item{dLB, dUB}{ Which quantiles to use for calculation of \code{lengthTDCI} and \code{lengthEDpCI}. By default dLB = 0.05 and dUB = 0.95, so that this corresponds to a 90\% interval. } \item{object, digits}{ object: A planMod object.
digits: Digits in summary output } \item{...}{ Additional arguments (currently ignored) } } \references{ TBD } \author{ Bjoern Bornkamp } \seealso{ \code{\link{fitMod}} } \examples{ \dontrun{ doses <- c(0,10,25,50,100,150) fmodels <- Mods(linear = NULL, emax = 25, logistic = c(50, 10.88111), exponential= 85, betaMod=rbind(c(0.33,2.31),c(1.39,1.39)), doses = doses, addArgs=list(scal = 200), placEff = 0, maxEff = 0.4) sigma <- 1 n <- rep(62, 6)*2 model <- "quadratic" pObj <- planMod(model, fmodels, n, sigma, doses=doses, simulation = TRUE, alpha = 0.025, nSim = 200, p = 0.5, pLB = 0.25, pUB = 0.75) print(pObj) summary(pObj, p = 0.5, Delta = 0.3) plot(pObj) plot(pObj, type = "TD", Delta=0.3, direction = "increasing") plot(pObj, type = "ED", p = 0.5) } } DoseFinding/man/powMCT.Rd0000644000176200001440000001107312300143062014647 0ustar liggesusers\name{powMCT} \alias{powMCT} \title{ Calculate power for multiple contrast test } \description{ Calculate power for a multiple contrast test for a set of specified alternatives. } \usage{ powMCT(contMat, alpha = 0.025, altModels, n, sigma, S, placAdj=FALSE, alternative = c("one.sided", "two.sided"), df, critV, control = mvtnorm.control()) } \arguments{ \item{contMat}{ Contrast matrix to use. The individual contrasts should be saved in the columns of the matrix } \item{alpha}{ Significance level to use } \item{altModels}{ An object of class \samp{Mods}, defining the mean vectors under which the power should be calculated } \item{n, sigma, S}{ Either a vector \samp{n} and \samp{sigma} or \samp{S} need to be specified. When \samp{n} and \samp{sigma} are specified it is assumed computations are made for a normal homoscedastic ANOVA model with group sample sizes given by \samp{n} and residual standard deviation \samp{sigma}, i.e. the covariance matrix used for the estimates is thus \code{sigma^2*diag(1/n)} and the degrees of freedom are calculated as \code{sum(n)-nrow(contMat)}. When a single number is specified for \samp{n} it is assumed this is the sample size per group and balanced allocations are used.\cr When \samp{S} is specified this will be used as covariance matrix for the estimates. } \item{placAdj}{ Logical, if true, it is assumed that the standard deviation or variance matrix of the placebo-adjusted estimates are specified in \samp{sigma} or \samp{S}, respectively. The contrast matrix has to be produced on placebo-adjusted scale, see \code{\link{optContr}}, so that the coefficients are no longer contrasts (i.e. do not sum to 0). } \item{alternative}{ Character determining the alternative for the multiple contrast trend test. } \item{df}{ Degrees of freedom to assume in case \samp{S} (a general covariance matrix) is specified. When \samp{n} and \samp{sigma} are specified the ones from the corresponding ANOVA model are calculated. } \item{critV}{ Critical value, if equal to \samp{TRUE} the critical value will be calculated. Otherwise one can directly specify the critical value here. } \item{control}{ A list specifying additional control parameters for the \samp{qmvt} and \samp{pmvt} calls in the code, see also \samp{mvtnorm.control} for details. } } \value{ Numeric containing the calculated power values } \references{ Pinheiro, J. C., Bornkamp, B., and Bretz, F. (2006). 
Design and analysis of dose finding studies combining multiple comparisons and modeling procedures, \emph{Journal of Biopharmaceutical Statistics}, \bold{16}, 639--656 } \author{ Bjoern Bornkamp } \seealso{ \code{\link{powN}}, \code{\link{sampSizeMCT}}, \code{\link{MCTtest}}, \code{\link{optContr}}, \code{\link{Mods}} } \examples{ ## look at power under some dose-response alternatives ## first the candidate models used for the contrasts doses <- c(0,10,25,50,100,150) ## define models to use as alternative fmodels <- Mods(linear = NULL, emax = 25, logistic = c(50, 10.88111), exponential= 85, betaMod=rbind(c(0.33,2.31),c(1.39,1.39)), doses = doses, addArgs=list(scal = 200), placEff = 0, maxEff = 0.4) ## plot alternatives plot(fmodels) ## power for to detect a trend contMat <- optContr(fmodels, w = 1) powMCT(contMat, altModels = fmodels, n = 50, alpha = 0.05, sigma = 1) ## power under the Dunnett test ## contrast matrix for Dunnett test with informative names contMatD <- rbind(-1, diag(5)) rownames(contMatD) <- doses colnames(contMatD) <- paste("D", doses[-1], sep="") powMCT(contMatD, altModels = fmodels, n = 50, alpha = 0.05, sigma = 1) ## now investigate power of the contrasts in contMat under "general" alternatives altFmods <- Mods(linInt = rbind(c(0, 1, 1, 1, 1), c(0.5, 1, 1, 1, 0.5)), doses=doses, placEff=0, maxEff=0.5) plot(altFmods) powMCT(contMat, altModels = altFmods, n = 50, alpha = 0.05, sigma = 1) ## now the first example but assume information only on the ## placebo-adjusted scale ## for balanced allocations and 50 patients with sigma = 1 one obtains ## the following covariance matrix S <- 1^2/50*diag(6) ## now calculate variance of placebo adjusted estimates CC <- cbind(-1,diag(5)) V <- (CC)\%*\%S\%*\%t(CC) linMat <- optContr(fmodels, doses = c(10,25,50,100,150), S = V, placAdj = TRUE) powMCT(linMat, altModels = fmodels, placAdj=TRUE, alpha = 0.05, S = V, df=6*50-6) # match df with the df above } DoseFinding/man/optDesign.Rd0000644000176200001440000003171612203245054015446 0ustar liggesusers\name{optDesign} \alias{optDesign} \alias{plot.DRdesign} \alias{calcCrit} \alias{rndDesign} \title{ Function to calculate optimal designs } \description{ Given a set of models (with full parameter values and model probabilities) the \samp{optDesign} function calculates the optimal design for estimating the dose-response model parameters (D-optimal) or the design for estimating the target dose (TD-optimal design) (see Dette, Bretz, Pepelyshev and Pinheiro (2008)), or a mixture of these two criteria. The design can be plotted (together with the candidate models) using \samp{plot.design}. \samp{calcCrit} calculates the design criterion for a discrete set of design(s). \samp{rndDesign} provides efficient rounding for the calculated continous design to a finite sample size. } \usage{ optDesign(models, probs, doses, designCrit = c("Dopt", "TD", "Dopt&TD", "userCrit"), Delta, standDopt = TRUE, weights, nold = rep(0, length(doses)), n, control=list(), optimizer = c("solnp", "Nelder-Mead", "nlminb", "exact"), lowbnd = rep(0, length(doses)), uppbnd = rep(1, length(doses)), userCrit, ...) \method{plot}{DRdesign}(x, models, lwdDes = 10, colDes = rgb(0,0,0,0.3), ...) calcCrit(design, models, probs, doses, designCrit = c("Dopt", "TD", "Dopt&TD"), Delta, standDopt = TRUE, weights, nold = rep(0, length(doses)), n) rndDesign(design, n, eps = 0.0001) } \arguments{ \item{models}{ An object of class \samp{c(Mods, fullMod)}, see the \code{\link{Mods}} function for details. 
When a TD-optimal design should be calculated, the TD needs to exist for all models. If a D-optimal design should be calculated, you need at least as many doses as there are parameters in the specified models. } \item{probs}{ Vector of model probabilities for the models specified in \samp{models}, assumed in the same order as specified in models } \item{doses}{ Optional argument. If this argument is missing the doses attribute in the \samp{c(Mods, fullMod)} object specified in \samp{models} is used. } \item{designCrit}{ Determines which type of design to calculate. "Dopt&TD" uses both optimality criteria with equal weight. } \item{Delta}{ Target effect needed for calculating "TD" and "Dopt&TD" type designs. } \item{standDopt}{ Logical determining whether the D-optimality criterion (specifically the log-determinant) should be standardized by the number of parameters in the model or not (only of interest if designCrit = "Dopt" or designCrit = "Dopt&TD"). This is of interest when there is more than one model class in the candidate model set (traditionally this standardization is done in the optimal design literature). } \item{weights}{ Vector of weights associated with the response at the doses. Needs to be of the same length as the \samp{doses}. This can be used to calculate designs for heteroscedastic or for generalized linear model situations. } \item{nold, n}{ When calculating an optimal design at an interim analysis, \samp{nold} specifies the vector of sample sizes already allocated to the different doses, and \samp{n} gives the sample size for the next cohort. For \samp{optimizer = "exact"} one always needs to specify the total sample size via \samp{n}. } \item{control}{ List containing control parameters passed down to numerical optimization algorithms (\code{\link{optim}}, \code{\link{nlminb}} or solnp function).\cr For \samp{optimizer = "exact"} this should be a list with possible entries \samp{maxvls1} and \samp{maxvls2}, determining the maximum number of designs allowed for passing to the criterion function (default \samp{maxvls2=1e5}) and for creating the initial unrestricted matrix of designs (default \samp{maxvls1=1e6}). In addition there can be an entry \samp{groupSize}, in case patients can only be allocated in groups of a certain minimum size. } \item{optimizer}{ Algorithm used for calculating the optimal design. Options "Nelder-Mead" and "nlminb" use the \code{\link{optim}} and \code{\link{nlminb}} function and use a trigonometric transformation to turn the constrained optimization problem into an unconstrained one (see Atkinson, Donev and Tobias, 2007, pages 130,131). Option "solnp" uses the solnp function from the Rsolnp package, which implements an optimizer for non-linear optimization under general constraints. Option "exact" tries all possible allocations of \samp{n} patients to the given dose groups (subject to the bounds specified via \samp{lowbnd} and \samp{uppbnd}) and reports the best design. When patients are only allowed to be allocated in groups of a certain \samp{groupSize}, this can be adjusted via the control argument. \samp{n/groupSize} and \samp{length(doses)} should be rather small for this approach to be feasible. When the number of doses is small (<8) usually \samp{"Nelder-Mead"} and \samp{"nlminb"} are best suited (\samp{"nlminb"} is usually a bit faster but less stable than \samp{"Nelder-Mead"}). For a larger number of doses \samp{"solnp"} is the most reliable option (but also slowest) (\samp{"Nelder-Mead"} and \samp{"nlminb"} often fail).
When the sample size is small \samp{"exact"} provides the optimal solution rather quickly. } \item{lowbnd, uppbnd}{ Vectors of the same length as dose vector specifying upper and lower limits for the allocation weights. This option is only available when using the "solnp" and "exact" optimizers. } \item{userCrit}{ User defined design criterion, should be a function that given a vector of allocation weights and the doses returns the criterion function. When specified \samp{models} does not need to be handed over. The first argument of \samp{userCrit} should be the vector of design weights, while the second argument should be the \samp{doses} argument (see example below). Additional arguments to \samp{userCrit} can be passed via ... } \item{...}{ For function \samp{optDesign} these are additional arguments passed to \samp{userCrit}.\cr \cr For function \samp{plot.design} these are additional parameters passed to \code{\link{plot.Mods}}.\cr } \item{design}{ Argument for \samp{rndDesign} and \samp{calcCrit} functions: Numeric vector (or matrix) of allocation weights for the different doses. The rows of the matrices need to sum to 1. Alternatively also an object of class "DRdesign" can be used for \samp{rndDesign}. Note that there should be at least as many design points available as there are parameters in the dose-response models selected in \code{models} (otherwise the code returns an NA). } \item{eps}{ Argument for \samp{rndDesign} function: Value under which elements of w will be regarded as 0. } \item{x}{ Object of class \samp{DRdesign} (for \samp{plot.design}) } \item{lwdDes, colDes}{ Line width and color of the lines plotted for the design (in \samp{plot.design}) } } \author{ Bjoern Bornkamp } \details{ Let \eqn{M_m}{M_m} denote the Fisher information matrix under model m (up to proportionality). \eqn{M_m}{M_m} is given by \eqn{\sum a_i w_i g_i^Tg_i}{\sum a_i w_i g_i^Tg_i}, where \eqn{a_i}{a_i} is the allocation weight to dose i, \eqn{w_i}{w_i} the weight for dose i specified via \samp{weights} and \eqn{g_i}{g_i} the gradient vector of model m evaluated at dose i. For \samp{designCrit = "Dopt"} the code minimizes the design criterion \deqn{-\sum_{m}p_m/k_m \log(\det(M_m))}{-sum_m p_m/k_m log(det(M_m))} where \eqn{p_m}{p_m} is the probability for model m and \eqn{k_m}{k_m} is the number of parameters for model m. When \samp{standDopt = FALSE} the \eqn{k_m}{k_m} are all assumed to be equal to one. For \samp{designCrit = "TD"} the code minimizes the design criterion \deqn{\sum_{m}p_m \log(v_m)}{sum_m p_m log(v_m)} where \eqn{p_m}{p_m} is the probability for model m and \eqn{v_m}{v_m} is proportional to the asymptotic variance of the TD estimate and given by \eqn{b_m'M_m^{-}b_m}{b_m'Minv_m b_m} (see Dette et al. (2008), p. 1227 for details). For \samp{designCrit = "Dopt&TD"} the code minimizes the design criterion \deqn{\sum_{m}p_m(-0.5\log(\det(M_m))/k_m+0.5\log(v_m))}{sum_m p_m(-0.5log(det(M_m))/k_m+0.5log(v_m))} Again, for \samp{standDopt = FALSE} the \eqn{k_m}{k_m} are all assumed to be equal to one. For details on the \samp{rndDesign} function, see Pukelsheim (1993), Chapter 12. } \note{ In some cases (particularly when the number of doses is large, e.g. 7 or larger) it might be necessary to allow a larger number of iterations in the algorithm (via the argument \samp{control}), particularly for the Nelder-Mead algorithm. Alternatively one can use the solnp optimizer that is usually the most reliable, but not fastest option. 
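For example (a minimal sketch, reusing the \samp{emodel} object created in the examples below; the value of \samp{maxit} is arbitrary), the iteration limit for the Nelder-Mead optimizer can be raised via the \samp{control} argument: \preformatted{## allow more Nelder-Mead iterations than the default
optDesign(emodel, probs = 1, optimizer = "Nelder-Mead",
          control = list(maxit = 3000))
}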
} \seealso{ \code{\link{Mods}}, \code{\link{drmodels}} } \references{ Atkinson, A.C., Donev, A.N. and Tobias, R.D. (2007). Optimum Experimental Designs, with SAS, Oxford University Press Dette, H., Bretz, F., Pepelyshev, A. and Pinheiro, J. C. (2008). Optimal Designs for Dose Finding Studies, \emph{Journal of the American Statisical Association}, \bold{103}, 1225--1237 Pukelsheim, F. (1993). Optimal Design of Experiments, Wiley } \examples{ ## calculate designs for Emax model doses <- c(0, 10, 100) emodel <- Mods(emax = 15, doses=doses, placEff = 0, maxEff = 1) optDesign(emodel, probs = 1) ## TD-optimal design optDesign(emodel, probs = 1, designCrit = "TD", Delta=0.5) ## 50-50 mixture of Dopt and TD optDesign(emodel, probs = 1, designCrit = "Dopt&TD", Delta=0.5) ## use dose levels different from the ones specified in emodel object des <- optDesign(emodel, probs = 1, doses = c(0, 5, 20, 100)) ## plot models overlaid by design plot(des, emodel) ## round des to a sample size of exactly 90 patients rndDesign(des, n=90) ## using the round function would lead to 91 patients ## illustrating different optimizers (see Note above for more comparison) optDesign(emodel, probs=1, optimizer="Nelder-Mead") optDesign(emodel, probs=1, optimizer="nlminb") ## optimizer solnp (the default) can deal with lower and upper bounds: optDesign(emodel, probs=1, designCrit = "TD", Delta=0.5, optimizer="solnp", lowbnd = rep(0.2,3)) ## exact design using enumeration of all possibilites optDesign(emodel, probs=1, optimizer="exact", n = 30) ## also allows to fix minimum groupSize optDesign(emodel, probs=1, designCrit = "TD", Delta=0.5, optimizer="exact", n = 30, control = list(groupSize=5)) ## optimal design at interim analysis ## assume there are already 10 patients on each dose and there are 30 ## left to randomize, this calculates the optimal increment design optDesign(emodel, 1, designCrit = "TD", Delta=0.5, nold = c(10, 10, 10), n=30) ## use a larger candidate model set doses <- c(0, 10, 25, 50, 100, 150) fmods <- Mods(linear = NULL, emax = 25, exponential = 85, linlog = NULL, logistic = c(50, 10.8811), doses = doses, addArgs=list(off=1), placEff=0, maxEff=0.4) probs <- rep(1/5, 5) # assume uniform prior desDopt <- optDesign(fmods, probs, optimizer = "nlminb") desTD <- optDesign(fmods, probs, designCrit = "TD", Delta = 0.2, optimizer = "nlminb") desMix <- optDesign(fmods, probs, designCrit = "Dopt&TD", Delta = 0.2) ## plot design and truth plot(desMix, fmods) ## illustrate calcCrit function ## calculate optimal design for beta model doses <- c(0, 0.49, 25.2, 108.07, 150) models <- Mods(betaMod = c(0.33, 2.31), doses=doses, addArgs=list(scal=200), placEff=0, maxEff=0.4) probs <- 1 deswgts <- optDesign(models, probs, designCrit = "Dopt", control=list(maxit=1000)) ## now compare this design to equal allocations on ## 0, 10, 25, 50, 100, 150 doses2 <- c(0, 10, 25, 50, 100, 150) design2 <- c(1/6, 1/6, 1/6, 1/6, 1/6, 1/6) crit2 <- calcCrit(design2, models, probs, doses2, designCrit = "Dopt") ## ratio of determinants (returned criterion value is on log scale) exp(deswgts$crit-crit2) ## example for calculating an optimal design for logistic regression doses <- c(0, 0.35, 0.5, 0.65, 1) fMod <- Mods(linear = NULL, doses=doses, placEff=-5, maxEff = 10) ## now calculate weights to use in the covariance matrix mu <- as.numeric(getResp(fMod, doses=doses)) mu <- 1/(1+exp(-mu)) weights <- mu*(1-mu) des <- optDesign(fMod, 1, doses, weights = weights) ## one can also specify a user defined criterion function ## here D-optimality for 
cubic polynomial CubeCrit <- function(w, doses){ X <- cbind(1, doses, doses^2, doses^3) CVinv <- crossprod(X*w) -log(det(CVinv)) } optDesign(doses = c(0,0.05,0.2,0.6,1), designCrit = "userCrit", userCrit = CubeCrit, optimizer = "nlminb") } DoseFinding/man/mvtnorm-control.Rd0000644000176200001440000000141212100014352016647 0ustar liggesusers\name{mvtnorm.control} \alias{mvtnorm.control} \title{ Control options for pmvt and qmvt functions } \description{ Returns a list (an object of class "GenzBretz") with control parameters for the \samp{pmvt} and \samp{qmvt} functions from the \samp{mvtnorm} package. Note that the DoseFinding package always uses "GenzBretz" algorithm. See the mvtnorm documentation for more information. } \usage{ mvtnorm.control(maxpts = 30000, abseps = 0.001, releps = 0, interval = NULL) } \arguments{ \item{maxpts}{ Maximum number of function values as integer. } \item{abseps}{ Absolute error tolerance as double. } \item{releps}{ Relative error tolerance as double. } \item{interval}{ Interval to be searched, when the quantile is calculated. } } DoseFinding/man/drmodels.Rd0000644000176200001440000002227512170313470015324 0ustar liggesusers\name{DR-Models} \alias{drmodels} \alias{betaMod} \alias{emax} \alias{sigEmax} \alias{exponential} \alias{logistic} \alias{linear} \alias{linlog} \alias{quadratic} \alias{linInt} \alias{betaModGrad} \alias{emaxGrad} \alias{sigEmaxGrad} \alias{exponentialGrad} \alias{logisticGrad} \alias{linearGrad} \alias{linlogGrad} \alias{quadraticGrad} \alias{linIntGrad} \title{ Built-in dose-response models in DoseFinding } \description{ Dose-response model functions and gradients. Below are the definitions of the model functions: \bold{Emax model} \deqn{ f(d,\theta)=E_0+E_{max}\frac{d}{ED_{50}+d}}{f(d,theta)=E0+Emax d/(ED50 + d).} \bold{Sigmoid Emax Model} \deqn{ f(d,\theta)=E_0+E_{max}\frac{d^h}{ED^h_{50}+d^h}}{f(d,theta)=E0+Emax d^h/(ED50^h + d^h).} \bold{Exponential Model} \deqn{ f(d,\theta)=E_0+E_1(\exp(d/\delta)-1)}{f(d,theta)=E0+E1 (exp(d/delta)-1).} \bold{Beta model} \deqn{ f(d,\theta)=E_0+E_{max}B(\delta_1,\delta_2)(d/scal)^{\delta_1}(1-d/scal)^{\delta_2} }{f(d,theta)=E0+Emax B(delta1,delta2)(d/scal)^delta1(1-d/scal)^delta2} here \deqn{B(\delta_1,\delta_2)=(\delta_1+\delta_2)^{\delta_1+\delta_2}/(\delta_1^{\delta_1} \delta_2^{\delta_2})}{B(delta1,delta2)=(delta1+delta2)^(delta1+delta2)/(delta1^delta1 delta2^delta2).} and \eqn{scal}{scal} is a fixed dose scaling parameter. \bold{Linear Model} \deqn{ f(d,\theta)=E_0+\delta d}{f(d,theta)=E0+delta d.} \bold{Linear in log Model} \deqn{ f(d,\theta)=E_0+\delta \log(d + off)}{f(d,theta)=E0+delta log(d + off),} here \eqn{off}{off} is a fixed offset parameter. \bold{Logistic Model} \deqn{ f(d, \theta) = E_0 + E_{\max}/\left\{1 + \exp\left[ \left(ED_{50} - d \right)/\delta \right] \right\}}{f(d,theta)=E0+Emax/(1 + exp((ED50-d)/delta)).} \bold{Quadratic Model} \deqn{ f(d,\theta)=E_0+\beta_1d+\beta_2d^2}{f(d,theta)=E0+beta1 d+beta2 d^2.} \bold{Linear Interpolation model}\cr The linInt model provides linear interpolation at the values defined by the nodes vector. In virtually all situations the nodes vector is equal to the doses used in the analysis. For example the \code{\link{Mods}} and the \code{\link{fitMod}} function automatically use the doses that are used in the context of the function call as nodes. 
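For a quick check of the interpolation itself, the model function can be evaluated directly (a minimal sketch with arbitrary illustrative values, mirroring the \samp{linIntGrad} call in the examples below): \preformatted{## means at the nodes, interpolated linearly in between
linInt(dose = (0:4)/4, resp = c(0, 0.5, 1, 1, 1), nodes = (0:4)/4)
}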
The guesstimates specified in the \code{\link{Mods}} function need to be the treatment effects at the active doses standardized to the interval [0,1] (see the examples in the \code{\link{Mods}} function). } \usage{ emax(dose, e0, eMax, ed50) emaxGrad(dose, eMax, ed50, ...) sigEmax(dose, e0, eMax, ed50, h) sigEmaxGrad(dose, eMax, ed50, h, ...) exponential(dose, e0, e1, delta) exponentialGrad(dose, e1, delta, ...) quadratic(dose, e0, b1, b2) quadraticGrad(dose, ...) betaMod(dose, e0, eMax, delta1, delta2, scal) betaModGrad(dose, eMax, delta1, delta2, scal, ...) linear(dose, e0, delta) linearGrad(dose, ...) linlog(dose, e0, delta, off = 1) linlogGrad(dose, off, ...) logistic(dose, e0, eMax, ed50, delta) logisticGrad(dose, eMax, ed50, delta, ...) linInt(dose, resp, nodes) linIntGrad(dose, resp, nodes, ...) } \arguments{ \item{dose}{ Dose variable } \item{e0}{ For most models placebo effect. For logistic model left-asymptote parameter, corresponding to a basal effect level (not the placebo effect) } \item{eMax}{ Beta Model: Maximum effect within dose-range\cr Emax, sigmoid Emax, logistic Model: Asymptotic maximum effect } \item{ed50}{ Dose giving half of the asymptotic maximum effect } \item{h}{ Hill parameter, determining the steepness of the model at the ED50 } \item{e1}{ Slope parameter for exponential model} \item{delta}{ Exponential model: Parameter, controlling the convexity of the model.\cr Linear and linlog model: Slope parameter\cr Logistic model: Parameter controlling determining the steepness of the curve} \item{delta1}{ delta1 parameter for beta model } \item{delta2}{ delta2 parameter for beta model} \item{b1}{ first parameter of quadratic model } \item{b2}{ second parameter of quadratic model (controls, whether model is convex or concave) } \item{resp}{ Response values at the nodes for the linInt model} \item{off}{ Offset value to avoid problems with dose=0 (treated as a fixed value, not estimated) } \item{scal}{ Scale parameter (treated as a fixed value, not estimated)} \item{nodes}{ Interpolation nodes for the linear interpolation for the linInt model (treated as a fixed value, not estimated)} \item{...}{ Just included for convenience in the gradient functions, so that for example \code{quadratic(dose, e0=0, b1=1, b2=3)} will not throw an error (although the gradient of the quadratic model is independent of e0, b1 and b2). } } \details{ The \bold{Emax model} is used to represent monotone, concave dose-response shapes. To distinguish it from the more general sigmoid emax model it is sometimes also called hyperbolic emax model. The \bold{sigmoid Emax} model is an extension of the (hyperbolic) Emax model by introducing an additional parameter h, that determines the steepness of the curve at the ed50 value. The sigmoid Emax model describes monotonic, sigmoid dose-response relationships. The \bold{quadratic} model is intended to capture a possible non-monotonic dose-response relationship. The \bold{exponential model} is intended to capture a possible sub-linear or a convex dose-response relationship. The \bold{beta model} is intended to capture non-monotone dose-response relationships and is more flexible than the quadratic model. The kernel of the beta model function consists of the kernel of the density function of a beta distribution on the interval [0,scal]. The parameter scal is not estimated but needs to be set to a value larger than the maximum dose. 
It can be set in most functions (\samp{fitMod}, \samp{Mods}) via the \samp{addArgs} argument, when omitted a value of \samp{1.2*(maximum dose)} is used as default, where the maximum dose is inferred from other input to the respective function. The \bold{linear in log-dose} model is intended to capture concave shapes. The parameter \code{off} is not estimated in the code but set to a pre-specified value. It can be set in most functions (\samp{fitMod}, \samp{Mods}) via the \samp{addArgs} argument, when omitted a value of \samp{0.01*(maximum dose)} is used as default, where the maximum dose is inferred from other input to the respective function. The \bold{logistic model} is intended to capture general monotone, sigmoid dose-response relationships. The logistic model and the sigmoid Emax model are closely related: The sigmoid Emax model is a logistic model in log(dose). The \bold{linInt model} provids linear interpolation of the means at the doses. This can be used as a "nonparametric" estimate of the dose-response curve, but is probably most interesting for specifying a "nonparametric" truth during planning and assess how well parametric models work under a nonparametric truth. For the function \samp{Mods} and \samp{fitMod} the interpolation \samp{nodes} are selected equal to the dose-levels specified. } \value{ Response value for model functions or matrix containing the gradient evaluations. } \references{ MacDougall, J. (2006). Analysis of dose-response studies - Emax model,\emph{in} N. Ting (ed.), \emph{Dose Finding in Drug Development}, Springer, New York, pp. 127--145 Pinheiro, J. C., Bretz, F. and Branson, M. (2006). Analysis of dose-response studies - modeling approaches, \emph{in} N. Ting (ed.). \emph{Dose Finding in Drug Development}, Springer, New York, pp. 146--171 } \examples{ ## some quadratic example shapes quadModList <- Mods(quadratic = c(-0.5, -0.75, -0.85, -1), doses = c(0,1)) plot(quadModList) ## some emax example shapes emaxModList <- Mods(emax = c(0.02,0.1,0.5,1), doses = c(0,1)) plot(emaxModList) ## example for gradient emaxGrad(dose = (0:4)/4, eMax = 1, ed50 = 0.5) ## some sigmoid emax example shapes sigEmaxModList <- Mods(sigEmax = rbind(c(0.05,1), c(0.15,3), c(0.4,8), c(0.7,8)), doses = c(0,1)) plot(sigEmaxModList) sigEmaxGrad(dose = (0:4)/4, eMax = 1, ed50 = 0.5, h = 8) ## some exponential example shapes expoModList <- Mods(exponential = c(0.1,0.25,0.5,2), doses=c(0,1)) plot(expoModList) exponentialGrad(dose = (0:4)/4, e1 = 1, delta = 2) ## some beta model example shapes betaModList <- Mods(betaMod = rbind(c(1,1), c(1.5,0.75), c(0.8,2.5), c(0.4,0.9)), doses=c(0,1), addArgs=list(scal = 1.2)) plot(betaModList) betaModGrad(dose = (0:4)/4, eMax = 1, delta1 = 1, delta2 = 1, scal = 5) ## some logistic model example shapes logistModList <- Mods(logistic = rbind(c(0.5,0.05), c(0.5,0.15), c(0.2,0.05), c(0.2,0.15)), doses=c(0,1)) plot(logistModList) logisticGrad(dose = (0:4)/4, eMax = 1, ed50 = 0.5, delta = 0.05) ## some linInt shapes genModList <- Mods(linInt = rbind(c(0.5,1,1), c(0,1,1), c(0,0,1)), doses=c(0,0.5,1,1.5)) plot(genModList) linIntGrad(dose = (0:4)/4, resp=c(0,0.5,1,1,1), nodes=(0:4)/4) } \seealso{ \code{\link{fitMod}} } DoseFinding/man/MCTtest.Rd0000644000176200001440000001443512323407770015044 0ustar liggesusers\name{MCTtest} \alias{MCTtest} \title{ Performs multiple contrast test } \description{ This function performs a multiple contrast test. 
The contrasts are either directly specified in \samp{contMat} or optimal contrasts are derived from the \samp{models} argument. The directionality of the data (i.e. whether an increase or decrease in the response variable is beneficial) is inferred from the \samp{models} object, see \code{\link{Mods}}. For \samp{type = "normal"} an ANCOVA model based on a homoscedastic normality assumption (with additive covariates specified in \samp{addCovars}) is fitted. For \samp{type = "general"} it is assumed that multivariate normally distributed estimates are specified in \samp{resp} with covariance given by \samp{S}, and the contrast test statistic is calculated based on this assumption. The degrees of freedom are specified in \samp{df}. } \usage{ MCTtest(dose, resp, data = NULL, models, S = NULL, type = c("normal", "general"), addCovars = ~1, placAdj = FALSE, alpha = 0.025, df = NULL, critV = NULL, pVal = TRUE, alternative = c("one.sided", "two.sided"), na.action = na.fail, mvtcontrol = mvtnorm.control(), contMat = NULL) } \arguments{ \item{dose, resp}{ Either vectors of equal length specifying dose and response values, or names of variables in the data frame specified in \samp{data}. } \item{data}{ Data frame containing the variables referenced in \samp{dose} and \samp{resp}. If \samp{data} is not specified it is assumed that \samp{dose} and \samp{resp} are numeric vectors (and not names of variables in a data frame). } \item{models}{ An object of class \samp{Mods}, see \code{\link{Mods}} for details } \item{S}{ The covariance matrix of \samp{resp} when \samp{type = "general"}, see Description. } \item{type}{ Determines whether inference is based on an ANCOVA model under a homoscedastic normality assumption (when \samp{type = "normal"}), or estimates at the doses and their covariance matrix and degrees of freedom are specified directly in \samp{resp}, \samp{S} and \samp{df}. See also \code{\link{fitMod}} and Pinheiro et al. (2013). } \item{addCovars}{ Formula specifying additive linear covariates (for \samp{type = "normal"}) } \item{placAdj}{ Logical, if true, it is assumed that placebo-adjusted estimates are specified in \samp{resp} (only possible for \samp{type = "general"}). } \item{alpha}{ Significance level for the multiple contrast test } \item{df}{ Specify the degrees of freedom to use in case \samp{type = "general"}. If this argument is missing \samp{df = Inf} is used (which corresponds to the multivariate normal distribution). For type = "normal" the degrees of freedom deduced from the AN(C)OVA fit are used and this argument is ignored. } \item{critV}{ Supply a pre-calculated critical value. If this argument is NULL, no critical value will be calculated and the test decision is based on the p-values. If \samp{critV = TRUE} the critical value will be calculated. } \item{pVal}{ Logical determining whether p-values should be calculated. } \item{alternative}{ Character determining the alternative for the multiple contrast trend test. } \item{na.action}{ A function which indicates what should happen when the data contain NAs. } \item{mvtcontrol}{ A list specifying additional control parameters for the \samp{qmvt} and \samp{pmvt} calls in the code, see also \code{\link{mvtnorm.control}} for details. } \item{contMat}{ Contrast matrix to apply to the ANCOVA dose-response estimates. The contrasts need to be in the columns of the matrix (i.e. the column sums need to be 0). } } \details{ Integrals over the multivariate t and multivariate normal distribution are calculated using the \samp{mvtnorm} package.
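The accuracy of these numerical integrations can be tuned via the \samp{mvtcontrol} argument; for example (a minimal sketch, reusing the \samp{biom} data and the \samp{modlist} object from the examples below): \preformatted{## tighter integration tolerance for the multiplicity adjustment
MCTtest(dose, resp, biom, models = modlist,
        mvtcontrol = mvtnorm.control(maxpts = 100000, abseps = 1e-04))
}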
} \value{ An object of class MCTtest, a list containing the output. } \references{ Hothorn, T., Bretz, F., and Westfall, P. (2008). Simultaneous Inference in General Parametric Models, \emph{Biometrical Journal}, \bold{50}, 346--363 Pinheiro, J. C., Bornkamp, B., Glimm, E. and Bretz, F. (2014) Model-based dose finding under model uncertainty using general parametric models, \emph{Statistics in Medicine}, \bold{33}, 1646--1661 } \author{ Bjoern Bornkamp } \seealso{ \code{\link{powMCT}}, \code{\link{optContr}} } \examples{ ## example without covariates data(biom) ## define shapes for which to calculate optimal contrasts modlist <- Mods(emax = 0.05, linear = NULL, logistic = c(0.5, 0.1), linInt = c(0, 1, 1, 1), doses = c(0, 0.05, 0.2, 0.6, 1)) m1 <- MCTtest(dose, resp, biom, models=modlist) ## now calculate critical value (but not p-values) m2 <- MCTtest(dose, resp, biom, models=modlist, critV = TRUE, pVal = FALSE) ## now hand over critical value m3 <- MCTtest(dose, resp, biom, models=modlist, critV = 2.24) ## example with covariates data(IBScovars) modlist <- Mods(emax = 0.05, linear = NULL, logistic = c(0.5, 0.1), linInt = c(0, 1, 1, 1), doses = c(0, 1, 2, 3, 4)) MCTtest(dose, resp, IBScovars, models = modlist, addCovars = ~gender) ## example using general approach (fitted on placebo-adjusted scale) ancMod <- lm(resp~factor(dose)+gender, data=IBScovars) ## extract estimates and information to feed into MCTtest drEst <- coef(ancMod)[2:5] vc <- vcov(ancMod)[2:5, 2:5] doses <- 1:4 MCTtest(doses, drEst, S = vc, models = modlist, placAdj = TRUE, type = "general", df = Inf) ## example with general alternatives handed over data(biom) ## calculate contrast matrix for the step-contrasts ## represent them as linInt models models <- Mods(linInt=rbind(c(1,1,1,1), c(0,1,1,1), c(0,0,1,1), c(0,0,0,1)), doses=c(0,0.05,0.2,0.6,1)) plot(models) ## now calculate optimal contrasts for these means ## use weights from actual sample sizes weights <- as.numeric(table(biom$dose)) contMat <- optContr(models, w = weights) ## plot contrasts plot(contMat) ## perform multiple contrast test MCTtest(dose, resp, data=biom, contMat = contMat) ## example for using the Dunnett contrasts ## Dunnett contrasts doses <- sort(unique(biom$dose)) contMat <- rbind(-1, diag(4)) rownames(contMat) <- doses colnames(contMat) <- paste("D", doses[-1], sep="") MCTtest(dose, resp, data=biom, contMat = contMat) } DoseFinding/man/Mods.Rd0000644000176200001440000001767012300143062014411 0ustar liggesusers\name{Mods} \alias{Mods} \alias{getResp} \alias{plot.Mods} \title{ Define dose-response models } \description{ The Mods functions allows to define a set of dose-response models. The function is used as input object for a number of other different functions. The dose-response models used in this package (see \code{\link{drmodels}} for details) are of form \deqn{f(d) = \theta_0+\theta_1 f^0(d,\theta_2)}{f(d) = theta0+theta1 f0(d,theta2)} where the parameter \eqn{\theta_2}{theta2} is the only non-linear parameter and can be one- or two-dimensional, depending on the used model. One needs to hand over the effect at placebo and the maximum effect in the dose range, from which \eqn{\theta_0,\theta_1}{theta0,theta1} are then back-calculated, the output object is of class \samp{"Mods"}. This object can form the input for other functions to extract the mean response (\samp{getResp}) or target doses (\code{\link{TD}} and \code{\link{ED}}) corresponding to the models. 
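For instance (a minimal sketch with arbitrary guesstimates; see the examples below for a full specification): \preformatted{mods <- Mods(emax = 0.05, linear = NULL, doses = c(0, 0.05, 0.2, 0.6, 1))
getResp(mods, doses = c(0, 0.1, 0.5, 1))  ## model means at selected doses
TD(mods, Delta = 0.3)   ## smallest dose achieving an effect of 0.3 over placebo
ED(mods, p = 0.5)       ## dose giving 50\% of the maximum effect
}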
  It is also needed as input to the functions \code{\link{powMCT}} and
  \code{\link{optDesign}}.

  Some models, for example the beta model (\samp{scal}) and the linlog
  model (\samp{off}), have parameters that are not estimated by the
  code; they need to be specified via the \samp{addArgs} argument.

  NOTE: If a decreasing effect is beneficial for the considered
  response variable it needs to be specified here, either by using
  \samp{direction = "decreasing"} or by specifying a negative
  \samp{maxEff} argument.
}
\usage{
Mods(..., doses, placEff = 0, maxEff,
     direction = c("increasing", "decreasing"), addArgs=NULL,
     fullMod = FALSE)

getResp(fmodels, doses)

\method{plot}{Mods}(x, nPoints = 200, superpose = FALSE,
     xlab = "Dose", ylab = "Model means", modNams = NULL,
     plotTD = FALSE, Delta, ...)
}
\arguments{
  \item{...}{
    In function Mods:\cr
    Dose-response model names with parameter values specifying the
    guesstimates for the \eqn{\theta_2}{theta2} parameters. See
    \code{\link{drmodels}} for a complete list of dose-response models
    implemented. See below for an example specification.\cr
    \cr
    In function plot.Mods:\cr
    Additional arguments to the \samp{xyplot} call.
  }
  \item{doses}{
    Dose levels to be used; these need to include placebo.
  }
  \item{addArgs}{
    List containing two entries named "scal" and "off" for the
    "betaMod" and "linlog" models. When addArgs is NULL the following
    defaults are used:
    \samp{list(scal = 1.2*max(doses), off = 0.01*max(doses), nodes = doses)}.
  }
  \item{fullMod}{
    Logical determining whether the model parameters specified in the
    Mods function (via the ... argument) should be interpreted as
    standardized or full model parameters.
  }
  \item{placEff, maxEff}{
    Specify the placebo effect and the maximum effect over placebo.
    Either a numeric vector of the same size as the number of candidate
    models or of length one.\cr
    When these parameters are not specified, \samp{placEff = 0} is
    assumed; for \samp{maxEff}, 1 is assumed if
    \samp{direction = "increasing"} and -1 is assumed if
    \samp{direction = "decreasing"}.
  }
  \item{direction}{
    Character determining whether the beneficial direction is
    \samp{increasing} or \samp{decreasing} with increasing dose levels.
    This argument is ignored if \samp{maxEff} is specified.
  }
  \item{fmodels}{
    An object of class Mods
  }
  \item{Delta}{
    Delta: The target effect size used for the target dose (TD) (Delta
    should be > 0).
  }
  \item{x}{
    An object of class Mods
  }
  \item{nPoints}{
    Number of points for plotting
  }
  \item{superpose}{
    Logical determining whether model plots should be superposed
  }
  \item{xlab, ylab}{
    Labels for the x-axis and y-axis.
  }
  \item{modNams}{
    When \samp{modNams == NULL}, the names for the panels are determined
    by the underlying model functions, otherwise the contents of
    \samp{modNams} are used.
  }
  \item{plotTD}{
    \samp{plotTD} is a logical determining whether the TD should be
    plotted. \samp{Delta} is the target effect to estimate for the TD.
  }
}
\value{
  Returns an object of class \samp{"Mods"}. The object contains the
  specified model parameter values and the derived linear parameters
  (based on \samp{"placEff"} and \samp{"maxEff"}) in a list.
}
\references{
  Pinheiro, J. C., Bornkamp, B., and Bretz, F. (2006).
  Design and analysis of dose finding studies combining multiple
  comparisons and modeling procedures, \emph{Journal of
  Biopharmaceutical Statistics}, \bold{16}, 639--656
}
\author{
  Bjoern Bornkamp
}
\seealso{
  \code{\link{Mods}}, \code{\link{drmodels}}, \code{\link{optDesign}},
  \code{\link{powMCT}}
}
\examples{
## Example on how to specify candidate models
## Suppose one would like to use the following models with the specified
## guesstimates for theta2, in a situation where the doses to be used are
## 0, 0.05, 0.2, 0.6, 1
## Model            guesstimate(s) for theta2 parameter(s) (name)
## linear           -
## linear in log    -
## Emax             0.05 (ED50)
## Emax             0.3 (ED50)
## exponential      0.7 (delta)
## quadratic        -0.85 (delta)
## logistic         0.4 0.09 (ED50, delta)
## logistic         0.3 0.1 (ED50, delta)
## betaMod          0.3 1.3 (delta1, delta2)
## sigmoid Emax     0.5 2 (ED50, h)
## linInt           0.5 0.75 1 1 (perc of max-effect at doses)
## linInt           0.5 1 0.7 0.5 (perc of max-effect at doses)
## for the linInt model one specifies the effect over placebo for
## each active dose.
## The fixed "scal" parameter of the betaMod is set to 1.2
## The fixed "off" parameter of the linlog is set to 0.1
## These (standardized) candidate models can be specified as follows
models <- Mods(linear = NULL, linlog = NULL, emax = c(0.05, 0.3),
               exponential = 0.7, quadratic = -0.85,
               logistic = rbind(c(0.4, 0.09), c(0.3, 0.1)),
               betaMod = c(0.3, 1.3), sigEmax = c(0.5, 2),
               linInt = rbind(c(0.5, 0.75, 1, 1), c(0.5, 1, 0.7, 0.5)),
               doses = c(0, 0.05, 0.2, 0.6, 1),
               addArgs = list(scal=1.2, off=0.1))
## "models" now contains the candidate model set, as placEff, maxEff and
## direction were not specified a placebo effect of 0 and an effect of 1
## is assumed

## display of specified candidate set
plot(models)

## example for creating a candidate set with decreasing response
doses <- c(0, 10, 25, 50, 100, 150)
fmodels <- Mods(linear = NULL, emax = 25, logistic = c(50, 10.88111),
                exponential = 85,
                betaMod = rbind(c(0.33, 2.31), c(1.39, 1.39)),
                linInt = rbind(c(0, 1, 1, 1, 1), c(0, 0, 1, 1, 0.8)),
                doses=doses, placEff = 0.5, maxEff = -0.4,
                addArgs=list(scal=200))
plot(fmodels)
## some customizations (different model names, symbols, line-width)
plot(fmodels, lwd = 3, pch = 3, cex=1.2, col="red",
     modNams = paste("mod", 1:8, sep="-"))

## for a full-model object one can calculate the responses
## in a matrix
getResp(fmodels, doses=c(0, 20, 100, 150))

## calculate doses giving an improvement of 0.3 over placebo
TD(fmodels, Delta=0.3, direction = "decreasing")
## discrete version
TD(fmodels, Delta=0.3, TDtype = "discrete", doses=doses, direction = "decreasing")
## doses giving 50\% of the maximum effect
ED(fmodels, p=0.5)
ED(fmodels, p=0.5, EDtype = "discrete", doses=doses)
plot(fmodels, plotTD = TRUE, Delta = 0.3)

## example for specifying all model parameters (fullMod=TRUE)
fmods <- Mods(emax = c(0, 1, 0.1), linear = cbind(c(-0.4,0), c(0.2,0.1)),
              sigEmax = c(0, -1.1, 0.5, 3),
              doses = 0:4, fullMod = TRUE)
getResp(fmods, doses=seq(0,4,length=11))
## calculate doses giving an improvement of 0.3 over placebo
TD(fmods, Delta=0.3)
## discrete version
TD(fmods, Delta=0.3, TDtype = "discrete", doses=0:4)
## doses giving 50\% of the maximum effect
ED(fmods, p=0.5)
ED(fmods, p=0.5, EDtype = "discrete", doses=0:4)
plot(fmods)
}
DoseFinding/man/targdose.Rd0000644000176200001440000000647412126252274015333 0ustar liggesusers\name{Target Doses}
\alias{TD}
\alias{ED}
\title{
  Calculate dose estimates for a fitted dose-response model (via
  \code{\link{fitMod}} or \code{\link{bFitMod}}) or a
  \code{\link{Mods}} object.
}
\usage{
TD(object, Delta, TDtype = c("continuous", "discrete"),
   direction = c("increasing", "decreasing"), doses)

ED(object, p, EDtype = c("continuous", "discrete"), doses)
}
\description{
  The TD (target dose) is defined as the dose that achieves a target
  effect of Delta over placebo (if there are multiple such doses, the
  smallest is chosen):
  \deqn{TD_\Delta = \min \{x|f(x) > f(0)+\Delta\}}{TD = min {x|f(x) > f(0)+Delta}}
  If a decreasing trend is beneficial the definition of the TD is
  \deqn{TD_\Delta = \min \{x|f(x) < f(0)-\Delta\}}{TD = min {x|f(x) < f(0)-Delta}}
  When \eqn{\Delta}{Delta} is the clinical relevance threshold, the TD
  is similar to the usual definition of the minimum effective dose (MED).

  The ED (effective dose) is defined as the dose that achieves a
  certain percentage p of the full effect size (within the observed
  dose-range!) over placebo (if there are multiple such doses, the
  smallest is chosen):
  \deqn{ED_p=\min\{x|f(x) > f(0) + p(f(dmax)-f(0))\}}{EDp=min {x|f(x) > f(0) + p(f(dmax)-f(0))}}
  Note that this definition of the EDp is different from the
  traditional definition based on the Emax model, where the EDp is
  defined relative to the \emph{asymptotic} maximum effect (rather than
  the maximum effect in the observed dose-range).
}
\arguments{
  \item{object}{
    An object of class c(Mods, fullMod), DRMod or bFitMod
  }
  \item{Delta, p}{
    Delta: The target effect size used for the target dose (TD) (Delta
    should be > 0).\cr
    p: The percentage (of the maximum effect in the observed dose
    range) used to define the effective dose.
  }
  \item{TDtype, EDtype}{
    Character that determines whether the dose should be treated as a
    continuous variable when calculating the TD/ED or whether the TD/ED
    should be calculated based on a grid of doses specified in
    \samp{doses}
  }
  \item{direction}{
    Direction to be used in defining the TD. This depends on whether an
    increase or decrease of the response variable is beneficial.
  }
  \item{doses}{
    Dose levels to be used (this needs to include placebo); only
    required when \samp{TDtype} or \samp{EDtype} are equal to
    \samp{"discrete"}.
  }
}
\value{
  Returns the dose estimate
}
\author{
  Bjoern Bornkamp
}
\seealso{
  \code{\link{Mods}}, \code{\link{fitMod}}, \code{\link{bFitMod}},
  \code{\link{drmodels}}
}
\examples{
## example for creating a "full-model" candidate set, placebo response
## and maxEff already fixed in Mods call
doses <- c(0, 10, 25, 50, 100, 150)
fmodels <- Mods(linear = NULL, emax = 25, logistic = c(50, 10.88111),
                exponential = 85,
                betaMod = rbind(c(0.33, 2.31), c(1.39, 1.39)),
                linInt = rbind(c(0, 1, 1, 1, 1), c(0, 0, 1, 1, 0.8)),
                doses=doses, placEff = 0, maxEff = 0.4,
                addArgs=list(scal=200))
## calculate doses giving an improvement of 0.3 over placebo
TD(fmodels, Delta=0.3)
## discrete version
TD(fmodels, Delta=0.3, TDtype = "discrete", doses=doses)
## doses giving 50\% of the maximum effect
ED(fmodels, p=0.5)
ED(fmodels, p=0.5, EDtype = "discrete", doses=doses)
plot(fmodels, plotTD = TRUE, Delta = 0.3)
}
DoseFinding/man/MCPMod.Rd0000644000176200001440000002352212323407770014575 0ustar liggesusers\name{MCPMod}
\alias{MCPMod}
\alias{predict.MCPMod}
\alias{plot.MCPMod}
\title{
  MCPMod - Multiple Comparisons and Modeling
}
\description{
  Tests for a dose-response effect using a model-based multiple
  contrast test (see \code{\link{MCTtest}}), selects one (or several)
  model(s) from the significant shapes, and fits them using
  \code{\link{fitMod}}. For details on the method see Bretz et al. (2005).
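  As a rough outline, the three steps can also be carried out "by hand"
  (a schematic sketch only, assuming the \samp{biom} data and a
  candidate set \samp{models} as in the examples below; MCPMod itself
  handles model selection, covariates and dose estimation in more
  generality):
  \preformatted{
## sketch of the individual steps behind MCPMod (not the exact internal code)
tst <- MCTtest(dose, resp, biom, models = models)   # 1. multiple contrast test
fit <- fitMod(dose, resp, biom, model = "emax",     # 2. fit a significant shape
              bnds = defBnds(1)$emax)
TD(fit, Delta = 0.5)                                # 3. estimate the target dose
  }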
}
\usage{
MCPMod(dose, resp, data, models, S = NULL, type = c("normal", "general"),
       addCovars = ~1, placAdj = FALSE, selModel = c("AIC", "maxT", "aveAIC"),
       alpha = 0.025, df = NULL, critV = NULL, doseType = c("TD", "ED"),
       Delta, p, pVal = TRUE, alternative = c("one.sided", "two.sided"),
       na.action = na.fail, mvtcontrol = mvtnorm.control(),
       bnds, control = NULL)

\method{predict}{MCPMod}(object,
        predType = c("full-model", "ls-means", "effect-curve"),
        newdata = NULL, doseSeq = NULL, se.fit = FALSE, ...)

\method{plot}{MCPMod}(x, CI = FALSE, level = 0.95,
        plotData = c("means", "meansCI", "raw", "none"),
        plotGrid = TRUE, colMn = 1, colFit = 1, ...)
}
\arguments{
  \item{dose, resp}{
    Either vectors of equal length specifying dose and response values,
    or names of variables in the data frame specified in \samp{data}.
  }
  \item{data}{
    Data frame containing the variables referenced in \samp{dose} and
    \samp{resp}. If \samp{data} is specified, it is assumed that
    \samp{dose} and \samp{resp} are names of variables in this data
    frame (and not vectors).
  }
  \item{models}{
    An object of class \samp{"Mods"}, see \code{\link{Mods}} for details
  }
  \item{S}{
    The covariance matrix of \samp{resp} when \samp{type = "general"},
    see Description.
  }
  \item{type}{
    Determines whether inference is based on an ANCOVA model under a
    homoscedastic normality assumption (when \samp{type = "normal"}), or
    estimates at the doses and their covariance matrix and degrees of
    freedom are specified directly in \samp{resp}, \samp{S} and
    \samp{df}. See also \code{\link{fitMod}} and Pinheiro et al. (2014).
  }
  \item{addCovars}{
    Formula specifying additive linear covariates (for \samp{type = "normal"})
  }
  \item{placAdj}{
    Logical, if true, it is assumed that placebo-adjusted estimates are
    specified in \samp{resp} (only possible for \samp{type = "general"}).
  }
  \item{selModel}{
    Optional character vector specifying the model selection criterion
    for dose estimation. Possible values are
    \itemize{
      \item \code{AIC}: Selects the model with the smallest AIC (this is
      the default)
      \item \code{maxT}: Selects the model corresponding to the largest
      t-statistic.
      \item \code{aveAIC}: Uses a weighted average of the models
      corresponding to the significant contrasts. The model weights are
      chosen by the formula:
      \eqn{w_i = \exp(-0.5AIC_i)/\sum_i(\exp(-0.5AIC_i))}{w_i = exp(-0.5AIC_i)/sum(exp(-0.5AIC_i))}
      See Buckland et al. (1997) for details.
    }
    For \samp{type = "general"} the "gAIC" is used.
  }
  \item{alpha}{
    Significance level for the multiple contrast test
  }
  \item{df}{
    Specify the degrees of freedom to use in case \samp{type = "general"},
    for the call to \code{\link{MCTtest}} and \code{\link{fitMod}}.
    Infinite degrees of freedom (\samp{df=Inf}) correspond to the
    multivariate normal distribution. For \samp{type = "normal"} the
    degrees of freedom deduced from the AN(C)OVA fit are used and this
    argument is ignored.
  }
  \item{critV}{
    Supply a pre-calculated critical value. If this argument is NULL,
    no critical value will be calculated and the test decision is based
    on the p-values. If \samp{critV = TRUE} the critical value will be
    calculated.
  }
  \item{doseType, Delta, p}{
    \samp{doseType} determines the dose to estimate, ED or TD (see also
    \code{\link{Mods}}), and \samp{Delta} and \samp{p} need to be
    specified depending on whether TD or ED is to be estimated. See
    \code{\link{TD}} and \code{\link{ED}} for details.
  }
  \item{pVal}{
    Logical determining whether p-values should be calculated.
  }
  \item{alternative}{
    Character determining the alternative for the multiple contrast
    trend test.
  }
  \item{na.action}{
    A function which indicates what should happen when the data contain NAs.
  }
  \item{mvtcontrol}{
    A list specifying additional control parameters for the \samp{qmvt}
    and \samp{pmvt} calls in the code, see also
    \code{\link{mvtnorm.control}} for details.
  }
  \item{bnds}{
    Bounds for non-linear parameters. This needs to be a list with list
    entries corresponding to the selected bounds. The names of the list
    entries need to correspond to the model names. The
    \code{\link{defBnds}} function provides the default selection.
  }
  \item{control}{
    Control list for the optimization.\cr
    A list with entries: "nlminbcontrol", "optimizetol" and "gridSize".

    The entry nlminbcontrol needs to be a list and is passed directly
    to the control argument of the nlminb function, which is used
    internally for models with 2 nonlinear parameters (e.g. sigmoid
    Emax or beta model).

    The entry optimizetol is passed directly to the tol argument of the
    optimize function, which is used for models with 1 nonlinear
    parameter (e.g. Emax or exponential model).

    The entry gridSize needs to be a list with entries dim1 and dim2
    giving the size of the grid for the gridsearch in 1d or 2d models.
  }
  \item{object, x}{
    An MCPMod object
  }
  \item{predType, newdata, doseSeq, se.fit, ...}{
    predType determines whether predictions are returned for the full
    model (including potential covariates), the ls-means (SAS type) or
    the effect curve (difference to placebo).

    newdata gives the covariates to use in producing the predictions
    (for \samp{predType = "full-model"}); if missing the covariates
    used for fitting are used.

    doseSeq is the dose sequence at which to produce predictions (for
    \samp{predType = "effect-curve"} and \samp{predType = "ls-means"}).
    If missing the doses used for fitting are used.

    se.fit: logical determining whether the standard error should be
    calculated.

    \ldots: Additional arguments; for plot.MCPMod these are passed to
    plot.DRMod.
  }
  \item{CI, level, plotData, plotGrid, colMn, colFit}{
    Arguments for the plot method: \samp{CI} determines whether
    confidence intervals should be plotted. \samp{level} determines the
    level of the confidence intervals. \samp{plotData} determines how
    the data are plotted: either as means, as means with CI, as raw
    data or not at all. In case of \samp{type = "normal"} and
    covariates the ls-means are displayed; when \samp{type = "general"}
    the option "raw" is not available. \samp{colMn} and \samp{colFit}
    determine the colors of the fitted model and the raw means.
  }
}
\value{
  An object of class \samp{MCPMod}, which contains the fitted
  \samp{MCTtest} object as well as the \samp{DRMod} objects and
  additional information (model selection criteria, dose estimates,
  selected models).
}
\references{
  Bretz, F., Pinheiro, J. C., and Branson, M. (2005), Combining
  multiple comparisons and modeling techniques in dose-response
  studies, \emph{Biometrics}, \bold{61}, 738--748

  Pinheiro, J. C., Bornkamp, B., and Bretz, F. (2006). Design and
  analysis of dose finding studies combining multiple comparisons and
  modeling procedures, \emph{Journal of Biopharmaceutical Statistics},
  \bold{16}, 639--656

  Pinheiro, J. C., Bretz, F., and Branson, M. (2006). Analysis of
  dose-response studies - modeling approaches, \emph{in} N. Ting (ed.),
  \emph{Dose Finding in Drug Development}, Springer, New York,
  pp. 146--171

  Pinheiro, J. C., Bornkamp, B., Glimm, E. and Bretz, F. (2014)
  Model-based dose finding under model uncertainty using general
  parametric models, \emph{Statistics in Medicine}, \bold{33},
  1646--1661

  Bretz, F., Pinheiro, J. C., and Branson, M.
  (2004), On a hybrid method in dose-finding studies, \emph{Methods of
  Information in Medicine}, \bold{43}, 457--460

  Buckland, S. T., Burnham, K. P. and Augustin, N. H. (1997). Model
  selection: an integral part of inference, \emph{Biometrics},
  \bold{53}, 603--618

  Seber, G.A.F. and Wild, C.J. (2003). Nonlinear Regression, Wiley.
}
\author{
  Bjoern Bornkamp
}
\seealso{
  \code{\link{MCTtest}}, \code{\link{fitMod}}, \code{\link{drmodels}}
}
\examples{
data(biom)
## first define candidate model set (only need "standardized" models)
models <- Mods(linear = NULL, emax=c(0.05,0.2), linInt=c(1, 1, 1, 1),
               doses=c(0,0.05,0.2,0.6,1))
## perform MCPMod procedure
MM <- MCPMod(dose, resp, biom, models, Delta=0.5)
## a number of things can be done with an MCPMod object
MM  # print method provides basic information
summary(MM)  # more information
## predict all significant dose-response models
predict(MM, se.fit=TRUE, doseSeq=c(0,0.2,0.4, 0.9, 1), predType="ls-means")
## display all model functions
plot(MM, plotData="meansCI", CI=TRUE)

## now perform model-averaging
MM2 <- MCPMod(dose, resp, biom, models, Delta=0.5, selModel = "aveAIC")
sq <- seq(0,1,length=11)
pred <- predict(MM, doseSeq=sq, predType="ls-means")
modWeights <- MM2$selMod
## model averaged predictions
pred <- do.call("cbind", pred)\%*\%modWeights
## model averaged dose-estimate
TDEst <- MM2$doseEst\%*\%modWeights

## now an example using a general fit and fitting based on placebo
## adjusted first-stage estimates
data(IBScovars)
## ANCOVA fit model including covariates
anovaMod <- lm(resp~factor(dose)+gender, data=IBScovars)
drFit <- coef(anovaMod)[2:5] # placebo adjusted estimates at doses
vCov <- vcov(anovaMod)[2:5,2:5]
dose <- sort(unique(IBScovars$dose))[-1] # no estimate for placebo
## candidate models
models <- Mods(emax = c(0.5, 1), betaMod=c(1,1), doses=c(0,4))
## hand over placebo-adjusted estimates drFit to MCPMod
MM3 <- MCPMod(dose, drFit, S=vCov, models = models, type = "general",
              placAdj = TRUE, Delta=0.2)
plot(MM3, plotData="meansCI")

## The first example, but with critical value handed over
## this is useful, e.g. in simulation studies
MM4 <- MCPMod(dose, resp, biom, models, Delta=0.5, critV = 2.31)
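
## estimating an ED instead of the TD: request doseType = "ED" together
## with p (added illustration, along the lines of the examples above;
## here the dose giving 50 percent of the maximum effect is estimated)
MM5 <- MCPMod(dose, resp, biom, models, doseType = "ED", p = 0.5)
MM5$doseEst
}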