ipred/0000755000176200001440000000000014646210722011361 5ustar liggesusersipred/tests/0000755000176200001440000000000014646200411012515 5ustar liggesusersipred/tests/ipred-bugs.R0000644000176200001440000000336714172231220014706 0ustar liggesuserslibrary(ipred) suppressWarnings(RNGversion("3.5.3")) actversion <- paste(R.version$major, R.version$minor, sep=".") thisversion <- "1.7.0" #if (compareVersion(actversion, thisversion) >= 0) { # RNGversion("1.6.2") #} set.seed(29081975) data("BreastCancer", package = "mlbench") mod <- bagging(Class ~ Cl.thickness + Cell.size + Cell.shape + Marg.adhesion + Epith.c.size + Bare.nuclei + Bl.cromatin + Normal.nucleoli + Mitoses, data=BreastCancer, coob=TRUE) print(mod) print(a <- predict(mod, newdata=BreastCancer)) stopifnot(length(a) == nrow(BreastCancer)) # bagging failed if only one predictor was specified # by Christoph M. Friedrich, April 29th, 2002 X <- as.data.frame(matrix(rnorm(1000), ncol=10)) y <- factor(ifelse(apply(X, 1, mean) > 0, 1, 0)) learn <- cbind(y, X) mt <- bagging(y ~ V1, data=learn, coob=TRUE) # # This won't work because of some difficulties with predict.lda # mt <- bagging(y ~ V1, data=learn, method="double", coob=FALSE) # X <- as.data.frame(matrix(rnorm(1000), ncol=10)) y <- apply(X, 1, mean) + rnorm(nrow(X)) learn <- cbind(y, X) mt <- bagging(y ~ V1, data=learn, coob=TRUE) # cv.numeric and bootest.numeric were broken, check for reasonable values X <- as.data.frame(matrix(rnorm(1000), ncol=10)) y <- apply(X, 1, mean) + rnorm(nrow(X)) learn <- cbind(y, X) newy <- apply(X, 1, mean) + rnorm(nrow(X)) mod <- lm(y ~ ., data=learn) trueerr <- sqrt(mean((newy - fitted(mod))^2)) cverr <- rep(0,5) for (i in 1:5) cverr[i] <- errorest(y ~., data=learn, model=lm)$error booterr <- errorest(y ~., data=learn, model=lm, estimator="boot",est.para=control.errorest(nboot=50))$error print(trueerr/mean(cverr)) print(trueerr/booterr) ipred/tests/ipred-bugs.Rout.save0000644000176200001440000002454314172231220016372 0ustar liggesusers R Under development (unstable) (2019-04-24 r76421) -- "Unsuffered Consequences" Copyright (C) 2019 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. 
> library(ipred) > > suppressWarnings(RNGversion("3.5.3")) > actversion <- paste(R.version$major, R.version$minor, sep=".") > thisversion <- "1.7.0" > > #if (compareVersion(actversion, thisversion) >= 0) { > # RNGversion("1.6.2") > #} > set.seed(29081975) > > data("BreastCancer", package = "mlbench") > mod <- bagging(Class ~ Cl.thickness + Cell.size + + Cell.shape + Marg.adhesion + + Epith.c.size + Bare.nuclei + + Bl.cromatin + Normal.nucleoli + + Mitoses, data=BreastCancer, coob=TRUE) > print(mod) Bagging classification trees with 25 bootstrap replications Call: bagging.data.frame(formula = Class ~ Cl.thickness + Cell.size + Cell.shape + Marg.adhesion + Epith.c.size + Bare.nuclei + Bl.cromatin + Normal.nucleoli + Mitoses, data = BreastCancer, coob = TRUE) Out-of-bag estimate of misclassification error: 0.0439 > > print(a <- predict(mod, newdata=BreastCancer)) [1] benign benign benign malignant benign malignant benign [8] benign benign benign benign benign malignant benign [15] malignant malignant benign benign malignant benign malignant [22] malignant benign malignant benign malignant benign benign [29] benign benign benign benign malignant benign benign [36] benign malignant benign malignant malignant malignant malignant [43] malignant malignant malignant benign malignant benign benign [50] malignant malignant malignant malignant malignant malignant malignant [57] malignant malignant malignant malignant malignant benign malignant [64] malignant benign malignant benign malignant malignant benign [71] benign malignant benign malignant malignant benign benign [78] benign benign benign benign benign benign benign [85] malignant malignant malignant malignant benign benign benign [92] benign benign benign benign benign benign benign [99] malignant malignant malignant malignant benign malignant malignant [106] malignant malignant malignant benign malignant benign malignant [113] malignant malignant benign benign benign malignant benign [120] benign benign benign malignant malignant malignant benign [127] malignant benign malignant benign benign benign malignant [134] benign benign benign benign benign benign benign [141] benign benign malignant benign benign benign malignant [148] benign benign malignant benign malignant malignant benign [155] benign malignant benign benign benign malignant malignant [162] benign benign benign benign benign malignant malignant [169] benign benign benign benign benign malignant malignant [176] malignant benign malignant benign malignant benign benign [183] benign malignant malignant benign malignant malignant malignant [190] benign malignant malignant benign benign benign benign [197] benign benign benign benign malignant malignant benign [204] benign benign malignant malignant benign benign benign [211] malignant malignant benign malignant malignant malignant benign [218] benign malignant benign benign malignant malignant malignant [225] malignant benign malignant malignant benign malignant malignant [232] malignant benign malignant benign benign malignant malignant [239] malignant malignant benign benign benign benign benign [246] benign malignant malignant benign benign benign malignant [253] benign malignant malignant malignant benign benign benign [260] benign malignant malignant malignant malignant malignant benign [267] malignant malignant malignant benign malignant benign malignant [274] malignant benign benign benign benign benign malignant [281] benign benign malignant malignant malignant malignant malignant [288] benign malignant malignant benign 
benign malignant malignant [295] benign malignant benign benign benign malignant malignant [302] benign malignant benign malignant malignant benign benign [309] malignant benign benign benign malignant benign benign [316] malignant malignant malignant benign benign malignant benign [323] benign malignant benign benign malignant benign malignant [330] malignant malignant benign benign malignant malignant benign [337] malignant benign benign malignant malignant benign benign [344] benign malignant benign benign benign malignant malignant [351] benign benign benign malignant benign benign malignant [358] malignant malignant malignant malignant malignant benign benign [365] benign benign malignant malignant benign benign benign [372] benign benign benign benign benign benign benign [379] benign benign benign malignant benign benign benign [386] benign malignant benign benign benign benign malignant [393] benign benign benign benign benign benign benign [400] benign malignant benign benign benign benign benign [407] benign benign benign benign benign benign malignant [414] benign malignant benign malignant benign benign benign [421] benign malignant benign benign benign malignant benign [428] malignant benign benign benign benign benign benign [435] benign malignant malignant benign benign benign malignant [442] benign benign benign benign benign benign benign [449] benign malignant benign benign benign malignant benign [456] malignant malignant malignant benign benign benign benign [463] benign benign benign malignant malignant malignant benign [470] benign benign benign benign benign benign benign [477] benign benign benign malignant benign benign malignant [484] malignant benign benign benign malignant malignant malignant [491] benign malignant benign malignant benign benign benign [498] benign benign benign benign benign benign benign [505] benign benign malignant benign benign benign benign [512] benign benign benign malignant malignant benign benign [519] benign malignant benign benign malignant malignant benign [526] benign benign benign benign benign malignant benign [533] benign benign benign benign benign benign benign [540] benign benign benign benign benign benign benign [547] malignant benign benign malignant benign benign benign [554] benign benign benign benign benign benign benign [561] benign benign benign benign benign malignant benign [568] benign malignant malignant malignant malignant benign benign [575] malignant benign benign benign benign benign benign [582] malignant malignant benign benign benign malignant benign [589] malignant benign malignant malignant malignant benign malignant [596] benign benign benign benign benign benign benign [603] benign malignant malignant malignant benign benign malignant [610] benign malignant malignant malignant benign benign benign [617] benign benign benign benign benign benign benign [624] benign benign benign malignant benign benign benign [631] benign benign benign malignant benign benign malignant [638] benign benign benign benign benign benign benign [645] benign benign benign benign malignant benign benign [652] benign benign benign benign benign benign benign [659] malignant benign benign benign benign benign benign [666] benign benign benign malignant malignant malignant benign [673] benign benign benign benign benign benign benign [680] benign malignant malignant benign benign benign benign [687] benign benign benign benign benign malignant benign [694] benign benign benign malignant malignant malignant Levels: benign 
malignant > stopifnot(length(a) == nrow(BreastCancer)) > > # bagging failed if only one predictor was specified > # by Christoph M. Friedrich, April 29th, 2002 > > X <- as.data.frame(matrix(rnorm(1000), ncol=10)) > y <- factor(ifelse(apply(X, 1, mean) > 0, 1, 0)) > learn <- cbind(y, X) > mt <- bagging(y ~ V1, data=learn, coob=TRUE) > # > # This won't work because of some difficulties with predict.lda > # mt <- bagging(y ~ V1, data=learn, method="double", coob=FALSE) > # > X <- as.data.frame(matrix(rnorm(1000), ncol=10)) > y <- apply(X, 1, mean) + rnorm(nrow(X)) > learn <- cbind(y, X) > mt <- bagging(y ~ V1, data=learn, coob=TRUE) > > # cv.numeric and bootest.numeric were broken, check for reasonable values > X <- as.data.frame(matrix(rnorm(1000), ncol=10)) > y <- apply(X, 1, mean) + rnorm(nrow(X)) > learn <- cbind(y, X) > newy <- apply(X, 1, mean) + rnorm(nrow(X)) > mod <- lm(y ~ ., data=learn) > trueerr <- sqrt(mean((newy - fitted(mod))^2)) > cverr <- rep(0,5) > for (i in 1:5) cverr[i] <- errorest(y ~., data=learn, model=lm)$error > booterr <- errorest(y ~., data=learn, model=lm, + estimator="boot",est.para=control.errorest(nboot=50))$error > print(trueerr/mean(cverr)) [1] 0.9612632 > print(trueerr/booterr) [1] 0.9073771 > > proc.time() user system elapsed 2.980 0.156 3.131 ipred/tests/ipred-smalltest.R0000644000176200001440000000143214172231220015745 0ustar liggesusers library(ipred) suppressWarnings(RNGversion("3.5.3")) # check if SdiffKM computes # # int_start^stop (exp(-h*t) - c)^2 dt # # in the correct way # low-level interface needed myfoo <- function(times, prob, h, window=0.0001) { .Call("SdiffKM", as.double(c(0, times)), as.double(c(prob[1], prob)), as.double(c(h, window)), PACKAGE = "ipred") } # to compare with mexp <- function(start, stop, haz, c=0) { foo <- function(t) exp(-2*haz*t)/(-2*haz) - 2*c*exp(-haz*t)/(-haz) + c^2*t foo(stop) - foo(start) } times <- seq(from=0.01, to=8, by=0.01) for (myc in c(0,0.5,0.9)) { for (h in c(1,2,3)) { prob <- rep(myc, length(times)) a <- round(mexp(0, max(times), h, c=myc),7) b <- round(myfoo(times, prob, h), 7) stopifnot(all.equal(a,b)) } } ipred/tests/ipred-smalltest.Rout.save0000644000176200001440000000306714172231220017440 0ustar liggesusers R Under development (unstable) (2019-04-24 r76421) -- "Unsuffered Consequences" Copyright (C) 2019 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. 
> > library(ipred) > suppressWarnings(RNGversion("3.5.3")) > > # check if SdiffKM computes > # > # int_start^stop (exp(-h*t) - c)^2 dt > # > # in the correct way > > # low-level interface needed > myfoo <- function(times, prob, h, window=0.0001) { + .Call("SdiffKM", as.double(c(0, times)), + as.double(c(prob[1], prob)), as.double(c(h, + window)), PACKAGE = "ipred") + } > > # to compare with > mexp <- function(start, stop, haz, c=0) { + foo <- function(t) + exp(-2*haz*t)/(-2*haz) - 2*c*exp(-haz*t)/(-haz) + c^2*t + foo(stop) - foo(start) + } > > > times <- seq(from=0.01, to=8, by=0.01) > > for (myc in c(0,0.5,0.9)) { + for (h in c(1,2,3)) { + prob <- rep(myc, length(times)) + a <- round(mexp(0, max(times), h, c=myc),7) + b <- round(myfoo(times, prob, h), 7) + stopifnot(all.equal(a,b)) + } + } > > proc.time() user system elapsed 1.696 0.168 1.862 ipred/tests/ipred-segfault.R0000644000176200001440000001046014172231220015550 0ustar liggesuserslibrary("ipred") library("mlbench") library("MASS") library("survival") suppressWarnings(RNGversion("3.5.3")) actversion <- paste(R.version$major, R.version$minor, sep=".") thisversion <- "1.7.0" #if (compareVersion(actversion, thisversion) >= 0) { # RNGversion("1.6.2") #} set.seed(29081975) # Classification learn <- as.data.frame(mlbench.twonorm(200)) test <- as.data.frame(mlbench.twonorm(100)) # bagging mod <- bagging(classes ~ ., data=learn, coob=TRUE, nbagg=10) mod predict(mod)[1:10] # Double-Bagging comb.lda <- list(list(model=lda, predict=function(obj, newdata) predict(obj, newdata)$x)) mod <- bagging(classes ~ ., data=learn, comb=comb.lda, nbagg=10) mod predict(mod, newdata=test[1:10,]) predict(mod, newdata=test[1:10,], agg="aver") predict(mod, newdata=test[1:10,], agg="wei") predict(mod, newdata=test[1:10,], type="prob") predict(mod, newdata=test[1:10,], type="prob", agg="aver") predict(mod, newdata=test[1:10,], type="prob", agg="wei") mypredict.lda <- function(object, newdata) predict(object, newdata = newdata)$class errorest(classes ~ ., data=learn, model=lda, predict=mypredict.lda) errorest(classes ~ ., data=learn, model=lda, predict=mypredict.lda, est.para=control.errorest(k=5, random=FALSE)) lapply(errorest(classes ~ ., data=learn, model=lda, predict=mypredict.lda, est.para=control.errorest(k=5, random=FALSE, getmodels=TRUE))$models, class) errorest(classes ~ ., data=learn, model=bagging, est.para=control.errorest(k=2), nbagg=10) errorest(classes ~ ., data=learn, model=bagging, est.para=control.errorest(k=2), nbagg=10, comb=comb.lda) errorest(classes ~ ., data=learn, model=lda, predict=mypredict.lda, estimator="boot") errorest(classes ~ ., data=learn, model=lda, predict=mypredict.lda, estimator="632plus") # Regression learn <- as.data.frame(mlbench.friedman1(100)) test <- as.data.frame(mlbench.friedman1(100)) # bagging mod <- bagging(y ~ ., data=learn, coob=TRUE, nbagg=10) mod predict(mod)[1:10] predict(mod, newdata=test[1:10,]) predict(mod, newdata=test[1:10,], agg="aver") predict(mod, newdata=test[1:10,], agg="wei") errorest(y ~ ., data=learn, model=lm) errorest(y ~ ., data=learn, model=lm, est.para=control.errorest(k=5, random=FALSE)) lapply(errorest(y ~ ., data=learn, model=lm, est.para=control.errorest(k=5, random=FALSE, getmodels=TRUE))$models, class) errorest(y ~ ., data=learn, model=lm, estimator="boot") # survival learn <- rsurv(100, model="C") test <- rsurv(100, model="C") mod <- bagging(Surv(time, cens) ~ ., data=learn, nbagg=10) mod predict(mod, newdata=test[1:10,]) #errorest(Surv(time, cens) ~ ., data=learn, model=bagging, # 
est.para=list(k=2, random=FALSE), nbagg=5) #errorest(Surv(time, cens) ~ ., data=learn, model=bagging, # estimator="boot", nbagg=5, est.para=list(nboot=5)) #insert control.errorest errorest(Surv(time, cens) ~ ., data=learn, model=bagging, est.para=control.errorest(k=2, random=FALSE), nbagg=5) errorest(Surv(time, cens) ~ ., data=learn, model=bagging, estimator="boot", nbagg=5, est.para=control.errorest(nboot=5)) #lapply(errorest(Surv(time, cens) ~ ., data=learn, model=bagging, # estimator="cv", nbagg=1, est.para=list(k=2, random=FALSE, # getmodels=TRUE))$models, class) #insert control.errorest lapply(errorest(Surv(time, cens) ~ ., data=learn, model=bagging, estimator="cv", nbagg=1, est.para=control.errorest(k=2, random=FALSE, getmodels=TRUE))$models, class) # bundling for regression learn <- as.data.frame(mlbench.friedman1(100)) test <- as.data.frame(mlbench.friedman1(100)) comb <- list(list(model=lm, predict=predict.lm)) modc <- bagging(y ~ ., data=learn, nbagg=10, comb=comb) modc predict(modc, newdata=learn)[1:10] # bundling for survival while(FALSE) { data("GBSG2", package = "ipred") rcomb <- list(list(model=coxph, predict=predict)) mods <- bagging(Surv(time,cens) ~ ., data=GBSG2, nbagg=10, comb=rcomb, control=rpart.control(xval=0)) predict(mods, newdata=GBSG2[1:3,]) # test for method dispatch on integer valued responses y <- sample(1:100, 100) class(y) x <- matrix(rnorm(100*5), ncol=5) mydata <- as.data.frame(cbind(y, x)) cv(y, y ~ ., data=mydata, model=lm, predict=predict) bootest(y, y ~ ., data=mydata, model=lm, predict=predict) bagging(y ~., data=mydata, nbagg=10) } ipred/MD50000644000176200001440000000761714646210722011704 0ustar liggesusers10d5d4768d5ca68f232b2369d8c4a16e *DESCRIPTION 977e05a9ebf9500af96b45199917e62a *NAMESPACE bd957f8c9597e280c5bc486db1b50fed *R/bagging.R c55f9e65194e69cac7b3f6a14819f7ee *R/bootest.R d8b949efde7413d8d639a64be6528403 *R/checkfunArgs.R 85963ba4287c3ffd4c84dbe8a48d9c28 *R/csurv.R 16fcd24ff394cdec7d929f8acacf5823 *R/cv.R b17e241d115e8e9491c5e07e612295fc *R/errorest.R f3f9979d3d1de170acc34bb1cfd15ef6 *R/inbagg.R 9bc7ff831e43ae443c3afd0015f9b45e *R/inclass.R 96677befab2b4535bc23d755ad5f6c92 *R/ipredbagg.R d22859a559e876b2e478d366b705d0a1 *R/ipredknn.R a98cc646bc7f7a9173d511917bc2db34 *R/irpart.R 6100af39d915430ab680a3889d52d4ec *R/kfoldcv.R 7d64c8043f1426fe3f41e324dad74be6 *R/mypredict.lm.R 51cb58bde04a793a35b2caf233517f43 *R/predict.bagging.R 1714695c8d4dd8439fc6d1e3f7968e1c *R/predict.inbagg.R 301f8e08bee09b9817e8ffdce0a4c398 *R/predict.inclass.R 8b4b9b3039c4fb86d2c96555b32872a8 *R/predict.irpart.R 5427f43c2b21d2e7f3c6ff2fc521c33a *R/print.R 7fff7c589cfae0b7e57f6af2bf6974f4 *R/prune.bagging.R ea8bb0575e5ee83b6a4d595212906f83 *R/rsurv.R 065dbafb0e228351e7b2a4e25eccfb0d *R/sbrier.R bb92fccd557c101dc3f52a3c7a7a1961 *R/slda.R 51c9a057a29e6be4477aaf23bcce4000 *R/ssubset.R 860d6ab9df7627b349a2fd9ac71237fe *R/varset.R 50b6f648a4203aa0d8f9ffc284ad4997 *build/vignette.rds f83cb6bdc4e6265a64be0914da7979f6 *cleanup b77f49ce74f90948e4d09122c5fac724 *data/DLBCL.rda b07616370b51419752d4219f1f4f9f55 *data/GlaucomaMVF.rda 1f87b4f0d914888b1be71028fef8d316 *data/Smoking.rda e54b730797d71b1483cc26bfb3ea552b *data/dystrophy.rda 45a8a599f130fd94e2bf0ccea723a290 *inst/COPYRIGHTS 8e0f8b2bd86214181f5f6a1e8641601a *inst/NEWS 82eeec327b400aadc3c7fe0905483d8a *inst/doc/ipred-examples.R 72c8610d330afc2376692437ffd0b5e0 *inst/doc/ipred-examples.Rnw 7a824a464b5a2a03c836faace127feb0 *inst/doc/ipred-examples.pdf ef01004837ec42585a18b1292cc00952 *man/DLBCL.Rd 
3cf72f2749b7029a0b8b685461d99d3c *man/GlaucomaMVF.Rd 8d16887d434ff61979e9f89f26770ec5 *man/Smoking.Rd 10ff39c6b8c15609187444afcf7350d7 *man/bagging.Rd f4694ae7448c30a49814bc23fee777a7 *man/bootest.Rd 920bf08095b01ae4d943fbff21eedb57 *man/control.errorest.Rd b9874254d1a440ce7d1373ddb88ed18b *man/cv.Rd 54ef878e658ab5d41e3a54642ca9a451 *man/dystrophy.Rd ebb22088953b88f1a62151d295fc2ebd *man/errorest.Rd ca29c817b98efbef404f5b8467c796c3 *man/inbagg.Rd ceeaae7b39770e87036bd86c4b899a92 *man/inclass.Rd d34608ac849321130774fc768f28c475 *man/ipred-internal.Rd 86ea0f01231c482541c1ae86fa6f1652 *man/ipredknn.Rd 37eee681bff7ad10df28bb18eab6cf2e *man/kfoldcv.Rd 941c6c8d91a74ae8a046811346664b0b *man/mypredict.lm.Rd d1705bf690f5e92abf7123ca0e0ad2b7 *man/predict.bagging.Rd b3dbc86c0c9cab4b549b162b75248c31 *man/predict.inbagg.Rd b80ad2198613b405333aef1e6e3cc504 *man/predict.inclass.Rd 0da9ab0fcef3c03dc6f04309c8830b83 *man/predict.ipredknn.Rd d45eeb09998f42d52f180a46296b9744 *man/predict.slda.Rd d02437884a49f5b9937791ed6d07c53b *man/print.bagging.Rd 755e22b8d9799ff3d55f7364be332931 *man/print.cvclass.Rd b1ae5acecd41d145898b794567a38231 *man/print.inbagg.Rd 0e78f227c282e6474a5b504a74b97fe2 *man/print.inclass.Rd 8903ad93aa5f9f092faab0db112733bd *man/prune.bagging.Rd d88328ca9e52b501e01d1b291de8f16d *man/rsurv.Rd f617299850199477617f8c80f9967fae *man/sbrier.Rd ac34c8ccf9d10e1e15e7a2138c14b2cb *man/slda.Rd 9000c36afc63d72103df0e5c41dfffc5 *man/summary.bagging.Rd dd36ca065d305401e0326680b5cda910 *man/summary.inbagg.Rd 8ce9a1f1379d492df5aea55687b6b95c *man/summary.inclass.Rd 79fed002bac6fba4b49458b352873e8c *man/varset.Rd 0ac59b9f3655966e0fb52ca8a8b2b27a *src/SdiffKM.c 4c8242f0f0243ec116d9d1dd8ed99150 *src/init.c 48d16c5ed9e2eebd086e1244a0839308 *tests/ipred-bugs.R ee8dd469906916e9c86b5580f51b8bc0 *tests/ipred-bugs.Rout.save 6f1c14a02814a289c65b56125e576d3d *tests/ipred-segfault.R c28fb98cadd385924acb0c03856e4149 *tests/ipred-smalltest.R 2d85b750ceb36f266749f63921c26fa1 *tests/ipred-smalltest.Rout.save 72c8610d330afc2376692437ffd0b5e0 *vignettes/ipred-examples.Rnw c642c366927d8bf2522e3c078f6e34a2 *vignettes/ipred.bib ipred/R/0000755000176200001440000000000014646200411011554 5ustar liggesusersipred/R/ipredbagg.R0000644000176200001440000002043714172231220013625 0ustar liggesusers#$Id: ipredbagg.R,v 1.13 2003/06/11 10:40:17 peters Exp $ workhorse <- function(y, X, control, comb, bcontrol, thisclass, ...) { # This is double-bagging (comb is lda) or bundling (any arbritrary # model in comb) if (!is.data.frame(X)) X <- as.data.frame(X) # check user supplied functions if (!is.list(comb)) stop("comb not a list") N <- nrow(X) mydata <- cbind(data.frame(y), X) mtrees <- vector(mode="list", length=bcontrol$nbagg) for (i in 1:bcontrol$nbagg) { # double-bagging or bundling # comb is a list of lists, each of them having two elements: # model and predict bindx <- sample(1:N, bcontrol$ns, replace=bcontrol$replace) objs <- vector(mode="list", length=length(comb)) addclass <- function() { myindx <- 1:length(comb) for (k in 1:length(comb)) { # put the user supplied models into a try statement # if this fails, simply ignore it. 
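# in detail: each model is fitted inside try(); one that fails on this
# bootstrap sample is removed from myindx and contributes no
# additional predictors to the tree grown below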
# options(show.error.messages = FALSE) oX <- mydata[-bindx,] foo <- try(comb[[k]]$model(y ~ ., data=oX)) if (inherits(foo, "try-error")) { warning("could not build model:") print(foo[1]) foo <- NA myindx <- myindx[-k] } objs[[k]] <- foo # options(show.error.messages = TRUE) } fct <- function(newdata) { # use lexical scoping: return this function for the computation of # the additional predictors if (!is.data.frame(newdata)) newdata <- as.data.frame(newdata) addpred <- c() # the user supplied model failed, ignore it here. if (length(myindx) < 1) { RET <- NULL } else { # compute additional predictors for user supplied models for (k in myindx) addpred <- cbind(addpred, comb[[k]]$predict(objs[[k]], newdata)) # : more informative names??? colnames(addpred) <- paste("addpred", 1:ncol(addpred), sep="") # RET <- addpred } RET } if (length(myindx) < 1) return(NULL) else return(fct) } bfct <- addclass() # may have failed if (!is.null(bfct)) { # grow a tree using the original predictors # from the bootstrap sample and the additional predictors computed on # the bootstrap sample. oX <- cbind(mydata, bfct(X))[bindx,] btree <- rpart(y ~., data=oX, control = control,...) # return this object this <- list(bindx = bindx, btree = btree, bfct=bfct) } else { # return a simple tree if the user supplied model failed. oX <- mydata[bindx,] btree <- rpart(y ~., data=oX, control = control,...) this <- list(bindx = bindx, btree = btree) } class(this) <- thisclass mtrees[[i]] <- this } mtrees } ipredbagg <- function(y, ...) { if(is.null(class(y))) class(y) <- data.class(y) # UseMethod("ipredbagg", y, ...) UseMethod("ipredbagg", y) } ipredbagg.default <- function(y, ...) { stop(paste("Do not know how to handle objects of class", class(y))) } ipredbagg.integer <- function(y, ...) { ipredbagg.numeric(y,...) } ipredbagg.factor <- function(y, X=NULL, nbagg=25, control= rpart.control(minsplit=2, cp=0, xval=0), comb=NULL, coob=FALSE, ns=length(y), keepX = TRUE, ...) { # bagging classification trees if (!is.null(comb) && coob) stop("cannot compute out-of-bag estimate for combined models") if (nbagg == 1 && coob) stop("cannot compute out-of-bag estimate for single tree") # check nbagg if (nbagg < 1) stop("nbagg is not a positive integer") # bagging only if nbagg greater 1, else use the whole sample, i.e. one # simple tree if (nbagg == 1) { REPLACE <- FALSE } else { if (ns < length(y)) { # this is "subagging", i.e. 
sampling ns out of length(y) WITHOUT # replacement REPLACE <- FALSE } else { # the usual bootstrap: n out of n with replacement REPLACE <- TRUE } } if (!is.null(comb)) { # this is rather slow but we need to be as general as possible # with respect to classifiers as well as outcome of prediction (classes, # linear discriminant functions, conditional class probabilities, random # noise, if you like) mtrees <- workhorse(y, X, control, comb, bcontrol=list(nbagg=nbagg, ns=ns, replace=REPLACE), thisclass="sclass") } else { # use an optimized version mydata <- cbind(data.frame(y), X) mtrees <- irpart(y ~ ., data=mydata, control=control, bcontrol=list(nbagg=nbagg, ns=ns, replace=REPLACE)) } # always keep response and predictors as well as a list of nbagg objects # of class "sclass" if (keepX) RET <- list(y=y, X=X, mtrees=mtrees, OOB=coob, comb=!is.null(comb)) else RET <- list(y=y, X=NULL, mtrees=mtrees, OOB=coob, comb=!is.null(comb)) class(RET) <- "classbagg" if (coob) { pred <- predict(RET) ae <- all.equal(levels(pred), levels(RET$y)) if (is.logical(ae) && ae) RET$err <- mean(pred != RET$y, na.rm=TRUE) else RET$err <- mean(as.character(pred) != as.character(RET$y), na.rm=TRUE) } RET } ipredbagg.numeric <- function(y, X=NULL, nbagg=25, control= rpart.control(xval=0), comb=NULL, coob=FALSE, ns=length(y), keepX = TRUE, ...) { # is control meaningful here??? # bagging regression trees if (!is.null(comb) && coob) stop("cannot compute out-of-bag estimate for combined models") if (nbagg == 1 && coob) stop("cannot compute out-of-bag estimate for single tree") # check nbagg if (nbagg < 1) stop("nbagg is not a positive integer") # only bagg if nbagg greater 1, else use the whole sample if (nbagg == 1) { REPLACE <- FALSE } else { if (ns < length(y)) { # this is "subagging", i.e. sampling ns out of length(y) WITHOUT # replacement REPLACE <- FALSE } else { # the usual bootstrap: n out of n with replacement REPLACE <- TRUE } } if (!is.null(comb)) { mtrees <- workhorse(y, X, control, comb, bcontrol=list(nbagg=nbagg, ns=ns, replace=REPLACE), thisclass="sreg") } else { mydata <- cbind(data.frame(y), X) mtrees <- irpart(y ~ ., data=mydata, control=control, bcontrol=list(nbagg=nbagg, ns=ns, replace=REPLACE)) } if (keepX) RET <- list(y=y, X=X, mtrees=mtrees, OOB=coob, comb=!is.null(comb)) else RET <- list(y=y, X=NULL, mtrees=mtrees, OOB=coob, comb=!is.null(comb)) class(RET) <- "regbagg" if (coob) RET$err <- sqrt(mean((predict(RET) - RET$y)^2, na.rm=TRUE)) RET } ipredbagg.Surv <- function(y, X=NULL, nbagg=25, control= rpart.control(xval=0), comb=NULL, coob=FALSE, ns=dim(y)[1], keepX = TRUE, ...) { # is control meaningful here??? # bagging survival trees if (!is.null(comb) && coob) stop("cannot compute out-of-bag estimate for combined models") if (nbagg == 1 && coob) stop("cannot compute out-of-bag estimate for single tree") # check nbagg if (nbagg < 1) stop("nbagg is not a positive integer") # only bagg if nbagg greater 1, else use the whole sample if (nbagg == 1) { REPLACE <- FALSE } else { if (ns < dim(y)[1]) { # this is "subagging", i.e. 
sampling ns out of length(y) WITHOUT # replacement REPLACE <- FALSE } else { # the usual bootstrap: n out of n with replacement REPLACE <- TRUE } } if (!is.null(comb)) { mtrees <- workhorse(y, X, control, comb, bcontrol=list(nbagg=nbagg, ns=ns, replace=REPLACE), thisclass="ssurv") } else { mydata <- cbind(data.frame(y), X) mtrees <- irpart(y ~ ., data=mydata, control=control, bcontrol=list(nbagg=nbagg, ns=ns, replace=REPLACE)) } if (keepX) RET <- list(y=y, X=X, mtrees=mtrees, OOB=coob, comb=!is.null(comb)) else RET <- list(y=y, X=NULL, mtrees=mtrees, OOB=coob, comb=!is.null(comb)) class(RET) <- "survbagg" if (coob) RET$err <- sbrier(RET$y, predict(RET)) RET } ipred/R/inbagg.R0000644000176200001440000002153614172231220013131 0ustar liggesusersworkhorse.inbagg <- function(object, y, X, W, cFUN, w.training.set, y.training.set, bcontrol, control, ...) { formula.list <- object data <- data.frame(y, X, W) mtrees <- vector(mode="list", length=bcontrol$nbagg) if(w.training.set[1] == "all") fit.vals <- 1:length(y) for (i in 1:bcontrol$nbagg) { bindx <- sample(1:length(y), bcontrol$ns, replace=bcontrol$replace) if(w.training.set[1] == "oob") fit.vals <- (-bindx) if(w.training.set[1] == "bag") fit.vals <- bindx objs <- vector(mode="list", length=length(formula.list)) #prediction models for intermediate variables names(objs) <- names(formula.list) addclass <- function() { ##START addclass <- function() for (j in 1:length(formula.list)) { ##Fitting prediction models for intermediates oX <- data[fit.vals, c(paste(formula.list[[j]]$formula[[2]]), attr(terms(formula.list[[j]]$formula, data = data), "term.labels"))] foo <- try(formula.list[[j]]$model(formula.list[[j]]$formula, data = oX)) objs[[j]] <- foo } fct <- function(newdata) { ##START fct <- function(newdata) if (!is.data.frame(newdata)) newdata <- as.data.frame(newdata) add.predictors <- rep(0, nrow(newdata)) for (j in 1:length(formula.list)){ ## predict additional intermediates using fitted models oXnewdata <- newdata[,attr(terms(formula.list[[j]]$formula, data = data), "term.labels")] if(is.null(formula.list[[j]]$predict)) { res <- try(predict(objs[[j]], newdata = oXnewdata)) } else { res <- try(formula.list[[j]]$predict(objs[[j]], newdata = oXnewdata)) } ###FIX: action for class(res) == "try-error" add.predictors <- data.frame(add.predictors, res) } add.predictors <- add.predictors[,-1] if(is.null(dim(add.predictors))) add.predictors <- matrix(add.predictors, ncol = 1) colnames(add.predictors) <- names(formula.list) add.predictors } ##END fct <- function(newdata) return(fct) } ##END addclass <- function() bfct <- addclass() ###bfct is a function (addclass) if (!is.null(bfct)) { expl.cFUN <- attr(terms(as.formula(cFUN$formula), data = data), "term.labels") if(!is.null(cFUN$fixed.function)) { btree <- cFUN } else { W.new <- bfct(X) W.new.names <- sub(".[0-9]$", "", colnames(W.new)) if(y.training.set[1] == "fitted.bag") { ###construct on bag oX <- data.frame(y, X, W.new)[bindx,] right.side <- paste(c(expl.cFUN[!(expl.cFUN %in% W.new.names)], colnames(W.new)[W.new.names %in% expl.cFUN]), collapse = "+") cFUN$formula <- as.formula(paste(cFUN$formula[[2]], "~", right.side)) } if(y.training.set[1] == "original") { ###construct on original variables if(length(W.new.names) > length(colnames(W))) stop("If classifying function is trained on original intermediate, only one predictive model per intermediate can be constructed.") oX <- data.frame(y, X, W[,W.new.names]) names(oX)[(ncol(oX)-ncol(W)+1):ncol(oX)] <- colnames(W.new) } if(y.training.set[1] == 
"fitted.subset") { ###construct on subset oX <- data.frame(y, X, W.new)[!subset,] right.side <- paste(c(expl.cFUN[!(expl.cFUN %in% W.new.names)], colnames(W.new)[W.new.names %in% expl.cFUN]), collapse = "+") cFUN$formula <- as.formula(paste(cFUN$formula[[2]], "~", right.side)) } names(oX)[names(oX) == "y"] <- paste(cFUN$formula[[2]]) btree <- cFUN$model(cFUN$formula, data = oX, ...) btree <- list(model = btree, predict = cFUN$predict) } this <- list(bindx = bindx, btree = btree, bfct=bfct) } else { stop("Predictive function for intermediates not executable: Classifying function can not be applied.") } class(this) <- "thisclass" mtrees[[i]] <- this } mtrees } inbagg <- function(formula, data, ...) UseMethod("inbagg", data) inbagg.default <- function(formula, data,...) { stop(paste("Do not know how to handle objects of class", class(data))) } inbagg.data.frame <- function(formula, data, pFUN=NULL, cFUN=list(model = NULL, predict = NULL, training.set = NULL), nbagg = 25, ns = 0.5, replace = FALSE, ...) { if(!is.function(cFUN)) { if(is.null(cFUN$model)) { cFUN$model <- function(formula, data) rpart(formula, data, control = rpart.control(minsplit=2, cp=0, xval=0)) if(is.null(cFUN$predict)) cFUN$predict <- function(object, newdata) predict(object, newdata, type = "class") if(is.null(cFUN$training.set)) cFUN$trainig.set <- "fitted.bag" } } ##check formula if(missing(formula) || (length(formula) != 3) || (length(attr(terms(formula[-2], data = data), "term.labels")) < 1)) stop("formula missing or incorrect") m <- match.call(expand.dots = FALSE) if(is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) ##editing formula if(length(formula[[2]])==3) { if(!is.function(cFUN)) { if (is.null(cFUN$formula)) y.formula <- as.formula(formula[[2]]) else y.formula <- cFUN$formula } w.formula <- XX~YY w.formula[[2]] <- formula[[2]][[3]] w.formula[[3]] <- formula[[3]] response <- paste(formula[[2]][[2]]) w.names <- attr(terms(as.formula(formula[[2]]), data = data), "term.labels") x.names <- attr(terms(as.formula(formula), data = data), "term.labels") if((length(x.names) == 1) && x.names == ".") x.names <- colnames(data)[!(colnames(data) %in% c(response, w.names))] y <- data[, response] X <- data[, x.names] W <- data[, w.names] if(is.null(dim(X))) X <- matrix(X, ncol = 1, dimnames = list(rownames(W), x.names)) if(is.null(dim(W))) W <- matrix(W, ncol = 1, dimnames = list(rownames(X), w.names)) if(is.function(cFUN)) { y.formula <- as.formula(paste(formula[[2]][[2]], "~", paste(c(x.names, w.names), collapse = "+"))) fixed.function <- cFUN cFUN <- list() cFUN$fixed.function <- fixed.function } cFUN$formula <- y.formula } else { stop(paste("Specified formula has to be of type y~x~w")) } ##remove settings of training.set if(is.null(pFUN$training.set)) w.training.set <- "oob" else w.training.set <- pFUN$training.set[1] pFUN$training.set <- NULL if(is.null(cFUN$training.set)) y.training.set <- "fitted.bag" else y.training.set <- cFUN$training.set[1] cFUN$training.set <- NULL bcontrol <- list(nbagg = nbagg, ns = length(y)*ns, replace = replace) if(is.null(w.formula)) stop("no formula for prediction model specified") ##formula.list : list of lists which specify an abitrary number of models for intermediate variables: ##w1.1, w2.1, w3.1, ...., w2.1, w2.2, w3.1, .... 
where 'w*' is the variable and '.*' describes the model P <- length(pFUN) number.models <- c() for(i in 1:P) { if(is.null(pFUN[[i]]$formula)) pFUN[[i]]$formula <- w.formula number.models <- c(number.models, paste(attr(terms(pFUN[[i]]$formula[-3], data = data), "term.labels"), ".", i, sep = "")) } formula.list <- vector(mode = "list", length= length(number.models)) names(formula.list) <- paste(number.models) for(i in 1:P) { res <- list() Qi <- length(attr(terms(pFUN[[i]]$formula[-3], data = data), "term.labels")) for(j in 1:Qi) { res$formula <- w.formula res$formula[[2]] <- as.name(attr(terms(res$formula[-3], data = data), "term.labels")[j]) res$formula[[3]] <- pFUN[[i]]$formula[[3]] if(res$formula[[3]] == ".") res$formula <- as.formula(paste(res$formula[[2]], "~", paste(x.names, collapse= "+"))) res$model <- pFUN[[i]]$model res$predict <- pFUN[[i]]$predict formula.list[[paste(res$formula[[2]], ".", i, sep = "")]] <- res } } ##apply res <- workhorse.inbagg(object = formula.list, y = y, X = X, W = W, cFUN = cFUN, w.training.set = w.training.set, y.training.set = y.training.set, bcontrol = bcontrol, ...) RET <- list(mtrees = res, y = y, W = W, X = X) class(RET) <- "inbagg" RET } print.inbagg <- function(x, ...) { q <- length(x$mtrees) intermediates <- attr(x$W, "names") text.intermediates <- paste("Indirect bagging, with", q, "bootstrap samples and intermediate variables: \n", paste(intermediates, collapse = " ")) cat("\n", text.intermediates, "\n") } summary.inbagg <- function(object, ...) { class(object) <- "summary.inbagg" object } print.summary.inbagg <- function(x, ...) { q <- length(x$mtrees) intermediates <- attr(x$W, "names") text.intermediates <- paste("Indirect bagging, with", q, "bootstrap samples and intermediate variables:", paste(intermediates, collapse = " ")) cat("\n", text.intermediates, "\n") for(i in 1:length(x)) { print(x$mtrees[[i]]) } } ipred/R/print.R0000644000176200001440000001204414172231220013030 0ustar liggesusers#$Id: print.R,v 1.4 2004/02/09 08:08:21 peters Exp $ print.classbagg <- function(x, digits=4, ...) { cat("\n") B <- length(x$mtrees) if (B > 1) method <- paste("Bagging classification trees with", B, "bootstrap replications") else method <- "Classification tree" cat(method, "\n") if (!is.null(x$call)) { cat("\nCall: ") print(x$call) cat("\n") } if (x$OOB) { cat("Out-of-bag estimate of misclassification error: ", round(x$err, digits), "\n") } cat("\n") } print.regbagg <- function(x, digits=4, ...) { cat("\n") B <- length(x$mtrees) if (B > 1) method <- paste("Bagging regression trees with", B, "bootstrap replications") else method <- "Regression tree" cat(method, "\n") if (!is.null(x$call)) { cat("\nCall: ") print(x$call) cat("\n") } if (x$OOB) cat("Out-of-bag estimate of root mean squared error: ", round(x$err, digits), "\n") cat("\n") } print.survbagg <- function(x, digits=4, ...) { cat("\n") B <- length(x$mtrees) if (B > 1) method <- paste("Bagging survival trees with", B, "bootstrap replications") else method <- "Survival tree" cat(method, "\n") if (!is.null(x$call)) { cat("\nCall: ") print(x$call) cat("\n") } if (x$OOB) cat("Out-of-bag estimate of Brier's score: ", round(x$err, digits), "\n") cat("\n") } summary.classbagg <- function(object, ...) { print(object, ...) class(object) <- "summary.bagging" object } summary.regbagg <- function(object, ...) { print(object, ...) class(object) <- "summary.bagging" object } summary.survbagg <- function(object, ...) { print(object, ...) 
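# (same pattern as summary.classbagg and summary.regbagg above:
# print first, then retag so print.summary.bagging handles the result)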
class(object) <- "summary.bagging" object } print.summary.bagging <- function(x, digits = max(3, getOption("digits")-3), ...) { cat("Trees: \n") print(x$mtrees) invisible(x$mtrees) } print.cvclass <- function(x, digits=4, ...) { cat("\n") if (!is.null(x$call)) { cat("Call:\n") print(x$call) cat("\n") } cat("\t", paste(x$k, "-fold", sep=""), "cross-validation estimator of misclassification error \n") cat("\n") cat("Misclassification error: ", round(x$error, digits), "\n") cat("\n") } print.bootestclass <- function(x, digits=4, ...) { if(all(names(x)[names(x)!="call"] %in% c("boot", "632plus"))) { XX <- x for(i in c("boot", "632plus")) { x <- XX[[i]] x$call <- XX[["call"]] cat("\n") if (!is.null(x$call)) { cat("Call:\n") print(x$call) cat("\n") } if (x$bc632plus) { cat("\t", ".632+ Bootstrap estimator of misclassification error \n") } else { cat("\t", "Bootstrap estimator of misclassification error \n") } cat("\t with" , x$nboot, "bootstrap replications\n") cat("\n") cat("Misclassification error: ", round(x$error, digits), "\n") if (!x$bc632plus) cat("Standard deviation:", round(x$sd, digits), "\n") cat("\n") } } else { # if(!all(names(x) %in% c("boot", "632plus"))){ cat("\n") if (!is.null(x$call)) { cat("Call:\n") print(x$call) cat("\n") } if (x$bc632plus) cat("\t", ".632+ Bootstrap estimator of misclassification error \n") else cat("\t", "Bootstrap estimator of misclassification error \n") cat("\t with" , x$nboot, "bootstrap replications\n") cat("\n") cat("Misclassification error: ", round(x$error, digits), "\n") if (!x$bc632plus) cat("Standard deviation:", round(x$sd, digits), "\n") cat("\n") } } print.cvreg <- function(x, digits=4, ...) { cat("\n") if (!is.null(x$call)) { cat("Call:\n") print(x$call) cat("\n") } cat("\t", paste(x$k, "-fold", sep=""), "cross-validation estimator of root mean squared error\n") cat("\n") cat("Root mean squared error: ", round(x$error, digits), "\n") cat("\n") } print.bootestreg <- function(x, digits=4, ...) { cat("\n") if (!is.null(x$call)) { cat("Call:\n") print(x$call) cat("\n") } cat("\t", "Bootstrap estimator of root mean squared error \n") cat("\t with" , x$nboot, "bootstrap replications\n") cat("\n") cat("Root mean squared error: ", round(x$error, digits), "\n") cat("\n") } print.cvsurv <- function(x, digits=4, ...) { cat("\n") if (!is.null(x$call)) { cat("Call:\n") print(x$call) cat("\n") } cat("\t", paste(x$k, "-fold", sep=""), "cross-validation estimator of Brier's score\n") cat("\n") cat("Brier's score: ", round(x$error, digits), "\n") cat("\n") } print.bootestsurv <- function(x, digits=4, ...) 
{ cat("\n") if (!is.null(x$call)) { cat("Call:\n") print(x$call) cat("\n") } cat("\t", "Bootstrap estimator of Brier's score\n") cat("\t with" , x$nboot, "bootstrap replications\n") cat("\n") cat("Brier's score: ", round(x$error, digits), "\n") cat("\n") } ipred/R/checkfunArgs.R0000644000176200001440000000152014172231220014274 0ustar liggesusers# $Id: checkfunArgs.R,v 1.1 2003/02/17 09:49:31 hothorn Exp $ checkfunArgs <- function(fun, type=c("model", "predict")) { # check for appropriate arguments of user-supplied function "fun" # this will not work for generics in R < 1.7.0 and therefore not used by # now type <- match.arg(type) if (!is.function(fun)) { warning("fun is not a function") return(FALSE) } funargs <- formals(fun) switch(type, "model"={ if (!all(names(funargs)[1:2] %in% c("formula", "data"))) { warning("fun is not a function with at least 'formula' and 'data' arguments") return(FALSE) } else { return(TRUE) } }, "predict"={ if (length(funargs) < 2) { warnings("fun is not a function with at least 'object' and 'newdata' arguments") return(FALSE) } else { return(TRUE) } }) } ipred/R/ipredknn.R0000644000176200001440000000520714172231220013511 0ustar liggesusers# $Id: ipredknn.R,v 1.5 2005/06/29 08:50:28 hothorn Exp $ # k-NN compatible with the fit(formula) - predict(object) framework ipredknn <- function(formula, data, subset, na.action, k=5, ...) { cl <- match.call() if(missing(formula) || (length(formula) != 3) || (length(attr(terms(formula[-2], data = data), "term.labels")) < 1) || (length(attr(terms(formula[-3], data = data), "term.labels")) != 1)) stop("formula missing or incorrect") m <- match.call(expand.dots = FALSE) if(is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) m[[1]] <- as.name("model.frame") m$... <- NULL m$k <- NULL m <- eval(m, parent.frame()) Terms <- attr(m, "terms") y <- model.extract(m, "response") x <- model.matrix(Terms, m) xvars <- as.character(attr(Terms, "variables"))[-1] if ((yvar <- attr(Terms, "response")) > 0) xvars <- xvars[-yvar] xlev <- if (length(xvars) > 0) { xlev <- lapply(m[xvars], levels) xlev[!sapply(xlev, is.null)] } xint <- match("(Intercept)", colnames(x), nomatch=0) if(xint > 0) x <- x[, -xint, drop=FALSE] RET <- list(learn=list(y=y, X=x)) RET$k <- k RET$terms <- Terms RET$call <- match.call() RET$contrasts <- attr(x, "contrasts") RET$xlevels <- xlev attr(RET, "na.message") <- attr(m, "na.message") if(!is.null(attr(m, "na.action"))) RET$na.action <- attr(m, "na.action") class(RET) <- "ipredknn" RET } predict.ipredknn <- function(object, newdata, type=c("prob", "class"), ...) 
{ type <- match.arg(type) if(!inherits(object, "ipredknn")) stop("object not of class ipredknn") if(!is.null(Terms <- object$terms)) { # # formula fit (only) if(missing(newdata)) newdata <- model.frame(object) else { newdata <- model.frame(as.formula(delete.response(Terms)), newdata, na.action=function(x) x, xlev = object$xlevels) } x <- model.matrix(delete.response(Terms), newdata, contrasts = object$contrasts) xint <- match("(Intercept)", colnames(x), nomatch=0) if(xint > 0) x <- x[, -xint, drop=FALSE] } else { stop("object has no terms element") } # : check for variable names # if(length(colnames(x)) > 0 && # any(colnames(x) != dimnames(object$means)[[2]])) # warning("Variable names in newdata do not match those in object") # RET <- knn(object$learn$X, x, object$learn$y, k=object$k, prob=TRUE) if (type=="prob") return(attr(RET, "prob")) else return(RET) } ipred/R/prune.bagging.R0000644000176200001440000000105714172231220014424 0ustar liggesusers# $Id: prune.bagging.R,v 1.2 2002/09/12 08:59:13 hothorn Exp $ prune.classbagg <- function(tree, cp=0.01,...) { for(i in 1:length(tree$mtrees)) tree$mtrees[[i]]$btree <- prune( tree$mtrees[[i]]$btree, cp=cp, ...) tree } prune.regbagg <- function(tree, cp=0.01,...) { for(i in 1:length(tree$mtrees)) tree$mtrees[[i]]$btree <- prune( tree$mtrees[[i]]$btree, cp=cp, ...) tree } prune.survbagg <- function(tree, cp=0.01,...) { for(i in 1:length(tree$mtrees)) tree$mtrees[[i]]$btree <- prune( tree$mtrees[[i]]$btree, cp=cp, ...) tree } ipred/R/ssubset.R0000644000176200001440000000177114172231220013371 0ustar liggesusers ssubset <- function(y, k, strat=TRUE) { if (!is.factor(y)) stop("y is not of class factor") N <- length(y) nlevel <- table(y) nindx <- list() indx <- 1:N outindx <- list() if (strat) { for (j in 1:length(nlevel)) nindx <- c(nindx, list(indx[which(y == levels(y)[j])])) kmat <- kfoldcv(k, N, nlevel) for (i in 1:k) { sset <- kmat[,i] kindx <- c() for (j in 1:length(nlevel)) { if (i > 1) kindx <- c(kindx, nindx[[j]][(sum(kmat[j, 1:(i-1)])+1):sum(kmat[j,1:i])]) else kindx <- c(kindx, nindx[[j]][1:kmat[j,1]]) } kindx <- kindx[!is.na(kindx)] outindx <- c(outindx, list(kindx)) } return(outindx) } else { kmat <- kfoldcv(k, N) nindx <- indx for (i in 1:k) { if (i > 1) outindx <- c(outindx, list(nindx[(sum(kmat[1:(i-1)])+1):sum(kmat[1:i])])) else outindx <- c(outindx, list(nindx[1:kmat[1]])) } } return(outindx) } ipred/R/errorest.R0000644000176200001440000001302314172231220013537 0ustar liggesusers# $Id: errorest.R,v 1.25 2005/06/29 08:50:28 hothorn Exp $ control.errorest <- function(k= 10, nboot = 25, strat=FALSE, random=TRUE, predictions=FALSE, getmodels=FALSE, list.tindx = NULL) { if (k < 1) { warning("k < 1, using k=10") k <- 10 } if (nboot < 1) { warning("nboot < 1, using nboot=25") nboot <- 25 } if (!is.logical(strat)) { warning("strat is not a logical, using strat=FALSE") strat <- FALSE } if (!is.logical(random)) { warning("random is not a logical, using random=TRUE") random <- TRUE } if (!is.logical(predictions)) { warning("predictions is not a logical, using predictions=FALSE") predictions <- FALSE } if (!is.logical(getmodels)) { warning("getmodel is not a logical, using getmodels=FALSE") getmodels <- FALSE } RET <- list(k=k, nboot=nboot, strat=strat, random=random, predictions=predictions, getmodels=getmodels, list.tindx = list.tindx) return(RET) } errorest <- function(formula, data, ...) UseMethod("errorest", data) errorest.default <- function(formula, data, ...) 
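# typical calls (taken from tests/ipred-segfault.R; mypredict.lda
# extracts the class labels from predict.lda output):
#   mypredict.lda <- function(object, newdata)
#     predict(object, newdata = newdata)$class
#   errorest(classes ~ ., data=learn, model=lda,
#            predict=mypredict.lda)                        # 10-fold CV
#   errorest(classes ~ ., data=learn, model=lda,
#            predict=mypredict.lda, estimator="632plus")   # .632+ bootstrap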
stop(paste("Do not know how to handle objects of class", class(data))) errorest.data.frame <- function(formula, data, subset, na.action=na.omit, model=NULL, predict=NULL, estimator = c("cv", "boot", "632plus"), est.para = control.errorest(), ...) { cl <- match.call() m <- match.call(expand.dots = FALSE) if (length(grep("inclass", paste(m$model))) > 0 || length(grep("inbagg", paste(m$model))) > 0) { RET <- errorestinclass(formula, data=data, subset, na.action, model, predict, estimator, est.para, ...) RET$call <- cl } else { if(missing(formula) || (length(formula) != 3) || (length(attr(terms(formula[-3], data = data), "term.labels")) != 1)) stop("formula missing or incorrect") NOPRED <- (length(attr(terms(formula[-2], data = data), "term.labels")) < 1) if(is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) m[[1]] <- as.name("model.frame") m$... <- NULL m$model <- NULL m$predict <- NULL m$estimator <- NULL m$est.para <- NULL mf <- eval(m, parent.frame()) response <- attr(attr(mf, "terms"), "response") # just extract the data.frame, NA handling here # make sure to leave the time and censoring variable here # for "Surv(time, cens) ~ ." formulas # delete terms attribute attr(mf, "terms") <- NULL y <- mf[,response] if (!NOPRED & !is.Surv(y)) data <- mf else data <- data[complete.cases(data),] if(all(estimator %in% c("boot", "632plus")) & all(c("boot", "632plus") %in% estimator)) { estimator <- paste(sort(estimator), collapse = "_") } else { if(length(estimator) > 1) { estimator <- estimator[1] # warning(paste("Multiple choice of estimators, only", estimator, "is performed")) } else { estimator <- match.arg(estimator) } } if(is.null(model)) stop("no model specified") switch(estimator, "cv" = { RET <- cv(y, formula, data, model=model, predict=predict, k=est.para$k, random=est.para$random, predictions=est.para$predictions, strat=est.para$strat, getmodels=est.para$getmodels, list.tindx = est.para$list.tindx, ...) }, "boot" = { RET <- bootest(y, formula, data, model=model, predict=predict, nboot=est.para$nboot, list.tindx = est.para$list.tindx, predictions = est.para$predictions, ...) }, "632plus" = { RET <- bootest(y, formula, data, model=model, predict=predict, nboot=est.para$nboot, bc632plus=TRUE, list.tindx = est.para$list.tindx, predictions = est.para$predictions, ...) }, "632plus_boot" = { RET <- bootest(y, formula, data, model=model, predict=predict, nboot=est.para$nboot, bc632plus = TRUE, list.tindx = est.para$list.tindx, predictions = est.para$predictions, both.boot = TRUE, ...) } ) } RET$call <- cl return(RET) } errorestinclass <- function(formula, data, subset=NULL, na.action=NULL, model=NULL, predict=NULL, estimator = c("cv", "boot", "632plus"), est.para = control.errorest(), ...) { if (is.null(data)) stop("data argument required but not given") # if (is.null(iclass)) # stop("no class membership variable for indirect classification given") iclass <- paste(formula[[2]][[2]]) if (!(iclass %in% colnames(data))) stop("membership variable not in given data") # # data <- data[complete.cases(data),] # iclassindx <- which(colnames(data) == iclass) y <- data[,iclassindx] if (!is.factor(y)) stop("iclass is not a factor") # X <- data[,-iclassindx] X <- data if(is.null(model)) stop("no classifier specified") switch(estimator, "cv" = { RET <- cv(y, formula, data=X, model=model, predict=predict, k=est.para$k, random=est.para$random, list.tindx = est.para$list.tindx, ...) 
}, "boot" = { RET <- bootest(y, formula, data=X, model=model, predict=predict, nboot=est.para$nboot, list.tindx = est.para$list.tindx, ...) }, "632plus" = { RET <- bootest(y, formula, data=X, model=model, predict=predict, nboot=est.para$nboot, bc632plus=TRUE, list.tindx = est.para$list.tindx, ...) }) RET } ipred/R/csurv.R0000644000176200001440000000376714172231220013052 0ustar liggesusers# $Id: csurv.R,v 1.6 2003/03/28 12:55:32 hothorn Exp $ csurv <- function(newdata, pred, minprob=0, window=0.0001) { N <- nrow(newdata) if (!"hazard" %in% names(attributes(newdata))) stop("hazards attribute to newdata missing") hazards <- attr(newdata, "hazard") error <- rep(0, N) # if there is only one prediction for all observations GETPROB <- TRUE if (inherits(pred, "survfit")) { times <- pred$time # get times predprob <- getsurv(pred, times) # get steps GETPROB <- FALSE } for (i in 1:N) { if (GETPROB) { times <- pred[[i]]$time # get times predprob <- getsurv(pred[[i]], times) # get steps } # compute the integrated squared difference between # KM and S(t) # minprob: stop integration when S(t) < minprob lasttime <- -(log(minprob) / hazards[i]) if (max(times) > lasttime) { thisprob <- predprob[times <= lasttime] thistimes <- times[times <= lasttime] } else { thisprob <- predprob thistimes <- times } error[i] <- .Call(SdiffKM, as.double(c(0,thistimes)), as.double(c(1,thisprob)), as.double(c(hazards[i], window))) # adjust for time scale by last event error[i] <- error[i]/max(thistimes) if (length(unique(hazards)) == 1) { error <- error[i] break } } error <- mean(error) error } foo <- function (time, prob, hazard, window) { myint <- 0 time <- c(0, time) s <- exp(-time * hazard) prob <- c(1, prob) for (i in 1:(length(time)-1)) { d <- time[i+1] - time[i] if (d < window) { myint <- myint + 0.5 * d * ((prob[i] - s[i])^2 + (prob[i] - s[i + 1])^2) } else { k <- ceiling(d/window) wi <- d/k for (j in 1:k) myint <- myint + 0.5 * wi * ((prob[i] - exp(-(time[i] + (j - 1) * wi) * hazard))^2 + (prob[i] - exp(-(time[i] + j * wi) * hazard))^2) } } myint } ipred/R/rsurv.R0000644000176200001440000000374414172231220013064 0ustar liggesusers# $Id: rsurv.R,v 1.5 2003/03/31 08:44:16 peters Exp $ rsurv <- function(N, model=c("A", "B", "C", "D", "tree"), gamma=NULL, fact=1, pnon=10, gethaz=FALSE) { model <- match.arg(model) X <- matrix(runif(N*5), ncol=5) colnames(X) <- paste("X", 1:ncol(X), sep="") switch(model, "A" = { time <- rexp(N) haz <- rep(1, N) }, "B" = { hazard <- as.numeric(X[,1] <= 0.5 & X[,2] > 0.5) time <- rexp(N) time[hazard == 1] <- rexp(sum(hazard==1), exp(3)) haz <- rep(1, N) haz[hazard == 1] <- exp(3) }, "C" = { hazard <- 3*X[,1] + X[,2] haz <- exp(hazard) time <- sapply(haz, rexp, n=1) }, "D" = { hazard <- 3*X[,1] - 3*X[,2] + 4*X[,3] - 2*X[,4] haz <- exp(hazard) time <- sapply(haz, rexp, n=1) }, "tree" = { hazard <- rep(0, nrow(X)) hazard[(X[,1] <= 0.5 & X[,2] <= 0.5)] <- 0 hazard[(X[,1] <= 0.5 & X[,2] > 0.5 & X[,4] <= 0.5)] <- 1 hazard[(X[,1] <= 0.5 & X[,2] > 0.5 & X[,4] > 0.5)] <- 0 hazard[(X[,1] > 0.5 & X[,3] <= 0.5 & X[,5] <= 0.3)] <- 1 hazard[(X[,1] > 0.5 & X[,3] <= 0.5 & X[,5] > 0.3)] <- 2 hazard[(X[,1] > 0.5 & X[,3] > 0.5 & X[,4] <= 0.7)] <- 2 hazard[(X[,1] > 0.5 & X[,3] > 0.5 & X[,4] > 0.7)] <- 3 hazard <- hazard * fact haz <- exp(hazard) time <- sapply(haz, rexp, n=1) if (pnon > 0) X <- cbind(X, matrix(runif(N*pnon), ncol=pnon)) colnames(X) <- paste("X", 1:ncol(X), sep="") }) if (!is.null(gamma)) censtime <- runif(N, min=0, max=gamma) else censtime <- Inf cens <- as.numeric(time <= censtime) time <- 
pmin(time, censtime) simdat <- as.data.frame(cbind(time, cens, X)) if (gethaz) attr(simdat, "hazard") <- haz return(simdat) } ipred/R/cv.R0000644000176200001440000001642414172231220012312 0ustar liggesusers#$Id: cv.R,v 1.21 2004/02/11 09:13:51 peters Exp $ cv <- function(y, ...) { if(is.null(class(y))) class(y) <- data.class(y) UseMethod("cv", y) } cv.default <- function(y, ...) { stop(paste("Do not know how to handle objects of class", class(y))) } cv.integer <- function(y, ...) { cv.numeric(y, ...) } cv.factor <- function(y, formula, data, model, predict, k=10, random=TRUE, strat=FALSE, predictions=NULL, getmodels=NULL, list.tindx = NULL, ...) { # k-fold cross-validation of misclassification error if (!is.data.frame(data)) stop("data is not of class data.frame") N <- length(y) classes <- levels(y) if (is.null(k)) k <- 10 if (is.null(random)) random <- TRUE if (is.null(strat)) strat <- FALSE if (is.null(predictions)) predictions <- FALSE if (is.null(getmodels)) getmodels <- FALSE USEPM <- FALSE if(!is.null(list.tindx)) k <- length(list.tindx) if(!is.null(list.tindx)) { random <- FALSE } # to reproduce results, either use `set.seed' or a fixed partition of # the samples if (random) myindx <- sample(1:N, N) else myindx <- 1:N y <- y[myindx] data <- data[myindx,] # determine an appropriate splitting for the sample size into # k roughly equally sized parts mysplit <- ssubset(y, k, strat=strat) allpred <- vector(mode="character", length=N) fu <- function(x) levels(x)[as.integer(x)] nindx <- 1:N if (getmodels) models <- vector(k, mode="list") for(i in 1:k) { if(!is.null(list.tindx)) { tindx <- list.tindx[[i]] } else { tindx <- mysplit[[i]] } folddata <- subset(data, !(nindx %in% tindx)) mymodel <- model(formula, data=folddata, ...) if (getmodels) models[[i]] <- mymodel # check if mymodel is a function which should be used instead of # predict if (is.function(mymodel)) { if(!is.null(predict) & i == 1) warning("model returns a function and predict is specified, using models output") predict <- mymodel USEPM <- TRUE } # we assume predict to return factor levels if (USEPM) pred <- predict(newdata=data) else pred <- predict(mymodel, newdata = data) if (!is.factor(pred)) stop("predict does not return factor values") pred <- factor(pred, levels=classes) # # there is no c() for factors which preserves the levels, is there? # use characters allpred[tindx] <- fu(pred[tindx]) # } allpred <- factor(allpred, levels=classes) allpred <- allpred[order(myindx)] err <- mean(allpred != y[order(myindx)], na.rm = TRUE) if (predictions) RET <- list(error = err, k = k, predictions=allpred) else RET <- list(error = err, k = k) if (getmodels) RET <- c(RET, models=list(models)) class(RET) <- "cvclass" RET } cv.numeric <- function(y, formula, data, model, predict, k=10, random=TRUE, predictions=NULL, strat=NULL, getmodels=NULL, list.tindx = NULL, ...) 
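# fold sizes come from kfoldcv(); a quick sanity check (illustrative):
#   kfoldcv(10, 97)        # ten fold sizes, e.g. seven 10s and three 9s
#   sum(kfoldcv(10, 97))   # 97, every observation is used exactly once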
{ # k-fold cross-validation of mean squared error if (!is.data.frame(data)) stop("data is not of class data.frame") if(!is.null(list.tindx)) k <- length(list.tindx) N <- length(y) if (is.null(k)) k <- 10 if (is.null(random)) random <- TRUE if (is.null(predictions)) predictions <- FALSE if (is.null(getmodels)) getmodels <- FALSE USEPM <- FALSE # determine an appropriate splitting for the sample size into # k roughly equally sized parts # if(is.null(list.tindx)) { a <- kfoldcv(k, N) # to reproduce results, either use `set.seed' or a fixed partition of # the samples if (random) myindx <- sample(1:N, N) else myindx <- 1:N nindx <- 1:N # } if (getmodels) models <- vector(k, mode="list") allpred <- rep(0, N) for(i in 1:k) { if(!is.null(list.tindx)) { tindx <- list.tindx[[i]] } else { if (i > 1) tindx <- myindx[(sum(a[1:(i-1)])+1):sum(a[1:i])] else tindx <- myindx[1:a[1]] } folddata <- subset(data, !(nindx %in% tindx)) mymodel <- model(formula, data=folddata, ...) if (getmodels) models[[i]] <- mymodel # check of mymodel is a function which should be used instead of # predict if (is.function(mymodel)) { if(!is.null(predict) & i == 1) warning("model returns a function and predict is specified, using models output") predict <- mymodel USEPM <- TRUE } outfolddata <- subset(data, nindx %in% tindx) if (USEPM) pred <- predict(newdata=outfolddata) else pred <- predict(mymodel, newdata = outfolddata) if (!is.numeric(pred)) stop("predict does not return numerical values") allpred[sort(tindx)] <- pred } err <- sqrt(mean((allpred - y)^2, na.rm = TRUE)) if (predictions) RET <- list(error = err, k = k, predictions=allpred) else RET <- list(error = err, k = k) if (getmodels) RET <- c(RET, models=list(models)) class(RET) <- "cvreg" RET } cv.Surv <- function(y, formula, data=NULL, model, predict, k=10, random=TRUE, predictions=FALSE, strat=FALSE, getmodels=NULL, list.tindx = NULL, ...) { # k-fold cross-validation of Brier's score if (is.null(predictions)) predictions <- FALSE if(is.null(random)) random <- TRUE if (is.null(predictions)) predictions <- FALSE if (is.null(strat)) strat <- FALSE if (is.null(getmodels)) getmodels <- FALSE USEPM <- FALSE N <- length(y[,1]) nindx <- 1:N if(is.null(random)) random <- TRUE if(is.null(k)) k <- 10 if (is.null(data)) data <- rep(1, N) if(!is.null(list.tindx)) k <- length(list.tindx) if(is.null(k)) stop("k for k-fold cross-validation is missing") # determine an appropriate splitting for the sample size into # k roughly equally sized parts # if(is.null(list.tindx)) { a <- kfoldcv(k, N) # to reproduce results, either use `set.seed' or a fixed partition of # the samples if (random) myindx <- sample(1:N, N) else myindx <- 1:N # } if (getmodels) models <- vector(k, mode="list") cverr <- c() for(i in 1:k) { if(!is.null(list.tindx)) { tindx <- list.tindx[[i]] } else { if (i > 1) tindx <- myindx[(sum(a[1:(i-1)])+1):sum(a[1:i])] else tindx <- myindx[1:a[1]] } folddata <- subset(data, !(nindx %in% tindx)) mymodel <- model(formula, data=folddata, ...) 
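# fit the user-supplied model on the k-1 training folds only; the held-out fold is predicted further below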
if (getmodels) models[[i]] <- mymodel # check if mymodel is a function which should be used instead of # predict if (is.function(mymodel)) { if(!is.null(predict) & i == 1) warning("model returns a function and predict is specified, using models output") predict <- mymodel USEPM <- TRUE } outfolddata <- subset(data, (nindx %in% tindx)) if (USEPM) pred <- predict(newdata=outfolddata) else pred <- predict(mymodel, newdata = outfolddata) if (is.list(pred)) { if (!inherits(pred[[1]], "survfit") && !inherits(pred, "survfit")) stop("predict does not return a list of survfit objects") } else { stop("predict does not return a list of survfit objects") } err <- sbrier(y[sort(tindx)], pred) cverr <- c(cverr,rep(err, length(tindx))) } RET <- list(error = mean(cverr), k=k) if (getmodels) RET <- c(RET, models=list(models)) class(RET) <- "cvsurv" RET } ipred/R/predict.inbagg.R0000644000176200001440000000231214172231220014551 0ustar liggesuserspredict.inbagg <- function(object, newdata, ...) { if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata) if(any(names(object$W) %in% names(newdata))) newdata <- newdata[!(names(newdata) %in% names(object$W))] NBAGG <- length(object$mtrees) N <- nrow(newdata) classes <- levels(object$y) vote <- matrix(0, nrow=N, ncol=length(classes)) for(i in 1:NBAGG) { intermed <- object$mtrees[[i]]$bfct(newdata) # XX <- data.frame(newdata, intermed) if(!is.null(object$mtrees[[i]]$btree$fixed.function)) { names(intermed) <- sub(".[0-9]$", "", names(intermed)) XX <- data.frame(newdata, intermed) # names(XX)[(ncol(XX)-ncol(intermed)+1):ncol(XX)] <- sub(".[0-9]$", "", names(intermed)) res <- object$mtrees[[i]]$btree$fixed.function(XX) } else { XX <- data.frame(newdata, intermed) if(is.null(object$mtrees[[i]]$btree$predict)) { res <- try(predict(object$mtrees[[i]]$btree$model, newdata = XX, ...)) } else { res <- try(object$mtrees[[i]]$btree$predict(object$mtrees[[i]]$btree$model, newdata = XX, ...)) } } res <- cbind(1:N, res) vote[res] <- vote[res] +1 } RET <- factor(classes[apply(vote, 1, uwhich.max)]) RET } ipred/R/predict.inclass.R0000644000176200001440000000241114172231220014756 0ustar liggesusers# $Id: predict.inclass.R,v 1.19 2003/03/31 08:44:16 peters Exp $ # Additional option type ="class", if intermediate is nominal predict.inclass <- function(object, newdata, ...) { if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata) q <- length(object$model.intermediate) # number of intermediates namen <- names(object$model.intermediate) intermediate <- is.vector(NULL, mode = "NULL") for(i in 1:q) { if(!is.null(object$para.intermediate[[i]][["predict"]])) { RET <- object$para.intermediate[[i]][["predict"]](object$model.intermediate[[i]], newdata = newdata, ...) } else { RET <- predict(object$model.intermediate[[i]], newdata = newdata, ...) } intermediate <- data.frame(intermediate, RET) } intermediate <- intermediate[,-1] names(intermediate) <- namen intermediate <- data.frame(newdata[,!(names(newdata) %in% names(intermediate))], intermediate) if(!is.function(object$para.response)) { if(!is.null(object$para.response[["predict"]])) { RET <- object$para.response[["predict"]](object$model.response, newdata = intermediate, ...) } else { RET <- predict(object$model.response, newdata = intermediate, ...) 
} } else { RET <- object$para.response(intermediate) } return(RET) } ipred/R/slda.R0000644000176200001440000000744614172231220012631 0ustar liggesusers# $Id: slda.R,v 1.9 2005/06/29 08:50:28 hothorn Exp $ # stabilized linear discriminant analysis according to Laeuter & Kropf slda <- function(y, ...) UseMethod("slda") slda.default <- function(y, ...) stop(paste("Do not know how to handle objects of class", class(y))) slda.formula <- function(formula, data, subset, na.action=na.rpart, ...) { cl <- match.call() if(missing(formula) || (length(formula) != 3) || (length(attr(terms(formula[-2], data = data), "term.labels")) < 1) || (length(attr(terms(formula[-3], data = data), "term.labels")) != 1)) stop("formula missing or incorrect") m <- match.call(expand.dots = FALSE) if(is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) m[[1]] <- as.name("model.frame") m$... <- NULL m <- eval(m, parent.frame()) Terms <- attr(m, "terms") grouping <- model.extract(m, "response") x <- model.matrix(Terms, m) xvars <- as.character(attr(Terms, "variables"))[-1] if ((yvar <- attr(Terms, "response")) > 0) xvars <- xvars[-yvar] xlev <- if (length(xvars) > 0) { xlev <- lapply(m[xvars], levels) xlev[!sapply(xlev, is.null)] } xint <- match("(Intercept)", colnames(x), nomatch=0) if(xint > 0) x <- x[, -xint, drop=FALSE] RET <- slda(y=grouping, X=x, ...) RET$terms <- Terms RET$call <- match.call() RET$contrasts <- attr(x, "contrasts") RET$xlevels <- xlev attr(RET, "na.message") <- attr(m, "na.message") if(!is.null(attr(m, "na.action"))) RET$na.action <- attr(m, "na.action") RET } slda.factor <- function(y, X, q=NULL, ...) { p <- ncol(X) # subtract the global mean Xnull <- X - apply(X, 2, mean) if (!is.null(q)) { if (floor(q) != q) stop("q is not an integer") if (q > p) { q <- p warning("q is greater than ncol(X), using q = ncol(X)") } if (q < 1) { q <- 1 warning("q is less than 1, using q = 1") } } # this is S_0 in Kropf (2000) Snull <- cov(Xnull) ewp <- svd(solve(diag(diag(Snull), ncol = ncol(Snull)))%*%Snull) if (!is.complex(ewp$d)) { # determine q by the number of eigenvalues > 1 if (is.null(q)) q <- sum(ewp$d > 1) D <- ewp$v[,1:q] if (q == 1) D <- as.matrix(D) # Xstab is still spherically distributed (Fang & Zhang, Laeuter, Kropf & # Glimm)! } else { D <- diag(p) } Xstab <- as.matrix(X) %*% D colnames(Xstab) <- paste("V", 1:ncol(Xstab), sep="") mylda <- lda(Xstab, grouping = y, ...) RET <- list(scores = D, mylda = mylda) class(RET) <- "slda" RET } predict.slda <- function(object, newdata, ...) { if(!inherits(object, "slda")) stop("object not of class slda") if(!is.null(Terms <- object$terms)) { # # formula fit (only) if(missing(newdata)) newdata <- model.frame(object) else { newdata <- model.frame(as.formula(delete.response(Terms)), newdata, na.action=function(x) x, xlev = object$xlevels) } x <- model.matrix(delete.response(Terms), newdata, contrasts = object$contrasts) xint <- match("(Intercept)", colnames(x), nomatch=0) if(xint > 0) x <- x[, -xint, drop=FALSE] } else { stop("object has no terms element") } if(ncol(x) != nrow(object$scores)) stop("wrong number of variables") # : check for variable names!
# if(length(colnames(x)) > 0 && # any(colnames(x) != dimnames(object$means)[[2]])) # warning("Variable names in newdata do not match those in object") # X <- x %*% object$scores if (inherits(object$mylda, "lda")) return(predict(object$mylda, newdata=as.data.frame(X), ...)) else stop(paste("Do not know how to predict from objects of class", class(object$mylda))) } ipred/R/irpart.R0000644000176200001440000000302214172231220013171 0ustar liggesusers# # use update to fit multiple trees to bootstrap samples # irpart <- function(formula, data=NULL, weights, subset, na.action=na.rpart, method, model=FALSE, x=FALSE, y=TRUE, parms, control, cost, bcontrol, ...) { mc <- match.call() mc$bcontrol <- NULL mc[[1]] <- as.name("rpart") m <- match.call(expand.dots=FALSE) m$model <- m$method <- m$control <- m$bcontrol <- NULL m$x <- m$y <- m$parms <- m$... <- NULL m$cost <- NULL m$na.action <- na.action m[[1]] <- as.name("model.frame.default") m <- eval(m, parent.frame()) init_tree <- eval(mc, parent.frame()) nobs <- length(init_tree$where) if (missing(weights)) { weights <- rep(1.0, nobs) } else { warning("weights argument ignored in irpart") } yclasses <- c(class = "sclass", exp = "ssurv", anova = "sreg", poisson = "sreg") # # Bagging: repeat this several times! # if (is.null(bcontrol)) stop("bcontrol not given") mod <- vector(mode="list", length=bcontrol$nbagg) for (b in 1:bcontrol$nbagg) { if (bcontrol$nbagg > 1) bindx <- sample(1:nobs, bcontrol$ns, replace=bcontrol$replace) else bindx <- 1:nobs tab <- tabulate(bindx, nbins = nobs) mc$data <- m[bindx,,drop = FALSE] ### tab * weights ans <- eval(mc, parent.frame()) # return the appropriate class this <- list(bindx = bindx, btree = ans) class(this) <- yclasses[init_tree$method] mod[[b]] <- this } mod } ipred/R/bagging.R0000644000176200001440000000225514172231220013275 0ustar liggesusers# $Id: bagging.R,v 1.19 2005/06/29 08:50:28 hothorn Exp $ bagging <- function(formula, data, ...) UseMethod("bagging", data) bagging.default <- function(formula, data, ...) stop(paste("Do not know how to handle objects of class", class(data))) bagging.data.frame <- function(formula, data, subset, na.action=na.rpart, ...) { cl <- match.call() if(missing(formula) || (length(formula) != 3) || (length(attr(terms(formula[-2], data = data), "term.labels")) < 1) || (length(attr(terms(formula[-3], data = data), "term.labels")) != 1)) stop("formula missing or incorrect") m <- match.call(expand.dots = FALSE) if(is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) m[[1]] <- as.name("model.frame") m$... <- NULL mf <- eval(m, parent.frame()) response <- attr(attr(mf, "terms"), "response") # just extract the data.frame, no handling of contrasts or NA's here. 
# this is done by rpart or the user supplied methods DATA <- list(y = mf[,response], X = mf[,-response, drop = FALSE]) names(DATA) <- c("y", "X") y <- do.call("ipredbagg", c(DATA, list(...))) y$call <- cl return(y) } ipred/R/kfoldcv.R0000644000176200001440000000120314172231220013317 0ustar liggesusers# $Id: kfoldcv.R,v 1.3 2002/09/12 08:56:42 hothorn Exp $ kfoldcv <- function(k,N, nlevel=NULL) { if (is.null(nlevel)) { # no stratification if (k > N) return(c(rep(1, N), rep(0, k-N))) fl <- floor(N/k) ce <- ceiling(N/k) if (fl == ce) return(rep(fl, k)) else return(c(rep(ce, round((N/k - fl)*k)), rep(fl, round((1 - (N/k - fl))*k)))) } else { # stratification # if (!is.integer(nlevel)) stop("nlevel is not a vector if integers") kmat <- matrix(0, ncol=k, nrow=length(nlevel)) for (i in 1:length(nlevel)) kmat[i,] <- kfoldcv(k, nlevel[i]) return(kmat) } } ipred/R/inclass.R0000644000176200001440000001423314172231220013332 0ustar liggesusers# $Id: inclass.R,v 1.33 2008/08/04 08:18:41 hothorn Exp $ inclass <- function(formula, data, ...) UseMethod("inclass", data) inclass.default <- function(formula, data, ...) { stop(paste("Do not know how to handle objects of class", class(data))) } inclass.data.frame <- function(formula, data, pFUN = NULL, cFUN = NULL, ...) { ##check formula if(missing(formula) || (length(formula) != 3) || (length(attr(terms(formula[-2], data = data), "term.labels")) < 1)) stop("formula missing or incorrect") m <- match.call(expand.dots = FALSE) if(is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) ##editing formula ###main formula if(length(formula[[2]])==3) { if(is.function(cFUN)) y.formula <- formula[[2]] else y.formula <- cFUN$formula w.formula <- XX~YY w.formula[[2]] <- formula[[2]][[3]] w.formula[[3]] <- formula[[3]] response <- paste(formula[[2]][[2]]) w.names <- attr(terms(as.formula(formula[[2]]), data = data), "term.labels") x.names <- attr(terms(as.formula(formula), data = data), "term.labels") if(x.names[1] == ".") x.names <- colnames(data)[!(colnames(data) %in% c(response, w.names))] } else { stop(paste("Specified formula has to be of type y~x~w")) } if(is.null(w.formula)) stop("no formula for prediction model specified") formula.list <- vector(mode = "list", length= length(w.names)) names(formula.list) <- w.names P <- length(pFUN) Qi <- length(w.names) for(j in 1:Qi) { res <- list() res$formula <- w.formula res$formula[[2]] <- as.name(attr(terms(res$formula[-3], data = data), "term.labels")[j]) if(res$formula[[3]] == ".") { res$formula <- as.formula(paste(res$formula[[2]], "~", paste(x.names, collapse= "+"))) } for(i in 1:P) { if(is.null(pFUN[[i]]$formula)) { if(is.null(formula.list[[w.names[j]]]$formula)) formula.list[[w.names[j]]]$formula <- res$formula if(is.null(formula.list[[w.names[j]]]$model)) formula.list[[w.names[j]]]$model <- pFUN[[i]]$model if(is.null(formula.list[[w.names[j]]]$predict)) formula.list[[w.names[j]]]$predict <- pFUN[[i]]$predict } else { QQ <- attr(terms(pFUN[[i]]$formula[-3], data = data), "term.labels") for(k in QQ) { if(w.names[j] == k) { res$formula[[3]] <- pFUN[[i]]$formula[[3]] if(paste(pFUN[[i]]$formula[[3]]) == ".") { res$formula[[3]] <- as.formula(paste(w.names[j], "~", paste(x.names, collapse= "+"))) } formula.list[[w.names[j]]]$formula <- pFUN[[i]]$formula formula.list[[w.names[j]]]$model <- pFUN[[i]]$model formula.list[[w.names[j]]]$predict <- pFUN[[i]]$predict } } } } } if(!is.function(cFUN)) { cFUN$formula <- y.formula if(is.null(cFUN$training.set)) cFUN$training.set <- "original" } result <- 
workhorse.inclass(object = formula.list, data = data, cFUN = cFUN, ...) return(result) } workhorse.inclass <- function(object, data, cFUN, subset, na.action, ...) { formula.list <- object q <- length(formula.list) result <- list() namen <- c() ##model fitting for(i in 1:q) { formula <- formula.list[[i]]$formula ##check necessary?? > if(missing(formula) || (length(formula) != 3) || (length(attr(terms(formula[-2], data = data), "term.labels")) < 1) || (length(attr(terms(formula[-3], data = data), "term.labels")) != 1)) stop("formula missing or incorrect") ## check necessary?? < m <- match.call(expand.dots= FALSE) res <- formula.list[[i]]$model(formula = formula, data = data) namen <- c(namen, as.character(formula[[2]])) result <- c(result, list(res)) } names(result) <- namen if(!is.function(cFUN)) { ###cFUN can be trained on original intermediate variables or on fitted values or on the subset if(!is.null(m$subset) && cFUN$training.set == "subset") dataresp <- data[!subset, ] if(cFUN$training.set == "original") dataresp <- data if(cFUN$training.set == "fitted") { dataresp <- data for(i in 1:q){ if(!is.null(formula.list[[i]]$predict)){ dataresp[,namen[i]] <- formula.list[[i]]$predict(result[[i]], newdata = data)} else { dataresp[,namen[i]] <- predict(result[[i]], newdata = data) } } } model.response <- cFUN$model(as.formula(cFUN$formula), data = dataresp, ...) } else { model.response <- cFUN } ###predict specificatiations are not delivered result <- list("model.intermediate" = result, "model.response" = model.response, "para.intermediate" = object, "para.response" = cFUN) class(result) <- "inclass" return(result) } print.inclass <- function(x, ...) { x <- x$model.intermediate q <- length(x) intermediates <- attr(x, "names") classes <- c() for(i in 1:q) { classes <- c(classes, class(x[[i]])) } text.intermediates <- paste("Indirect classification, with", q, "intermediate variables:") if(length(unique(classes)) == 1) { predictive <- paste("Predictive model per intermediate is", unique(classes)) } else { predictive <- paste("Predictive model per intermediate is \n", paste(intermediates, ": ", classes, "\n ", collapse = "")) } cat("\n", text.intermediates, "\n", intermediates, "\n", "\n", predictive, "\n") } summary.inclass <- function(object, ...) { class(object) <- "summary.inclass" object } print.summary.inclass <- function(x, ...) 
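# print method for summary.inclass objects: like print.inclass, but the fitted intermediate models are printed as well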
{ x <- x$model.intermediate q <- length(x) intermediates <- attr(x, "names") classes <- c() for(i in 1:q) { classes <- c(classes, class(x[[i]])) } text.intermediates <- paste("Indirect classification, with", q, "intermediate variables:") if(length(unique(classes)) == 1) { predictive <- paste("Predictive model per intermediate is", unique(classes)) } else { predictive <- paste("Predictive model per intermediate is", "\n ", paste(intermediates, ": ", classes, "\n ", collapse = "")) } cat("\n", text.intermediates, "\n", intermediates, "\n", "\n", predictive, "\n", "\n", "Models:", "\n") print(x) } ipred/R/sbrier.R0000644000176200001440000001162414172231220013165 0ustar liggesusers# $Id: sbrier.R,v 1.5 2009/03/27 16:18:38 hothorn Exp $ getsurv <- function(obj, times) { # get the survival probability for times from KM curve `obj' if (!inherits(obj, "survfit")) stop("obj is not of class survfit") # class(obj) <- NULL # lt <- length(times) nsurv <- times # if the times are the same, return the km-curve if(length(times) == length(obj$time)) { if (all(times == obj$time)) return(obj$surv) } # otherwise get the km-value for every element of times separatly inside <- times %in% obj$time for (i in (1:lt)) { if (inside[i]) nsurv[i] <- obj$surv[obj$time == times[i]] else { less <- obj$time[obj$time < times[i]] if (length(less) == 0) nsurv[i] <- 1 else nsurv[i] <- obj$surv[obj$time == max(less)] } } nsurv } sbrier <- function(obj, pred, btime = range(obj[,1])) { if(!inherits(obj, "Surv")) stop("obj is not of class Surv") # check for right censoring # class(obj) <- NULL # if (attr(obj, "type") != "right") stop("only right-censoring allowed") N <- nrow(obj) # get the times and censoring of the data, order them with resp. to time time <- obj[,1] ot <- order(time) cens <- obj[ot,2] time <- time[ot] # get the times to compute the (integrated) Brier score over if (is.null(btime)) stop("btime not given") if (length(btime) < 1) stop("btime not given") if (length(btime) == 2) { if (btime[1] < min(time)) warning("btime[1] is smaller than min(time)") if (btime[2] > max(time)) warning("btime[2] is larger than max(time)") btime <- time[time >= btime[1] & time <= btime[2]] } ptype <- class(pred) # S3 workaround if (is.null(ptype)) { if (is.vector(pred)) ptype <- "vector" if (is.list(pred)) ptype <- "list" } # if (ptype == "numeric" && is.vector(pred)) ptype <- "vector" survs <- NULL switch(ptype, survfit = { survs <- getsurv(pred, btime) survs <- matrix(rep(survs, N), nrow=length(btime)) }, list = { if (!inherits(pred[[1]], "survfit")) stop("pred is not a list of survfit objects") if (length(pred) != N) stop("pred must be of length(time)") pred <- pred[ot] survs <- matrix(unlist(lapply(pred, getsurv, times = btime)), nrow=length(btime), ncol=N) }, vector = { if (length(pred) != N) stop("pred must be of length(time)") if (length(btime) != 1) stop("cannot compute integrated Brier score with pred") survs <- pred[ot] }, matrix = { # if (all(dim(pred) == c(length(btime), N))) survs <- pred[,ot] else stop("wrong dimensions of pred") # }) if (is.null(survs)) stop("unknown type of pred") # reverse Kaplan-Meier: estimate censoring distribution ### deal with ties hatcdist <- prodlim(Surv(time, cens) ~ 1,reverse = TRUE) csurv <- predict(hatcdist, times = time, type = "surv") csurv[csurv == 0] <- Inf # hatcdist <- survfit(Surv(time, 1 - cens) ~ 1) # csurv <- getsurv(hatcdist, time) # csurv[csurv == 0] <- Inf # conditional survival for new timepoints csurv_btime <- predict(hatcdist, times = btime, type = "surv") 
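# stabilize the inverse-probability-of-censoring weights: impute missing values and map zeros to Inf, so that the corresponding weights 1/csurv_btime vanish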
csurv_btime[is.na(csurv_btime)] <- min(csurv_btime, na.rm = TRUE) csurv_btime[csurv_btime == 0] <- Inf bsc <- rep(0, length(btime)) # compute Lebesque-integrated Brier score if (length(btime) > 1) { for (j in 1:length(btime)) { help1 <- as.integer(time <= btime[j] & cens == 1) help2 <- as.integer(time > btime[j]) bsc[j] <- mean((0 - survs[j,])^2*help1*(1/csurv) + (1-survs[j,])^2*help2*(1/csurv_btime[j])) } ### apply trapezoid rule idx <- 2:length(btime) RET <- diff(btime) %*% ((bsc[idx - 1] + bsc[idx]) / 2) RET <- RET / diff(range(btime)) ### previously was #diffs <- c(btime[1], btime[2:length(btime)] - # btime[1:(length(btime)-1)]) #RET <- sum(diffs*bsc)/max(btime) names(RET) <- "integrated Brier score" attr(RET, "time") <- range(btime) # compute Brier score at one single time `btime' } else { help1 <- as.integer(time <= btime & cens == 1) help2 <- as.integer(time > btime) cs <- predict(hatcdist, times=btime, type = "surv") ### cs <- getsurv(hatcdist, btime) if (cs == 0) cs <- Inf RET <- mean((0 - survs)^2*help1*(1/csurv) + (1-survs)^2*help2*(1/cs)) names(RET) <- "Brier score" attr(RET, "time") <- btime } RET } ipred/R/predict.irpart.R0000644000176200001440000000347514172231220014636 0ustar liggesusers# # a modified version of `predict.rpart.s' from the rpart package # see COPYRIGHTS for details. # predict.irpart <- function(object, newdata = list(), type = c("vector", "prob", "class", "matrix"), ...) { if(!inherits(object, "rpart")) stop("Not legitimate tree") mtype <- missing(type) type <- match.arg(type) if(missing(newdata)) where <- object$where else { if(is.null(attr(newdata, "terms")) & !inherits(newdata, "rpart.matrix")) { Terms <- delete.response(object$terms) act <- (object$call)$na.action if (is.null(act)) act<- na.rpart newdata <- model.frame(Terms, newdata, na.action = act, xlev=attr(object, "xlevels")) newdata <- getFromNamespace("rpart.matrix", ns = "rpart")(newdata) } where <- getFromNamespace("pred.rpart", ns = "rpart")(object, newdata) } frame <- object$frame method <- object$method ylevels <- attr(object, "ylevels") nclass <- length(ylevels) if(mtype && nclass > 0) type <- "prob" if(type == "vector" || (type=="matrix" && is.null(frame$yval2))) { pred <- frame$yval[where] names(pred) <- names(where) } else if (type == "matrix") { pred <- frame$yval2[where,] dimnames(pred) <- list(names(where), NULL) } else if(type == "class" && nclass > 0) { pred <- factor(ylevels[frame$yval[where]], levels=ylevels) names(pred) <- names(where) } else if (type == "prob" && nclass > 0) { pred <- frame$yval2[where, 1 + nclass + 1:nclass] dimnames(pred) <- list(names(where), ylevels) } else stop("Invalid prediction for rpart object") # Expand out the missing values in the result # But only if operating on the original dataset if (missing(newdata) && !is.null(object$na.action)) pred <- naresid(object$na.action, pred) pred } ipred/R/predict.bagging.R0000644000176200001440000002106614172231220014727 0ustar liggesusers# $Id: predict.bagging.R,v 1.17 2009/03/27 16:18:38 hothorn Exp $ uwhich.max <- function(x) { # need to determine all maxima in order to sample from them wm <- (1:length(x))[x == max(x)] if (length(wm) > 1) wm <- wm[sample(length(wm), 1)] wm } predict.classbagg <- function(object, newdata=NULL, type=c("class", "prob"), aggregation=c("majority", "average", "weighted"), ...) 
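# aggregation = "majority" takes a majority vote over the trees, "average" averages the estimated class probabilities, and "weighted" pools the bootstrap observations of matching terminal nodes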
{ type <- match.arg(type) agg <- match.arg(aggregation) if (missing(newdata)) { if (length(object$mtrees) < 10) stop("cannot compute out-of-bag predictions for small number of trees") OOB <- TRUE if (!is.null(object$X)) newdata <- object$X else stop("cannot compute out-of-bag predictions without object$X!") } else { OOB <- FALSE } if (!is.data.frame(newdata)) newdata <- as.data.frame(newdata) N <- nrow(newdata) if (!object$comb) { tree <- object$mtrees[[1]]$btree Terms <- delete.response(tree$terms) act <- (tree$call)$na.action if (is.null(act)) act<- na.rpart newdata <- model.frame(Terms, newdata, na.action = act, xlev=attr(tree, "xlevels")) newdata <- getFromNamespace("rpart.matrix", ns = "rpart")(newdata) } classes <- levels(object$y) switch(agg, "majority" = { vote <- matrix(0, nrow=N, ncol=length(classes)) for (i in 1:length(object$mtrees)) { if (OOB) { bindx <- object$mtrees[[i]]$bindx if (!is.null(object$mtrees[[i]]$bfct)) stop("cannot compute out-of-bag estimate for combined models!") pred <- predict(object$mtrees[[i]], newdata, type="class") tindx <- cbind((1:N), pred)[-bindx,] } else { tindx <- cbind(1:N, predict(object$mtrees[[i]], newdata, type="class")) } vote[tindx] <- vote[tindx] + 1 } if (type=="class") { RET <- factor(classes[apply(vote, 1, uwhich.max)], levels = classes, labels = classes) } else { RET <- vote/apply(vote, 1, sum) colnames(RET) <- classes } }, "average" = { cprob <- matrix(0, nrow=N, ncol=length(classes)) if (OOB) ncount <- rep(0,N) else ncount <- length(object$mtrees) for (i in 1:length(object$mtrees)) { if (OOB) { bindx <- object$mtrees[[i]]$bindx pred <- predict(object$mtrees[[i]], newdata, type="prob")[-bindx,] tindx <- (1:N)[-bindx] ncount[tindx] <- ncount[tindx] + 1 } else { pred <- predict(object$mtrees[[i]], newdata, type="prob") tindx <- 1:N } cprob[tindx,] <- cprob[tindx,] + pred } switch(type, "class" = { RET <- factor(classes[apply(cprob, 1, uwhich.max)], levels = classes, labels = classes) }, "prob" = { ncount[ncount < 1] <- NA RET <- cprob / ncount colnames(RET) <- classes }) }, "weighted" = { agglsample <- matrix(0, ncol=length(classes), nrow=N) for (i in 1:length(object$mtrees)) { bdata <- object$y[object$mtrees[[i]]$bindx] newpart <- getpartition(object$mtrees[[i]], newdata) oldpart <- object$mtrees[[i]]$btree$where if (OOB) tindx <- (1:N)[-object$mtrees[[i]]$bindx] else tindx <- 1:N for (j in tindx) { aggobs <- table(bdata[oldpart == newpart[j]]) agglsample[j,] <- agglsample[j,] + aggobs } } switch(type, "class" = { RET <- c() for (j in 1:N) RET <- c(RET, uwhich.max(agglsample[j,])) RET <- factor(classes[RET], levels = classes, labels = classes) }, "prob" = { RET <- agglsample / apply(agglsample, 1, sum) colnames(RET) <- classes }) }) RET } predict.sclass <- function(object, newdata=NULL, type=c("class", "prob"), ...) { if (!is.null(object$bfct)) newdata <- cbind(newdata, object$bfct(newdata)) pred <- predict.irpart(object$btree, newdata, type=type) RET <- pred if (type == "class") RET <- as.integer(pred) if (type == "prob" && is.vector(pred)) RET <- cbind(pred, 1 - pred) RET } predict.regbagg <- function(object, newdata=NULL, aggregation=c("average", "weighted"), ...) 
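# aggregation = "average" averages the tree predictions, "weighted" averages the raw bootstrap observations falling into matching terminal nodes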
{ agg <- match.arg(aggregation) if (missing(newdata)) { if (length(object$mtrees) < 10) stop("cannot compute out-of-bag predictions for small number of trees") OOB <- TRUE if (!is.null(object$X)) newdata <- object$X else stop("cannot compute out-of-bag predictions without object$X!") } else { OOB <- FALSE } if (!is.data.frame(newdata)) newdata <- as.data.frame(newdata) N <- nrow(newdata) if (!object$comb) { tree <- object$mtrees[[1]]$btree Terms <- delete.response(tree$terms) act <- (tree$call)$na.action if (is.null(act)) act<- na.rpart newdata <- model.frame(Terms, newdata, na.action = act, xlev=attr(tree, "xlevels")) newdata <- getFromNamespace("rpart.matrix", ns = "rpart")(newdata) } switch(agg, "average" = { cprob <- rep(0, N) if (OOB) ncount <- rep(0,N) else ncount <- length(object$mtrees) for (i in 1:length(object$mtrees)) { if (OOB) { bindx <- object$mtrees[[i]]$bindx if (!is.null(object$mtrees[[i]]$bfct)) stop("cannot compute out-of-bag estimate for combined models!") pred <- predict(object$mtrees[[i]], newdata)[-bindx] tindx <- (1:N)[-bindx] ncount[tindx] <- ncount[tindx] + 1 } else { pred <- predict(object$mtrees[[i]], newdata) tindx <- 1:N } cprob[tindx] <- cprob[tindx] + pred } ncount[ncount < 1] <- NA RET <- cprob / ncount }, "weighted" = { agglsample <- rep(0, N) ncount <- rep(0, N) for (i in 1:length(object$mtrees)) { bdata <- object$y[object$mtrees[[i]]$bindx] newpart <- getpartition(object$mtrees[[i]], newdata) oldpart <- object$mtrees[[i]]$btree$where if (OOB) tindx <- (1:N)[-object$mtrees[[i]]$bindx] else tindx <- 1:N for (j in tindx) { aggobs <- bdata[oldpart == newpart[j]] agglsample[j] <- agglsample[j] + sum(aggobs) ncount[j] <- ncount[j] + length(aggobs) } } ncount[ncount < 1] <- NA RET <- agglsample / ncount }) RET } predict.sreg <- function(object, newdata=NULL, ...) { if (!is.null(object$bfct)) newdata <- cbind(newdata, object$bfct(newdata)) predict.irpart(object$btree, newdata) } predict.survbagg <- function(object, newdata=NULL, ...) 
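# pools the bootstrap observations of matching terminal nodes and returns one Kaplan-Meier fit (survfit object) per observation in newdata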
{ if (missing(newdata)) { if (length(object$mtrees) < 10) stop("cannot compute out-of-bag predictions for small number of trees") OOB <- TRUE if (!is.null(object$X)) newdata <- object$X else stop("cannot compute out-of-bag predictions without object$X!") } else { OOB <- FALSE } if (!is.data.frame(newdata)) newdata <- as.data.frame(newdata) N <- nrow(newdata) if (!object$comb) { tree <- object$mtrees[[1]]$btree Terms <- delete.response(tree$terms) act <- (tree$call)$na.action if (is.null(act)) act<- na.rpart newdata <- model.frame(Terms, newdata, na.action = act, xlev=attr(tree, "xlevels")) newdata <- getFromNamespace("rpart.matrix", ns = "rpart")(newdata) } agglsample <- list() aggcens <- list() for (j in 1:N) { agglsample <- c(agglsample, list(c())) aggcens <- c(aggcens, list(c())) } for (i in 1:length(object$mtrees)) { bdata <- object$y[object$mtrees[[i]]$bindx] newpart <- getpartition(object$mtrees[[i]], newdata) oldpart <- object$mtrees[[i]]$btree$where if (OOB) { if (!is.null(object$mtrees[[i]]$bfct)) stop("cannot compute out-of-bag estimate for combined models!") tindx <- (1:N)[-object$mtrees[[i]]$bindx] } else { tindx <- 1:N } for (j in tindx) { aggobs <- bdata[oldpart == newpart[j],1] agglsample[[j]] <- c(agglsample[[j]], aggobs) aggobs <- bdata[oldpart == newpart[j],2] aggcens[[j]] <- c(aggcens[[j]], aggobs) } } RET <- list() for (j in 1:N) RET <- c(RET, list(survfit(Surv(agglsample[[j]], aggcens[[j]]) ~ 1))) RET } getpartition <- function(object, newdata=NULL) { if (!is.null(object$bfct)) { newdata <- cbind(newdata, object$bfct(newdata)) Terms <- delete.response(object$btree$terms) act <- (object$btree$call)$na.action if (is.null(act)) act<- na.rpart newdata <- model.frame(Terms, newdata, na.action = act, xlev=attr(object$btree, "xlevels")) newdata <- getFromNamespace("rpart.matrix", ns = "rpart")(newdata) } getFromNamespace("pred.rpart", ns = "rpart")(object$btree, newdata) } ipred/R/varset.R0000644000176200001440000000143714172231220013204 0ustar liggesusers# $Id: varset.R,v 1.2 2002/03/26 16:29:15 hothorn Exp $ varset <- function(N, sigma = 0.1, theta = 90, threshold = 0, u = 1:3) { # create U U <- matrix(rep(0, 4), ncol = 2) U[1, 1] <- u[1] U[1, 2] <- u[2] U[2, 1] <- u[3] U[2, 2] <- (theta-u[1]*u[3])/u[2] lambda <- sqrt(U[1, 1]^2 + U[1, 2]^2) U[1, ] <- U[1, ]/lambda lambda <- sqrt(U[2, 1]^2 + U[2, 2]^2) U[2, ] <- U[2, ]/lambda e <- matrix(rnorm(2*N, sd = sigma), ncol = 2, byrow = TRUE) expl <- matrix(rnorm(2*N), ncol = 2, byrow = TRUE) inter <- t(U %*%t(expl) + t(e)) z <- (inter > threshold) resp <- as.factor(ifelse((z[,1] + z[,2]) > 1, 1, 0)) colnames(expl) <- c("x1", "x2") colnames(inter) <- c("y1", "y2") result <- list(explanatory = expl, intermediate = inter, response = resp) return(result) } ipred/R/mypredict.lm.R0000644000176200001440000000126414172231220014305 0ustar liggesusers# $Id: mypredict.lm.R,v 1.7 2003/04/02 11:22:49 peters Exp $ mypredict.lm <- function(object, newdata) { xn <- as.data.frame(newdata) test <- attr(terms(object), "term.labels") xn <- xn[,test] if (!is.null(nrow(xn))) { pred <- rep(NA, nrow(xn)) names(pred) <- row.names(xn) } else { pred <- NA names(pred) <- "1" } # evaluate na.omit (delete lines containing NA) xnn <- na.omit(xn) # attr(xnn, "na.action") returns which na.action is # evaluated, lines and corresponding row.name where NAs occur if(is.null(attr(xnn, "na.action"))) pred <- predict(object, xnn) else pred[-attr(xnn, "na.action")] <- predict(object, xnn) pred } ipred/R/bootest.R0000644000176200001440000002136214172231220013356 0ustar 
liggesusers# $Id: bootest.R,v 1.18 2004/02/09 08:08:21 peters Exp $ bootest <- function(y, ...) { if(is.null(class(y))) class(y) <- data.class(y) UseMethod("bootest", y) } bootest.default <- function(y, ...) { stop(paste("Do not know how to handle objects of class", class(y))) } bootest.integer <- function(y, ...) { bootest.numeric(y, ...) } bootest.factor <- function(y, formula, data, model, predict, nboot=25, bc632plus = FALSE, list.tindx = NULL, predictions = FALSE, both.boot = FALSE, ...) { # bootstrap estimator of misclassification error N <- length(y) nindx <- 1:N if(!is.null(list.tindx)) nboot <- length(list.tindx) bootindx <- matrix(NA, ncol=nboot, nrow=N) if(predictions) { BOOTINDX <- data.frame(matrix(NA, ncol=nboot, nrow=N)) } classes <- levels(y) USEPM <- FALSE if(!is.data.frame(data)) stop("data is not a data.frame") if(nboot <=2) stop("to small number of bootstrap replications") if(is.null(nboot)) stop("number of bootstrap replications is missing") if(!is.null(list.tindx) & length(list.tindx) != nboot) stop(paste("List of selected observations per bootstrap sample has to be", nboot)) for(i in 1:nboot) { if(!is.null(list.tindx)) { tindx <- list.tindx[[i]] if(length(tindx) > N) warning("number of selected observations is larger than the sample size") } else { tindx <- sample(nindx, N, replace = TRUE) } mymodel <- model(formula, data = data[tindx,], ...) # check if mymodel is a function which should be used instead of # predict if (is.function(mymodel)) { if(!is.null(predict) & i == 1) warning("model returns a function and predict is specified, using models output") predict <- mymodel USEPM <- TRUE } if (USEPM) pred <- predict(newdata=data) else pred <- predict(mymodel, newdata = data) if (!is.factor(pred)) stop("predict does not return factor values") pred <- factor(pred, levels=classes)[-tindx] if (length(pred) != length(y[-tindx])) stop("different length of data and prediction") if(predictions) { BOOTINDX[,i] <- factor(BOOTINDX[,i],levels = classes) BOOTINDX[-tindx, i] <- pred } bootindx[-tindx, i] <- (pred != y[-tindx]) } fun <- function(x) ifelse(all(is.na(x)), NA, mean(as.integer(x), na.rm = TRUE)) one <- mean(apply(bootindx, 1, fun), na.rm = TRUE) if (bc632plus) { full.model <- model(formula, data = data, ...) 
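# the .632+ correction also needs the resubstitution error, i.e., the error of a model fitted to and evaluated on the complete sample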
# check if full.model is a function which should be used instead of # predict if (is.function(full.model)) { predict <- full.model USEPM <- TRUE } if (USEPM) full.pred <- predict(newdata=data) else full.pred <- predict(full.model, newdata = data) resubst <- mean(full.pred != y, na.rm = TRUE) err632 <- 0.368*resubst + 0.632*one y <- y[!is.na(y) & !is.na(full.pred)] full.pred <- full.pred[!is.na(y) & !is.na(full.pred)] gamma <- sum(outer(y, full.pred, function(x, y) ifelse(x==y, 0, 1) ))/ (length(y)^2) r <- (one - resubst)/(gamma - resubst) r <- ifelse(one > resubst & gamma > resubst, r, 0) errprime <- min(one, gamma) # weight <- .632/(1-.368*r) # err <- (1-weight)*resubst + weight*one err <- err632 + (errprime - resubst)*(0.368*0.632*r)/(1-0.368*r) if(predictions) RET <- list(error = err, nboot = nboot, bc632plus = TRUE, predictions = BOOTINDX) else RET <- list(error = err, nboot=nboot, bc632plus = TRUE) if(both.boot){ bc632plus <- FALSE RETbc <- RET } } if(!bc632plus) { err <- one expb <- rep(0, nboot) for(i in 1:nboot) expb[i] <- mean(apply(bootindx[,-i], 1, fun), na.rm = TRUE) sdint <- sqrt( ((nboot - 1)/nboot)*sum((expb - mean(expb))^2) ) if(predictions) RET <- list(error = err, sd = sdint, bc632plus = FALSE, nboot = nboot, predictions = BOOTINDX) else RET <- list(error = err, sd=sdint, bc632plus=FALSE, nboot=nboot) if(both.boot){ RET <- list("boot" = RET, "632plus" = RETbc) } } class(RET) <- "bootestclass" RET } bootest.numeric <- function(y, formula, data, model, predict, nboot=25, bc632plus=FALSE, list.tindx = NULL, predictions = FALSE, ...) { # bootstrap estimator of root of mean squared error if (bc632plus) stop("cannot compute 632+ estimator of mean squared error") if(!is.null(list.tindx)) nboot <- length(list.tindx) if (nboot <=2) stop("to small number of bootstrap replications") ##FIX: nrow = N <- length(y) nindx <- 1:N bootindx <- matrix(NA, ncol=nboot, nrow=N) if(predictions) BOOTINDX <- matrix(NA, ncol=nboot, nrow=N) USEPM <- FALSE if (!is.data.frame(data)) stop("data is not a data.frame") if(is.null(nboot)) stop("number of bootstrap replications is missing") if(!is.null(list.tindx) & length(list.tindx) != nboot) stop(paste("List of selected observations per bootstrap sample has to be", nboot)) for(i in 1:nboot) { if(!is.null(list.tindx)) { tindx <- list.tindx[[i]] if(length(tindx) > N) warning("number of selected observations is larger than the sample size") } else { tindx <- sample(nindx, N, replace = TRUE) } # tindx <- ifelse(!is.null(list.tindx), list.tindx[[i]], sample(nindx, N, replace = TRUE)) mymodel <- model(formula, data = data[tindx,], ...) 
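# observations not drawn into the bootstrap sample form the out-of-bag test set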
outbootdata <- subset(data, !(nindx %in% tindx)) # check if mymodel is a function which should be used instead of # predict if (is.function(mymodel)) { if(!is.null(predict) & i == 1) warning("model returns a function and predict is specified, using models output") predict <- mymodel USEPM <- TRUE } if (USEPM) pred <- predict(newdata=outbootdata) else pred <- predict(mymodel, newdata = outbootdata) if (!is.numeric(pred)) stop("predict does not return numerical values") if (length(pred) != length(y[-tindx])) stop("different length of data and prediction") if(predictions) BOOTINDX[-tindx, i] <- pred bootindx[-tindx, i] <- (pred - y[-tindx])^2 } fun <- function(x) ifelse(all(is.na(x)), NA, mean(x, na.rm = TRUE)) err <- sqrt(mean(apply(bootindx, 1, fun), na.rm = TRUE)) if(predictions) RET <- list(error = err, nboot = nboot, predictions = BOOTINDX) else RET <- list(error = err, nboot=nboot) class(RET) <- "bootestreg" RET } bootest.Surv <- function(y, formula, data=NULL, model, predict, nboot=25, bc632plus=FALSE, list.tindx = NULL, predictions = FALSE, ...) { # bootstrap estimator of Brier's score if (bc632plus) stop("cannot compute 632+ estimator of Brier's score") N <- dim(y)[1] if(!is.null(list.tindx)) nboot <- length(list.tindx) nindx <- 1:N bootindx <- matrix(NA, ncol=nboot, nrow=N) if(predictions) BOOTINDX <- matrix(NA, ncol=nboot, nrow=N) USEPM <- FALSE if(is.null(nboot)) stop("number of bootstrap replications is missing") if (nboot <=2) stop("too small number of bootstrap replications") if (is.null(data)) data <- as.data.frame(rep(1, N)) if (!is.data.frame(data)) stop("data is not a data.frame") if(!is.null(list.tindx)) nboot <- length(list.tindx) for(i in 1:nboot) { if(!is.null(list.tindx)) { tindx <- list.tindx[[i]] if(length(tindx) > N) warning("number of selected observations is larger than the sample size") } else { tindx <- sample(nindx, N, replace = TRUE) } #tindx <- ifelse(!is.null(list.tindx), list.tindx[[i]], sample(nindx, N, replace = TRUE)) #tindx <- sample(nindx, N, replace = TRUE) mymodel <- model(formula, data=data[tindx,], ...) outbootdata <- subset(data, !(nindx %in% tindx)) # check if mymodel is a function which should be used instead of # predict if (is.function(mymodel)) { if(!is.null(predict) & i == 1) warning("model returns a function and predict is specified, using models output") predict <- mymodel USEPM <- TRUE } if (USEPM) pred <- predict(newdata=outbootdata) else pred <- predict(mymodel, newdata = outbootdata) if (is.list(pred)) { if (!inherits(pred[[1]], "survfit") && !inherits(pred, "survfit")) stop("predict does not return a list of survfit objects") } else { stop("predict does not return a list of survfit objects") } if(predictions) BOOTINDX[-tindx, i] <- sbrier(y[-tindx], pred) ###??? bootindx[-tindx, i] <- sbrier(y[-tindx], pred) } fun <- function(x) ifelse(all(is.na(x)), NA, mean(x, na.rm = TRUE)) err <- mean(apply(bootindx, 1, fun), na.rm = TRUE) if(predictions) RET <- list(error = err, nboot = nboot, predictions = BOOTINDX) else RET <- list(error = err, nboot=nboot) class(RET) <- "bootestsurv" RET } ipred/cleanup0000755000176200001440000000045014646200411012727 0ustar liggesusers#!/bin/sh for f in ./R/*~; do rm -f $f done for f in ./tests/*~; do rm -f $f done for f in .*~; do rm -f $f done for f in ./man/*~; do rm -f $f done for f in ./data/*~; do rm -f $f done for f in *~; do rm -f $f done find .
-name "DEADJOE" -exec rm -f {} \; exit 0 ipred/vignettes/0000755000176200001440000000000014646200411013363 5ustar liggesusersipred/vignettes/ipred.bib0000644000176200001440000000400314172231220015135 0ustar liggesusers@article{breiman:1996, key = {53}, author = {L. Breiman}, title = {Bagging Predictors}, journal = {Machine Learning}, pages = {123-140}, year = {1996}, volume = {24}, number = {2} } @article{efron:1997, key = {52}, author = {B. Efron and R. Tibshirani}, title = {Improvements on Cross-Validation: The .632+ Bootstrap Method}, journal = {Journal of the American Statistical Association}, pages = {548-560}, year = {1997}, volume = {92}, number = {438} } @article{hand:2001, key = {32}, author = {D.J. Hand and H.G. Li and N.M. Adams}, title = {Supervised classification with structured class definitions}, journal = {Computational Statistics \& Data Analysis}, pages = {209-225}, year = {2001}, volume = {36} } @inproceedings{ifcs:2001, author = {A. Peters and T. Hothorn and B. Lausen}, title = {Glaucoma diagnosis by indirect classifiers}, booktitle = {Studies in Classification, Data Analysis, and Knowledge Organization (to appear)}, organization = {Proceedings of the 8th Conference of the International Federation of Classification Societies}, year = {2002} } @techreport{out-of-bag:1996, key = {T162}, author = {Leo Breiman}, title = {Out-Of-Bag Estimation}, institution = {Statistics Department, University of California Berkeley}, year = {1996}, address = {Berkeley CA 94708} } @article{double-bag:2002, key = {247}, author = {Torsten Hothorn and Berthold Lausen}, title = {Double-Bagging: Combining classifiers by bootstrap aggregation}, journal = {Pattern Recognition}, year = {2003}, pages = {1303-1309}, volume = {36}, number = {6} } @article{Rnews:Peters+Hothorn+Lausen:2002, key = {308}, author = {Andrea Peters and Torsten Hothorn and Berthold Lausen}, title = {ipred: Improved Predictors}, journal = {R News}, year = 2002, month = {June}, volume = 2, number = 2, pages = {33--36}, url = {http://CRAN.R-project.org/doc/Rnews/} } ipred/vignettes/ipred-examples.Rnw0000644000176200001440000004270414172231220016775 0ustar liggesusers\documentclass[11pt]{article} \usepackage[round]{natbib} \usepackage{bibentry} \usepackage{amsfonts} \usepackage{hyperref} \renewcommand{\baselinestretch}{1.3} \newcommand{\ipred}{\texttt{ipred }} %\VignetteIndexEntry{Some more or less useful examples for illustration.} %\VignetteDepends{ipred} %\textwidth=6.2in %\VignetteDepends{mvtnorm,TH.data,rpart,MASS} \begin{document} \title{\ipred: Improved Predictors} \date{} \SweaveOpts{engine=R,eps=TRUE,pdf=TRUE} <>= options(prompt=">", width=50) set.seed(210477) @ \maketitle This short manual is heavily based on \cite{Rnews:Peters+Hothorn+Lausen:2002} and needs some improvements. \section{Introduction} In classification problems, there are several attempts to create rules which assign future observations to certain classes. Common methods are for example linear discriminant analysis or classification trees. Recent developments lead to substantial reduction of misclassification error in many applications. Bootstrap aggregation \citep[``bagging'',][]{breiman:1996} combines classifiers trained on bootstrap samples of the original data. Another approach is indirect classification, which incorporates a priori knowledge into a classification rule \citep{hand:2001}. Since the misclassification error is a criterion to assess the classification techniques, its estimation is of main importance. 
A nearly unbiased but highly variable estimator can be calculated by cross validation. \cite{efron:1997} discuss bootstrap estimates of misclassification error. As a by-product of bagging, \cite{out-of-bag:1996} proposes the out-of-bag estimator. \\ However, the calculation of the desired classification models and their misclassification errors is often aggravated by different and specialized interfaces of the various procedures. We propose the \ipred package as a first attempt to create a unified interface for improved predictors and various error rate estimators. In the following we demonstrate the functionality of the package in the example of glaucoma classification. We start with an overview of the disease and data and review the implemented classification and estimation methods in the context of their application to glaucoma diagnosis. \section{Glaucoma} Glaucoma is a slowly progressing and irreversible disease that affects the optic nerve head. It is the second most common cause of blindness worldwide. Glaucoma is usually diagnosed based on a reduced visual field, assessed by a medical examination of perimetry, and a reduced number of intact nerve fibers at the optic nerve head. One way to examine the number of intact nerve fibers is the Heidelberg Retina Tomograph (HRT), a confocal laser scanning tomograph, which performs a three-dimensional topographical analysis of the optic nerve head morphology. It produces a series of $32$ images, each of $256 \times 256$ pixels, which are converted to a single topographic image. A less complex, though also less informative, examination tool is $2$-dimensional fundus photography. However, in cooperation with clinicians and a priori analysis we derived a diagnosis of glaucoma based on three variables only: $w_{lora}$ represents the loss of nerve fibers and is obtained by a $2$-dimensional fundus photography, $w_{cs}$ and $w_{clv}$ describe the visual field defect \citep{ifcs:2001}. \begin{center} \begin{figure}[h] \begin{center} {\small \setlength{\unitlength}{0.6cm} \begin{picture}(14.5,5) \put(5, 4.5){\makebox(2, 0.5){$w_{clv}\geq 5.1$}} \put(2.5, 3){\makebox(2, 0.5){$w_{lora}\geq 49.23$}} \put(7.5, 3){\makebox(2, 0.5){$w_{lora} \geq 58.55$}} \put(0, 1.5){\makebox(2, 0.5){$glaucoma$}} \put(3.5, 1.5){\makebox(2, 0.5){$normal$}} \put(6.5, 1.5){\makebox(2, 0.5){$w_{cs} < 1.405$}} \put(10, 1.5){\makebox(2, 0.5){$normal$}} \put(3.5, 0){\makebox(2, 0.5){$glaucoma$}} \put(6.5, 0){\makebox(2, 0.5){$normal$}} \put(6, 4.5){\vector(-3, -2){1.5}} \put(6, 4.5){\vector(3, -2){1.5}} \put(3.5, 3){\vector(3, -2){1.5}} \put(3.5, 3){\vector(-3, -2){1.5}} \put(8.5, 3){\vector(3, -2){1.5}} \put(8.5, 3){\vector(-3, -2){1.5}} \put(6.5, 1.5){\vector(3, -2){1.5}} \put(6.5, 1.5){\vector(-3, -2){1.5}} \end{picture} } \end{center} \caption{Glaucoma diagnosis. \label{diag}} \end{figure} \end{center} Figure \ref{diag} represents the diagnosis of glaucoma in terms of a medical decision tree. A complication of the disease is that damage to the optic nerve head morphology precedes a measurable visual field defect. Furthermore, early detection is of main importance, since an adequate therapy can only slow down the progression of the disease. Hence, a classification rule for detecting early damage should include morphological information, rather than visual field data only. Two example datasets are included in the package. The first one contains measurements of the eye morphology only (\texttt{GlaucomaM}), including $62$ variables for $196$ observations.
The second dataset (\texttt{GlaucomaMVF}) contains additional visual field measurements for a different set of patients. In both example datasets, the observations in the two groups are matched by age and sex to prevent any bias. \section{Bagging} Referring to the example of glaucoma diagnosis we first demonstrate the functionality of the \texttt{bagging} function. We fit \texttt{nbagg = 25} (default) classification trees for bagging by <<>>= library("ipred") library("rpart") library("MASS") data("GlaucomaM", package="TH.data") gbag <- bagging(Class ~ ., data = GlaucomaM, coob=TRUE) @ where \texttt{GlaucomaM} contains explanatory HRT variables and the response of glaucoma diagnosis (\texttt{Class}), a factor at two levels \texttt{normal} and \texttt{glaucoma}. \texttt{print} returns information about the returned object, i.e. the number of bootstrap replications used and, as requested by \texttt{coob=TRUE}, the out-of-bag estimate of misclassification error \citep{out-of-bag:1996}. <<>>= print(gbag) @ The out-of-bag estimate uses the observations which are left out in a bootstrap sample to estimate the misclassification error at almost no additional computational cost. \cite{double-bag:2002} propose to use the out-of-bag samples for a combination of linear discriminant analysis and classification trees, called ``Double-Bagging''. For example, a combination of a stabilised linear discriminant analysis with classification trees can be computed along the following lines: <<>>= scomb <- list(list(model=slda, predict=function(object, newdata) predict(object, newdata)$x)) gbagc <- bagging(Class ~ ., data = GlaucomaM, comb=scomb) @ \texttt{predict} predicts future observations according to the fitted model. <<>>= predict(gbagc, newdata=GlaucomaM[c(1:3, 99:102), ]) @ Both \texttt{bagging} and \texttt{predict} rely on the \texttt{rpart} routines. The \texttt{rpart} routine for each bootstrap sample can be controlled in the usual way. By default \texttt{rpart.control} is used with \texttt{minsize=2} and \texttt{cp=0}, and it is wise to turn cross-validation off (\texttt{xval=0}). The function \texttt{prune} can be used to prune each of the trees to an appropriate size. \section{Indirect Classification} Especially in a medical context it often occurs that a priori knowledge about a classifying structure is given. For example it might be known that a disease is assessed on a subgroup of the given variables or, moreover, that class memberships are assigned by a deterministically known classifying function. \cite{hand:2001} proposes the framework of indirect classification which incorporates this a priori knowledge into a classification rule. In this framework we subdivide a given data set into three groups of variables: those to be used for predicting the class membership (explanatory), those to be used for defining the class membership (intermediate) and the class membership variable itself (response). For future observations, an indirect classifier predicts values for the appointed intermediate variables based on explanatory variables only. The observation is then classified based on its predicted intermediate variables and a fixed classifying function. This indirect way of classification using the predicted intermediate variables offers possibilities to incorporate a priori knowledge by the subdivision of variables and by the construction of a fixed classifying function. We apply indirect classification by using the function \texttt{inclass}.
Referring to the glaucoma example, explanatory variables are HRT and anamnestic variables only, intermediate variables are $w_{lora}, \, w_{cs}$ and $w_{clv}$. The response is the diagnosis of glaucoma, which is determined by a fixed classifying function and therefore not included in the learning sample \texttt{GlaucomaMVF}. We assign the given variables to explanatory and intermediate by specifying the input formula. <<>>= data("GlaucomaMVF", package="ipred") GlaucomaMVF <- GlaucomaMVF[,-63] formula.indirect <- Class~clv + lora + cs ~ . @ The variables on the left-hand side represent the intermediate variables, modeled by the explanatory variables on the right-hand side. Almost any modeling technique can be used to predict the intermediate variables. We chose a linear model by \texttt{pFUN = list(list(model = lm))}. <<>>= classify <- function (data) { attach(data) res <- ifelse((!is.na(clv) & !is.na(lora) & clv >= 5.1 & lora >= 49.23372) | (!is.na(clv) & !is.na(lora) & !is.na(cs) & clv < 5.1 & lora >= 58.55409 & cs < 1.405) | (is.na(clv) & !is.na(lora) & !is.na(cs) & lora >= 58.55409 & cs < 1.405) | (!is.na(clv) & is.na(lora) & cs < 1.405), 0, 1) detach(data) factor (res, labels = c("glaucoma", "normal")) } fit <- inclass(formula.indirect, pFUN = list(list(model = lm)), cFUN = classify, data = GlaucomaMVF) @ \texttt{print} displays the subdivision of variables and the chosen modeling technique. <<>>= print(fit) @ Furthermore, indirect classification predicts the intermediate variables based on the explanatory variables and classifies them according to a fixed classifying function in a second step; that means a deterministically known function for the class membership has to be specified. In our example this function is given in Figure \ref{diag} and implemented in the function \texttt{classify}.\\ Prediction of future observations is now performed by <<>>= predict(object = fit, newdata = GlaucomaMVF[c(1:3, 86:88),]) @ We perform a bootstrap aggregated indirect classification approach by choosing \texttt{pFUN = bagging} and specifying the number of bootstrap samples \citep{ifcs:2001}. Regression or classification trees are fitted for each bootstrap sample, with respect to the measurement scale of the specified intermediate variables. <<>>= mypredict.rpart <- function(object, newdata) { RES <- predict(object, newdata) RET <- rep(NA, nrow(newdata)) NAMES <- rownames(newdata) RET[NAMES %in% names(RES)] <- RES[NAMES[NAMES %in% names(RES)]] RET } fit <- inbagg(formula.indirect, pFUN = list(list(model = rpart, predict = mypredict.rpart)), cFUN = classify, nbagg = 25, data = GlaucomaMVF) @ The call for the prediction of values remains unchanged. \section{Error Rate Estimation} Classification rules are usually assessed by their misclassification rate. Hence, error rate estimation is of main importance. The function \texttt{errorest} implements a unified interface to several resampling based estimators. Referring to the example, we apply a linear discriminant analysis and specify the error rate estimator by \texttt{estimator = "cv", "boot"} or \texttt{"632plus"}, respectively. A 10-fold cross validation is performed by choosing \texttt{estimator = "cv"} and \texttt{est.para = control.errorest(k = 10)}. The options \texttt{estimator = "boot"} or \texttt{estimator = "632plus"} deliver a bootstrap estimator and its bias corrected version {\sl .632+} \citep[see][]{efron:1997}; we specify the number of bootstrap samples to be drawn by \texttt{est.para = control.errorest(nboot = 50)}.
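For instance, a bootstrap estimate for the linear discriminant analysis could be requested along the following lines; the chunk is shown for illustration and not evaluated here, since it reuses the wrapper \texttt{mypredict.lda} defined below. <<eval=FALSE>>= errorest(Class ~ ., data = GlaucomaM, model = lda, estimator = "boot", est.para = control.errorest(nboot = 50), predict = mypredict.lda) @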
Further arguments are required to particularize the classification technique. The argument \texttt{predict} represents the chosen predictive function. For a unified interface \texttt{predict} has to be based on the arguments \texttt{object} and \texttt{newdata} only; therefore a wrapper function \texttt{mypredict} is necessary for classifiers which require more than those arguments or do not return the predicted classes by default. For a linear discriminant analysis with \texttt{lda}, we need to specify <<>>= mypredict.lda <- function(object, newdata){ predict(object, newdata = newdata)$class } @ and calculate a 10-fold-cross-validated error rate estimator for a linear discriminant analysis by calling <<>>= errorest(Class ~ ., data= GlaucomaM, model=lda, estimator = "cv", predict= mypredict.lda) @ For the indirect approach the specification of the call becomes slightly more complicated. %Again for a unified interface a wrapper %function has to be used, which incorporates the fixed classification rule The bias corrected estimator {\sl .632+} is computed by <<>>= errorest(formula.indirect, data = GlaucomaMVF, model = inclass, estimator = "632plus", pFUN = list(list(model = lm)), cFUN = classify) @ Because of the subdivision of variables and a formula describing the modeling between explanatory and intermediate variables only, the class membership variable has to be supplied as well. Hence, in contrast to the function \texttt{inclass}, the data set \texttt{GlaucomaMVF} used in \texttt{errorest} must contain explanatory, intermediate and response variables. Sometimes it may be necessary to reduce the number of predictors before training a classifier. Estimating the error rate after the variable selection leads to biased estimates of the misclassification error and therefore one should estimate the error rate of the whole procedure. Within the \texttt{errorest} framework, this can be done as follows. First, we define a function which does both variable selection and training of the classifier. For illustration purposes, we select the predictors by comparing their univariate $P$-values of a two-sample $t$-test with a prespecified level and train an LDA using the selected variables only. <<>>= mymod <- function(formula, data, level=0.05) { # select all predictors that are associated with a # univariate t.test p-value of less than level sel <- which(lapply(data, function(x) { if (!is.numeric(x)) return(1) else return(t.test(x ~ data$Class)$p.value) }) < level) # make sure that the response is still there sel <- c(which(colnames(data) %in% "Class"), sel) # compute an LDA using the selected predictors only mod <- lda(formula , data=data[,sel]) # and return a function for prediction function(newdata) { predict(mod, newdata=newdata[,sel])$class } } @ Note that \texttt{mymod} does not return an object of class \texttt{lda} but a function with argument \texttt{newdata} only. Thanks to lexical scoping, this function is used for computing predicted classes instead of a function \texttt{predict} passed to \texttt{errorest} as argument. Computing a $5$-fold cross-validated error rate estimator now is approximately a one-liner. <<>>= errorest(Class ~ . , data=GlaucomaM, model=mymod, estimator = "cv", est.para=control.errorest(k=5)) @ %%To summarize the performance of the different classification techniques in the considered example of glaucoma diagnosis, the 10-fold %%cross-validated error estimator delivers the %%results given in Table \ref{tenf}.
%%\begin{figure}
%%\begin{center}
%%\begin{tabular}{ rrr }
%%\hline
%%dataset & method & error estimate \\
%%\hline
%%\texttt{GlaucomaM} & {\sl slda} & 0.168 \\
%%\texttt{GlaucomaM} & {\sl bagging} & 0.158 \\
%%\texttt{GlaucomaM} & {\sl double-bagging} & 0.153 \\
%%\texttt{GlaucomaMVF} & {\sl inclass-bagging} & 0.206 \\
%%\texttt{GlaucomaMVF} & {\sl inclass-lm} & 0.229 \\
%%\hline
%%\end{tabular}
%%\caption{10-fold cross-validated error estimation of
%%the misclassification error for several classification
%%methods: {\sl slda} - stabilised linear discriminant analysis,
%%{\sl bagging} - bagging with 50 bootstrap samples,
%%{\sl double-bagging} - bagging with 50 bootstrap samples,
%%combined with sLDA, {\sl inclass-bagging} -
%%indirect classification using bagging,
%%{\sl inclass-lm} - indirect classification using
%%linear modeling. \label{tenf}}
%%\end{center}
%%\end{figure}

%%Note that an estimator of the variance is available for the ordinary
%%bootstrap estimator (\texttt{estimator="boot"}) only, see \cite{efron:1997}.

\section{Summary}

\ipred tries to implement a unified interface to some recent developments
in classification and error rate estimation. It is by no means finished or
perfect and we very much appreciate comments, suggestions and criticism.
Currently, the major drawback is speed. Calling \texttt{rpart} $50$ times,
once for each bootstrap sample, is relatively inefficient, but the design
of interfaces was our main focus instead of optimization. Besides the
examples shown, \texttt{bagging} can be used to compute bagging for
regression trees and \texttt{errorest} computes estimators of the mean
squared error for regression models.

\bibliographystyle{plainnat}
\bibliography{ipred}

\end{document}
ipred/data/0000755000176200001440000000000014646200411012264 5ustar liggesusersipred/data/GlaucomaMVF.rda0000644000176200001440000004531014172231220015054 0ustar liggesusers[xz-compressed binary data omitted]
ipred/data/DLBCL.rda0000644000176200001440000000774314172231220013603 0ustar liggesusers[binary data omitted]
ipred/data/dystrophy.rda0000644000176200001440000000614514172231220015023 0ustar liggesusers[bzip2-compressed binary data omitted]
ipred/src/0000755000176200001440000000000014646200411012142 5ustar liggesusersipred/src/init.c0000644000176200001440000000074114172231220013247
0ustar liggesusers
#include <R.h>
#include <Rinternals.h>
#include <stdlib.h> // for NULL
#include <R_ext/Rdynload.h>

/* FIXME: Check these declarations against the C/Fortran source code. */

/* .Call calls */
extern SEXP SdiffKM(SEXP, SEXP, SEXP);

static const R_CallMethodDef CallEntries[] = {
    {"SdiffKM", (DL_FUNC) &SdiffKM, 3},
    {NULL, NULL, 0}
};

void R_init_ipred(DllInfo *dll)
{
    R_registerRoutines(dll, NULL, CallEntries, NULL, NULL);
    R_useDynamicSymbols(dll, FALSE);
}
ipred/src/SdiffKM.c0000644000176200001440000000434214172231220013570 0ustar liggesusers
/* $Id: SdiffKM.c,v 1.2 2003/03/27 13:36:02 hothorn Exp $

   SdiffKM: integrated squared difference between survival curve
   and KM estimator
*/

#include <R.h>
#include <Rmath.h>
#include <Rinternals.h>

SEXP SdiffKM(SEXP time, SEXP prob, SEXP args)
{
    SEXP rint;
    double d, p, helpone, helptwo, k;
    double myint = 0.0;
    double hazard, window, tw;
    int i, j, n;

    /* check arguments */
    if (!isVector(time))
        error("Argument time is not a vector");
    n = LENGTH(time);
    if (REAL(time)[0] != 0.0)
        error("time[1] must be zero");
    if (!isVector(prob))
        error("Argument prob is not a vector");
    if (REAL(prob)[0] > 1.0)
        error("prob[1] must be less or equal 1");
    if (REAL(prob)[0] < 0.0)
        error("prob[1] must be greater or equal 0");
    if (LENGTH(prob) != n)
        error("prob and time differ in their length");
    if (!isVector(args) || LENGTH(args) != 2)
        error("Argument args must be vector with two elements");
    hazard = REAL(args)[0];
    window = REAL(args)[1];

    /* prepare for return values */
    PROTECT(rint = allocVector(REALSXP, 1));
    UNPROTECT(1);
    REAL(rint)[0] = 0.0;

    /* for all discrete times */
    for (i = 0; i < n-1; i++) {
        /* get difference between times */
        d = REAL(time)[i+1] - REAL(time)[i];
        /* estimated survival probability at this time */
        p = REAL(prob)[i];
        /* if the difference is small enough */
        if (d < window) {
            helpone = p - exp(-REAL(time)[i] * hazard);
            helptwo = p - exp(-REAL(time)[i+1] * hazard);
            /* mean of over and under sum */
            myint += 0.5 * d * (helpone*helpone + helptwo*helptwo);
        } else {
            /* split up in smaller pieces */
            k = ftrunc(d/window) + 1;
            tw = d/k;
            for (j = 0; j < k; j++) {
                helpone = p - exp(-(REAL(time)[i] + j*tw)*hazard);
                helptwo = p - exp(-(REAL(time)[i] + (j+1)*tw)*hazard);
                /* mean of over and under sum for all small windows */
                myint += 0.5*tw*(helpone*helpone + helptwo*helptwo);
            }
        }
    }

    /* ok, get outa here */
    REAL(rint)[0] = myint;
    return(rint);
}
ipred/NAMESPACE0000644000176200001440000000436214402334514012601 0ustar liggesusers
useDynLib(ipred, .registration = TRUE)

### exported functions
export(
  getsurv,
  bagging,
  ipredbagg,
  control.errorest,
  errorest,
  bootest,
  cv,
  inbagg,
  inclass,
  ipredknn,
  predict.ipredknn,
  kfoldcv,
  mypredict.lm,
  rsurv,
  sbrier,
  varset,
  slda)

importFrom("stats", "as.formula", "complete.cases", "cov",
  "delete.response", "model.extract", "model.frame", "model.matrix",
  "na.omit", "naresid", "predict", "rexp", "rnorm", "runif", "terms")
importFrom("utils", "data", "getFromNamespace")

### register S3 methods
#S3method(print, bagging)
S3method(slda, default)
S3method(slda, formula)
S3method(slda, factor)
S3method(bagging, default)
S3method(bagging, data.frame)
S3method(errorest, default)
S3method(errorest, data.frame)
S3method(bootest, default)
S3method(bootest, integer)
S3method(bootest, factor)
S3method(bootest, numeric)
S3method(bootest, Surv)
S3method(inbagg, default)
S3method(inbagg, data.frame)
S3method(inclass, default)
S3method(inclass, data.frame)
S3method(cv, default)
S3method(cv, integer)
S3method(cv, factor)
S3method(cv, numeric)
S3method(cv, Surv)
S3method(ipredbagg, Surv)
S3method(ipredbagg, factor)
S3method(ipredbagg, numeric)
S3method(prune, classbagg)
S3method(prune, survbagg)
S3method(prune, classbagg)
S3method(print, classbagg)
S3method(print, regbagg)
S3method(print, survbagg)
S3method(print, classbagg)
S3method(summary, classbagg)
S3method(summary, survbagg)
S3method(summary, classbagg)
S3method(print, cvclass)
S3method(print, bootestclass)
S3method(print, cvreg)
S3method(print, bootestreg)
S3method(print, cvsurv)
S3method(print, bootestsurv)
S3method(print, inbagg)
S3method(summary, inbagg)
S3method(print, inclass)
S3method(summary, inclass)
S3method(predict, inbagg)
S3method(predict, inclass)
S3method(predict, classbagg)
S3method(predict, regbagg)
S3method(predict, survbagg)
S3method(predict, slda)
S3method(predict, ipredknn)
#S3method(predict, sclass)
#S3method(predict, sreg)
S3method("ipredbagg", "default")
S3method("ipredbagg", "integer")

##imported functions
importFrom(class, knn)
importFrom(rpart, rpart, prune, na.rpart, rpart.control)
importFrom(MASS, lda)
importFrom(survival, Surv, is.Surv, survfit)
importFrom(nnet, multinom)
importFrom(prodlim, prodlim)
ipred/inst/0000755000176200001440000000000014646200411012330 5ustar liggesusersipred/inst/doc/0000755000176200001440000000000014646200411013075 5ustar liggesusersipred/inst/doc/ipred-examples.Rnw0000644000176200001440000004270414172231220016507 0ustar liggesusers
\documentclass[11pt]{article}
\usepackage[round]{natbib}
\usepackage{bibentry}
\usepackage{amsfonts}
\usepackage{hyperref}

\renewcommand{\baselinestretch}{1.3}
\newcommand{\ipred}{\texttt{ipred }}

%\VignetteIndexEntry{Some more or less useful examples for illustration.}
%\VignetteDepends{ipred}
%\textwidth=6.2in
%\VignetteDepends{mvtnorm,TH.data,rpart,MASS}

\begin{document}

\title{\ipred: Improved Predictors}
\date{}

\SweaveOpts{engine=R,eps=TRUE,pdf=TRUE}

<<preliminaries>>=
options(prompt=">", width=50)
set.seed(210477)
@

\maketitle

This short manual is heavily based on \cite{Rnews:Peters+Hothorn+Lausen:2002}
and needs some improvements.

\section{Introduction}

In classification problems, there are several attempts to create rules
which assign future observations to certain classes. Common methods are
for example linear discriminant analysis or classification trees. Recent
developments have led to substantial reductions of the misclassification
error in many applications. Bootstrap aggregation
\citep[``bagging'',][]{breiman:1996} combines classifiers trained on
bootstrap samples of the original data. Another approach is indirect
classification, which incorporates a priori knowledge into a
classification rule \citep{hand:2001}. Since the misclassification error
is a criterion to assess the classification techniques, its estimation is
of main importance. A nearly unbiased but highly variable estimator can be
calculated by cross validation. \cite{efron:1997} discuss bootstrap
estimates of misclassification error. As a by-product of bagging,
\cite{out-of-bag:1996} proposes the out-of-bag estimator. \\
However, the calculation of the desired classification models and their
misclassification errors is often hampered by the different and
specialized interfaces of the various procedures. We propose the \ipred
package as a first attempt to create a unified interface for improved
predictors and various error rate estimators. In the following we
demonstrate the functionality of the package in the example of glaucoma
classification.
We start with an overview of the disease and the data and review the
implemented classification and estimation methods in context with their
application to glaucoma diagnosis.

\section{Glaucoma}

Glaucoma is a slowly progressing and irreversible disease that affects the
optic nerve head. It is the second most common cause of blindness
worldwide. Glaucoma is usually diagnosed based on a reduced visual field,
assessed by a medical examination of perimetry, and a smaller number of
intact nerve fibers at the optic nerve head. One way to examine the amount
of intact nerve fibers is the Heidelberg Retina Tomograph (HRT), a
confocal laser scanning tomograph, which performs a three-dimensional
topographical analysis of the optic nerve head morphology. It produces a
series of $32$ images, each of $256 \times 256$ pixels, which are
converted to a single topographic image. A less complex, though also less
informative, examination tool is $2$-dimensional fundus photography.
However, in cooperation with clinicians and based on a priori analysis we
derived a diagnosis of glaucoma from three variables only: $w_{lora}$
represents the loss of nerve fibers and is obtained by $2$-dimensional
fundus photography, $w_{cs}$ and $w_{clv}$ describe the visual field
defect \citep{ifcs:2001}.

\begin{center}
\begin{figure}[h]
\begin{center}
{\small
\setlength{\unitlength}{0.6cm}
\begin{picture}(14.5,5)
\put(5, 4.5){\makebox(2, 0.5){$w_{clv}\geq 5.1$}}
\put(2.5, 3){\makebox(2, 0.5){$w_{lora}\geq 49.23$}}
\put(7.5, 3){\makebox(2, 0.5){$w_{lora} \geq 58.55$}}
\put(0, 1.5){\makebox(2, 0.5){$glaucoma$}}
\put(3.5, 1.5){\makebox(2, 0.5){$normal$}}
\put(6.5, 1.5){\makebox(2, 0.5){$w_{cs} < 1.405$}}
\put(10, 1.5){\makebox(2, 0.5){$normal$}}
\put(3.5, 0){\makebox(2, 0.5){$glaucoma$}}
\put(6.5, 0){\makebox(2, 0.5){$normal$}}
\put(6, 4.5){\vector(-3, -2){1.5}}
\put(6, 4.5){\vector(3, -2){1.5}}
\put(3.5, 3){\vector(3, -2){1.5}}
\put(3.5, 3){\vector(-3, -2){1.5}}
\put(8.5, 3){\vector(3, -2){1.5}}
\put(8.5, 3){\vector(-3, -2){1.5}}
\put(6.5, 1.5){\vector(3, -2){1.5}}
\put(6.5, 1.5){\vector(-3, -2){1.5}}
\end{picture}
}
\end{center}
\caption{Glaucoma diagnosis. \label{diag}}
\end{figure}
\end{center}

Figure \ref{diag} represents the diagnosis of glaucoma in terms of a
medical decision tree. A complication of the disease is that damage to the
optic nerve head morphology precedes a measurable visual field defect.
Furthermore, early detection is of main importance, since an adequate
therapy can only slow down the progression of the disease. Hence, a
classification rule for detecting early damage should include
morphological information, rather than visual field data only. Two example
datasets are included in the package. The first one contains measurements
of the eye morphology only (\texttt{GlaucomaM}), including $62$ variables
for $196$ observations. The second dataset (\texttt{GlaucomaMVF}) contains
additional visual field measurements for a different set of patients. In
both example datasets, the observations in the two groups are matched by
age and sex to prevent any bias.

\section{Bagging}

Referring to the example of glaucoma diagnosis we first demonstrate the
functionality of the \texttt{bagging} function.
We fit \texttt{nbagg = 25} (default) classification trees for bagging by
<<bagging>>=
library("ipred")
library("rpart")
library("MASS")
data("GlaucomaM", package="TH.data")
gbag <- bagging(Class ~ ., data = GlaucomaM, coob=TRUE)
@
where \texttt{GlaucomaM} contains explanatory HRT variables and the
response of glaucoma diagnosis (\texttt{Class}), a factor with two levels
\texttt{normal} and \texttt{glaucoma}. \texttt{print} returns information
about the returned object, i.e. the number of bootstrap replications used
and, as requested by \texttt{coob=TRUE}, the out-of-bag estimate of the
misclassification error \citep{out-of-bag:1996}.
<<print-bagging>>=
print(gbag)
@
The out-of-bag estimate uses the observations which are left out in a
bootstrap sample to estimate the misclassification error at almost no
additional computational cost. \cite{double-bag:2002} propose to use the
out-of-bag samples for a combination of linear discriminant analysis and
classification trees, called ``Double-Bagging''. For example, a
combination of a stabilised linear discriminant analysis with
classification trees can be computed along the following lines
<<double-bagging>>=
scomb <- list(list(model=slda, predict=function(object, newdata)
predict(object, newdata)$x))
gbagc <- bagging(Class ~ ., data = GlaucomaM, comb=scomb)
@
\texttt{predict} predicts future observations according to the fitted
model.
<<predict.bagging>>=
predict(gbagc, newdata=GlaucomaM[c(1:3, 99:102), ])
@
Both \texttt{bagging} and \texttt{predict} rely on the \texttt{rpart}
routines. The \texttt{rpart} routine for each bootstrap sample can be
controlled in the usual way. By default \texttt{rpart.control} is used
with \texttt{minsize=2} and \texttt{cp=0}, and it is wise to turn
cross-validation off (\texttt{xval=0}). The function \texttt{prune} can be
used to prune each of the trees to an appropriate size.

\section{Indirect Classification}

Especially in a medical context it often occurs that a priori knowledge
about a classifying structure is given. For example, it might be known
that a disease is assessed on a subgroup of the given variables or,
moreover, that class memberships are assigned by a deterministically known
classifying function. \cite{hand:2001} proposes the framework of indirect
classification which incorporates this a priori knowledge into a
classification rule. In this framework we subdivide a given data set into
three groups of variables: those used to predict the class membership
(explanatory), those used to define the class membership (intermediate)
and the class membership variable itself (response). For future
observations, an indirect classifier predicts values for the appointed
intermediate variables based on explanatory variables only. The
observation is classified based on its predicted intermediate variables
and a fixed classifying function. This indirect way of classification
using the predicted intermediate variables offers possibilities to
incorporate a priori knowledge by the subdivision of variables and by the
construction of a fixed classifying function. We apply indirect
classification by using the function \texttt{inclass}. Referring to the
glaucoma example, the explanatory variables are HRT and anamnestic
variables only, the intermediate variables are $w_{lora}, \, w_{cs}$ and
$w_{clv}$. The response is the diagnosis of glaucoma, which is determined
by a fixed classifying function and therefore not included in the learning
sample \texttt{GlaucomaMVF}. We assign the given variables to explanatory
and intermediate by specifying the input formula.
<<indirect.formula>>=
data("GlaucomaMVF", package="ipred")
GlaucomaMVF <- GlaucomaMVF[,-63]
formula.indirect <- Class~clv + lora + cs ~ .
@
The variables on the left-hand side represent the intermediate variables,
modeled by the explanatory variables on the right-hand side. Almost any
modeling technique can be used to predict the intermediate variables. We
chose a linear model by \texttt{pFUN = list(list(model = lm))}.
<<indirect.fit>>=
classify <- function (data) {
  attach(data)
  res <- ifelse((!is.na(clv) & !is.na(lora) & clv >= 5.1 &
                 lora >= 49.23372) |
                (!is.na(clv) & !is.na(lora) & !is.na(cs) &
                 clv < 5.1 & lora >= 58.55409 & cs < 1.405) |
                (is.na(clv) & !is.na(lora) & !is.na(cs) &
                 lora >= 58.55409 & cs < 1.405) |
                (!is.na(clv) & is.na(lora) & cs < 1.405), 0, 1)
  detach(data)
  factor(res, labels = c("glaucoma", "normal"))
}
fit <- inclass(formula.indirect, pFUN = list(list(model = lm)),
  cFUN = classify, data = GlaucomaMVF)
@
\texttt{print} displays the subdivision of variables and the chosen
modeling technique.
<<print.indirect>>=
print(fit)
@
Furthermore, indirect classification predicts the intermediate variables
based on the explanatory variables and classifies them according to a
fixed classifying function in a second step; that is, a deterministically
known function for the class membership has to be specified. In our
example this function is given in Figure \ref{diag} and implemented in the
function \texttt{classify}.\\
Prediction of future observations is now performed by
<<predict.indirect>>=
predict(object = fit, newdata = GlaucomaMVF[c(1:3, 86:88),])
@
We perform a bootstrap aggregated indirect classification approach by
choosing \texttt{pFUN = bagging} and specifying the number of bootstrap
samples \citep{ifcs:2001}. Regression or classification trees are fitted
for each bootstrap sample, with respect to the measurement scale of the
specified intermediate variables.
<<bagging.indirect>>=
mypredict.rpart <- function(object, newdata) {
  RES <- predict(object, newdata)
  RET <- rep(NA, nrow(newdata))
  NAMES <- rownames(newdata)
  RET[NAMES %in% names(RES)] <- RES[NAMES[NAMES %in% names(RES)]]
  RET
}
fit <- inbagg(formula.indirect, pFUN = list(list(model = rpart,
  predict = mypredict.rpart)), cFUN = classify, nbagg = 25,
  data = GlaucomaMVF)
@
The call for the prediction of values remains unchanged.

\section{Error Rate Estimation}

Classification rules are usually assessed by their misclassification
rate. Hence, error rate estimation is of main importance. The function
\texttt{errorest} implements a unified interface to several resampling
based estimators. Referring to the example, we apply a linear discriminant
analysis and specify the error rate estimator by \texttt{estimator = "cv",
"boot"} or \texttt{"632plus"}, respectively. A 10-fold cross validation is
performed by choosing \texttt{estimator = "cv"} and \texttt{est.para =
control.errorest(k = 10)}. The options \texttt{estimator = "boot"} or
\texttt{estimator = "632plus"} deliver a bootstrap estimator and its bias
corrected version {\sl .632+} \citep[see][]{efron:1997}; the number of
bootstrap samples to be drawn is specified by \texttt{est.para =
control.errorest(nboot = 50)}.

Further arguments are required to particularize the classification
technique. The argument \texttt{predict} represents the chosen predictive
function. For a unified interface \texttt{predict} has to be based on the
arguments \texttt{object} and \texttt{newdata} only; therefore a wrapper
function \texttt{mypredict} is necessary for classifiers which require
more than those arguments or do not return the predicted classes by
default.
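As a hypothetical illustration (the wrapper name, the cutoff and the
assumption that \texttt{predict} returns the probability of class
\texttt{glaucoma} are ours and not part of the package), a classifier
whose \texttt{predict} method returns class probabilities rather than
factor levels could be wrapped as follows; the chunk is a sketch only and
is not evaluated.
<<wrapper-sketch, eval=FALSE>>=
## hypothetical sketch: coerce predicted probabilities to classes;
## assumes predict() returns the probability of class "glaucoma"
mypredict.prob <- function(object, newdata) {
  p <- predict(object, newdata = newdata)
  factor(ifelse(p > 0.5, "glaucoma", "normal"),
         levels = c("glaucoma", "normal"))
}
@
Any such wrapper has to take exactly the arguments \texttt{object} and
\texttt{newdata} and must return the predicted classes as a factor.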
For a linear discriminant analysis with \texttt{lda}, we need to specify
<<plda>>=
mypredict.lda <- function(object, newdata){
  predict(object, newdata = newdata)$class
}
@
and calculate a 10-fold cross-validated error rate estimator for a linear
discriminant analysis by calling
<<cvlda>>=
errorest(Class ~ ., data= GlaucomaM,
  model=lda, estimator = "cv", predict= mypredict.lda)
@
For the indirect approach the specification of the call becomes slightly
more complicated.
%Again for a unified interface a wrapper
%function has to be used, which incorporates the fixed classification rule
The bias corrected estimator {\sl .632+} is computed by
<<cvindirect>>=
errorest(formula.indirect, data = GlaucomaMVF,
  model = inclass, estimator = "632plus",
  pFUN = list(list(model = lm)), cFUN = classify)
@
Because the subdivision of variables and the formula describe the modeling
between explanatory and intermediate variables only, the class membership
variable must be supplied as well. Hence, in contrast to the function
\texttt{inclass}, the data set \texttt{GlaucomaMVF} used in
\texttt{errorest} must contain explanatory, intermediate and response
variables.

Sometimes it may be necessary to reduce the number of predictors before
training a classifier. Estimating the error rate after the variable
selection leads to biased estimates of the misclassification error and
therefore one should estimate the error rate of the whole procedure.
Within the \texttt{errorest} framework, this can be done as follows.
First, we define a function which does both variable selection and
training of the classifier. For illustration purposes, we select the
predictors by comparing their univariate $P$-values of a two-sample
$t$-test with a prespecified level and train an LDA using the selected
variables only.
<<varsel-def>>=
mymod <- function(formula, data, level=0.05) {
  # select all predictors that are associated with an
  # univariate t.test p-value of less than level
  sel <- which(lapply(data, function(x) {
    if (!is.numeric(x))
      return(1)
    else
      return(t.test(x ~ data$Class)$p.value)
    }) < level)
  # make sure that the response is still there
  sel <- c(which(colnames(data) %in% "Class"), sel)
  # compute a LDA using the selected predictors only
  mod <- lda(formula, data=data[,sel])
  # and return a function for prediction
  function(newdata) {
    predict(mod, newdata=newdata[,sel])$class
  }
}
@
Note that \texttt{mymod} does not return an object of class \texttt{lda}
but a function with argument \texttt{newdata} only. Thanks to lexical
scoping, this function is used for computing predicted classes instead of
a function \texttt{predict} passed to \texttt{errorest} as argument.
Computing a $5$-fold cross-validated error rate estimator is now
approximately a one-liner.
<<varsel-comp>>=
errorest(Class ~ . , data=GlaucomaM, model=mymod, estimator = "cv",
est.para=control.errorest(k=5))
@
%%To summarize the performance of the different classification
%%techniques in the considered example of glaucoma diagnosis, the 10-fold
%%cross-validated error estimator delivers the
%%results given in Table \ref{tenf}.
%%\begin{figure}
%%\begin{center}
%%\begin{tabular}{ rrr }
%%\hline
%%dataset & method & error estimate \\
%%\hline
%%\texttt{GlaucomaM} & {\sl slda} & 0.168 \\
%%\texttt{GlaucomaM} & {\sl bagging} & 0.158 \\
%%\texttt{GlaucomaM} & {\sl double-bagging} & 0.153 \\
%%\texttt{GlaucomaMVF} & {\sl inclass-bagging} & 0.206 \\
%%\texttt{GlaucomaMVF} & {\sl inclass-lm} & 0.229 \\
%%\hline
%%\end{tabular}
%%\caption{10-fold cross-validated error estimation of
%%the misclassification error for several classification
%%methods: {\sl slda} - stabilised linear discriminant analysis,
%%{\sl bagging} - bagging with 50 bootstrap samples,
%%{\sl double-bagging} - bagging with 50 bootstrap samples,
%%combined with sLDA, {\sl inclass-bagging} -
%%indirect classification using bagging,
%%{\sl inclass-lm} - indirect classification using
%%linear modeling. \label{tenf}}
%%\end{center}
%%\end{figure}

%%Note that an estimator of the variance is available for the ordinary
%%bootstrap estimator (\texttt{estimator="boot"}) only, see \cite{efron:1997}.

\section{Summary}

\ipred tries to implement a unified interface to some recent developments
in classification and error rate estimation. It is by no means finished or
perfect and we very much appreciate comments, suggestions and criticism.
Currently, the major drawback is speed. Calling \texttt{rpart} $50$ times,
once for each bootstrap sample, is relatively inefficient, but the design
of interfaces was our main focus instead of optimization. Besides the
examples shown, \texttt{bagging} can be used to compute bagging for
regression trees and \texttt{errorest} computes estimators of the mean
squared error for regression models.

\bibliographystyle{plainnat}
\bibliography{ipred}

\end{document}
ipred/inst/doc/ipred-examples.R0000644000176200001440000001121414646200411016140 0ustar liggesusers
### R code from vignette source 'ipred-examples.Rnw'

###################################################
### code chunk number 1: preliminaries
###################################################
options(prompt=">", width=50)
set.seed(210477)


###################################################
### code chunk number 2: bagging
###################################################
library("ipred")
library("rpart")
library("MASS")
data("GlaucomaM", package="TH.data")
gbag <- bagging(Class ~ ., data = GlaucomaM, coob=TRUE)


###################################################
### code chunk number 3: print-bagging
###################################################
print(gbag)


###################################################
### code chunk number 4: double-bagging
###################################################
scomb <- list(list(model=slda, predict=function(object, newdata)
predict(object, newdata)$x))
gbagc <- bagging(Class ~ ., data = GlaucomaM, comb=scomb)


###################################################
### code chunk number 5: predict.bagging
###################################################
predict(gbagc, newdata=GlaucomaM[c(1:3, 99:102), ])


###################################################
### code chunk number 6: indirect.formula
###################################################
data("GlaucomaMVF", package="ipred")
GlaucomaMVF <- GlaucomaMVF[,-63]
formula.indirect <- Class~clv + lora + cs ~ .
###################################################
### code chunk number 7: indirect.fit
###################################################
classify <- function (data) {
  attach(data)
  res <- ifelse((!is.na(clv) & !is.na(lora) & clv >= 5.1 &
                 lora >= 49.23372) |
                (!is.na(clv) & !is.na(lora) & !is.na(cs) &
                 clv < 5.1 & lora >= 58.55409 & cs < 1.405) |
                (is.na(clv) & !is.na(lora) & !is.na(cs) &
                 lora >= 58.55409 & cs < 1.405) |
                (!is.na(clv) & is.na(lora) & cs < 1.405), 0, 1)
  detach(data)
  factor(res, labels = c("glaucoma", "normal"))
}
fit <- inclass(formula.indirect, pFUN = list(list(model = lm)),
  cFUN = classify, data = GlaucomaMVF)


###################################################
### code chunk number 8: print.indirect
###################################################
print(fit)


###################################################
### code chunk number 9: predict.indirect
###################################################
predict(object = fit, newdata = GlaucomaMVF[c(1:3, 86:88),])


###################################################
### code chunk number 10: bagging.indirect
###################################################
mypredict.rpart <- function(object, newdata) {
  RES <- predict(object, newdata)
  RET <- rep(NA, nrow(newdata))
  NAMES <- rownames(newdata)
  RET[NAMES %in% names(RES)] <- RES[NAMES[NAMES %in% names(RES)]]
  RET
}
fit <- inbagg(formula.indirect, pFUN = list(list(model = rpart,
  predict = mypredict.rpart)), cFUN = classify, nbagg = 25,
  data = GlaucomaMVF)


###################################################
### code chunk number 11: plda
###################################################
mypredict.lda <- function(object, newdata){
  predict(object, newdata = newdata)$class
}


###################################################
### code chunk number 12: cvlda
###################################################
errorest(Class ~ ., data= GlaucomaM,
  model=lda, estimator = "cv", predict= mypredict.lda)


###################################################
### code chunk number 13: cvindirect
###################################################
errorest(formula.indirect, data = GlaucomaMVF,
  model = inclass, estimator = "632plus",
  pFUN = list(list(model = lm)), cFUN = classify)


###################################################
### code chunk number 14: varsel-def
###################################################
mymod <- function(formula, data, level=0.05) {
  # select all predictors that are associated with an
  # univariate t.test p-value of less than level
  sel <- which(lapply(data, function(x) {
    if (!is.numeric(x))
      return(1)
    else
      return(t.test(x ~ data$Class)$p.value)
    }) < level)
  # make sure that the response is still there
  sel <- c(which(colnames(data) %in% "Class"), sel)
  # compute a LDA using the selected predictors only
  mod <- lda(formula, data=data[,sel])
  # and return a function for prediction
  function(newdata) {
    predict(mod, newdata=newdata[,sel])$class
  }
}


###################################################
### code chunk number 15: varsel-comp
###################################################
errorest(Class ~ . , data=GlaucomaM, model=mymod, estimator = "cv",
est.para=control.errorest(k=5))
ipred/inst/doc/ipred-examples.pdf0000644000176200001440000022747514646200411016530 0ustar liggesusers%PDF-1.5 %¿÷¢þ 1 0 obj << /Type /ObjStm /Length 4231 /Filter /FlateDecode /N 95 /First 787 >> stream xœÅ\[sÛ6~ß_·m'S÷
%ãL1ë˜fÒ f˜r†Yf¼eŽY­˜gÎ X@[pü9Ï„`B*&”µøaÂHœ Ë‰€&|@¿e’{œ8&eìdR)“F¢“3é NSÜâD2¥pŸTLyŽ~Í´¤ë†i«ñ°e:h<ç˜ÑçžLf9QœYáð°`VáDILÁ£3´Z3 l=G?¦Œe “@^9æ¤ ”9 ÈT`ÎxÎ4gÎj *@Õ’y"YdÀ¹fÞ`Ú0ï·úÅ A5 Œ ¤Yp˜œá,äA?Ž3šqȆˆk=Í  È$8ž-<íÐ0—À‰i„”Œc¤¦igt=YZzæppZ¼cŸÇËKv ØóyqŽË¯Š¿?Ïæ£E„ùz6º ÜÑ|6º€÷üè=¿œ-–‹á||µ„ˆe¸çäúËá2‚</'µÊìÏ®§KRöüÕ£þÖ¦ëã/Ô d<ØD [^Kg`N<¨Ô™(]¥ÃûŠj‰ͦÓÙ’F’ Jƒ¨H¥±T‚§Ò *!¥t•Ø¥|5Èþlº,¦€R8;žÌ¾`n‰¤¤[sÜYñô¸XÌ®çCàFˆ ™…ÄûùlxR,!?zz€‰_–ðøñúœšD´1ž/–¬§ÃÚ­‹O-aåƒO‹Å²¦yþîƒÖ&sì’™P”éõdB¨¿! *ìÒ4J9M<ýù¼8?ç\ÎmÁ¹ÓøJ´Ñg©mpT©ß†²¿xÜáŠI 0‰Fw‰îMdcî@ä’V~­¬0™ð¦E+!6ëh^|ª8Ô$œA†øŠ±"‘FÔ·P :€{“Èò»“¨TÛ–<—)gÛ4R[i6ÑH&úXW~Ka³®K›ÈoåúˆûÓG݃>r}$w™uªM³•>•xmÔ>[jYÔÀ5ÍÃ)×jMÐTúFBÚò¨Êk7hªK¸¹„”K2í’TºMR)ïMu©Ô›¤’«LsѦºÛNuµ‰ê&Qº²wt$J¹FISRß”T¯¨;ÚIí5ò©û“ïv¯\Z[B«-„vm‘ØD1³bF•Fn´F¢`¨gþd°("ÖùñÁÞË'ÇNNOáOy–?›g£ñô‚ågãéÞt1^uÄpÿr0V0Nˆ œVKcº RAþÉ2N›&/*¤±ÏÆ£åe4·äG¦Ïªµ~N¿ïk=Y #) 1pÛ®^ã‹ËêCýÈGy‘ó«|þc$t|â'Ä1~2¸X;yR1<^ÔðzÐûtñ`<)È·/O]o‹ „{ ïq<Ü›^€ ÎëñbªÅ9yŒz²,>¾%÷{kôËß•ÓÐp¥;|:xqp¼†áŽ_ß‚OXÇÚÃHÑa“ïÉ&ÍþöŠÖêc[çÛ>PøIÇÔ³ÉEï“‚ ºcX qE<¢NG(¬ …7ëBñ2?ʇùp6™MKù˜ÏHHòE¾Ì?­‹Š@°±UTè¢Ó!ŽÖÓ•ïv‰Š"!N¢"DYQ]Yyòâä÷ƒ“4'jÔ¢J;·¦É¶+!ª-! |? QJÆoÉ×|Õj ›f¢ëéå&¹Pè{zÂ!2•Xú’ŒK AêóMw¯>±£ƒî"¸b”¨°¸nÁ5çÓ5…>{ ½hÅ'œŠwRˆ ï9>¯‰±Xi7) N7j-ë£ÂÀUœÎ4±í)õA÷•ש?« «>UÃÄ/Ál »3b]ØÓi-ì{ù“|?š?Ëòçù ÿ/ùaþ:“ÿ 58ÎOòÓü÷ü,äj¥Î>~DÕˆ ób:,.óóüœþÆyüûTäçXó‹ü2G€{YL¡HÿÍÿÌ'P§i>E­šá÷*¿¢ekRœ/Sk¡"&ÏFùÕäz‘ÿ•ÿu=[£“xWu’n$½\Ÿ0Àbüº¼œE¾ü<˯óOùçüKþwþ¿üÅ|Ö°ò†Wª«šûÂÍx+˜Œäj)¯m+oG™v*¯ªí¼ 6*oG_÷ßgC#}[#;²38+nôød­‘³ù¨˜—±j4ûñ„,Ýû:¸,¶1¸ >3´úI2oÂ*¶|)¬.£'2K>‡ÈdòI2 sn”΄“¯Uôp<ý³Â8&/:™ Ÿˆã7%-îŸm œÝ>êþft !Vքú“y§oG¸»#g¬!>mCÈa(pÊ(¬ÏÀ(ç3Z9§3*VÈQêȈV2ó”sÙˆœð2£\†¸ 1†²à.p£D ‹›Ôà"b… 7«3®¾nAgŽ»mts!³ð²¥å—Tí`®Æª/³ ÝÃ"ý̨<^!§„È”ú^¸å³°žmjƒñ™¦2º†áÁ4ç™nÆd*e¦7ÍmæHÒ*Ü  æv¨ÓƒrÉÕÉAÞpÂèâÊ KQøÊQJ¾1¸¨ýš®/S¹2ÖX‚N¦“i'âXM_¦ª/­À ·™Á—µI´rž¼Îyö…Ÿ½{ýü÷7ìäCC•T[Â].¶óV½Ã`çÖƒ‰íç6¸ÐªéB«† }A;Šyòw?^O–ã«I#ÊméšE M0[\m.µ·;]3†òÚ=9óæíþþ ðúe“3¶Í™ IgÝIQÀôL:;¿}yÿQËï1qƒ‚^¥*ׇ2xæg–SÒA§ë†(®h‡•£gœ0‚)ÝL<7ªG1qB  D7“b±(Óe3G rnfÌŽ aÆŒ†mI‡lKG‹y}¥CÞ*©Eålïôù«³8ZßBEp]™é$z*LJ6Ù.ßU¸«’`Ûä&OãiÍSâè ¤p³Y:¨;Žïj6-³jëT›uMböäfuÆ=;=yõì(ŽåîÁ¸n!°/ãl,"±qeEÈØÄ8£è›êƉïÉ8Z7»Œë›ÄìÉ8*VÝqOŽÞ=}þîÑáË7Ϛƹ²«zO—_¦[4=ù78ÞþsÇǾ1¸¦#G¾¹¼ûÆò>©ðãòVùÀ‹¦íÞR‚ÑxS%‰Gx-Aê”[Ìík¼Í¶äî½#Å"à­±¼ ™ˆ•Éà¶+<³H”0ß<$Ý›æÞÙ 7Š­¹ú*¸%ëô~ÓV6x/™§ý±H «·†Ë&3šöíºŒê”v´×W#³}c«m1œcÍ«¶aºº«¬®ï ´:µMÜý\¶3éÙî¶„£·§gϜܪ ºFè~¥€ÝÉñnšüv©÷n27 ^ß³½çáKÝ;n[X¯-§ò@USŽÅplƒé¨U&Ìcy ¨ EÊä÷®Œf“ €Ôê\_†“ÁÇVM¡ ¿ºµ…ÚûŽáôôúã :¾˜ÞXk ºì¨9ÜT©Þ^qvÃ*A…K6”D§NÝÑÄõu"ú5ÛŠAÜÆçøFy%DÆé ¯2åLŽð¿D lëçã`‘ãë3nÖeôjÆmpë‘Üji 5›I¯»¯‰´ |å®ZÚŽ"²àâþLS}ÝêÌ»ož{ß9’«r–²á·Ã­é;ä^íuN™MW,Q.Z÷Ùë\m‡ý>¬*då†>ÀMx‘yM •Ìˇ.Âà6jÔ¤™”öV¸5a]ï¶8A;„"MÀ%mubƒlÜÿ}'¿šlÜß=•‚ƒ=D4›qxkRø˜ò§Ç÷/nÞà×qA/6"ö ö¼É4½&ÿ>fê·J©°ÀcjÔ8Ì¿¯àv‰Ú3ß;<9ûãˆüÃ[xæºë™;ÝòÌMßmÝ´’ˆ¹Ú•YíÓ¬Ú‡Óº†Ãi¥u·«QýXw©|vSá÷8´ñÓø¹åQ…¶GÕ¡`ßâ"ïÚ|{›Qî¶5 Ó)¾¹ùv0ãô2(Ïœ¦×zyF[ô9c2Úú_#çuFüüNÈÝT¨æ°¯’IN›KüÏ̺sÓöíÓ)»qS0ŠMJÜ4Ê«ÛávKï,?¦7ÛÕºÍ[­¹U%ù>¯ñÜeÍíaŸwã}ÿwæ¼þšx¯;D5æ+„2õ*u²vòÛ[Ú(÷²ýªBUâܵõÙw³F}_U0¦zÙdýãbÍHs¿Ô¦ T™î¶åÍ_`v‚Ç—CVý,v–´/???s„Í ×~¾_•p°ôÕŽšˆüú^›¯4meIQK&m3Üÿob2endstream endobj 97 0 obj << /Subtype /XML /Type /Metadata /Length 1388 >> stream GPL Ghostscript 10.00.0 2024-07-18T13:48:25+02:00 2024-07-18T13:48:25+02:00 LaTeX with hyperref endstream endobj 98 0 obj << /Filter /FlateDecode /Length 2134 >> stream xœ}XK“ä¶ ¾Ï)>g}d§¶;¢øåÛnÊU¶+Çéª<9°%N7m=z)õŽ'¿>HJê­µk-‰$@>óiWù®À¿ôÛôOÅîòôé‰Ó×]úiúÝÇÓÓßæõŽWÇ’+¹;½<Å#|Wü(D½Óð[T»Sÿô óûâXYU±Ûæ9lž=—šWеë÷ÿž~U%ߪRühJe@Ý©éßîVk8øC'¥®êšÝ¸/õ±Ö…bŸñ©ñ̵ûƒàå±65û)¸ÖoÔ6ó¸?àùRI©Y˜öI»ÓuQsÔ~àð"ÅîPHU›x‰ÓÕOpîQ+6]Ç0çWÎz;ìKs,¸®ÙÝv¸PÃu$[ÎÃ®ÎÆM²bŸ}÷†+†í„W†GXQlÒs¡ÙOd^mÀw³ ‹$¯ùY0Û£ gVEy~ÞgaÌí*lp®ö8HßÀ©šK œÆÞë8¢®˜ï³{kÁѽ(±÷öLE«KÁæé˜=¨v\…Ô%zP€ñèË>þö ã{ôh7úaØ£|Uq66$ IIa!½à¥–"$¤1Z±ûö¥™=z?¨JTòk±ƒkJ²(ª}aÐ%k:;Mþ]c£úÌKDѹsý·ÈðJ³÷è4AN›¯. 
ipred/inst/NEWS

# $Id: CHANGES,v 1.48 2009/09/09 15:40:28 hothorn Exp $

0.9-15 (18.07.2024)
    Rd link problem

0.9-14 (09.03.2023)
    S3 registration

0.9-13 (02.06.2022)
    no need to escape &

0.9-12 (15.09.2021)
    predict(<classbagg>, newdata) returned factors with set of
    levels depending on newdata, spotted by Max Kuhn

0.9-11 (12.03.2021)
    suggest party

0.9-10 (03.03.2021)
    suggest randomForest
    RNGversion("3.5.3")

0.9-9 (29.04.2019)
    RNGversion("3.5.3")
    inbagg potentially treated the y ~ . case incorrectly

0.9-8 (05.11.2018)
    test output update

0.9-7 (17.08.2018)
    sbrier returned incorrect answers for unseen time points;
    reported and fixed by Marvin Wright

0.9-6 (01.03.2017)
    register C routines

0.9-5 (28.07.2015)
    fix NAMESPACE

0.9-4 (20.02.2015)
    register predict.ipredknn

0.9-3 (20.12.2013)
    use trapezoid rule to compute integrated Brier score in sbrier

0.9-2 (02.09.2013)
    NAMESPACE issues, TH.data

0.9-0 (22.10.2012)
    Due to interface changes in rpart 3.1-55, the bagging function
    had to be rewritten. Results of previous versions are not
    exactly reproducible.

0.8-13 (21.02.2012)
    import(survival)

0.8-12 (20.02.2012)
    use prodlim to compute censoring distributions in sbrier
    (makes a difference for tied survival times)
    GPL (>= 2) and no require in .onLoad

0.8-11 (08.02.2011)
    depends R >= 2.10

0.8-10 (02.02.2011)
    compress data files

0.8-9 (27.01.2011)
    fix nrow problem in sbrier, spotted by Phil Boonstra
    avoid partial matches of function arguments

0.8-8 (09.09.2009)
    documentation fixes

0.8-7 (27.03.2009)
    survival fixes

0.8-6 (28.07.2008)
    make R devel happy ($ is no longer allowed)

0.8-4 (09.10.2007)
    change maintainer

0.8-3 (29.06.2005)
    terms(formula, data) needs `data' argument (suggested by BDR).

0.8-2 (09.12.2004)
    - slda: correct for one explanatory variable:
      ewp <- svd(solve(diag(diag(Snull), ncol = ncol(Snull)))%*%Snull)
                       ^^^^^^^^^^^^^

0.8-1 (25.11.2004)
    - change #!/bin/bash -> #!/bin/sh

0.8-0 (02.06.2004)
    - correction of NAMESPACES

0.7-9 (13.05.2004)
    - description file, insert suggests: mvtnorm

0.7-8 (21.04.2004)
    - don't run selected examples and ipred-tests.R

0.7-7 (02.02.2004)
    - return predicted values for error estimations "boot" and
      "632plus" if required
    - optional argument determining which observations are included
      in each sample within 'errorest'
    - "boot" and "632plus" can be computed simultaneously

0.7-6 (16.01.2004)
    fix tests/ipred-segfault

0.7-5 (19.12.2003)
    examples of inbagg and predict.inbagg (don't use mvtnorm)

0.7-4 (16.12.2003)
    R-1.9.0 fixes

0.7-3 (03.11.2003)
    fix documentation bugs found by `codoc'

0.7-2 (29.10.2003)
    `rpart' is sensitive to compilers / optimization flags: the
    results we compare `ipred's tests with are produced with an
    optimized version of `rpart' (gcc -O2).
    `eigen' in `slda' replaced by `svd'

0.7-1 (08.08.2003)
    adapt to changes in R-devel and lda (package MASS)

0.7-0 (08.08.2003)
    add namespaces

0.6-15 (----)
    new argument "getmodels=TRUE" to cv: the returned object has an
    element "models", a list which contains the models for each fold.
    new interface for inclass and adding method inbagg.

0.6-14 (13.03.2003)
    clean up bagging.Rd

0.6-12 (12.03.2003)
    methods for "integer" for the generics "bagging", "cv" and
    "bootest" do not call methods to generics directly, since they
    may be hidden (because not exported: predict.lda)

0.6-11 (05.03.2003)
    632plus was false when the no-information error rate was less
    than the raw bootstrap estimator (eq. 29 was used instead of
    eq. 32 in Efron & Tibshirani, 1997). Thanks to Ramon Diaz for
    reporting.
    changed the RNGkind to
        RNGkind("Wichmann-Hill", "Kinderman-Ramage")
    or
        RNGversion("1.6.2")
    making the regression tests pass R CMD check with R-devel (1.7.0)
    ipred is allowed to import rpart.{anova, class, exp, poisson,
    matrix} from package rpart, thanks to BDR.
0.6-9 (25.02.2003)
    the terms attribute of data in errorest.data.frame may cause
    problems with some predict methods -> deleted

0.6-7 (17.02.2003)
    use a formula / data framework in cv and bootest. "model" now
    deals with the original variable names (and formula) instead of
    "y" and "X". "model" is now allowed to return a function with a
    newdata argument for prediction. This is especially useful for
    estimating the error of both variable selection and model
    building simultaneously; the vignette gives a simple example.
    cv.numeric and bootest.numeric were broken and gave faulty
    estimates of MSE, both problems fixed
    if the maximum of votes for any class is not unique, the class
    is chosen at random in predict.classbagg now. Formerly, the
    class with the lowest level was chosen by mistake.

0.6-6 (06.02.2003)
    fixes required by attached "methods" package

0.6-4 (18.12.2002)
    R CMD build problems

0.6-3 (03.12.2002)
    cv in errorest faultily used all observations for estimating
    the error, which led to overly optimistic results

0.6-2 (18.10.2002)
    documentation updates and copyright status added

0.6-1 (02.10.2002)
    documentation fixes

0.6-0 (27.09.2002)
    added vignette
    documentation updates

0.5-7 (23.09.2002)
    add internal functions irpart and predict.irpart for speeding
    up standard bagging
    use error.control for the specification of control parameters
    cv can be used to calculate an "honest" prediction for each
    observation

0.5-6 (12.09.2002)
    code factors in GBSG2 data as factors. Documentation update.
    Add keepX argument to ipredbagg

0.5-5 (10.09.2002)
    set rpart.control(..., xval=0) by default

0.5-4 (05.08.2002)
    added k-NN with formula interface and stabilized LDA

0.5-3 (01.08.2002)
    use rpart.control() for regression and survival
    new documentation for bagging and friends

0.5-2 (30.07.2002)
    new low-level functions cv and bootest for error rate
    estimators (misclassification, mse, brier score)

0.5-1 (25.07.2002)
    bagging code completely rewritten

0.4-6 (27.06.2002)
    out-of-bag error for regression trees fixed.

0.4-5 (17.06.2002)
    use "minsplit = 2" in `rpart.control' passed to `bagging'

0.4-4 (17.05.2002)
    use predict.lda in bagging and predict.bagging
    bagging(..., method="double") did not work for factors.

0.4-3 (07.05.2002)
    bugfix in bagging (in models with one regressor), changes in
    documentation of errorest

0.4-2 (10.04.2002)
    predict.bagging much faster, OOB much faster

0.4-1 (08.04.2002)
    bugfix in print.inclass, predict.inclass

0.4-0 (26.03.2002)
    pre-release for CRAN/devel

ipred/inst/COPYRIGHTS

COPYRIGHT STATUS
----------------

The bulk of this code is

  Copyright (C) 2002-2012 Andrea Peters and Torsten Hothorn

except the code in

  .R/irpart.R
  .R/predict.irpart.R

which are modifications from the files rpart.s and predict.rpart.s
from package `rpart', version 3.1-8, which is

  Copyright (C) 2000 Mayo Foundation for Medical Education and Research

with modifications for R by Brian D. Ripley.

All code is subject to the GNU General Public License, Version 2. See
the file COPYING for the exact conditions under which you may
redistribute it.
ipred/build/
ipred/build/vignette.rds

ipred/man/
ipred/man/inbagg.Rd

\name{inbagg}
\alias{inbagg}
\alias{inbagg.default}
\alias{inbagg.data.frame}
\title{Indirect Bagging}
\description{
Function to perform the indirect bagging and subagging.
}
\usage{
\method{inbagg}{data.frame}(formula, data, pFUN=NULL,
  cFUN=list(model = NULL, predict = NULL, training.set = NULL),
  nbagg = 25, ns = 0.5, replace = FALSE, ...)
}
\arguments{
\item{formula}{formula. A \code{formula} specified as
\code{y~w1+w2+w3~x1+x2+x3} describes how to model the intermediate
variables \code{w1, w2, w3} and the response variable \code{y}, if no
other formula is specified by the elements of \code{pFUN} or in
\code{cFUN}.}
\item{data}{data frame of explanatory, intermediate and response
variables.}
\item{pFUN}{list of lists, which describe models for the intermediate
variables; details are given below.}
\item{cFUN}{either a fixed function with argument \code{newdata} and
returning the class membership by default, or a list specifying a
classifying model, similar to one element of \code{pFUN}. Details are
given below.}
\item{nbagg}{number of bootstrap samples.}
\item{ns}{proportion of sample to be drawn from the learning sample. By
default, subagging with 50\% is performed, i.e. draw 0.5*n out of n
without replacement.}
\item{replace}{logical. Draw with or without replacement.}
\item{\dots}{additional arguments (e.g. \code{subset}).}
}
\details{
A given data set is subdivided into three types of variables:
explanatory, intermediate and response variables.\cr

Here, each specified intermediate variable is modelled separately
following \code{pFUN}, a list of lists with elements specifying an
arbitrary number of models for the intermediate variables and an optional
element \code{training.set = c("oob", "bag", "all")}. The element
\code{training.set} determines whether predictive models for the
intermediates are calculated based on the out-of-bag sample
(\code{"oob"}), the default, on the bag sample (\code{"bag"}) or on all
available observations (\code{"all"}). The elements of \code{pFUN},
specifying the models for the intermediate variables, are lists as
described in \code{\link{inclass}}. Note that, if no formula is given in
these elements, the functional relationship of \code{formula} is used.\cr

The response variable is modelled following \code{cFUN}. This can either
be a fixed classifying function as described in Peters et al. (2003) or a
list, which specifies the modelling technique to be applied. The list
contains the arguments \code{model} (which model to be fitted),
\code{predict} (optional, how to predict), \code{formula} (optional, of
type \code{y~w1+w2+w3+x1+x2}, determines the variables the classifying
function is based on) and the optional argument
\code{training.set = c("fitted.bag", "original", "fitted.subset")}
specifying whether the classifying function is trained on the predicted
observations of the bag sample (\code{"fitted.bag"}), on the original
observations (\code{"original"}) or on the predicted observations not
included in a defined subset (\code{"fitted.subset"}). Per default the
formula specified in \code{formula} determines the variables the
classifying function is based on.\cr

Note that the default of \code{cFUN = list(model = NULL, training.set =
"fitted.bag")} uses the function \code{\link[rpart]{rpart}} and the
predict function \code{predict(object, newdata, type = "class")}.
}
\value{
An object of class \code{"inbagg"}, that is a list with elements
\item{mtrees}{a list of length \code{nbagg}, describing the prediction
models corresponding to each bootstrap sample. Each element of
\code{mtrees} is a list with elements \code{bindx} (observations of the
bag sample), \code{btree} (classifying function of the bag sample) and
\code{bfct} (predictive models for the intermediates of the bag
sample).}
\item{y}{vector of response values.}
\item{W}{data frame of intermediate variables.}
\item{X}{data frame of explanatory variables.}
}
\references{
David J. Hand, Hua Gui Li, Niall M. Adams (2001), Supervised
classification with structured class definitions. \emph{Computational
Statistics & Data Analysis} \bold{36}, 209--225.

Andrea Peters, Berthold Lausen, Georg Michelson and Olaf Gefeller (2003),
Diagnosis of glaucoma by indirect classifiers. \emph{Methods of
Information in Medicine} \bold{1}, 99--103.
}
\seealso{\code{\link[rpart]{rpart}}, \code{\link{bagging}},
\code{\link{lm}}}
\examples{
library("MASS")
library("rpart")
y <- as.factor(sample(1:2, 100, replace = TRUE))
W <- mvrnorm(n = 200, mu = rep(0, 3), Sigma = diag(3))
X <- mvrnorm(n = 200, mu = rep(2, 3), Sigma = diag(3))
colnames(W) <- c("w1", "w2", "w3")
colnames(X) <- c("x1", "x2", "x3")
DATA <- data.frame(y, W, X)

pFUN <- list(list(formula = w1~x1+x2, model = lm, predict = mypredict.lm),
             list(model = rpart))

inbagg(y~w1+w2+w3~x1+x2+x3, data = DATA, pFUN = pFUN)
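
## Alternatively, 'cFUN' may be a fixed classifying function instead of a
## fitted model, as documented in the details section: it is called with
## the data frame of (predicted) intermediate variables as its 'newdata'
## argument and returns the class membership. The rule below is only a
## hypothetical sketch; the cutpoint 0 is made up for illustration.
myclassify <- function(newdata) factor(ifelse(newdata$w1 > 0, 1, 2))
inbagg(y~w1+w2+w3~x1+x2+x3, data = DATA, pFUN = pFUN, cFUN = myclassify)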
}
\keyword{misc}

ipred/man/ipredknn.Rd

\name{ipredknn}
\alias{ipredknn}
\title{k-Nearest Neighbour Classification}
\description{
$k$-nearest neighbour classification with an interface compatible with
\code{\link{bagging}} and \code{\link{errorest}}.
}
\usage{
ipredknn(formula, data, subset, na.action, k=5, \dots)
}
\arguments{
\item{formula}{a formula of the form \code{lhs ~ rhs} where \code{lhs} is
the response variable and \code{rhs} a set of predictors.}
\item{data}{optional data frame containing the variables in the model
formula.}
\item{subset}{optional vector specifying a subset of observations to be
used.}
\item{na.action}{function which indicates what should happen when the
data contain \code{NA}s.}
\item{k}{number of neighbours considered, defaults to 5.}
\item{...}{additional parameters.}
}
\details{
This is a wrapper to \code{\link[class]{knn}} in order to be able to use
k-NN in \code{\link{bagging}} and \code{\link{errorest}}.
}
\value{
An object of class \code{ipredknn}. See \code{\link{predict.ipredknn}}.
}
\examples{
library("mlbench")
learn <- as.data.frame(mlbench.twonorm(300))

mypredict.knn <- function(object, newdata)
  predict.ipredknn(object, newdata, type="class")

errorest(classes ~., data=learn, model=ipredknn, predict=mypredict.knn)
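
## a sketch of a direct fit: the default 5-nearest-neighbour model and
## class predictions for the first few learning observations
fit <- ipredknn(classes ~ ., data=learn, k=5)
mypredict.knn(fit, learn)[1:5]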
} \examples{ library("mlbench") learn <- as.data.frame(mlbench.twonorm(300)) mypredict.knn <- function(object, newdata) predict.ipredknn(object, newdata, type="class") errorest(classes ~., data=learn, model=ipredknn, predict=mypredict.knn) } \keyword{multivariate} ipred/man/summary.inbagg.Rd0000644000176200001440000000101614172231220015332 0ustar liggesusers\name{summary.inbagg} \alias{summary.inbagg} \alias{print.summary.inbagg} \title{Summarising Inbagg} \description{ Summary of inbagg is returned. } \usage{ \method{summary}{inbagg}(object, ...) } \arguments{ \item{object}{an object of class \code{inbagg}.} \item{\dots}{additional arguments.} } \details{ A representation of an indirect bagging model (the intermediate variables, the number of bootstrap samples, the trees) is printed. } \value{ none } \seealso{\code{\link{print.summary.inbagg}}} \keyword{misc} ipred/man/ipred-internal.Rd0000644000176200001440000000034214172231220015325 0ustar liggesusers\name{ipred-internal} \alias{getsurv} \title{Internal ipred functions} \description{ Internal ipred functions. } \usage{ getsurv(obj, times) } \details{ These functions are not to be called by the user. } \keyword{internal} ipred/man/predict.bagging.Rd0000644000176200001440000000612114172231220015440 0ustar liggesusers\name{predict.classbagg} \alias{predict.classbagg} \alias{predict.regbagg} \alias{predict.survbagg} \title{ Predictions from Bagging Trees } \description{ Predict the outcome of a new observation based on multiple trees. } \usage{ \method{predict}{classbagg}(object, newdata=NULL, type=c("class", "prob"), aggregation=c("majority", "average", "weighted"), \dots) \method{predict}{regbagg}(object, newdata=NULL, aggregation=c("average", "weighted"), \dots) \method{predict}{survbagg}(object, newdata=NULL,\dots) } \arguments{ \item{object}{object of classes \code{classbagg}, \code{regbagg} or \code{survbagg}.} \item{newdata}{a data frame of new observations. } \item{type}{character string denoting the type of predicted value returned for classification trees. Either \code{class} (predicted classes are returned) or \code{prob} (estimated class probabilities are returned).} \item{aggregation}{character string specifying how to aggregate, see below.} \item{...}{additional arguments, currently not passed to any function.} } \details{ There are (at least) three different ways to aggregate the predictions of bagging classification trees. Most famous is class majority voting (\code{aggregation="majority"}) where the most frequent class is returned. The second way is choosing the class with maximal averaged class probability (\code{aggregation="average"}). The third method is based on the "aggregated learning sample", introduced by Hothorn et al. (2004) for survival trees. The prediction of a new observation is the majority class, mean or Kaplan-Meier curve of all observations from the learning sample identified by the \code{nbagg} leaves containing the new observation. For regression trees, only averaged or weighted predictions are possible. By default, the out-of-bag estimate is computed if \code{newdata} is NOT specified. Therefore, the predictions of \code{predict(object)} are "honest" in some way (this is not possible for combined models via \code{comb} in \code{\link{bagging}}). If you want to compute predictions for the learning sample itself, use \code{newdata} to specify your data. } \value{ The predicted class or estimated class probabilities are returned for classification trees.
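As a minimal sketch of the aggregation schemes described above (assuming the iris data; not part of the package examples):

library("ipred")
data("iris")
mod <- bagging(Species ~ ., data = iris, nbagg = 10)
# majority voting (the default), averaged class probabilities and
# the "aggregated learning sample" approach
p1 <- predict(mod, newdata = iris, aggregation = "majority")
p2 <- predict(mod, newdata = iris, aggregation = "average")
p3 <- predict(mod, newdata = iris, aggregation = "weighted")
# compare two of the resulting class label vectors
table(p1, p2)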
The predicted endpoint is returned in regression problems and the predicted Kaplan-Meier curve is returned for survival trees. } \references{ Leo Breiman (1996), Bagging Predictors. \emph{Machine Learning} \bold{24}(2), 123--140. Torsten Hothorn, Berthold Lausen, Axel Benner and Martin Radespiel-Troeger (2004), Bagging Survival Trees. \emph{Statistics in Medicine}, \bold{23}(1), 77--91. } \examples{ data("Ionosphere", package = "mlbench") Ionosphere$V2 <- NULL # constant within groups # nbagg = 10 for performance reasons here mod <- bagging(Class ~ ., data=Ionosphere, nbagg=10) # out-of-bag estimate mean(predict(mod) != Ionosphere$Class) # predictions for the first 10 observations predict(mod, newdata=Ionosphere[1:10,]) predict(mod, newdata=Ionosphere[1:10,], type="prob") } \keyword{tree} ipred/man/print.cvclass.Rd0000644000176200001440000000106714172231220015206 0ustar liggesusers\name{print.cvclass} \alias{print.cvclass} \alias{print.cvreg} \alias{print.cvsurv} \alias{print.bootestclass} \alias{print.bootestreg} \alias{print.bootestsurv} \title{Print Method for Error Rate Estimators} \description{ Print objects returned by \code{\link{errorest}} in nice layout. } \usage{ \method{print}{cvclass}(x, digits=4, ...) } \arguments{ \item{x}{an object returned by \code{\link{errorest}}.} \item{digits}{how many digits should be printed.} \item{\dots}{further arguments to be passed to or from methods.} } \value{ none } \keyword{misc} ipred/man/sbrier.Rd0000644000176200001440000001107214172231220013700 0ustar liggesusers\name{sbrier} \alias{sbrier} \title{ Model Fit for Survival Data } \description{ Model fit for survival data: the integrated Brier score for censored observations. } \usage{ sbrier(obj, pred, btime= range(obj[,1])) } \arguments{ \item{obj}{an object of class \code{Surv}.} \item{pred}{predicted values. Either a probability or a list of \code{survfit} objects. } \item{btime}{numeric vector of times, the integrated Brier score is computed if this is of \code{length > 1}. The Brier score at \code{btime} is returned otherwise.} } \details{ There is no obvious criterion of model fit for censored data. The Brier score for censored observations as well as its integrated version were suggested by Graf et al. (1999). The integrated Brier score is always computed over a subset of the interval given by the range of the time slot of the survival object \code{obj}. } \value{ The (integrated) Brier score with attribute \code{time} is returned. } \seealso{ More measures for the validation of predicted survival probabilities are implemented in package \code{pec}. } \references{ Erika Graf, Claudia Schmoor, Willi Sauerbrei and Martin Schumacher (1999), Assessment and comparison of prognostic classification schemes for survival data. \emph{Statistics in Medicine} \bold{18}(17-18), 2529--2545. } \examples{ library("survival") data("DLBCL", package = "ipred") smod <- Surv(DLBCL$time, DLBCL$cens) KM <- survfit(smod ~ 1) # integrated Brier score up to max(DLBCL$time) sbrier(smod, KM) # integrated Brier score up to time=50 sbrier(smod, KM, btime=c(0, 50)) # Brier score for time=50 sbrier(smod, KM, btime=50) # a "real" model: one single survival tree with Intern.
Prognostic Index # and mean gene expression in the first cluster as predictors mod <- bagging(Surv(time, cens) ~ MGEc.1 + IPI, data=DLBCL, nbagg=1) # this is a list of survfit objects (==KM-curves), one for each observation # in DLBCL pred <- predict(mod, newdata=DLBCL) # integrated Brier score up to max(time) sbrier(smod, pred) # Brier score at time=50 sbrier(smod, pred, btime=50) # artificial examples and illustrations cleans <- function(x) { attr(x, "time") <- NULL; names(x) <- NULL; x } n <- 100 time <- rpois(n, 20) cens <- rep(1, n) # checks, Graf et al. page 2536, no censoring at all! # no information: \pi(t) = 0.5 a <- sbrier(Surv(time, cens), rep(0.5, n), time[50]) stopifnot(all.equal(cleans(a),0.25)) # some information: \pi(t) = S(t) n <- 100 time <- 1:100 mod <- survfit(Surv(time, cens) ~ 1) a <- sbrier(Surv(time, cens), rep(list(mod), n)) mymin <- mod$surv * (1 - mod$surv) cleans(a) sum(mymin)/diff(range(time)) # independent of ordering rand <- sample(1:100) b <- sbrier(Surv(time, cens)[rand], rep(list(mod), n)[rand]) stopifnot(all.equal(cleans(a), cleans(b))) \testonly{ # total information: \pi(t | X) known for every obs time <- 1:10 cens <- rep(1,10) pred <- diag(10) pred[upper.tri(pred)] <- 1 diag(pred) <- 0 # # a <- sbrier(Surv(time, cens), pred) # stopifnot(all.equal(a, 0)) # } # 2 groups at different risk time <- c(1:10, 21:30) strata <- c(rep(1, 10), rep(2, 10)) cens <- rep(1, length(time)) # no information about the groups a <- sbrier(Surv(time, cens), survfit(Surv(time, cens) ~ 1)) b <- sbrier(Surv(time, cens), rep(list(survfit(Surv(time, cens) ~1)), 20)) stopifnot(all.equal(a, b)) # risk groups known mod <- survfit(Surv(time, cens) ~ strata) b <- sbrier(Surv(time, cens), c(rep(list(mod[1]), 10), rep(list(mod[2]), 10))) stopifnot(a > b) ### GBSG2 data data("GBSG2", package = "TH.data") thsum <- function(x) { ret <- c(median(x), quantile(x, 0.25), quantile(x,0.75)) names(ret)[1] <- "Median" ret } t(apply(GBSG2[,c("age", "tsize", "pnodes", "progrec", "estrec")], 2, thsum)) table(GBSG2$menostat) table(GBSG2$tgrade) table(GBSG2$horTh) # pooled Kaplan-Meier mod <- survfit(Surv(time, cens) ~ 1, data=GBSG2) # integrated Brier score sbrier(Surv(GBSG2$time, GBSG2$cens), mod) # Brier score at 5 years sbrier(Surv(GBSG2$time, GBSG2$cens), mod, btime=1825) # Nottingham prognostic index GBSG2 <- GBSG2[order(GBSG2$time),] NPI <- 0.2*GBSG2$tsize/10 + 1 + as.integer(GBSG2$tgrade) NPI[NPI < 3.4] <- 1 NPI[NPI >= 3.4 & NPI <=5.4] <- 2 NPI[NPI > 5.4] <- 3 mod <- survfit(Surv(time, cens) ~ NPI, data=GBSG2) plot(mod) pred <- c() survs <- c() for (i in sort(unique(NPI))) survs <- c(survs, getsurv(mod[i], 1825)) for (i in 1:nrow(GBSG2)) pred <- c(pred, survs[NPI[i]]) # Brier score of NPI at t=5 years sbrier(Surv(GBSG2$time, GBSG2$cens), pred, btime=1825) } \keyword{survival} ipred/man/GlaucomaMVF.Rd0000644000176200001440000001214614172231220014516 0ustar liggesusers\name{GlaucomaMVF} \alias{GlaucomaMVF} \non_function{} \title{ Glaucoma Database } \usage{data("GlaucomaMVF")} \description{ The \code{GlaucomaMVF} data has 170 observations in two classes. 66 predictors are derived from a confocal laser scanning image of the optic nerve head, from a visual field test, a fundus photography and a measurement of the intra occular pressure. 
} \format{ This data frame contains the following predictors describing the morphology of the optic nerve head, the visual field, the intra occular pressure and a membership variable: \describe{ \item{ag}{area global.} \item{at}{area temporal.} \item{as}{area superior.} \item{an}{area nasal.} \item{ai}{area inferior.} \item{eag}{effective area global.} \item{eat}{effective area temporal.} \item{eas}{effective area superior.} \item{ean}{effective area nasal.} \item{eai}{effective area inferior.} \item{abrg}{area below reference global.} \item{abrt}{area below reference temporal.} \item{abrs}{area below reference superior.} \item{abrn}{area below reference nasal.} \item{abri}{area below reference inferior.} \item{hic}{height in contour.} \item{mhcg}{mean height contour global.} \item{mhct}{mean height contour temporal.} \item{mhcs}{mean height contour superior.} \item{mhcn}{mean height contour nasal.} \item{mhci}{mean height contour inferior.} \item{phcg}{peak height contour.} \item{phct}{peak height contour temporal.} \item{phcs}{peak height contour superior.} \item{phcn}{peak height contour nasal.} \item{phci}{peak height contour inferior.} \item{hvc}{height variation contour.} \item{vbsg}{volume below surface global.} \item{vbst}{volume below surface temporal.} \item{vbss}{volume below surface superior.} \item{vbsn}{volume below surface nasal.} \item{vbsi}{volume below surface inferior.} \item{vasg}{volume above surface global.} \item{vast}{volume above surface temporal.} \item{vass}{volume above surface superior.} \item{vasn}{volume above surface nasal.} \item{vasi}{volume above surface inferior.} \item{vbrg}{volume below reference global.} \item{vbrt}{volume below reference temporal.} \item{vbrs}{volume below reference superior.} \item{vbrn}{volume below reference nasal.} \item{vbri}{volume below reference inferior.} \item{varg}{volume above reference global.} \item{vart}{volume above reference temporal.} \item{vars}{volume above reference superior.} \item{varn}{volume above reference nasal.} \item{vari}{volume above reference inferior.} \item{mdg}{mean depth global.} \item{mdt}{mean depth temporal.} \item{mds}{mean depth superior.} \item{mdn}{mean depth nasal.} \item{mdi}{mean depth inferior.} \item{tmg}{third moment global.} \item{tmt}{third moment temporal.} \item{tms}{third moment superior.} \item{tmn}{third moment nasal.} \item{tmi}{third moment inferior.} \item{mr}{mean radius.} \item{rnf}{retinal nerve fiber thickness.} \item{mdic}{mean depth in contour.} \item{emd}{effective mean depth.} \item{mv}{mean variability.} \item{tension}{intra occular pressure.} \item{clv}{corrected loss variance, variability of the visual field.} \item{cs}{contrast sensitivity of the visual field.} \item{lora}{loss of rim area, measured by fundus photography.} \item{Class}{a factor with levels \code{glaucoma} and \code{normal}.} } } \details{ Confocal laser images of the eye background are taken with the Heidelberg Retina Tomograph and variables 1-62 are derived. Most of these variables describe either the area or volume in certain parts of the papilla and are measured in four sectors (temporal, superior, nasal and inferior) as well as for the whole papilla (global). The global measurement is, roughly, the sum of the measurements taken in the four sector. The perimeter `Octopus' measures the visual field variables \code{clv} and \code{cs}, stereo optic disks photographs were taken with a telecentric fundus camera and \code{lora} is derived. 
Observations of both groups are matched by age and sex to prevent possible confounding. } \source{ Andrea Peters, Berthold Lausen, Georg Michelson and Olaf Gefeller (2003), Diagnosis of glaucoma by indirect classifiers. \emph{Methods of Information in Medicine} \bold{1}, 99-103. } \note{ \code{GlaucomaMVF} overlaps in some parts with \code{\link[TH.data]{GlaucomaM}}. } \examples{ \dontrun{ data("GlaucomaMVF", package = "ipred") library("rpart") response <- function (data) { attach(data) res <- ifelse((!is.na(clv) & !is.na(lora) & clv >= 5.1 & lora >= 49.23372) | (!is.na(clv) & !is.na(lora) & !is.na(cs) & clv < 5.1 & lora >= 58.55409 & cs < 1.405) | (is.na(clv) & !is.na(lora) & !is.na(cs) & lora >= 58.55409 & cs < 1.405) | (!is.na(clv) & is.na(lora) & cs < 1.405), 0, 1) detach(data) factor (res, labels = c("glaucoma", "normal")) } errorest(Class~clv+lora+cs~., data = GlaucomaMVF, model=inclass, estimator="cv", pFUN = list(list(model = rpart)), cFUN = response) } } \keyword{datasets} ipred/man/inclass.Rd0000644000176200001440000001105414172231220014046 0ustar liggesusers\name{inclass} \alias{inclass} \alias{inclass.default} \alias{inclass.data.frame} \title{Indirect Classification} \description{ A framework for the indirect classification approach. } \usage{ \method{inclass}{data.frame}(formula, data, pFUN = NULL, cFUN = NULL, ...) } \arguments{ \item{formula}{formula. A \code{formula} specified as \code{y~w1+w2+w3~x1+x2+x3} models each intermediate variable \code{w1, w2, w3} by \code{wi~x1+x2+x3} and the response by \code{y~w1+w2+w3} if no other formulas are given in \code{pFUN} or \code{cFUN}.} \item{data}{data frame of explanatory, intermediate and response variables.} \item{pFUN}{list of lists, which describe models for the intermediate variables, see below for details.} \item{cFUN}{either a function or a list which describes the model for the response variable. The function has the argument \code{newdata} only.} \item{\dots}{additional arguments, passed to model fitting of the response variable.} } \details{ A given data set is subdivided into three types of variables: those to be used predicting the class (explanatory variables), those to be used defining the class (intermediate variables) and the class membership variable itself (response variable). Intermediate variables are modelled based on the explanatory variables; the class membership variable is defined on the intermediate variables.\cr Each specified intermediate variable is modelled separately following \code{pFUN} and a formula specified by \code{formula}. \code{pFUN} is a list of lists; the maximum length of \code{pFUN} is the number of intermediate variables. Each element of \code{pFUN} is a list with elements:\cr \code{model} - a function with arguments \code{formula} and \code{data}; \cr \code{predict} - an optional function with arguments \code{object, newdata} only; if \code{predict} is not specified, the predict method of \code{model} is used; \cr \code{formula} - specifies the formula for the corresponding \code{model} (optional), the formula described in \code{y~w1+w2+w3~x1+x2+x3} is used if no other is specified. \cr The response is classified following \code{cFUN}, which is either a fixed function or a list as described below. The determined function \code{cFUN} assigns the intermediate (and explanatory) variables to a certain class membership; the list \code{cFUN} has the elements \code{formula, model, predict} and \code{training.set}.
The elements \code{formula, model, predict} are structured as described by \code{pFUN}, the described model is trained on the original (intermediate variables) if \code{training.set="original"} or if \code{training.set = NULL}, on the fitted values if \code{training.set = "fitted"} or on observations not included in a specified subset if \code{training.set = "subset"}. \cr A list of prediction models corresponding to each intermediate variable, a predictive function for the response, a list of specifications for the intermediate and for the response are returned. \cr For a detailed description on indirect classification see Hand et al. (2001). } \value{ An object of class \code{inclass}, consisting of a list of \item{model.intermediate}{list of fitted models for each intermediate variable.} \item{model.response}{predictive model for the response variable.} \item{para.intermediate}{list, where each element is again a list and specifies the model for each intermediate variable.} \item{para.response}{a list which specifies the model for response variable.} } \references{ David J. Hand, Hua Gui Li, Niall M. Adams (2001), Supervised classification with structured class definitions. \emph{Computational Statistics & Data Analysis} \bold{36}, 209--225. Andrea Peters, Berthold Lausen, Georg Michelson and Olaf Gefeller (2003), Diagnosis of glaucoma by indirect classifiers. \emph{Methods of Information in Medicine} \bold{1}, 99-103. } \seealso{\code{\link{bagging}}, \code{\link{inclass}}} \examples{ data("Smoking", package = "ipred") # Set three groups of variables: # 1) explanatory variables are: TarY, NicY, COY, Sex, Age # 2) intermediate variables are: TVPS, BPNL, COHB # 3) response (resp) is defined by: classify <- function(data){ data <- data[,c("TVPS", "BPNL", "COHB")] res <- t(t(data) > c(4438, 232.5, 58)) res <- as.factor(ifelse(apply(res, 1, sum) > 2, 1, 0)) res } response <- classify(Smoking[ ,c("TVPS", "BPNL", "COHB")]) smoking <- data.frame(Smoking, response) formula <- response~TVPS+BPNL+COHB~TarY+NicY+COY+Sex+Age inclass(formula, data = smoking, pFUN = list(list(model = lm, predict = mypredict.lm)), cFUN = classify) } \keyword{misc} ipred/man/Smoking.Rd0000644000176200001440000000221214231244645014030 0ustar liggesusers\name{Smoking} \alias{Smoking} \non_function{} \title{Smoking Styles} \usage{data("Smoking")} \description{ The \code{Smoking} data frame has 55 rows and 9 columns. } \format{ This data frame contains the following columns: \describe{ \item{NR}{numeric, patient number.} \item{Sex}{factor, sex of patient.} \item{Age}{factor, age group of patient, grouping consisting of those in their twenties, those in their thirties and so on.} \item{TarY}{numeric, tar yields of the cigarettes.} \item{NicY}{numeric, nicotine yields of the cigarettes.} \item{COY}{numeric, carbon monoxide (CO) yield of the cigarettes.} \item{TVPS}{numeric, total volume puffed smoke.} \item{BPNL}{numeric, blood plasma nicotine level.} \item{COHB}{numeric, carboxyhaemoglobin level, i.e. amount of CO absorbed by the blood stream.} } } \details{ The data describes different smoking habits of probands. } \source{ Hand and Taylor (1987), Study F \emph{Smoking Styles}. } \references{ D.J. Hand and C.C. Taylor (1987), \emph{Multivariate analysis of variance and repeated measures.} London: Chapman & Hall, pp. 167--181. 
} \keyword{datasets} ipred/man/errorest.Rd0000644000176200001440000002241214172231220014261 0ustar liggesusers\name{errorest} \alias{errorest} \alias{errorest.data.frame} \alias{errorest.default} \title{ Estimators of Prediction Error } \description{ Resampling based estimates of prediction error: misclassification error, root mean squared error or Brier score for survival data. } \usage{ \method{errorest}{data.frame}(formula, data, subset, na.action=na.omit, model=NULL, predict=NULL, estimator=c("cv", "boot", "632plus"), est.para=control.errorest(), ...) } \arguments{ \item{formula}{a formula of the form \code{lhs ~ rhs}. Either describing the model of explanatory and response variables in the usual way (see \code{\link{lm}}) or the model between explanatory and intermediate variables in the framework of indirect classification, see \code{\link{inclass}}.} \item{data}{a data frame containing the variables in the model formula and additionally the class membership variable if \code{model = inclass}. \code{data} is required for indirect classification, otherwise \code{formula} is evaluated in the calling environment.} \item{subset}{optional vector, specifying a subset of observations to be used.} \item{na.action}{function which indicates what should happen when the data contain \code{NA}s, defaults to \code{\link{na.omit}}.} \item{model}{function. Modelling technique whose error rate is to be estimated. The function \code{model} can either return an object representing a fitted model or a function with argument \code{newdata} which returns predicted values. In this case, the \code{predict} argument to \code{errorest} is ignored.} \item{predict}{function. Prediction method to be used. The vector of predicted values must have the same length as the number of to-be-predicted observations. Predictions corresponding to missing data must be replaced by \code{NA}. Additionally, \code{predict} has to return predicted values comparable to the responses (that is: factors for classification problems). See the examples for how to ensure this for any predictor.} \item{estimator}{estimator of the misclassification error: \code{cv} cross-validation, \code{boot} bootstrap or \code{632plus} bias corrected bootstrap (classification only). } \item{est.para}{a list of additional parameters that control the calculation of the estimator, see \code{\link{control.errorest}} for details.} \item{\dots}{additional parameters to \code{model}.} } \details{ The prediction error for classification and regression models as well as predictive models for censored data using cross-validation or the bootstrap can be computed by \code{errorest}. For classification problems, the estimated misclassification error is returned. The root mean squared error is computed for regression problems and the Brier score for censored data (Graf et al., 1999) is reported if the response is censored. Any model can be specified as long as it is a function with arguments \code{model(formula, data, subset, na.action, ...)}. If a method \code{predict.model(object, newdata, ...)} is available, \code{predict} does not need to be specified. However, \code{predict} has to return predicted values in the same order and of the same length corresponding to the response. See the examples below. $k$-fold cross-validation and the usual bootstrap estimator with \code{est.para$nboot} bootstrap replications can be computed for all kinds of problems.
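For instance, a plain bootstrap estimate of the misclassification error of LDA; a minimal sketch assuming the iris data and \code{lda} from package MASS, analogous to the examples below:

library("ipred")
library("MASS")
# force predict to return class labels only
mypredict.lda <- function(object, newdata)
  predict(object, newdata = newdata)$class
# bootstrap estimator with 25 bootstrap replications
errorest(Species ~ ., data = iris, model = lda, predict = mypredict.lda,
         estimator = "boot", est.para = control.errorest(nboot = 25))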
The bias corrected .632+ bootstrap by Efron and Tibshirani (1997) is available for classification problems only. Use \code{\link{control.errorest}} to specify additional arguments. \code{errorest} is a formula based interface to the generic functions \code{\link{cv}} or \code{\link{bootest}} which implement methods for classification, regression and survival problems. } \value{ The class of the object returned depends on the class of the response variable and the estimator used. In each case, it is a list with an element \code{error} and additional information. \code{print} methods are available for the inspection of the results. } \references{ Brian D. Ripley (1996), \emph{Pattern Recognition and Neural Networks}. Cambridge: Cambridge University Press. Bradley Efron and Robert Tibshirani (1997), Improvements on Cross-Validation: The .632+ Bootstrap Estimator. \emph{Journal of the American Statistical Association} \bold{92}(438), 548--560. Erika Graf, Claudia Schmoor, Willi Sauerbrei and Martin Schumacher (1999), Assessment and comparison of prognostic classification schemes for survival data. \emph{Statistics in Medicine} \bold{18}(17-18), 2529--2545. Rosa A. Schiavo and David J. Hand (2000), Ten More Years of Error Rate Research. \emph{International Statistical Review} \bold{68}(3), 296-310. David J. Hand, Hua Gui Li, Niall M. Adams (2001), Supervised Classification with Structured Class Definitions. \emph{Computational Statistics & Data Analysis} \bold{36}, 209--225. } \examples{ # Classification data("iris") library("MASS") # force predict to return class labels only mypredict.lda <- function(object, newdata) predict(object, newdata = newdata)$class # 10-fold cv of LDA for Iris data errorest(Species ~ ., data=iris, model=lda, estimator = "cv", predict= mypredict.lda) data("PimaIndiansDiabetes", package = "mlbench") \dontrun{ # 632+ bootstrap of LDA for Diabetes data errorest(diabetes ~ ., data=PimaIndiansDiabetes, model=lda, estimator = "632plus", predict= mypredict.lda) } #cv of a fixed partition of the data list.tindx <- list(1:100, 101:200, 201:300, 301:400, 401:500, 501:600, 601:700, 701:768) errorest(diabetes ~ ., data=PimaIndiansDiabetes, model=lda, estimator = "cv", predict = mypredict.lda, est.para = control.errorest(list.tindx = list.tindx)) \dontrun{ #both bootstrap estimations based on fixed partitions list.tindx <- vector(mode = "list", length = 25) for(i in 1:25) { list.tindx[[i]] <- sample(1:768, 768, TRUE) } errorest(diabetes ~ ., data=PimaIndiansDiabetes, model=lda, estimator = c("boot", "632plus"), predict= mypredict.lda, est.para = control.errorest(list.tindx = list.tindx)) } data("Glass", package = "mlbench") # LDA has cross-validated misclassification error of # 38\% (Ripley, 1996, page 98) # Pruned trees about 32\% (Ripley, 1996, page 230) # use stratified sampling here, i.e. preserve the class proportions errorest(Type ~ ., data=Glass, model=lda, predict=mypredict.lda, est.para=control.errorest(strat=TRUE)) # force predict to return class labels mypredict.rpart <- function(object, newdata) predict(object, newdata = newdata,type="class") library("rpart") pruneit <- function(formula, ...) 
prune(rpart(formula, ...), cp = 0.01) errorest(Type ~ ., data=Glass, model=pruneit, predict=mypredict.rpart, est.para=control.errorest(strat=TRUE)) # compute sensitivity and specificity for stabilised LDA data("GlaucomaM", package = "TH.data") error <- errorest(Class ~ ., data=GlaucomaM, model=slda, predict=mypredict.lda, est.para=control.errorest(predictions=TRUE)) # sensitivity mean(error$predictions[GlaucomaM$Class == "glaucoma"] == "glaucoma") # specificity mean(error$predictions[GlaucomaM$Class == "normal"] == "normal") # Indirect Classification: Smoking data data(Smoking) # Set three groups of variables: # 1) explanatory variables are: TarY, NicY, COY, Sex, Age # 2) intermediate variables are: TVPS, BPNL, COHB # 3) response (resp) is defined by: resp <- function(data){ data <- data[, c("TVPS", "BPNL", "COHB")] res <- t(t(data) > c(4438, 232.5, 58)) res <- as.factor(ifelse(apply(res, 1, sum) > 2, 1, 0)) res } response <- resp(Smoking[ ,c("TVPS", "BPNL", "COHB")]) smoking <- cbind(Smoking, response) formula <- response~TVPS+BPNL+COHB~TarY+NicY+COY+Sex+Age # The leave-one-out estimate of the misclassification error is # 36.36\% (Hand et al., 2001), using indirect classification with # linear models \dontrun{ errorest(formula, data = smoking, model = inclass, estimator = "cv", pFUN = list(list(model=lm, predict = mypredict.lm)), cFUN = resp, est.para=control.errorest(k=nrow(smoking))) } # Regression data("BostonHousing", package = "mlbench") # 10-fold cv of lm for Boston Housing data errorest(medv ~ ., data=BostonHousing, model=lm, est.para=control.errorest(random=FALSE)) # the same, with "model" returning a function for prediction # instead of an object of class "lm" mylm <- function(formula, data) { mod <- lm(formula, data) function(newdata) predict(mod, newdata) } errorest(medv ~ ., data=BostonHousing, model=mylm, est.para=control.errorest(random=FALSE)) # Survival data data("GBSG2", package = "TH.data") library("survival") # prediction is fitted Kaplan-Meier predict.survfit <- function(object, newdata) object # 5-fold cv of Kaplan-Meier for GBSG2 study errorest(Surv(time, cens) ~ 1, data=GBSG2, model=survfit, predict=predict.survfit, est.para=control.errorest(k=5)) } \keyword{misc} ipred/man/rsurv.Rd0000644000176200001440000000330414172231220013572 0ustar liggesusers\name{rsurv} \alias{rsurv} \title{ Simulate Survival Data } \description{ Simulation Setup for Survival Data. } \usage{ rsurv(N, model=c("A", "B", "C", "D", "tree"), gamma=NULL, fact=1, pnon=10, gethaz=FALSE) } \arguments{ \item{N}{ number of observations. } \item{model}{ type of model. } \item{gamma}{simulate censoring time as runif(N, 0, gamma). Defaults to \code{NULL} (no censoring).} \item{fact}{scale parameter for \code{model=tree}.} \item{pnon}{number of additional non-informative variables for the tree model.} \item{gethaz}{logical, indicating whether the hazard rate for each observation should be returned.} } \details{ Simulation setup similar to configurations used in LeBlanc and Crowley (1992) or Keles and Segal (2002) as well as a tree model used in Hothorn et al. (2004). See Hothorn et al. (2004) for the details. } \value{ A data frame with elements \code{time}, \code{cens}, \code{X1} ... \code{X5}. If \code{pnon} > 0, additional noninformative covariables are added. If \code{gethaz=TRUE}, the \code{hazard} attribute returns the hazard rates. } \references{ M. LeBlanc and J. Crowley (1992), Relative Risk Trees for Censored Survival Data. \emph{Biometrics} \bold{48}, 411--425. S. Keles and M. R.
Segal (2002), Residual-based tree-structured survival analysis. \emph{Statistics in Medicine}, \bold{21}, 313--326. Torsten Hothorn, Berthold Lausen, Axel Benner and Martin Radespiel-Troeger (2004), Bagging Survival Trees. \emph{Statistics in Medicine}, \bold{23}(1), 77--91. } \examples{ library("survival") # 3*X1 + X2 simdat <- rsurv(500, model="C") coxph(Surv(time, cens) ~ ., data=simdat) } \keyword{survival} ipred/man/prune.bagging.Rd0000644000176200001440000000205014172231220015134 0ustar liggesusers\name{prune.classbagg} \alias{prune.classbagg} \alias{prune.regbagg} \alias{prune.survbagg} \title{ Pruning for Bagging } \description{ Prune each of the trees returned by \code{\link{bagging}}. } \usage{ \method{prune}{classbagg}(tree, cp=0.01,...) } \arguments{ \item{tree}{ an object returned by \code{\link{bagging}} (calling this \code{tree} is needed by the generic function \code{prune} in package \code{rpart}).} \item{cp}{complexity parameter, see \code{\link[rpart]{prune.rpart}}.} \item{...}{additional arguments to \code{\link[rpart]{prune.rpart}}.} } \details{ By default, \code{\link{bagging}} grows classification trees of maximal size. One may want to prune each tree, however, it is not clear whether or not this may decrease prediction error. } \value{ An object of the same class as \code{tree} with the trees pruned. } \examples{ data("Glass", package = "mlbench") library("rpart") mod <- bagging(Type ~ ., data=Glass, nbagg=10, coob=TRUE) pmod <- prune(mod) print(pmod) } \keyword{tree} ipred/man/print.inbagg.Rd0000644000176200001440000000073214172231220014775 0ustar liggesusers\name{print.inbagg} \alias{print.inbagg} \title{Print Method for Inbagg Object} \description{ Print object of class \code{inbagg} in nice layout. } \usage{ \method{print}{inbagg}(x, ...) } \arguments{ \item{x}{object of class \code{inbagg}.} \item{\dots}{additional arguments.} } \details{ An object of class \code{inbagg} is printed. Information about number and names of the intermediate variables, and the number of drawn bootstrap samples is given. } \keyword{misc} ipred/man/predict.inclass.Rd0000644000176200001440000000644414172231220015506 0ustar liggesusers\name{predict.inclass} \alias{predict.inclass} \title{Predictions from an Inclass Object} \description{ Predicts the class membership of new observations through indirect classification. } \usage{ \method{predict}{inclass}(object, newdata, ...) } \arguments{ \item{object}{ object of class \code{inclass}, see \code{\link{inclass}}.} \item{newdata}{data frame to be classified.} \item{\dots}{additional arguments corresponding to the predictive models specified in \code{\link{inclass}}.} } \details{ Predictions of class memberships are calculated. i.e. values of the intermediate variables are predicted and classified following \code{cFUN}, see \code{\link{inclass}}. } \value{ The vector of predicted classes is returned. } \references{ David J. Hand, Hua Gui Li, Niall M. Adams (2001), Supervised classification with structured class definitions. \emph{Computational Statistics & Data Analysis} \bold{36}, 209--225. Andrea Peters, Berthold Lausen, Georg Michelson and Olaf Gefeller (2003), Diagnosis of glaucoma by indirect classifiers. \emph{Methods of Information in Medicine} \bold{1}, 99-103. } \seealso{\code{\link{inclass}}} \examples{ \dontrun{ # Simulation model, classification rule following Hand et al. 
(2001) theta90 <- varset(N = 1000, sigma = 0.1, theta = 90, threshold = 0) dataset <- as.data.frame(cbind(theta90$explanatory, theta90$intermediate)) names(dataset) <- c(colnames(theta90$explanatory), colnames(theta90$intermediate)) classify <- function(Y, threshold = 0) { Y <- Y[,c("y1", "y2")] z <- (Y > threshold) resp <- as.factor(ifelse((z[,1] + z[,2]) > 1, 1, 0)) return(resp) } formula <- response~y1+y2~x1+x2 fit <- inclass(formula, data = dataset, pFUN = list(list(model = lm)), cFUN = classify) predict(object = fit, newdata = dataset) data("Smoking", package = "ipred") # explanatory variables are: TarY, NicY, COY, Sex, Age # intermediate variables are: TVPS, BPNL, COHB # reponse is defined by: classify <- function(data){ data <- data[,c("TVPS", "BPNL", "COHB")] res <- t(t(data) > c(4438, 232.5, 58)) res <- as.factor(ifelse(apply(res, 1, sum) > 2, 1, 0)) res } response <- classify(Smoking[ ,c("TVPS", "BPNL", "COHB")]) smoking <- cbind(Smoking, response) formula <- response~TVPS+BPNL+COHB~TarY+NicY+COY+Sex+Age fit <- inclass(formula, data = smoking, pFUN = list(list(model = lm)), cFUN = classify) predict(object = fit, newdata = smoking) } data("GlaucomaMVF", package = "ipred") library("rpart") glaucoma <- GlaucomaMVF[,(names(GlaucomaMVF) != "tension")] # explanatory variables are derived by laser scanning image and intra occular pressure # intermediate variables are: clv, cs, lora # response is defined by classify <- function (data) { attach(data) res <- ifelse((!is.na(clv) & !is.na(lora) & clv >= 5.1 & lora >= 49.23372) | (!is.na(clv) & !is.na(lora) & !is.na(cs) & clv < 5.1 & lora >= 58.55409 & cs < 1.405) | (is.na(clv) & !is.na(lora) & !is.na(cs) & lora >= 58.55409 & cs < 1.405) | (!is.na(clv) & is.na(lora) & cs < 1.405), 0, 1) detach(data) factor (res, labels = c("glaucoma", "normal")) } fit <- inclass(Class~clv+lora+cs~., data = glaucoma, pFUN = list(list(model = rpart)), cFUN = classify) data("GlaucomaM", package = "TH.data") predict(object = fit, newdata = GlaucomaM) } \keyword{misc} ipred/man/summary.inclass.Rd0000644000176200001440000000105514172231220015542 0ustar liggesusers\name{summary.inclass} \alias{summary.inclass} \alias{print.summary.inclass} \title{Summarising Inclass} \description{ Summary of inclass is returned. } \usage{ \method{summary}{inclass}(object, ...) } \arguments{ \item{object}{an object of class \code{inclass}.} \item{\dots}{additional arguments.} } \details{ A representation of an indirect classification model (the intermediates variables, which modelling technique is used and the prediction model) is printed. } \value{ none } \seealso{\code{\link{print.summary.inclass}}} \keyword{misc} ipred/man/bootest.Rd0000644000176200001440000000371214172231220014073 0ustar liggesusers\name{bootest} \alias{bootest} \alias{bootest.default} \alias{bootest.factor} \alias{bootest.numeric} \alias{bootest.integer} \alias{bootest.Surv} \title{Bootstrap Error Rate Estimators} \description{ Those functions are low-level functions used by \code{\link{errorest}} and are normally not called by users. 
} \usage{ \method{bootest}{factor}(y, formula, data, model, predict, nboot=25, bc632plus=FALSE, list.tindx = NULL, predictions = FALSE, both.boot = FALSE, \dots)} \arguments{ \item{y}{the response variable, either of class \code{factor} (classification), \code{numeric} (regression) or \code{Surv} (survival).} \item{formula}{a formula object.} \item{data}{data frame of predictors and response described in \code{formula}.} \item{model}{a function implementing the predictive model to be evaluated. The function \code{model} can either return an object representing a fitted model or a function with argument \code{newdata} which returns predicted values. In this case, the \code{predict} argument to \code{errorest} is ignored.} \item{predict}{a function with arguments \code{object} and \code{newdata} only which predicts the status of the observations in \code{newdata} based on the fitted model in \code{object}.} \item{nboot}{number of bootstrap replications to be used.} \item{bc632plus}{logical. Should the bias corrected version of misclassification error be computed?} \item{predictions}{logical, return a matrix of predictions. The ith column contains predictions of the ith out-of-bootstrap sample and 'NA's corresponding to the ith bootstrap sample.} \item{list.tindx}{list of numeric vectors, indicating which observations are included in each bootstrap sample.} \item{both.boot}{logical, return both (bootstrap and 632plus) estimations or only one of them.} \item{\dots}{additional arguments to \code{model}.} } \details{ See \code{\link{errorest}}. } \keyword{misc} ipred/man/print.inclass.Rd0000644000176200001440000000077614172231220015212 0ustar liggesusers\name{print.inclass} \alias{print.inclass} \title{Print Method for Inclass Object} \description{ Print object of class \code{inclass} in nice layout. } \usage{ \method{print}{inclass}(x, ...) } \arguments{ \item{x}{object of class \code{inclass}.} \item{\dots}{additional arguments.} } \details{ An object of class \code{inclass} is printed. Information about number and names of the intermediate variables, the used modelling technique and the number of drawn bootstrap samples is given. } \keyword{misc} ipred/man/predict.slda.Rd0000644000176200001440000000144114172231220014765 0ustar liggesusers\name{predict.slda} \alias{predict.slda} \title{ Predictions from Stabilised Linear Discriminant Analysis } \description{ Predict the class of a new observation based on stabilised LDA. } \usage{ \method{predict}{slda}(object, newdata, ...) } \arguments{ \item{object}{object of class \code{slda}.} \item{newdata}{a data frame of new observations. } \item{...}{additional arguments passed to \code{\link[MASS]{predict.lda}}.} } \details{ This function is a method for the generic function \code{\link{predict}} for class \code{slda}. For the details see \code{\link[MASS]{predict.lda}}. } \value{ A list with components \item{class}{the predicted class (a factor).} \item{posterior}{posterior probabilities for the classes.} \item{x}{the scores of test cases.} } \keyword{multivariate} ipred/man/predict.ipredknn.Rd0000644000176200001440000000146714172231220015664 0ustar liggesusers\name{predict.ipredknn} \alias{predict.ipredknn} \title{ Predictions from k-Nearest Neighbors } \description{ Predict the class of a new observation based on k-NN. } \usage{ \method{predict}{ipredknn}(object, newdata, type=c("prob", "class"), ...) } \arguments{ \item{object}{object of class \code{ipredknn}.} \item{newdata}{a data frame of new observations. 
} \item{type}{return either the predicted class or the proportion of the votes for the winning class.} \item{...}{additional arguments.} } \details{ This function is a method for the generic function \code{\link{predict}} for class \code{ipredknn}. For the details see \code{\link[class]{knn}}. } \value{ Either the predicted class or the proportion of the votes for the winning class. } \keyword{multivariate} ipred/man/bagging.Rd0000644000176200001440000002331114646200317014020 0ustar liggesusers\name{bagging} \alias{bagging} \alias{ipredbagg} \alias{ipredbagg.factor} \alias{ipredbagg.integer} \alias{ipredbagg.numeric} \alias{ipredbagg.Surv} \alias{ipredbagg.default} \alias{bagging.data.frame} \alias{bagging.default} \title{Bagging Classification, Regression and Survival Trees } \description{ Bagging for classification, regression and survival trees. } \usage{ \method{ipredbagg}{factor}(y, X=NULL, nbagg=25, control= rpart.control(minsplit=2, cp=0, xval=0), comb=NULL, coob=FALSE, ns=length(y), keepX = TRUE, \dots) \method{ipredbagg}{numeric}(y, X=NULL, nbagg=25, control=rpart.control(xval=0), comb=NULL, coob=FALSE, ns=length(y), keepX = TRUE, \dots) \method{ipredbagg}{Surv}(y, X=NULL, nbagg=25, control=rpart.control(xval=0), comb=NULL, coob=FALSE, ns=dim(y)[1], keepX = TRUE, \dots) \method{bagging}{data.frame}(formula, data, subset, na.action=na.rpart, \dots) } \arguments{ \item{y}{the response variable: either a factor vector of class labels (bagging classification trees), a vector of numerical values (bagging regression trees) or an object of class \code{\link[survival]{Surv}} (bagging survival trees).} \item{X}{a data frame of predictor variables.} \item{nbagg}{an integer giving the number of bootstrap replications. } \item{coob}{a logical indicating whether an out-of-bag estimate of the error rate (misclassification error, root mean squared error or Brier score) should be computed. See \code{\link{predict.classbagg}} for details.} \item{control}{options that control details of the \code{rpart} algorithm, see \code{\link[rpart]{rpart.control}}. It is wise to set \code{xval = 0} in order to save computing time. Note that the default values depend on the class of \code{y}.} \item{comb}{a list of additional models for model combination, see below for some examples. Note that argument \code{method} for double-bagging is no longer there, \code{comb} is much more flexible.} \item{ns}{number of samples to draw from the learning sample. By default, the usual bootstrap n out of n with replacement is performed. If \code{ns} is smaller than \code{length(y)}, subagging (Buehlmann and Yu, 2002), i.e. sampling \code{ns} out of \code{length(y)} without replacement, is performed.} \item{keepX}{a logical indicating whether the data frame of predictors should be returned. Note that the computation of the out-of-bag estimator requires \code{keepX=TRUE}.} \item{formula}{a formula of the form \code{lhs ~ rhs} where \code{lhs} is the response variable and \code{rhs} a set of predictors.} \item{data}{optional data frame containing the variables in the model formula.} \item{subset}{optional vector specifying a subset of observations to be used.} \item{na.action}{function which indicates what should happen when the data contain \code{NA}s.
Defaults to \code{\link[rpart]{na.rpart}}.} \item{...}{additional parameters passed to \code{ipredbagg} or \code{\link[rpart]{rpart}}, respectively.} } \details{ The random forest implementations \code{\link[randomForest]{randomForest}} and \code{\link[party]{cforest}} are more flexible and reliable for computing bootstrap-aggregated trees than this function and should be used instead. Bagging for classification and regression trees was suggested by Breiman (1996a, 1998) in order to stabilise trees. The trees in this function are computed using the implementation in the \code{\link[rpart]{rpart}} package. The generic function \code{ipredbagg} implements methods for different responses. If \code{y} is a factor, classification trees are constructed. For numerical vectors \code{y}, regression trees are aggregated and if \code{y} is a survival object, bagging survival trees (Hothorn et al., 2004) is performed. The function \code{bagging} offers a formula-based interface to \code{ipredbagg}. \code{nbagg} bootstrap samples are drawn and a tree is constructed for each of them. There is no general rule on when to stop the tree growing. The size of the trees can be controlled by the \code{control} argument or \code{\link{prune.classbagg}}. By default, classification trees are as large as possible whereas regression trees and survival trees are built with the standard options of \code{\link[rpart]{rpart.control}}. If \code{nbagg=1}, one single tree is computed for the whole learning sample without bootstrapping. If \code{coob} is TRUE, the out-of-bag sample (Breiman, 1996b) is used to estimate the prediction error corresponding to \code{class(y)}. Alternatively, the out-of-bag sample can be used for model combination; an out-of-bag error rate estimator is not available in this case. Double-bagging (Hothorn and Lausen, 2003) computes an LDA on the out-of-bag sample and uses the discriminant variables as additional predictors for the classification trees. \code{comb} is an optional list of lists with two elements \code{model} and \code{predict}. \code{model} is a function with arguments \code{formula} and \code{data}. \code{predict} is a function with arguments \code{object, newdata} only. If the estimation of the covariance matrix in \code{\link[MASS]{lda}} fails due to a limited out-of-bag sample size, one can use \code{\link{slda}} instead. See the example section for an example of double-bagging. The methodology is not limited to a combination with LDA: bundling (Hothorn and Lausen, 2005) can be used with arbitrary classifiers. NOTE: Up to ipred version 0.9-0, bagging was performed using a modified version of the original rpart function. Due to interface changes in rpart 3.1-55, the bagging function had to be rewritten. Results of previous versions are not exactly reproducible. } \value{ The class of the object returned depends on \code{class(y)}: \code{classbagg, regbagg} and \code{survbagg}.
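A minimal sketch of the \code{ipredbagg} interface described in the details (assuming the iris data; \code{bagging} is the equivalent formula-based call):

library("ipred")
data("iris")
# response vector plus a data frame of predictors ...
mod1 <- ipredbagg(iris$Species, X = iris[, 1:4], nbagg = 25, coob = TRUE)
# ... is equivalent to the formula interface
mod2 <- bagging(Species ~ ., data = iris, nbagg = 25, coob = TRUE)
print(mod1)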
Each is a list with elements \item{y}{the vector of responses.} \item{X}{the data frame of predictors.} \item{mtrees}{multiple trees: a list of length \code{nbagg} containing the trees (and possibly additional objects) for each bootstrap sample.} \item{OOB}{logical whether the out-of-bag estimate should be computed.} \item{err}{if \code{OOB=TRUE}, the out-of-bag estimate of misclassification or root mean squared error or the Brier score for censored data.} \item{comb}{logical whether a combination of models was requested.} For each class methods for the generics \code{\link[rpart]{prune.rpart}}, \code{\link{print}}, \code{\link{summary}} and \code{\link{predict}} are available for inspection of the results and prediction, for example: \code{\link{print.classbagg}}, \code{\link{summary.classbagg}}, \code{\link{predict.classbagg}} and \code{\link{prune.classbagg}} for classification problems. } \references{ Leo Breiman (1996a), Bagging Predictors. \emph{Machine Learning} \bold{24}(2), 123--140. Leo Breiman (1996b), Out-Of-Bag Estimation. \emph{Technical Report} \url{https://www.stat.berkeley.edu/~breiman/OOBestimation.pdf}. Leo Breiman (1998), Arcing Classifiers. \emph{The Annals of Statistics} \bold{26}(3), 801--824. Peter Buehlmann and Bin Yu (2002), Analyzing Bagging. \emph{The Annals of Statistics} \bold{30}(4), 927--961. Torsten Hothorn and Berthold Lausen (2003), Double-Bagging: Combining classifiers by bootstrap aggregation. \emph{Pattern Recognition}, \bold{36}(6), 1303--1309. Torsten Hothorn and Berthold Lausen (2005), Bundling Classifiers by Bagging Trees. \emph{Computational Statistics & Data Analysis}, 49, 1068--1078. Torsten Hothorn, Berthold Lausen, Axel Benner and Martin Radespiel-Troeger (2004), Bagging Survival Trees. \emph{Statistics in Medicine}, \bold{23}(1), 77--91. } \examples{ library("MASS") library("survival") # Classification: Breast Cancer data data("BreastCancer", package = "mlbench") # Test set error bagging (nbagg = 50): 3.7\% (Breiman, 1998, Table 5) mod <- bagging(Class ~ Cl.thickness + Cell.size + Cell.shape + Marg.adhesion + Epith.c.size + Bare.nuclei + Bl.cromatin + Normal.nucleoli + Mitoses, data=BreastCancer, coob=TRUE) print(mod) # Test set error bagging (nbagg=50): 7.9\% (Breiman, 1996a, Table 2) data("Ionosphere", package = "mlbench") Ionosphere$V2 <- NULL # constant within groups bagging(Class ~ ., data=Ionosphere, coob=TRUE) # Double-Bagging: combine LDA and classification trees # predict returns the linear discriminant values, i.e. 
linear combinations # of the original predictors comb.lda <- list(list(model=lda, predict=function(obj, newdata) predict(obj, newdata)$x)) # Note: out-of-bag estimator is not available in this situation, use # errorest mod <- bagging(Class ~ ., data=Ionosphere, comb=comb.lda) predict(mod, Ionosphere[1:10,]) # Regression: data("BostonHousing", package = "mlbench") # Test set error (nbagg=25, trees pruned): 3.41 (Breiman, 1996a, Table 8) mod <- bagging(medv ~ ., data=BostonHousing, coob=TRUE) print(mod) library("mlbench") learn <- as.data.frame(mlbench.friedman1(200)) # Test set error (nbagg=25, trees pruned): 2.47 (Breiman, 1996a, Table 8) mod <- bagging(y ~ ., data=learn, coob=TRUE) print(mod) # Survival data # Brier score for censored data estimated by # 10 times 10-fold cross-validation: 0.2 (Hothorn et al, # 2002) data("DLBCL", package = "ipred") mod <- bagging(Surv(time,cens) ~ MGEc.1 + MGEc.2 + MGEc.3 + MGEc.4 + MGEc.5 + MGEc.6 + MGEc.7 + MGEc.8 + MGEc.9 + MGEc.10 + IPI, data=DLBCL, coob=TRUE) print(mod) } \keyword{tree} ipred/man/slda.Rd0000644000176200001440000000623514172231220013342 0ustar liggesusers\name{slda} \alias{slda} \alias{slda.default} \alias{slda.formula} \alias{slda.factor} \title{ Stabilised Linear Discriminant Analysis } \description{ Linear discriminant analysis based on left-spherically distributed linear scores. } \usage{ \method{slda}{formula}(formula, data, subset, na.action=na.rpart, \dots) \method{slda}{factor}(y, X, q=NULL, \dots) } \arguments{ \item{y}{the response variable: a factor vector of class labels.} \item{X}{a data frame of predictor variables.} \item{q}{the number of positive eigenvalues the scores are derived from, see below.} \item{formula}{a formula of the form \code{lhs ~ rhs} where \code{lhs} is the response variable and \code{rhs} a set of predictors.} \item{data}{optional data frame containing the variables in the model formula.} \item{subset}{optional vector specifying a subset of observations to be used.} \item{na.action}{function which indicates what should happen when the data contain \code{NA}s. Defaults to \code{\link[rpart]{na.rpart}}.} \item{...}{additional parameters passed to \code{\link[MASS]{lda}}.} } \details{ This function implements the LDA for \eqn{q}-dimensional linear scores of the original \eqn{p} predictors derived from the \eqn{PC_q} rule by Laeuter et al. (1998). Based on the product sum matrix \deqn{W = (X - \bar{X})^\top(X - \bar{X})} the eigenvalue problem \eqn{WD = diag(W)DL} is solved. The first \eqn{q} columns \eqn{D_q} of \eqn{D} are used as a weight matrix for the original \eqn{p} predictors: \eqn{XD_q}. By default, \eqn{q} is the number of eigenvalues greater one. The \eqn{q}-dimensional linear scores are left-spherically distributed and are used as predictors for a classical LDA. This form of reduction of the dimensionality was developed for discriminant analysis problems by Laeuter (1992) and was used for multivariate tests by Laeuter et al. (1998), Kropf (2000) gives an overview. For details on left-spherically distributions see Fang and Zhang (1990). } \value{ An object of class \code{slda}, a list with components \item{scores}{the weight matrix.} \item{mylda}{an object of class \code{lda}.} } \seealso{ \code{\link{predict.slda}} } \references{ Fang Kai-Tai and Zhang Yao-Ting (1990), \emph{Generalized Multivariate Analysis}, Springer, Berlin. Siegfried Kropf (2000), \emph{Hochdimensionale multivariate Verfahren in der medizinischen Statistik}, Shaker Verlag, Aachen (in german). 
Juergen Laeuter (1992), \emph{Stabile multivariate Verfahren}, Akademie Verlag, Berlin (in german). Juergen Laeuter, Ekkehard Glimm and Siegfried Kropf (1998), Multivariate Tests Based on Left-Spherically Distributed Linear Scores. \emph{The Annals of Statistics}, \bold{26}(5) 1972--1988. } \examples{ library("mlbench") library("MASS") learn <- as.data.frame(mlbench.twonorm(100)) test <- as.data.frame(mlbench.twonorm(1000)) mlda <- lda(classes ~ ., data=learn) mslda <- slda(classes ~ ., data=learn) print(mean(predict(mlda, newdata=test)$class != test$classes)) print(mean(predict(mslda, newdata=test)$class != test$classes)) } \keyword{multivariate} ipred/man/summary.bagging.Rd0000644000176200001440000000103614172231220015503 0ustar liggesusers\name{summary.classbagg} \alias{summary.classbagg} \alias{summary.regbagg} \alias{summary.survbagg} \alias{print.summary.bagging} \title{Summarising Bagging} \description{ \code{summary} method for objects returned by \code{\link{bagging}}. } \usage{ \method{summary}{classbagg}(object, \dots) } \arguments{ \item{object}{object returned by \code{\link{bagging}}.} \item{\dots}{further arguments to be passed to or from methods.} } \details{ A representation of all trees in the object is printed. } \value{ none } \keyword{tree} ipred/man/varset.Rd0000644000176200001440000000323414172231220013717 0ustar liggesusers\name{varset} \alias{varset} \title{Simulation Model} \description{ Three sets of variables are calculated: explanatory, intermediate and response variables. } \usage{ varset(N, sigma=0.1, theta=90, threshold=0, u=1:3) } \arguments{ \item{N}{number of simulated observations.} \item{sigma}{standard deviation of the error term.} \item{theta}{angle between two u vectors.} \item{threshold}{cutpoint for classifying to 0 or 1.} \item{u}{starting values.} } \details{ For each observation values of two explanatory variables \eqn{x = (x_1, x_2)^{\top}} and of two responses \eqn{y = (y_1, y_2)^{\top}} are simulated, following the formula: \deqn{ y = U*x+e = ({u_1^{\top} \atop u_2^{\top}})*x+e } where x is the evaluation of as standard normal random variable and e is generated by a normal variable with standard deviation \code{sigma}. U is a 2*2 Matrix, where \deqn{ u_1 = ({u_{1, 1} \atop u_{1, 2}}), u_2 = ({u_{2, 1} \atop u_{2, 2}}), ||u_1|| = ||u_2|| = 1, } i.e. a matrix of two normalised vectors. } \value{ A list containing the following arguments \item{explanatory}{N*2 matrix of 2 explanatory variables.} \item{intermediate}{N*2 matrix of 2 intermediate variables.} \item{response}{response vectors with values 0 or 1.} } \references{ David J. Hand, Hua Gui Li, Niall M. Adams (2001), Supervised classification with structured class definitions. \emph{Computational Statistics & Data Analysis} \bold{36}, 209--225. } \examples{ theta90 <- varset(N = 1000, sigma = 0.1, theta = 90, threshold = 0) theta0 <- varset(N = 1000, sigma = 0.1, theta = 0, threshold = 0) par(mfrow = c(1, 2)) plot(theta0$intermediate) plot(theta90$intermediate) } \keyword{misc} ipred/man/kfoldcv.Rd0000644000176200001440000000125214172231220014041 0ustar liggesusers\name{kfoldcv} \alias{kfoldcv} \title{ Subsamples for k-fold Cross-Validation } \description{ Computes feasible sample sizes for the k groups in k-fold cv if N/k is not an integer. } \usage{ kfoldcv(k, N, nlevel=NULL) } \arguments{ \item{k}{ number of groups. } \item{N}{ total sample size. } \item{nlevel}{ a vector of sample sizes for stratified sampling.} } \details{ If N/k is not an integer, k-fold cv is not unique. 
Determine meaningful sample sizes. } \value{ A vector of length \code{k}. } \examples{ # 10-fold CV with N = 91 kfoldcv(10, 91) \testonly{ k <- sample(5:15, 1) k N <- sample(50:150, 1) N stopifnot(sum(kfoldcv(k, N)) == N) } } \keyword{misc} ipred/man/control.errorest.Rd0000644000176200001440000000275214172231220015743 0ustar liggesusers\name{control.errorest} \alias{control.errorest} \title{ Control Error Rate Estimators } \description{ Some parameters that control the behaviour of \code{\link{errorest}}. } \usage{ control.errorest(k = 10, nboot = 25, strat = FALSE, random = TRUE, predictions = FALSE, getmodels=FALSE, list.tindx = NULL) } \arguments{ \item{k}{integer, specify $k$ for $k$-fold cross-validation.} \item{nboot}{integer, number of bootstrap replications.} \item{strat}{logical, if \code{TRUE}, cross-validation is performed using stratified sampling (for classification problems).} \item{random}{logical, if \code{TRUE}, cross-validation is performed using a random ordering of the data.} \item{predictions}{logical, indicates whether the prediction for each observation should be returned or not (classification and regression only). For a bootstrap based estimator a matrix of size 'number of observations' times nboot is returned with predicted values of the ith out-of-bootstrap sample in column i and 'NA's for those observations not included in the ith out-of-bootstrap sample.} \item{getmodels}{logical, indicates a list of all models should be returned. For cross-validation only.} \item{list.tindx}{list of numeric vectors, indicating which observations are included in each bootstrap or cross-validation sample, respectively.} } \value{ A list with the same components as arguments. } \keyword{misc} ipred/man/print.bagging.Rd0000644000176200001440000000076614172231220015153 0ustar liggesusers\name{print.classbagg} \alias{print} \alias{print.classbagg} \alias{print.regbagg} \alias{print.survbagg} \title{Print Method for Bagging Trees} \description{ Print objects returned by \code{\link{bagging}} in nice layout. } \usage{ \method{print}{classbagg}(x, digits, \dots) } \arguments{ \item{x}{object returned by \code{\link{bagging}}.} \item{digits}{how many digits should be printed.} \item{\dots}{further arguments to be passed to or from methods.} } \value{ none } \keyword{tree} ipred/man/dystrophy.Rd0000644000176200001440000000403314172231220014456 0ustar liggesusers\name{dystrophy} \alias{dystrophy} \non_function{} \title{Detection of muscular dystrophy carriers.} \usage{data(dystrophy)} \description{ The \code{dystrophy} data frame has 209 rows and 10 columns. } \format{ This data frame contains the following columns: \describe{ \item{OBS}{numeric. Observation number.} \item{HospID}{numeric. Hospital ID number.} \item{AGE}{numeric, age in years.} \item{M}{numeric. Month of examination.} \item{Y}{numeric. Year of examination.} \item{CK}{numeric. Serum marker creatine kinase.} \item{H}{numeric. Serum marker hemopexin.} \item{PK}{numeric. Serum marker pyruvate kinase.} \item{LD}{numeric. Serum marker lactate dehydroginase.} \item{Class}{factor with levels, \code{carrier} and \code{normal}.} } } \details{ Duchenne Muscular Dystrophy (DMD) is a genetically transmitted disease, passed from a mother to her children. Affected female offspring usually suffer no apparent symptoms, male offspring with the disease die at young age. Although female carriers have no physical symptoms they tend to exhibit elevated levels of certain serum enzymes or proteins. 
\keyword{misc}
ipred/man/print.bagging.Rd0000644000176200001440000000076614172231220015153 0ustar liggesusers\name{print.classbagg}
\alias{print}
\alias{print.classbagg}
\alias{print.regbagg}
\alias{print.survbagg}
\title{Print Method for Bagging Trees}
\description{
  Print objects returned by \code{\link{bagging}} in a nice layout.
}
\usage{
\method{print}{classbagg}(x, digits, \dots)
}
\arguments{
  \item{x}{object returned by \code{\link{bagging}}.}
  \item{digits}{number of digits to be printed.}
  \item{\dots}{further arguments to be passed to or from methods.}
}
\value{
  none
}
\keyword{tree}
ipred/man/dystrophy.Rd0000644000176200001440000000403314172231220014456 0ustar liggesusers\name{dystrophy}
\alias{dystrophy}
\non_function{}
\title{Detection of muscular dystrophy carriers.}
\usage{data(dystrophy)}
\description{
The \code{dystrophy} data frame has 209 rows and 10 columns.
}
\format{
This data frame contains the following columns:
\describe{
\item{OBS}{numeric. Observation number.}
\item{HospID}{numeric. Hospital ID number.}
\item{AGE}{numeric, age in years.}
\item{M}{numeric. Month of examination.}
\item{Y}{numeric. Year of examination.}
\item{CK}{numeric. Serum marker creatine kinase.}
\item{H}{numeric. Serum marker hemopexin.}
\item{PK}{numeric. Serum marker pyruvate kinase.}
\item{LD}{numeric. Serum marker lactate dehydrogenase.}
\item{Class}{factor with levels, \code{carrier} and \code{normal}.}
}
}
\details{
Duchenne Muscular Dystrophy (DMD) is a genetically transmitted disease,
passed from a mother to her children. Affected female offspring usually
suffer no apparent symptoms; male offspring with the disease die at a
young age. Although female carriers have no physical symptoms, they tend
to exhibit elevated levels of certain serum enzymes or proteins.
\cr
The dystrophy dataset contains 209 observations of 75 female DMD carriers
and 134 female DMD non-carriers. It includes six variables describing the
age of the female and the serum markers creatine kinase (CK), hemopexin
(H), pyruvate kinase (PK) and lactate dehydrogenase (LD). The serum
markers CK and H may be measured rather inexpensively from frozen serum,
whereas PK and LD require fresh serum.
}
\source{
D. Andrews and A. Herzberg (1985), \emph{Data}. Berlin: Springer-Verlag.
}
\references{
Robert Tibshirani and Geoffrey Hinton (1998), Coaching variables for
regression and classification. \emph{Statistics and Computing} \bold{8},
25--33.
}
\examples{
\dontrun{

data("dystrophy")
library("rpart")
errorest(Class~CK+H~AGE+PK+LD, data = dystrophy, model = inbagg, 
pFUN = list(list(model = lm, predict = mypredict.lm), list(model = rpart)), 
ns = 0.75, estimator = "cv")
}
}
\keyword{datasets}
ipred/man/mypredict.lm.Rd0000644000176200001440000000137114172231220015022 0ustar liggesusers\name{mypredict.lm}
\alias{mypredict.lm}
\title{Predictions Based on Linear Models}
\description{
  Function returning a vector of predictions of full length (the number
  of observations), where predictions for observations with missing
  explanatory values are replaced by \code{NA}.
}
\usage{
mypredict.lm(object, newdata)
}
\arguments{
  \item{object}{an object of class \code{lm}.}
  \item{newdata}{matrix or data frame to be predicted according to
                 \code{object}.}
}
\value{
  Vector of predicted values.
}
\note{\code{predict.lm} delivers a vector of reduced length, i.e., rows
with missing explanatory variables are omitted. The full length of the
predicted observation vector is necessary in the indirect classification
approach (\code{\link{predict.inclass}}).}
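\examples{

# a small sketch: one missing value in the predictor. predict.lm() drops
# the corresponding row when fitting; mypredict.lm() still returns a
# vector of full length, with NA in the position of the missing value
dat <- data.frame(y = rnorm(10), x = c(rnorm(9), NA))
fm <- lm(y ~ x, data = dat)
mypredict.lm(fm, newdata = dat)
}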
\keyword{misc}
ipred/man/cv.Rd0000644000176200001440000000363114172231220013024 0ustar liggesusers\name{cv}
\alias{cv}
\alias{cv.default}
\alias{cv.factor}
\alias{cv.numeric}
\alias{cv.integer}
\alias{cv.Surv}
\title{Cross-validated Error Rate Estimators.}
\description{
  These functions are low-level functions used by \code{\link{errorest}}
  and are normally not called directly by users.
}
\usage{
\method{cv}{factor}(y, formula, data, model, predict, k=10, random=TRUE, 
   strat=FALSE,
            predictions=NULL, getmodels=NULL, list.tindx = NULL, \dots)
}
\arguments{
  \item{y}{response variable, either of class \code{factor}
           (classification), \code{numeric} (regression) or \code{Surv}
           (survival).}
  \item{formula}{a formula object.}
  \item{data}{data frame of predictors and response described in
              \code{formula}.}
  \item{model}{a function implementing the predictive model to be
               evaluated. The function \code{model} can either return an
               object representing a fitted model or a function with
               argument \code{newdata} which returns predicted values. In
               the latter case, the \code{predict} argument to
               \code{errorest} is ignored.}
  \item{predict}{a function with arguments \code{object} and
                 \code{newdata} only which predicts the status of the
                 observations in \code{newdata} based on the fitted model
                 in \code{object}.}
  \item{k}{number of folds for k-fold cross-validation.}
  \item{random}{logical, indicates whether a random order or the given
                order of the data should be used for sample splitting,
                defaults to \code{TRUE}.}
  \item{strat}{logical, stratified sampling or not, defaults to
               \code{FALSE}.}
  \item{predictions}{logical, whether to return the prediction for each
                     observation.}
  \item{getmodels}{logical, whether to return a list of the models fitted
                   in each fold.}
  \item{list.tindx}{list of numeric vectors, indicating which observations
                    are included in each cross-validation sample.}
  \item{\dots}{additional arguments to \code{model}.}
}
\details{
  See \code{\link{errorest}}.
}
\keyword{misc}
ipred/man/predict.inbagg.Rd0000644000176200001440000000311114172231220015275 0ustar liggesusers\name{predict.inbagg}
\alias{predict.inbagg}
\title{Predictions from an Inbagg Object}
\description{
Predicts the class membership of new observations through indirect
bagging.
}
\usage{
\method{predict}{inbagg}(object, newdata, ...)
}
\arguments{
\item{object}{object of class \code{inbagg}, see \code{\link{inbagg}}.}
\item{newdata}{data frame to be classified.}
\item{\dots}{additional arguments corresponding to the predictive models.}
}
\details{
Predictions of class memberships are calculated, i.e. values of the
intermediate variables are predicted following \code{pFUN} and classified
following \code{cFUN}, see \code{\link{inbagg}}.
}
\value{
The vector of predicted classes is returned.
}
\references{
David J. Hand, Hua Gui Li, Niall M. Adams (2001),
Supervised classification with structured class definitions.
\emph{Computational Statistics & Data Analysis} \bold{36},
209--225.

Andrea Peters, Berthold Lausen, Georg Michelson and Olaf Gefeller (2003),
Diagnosis of glaucoma by indirect classifiers.
\emph{Methods of Information in Medicine} \bold{1}, 99--103.
}
\seealso{\code{\link{inbagg}}}
\examples{

library("MASS")
library("rpart")
y <- as.factor(sample(1:2, 200, replace = TRUE))
W <- mvrnorm(n = 200, mu = rep(0, 3), Sigma = diag(3))
X <- mvrnorm(n = 200, mu = rep(2, 3), Sigma = diag(3))
colnames(W) <- c("w1", "w2", "w3")
colnames(X) <- c("x1", "x2", "x3")
DATA <- data.frame(y, W, X)

pFUN <- list(list(formula = w1~x1+x2, model = lm),
list(model = rpart))

RES <- inbagg(y~w1+w2+w3~x1+x2+x3, data = DATA, pFUN = pFUN)
predict(RES, newdata = X)
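# cross-tabulate the indirect-bagging predictions against the true
# classes (an illustrative check added here, not part of the model fit)
table(predict(RES, newdata = DATA), DATA$y)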
}
\keyword{misc}
ipred/man/DLBCL.Rd0000644000176200001440000000347614172231220013243 0ustar liggesusers\name{DLBCL}
\alias{DLBCL}
\non_function{}
\title{ Diffuse Large B-Cell Lymphoma }
\usage{data("DLBCL")}
\description{
A data frame with gene expression data from diffuse large B-cell
lymphoma (DLBCL) patients.
}
\format{
This data frame contains the following columns:
\describe{
\item{DLCL.Sample}{DLBCL identifier.}
\item{Gene.Expression}{Gene expression group.}
\item{time}{survival time in months.}
\item{cens}{censoring: 0 censored, 1 dead.}
\item{IPI}{International prognostic index.}
\item{MGEc.1}{mean gene expression in cluster 1.}
\item{MGEc.2}{mean gene expression in cluster 2.}
\item{MGEc.3}{mean gene expression in cluster 3.}
\item{MGEc.4}{mean gene expression in cluster 4.}
\item{MGEc.5}{mean gene expression in cluster 5.}
\item{MGEc.6}{mean gene expression in cluster 6.}
\item{MGEc.7}{mean gene expression in cluster 7.}
\item{MGEc.8}{mean gene expression in cluster 8.}
\item{MGEc.9}{mean gene expression in cluster 9.}
\item{MGEc.10}{mean gene expression in cluster 10.}
}
}
\source{
Except for the \code{MGEc.*} variables, the data are published at
\url{http://llmpp.nih.gov/lymphoma/data.shtml}. \code{MGEc.*} is the mean
of the gene expression in each of ten clusters derived by agglomerative
average linkage hierarchical cluster analysis (Hothorn et al., 2004).
}
\references{
Ash A. Alizadeh et al. (2000), Distinct types of diffuse large B-cell
lymphoma identified by gene expression profiling. \emph{Nature},
\bold{403}, 504--509.

Torsten Hothorn, Berthold Lausen, Axel Benner and Martin
Radespiel-Troeger (2004), Bagging Survival Trees.
\emph{Statistics in Medicine}, \bold{23}, 77--91.
}
\examples{

suppressWarnings(RNGversion("3.5.3"))
set.seed(290875)

data("DLBCL", package="ipred")
library("survival")
survfit(Surv(time, cens) ~ 1, data=DLBCL)

}
\keyword{datasets}
ipred/DESCRIPTION0000644000176200001440000000211514646210722013066 0ustar liggesusersPackage: ipred
Title: Improved Predictors
Version: 0.9-15
Date: 2024-07-18
Authors@R: c(person("Andrea", "Peters", role = "aut"),
             person("Torsten", "Hothorn", role = c("aut", "cre"),
                    email = "Torsten.Hothorn@R-project.org"),
             person("Brian D.", "Ripley", role = "ctb"),
             person("Terry", "Therneau", role = "ctb"),
             person("Beth", "Atkinson", role = "ctb"))
Description: Improved predictive models by indirect classification and
  bagging for classification, regression and survival problems
  as well as resampling based estimators of prediction error.
Depends: R (>= 2.10)
Imports: rpart (>= 3.1-8), MASS, survival, nnet, class, prodlim
Suggests: mvtnorm, mlbench, TH.data, randomForest, party
License: GPL (>= 2)
NeedsCompilation: yes
Packaged: 2024-07-18 11:48:25 UTC; hothorn
Author: Andrea Peters [aut],
  Torsten Hothorn [aut, cre],
  Brian D. Ripley [ctb],
  Terry Therneau [ctb],
  Beth Atkinson [ctb]
Maintainer: Torsten Hothorn <Torsten.Hothorn@R-project.org>
Repository: CRAN
Date/Publication: 2024-07-18 13:00:02 UTC