ROCR/0000755000176200001440000000000015134614023011016 5ustar liggesusersROCR/tests/0000755000176200001440000000000013644317760012174 5ustar liggesusersROCR/tests/testthat/0000755000176200001440000000000015134614023014020 5ustar liggesusersROCR/tests/testthat/test-plot.r0000644000176200001440000003051513653001011016131 0ustar liggesusers context("plot") test_that("plot:",{ some.predictions <- c(0.02495517, 0.92535646, 0.86251887, 0.80946685, 0.70922858, 0.69762824, 0.50604485, 0.25446810, 0.10837728, 0.07250349) some.labels <- c(0,1,1,0,1,1,0,1,0,0) .get.performance <- function(pred) { tpr <- performance(pred, "tpr") fpr <- performance(pred, "fpr") acc <- performance(pred, "acc") err <- performance(pred, "err") rec <- performance(pred, "rec") sens<- performance(pred, "sens") fnr <- performance(pred, "fnr") tnr <- performance(pred, "tnr") spec<- performance(pred, "spec") ppv <- performance(pred, "ppv") prec<- performance(pred, "prec") npv <- performance(pred, "npv") fall<- performance(pred, "fall") miss<- performance(pred, "miss") pcfall <- performance(pred, "pcfall") pcmiss <- performance(pred, "pcmiss") rpp <- performance(pred, "rpp") rnp <- performance(pred, "rnp") auc <- performance(pred, "auc") prbe<- performance(pred, "prbe") rch <- performance(pred, "rch") mxe <- performance(pred, "mxe") rmse<- performance(pred, "rmse") phi <- performance(pred, "phi") mat <- performance(pred, "mat") mi <- performance(pred, "mi") chisq<- performance(pred, "chisq") odds<- performance(pred, "odds") lift<- performance(pred, "lift") f <- performance(pred, "f") sar <- performance(pred,"sar") ecost <- performance(pred, "ecost") cost <- performance(pred, "cost") return(list(tpr=tpr, fpr=fpr, acc=acc, err=err, rec=rec, sens=sens, fnr=fnr, tnr=tnr, spec=spec, ppv=ppv, prec=prec, npv=npv, fall=fall, miss=miss, pcfall=pcfall, pcmiss=pcmiss, rpp=rpp, rnp=rnp, auc=auc, prbe=prbe, rch=rch, mxe=mxe, rmse=rmse, phi=phi, mat=mat, mi=mi, chisq=chisq, odds=odds, lift=lift, f=f, sar=sar, 
ecost=ecost, cost=cost)) } pred <- prediction(some.predictions, some.labels) measures <- expect_warning(.get.performance(pred), "Chi-squared approximation may be incorrect") actual1 <- measures[[1]] expect_error(plot(measures[[1]], colorize = TRUE), "Threshold coloring or labeling cannot be performed") for(i in seq_along(measures)){ if(names(measures[i]) %in% c("auc","mxe","rmse")){ expect_error(plot(measures[[i]])) } } data(ROCR.hiv) pp <- ROCR.hiv$hiv.svm$predictions ll <- ROCR.hiv$hiv.svm$labels pred <- prediction(pp, ll) expect_error(ROCR:::.combine.performance.objects(actual1,performance(pred, "fpr")), "Only performance objects with identical number of cross-validation") # plot failures perf <- performance(pred, "tpr", "fpr") perf@x.values <- list(c(1)) expect_error(plot(perf), "Performance object cannot be plotted") perf <- performance(pred, "tpr", "fpr") perf@y.values <- list(c(1)) expect_error(plot(perf), "Performance object cannot be plotted") perf <- performance(pred, "tpr", "fpr") perf@alpha.values <- list() expect_null({ plot <- plot(perf) # no error }) expect_error(plot(perf,colorize = TRUE), "Threshold coloring or labeling cannot be performed") expect_error(plot(perf,print.cutoffs.at = 0.5), "Threshold coloring or labeling cannot be performed") perf <- performance(pred, "tpr", "fpr") expect_null({ plot <- plot(perf,avg = "horizontal") # no error }) expect_error(plot(perf,avg = "horizontal", colorize=TRUE), "Threshold coloring or labeling is only") expect_error(plot(perf,avg = "horizontal", print.cutoffs.at=0.5), "Threshold coloring or labeling is only") # perf <- performance(pred, "tpr", "fpr") expect_null({ plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main= "With ROCR you can produce standard plots like ROC curves ...") plot(perf, lty=3, col="grey78", add=TRUE) }) expect_null({ plot.performance(perf, avg= "threshold", colorize=TRUE, lwd= 3, main= "With ROCR you can produce standard plots like ROC curves ...") plot.performance(perf, lty=3, 
col="grey78", add=TRUE) }) perf <- performance(pred, "prec", "rec") expect_null({ plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main= "... Precision/Recall graphs ...") plot(perf, lty=3, col="grey78", add=TRUE) }) perf <- performance(pred, "sens", "spec") expect_null({ plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main="... Sensitivity/Specificity plots ...") plot(perf, lty=3, col="grey78", add=TRUE) }) perf <- performance(pred, "lift", "rpp") expect_null({ plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main= "... and Lift charts.") plot(perf, lty=3, col="grey78", add=TRUE) }) perf <- performance(pred, "tpr", "fpr") expect_null({ plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main= "With ROCR you can produce standard plots like ROC curves ...", downsampling = 0.5) }) expect_null({ plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main= "With ROCR you can produce standard plots like ROC curves ...", downsampling = 0.9) }) expect_null({ plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main= "With ROCR you can produce standard plots like ROC curves ...", downsampling = 1) }) data(ROCR.xval) pp <- ROCR.xval$predictions ll <- ROCR.xval$labels pred <- prediction(pp,ll) perf <- performance(pred,'tpr','fpr') expect_null({ plot(perf, colorize=TRUE, lwd=2, main='ROC curves from 10-fold cross-validation') }) expect_null({ plot(perf, avg='vertical', spread.estimate='stderror',lwd=3, main='Vertical averaging + 1 standard error',col='blue') }) expect_null({ plot(perf, avg='horizontal', spread.estimate='stderror',lwd=3, main='Horizontal averaging + boxplots',col='blue') }) expect_null({ plot(perf, avg='horizontal', spread.estimate='boxplot',lwd=3, main='Horizontal averaging + boxplots',col='blue') }) expect_null({ plot(perf, avg='vertical', spread.estimate='boxplot',lwd=3, main='Horizontal averaging + boxplots',col='blue') }) expect_null({ plot(perf, avg='threshold', spread.estimate='stddev',lwd=2, main='Threshold averaging + 1 standard 
deviation',colorize=TRUE) }) expect_null({ plot(perf, avg='threshold', spread.estimate='boxplot',lwd=2, main='Threshold averaging + 1 standard deviation',colorize=TRUE) }) expect_null({ plot(perf, avg='threshold', spread.estimate='boxplot',lwd=2, main='Threshold averaging + 1 standard deviation',colorize=TRUE, colorkey.pos="top") }) expect_null({ plot(perf, print.cutoffs.at=seq(0,1,by=0.2), text.cex=0.8, text.y=lapply(as.list(seq(0,0.5,by=0.05)), function(x) { rep(x,length(perf@x.values[[1]])) } ), col= as.list(terrain.colors(10)), text.col= as.list(terrain.colors(10)), points.col= as.list(terrain.colors(10)), main= "Cutoff stability") }) ############################################################################ # removed because vdiffr is not available on mac ############################################################################ # vdiffr # skip_on_ci() # skip_on_os("mac") # skip_if_not_installed("vdiffr") # for(i in seq_along(measures)){ # if(!(names(measures[i]) %in% c("auc","mxe","rmse"))){ # vdiffr::expect_doppelganger(names(measures[i]), plot(measures[[i]])) # } else { # expect_error(plot(measures[[i]])) # } # } # # data(ROCR.hiv) # pp <- ROCR.hiv$hiv.svm$predictions # ll <- ROCR.hiv$hiv.svm$labels # pred <- prediction(pp, ll) # expect_error(ROCR:::.combine.performance.objects(actual1,performance(pred, "fpr")), # "Only performance objects with identical number of cross-validation") # perf <- performance(pred, "tpr", "fpr") # vdiffr::expect_doppelganger("ROC-curve",{ # plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, # main= "With ROCR you can produce standard plots like ROC curves ...") # plot(perf, lty=3, col="grey78", add=TRUE) # }) # perf <- performance(pred, "prec", "rec") # vdiffr::expect_doppelganger("Precision-Recall-graph",{ # plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, # main= "... 
Precision/Recall graphs ...") # plot(perf, lty=3, col="grey78", add=TRUE) # }) # perf <- performance(pred, "sens", "spec") # vdiffr::expect_doppelganger("Sensitivity-Specificity-plots",{ # plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, # main="... Sensitivity/Specificity plots ...") # plot(perf, lty=3, col="grey78", add=TRUE) # }) # perf <- performance(pred, "lift", "rpp") # vdiffr::expect_doppelganger("lift-chart",{ # plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, # main= "... and Lift charts.") # plot(perf, lty=3, col="grey78", add=TRUE) # }) # # perf <- performance(pred, "tpr", "fpr") # vdiffr::expect_doppelganger("ROC-curve-downsampling1",{ # plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, # main= "With ROCR you can produce standard plots like ROC curves ...", # downsampling = 0.5) # }) # vdiffr::expect_doppelganger("ROC-curve-downsampling2",{ # plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, # main= "With ROCR you can produce standard plots like ROC curves ...", # downsampling = 0.9) # }) # vdiffr::expect_doppelganger("ROC-curve-downsampling3",{ # plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, # main= "With ROCR you can produce standard plots like ROC curves ...", # downsampling = 1) # }) # expect_error(plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, # main= "With ROCR you can produce standard plots like ROC curves ...", # downsampling = 1.1), # "'from' must be a finite number") # dev.off() # # data(ROCR.xval) # pp <- ROCR.xval$predictions # ll <- ROCR.xval$labels # pred <- prediction(pp,ll) # perf <- performance(pred,'tpr','fpr') # # vdiffr::expect_doppelganger("ROC-cross-valid",{ # plot(perf, colorize=TRUE, lwd=2, # main='ROC curves from 10-fold cross-validation') # }) # vdiffr::expect_doppelganger("ROC-vertical-avg",{ # plot(perf, avg='vertical', spread.estimate='stderror',lwd=3, # main='Vertical averaging + 1 standard error',col='blue') # }) # vdiffr::expect_doppelganger("ROC-horizontal-avg",{ # plot(perf, avg='horizontal', 
spread.estimate='boxplot',lwd=3, # main='Horizontal averaging + boxplots',col='blue') # }) # vdiffr::expect_doppelganger("ROC-vertical-avg-box",{ # plot(perf, avg='vertical', spread.estimate='boxplot',lwd=3, # main='Horizontal averaging + boxplots',col='blue') # }) # vdiffr::expect_doppelganger("ROC-threshold-avg",{ # plot(perf, avg='threshold', spread.estimate='stddev', # lwd=2, # main='Threshold averaging + 1 standard deviation',colorize=TRUE) # }) }) ROCR/tests/testthat/test-consistency.r0000644000176200001440000004526213644317760017546 0ustar liggesuserscontext("consistency") test_that("consistency:",{ .get.performance.measures <- function(pred, measures) { ans <- list() for (measure in measures) { ## need to enclose y.values into a list to avoid flattening perf <- performance(pred, measure) .check.performance.object( perf ) ans <- c(ans, list(perf@y.values)) } names(ans) <- measures ans } .check.consistency <- function(measures) { ## check entries of contingency table for consistency for (measure in c("acc", "err", "fnr", "tpr", "fpr", "tnr", "pcfall", "prec", "npv", "pcmiss",'rpp','rnp')) { if (!measure %in% names(measures)) { stop(paste("Performance measure", measure, "not in argument list.")) } } for (i in 1:length(measures$acc)) { finite.bool <- is.finite(measures$acc[[i]]) & is.finite(measures$err[[i]]) expect_equal(measures$acc[[i]][finite.bool] + measures$err[[i]][finite.bool], rep(1,length(measures$acc[[i]]))[finite.bool]) finite.bool <- is.finite(measures$fnr[[i]]) & is.finite(measures$tpr[[i]]) expect_equal(measures$fnr[[i]][finite.bool] + measures$tpr[[i]][finite.bool], rep(1,length(measures$fnr[[i]]))[finite.bool]) finite.bool <- is.finite(measures$fpr[[i]]) & is.finite(measures$tnr[[i]]) expect_equal(measures$fpr[[i]][finite.bool] + measures$tnr[[i]][finite.bool], rep(1,length(measures$fpr[[i]]))[finite.bool]) finite.bool <- is.finite(measures$prec[[i]]) & is.finite(measures$pcfall[[i]]) expect_equal(measures$prec[[i]][finite.bool] + 
measures$pcfall[[i]][finite.bool], rep(1,length(measures$acc[[i]]))[finite.bool]) finite.bool <- is.finite(measures$npv[[i]]) & is.finite(measures$pcmiss[[i]]) expect_equal(measures$npv[[i]][finite.bool] + measures$pcmiss[[i]][finite.bool], rep(1,length(measures$acc[[i]]))[finite.bool]) expect_equal(measures$rpp[[i]] + measures$rnp[[i]], rep(1, length(measures$rpp[[i]]))) } } ############################################################ # test length of performance measures .check.performance.object <- function(perf) { ylen <- length(perf@y.values) xlen <- length(perf@x.values) alphalen <- length(perf@alpha.values) expect_equal( (xlen==0 || xlen==ylen) && (alphalen==0 || (alphalen==xlen && alphalen==ylen)), T ) if (xlen==ylen) { for (i in 1:ylen) expect_equal( length(perf@x.values[[i]]), length(perf@y.values[[i]]) ) } if (alphalen==ylen) { for (i in 1:ylen) expect_equal( length(perf@alpha.values[[i]]), length(perf@y.values[[i]]) ) } } .check.prediction.object <- function( pred) { # 1. all entries in prediction object must have equals number of cross-validation runs lenvec <- c(length(pred@predictions), length(pred@labels), length(pred@cutoffs), length(pred@fp), length(pred@tp), length(pred@fn), length(pred@tn), length(pred@n.pos), length(pred@n.neg), length(pred@n.pos.pred), length(pred@n.neg.pred)) expect_equal( length(unique(lenvec)), 1) # 2. inside: xval runs: for (i in 1:length(pred@predictions)) { expect_equal( length(pred@predictions[[i]]), length(pred@labels[[i]])) lenvec <- c(length(pred@cutoffs[[i]]), length(pred@fp[[i]]), length(pred@tp[[i]]), length(pred@fn[[i]]), length(pred@tn[[i]]), length(pred@n.pos.pred[[i]]), length(pred@n.neg.pred[[i]])) expect_equal( length(unique(lenvec)), 1) expect_equal( unique(lenvec), length(unique(pred@predictions[[i]]))+1 ) } # 3. cutoffs sorted in descending order? for (i in 1:length(pred@predictions)) { expect_equal( sort(pred@cutoffs[[i]], decreasing=TRUE ), pred@cutoffs[[i]] ) } # 4. 
check 2x2 table for consistency with marginal sums for (i in 1:length(pred@predictions)) { expect_equal( pred@tp[[i]] + pred@fp[[i]], pred@n.pos.pred[[i]] ) expect_equal( pred@fn[[i]] + pred@tn[[i]], pred@n.neg.pred[[i]] ) expect_equal( pred@tp[[i]] + pred@fn[[i]], rep( pred@n.pos[[i]], length(pred@tp[[i]])) ) expect_equal( pred@fp[[i]] + pred@tn[[i]], rep( pred@n.neg[[i]], length(pred@tp[[i]])) ) expect_equal(pred@n.pos.pred[[i]] + pred@n.neg.pred[[i]], rep( pred@n.pos[[i]] + pred@n.neg[[i]], length(pred@n.pos.pred[[i]])) ) expect_equal(pred@n.pos[[i]] + pred@n.neg[[i]], length(pred@labels[[i]])) } } # .mock.prediction <- function( n.predictions, error.rate ) { if ( length(n.predictions) > 1 && length(error.rate)==1) { error.rate <- rep(error.rate, length(n.predictions) ) } if (length(n.predictions)>1) { predictions <- list() labels <- list() } else { predictions <- c() labels <- c() } for (i in 1:length(n.predictions)) { current.predictions <- runif( n.predictions[i] ) current.labels <- as.numeric( current.predictions >= 0.5) flip.indices <- sample( n.predictions[i], round( error.rate[i] * n.predictions[i] )) current.labels[ flip.indices ] <- !current.labels[ flip.indices ] # current.labels[ current.labels=="1" ] <- "+" # current.labels[ current.labels=="0" ] <- "-" if (length(n.predictions)>1) { predictions <- c( predictions, list( current.predictions )) labels <- c( labels, list( current.labels )) } } if (length( n.predictions)==1) { predictions <- list(current.predictions) labels <- list(current.labels) } ans <- list(predictions= predictions, labels= labels) # ensure, that random labels have exactly two levels if (any( sapply(labels, function(run) {length(unique(run))}) != rep(2, length(labels)) )) { # print(paste("XXX", labels, str(n.predictions), str(error.rate))) return(.mock.prediction(n.predictions, error.rate)) } else return( ans ) } ############################################################################## # consistency for (i in 1:100) { n.folds <- 
sample(1:10,1) fold.sizes <- sample(10:100, n.folds, replace=T) error.rates <- runif( n.folds ) pp <- .mock.prediction( fold.sizes, error.rates ) pred <- prediction( pp$predictions, pp$labels ) .check.prediction.object(pred) a <- .get.performance.measures( pred, c('acc','err','fpr','tpr','fnr','tnr','prec','pcfall','npv','pcmiss','rpp','rnp')) .check.consistency( a) } ############################################################################## # test errors crashCases <- list( ## cases that are ok to crash: list(pred= c(0), lab= c(0)), #-> Number of classes is not equal to 2. list(pred= c(1), lab= c(1)), #-> Number of classes is not equal to 2. list(pred= c(0.1, 0.2, 0.5), lab= c(1,1,1)), #-> Number of classes is not equal to 2. list(pred= c(0.1, 0.2, 0.5), lab= c(0,0,0)), #-> Number of classes is not equal to 2. list(pred= c(0.1, 0.2, 0.5), lab= c("a", "a", "a")), #-> Number of classes is not equal to 2. list(pred= c(0.1, 0.2, 0.5), lab= c(T, T, T)), #-> Number of classes is not equal to 2. list(pred= c(0.1, 0.2, 0.5), lab= c(F, F, F)) #-> Number of classes is not equal to 2. 
) for (case in crashCases) { # cat(case$pred, " ", case$lab, "\n") expect_error(pred <- prediction(case$pred, case$lab)) #checkException(measures <- .get.performance.measures(pred)) } ############################################################################## ## use consistency checks to validate results on pathological input cases performance.measures <- c('tpr','fpr','acc','err','rec','sens','fnr','tnr','spec', 'ppv','prec','npv','fall','miss','pcfall','pcmiss','rpp','rnp', 'auc','prbe','rch','mxe','rmse','phi','mat','mi','chisq', 'odds','lift','f','sar','ecost','cost') # mxe needs 0,1 labels (warning otherwise), # rmse needs numeric labels (warning otherwise), sar as well pred <- prediction( c(0.1, 0.2, 0.5), c("a", "a", "b")) .check.prediction.object(pred) measures.to.evaluate <- performance.measures[ performance.measures != 'mxe' & performance.measures != 'rmse' & performance.measures != 'sar'] measures <- expect_warning(.get.performance.measures(pred, measures.to.evaluate), "Chi-squared approximation may be incorrect") .check.consistency( measures) pred <- prediction( c(0.1, 0.2, 0.5), c(F, F, T)) .check.prediction.object(pred) measures.to.evaluate <- performance.measures[ performance.measures != 'mxe' & performance.measures != 'rmse' & performance.measures != 'sar'] measures <- expect_warning(.get.performance.measures(pred, measures.to.evaluate), "Chi-squared approximation may be incorrect") .check.consistency( measures) pred <- prediction( c(0.1, 0.2, 0.5), c("1", "1", "0")) .check.prediction.object(pred) measures.to.evaluate <- performance.measures measures <- expect_warning(.get.performance.measures(pred, measures.to.evaluate), "Chi-squared approximation may be incorrect") .check.consistency( measures) pred <- prediction( c(0.1, 0.2, 0.5), c(T, F, F)) .check.prediction.object(pred) measures.to.evaluate <- performance.measures[ performance.measures != 'mxe' & performance.measures != 'rmse' & performance.measures != 'sar' ] measures <- 
expect_warning(.get.performance.measures(pred, measures.to.evaluate), "Chi-squared approximation may be incorrect") .check.consistency( measures) # prbe cannot be computed, because only one prec/rec pair available. pred <- prediction( c(0,0,0), c(0,1,1)) .check.prediction.object(pred) measures.to.evaluate <- performance.measures[ performance.measures != 'prbe' ] measures <- expect_warning(.get.performance.measures(pred, measures.to.evaluate), "Chi-squared approximation may be incorrect") .check.consistency( measures) pred <- prediction( c(0,0,0), ordered(c(0,0,0), levels=c(0,1))) .check.prediction.object(pred) measures.to.evaluate <- performance.measures[ performance.measures != 'auc' & performance.measures != 'prbe' & performance.measures != 'rch' & performance.measures != 'sar' & performance.measures != 'ecost'] measures <- expect_warning(.get.performance.measures(pred, measures.to.evaluate), "Chi-squared approximation may be incorrect") .check.consistency( measures) pred <- prediction( c(-1,-0.2,-0.6), ordered(c(1,0,1), levels=c(0,1))) .check.prediction.object(pred) measures.to.evaluate <- performance.measures[ performance.measures != 'mxe' ] measures <- expect_warning(.get.performance.measures(pred, measures.to.evaluate), "Chi-squared approximation may be incorrect") .check.consistency( measures) pred <- prediction( c(-1,-0.2,-0.6), c(-1,1,-1)) .check.prediction.object(pred) measures.to.evaluate <- performance.measures[ performance.measures != 'mxe'] measures <- expect_warning(.get.performance.measures(pred, measures.to.evaluate), "Chi-squared approximation may be incorrect") .check.consistency( measures) pred <- prediction( c(-1,-0.2,-0.6), c(3,2,3)) .check.prediction.object(pred) measures.to.evaluate <- performance.measures[ performance.measures != 'mxe'] measures <- expect_warning(.get.performance.measures(pred, measures.to.evaluate), "Chi-squared approximation may be incorrect") .check.consistency( measures) pred <- prediction( c(1), 
ordered(c("a"),levels=c('a','b'))) .check.prediction.object(pred) measures.to.evaluate <- performance.measures[ performance.measures != 'auc' & performance.measures != 'prbe' & performance.measures != 'rch' & performance.measures != 'mxe' & performance.measures != 'rmse' & performance.measures != 'sar' & performance.measures != 'ecost'] measures <- expect_warning(.get.performance.measures(pred, measures.to.evaluate), "Chi-squared approximation may be incorrect") .check.consistency( measures) ############################################################################## # test measures for consistency on supplied data sets data(ROCR.simple) pred <- prediction(ROCR.simple$predictions, ROCR.simple$labels) .check.prediction.object(pred) measures.to.evaluate <- performance.measures measures <- expect_warning(.get.performance.measures(pred, measures.to.evaluate), "Chi-squared approximation may be incorrect") .check.consistency( measures) data(ROCR.xval) pred <- prediction(ROCR.xval$predictions, ROCR.xval$labels) .check.prediction.object(pred) measures.to.evaluate <- performance.measures measures <- expect_warning(.get.performance.measures(pred, measures.to.evaluate), "Chi-squared approximation may be incorrect") .check.consistency( measures) data(ROCR.hiv) pred <- prediction(ROCR.hiv$hiv.nn$predictions, ROCR.hiv$hiv.nn$labels) .check.prediction.object(pred) measures.to.evaluate <- performance.measures[performance.measures != 'mxe' & performance.measures != 'cal'] measures <- expect_warning(.get.performance.measures(pred, measures.to.evaluate), "Chi-squared approximation may be incorrect") .check.consistency( measures) pred <- prediction(ROCR.hiv$hiv.svm$predictions, ROCR.hiv$hiv.svm$labels) .check.prediction.object(pred) measures.to.evaluate <- performance.measures[performance.measures != 'mxe' & performance.measures != 'cal'] measures <- expect_warning(.get.performance.measures(pred, measures.to.evaluate), "Chi-squared approximation may be incorrect") 
.check.consistency( measures) skip_on_cran() skip_on_ci() ############################################################################## # Combining measures <- c('tpr','fpr','acc','err','rec','sens','fnr','tnr','spec', 'ppv','prec','npv','fall','miss','pcfall','pcmiss','rpp','rnp', 'phi','mat','mi','chisq','odds','lift') # 'auc','prbe','rch','mxe','rmse','phi','mat','mi','chisq', # 'odds','lift','f','sar','ecost','cost') for (measure1 in measures) { # print(measure1) for (measure2 in measures) { n.folds <- sample(1:2,1) fold.sizes <- sample(10:20, n.folds, replace=T) error.rates <- runif( n.folds ) pp <- .mock.prediction( fold.sizes, error.rates ) pred <- prediction( pp$predictions, pp$labels ) .check.prediction.object(pred) perf1 <- suppressWarnings(performance( pred, measure1 )) perf2 <- suppressWarnings(performance( pred, measure2 )) perf3 <- suppressWarnings(performance( pred, measure2, measure1 )) .check.performance.object(perf1) .check.performance.object(perf2) .check.performance.object(perf3) for (i in 1:n.folds) { #check elements expect_equal(setequal( c( perf1@x.values[[i]], perf2@x.values[[i]]), perf3@alpha.values[[i]] ),T) expect_equal(setequal( perf1@y.values[[i]], perf3@x.values[[i]] ),T) expect_equal(setequal( perf2@y.values[[i]], perf3@y.values[[i]] ),T) #check order ind <- sapply( perf1@x.values[[i]], function(x) { min(which(x==perf3@alpha.values[[i]]))}) expect_equal( unname(perf1@y.values[[i]]), perf3@x.values[[i]][ind] ) expect_equal( unname(perf2@y.values[[i]]), perf3@y.values[[i]][ind] ) } } } ############################################################################## # test datavase combine measures <- c('tpr','fpr','acc','err','rec','sens','fnr','tnr','spec', 'ppv','prec','npv','fall','miss','pcfall','pcmiss','rpp','rnp', 'phi','mat','mi','chisq','odds','lift') #'auc','prbe','rch','mxe','rmse','phi','mat','mi','chisq', #'odds','lift','f','sar','ecost','cost') # print("Database combine test deactivated.") data(ROCR.simple) data(ROCR.xval) 
data(ROCR.hiv) all.pred <- list(prediction(ROCR.simple$predictions, ROCR.simple$labels), prediction(ROCR.xval$predictions, ROCR.xval$labels), prediction(ROCR.hiv$hiv.nn$predictions, ROCR.hiv$hiv.nn$labels), prediction(ROCR.hiv$hiv.svm$predictions, ROCR.hiv$hiv.svm$labels)) lapply(all.pred, .check.prediction.object) for (pred in all.pred) { for (measure1 in measures) { # print(measure1) for (measure2 in measures) { perf1 <- suppressWarnings(performance( pred, measure1 )) perf2 <- suppressWarnings(performance( pred, measure2 )) perf3 <- suppressWarnings(performance( pred, measure2, measure1 )) .check.performance.object(perf1) .check.performance.object(perf2) .check.performance.object(perf3) for (i in 1:length(pred@labels)) { #check elements expect_equal(setequal( c( perf1@x.values[[i]], perf2@x.values[[i]]), perf3@alpha.values[[i]] ),T) expect_equal(setequal( perf1@y.values[[i]], perf3@x.values[[i]] ),T) expect_equal(setequal( perf2@y.values[[i]], perf3@y.values[[i]] ),T) # check order ind <- sapply( perf1@x.values[[i]], function(x) { min(which(x==perf3@alpha.values[[i]]))}) expect_equal( unname(perf1@y.values[[i]]), perf3@x.values[[i]][ind] ) expect_equal( unname(perf2@y.values[[i]]), perf3@y.values[[i]][ind] ) } } } } }) ROCR/tests/testthat/test-simple.r0000644000176200001440000003672113644317760016476 0ustar liggesuserscontext("simple") test_that("simple:",{ some.predictions <- c(0.02495517, 0.92535646, 0.86251887, 0.80946685, 0.70922858, 0.69762824, 0.50604485, 0.25446810, 0.10837728, 0.07250349) some.labels <- c(0,1,1,0,1,1,0,1,0,0) tp.reference <- c(0, 1, 2, 2, 3, 4, 4, 5, 5, 5, 5) fp.reference <- c(0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 5) pp.reference <- c(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10) np.reference <- c(10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) p.reference <- rep(5, 11) n.reference <- rep(5, 11) tn.reference <- n.reference-fp.reference fn.reference <- p.reference-tp.reference # manually calculated reference measures rpp.reference <- 
c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0) rnp.reference <- c(1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0) tpr.reference <- c(0.0, 0.2, 0.4, 0.4, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0) fpr.reference <- c(0.0, 0.0, 0.0, 0.2, 0.2, 0.2, 0.4, 0.4, 0.6, 0.8, 1.0) acc.reference <- c(0.5, 0.6, 0.7, 0.6, 0.7, 0.8, 0.7, 0.8, 0.7, 0.6, 0.5) err.reference <- c(0.5, 0.4, 0.3, 0.4, 0.3, 0.2, 0.3, 0.2, 0.3, 0.4, 0.5) rec.reference <- tpr.reference sens.reference<- tpr.reference fnr.reference <- c(1.0, 0.8, 0.6, 0.6, 0.4, 0.2, 0.2, 0.0, 0.0, 0.0, 0.0) tnr.reference <- c(1.0, 1.0, 1.0, 0.8, 0.8, 0.8, 0.6, 0.6, 0.4, 0.2, 0.0) spec.reference<- tnr.reference ppv.reference <- c(0/0, 1/1, 2/2, 2/3, 3/4, 4/5, 4/6, 5/7, 5/8, 5/9, 5/10) npv.reference <- c(5/10, 5/9, 5/8, 4/7, 4/6, 4/5, 3/4, 3/3, 2/2, 1/1, 0/0) prec.reference<- ppv.reference fall.reference <- fpr.reference miss.reference <- fnr.reference pcfall.reference <- c(0/0, 0/1, 0/2, 1/3, 1/4, 1/5, 2/6, 2/7, 3/8, 4/9, 5/10) pcmiss.reference <- c(5/10, 4/9, 3/8, 3/7, 2/6, 1/5, 1/4, 0/3, 0/2, 0/1, 0/0) auc.reference <- 0.84 aucpr.reference <- 0.8814286 cal.reference <- c() ind <- rev(order(some.predictions)) sorted.predictions <- some.predictions[ind] sorted.labels <- some.labels[ind] for (i in 1:8) { mean.pred <- mean( sorted.predictions[i:(i+2)] ) frac.pos <- sum( sorted.labels[i:(i+2)] ) / 3 cal.reference <- c(cal.reference, abs( mean.pred - frac.pos )) } prbe.reference<- 0.8 prbe.reference.x <- 0.69762824 rch.reference.x <- fpr.reference[c(1,3,6,8,11)] rch.reference.y <- tpr.reference[c(1,3,6,8,11)] mxe.reference <- -(1/length(some.predictions)) * sum(some.labels*log(some.predictions) + (1-some.labels)*log(1-some.predictions)) rmse.reference <- sqrt((1/length(some.predictions)) * sum((some.predictions-some.labels)^2)) phi.reference <- (tp.reference*tn.reference-fp.reference*fn.reference) / sqrt(p.reference*n.reference*pp.reference*np.reference) mat.reference <- phi.reference my.log2 <- function( x ) { ans <- log2(x) ans[ 
ans==-Inf ] <- 0 ans } mi.reference <- (tn.reference * my.log2( tn.reference / (n.reference*np.reference)) + fn.reference*my.log2(fn.reference/(np.reference*p.reference)) + fp.reference*my.log2(fp.reference/(n.reference*pp.reference)) + tp.reference*my.log2(tp.reference/(p.reference*pp.reference))) / length(some.labels) + log2(length(some.labels)) chisq.reference <- (((pp.reference*p.reference/length(some.predictions)) - tp.reference)^2 / (pp.reference*p.reference/length(some.predictions)) + ((pp.reference*n.reference/length(some.predictions)) - fp.reference)^2 / (pp.reference*n.reference/length(some.predictions)) + ((np.reference*p.reference/length(some.predictions)) - fn.reference)^2 / (np.reference*p.reference/length(some.predictions)) + ((np.reference*n.reference/length(some.predictions)) - tn.reference)^2 / (np.reference*n.reference/length(some.predictions))) odds.reference <- (tp.reference*tn.reference) / (fn.reference*fp.reference) lift.reference <- (tp.reference/p.reference) / (pp.reference/(p.reference+n.reference)) f.reference <- 1 / (0.5 * ((1/prec.reference) + (1/rec.reference))) sar.reference <- 1/3 * (acc.reference + auc.reference + (1-rmse.reference)) cost.reference <- (fpr.reference * n.reference/length(some.labels) * 1 + fnr.reference * p.reference/length(some.labels) * 1) .get.performance.measures <- function(pred) { .get.performance.measure.result <- function(pred, measure){ perf <- performance(pred, measure) show(perf) perf@y.values[[1]] } tpr <- .get.performance.measure.result(pred, "tpr") fpr <- .get.performance.measure.result(pred, "fpr") acc <- .get.performance.measure.result(pred, "acc") err <- .get.performance.measure.result(pred, "err") rec <- .get.performance.measure.result(pred, "rec") sens<- .get.performance.measure.result(pred, "sens") fnr <- .get.performance.measure.result(pred, "fnr") tnr <- .get.performance.measure.result(pred, "tnr") spec<- .get.performance.measure.result(pred, "spec") ppv <- .get.performance.measure.result(pred, 
"ppv") prec<- .get.performance.measure.result(pred, "prec") npv <- .get.performance.measure.result(pred, "npv") fall<- .get.performance.measure.result(pred, "fall") miss<- .get.performance.measure.result(pred, "miss") pcfall <- .get.performance.measure.result(pred, "pcfall") pcmiss <- .get.performance.measure.result(pred, "pcmiss") rpp <- .get.performance.measure.result(pred, "rpp") rnp <- .get.performance.measure.result(pred, "rnp") auc <- performance(pred, "auc")@y.values[[1]] aucpr <- performance(pred, "aucpr")@y.values[[1]] prbe<- performance(pred, "prbe")@y.values[[1]] rch <- performance(pred, "rch")@y.values[[1]] mxe <- .get.performance.measure.result(pred, "mxe") rmse<- .get.performance.measure.result(pred, "rmse") phi <- .get.performance.measure.result(pred, "phi") mat <- .get.performance.measure.result(pred, "mat") mi <- .get.performance.measure.result(pred, "mi") chisq<- .get.performance.measure.result(pred, "chisq") odds<- .get.performance.measure.result(pred, "odds") lift<- .get.performance.measure.result(pred, "lift") f <- .get.performance.measure.result(pred, "f") sar <- .get.performance.measure.result(pred,"sar") ecost <- .get.performance.measure.result(pred, "ecost") cost <- .get.performance.measure.result(pred, "cost") return(list(tpr=tpr, fpr=fpr, acc=acc, err=err, rec=rec, sens=sens, fnr=fnr, tnr=tnr, spec=spec, ppv=ppv, prec=prec, npv=npv, fall=fall, miss=miss, pcfall=pcfall, pcmiss=pcmiss, rpp=rpp, rnp=rnp, auc=auc, aucpr=aucpr, prbe=prbe, rch=rch, mxe=mxe, rmse=rmse, phi=phi, mat=mat, mi=mi, chisq=chisq, odds=odds, lift=lift, f=f, sar=sar, ecost=ecost, cost=cost)) } ############################################################################## # test PerformanceMeasuresReference expect_error(prediction(some.predictions[-1], some.labels), "Number of predictions in each run must be equal") expect_error(prediction(c(NA,some.predictions[-1]), some.labels), "'predictions' contains NA.") expect_error(prediction(as.list(matrix(some.predictions)), 
some.labels), "Number of cross-validation runs must be equal") expect_error(prediction(some.predictions, factor(some.labels,ordered = TRUE), label.ordering = c(1,0)), "'labels' is already ordered. No additional 'label.ordering'") expect_error() pred <- prediction(some.predictions, some.labels) expect_output(show(pred)) actual <- prediction(some.predictions, factor(some.labels), label.ordering = c(0,1)) expect_equal(pred, actual) expect_error(performance("tpr",pred), "Wrong argument types") expect_error(performance(pred,"tpr","mxe"), "The performance measure mxe can only be used as 'measure'") actual1 <- performance(pred, "tpr") actual2 <- performance(pred, "fpr") actual <- ROCR:::.combine.performance.objects(actual1,actual2) expect_s4_class(actual,"performance") actual3 <- performance(pred, "mxe") expect_error(ROCR:::.combine.performance.objects(actual1,actual3), "Objects need to have identical x axis") expect_error(ROCR:::.combine.performance.objects(actual,actual), "At least one of the two objects has already been merged") measures <- expect_output( expect_warning(.get.performance.measures(pred), "Chi-squared approximation may be incorrect")) attach(measures) expect_equal(tpr, tpr.reference) expect_equal(fpr, fpr.reference) expect_equal(acc, acc.reference) expect_equal(err, err.reference) expect_equal(rec, rec.reference) expect_equal(sens, sens.reference) expect_equal(fnr, fnr.reference) expect_equal(tnr, tnr.reference) expect_equal(spec, spec.reference) expect_equal(ppv, ppv.reference) expect_equal(prec,prec.reference) expect_equal(npv, npv.reference) expect_equal(fall, fall.reference) expect_equal(miss,miss.reference) expect_equal(pcfall, pcfall.reference) expect_equal(pcmiss,pcmiss.reference) expect_equal(rpp, rpp.reference) expect_equal(rnp,rnp.reference) expect_equal(auc, auc.reference) expect_equal(aucpr, aucpr.reference, tolerance = .0000001) expect_equal(prbe, prbe.reference) expect_equal(mxe, mxe.reference) expect_equal(rmse, rmse.reference) 
expect_equal(phi, phi.reference) expect_equal(mat, mat.reference) expect_equal(mi, mi.reference) expect_equal(unname(chisq), chisq.reference) expect_equal(odds, odds.reference) expect_equal(lift, lift.reference) expect_equal(f, f.reference) expect_equal(sar,sar.reference) expect_equal(cost, cost.reference) ############################################################################## # ecost ecost.x.reference <- c(0,1/3,0.5,1) ecost.y.reference <- c(0,0.2,0.2,0) pred <- prediction(some.predictions, some.labels) perf <- performance(pred, "ecost") ecost.x <- perf@x.values[[1]] ecost.y <- perf@y.values[[1]] expect_equal( ecost.x, ecost.x.reference ) expect_equal( ecost.y, ecost.y.reference ) ############################################################################## # test cal pred <- prediction(some.predictions, some.labels) cal <- performance(pred, "cal", window.size=floor(length(pred@predictions[[1]])/3))@y.values[[1]] cal.x <- performance(pred, "cal", window.size=floor(length(pred@predictions[[1]])/3))@x.values[[1]] cal.x.reference <- rev(sort( some.predictions ))[2:(length(some.predictions)-1)] expect_equal( cal, cal.reference) expect_equal( cal.x, cal.x.reference) ############################################################################## # test cost pred <- prediction(some.predictions, some.labels) for (cost.fp in rnorm(50)) { cost.fn <- rnorm(1) perf <- performance(pred, "cost", cost.fp=cost.fp, cost.fn=cost.fn) cost <- perf@y.values[[1]] my.cost.reference <- (fpr.reference * n.reference/length(some.labels) * cost.fp + fnr.reference * p.reference/length(some.labels) * cost.fn) expect_equal( cost, my.cost.reference) } ############################################################################## # test Rch pred <- prediction(some.predictions, some.labels) perf <- performance( pred, "rch") rch.x <- perf@x.values[[1]] rch.y <- perf@y.values[[1]] expect_equal( rch.x, rch.reference.x ) expect_equal( rch.y, rch.reference.y ) 
############################################################################## # test RMSE pred <- prediction(c(0, 0, 1, 1), ordered(c(0, 0, 1, 1))) rmse <- performance(pred, "rmse")@y.values[[1]] expect_equal(rmse, 0) pred <- prediction(c(0.0, 0.0, 1.0, 1.0), ordered(c(1, 1, 0, 0), levels=c(1,0))) rmse <- performance(pred, "rmse")@y.values[[1]] expect_equal(rmse, 1) pred <- prediction(c(0.0, 0.0, 1.0, 1.0), ordered(c(2, 2, 3, 3))) rmse <- performance(pred, "rmse")@y.values[[1]] expect_equal( rmse, 2) pred <- prediction(c(-0.5, 0.2, 2.5, 0.3), ordered(c(-1, -1, 1, 1))) rmse <- performance(pred, "rmse")@y.values[[1]] expect_equal( rmse, sqrt(1/4*(0.5^2 + 1.2^2 + 1.5^2 + 0.7^2))) ############################################################################## # test PRBE pred <- prediction(some.predictions, some.labels) prbe.y <- performance(pred, "prbe")@y.values[[1]] prbe.x <- performance(pred, "prbe")@x.values[[1]] expect_equal(prbe.y, prbe.reference) expect_equal(prbe.x, prbe.reference.x) ############################################################################## # test prediction interface pred <- prediction(seq(0, 1, length=10), c(rep(0,5), rep(1,5))) expect_equal(performance(pred, "auc")@y.values[[1]], 1) pred <- prediction(seq(1, 0, length=10), c(rep(0,5), rep(1,5))) expect_equal(performance(pred, "auc")@y.values[[1]], 0) pred <- prediction(seq(0, 1, length=10), factor(c(rep(0,5), rep(1,5)))) expect_equal(performance(pred, "auc")@y.values[[1]], 1) pred <- prediction(seq(0, 1, length=10), ordered(c(rep(0,5), rep(1,5)))) expect_equal(performance(pred, "auc")@y.values[[1]], 1) pred <- prediction(seq(0, 1, length=10), ordered(c(rep(0,5), rep(1,5)), levels=c(1,0))) expect_equal(performance(pred, "auc")@y.values[[1]], 0) pred <- prediction(seq(0, 1, length=10), ordered(c(rep("A",5), rep("B",5)))) expect_equal(performance(pred, "auc")@y.values[[1]], 1) expect_error(pred <- prediction(seq(0, 1, length=10), c(rep(0,5), rep(1,5)), label.ordering=c(1,2))) 
expect_error(pred <- prediction(list(c(0.1,0.3,0.7,1), c(0,0.2,0.8,1)), list(factor(c(0,0,1,1)), factor(c(1,1,2,2))))) expect_error(pred <- prediction(list(c(0.2,0.3,0.7,1), c(0,0.2,0.8,1)), list(factor(c(0,0,1,1)), ordered(c(0,0,1,1))))) pred <- prediction(list(c(0,0.3,0.7,1), c(0,0.2,0.8,1)), list(factor(c(0,0,1,1)), factor(c(0,0,1,1)))) expect_equal(performance(pred, "auc")@y.values, list(1, 1)) pred1 <- prediction(data.frame(c(0,0.3,0.7,1), c(0,0.2,0.8,1)), data.frame(factor(c(0,0,1,1)), factor(c(0,0,1,1)))) expect_equal( pred, pred1) pred2 <- prediction(cbind(c(0,0.3,0.7,1), c(0,0.2,0.8,1)), cbind(c(0,0,1,1), c(0,0,1,1))) expect_equal(pred, pred2) }) ROCR/tests/testthat/test-aux.r0000644000176200001440000000367513644317760016004 0ustar liggesuserscontext("aux") test_that("aux:",{ # Farg ll <- list(arg1=c(1,2,3), arg2=c(4,5,6)) expect_equal(.farg(ll, arg3=c(7,8,9)), list(arg1=c(1,2,3), arg2=c(4,5,6), arg3=c(7,8,9))) expect_equal(.farg(ll, arg1=c(1,4,3)), list(arg1=c(1,2,3), arg2=c(4,5,6))) # Garg ll <- list(arg1=list(1,2,3), arg2=list(4,5,6)) expect_equal(.garg(ll, 'arg1'), 1) expect_equal(.garg(ll, 'arg1',2), 2) expect_equal(.garg(ll, 'arg2',3), 6) expect_equal(.garg(ll, 'arg3'), ll$arg3) # Slice ll <- list(arg1=list(c(1,2,3), c(2,3,4), c(3,4,5)), arg2=list('a', 'b', 'c')) expect_equal(.slice.run(ll, 1), list(arg1=c(1,2,3), arg2='a')) expect_equal(.slice.run(ll, 2), list(arg1=c(2,3,4), arg2='b')) expect_equal(.slice.run(ll, 3), list(arg1=c(3,4,5), arg2='c')) ll <- list(arg1=list(c(1,2,3), c(2,3,4), c(3,4,5)), arg2=c('a', 'b', 'c')) expect_equal(.slice.run(ll, 1), list(arg1=c(1,2,3), arg2=c('a', 'b', 'c'))) expect_equal(.slice.run(ll, 2), list(arg1=c(2,3,4), arg2=c('a', 'b', 'c'))) expect_equal(.slice.run(ll, 3), list(arg1=c(3,4,5), arg2=c('a', 'b', 'c'))) # .select.args actual <- ROCR:::.select.args(ll, "arg1") expect_equal(actual,ll["arg1"]) actual <- ROCR:::.select.args(ll, "arg1", complement = TRUE) expect_equal(actual,ll["arg2"]) # .construct.linefunct 
actual <- ROCR:::.construct.linefunct(1,2,3,4) expect_type(actual, "closure") expect_error(ROCR:::.construct.linefunct(1,2,1,4), "Cannot construct a function from data.") # .intersection.point f <- ROCR:::.construct.linefunct(1,2,3,4) g <- ROCR:::.construct.linefunct(2,3,4,5) actual <- ROCR:::.intersection.point(f,g) expect_equal(actual, c(Inf,Inf)) g <- ROCR:::.construct.linefunct(2,3,1,5) actual <- ROCR:::.intersection.point(f,g) expect_equal(actual, c(2,3)) }) ROCR/tests/testthat.R0000644000176200001440000000007013644317760014154 0ustar liggesuserslibrary(testthat) library(ROCR) test_check("ROCR") ROCR/MD50000644000176200001440000000345115134614023011331 0ustar liggesusersea24a1ada01b04a18d195fbdb48879e4 *DESCRIPTION 0b7ae19d64f9121936826eeda3ced802 *NAMESPACE dfdef656b14071451bfb81f8d85b3b2a *NEWS 8e6acd7985738b147021f586f43b8cc9 *R/ROCR_aux.R dcf6ad11eb3e5d5e316ef3adc05c7ffe *R/performance.R 673949c7018967d2da4953a31cdeaca3 *R/performance_measures.R a89d501af16fac35a0dc2b906c90a0b0 *R/performance_plots.R 69636a83fa2cb4918d77906348c6e778 *R/prediction.R f4b1aafb3e3a50c01e8881ad194223a9 *R/zzz.R b8e91ecbb43e3a900dfbb4f91ca8e681 *README.md 28b5077a5f737a8d1a9e62d623c81c72 *build/vignette.rds 497d34bf928630ed582476e043a68f18 *data/ROCR.hiv.rda a6b723208917a41ca8d978a95640f1cc *data/ROCR.simple.rda e4d3b38035f21f0bd36606cd08b3ded3 *data/ROCR.xval.rda 4593314ea62a4f184e25aceee2a3cfd0 *demo/00Index 4ba4a692fa050d2312b3e3ca9091297f *demo/ROCR.R 8d85016d7e9969e2e97d205afa79502f *inst/CITATION a33aba9828d48bdcd9b53b9a04b1a419 *inst/doc/ROCR.R 5af2a94489e9b613925c90a7c76faf94 *inst/doc/ROCR.Rmd 958e1c3821139d8444c1081358831008 *inst/doc/ROCR.html 9ff3521d7c646083d76fc763457a1bce *man/ROCR.hiv.Rd 0fce1efd4305836eee19c9d4cc1a7783 *man/ROCR.simple.Rd a27a1275f2d024d7d0450fdc595e9809 *man/ROCR.xval.Rd 6a49a4f01e00ad05501d072ffc4e67dd *man/performance-class.Rd 3c0b368a356559ad8ffeafdfa87e7ca0 *man/performance.Rd 81515165d5f1214fe4b033249236df09 *man/plot-methods.Rd 
80fec1fb008a2428531afd81cf298565 *man/prediction-class.Rd b678b3421650e3cb7f32beb5411f78e7 *man/prediction.Rd c3cd3ed4bdaeb75a7bc7ebdcfcaa628e *tests/testthat.R 3f54ab8be1a9f704174f871841be018d *tests/testthat/test-aux.r 6e418f64b7f1b75fdb0abf37c56cdaf2 *tests/testthat/test-consistency.r de0e4e7a6b743412fa1471d75f307759 *tests/testthat/test-plot.r a4ccef13e877457c73070bddb4c878a1 *tests/testthat/test-simple.r 5af2a94489e9b613925c90a7c76faf94 *vignettes/ROCR.Rmd f4d20c7bb157b62a464cc624772b4711 *vignettes/references.bibtex ROCR/R/0000755000176200001440000000000013703321565011225 5ustar liggesusersROCR/R/performance_measures.R0000644000176200001440000004243013644317760015566 0ustar liggesusers## ------------------------------------------------------------------------ ## classical machine learning contingency table measures ## ------------------------------------------------------------------------ .performance.accuracy <- function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg, n.pos.pred, n.neg.pred) { list( cutoffs, (tn+tp) / length(predictions) ) } .performance.error.rate <- function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg, n.pos.pred, n.neg.pred) { list( cutoffs, (fn+fp) / length(predictions) ) } .performance.false.positive.rate <- function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg, n.pos.pred, n.neg.pred) { list( cutoffs, fp / n.neg ) } .performance.true.positive.rate <- function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg, n.pos.pred, n.neg.pred) { list( cutoffs, tp / n.pos ) } .performance.false.negative.rate <- function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg, n.pos.pred, n.neg.pred) { list( cutoffs, fn / n.pos ) } .performance.true.negative.rate <- function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg, n.pos.pred, n.neg.pred) { list( cutoffs, tn / n.neg ) } .performance.positive.predictive.value <- function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, 
n.neg, n.pos.pred, n.neg.pred) { ppv <- tp / (fp + tp) list( cutoffs, ppv ) } .performance.negative.predictive.value <- function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg, n.pos.pred, n.neg.pred) { npv <- tn / (tn + fn) list( cutoffs, npv ) } .performance.prediction.conditioned.fallout <- function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg, n.pos.pred, n.neg.pred) { ppv <- .performance.positive.predictive.value(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg, n.pos.pred, n.neg.pred)[[2]] list( cutoffs, 1 - ppv ) } .performance.prediction.conditioned.miss <- function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg, n.pos.pred, n.neg.pred) { npv <- .performance.negative.predictive.value(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg, n.pos.pred, n.neg.pred)[[2]] list( cutoffs, 1 - npv ) } ## ------------------------------------------------------------------------ ## ...not actually performance measures, but very useful as a second axis ## against which to plot a "real" performance measure ## (popular example: lift charts) ## ------------------------------------------------------------------------ .performance.rate.of.positive.predictions <- function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg, n.pos.pred, n.neg.pred) { list( cutoffs, n.pos.pred / (n.pos + n.neg) ) } .performance.rate.of.negative.predictions <- function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg, n.pos.pred, n.neg.pred) { list( cutoffs, n.neg.pred / (n.pos + n.neg) ) } ## ------------------------------------------------------------------------ ## Classical statistical contingency table measures ## ------------------------------------------------------------------------ .performance.phi <- function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg, n.pos.pred, n.neg.pred) { list(cutoffs, (tn*tp - fn*fp) / (sqrt(n.pos) * sqrt(n.neg) * sqrt(n.pos.pred) * sqrt(n.neg.pred)) ) } 
## ---------------------------------------------------------------------------
## Internal performance-measure helpers (continued).
## Shared calling convention: 'cutoffs' is the vector of score thresholds;
## fp/tp/fn/tn hold the confusion-table counts at each cutoff; n.pos/n.neg
## are the class totals; n.pos.pred/n.neg.pred the per-cutoff prediction
## totals. Each helper returns list(x.values, y.values) consumed by
## performance().
## ---------------------------------------------------------------------------

## Mutual information I(Yhat, Y) at every cutoff, in bits.
## Computed as log2(n) + (1/n) * sum_ij k_ij * log2(k_ij / (row_i * col_j)),
## where k_ij is the 2x2 contingency table and row_i/col_j its margins.
.performance.mutual.information <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg,
           n.pos.pred, n.neg.pred) {
  n.samples <- n.pos + n.neg
  mi <- c()
  for (k in 1:length(cutoffs)) {
    ## observed 2x2 table at cutoff k: rows = predicted -,+ ; cols = true -,+
    kij <- rbind( c(tn[k],fn[k]), c(fp[k],tp[k]) )
    ## products of the corresponding row/column margins; the missing 1/n^2
    ## normalization is compensated by the log2(n.samples) term below
    ki.j. <- rbind(c(n.neg * n.neg.pred[k], n.neg.pred[k] * n.pos),
                   c(n.neg * n.pos.pred[k], n.pos * n.pos.pred[k]))
    log.matrix <- log2( kij / ki.j.)
    ## empty cells contribute 0 to the sum (0 * log 0 := 0 convention)
    log.matrix[kij/ki.j.==0] <- 0
    mi <- c(mi, log2(n.samples) + sum( kij * log.matrix) / n.samples )
  }
  list( cutoffs, mi )
}

## Pearson chi-squared test statistic of the 2x2 contingency table at each
## cutoff. correct=FALSE disables Yates' continuity correction; R may warn
## when expected cell counts are small.
#' @importFrom stats chisq.test
.performance.chisq <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg,
           n.pos.pred, n.neg.pred) {
  chisq <- c()
  for (i in 1:length(cutoffs)) {
    A <- rbind( c( tn[i], fn[i]), c(fp[i], tp[i]) )
    chisq <- c(chisq, stats::chisq.test(A, correct=FALSE)$statistic )
  }
  list( cutoffs, chisq )
}

## Odds ratio (TP*TN)/(FN*FP) at each cutoff. Yields Inf/NaN wherever
## FN or FP is 0; callers/plots must tolerate that.
.performance.odds.ratio <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg,
           n.pos.pred, n.neg.pred) {
  list( cutoffs, tp * tn / (fn * fp) )
}

## ------------------------------------------------------------------------
## Other measures based on contingency tables
## ------------------------------------------------------------------------

## Lift value: P(Yhat=+ | Y=+) / P(Yhat=+) at each cutoff.
.performance.lift <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg,
           n.pos.pred, n.neg.pred) {
  n.samples <- n.pos + n.neg
  list( cutoffs, (tp / n.pos) / (n.pos.pred / n.samples) )
}

## F measure: weighted harmonic mean of precision and recall,
## F = 1 / (alpha/P + (1-alpha)/R). alpha=0.5 gives the balanced F1.
.performance.f <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg,
           n.pos.pred, n.neg.pred, alpha) {
  prec <- .performance.positive.predictive.value(predictions, labels,
                                                 cutoffs, fp, tp, fn, tn,
                                                 n.pos, n.neg, n.pos.pred,
                                                 n.neg.pred)[[2]]
  list( cutoffs, 1/ ( alpha*(1/prec) + (1-alpha)*(1/(tp/n.pos)) ) )
}

## ROC convex hull (Fawcett 2001): the ROC curve with concavities removed.
## Returns fpr/tpr coordinates of the hull; unlike most helpers the result
## is already a parametric curve, so there are no cutoffs on the x axis.
#' @importFrom grDevices chull
.performance.rocconvexhull <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg,
           n.pos.pred, n.neg.pred) {
  x <- fp / n.neg
  y <- tp / n.pos
  ## drop non-finite ROC points (e.g. from division by zero counts)
  finite.bool <- is.finite(x) & is.finite(y)
  x <- x[ finite.bool ]
  y <- y[ finite.bool ]
  if (length(x) < 2) {
    stop("Not enough distinct predictions to compute ROC convex hull.")
  }
  ## keep only points on the convex hull
  ind <- grDevices::chull(x, y)
  x.ch <- x[ind]
  y.ch <- y[ind]
  ## keep only convex hull points above the diagonal, except (0,0)
  ## and (1,1)
  ind.upper.triangle <- x.ch < y.ch
  x.ch <- c(0, x.ch[ind.upper.triangle], 1)
  y.ch <- c(0, y.ch[ind.upper.triangle], 1)
  ## sort remaining points by ascending x value
  ind <- order(x.ch)
  x.ch <- x.ch[ind]
  y.ch <- y.ch[ind]
  list( x.ch, y.ch )
}

## ----------------------------------------------------------------------------
## Cutoff-independent measures
## ----------------------------------------------------------------------------

## Area under the ROC curve via trapezoidal integration. fpr.stop < 1
## truncates integration at that false-positive rate (partial AUC),
## linearly interpolating the tpr at the stopping point.
## Returns list(x.values = c(), y.values = scalar AUC).
#' @importFrom stats approxfun
.performance.auc <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg,
           n.pos.pred, n.neg.pred, fpr.stop) {
  x <- fp / n.neg
  y <- tp / n.pos
  finite.bool <- is.finite(x) & is.finite(y)
  x <- x[ finite.bool ]
  y <- y[ finite.bool ]
  if (length(x) < 2) {
    stop(paste("Not enough distinct predictions to compute area",
               "under the ROC curve."))
  }
  if (fpr.stop < 1) {
    ## interpolate the curve at fpr.stop and cut the tail off
    ind <- max(which( x <= fpr.stop ))
    tpr.stop <- stats::approxfun( x[ind:(ind+1)], y[ind:(ind+1)] )(fpr.stop)
    x <- c(x[1:ind], fpr.stop)
    y <- c(y[1:ind], tpr.stop)
  }
  ans <- list()
  auc <- 0
  ## trapezoidal rule over consecutive ROC points
  for (i in 2:length(x)) {
    auc <- auc + 0.5 * (x[i] - x[i-1]) * (y[i] + y[i-1])
  }
  ans <- list( c(), auc)
  names(ans) <- c("x.values","y.values")
  return(ans)
}

## Area under the Precision/Recall curve, with non-linear interpolation
## between distant points following Davis & Goadrich (ICML'06).
# written by Thomas Unterthiner (unterthiner@bioinf.jku.at)
.performance.aucpr <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg,
           n.pos.pred, n.neg.pred) {
  ## collapse ties: for each tp level keep the smallest fp
  tmp <- aggregate(list(fp=fp), by=list(tp=tp), min)
  tp <- tmp$tp
  fp <- tmp$fp
  prec <- tp / (fp + tp)
  rec <- tp / n.pos
  ## by convention precision at (tp=0, fp=0) is 1
  if (fp[1] == 0 & tp[1] == 0) {
    prec[1] = 1
  }
  finite.bool <- is.finite(prec) & is.finite(rec)
  prec <- prec[ finite.bool ]
  rec <- rec[ finite.bool ]
  if (length(rec) < 2) {
    stop(paste("Not enough distinct predictions to compute area",
               "under the Precision/Recall curve."))
  }
  # if two points are too distant from each other, we need to
  # correctly interpolate between them. This is done according to
  # Davis & Goadrich,
  #"The Relationship Between Precision-Recall and ROC Curves", ICML'06
  ## NOTE(review): the loop range is fixed before iteration while 'rec' and
  ## 'prec' grow via append() inside it ('tp'/'fp' stay fixed); for inputs
  ## with several large gaps, verify the insertion positions 'after=i'
  ## still line up with the intended gaps.
  for (i in seq_along(rec[-length(rec)])) {
    if (tp[i+1] - tp[i] > 2) {
      skew = (fp[i+1]-fp[i]) / (tp[i+1]-tp[i])
      x = seq(1, tp[i+1]-tp[i], by=1)
      rec <- append(rec, (x+tp[i])/n.pos, after=i)
      prec <- append(prec, (x+tp[i])/(tp[i]+fp[i]+x+ skew*x), after=i)
    }
  }
  ## trapezoidal rule over the (possibly interpolated) PR points
  auc <- 0
  for (i in seq.int(from = 2, to = length(rec))) {
    auc <- auc + 0.5 * (rec[i] - rec[i-1]) * (prec[i] + prec[i-1])
  }
  ans <- list( c(), auc)
  names(ans) <- c("x.values","y.values")
  return(ans)
}

## Precision-recall break-even point(s): cutoffs at which precision equals
## recall, found as roots of the (linearly interpolated) difference
## function precision - recall over the cutoff axis.
#' @importFrom stats uniroot approxfun
.performance.precision.recall.break.even.point <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg,
           n.pos.pred, n.neg.pred) {
  pred <- prediction( predictions, labels)
  perf <- performance( pred, measure="prec", x.measure="rec")
  ## reverse so that cutoffs ('alpha') are in ascending order
  x <- rev(perf@x.values[[1]])
  y <- rev(perf@y.values[[1]])
  alpha <- rev(perf@alpha.values[[1]])
  finite.bool <- is.finite(alpha) & is.finite(x) & is.finite(y)
  x <- x[ finite.bool ]
  y <- y[ finite.bool ]
  alpha <- alpha[ finite.bool ]
  if (length(x) < 2) {
    stop(paste("Not enough distinct predictions to compute",
               "precision/recall intersections."))
  }
  intersection.cutoff <- c()
  intersection.pr <- c()
  ## find all intersection points by looking at all intervals (i,i+1):
  ## if the difference function between x and y has different signs at the
  ## interval boundaries, then an intersection point is in the interval;
  ## compute as the root of the difference function
  if ( (x[1]-y[1]) == 0) {
    intersection.cutoff <- c( alpha[1] )
    intersection.pr <- c( x[1] )
  }
  for (i in (1:(length(alpha)-1))) {
    if ((x[i+1]-y[i+1]) == 0) {
      ## exact intersection at a sampled cutoff
      intersection.cutoff <- c( intersection.cutoff, alpha[i+1] )
      intersection.pr <- c( intersection.pr, x[i+1] )
    } else if ((x[i]-y[i])*(x[i+1]-y[i+1]) < 0 ) {
      ## sign change: root of the interpolated difference function
      ans <- stats::uniroot(stats::approxfun(c(alpha[i], alpha[i+1] ),
                                             c(x[i]-y[i], x[i+1]-y[i+1])),
                            c(alpha[i],alpha[i+1]))
      intersection.cutoff <- c(intersection.cutoff, ans$root)
      ## NOTE(review): ans$f.root is the residual of the difference
      ## function at the root (approximately zero), not an interpolated
      ## precision value — confirm this is the intended y coordinate.
      intersection.pr <- c(intersection.pr, ans$f.root)
    }
  }
  list( rev(intersection.cutoff), rev(intersection.pr) )
}

## Calibration error: absolute difference between mean predicted
## confidence and the empirical positive fraction, estimated in a sliding
## window of 'window.size' predictions (sorted by descending score).
## Only meaningful for probabilistic scores in [0,1].
#' @importFrom stats median
.performance.calibration.error <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg,
           n.pos.pred, n.neg.pred, window.size) {
  if (window.size > length(predictions)) {
    stop("Window size exceeds number of predictions.")
  }
  if (min(predictions)<0 || max(predictions)>1) {
    stop("Calibration error needs predictions between 0 and 1")
  }
  ## second factor level is the positive class (prediction() convention)
  pos.label <- levels(labels)[2]
  neg.label <- levels(labels)[1]
  ordering <- rev(order( predictions ))
  predictions <- predictions[ ordering ]
  labels <- labels[ ordering ]
  median.cutoffs <- c()
  calibration.errors <- c()
  for (left.index in 1 : (length(predictions) - window.size+1) ) {
    right.index <- left.index + window.size - 1
    pos.fraction <-
      sum(labels[left.index : right.index] == pos.label) / window.size
    mean.prediction <- mean( predictions[ left.index : right.index ] )
    calibration.errors <- c(calibration.errors,
                            abs(pos.fraction - mean.prediction))
    ## x axis: median prediction inside the window
    median.cutoffs <- c(median.cutoffs,
                        stats::median(predictions[left.index:right.index]))
  }
  list( median.cutoffs, calibration.errors )
}

## Mean cross-entropy (scalar). Requires labels exactly {0,1} and
## probabilistic predictions in [0,1].
.performance.mean.cross.entropy <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg,
           n.pos.pred, n.neg.pred) {
  if (! all(levels(labels)==c(0,1)) || any(predictions<0) ||
      any(predictions>1) ) {
    stop(paste("Class labels need to be 0 and 1 and predictions between",
               "0 and 1 for mean cross entropy."))
  }
  pos.label <- levels(labels)[2]
  neg.label <- levels(labels)[1]
  list( c(),
        - 1/length(predictions) *
          (sum( log( predictions[which(labels==pos.label)] )) +
           sum( log( 1 - predictions[which(labels==neg.label)] ))) )
}

## Root-mean-squared error (scalar). Requires numeric class labels.
.performance.root.mean.squared.error <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg,
           n.pos.pred, n.neg.pred) {
  ## convert labels from factor to numeric values
  labels <- as.numeric(levels(labels))[labels]
  if (any(is.na(labels))) {
    stop("For rmse predictions have to be numeric.")
  }
  list( c(), sqrt( 1/length(predictions) * sum( (predictions - labels)^2 )) )
}

## ----------------------------------------------------------------------------
## Derived measures:
## ----------------------------------------------------------------------------

## SAR score (Caruana, ROCAI 2004): 1/3 * (accuracy + (1 - RMSE) + AUC),
## evaluated at every cutoff (only the accuracy term varies with cutoff).
.performance.sar <-
  function( predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg,
            n.pos.pred, n.neg.pred) {
  pred <- prediction( predictions, labels)
  perf.acc <- performance( pred, measure="acc")
  perf.rmse <- performance( pred, measure="rmse")
  perf.auc <- performance( pred, measure="auc")
  list(cutoffs,
       1/3 * (perf.acc@y.values[[1]] + (1 - perf.rmse@y.values[[1]]) +
              perf.auc@y.values[[1]]))
}

## ----------------------------------------------------------------------------
## Measures taking into account actual cost considerations
## ----------------------------------------------------------------------------

## Expected cost curve (Drummond & Holte 2000/2004): the lower envelope of
## the cost lines induced by all (fpr, tpr) operating points, expressed
## over the probability-cost function on the x axis.
.performance.expected.cost <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg,
           n.pos.pred, n.neg.pred) {
  ## kick out suboptimal values (i.e. fpr/tpr pair for which another one
  ## with same fpr and higher tpr exists,
  ## or one for which one with same tpr but lower fpr exists
  if (n.neg==0 || n.pos==0) {
    stop(paste("At least one positive and one negative sample are",
               "needed to compute a cost curve."))
  }
  fpr <- fp / n.neg
  tpr <- tp / n.pos
  ## sort by fpr (ascending), in case of ties by descending tpr
  ind <- order(fpr,-tpr)
  fpr <- fpr[ind]
  tpr <- tpr[ind]
  ## for tied fprs, only the one with the highest tpr is kept
  ind <- !duplicated(fpr)
  fpr <- fpr[ind]
  tpr <- tpr[ind]
  ## for tied tprs, only keep the one with the lowest fpr
  ind <- order(-tpr,fpr)
  fpr <- fpr[ind]
  tpr <- tpr[ind]
  ind <- !duplicated(tpr)
  fpr <- fpr[ind]
  tpr <- tpr[ind]
  ## ensure the trivial endpoints (0,0) and (1,1) are present
  if (!any(0==fpr & 0==tpr)) {
    fpr <- c(0,fpr)
    tpr <- c(0,tpr)
  }
  if (!any(1==fpr & 1==tpr)) {
    fpr <- c(fpr,1)
    tpr <- c(tpr,1)
  }
  ## compute all functions
  ## each operating point yields the cost line from (0, fpr) to (1, 1-tpr)
  f <- list()
  for (i in 1:length(fpr)) {
    f <- c(f, .construct.linefunct( 0, fpr[i], 1, 1-tpr[i] ))
  }
  ## compute all intersection points
  ## an intersection belongs to the lower envelope iff no line lies below
  ## it at that x (tolerance: sqrt(machine epsilon))
  x.values <- c()
  y.values <- c()
  for (i in 1:(length(fpr)-1)) {
    for (j in (i+1):length(fpr)) {
      ans <- .intersection.point( f[[i]], f[[j]] )
      if (all(is.finite(ans))) {
        y.values.at.current.x <- c()
        for (k in 1:length(f)) {
          y.values.at.current.x <- c(y.values.at.current.x,
                                     f[[k]](ans[1]))
        }
        if (abs(ans[2] - min(y.values.at.current.x )) <
            sqrt(.Machine$double.eps)) {
          x.values <- c(x.values, ans[1])
          y.values <- c(y.values, ans[2])
        }
      }
    }
  }
  ## the envelope always starts at (0,0) and ends at (1,0)
  if (!any(0==x.values & 0==y.values)) {
    x.values <- c(0,x.values)
    y.values <- c(0,y.values)
  }
  if (!any(1==x.values & 0==y.values)) {
    x.values <- c(x.values,1)
    y.values <- c(y.values,0)
  }
  ind <- order( x.values)
  list( x.values[ind], y.values[ind] )
}

## Explicit misclassification cost at each cutoff:
## P(+)*fnr*cost.fn + P(-)*fpr*cost.fp.
.performance.cost <-
  function(predictions, labels, cutoffs, fp, tp, fn, tn, n.pos, n.neg,
           n.pos.pred, n.neg.pred, cost.fp, cost.fn) {
  n.samples <- n.pos + n.neg
  cost <- ((n.pos / n.samples) * (fn / n.pos) * cost.fn +
           (n.neg / n.samples) * (fp / n.neg) * cost.fp)
  list( cutoffs, cost )
}
ROCR/R/performance.R0000644000176200001440000006431415134431532013655 0ustar liggesusers #' @name performance #' #' @title Function to create performance objects #' #' @description #' All kinds of predictor evaluations are performed using this function. #' #' @details #' Here is the list of available performance measures. Let Y and #' \eqn{\hat{Y}}{Yhat} be random variables representing the class and the prediction for #' a randomly drawn sample, respectively. We denote by #' \eqn{\oplus}{+} and \eqn{\ominus}{-} the positive and #' negative class, respectively. Further, we use the following #' abbreviations for empirical quantities: P (# positive #' samples), N (# negative samples), TP (# true positives), TN (# true #' negatives), FP (# false positives), FN (# false negatives). #' \describe{ #' \item{\code{acc}:}{Accuracy. \eqn{P(\hat{Y}=Y)}{P(Yhat = Y)}. Estimated #' as: \eqn{\frac{TP+TN}{P+N}}{(TP+TN)/(P+N)}.} #' \item{\code{err}:}{Error rate. \eqn{P(\hat{Y}\ne Y)}{P(Yhat != #' Y)}. Estimated as: \eqn{\frac{FP+FN}{P+N}}{(FP+FN)/(P+N)}.} #' \item{\code{fpr}:}{False positive rate. \eqn{P(\hat{Y}=\oplus | Y = #' \ominus)}{P(Yhat = + | Y = -)}. Estimated as: #' \eqn{\frac{FP}{N}}{FP/N}.} #' \item{\code{fall}:}{Fallout. Same as \code{fpr}.} #' \item{\code{tpr}:}{True positive #' rate. \eqn{P(\hat{Y}=\oplus|Y=\oplus)}{P(Yhat = + | Y = +)}. Estimated #' as: \eqn{\frac{TP}{P}}{TP/P}.} #' \item{\code{rec}:}{Recall. Same as \code{tpr}.} #' \item{\code{sens}:}{Sensitivity. Same as \code{tpr}.} #' \item{\code{fnr}:}{False negative #' rate. \eqn{P(\hat{Y}=\ominus|Y=\oplus)}{P(Yhat = - | Y = #' +)}. Estimated as: \eqn{\frac{FN}{P}}{FN/P}.} #' \item{\code{miss}:}{Miss. Same as \code{fnr}.} #' \item{\code{tnr}:}{True negative rate. \eqn{P(\hat{Y} = #' \ominus|Y=\ominus)}{P(Yhat = - | Y = -)}.} #' \item{\code{spec}:}{Specificity. Same as \code{tnr}.} #' \item{\code{ppv}:}{Positive predictive #' value. \eqn{P(Y=\oplus|\hat{Y}=\oplus)}{P(Y = + | Yhat = #' +)}. 
Estimated as: \eqn{\frac{TP}{TP+FP}}{TP/(TP+FP)}.} #' \item{\code{prec}:}{Precision. Same as \code{ppv}.} #' \item{\code{npv}:}{Negative predictive #' value. \eqn{P(Y=\ominus|\hat{Y}=\ominus)}{P(Y = - | Yhat = #' -)}. Estimated as: \eqn{\frac{TN}{TN+FN}}{TN/(TN+FN)}.} #' \item{\code{pcfall}:}{Prediction-conditioned #' fallout. \eqn{P(Y=\ominus|\hat{Y}=\oplus)}{P(Y = - | Yhat = #' +)}. Estimated as: \eqn{\frac{FP}{TP+FP}}{FP/(TP+FP)}.} #' \item{\code{pcmiss}:}{Prediction-conditioned #' miss. \eqn{P(Y=\oplus|\hat{Y}=\ominus)}{P(Y = + | Yhat = #' -)}. Estimated as: \eqn{\frac{FN}{TN+FN}}{FN/(TN+FN)}.} #' \item{\code{rpp}:}{Rate of positive predictions. \eqn{P( \hat{Y} = #' \oplus)}{P(Yhat = +)}. Estimated as: (TP+FP)/(TP+FP+TN+FN).} #' \item{\code{rnp}:}{Rate of negative predictions. \eqn{P( \hat{Y} = #' \ominus)}{P(Yhat = -)}. Estimated as: (TN+FN)/(TP+FP+TN+FN).} #' \item{\code{phi}:}{Phi correlation coefficient. \eqn{\frac{TP \cdot #' TN - FP \cdot FN}{\sqrt{ (TP+FN) \cdot (TN+FP) \cdot (TP+FP) #' \cdot (TN+FN)}}}{(TP*TN - #' FP*FN)/(sqrt((TP+FN)*(TN+FP)*(TP+FP)*(TN+FN)))}. Yields a #' number between -1 and 1, with 1 indicating a perfect #' prediction, 0 indicating a random prediction. Values below 0 #' indicate a worse than random prediction.} #' \item{\code{mat}:}{Matthews correlation coefficient. Same as \code{phi}.} #' \item{\code{mi}:}{Mutual information. \eqn{I(\hat{Y},Y) := H(Y) - #' H(Y|\hat{Y})}{I(Yhat, Y) := H(Y) - H(Y | Yhat)}, where H is the #' (conditional) entropy. Entropies are estimated naively (no bias #' correction).} #' \item{\code{chisq}:}{Chi square test statistic. \code{?chisq.test} #' for details. Note that R might raise a warning if the sample size #' is too small.} #' \item{\code{odds}:}{Odds ratio. \eqn{\frac{TP \cdot TN}{FN \cdot #' FP}}{(TP*TN)/(FN*FP)}. Note that odds ratio produces #' Inf or NA values for all cutoffs corresponding to FN=0 or #' FP=0. 
This can substantially decrease the plotted cutoff region.} #' \item{\code{lift}:}{Lift #' value. \eqn{\frac{P(\hat{Y}=\oplus|Y=\oplus)}{P(\hat{Y}=\oplus)}}{P(Yhat = + | #' Y = +)/P(Yhat = +)}.} #' \item{\code{f}:}{Precision-recall F measure (van Rijsbergen, 1979). Weighted #' harmonic mean of precision (P) and recall (R). \eqn{F = #' \frac{1}{\alpha \frac{1}{P} + (1-\alpha)\frac{1}{R}}}{F = 1/ #' (alpha*1/P + (1-alpha)*1/R)}. If #' \eqn{\alpha=\frac{1}{2}}{alpha=1/2}, the mean is balanced. A #' frequent equivalent formulation is #' \eqn{F = \frac{(\beta^2+1) \cdot P \cdot R}{R + \beta^2 \cdot #' P}}{F = (beta^2+1) * P * R / (R + beta^2 * P)}. In this formulation, the #' mean is balanced if \eqn{\beta=1}{beta=1}. Currently, ROCR only accepts #' the alpha version as input (e.g. \eqn{\alpha=0.5}{alpha=0.5}). If no #' value for alpha is given, the mean will be balanced by default.} #' \item{\code{rch}:}{ROC convex hull. A ROC (=\code{tpr} vs \code{fpr}) curve #' with concavities (which represent suboptimal choices of cutoff) removed #' (Fawcett 2001). Since the result is already a parametric performance #' curve, it cannot be used in combination with other measures.} #' \item{\code{auc}:}{Area under the ROC curve. This is equal to the value of the #' Wilcoxon-Mann-Whitney test statistic and also the probability that the #' classifier will score a randomly drawn positive sample higher than a #' randomly drawn negative sample. Since the output of #' \code{auc} is cutoff-independent, this #' measure cannot be combined with other measures into a parametric #' curve. The partial area under the ROC curve up to a given false #' positive rate can be calculated by passing the optional parameter #' \code{fpr.stop=0.5} (or any other value between 0 and 1) to #' \code{performance}.} #' \item{\code{aucpr}:}{Area under the Precision/Recall curve. 
Since the output #' of \code{aucpr} is cutoff-independent, this measure cannot be combined #' with other measures into a parametric curve.} #' \item{\code{prbe}:}{Precision-recall break-even point. The cutoff(s) where #' precision and recall are equal. At this point, positive and negative #' predictions are made at the same rate as their prevalence in the #' data. Since the output of #' \code{prbe} is just a cutoff-independent scalar, this #' measure cannot be combined with other measures into a parametric curve.} #' \item{\code{cal}:}{Calibration error. The calibration error is the #' absolute difference between predicted confidence and actual reliability. This #' error is estimated at all cutoffs by sliding a window across the #' range of possible cutoffs. The default window size of 100 can be #' adjusted by passing the optional parameter \code{window.size=200} #' to \code{performance}. E.g., if for several #' positive samples the output of the classifier is around 0.75, you might #' expect from a well-calibrated classifier that the fraction of them #' which is correctly predicted as positive is also around 0.75. In a #' well-calibrated classifier, the probabilistic confidence estimates #' are realistic. Only for use with #' probabilistic output (i.e. scores between 0 and 1).} #' \item{\code{mxe}:}{Mean cross-entropy. Only for use with #' probabilistic output. \eqn{MXE :=-\frac{1}{P+N}( \sum_{y_i=\oplus} #' ln(\hat{y}_i) + \sum_{y_i=\ominus} ln(1-\hat{y}_i))}{MXE := - 1/(P+N) \sum_{y_i=+} #' ln(yhat_i) + \sum_{y_i=-} ln(1-yhat_i)}. Since the output of #' \code{mxe} is just a cutoff-independent scalar, this #' measure cannot be combined with other measures into a parametric curve.} #' \item{\code{rmse}:}{Root-mean-squared error. Only for use with #' numerical class labels. \eqn{RMSE:=\sqrt{\frac{1}{P+N}\sum_i (y_i #' - \hat{y}_i)^2}}{RMSE := sqrt(1/(P+N) \sum_i (y_i - #' yhat_i)^2)}. 
Since the output of #' \code{rmse} is just a cutoff-independent scalar, this #' measure cannot be combined with other measures into a parametric curve.} #' \item{\code{sar}:}{Score combinining performance measures of different #' characteristics, in the attempt of creating a more "robust" #' measure (cf. Caruana R., ROCAI2004): #' SAR = 1/3 * ( Accuracy + Area under the ROC curve + Root #' mean-squared error ).} #' \item{\code{ecost}:}{Expected cost. For details on cost curves, #' cf. Drummond&Holte 2000,2004. \code{ecost} has an obligatory x #' axis, the so-called 'probability-cost function'; thus it cannot be #' combined with other measures. While using \code{ecost} one is #' interested in the lower envelope of a set of lines, it might be #' instructive to plot the whole set of lines in addition to the lower #' envelope. An example is given in \code{demo(ROCR)}.} #' \item{\code{cost}:}{Cost of a classifier when #' class-conditional misclassification costs are explicitly given. #' Accepts the optional parameters \code{cost.fp} and #' \code{cost.fn}, by which the costs for false positives and #' negatives can be adjusted, respectively. By default, both are set #' to 1.} #' } #' #' @note #' Here is how to call \code{performance()} to create some standard #' evaluation plots: #' \describe{ #' \item{ROC curves:}{measure="tpr", x.measure="fpr".} #' \item{Precision/recall graphs:}{measure="prec", x.measure="rec".} #' \item{Sensitivity/specificity plots:}{measure="sens", x.measure="spec".} #' \item{Lift charts:}{measure="lift", x.measure="rpp".} #' } #' #' @param prediction.obj An object of class \code{prediction}. #' @param measure Performance measure to use for the evaluation. A complete list #' of the performance measures that are available for \code{measure} and #' \code{x.measure} is given in the 'Details' section. #' @param x.measure A second performance measure. 
#' If different from the default,
#' a two-dimensional curve, with \code{x.measure} taken to be the unit in
#' direction of the x axis, and \code{measure} to be the unit in direction of
#' the y axis, is created. This curve is parametrized with the cutoff.
#' @param ... Optional arguments (specific to individual performance measures).
#'
#' @return An S4 object of class \code{performance}.
#'
#' @references
#' A detailed list of references can be found on the ROCR homepage at
#' \url{https://ipa-tys.github.io/ROCR/}.
#'
#' @author
#' Tobias Sing \email{tobias.sing@gmail.com}, Oliver Sander
#' \email{osander@gmail.com}
#'
#' @seealso
#' \code{\link{prediction}},
#' \code{\link{prediction-class}},
#' \code{\link{performance-class}},
#' \code{\link{plot.performance}}
#'
#' @export
#'
#' @examples
#' # computing a simple ROC curve (x-axis: fpr, y-axis: tpr)
#' library(ROCR)
#' data(ROCR.simple)
#' pred <- prediction( ROCR.simple$predictions, ROCR.simple$labels)
#' pred
#' perf <- performance(pred,"tpr","fpr")
#' perf
#' plot(perf)
#'
#' # precision/recall curve (x-axis: recall, y-axis: precision)
#' perf <- performance(pred, "prec", "rec")
#' perf
#' plot(perf)
#'
#' # sensitivity/specificity curve (x-axis: specificity,
#' # y-axis: sensitivity)
#' perf <- performance(pred, "sens", "spec")
#' perf
#' plot(perf)
performance <- function(prediction.obj, measure, x.measure = "cutoff", ...) {
  ## Look up the measure registries: long names, implementing functions,
  ## obligatory x axes, optional arguments and their default values.
  envir.list <- .define.environments()
  long.unit.names <- envir.list$long.unit.names
  function.names <- envir.list$function.names
  obligatory.x.axis <- envir.list$obligatory.x.axis
  optional.arguments <- envir.list$optional.arguments
  default.values <- envir.list$default.values

  ## Abort in case of misuse.
  if (!is(prediction.obj, "prediction") ||
      !exists(measure, where = long.unit.names, inherits = FALSE) ||
      !exists(x.measure, where = long.unit.names, inherits = FALSE)) {
    stop(paste("Wrong argument types: First argument must be of type",
               "'prediction'; second and optional third argument must",
               "be available performance measures!"))
  }

  ## Abort if an attempt is made to use a measure that has an obligatory
  ## x axis as the x.measure (such measures cannot be combined).
  if (exists(x.measure, where = obligatory.x.axis, inherits = FALSE)) {
    ## Renamed from 'message' to avoid shadowing base::message().
    msg <- paste("The performance measure", x.measure,
                 "can only be used as 'measure', because it has",
                 "the following obligatory 'x.measure':\n",
                 get(x.measure, envir = obligatory.x.axis))
    stop(msg)
  }

  ## If 'measure' comes with an obligatory x axis, enforce that axis.
  if (exists(measure, where = obligatory.x.axis, inherits = FALSE)) {
    x.measure <- get(measure, envir = obligatory.x.axis)
  }

  if (x.measure == "cutoff" ||
      exists(measure, where = obligatory.x.axis, inherits = FALSE)) {
    ## Fetch from '...' any optional arguments for the performance measure
    ## at hand; fill up anything missing with the registered defaults.
    optional.args <- list(...)
    argnames <- c()
    if (exists(measure, where = optional.arguments, inherits = FALSE)) {
      argnames <- get(measure, envir = optional.arguments)
      default.arglist <- list()
      for (i in seq_along(argnames)) {   # seq_along is safe for empty vectors
        default.arglist <- c(default.arglist,
                             get(paste(measure, ":", argnames[i], sep = ""),
                                 envir = default.values, inherits = FALSE))
      }
      names(default.arglist) <- argnames
      for (i in seq_along(argnames)) {
        templist <- list(optional.args, default.arglist[[i]])
        names(templist) <- c('arglist', argnames[i])
        optional.args <- do.call('.farg', templist)
      }
    }
    optional.args <- .select.args(optional.args, argnames)

    ## Determine the function implementing the requested measure.
    function.name <- get(measure, envir = function.names)

    ## For each cross-validation run, compute the requested measure.
    x.values <- list()
    y.values <- list()
    for (i in seq_along(prediction.obj@predictions)) {
      argumentlist <- .sarg(optional.args,
                            predictions = prediction.obj@predictions[[i]],
                            labels = prediction.obj@labels[[i]],
                            cutoffs = prediction.obj@cutoffs[[i]],
                            fp = prediction.obj@fp[[i]],
                            tp = prediction.obj@tp[[i]],
                            fn = prediction.obj@fn[[i]],
                            tn = prediction.obj@tn[[i]],
                            n.pos = prediction.obj@n.pos[[i]],
                            n.neg = prediction.obj@n.neg[[i]],
                            n.pos.pred = prediction.obj@n.pos.pred[[i]],
                            n.neg.pred = prediction.obj@n.neg.pred[[i]])
      ans <- do.call(function.name, argumentlist)
      ## Scalar measures return NULL x values; collect only what exists.
      if (!is.null(ans[[1]])) x.values <- c(x.values, list(ans[[1]]))
      y.values <- c(y.values, list(ans[[2]]))
    }
    if (!(length(x.values) == 0 || length(x.values) == length(y.values))) {
      stop("Consistency error.")
    }
    ## Create a new performance object.
    return(new("performance",
               x.name = get(x.measure, envir = long.unit.names),
               y.name = get(measure, envir = long.unit.names),
               alpha.name = "none",
               x.values = x.values,
               y.values = y.values,
               alpha.values = list()))
  } else {
    ## Parametric curve: compute both one-dimensional measures over the
    ## cutoff range and merge them into a single object.
    perf.obj.1 <- performance(prediction.obj, measure = x.measure, ...)
    perf.obj.2 <- performance(prediction.obj, measure = measure, ...)
    return(.combine.performance.objects(perf.obj.1, perf.obj.2))
  }
}

#' @importFrom stats approxfun
.combine.performance.objects <- function(p.obj.1, p.obj.2) {
  ## Internal helper: merge two cutoff-parametrized performance objects
  ## into one parametric curve (alpha = cutoff).
  if (p.obj.1@x.name != p.obj.2@x.name) {
    stop("Error: Objects need to have identical x axis.")
  }
  if (p.obj.1@alpha.name != "none" || p.obj.2@alpha.name != "none") {
    stop("Error: At least one of the two objects has already been merged.")
  }
  if (length(p.obj.1@x.values) != length(p.obj.2@x.values)) {
    stop(paste("Only performance objects with identical number of",
               "cross-validation runs can be combined."))
  }
  x.values <- list()
  x.name <- p.obj.1@y.name
  y.values <- list()
  y.name <- p.obj.2@y.name
  alpha.values <- list()
  alpha.name <- p.obj.1@x.name
  for (i in seq_along(p.obj.1@x.values)) {
    x.values.1 <- p.obj.1@x.values[[i]]
    y.values.1 <- p.obj.1@y.values[[i]]
    x.values.2 <- p.obj.2@x.values[[i]]
    y.values.2 <- p.obj.2@y.values[[i]]
    ## Cutoffs of the combined object = merged cutoffs of the simple objects.
    cutoffs <- sort(unique(c(x.values.1, x.values.2)), decreasing = TRUE)
    ## Evaluate both measures at the merged cutoffs via a step function.
    y.values.int.1 <- stats::approxfun(x.values.1, y.values.1,
                                       method = "constant", f = 1,
                                       rule = 2)(cutoffs)
    y.values.int.2 <- stats::approxfun(x.values.2, y.values.2,
                                       method = "constant", f = 1,
                                       rule = 2)(cutoffs)
    ## 'approxfun' ignores NA and NaN; reinstate them over the cutoff
    ## intervals from which they originated.
    objs <- list(y.values.int.1, y.values.int.2)
    objs.x <- list(x.values.1, x.values.2)
    na.cutoffs.1.bool <- is.na(y.values.1) & !is.nan(y.values.1)
    nan.cutoffs.1.bool <- is.nan(y.values.1)
    na.cutoffs.2.bool <- is.na(y.values.2) & !is.nan(y.values.2)
    nan.cutoffs.2.bool <- is.nan(y.values.2)
    bools <- list(na.cutoffs.1.bool, nan.cutoffs.1.bool,
                  na.cutoffs.2.bool, nan.cutoffs.2.bool)
    values <- c(NA, NaN, NA, NaN)
    for (j in 1:4) {
      for (k in which(bools[[j]])) {
        interval.max <- objs.x[[ceiling(j / 2)]][k]
        interval.min <- -Inf
        if (k < length(objs.x[[ceiling(j / 2)]])) {
          interval.min <- objs.x[[ceiling(j / 2)]][k + 1]
        }
        objs[[ceiling(j / 2)]][cutoffs <= interval.max &
                                 cutoffs > interval.min] <- values[j]
      }
    }
    alpha.values <- c(alpha.values, list(cutoffs))
    x.values <- c(x.values, list(objs[[1]]))
    y.values <- c(y.values, list(objs[[2]]))
  }
  return(new("performance", x.name = x.name, y.name = y.name,
             alpha.name = alpha.name, x.values = x.values,
             y.values = y.values, alpha.values = alpha.values))
}

.define.environments <- function() {
  ## There are five environments: long.unit.names, function.names,
  ## obligatory.x.axis, optional.arguments, default.values.
  ## Bindings are created via list2env into fresh environments, which is
  ## equivalent to repeated assign() calls into new.env().

  ## Long names corresponding to the measure abbreviations.
  long.unit.names <- list2env(list(
    none = "None",
    cutoff = "Cutoff",
    acc = "Accuracy",
    err = "Error Rate",
    fpr = "False positive rate",
    tpr = "True positive rate",
    rec = "Recall",
    sens = "Sensitivity",
    fnr = "False negative rate",
    tnr = "True negative rate",
    spec = "Specificity",
    ppv = "Positive predictive value",
    prec = "Precision",
    npv = "Negative predictive value",
    fall = "Fallout",
    miss = "Miss",
    pcfall = "Prediction-conditioned fallout",
    pcmiss = "Prediction-conditioned miss",
    rpp = "Rate of positive predictions",
    rnp = "Rate of negative predictions",
    auc = "Area under the ROC curve",
    aucpr = "Area under the Precision/Recall curve",
    cal = "Calibration error",
    mwp = "Median window position",
    prbe = "Precision/recall break-even point",
    rch = "ROC convex hull",
    mxe = "Mean cross-entropy",
    rmse = "Root-mean-square error",
    phi = "Phi correlation coefficient",
    mat = "Matthews correlation coefficient",
    mi = "Mutual information",
    chisq = "Chi-square test statistic",
    odds = "Odds ratio",
    lift = "Lift value",
    f = "Precision-Recall F measure",
    sar = "SAR",
    ecost = "Expected cost",
    cost = "Explicit cost"), envir = new.env())

  ## Function names corresponding to the measure abbreviations.
  function.names <- list2env(list(
    acc = ".performance.accuracy",
    err = ".performance.error.rate",
    fpr = ".performance.false.positive.rate",
    tpr = ".performance.true.positive.rate",
    rec = ".performance.true.positive.rate",
    sens = ".performance.true.positive.rate",
    fnr = ".performance.false.negative.rate",
    tnr = ".performance.true.negative.rate",
    spec = ".performance.true.negative.rate",
    ppv = ".performance.positive.predictive.value",
    prec = ".performance.positive.predictive.value",
    npv = ".performance.negative.predictive.value",
    fall = ".performance.false.positive.rate",
    miss = ".performance.false.negative.rate",
    pcfall = ".performance.prediction.conditioned.fallout",
    pcmiss = ".performance.prediction.conditioned.miss",
    rpp = ".performance.rate.of.positive.predictions",
    rnp = ".performance.rate.of.negative.predictions",
    auc = ".performance.auc",
    aucpr = ".performance.aucpr",
    cal = ".performance.calibration.error",
    prbe = ".performance.precision.recall.break.even.point",
    rch = ".performance.rocconvexhull",
    mxe = ".performance.mean.cross.entropy",
    rmse = ".performance.root.mean.squared.error",
    phi = ".performance.phi",
    mat = ".performance.phi",
    mi = ".performance.mutual.information",
    chisq = ".performance.chisq",
    odds = ".performance.odds.ratio",
    lift = ".performance.lift",
    f = ".performance.f",
    sar = ".performance.sar",
    ecost = ".performance.expected.cost",
    cost = ".performance.cost"), envir = new.env())

  ## Measures that come along with an obligatory x axis (including "none").
  ## ecost requires the probability cost function as x axis, which is handled
  ## implicitly, not as an explicit performance measure.
  obligatory.x.axis <- list2env(list(
    mxe = "none",
    rmse = "none",
    prbe = "none",
    auc = "none",
    aucpr = "none",
    rch = "none",
    ecost = "none"), envir = new.env())

  ## Names of the optional arguments per measure, where applicable.
  optional.arguments <- list2env(list(
    cal = "window.size",
    f = "alpha",
    cost = c("cost.fp", "cost.fn"),
    auc = "fpr.stop"), envir = new.env())

  ## Default values for the optional arguments.
  ## Naming convention: key "measure:argument" (colon as separator).
  default.values <- list2env(list(
    "cal:window.size" = 100,
    "f:alpha" = 0.5,
    "cost:cost.fp" = 1,
    "cost:cost.fn" = 1,
    "auc:fpr.stop" = 1), envir = new.env())

  list(long.unit.names = long.unit.names,
       function.names = function.names,
       obligatory.x.axis = obligatory.x.axis,
       optional.arguments = optional.arguments,
       default.values = default.values)
}
# ROCR/R/zzz.R0000644000176200001440000004640515134431535012215 0ustar liggesusers
#' @import methods
NULL

#' @name prediction-class
#' @aliases prediction-class
#'
#' @title Class \code{prediction}
#'
#' @description
#' Object to encapsulate numerical predictions together with the
#' corresponding true class labels, optionally collecting predictions and
#' labels for several cross-validation or bootstrapping runs.
#'
#' @section Objects from the Class:
#' Objects can be created by using the \code{prediction} function.
#'
#' @note
#' Every \code{prediction} object contains information about the 2x2
#' contingency table consisting of tp,tn,fp, and fn, along with the
#' marginal sums n.pos,n.neg,n.pos.pred,n.neg.pred, because these form
#' the basis for many derived performance measures.
#'
#' @slot predictions A list, in which each element is a vector of predictions
#' (the list has length > 1 for cross-validation data).
#' @slot labels Analogously, a list in which each element is a vector of true
#' class labels.
#' @slot cutoffs A list in which each element is a vector of all necessary
#' cutoffs. Each cutoff vector consists of the predicted scores (duplicates
#' removed), in descending order.
#' @slot fp A list in which each element is a vector of the number (not the
#' rate!) of false positives induced by the cutoffs given in the corresponding
#' 'cutoffs' list entry.
#' @slot tp As fp, but for true positives.
#' @slot tn As fp, but for true negatives.
#' @slot fn As fp, but for false negatives.
#' @slot n.pos A list in which each element contains the number of positive
#' samples in the given x-validation run.
#' @slot n.neg As n.pos, but for negative samples.
#' @slot n.pos.pred A list in which each element is a vector of the number of
#' samples predicted as positive at the cutoffs given in the corresponding
#' 'cutoffs' entry.
#' @slot n.neg.pred As n.pos.pred, but for negatively predicted samples.
#'
#' @references
#' A detailed list of references can be found on the ROCR homepage at
#' \url{https://ipa-tys.github.io/ROCR/}.
#'
#' @author
#' Tobias Sing \email{tobias.sing@gmail.com}, Oliver Sander
#' \email{osander@gmail.com}
#'
#' @seealso
#' \code{\link{prediction}},
#' \code{\link{performance}},
#' \code{\link{performance-class}},
#' \code{\link{plot.performance}}
#'
#' @export
setClass("prediction",
         representation(predictions = "list",
                        labels = "list",
                        cutoffs = "list",
                        fp = "list",
                        tp = "list",
                        tn = "list",
                        fn = "list",
                        n.pos = "list",
                        n.neg = "list",
                        n.pos.pred = "list",
                        n.neg.pred = "list"))

## Compact console display for prediction objects.
setMethod("show", "prediction",
          function(object) {
            cat("A ", class(object), " instance\n", sep = "")
            if (length(object@predictions) > 1L) {
              cat(" with ", length(object@predictions), " cross ",
                  "validation runs ", sep = "")
              ## BUG FIX: the original tested
              ##   if (length(unique(vapply(...))))
              ## which is a count >= 1 and therefore always truthy, making
              ## the "(different lengths)" branch unreachable. The runs have
              ## equal lengths exactly when there is a single unique length.
              if (length(unique(vapply(object@predictions, length,
                                       integer(1)))) == 1L) {
                cat("(equal lengths)", sep = "")
              } else {
                cat("(different lengths)", sep = "")
              }
            } else {
              cat(" with ", length(object@predictions[[1L]]),
                  " data points", sep = "")
            }
          })

#' @name performance-class
#' @aliases performance-class
#'
#' @title Class \code{performance}
#'
#' @description
#' Object to capture the result of a performance evaluation, optionally
#' collecting evaluations from several cross-validation or bootstrapping runs.
#'
#' @section Objects from the Class:
#' Objects can be created by using the \code{performance} function.
#'
#' @details
#' A \code{performance} object can capture information from four
#' different evaluation scenarios:
#' \itemize{
#' \item The behaviour of a cutoff-dependent performance measure across
#' the range of all cutoffs (e.g. \code{performance( predObj, 'acc' )} ). Here,
#' \code{x.values} contains the cutoffs, \code{y.values} the
#' corresponding values of the performance measure, and
#' \code{alpha.values} is empty.\cr
#' \item The trade-off between two performance measures across the
#' range of all cutoffs (e.g. \code{performance( predObj,
#' 'tpr', 'fpr' )} ).
In this case, the cutoffs are stored in #' \code{alpha.values}, while \code{x.values} and \code{y.values} #' contain the corresponding values of the two performance measures.\cr #' \item A performance measure that comes along with an obligatory #' second axis (e.g. \code{performance( predObj, 'ecost' )} ). Here, the measure values are #' stored in \code{y.values}, while the corresponding values of the #' obligatory axis are stored in \code{x.values}, and \code{alpha.values} #' is empty.\cr #' \item A performance measure whose value is just a scalar #' (e.g. \code{performance( predObj, 'auc' )} ). The value is then stored in #' \code{y.values}, while \code{x.values} and \code{alpha.values} are #' empty. #' } #' #' @slot x.name Performance measure used for the x axis. #' @slot y.name Performance measure used for the y axis. #' @slot alpha.name Name of the unit that is used to create the parametrized #' curve. Currently, curves can only be parametrized by cutoff, so #' \code{alpha.name} is either \code{none} or \code{cutoff}. #' @slot x.values A list in which each entry contains the x values of the curve #' of this particular cross-validation run. \code{x.values[[i]]}, #' \code{y.values[[i]]}, and \code{alpha.values[[i]]} correspond to each #' other. #' @slot y.values A list in which each entry contains the y values of the curve #' of this particular cross-validation run. #' @slot alpha.values A list in which each entry contains the cutoff values of #' the curve of this particular cross-validation run. #' #' @references #' A detailed list of references can be found on the ROCR homepage at #' \url{https://ipa-tys.github.io/ROCR/}. 
#'
#' @author
#' Tobias Sing \email{tobias.sing@gmail.com}, Oliver Sander
#' \email{osander@gmail.com}
#'
#' @seealso
#' \code{\link{prediction}}
#' \code{\link{performance}},
#' \code{\link{prediction-class}},
#' \code{\link{plot.performance}}
#'
#' @export
setClass("performance",
         representation(x.name = "character",
                        y.name = "character",
                        alpha.name = "character",
                        x.values = "list",
                        y.values = "list",
                        alpha.values = "list"))

## Compact console display for performance objects: header line, the
## measure name(s), and the number of runs or data points.
setMethod("show", "performance",
          function(object) {
            n.runs <- length(object@y.values)
            n.points <- length(object@y.values[[1L]])
            cat(paste0("A ", class(object), " instance\n"))
            if (n.points > 1L) {
              ## Curve: show both axes plus the parametrization unit.
              cat(paste0(" '", object@x.name, "' vs. '", object@y.name,
                         "' (alpha: '", object@alpha.name, "')\n"))
            } else {
              ## Scalar measure: show only the y-axis name.
              cat(paste0(" '", object@y.name, "'\n"))
            }
            if (n.runs > 1L) {
              cat(paste0(" for ", n.runs, " cross validation runs "))
            } else if (n.points > 1L) {
              cat(paste0(" with ", n.points, " data points"))
            }
          })

#' @name plot-methods
#' @aliases plot,performance,missing-method plot.performance
#'
#' @title Plot method for performance objects
#'
#' @description
#' This is the method to plot all objects of class performance.
#'
#' @param x an object of class \code{performance}
#' @param y not used
#' @param ... Optional graphical parameters to adjust different components of
#' the performance plot. Parameters are directed to their target component by
#' prefixing them with the name of the component (\code{component.parameter},
#' e.g. \code{text.cex}). The following components are available:
#' \code{xaxis}, \code{yaxis}, \code{coloraxis}, \code{box} (around the
#' plotting region), \code{points}, \code{text}, \code{plotCI} (error bars),
#' \code{boxplot}. The names of these components are influenced by the R
#' functions that are used to create them. Thus, \code{par(component)} can be
#' used to see which parameters are available for a given component (with the
#' exception of the three axes; use \code{par(axis)} here).
To adjust the #' canvas or the performance curve(s), the standard \code{plot} parameters can #' be used without any prefix. #' @param avg If the performance object describes several curves (from #' cross-validation runs or bootstrap evaluations of one particular method), #' the curves from each of the runs can be averaged. Allowed values are #' \code{none} (plot all curves separately), \code{horizontal} (horizontal #' averaging), \code{vertical} (vertical averaging), and \code{threshold} #' (threshold (=cutoff) averaging). Note that while threshold averaging is #' always feasible, vertical and horizontal averaging are not well-defined if #' the graph cannot be represented as a function x->y and y->x, respectively. #' @param spread.estimate When curve averaging is enabled, the variation around #' the average curve can be visualized as standard error bars #' (\code{stderror}), standard deviation bars (\code{stddev}), or by using box #' plots (\code{boxplot}). Note that the function \code{plotCI}, which is used #' internally by ROCR to draw error bars, might raise a warning if the spread #' of the curves at certain positions is 0. #' @param spread.scale For \code{stderror} or \code{stddev}, this is a scalar #' factor to be multiplied with the length of the standard error/deviation #' bar. For example, under normal assumptions, \code{spread.scale=2} can be #' used to get approximate 95\% confidence intervals. #' @param show.spread.at For vertical averaging, this vector determines the x #' positions for which the spread estimates should be visualized. In contrast, #' for horizontal and threshold averaging, the y positions and cutoffs are #' determined, respectively. By default, spread estimates are shown at 11 #' equally spaced positions. #' @param colorize This logical determines whether the curve(s) should be #' colorized according to cutoff. 
#' @param colorize.palette If curve colorizing is enabled, this determines the #' color palette onto which the cutoff range is mapped. #' @param colorkey If true, a color key is drawn into the 4\% border #' region (default of \code{par(xaxs)} and \code{par(yaxs)}) of the #' plot. The color key visualizes the mapping from cutoffs to colors. #' @param colorkey.relwidth Scalar between 0 and 1 that determines the #' fraction of the 4\% border region that is occupied by the colorkey. #' @param colorkey.pos Determines if the colorkey is drawn vertically at #' the \code{right} side, or horizontally at the \code{top} of the #' plot. #' @param print.cutoffs.at This vector specifies the cutoffs which should #' be printed as text along the curve at the corresponding curve positions. #' @param cutoff.label.function By default, cutoff annotations along the curve #' or at the color key are rounded to two decimal places before printing. #' Using a custom \code{cutoff.label.function}, any other transformation can #' be performed on the cutoffs instead (e.g. rounding with different precision #' or taking the logarithm). #' @param downsampling ROCR can efficiently compute most performance measures #' even for data sets with millions of elements. However, plotting of large #' data sets can be slow and lead to PS/PDF documents of considerable size. In #' that case, performance curves that are indistinguishable from the original #' can be obtained by using only a fraction of the computed performance #' values. Values for downsampling between 0 and 1 indicate the fraction of #' the original data set size to which the performance object should be #' downsampled, integers above 1 are interpreted as the actual number of #' performance values to which the curve(s) should be downsampled. #' @param add If \code{TRUE}, the curve(s) is/are added to an already existing #' plot; otherwise a new plot is drawn. 
#'
#' @references
#' A detailed list of references can be found on the ROCR homepage at
#' \url{https://ipa-tys.github.io/ROCR/}.
#'
#' @author
#' Tobias Sing \email{tobias.sing@gmail.com}, Oliver Sander
#' \email{osander@gmail.com}
#'
#' @seealso
#' \code{\link{prediction}},
#' \code{\link{performance}},
#' \code{\link{prediction-class}},
#' \code{\link{performance-class}}
#'
#' @export
#'
#' @examples
#' # plotting a ROC curve:
#' library(ROCR)
#' data(ROCR.simple)
#' pred <- prediction( ROCR.simple$predictions, ROCR.simple$labels )
#' pred
#' perf <- performance( pred, "tpr", "fpr" )
#' perf
#' plot( perf )
#'
#' # To entertain your children, make your plots nicer
#' # using ROCR's flexible parameter passing mechanisms
#' # (much cheaper than a finger painting set)
#' par(bg="lightblue", mai=c(1.2,1.5,1,1))
#' plot(perf, main="ROCR fingerpainting toolkit", colorize=TRUE,
#' xlab="Mary's axis", ylab="", box.lty=7, box.lwd=5,
#' box.col="gold", lwd=17, colorkey.relwidth=0.5, xaxis.cex.axis=2,
#' xaxis.col='blue', xaxis.col.axis="blue", yaxis.col='green', yaxis.cex.axis=2,
#' yaxis.at=c(0,0.5,0.8,0.85,0.9,1), yaxis.las=1, xaxis.lwd=2, yaxis.lwd=3,
#' yaxis.col.axis="orange", cex.lab=2, cex.main=2)
## S4 plot method for (performance, missing): a thin adapter that captures
## all plotting options with their documented defaults and forwards them
## verbatim to the internal workhorse .plot.performance().
setMethod("plot", signature(x="performance",y="missing"),
          function(x, y, ...,
                   avg = "none",
                   spread.estimate = "none",
                   spread.scale = 1,
                   show.spread.at = c(),
                   colorize = FALSE,
                   colorize.palette = rev(rainbow(256, start = 0, end = 4 / 6)),
                   colorkey = colorize,
                   colorkey.relwidth = 0.25,
                   colorkey.pos = "right",
                   print.cutoffs.at = c(),
                   cutoff.label.function = function(x) { round(x, 2) },
                   downsampling = 0,
                   add = FALSE ) {
            ## Delegate all work; '...' carries the prefixed component
            ## parameters (e.g. box.lwd) through unchanged.
            .plot.performance(x,...,
                              avg = avg,
                              spread.estimate = spread.estimate,
                              spread.scale = spread.scale,
                              show.spread.at = show.spread.at,
                              colorize = colorize,
                              colorize.palette = colorize.palette,
                              colorkey = colorkey,
                              colorkey.relwidth = colorkey.relwidth,
                              colorkey.pos = colorkey.pos,
                              print.cutoffs.at = print.cutoffs.at,
                              cutoff.label.function = cutoff.label.function,
                              downsampling = downsampling,
                              add = add)
          }
)

#' @rdname plot-methods
#' @method plot performance
#' @export
## S3 alias so plot.performance(perf, ...) also dispatches to the S4 method.
"plot.performance" <- function(...) plot(...)

#' @name ROCR.hiv
#'
#' @docType data
#' @keywords datasets
#'
#' @title Data set: Support vector machines and neural networks applied to the
#' prediction of HIV-1 coreceptor usage
#'
#' @description
#' Linear support vector machines (libsvm) and neural networks (R package
#' nnet) were applied to predict usage of the coreceptors CCR5 and CXCR4
#' based on sequence data of the third variable loop of the HIV envelope
#' protein.
#'
#' @format
#' A list consisting of the SVM (\code{ROCR.hiv$hiv.svm}) and NN
#' (\code{ROCR.hiv$hiv.nn}) classification data. Each of those is in turn a list
#' consisting of the two elements \code{$predictions} and \code{$labels} (10
#' element list representing cross-validation data).
#'
#' @references
#' Sing, T. & Beerenwinkel, N. & Lengauer, T. "Learning mixtures
#' of localized rules by maximizing the area under the ROC curve". 1st
#' International Workshop on ROC Analysis in AI, 89-96, 2004.
#'
#' @usage data(ROCR.hiv)
#'
#' @examples
#' library(ROCR)
#' data(ROCR.hiv)
#' attach(ROCR.hiv)
#' pred.svm <- prediction(hiv.svm$predictions, hiv.svm$labels)
#' pred.svm
#' perf.svm <- performance(pred.svm, 'tpr', 'fpr')
#' perf.svm
#' # NOTE(review): the NN prediction reuses hiv.svm$labels below --
#' # presumably both classifiers share the same labels; confirm.
#' pred.nn <- prediction(hiv.nn$predictions, hiv.svm$labels)
#' pred.nn
#' perf.nn <- performance(pred.nn, 'tpr', 'fpr')
#' perf.nn
#' plot(perf.svm, lty=3, col="red",main="SVMs and NNs for prediction of
#' HIV-1 coreceptor usage")
#' plot(perf.nn, lty=3, col="blue",add=TRUE)
#' plot(perf.svm, avg="vertical", lwd=3, col="red",
#' spread.estimate="stderror",plotCI.lwd=2,add=TRUE)
#' plot(perf.nn, avg="vertical", lwd=3, col="blue",
#' spread.estimate="stderror",plotCI.lwd=2,add=TRUE)
#' legend(0.6,0.6,c('SVM','NN'),col=c('red','blue'),lwd=3)
"ROCR.hiv"

#' @name ROCR.simple
#'
#' @docType data
#' @keywords datasets
#'
#' @title Data set: Simple artificial prediction data for use with ROCR
#'
#' @description
#' A mock data set containing a simple set of predictions and corresponding
#' class labels.
#'
#' @format
#' A two element list. The first element, \code{ROCR.simple$predictions}, is a
#' vector of numerical predictions. The second element,
#' \code{ROCR.simple$labels}, is a vector of corresponding class labels.
#'
#' @usage data(ROCR.simple)
#'
#' @examples
#' # plot a ROC curve for a single prediction run
#' # and color the curve according to cutoff.
#' library(ROCR)
#' data(ROCR.simple)
#' pred <- prediction(ROCR.simple$predictions, ROCR.simple$labels)
#' pred
#' perf <- performance(pred,"tpr","fpr")
#' perf
#' plot(perf,colorize=TRUE)
"ROCR.simple"

#' @name ROCR.xval
#'
#' @docType data
#' @keywords datasets
#'
#' @title Data set: Artificial cross-validation data for use with ROCR
#'
#' @description
#' A mock data set containing 10 sets of predictions and corresponding labels as
#' would be obtained from 10-fold cross-validation.
#'
#' @format
#' A two element list.
#' The first element, \code{ROCR.xval$predictions}, is
#' itself a 10 element list. Each of these 10 elements is a vector of numerical
#' predictions for each cross-validation run. Likewise, the second list entry,
#' \code{ROCR.xval$labels} is a 10 element list in which each element is a
#' vector of true class labels corresponding to the predictions.
#'
#' @usage data(ROCR.xval)
#'
#' @examples
#' # plot ROC curves for several cross-validation runs (dotted
#' # in grey), overlaid by the vertical average curve and boxplots
#' # showing the vertical spread around the average.
#' library(ROCR)
#' data(ROCR.xval)
#' pred <- prediction(ROCR.xval$predictions, ROCR.xval$labels)
#' pred
#' perf <- performance(pred,"tpr","fpr")
#' perf
#' plot(perf,col="grey82",lty=3)
#' plot(perf,lwd=3,avg="vertical",spread.estimate="boxplot",add=TRUE)
"ROCR.xval"
ROCR/R/performance_plots.R0000644000176200001440000006071313703321565015101 0ustar liggesusers## ----------------------------------------------------------------------------
## plot method for objects of class 'performance'
## ----------------------------------------------------------------------------
#' @importFrom graphics plot.default plot.xy par

# .get.arglist: select the subset of 'arglist' that is relevant for the
# plotting function 'fname'.
#   - for 'plot' / 'plot.xy': keep arguments accepted by the corresponding
#     graphics function or by par();
#   - otherwise: keep (and strip) arguments carrying 'fname' as a '.' prefix
#     (handled by .select.prefix).
.get.arglist <- function( fname, arglist ) {
  if (fname=='plot')
    return(.select.args(arglist,
                        union(names(formals(graphics::plot.default)),
                              names(graphics::par()))))
  else if (fname=='plot.xy')
    return(.select.args(arglist,
                        union( names(formals(graphics::plot.xy)),
                               names(graphics::par()))))
  else
    return( .select.prefix( arglist, fname) )
}

# .downsample: thin out the per-run curve samples of a performance object.
#   downsampling in (0,1): keep that fraction of the points;
#   downsampling  > 1    : keep (about) that many points;
#   otherwise            : keep everything.
# The same index set is applied to alpha, x and y values so the three
# stay aligned.
.downsample <- function( perf, downsampling ) {
  for (i in 1:length(perf@alpha.values)) {
    if (downsampling < 1 && downsampling > 0)
      ind <- round(seq(1, length(perf@alpha.values[[i]]),
                       length=(length(perf@alpha.values[[i]]) *
                                 downsampling)))
    else if (downsampling > 1)
      ind <- round(seq(1, length(perf@alpha.values[[i]]),
                       length=downsampling))
    else
      ind <- 1:length(perf@alpha.values[[i]])
    perf@alpha.values[[i]] <- perf@alpha.values[[i]][ind]
    perf@x.values[[i]]
<- perf@x.values[[i]][ind]
    perf@y.values[[i]] <- perf@y.values[[i]][ind]
  }
  return(perf)
}

# .check_performance_for_plotting: validate that 'perf' can be drawn with
# the requested options; stops with an informative error otherwise.
#   - x and y value lists must have the same number of runs;
#   - coloring/labeling by cutoff needs alpha (threshold) values;
#   - coloring/labeling is incompatible with vertical/horizontal averaging.
.check_performance_for_plotting <- function(perf, colorize,
                                            print.cutoffs.at, avg){
  if (length(perf@y.values) != length(perf@x.values)) {
    stop("Performance object cannot be plotted. Length of x and y values ",
         "does not match.", call. = FALSE)
  }
  if ((is.null(perf@alpha.values) || length(perf@alpha.values) == 0L) &&
      (colorize==TRUE || length(print.cutoffs.at) > 0L)) {
    stop("Threshold coloring or labeling cannot be performed: ",
         "performance object has no threshold information.", call. = FALSE)
  }
  if ((avg=="vertical" || avg=="horizontal") &&
      (colorize==TRUE || length(print.cutoffs.at) > 0L)) {
    # FIX: trailing space added; the two string parts used to concatenate
    # to "...well-defined for'no' or..." (words run together).
    stop("Threshold coloring or labeling is only well-defined for ",
         "'no' or 'threshold' averaging.", call. = FALSE)
  }
}

#' @importFrom grDevices rainbow
# .plot.performance: main plotting work-horse behind plot(performance).
# Validates input, optionally downsamples, replaces infinite cutoffs by a
# finite surrogate, drops non-finite x/y samples, then dispatches to the
# averaging-specific plotting routine.
.plot.performance <- function(perf, ..., avg = "none",
                              spread.estimate = "none",
                              spread.scale = 1, show.spread.at = c(),
                              colorize = FALSE,
                              colorize.palette = rev(grDevices::rainbow(256,
                                                                        start = 0,
                                                                        end = 4 / 6)),
                              colorkey = colorize, colorkey.relwidth = 0.25,
                              colorkey.pos = "right",
                              print.cutoffs.at = c(),
                              cutoff.label.function = function(x) { round(x, 2) },
                              downsampling = 0, add = FALSE) {
  # Input checks
  .check_performance_for_plotting(perf, colorize, print.cutoffs.at, avg)
  # Snapshot all named arguments plus ... into one list that the helper
  # routines pick apart via .garg/.sarg/.farg/.get.arglist.
  arglist <- c(lapply(as.list(environment()), eval ), list(...) )
  if (downsampling >0 )
    perf <- .downsample( perf, downsampling)
  ## for infinite cutoff, assign maximal finite cutoff + mean difference
  ## between adjacent cutoff pairs
  if (length(perf@alpha.values) != 0) {
    FUN <- function(x) {
      isfin <- is.finite(x)
      # if only one finite is available the mean cannot be calculated
      # without the first/last value, since that leaves no value
      if(sum(isfin) > 1L){
        inf_replace <- max(x[isfin]) +
          mean(abs(x[isfin][-1] - x[isfin][-length(x[isfin])]))
      } else {
        inf_replace <- 0
      }
      x[is.infinite(x)] <- inf_replace
      x
    }
    perf@alpha.values <- lapply(perf@alpha.values,FUN)
  }
  ## remove samples with x or y not finite
  for (i in seq_along(perf@x.values)) {
    ind.bool <- (is.finite(perf@x.values[[i]]) &
                   is.finite(perf@y.values[[i]]))
    if (length(perf@alpha.values)>0)
      perf@alpha.values[[i]] <- perf@alpha.values[[i]][ind.bool]
    perf@x.values[[i]] <- perf@x.values[[i]][ind.bool]
    perf@y.values[[i]] <- perf@y.values[[i]][ind.bool]
  }
  arglist <- .sarg( arglist, perf=perf)
  if (add==FALSE)
    do.call( ".performance.plot.canvas", arglist )
  # Dispatch on the requested curve averaging mode.
  if (avg=="none")
    do.call(".performance.plot.no.avg", arglist)
  else if (avg=="vertical")
    do.call(".performance.plot.vertical.avg", arglist)
  else if (avg=="horizontal")
    do.call(".performance.plot.horizontal.avg", arglist)
  else if (avg=="threshold")
    do.call(".performance.plot.threshold.avg", arglist)
}

## ---------------------------------------------------------------------------
## initializing plots and plotting a canvas
## (can be skipped using 'plot( ..., add=TRUE)'
## ---------------------------------------------------------------------------
#' @import stats
#' @import graphics
.performance.plot.canvas <- function(perf, avg, ...) {
  # requireNamespace("stats")
  # requireNamespace("graphics")
  arglist <- list(...)
axis.names <- list(x=perf@x.name, y=perf@y.name) if (avg=="horizontal" || avg=="threshold") axis.names$x <- paste("Average", tolower(axis.names$x)) if (avg=="vertical" || avg=="threshold") axis.names$y <- paste("Average", tolower(axis.names$y)) arglist <- .farg(arglist, xlab=axis.names$x, ylab=axis.names$y) arglist <- .farg(arglist, xlim=c(min(unlist(perf@x.values)), max(unlist(perf@x.values))), ylim=c(min(unlist(perf@y.values)), max(unlist(perf@y.values)))) do.call("plot", .sarg(.slice.run(.get.arglist('plot', arglist)), x=0.5, y=0.5, type='n', axes=FALSE)) do.call( "axis", .sarg(.slice.run(.get.arglist('xaxis', arglist)), side=1)) do.call( "axis", .sarg(.slice.run(.get.arglist('yaxis', arglist)), side=2)) if (.garg(arglist,'colorkey')==TRUE) { colors <- rev( .garg(arglist,'colorize.palette') ) max.alpha <- max(unlist(perf@alpha.values)) min.alpha <- min(unlist(perf@alpha.values)) col.cutoffs <- rev(seq(min.alpha,max.alpha, length=length( colors ))) if ( .garg(arglist,'colorkey.pos')=="right") { ## axis drawing (ticks + labels) ## The interval [min.alpha,max.alpha] needs to be mapped onto ## the interval [min.y,max.y], rather than onto the interval ## [ylim[1],ylim[2]] ! In the latter case, NAs could occur in ## approxfun below, because axTicks can be out of the ylim-range ## ('yxaxs': 4%region) max.y <- max(axTicks(4)) min.y <- min(axTicks(4)) alpha.ticks <- .garg( arglist, c("coloraxis.at")) if (length(alpha.ticks)==0) alpha.ticks <- approxfun(c(min.y, max.y), c(min.alpha, max.alpha)) (axTicks(4)) alpha2y <- approxfun(c(min(alpha.ticks), max(alpha.ticks)), c(min.y,max.y)) arglist <- .sarg(arglist, coloraxis.labels=.garg(arglist, 'cutoff.label.function')(alpha.ticks), coloraxis.at=alpha2y(alpha.ticks)) do.call("axis", .sarg(.slice.run(.get.arglist('coloraxis', arglist)), side=4)) ## draw colorkey ## each entry in display.bool corresponds to one rectangle of ## the colorkey. ## Only rectangles within the alpha.ticks range are plotted. 
## y.lower, y.upper, and colors, are the attributes of the visible ## rectangles (those for which display.bool=TRUE) display.bool <- (col.cutoffs >= min(alpha.ticks) & col.cutoffs < max(alpha.ticks)) y.lower <- alpha2y( col.cutoffs )[display.bool] colors <- colors[display.bool] if (length(y.lower>=2)) { y.width <- y.lower[2] - y.lower[1] y.upper <- y.lower + y.width x.left <- .garg(arglist,'xlim')[2] + ((.garg(arglist,'xlim')[2] - .garg(arglist,'xlim')[1]) * (1-.garg(arglist,'colorkey.relwidth'))*0.04) x.right <- .garg(arglist,'xlim')[2] + (.garg(arglist,'xlim')[2] -.garg(arglist,'xlim')[1]) * 0.04 rect(x.left, y.lower, x.right, y.upper, col=colors, border=colors,xpd=NA) } } else if (.garg(arglist, 'colorkey.pos') == "top") { ## axis drawing (ticks + labels) max.x <- max(axTicks(3)) min.x <- min(axTicks(3)) alpha.ticks <- .garg( arglist, c("coloraxis.at")) if (length(alpha.ticks)==0) { alpha.ticks <- approxfun(c(min.x, max.x), c(min.alpha, max.alpha))(axTicks(3)) } alpha2x <- approxfun(c( min(alpha.ticks), max(alpha.ticks)), c( min.x, max.x)) arglist <- .sarg(arglist, coloraxis.labels=.garg(arglist, 'cutoff.label.function')(alpha.ticks), coloraxis.at= alpha2x(alpha.ticks)) do.call("axis", .sarg(.slice.run( .get.arglist('coloraxis', arglist)), side=3)) ## draw colorkey display.bool <- (col.cutoffs >= min(alpha.ticks) & col.cutoffs < max(alpha.ticks)) x.left <- alpha2x( col.cutoffs )[display.bool] colors <- colors[display.bool] if (length(x.left)>=2) { x.width <- x.left[2] - x.left[1] x.right <- x.left + x.width y.lower <- .garg(arglist,'ylim')[2] + (.garg(arglist,'ylim')[2] - .garg(arglist,'ylim')[1]) * (1-.garg(arglist,'colorkey.relwidth'))*0.04 y.upper <- .garg(arglist,'ylim')[2] + (.garg(arglist,'ylim')[2] - .garg(arglist,'ylim')[1]) * 0.04 rect(x.left, y.lower, x.right, y.upper, col=colors, border=colors, xpd=NA) } } } do.call( "box", .slice.run( .get.arglist( 'box', arglist))) } ## ---------------------------------------------------------------------------- ## 
plotting performance objects when no curve averaging is wanted ## ---------------------------------------------------------------------------- #' @importFrom grDevices xy.coords #' @importFrom stats approxfun .performance.plot.no.avg <- function( perf, ... ) { arglist <- list(...) arglist <- .farg(arglist, type= 'l') if (.garg(arglist, 'colorize') == TRUE) { colors <- rev( .garg( arglist, 'colorize.palette') ) max.alpha <- max(unlist(perf@alpha.values)) min.alpha <- min(unlist(perf@alpha.values)) col.cutoffs <- rev(seq(min.alpha,max.alpha, length=length(colors)+1)) col.cutoffs <- col.cutoffs[2:length(col.cutoffs)] } for (i in 1:length(perf@x.values)) { if (.garg(arglist, 'colorize') == FALSE) { do.call("plot.xy", .sarg(.slice.run(.get.arglist('plot.xy', arglist), i), xy=(grDevices::xy.coords(perf@x.values[[i]], perf@y.values[[i]])))) } else { for (j in 1:(length(perf@x.values[[i]])-1)) { segment.coloring <- colors[min(which(col.cutoffs <= perf@alpha.values[[i]][j]))] do.call("plot.xy", .sarg(.slice.run(.get.arglist('plot.xy', arglist), i), xy=(grDevices::xy.coords(perf@x.values[[i]][j:(j+1)], perf@y.values[[i]][j:(j+1)])), col= segment.coloring)) } } print.cutoffs.at <- .garg(arglist, 'print.cutoffs.at',i) if (! 
is.null(print.cutoffs.at)) { text.x <- stats::approxfun(perf@alpha.values[[i]], perf@x.values[[i]], rule=2, ties=mean)(print.cutoffs.at) text.y <- stats::approxfun(perf@alpha.values[[i]], perf@y.values[[i]], rule=2, ties=mean)(print.cutoffs.at) do.call("points", .sarg(.slice.run(.get.arglist('points', arglist),i), x= text.x, y= text.y)) do.call("text", .farg(.slice.run( .get.arglist('text', arglist),i), x= text.x, y= text.y, labels=(.garg(arglist, 'cutoff.label.function', i)(print.cutoffs.at)))) } } } ## ---------------------------------------------------------------------------- ## plotting performance objects when vertical curve averaging is wanted ## ---------------------------------------------------------------------------- #' @importFrom stats approxfun sd .performance.plot.vertical.avg <- function( perf, ...) { arglist <- list(...) arglist <- .farg(arglist, show.spread.at= (seq(min(unlist(perf@x.values)), max(unlist(perf@x.values)), length=11))) perf.avg <- perf x.values <- seq(min(unlist(perf@x.values)), max(unlist(perf@x.values)), length=max( sapply(perf@x.values, length))) for (i in 1:length(perf@y.values)) { perf.avg@y.values[[i]] <- stats::approxfun(perf@x.values[[i]], perf@y.values[[i]], ties=mean, rule=2)(x.values) } perf.avg@y.values <- list(rowMeans( data.frame( perf.avg@y.values ))) perf.avg@x.values <- list(x.values) perf.avg@alpha.values <- list() ## y.values at show.spread.at (midpoint of error bars ) show.spread.at.y.values <- lapply(as.list(1:length(perf@x.values)), function(i) { stats::approxfun(perf@x.values[[i]], perf@y.values[[i]], rule=2, ties=mean)( .garg(arglist, 'show.spread.at')) }) show.spread.at.y.values <- as.matrix(data.frame(show.spread.at.y.values )) colnames(show.spread.at.y.values) <- c() ## now, show.spread.at.y.values[i,] contains the curve y values at the ## sampling x value .garg(arglist,'show.spread.at')[i] if (.garg(arglist, 'spread.estimate') == "stddev" || .garg(arglist, 'spread.estimate') == "stderror") { bar.width <- 
apply(show.spread.at.y.values, 1, stats::sd) if (.garg(arglist, 'spread.estimate') == "stderror") { bar.width <- bar.width / sqrt( ncol(show.spread.at.y.values) ) } bar.width <- .garg(arglist, 'spread.scale') * bar.width suppressWarnings(do.call(gplots::plotCI, .farg(.sarg(.get.arglist('plotCI', arglist), x=.garg(arglist, 'show.spread.at'), y=rowMeans( show.spread.at.y.values), uiw= bar.width, liw= bar.width, err= 'y', add= TRUE), gap= 0, type= 'n'))) } if (.garg(arglist, 'spread.estimate') == "boxplot") { do.call("boxplot", .farg(.sarg(.get.arglist( 'boxplot', arglist), x= data.frame(t(show.spread.at.y.values)), at= .garg(arglist, 'show.spread.at'), add= TRUE, axes= FALSE), boxwex= (1/(2*(length(.garg(arglist, 'show.spread.at'))))))) do.call("points", .sarg(.get.arglist( 'points', arglist), x= .garg(arglist, 'show.spread.at'), y= rowMeans(show.spread.at.y.values))) } do.call( ".plot.performance", .sarg(arglist, perf= perf.avg, avg= 'none', add= TRUE)) } ## ---------------------------------------------------------------------------- ## plotting performance objects when horizontal curve averaging is wanted ## ---------------------------------------------------------------------------- #' @importFrom stats approxfun sd .performance.plot.horizontal.avg <- function( perf, ...) { arglist <- list(...) 
arglist <- .farg(arglist, show.spread.at= seq(min(unlist(perf@y.values)), max(unlist(perf@y.values)), length=11)) perf.avg <- perf y.values <- seq(min(unlist(perf@y.values)), max(unlist(perf@y.values)), length=max( sapply(perf@y.values, length))) for (i in 1:length(perf@x.values)) { perf.avg@x.values[[i]] <- stats::approxfun(perf@y.values[[i]], perf@x.values[[i]], ties=mean, rule=2)(y.values) } perf.avg@x.values <- list(rowMeans( data.frame( perf.avg@x.values ))) perf.avg@y.values <- list(y.values) perf.avg@alpha.values <- list() ## x.values at show.spread.at (midpoint of error bars ) show.spread.at.x.values <- lapply(as.list(1:length(perf@y.values)), function(i) { stats::approxfun(perf@y.values[[i]], perf@x.values[[i]], rule=2, ties=mean)(.garg(arglist,'show.spread.at')) } ) show.spread.at.x.values <- as.matrix(data.frame(show.spread.at.x.values)) colnames(show.spread.at.x.values) <- c() ## now, show.spread.at.x.values[i,] contains the curve x values at the ## sampling y value .garg(arglist,'show.spread.at')[i] if (.garg(arglist,'spread.estimate') == 'stddev' || .garg(arglist,'spread.estimate') == 'stderror') { bar.width <- apply(show.spread.at.x.values, 1, stats::sd) if (.garg(arglist,'spread.estimate')== 'stderror') { bar.width <- bar.width / sqrt( ncol(show.spread.at.x.values) ) } bar.width <- .garg(arglist,'spread.scale') * bar.width suppressWarnings(do.call(gplots::plotCI, .farg(.sarg(.get.arglist('plotCI', arglist), x= rowMeans( show.spread.at.x.values), y= .garg(arglist, 'show.spread.at'), uiw= bar.width, liw= bar.width, err= 'x', add= TRUE), gap= 0, type= 'n'))) } if (.garg(arglist,'spread.estimate') == "boxplot") { do.call("boxplot", .farg(.sarg(.get.arglist( 'boxplot', arglist), x= data.frame(t(show.spread.at.x.values)), at= .garg(arglist,'show.spread.at'), add= TRUE, axes= FALSE, horizontal= TRUE), boxwex= 1/(2*(length(.garg(arglist,'show.spread.at')))))) do.call("points", .sarg(.get.arglist( 'points', arglist), x= rowMeans(show.spread.at.x.values), y= 
.garg(arglist,'show.spread.at'))) } do.call( ".plot.performance", .sarg(arglist, perf= perf.avg, avg= 'none', add= TRUE)) } ## ---------------------------------------------------------------------------- ## plotting performance objects when threshold curve averaging is wanted ## ---------------------------------------------------------------------------- #' @importFrom stats approxfun sd .performance.plot.threshold.avg <- function( perf, ...) { arglist <- list(...) arglist <- .farg(arglist, show.spread.at= seq(min(unlist(perf@x.values)), max(unlist(perf@x.values)), length=11)) perf.sampled <- perf alpha.values <- rev(seq(min(unlist(perf@alpha.values)), max(unlist(perf@alpha.values)), length=max( sapply(perf@alpha.values, length)))) for (i in 1:length(perf.sampled@y.values)) { perf.sampled@x.values[[i]] <- stats::approxfun(perf@alpha.values[[i]],perf@x.values[[i]], rule=2, ties=mean)(alpha.values) perf.sampled@y.values[[i]] <- stats::approxfun(perf@alpha.values[[i]], perf@y.values[[i]], rule=2, ties=mean)(alpha.values) } ## compute average curve perf.avg <- perf.sampled perf.avg@x.values <- list( rowMeans( data.frame( perf.avg@x.values))) perf.avg@y.values <- list(rowMeans( data.frame( perf.avg@y.values))) perf.avg@alpha.values <- list( alpha.values ) x.values.spread <- lapply(as.list(1:length(perf@x.values)), function(i) { stats::approxfun(perf@alpha.values[[i]], perf@x.values[[i]], rule=2, ties=mean)(.garg(arglist,'show.spread.at')) } ) x.values.spread <- as.matrix(data.frame( x.values.spread )) y.values.spread <- lapply(as.list(1:length(perf@y.values)), function(i) { stats::approxfun(perf@alpha.values[[i]], perf@y.values[[i]], rule=2, ties=mean)(.garg(arglist,'show.spread.at')) } ) y.values.spread <- as.matrix(data.frame( y.values.spread )) if (.garg(arglist,'spread.estimate')=="stddev" || .garg(arglist,'spread.estimate')=="stderror") { x.bar.width <- apply(x.values.spread, 1, stats::sd) y.bar.width <- apply(y.values.spread, 1, stats::sd) if 
(.garg(arglist,'spread.estimate')=="stderror") { x.bar.width <- x.bar.width / sqrt( ncol(x.values.spread) ) y.bar.width <- y.bar.width / sqrt( ncol(x.values.spread) ) } x.bar.width <- .garg(arglist,'spread.scale') * x.bar.width y.bar.width <- .garg(arglist,'spread.scale') * y.bar.width suppressWarnings( do.call(gplots::plotCI, .farg(.sarg(.get.arglist('plotCI', arglist), x= rowMeans(x.values.spread), y= rowMeans(y.values.spread), uiw= x.bar.width, liw= x.bar.width, err= 'x', add= TRUE), gap= 0, type= 'n'))) suppressWarnings( do.call(gplots::plotCI, .farg(.sarg(.get.arglist('plotCI', arglist), x= rowMeans(x.values.spread), y= rowMeans(y.values.spread), uiw= y.bar.width, liw= y.bar.width, err= 'y', add= TRUE), gap= 0, type= 'n'))) } if (.garg(arglist,'spread.estimate')=="boxplot") { do.call("boxplot", .farg(.sarg(.get.arglist('boxplot', arglist), x= data.frame(t(x.values.spread)), at= rowMeans(y.values.spread), add= TRUE, axes= FALSE, horizontal= TRUE), boxwex= 1/(2*(length(.garg(arglist,'show.spread.at')))))) do.call("boxplot", .farg(.sarg(.get.arglist('boxplot', arglist), x= data.frame(t(y.values.spread)), at= rowMeans(x.values.spread), add= TRUE, axes= FALSE), boxwex= 1/(2*(length(.garg(arglist,'show.spread.at')))))) do.call("points", .sarg(.get.arglist('points', arglist), x= rowMeans(x.values.spread), y= rowMeans(y.values.spread))) } do.call( ".plot.performance", .sarg(arglist, perf= perf.avg, avg= 'none', add= TRUE)) } ROCR/R/ROCR_aux.R0000644000176200001440000000561213644317760013004 0ustar liggesusers## --------------------------------------------------------------------------- ## Dealing with argument lists, especially '...' 
## ---------------------------------------------------------------------------
## return list of selected arguments, skipping those that
## are not present in arglist
.select.args <- function( arglist, args.to.select, complement=FALSE) {
  # Keep (or, with complement=TRUE, drop) entries of 'arglist' whose
  # names appear in 'args.to.select'.
  match.bool <- names(arglist) %in% args.to.select
  if (complement==TRUE)
    match.bool <- !match.bool
  return( arglist[ match.bool] )
}

## return arguments in arglist which match prefix, with prefix removed
## ASSUMPTION: prefix is separated from rest by a '.'; this is removed along
## with the prefix
.select.prefix <- function( arglist, prefixes, complement=FALSE ) {
  match.expr <- paste(paste('(^',prefixes,'\\.)',sep=""),collapse='|')
  # seq_along instead of 1:length: behaves correctly for empty arglist
  match.bool <- seq_along(arglist) %in% grep( match.expr, names(arglist) )
  if (complement==TRUE)
    match.bool <- !match.bool
  arglist <- arglist[ match.bool]
  names(arglist) <- sub( match.expr, '', names(arglist))
  return( arglist )
}

# .garg: get argument 'arg' from 'arglist'; if the stored value is itself
# a list (one entry per cross-validation run), return its i-th element.
.garg <- function( arglist, arg, i=1) {
  if (is.list(arglist[[arg]])) arglist[[ arg ]][[i]]
  else arglist[[ arg ]]
}

# .sarg: set arguments in 'arglist' from name=value pairs in ...,
# overwriting any existing entries.
.sarg <- function( arglist, ...) {
  ll <- list(...)
  for (argname in names(ll) ) {
    arglist[[ argname ]] <- ll[[ argname ]]
  }
  return(arglist)
}

# .farg: fill arguments; like .sarg but only sets entries that are
# currently empty or missing (i.e. supplies defaults).
.farg <- function( arglist, ...) {
  ll <- list(...)
  for (argname in names(ll) ) {
    if (length(arglist[[argname]])==0)
      arglist[[ argname ]] <- ll[[ argname ]]
  }
  return(arglist)
}

# .slice.run: project every (possibly per-run) argument in 'arglist' onto
# cross-validation run 'runi'.
.slice.run <- function( arglist, runi=1) {
  r <- lapply( names(arglist), function(name) .garg( arglist, name, runi))
  names(r) <- names(arglist)
  r
}

## ---------------------------------------------------------------------------
## Line segments
## ---------------------------------------------------------------------------

# .construct.linefunct: return f(x) = m*x + c, the line through
# (x1,y1) and (x2,y2); stops for vertical lines (x1 == x2).
# FIX: build the line as a lexical closure instead of generating source
# code via eval(parse(text=paste(...))); the returned function computes
# exactly the same m*x + c as before.
.construct.linefunct <- function( x1, y1, x2, y2) {
  if (x1==x2) {
    stop("Cannot construct a function from data.")
  }
  slope <- (y2 - y1) / (x2 - x1)
  intercept <- y1 - slope * x1
  function(x) {
    slope * x + intercept
  }
}

#' @importFrom stats uniroot
# .intersection.point: intersection of two line functions f and g,
# returned as c(x, y); c(Inf, Inf) when the lines are parallel.
.intersection.point <- function( f, g ) {
  ## if lines are parallel, no intersection point
  if (f(1)-f(0) == g(1)-g(0)) {
    return( c(Inf,Inf) )
  }
  ## otherwise, widen the search interval by doubling until f-g changes
  ## sign between the interval ends, then root-find on the difference
  imin <- -1
  imax <- 1
  while (sign(f(imin)-g(imin)) == sign(f(imax)-g(imax))) {
    imin <- 2*imin
    imax <- 2*imax
  }
  h <- function(x) { f(x) - g(x) }
  intersect.x <- stats::uniroot( h, interval=c(imin-1,imax+1) )$root
  intersect.y <- f( intersect.x )
  return( c(intersect.x, intersect.y ))
}
ROCR/R/prediction.R0000644000176200001440000002340515134431534013512 0ustar liggesusers#' @name prediction
#'
#' @title Function to create prediction objects
#'
#' @description
#' Every classifier evaluation using ROCR starts with creating a
#' \code{prediction} object. This function is used to transform the input data
#' (which can be in vector, matrix, data frame, or list form) into a
#' standardized format.
#'
#' @details
#' \code{predictions} and \code{labels} can simply be vectors of the same
#' length. However, in the case of cross-validation data, different
#' cross-validation runs can be provided as the *columns* of a matrix or
#' data frame, or as the entries of a list.
#' In the case of a matrix or
#' data frame, all cross-validation runs must have the same length, whereas
#' in the case of a list, the lengths can vary across the cross-validation
#' runs. Internally, as described in section 'Value', all of these input
#' formats are converted to list representation.
#'
#' Since scoring classifiers give relative tendencies towards a negative
#' (low scores) or positive (high scores) class, it has to be declared
#' which class label denotes the negative, and which the positive class.
#' Ideally, labels should be supplied as ordered factor(s), the lower
#' level corresponding to the negative class, the upper level to the
#' positive class. If the labels are factors (unordered), numeric,
#' logical or characters, ordering of the labels is inferred from
#' R's built-in \code{<} relation (e.g. 0 < 1, -1 < 1, 'a' < 'b',
#' FALSE < TRUE). Use \code{label.ordering} to override this default
#' ordering. Please note that the ordering can be locale-dependent
#' e.g. for character labels '-1' and '1'.
#'
#' Currently, ROCR supports only binary classification (extensions toward
#' multiclass classification are scheduled for the next release,
#' however). If there are more than two distinct label symbols, execution
#' stops with an error message. If all predictions use the same two
#' symbols that are used for the labels, categorical predictions are
#' assumed. If there are more than two predicted values, but all numeric,
#' continuous predictions are assumed (i.e. a scoring
#' classifier). Otherwise, if more than two symbols occur in the
#' predictions, and not all of them are numeric, execution stops with an
#' error message.
#'
#' @param predictions A vector, matrix, list, or data frame containing the
#' predictions.
#' @param labels A vector, matrix, list, or data frame containing the true class
#' labels. Must have the same dimensions as \code{predictions}.
#' @param label.ordering The default ordering (cf.details) of the classes can
#' be changed by supplying a vector containing the negative and the positive
#' class label.
#'
#' @return An S4 object of class \code{prediction}.
#'
#' @references
#' A detailed list of references can be found on the ROCR homepage at
#' \url{https://ipa-tys.github.io/ROCR/}.
#'
#' @author
#' Tobias Sing \email{tobias.sing@gmail.com}, Oliver Sander
#' \email{osander@gmail.com}
#'
#' @seealso
#' \code{\link{prediction-class}},
#' \code{\link{performance}},
#' \code{\link{performance-class}},
#' \code{\link{plot.performance}}
#'
#' @export
#'
#' @examples
#' # create a simple prediction object
#' library(ROCR)
#' data(ROCR.simple)
#' pred <- prediction(ROCR.simple$predictions,ROCR.simple$labels)
#' pred
prediction <- function(predictions, labels, label.ordering=NULL) {
  ## bring 'predictions' and 'labels' into list format,
  ## each list entry representing one x-validation run

  ## convert predictions into canonical list format
  if (is.data.frame(predictions)) {
    names(predictions) <- c()
    predictions <- as.list(predictions)
  } else if (is.matrix(predictions)) {
    predictions <- as.list(data.frame(predictions))
    names(predictions) <- c()
  } else if (is.vector(predictions) && !is.list(predictions)) {
    predictions <- list(predictions)
  } else if (!is.list(predictions)) {
    stop("Format of predictions is invalid. It couldn't be coerced to a list.",
         call. = FALSE)
  }
  ## if predictions is a list -> keep unaltered
  if(any(vapply(predictions,anyNA,logical(1)))){
    stop("'predictions' contains NA.", call. = FALSE)
  }

  ## convert labels into canonical list format
  if (is.data.frame(labels)) {
    names(labels) <- c()
    labels <- as.list( labels)
  } else if (is.matrix(labels)) {
    labels <- as.list( data.frame( labels))
    names(labels) <- c()
  } else if ((is.vector(labels) || is.ordered(labels) || is.factor(labels)) &&
             !is.list(labels)) {
    labels <- list( labels)
  } else if (!is.list(labels)) {
    stop("Format of labels is invalid. It couldn't be coerced to a list.",
         call. = FALSE)
  }
  ## if labels is a list -> keep unaltered

  ## Length consistency checks
  if (length(predictions) != length(labels))
    stop(paste("Number of cross-validation runs must be equal",
               "for predictions and labels."))
  if (! all(sapply(predictions, length) == sapply(labels, length)))
    stop(paste("Number of predictions in each run must be equal",
               "to the number of labels for each run."))

  ## only keep prediction/label pairs that are finite numbers
  for (i in seq_along(predictions)) {
    finite.bool <- is.finite( predictions[[i]] )
    predictions[[i]] <- predictions[[i]][ finite.bool ]
    labels[[i]] <- labels[[i]][ finite.bool ]
  }

  ## abort if 'labels' format is inconsistent across
  ## different cross-validation runs
  label.format="" ## one of 'normal','factor','ordered'
  if (all(sapply( labels, is.factor)) &&
      !any(sapply(labels, is.ordered))) {
    label.format <- "factor"
  } else if (all(sapply( labels, is.ordered))) {
    label.format <- "ordered"
  } else if (all(sapply( labels, is.character)) ||
             all(sapply( labels, is.numeric)) ||
             all(sapply( labels, is.logical))) {
    label.format <- "normal"
  } else {
    stop(paste("Inconsistent label data type across different",
               "cross-validation runs."))
  }

  ## abort if levels are not consistent across different
  ## cross-validation runs
  if (! all(sapply(labels, levels)==levels(labels[[1]])) ) {
    stop(paste("Inconsistent factor levels across different",
               "cross-validation runs."))
  }

  ## convert 'labels' into ordered factors, aborting if the number
  ## of classes is not equal to 2.
  levels <- c()
  if ( label.format == "ordered" ) {
    if (!is.null(label.ordering)) {
      stop(paste("'labels' is already ordered. No additional",
                 "'label.ordering' must be supplied."))
    } else {
      levels <- levels(labels[[1]])
    }
  } else {
    if ( is.null( label.ordering )) {
      if ( label.format == "factor" ) levels <- sort(levels(labels[[1]]))
      else levels <- sort( unique( unlist( labels)))
    } else {
      ## if (!setequal( levels, label.ordering)) {
      if (!setequal( unique(unlist(labels)), label.ordering )) {
        stop("Label ordering does not match class labels.")
      }
      levels <- label.ordering
    }
    for (i in seq_along(labels)) {
      # FIX: test the i-th run, not the whole list. 'labels' is a list
      # here, so is.factor(labels) was always FALSE and the factor branch
      # was dead code; result is unchanged (ordered() coerces factors via
      # their level strings either way), but the intended branch now runs.
      if (is.factor(labels[[i]]))
        labels[[i]] <- ordered(as.character(labels[[i]]),
                               levels=levels)
      else
        labels[[i]] <- ordered( labels[[i]],
                                levels=levels)
    }
  }
  if (length(levels) != 2) {
    message <- paste("Number of classes is not equal to 2.\n",
                     "ROCR currently supports only evaluation of ",
                     "binary classification tasks.",sep="")
    stop(message)
  }

  ## determine whether predictions are continuous or categorical
  ## (in the latter case stop; scheduled for the next ROCR version)
  if (!is.numeric( unlist( predictions ))) {
    stop("Currently, only continuous predictions are supported by ROCR.")
  }

  ## compute cutoff/fp/tp data
  cutoffs <- list()
  fp <- list()
  tp <- list()
  fn <- list()
  tn <- list()
  n.pos <- list()
  n.neg <- list()
  n.pos.pred <- list()
  n.neg.pred <- list()
  for (i in seq_along(predictions)) {
    n.pos <- c( n.pos, sum( labels[[i]] == levels[2] ))
    n.neg <- c( n.neg, sum( labels[[i]] == levels[1] ))
    ans <- .compute.unnormalized.roc.curve( predictions[[i]], labels[[i]] )
    cutoffs <- c( cutoffs, list( ans$cutoffs ))
    fp <- c( fp, list( ans$fp ))
    tp <- c( tp, list( ans$tp ))
    fn <- c( fn, list( n.pos[[i]] - tp[[i]] ))
    tn <- c( tn, list( n.neg[[i]] - fp[[i]] ))
    n.pos.pred <- c(n.pos.pred, list(tp[[i]] + fp[[i]]) )
    n.neg.pred <- c(n.neg.pred, list(tn[[i]] + fn[[i]]) )
  }

  return( new("prediction", predictions=predictions,
              labels=labels,
              cutoffs=cutoffs,
              fp=fp,
              tp=tp,
              fn=fn,
              tn=tn,
              n.pos=n.pos,
              n.neg=n.neg,
              n.pos.pred=n.pos.pred,
              n.neg.pred=n.neg.pred))
}

## fast fp/tp computation based on cumulative summing
## .compute.unnormalized.roc.curve: core ROC bookkeeping for one run.
## Args:
##   predictions: numeric score vector for one cross-validation run.
##   labels:      ordered factor whose first level is the negative class
##                and whose second level is the positive class.
## Returns list(cutoffs, fp, tp): absolute false/true positive counts at
## each distinct score cutoff, ordered by decreasing cutoff, starting with
## the (Inf, 0, 0) entry.
.compute.unnormalized.roc.curve <- function( predictions, labels ) {
  ## determine the labels that are used for the pos. resp. neg. class :
  pos.label <- levels(labels)[2]
  neg.label <- levels(labels)[1]
  pred.order <- order(predictions, decreasing=TRUE)
  predictions.sorted <- predictions[pred.order]
  # running counts of positives/negatives seen as the cutoff decreases
  tp <- cumsum(labels[pred.order]==pos.label)
  fp <- cumsum(labels[pred.order]==neg.label)
  ## remove fp & tp for duplicated predictions
  ## as duplicated keeps the first occurrence, but we want the last, two
  ## rev are used.
  ## Highest cutoff (Infinity) corresponds to tp=0, fp=0
  # (rev(duplicated(rev(x))) is equivalent to duplicated(x, fromLast=TRUE))
  dups <- rev(duplicated(rev(predictions.sorted)))
  tp <- c(0, tp[!dups])
  fp <- c(0, fp[!dups])
  cutoffs <- c(Inf, predictions.sorted[!dups])
  return(list( cutoffs=cutoffs, fp=fp, tp=tp ))
}
ROCR/demo/0000755000176200001440000000000015134440366011751 5ustar liggesusersROCR/demo/00Index0000644000176200001440000000007713644317760013104 0ustar liggesusersROCR demonstrates some of the graphical capabilities of ROCR
ROCR/demo/ROCR.R0000644000176200001440000002261313644317760012652 0ustar liggesusers## -----------------------------------------------------------------------------------
## Demo file for ROCR; start with 'demo(ROCR)'
## -----------------------------------------------------------------------------------
# if(dev.cur() <= 1) get(getOption("device"))()
if(dev.cur() <= 1) dev.new()
opar <- par(ask = interactive() &&
              (.Device %in% c("X11", "GTK", "gnome", "windows","quartz")))
data(ROCR.hiv)
pp <- ROCR.hiv$hiv.svm$predictions
ll <- ROCR.hiv$hiv.svm$labels
par(mfrow=c(2,2))
pred<- prediction(pp, ll)
perf <- performance(pred, "tpr", "fpr")
plot(perf, avg= "threshold", colorize=TRUE, lwd= 3,
     main= "With ROCR you can produce standard plots like ROC curves ...")
plot(perf, lty=3, col="grey78", add=TRUE)
perf <- performance(pred, "prec", "rec")
plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main= "...
Precision/Recall graphs ...") plot(perf, lty=3, col="grey78", add=TRUE) perf <- performance(pred, "sens", "spec") plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main="... Sensitivity/Specificity plots ...") plot(perf, lty=3, col="grey78", add=TRUE) perf <- performance(pred, "lift", "rpp") plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main= "... and Lift charts.") plot(perf, lty=3, col="grey78", add=TRUE) # ------------------------------------------------------------------------------------ data(ROCR.xval) pp <- ROCR.xval$predictions ll <- ROCR.xval$labels pred <- prediction(pp,ll) perf <- performance(pred,'tpr','fpr') par(mfrow=c(2,2)) plot(perf, colorize=TRUE, lwd=2,main='ROC curves from 10-fold cross-validation') plot(perf, avg='vertical', spread.estimate='stderror',lwd=3,main='Vertical averaging + 1 standard error',col='blue') plot(perf, avg='horizontal', spread.estimate='boxplot',lwd=3,main='Horizontal averaging + boxplots',col='blue') plot(perf, avg='threshold', spread.estimate='stddev',lwd=2, main='Threshold averaging + 1 standard deviation',colorize=TRUE) # ------------------------------------------------------------------------------------ data(ROCR.hiv) pp.unnorm <- ROCR.hiv$hiv.svm$predictions ll <- ROCR.hiv$hiv.svm$labels # normalize predictions to 0..1 v <- unlist(pp.unnorm) pp <- lapply(pp.unnorm, function(run) {approxfun(c(min(v), max(v)), c(0,1))(run)}) par(mfrow=c(2,2)) pred<- prediction(pp, ll) perf <- performance(pred, "tpr", "fpr") plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, coloraxis.at=seq(0,1,by=0.2), main= "ROC curve") plot(perf, col="gray78", add=TRUE) plot(perf, avg= "threshold", colorize=TRUE, colorkey=FALSE,lwd= 3, main= "ROC curve",add=TRUE) perf <- performance(pred, "acc") plot(perf, avg= "vertical", spread.estimate="boxplot", lwd=3,col='blue', show.spread.at= seq(0.1, 0.9, by=0.1), main= "Accuracy across the range of possible cutoffs") plot(performance(pred, "cal", window.size= 10), avg="vertical", main= "How well 
are the probability predictions calibrated?") plot(0,0,type="n", xlim= c(0,1), ylim=c(0,7), xlab="Cutoff", ylab="Density", main="How well do the predictions separate the classes?") for (runi in 1:length(pred@predictions)) { lines(density(pred@predictions[[runi]][pred@labels[[runi]]=="-1"]), col= "red") lines(density(pred@predictions[[runi]][pred@labels[[runi]]=="1"]), col="green") } #--------------------------------------------------------------------- par(mfrow= c(2,2)) # ...you can freely combine performance measures (pcmiss,lift) data(ROCR.xval) pred <- prediction(ROCR.xval$predictions, ROCR.xval$labels) perf <- performance(pred,"pcmiss","lift") # plot(perf, colorize=TRUE) plot(perf, colorize=TRUE, print.cutoffs.at=seq(0,1,by=0.1), text.adj=c(1.2,1.2), avg="threshold", lwd=3, main= "You can freely combine performance measures ...") data(ROCR.simple) pred <- prediction(ROCR.simple$predictions, ROCR.simple$labels) perf <- performance(pred,"tpr","fpr") plot(perf, colorize=TRUE, colorkey.pos="top", print.cutoffs.at=seq(0,1,by=0.1), text.cex=1, text.adj=c(1.2, 1.2), lwd=2) # ... cutoff stacking data(ROCR.xval) pred <- prediction(ROCR.xval$predictions, ROCR.xval$labels) perf <- performance(pred,"tpr","fpr") plot(perf, print.cutoffs.at=seq(0,1,by=0.2), text.cex=0.8, text.y=lapply(as.list(seq(0,0.5,by=0.05)), function(x) { rep(x,length(perf@x.values[[1]])) } ), col= as.list(terrain.colors(10)), text.col= as.list(terrain.colors(10)), points.col= as.list(terrain.colors(10)), main= "Cutoff stability") # .... 
no functional dependencies needed, truly parametrized curve data(ROCR.xval) pred <- prediction(ROCR.xval$predictions, ROCR.xval$labels) perf <- performance(pred,"acc","lift") plot(perf, colorize=TRUE, main="Truly parametrized curves") plot(perf, colorize=TRUE, print.cutoffs.at=seq(0,1,by=0.1), add=TRUE, text.adj=c(1.2, 1.2), avg="threshold", lwd=3) # -------------------------------------------------------------------- # (Expected cost) curve + ROC convex hull par(mfrow=c(1,2)) data(ROCR.hiv) plot(0,0,xlim=c(0,1),ylim=c(0,1),xlab='Probability cost function', ylab="Normalized expected cost", main='HIV data: Expected cost curve (Drummond & Holte)') pred<-prediction(ROCR.hiv$hiv.nn$predictions,ROCR.hiv$hiv.nn$labels) lines(c(0,1),c(0,1)) lines(c(0,1),c(1,0)) perf1 <- performance(pred,'fpr','fnr') for (i in 1:length(perf1@x.values)) { for (j in 1:length(perf1@x.values[[i]])) { lines(c(0,1),c(perf1@y.values[[i]][j], perf1@x.values[[i]][j]),col=rev(terrain.colors(10))[i],lty=3) } } perf<-performance(pred,'ecost') plot(perf,lwd=1.5,xlim=c(0,1),ylim=c(0,1),add=TRUE) # RCH data(ROCR.simple) ROCR.simple$labels[ROCR.simple$predictions >= 0.7 & ROCR.simple$predictions < 0.85] <- 0 #as.numeric(!labels[predictions >= 0.6 & predictions < 0.85]) pred <- prediction(ROCR.simple$predictions, ROCR.simple$labels) perf <- performance(pred,'tpr','fpr') plot(perf, main="ROC curve with concavities (suboptimal) and ROC convex hull (Fawcett)") perf1 <- performance(pred,'rch') plot(perf1,add=TRUE,col='red',lwd=2) #--------------------------------------------------------------------- # (plotting cutoff vs. measure) data(ROCR.hiv) pp <- ROCR.hiv$hiv.svm$predictions ll <- ROCR.hiv$hiv.svm$labels measures <- c('tpr','fpr','acc','err','rec','sens','fnr','tnr','spec', 'ppv','prec','npv','fall','miss','pcfall','pcmiss', 'phi','mat','mi','chisq','odds','lift','f') ## Don't be surprised by the decreased cutoff regions produced by 'odds ratio'. ## Cf. ?performance for details. 
pred <- prediction(pp, ll) par(mfrow=c(5,5)) for (measure in measures) { perf <- performance(pred, measure) plot(perf,avg="vertical",spread.estimate="boxplot") } #--------------------------------------------------------------------- measures <- c('tpr','err','prec','phi','mi','chisq','odds','lift','f') par(mfrow=c(6,6)) for (i in 1:(length(measures)-1)) { for (j in (i+1):length(measures)) { perf <- performance(pred, measures[i], measures[j]) plot(perf, avg="threshold", colorize=TRUE) } } #--------------------------------------------------------------------- data(ROCR.hiv) pp <- ROCR.hiv$hiv.svm$predictions ll <- ROCR.hiv$hiv.svm$labels data(ROCR.xval) pp <- ROCR.xval$predictions ll <- ROCR.xval$labels pred <- prediction(pp, ll) par(mfrow=c(3,3)) perf <- performance(pred, "odds", "fpr") plot(perf, colorize=TRUE) plot(perf, avg="threshold", lwd=2, add=TRUE) perf <- performance(pred, "phi", "err") plot(perf, colorize=TRUE) plot(perf, avg="threshold", lwd=2, add=TRUE) perf <- performance(pred, "f", "err") plot(perf, colorize=TRUE) plot(perf, avg="threshold", lwd=2, add=TRUE) perf <- performance(pred, "f", "ppv") plot(perf, colorize=TRUE) plot(perf, avg="threshold", lwd=2, add=TRUE) perf <- performance(pred, "mat", "ppv") plot(perf, colorize=TRUE) plot(perf, avg="threshold", lwd=2, add=TRUE) perf <- performance(pred, "npv", "ppv") plot(perf, colorize=TRUE) plot(perf, avg="threshold", lwd=2, add=TRUE) perf <- performance(pred, "acc", "phi") plot(perf, colorize=TRUE) plot(perf, avg="threshold", lwd=2, add=TRUE) perf <- performance(pred, "lift", "phi") plot(perf, colorize=TRUE) plot(perf, avg="threshold", lwd=2, add=TRUE) perf <- performance(pred, "f", "phi") plot(perf, colorize=TRUE) plot(perf, avg="threshold", lwd=2, add=TRUE) perf <- performance(pred, "mi", "phi") plot(perf, colorize=TRUE) plot(perf, avg="threshold", lwd=2, add=TRUE) perf <- performance(pred, "chisq", "phi") plot(perf, colorize=TRUE) plot(perf, avg="threshold", lwd=2, add=TRUE) perf <- performance(pred, 
"acc", "mi") plot(perf, colorize=TRUE) plot(perf, avg="threshold", lwd=2, add=TRUE) perf <- performance(pred, "fall", "odds") plot(perf, colorize=TRUE) plot(perf, avg="threshold", lwd=2, add=TRUE) perf <- performance(pred, "tpr", "lift") plot(perf, colorize=TRUE) plot(perf, avg="threshold", lwd=2, add=TRUE) perf <- performance(pred, "fall", "lift") plot(perf, colorize=TRUE) plot(perf, avg="threshold", lwd=2, add=TRUE) perf <- performance(pred, "npv", "f") plot(perf, colorize=TRUE) plot(perf, avg="threshold", lwd=2, add=TRUE) perf <- performance(pred, "prec", "f") plot(perf, colorize=TRUE) plot(perf, avg="threshold", lwd=2, add=TRUE) perf <- performance(pred, "tpr", "f") plot(perf, colorize=TRUE) plot(perf, avg="threshold", lwd=2, add=TRUE) par(opar) ROCR/NEWS0000644000176200001440000000204313644317760011530 0ustar liggesusersThis file documents changes and updates to the ROCR package. Version 1.0-10 (Mar 31, 2020) - added area under the Precision/Recall curve (aucpr) Version 1.0-8 (Mar 26, 2020) - Changed maintainer email address - fixed issues for R 4.0 Version 1.0-7 (Mar 26, 2015) - Changed maintainer email address Version 1.0-5 (May 12, 2013) - Used standardized license specification in DESCRIPTION file - Removed LICENCE file - Removed .First.lib in zzz.R - CITATION moved into inst folder and adjusted Version 1.0-4 (Dec 08, 2009) - fixes bug with 1.0-3 that prevented plot arguments getting passed through Version 1.0-3 - adapted to more strict R CMD CHECK rules in R > 2.9 Version 1.0-2 (Jan 27, 2007) - fixed minor bug in 'prediction' function concerning the optional parameter 'label.ordering' (thanks to Robert Perdisci for notifying us). - added an optional parameter 'fpr.stop' to the performance measure 'auc', allowing to calculate the partial area under the ROC curve up to the false positive rate given by 'fpr.stop'. 
ROCR/vignettes/0000755000176200001440000000000015134440377013037 5ustar liggesusersROCR/vignettes/ROCR.Rmd0000644000176200001440000003132415134440267014251 0ustar liggesusers--- title: "ROCR: visualizing classifier performance in R" output: rmarkdown::html_vignette author: Tobias Sing, Oliver Sander, Niko Beerenwinkel, Thomas Lengauer abstract: ROCR is a package for evaluating and visualizing the performance of scoring classifiers in the statistical language R. It features over 25 performance measures that can be freely combined to create two-dimensional performance curves. Standard methods for investigating trade-offs between specific performance measures are available within a uniform framework, including receiver operating characteristic (ROC) graphs, precision/recall plots, lift charts and cost curves. ROCR integrates tightly with R's powerful graphics capabilities, thus allowing for highly adjustable plots. Being equipped with only three commands and reasonable default values for optional parameters, ROCR combines flexibility with ease of usage. vignette: > %\VignetteIndexEntry{ROCR} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} bibliography: references.bibtex --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` # Introduction ```{r setup} library(ROCR) ``` Pattern classification has become a central tool in bioinformatics, offering rapid insights into large data sets [[@Baldi2001]](#References). While one area of our work involves predicting phenotypic properties of HIV-1 from genotypic information [[@Beerenwinkel2002;@Beerenwinkel2003;@Sing04learningmixtures]](#References), scoring or ranking predictors are also vital in a wide range of other biological problems. Examples include microarray analysis (e.g. 
prediction of tissue condition based on gene expression), protein structural and functional characterization (remote homology detection, prediction of post-translational modifications and molecular function annotation based on sequence or structural motifs), genome annotation (gene finding and splice site identification), protein–ligand interactions (virtual screening and molecular docking) and structure–activity relationships (predicting bioavailability or toxicity of drug compounds). In many of these cases, considerable class skew, class-specific misclassification costs, and extensive noise due to variability in experimental assays complicate predictive modelling. Thus, careful predictor validation is compulsory. ```{r, echo = FALSE, results = 'asis'} table <- data.frame(group = c("Contingency ratios", "Discrete covariation measures", "Information retrieval measures", "Performance in ROC space", "Absolute scoring performance", "Cost measures"), measure = c("error rate, accuracy, sensitivity, specificity, true/false positive rate, fallout, miss, precision, recall, negative predictive value, prediction-conditioned fallout/miss.", "Phi/Matthews correlation coefficient, mutual information, Chi-squared test statistic, odds ratio", "F-measure, lift, precision-recall break-even point", "ROC convex hull, area under the ROC curve", "calibration error, mean cross-entropy, root mean-squared error", "expected cost, explicit cost")) knitr::kable(table, caption = "***Table 1:**Performance measures in the ROCR package*", col.names = c("",""), align = "l") ``` The real-valued output of scoring classifiers is turned into a binary class decision by choosing a cutoff. As no cutoff is optimal according to all possible performance criteria, cutoff choice involves a trade-off among different measures. Typically, a trade-off between a pair of criteria (e.g. sensitivity versus specificity) is visualized as a cutoff-parametrized curve in the plane spanned by the two measures. 
Popular examples of such trade-off visualizations include receiver operating characteristic (ROC) graphs, sensitivity/specificity curves, lift charts and precision/recall plots. [@Fawcett2004](#References) provides a general introduction into evaluating scoring classifiers with a focus on ROC graphs. Although functions for drawing ROC graphs are provided by the Bioconductor project (https://www.bioconductor.org) or by the machine learning package Weka (https://www.cs.waikato.ac.nz/ml), for example, no comprehensive evaluation suite is available to date. ROCR is a flexible evaluation package for R (https://www.r-project.org), a statistical language that is widely used in biomedical data analysis. Our tool allows for creating cutoff-parametrized performance curves by freely combining two out of more than 25 performance measures (Table 1). Curves from different cross-validation or bootstrapping runs can be averaged by various methods. Standard deviations, standard errors and box plots are available to summarize the variability across the runs. The parametrization can be visualized by printing cutoff values at the corresponding curve positions, or by coloring the curve according to the cutoff. All components of a performance plot are adjustable using a flexible mechanism for dispatching optional arguments. Despite this flexibility, ROCR is easy to use, with only three commands and reasonable default values for all optional parameters. In the example below, we will briefly introduce ROCR's three commands—prediction, performance and plot—applied to a 10-fold cross-validation set of predictions and corresponding class labels from a study on predicting HIV coreceptor usage from the sequence of the viral envelope protein. After loading the dataset, a prediction object is created from the raw predictions and class labels. 
```{r} data(ROCR.hiv) predictions <- ROCR.hiv$hiv.svm$predictions labels <- ROCR.hiv$hiv.svm$labels pred <- prediction(predictions, labels) pred ``` Performance measures or combinations thereof are computed by invoking the performance method on this prediction object. The resulting performance object can be visualized using the method plot. For example, an ROC curve that trades off the rate of true positives against the rate of false positives is obtained as follows: ```{r, fig.asp=1, fig.width=5, fig.align='center'} perf <- performance(pred, "tpr", "fpr") perf plot(perf, avg="threshold", spread.estimate="boxplot") ``` The optional parameter avg selects a particular form of performance curve averaging across the validation runs; the visualization of curve variability is determined with the parameter spread.estimate. ```{r, echo=FALSE, results='asis', fig.asp=0.35, fig.width=7, fig.align='center',fig.cap="***Fig 1:** Visualizations of classifier performance (HIV coreceptor usage data): (a) receiver operating characteristic (ROC) curve; (b) peak accuracy across a range of cutoffs; (c) absolute difference between empirical and predicted rate of positives for windowed cutoff ranges, in order to evaluate how well the scores are calibrated as probability estimates. Owing to the probabilistic interpretation, cutoffs need to be in the interval [0,1], in contrast to other performance plots. 
(d) Score density estimates for the negative (solid) and positive (dotted) class.*"} data(ROCR.hiv) pp.unnorm <- ROCR.hiv$hiv.svm$predictions ll <- ROCR.hiv$hiv.svm$labels # normalize predictions to 0..1 v <- unlist(pp.unnorm) pp <- lapply(pp.unnorm, function(run) {approxfun(c(min(v), max(v)), c(0,1))(run)}) par(mfrow=c(1,4)) pred<- prediction(pp, ll) perf <- performance(pred, "tpr", "fpr") plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, coloraxis.at=seq(0,1,by=0.2),) plot(perf, col="gray78", add=TRUE) plot(perf, avg= "threshold", colorize=TRUE, colorkey=FALSE,lwd= 3,,add=TRUE) mtext(paste0("(a)"), side = 3, adj = 0.01,line = 1) perf <- performance(pred, "acc") plot(perf, avg= "vertical", spread.estimate="boxplot", lwd=3,col='blue', show.spread.at= seq(0.1, 0.9, by=0.1),) mtext(paste0("(b)"), side = 3, adj = 0.01,line = 1) plot(performance(pred, "cal", window.size= 10), avg="vertical",) mtext(paste0("(c)"), side = 3, adj = 0.01,line = 1) plot(0,0,type="n", xlim= c(0,1), ylim=c(0,7), xlab="Cutoff", ylab="Density",) mtext(paste0("(d)"), side = 3, adj = 0.01,line = 1) for (runi in 1:length(pred@predictions)) { lines(density(pred@predictions[[runi]][pred@labels[[runi]]=="-1"]), col= "red") lines(density(pred@predictions[[runi]][pred@labels[[runi]]=="1"]), col="green") } ``` Issuing `demo(ROCR)` starts a demonstration of further graphical capabilities of ROCR. The command `help(package=ROCR)` points to the available help pages. In particular, a complete list of available performance measures can be obtained via help(performance). A reference manual can be downloaded from the ROCR website. In conclusion, ROCR is a comprehensive tool for evaluating scoring classifiers and producing publication-quality figures. It allows for studying the intricacies inherent to many biological datasets and their implications on classifier performance. 
## Additional examples Below you can find many additional examples of ROCR's features of performance measurement and the possibilites in plotting. However, this only a first taste. For more examples, please run `demo(ROCR)` and make sure the plotting deminsions are big enough. ### ROC curves, Precision/Recall graphs and more ... ```{r, fig.asp=1, fig.width=5, fig.align='center'} perf <- performance(pred, "tpr", "fpr") plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main= "With ROCR you can produce standard plots\nlike ROC curves ...") plot(perf, lty=3, col="grey78", add=TRUE) ``` ```{r, fig.asp=1, fig.width=5, fig.align='center'} perf <- performance(pred, "prec", "rec") plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main= "... Precision/Recall graphs ...") plot(perf, lty=3, col="grey78", add=TRUE) ``` ```{r, fig.asp=1, fig.width=5, fig.align='center'} perf <- performance(pred, "sens", "spec") plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main="... Sensitivity/Specificity plots ...") plot(perf, lty=3, col="grey78", add=TRUE) ``` ```{r, fig.asp=1, fig.width=5, fig.align='center'} perf <- performance(pred, "lift", "rpp") plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main= "... and Lift charts.") plot(perf, lty=3, col="grey78", add=TRUE) ``` ### Averaging over multiple predictions Multiple batches of predictions can be analyzed at the same time. ```{r} data(ROCR.xval) predictions <- ROCR.xval$predictions labels <- ROCR.xval$labels length(predictions) ``` ```{r} pred <- prediction(predictions, labels) perf <- performance(pred,'tpr','fpr') ``` This can be used for plotting averages using the `avg` argument. 
```{r, fig.asp=1, fig.width=5, fig.align='center'} plot(perf, colorize=TRUE, lwd=2, main='ROC curves from 10-fold cross-validation') ``` ```{r, fig.asp=1, fig.width=5, fig.align='center'} plot(perf, avg='vertical', spread.estimate='stderror', lwd=3,main='Vertical averaging + 1 standard error', col='blue') ``` ```{r, fig.asp=1, fig.width=5, fig.align='center'} plot(perf, avg='horizontal', spread.estimate='boxplot', lwd=3, main='Horizontal averaging + boxplots', col='blue') ``` ```{r, fig.asp=1, fig.width=5, fig.align='center'} plot(perf, avg='threshold', spread.estimate='stddev', lwd=2, main='Threshold averaging + 1 standard deviation', colorize=TRUE) ``` ### Cutoff stacking ```{r, fig.asp=1, fig.width=6, fig.align='center'} plot(perf, print.cutoffs.at=seq(0,1,by=0.2), text.cex=0.8, text.y=lapply(as.list(seq(0,0.5,by=0.05)), function(x) { rep(x,length(perf@x.values[[1]])) } ), col= as.list(terrain.colors(10)), text.col= as.list(terrain.colors(10)), points.col= as.list(terrain.colors(10)), main= "Cutoff stability") ``` ### Combination of performance measures Performance measures can be combined freely. ```{r} perf <- performance(pred,"pcmiss","lift") ``` ```{r, fig.asp=1, fig.width=5, fig.align='center'} plot(perf, colorize=TRUE, print.cutoffs.at=seq(0,1,by=0.1), text.adj=c(1.2,1.2), avg="threshold", lwd=3, main= "You can freely combine performance measures ...") ``` # Acknowledgement Work at MPI supported by EU NoE BioSapiens (LSHG-CT-2003-503265). 
# References ROCR/vignettes/references.bibtex0000644000176200001440000001571613644317760016374 0ustar liggesusers @article{Sing2005, author = {Sing, Tobias and Sander, Oliver and Beerenwinkel, Niko and Lengauer, Thomas}, title = "{ROCR: visualizing classifier performance in R}", journal = {Bioinformatics}, volume = {21}, number = {20}, pages = {3940-3941}, year = {2005}, month = {08}, abstract = "{Summary: ROCR is a package for evaluating and visualizing the performance of scoring classifiers in the statistical language R. It features over 25 performance measures that can be freely combined to create two-dimensional performance curves. Standard methods for investigating trade-offs between specific performance measures are available within a uniform framework, including receiver operating characteristic (ROC) graphs, precision/recall plots, lift charts and cost curves. ROCR integrates tightly with R's powerful graphics capabilities, thus allowing for highly adjustable plots. Being equipped with only three commands and reasonable default values for optional parameters, ROCR combines flexibility with ease of usage.Availability:http://rocr.bioinf.mpi-sb.mpg.de. ROCR can be used under the terms of the GNU General Public License. 
Running within R, it is platform-independent.Contact:tobias.sing@mpi-sb.mpg.de}", issn = {1367-4803}, doi = {10.1093/bioinformatics/bti623}, url = {https://doi.org/10.1093/bioinformatics/bti623}, eprint = {https://academic.oup.com/bioinformatics/article-pdf/21/20/3940/522867/bti623.pdf}, } @book{Baldi2001, author = {Pierre Baldi and Søren Brunak}, title = {Bioinformatics: The Machine Learning Approach.}, year = {2001}, publisher = {MIT Press, Cambridge, MA.} } @article{Beerenwinkel2003, author = {Beerenwinkel, Niko and Däumer, Martin and Oette, Mark and Korn, Klaus and Hoffmann, Daniel and Kaiser, Rolf and Lengauer, Thomas and Selbig, Joachim and Walter, Hauke}, title = "{Geno2pheno: estimating phenotypic drug resistance from HIV-1 genotypes}", journal = {Nucleic Acids Research}, volume = {31}, number = {13}, pages = {3850-3855}, year = {2003}, month = {07}, abstract = "{Therapeutic success of anti-HIV therapies is limited by the development of drug resistant viruses. These genetic variants display complex mutational patterns in their pol gene, which codes for protease and reverse transcriptase, the molecular targets of current antiretroviral therapy. Genotypic resistance testing depends on the ability to interpret such sequence data, whereas phenotypic resistance testing directly measures relative in vitro susceptibility to a drug. From a set of 650 matched genotype–phenotype pairs we construct regression models for the prediction of phenotypic drug resistance from genotypes. Since the range of resistance factors varies considerably between different drugs, two scoring functions are derived from different sets of predicted phenotypes. Firstly, we compare predicted values to those of samples derived from 178 treatment-naive patients and report the relative deviance. Secondly, estimation of the probability density of 2000 predicted phenotypes gives rise to an intrinsic definition of a susceptible and a resistant subpopulation. 
Thus, for a predicted phenotype, we calculate the probability of membership in the resistant subpopulation. Both scores provide standardized measures of resistance that can be calculated from the genotype and are comparable between drugs. The geno2pheno system makes these genotype interpretations available via the Internet (http://www.genafor.org/).}", issn = {0305-1048}, doi = {10.1093/nar/gkg575}, url = {https://doi.org/10.1093/nar/gkg575}, eprint = {https://academic.oup.com/nar/article-pdf/31/13/3850/9487404/gkg575.pdf}, } @article{Beerenwinkel2002, author = {Beerenwinkel, Niko and Schmidt, Barbara and Walter, Hauke and Kaiser, Rolf and Lengauer, Thomas and Hoffmann, Daniel and Korn, Klaus and Selbig, Joachim}, title = {Diversity and complexity of HIV-1 drug resistance: A bioinformatics approach to predicting phenotype from genotype}, volume = {99}, number = {12}, pages = {8271--8276}, year = {2002}, doi = {10.1073/pnas.112177799}, publisher = {National Academy of Sciences}, abstract = {Drug resistance testing has been shown to be beneficial for clinical management of HIV type 1 infected patients. Whereas phenotypic assays directly measure drug resistance, the commonly used genotypic assays provide only indirect evidence of drug resistance, the major challenge being the interpretation of the sequence information. We analyzed the significance of sequence variations in the protease and reverse transcriptase genes for drug resistance and derived models that predict phenotypic resistance from genotypes. For 14 antiretroviral drugs, both genotypic and phenotypic resistance data from 471 clinical isolates were analyzed with a machine learning approach. Information profiles were obtained that quantify the statistical significance of each sequence position for drug resistance. For the different drugs, patterns of varying complexity were observed, including between one and nine sequence positions with substantial information content. 
Based on these information profiles, decision tree classifiers were generated to identify genotypic patterns characteristic of resistance or susceptibility to the different drugs. We obtained concise and easily interpretable models to predict drug resistance from sequence information. The prediction quality of the models was assessed in leave-one-out experiments in terms of the prediction error. We found prediction errors of 9.6{\textendash}15.5\% for all drugs except for zalcitabine, didanosine, and stavudine, with prediction errors between 25.4\% and 32.0\%. A prediction service is freely available at http://cartan.gmd.de/geno2pheno.html. HIV-1,HIV type 1;NRTIs,nucleoside inhibitors of the reverse transcriptase;ZDV,zidovudine;ddC,zalcitabine;ddI,didanosine;d4T,stavudine;3TC,lamivudine;ABC,abacavir;NNRTI,nonnucleoside reverse transcriptase inhibitors;NVP,nevirapine;DLV,delavirdine;EFV,efavirenz;PI,protease inhibitor;SQV,saquinavir;IDV,indinavir;RTV,ritonavir;NFV,nelfinavir;APV,amprenavir;RT,reverse transcriptase}, issn = {0027-8424}, URL = {https://www.pnas.org/content/99/12/8271}, eprint = {https://www.pnas.org/content/99/12/8271.full.pdf}, journal = {Proceedings of the National Academy of Sciences} } @inproceedings{Fawcett2004, author = {T. 
Fawcett}, title = {ROC graphs: notes and practical considerations for researchers.}, booktitle = {HPL-2003-4.}, year = {2004}, pages = {89--96}, publisher = {HP Labs, Palo Alto, CA.} } @inproceedings{Sing04learningmixtures, author = {Tobias Sing and Niko Beerenwinkel and Thomas Lengauer}, title = {Learning Mixtures of Localized Rules by Maximizing the Area Under the ROC Curve}, booktitle = {In et al José Hernández-Orallo, editor, 1st International Workshop on ROC Analysis in Artificial Intelligence}, year = {2004}, pages = {89--96} } ROCR/data/0000755000176200001440000000000015134440366011736 5ustar liggesusersROCR/data/ROCR.simple.rda0000644000176200001440000000252513644317760014474 0ustar liggesusers‹­WiP“gþÀ"¦J=ZA.©„Š:VŠÕŒÅ—åƒ-•J9¬‚–Š–ZüQq†£Zl=J ­‚T¹ „#˜„3´T 3•ižýÑÎ0áð›I6»ûì³ûîî›L<ßóq4ð1`†Íè°4ï:šºlÍ‹ÑekäO÷]žoF†Ñø—”a 5¯*^Ï•gê †áI<¶:œÑÈŽëf¾¯ü©aô¶œµß:kdƒ+«ºzi¼7äpÿâtýü³5rèwQì}jŽ ð’WYŸ@W¤˜$ϵʬ2ð›]¼ Y²˜ýyzWìO¿Ø¶I®‘JŽõà“¬iÏOÉâ:à›IO©Q&Å Ÿú‚}ô]µÔu% Ôg(¯ãÈÎbþ äùú X¼Býz“pƒ-xû"¹ËI* :‘¿O±ÁyïËÆ¾%*‰ÿÈF >Õ¶öb>=åÉÇ:§€Üë\·<å8ñÝp'¾•S<â„¶Ê[T¯UŠ1ð’-ÆÀÕEÕîï F]ÃúÊ Ø[¯®3…^kɧ<}‰‡¨>ŽŠø{â’À[ßj¿²½2‰MórVo†¼/ñކTò=s‘O±þÎIàzÆ ¦in{Àך·×y¤—í€ld1<È‘ç:ô³¡¾‹ ž¡SƒjÌE›‘E`Œ”êÚ6­$ûcÙZð>ÈÒ]öÏ—6ÍÝ"_êß’ÏßN\!Ù {Õä¤+¤¤NTM}L­©N*]všòzäy²o#ÍÍ™»þh§¿iŽªn ÎÕ—¼Ý‚Îáj‰ü,»±šÁ×]y…øJ³1;Ú3ïji.a‰ÍèËÍÑó£t^VYxŠÒî*¡wNq7ª/s¡þO³>Æ9ÌcÜh'Êü ÷oþŠö&`E.pƒSù;I–ù/‚_â‘h¼,¹È‚ä;^¯/ÊXDsR/}’M÷æ@ dÓ‰¦Ô¡z`—ƒúòþ¨Àwï—måįò~â ubë*àDNéÍtŸÜË¢ïé°vÏ ñŠräoÞeuùÚ¿_ƒø´UÞútþ«Ã`Wš°?þÉJéx圷üÁW•W é%Sà…y÷h/KDxAö÷rnQܽ 3àÅîß}ÑOŒ¼÷}s3«kÛRI{q6þà$Å&tUùûC*¸ûוî!ü®ù4ïáÝÆÓ˜w¿äO/øJÒ𤠮t¦ý¯HEþÎ5ëh~®ñ{hO¬·Þ…=Ç+Dû@ƒÓNØÄù±tn~0Íïçvntq²Ãà/sH: ÙdBßSYOÇnÃ߸Þþ,õ;¶Ú…êñ°»‚>÷í‚” iŸãn•SŸ¿q÷@|OøVúP.,Pà‚½ø£ÏÌ 9ÓóÜLqÚôÙòj{f›G[ÚòÎ÷œóÕÚÏ™øæ:gmùf[‡¶g¡sœmœ6}¾{;Wû|ïÛ|óϵžÙÞmÏBïÍB÷w¡÷f¶ö¹æŸk_ö~ÎäŸÿ÷Ïþ²è;t4$RóÁˆÁŸ•çÿc"N„BzüXäsÓ¢ðCÁ!áNãõ/¸ y² ROCR/data/ROCR.hiv.rda0000644000176200001440000014303713644317760013775 0ustar liggesusers‹ìýwT[÷-ŠŠ" 0¡ˆ(æ"&ŒSIb1€Šˆ ˆŠP0c˜Š€3‚€ÒÉ’%%Ç•W­HR¹U,·çýóÞ¯Ý÷^;wŸö}»µ½Ù@1k¬YcŽÑûHµÖÔÚPÍZ­K—.]»tS¢ÿÛþ_å®ô”º(wQ¥¿ö\»Úd­þ.çÃô/t^÷÷kæ+ó·}éËmPÞkŽÖ¸ÅÀºxy‰nF¯ºŸ» 
½…N“/˜’Âòœ«µö¤ñüñúZ "O6ö©Ê™QÖ£ÄÓ©BH w|û0m(Ädz­Aò3tø±»!¸‹E£“QYöãyÏGÈ#~T]]bÙ§‘o޼‚$Ípºµ¸ 6É·ÝÇî…Èì„ý×û ¬JmÒÒ !Òê­–»à>dk]ütŠ!‡œÐvîîe-ë[÷z!é§V‚ëØ(’x|í Ã3 ‹1è¯yº¢Q²;DƒÁ‰ë~1ü $Ow0Ût ½ÖÙWê 5Hó]Âë_>¼èõ°JHåszôh'üY'7é¹u!YkÂã¿däs½&=b Ø(ä .Õsr“òQš1R·¦¹Ò§Ê5Mfàé×v™ûiÄÖ“Ÿ¸x1û )بq¡ÏImHÖOVÛŸâù}»ñ*E=+½¤óv ¤WY/W?>iåç3Ï]V€:þr–§z¤1†“dG4 üêÒÏkíyðzß^{Ûû"š­z–°콯9;"/‚* þ5ÈYYºR^ÀÆpˆî0ÛÖ"â¬´Üæ>ø»{˜•žI~ø¡…Žÿ½o±M²þë]HA“ñÝÒ31àó½÷¿ò¦ckçÍš ‰eèäþ‹!nvzÞuÝã§ÛoØ)7¼Ùt i hçñêõÃú-3ý ýÒÞDze()2Ùèšä´Ú®ºú¶7É[Ó²hÈpeˆüNŸ~ô ²{&‰ÌÖB°zjÿùÆOÁ73Î(†|öMÉ|.©§÷Ù\¼ý‚ß:äænHéùA§ö6]l:‘/ïò÷Cgœ¼tÚ¦Í?³ÕY/×@¦>ÿk4$÷;Føª¢uþ¾·QIk@-*b-Ö€ôÞÉ¥Ž^vn¸?ØB{¤±¶Ý4ÖGC|]õkG¥+šFý°íÞN ÌŽ”­?¸’öö÷3·uºí˜‡fé÷ìpÄ õƒuo>M…|¾åºs»û“çgÇkgÇVãéͲ¡ŸX"Ô;q³×M;P~³»,YˆèŒ¼ejê+Á>1òý›¿°¬Â"¢K]÷&dÌ…4Š¿8jÓ!PR–{ZG4„?oz:ÊÀK‹\;[ù(dç­šzÔ†¸4`sÅá]í¸òvšßuˆö”)]9t”ݰöè¥ÛA Io·Ÿ‘™Ã]æIBòl‡¶õrmˆÔê'V?JÞ+uDœ| ^¸1ûâ}9}®î¿÷¸üöïýù«&«,ì Ñ·Dnˆ'ý9î›í1ÕŽå5 ð†ü„?,ü¯ùB&ëÁ.[Ùbïm]ƒ4ßAXBñœ}©°¾#ýèfÈßÍ-Š«+%IAp D›–}›6oûì_·üÝÒ·~nr3àÉñ“ÔnA´ã{§”–œ·¹ÇHžëÆ;7µC¾U³¸|¨¨]ÌäºÓŸï m©@êüSRiáè²ÑÃÀ;]¿|ßõ-ÅùžhMËÙ\——¥Ñ ý©qÓ_A<éYeqÌ%FÏëÝZùpÿ£vNû!¹–QQe’ÉãÒÄÇô9+‰7óö5È2팬!Zu~ÅÕv?oX2ã` $gF8PIZ‹4€VõùÍECÅp«0úÜmm½¼+¨õVÂ=¶æ¤±~ë*gÊœW}7(¹NDëg]Ÿ«Ü“Ôt³ÍßDôæn[5ü Òƒm?Ì9–KÞ /D9ëNEˆë¹©›Àª{=vq) rµ!_Ç{“ãÝ'Î ±ûjÛ™ ¶¶œèJ N}&|ð^ì9^Qg6€{þ.g³¤zEþñ¯”!Þ°ëI÷žXɽ!¶ÿÑi=D?f§ßcC4+·—Ùëm‡Ž«d£ñ¬Ò} ÇcQ]½?|›Y,dý~þ§„æ´yp¼˜Cæ­,°uÜŒù±›@9_¸zn¨=$¿ÏØòE5é<{û!àÚ?r£ùÄÂn²Ó ÞÚºŽ; ×Î:vlð%5®­çI["a ß^ë:šp—–›ÎDóôS?³¬bé×§où|Â~"Ÿž³ã)ä11^Š!ˆñ%OgB<7ÿîM§rˆ}Oœì_UɃÛß΂|ê´wk%Ià½ÏØøì\"øüOãîiöÁ?΃zW6ùD5mŸë¶l^öü ý<+לþfJ9n‘±ú~ˆïm ¹cFëcý6'‹úC÷™ÛxöÇȇ\h±ÚÒ3EGn͹I™³mÀ˫忟P~â<Õêq1Þ—z4 ¢)ŒÂ¿åï;ÎNÞ§S,ªMBP?Ä­#\g {ž,íJJ­,f¬0ÂÛÀ$­”ÐzP{úÜÐò_¾ ÈaWœ@Càˆ³%¦r’Vú®ÂvA:I^ÜpìALêK¾Ù­2©ýۋƯõõÒ9¿ z?JÛf;*ƒÈ3öBN-m¿¤ßŠråxýrc%(½5n#·A&~ö—2m6Î^7it<Ä µ¯äÁ ’³²W·ž>¢íâžw»ZƒýÍÏúʬ«à*¯ß¹ý7$ aá[üBÚˆåÃkM6¢þE߯&ºdv[Ý¢‡´Úo à–Ãþ´´Ú5Þ õ 87ÄËÌ@UOñ?•Xjþ¸ /+=a­þ£_ó Úÿ¨ÄËÛƒ°6‹÷ë¤ô!-&æå¶;.@ÖÓáÆ›Õ}Qh²LéeÞKÜê5å ïâdB=Né§Æía]v>Ï!…ÇcD]g̓lêÖ´Þná(üvuø±`Ø7 Í/C ÷Ž->r RÛiŸö€?vΡ½×Ÿ¡qÔ.Õ›CÀ¯øÔ:u2Ä=§“´½<ˆ4–MÍi{üÏz$Q3ptÔM=éŒ}á®qá¡Ð¡ËË!Yâº:,"¬‹Íµ]L!>^ýö]!¤Ïv»î Þ qï®èµ>¨ø½]¾½™í1XœÈo+m¬wgPu¿|7dG•Y”÷‡øC l½ 
¨Y¾²ûÞ5~HáØùdy“¨8¥‚B;RøRK§r¶5ø½ÚVØ—¹5‚ôE¤ýeîÃþO†#/,W³wIÌ•ƒKv·Òv”1£Ö$ï»:±P¼[Yv8cÉÂáºzÐ|ôý‚¢•O iËq*9ñÔŒy„@½w®ºý"rõT ŠÝ¢Ù=Úˆc!‘-ØÙ÷ÁZ4ïÔˆ4x>¢[½½ÕÍ£ylƒ ë7éFG—q¨Ê×N÷Ô–Ö§™“|ŒTÑÒ¼½tþÞ;4.¬°ß.’݇. K÷@îVcâ½>WáʼnÂÂ9ïêR2õøÉÄ.`›:Ämzø¾¨–é´Åßÿž%¬e—­¸cG‚«É"Õ™F½#¿†}9Èß*(¥â‹*eøüд­ûÀùúG¯o>³‰×VžWu×Ü5©Ë}7H?pQ¢Mã—•ù¢çí4>¹e8ïó¾wk”•YÁú}×ïýAõ3ëÁÙúÖöSzÛ "£ÐÞ«¦BTþòí0ZþÓ=ÿ<Üâb©ÑòG,HÄ”«ž|ÞèBÑVïË4~Þ®}¼¾ïh}8çšõRëhܤõxt+Z¦ðÒ–L€DE¸`®Ž!8WƦi<٠馕ÒwÐz#lk.š¥ìSFG“!²Ééa·?ŸÆ £ïµÓ¸ØÐd€W䇣kû£áB9&kרUÒ°â˜vD€ ‰ÝVŸ–õGÑñKÙeó—úwQüƒFYÍï‘eû!QµºáühH—í¹ž•ÛARfß6ïjœ q؇ºV¶ ¢_3 ”ÒÝß&N[![دÿávUÈóðë'D7Vf\ÑŸiƒÃ‰È»gA Ð÷Zp¥ ZÆv|M·†8²2z릳ÖúÚ®$žÓgïiÓøL'ÂŒ¿ rÜ‹û(PûN|ƒD“R¹ü)œ‡«Û•¾†ôbìîÇÑ áÒhZ) ÒŽW ¯õ÷N§þÓ¸[±ßâ7ŠÔ '¯QöÓ&r%W>~ÎD>¨®#÷7iAÔE…ñ8„;VwYë¨[(Sà?R½™¦/"ˆ'¾?)š2"e×ø¦Ïó!^Z­Së¬ Ièüƹ[†A’µùrÊ%ÔWMz°ÝKŠå8´©÷4®ì<˜ŽQÑ}yÆ-c×Uw‡ðØ–(ÿvHúzçΜ­Y{D¯M» ûã'×ã"d ªË&?XùBù7Ï€Tí}ó|eÈ4Æz,ׄà¤ÄàŒ‘„×Máꛊ¿-CªíÙptÏ/Úîzä5sòßû³gmØ•h?Í‹¿<éX¦ŽåO†vÙ‚Æ®ñÙ–‘šI÷—>èªLª¬¸s‚l;µCê¸/½Á.. =¢Hv¾¸õðQ¤3fɹ•œ~)Tùà± òCŠs(õ_íòsK$±æ¿¸k3Àé~j風÷*3Ì"¡Ð{Ì‚lpg5 û>yRß$_ú®MYC6fO„pÚZIâ÷rÈnw:zˆr,ó7½\Ñ¥•¼a:.»L¸ÔÔí-DWò39«â ¥áÍz_Ô¼í”~Fˆê#–AúÝ‚ÐOc` í¯Î\ê7ôû’×ká÷Äàš¿µÒ:}æI Ýœ ñÏÛËžDEÆö÷Kãç”ð%Ú$´¿*~4^óÚ±ò•!iµµ00õáä çäZL"wMÞîcs Â`›S^7Ci¿Üûi—·QÝêP;h:¤ Ï_}ù½/döq“—Ú4Bdò*HM»»WoòœP7£Ñ4NØlàzåZ„´Òo{}ÒÅ·vº-ül¾JÚAH¿â\<< ²¬zKû:ý]·éCBÎ^Õ…?þ¤eÅ+PÓ»k¨Í¡ñ_5CËi<øT¦4ÿ ¤+iš´ú DUw¢´}A`±dJ_/ÚNØÕÏ:TQÞùÇ^¬Î€øÄëo8 ]Õ]qQ$CˆQ)ÿb—4bÖjÕµWµHnç6[If4¶fážé;¿ìÉ(|L¹×~í$ˆí6•íÞ­D’ú^°î8t›PæÑIÊYp~~PQáÆ'.u†´ýÚ²Üä²ù7Hâ®õVyx2î]µßÛhùÚKûûI!»ÔœñÌ:õë}kŒ¿ ^¶Å…©}{ý½^´?nzxQxýž˜ˆ¾Òþ¬2ÔfæÏy´^ûê´_§yË…’˜žØ0Ò=åïßQ+e—zFÑ|íT‹- ¡!YuwD³æ ˆÆyÙi —¯Àé`·ÙoºÌÕ€dnÔ“®Ž™±åwåp¦;\:h=Í'*¼|ʽ/g€ô¹œ—| Ù=*‚”Ø9í{<Ê—Ö[ñ1–Ô‡šoûô ¡"Þ™-¶"o&<íwD7P=äûÎ¥B²TïÇÛ‹\œ³óI  ÂòÀ•Ï»@ô¾Ó~C:Þ°j­dª'žä_îñºêÛ>¼´±9ûÊÊ’ý7W½[Ðêüè’=8·$×Iã»ãµ×AæÔ£æPÞHBTή_ØLã‘¡§·›íGŽié‘1m~¨éÇ ¸bh‹ÖâOnGÝ·Aª»föêGšÏoŸÞ¬²[îË'ÄC‡*ôs²¦iAŒ¦—‡vLFÓEë²lšNk*»sy8¢b•i®Î¨kéõÂZ/Ž´ÌöqœüZ™4² º\@ñü£zýÏ'’æÙ›OhZäÕ\¨€:מïV Þ:7ßySh=Ñ^§µ%~Î /UuØ' ®‡àÃ`auNoÈ,ÊûÕëÓøè›«þДãàN*öéÙ­þ©á#·€¿ôMÍw¯^9YmËsTûO‡EYG!ßÛ~XïÌWðçŽö.Ï8ˆK·®[~ãïz¢±kÎ+iC¿ÕΠ©|šLÓ–@Z2ï,«í7®G•.UÊuãŒiÒqš?Î~ô‘y6‰§Y›ë}Ú_»Üvs/YSÏx- ªîIBjÏËžO5<^º)ýñÜ 
•ÿZ'»µ­AhžÛ{ÅJHúír}«›Brö†¥~7#‘}ôiýòFϼªß=¬A³ƒžéþÛNþ†´¼Ç·¾xklYÎB±¦š• ^Ì ú†fƒAóòœÁ¿¦yºÕt ¨N,ó'%4¼ýAý’íøOí'ÌÄ º|ê";úº‚Å›ަùaÕÛ¢§Á ×÷_Dã«5ªS¾ÿ¢…•·s÷ÄAr\ñUô­”úz¬¢ë·7¤Ï¥yñ}¢ûŒBó>»¶y÷jh\žç“èJÛ¡£Løx"$E¦õ«Ç¨‡þGí 9YXîðUTœۅ¼!ÝñìûãŸ!Îí&Q}‰ForjÄjKˆ~юɤç̉ØaŒf^Áƒ`ÇÁ….;_wå-Êöå J}–³Å9©í6ý繜ÏxÂX»>k {Ài^„S±8öÒtˆ­ÖÍåDÝ%ÙÇ÷®ŽùãçÙÉ&ÙšO™1ï äE7Ä»y= L¸™uö„d÷ ý\ÎѸr{Ÿrû›æ4¾´¢»ÄÝÎЛÆu œJyoJ>áA„Iážqñ†ð&¼ú-JQ›Üã!^uºïö»4ïVº8{yj±ßV_áOëϬ%1Ï?"mÝg*2²æïzÒÓƒ¢Ÿ„Òú°)žÈý!Ÿ!Ì,ÚГ$7„Ú:ÅŒ†xÌ»1¹wk n>’+’yOõSp‚æA® ¹_jñ©$áë£cQšÁh.i^1sÜZO"ò¢C”…Í‹T¼‰ÔØ}ÅGxâ+ã½Ô?lðá…ËâH†ã»†øè»à„®W^h5 ”£ošæhðm+&±jŠ!1¯¹ïÏyÙ½_ýŒfѸzÅZïÝïAnÿ‚t[ÞÕ]ÎÍþ¢ ÇǨçY5TW˜zÐþóMVMÊiH<\”¶d øk‹ŸŸ67oÊ« éü¿ë5¥®þ¡ú57=çêBÚ6møÙ_Ý!uiþFpÂŽˆ“O³AUÒX¹^B£bµ–´¿Ý­4ö.¬Áýnx·‡ØhØÁÕ9îÎ8L½Ã¤Pcé°Òþ}?Oú ¯¤y®}Å#›gó@ õvÝæ[ œ&kÝMÕ>U=²G”#xNŠ<‚ÐeÃ[õ_!Ÿ+_´ãÍ ýÕ쵘ùŸ|X®õbldµÁ¿fÓ8þÐðv—ŒžmØj®â~4h81ÅÒí¦;Tìn‘G{Md­šJà3jìø|Sç5?ƒ3iå»114¾[gmÎ’€¥s_ûh™ÄwyÆ^P‚ Ëç8ûo»„Lݽ¦„”|¶©Ý"ï.Ôí¢|¶£)¬pÆ£¡_ˆüpÊ[@ʇUÚQíEZζÞ\F ]u¬²+ös[Ýë¡ ¿JÆžLêÙAßoxe¿;oh»}üýåsŽÜPäÁdÛyÍ‹…̪ÌÑȄևŠóÍÿy}‡Ìµ’ô¸«4¤µqq®ËÂ_¯ÌeÀ»¾ö yð˜dÜ´–Gþúº5 @û>x8"½¤ê ß§‚FI›Q¼‰våéRP~Ñ*æ4î–}Kwí‡W3j7¼E—¯¤oî5ÞÙA6žÆ»ç~¬‘ö…øaÏ I.a4_ÖE#µbµ+ú“/‹‰Šz~ uì—ë‹JÁÒî´ä‘Iƒáðm éÉ/oÊî©×ÇÁíÝIš‘"žÚªúƒ7æ$-Á_†—Uý€t' ìÝ'pNv|_´¶âYLÀu߬îã³¼ßÇGÄ_ Cm4§Â¦w/°:×¥ïºɈjå(Ùšÿ°‡l ¤u†ó!k éLCéAîºuCöÞ©MU-M-ñ¬½.M;i^n°ñå§“h;||ñªã´åÌc"w·ÙÑÀ òÙnA3ú¬‚äÄÞqw¿~¤ìóô–õýÀ ;nÄí¦¹Ò¶µkôy¨¶°­éÚrf4¬ö ¸M.0n”s ò»Ïægéþ>ÍÕþn©•ÓeGf&œæ½AÙ¨½ó®3G}*’®Î"Ò LOî€dÖoý}aë YÙ^üäz(Ígù«VÜЃ(@wÈãy4Ÿ8ʸ·Á(_7v:ó‚7VÄxæTHòæïrÚѱ(‘Û!P‹: .Íiëç}bçÙy¿wÊÀÚVýü» šæ¦O|¡s-ö£ÚŠ3­ ö%ÛT‰V˜íŽ¿ Tê¹àóèiÐäbóŽBð˜poÈ/°¦­JT¿Fþ7¤®ëÉuÆÜhƒú<èjÄš? 
–1œU±1ëó’!¹—Ud`ñšÜÂo¹—Ib—xjħ)äu­É†[ ¾¤ÎñÔóO™WH­VØ’÷æsH£–&& I ~¶G¾&E?=çœVʦíꇟK.Ñ|Eü¦¨r’³¬{ÅÕko!^lõEóðNÚoÿz{ÏiDegn¸ßz aЭ‡óo]„lˆ_¿E÷Æ€÷j(wAÞ0ˆ3åuÍ~_ a€(mŸ\Ó¦]{ *bSCGh ‰§ðkÒ<ã]›'CÌŸ¾÷ê+Ý'>¸@ãÍw_Xz^; sìc¶½úDãµ"ʹžšE#òqhèh òüŒ:µˆ6 ƒ¡þî²üÄ&gw¦?hå{°Òù• „}†׌C: غY#?¼!¬-Šü9Ÿ¶Þ²`%"ï„áÃðf}GcÑ¥«LZNÞÇàíÒmœxs,}_ËdzOÓçƒò±o¥†‘Töªºk5BPÜw»nL‡D‡?¹j ›´²Æ‹4Ò­å0öâ8Ÿohò’*%ÿñÄ…Ãî/¦ýÃEzšÑ'!™~õèÙñà 3ìÓƒÝU¦²˜H(ÄYÙeóÍK!½Sì¿ñïý‹o<4>Çí—Éú¹g†ìýþØ™Šø³˜Šx¨T9þïõ2ÑÕÒCéç©ÀÍA–Î'FÜG“}lû·Þ ÎÓvBn×Z’Ä‘Ÿ9áUCxâõËû…¤fù «ž±þHÙ¶‰VŒR|;´¼{Ô·FÂÝÊ(LW°êÀƒf]&1òRê·ò±&àeöfª¿_bf$œ ª‹ÂÞ‰í³òN@63HúË…ÆÝs|–N•Ñx¢ãhšqͺ•=_7 ¢Nx=–¶‡/?¿ËºQß@33ŸýˆOyëQ6õ=Dǰ7Š6f›ï_3"õÞG*ÎO…PôÀñUD7È*sÌ—úCtÞxÀ£ÔW]°6ùîn3iÜŸ§n~â¥6cw8AÖIú zsKtL°%ùbØç®Yd¢Ê?nO‚lÅQ¹ãèåhrÚÞ—•§ŽZ“»4QééÛ—\ì§6þÑeû¢µ Åd-U $Ðm׫]g ºªKnJ[Ïé“IjÐx} !-=[0`6­/ò4á ñ¼.£Pµàè;Òžf,ÒqhBø’äοåÙïù¡7oA¥¯½ªÕg0š3™ –@\¸ôN®ÙoÈç(ì´ØÌy2}’ 9üyxGÐÈÎùýÈoƒ°6î‡ìäk¥£·AòcýØ,ãàø/Ÿ¿Ðý-‹¢NÎpI…lþŽñ|ŸˆGœ¼;øú=Úß½L§ùADÇ9[ïw9rfåxB {°iˆç2Ôu†Ÿúà½gü“ˆß»YÈj'™ñ+üÒ³’º#É[b£IYÛÕc‘ ÊàMUÄ…¨\SQð½]¼ÎgHÿà ÁÖéã´zÑz²xÇ‘‡è}æÉº áC0Ÿ£¾úÅÎÄ#°9}b žvÕ¤° Â_«T=V¥‚rgÂù4_qŠd 3®xê§õX“4þ±7¼g¾’ú]™à­~7dÞ¶ Èë¤!jxµÿg?[ùzÄ ï9m’Gš~[ …ämç‚Ðâ• vÆpÚ0Žý2d}+,ÌÐò2å-´_rèLÿÅéMfLYÌP#6< ?UÉR©Ú„#HÖ«Õٶ⫾9n‡ìÚmï M÷!?’våè‹&coÜso$ÛøØ 2mçþý-çA^Éü¾àÛ)â`MN bðÍG)}r‚”Iÿ Ê@mK¾ÿ¤nš÷5 #õIï¬n¤ŽamC¾áË4>Ýûx_O1$}¸ê¯Ä|¾Ï,ö 1"·"N¥ÏûÙï»M{@’2,ȵy('ë}.dü˳"Ò Û÷j5[w×…†5 „ÄGïÒ$³Uh^Ra:`ýH¬Ó¢š/‡$L*Z±WÓ^?“[à \4¡¾ý$F›7pq; K“^At3ó¨zä´¼[×Åpm·l”?$5-‡,>Ò-YáO¶Ý ß3 ¬ëÆ>©‚<¤Âón[Ò"æXklGëÄÒE×'@ê3ÉñGɈ?šú1w©Q»:÷¾Ê)ÒÈö‹(¼p†ÔôË[5/{©“*ìhÅO‰AŸò.®@]Eo1I?UMX[CJ ³+Ñ”™õN³ÑrPOoÞüýÝÒzú»« KÛ_þÎÒEº§N|ÝjSâ¹–W<¿½àá×.¨ ¯¼ZDã‡jh»cÒY'N¨Ør¬9Џ[ãå÷&Í8ÉÆIqÔ#HmûÅís0BQ€"~|©&¹„¬â”HÅ©"Ö˜^Ý–€ª‘gdYéæn›‹úó òc¯²™ÎÒñ O™A˜;}øÜc/ éý¥¿éÓQu†%¦£Äùy¿î5 ¤¥iKȧí*Íž†DÏ€`Ê‘«]õÉý¯ÍžòE;Éçß_Tk&­Eý¡Æ:Ý…deTô¢h.;íœ&ÄSv'Цí”ëÀíÅy˜…/Ñ^⃚ӿÕ*ï¥åKXs;­'¤–Wf%;CR5À/û÷OˆºÛ2QͰ<<ÐÆ =¥ø g+ünL×ôn\N Ï¡qŸ KC²›qŒ1HÚ=üT¦y,¤Û«®{&@žvÞþÂ;ðÃnïØê>‰¶7ôUŸÿç~$>òõ± {¥4^°ÚòÐñº6 D](Ú÷7>+è¤ΤapˆÅd ú<.{~ð˜˜È‹Ÿ§Ø£Ì*_}2O»ÕÉÊÝÍ;é1ßÇM§¾Òû+?ê¡yñ€Ôsö¯!¢¿›ò`0d sé®è__¦C\=ôËаQ Ív!ã!2ÓÍ3¨ G-]üëÓop¿mvÎÂâ*œ1Ò7C‹Åð%ýë{’ôUŽtÛ QÄ(/&}áVk·‘ÃÞÒü`ÛGú@~ÄÓc¿ 
9Ò|è=§lüªÑhʬáwý©Æµ“—Nƒâö<@ã„oŒakÂ'ÑÊ7!ï:jU•ýˆµsÒV_õ¥ŸÇòÝö^ú› 8æ›6’ØÓÁSý5|HDgzv¹{åû(ù“>¤xÐÁÑ›&’úïu#qߣe6rÜ™f:Î÷Í·&dæAôl+ûÞvø6«F5A:7ØR§#”]e‹…íý»Ö^ ‚4A‹9ðÙ™óÇlžžkïKÅó?Aò@QW#JëNC0°Ô°>|€üAý)™Ñ8PEölƒĮg·nñ' þ“×øç>’ûçö™¼ð„$¾XfKóâêaE­s sj¹q™LpûìOÒ¬+îªAãâßL„½yÛwºÐç®wPä€õxÍÚëp†µ‘äïy´]-jánfÊîÆ¿ªËËfZ?¶Œ+¯»o†·–ÙÄ;DyìªuÍϵÈ×Ãá‚j)DqOlš!~?³/‚áINoºœ#Š:Zùš›o&®,¡õ°&lܘh^É„ƒÖA’3*$q¶(iÞŽ§Q¥ýÒTÉ~=¾ðìæ,§Ïcçù…ü@·]»W<À~eË»ãЬ¾Üþ÷HÚo Ò½*:˜ÜÉ‘»~òYYÓÝà_ƒvÓøýÞ·¶ãQסï ö+†äê^!‡Äçh­>ÁX¸Ò×5Ud,ï:¦ú×¾¢”Úìø~Sv·6špíɲ;/Îÿ²³~…ŒŠŠáåzÑŽÆä”Û«HÚ¼S‘«º/hò,OuÿÛÉþ†j1ÖnæPˆ?*ò8â?uËUAëï/è‘nï›s>]p¥íJsÉükÖà…÷õJJùBóƒJÕ™†,PZeþ_¯µRÁãY¿þÔgôX'9²ùÍW$åÝWIÁËYõª÷‚±(Óýƒ½£WÞè‰Öƒ¡ï–ùÓšž7Óh\¹×Vwïy°ÒîÈ»¿œñŠëºe76Cx¦ÿ¾Û—×€ú©ayî‡%FõÚóŽäo~À.7°IÙôº-› çè»iwÉ& úí%yËTóëD@ºwç {RÚçÂqŒŠ‡xßÑ1oz‰ÁûXPýÅŒ$¼ÿâ²¥’ÜãnäVмž5qéÚ|±d ’!ÚÕ™¨o¹"þÔt ´DmÁAPAýOlõAb5ÁàÑ»ÏhQÿW÷•Óðùš“§1Gˆ iVêu |¡º¦ÔhjGÂtU_pÔ5„ÚÒ<ªÛF&3q[‹qÀ!È –EªDÑx¬Öàpüš8pz:½ºb2ìeÇœ¦sãѸ2@ðÂe8ÅÛï,ƒpûMÝ+ƒ”PÝ÷>“9Çž¿ÃÙ¾¤J[QdßõèÆn4.ðéþááxzפp§Í«…œwæM©‡|‡g×;ug/WzºÏý‹gsœ.u±ª-Æ×_ Üš/€FûkîAÍþsî¢"Ei‘ÛÀŒkvlR߬¡T”¤I’ùý5F‘htò$ÇrðÞͼpêxC˜·âŒþ®éË {æm–ù™æWï"<Œ ~A´C‹ìw²O‡÷Þt ef—ò{A”~–Úïl É´nÆ/‚èóâ3Ùm¹ålÈ¿~ùÖ­ígN›²»q12Îf¼¥íìõgý,íQÞs`y"¨YSŸ|¹‘鱯'_CìzO;èNd/ÚÂ+‡¢pLÚ,ÍôyyªÕ¹
S¶ÛAÚÊNhTèY³ä5ÍKß+êEM½T’É6’š0×kšŒuguqéáW/\ï qä[ù«Ò>­çÉR¥Hvi:çä~ q¤çY¥-#!›rä$]mŠoôˆÓÝuz®: ÷–xÄÞWž÷t‹Ü ¢_5L%2äL¹Zrä¡Ö/%˜‚ f ‹ØCóäq›*'€¥}ðЕ‘·@…•:níÊ;Ö°ûÌôFˆvÔoŽî½óP—ñ§Þôä{ã½.C”C~KüϽ±ãþÃùàFÎ8~Ò?õýÿ¬Ëo¸Ñï®o*žCR¿ž#ÐUFÍ]ŸGõ‘¤vBPêwÓ³Dv[QÃ^¤¨cçÕ•[ìίÀ¹_#;ô-ؤȤ³"gEݯ3l¥ñRE¾P4U‘GE}=­6¶Ûî>È´¦õ#Ù0cc$u¿Š7 \ ÙûÁ»-èç`Él?­ï›VwXJï×½›ÆÃ”Ñ|‹I[­x C¤¶“´e%7žr¯¡‰¯;qÏôõhn´Nþ2½b»ôŽkâHHþÔ9IåÕù Vk¢Uyì„×îP³9Ãvñ¬Ð0OQ¿ÁýS÷.=ÝnÛMc¨aV’Ú4Nþƒ;jFóºŠ[Ï“à¥#ôßrh{ø&ím{$óÐæ¬7îö²aÂ1ê/Hóª9<êøÀ öù1çÃc×¹o:h¿Ñ¬¹Â5ò°Îê˜ S¸ªðö4ÕËKТwÌ{JðRZÞœßÅ3ÖƒwÌA»w&m÷™h¡­ÒéS‹_@èw7`–N:„e§¼n†Ü íÏÂüßâEþ"¡³lS ”=o•«ÕwHÔW_ÿ¬Ö¥ó÷Þ.ý6üгšëе!#> Â/Iž§W~‹jÀÑ9a žmŠ*÷Œ‡týÖ=©#?¡EWãÛ\¬êç*DóS&ÝNŸóÎzdÂs˜\d?œË]&=I%õ=UWÙn‹RW¡Zì5( Zû}ý ”ŽôûÕ¶Ä?þXÚ=ðÒuÝïü‰w gû¾Mó¥ñôøþ¶¹7ß@È„= wî¿ ÁÝž §¾y@z2ãÑxòÛC£™´]¦Oa¯Í´½°©‘<9~ÒÖ6ûMW ž«­µ­MÒ· &»šØï< 5ÑOÂîßö¿îÏÏÒ-Úþ²‘jf…º ^*âÇÿ\/ý§æÏïE' [P…Æ¢ƒË#ÏþÍoˆ]¬31(A’¯Iì¢$~ʯ«½TCÃçIíä± kPTÙ½›{Ç9„1i‹=È«ÖzûR0Ž­<³( þ<}NKã\oÉ!<{l0!Vâ×hùò jêìÁÔ‰ »ò²#Tj}|•u„{[ÖT zBÿ]­¾m-øL8ã¼Tñ“ë!U×AÍñ|‘ º Íë’æY] Ö ™ç2.þD•Qú¡‰Ghü|•σ!Ëžbä¢_ éW•U9÷¦@ðŒ¸ûºÑÙ½kü ¿Ò»<+â«Òs÷˜ÆòqÞ1-HV_˜NÒ\è¯Ïïï·ÅYʰ‚Z6ñ[ÅhË×wé}s.’×§o3H\„ºåu(DlÜ€&â‘§6õø`éñèúkŸ HÁè¬Ëµá†D¨°ó¤èO}˜xkg½äÁ ¦EG¥1|¬”ÆåN´?~ôÜÚê8ÄÎcÄãs'‚ðlKn,Í{ OšÏ´EÙjyÉþ¿×Ë>\qôJœš³²ó@Xm»]Cû E±¸Â¹ ë)Ät–…~„ü‘JnáÝeä´®¢®Q¶j˜õØÍÝh<ð•Aj3ÑÐë ì5Bð>7„¶«Ãßö„È~¢™Û³;hÚÀ”7)ÑŸÃÁËçG äú¡šCu>¡òFÖË…~ÈÿÒ"uSV¦ù¦¥§ùàùD\¾ãõØÅe¤©3mÑ—4øŒVÿv²œ´Ät!T2iÏãÑœó¼QÕ¢Àé®:V9ýTÔ± Kζä7 ‚|7[`–dƒº›JZÊ’!ü'.šÉÐþýà1¬¯k(ds'Ѐ—ÆŸ+gÜguB"Vàc*EÑ7+üu÷B~p+ØZ ¾~IÑ·,Åž=â Í?§÷q;§±@9Þf$+=×2Éhêoó°$õçßëjVüêYS ñщü9VUh>OÙö5rÃ1/¾OkÓ¾µŠ”`êzë‘Éwc3Ù«[Ï UbÊ©”I‚œqv†Õz@¦Ùàk(] Éé4ÁÛJ¡‡ßÎd˜r;éÙ"z1eÝ \£è÷£F)x¡¬ÀöEq×.4OéL€0 šéŸŒ"/Ú®ßï¬wçO=Fó/Ó»—•iûa>ÝoÉW‰ý`&rJÝø÷iËã´½¯© O ^ŠmønAºvù/ý4?¸¨ÀGµå>ëçéeÃ3j½HøB •'î¯Ê/þCšÐ`v|´x¥?i Ñwºï(Âß–Ó+@/‰ÙÆ ®MK%buE^U4ª³~âÒÏ6ŸºÎ…dY?G›<=ˆþÄ ¤ºŠ¼(¼³Q ¼2E_<ÿñœûSåÑxºs=ˆ÷_Liì8Nëƒ"~"œî>Ë0A Îä¹/³=’!¦°ÛâEÿ7tB•ù!MK‚ÎtÀHãymÎqjŒyÑ°Ú Ô“7â`×pHâs+¦¯  ŽhÚ‘¤ÓlŸEûIíCÏV¨¡yDï aáñì­n«DD’™§ÜÓƒ”/]¹†ûüβÃCøèQøKiNŠ'¨¼1ÑóïçäŒe?¼ñ’Æ)¹¿Ž›\é¢ÎzbòÕ½[Åþjÿ|ÙYÚà„Äü„ZÈPWˆœB>Lgo‹%ØšWMV¤_·´Ú-§ ¼ðå¹ yß×ÀtóBu錓|ÔæG¤îöo&“L×VÒ ã¨Ia\ówHþP1…Yha¢þ…>:sç)¨An;½^ПooúHóà'Ùþé—]Ý?Ñs‡¤‰Ë§o©¢¯·Ñ»ƒàíOÿ|dà 
ˆNöÜá•Kã· §©ê°‡œ_zi8(¦ý£WJl6DzßïÐÿùPmm®#Ï^•ëåa¾=¢Å4ZKã¹¢ŽYø3ztH yÂÈòG'ÐÄîqS~˜júE}Ö2¯Œþ]‡øÎ2ÁcŸë÷ËúlD 4Ë8z ’/¯\y jßÙhV²'D _ÔІtÆ¡aµWŠÁz§è ç÷Þ^Ÿ÷t ¤ÜËŽè ù¹¢/Yú5^jyx 4–Å­(¡ycÝ6JH}‡%#i’¬Ü­9|5rG0“*ˆ`Òë‰n·òHñ’¤>»'£âêÀ6i¿³o#}pÎÑøI Æ€ŠÐn¯RU‡l ­5KÐû¢ÈCHÔbMzÛApgÂIËôf|Vô›¡1:žËo3…È•>6—rhÿcrYÇ—æI÷D\Z&C\µÝàp¼%Äëÿè#í<ŒtÐÒ»*<Ôþ.(}þEey5ñë ¤Ñf~ÑÿÒÇåT@Þ£|vèù¤ sœÃÚ'ÜÜ]ÿ^l¥ýmC#Dûä-Ôbs÷1…>CH㟺Ì7ﳜó,#\ñDæ ¡Xiaò6þ)";ø›©¬ 9ò«ò~ĈH™.Zݹ`¯at¤ýh™{ãfNoÚØ þâׄ¦>I³G-@3oìekã(Ú.+òÄüÓŸp£Ü!·Xf¹…ÝÉ^ Yv’Š‹ô ùãŽwõÏ© "اŒ<éŸË×Ó&Æ_Š|§ìÛ¶ÙΣ!i¨5§1ÛŸ 7ª2›mmƒ~Ì€,•¡?Ëh|ÐÄLrøcrØ<><3ϼµ‡æCÂ.»~ø…ƒ-þä³h¯¤A ƒGm†(½‘ă<×S¿î¿‡ùTÍ}]®ÜÇš}íIE„·}[×8ØžJ5xzBÎ^ÒªÃbø X{r·tw0¤‘~¼š»æ¥N_—ûÐx„Iµ®îsÁ¦s $µL'šG+úš§t*.ó»pj|3Ð<°ÇEû†•0¡™ ¤š¿ÂfqW£ù~™é «Ÿ$¯–Ñ<É…ÿ:ù±*dóŸ;=”M?×”GÑŸúË—±g¾Miµ…lÓè±²m©>%{Pw¥Hdô¹šÞ×âêݯAš¶eLrwšgu>›,—ˆ lPŸÌÝÐ~Õšö»Š8ZÓߊ¦LkiÒݽϰî4ÍÊ¢‘ÎBò¾³ ËPJó_žjÝMj¹ê(óåÏ—Ç™BðÊhÉþI›h¿Øç—Ðç$}âû8iƒïÅ=ZžõŒa.WÁ7QµXÑÿ"bÒä‹Ò¼C‘GI˜A,½Ðb¡ØqüÜOÛ߃”éNÒ§ñ >³àv¶µŽ…ìãêÙ‡> €ì¾¢ï¸¹vÒ”ÑîG!îT§`»0…2ñ(a+êQ©c ?%™Ûð†ðfÆÞGó—CöãIUó74Œ÷y¾äÒyÊŒo)ò!%ó­\¶N$µ‚뛚û¢AãÙiÿ]wHÓÚÚöº¹Ùî9ÉEä JT³‡ÂG†NݾÑ8E]¬T3öÎ×ÚÓå%…_£ýÏNµ=׳ò Ý[lgVQWÖ±)^i¹˜ô¥¤7–œY6¥rƒ‰Àƒ:ÚËçÎÉ›a /liœÖ¼¢`ðnšÇÐ,ÎtšþÔ9Šò{Vß> ꦢ/OÆaw´‚:þ…†)êÄ„¿¶æîzCµ×b|äopÏÓê8-†$'2ìôu‚=gº—A)º;C˜Ab,Ó¹ÿ·~–Û.JÍDlFÙ«³Uaàõ:¾²îS„U/|O?±‡Äî©uò—ˆùSOYĤ‹ÄO!讈¯ |=&ª\³M[ѪþaÚÐu„¼óQô÷²+êÓ†¤³ÜáiÒæPÛ !]Ñ&”¥& wÆÑ™‡• ÿOÜ^”ßý³Rè>ÈÚõ ŽÔDC4fHŸù4¾dºfµl!Ï)Ó-7úFM¨Ô‘ÔfÌß9cKi9êôË‚ Îþx"{<ôöfšßÜïÆ1ð"#™€1„•aG?Ð~pé8Ÿo=æB¾¤ˆ¯ò»ÔREþýg¾?|„q¬£Ø¿“ÖW¹AQG{ÍWæä\¾=¥2Ÿ7×&nÏ¥ñÂÀÔm!Jཿž×õ³ÛßõRý>Sþ<µŽŠ9v-ûvNn=ÓÒ¦YÊ3 !nˆCC p+-FXÞJJ–X2•§º(â’KjO/Š: éd¯×Ù=΂Õ9Va1˜¼^8¢Q¬N{CêMs<Ÿd@š;ÃÆI íŪ„@éþlæÁ!áS´5~7§í¢î±Uª4ïïËmÒÐÌñ ýˆã2ÍÇç§£uëþæªG}!ó4ñ§¢9@aOä> =—xú1ýv(ÍãÕç@´wHËpß©h9ÜÝFónEý™¸oµËõSkA1é}®êICºÎ HÊÅLÇ6¨Íoí^^ ñdEýµ^QÏÃÎVäC¥å4•š™î|5vGP»·2™}È'1m„ÞÓ,<ã¡ ’ÒJ©æ—÷¨™[FÇMFlBg>†|óÕ­ì9©fPû¾«¤i¥Ó/²yI`¦ÀFê;ó­GYAR¬è7—¥\bÛ÷\ufhþHüsì$"» ?¯ï„dA§Ø˜öbûiœjÜÙ@Q‹öÎêuÎhÛãð¢ E?Œd”bŽ!õzZIÿG‘¯œ™×í’>þósÈü:[?¶{§_öƒ3·sd;—Ì/rÛ éŸ˜¾> 2ãp&A²ò¢Ÿ~`&ªÞZ|îDw³höòCƒüNŸÿiÖ°ÐÇ! 
m{×ëé8È\UŒçlso¢¢ß3GÕ¬µjÝUZŸ: h©6HÕûx]y¬ÚÌo½öì£ÏÈé]*WXB× ƒ¸øzü¼ä|Ⱥo¯bí‚T‰içê ëÆB •ž ^㬀zß/³ÛCª»tR)Œd†&Ÿ•‘ºúξ-ÂOUÞüÆŠ ÉØcÙ 4ùÀ2â÷Q„ÙÏÔ}C ú­œjû€$<¿vÝ6úœ¯È¢ÿÉÿOL¼@Çd j…«”Aª×Ù¿ ‘÷?çõúŸ=ÙÖwuèTÓ4v¶E€È<äÝÕR4›ýRùIAÚíÔxÍîhòó` ò¡>^cv\†øÕafÒÄw™6ͽ¨šý¥û›½»!ŠùÈ ²‚l%ýµ¤‡w&|ÑÈœ"ÜÞFû‹ç~VÖ‘„…5×õö£ñ²²]M¿ñœÐàE­¤qv×£÷FäAôšI`ZC4²³O˜¼;ë—rÿäšÅVou¥ýí«ÆŸV¡ˆuwkÍldÿ¼j8Û÷v©ûIÂø Î^ºQòŸ~ж§´‡CT¼EoüIðB×ríìzúSŸÆý3_ˆZ®¨;,pÉß㺰|Ë.¡×i?ºAãU6m‡œ; 0!X¶ÈãÖ@ÈæW×)/¤ZñDõ~Nº°ü?ó2䃻dŸµ9HŸÏû+shTÒ¢z:S·}X/&©ÝK¢º6î\yÞ•¼ŸoüÔå‹)ÿ3ï•/p0Úõk(¸Q Ìž IÛ¬-ì@tzö’œä+CC~Òþ£˜ÌɆ¸kç~BÈ´£F=Dƒ—ƒã¥q!±ø´gP‹?ã/”i\ÿZüÕÐÒtu·¹¾‹ ±„©¸€xŸVtßçMÙ¾é¦Z°Ý2µš+ú:¤M7…Ô¥€ºåøü´)ÒµŠ9¢´â‚ÈLÚžœÙÇí¿×I˜ryž,Æ;ؾ„t+Ó8Ùn½b®L³.˜òβ‡Í¤ùÑ{ÚbÓz0ðífïßÖD ¾ó}ÿi|ÛÐÞbˆÎ½ßÓr8)³¿ÛW§„¡9ÊxÃç'³!Sû×Ù Ö„êô²sY÷æØ¹Ó8ô^©mÛyPöû<§ ?‡Æ?ýÓ‚Ð/Ué×!Øuùôï­ðüM2inÞå±_Íì)·ø ñú =—YÒüW*Sëúi1ÒÎfˆ*š9÷qŒƒ$¤5cs™dŸ‹*¼¿ýë¡Càæ–föÈ}éË]9r×h’Óvš&`Þ4Obk]ÿÊÝœñ:­è+¢®,?ÿå[7¤+x¡Üs2Š™êüî[HY[g¾‹´O_ûëà[’ÈLý{ooÚëG~ç郤n„¯jWšÏÜ}ª_qŒ".*î)Ê;”óf¬{¤aü¡výª&RÛÞ9¿–Ô3UŸõàãÚ-É´^Ö¥Ì*^_‡”K×G8y!þi¾Ó¢“m(ÉÅ£v!š]2ÿÚÆsà}ÚþÞû n>xn½ÖlR]fÏíx‹æ~ƒ?NØcq}—ö°Ó{h¾uâýI!£¨`¹Ô¨·Ê›™¢7ðDW®á¾¨D³dbÒV6ØS:ëQ4-LxDLóc-+ë½aÀnxøÍÅTTØå5 ð&¸Ýœ^:ô'dÜ1OÃïW]™!²klÕŸÐ~ýgš}p9ø}T<â»/„´ÿ‚ÕƒšÒ!ì,“veÖôÃ_ºuÆsH“ÍØ™&¯_B¨¾¶åÞ3•“ýo¢Q¥KÍÛ\Í™Œ?„é×Î>™æi;˜ «$¨Ïâi£¢bÁ]«X_·‰™¼ÑrÏacsÝ!5óhHæn„ô¸pkZoúûl¦œl'š ¼~‘@Û»Î< Ø0Ç"ֺ̌«QküãáËÊÑüÈWøþExêß%=¶,;ó.ù)Ç@~/”®_²iÚã8HçfÌÒº} â_ãl~9ŠÖ'ÅÀz¦5@HÒÂËX 7Ó<~AgÂoÈælžù6VÑ;ßs%#fAÔCÐKMö‹”uŽÝžArÆiK¼.’ÊLÞa Æ•™Ç“†ŠT­u;Ëiœãx'v%ÄÛ3î%9Ðr-TÌ•‘ö•9.U› ‹3'E'œö«Šú¦æÛŠ8¬ø±B¿D´—P0 Ô´ÒŽ'AÍ[1ÈG_ûnÊ*2X iJ'.ø»žàÏÜ3j¡‚/ÊšÓï±Çˆñ}­¢Lâ¹xxfÄÌØªùhyÌ b>¡·¢ÿM\Ä„f€EQ÷)å(êmÅ÷˜´)äd‰W· æš×”¯=w‘ïëý ñLW¾Ý=±¿ëÑë…5‘ÉÎzÖG‰þ?Þ}aܺ¹Ù›w©@jÝnRØ@R¶oÚ5™^®Ç]Â_¦ ² ?®ïú÷ç’iJ3_:Ðö2ˆÊ|s üó-çböCp×{Á®E›Ñ ´rvÂlˆ?ÏÖ =.‡üª"ÿÑ2bFŸ•©[ P· ¿÷ÜrvCí’ ·)êEùuÛ.÷ B>ÎH¿ a Â~ñ §%¾J©K㛬ZP£¯ÕÝèsЮ3±â C·„BÂìvŸ hJeõQ»uˆDõ‰æ@Ÿ©OȇÎpÂZ°$g¨V©wÏ‚»Yzþgô\­¾»>ûB¨¦}G]¼ƒO´W{]ö€°c?Í»ɆÎQ-«jˇhºbNˆxŒèš|ZÔÞ|š] iGgÄ_7öÞ°ú„¹µ„7WC|@¸a§?í6ÞxÊ ˆ€ìu3ÀÒ´_Lw& gƒÎ±5Å Ö«Ûæp B>¦’?ó^d÷/§¬é’~ç8¾1½­d:jÐlzXüäñnPÖý™ ÁÅœþ†$Å\q퀡WšO@òÈ5ïõ‡ ú¼1•³¬ñc&©‚¥lßJiŸ£÷·³/’Wæ‚¿ØdþÀÍß;ËÖ mƒ"þ*æ æ¾Ñ !Ó­4žÆµÂ‘LE4!ÆËÆï9Žés¢qöë· ñ¦­Í­g,­iˆ ™ä¼¦ê†Ôa 
rv!꟢žÙM}±Ä1µòÓŸ¹|òŒ:ƒ“Ô!¸ž,¼@ó×–+F Œi|¦ôɱ~ùfäë׳b8ʶÎd: Þà¿á-¤Ws!ÿ\ªÁvØÜôIÕTEÝ_ Q–¢ß¸©j3–ä»iÜ)<òì‚ÎD€ÿ.ë–ù„®4N'ïÛ7âPÝ[Þ´|Ë:å!ùœ— àA 3&Íë)¸C î¬"¤úÚÂÍgí Éó§×h\þÉ• ‰(bºñ×}„ÌÏiþêyÏ \Ø ¼!fÒýKÚ ˆèÜÚ1´}/ÄÆÕ‡lGSp^¯‚ ~ßÝÑ_ú ⡊yÂLËÙ“Œ¢!5дXTã Q/Uöåì¯QÄߨq6rÞfÈü?Ž™s ‡O¯_ñf^R¤²6né± âw[v¼k Q‹Ö ¥mô¾wÖ‹Cº3¾Ê:ÞÒÍázTášg2c½GBzçºûçÝ4^^Æ¿›)8IxÙ ÞL9væ-HÝ:V{—Ïq„óûˆÇgäwŽ%„¢—éKTV}ƒD$4x\çIJÝôdÝsÛ@½PÄóDªŒ»œÁŸùÙ’œORUés{££ËXZï/àºÂ¡Š¼°Ô}†V]—AYÏÙö“Æ›O:K‚jÓl íÇK‘߆T÷ɸ›7i}‹#ÌDPÌ­Ž4û}1R¶‚l}¿ªŸë3Ñœ¿ïöåÕU¨úrcÙãÙË‚ûv Iœbn­ø}çœ+Ôïx°u™‘!¤äqªÎ¹Ñ­½ÒûÁƒ<±fÔn³ш òÇ7ŠúƼ¿sÌ$+˜ò®-hîÆÁ/ÂRÌ+%ÉvÞN6/]HÍŠ/„SÜHÔ¬ûN”ŽG*íä p£U(Ãb^ 4C?ÑoÒP½ÿÌ¿ùó½là{÷ÍâýEýîùº}28ÏBº{4ÐüþÝÏüUš}Á=uaRùŠ%ïìÿMýdÃc˜Œ$ÌÛ,æ‚Ä¡×Z“»ÀêtoËA æÎ½rp Ø=i159Š~\*:¼ãw/Z«×:¶)‹eÿÞW"¿•}~>5¥Û§˜uý+ŸäMßl!æ'}±¹fBóÁô¹i^Eg_!‰·0™’jn)aÝ»Hóå—Ûöx!š~¨³ I9óvÇ $ÍùãSro¡"zt›xr ([^ÃÁž§ ŠÜšµpÚ H‡3eê; ÆÉþUÅ´~<êœg„šÝ6sõZ ´8¨sô²„Ü «CißÑÔ{Ið‰Å!VWÌa•îŸ%<¹áïœp‰ûE«0Í_ÚæÝ«N€¨s,˜„¬”Ûý®$2¹x«¹Ê!z_;ÅŠ[9éŒ6ÌEï¯ñ0eã³þTði+„7ö_>ÝîaúyÈß|š’ÉŸ9^ü~–öÚ¡Ì~)ælòÕy}.½e®ˆCKÛª¨×{¢þóBƒ“ÆÈ¦KG[!i•CÜ£˜¤¬PáoZ{ÐfªÏoÂÆ˜Ò_[>ѼI‘Ÿ Ì’¬ß$*\ÿ—Éåá$·ûpéä$Ú_*üg3ß®Ó~˜i·t„Ô­s®¤[ß2*Hv·UQÍ V¦y‰+3€RÕݯt”u@ù3¯i0„h‰ÅÖ!­ö`Ì{_PÍ[Ÿ÷Ù ¹Å´ql"ÈþðÉì&Å|ªzIAá„^E÷ÿñ¸¸â J£õßà0]ýîí'Úö03öо ¢?¼G¤È_ š™ö>+bÿ+ÖÚ/ü¿¼'¬ƒk;ß-öÿæßŽÿMÿü’á§ÿ•忲üÿS–‹ÿ=ËÿïÉÿhkÿM{óŸ$Ãÿ$Çeù¯,ÿ•åÿ(9þ5¸ö<ÿz9þCõä¿rüW–ÿÊòÿ›,ÿGáÚÿÊò_Yþrü›dù7Èð?Éño’å£ ÿµµÿÈòoá’㿲ü{uäß´ÿÁ{ò”­ý·Èñ¬/ÿz9þ+Ëeù—Êñ_[ûÿ…ÿ‰ºò_Yþý²ü[äøŸdù·Èñ_[ûï—ã?ñüü[äøŸdù·ÈñŸ*Ë¿EŽ¡,ÿµµÿ7eø7íÇeù^–» ÿ©Ïå_(ËÿQ¶öß$Ë¿A†ÿD9þ+Ëÿ}9þwÉð?ÉñŸ-‹r×Îû*wQ¥¿vßgïâàFÿýo×??ìuÀÕa§ówçýûÜþüHe¯ýv‡½Ì…¿éët^ü¿:ÑÃòþ¼-9äÈ•ðâˤÞíáø±¨ï[°·õ÷VR·ìŇk* /½³³Žð´Žì¾v¼5V36y>ß0sçà.9ø~•„žùý÷çµýÌßh´FU]´ÇCp]¿³„Æ&àøêŒ)Vý½Ž3?ç|”0ÜÇk7‘¦#È›˜,s’_Wò+M0´üÌ@Ü=àê~•?ž:ËÎ7‹nýæ^+ž¡¤gšÿ/ö4úÛu˜Ù ¡ÓP=|žã˜–}þÞ‡ën}û@† W5u<"3Ñ µ|™¼ð"ø‡;'âÚéÕ!{ŸŠ kŸ²ïóy9¨Us\´/•ð}} u.gQÈýŒ!hhÙ¹ïóa°‘pŠLMujNlÝc ÄUfŸ´?ç`¿ØN¤ÁÏÎtúÌ(‹—1Þê§þt¡è&øm¶lrã@8qvàªñ`÷ ‘môF­J†l‹‘:„f‹4–|Fݬ;½ó‚î¡(õÄ"ËŽgæô˜¬Úb›ZPsåØAÓÆPu‚óÓ>©")ÀSÃiÃP‹DÞšŠúñke6u/¿Ü¬ñÆðîK ]Fû8Ÿ '¥NŸŸÅÞ_AïgTæíx"õâçWà}²ÚÇl¼4´° C²¶ñ<£ÆqàO*Ñš,ûEšÒ?¦¢îJ%‘vͰ½Gq¯ÙÕνj/Y¾'wgÜœÞÕ/“<‰ œ£b Fã¶úSî[³Á§Ljm¾A°GnÜT®ƒÚWW¬ˆ ýœÜGª|Ì_¸ßgÉ j£,Õ¯iY Ï]¿ÑÕa%ªßkǦ?Fƒõ 
Íð’¶•v‚`Ò§·~£>€ÏýÊéûS‚†‚Üz–nùBýú4üúfÉdp¿E|xºm<8û—ÕŒI»At—_'¥ïÁ}”£n]´^}ÚÙ]Aâú¦óÜ3Á[R–Vù2¬ˆ‹«$¯û@”uð÷2ÍÈÕ~vœY_4mRÝsåî6$*Ç4˜|ŒøW\¥œQ„9(3yÎ$”Ï~ófê+ù±S×ëÐíiˆ¶TMÔFMšu·‘KB ·»¹èTï© ºÅíïp•tü&/ÎÛn”i¨[oÔ™pÅ%9´ûæVdçý³õIZܽÁq%ƒòÀ"ukúUˆ{lùXç=Uj›üSf‚ç>q~ƒÑsPs7¦·}µœÑ÷ ×/_à çO/δÞïì&ðT‚&¯,ý{ÿ†+®Îë”|ÑxyÂù AÍ„5üÅ£ñãíQ7ç‚wì—Í(µ¾Ê¼:Ôß»º!u`GVÏã86·7cWò·à U¿ouF TvÉ„Ô%Ùôù\Ô°òÈRØmãฺ“„5˧&ïV$iœùìðÏšëxî|®dÙDS|Kÿâ‘Ú¨O>eŽ^çÂÝCªç^ì3Mž ï1¾ê\J ¯ÅU›éqùÛºCò“{Öås'Ne÷}…é"RsÀ»hAÄ-Âe‰~N[舚½kƒTI™õšÑú]]Ñ0ÀoŠpÅ:T³þL<;iœél'’¼Jß~þ¦64N ^u7¡8wd¢a´”é=FS;ó‚è¾`U<í6é ®W§7¨~‡àîç¾QVà‘n\o¹ßo]/¿ÎýS÷÷û€³‚¨s±~›Ù3Ñ`›Ùpø#Œ§ SŠ'Ñ%gNÄFpfhç ÀÊÓ û$Z…Ê»œ‘sÁÕRLFã[ž÷ÞúË}–|ÇÿÈÆã.W< Ç£Þq¸±š£JÔ³´GyNØ_V5ÜZ²žäò«ïô$?qoðõºòúAÇ÷„)¨x~ì×»¯}¯‡úÃ3W(ß’˜yO¢ômÒ¥ó³÷Bp,`ÍRávÔ¹µ^rÛc Ñž¹nûìßÓÏ×£¥AÿI½QçÈþ ¥ƒ÷|œx‚]MOöÌP‚pfÖŒfëkôyº;f«xÕC"6TÍ×ÔD7`¨ê5æ-^l‡ºb‹îÞ_–ë¨ênÑ­°œÝåô:G­ç­ßsűJ¿+ú¡aí€ÐCö´½ìÏj¨áÁ©ŸÖíO+Ô|¦pÍT4dç.Њe#ªùÛ›âaJ(ÔÎvh-“£z™úÜn$ÙÂve>†”Y|œ§–ø ¼×ù]ÃÏ ÜÇÉø»™"z­œ7lfrÓúÛ> I1[£æÝ¢õĤLzìø›Xî9}¬âí!Qž'Á½qSÀÑïޘä֧!àt„Ô Ž Ê½Èwf¨XŸtëDô‚ÖÑ(–?oW0ئŽQ¦c\!Øæª{¦lI@ÐüÔ7ùhÝò%aï0ð.”{$;…ÉÏnæÓ$|4y¾I³¹¦7 b¸ÕÏxù]ƒ ¬ýÔ­ÁoP¨eueØå"oö®÷…Æ­g½'/}‹&A®¡¡Ò&œ.(›UU‚=ø¶ h5ø £'OX‘K’²Þ.ÙÜs'¡¾«$Äꀲ<ž¶¥ÝsÚ¶O[yµŽ³2÷ÿI„d]Ï›¹Qhzøcã‹^J„ÝmÍ ‹WB~_MÖK¿ÃÓyís*÷FÖžø|…ŽAÝ9c²ÜC䨺t=„¯^öy1Ñ/¢½/SCP’ýÔ»Ëä jUή_Ø AÑlçÖ;´|çŸ×~núÏz$Úh¯õ`¡IŒÞ­ute)ý¼Ç¡øvëf×Íc ÿ—ç€Äu!ÌòKÏ^ØË ÎwusÀËÕÜ!¼Ú#úÆyÝ·IÍ!â*À×dÖ+$¥c÷Îü°h¸Ÿ²ç¿é!ÿÒ$³•,°Æoµðý°’Ä•] ÷žG>%ÌJ¸ «K"~Zµ´oB½çÃÀËøJx½]á?zè¼ø˜ùÜ_µV,ÎúÝLòï¡ðVÊ 4©Ë,4µÀ:Êg'jÛÎ|ø®Ùí§±Vm³¼±dÙb4™UßxñÕM¦U¸FYƒr^ê{ù2ýü+æ ¯_×7|y7 ´q:ó™¼òsêõ[‡VÒ~sAÖ‹Àø¡ß`†¿×WÞíxòeÀA°c"×Ù1“üNxúB<ôJ7£}ï >s¸ßºy‹ÁWIy6àÅ+p*ìW¯Yf^ÇM»}^£q©ÿÈGÑiÜ5D§¬ 9š¹?+fæýƒgIYÈ«ˆ×êÈ#µ1A9×HEÿÅjQ ‡¢¡œ÷†§éŒ´Mi9Ç‘¿óÝÔËökÑÈú´ÍfÚÄ׈ëÏ•—S;ö›+„;½Kký5Cw»Áqíhÿâ¥kˆê¼`ã{Ñ0×~`â÷wÈøº ‹ãéEhÝ´ípÝ,ˆ¼2?fßÁS“Ÿo€ó|ò«ÓßýáϾörÁ2Pe!¿Ó†]E“¸|¹is F‹ |ð£¨çYß;r¤k¼½9•KM67%àˆW~ï ¨Ógå85Ûbºg¤@xŽey9`øl·ÏË/‚òvö¼-Gý.æÅ[ ó¨@æ×Ó~+Θ‘¬~‘$MÇtºßƒW„WaÓ[-w!áNY¦az ¤?3½vjäq"èïìì5$—p'š¾ê¦Waû‹ÓžÉns¹~Êò1.Ý:wã͘¡¨µYVsJ ŽÃÉü06Y~oCC»è‹˜c‚ÆYGõÎ=vxü§½.´Ÿ*•†®? 
Êj×þ—=_ÑÏGË( ]$µÌÿ2âmøaWº]YÇw{˜WUÐvÔxOÈÕ+Zþ噟ÜÓþ#¶vgŸÔk`9‡‡¨+ *êF<¢qRQôÔKjèϾ\ª…ºqÁmMý\Ámy6¨¬;¿¯ÐrË€q$õ©¨{Àœ£$xÅ‹‚cñrTEò+®ßõDÉÔÜߪ?‚ ßç¨åê°jÔÏv÷s™Fª64ÛÞSWkÍô¾#ÖžBCf–y|7°•³wYÉàÍñ0o¿½à¿ìûy³Ã)‰®_°lnCóƒvpÏö›vòå%|ÿÒm[…?}޼W7¹ñû :Êèð€)¯Á£ÜŸL?¾o¯|óé{Àð;ÇòýÚÿô¼s‚­…Z–íçô ƒàÎ 07e8YƻއëgwAã,i¸µqõƒ‘sP4¼¡Pù›¸·26´Ç,wÍèž.QqïÏ>s²îæ¼y×mp»ñ~ÝØ@xHŒÆô³+!ß/+›,­%ŷךâWMªÞòõ.#w¿kË#‚†®ÚÝ´LP:tåxgîP’?pUZŸ- NŽ4û½ßBË3ûúEhÐó2Æ·â íµ£Q:g kPЛØì¨i*Üîbë nÁL• 1(2ßµbÖöá} M h)H ~\¥ŸÑ÷ë)4ô>qýÞl;q1kcâGnÓ04¢qs©Åë~ƒW€÷%òÓ¯Ÿàǽ® }ù_š­G6.B}ðËÛÔÿ^Ï 2‹>S$E^ÏØ¨ÞJÞàÏÒ¹×¥ë pΆj ñ¹ þ ö¯.qWðcÝÓÍo5æþý;þÑey§ª"Q‘äþÁ–ZõXÚÏ<^âØ-gü'tÉUg{_Í-¿h¹_'ùül=Mšú\Ï:|ïõEp9 •sƒ¿‹,EçHR÷QE¤æÒ”úw{¼ó3†õ³¾Cž_}ï¯ö8ˆ¤Å_‰Ý?e+áéÛ×ïb/ä•ä\¸ò†°Ò˜¯Mïи) Ž+P³N½V×|êúü¯·‡–3'&gØëÞ \Æ']ß5‚¬¤ì ·¾à]ëæš§÷ƒæ_’[þcòÀ:YÐMrâ87'E7ñcÁüÖ@µÇ<Y±ä”ü­^3OîvGSáàäÄ/Á‹N”¾Kš†ô”«iÅó òµ¾$,ž@ÛùPi€= Ô®Óž? B~ÇWV4¾žû¨ïïG>`ݯ=Þ­¾Eëóѹ´]MÕÍëÎ;ŽZÝ×ÚÝ9yÜ·`²ÏuÔy^yÚ¼F¬«[§Eûyþªèh÷—¤â`YÄJÄ“vǾqDXâ¾nÉz|¿L|ûÒcö™¸ÕbF68 ]QàèüòŒÈPkðU"SþÞ%ýÏNÇ¢Êo@Ì0íL(¿Ð…>œ£ÛV-Ý9Š”^;*ÛyàxÂXá÷¤ ïY6– öõRõå£Àk7ÞãëΔq;—\oºJ©=í_gèm›yèïz|%ãek·>e?¡ù¾ÒcÔ~ýy!¤.øm¶emáhºôþäµ9x6:6•Û‹Ö»œ˜¶Ã“D%yÝ\ñª ìÕ'TG\qûÖŽçiþàŠ—×nÎ&+F¸õn¢È“ǦÕú…¤°)²¢eÈzT­ËÕŽD jÎ3y8vFÍù@VdèEj¿%ioDÕSž´“†%AïüÂâQ¾{«ËÃëèŠÆ¥Ÿ€p™åƒ ×.¢îìØ gNAÔç©óÌœ»à»—MêèaHã•{MÐx{²º‡IO÷kéï¸ÇúZ 8×ztÂŒ_¨q¿½x̧ƒ}Þ]rÕâš´=©YÓ÷¡Ì{y¼kÖÔ% ãZÌþEj¶ÔnõÙÿü¬.çC”ç]¡»±/DåN8²öBØðÀj¶ï9W¸¸F—Gók+'Nüqˆ^$õZç Êú”¹Æ[oä§Ýµý(ÔöÎ1 ìgêó¹›…\ðÂ{ëÍà$õ½qOßó¤ý—ìrä¬ D8a¼ãá É¢ þ¹“ŸHÉ£ÄÁNcòÁž"¹õ…–S.Öî¥yÍ„¦Åž4¾^°ŠL˜ªr`ªÎTO­õòùö·v¿qsÖàGÀõ§s²P=L«»ïo>é•mž j)éÈX£ êÈð¾•ñIÒX5/å š^(.ˆÌëág“×Þ—ãÞ_÷ºÔá WQ@E’Ä˯_Ðö8°ãÄ3šŸüy³µ ¸Ãϡ̀þÚÝ{ôtP=SÎß7M§¦%}݃ê­ç«§¿«_ؾ’}ç/že—mž`šF>Ù¾°0×9JXÖ‰êKú’”és_'lÏ[¤ŒU%ËQj3˜ûô(i̘Ù » ΢qwµ©¸?êzÆöU÷Yë‡÷FMýƒèõÀ;?îV¹ó4“´×¤ÊÀRZ>­â;x£Â^ô²ÛŽ&þÙÕ l/Ñç¾Ô'ddøs(ÝékV‚½Ñ¾ú ¢¬g¼ üõ÷þìJÛ´‡ÂQ³ežõ©ŠÃà§ ¿6=a(x3öhLœQ{ЦæUˆ¸×”ãî¹ãÌSÉ¢,³màÎó¡ ®2‰~}5ìüÓw ŒÜ ”Ð>¡ûWU&tŒ}Wûy‚ïñÙï]I÷™ U|‡ü˜¶Zoç-ÒXï'øò‰ðž.·NjtÇ©aäÆ]ÐðÎßqà4ÚO8|×ú솺!ªµJýP6çËKÝ—àïŠËõýº ¶ýF@­×«ø;½Æ€?3¹ÇdÚþ¬±XÄM—ýx¡ÑRÉßuù‹ sgÕßך_Rµúí§m§/³GÙ¨JK›+,pŠÞdØžKw~ù;s;z¿UŸ;3ü׆Ʈ{6¢éU…Õ”ì8pšK*BzŒ'xlDÊ~5šLþÙcE;šŒÌ^Ü»-zŽk¿›„òUÛ·ÇH¶wúÂìTš·—¨„¼Ciû¬.‹äøú&…\jwæiYÜt$êZ³™Æ= 
†%kkŸüGuíÒ•??=XçN¢¾²û·ÄÙilï[9pI óÎßc{–¢v]ˉÒßÓÉ[ËgN—OWƒ=ÝÐ,7eGOø¦4õ$ØõSmhü+çOw¾Á,ØëbFÛ‰•JêAgñ!_ð­˜OÛVsü-Û'ˆßiÐeeéSyßæüÜK¾Ä¥„ö®ªEàÌhïí¿À}—6çæ¨~hü1kñÛ£gÁIš§SGŠiÞµïÌ"Çlš·.Šëµ \Ñá«…|»k(ÿ¡GJßNß0#žÔ¹˜O3ƒïjÕzùÁ£ˆ vgš}áøºOi9w•o¾Ü.Œ¶'Ñ‘ ¹9Z…¨?šÛµ¨1.††§§£þÀ€Qb·5ŽØï?¶ ;ž_3n ¨P–J½ü*Z)] „%øº{ø©LóX”çÍÕ*Y|Býƒ·Å¹.` N‰þ0ð&xÛ|W…´#żqßÖÓ{IZŸCÓ/$]Ûk²ÖÅ…]A%î-™Òˆš×®s¿¿Aõ§¥c†Ôƒ³âö…%ÛA ž6Æa®&òÝœH._ˆÏ‘ÇÃ_Cè5ÐmŸ¨ý ßñG‚m’vøgdD£û^Ùñ© ›7ó è¢ýúýÔëøq;¤²)üy7þSñ£H-’ýÝÐëØªj4îÞ2§éÕ*”­voãÓœO¶6GvßçVÊ··nâÃ=ÿ˜#Õ— êÖuÐkV }ã³úøN¯-tÝ™mÈ­ ê苺âd–wñMÔôªˆ«=î€&ÿñËÌ35Qܽ¿uÚCTû÷¯S}ñÐóËßÑöælÓ6]Ú¿´ä¿P9º<•Æ®¿?)ó4¡ýò‰³¨Ü¶aVÒ¶kàyÙß&=£>”S¨]B÷dHÂFXX ž§èƒý B ^ W^ï`mÉ„ðk—ô9ªî™<¼`=„ýìL¦éŽ?Uç6M #)?>LÕ|G{c ”_á[î\ÝÙÖäýÞuNj^4®íï7¬c bƒn¾§‘8XßÜK˜ÅCºÝÑ#±£NÎÕy·7åv‡@1ƒÆàWY÷zJøóû»€J§½Ý7l×bðÍô%ö£íÄÔdžGsÁs}6ey!X™¯zùM㥑šIþu,p-BЦÜpæ¼<'ÇÅ Bö}­ÌÛ¾¿~ÂÅ#ŸÀaßT¾è} ìnÏ‘³Þ¡)DûwýÈñ]6k¯–‚oZâË-. ñÆ=GIÔ”þ¦yƒ¶@hÒÚQ¹…Ü<’É>òÂ-WŽn¹Œ¦‚½ÇÏì¢í‹¯eᵟ>ç,Ù“ P¿U¹÷‰¶hñæÃ,O¤¦©i`pH¾ Ü3¬+ZI…Ër§÷{rå!ÜëÓBÙgu¥Nê­TkKòúª’ôMq‹ xON ‘£æÍnݳá4޳9èg{Õ«î ièhƒ°ˆ§¡Úû.ê‡ç¬°JgÝÆõqçPìtqœ>ÕIzÞsûÐûúnzò±¨üšjx¼t%2‚?èš?T÷½w”&’è›§ 3^]¿ÈÊfÈ¡Á(¦”wë $^mÕìûGâiÿj@õæûû{ÞÁcC+òJÿ~_Ûpýز2ä?bôƒý¼¦%ãk‚®l6£ß ðõèíÙWƒšm±Ïf^9˜R Ñ!¿Ç‚Ãg#Ì‹P?ñIýëó'¯m'\³#Œ…FÞÏ´4©úpxu¿È^hÚv;&Ð"ü)QÑÄr5j='ĪMyAâ–]=°Œ…ïa¹]k ý!‘øöÉj°&Ûî7§íl\‘íM•Ë&©í)NFVzƒÕ{l˜Á̵ô ¸>÷ûWðï On»‚énvnzÃÿ®Ã›«xS»ÃٛŷþÞŸ=£ùÊâÃIà5öÞ:â¨YÌÀao”çøŸËÜö¬h5‰*¾¯çQ/F>ÓúÎà+¿¶:h ÞÂqfÜï×`]Ìxzªêo½Aõ™°¬ù>ƒHÝ!Ý0ž$‚4~áœyè#i ¬s¸yNñ¯ºF®G]P@æ¾ß±¤Æed—Ii“Áº­dÔ0ºâÞÆW¨t†ÖŸ/gý]G@~œvQKEÄSÿ•hZ¯=õÅe°rêÆŸ« ¾Gw­ÓQã3°WîO“Û/÷äó÷­Gý›Àe2UÔNì·ÁùîHZþ`ñ™›ÁhL»,q›˜DR>Lˆ,Ý—€¤•}å»ïvþ”óÈ)(K7Õ|köšðoÆ4}m+FžZä|§«›ÁãfÍz\K¸ßWï]U ÁŸ7+ñ?¼cfóÜÍký¿Ð~á^šK¢:ºJã—¸€òðtÐåÑòZ/ö)wC¥6óbcuPš#zmí:ì}ïï‰íô!x».À2ËëïºÿüžãÚ#¦çc¯±žpB™Öå#5z1L8FýØ[vñÖ>îOóïc=ëhÞ¤f ü—þûh×3ü»^©Ù»˜–+bôï¡ó|+7ÒÏRû!üm4ÐäL¢«v„™+%Çü~†ü^BjÞ—\ =G¨®ý7ÜÅRñ°ôA¬J·ˆÒ|!å{[gÁq|;‘þÒ”TÌÜ1gæúl<½ùÒÃÙEfø¿Üº ”éï# s3Q6å”[é4?¬ßnBÛÅçG> ä"ûë-þÞ¢=hŠ;kè¾E¨Éi];¯¶8ØÞeìÂBUÆ?3bŸÿºûá }÷IÆ£›‡3f¥ Ib²ÐdÚkªjFßû¡m÷o!ñªäcæä(vl*¸ÇwŸ~ÿ÷º”‘á+lj@Ó¹]¾]G @Y„.q±dÑü¾¿Ÿ$î9(¿‰Ç nuCÓ¶oh„»&N‘0£rrîØ=Ä¢Þ3n“’!ƒNšV– mèÑëÎ…oIô¨+['BÍþK¯€Ž<¢Ú‹Ç’Æ!&³¶]E½&w¡7‹ÞïÎõ–¨»Wõ˜û~'”oßÛ½B§} 
GŽj@ãG““•³i=|4¹›…½Û.¦Þ*(œÞÜsFøPê#ñ\ð_–oÔwBÞÝW-YèÖ‘E_¢þ®óÏ×:Yò’;C égôpV+„¢ŽgÃí ¨TÚþò~.8}õ»7ÇF£i°Ñ #ÀݤqþÒÓD°6§Ñ³œÔùíÓû=·ÈbÜ ×Ñ(ÃîdÓþ‘{0õV>ŠIm¯cGÆŒÎBf×é‘ï<‘z2lZï~çÿû†íÎwÀéókH²¸•æg¬\—øþöm{òÃI¼ñQn ¸>«LlDõ` «·¿LMBCßš†í)i¨¹ªzaáo. &_0çÑ÷bÏLpʾ±–Æí“«­ïeA×±ºËu(޵(0Ì‚¦Ñ.îôçœm¦ñr)øZ}Ûö¥žÿ_ëD§SG= švÊúæ;iþ*þÙëx4xÇ;_­ö¤5{ÖnuE#Ãö×|†PcÓúHn ê+NÅn8LãfêÞ´ý}Ñøæ|¯–Í_Áj9ºj•1¸ƒûî׿Ô)9…õé&¤ZeQBÀÔ]¤JMðƒ»24ªtÜÉ“#…ê~Wj§ gŽ¿é¹Å9`ŸÏ‹]ª™{•ÇôÁgQûyt¾Ëˆ8’~“­FI3@±U¬J?÷û††½Ñø@wïù~¥ôþ²Dêg V{½F/Ã× )ÚãCi»|Q¶bIÉTp 3Æ—€_-zo·c¨/Ì+¹JÐ(¾kZÝ­ü SsÇ›ÌÏnùðZ“`_Ê›1÷ž9-~·=6‚“ynÎî³ï+ìë½³hïkð”|îˆä³Ù¶-ö“Àç|[Ñ­O_m Þ%Mšæº»HÛ÷’€åü…¨çÉ]Þª$‘x†¾Å|#üi²·¢Çšõ€×÷;áÅ)ÞX˽ÐíûÉQ¤ŽºþnԊ͈æëÝÜyò(ýef÷'¢|vOâ²™Ö¯ßwt%· äǬܷ¤Ô¹ v]€ÛñvUSò#4œæ™µêÿjïM ©úÿ÷QʨT¤AÈÔ ¢y [©•E¥¢YÉP¡A!ˆ³(CÊ!Q†äA¢Ìó|œy”©¸»ïç÷ñ_Ýå~׺÷®»îZÿu¬e=gï³÷û½Ï³_ï×ëyöÙû¼õ ü eW?Zl_FYX êÖÍeÛΘ€u~lé͇||PÞúx–(Éã7›M¡ß$Àž·ñç 9iP¯]ë @Ç¥ù«{Ñt¾ýÈGñDÍ’˜¢×7vkyzu×pbÆš¤[f£QP—÷lP“M&ïù Ö´ïýea ¬´ˆö(>Î_·O7ÙòÀÝóy·¢¿ä¿û7öV{m,W xo‰[{4ˆÎåae§¿8 :ØTAÎ.%$N›I“úwö4·¥¨óŸ|ëaß;ðºøsB¶RÁ¨·Ï*óÝCžÇ5.ÍÉcA)硘ÁÒCo}Fý@îq+]ò“@ë:õ8³îXù¹ò*ÄAü˜ôím0™W x{Æ€ç:Šêç×zf¾ÊÚÉ3 Ó"~>CÝÝçfRèY}Ìפf øÖ÷.´ß‘'º”O íܯ>Ñ|Œ~ï,º9¯z‚è‰Þj…!”«)^ZªöèßþˆRí?eñéödL)±ûò­{®wQ}5!/ñÃôáë³õ îr+ì=‰|‰‹-WFÅBðä̆›™GŸÑ—ãðöñË™•Ü×àûS¦ ÑÐ=Ù¶ÔÑè1~Jԟ ß7 ÎÇs\׃wÁøÑ_÷s ”ÍŽòeN+À/Ŧ[GÀÒ˜xëâûp\KÖvÓ=Ûî€?UñÞ¹õäçüœáÄ÷Æ#ŠXÄCôJo?™½ž¦ros$IýþÊä£ùЊh[O/ FËë†þ®ñR ù¦8Ž¿æHÔvÇM?@UëÄhz0xµ{^ ¸'ÁT×éï”ã‚9µö©ãÜHðwpg›'.@O¿ÁèÔóào×Ovyvè¢{ëÀ_œ¹­]\r|žnïüpsçð~mˬǶVׂf2…5{{ºÍ›V‡íEp×8ÿi—¨_nòÐo=šõO;…uÊ¢êÊøôïÌ4‚[è—’@æJç•¥iä8¹W¶™áJÔ¦ÒeLnDMçP—+ÎÀE2¯¹W²*_Ä’¯ô9 nÌö3£gƒWzJfœ™+z)k³%ûX X¯xÓj/ :ïPÑí—ÏÐZŸ‘Ýá¶u¿ ÇÅ´]$<w¹ ÏOö+¸1!Í®YY([ê´Óÿ<™wÆþXðlQ»²¬o{@MгWT¸n°BØÅìyè1 ?lþ3Pó¼ÛW§ï£Ð€«3í1KÞË„v3Þ"…›÷=µoßÎó~Ï,í&9ðÞÉxJýà…&µö5VçWnÝÉÓ>(zvϱúÙ‚~ãìÒ‰>„S}Û®#”`,ËYrôJ%]No˜qàñ¾*.vnýk—nŒ‰*¨ÌïŒÔ=ö`¶?سˆ§6¿ïë{ªbÕç*;ì5úm·óXpó7MÎ<º¼Ç¡’_äuÁW¿h²mx}ïéôêûSg#‡êÞŸE饬اjê¢ÄÁ¤¡ôÕ½ûÀ‡<ï×-\B{ÉxÊ|~¾€Ô…¥'û>.^‹Î ·Wüt@Ý7QúQŒ ¸WDÚÒ^<wLô†ég!§~—7Îïí:Ê­Qå e –¬N%öÕ&Ë÷¯&ø~^3ÕB°Î)Vœ^`B0®ŸoPáŸDO™ðª¬ªºÎˆ¬¤þ§”Èþ$}¨È‹Æ½}¹ ”VœÔ¢~¾¶±ÃñàåÌÜé¼Þ¼ éS¶û!ئ@VÀ¤>%öJì¹î¢çÑ%Œ‰`}Èÿ!6nxë£4þЯ蠚sõpm®Ñy€'|®¼o€ÀÜ÷þ¦[‘·ÒÛ{ÏŸ.…ºZwiïø­ZN;·\¥îøJ’ëØõ«’z 
r”×¼`|íwçâ‡ÒúªàO*Õ±š®þA߉½d†ûåë….öÊB÷)Ó@ÿÒÔ‰Elì Nnüïœg‡Qcqr>eu[d6ÉtÈt?ÍÔ)7 J˘À؉g@ÓóÙ'}wÂèolD£ ˆ>ÿÌøÖ¨Ê¾ó]ÌKW|¦ã=¾%ÌËÀ>ùÉò¼ò ´ÙŒWª#JÌFåëƒÝò!]q‹?xÆòîÀÜzQìq<OãsUÌW:£iÖ®)ñG¤QCmçøˆ:£ÓÙuTså{ðе$óHÝéÝrÔŠ¬ÞmMJScPÓzœÀP¹{mrðXP5_¿;µXhûZ§.µùÚQ¨_?õ‹Žâs³ß–Tmp“JEƒgŽ‹ÕC¤ÏÑufž¶G­ix†¦L3:Ÿ˜c±’Ô³²ë’?ݸGd»_n^W‡ºè#'<©Ä۳łUÙ‡Z|tŽúÕ»Ûtã×E‚<‚½iMoÍ”‚UɱUË×t@ßx2ªÎæ¾4[O®µ}Z ¸ÄDÞ)‘Kàä¿”xáüM£³%‰É€ý½\*ÅŒä‡þ†¿êàFp*5äõm õy«·§É¨«ºT×oõ§êRÿ¢é`)õlXåÖJÇ o.žC÷—!ºˆ[2:Õ½Úô«¯J} ÀYðE`~Ú/˃Ö;™RH=øþyÊ’½ÏÞeE> µšu¥v ÊŠmäß.+CSºñÝ^u²ßžÁ‹|ÀP7ê#FkSsõæñ`¾R©y©´Í÷„H«Èk_&€`æX^rµÝ77/ý½’à¾ôðµ|° õmF£ú*®¿oàG‹<Ыu—/ï{^—“¶îEÇíY:ÕÚTs®¦ì/!õijÝB%Ñúgjd2ïo¼ï6µ§ìqœuéÍd=øv£ùÒRtÎlÈ”~p¬…M'&Œª÷ÅÁ=¶Ú`Þ‹ÛÒéy„Œÿöȳ@ðç®#+çLk® Ë÷€Ã—ƒÝàßëkLÜ ¾Gd\e ™'6³Å#¾÷¡‹¨;s]? tuñÐwËFoþÌ nw|JpÛoçlô&¶z:A¼…¡ß—?ßS/ æè€éA–庂áë³yɱL‹/wˆê32ÍMõDç–ÍÛOÕ]•R¡ÚË JD•æÇ¨nôÌ“¹±±ˆèî ùsÁ—hoJ ^«|ô¹’AßœY ÏTXl’¦“zìÀ·$t,Ù&­ñ‘ôDÂÝUàÚVx:¾Ì ñ-.±ÁÞqëÏÔó¨I¾>†Ñú‘ÔùÞO%î÷ŽLÍÖiwˆz=fÐ>û,p‘Ò’5T·—…Å2àí¾£g·ÁÇê6òªš ÆI¶¬×‹kàûZŸeÝ ö·¡ÕíªV¤¶ç»ªëƒwöñmÊL¢äd¶æïÈõß,™Ô@}þÉéË ˜''­”eÜgjõ'£ 4ÑÏÌI>–“ߘeH¼µ9%M=v»Óï½~‚¦àÍž"•`]åÝãòˆà'ßô›n zþÃŒ´­òàoç3_Ë¿HP¯ê=ä Á¬Š Ù¯Ñ+¿@_3‰¬‹¦êZÍ9æëy”&‹rÐï”ÞOaú‘y:ÓuTj;XÊWÆØìNØÇ³X½íÓýß´m(@ïU®ìª%³Á|xÚÝãA$Y?WñVšÏN½þâÌDÝ—ñÅAQt0;t^˜ž¿sկ˲ë hŸpô{ÕЩEgI ç¿íÒý/zv.½3ΞÐH$}ô@¬³­h¥¾Om'|!>þtc„^7"è¿ï8¼»I4|_ðeŸ˜'¸»¶¦v-L%j_~«4q>Üwï5Ÿ^„ŽGu)Ší ˜Ýevîh¡6ï“úýZìK(ÔmŠn0ŽÔ•¿%ªvŸC{T]ò»~UPŠvŸ’{— Îwûx/»ñàYœ1×¼<Á, x€ãÔüpýWÆ^Õ]¿-\ÝP²²÷ý‹ò–]ßfÀ«M7ß‘õàc÷¯»’`xÞv×Ü#‚ê™!RŸÈø¿:éЮY¯ÁØÓ±´i{ "SÃŽžF¹öÉ3¾Kþ›Ä|9q°çwkjX$‚­Óabæ?ü`»åúîv¨îñ¿üvZ/íKâ—¢×®"/¤Ýšh±±·Ëˆ`k¾ýV®E°Ò­¯OÚÜ¿¼˜¦° ô 7’G5|‡¯x8SÛ\‡—Å÷¶‚ÅÏnÝ·ä òSø •óA=cí7®X‚M}Z7 ¢A£l6Q ˜JÄÜ×WÖƒ6ÿëñGG4äñ5¤.§rPÊ8É”·õeŽ`üÖï|Œß2óQ7Ñ¢-ýëðýp44ÂÚ•«˜ÚM݇ÀŽÎxXvƒ™ 1&ù±?èÄxšGÈñeÛÚÐúaz/O-±>¨óžh­iåγq«>í}ÖÅ“3ÎnÙFê=}–ʃD4èþùšxèJ~+òóÀš3!t`… ˜Ûœ;[À¡=ûƒÌã¹—>Õ2#¿apïòêŸDçÁ•G?ìÏ+¤î¼®.A°·*¿ýµž`g”¿›R©I°ÏD«Ô ˜ž~^%Fú±ƒÛ–ª¡ïú'.óŠ$š2Î~ÜõÒ´AéGaéáh_‘ûÀ4z\£×¯Ô'· ø?>‹’yy»E=•{ŠÇ¤£W#&Ú]žÎRG•:ðTÿ\Èü~nƒ¯¡}8žWW¦ ·ÃÎ^¡ó%åS–ñžê‚GX.·~ݶOywÄZ6e–s˯©£õBÎ}+êTo9¸Ú´o×E2ïý.²^É‹¬Û Ä#ó ·pB³ 
zg×·?º†îÅêÄ%"M}`Ù±7ç‰dêÍQÌÉn,ÜÎØ…žý¢žŠ‘z|lÙЙ%Rè<î¾-ŒZ™ÜS÷E,¢q†ßzç8WPV3¦^K.†`´K^ÍQ?Êã®ï“SèW}Ùï”4ضMå>‚Ú¼XW6f5¸IÉo7SЖ¥·´¥èÑtX¼}<野lxðÌ/é7*V÷ØÕöjN­y]AúnöñKÌy;Èñøäæß) ­j¹* ,Ú$;2Ïa;óËE06ëy!ý@Ù—bêl´ª›J î¿úºý/%½—ƒ~èiÞ;5XšÇ¿ïcµQãêpBÕö>eÏ”kòŸ”W\%ºçv&\ð®%hŸ©ÄË-ñhù¡6ßšG”¸_©]»ÌìßõDDßÕ7Æ[ˆžÚ|W‰Û/Á¹p䪛o hâ»2e6.Ë2ÚŠ²>ìÁìÀ#ê`¿ÛÎiðZ“s­,JÏï¹QvH ÍÓgÙX®žŒ¦¤J ÅèhÐmÅ¢.<çyÓÚðx›ê”×LlÚhžšu7³û´@ÛåX8aa SôŸ£ o÷%p{z”ñº0óDpØŸ’.ÏL!u¶ëÉ´õûÑËŸ¼ãØcÐë³òbÈ|%¥¯æJñ=uTRUQ§zBÎw.¨ôòk—¦“çáxB£–ÆðõÙç‡öÒ«)x{mBlVšQkØßç¸cÚžÉØôÇÖöâïQ—i&—¸ô5ž[g×qlPO<à}–ÿ,†dðÝs¾MÏ"ÞŽq*6“ üw¼Æbïå‘ ëÓr:îžÛIéeLv8bt§â«?‡·ëuq‹‘Öð¦õøçj¿GÜ¥9÷ÆSÁ¾8~L¡ð¿Ú¯ÒØ"W?Œ¬Ã†û¿n6w’¯ÏÄû[Á‹ w>éOúס\_4¡ç@é€Ó*°+.LvyvÖà^é2>E3ŒâÁ3úóب5ºU·Þ[Ì3Ü«²ãY?A•cí!ë‹®S扥\0æÛç\vC÷%íð½3‰ò‹´UEo¶Þ1,Ê4U%Efž«%!˜±Àj¬G6zÖ§¸ï¿.ƒB·•{)Ѩ7ª¿ðPùj¯dilvß8*ÇUEÏ ËecUIýÜ­¹»NTÁåÇ«ÜÝ@K9f7µÛ”¥¡ =[À;·õÔ%S²®òU¥íÀ¨ó¬ó“^άÜÅ¢d«¢ëÍ·éÛ¤~€} /ª|sø}Nœ•VváBt\ï˜j| ¼Õoò"õ M¯ù×ë®w.x’HæÕi¯z‹æ€íeûªj/c›ú¶ Æ a{¿SÉèUûþ)§²›#ãÇZe±ÙÛ}+OùZ5XLJ='>ì²îû-R;ˆö¥îê5ç%ˆú¢Q㜭´A_ÔÖ˜•Gžéës¦‘:· ?îÌ"ÏÔáÙp•·oùré8xöÿøuš·â£÷ZàOû§>p¾‡Ö˜Lýò\Ç“&€sõWÝÕY†à‰Ì¼ß¥ _jðÃÕ=ˆþ>c¹t ¸~sÞÝò=_}G[ ðÀ[ʵÿ½&è/ê‚6혃ž'–ªr5`t{gasq¸ßî{ë¥ä£jÕ…@G=p v$Ñ$@).­y¤J€“§H;•ü¬ˆC!Ä–Dׯ6ù'ìM’Oõ‚£QjîÜõŠÆo÷ú˜,}=Á6ÉC'K$ò°Ž Am›áE°ç€ë¶ô¤ÿ±Gäç?~YÌ'ô$nÍU¹6 óR"©d\N˜µÝ2èÝ^|(|†:èŸã¶ø<œ ZpQE,å'WÙDv }rÁ¹Á¹÷Á7¸`u„KCÏU³?™ÔU,ÖƒI«‰W¯Ò÷u–/AwÄèò“[?£Z¯LÅXõÑfÇ0˜^^xþ¼ãåÁ_s휟º Ÿ=µ¾ öÔn­õà½<ùû“ Øçç×x\‹«Ëùù±ß@ý®…ï;@5I8ýåÁ{´~ØæÿR­‡¨ê˜x–?–J4ØŒë Øµmz»{c ¦Âœ–Ë=`Q•˜g¿¡±×-RñÞ^‚{Ä7dgŠg|Ž­H\Þ‚žãeyèÖ’ztÛƒùÇïY€[å+XÞ AСÁ3¥hоåù>j ØùrœïÈzxÖBFçù9Ì7®øü ŒusÅZ%Éó»áŸúÜ|æfaƒûu°#•ÙæÕä8¢[†K{GƒÛ¸¿+^S•¯÷ß^;‘ô}zŽK¿yúÕØE1d‚+hE«ØS£!ÿTå^˜êY‚Øu§Æ ¯g5%|]dOæ)‹-þn¥Tð¦‡V—éU‚Ýóó°´©Ÿí–Ÿ×Ùu`LÏŸwEt RËC8ᛉ¶g꺛ž"øÅZ”vß5Dhiqših1Áû™e”"‰È‚Í·©šcÑ}Dmz¦îi°›­:&}½ ÁÑ‚3&k¢¬‘¥wÞô(¯8«:NïAk¾” ¬r~¿ŠÌk°Æ iÒ•½Á?Ré-H…àÎL4ßþaÞ"gûGà™³^¼®½†Nê 3‹^ ÂÛ A7~:£]tÜ}›»û%€ß6v…¬Ÿ-‘¼Tk^Õø_à)/o}Ùÿ¬7MNÕAãÑu›é2¨½ ¹s­W¨§iNX²-Ü?sHúw·®Þ’ ®ìááæ š½áJ‹¿%*î½Ù)~c'Q»Üu޽O5Á;»LâìX)TÙ\ÝÔ¢«Apf‰Æ„ØÏ$èŸ~œ5Ú° ÜK£9éWæ­?·ëE ÓtÛç¤}¨zºöÏ?`J…9îNNC]ÚñĽbWÁ¹èØBµ³2zEŸ¯j÷‡M=µo"é[C=§©‚q)÷`¾[&xSîþh‹“ïWÿE5Q?ã{—ÞV¿öæñ2ïh èFN‚`»äƒ9KIý¢ÊôÿHt™® P½)ÉX3n¨zû$§œÏD¯—üÌGÎÐÓz{=á¥7·­æïWÔÑ?W 
sNϾ[Ò5àéœw,èÑ…`…†ÉdµR¼JîwÞÔ€'ÍšÕ®9²h˜¶;B?ç2Á0Ʀ…öí5Ï“´K7‚×0äpDÎ…è¼1ËÈZ¶éÀhÝÒçà5Åä×Íõò ³õ¦4pÍû¦¬ˆm=þcÐ'RßRYóÂ}¤RI_´êþûId^­óøž{4 —ã8 ù¹àõæ7”‹‘qb)èpî8Ò·e¯ì]©þ‹PM"³æ`÷ʰOè|Ru®ÆìuÒ~Õ¯À±i»C£¨¡ATå)óŽ)Ñ=çõ¾ Wð— üfìÿjÌ/~¢Ñ ¼Øà7»wY¸ónŽš´‹ ^ì¶™Å+Ázti¨òº>êÚäó·¢þÅK)éÂ=ðh×Xõaç'‚u2SßaÎ"<Ÿ3Ó(û Q­ÅHÛ[›N4þ¤XàC´–O^’}J@ðâk¦Ê;Ý":~œ`«”U‘ãrhB’É›˜™jˆx§‚ä.fÄ %Tþ¶èÓM`_ýtÄ}Ô4Ò¿±}qý‘“½óóéuhýçû<ÔßFUnÔsDÎ\¾‰UU üï;û `:ÔÖZ,˽}þô× hð°Xõ»:c…§«h"`xß¹Ún~̆)»öoÞŒÔí %I=žP-}yÌ!¢øÙÝ{{¶q@mtü¶HßsWë7š+: ·»>ж%ÇÑyÓËŽ¡uúÓŽšUQv^3™ù"÷=Œ˜ªo?l%?%k9t·hï?¼c/Ñ»àǵÒg…'vÉ%[£xº¼ÙÏæTSjß>X Úžj7›µäyXo㸸d.x.¾ç ÖÕƒÓÛT~÷ž™ϨOϵG‹‹„„™9éûŸ¾™å škÜíÍe±¨ö˜°9RÜ<ñ²Ø¡]À׿l̉õïñ É·ë:ÁÿLì¼¼å7ØÞi«ÎÙ,gÍ'YÕá~îA=MKÆÓ8¨¿„»ôÓI§Vƒgæ–òª{øUvmž»,Ád]ø*µ‡C<™9ιk «nݼàUNÆõŸÞî¢þ±‘èç™Ã×g©SžÅß­A¶.—Ó(>”ºö;Á:{Ñ*{Y†ôn`é›-Ü}¥ =£F?¿ÿEENœƒ¼Á+ º÷Í’ FJªÓã^iÐtXÙÛ¥Ñ]_!òiqéCxñhµ”¸?9{:8in–ú]ãVžû(~$ ˜ßˆ¹¤/¢W»èd‹“õ%úS©È]p/g$­‘wòDŸÓC À'D~¹­B{lý¸ÿ~'øVk ›ŠOɰo¹”¨W?~=ÏGÉ´¤‹ ƒóý¹¸<Óýë}~lЩeeãlÐBsYpPRŒœ„Œ°J¢ò@l=½ u?ç6Ø´ ×P"hƒËh‚vzQkoÛ ¥Ë_õ“zóÑàì¬g)„€Èüó Yww—x;.ïÄOM×Ó·Á09¡gªKúßòê$©ö§d\$J7Ć€í¸T¹p Xÿ¹ÑT Þ¸»KsÁ4fhðâ&·§ûAàiKR¯QòZ4 /S.Ë%ˆ9Ä“~¨nEÝ^0–N*xü¸-‰ÆÅ‹>Bp+Øñ'¬»g~Y¶íné.JÑß5Ü/Ûvœ½™É 4yO—sG{lêSL´Î,–éaU?·=Ÿ­  ¾™¤rÊg¢Fù OÐ3‚3ËTUrW‚¶ø´ŸÁ³kýþF÷Á9e^x0£Íž£w+O!ÚV[^æ¦ØÛ å=müºe}¸÷Y7¾&ä=ƒ` Ý1 `2hŠ&v%‚¿å°aÁ·çè6)ùvÕyìÿ¹“›aU¬÷2ü%a_;ØwHøY3H>žÜ»r_Õÿs?jïÆXâ ¬X{÷‚ýîd]ö+üP( AõvV­Rzü/œXó¼AƒdûUçA£wý3p¬·®¾ëÂF¡±B½Ëªñ¸’SÍ©ÔÀc|w½½Yû¹O˜I•”…Vàº\y®-¹™çÎMr.À«MâWÖ9õ\é‚ãÉÆ(ÝóS­~Ñøn£{‹GvEê—ɯ@ËÄ/JÆgº‰’5öË‹üV‚wð÷÷ó¡{÷`¤yi0h–Û´Wü€S­¼üǼýµ…GAßuŒr0#˜¢>¬•¥Ô {L½ûõ%”ÍTXÀû…vËÞ]¯#ȼT7ëúXø±ÑëÓ³µÀÎ35Hí»@úë;_˜Ãíî³êÐY-"è犰Í'âÁßYÅWñ&}§dÓÖ#¯Æ€#ó.®w_V¡VaÕBJ<§3ÇÆ¬³GóŒü{»Î¿ƒÞmùì÷yÔHÔUïºLP=w7ÿ¦÷¬ît‰4‡CjšÊdJ]¹Å%® {2mV'©*µMážÜm¢Së‹Ù/ñ2¢ûèüæ¡S^ þÏó*Ÿ*-Jì¦Z¹Ùnc´ÀVLY¤µ‘Ìo38ût*Á›ø™F¯Ÿ Îå3sú7~3Ë¥³{ÁMö)¬£§Í§&Íü—8¦vš<'ÏçL‡«ëþÛ«vMï^hÝ.í'¸ŸÎ—ú:‹àíjf™j Éš™ð›'r¢.ì8~ ¼CGùoßçƒÿe]Å>Ù3äqÿ¸cÒ MÂB?‰•àæeÞ6šqìëëc>/¦€ç–r›/CæÓZܰ¨ŸèMÈXµ`æPýMKØòв6—nyMdêy²?,\‡Ì[ëFïG²^ô€'óå”qô]ð#ÒÖŠhœ"x£ÌÏ-tê»GÕðž A÷ÌÉÖ:ïÁ±øþéÕ`.‰zÖ꿆¨ºç¨Ùž;ì籪KÒn^©çPJ–ʺSédž:Þ!ÚøPÝ‘“{?KAÁ¸_!ì°×]T‚O‹ã7µsÀmøjöl—زu•Iî;À{4é|ùó‡(¹=çʬ¼B¢¹8R¶ªc?Ó嘲¤^°ç^È#óÒáõgÞΖn—Û´º©8% 
=”cÊU9ð$4Úž%õu:›zºN¬½‡oLñ%:ÌÝ0³À’`UÝÏÚöúÁW±Ý‚Ûw&ω#ªõÔn’Nsx=càÿB ïëM¨üŠäÍlÅÓïÀÙÁ)èHoÛÉp‰Ž—(zÞL¿æ·Ò å߈Ä!õÌâôý¾aèYÚ®]Hú¹+†K%Éúb¤=9 =ü @×m¤NseX=±›¯'”ˈO»÷>¥î—žøÝôÚæ æ¿]pÜ5¬Ië\J_ƒ™ÛÄâž_¾á¥QOœKлl—–³†3x 5’W¾øþŠÒo Š–à-‘k8±â8oeÞQÐõMà[ìŒfÛë»,@ís~¹‡ªñÖ¨„sÆUŒà²Ç©ï.#Ïój=×Îh::);x¼1x©žO× ï7/¯KÄ;ûSû.>=)º¬­¥œk¿€ùþ`^܆¹¨•ˆ•’ÒW ýæš´Q8þËwmÓy ^á`‰â0Ç>s‹R2OëíÛq­+À“Õî'ýž~Sñ¨©‡ãµúû6ßµrìz°Ûš\{IꎃLJ)-%À Gìª]õ›·B°Aõ45i:/êõîߊ–g&W<:Ù(hÊ[q ghM~7[ãé{ÑýeI¤:{×ûÆ#x>jqé´ϦIC Ü|ó%¤`í»’5õ-ˆîÏ=jóâΕQIé;ª¼ˆNµÅ‘w];QNÝö¸Z4]úGñp•5X¾û‚.¾v±o;‚%ã¾\dúìÇþ+7å¹Ydïʘzp)ÌÄi‘à²GkyÞ'óCB²›¯' íÊ;½4cóm'zƒ{øò˜‘—†·È~Ïé[ûöÛn¬J]ÏšI_òñzºCiØ|ÒÄ(-If"ãÆWí²_Ž?ÒKžqç8»UÌKm&š÷G/ð’›Ëcü¯š @t1¡†± ïÝ-¯ûJæ‘Y_DV5É€,õvÉ5e°>6¶ˆå¡ã^JÁo;oÔl’Üuç'ÉôEŽ3еuµbü§»èv˜ô˸ •Z÷»g–‡`ZÙX}_Eð¦JN«¶q‰Æk©¤¨íÎH=FúÑè’â—à¿*”ŽOLŵa;“€³¨ÿí¦=8j¼ŸÊP,;8öÈ¢ÓÛµkö¬FPæõ–Ž ]0ësÛ{+¢àèè÷·€Yá¿§*hÏðq³?ù±Üõˆ¢é) |"¨¶Ò¸lN Á=¥œµ¦ …4¿“‹ùk zó„.¥‚§qüûõ¶ŸD'ãöjwmÐe¦û€Â<§ê¹5¸‘½+&X½õKÝ\ÿ(ÐVÍ—˜jôÜ(U·%­àªª83‚ÀòÚ ¥ä›êFû륤þô—ïbz¡y܃¾e\U0.¦-:V†ºä~«O[Óˆ7…>»ž]X¶ÁÁ©ÍÛÐóÉ]ü\›è¯¢ŠÃûÓ_®Ü_…¬‡“­mµbÐÛ"¥;IýXÖ•‘ùê›@oH‰W GUáÃÇ5Ÿ‚•kÒ™ ‹î¼ç[ŽÜO±!nÆaKPŒœ¤B,@WWèö® Z¢3ZÕ€ð6Úó•èZå¦È~¤D´¬ Ù rO e5”BË4âÙ“«f†ŸÆÛú[ ÁÙùI¬ó(É%Å 6=ÁǸ¹Û€›~±ÙÚ§ƒ_þóÆGlG­©{•sbš¶WÏóÊêw“OÉ;s&X—r;šÄÁ||yZ©GgÊQrköƒw\`vü`*zÜ>ž´P ¬Ú&iæt>íŸ]t üÛͺµo¼‡‰Ç «îƒ]§¾T¯äRMgÖkª ÷Û½ó‘ÑÐÌù`=¯Myër|§°æ“2Á`NúV\ Aúú¨Pú:píO®Ù3ëZJôRÓ3wŒoÓ –¦5£²= 9 ØÆOk0JˆÞ2þaµ±ïÁ6>ûrîºih×°W¡”¨¶×ýþ»z]]û&ÆzUR,bš6Ýëµí÷šiƒ›åðuYt j'.ÈÙj· l·Yé)¼ pG.üæwÌ×ΜO~ÿÉÅè¤;h¿¿q¼¨±':â&×E6k€›\²Ïe} ¸¹f©^oN³!Í4´è‘WN,¬0\¿ƒýé-GÑuqr袼hkŒñ:CÐÿÜméàît¶™Ër¼ºö¾Õ$ùý÷ùÿzY™7WbÁœ¾p3ާ/·–.Ø<þºrkŠ©;†ˆ|[ÿʹ·ñÈ£VD±Å‚Ök/5ýéDWöLÅÞ' Jê¸ñ:ö‡ [=E¦õ›TžÍØBÖQÿ£ŽSß}#:²&sƽC‹ÁŒ¦'[ÍÁïl(0ºåîÇ;S%ÍujÚ{J ü©-_H…ƒ©kUjwî2xjØŸ>o$}¡UÓ˜bt~ضx’ ©ç+¼jV~EOŸËíê´Ôj•t;y€ÏÝù\SôC3 nô]mÈV∣ *Ì$fKß|„\Ûü’›¡¯ÁŸÇ忘'úö-vëâßCà}6õkÕU䧇ûQo˜£ÎÆêÕ_iàíp´½Úd ¾£ò‹˜ºQ`†Ø^iˆX öt%›˜ë¿Î}žÝþD‡Ø¬Šç³Â‰Ôw«ÑÐÞxRúšQsøHÌñ÷/QrÚÕ´5SÍV/\^õ‚t§íÊ—þaloÖ5¶`>…@ÔT\¥j¸ŸW†¨uP{æÞäéî`mˆ;¿5õØü÷£n9¸‚¿uš*-£Œó•…ê5J฼?ÿµË\4ÁÖëxwäE§‰e ã§,~ÔòÓ`%üó{"¼â±åÒ­ÉàY™þ$cZåtUN¸‘¨ÄÕš¨ ƽ«ÑÎ`«ê$9,#·?û}eÚ,gÐ;ÆÞ\ç&ŠÊäã§BÕIñ\Œ˜ÛEÔÜÖÚâÝ»Ÿèþ¥?º7N•èhXqó&ª‰±Í ¼˜£9¾í$·3ÆõLÛ€ÚÈG[LHu} ¨­b;(bšùÞ 
½³ã\èÕ‰ÃëyÁ‡â-ç©‚Û8sÍÎð&"seÊÃåm2hϦnÞ`·¼U'êâtùà̦ ÙJ¯Fùý»+ñ`N®vZ°+ œÖìäñ v¾)«2 Ö²#Þo†E}Gy~ !ëÇ}÷wG~ƒs$çî­sIh,²Öx :Ë ‘oHÞLÕ“cWìÍ ý.ðº#û\ ãïDÍ5®ÿ'A™•ú¢Óß¹^.¥ÇV9zF)ìÕÎhð3_éô/‚Ö[kñbÓÔšhryˆ _MÑœz…è~yðÆ#¹](nºžøi xz8HÛº`ÍŸu–¢ý‡ÊDAêXQ׆‚¾Ýh©Rëµ×ôü°k}Gé¤@ÌÚöpB³fL¶s¶¶õ”pp*HYÜâ úè??Œ¼ìài®ñ%ÒÃíð¿h½:¡3ü~Á×%3äɃs·æÅƒQÃíÑO f΋Jæ{/[‘f¢R–nè: œkåÏ‹ßFwÀ¶_ov[ƒ)«¢„Ñ#äcž~º6—DñT‡õlsHué­MDglriõFo®õFÊ?ÁÕ|hqêç,0SCkcRvÜêÛ#"DǺQE»½¸DÃoŠãOAÝ`˜¶ïÙYðÄ“\lKEѲ`ô‚Ö6Yp'º]¼Y¿ |“'êoÔÈ:H˜J|CóÆùI]©È:›#QîÂ@צA-›{zàî*Q+Nêÿ‰’^ý×^Rw=ô³ŸÜþæ!·cÃípôDºZ>óÃÌ⬴Ãk?:ðìš)ØÒë†*¡úy¥¨OX4¦Þ‡¨¡åè¶È™±Þl*h‰îÑ?Ehèì ºsV‰¨k}IqÕ%ýdxãIßýv ›?y´4ŽÞÂLÕ`OÔKUœ{íF´îO~’?Õ›ô9Ÿn¼vð"J"N‹/ Kp<îEúÈÐ˳ÆÜYO°# _¯Ö#¨®aã®E^Cgçk­œPèÕ®òúN¢Ëµ^î¼– (íg]•Þƒ@nñçðRw.½ê[Ѥfà™Î­Yè)(¿Õ¼ð%8“Îì>#x¶R´ù².sÛË~MZÞÍþ7ò‘±D…Ï?¿‹]‘ÂxµâÐ$ÐÓŽrYOÆÉ·-ý ÿn>ïÏ4£Iÿty¹¨az×¼:X®¿\ŸŸºN³äPú?ß¿r™)^”¹:`]ð Piõê̸çéþ`©T,­ íù÷xˆç¯ß´¼]ZJ¼èýØS'jЦn[ñîPY‚)~åìû³ïPsqãlXm0ƒÞ8›)‹ª^ãψ,Åç²!Ši¸ºþ>ýó×=𕌮Vç‘:’ÐÛÓ™´.Å ]MÏaf™h݋⋼ê 8]§«cž‰fY«o«GßD{—ÑŠïLqð’üN,Ò»ƒJÞzƒòǤÏÞ¶Ý]E¥ ¬)‘S3<ªÉ¼±¼×óµ5xã>ËW”ùoæÊ²ïd]ûakY@ú÷È™{†ûmÝùèìçÕ¤?ÉxnÕrx"+©ç¸iáóÙIv…îv`?ïøåfÛ‡ê¼Åß¶¸ ô0%ÖÓê`ç&k_¸GTç(ä~šGp¯hì›a@ä]c7ÎûØ…»ñá±ËÈúXüuÝòV"ïíÉÈ_>ó@¡E›]œjÓÄM¿#k@©=úôl±ü³k;TPUñbõ%ÓÿîæŠPÅ‚†§àªïMÛ²æ™÷'©¿ ­ åv˜F=XZ+gßÉ1D‹¯¿ñ´!²^LŸ‘ãð ô^ÚFòà}žË” ú†úѲ»³ÂV©¨¶eÙá,P”ã% iàh6º.¸0}×w\ 뜣lx“æï´³n#>þyšØFÚ¥ÿ\xÓhÃhÂ4šôßû¶Ó"*'¥j/[†¶?ÓLœ#xÎE¯8›Žf…㣘úçÀ>´ïœÍhSô(­¾uBA]Ñ-7/¥ƒRP4öÆi€«(O}pq2xA¡”ÜÙwˆØS—ÎSì@í•o«½¬¶€Ê×´3 ¬ïùûM-¥ÈºxGÔ‡ t¹ë›æPbÁûÝÓÐrŽ¬Ç”ÅsϲÅÀ»ðϸAO*ÿ­®¿ÃÄí:èk­ãM43mˆî±Wܲës£d.¹Ö*¦Qâ Ñ"ÛOÏfh‡òç>]ûˆŸ~½öE^½|*A¯?k·smŸøw'<ÔúJêÜ, ñÊ‹¤¾4{ÞºÇü)ø œRGgÏ$òâ_ÿ]Žô ]J×V&㽺gòW‚ómÊó‹+’ÑÜ[ýl‰òD‚E9¸ôSU>}kn·¡ ’Fuø¾³¬cµSŠHÿ•áïµO™:ŒõV5ö!Ã˽gõ·d®‚S7ž·r¬,á²ú¾B÷Z°?.Zîký õ>÷¬–Ï÷n·hÒ”#à–Š¶Y ‘úž¡,‘ÚŽÞ뉧4‚»P5–Þ_Œöh¯Õ{kÑs¶!Çý@‘å7hÓb–¢žtõÅ<Ò7»–èyüµéaNþÿö FµÉù*y*ø5G<£?ý¯ã[®à¨3ž®Ñ†Vu²^\éúèÒH#šúµØ§7] ºâΜtèª\ÁIêïåo ˜ã¯ÿ¹@ þÆù6ì=äø–6~ÕæBti%Æ¿ 6EwWbÎÊଋ¿qœüÜ›fpIŸÊ Ê%>lÁ¥Y ·ö¢Ýs.ûvíKRwFœH\²ßsÎÓ¬«Èó­[¦>‚unË׊%ƒª3”öë8¼Ñ²*c2ÄЮÛéÞž™†Þ 1%Üõ`Í®½7!Y]W3?¤—LCÏÞÃŽTåDtùý±­ÆDCÊ×W•Ýâd~s ܱt³eI>+4Ñ»E§ólø:ÍÞêî Ð@ëྠŸ÷i œ·ºå‘ ƒàmêLí|FpVèž–¶ ݾþ칩šOïÏÏ 
O!xõÏ/°së”KÌÇr0ßýùyíà·~m–ÙîÊý‰QºçÁ+q·f–A`-úÆÞšÌ#'¶;.Š÷ÂÆÓŸ‚ñ%s!ÈÔMÓï|w›þÍ-`yʨ ¤Éºž¸1è{½vÍ?^­¢€¶'YåÐŒix÷5÷¹²‚"˜~¹¯`¯ì·Ÿ$Þ(Ë*ÿ…§I]§÷ÌãÇ)ˆç^! uèò+=#÷+Ø;+B*èn¤.:úÉÏ̉]ž·Èút¨åÄCR/6Íö¬¾Ùf³ÓþÊ;Èý’Z‚ÞØowN.u†(|fë)OÐçǘ[­"Ø›= –åƒ5îáÚ¢wv8]þg5Ÿ`{¼7N*wEé¯HUûàû`̸ÿ}ã;¸ÑÒW]©„@¶qnÿ¬ñèq]»ðÑ…_´…Ø÷%wƒöÐ@ÁhýaôÈÍ^:Ë>”7o*ŸßTÛþ¸òÔÒŸ ”iìŒìGwó!ËÌ@ðŸZî>mk¦HÅk©Ý,°JΘϰ/·mzB™ jG…­8?6Å’ z«ãÀ¹a)F ¼ žÆö›¢²Ñ¨ #}ÝU-‡–¿×Gqß9fk<¨'ÇX×d€mºAÁzCªk•ãŽ-]. ÿ“[çA %îÓ6öþŸç #"ÿùû¿‹øß ÿŸòð¿+B^þ; y…¼ŒŒB^FF!£0NþF!#£—¿Qäÿ¥žýß•—ÿ·ñòÿ÷q yøÿ…ãedò22 y…¼ŒŒB^FF!/£¿QDx}vDòñ7 ÇÍß(äãoòñ7 ù…¼ŒŒB^FF!/#£—‘QDx}ö¿¢—‘QÈËÈ(äedòò7 ù…¼üBþFa|ŒŒB>DþýêÙÿ‚B^FF!/£‡¿Q#£—‘QÈÇ?(Œ‘QÈËß(äcdêÙÿŠB>þFaœüB>FF!£0NFF!/#£—‘QÈËß(äãoêÙÿŠB>þFaœüB>FF!/#£—‘QÈËß(äcdòñ7 ãäoêÙÿŠB>þFaœüB>þF!#£¿Q'#£—¿QÈÇÈ(äedêÙÿŠB>þAa|ŒŒB^FF!/#£—¿QÈÃ?(Œ‹‘QÈËÈ(äedêÙÿŠB^FF!ÿ 0>þF!#£—‘QÈËß(äáoÆÇÈ(äed%"2jˆ|!Kþ‘"QÚáì±£6GÎÛØÛûŸU¶Ö‡Ùž#_ þ_ì$yÒÆyá9ç3ÿîðgÑÎîŸDÿ°þè§{ROCR/data/ROCR.xval.rda0000644000176200001440000005012113644317760014150 0ustar liggesusers‹ŽXÔÝÓ7ŽØÝ](v 6&Ø¢¨˜ØŠØ-*¶¢˜("a ¤€”tÇË. ,ÛØ…ú:Ÿ}~ÿÿûÞ×Ã,Üï»×u{îS3sæÌ9gÎÌœ/ ¦.Öhi#CƒÚµþþ[ûïÿÖ1üûO-ƒ: ÿ¦ Ì›²ÀäÐÛík[£¡¥Ô©Ùßv«Í_¶9ÙóoÆœ¿8ßœ÷7\;ý7Õ$ ]Nyáû˜6”¾ú(»Kí%¥¨^Ø~È@·¿)sà¤)”êy¾9à¬}Bùlûç'þ¦"…çO£¿)ϨïIjÏ Šr¡z¿6Ç>RûÂcží(-îä´…ÊSÝg÷¢|z~ôLJ ¦®½J©|BÞ‚µSí(ë|‚#[òÃàrßMXBõê:™¨>½èÌ2›Ã˹O©tâÂ"*ψé<€ÚiZ¬x‘‡fSš8]ûÚe³Þm¢¼”¥q±ø›–XºHùÌ¢Å1ŽÜ ¡TÎez­¡ò$×VK¨\ö©t9¬¸Gõâ,Fgе²Æ—{8‡ÆU4iÍmJ¥½.R»¨Â>TÏbÒÊÅæ(E,ºNýe'tóÀ¿ú¥±‹Ìü _Üì±ZJ‹om¨œbãMJsF·'¸‰Üv9Wõ©Ž7Á‘¾;KyíÁw£‘oñ1æSºæH¢»8·NªÏ;ªTî&eÄ_aÏ“(Ïé¤I øÆûÁ ‹µû'åUö<êç×î@Jý¯·Ø‚ñt:ôüÉM&øªý^ó1þ¨Í;©žQÚà Ñé0È÷:òÝŽñ!o›\Wx_þÜÍ)U¥ú%¼òsœq”O™#:Lð¯Œ-¢þ)®cÖQžÉ±³Çü¼÷“Q¾¸nœÁ-´¨“Må²1tóĹGãgkcPáÅ&MyÀÿN þ/Ù=ðp^gA~&Øž!ü±¬û(å?%rqlÂÊçò€Ü&murÄiO©ÆÐ~6¥âqC¦¯Yßé„§¤ôD2è‹oKý¼‡[­$xÞ¤òäÆ,)~êícÌÛ°ŽÔ/wÌj#ªþÊó›¥ö§üMÍìRʳ–ýéEùôe,È,×\My噿+!_Ò#'©\rþ› •ó§õíBt Z¿t¡<ûÅd ñ©pÔƒ¹/£°õ”zÍ.=Fý˜ŽÖq”ÞŸcŠu©•PPÿ,þÄ4jW0Ë¢%áIqyÌœîuÀú—Õ=ùHwt2D¿ÝŸŽ¾´lñ•¿¿á ‚vσêLI&ñ_ò&ô•œÜEp-;î£úww ¡”op¤9õ´Z·‹ò¼e7–¥÷!_Yw ÿ­—^0Ï ß$>*×]³ xñú4£äMXgyÉÝDýX{ýºR}ïÄ3ê÷´e£ë3Q½bþ–^T_ìÙú¥ò!ŸXGzíä cyÛM˜ÿô Ø_ö1vSª~Ös;Ö—¥÷B¿ôá ‚—bdõ ëàtÓ%”W7X¹Ž»ÿZAío¬ÙÅ ¼Ø#¶ ÑŸyÔø+æ«[×3àë…Þ±þn¬«K©üÓñ”úìôE¹xR,“ú)l}§qjÇø 
)Õ\[¶–ðñrZÌ'z„ûÍì)•üpJ¥~I ëA~ï›A~‹²® öâZ4^Í¢Ã*/¼•Níƒïš |)}©Ÿÿ­¥‘ oHp+ðÿµÛ2j—i‹u$þsr9Á‹ŽL°BýrUk‚Ë^ó½Õóz5IåW[ºaÚ‰íhù×Ó°;`¬¨–ø½ó†;•Ëc­ ±Î¦,ìLð3̶ÜŸ­†œŒu•ÆŸÖhá#ÈÛäf?°.¬5¹Ô/p¯Gò¿„™àODžÑ]8ìô”¯›"£ò"I7/Ý<7]‰ŸfB>yzM ø²¼Æ/ o§Þ}Âk›OE¶;ö9™$;˜à¤eýYˆñŸäCù|^{ðçnç-Ä~¤Ï¬›çý!“t\º€àðº:Q¹æÉKÊ3î~éCyyâùæÔNrkÕ}ê¯éÝÓ‚àúü|³ðúýÈź2qòŸúûs±ýú•ÔNè~¢5•3%WK©]ÑI-ä8'(Ì|™[Fåiãê?ß-ë%z…©æ'¨ÊïK7ê¯Úç;ã·(È$z˜éƒßa=S†Pš36iÕ«Ì6'ú#;¬êùwþÌ¢´hè„À7öüMíóú§¡ö±«/@î¸=ZŒ¤zþšBœoòßY}¨¾@õë+§Yà¿ÜHõRö«BÂÃ}3ÍŒúå:CíbÖìÁ~šÝ±o¥œkO2²¯'¥²;SšRIz·s” zù–b½ÕztœÒxû; ©_!ûÏ~Â'õ(ù1ÿé~ðUÄQ³…ý '¡qQà»ã´&Ô¾Döa%áQ;6Ê…<£Iúey0h¾žòŠa­ÚR½hÁÒ@¢#a ½øÃBƒ¨ëŸÚÐËâš ³×Þdc¾'¬„žÆÓÖºOó^°@1€ÊefO^o_V‹©½Ï.Î/A¬vÁ/yàÜëÅÆ½Áëª;wÒH©“Ý1^ËÖP^ºé†ø]²R)X}I}»p(Ò¤·— ?7R‡þìÅB¨\8âÏ‚Sà]4“ò1ߨ·(Õf»÷":ÔF ;¨½/wæW°;=žðˆŸ§*!Ÿ—³¨=ä;¥yÞ`¼ü.s×S;{æ{*/95èúŸ?vòüÓç‹$áM!•sød`] ¾KåÌ=Ïßü´-Çã¨?#ÂmøÄ’ྭØÐ3 tïr}D픿¥ùPø.dQª Ãþ(µ¾´›êƒ&>Å=MZdéBt¥û _x1/ ?òöo] ¹úòa¥çL×›¢Üx!Æ{ðQÑuʇ< Žþ óèØ§ëÍÿî3nK‡À~;"z9¥‘ÅFß)M-êœõbØñ4Ñ#ð™3 rå³÷æH³N¸—ÄDÊNlG»XJóoÍ’`þÞ÷Ãz¹”æi†üׇ¾à§÷WêŸ0E<raÓó tÉ|íËŸ _ZßžÐCå&á×)-~| 瘴kJ õçÿðýHí.ɳó-Ø'2z½ä€·Û,Âúýfþšú±LŒV`tˆH¤zÕŒ¯Û¨^½ýâw‚ófî^ì“â­‡‡Í2£¶”&ùàIý‚.±½1ßï÷ö'ü̆{{Ñx;;E˜ß›Ò˜þ?RûĘýà»:æÎ9aXf$äýræSÔ£uw¯¸AÔ$JÓW5À|pæ¶Çø”1_p_Ù®KpsBJ!on®õ4èïrÍ„úI4Áx×tÃ|g³g< öÒ½°ÈÆqÚP;áhf Õ«m/½¥zQ½“XgR£¸/ rw ?‰×fê§9¶ ö„WË@oR_{Ø9ɱ _õó1ÿöyÅ}1„‡b‰ý7*ÒTÈáÝ· ®¸Ïã/€ïñó&ßúi¥’˜g§jfìcJYR9:¿æRÊñ2–ðœËƒ¾/¿¼„úå9™ù™7h(áW–5Nðƒ]Öâ^Ï~-;Mý.O2†|yz—20ïA&SZÌ{‹u”³ôI>µËùmKå·¦«QšoŸ ûAôŸSh?ý!ø®vo7rðzã¢7§Ù g‚#™¾²>Ñ¡ži»™¢ ¯!Æsþ8æeÛNÜÏ}ŸšDƒ/ÏGÁN¡ù“™EõÉm,úYë-ó½ì4é>៨â?z¯<Òã Á—37¸Q»XÁ´ÑÛ}0Âfi£4ý8û…äG|¬îA*÷ûqúh¢Bò‚RùÝf@ׯÏ ÷Vö¹«±~¸¯‹ËÄߨ^u{7Ö}®g î…Ù}ÕfXÇ:P?iÇW!l£µ„G|á ‡ú)ÇnUP*Où2(ö0áò>lÌg†/ôPÕ™áîÔNÝâöÄ!˼1o ÷Hï,†ü¸ÕE©l æ3~R)Öÿ»çjÂ_ÒBŒs"õêû˘ß1û½‰ÊÎ)‚“ÕÏzl¦ÇCÊçOl÷•úq‡»á¡É_cIíÕãTÞuè;ÐQ$u¤ôŽZ };Ó^‹ý@±Á°1õc^=ýŒòOYí¯Pýë÷Ó¡o §”â~!9<‘Ú•˜w¬ùòˆÇþñÂÍv¢ã[Šx”h»èöEÇØÔNú°÷1ʧ?/‰¥4i×¾ÅX/ Ø_ø³ú^Ǽ¤ÙÂþžûpÒTÂ9ûøʇµMµ½ÁËa7,Yºmõ+¼Þ{µËâˆuÈ1n ý1{Ï^*ç°È£|17c:ÁW?û–üÆ܇‹Æ~†œÿÉq§ñLŸŠu£¸5{è]j6ëmm äKs_4Ÿä0ÿÖ.œ‚µ„ðĺ± oåd¯¡|Áªë(Mn=÷'÷ñ<Ø•+/m#~9›ÝTv{át‚ËHîtƒòê¯-±OJ®„- Tݹ–xÊø°ë„Ìh 
AÑ͹Dg~Þ·DÌŸ‹÷Êü^jØ3_ÛÞŽÀø~ìÏ$:ÄËû½!<ÂzƒQ_\¨)¢òô¨{à[ÌÕWT.:Óæ:ÑÇ߸ç1¥\“7Ÿ¨`à‰¶”çt»ãJí=·ö"ø‚­‡ŽS^t¨ûz¢/¢7ï0•;ûQªa_ÁýçŽÄ vUû¾K ûVÃT.l³ ÷%‰Õ£Þ'¾ÎÛ)”rgÝ`=ÁÅÇpï“5Úͦñå˜Örþëó™øŽð®oDæ}óÉd¾û;®Ç[@Ž˜vÐ{äSØÍ¨?+s r+µYBtHš´›92£Ý®°·Ê?]‡¿©d×9[j/¨µ÷ðë(èaRó‡¸Çf<}¿°C„7Áa |šòEyN#(•-ï8”ðrÇ÷…<Ëìj½Çú±ø¿„lçf؇ó‡oáQ*Ÿ(¿Lõ¼ç†£Ý9ðk:¤ž©] ïóÌÄg ¿ÈÎûg Ø,šÊ³å‡R©]ö·Ô‰GNÖ%JÙ†LØk’O¦T¼dü-âÖØ·_ÝîƒsIìå·˜Æ-²h ûM¶ãz[´íL©:*~§¤}1ðOi>|8HããýæR?ù™eD·²óù§Ô¿0ÔzÈŸúØ/­&ðù³§+æQV÷®!õÏš>ñ•g”¾Hxr,„_C=ò¨Æíz7‡RŸÁ8G¹ƒ(,᡽ÿh¬Á®%¸˜|„Ý7ÿÀcœoñ­Îà -»öžèd\\äJå…‚±Ã Žüæ•[”×”NÅþØñxŒo»ï=ŒÇ2$šÒ¢Ö“¦œÐ$î%‰W pŒè ý“ŸssÖÙà2è§ÊåÑ/öPÓºÔŽ÷ë:è–w9;CN¾ö'ž[ö$ê'=óàú½Îí@ýxš[¸/ƶú?Ù4$:Xð©óvmÁ:xe…ôqãp_ÎãÅÐã ú¶œLåñgç´û´ÞOýb­¼áŠoþ÷±!ž:½2¨ ð‹~õI'üÌGR}ââ%¦”ªÒ´´Á› ÎzÃŽŸ6î5õËüâû¼`øTè‡ÌäTø#o~Å9–—q ~’Üc ±_{ží »¾bÓ¦»Ô®$Æt¥1«»‚¿œ»K7`JÅ¢£ «?ì+Å›˜Wí²–¸—Š .ßÄ —¾Æ:Ù¾µ#ø8º!ö'E¤r+ÕË©¿ÀíÐ*×~ØOpßü¡ráæ=©ÖœÑG©^9ÖOˆqvÚ€}C:êI¬××Boê—0ŒÚ±^n°§4õj-Ø}…A›Ÿ€¬¤¯+wR*v8zú©øk6bžšwiFtí;ŸDýUãsgR¹Ö²°ú³÷›Pya²Ï%¬ë5ߢ¨<»}Æ¿àæiT`=Ì«h^_Ðè•ù³µ…½P´`aá}ÝÏçd鈇›i ~äß\rRY–ë|¬s×.”ò.¼5]—àÇ.p¼ .g”öu@æ;+ab_J#VoÝ 9HøñôŸ“â~þs®+Ök­F°äÚ0(/³ï¹p½ÝG\áö–†D¯zî|œyo ·öðÈJÆÁ¾¨ðoiÀƒÞ9Jã/ÜKpÅîË`7yõ¡‘ ÖW¼ù3Œëæš Ç”îªÏ.1ƒý>¤Ï0ø¡Õ¦ÜOfÞx%ŸFï¤ö…& è=-#„ê%Ïìàäí6øFõÚ2?JùÝ—Qšô¨løï¶z—Pnx‘Ê#ÄaaDKÿâ5øÞ+š4’Ú•8mXa~ºuÂý`gÒÅA [Œ}éå°¤QTÏ/ðÝL|ïyu—Êù}-qþój‡Õ¡úëŒcÔ_:;xVi+å-)ö!^“|œ³1D¯(U¦ŸE¯ÇJ7Ô›‡À"ëuö+å/Ì ~F–A/RŽå‚ù²>Æ”ò>*[@¾ÿ‹>nŒ ô@Þž¤Ï/Ù#Øe™Ñn”—n~¿•Æ÷~á).…øö÷#í Wƒœ=|¶A¿eæ  rŠõ…è)yî˧¼× OÆwuäL3ÊLI©6Ö7ôq‡_Bõ|ôøÿ{:½âÉõÇàϵ©ã×3.r·/ v‹èm1ï¼@ÈWàlØ©´ÂOQ$²„]AËYULpT/ê×Ç8r ÔÑwšò dž˜¿ …ð;¾óÒÀžÂèX†{ÿââÅ”—6úŒý^øÕû äÚvÞiJc¿b_–<íû‡èt …ErVÚ›òùÓœWÂÆ™b}XÕ~õj×ö`eèsØ›vCaçÖ²Žãœ`~:›Òìs§¢Ø?“Ðú(üðòfIÐC4[@Ï9ºÀß*èsöfÈbÄ[YÜ‚??ÓÕ÷ÃzcðËê:üGqq£6R}òÅ ØCX­ä$HæWòfYoc‡P*O š‡qÊæ. 
|i‡ _áæ~k©]̺³ÅTÏqWü$ºØµ®Ã?ç_w,üøï†ØC¿à?( Áº\¿úKW®C\ŽÚÕûÏf³à×}é‡sƒwhüüÊù³r ^† l•‡.?½Frãõ8à;(<ƒy°ÒÅ·”$GÀo2"”–Å_=¸ù9øAÏÃ_”•5ð<øÁû²æQ–Û­ñ?øJóÈó\+´÷±*€V[P‹ÚÝðˆñÌŽTɺÌߥ½yD'Óî$ì#’äW«ž™Y8å…ÚcŨոïñœŠõ ‰ZCp9n…æÔ^±|«~?¢€ÚåÜ‚?_ö¤%àM.¼At³“„8ƒ~nÓé5Òr*m@¼Œfù«†'Iûg4Ö…ÊvÞ][ µã%‰m©^ȽsŽúk_4‚½Q\ʪCåË¡ÖÔ¾¸ù4Üû">@‘iãþ‘­‚ê•B‰þ·ñ§©½`ÛþöÔN;Òj$øh³ù7µÏçø‡èÈØ8vï’Ž½`×N¬§5¢|atüù¬±›À^§+ð_H›…Á¾œqã8ôáü©ˆçmXéLí"W ½‰3|¶1åeY¡†„¿àÞ%øx·Mà§Nþe ù?°©p·¡œèé‡ö¼™ÎÞÄ7aòXìc÷¬9¥Ô®¶(ò¦¾÷¨â9¸wð ¦QmŸÕ‡¨_ÖÑÕ:{¿ÝºFDŸÜß =ø³ÀztbØ[ÄqÄ6ª}T}Ügõóï<â6µKhÜëÍH =œ9#v½”ztöáÜóh'vi y8Üd?ÁÍ7nŽ8œ…-a' <“ÿhÞë‘ð¯Ç÷nŠx"Ëì8ÖʼnPÌK kôˆøÍOÆóh-%>ȾgQ»Âˆ9à“ÀwPÁÉð‘O©f®Çr¢»Hã }_ÛB‹ý«Yy’ð·ùR½òð{o£j9¢+Á•œ½‚ø©â¨ÅðÇœúnŸ¼çäCN&%n$¼¹î)¯Îkõà2›3² ßeG¶å½±¯ >£sëÅ—]àjă¼¨žÝÖˆÂÍÓw…¯Ã|ÔNÿxLÄ?o2Óã3@y?RëõË]‘¤‹[›h yOÚâÿ£w(â©ò&׆\䙾ðn9°ƒ©ãmÆSž}ZnŠqt´zNpÕ÷jýFþO:ÎÕ¨¹?`'4oDyuÿÀ1O.šm~mÈFÜD~bˆnþ|›~ïÙ‚kå‹ûˆpĉ¶Ô.qó@Ìë“»‹@§BQÇëeÕØ'Kf{Â^›tå<âJr9³qÿ_ѹ%åeÊ}°{䚺a\¯]?ÃNY ¿8ƒQwØSm&áüÔx˜Áž-˜~î,ÍSž0~/Ñ¥˜jGnA>Öã„iÙ†w©Z÷¦”º˜x$Þñu¸Šœ‚'m>ZÇçÅi¸ÄD¯Dü!_½Aó"]×´µO˜ÝåWÞX¿s ËÊz¿üåcø¥UIƒ•Wp¡Ý ê'pø2‘òª¡Gñ¯,†PºÄã”ÌœeIôgþÙÑã«c=„Ú½¿ï)ÑÅúµñªáà¶mù ±ª}Ò¸¬>Чœ­ÖÀ¾µ77ú:¥±Æa:?L-+{J™m¬Ú\ßàDÄ1ç¼ã]ÜΫ§'5¾òîq0˜òÙñœN”Ïv µ£|„üîßܺ±£°îNŸ½BõÂY#qOÎ>:ñ?œ.†ý1_¦SIŽ þ£÷ò[æo¥qJÚ=Þ° ×äò†o‡œþ @åâ${迬#[`—H©{ö?¾ýÄ)䜛üR¥õŒk/tÖ?ªgîqÝ£è‹5ô¾ÄI¡°—i´Ï44¢ã“/Qû˜ûšQ½’?©ÍCžùL;¬—Xï ªÕæFåܙ鼯rörôoÿ;÷áOàOgA~|ç¯æÛèÚ³SGQÍB!ìšØí«ˆž‚w`¯”6øNxTn±°ãŸ=ÿ…R–ßEès 2[ØAXg»Â®.·8Žq«Aóˆþ„WÁð_)gf†Pž}f<ì”yË÷#.&¿¯±%åþÈŸ+ïqe6ÑÃõ`>€_#Íù2î# ï-÷•÷ÎqÂ`ÀdÇá¾ß]» [ ‚«úå‡õ%Xÿñ ‚U­nÒ8“s® ÎVãm1‹ò\3ø³iô§SBÁÇØþC(¯ ီÂaÐ S’üŒPµ%ÕÇjýàgP®<ûÚŶøžOÌkÙ'—Æå³zZ®)ßëc‡tøöÆ ëˆç¥Jí•§:¹Ñü¨e“¯£8²»•Çu~ û$ßÏñ52Á È£ÍKì7² ¿·Óxâ¤màO xœ?Í£Ö·±Þ"˜' ‡Q¥uÁïÜØ ˜ýO#Þ"$yiSJÏz‚ÿZ›QÉO°wÚs´à—¶ycFôH×îzOõÅ#gÿÀ¼ûßqi¸Éuj'³qö¡ˆ?‘l9Ž8Ü’#±o2^¹:Ry–¯q4<“VÐ㙂©æT¯˜³z@òÄiÐoÜ>ŒAÜÙ}wÜ»D=®!žŒ1r[CÌwÔt被7aïà$Ù?žö§¨EG`_bÀn ŠÌê¹”çaÜÁæ6¸g´sD|vQÖ&ø<lÿu#X{º2cBS‚›íÝö=u¿CˆCæy·R9ïˆ÷ë@×éÐ…]BJ0¿;œ¦ò˜oc`S”LE| kI9 Ì‹zƒžl` û#,ñļy}á—Q|~;…ä‘1Þ‹°ípîÇbã~(Ýê@õê%šQšœŠõ‘57† :N[Oü‹»®¥4Ë¥ÞIdß“ûc’&€Þܳa‡”œ(‚õݶR•KJüÅ”g¹hùÖô_Cüþ¾YF©ËíuðË]o}íãߺƒï— a¿õ‹r½‘÷ïc\Ü,âÉ´Mem^®Âq’©ÏHýø ·Á2øݳàG9ìûF~#-ö)Æá8W5Møë‰^ñ¸Žî4.­\û ü:9s)µãŒôüȱrì<·ÐÔg`ŸQŸ[x 
xò:"®"À}Ð`*ÏîÜ£µK™ô z2käÛŽ9DíÙã²OF–qq§Û@?ÒÈND§Z™RyçhÄM©¾¯J¢q$8ü^þ¾«Q”gÎÓù‘b™Ž°‰v.@<¥¬S"ôUMmsøuñ¯.“¼ŠÙÏ…XSrpINÃüò¯r’ oQøTÈwVàzØÛ9…µp¿ÊJ‡ïQû[րƈÃЮîûQÉšcàÃû`¼?P˜|ĺcöú‰ýJõ;´+ÁòÃúc¨ùo‘8Âþ,v ·¦”—);F|Tõï‹û½"ùŽ=Ö͆©°ÈsFA³×P*¸Ù™ÚIòñÁŸ’;NÆ¥˜Ÿú“gR;yæ_`«É"úâ&…}Hýë$îÛÒ‹2Ìc®]î÷Ú.îˆÇfµ)Æzï©¿Í$ë¿ô^Ÿg:;öá&ÐÇÒCÆ#.Ukå×’òªÁî°Ï¤¸Ï…ß+ïÜjØË”[Ͼ›Þ8z²Ê¶ôí~•t.wDã(nûž ù«³]L©4sžNÿθ¹É?ýÓN9AÿS ‡}Lš”Œøá¬éˆ³zyÚñ©Ê§?pÊÿvïi4GÿÜ'|ÊÝ?áËÔéYð¯ˆY ħÜáŸpß–ôo•CíýOgïÕÉÏÜ\jç~‰x ¥ÐþÓà+9ˆ'{n³þøôθ×ÛMÆ}@>ú—èKj|÷± I‘ˆµžºó¹ÐÙšèÉ4߸Mé°q©_îM Îð³Ð›"¢vC?‰1yˆ8ÔWeOßÿuìe2³]ˆCÜö\Nød›® øœ¢Ö:{œóLØiø1m¯ü°ÝtLýųþNÅ<{#ª/^×2 |-˜¼“Æ' s¼JõÜ;§’Ûr Žø†ø±ÌçØŸ^Ý…{©háMø7$Û¢q?˵­¹ç‡N„–¿E¬ ®Ïj'Ÿ©Â; žÙÙ`Ê3e°ž•§9ˆËN[¶q‹2_èš3Nw©}~äHÄ›•Œç-F{îwø #œuñ~£ú"~‹ó½×Dª~ÏÁ|Ççφ¿E¹ãF[Â÷jë*¼[“¿Œ@|Šï¦3¸¯\hyRæT !õÍv ~rçµÈ#øYa Ï'Ù:c•ç¹=̆ܜy,#>*.¼Ã¹Â˜¸ã÷_Ö vSþ»-ð¿ÉjëÞ±$Úõ„½€œyÈZÙ,€èK>Ÿ}]sÿ—µ÷öWÅÚHäÃ_„ay7Šàz†7Á9ÀrK€ýW8·ÙjÏß'|*œÿ°|¶ñÕìù©ã©¼ ìŠÌ ¶ˆJN*ƒ~•9h/ÎEî®Íx¥=ûë6Øñ¿ò·ÓŒ oýﮌ4ßy˜Ùç—"S‰v.ñìªZß¡'Š[Õ)%úù½Gþ¦ù•L¯‡¸‚ìù;±NN}ĽTþnî“꼈~@åªMá7Жރ^¢úi½‘àÉÅN¡rÞÀ_Ó(«ü}ª{¥X7læ¬ å°vŸ)½YÇÄ‚êÕ›m»Ó8Ô™`çV8oG|[Úžð»²Vž$:øy 6œ<ÿ¨4¬‡ ì J5¥^¦±J{A<¹2¡#îý¯¦¤â‰«wOìûInjø%2>iáïöjÊǹ­Ñö†ŸHyÐqéÒŸ¿b=gÜÁ¸¸¬è¡Áñ0±¬‰x_ ²£µË †8F¹Å@øÓ5×y~ÍŽžû©}¶Í èYœ`^*ÙòzëùÚ°³°{yq.Vµß_}A\]V2ð%“{ú¤¤§÷§çl9ôhiögÄ«3ºd ,Æ{ ì¼ã"ÄÉ|þ‚s#÷Z0âɲ|îŽyr¼ ¾ß•‡ NKòÓ¤ õ—ÌÚ¾³?÷A|¯rúnØ#å#'QýU>… ý•ïoç_B΃Šï?é+öWiŒåȹáÄžßÿ—â¿ ¢ú6!x² ÝˆGʨ¿vŒÂ‚ç8¯ú .)›u÷Ÿ¢ìC¸‡F÷ þ@íR—͇¾)Øý|*äÐ?]Cð“fœÉ¦rÑÄËØ7±·c}·ÈžCpÞOíJíó6¬‚¨#ÞåEÖ݆u3g3ìÁï_À¿œZ{ù:ª—M°íIð²/fÁ!æ6DœyXÈxÈwµìâiü­MÀïFˆá~rÙLãd„vÿEÙïìï9ýëסòÌ«J©ÝÕ¶Æ¥˜?k]Dx×m áãMy%¢q)¿–° _ôŸˆQøŠy`ïp@+õïŽùu›3ŒÊ5ý[µÅ|Å·†Ü¸¿±ðdøùé‹tûUç—¹$ŸâÌ0øå”šÏÅ”W¼~ òº¤»µc†·Äýâ ÃË”ÒøÂ9? 
Ž8b$âB>”â½Qì,WÝù»ô(Þ(Íg-ñí‰÷Šì—ýWsþâƒÓø-½Ì ¯¤ŽîAB§EK¨½ÀN©ÓGš@nyÚ˦DWò#Œ;zý|ø¯xÕT.-Û ÿŒ\’ˆ¸d¾ÀïßTm§ú|Ûxàuó‡Ú)B±ÎsW€®¨f¸gÅÕ•!N/±G1ð ”+ðþAu;ï0¤'—?üì$¼ÿÑ{¥£ŸÀÿ”5yÎE›Ý¸ú×ý&ïñ¼+â?8EuÞ:Ð8S²ƒqOÊC½rh¾“‡GaÚ¼»®4¾§©ºøJþè?¸_‹³™AïŽc©^Ôn"ÞÈLÛšÓxX’dø§syæx/¯ÎKûAýò›s?Q»”¼À¿è¡ˆkT¨.!z¥{‡SÊX2 ÷õÌR6üž‚ïñ]FcKœ«Âq…xg¨È7D\‘¶ÝÈ‹D‡Ö¸/âDMÞ .ùnN]Œ;àõ)؇%ýd¶‡÷òawê§nœ^‹RYÙecÌãuGäµW]Äoä6ùÐô¿7„ßTxûóDê/÷ƒwaŠÌ(WM9¬¤q'ím…ý± “»î}«ÝcÄCG©·|õ5üZŠ"ù^JsOÞÅ;2N“lÈ­ç>|_C{_»¡´§e6µã–¼¤ræ!¼n©À=Cù` Ö©Ìé8â ²%@~ ßš’ãÓ<Óð$âö½rr©*9ñ­â}ž`~žêΑâ+ùˆ?åÛ.…_”ï³qö%éq÷޶Þ©)cÒñ^7 ø äY0Íà$ÕGö ½Q” F<\‘ÅuÜÓ…%ס'š¼ÔÙwRŸ•ûÉLgokÔvÏTy/Ø’Ö\‡^¤ø¾öDáé吆ŒÆ;Á¼ñ8/„µ"¦Ñ8„9ä&nuGÄ%«âö.'ø¼Ž#Ù˜Ïa]æ;!.2ñâ èib·MI”ÆÆqpìNÜ µýÇ Ø9ïÛøµ•ÂëŠwlEGžàóóë±ßó³Ÿãû!E)™¸—¨gŸÅ9SÜhÖ*Ï8ñbéÎ~ûWoV¨C!»L÷_knøQ?Ñól°DœIåy£ÁžÊbDÁ4óÞÑÊï×B¹ÇIÄ-ÅH;èäJ匸a€1ôFáùäªÑ˜ûÔ>Õmck*w­?ç»öÐÜÝ”–l€-(a$ö§˜éƒq¿M}ö~jÙžÙ3¯ÂZ¸Ç‰öAͽj:”Ëøø^KÁÊ?ªç®æ"ŽÎƒ‡8=F§WðcIêXÃßT˜úòáÙ úbî°Aà«*Ðñ™ESW#>/àL™–ê'ç_Ãø/M€}@ëž>ŒRÑüÓÑÄ?M«YønCzt²n>ç Àw$ ¹4ïÜAxU°1~Ų?Я{vEü'sÊ¢V ‡}ñàiÓfãývιÜëòç­„ýWjí“O|5߇ø_AÝu‰ú<=˜?|)¾_ÀÈ?„÷@ò§NødþuÄHNÂ}O¹¾]6õ—œ‹[@åJƒo°;åZÁ*JIüH©úÁvì̽kBÎ;¼€='µ{뎄/v•ìÏw<6YSª^¨mIøOöÁ>&œþï‘Kà—“SÁ¯Z9x_ž=.ú±"qΓü¢Ø  §­äM–™Œ} äÉKø¿T·&R»Øïóýç#žK˜Ðc.•³?rá?³êú‹f,%z„Å<ÄEüÙåKý’÷dÀ¾™3ÁöVĬç‚5Wq/t¯‚~žuÍï%4ÛÝooErY ô«ì²]ð_„¤\Ä}œ+€~Èlg„¸`N³×:»¸gø·xwwÁàðl˜äÒúè1ü °>·öFüwd¸)ÕK‡]Ǿ$l!˜Kã(ÞÒ t‰òz Ž^›·¨¾øn»qê–‹x·à/ ÇýŠåÝñ•Y—ûâþœÕ.v<ÅJ«Ôþš4÷áó&cáTíNf<†r*ühùGmé Ræìà…x÷¥|áŒuzÚþWËÂï{ú<übÞ `=©S»ðgñq¯çnü¢{769vYmñì«ÉNí°ÿ¦…ôQ*6a[BÎfì€]‡ëuì&äõk]cªÖn 졼_KI¾xOÛ4&xI—6\§rÁ´fˆƒ~s¢ÏnÌgQ!Þ£ò[Mƒ¾ç·ä"âù~#VÃíðŠòò~ 6>é‹÷XÏðÝ…Wñ®SznE>ñ?ãõlø{ù¶i)«¢Îx§Î™{÷SÅ­BÄ)ýßž$x";¯;DO|Ù ØïÅÍ™£©_~’ÉM‚Ÿ×»·.~þ¹ërªW‹¦àݲ›‘.îyk 쀼Ôj—ãƒ8fÍPؤ&=°>äm9”cž‡ß=àHKè_éL'œ3lyü¼Û±¾˜Û/Ãϵq2=SøÛ»)ÞiÉv/Ç÷‘Xž6°Giæ€N:ÉvO©¸ÖE¬»q ŒG´š{“Ê <ã±îT«\ü)/0î¿ó“·û­–é‚óYÞéôÍ ß ¨kø™ü¹ÓáßåNPŸI›ç rÜéÎÔ®Är'öaÇ'­1σצ½qa¯èݨÁÿßÞö9Uièl‚S0j»=ák}™è ‘·Æ:Œ]n¹—¹ß„ÊYsÝvPûl¯£Ðÿ‚·Æá;G|õI%á/j>u0ÁN+~HxÃo­Å¹Zøaë0¢3®[üÐ6Fj`~(mR^‰¡3õ/>Ðñ~ø¸Ç~!}>ŒèUïÚû6gÅ€S„_ÓüÖ(l¿P½"b$äXÐ4@Bå~½KàÏâÍ»§{_tX û/³½ ìWÙ'šâ}DöJ-Þw1/·Á¹Ìx¶£üÛ2禃ãqÐ)}k„÷ jWåJ‚Ÿþè8Ö½|^ôvaóõ½ oûvÆR=ó®¾[•Øè”n>¬KáWUµH}QSâ‡õ±ñ:ìÛb¹—î{)ñ*øëÒÖºa<ªg[ñ½ ÅÓCø¾ßÀ û*ãÑÆ;ƒsÑÐ 
Ô+qþuì;•VÕûÑñäA1¾ÿ!Éïþ‘è‹ùÖvì¢>½'œã!¥!ø Ùìˆ{lšE |µx¾nŸø˜ƒwåÑù¿qOÕ>÷ïNøó†@ïdIñý1þ KÛð+12Oáü“¿û£20þ{9³ì8Âãc“»Òß›z’¡ûnžj˜úðÙ—ø®À ãDÄïr?Æ9¤N…įٖ̈°“*W] ñú·jõ&03…ßKí»ú(×Òþ[é¡ûˆ)j¨{ß'Õèâvwý>Ëk7ÞéåBœR¶"ïÊ”ÇÀ.$té?‚<6ïr‡,Æ;q#¼ózòÇv®kÞ]q2æ@Ÿ×<½ù r:»¾¯ãÔbìÀ‚\Ä‹OOÛJåñËÌ¡Ær>ëì,“LoþÂ¥Oa?â›`]2FÃ~,¯8[EË/º{Bg¼³ö½½ï´XLoĹD­k ¿žäy)üI±ûu߯ œ¼ç¾âóTì#¼æ°+ bÞþ 4_üòÍp\ÿœè)èµïÖÒ›5Á~ÌZ„¸}Õz{àQ\@Wž4 ¡þÌ‹m¡¿ä¬´å§O…ý±’xßZrÔþê7°n?¶O&¾ úÂ;v™Á7Ì3ÏùâÊäe¸GÅî¾ ûÒ+Ÿ ˆÓÖÎh86nˆñ&¼¥?Sõæ,øWdIÎ-0ލ…ˆócou†¼¥L<‚ñ—zÞuýøq¿to â£U|IäñìÁSÜ>íLi˜lº5ðÔY ?–|~=Ø$³é!£yºÇxè™2ŸÎ4žà½c€÷…IK¼c>¿©6Ñ~WÍç©ÝN±ðÞ³ò–¼hLåtïÔ±Ž­©^"H:DåÜZÇW–|yâND[¥ˆ×ç…êäf5Þ+p>î×ÅóùBoΣpš¿ûxkÝw¹Ä7žÀ¬}qé.ÑÍæ´…=A²b‡šÚ+ï[DitéЇ¢O˜"žÕÑí=áÛŸú@xTõ§àý¿ÿ;Ä5g&Â9”>t>èInú vyÅ‚ý‰~åÎ,胹§"|‚cý°nJŠy°ãÊŸŠ|síàדˆnøQ¿¤&Àõ1ô¨¢öBog Z;GIóPØ9$¦Ýð]§ Óà×áK'6£ô™m1î%us{Üì¶1:»ÌÔבD¿æ\)ö]ÞÅx#èøãÁl¡ó?yNÔ}g„{ñt‚åºï礟Ç=·¤cŸ·×\îS²vÇa¿’¶|;:£ ÞïÈŒ_+ Ÿ0<ï· ëÒ½3ù{ÿ yæÙ¯ö 4±ƒq%â>­‡7óüÊ…Ù»pOSn6—ÒØ_Z¦FOÎãRÄ‹ˆç[úmÁ' JëÃ/Äxðaõ»×úvvd]¼ï/^sû–"fòënôÃ8LmZb]þ)Ãw\ò&NÅ{)‘ÑN¼/úy ï÷we@’º€ï…ÿÙ=OÝÑtè«'¾Éóâ}‘vù>ø]²e=ð[<ènÑÅyëøYéDÜ/|—sÁ_YþL|ÿÏQÞ¸”ò^§Ý Ïäl,…݈1ô ôtqPÏÞD/gi2î×Oíka­Íàà,>‰øͺ–GˆŽlËÓøŽŒæé¾iÔþÍLjWWGŸCùÈÏ£q/xŒÆ=EÓ+Sw?rŸòÊvèÚöÛ§ÇÃ?-.t‡IÛk…˜èÉn5˜´˜+¦~Aœ#gv4RžQ'ØÓŠÌÆÂΖ9Ñv³„}M`×ç­ÍAü–Èt#Þg3j›#^HY°ß=`©]`çô)Y ;„ê±=ôí8 âÓ9Ý¿"—†í¼Œ]àcØ}ÜOäŽgXbÙß7JÍŠóò…pìóìÛ7a¿(¾ ƒ}¸ ËÑQ͇_‹Ï9Æ¥ræjoø“2ŒŽà¾±Ý!Øë¥V$âÜØ×ÍÈoh𽷸˚Ýg—ÛˆëÖZîßæ¦A1ÎûìsRðS}Ì rÛl1¾ßò.±ü­%~Gq~¼>Óñgvã‰=ÿ· ß=41èLõ¼«ƒŸùñü/w@oŠ¿¾z—×™98?DMËà÷ÕrstïøRÞRªI˜z rÒ¾ð_|s´Àö ìÅ7@Iîßï)”/ñÎ+ö çTÞØÖs)¿ûÛRSð§wo|7ˆ3Øû°ôâð‘$Y7>C_Hâ|Ã~¥™¿þAÕ¢ÍøÞxëz|¯!xð@¬o™Ý-¼+Ñ̉÷NÜÆEˆÓypnâb¥Øÿ¬ ‡Ln ûXþ¸í¸—ñoj)uXÅ rá$ ìsÊF ð®YÕ/û¿„gҊʳ\%sP_¿'ü´mÂÏÃ8Ú ß çßåö«aÿËß3ñnÅGk!œáì‚ïðצ· ¼šgWW’fÐï‚ÿ†þšz÷ö!þ)Àál ÿO²ðü¹ 'O¼ÏL·‹´çÅah6iÒz”-èNýŠ¢÷â}£Ø|¡•'rÍoP» ú¥të;dDèª_à·‹ ï*Ò;çèÞç1[½¡ñ+–l¦þÒÀ«ðŸ§7gã½­ìl2ì±™Wê✉K{šðÃÛ~„_ÕÛ öÍ‚¡·m,°ŒÂ}^Ûeä[\w ü½Œ%×°ÿ)½àg ¯¹€yböÉ <·îŸÔO0®ÍPJóà}dtÝýÐ3ë¦4§rN« x¿¯üù ñäb›ðƒ¹»Ä:ä[½Àû)¦Yü6r‡=8?…¯?#þ׿ÎÚ­™ ¿}Äã™Ø bëÃþÌß5þ Ù gø)ƒÇ•!¾&¹[>ä[žœ»ƒäXa7q-Áõξ…ýY‘Óz€z³-ü%š­ßp?/n¶þuyçÆÄߘ¾ÅÑ óXoÈ™üÑkÜ_ œwá½&{ÕØcxáè}¬¹xÕö‰TÏú9 #åÏðeð“Ⱦ†l!xœÑƸGðNßDÎνFíc®¨a7Ê  ÿ"Ïþ2ì?žVÇñ]ÙîÙ ¨Î…•Ðò,7à{{¢up~Êe·WÒøÝÌÐ x’x¼ 
ÓŒÃý_’—†83‘CÜă#~KÚÏyåÛ¦ £úMsñ.U:ß.¾ŠïQ…ܰÁ½'ÉoVäéâYøÃKn:㻃ò ,ØË“:/Yâ½¢1Á• òõ¢TºZŽ{T†÷x¼ÇÏY²þ¥ð-¹ÀGí ì[ùî%'(•Úµ‚ü2˜Ûñ.E•pã9ò·aÇ•6žMt±½åx·«|®}Gí²»4…?]6íõUÂkåMÇÐßyWaó¼'@Ÿ*šññÙI#zA¯~¹÷Ÿ‚°Gçv?c9â]â 7ã¼—d<@\UI¼-âTÒŽ.ƒÝÛ=ëG¿ñMþâÐûƒ ;ž£6© û&óS`õ{vÆq ª8·v4ŽÜ^¼¨\~/~>µb$Þ úÎê‡ùNˆyÔ”ð< ü{‡hæ¬+ÔWlŽ÷í¼1aÇØ<¤¿ø#øƒý*vÇ€óO‹ ?§{.ôº€ØÓˆÛ,fŒÂ~y¬u)òRcèñy!KnP{~Óñøþ3{!ôeÙ#Ä%ðq"¼ïìN#þ9õÙ0àyÊØ ;´ÿÄðûî—ÁÎÀ™ióŠàåÕîøé¡™ø^Uþ0[Ýü?³Qƒßx¯ª>Ô÷«ânxŠi*ªOh;®â\{[¢Gu¿.ì×¢B;œ—ìð…°KJ%‘ºûµ¡ÁtªO±À"_öp,å“ÚÿDœÓ“Ö}á§Kÿu7„Ò4ƒà ÇõÞÈÏYôåÜî= ÒGe ‰Š«®x'¦8Þk?ÉCòVsè¹ì<èü1sáônœ¿‰zeáê'Üûß!RíɆ?Rp6ñsñ—Ï~ þÑ{×÷Â:‘ŸE¼…`ùÄóˆ|b£*|Œ8žË`þÙ×ïÈRÁwrwt‚ÝS¶mü},îY]ü¿Õ)Äq´§Äm?oÁI5‰mEå =m ‡ªˆ_To æY9¥ç¢3Ni;Tø+'ìg™söC¿s¼mè@iØ…@è/a/îc?’ÝÙƒïŒ*<>á;eœS[ðÞS¥ø‚ïÞp’‹ñ=áüþãðB̵•Ô›† |Üð<ÄAJ¾-À=JáŽïrÉw:á;G¢äUˆgRl Åû4UÈjèŸÚÂÓx&öy¤ÞU_?%2Û‰xcy§Ð ŠØÃqÏcs6á=£ì÷›„?lþCèÌ6‹+ZP†ø²ünþøÞ–¦}Þ—–ô[»§Æ zzâÆMð¿ùmÅý—*B¼„Ä?qwŠåöBNlRñ~7iÐr¼‹‘t;…w%r;!ÎwI½æ¸Wå\½‚v¼Û%¸?/Ûx‰¼Tèu™ûƒ7Ç{ìhªž+a÷Ô,ì7ŽàÚŸ@[ ~5X„õñê°û³q®î}k§ÄÅH׮¹-°·@œ¿¦ÍwÄSªú ½Aåé#v#)èöaè…JÃ'ÉÑ(>µg[âÝ 8tÞ7” þGó3˘ÊóÆ]¿Hå©+?C¿M¶3Ô}}ŸPBðù õ±o);‹Sûå°Á ï½M}¢C¹Ìy=µãˆÇcßU8úÀoy´Ëp?JCþ˜@Ÿ=‘@ÏùþÞSîlGüª¦ûEÄ#ûzCß‘<}ÒëhÓøcTZØgn¦á\’$­Gð}| àâLï&eGÍD«<(÷1³âaø†AÕ{‚8±üõÙðwj2̉^±£¯#¥ªs,Ä'ñ&?‚¼rNn…¾Åqþ)=¡ç=hk‰s5Tú ÷¢4ûÙ° ë8&¼ á^ÀçÍs€Þ)úSèà9ïÀ÷æs¼û`Ÿ`Ú||Bõ‚ðT+ƒ>#hzv®¼N;zÓx˜ƒgÁ-÷nò„ú3¬†`¿áNÂ<½-æ@/—fCïM·gÃY|Ý|d¬ƒý9¹k'¼·(êâŠ÷R"þWè7r»|Ø­”K2ŸPžpñõ¼PÉàÛ–M-(NÙח蔟?€8AÙœOq'Gã;²îGàÇQ]¸©Óçĺ}NÆêsV¼ ÿlº‘ÞQvèÖI+¼ó‘¬íO©úÑNÀ×e"âÊJ¶L¼j;›À-êî ÿUÆê–ˆÓÈo÷pÖÜ¢ûT¿ß |7øë¡û®•äÎŒA4.íÀ†øÎªàx?ÄuifËðއ{jì'Ù» ž¬pì"|ïOÓ®vKÕµÍà»wÿ:ÖÕÕ¼[ඇ}¤h×aĽÊ›M¢~¹?Btï%Û9¸á!Ä_^i=Xëy z ìÓø1¹ý”x7À¾a„ø®woû—R}á–:Z*W$Úm<¸?Kæ²ð^Bâ }[;hÖSAï‰ø^±øÅWÄ9E÷y÷¡\ãFˆÛ.*õÆ}µä~oÜ›ùƒ4à‹\ƒøG™H„¸-ñI-Æ!ùë$Ñ--†#zŽ÷$y·N$Ê2WÜãò¾½kG©¸¯ŒRAÃîƒ(ÕÜzŠ{ˆàf7È·È!ïÞ}ûj¬q‹Ò’«ˆ³¯ÎÀýMÝÃþágk|#N³ úN±¿!ìgŠó{àoäìm }Oôi9ü¨o_>‡Ý_ã!YExøÈÍ·É࿪d îaÒ:`‡/h³ ëB˜R‚w"ö\œk¢’Gxï»ÏF÷®U"m€wT›‡øNƒh–íYª×ŠÜñÞ6ÛfüÇ.SVàþP8RpòÓÑï›Ã÷ãÜ*|ä‹wqЧ^ø^ç/ü£Bëoºx…}…NDWÜÃ!°‰-Oƒ¾­×ð®6O•@ßÁ4;=ö÷ÇPÈ«LÆÄ»Æ Wÿ€¯½ß€qARØ çÕ†C3wÌoq‰Æ¥îâ‰8aQý{øî»¶=äBºÂqÛšÚöx?ʼn+ƾ™3vu/Ä2vØ Ž5á‡_ÕË–œ™Nã zm?QzÞ(œ÷’·~°ßlú­{‡YÿË.Œg¨¬ä®Î3f]KÜSS{^€]„÷Õ 
ß½.:¿£/áÑIÝOøê@Žêa©‹«Ø!÷£úÂL#|ÿLë×!‹æ-Þ»^4äò3ü9‚Áø¾_Q¤/Þ™ðš.Æ{ò ƒbØâí ¯üõŸqoK:¥ÆûúÐ3é8ßäØ)T®hý„ú…~6@\Ìqk2ëRüÔÑ „{oÇnŒ»þN¬Ë~º¸“ÔáctßeK¿hDýµ‡ïàïlˆÛ¦C³„ŸaÇѾ™wHªé«a¯“9ìÅ{˜gÉð'šÀúgdnÀ»Xõ›ëi\^Šý:ûñ®Ž‰Î„—×1ÎÂ8GOª—ºdà{F%Ee”WyÆa^$aÛ½¿ìFØg‹?>…^ø÷ûg¼9ݨ]ÄÍÙXJeîEZ,‚¾Êö|5 ë²Ñ=øoøÍÚ#!Ý.zsÂë8Geý‡*žÿ¤ˆ3o­-¤~üæk|©½¬y‹XJ‹zœzä§ÛSšag»Z˜ÅqÝ~è­ûn®öz4ô&–å@ð-w‘+â Dáð«ˆNª&Sñ ¼G3÷lF:Ä8ŒÆ©™¡J¥qä†ëþþ+ڲѣ9S=MüÝóðÆ«5ük‚龃1oOºÂ.'d[} x"§&ÐC4M®@Þ8ëéü5sø˜et$ìvE=K G‹XÂïË9"„_V¦¹0 ðŒÇß•IºØq’Œ½°+ ÿÄwžœu÷æƒD>ÖÏfÇÉàÃôµx÷¢pé‹ï>g/ðET³j!ž@ÚïhrFuÇ{xñíº¸¨GüÀ÷Ä-^Ö%ºøô9…¿ã[mJû£Á?þn›ÁýÌKÿûôŸ¿ÊÖ——¯ì¯<8åá¯,žªÒ_ÙòŠàÖ4œê«êüÖ4}á­îüV¿¦ñV„¯¦àTMÓUÕv•åwEóPÝýª¦ä¹ªû`Mï?•Å[úò½"üU¥«²ùò~UÝkJËëWS|Ö^UÇW=•­¯é}¦<¸5]_Y¼úî;ÿ·ú•÷«î¾«ï9þÏòòÚW_u÷kƒ轕í÷o­ÃÿÎ*•×”\TöWÕý¹ªã¯îyPÝy¨îy ï~\^¾²íª»oÿ3_ÝqT_ß|y¿ªî«UíWQyUûW„¿¢_uõˆŠÚ•·"zª{ÞT”¯©ó°¢_uϽªÂ¯iyªî>£¯ü×ô¸jz_+¾¾ç|eñV•ßúÊû¿¥OTöWÕ}¦ªûï?ë+KOUóÕÝ¿*¢£¢z}åæ¿ùéâ*‰¿¦ç¥¦ù«/Õ=÷ËûéÛ^ßz}áë{NU·ªýª{.Õ4ÞêîïåÕ×Ô¹UY¸úê•ÅSÝý¶¦öѪâ­*]åÁ­.]5Å—ªÊë?ûë[_Ù_Mé;•…_Ó|¬j¿òè+/_}U­×÷W]:«ÊÇš–k}÷š–·êêU•ßòàU¶^ßs±¢úšou÷]ƒrôÞªî—5EwMµ«ªœU6_Ùr}×Oe뫺W¶_uùZÝõZÓtTö§ï¾R<}×yõååõ¥§"<åõ×—®š’ }ûUWþ«JWeé¨ì¯ºóWÙvú®ãšZÿ•ýU—¯5=¿åõ•ÇÊ­¨üŸ¿šÚ÷jú\¨*ú®ÿÊ©¨}U×WEpª+Á­îùZQ¹¾¿šæãÿÐï¿ï­¼*ýª»¾+j¯/žòà×Ôz.ïWÙúêîúÒUÓã«î¹ZüŠè¨î:¯éyÕE?}å@ßõYÓçpeÛë[^ÑïßÖ?Êk_Sçe5µ>+ ·ºíþíu§¯ÜëËÇš‚_]¾éKß?Ë+û«©öÕåÿµ?TµÝ?ëõÝ*Â_^yU÷ýŠà—·ªóUY¸åõûßòÿ‡½WßuYÙ|EtVµ¾²å5¿"8UåWeUåWu×}uûé+75MOy?}÷)}áW•}ב¾ã®®|U÷¼×÷W]z+ ·¦è×w«ËïÊ©éý¼¦Î•ò~úÎoUá•×®¼~Á¯îO_ù(¯}Eð+ÛOßþUåsuûUÔ¾ªýª+ßÕ=WõÝ?*‹§ªí*úUwÿ*/_œÿn•âjú¼¨.Þꮃêî5%/Õ˜¿m_Þ¯ºðô=Wÿ­ó°¦ÖEtý[ûXEx+Â_^;}é¨_UñV~Uéø·áè;N}繦÷ÛšZ§Õ¥¯¦éÖwžôýé+oÁùg¹¾|¨*}5µÿèûÓ—UmWÙþU]¯5=/úÂÑ—îòÚÕô~SÓçÏÿP^){oeáÿ³^ß}¤¦ö¹êÊAUñU~u÷{?)¯]uùUþªÒSÕr}×EtÔ”þPY<úÊOeé¬NeéªjÿòÚU–®êΫ¾|Òw¿ªì¯¦Ï­òàV§ªç`EtÔÔù]Sëµ"úªZ_Þòêÿ­qWDOuù©/<}áT—ÎêžÛå•Ww^*ÊÿÛ물þU凾|¯êOuú?~¿·¦~5Í7}ñU¶_Uúžûå•W^UëkZîËëWÓëFßóX_º*ª¯î>ùoŸ7U·¾ü­.¼òòU…_Óã­,þòòUýé{.þ3_ÓtT¶¾¦öëêÎgUÿ¯øX¾ê®ûÊþôÕ“jŠ}÷}}Ç]]}£<8ÑQÝy¬îþWY¸µ/¯¾ªë¹²ðõÅó¿ýôŠs¨iý£"¼á¯*žŠàW•žŠê«J·¾ë¼ªð*Û¯¼v•ÍWôÓw>ÿm~ê‹¿¢_Uûë;Îʶ«.þÊæ+ ¿¦÷í‹?5}Uw«©ý·ºrRYøÕÅûoí35}^Ôôº©î ROCR: visualizing classifier performance in R

ROCR: visualizing classifier performance in R

Tobias Sing, Oliver Sander, Niko Beerenwinkel, Thomas Lengauer

Abstract

ROCR is a package for evaluating and visualizing the performance of scoring classifiers in the statistical language R. It features over 25 performance measures that can be freely combined to create two-dimensional performance curves. Standard methods for investigating trade-offs between specific performance measures are available within a uniform framework, including receiver operating characteristic (ROC) graphs, precision/recall plots, lift charts and cost curves. ROCR integrates tightly with R’s powerful graphics capabilities, thus allowing for highly adjustable plots. Being equipped with only three commands and reasonable default values for optional parameters, ROCR combines flexibility with ease of usage.

Introduction

Pattern classification has become a central tool in bioinformatics, offering rapid insights into large data sets (Baldi and Brunak 2001). While one area of our work involves predicting phenotypic properties of HIV-1 from genotypic information (Beerenwinkel et al. 2002, 2003; Sing, Beerenwinkel, and Lengauer 2004), scoring or ranking predictors are also vital in a wide range of other biological problems. Examples include microarray analysis (e.g. prediction of tissue condition based on gene expression), protein structural and functional characterization (remote homology detection, prediction of post-translational modifications and molecular function annotation based on sequence or structural motifs), genome annotation (gene finding and splice site identification), protein–ligand interactions (virtual screening and molecular docking) and structure–activity relationships (predicting bioavailability or toxicity of drug compounds). In many of these cases, considerable class skew, class-specific misclassification costs, and extensive noise due to variability in experimental assays complicate predictive modelling. Thus, careful predictor validation is compulsory.

Table 1:Performance measures in the ROCR package
Contingency ratios error rate, accuracy, sensitivity, specificity, true/false positive rate, fallout, miss, precision, recall, negative predictive value, prediction-conditioned fallout/miss.
Discrete covariation measures Phi/Matthews correlation coefficient, mutual information, Chi-squared test statistic, odds ratio
Information retrieval measures F-measure, lift, precision-recall break-even point
Performance in ROC space ROC convex hull, area under the ROC curve
Absolute scoring performance calibration error, mean cross-entropy, root mean-squared error
Cost measures expected cost, explicit cost

The real-valued output of scoring classifiers is turned into a binary class decision by choosing a cutoff. As no cutoff is optimal according to all possible performance criteria, cutoff choice involves a trade-off among different measures. Typically, a trade-off between a pair of criteria (e.g. sensitivity versus specificity) is visualized as a cutoff-parametrized curve in the plane spanned by the two measures. Popular examples of such trade-off visualizations include receiver operating characteristic (ROC) graphs, sensitivity/specificity curves, lift charts and precision/recall plots. Fawcett (2004) provides a general introduction into evaluating scoring classifiers with a focus on ROC graphs.

Although functions for drawing ROC graphs are provided by the Bioconductor project (https://www.bioconductor.org) or by the machine learning package Weka (https://www.cs.waikato.ac.nz/ml), for example, no comprehensive evaluation suite is available to date. ROCR is a flexible evaluation package for R (https://www.r-project.org), a statistical language that is widely used in biomedical data analysis. Our tool allows for creating cutoff-parametrized performance curves by freely combining two out of more than 25 performance measures (Table 1). Curves from different cross-validation or bootstrapping runs can be averaged by various methods. Standard deviations, standard errors and box plots are available to summarize the variability across the runs. The parametrization can be visualized by printing cutoff values at the corresponding curve positions, or by coloring the curve according to the cutoff. All components of a performance plot are adjustable using a flexible mechanism for dispatching optional arguments. Despite this flexibility, ROCR is easy to use, with only three commands and reasonable default values for all optional parameters.

In the example below, we will briefly introduce ROCR’s three commands—prediction, performance and plot—applied to a 10-fold cross-validation set of predictions and corresponding class labels from a study on predicting HIV coreceptor usage from the sequence of the viral envelope protein. After loading the dataset, a prediction object is created from the raw predictions and class labels.

Performance measures or combinations thereof are computed by invoking the performance method on this prediction object. The resulting performance object can be visualized using the method plot. For example, an ROC curve that trades off the rate of true positives against the rate of false positives is obtained as follows:

The optional parameter avg selects a particular form of performance curve averaging across the validation runs; the visualization of curve variability is determined with the parameter spread.estimate.

**Fig 1:** Visualizations of classifier performance (HIV coreceptor usage data): (a) receiver operating characteristic (ROC) curve; (b) peak accuracy across a range of cutoffs; (c) absolute difference between empirical and predicted rate of positives for windowed cutoff ranges, in order to evaluate how well the scores are calibrated as probability estimates. Owing to the probabilistic interpretation, cutoffs need to be in the interval [0,1], in contrast to other performance plots. (d) Score density estimates for the negative (solid) and positive (dotted) class.

Fig 1: Visualizations of classifier performance (HIV coreceptor usage data): (a) receiver operating characteristic (ROC) curve; (b) peak accuracy across a range of cutoffs; (c) absolute difference between empirical and predicted rate of positives for windowed cutoff ranges, in order to evaluate how well the scores are calibrated as probability estimates. Owing to the probabilistic interpretation, cutoffs need to be in the interval [0,1], in contrast to other performance plots. (d) Score density estimates for the negative (solid) and positive (dotted) class.

Issuing demo(ROCR) starts a demonstration of further graphical capabilities of ROCR. The command help(package=ROCR) points to the available help pages. In particular, a complete list of available performance measures can be obtained via help(performance). A reference manual can be downloaded from the ROCR website.

In conclusion, ROCR is a comprehensive tool for evaluating scoring classifiers and producing publication-quality figures. It allows for studying the intricacies inherent to many biological datasets and their implications on classifier performance.

Additional examples

Below you can find many additional examples of ROCR’s features of performance measurement and the possibilities in plotting. However, this is only a first taste. For more examples, please run demo(ROCR) and make sure the plotting dimensions are big enough.

Acknowledgement

Work at MPI supported by EU NoE BioSapiens (LSHG-CT-2003-503265).

References

Baldi, Pierre, and Søren Brunak. 2001. Bioinformatics: The Machine Learning Approach. MIT Press, Cambridge, MA.

Beerenwinkel, Niko, Martin Däumer, Mark Oette, Klaus Korn, Daniel Hoffmann, Rolf Kaiser, Thomas Lengauer, Joachim Selbig, and Hauke Walter. 2003. “Geno2pheno: estimating phenotypic drug resistance from HIV-1 genotypes.” Nucleic Acids Research 31 (13): 3850–5. https://doi.org/10.1093/nar/gkg575.

Beerenwinkel, Niko, Barbara Schmidt, Hauke Walter, Rolf Kaiser, Thomas Lengauer, Daniel Hoffmann, Klaus Korn, and Joachim Selbig. 2002. “Diversity and Complexity of HIV-1 Drug Resistance: A Bioinformatics Approach to Predicting Phenotype from Genotype.” Proceedings of the National Academy of Sciences 99 (12): 8271–6. https://doi.org/10.1073/pnas.112177799.

Fawcett, T. 2004. “ROC Graphs: Notes and Practical Considerations for Researchers.” In HPL-2003-4, 89–96. HP Labs, Palo Alto, CA.

Sing, Tobias, Niko Beerenwinkel, and Thomas Lengauer. 2004. “Learning Mixtures of Localized Rules by Maximizing the Area Under the ROC Curve.” In José Hernández-Orallo et al., editors, 1st International Workshop on ROC Analysis in Artificial Intelligence, 89–96.

ROCR/inst/doc/ROCR.Rmd0000644000176200001440000003132415134440267013763 0ustar liggesusers--- title: "ROCR: visualizing classifier performance in R" output: rmarkdown::html_vignette author: Tobias Sing, Oliver Sander, Niko Beerenwinkel, Thomas Lengauer abstract: ROCR is a package for evaluating and visualizing the performance of scoring classifiers in the statistical language R. It features over 25 performance measures that can be freely combined to create two-dimensional performance curves. Standard methods for investigating trade-offs between specific performance measures are available within a uniform framework, including receiver operating characteristic (ROC) graphs, precision/recall plots, lift charts and cost curves. ROCR integrates tightly with R's powerful graphics capabilities, thus allowing for highly adjustable plots. Being equipped with only three commands and reasonable default values for optional parameters, ROCR combines flexibility with ease of usage. vignette: > %\VignetteIndexEntry{ROCR} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} bibliography: references.bibtex --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` # Introduction ```{r setup} library(ROCR) ``` Pattern classification has become a central tool in bioinformatics, offering rapid insights into large data sets [[@Baldi2001]](#References). While one area of our work involves predicting phenotypic properties of HIV-1 from genotypic information [[@Beerenwinkel2002;@Beerenwinkel2003;@Sing04learningmixtures]](#References), scoring or ranking predictors are also vital in a wide range of other biological problems. Examples include microarray analysis (e.g. 
prediction of tissue condition based on gene expression), protein structural and functional characterization (remote homology detection, prediction of post-translational modifications and molecular function annotation based on sequence or structural motifs), genome annotation (gene finding and splice site identification), protein–ligand interactions (virtual screening and molecular docking) and structure–activity relationships (predicting bioavailability or toxicity of drug compounds). In many of these cases, considerable class skew, class-specific misclassification costs, and extensive noise due to variability in experimental assays complicate predictive modelling. Thus, careful predictor validation is compulsory. ```{r, echo = FALSE, results = 'asis'} table <- data.frame(group = c("Contingency ratios", "Discrete covariation measures", "Information retrieval measures", "Performance in ROC space", "Absolute scoring performance", "Cost measures"), measure = c("error rate, accuracy, sensitivity, specificity, true/false positive rate, fallout, miss, precision, recall, negative predictive value, prediction-conditioned fallout/miss.", "Phi/Matthews correlation coefficient, mutual information, Chi-squared test statistic, odds ratio", "F-measure, lift, precision-recall break-even point", "ROC convex hull, area under the ROC curve", "calibration error, mean cross-entropy, root mean-squared error", "expected cost, explicit cost")) knitr::kable(table, caption = "***Table 1:**Performance measures in the ROCR package*", col.names = c("",""), align = "l") ``` The real-valued output of scoring classifiers is turned into a binary class decision by choosing a cutoff. As no cutoff is optimal according to all possible performance criteria, cutoff choice involves a trade-off among different measures. Typically, a trade-off between a pair of criteria (e.g. sensitivity versus specificity) is visualized as a cutoff-parametrized curve in the plane spanned by the two measures. 
Popular examples of such trade-off visualizations include receiver operating characteristic (ROC) graphs, sensitivity/specificity curves, lift charts and precision/recall plots. [@Fawcett2004](#References) provides a general introduction into evaluating scoring classifiers with a focus on ROC graphs. Although functions for drawing ROC graphs are provided by the Bioconductor project (https://www.bioconductor.org) or by the machine learning package Weka (https://www.cs.waikato.ac.nz/ml), for example, no comprehensive evaluation suite is available to date. ROCR is a flexible evaluation package for R (https://www.r-project.org), a statistical language that is widely used in biomedical data analysis. Our tool allows for creating cutoff-parametrized performance curves by freely combining two out of more than 25 performance measures (Table 1). Curves from different cross-validation or bootstrapping runs can be averaged by various methods. Standard deviations, standard errors and box plots are available to summarize the variability across the runs. The parametrization can be visualized by printing cutoff values at the corresponding curve positions, or by coloring the curve according to the cutoff. All components of a performance plot are adjustable using a flexible mechanism for dispatching optional arguments. Despite this flexibility, ROCR is easy to use, with only three commands and reasonable default values for all optional parameters. In the example below, we will briefly introduce ROCR's three commands—prediction, performance and plot—applied to a 10-fold cross-validation set of predictions and corresponding class labels from a study on predicting HIV coreceptor usage from the sequence of the viral envelope protein. After loading the dataset, a prediction object is created from the raw predictions and class labels. 
```{r} data(ROCR.hiv) predictions <- ROCR.hiv$hiv.svm$predictions labels <- ROCR.hiv$hiv.svm$labels pred <- prediction(predictions, labels) pred ``` Performance measures or combinations thereof are computed by invoking the performance method on this prediction object. The resulting performance object can be visualized using the method plot. For example, an ROC curve that trades off the rate of true positives against the rate of false positives is obtained as follows: ```{r, fig.asp=1, fig.width=5, fig.align='center'} perf <- performance(pred, "tpr", "fpr") perf plot(perf, avg="threshold", spread.estimate="boxplot") ``` The optional parameter avg selects a particular form of performance curve averaging across the validation runs; the visualization of curve variability is determined with the parameter spread.estimate. ```{r, echo=FALSE, results='asis', fig.asp=0.35, fig.width=7, fig.align='center',fig.cap="***Fig 1:** Visualizations of classifier performance (HIV coreceptor usage data): (a) receiver operating characteristic (ROC) curve; (b) peak accuracy across a range of cutoffs; (c) absolute difference between empirical and predicted rate of positives for windowed cutoff ranges, in order to evaluate how well the scores are calibrated as probability estimates. Owing to the probabilistic interpretation, cutoffs need to be in the interval [0,1], in contrast to other performance plots. 
(d) Score density estimates for the negative (solid) and positive (dotted) class.*"} data(ROCR.hiv) pp.unnorm <- ROCR.hiv$hiv.svm$predictions ll <- ROCR.hiv$hiv.svm$labels # normalize predictions to 0..1 v <- unlist(pp.unnorm) pp <- lapply(pp.unnorm, function(run) {approxfun(c(min(v), max(v)), c(0,1))(run)}) par(mfrow=c(1,4)) pred<- prediction(pp, ll) perf <- performance(pred, "tpr", "fpr") plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, coloraxis.at=seq(0,1,by=0.2),) plot(perf, col="gray78", add=TRUE) plot(perf, avg= "threshold", colorize=TRUE, colorkey=FALSE,lwd= 3,,add=TRUE) mtext(paste0("(a)"), side = 3, adj = 0.01,line = 1) perf <- performance(pred, "acc") plot(perf, avg= "vertical", spread.estimate="boxplot", lwd=3,col='blue', show.spread.at= seq(0.1, 0.9, by=0.1),) mtext(paste0("(b)"), side = 3, adj = 0.01,line = 1) plot(performance(pred, "cal", window.size= 10), avg="vertical",) mtext(paste0("(c)"), side = 3, adj = 0.01,line = 1) plot(0,0,type="n", xlim= c(0,1), ylim=c(0,7), xlab="Cutoff", ylab="Density",) mtext(paste0("(d)"), side = 3, adj = 0.01,line = 1) for (runi in 1:length(pred@predictions)) { lines(density(pred@predictions[[runi]][pred@labels[[runi]]=="-1"]), col= "red") lines(density(pred@predictions[[runi]][pred@labels[[runi]]=="1"]), col="green") } ``` Issuing `demo(ROCR)` starts a demonstration of further graphical capabilities of ROCR. The command `help(package=ROCR)` points to the available help pages. In particular, a complete list of available performance measures can be obtained via help(performance). A reference manual can be downloaded from the ROCR website. In conclusion, ROCR is a comprehensive tool for evaluating scoring classifiers and producing publication-quality figures. It allows for studying the intricacies inherent to many biological datasets and their implications on classifier performance. 
## Additional examples Below you can find many additional examples of ROCR's features of performance measurement and the possibilites in plotting. However, this only a first taste. For more examples, please run `demo(ROCR)` and make sure the plotting deminsions are big enough. ### ROC curves, Precision/Recall graphs and more ... ```{r, fig.asp=1, fig.width=5, fig.align='center'} perf <- performance(pred, "tpr", "fpr") plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main= "With ROCR you can produce standard plots\nlike ROC curves ...") plot(perf, lty=3, col="grey78", add=TRUE) ``` ```{r, fig.asp=1, fig.width=5, fig.align='center'} perf <- performance(pred, "prec", "rec") plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main= "... Precision/Recall graphs ...") plot(perf, lty=3, col="grey78", add=TRUE) ``` ```{r, fig.asp=1, fig.width=5, fig.align='center'} perf <- performance(pred, "sens", "spec") plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main="... Sensitivity/Specificity plots ...") plot(perf, lty=3, col="grey78", add=TRUE) ``` ```{r, fig.asp=1, fig.width=5, fig.align='center'} perf <- performance(pred, "lift", "rpp") plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main= "... and Lift charts.") plot(perf, lty=3, col="grey78", add=TRUE) ``` ### Averaging over multiple predictions Multiple batches of predictions can be analyzed at the same time. ```{r} data(ROCR.xval) predictions <- ROCR.xval$predictions labels <- ROCR.xval$labels length(predictions) ``` ```{r} pred <- prediction(predictions, labels) perf <- performance(pred,'tpr','fpr') ``` This can be used for plotting averages using the `avg` argument. 
```{r, fig.asp=1, fig.width=5, fig.align='center'} plot(perf, colorize=TRUE, lwd=2, main='ROC curves from 10-fold cross-validation') ``` ```{r, fig.asp=1, fig.width=5, fig.align='center'} plot(perf, avg='vertical', spread.estimate='stderror', lwd=3,main='Vertical averaging + 1 standard error', col='blue') ``` ```{r, fig.asp=1, fig.width=5, fig.align='center'} plot(perf, avg='horizontal', spread.estimate='boxplot', lwd=3, main='Horizontal averaging + boxplots', col='blue') ``` ```{r, fig.asp=1, fig.width=5, fig.align='center'} plot(perf, avg='threshold', spread.estimate='stddev', lwd=2, main='Threshold averaging + 1 standard deviation', colorize=TRUE) ``` ### Cutoff stacking ```{r, fig.asp=1, fig.width=6, fig.align='center'} plot(perf, print.cutoffs.at=seq(0,1,by=0.2), text.cex=0.8, text.y=lapply(as.list(seq(0,0.5,by=0.05)), function(x) { rep(x,length(perf@x.values[[1]])) } ), col= as.list(terrain.colors(10)), text.col= as.list(terrain.colors(10)), points.col= as.list(terrain.colors(10)), main= "Cutoff stability") ``` ### Combination of performance measures Performance measures can be combined freely. ```{r} perf <- performance(pred,"pcmiss","lift") ``` ```{r, fig.asp=1, fig.width=5, fig.align='center'} plot(perf, colorize=TRUE, print.cutoffs.at=seq(0,1,by=0.1), text.adj=c(1.2,1.2), avg="threshold", lwd=3, main= "You can freely combine performance measures ...") ``` # Acknowledgement Work at MPI supported by EU NoE BioSapiens (LSHG-CT-2003-503265). 
# References ROCR/inst/doc/ROCR.R0000644000176200001440000001604315134440376013444 0ustar liggesusers## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(ROCR) ## ----echo = FALSE, results = 'asis'------------------------------------------- table <- data.frame(group = c("Contingency ratios", "Discrete covariation measures", "Information retrieval measures", "Performance in ROC space", "Absolute scoring performance", "Cost measures"), measure = c("error rate, accuracy, sensitivity, specificity, true/false positive rate, fallout, miss, precision, recall, negative predictive value, prediction-conditioned fallout/miss.", "Phi/Matthews correlation coefficient, mutual information, Chi-squared test statistic, odds ratio", "F-measure, lift, precision-recall break-even point", "ROC convex hull, area under the ROC curve", "calibration error, mean cross-entropy, root mean-squared error", "expected cost, explicit cost")) knitr::kable(table, caption = "***Table 1:**Performance measures in the ROCR package*", col.names = c("",""), align = "l") ## ----------------------------------------------------------------------------- data(ROCR.hiv) predictions <- ROCR.hiv$hiv.svm$predictions labels <- ROCR.hiv$hiv.svm$labels pred <- prediction(predictions, labels) pred ## ----fig.asp=1, fig.width=5, fig.align='center'------------------------------- perf <- performance(pred, "tpr", "fpr") perf plot(perf, avg="threshold", spread.estimate="boxplot") ## ----echo=FALSE, results='asis', fig.asp=0.35, fig.width=7, fig.align='center',fig.cap="***Fig 1:** Visualizations of classifier performance (HIV coreceptor usage data): (a) receiver operating characteristic (ROC) curve; (b) peak accuracy across a range of cutoffs; (c) absolute difference between empirical and predicted rate of positives for windowed cutoff ranges, in order 
to evaluate how well the scores are calibrated as probability estimates. Owing to the probabilistic interpretation, cutoffs need to be in the interval [0,1], in contrast to other performance plots. (d) Score density estimates for the negative (solid) and positive (dotted) class.*"---- data(ROCR.hiv) pp.unnorm <- ROCR.hiv$hiv.svm$predictions ll <- ROCR.hiv$hiv.svm$labels # normalize predictions to 0..1 v <- unlist(pp.unnorm) pp <- lapply(pp.unnorm, function(run) {approxfun(c(min(v), max(v)), c(0,1))(run)}) par(mfrow=c(1,4)) pred<- prediction(pp, ll) perf <- performance(pred, "tpr", "fpr") plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, coloraxis.at=seq(0,1,by=0.2),) plot(perf, col="gray78", add=TRUE) plot(perf, avg= "threshold", colorize=TRUE, colorkey=FALSE,lwd= 3,,add=TRUE) mtext(paste0("(a)"), side = 3, adj = 0.01,line = 1) perf <- performance(pred, "acc") plot(perf, avg= "vertical", spread.estimate="boxplot", lwd=3,col='blue', show.spread.at= seq(0.1, 0.9, by=0.1),) mtext(paste0("(b)"), side = 3, adj = 0.01,line = 1) plot(performance(pred, "cal", window.size= 10), avg="vertical",) mtext(paste0("(c)"), side = 3, adj = 0.01,line = 1) plot(0,0,type="n", xlim= c(0,1), ylim=c(0,7), xlab="Cutoff", ylab="Density",) mtext(paste0("(d)"), side = 3, adj = 0.01,line = 1) for (runi in 1:length(pred@predictions)) { lines(density(pred@predictions[[runi]][pred@labels[[runi]]=="-1"]), col= "red") lines(density(pred@predictions[[runi]][pred@labels[[runi]]=="1"]), col="green") } ## ----fig.asp=1, fig.width=5, fig.align='center'------------------------------- perf <- performance(pred, "tpr", "fpr") plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main= "With ROCR you can produce standard plots\nlike ROC curves ...") plot(perf, lty=3, col="grey78", add=TRUE) ## ----fig.asp=1, fig.width=5, fig.align='center'------------------------------- perf <- performance(pred, "prec", "rec") plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main= "... 
Precision/Recall graphs ...") plot(perf, lty=3, col="grey78", add=TRUE) ## ----fig.asp=1, fig.width=5, fig.align='center'------------------------------- perf <- performance(pred, "sens", "spec") plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main="... Sensitivity/Specificity plots ...") plot(perf, lty=3, col="grey78", add=TRUE) ## ----fig.asp=1, fig.width=5, fig.align='center'------------------------------- perf <- performance(pred, "lift", "rpp") plot(perf, avg= "threshold", colorize=TRUE, lwd= 3, main= "... and Lift charts.") plot(perf, lty=3, col="grey78", add=TRUE) ## ----------------------------------------------------------------------------- data(ROCR.xval) predictions <- ROCR.xval$predictions labels <- ROCR.xval$labels length(predictions) ## ----------------------------------------------------------------------------- pred <- prediction(predictions, labels) perf <- performance(pred,'tpr','fpr') ## ----fig.asp=1, fig.width=5, fig.align='center'------------------------------- plot(perf, colorize=TRUE, lwd=2, main='ROC curves from 10-fold cross-validation') ## ----fig.asp=1, fig.width=5, fig.align='center'------------------------------- plot(perf, avg='vertical', spread.estimate='stderror', lwd=3,main='Vertical averaging + 1 standard error', col='blue') ## ----fig.asp=1, fig.width=5, fig.align='center'------------------------------- plot(perf, avg='horizontal', spread.estimate='boxplot', lwd=3, main='Horizontal averaging + boxplots', col='blue') ## ----fig.asp=1, fig.width=5, fig.align='center'------------------------------- plot(perf, avg='threshold', spread.estimate='stddev', lwd=2, main='Threshold averaging + 1 standard deviation', colorize=TRUE) ## ----fig.asp=1, fig.width=6, fig.align='center'------------------------------- plot(perf, print.cutoffs.at=seq(0,1,by=0.2), text.cex=0.8, text.y=lapply(as.list(seq(0,0.5,by=0.05)), function(x) { rep(x,length(perf@x.values[[1]])) } ), col= as.list(terrain.colors(10)), text.col= as.list(terrain.colors(10)), 
points.col= as.list(terrain.colors(10)), main= "Cutoff stability") ## ----------------------------------------------------------------------------- perf <- performance(pred,"pcmiss","lift") ## ----fig.asp=1, fig.width=5, fig.align='center'------------------------------- plot(perf, colorize=TRUE, print.cutoffs.at=seq(0,1,by=0.1), text.adj=c(1.2,1.2), avg="threshold", lwd=3, main= "You can freely combine performance measures ...") ROCR/README.md0000644000176200001440000001051015134431355012277 0ustar liggesusers# ROCR [![CRAN Status](https://www.r-pkg.org/badges/version/ROCR)](https://CRAN.r-project.org/package=ROCR) [![codecov](https://app.codecov.io/gh/ipa-tys/ROCR/branch/master/graph/badge.svg)](https://app.codecov.io/gh/ipa-tys/ROCR) *visualizing classifier performance in R, with only 3 commands* ![](https://raw.githubusercontent.com/ipa-tys/ROCR/rocr-images/ourplot_website.gif) ### Please support our work by citing the ROCR article in your publications: ***Sing T, Sander O, Beerenwinkel N, Lengauer T. [2005] ROCR: visualizing classifier performance in R. Bioinformatics 21(20):3940-1.*** Free full text: http://bioinformatics.oxfordjournals.org/content/21/20/3940.full [](https://www.mpi-inf.mpg.de/home/) `ROCR` was originally developed at the [Max Planck Institute for Informatics](https://www.mpi-inf.mpg.de/home/) ## Introduction `ROCR` (with obvious pronounciation) is an R package for evaluating and visualizing classifier performance. It is... - ...easy to use: adds only three new commands to R. - ...flexible: integrates tightly with R's built-in graphics facilities. - ...powerful: Currently, 28 performance measures are implemented, which can be freely combined to form parametric curves such as ROC curves, precision/recall curves, or lift curves. Many options such as curve averaging (for cross-validation or bootstrap), augmenting the averaged curves by standard error bar or boxplots, labeling cutoffs to the curve, or coloring curves according to cutoff. 
### Performance measures that `ROCR` knows: Accuracy, error rate, true positive rate, false positive rate, true negative rate, false negative rate, sensitivity, specificity, recall, positive predictive value, negative predictive value, precision, fallout, miss, phi correlation coefficient, Matthews correlation coefficient, mutual information, chi square statistic, odds ratio, lift value, precision/recall F measure, ROC convex hull, area under the ROC curve, precision/recall break-even point, calibration error, mean cross-entropy, root mean squared error, SAR measure, expected cost, explicit cost. ### `ROCR` features: ROC curves, precision/recall plots, lift charts, cost curves, custom curves by freely selecting one performance measure for the x axis and one for the y axis, handling of data from cross-validation or bootstrapping, curve averaging (vertically, horizontally, or by threshold), standard error bars, box plots, curves that are color-coded by cutoff, printing threshold values on the curve, tight integration with Rs plotting facilities (making it easy to adjust plots or to combine multiple plots), fully customizable, easy to use (only 3 commands). ## Installation of `ROCR` The most straightforward way to install and use `ROCR` is to install it from `CRAN` by starting `R` and using the `install.packages` function: ``` install.packages("ROCR") ``` Alternatively you can install it from command line using the tar ball like this: ``` R CMD INSTALL ROCR_*.tar.gz ``` ## Getting started from within R ... 
``` library(ROCR) demo(ROCR) help(package=ROCR) ``` ## Examples Using ROCR's 3 commands to produce a simple ROC plot: ``` pred <- prediction(predictions, labels) perf <- performance(pred, measure = "tpr", x.measure = "fpr") plot(perf, col=rainbow(10)) ``` ## Documentation - The Reference Manual found [here](https://CRAN.r-project.org/package=ROCR) - Slide deck for a tutorial talk (feel free to re-use for teaching, but please give appropriate credits and write us an email) [[PPT](https://raw.githubusercontent.com/ipa-tys/ROCR/rocr-images/ROCR_Talk_Tobias_Sing.ppt)] - A few pointers to the literature on classifier evaluation ## Contact Questions, comments, and suggestions are very welcome. Open an issue on GitHub and we can discuss. We are also interested in seeing how ROCR is used in publications. Thus, if you have prepared a paper using ROCR we'd be happy to know. ROCR/build/0000755000176200001440000000000015134440376012125 5ustar liggesusersROCR/build/vignette.rds0000644000176200001440000000027515134440376014470 0ustar liggesusers‹‹àb```b`a’Ì@&³0r€˜‘…Hsù;é妠‰³€ÄÑÄ8Áj3JrsÐ$Ø †í‹BdA¦0!©bÍKÌM-FÓÊî’Zš—þ‡]?ãt'{§V–çÁô ¨aƒ9ß-3'foHf œÃàâe2¡»Ã|÷så—ëÁüÀ Û ñÐ=šœ“XŒîQ®”Ä’D½´" ~»u+ROCR/man/0000755000176200001440000000000013703321565011577 5ustar liggesusersROCR/man/plot-methods.Rd0000644000176200001440000001455015134440363014510 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zzz.R \name{plot-methods} \alias{plot-methods} \alias{plot,performance,missing-method} \alias{plot.performance} \title{Plot method for performance objects} \usage{ \S4method{plot}{performance,missing}( x, y, ..., avg = "none", spread.estimate = "none", spread.scale = 1, show.spread.at = c(), colorize = FALSE, colorize.palette = rev(rainbow(256, start = 0, end = 4/6)), colorkey = colorize, colorkey.relwidth = 0.25, colorkey.pos = "right", print.cutoffs.at = c(), cutoff.label.function = function(x) { round(x, 2) }, downsampling = 0, add = FALSE ) 
\method{plot}{performance}(...) } \arguments{ \item{x}{an object of class \code{performance}} \item{y}{not used} \item{...}{Optional graphical parameters to adjust different components of the performance plot. Parameters are directed to their target component by prefixing them with the name of the component (\code{component.parameter}, e.g. \code{text.cex}). The following components are available: \code{xaxis}, \code{yaxis}, \code{coloraxis}, \code{box} (around the plotting region), \code{points}, \code{text}, \code{plotCI} (error bars), \code{boxplot}. The names of these components are influenced by the R functions that are used to create them. Thus, \code{par(component)} can be used to see which parameters are available for a given component (with the expection of the three axes; use \code{par(axis)} here). To adjust the canvas or the performance curve(s), the standard \code{plot} parameters can be used without any prefix.} \item{avg}{If the performance object describes several curves (from cross-validation runs or bootstrap evaluations of one particular method), the curves from each of the runs can be averaged. Allowed values are \code{none} (plot all curves separately), \code{horizontal} (horizontal averaging), \code{vertical} (vertical averaging), and \code{threshold} (threshold (=cutoff) averaging). Note that while threshold averaging is always feasible, vertical and horizontal averaging are not well-defined if the graph cannot be represented as a function x->y and y->x, respectively.} \item{spread.estimate}{When curve averaging is enabled, the variation around the average curve can be visualized as standard error bars (\code{stderror}), standard deviation bars (\code{stddev}), or by using box plots (\code{boxplot}). 
Note that the function \code{plotCI}, which is used internally by ROCR to draw error bars, might raise a warning if the spread of the curves at certain positions is 0.} \item{spread.scale}{For \code{stderror} or \code{stddev}, this is a scalar factor to be multiplied with the length of the standard error/deviation bar. For example, under normal assumptions, \code{spread.scale=2} can be used to get approximate 95\% confidence intervals.} \item{show.spread.at}{For vertical averaging, this vector determines the x positions for which the spread estimates should be visualized. In contrast, for horizontal and threshold averaging, the y positions and cutoffs are determined, respectively. By default, spread estimates are shown at 11 equally spaced positions.} \item{colorize}{This logical determines whether the curve(s) should be colorized according to cutoff.} \item{colorize.palette}{If curve colorizing is enabled, this determines the color palette onto which the cutoff range is mapped.} \item{colorkey}{If true, a color key is drawn into the 4\% border region (default of \code{par(xaxs)} and \code{par(yaxs)}) of the plot. The color key visualizes the mapping from cutoffs to colors.} \item{colorkey.relwidth}{Scalar between 0 and 1 that determines the fraction of the 4\% border region that is occupied by the colorkey.} \item{colorkey.pos}{Determines if the colorkey is drawn vertically at the \code{right} side, or horizontally at the \code{top} of the plot.} \item{print.cutoffs.at}{This vector specifies the cutoffs which should be printed as text along the curve at the corresponding curve positions.} \item{cutoff.label.function}{By default, cutoff annotations along the curve or at the color key are rounded to two decimal places before printing. Using a custom \code{cutoff.label.function}, any other transformation can be performed on the cutoffs instead (e.g. 
rounding with different precision or taking the logarithm).} \item{downsampling}{ROCR can efficiently compute most performance measures even for data sets with millions of elements. However, plotting of large data sets can be slow and lead to PS/PDF documents of considerable size. In that case, performance curves that are indistinguishable from the original can be obtained by using only a fraction of the computed performance values. Values for downsampling between 0 and 1 indicate the fraction of the original data set size to which the performance object should be downsampled, integers above 1 are interpreted as the actual number of performance values to which the curve(s) should be downsampled.} \item{add}{If \code{TRUE}, the curve(s) is/are added to an already existing plot; otherwise a new plot is drawn.} } \description{ This is the method to plot all objects of class performance. } \examples{ # plotting a ROC curve: library(ROCR) data(ROCR.simple) pred <- prediction( ROCR.simple$predictions, ROCR.simple$labels ) pred perf <- performance( pred, "tpr", "fpr" ) perf plot( perf ) # To entertain your children, make your plots nicer # using ROCR's flexible parameter passing mechanisms # (much cheaper than a finger painting set) par(bg="lightblue", mai=c(1.2,1.5,1,1)) plot(perf, main="ROCR fingerpainting toolkit", colorize=TRUE, xlab="Mary's axis", ylab="", box.lty=7, box.lwd=5, box.col="gold", lwd=17, colorkey.relwidth=0.5, xaxis.cex.axis=2, xaxis.col='blue', xaxis.col.axis="blue", yaxis.col='green', yaxis.cex.axis=2, yaxis.at=c(0,0.5,0.8,0.85,0.9,1), yaxis.las=1, xaxis.lwd=2, yaxis.lwd=3, yaxis.col.axis="orange", cex.lab=2, cex.main=2) } \references{ A detailed list of references can be found on the ROCR homepage at \url{https://ipa-tys.github.io/ROCR/}. 
} \seealso{ \code{\link{prediction}}, \code{\link{performance}}, \code{\link{prediction-class}}, \code{\link{performance-class}} } \author{ Tobias Sing \email{tobias.sing@gmail.com}, Oliver Sander \email{osander@gmail.com} } ROCR/man/performance.Rd0000644000176200001440000002720415134431714014372 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/performance.R \name{performance} \alias{performance} \title{Function to create performance objects} \usage{ performance(prediction.obj, measure, x.measure = "cutoff", ...) } \arguments{ \item{prediction.obj}{An object of class \code{prediction}.} \item{measure}{Performance measure to use for the evaluation. A complete list of the performance measures that are available for \code{measure} and \code{x.measure} is given in the 'Details' section.} \item{x.measure}{A second performance measure. If different from the default, a two-dimensional curve, with \code{x.measure} taken to be the unit in direction of the x axis, and \code{measure} to be the unit in direction of the y axis, is created. This curve is parametrized with the cutoff.} \item{...}{Optional arguments (specific to individual performance measures).} } \value{ An S4 object of class \code{performance}. } \description{ All kinds of predictor evaluations are performed using this function. } \details{ Here is the list of available performance measures. Let Y and \eqn{\hat{Y}}{Yhat} be random variables representing the class and the prediction for a randomly drawn sample, respectively. We denote by \eqn{\oplus}{+} and \eqn{\ominus}{-} the positive and negative class, respectively. Further, we use the following abbreviations for empirical quantities: P (# positive samples), N (# negative samples), TP (# true positives), TN (# true negatives), FP (# false positives), FN (# false negatives). \describe{ \item{\code{acc}:}{Accuracy. \eqn{P(\hat{Y}=Y)}{P(Yhat = Y)}. 
Estimated as: \eqn{\frac{TP+TN}{P+N}}{(TP+TN)/(P+N)}.} \item{\code{err}:}{Error rate. \eqn{P(\hat{Y}\ne Y)}{P(Yhat != Y)}. Estimated as: \eqn{\frac{FP+FN}{P+N}}{(FP+FN)/(P+N)}.} \item{\code{fpr}:}{False positive rate. \eqn{P(\hat{Y}=\oplus | Y = \ominus)}{P(Yhat = + | Y = -)}. Estimated as: \eqn{\frac{FP}{N}}{FP/N}.} \item{\code{fall}:}{Fallout. Same as \code{fpr}.} \item{\code{tpr}:}{True positive rate. \eqn{P(\hat{Y}=\oplus|Y=\oplus)}{P(Yhat = + | Y = +)}. Estimated as: \eqn{\frac{TP}{P}}{TP/P}.} \item{\code{rec}:}{Recall. Same as \code{tpr}.} \item{\code{sens}:}{Sensitivity. Same as \code{tpr}.} \item{\code{fnr}:}{False negative rate. \eqn{P(\hat{Y}=\ominus|Y=\oplus)}{P(Yhat = - | Y = +)}. Estimated as: \eqn{\frac{FN}{P}}{FN/P}.} \item{\code{miss}:}{Miss. Same as \code{fnr}.} \item{\code{tnr}:}{True negative rate. \eqn{P(\hat{Y} = \ominus|Y=\ominus)}{P(Yhat = - | Y = -)}.} \item{\code{spec}:}{Specificity. Same as \code{tnr}.} \item{\code{ppv}:}{Positive predictive value. \eqn{P(Y=\oplus|\hat{Y}=\oplus)}{P(Y = + | Yhat = +)}. Estimated as: \eqn{\frac{TP}{TP+FP}}{TP/(TP+FP)}.} \item{\code{prec}:}{Precision. Same as \code{ppv}.} \item{\code{npv}:}{Negative predictive value. \eqn{P(Y=\ominus|\hat{Y}=\ominus)}{P(Y = - | Yhat = -)}. Estimated as: \eqn{\frac{TN}{TN+FN}}{TN/(TN+FN)}.} \item{\code{pcfall}:}{Prediction-conditioned fallout. \eqn{P(Y=\ominus|\hat{Y}=\oplus)}{P(Y = - | Yhat = +)}. Estimated as: \eqn{\frac{FP}{TP+FP}}{FP/(TP+FP)}.} \item{\code{pcmiss}:}{Prediction-conditioned miss. \eqn{P(Y=\oplus|\hat{Y}=\ominus)}{P(Y = + | Yhat = -)}. Estimated as: \eqn{\frac{FN}{TN+FN}}{FN/(TN+FN)}.} \item{\code{rpp}:}{Rate of positive predictions. \eqn{P( \hat{Y} = \oplus)}{P(Yhat = +)}. Estimated as: (TP+FP)/(TP+FP+TN+FN).} \item{\code{rnp}:}{Rate of negative predictions. \eqn{P( \hat{Y} = \ominus)}{P(Yhat = -)}. Estimated as: (TN+FN)/(TP+FP+TN+FN).} \item{\code{phi}:}{Phi correlation coefficient. 
\eqn{\frac{TP \cdot TN - FP \cdot FN}{\sqrt{ (TP+FN) \cdot (TN+FP) \cdot (TP+FP) \cdot (TN+FN)}}}{(TP*TN - FP*FN)/(sqrt((TP+FN)*(TN+FP)*(TP+FP)*(TN+FN)))}. Yields a number between -1 and 1, with 1 indicating a perfect prediction, 0 indicating a random prediction. Values below 0 indicate a worse than random prediction.} \item{\code{mat}:}{Matthews correlation coefficient. Same as \code{phi}.} \item{\code{mi}:}{Mutual information. \eqn{I(\hat{Y},Y) := H(Y) - H(Y|\hat{Y})}{I(Yhat, Y) := H(Y) - H(Y | Yhat)}, where H is the (conditional) entropy. Entropies are estimated naively (no bias correction).} \item{\code{chisq}:}{Chi square test statistic. \code{?chisq.test} for details. Note that R might raise a warning if the sample size is too small.} \item{\code{odds}:}{Odds ratio. \eqn{\frac{TP \cdot TN}{FN \cdot FP}}{(TP*TN)/(FN*FP)}. Note that odds ratio produces Inf or NA values for all cutoffs corresponding to FN=0 or FP=0. This can substantially decrease the plotted cutoff region.} \item{\code{lift}:}{Lift value. \eqn{\frac{P(\hat{Y}=\oplus|Y=\oplus)}{P(\hat{Y}=\oplus)}}{P(Yhat = + | Y = +)/P(Yhat = +)}.} \item{\code{f}:}{Precision-recall F measure (van Rijsbergen, 1979). Weighted harmonic mean of precision (P) and recall (R). \eqn{F = \frac{1}{\alpha \frac{1}{P} + (1-\alpha)\frac{1}{R}}}{F = 1/ (alpha*1/P + (1-alpha)*1/R)}. If \eqn{\alpha=\frac{1}{2}}{alpha=1/2}, the mean is balanced. A frequent equivalent formulation is \eqn{F = \frac{(\beta^2+1) \cdot P \cdot R}{R + \beta^2 \cdot P}}{F = (beta^2+1) * P * R / (R + beta^2 * P)}. In this formulation, the mean is balanced if \eqn{\beta=1}{beta=1}. Currently, ROCR only accepts the alpha version as input (e.g. \eqn{\alpha=0.5}{alpha=0.5}). If no value for alpha is given, the mean will be balanced by default.} \item{\code{rch}:}{ROC convex hull. A ROC (=\code{tpr} vs \code{fpr}) curve with concavities (which represent suboptimal choices of cutoff) removed (Fawcett 2001). 
Since the result is already a parametric performance curve, it cannot be used in combination with other measures.} \item{\code{auc}:}{Area under the ROC curve. This is equal to the value of the Wilcoxon-Mann-Whitney test statistic and also the probability that the classifier will score a randomly drawn positive sample higher than a randomly drawn negative sample. Since the output of \code{auc} is cutoff-independent, this measure cannot be combined with other measures into a parametric curve. The partial area under the ROC curve up to a given false positive rate can be calculated by passing the optional parameter \code{fpr.stop=0.5} (or any other value between 0 and 1) to \code{performance}.} \item{\code{aucpr}:}{Area under the Precision/Recall curve. Since the output of \code{aucpr} is cutoff-independent, this measure cannot be combined with other measures into a parametric curve.} \item{\code{prbe}:}{Precision-recall break-even point. The cutoff(s) where precision and recall are equal. At this point, positive and negative predictions are made at the same rate as their prevalence in the data. Since the output of \code{prbe} is just a cutoff-independent scalar, this measure cannot be combined with other measures into a parametric curve.} \item{\code{cal}:}{Calibration error. The calibration error is the absolute difference between predicted confidence and actual reliability. This error is estimated at all cutoffs by sliding a window across the range of possible cutoffs. The default window size of 100 can be adjusted by passing the optional parameter \code{window.size=200} to \code{performance}. E.g., if for several positive samples the output of the classifier is around 0.75, you might expect from a well-calibrated classifier that the fraction of them which is correctly predicted as positive is also around 0.75. In a well-calibrated classifier, the probabilistic confidence estimates are realistic. Only for use with probabilistic output (i.e. 
scores between 0 and 1).} \item{\code{mxe}:}{Mean cross-entropy. Only for use with probabilistic output. \eqn{MXE :=-\frac{1}{P+N}( \sum_{y_i=\oplus} ln(\hat{y}_i) + \sum_{y_i=\ominus} ln(1-\hat{y}_i))}{MXE := - 1/(P+N) \sum_{y_i=+} ln(yhat_i) + \sum_{y_i=-} ln(1-yhat_i)}. Since the output of \code{mxe} is just a cutoff-independent scalar, this measure cannot be combined with other measures into a parametric curve.} \item{\code{rmse}:}{Root-mean-squared error. Only for use with numerical class labels. \eqn{RMSE:=\sqrt{\frac{1}{P+N}\sum_i (y_i - \hat{y}_i)^2}}{RMSE := sqrt(1/(P+N) \sum_i (y_i - yhat_i)^2)}. Since the output of \code{rmse} is just a cutoff-independent scalar, this measure cannot be combined with other measures into a parametric curve.} \item{\code{sar}:}{Score combinining performance measures of different characteristics, in the attempt of creating a more "robust" measure (cf. Caruana R., ROCAI2004): SAR = 1/3 * ( Accuracy + Area under the ROC curve + Root mean-squared error ).} \item{\code{ecost}:}{Expected cost. For details on cost curves, cf. Drummond&Holte 2000,2004. \code{ecost} has an obligatory x axis, the so-called 'probability-cost function'; thus it cannot be combined with other measures. While using \code{ecost} one is interested in the lower envelope of a set of lines, it might be instructive to plot the whole set of lines in addition to the lower envelope. An example is given in \code{demo(ROCR)}.} \item{\code{cost}:}{Cost of a classifier when class-conditional misclassification costs are explicitly given. Accepts the optional parameters \code{cost.fp} and \code{cost.fn}, by which the costs for false positives and negatives can be adjusted, respectively. 
By default, both are set to 1.} } } \note{ Here is how to call \code{performance()} to create some standard evaluation plots: \describe{ \item{ROC curves:}{measure="tpr", x.measure="fpr".} \item{Precision/recall graphs:}{measure="prec", x.measure="rec".} \item{Sensitivity/specificity plots:}{measure="sens", x.measure="spec".} \item{Lift charts:}{measure="lift", x.measure="rpp".} } } \examples{ # computing a simple ROC curve (x-axis: fpr, y-axis: tpr) library(ROCR) data(ROCR.simple) pred <- prediction( ROCR.simple$predictions, ROCR.simple$labels) pred perf <- performance(pred,"tpr","fpr") perf plot(perf) # precision/recall curve (x-axis: recall, y-axis: precision) perf <- performance(pred, "prec", "rec") perf plot(perf) # sensitivity/specificity curve (x-axis: specificity, # y-axis: sensitivity) perf <- performance(pred, "sens", "spec") perf plot(perf) } \references{ A detailed list of references can be found on the ROCR homepage at \url{https://ipa-tys.github.io/ROCR/}. } \seealso{ \code{\link{prediction}}, \code{\link{prediction-class}}, \code{\link{performance-class}}, \code{\link{plot.performance}} } \author{ Tobias Sing \email{tobias.sing@gmail.com}, Oliver Sander \email{osander@gmail.com} } ROCR/man/ROCR.xval.Rd0000644000176200001440000000230113644317760013606 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zzz.R \docType{data} \name{ROCR.xval} \alias{ROCR.xval} \title{Data set: Artificial cross-validation data for use with ROCR} \format{ A two element list. The first element, \code{ROCR.xval$predictions}, is itself a 10 element list. Each of these 10 elements is a vector of numerical predictions for each cross-validation run. Likewise, the second list entry, \code{ROCR.xval$labels} is a 10 element list in which each element is a vector of true class labels corresponding to the predictions. 
} \usage{ data(ROCR.xval) } \description{ A mock data set containing 10 sets of predictions and corresponding labels as would be obtained from 10-fold cross-validation. } \examples{ # plot ROC curves for several cross-validation runs (dotted # in grey), overlaid by the vertical average curve and boxplots # showing the vertical spread around the average. library(ROCR) data(ROCR.xval) pred <- prediction(ROCR.xval$predictions, ROCR.xval$labels) pred perf <- performance(pred,"tpr","fpr") perf plot(perf,col="grey82",lty=3) plot(perf,lwd=3,avg="vertical",spread.estimate="boxplot",add=TRUE) } \keyword{datasets} ROCR/man/performance-class.Rd0000644000176200001440000000600415134431710015464 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zzz.R \docType{class} \name{performance-class} \alias{performance-class} \title{Class \code{performance}} \description{ Object to capture the result of a performance evaluation, optionally collecting evaluations from several cross-validation or bootstrapping runs. } \details{ A \code{performance} object can capture information from four different evaluation scenarios: \itemize{ \item The behaviour of a cutoff-dependent performance measure across the range of all cutoffs (e.g. \code{performance( predObj, 'acc' )} ). Here, \code{x.values} contains the cutoffs, \code{y.values} the corresponding values of the performance measure, and \code{alpha.values} is empty.\cr \item The trade-off between two performance measures across the range of all cutoffs (e.g. \code{performance( predObj, 'tpr', 'fpr' )} ). In this case, the cutoffs are stored in \code{alpha.values}, while \code{x.values} and \code{y.values} contain the corresponding values of the two performance measures.\cr \item A performance measure that comes along with an obligatory second axis (e.g. \code{performance( predObj, 'ecost' )} ). 
Here, the measure values are stored in \code{y.values}, while the corresponding values of the obligatory axis are stored in \code{x.values}, and \code{alpha.values} is empty.\cr \item A performance measure whose value is just a scalar (e.g. \code{performance( predObj, 'auc' )} ). The value is then stored in \code{y.values}, while \code{x.values} and \code{alpha.values} are empty. } } \section{Slots}{ \describe{ \item{\code{x.name}}{Performance measure used for the x axis.} \item{\code{y.name}}{Performance measure used for the y axis.} \item{\code{alpha.name}}{Name of the unit that is used to create the parametrized curve. Currently, curves can only be parametrized by cutoff, so \code{alpha.name} is either \code{none} or \code{cutoff}.} \item{\code{x.values}}{A list in which each entry contains the x values of the curve of this particular cross-validation run. \code{x.values[[i]]}, \code{y.values[[i]]}, and \code{alpha.values[[i]]} correspond to each other.} \item{\code{y.values}}{A list in which each entry contains the y values of the curve of this particular cross-validation run.} \item{\code{alpha.values}}{A list in which each entry contains the cutoff values of the curve of this particular cross-validation run.} }} \section{Objects from the Class}{ Objects can be created by using the \code{performance} function. } \references{ A detailed list of references can be found on the ROCR homepage at \url{https://ipa-tys.github.io/ROCR/}. 
} \seealso{ \code{\link{prediction}} \code{\link{performance}}, \code{\link{prediction-class}}, \code{\link{plot.performance}} } \author{ Tobias Sing \email{tobias.sing@gmail.com}, Oliver Sander \email{osander@gmail.com} } ROCR/man/ROCR.simple.Rd0000644000176200001440000000154613644317760014137 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zzz.R \docType{data} \name{ROCR.simple} \alias{ROCR.simple} \title{Data set: Simple artificial prediction data for use with ROCR} \format{ A two element list. The first element, \code{ROCR.simple$predictions}, is a vector of numerical predictions. The second element, \code{ROCR.simple$labels}, is a vector of corresponding class labels. } \usage{ data(ROCR.simple) } \description{ A mock data set containing a simple set of predictions and corresponding class labels. } \examples{ # plot a ROC curve for a single prediction run # and color the curve according to cutoff. library(ROCR) data(ROCR.simple) pred <- prediction(ROCR.simple$predictions, ROCR.simple$labels) pred perf <- performance(pred,"tpr","fpr") perf plot(perf,colorize=TRUE) } \keyword{datasets} ROCR/man/prediction-class.Rd0000644000176200001440000000451315134431726015335 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zzz.R \docType{class} \name{prediction-class} \alias{prediction-class} \title{Class \code{prediction}} \description{ Object to encapsulate numerical predictions together with the corresponding true class labels, optionally collecting predictions and labels for several cross-validation or bootstrapping runs. } \section{Slots}{ \describe{ \item{\code{predictions}}{A list, in which each element is a vector of predictions (the list has length > 1 for x-validation data.} \item{\code{labels}}{Analogously, a list in which each element is a vector of true class labels.} \item{\code{cutoffs}}{A list in which each element is a vector of all necessary cutoffs. 
Each cutoff vector consists of the predicted scores (duplicates removed), in descending order.} \item{\code{fp}}{A list in which each element is a vector of the number (not the rate!) of false positives induced by the cutoffs given in the corresponding 'cutoffs' list entry.} \item{\code{tp}}{As fp, but for true positives.} \item{\code{tn}}{As fp, but for true negatives.} \item{\code{fn}}{As fp, but for false negatives.} \item{\code{n.pos}}{A list in which each element contains the number of positive samples in the given x-validation run.} \item{\code{n.neg}}{As n.pos, but for negative samples.} \item{\code{n.pos.pred}}{A list in which each element is a vector of the number of samples predicted as positive at the cutoffs given in the corresponding 'cutoffs' entry.} \item{\code{n.neg.pred}}{As n.pos.pred, but for negatively predicted samples.} }} \note{ Every \code{prediction} object contains information about the 2x2 contingency table consisting of tp,tn,fp, and fn, along with the marginal sums n.pos,n.neg,n.pos.pred,n.neg.pred, because these form the basis for many derived performance measures. } \section{Objects from the Class}{ Objects can be created by using the \code{prediction} function. } \references{ A detailed list of references can be found on the ROCR homepage at \url{https://ipa-tys.github.io/ROCR/}. 
} \seealso{ \code{\link{prediction}}, \code{\link{performance}}, \code{\link{performance-class}}, \code{\link{plot.performance}} } \author{ Tobias Sing \email{tobias.sing@gmail.com}, Oliver Sander \email{osander@gmail.com} } ROCR/man/prediction.Rd0000644000176200001440000000645415134431734014237 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/prediction.R \name{prediction} \alias{prediction} \title{Function to create prediction objects} \usage{ prediction(predictions, labels, label.ordering = NULL) } \arguments{ \item{predictions}{A vector, matrix, list, or data frame containing the predictions.} \item{labels}{A vector, matrix, list, or data frame containing the true class labels. Must have the same dimensions as \code{predictions}.} \item{label.ordering}{The default ordering (cf.details) of the classes can be changed by supplying a vector containing the negative and the positive class label.} } \value{ An S4 object of class \code{prediction}. } \description{ Every classifier evaluation using ROCR starts with creating a \code{prediction} object. This function is used to transform the input data (which can be in vector, matrix, data frame, or list form) into a standardized format. } \details{ \code{predictions} and \code{labels} can simply be vectors of the same length. However, in the case of cross-validation data, different cross-validation runs can be provided as the *columns* of a matrix or data frame, or as the entries of a list. In the case of a matrix or data frame, all cross-validation runs must have the same length, whereas in the case of a list, the lengths can vary across the cross-validation runs. Internally, as described in section 'Value', all of these input formats are converted to list representation. 
Since scoring classifiers give relative tendencies towards a negative (low scores) or positive (high scores) class, it has to be declared which class label denotes the negative, and which the positive class. Ideally, labels should be supplied as ordered factor(s), the lower level corresponding to the negative class, the upper level to the positive class. If the labels are factors (unordered), numeric, logical or characters, ordering of the labels is inferred from R's built-in \code{<} relation (e.g. 0 < 1, -1 < 1, 'a' < 'b', FALSE < TRUE). Use \code{label.ordering} to override this default ordering. Please note that the ordering can be locale-dependent e.g. for character labels '-1' and '1'. Currently, ROCR supports only binary classification (extensions toward multiclass classification are scheduled for the next release, however). If there are more than two distinct label symbols, execution stops with an error message. If all predictions use the same two symbols that are used for the labels, categorical predictions are assumed. If there are more than two predicted values, but all numeric, continuous predictions are assumed (i.e. a scoring classifier). Otherwise, if more than two symbols occur in the predictions, and not all of them are numeric, execution stops with an error message. } \examples{ # create a simple prediction object library(ROCR) data(ROCR.simple) pred <- prediction(ROCR.simple$predictions,ROCR.simple$labels) pred } \references{ A detailed list of references can be found on the ROCR homepage at \url{https://ipa-tys.github.io/ROCR/}. 
} \seealso{ \code{\link{prediction-class}}, \code{\link{performance}}, \code{\link{performance-class}}, \code{\link{plot.performance}} } \author{ Tobias Sing \email{tobias.sing@gmail.com}, Oliver Sander \email{osander@gmail.com} } ROCR/man/ROCR.hiv.Rd0000644000176200001440000000334513644317760013433 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zzz.R \docType{data} \name{ROCR.hiv} \alias{ROCR.hiv} \title{Data set: Support vector machines and neural networks applied to the prediction of HIV-1 coreceptor usage} \format{ A list consisting of the SVM (\code{ROCR.hiv$hiv.svm}) and NN (\code{ROCR.hiv$hiv.nn}) classification data. Each of those is in turn a list consisting of the two elements \code{$predictions} and \code{$labels} (10 element list representing cross-validation data). } \usage{ data(ROCR.hiv) } \description{ Linear support vector machines (libsvm) and neural networks (R package nnet) were applied to predict usage of the coreceptors CCR5 and CXCR4 based on sequence data of the third variable loop of the HIV envelope protein. } \examples{ library(ROCR) data(ROCR.hiv) attach(ROCR.hiv) pred.svm <- prediction(hiv.svm$predictions, hiv.svm$labels) pred.svm perf.svm <- performance(pred.svm, 'tpr', 'fpr') perf.svm pred.nn <- prediction(hiv.nn$predictions, hiv.svm$labels) pred.nn perf.nn <- performance(pred.nn, 'tpr', 'fpr') perf.nn plot(perf.svm, lty=3, col="red",main="SVMs and NNs for prediction of HIV-1 coreceptor usage") plot(perf.nn, lty=3, col="blue",add=TRUE) plot(perf.svm, avg="vertical", lwd=3, col="red", spread.estimate="stderror",plotCI.lwd=2,add=TRUE) plot(perf.nn, avg="vertical", lwd=3, col="blue", spread.estimate="stderror",plotCI.lwd=2,add=TRUE) legend(0.6,0.6,c('SVM','NN'),col=c('red','blue'),lwd=3) } \references{ Sing, T. & Beerenwinkel, N. & Lengauer, T. "Learning mixtures of localized rules by maximizing the area under the ROC curve". 1st International Workshop on ROC Analysis in AI, 89-96, 2004. 
} \keyword{datasets} ROCR/DESCRIPTION0000644000176200001440000000453415134614023012532 0ustar liggesusersPackage: ROCR Authors@R: c(person("Tobias","Sing", email = "tobias.sing@gmail.com",role="aut"), person("Oliver","Sander", email = "osander@gmail.com",role="aut"), person("Niko","Beerenwinkel", role="aut"), person("Thomas","Lengauer", role="aut"), person("Thomas","Unterthiner", role="ctb"), person("Felix G.M.","Ernst", email = "felix.gm.ernst@outlook.com",role="cre", comment = c(ORCID = "0000-0001-5064-0928"))) Version: 1.0-12 Date: 2026-01-22 Title: Visualizing the Performance of Scoring Classifiers Description: ROC graphs, sensitivity/specificity curves, lift charts, and precision/recall plots are popular examples of trade-off visualizations for specific pairs of performance measures. ROCR is a flexible tool for creating cutoff-parameterized 2D performance curves by freely combining two from over 25 performance measures (new performance measures can be added using a standard interface). Curves from different cross-validation or bootstrapping runs can be averaged by different methods, and standard deviations, standard errors or box plots can be used to visualize the variability across the runs. The parameterization can be visualized by printing cutoff values at the corresponding curve positions, or by coloring the curve according to cutoff. All components of a performance plot can be quickly adjusted using a flexible parameter dispatching mechanism. Despite its flexibility, ROCR is easy to use, with only three commands and reasonable default values for all optional parameters. 
Encoding: UTF-8 License: GPL (>= 2) NeedsCompilation: no Depends: R (>= 3.6) Imports: methods, graphics, grDevices, gplots, stats Suggests: testthat, knitr, rmarkdown URL: https://ipa-tys.github.io/ROCR/ BugReports: https://github.com/ipa-tys/ROCR/issues RoxygenNote: 7.3.3 VignetteBuilder: knitr Packaged: 2026-01-22 15:23:44 UTC; flixr Author: Tobias Sing [aut], Oliver Sander [aut], Niko Beerenwinkel [aut], Thomas Lengauer [aut], Thomas Unterthiner [ctb], Felix G.M. Ernst [cre] (ORCID: ) Maintainer: Felix G.M. Ernst Repository: CRAN Date/Publication: 2026-01-23 06:41:23 UTC