batchtools/0000755000176200001440000000000013606123217012415 5ustar liggesusersbatchtools/NAMESPACE0000644000176200001440000000674413606056247013657 0ustar liggesusers# Generated by roxygen2: do not edit by hand S3method(doJobCollection,JobCollection) S3method(doJobCollection,character) S3method(execJob,Experiment) S3method(execJob,Job) S3method(execJob,JobCollection) S3method(execJob,character) S3method(getJobPars,ExperimentRegistry) S3method(getJobPars,Registry) S3method(makeJob,ExperimentRegistry) S3method(makeJob,Registry) S3method(makeJobCollection,ExperimentRegistry) S3method(makeJobCollection,Registry) S3method(print,ClusterFunctions) S3method(print,ExperimentRegistry) S3method(print,JobCollection) S3method(print,Registry) S3method(print,RuntimeEstimate) S3method(print,Status) S3method(print,SubmitJobResult) S3method(runHook,JobCollection) S3method(runHook,Registry) export(Worker) export(addAlgorithm) export(addExperiments) export(addJobTags) export(addProblem) export(ajoin) export(assertRegistry) export(batchExport) export(batchMap) export(batchMapResults) export(batchReduce) export(binpack) export(btlapply) export(btmapply) export(cfBrewTemplate) export(cfHandleUnknownSubmitError) export(cfKillJob) export(cfReadBrewTemplate) export(chunk) export(clearRegistry) export(doJobCollection) export(estimateRuntimes) export(execJob) export(findConfFile) export(findDone) export(findErrors) export(findExperiments) export(findExpired) export(findJobs) export(findNotDone) export(findNotStarted) export(findNotSubmitted) export(findOnSystem) export(findQueued) export(findRunning) export(findStarted) export(findSubmitted) export(findTagged) export(findTemplateFile) export(flatten) export(getDefaultRegistry) export(getErrorMessages) export(getJobNames) export(getJobPars) export(getJobResources) export(getJobStatus) export(getJobTable) export(getJobTags) export(getLog) export(getStatus) export(getUsedJobTags) export(grepLogs) export(ijoin) export(killJobs) export(ljoin) 
export(loadRegistry) export(loadResult) export(lpt) export(makeClusterFunctions) export(makeClusterFunctionsDocker) export(makeClusterFunctionsInteractive) export(makeClusterFunctionsLSF) export(makeClusterFunctionsMulticore) export(makeClusterFunctionsOpenLava) export(makeClusterFunctionsSGE) export(makeClusterFunctionsSSH) export(makeClusterFunctionsSlurm) export(makeClusterFunctionsSocket) export(makeClusterFunctionsTORQUE) export(makeExperimentRegistry) export(makeJob) export(makeJobCollection) export(makeRegistry) export(makeSubmitJobResult) export(ojoin) export(reduceResults) export(reduceResultsDataTable) export(reduceResultsList) export(removeAlgorithms) export(removeExperiments) export(removeJobTags) export(removeProblems) export(removeRegistry) export(resetJobs) export(rjoin) export(runHook) export(runOSCommand) export(saveRegistry) export(setDefaultRegistry) export(setJobNames) export(showLog) export(sjoin) export(submitJobs) export(summarizeExperiments) export(sweepRegistry) export(syncRegistry) export(testJob) export(ujoin) export(unwrap) export(waitForJobs) import(checkmate) import(data.table) import(stringi) import(utils) importFrom(R6,R6Class) importFrom(base64url,base32_decode) importFrom(base64url,base32_encode) importFrom(brew,brew) importFrom(digest,digest) importFrom(progress,progress_bar) importFrom(rappdirs,site_config_dir) importFrom(rappdirs,user_config_dir) importFrom(stats,pexp) importFrom(stats,predict) importFrom(stats,runif) importFrom(withr,local_dir) importFrom(withr,local_options) importFrom(withr,with_dir) importFrom(withr,with_seed) useDynLib(batchtools,c_binpack) useDynLib(batchtools,c_lpt) useDynLib(batchtools,count_not_missing) useDynLib(batchtools,fill_gaps) batchtools/.aspell/0000755000176200001440000000000013301520663013751 5ustar liggesusersbatchtools/.aspell/batchtools.rds0000644000176200001440000000010113301520663016615 0ustar liggesusersb```b`f@& 
YC2ĢĜԜ̪Ē<f4:batchtools/.aspell/defaults.R0000644000176200001440000000021513301520663015701 0ustar liggesusersRd_files <- vignettes <- R_files <- description <- list(encoding = "UTF-8", language = "en", dictionaries = c("en_stats", "batchtools")) batchtools/README.md0000644000176200001440000001456513435713470013715 0ustar liggesusers# batchtools [![JOSS Publicatoin](http://joss.theoj.org/papers/10.21105/joss.00135/status.svg)](https://doi.org/10.21105/joss.00135) [![CRAN Status Badge](http://www.r-pkg.org/badges/version/batchtools)](https://cran.r-project.org/package=batchtools) [![Build Status](https://travis-ci.org/mllg/batchtools.svg?branch=master)](https://travis-ci.org/mllg/batchtools) [![Build Status](https://ci.appveyor.com/api/projects/status/ypp14tiiqfhnv92k/branch/master?svg=true)](https://ci.appveyor.com/project/mllg/batchtools/branch/master) [![Coverage Status](https://img.shields.io/coveralls/mllg/batchtools.svg)](https://coveralls.io/r/mllg/batchtools?branch=master) As a successor of the packages [BatchJobs](https://github.com/tudo-r/BatchJobs) and [BatchExperiments](https://github.com/tudo-r/Batchexperiments), batchtools provides a parallel implementation of Map for high performance computing systems managed by schedulers like Slurm, Sun Grid Engine, OpenLava, TORQUE/OpenPBS, Load Sharing Facility (LSF) or Docker Swarm (see the setup section in the [vignette](https://mllg.github.io/batchtools/articles/batchtools.html)). 
Main features: * Convenience: All relevant batch system operations (submitting, listing, killing) are either handled internally or abstracted via simple R functions * Portability: With a well-defined interface, the source is independent from the underlying batch system - prototype locally, deploy on any high performance cluster * Reproducibility: Every computational part has an associated seed stored in a data base which ensures reproducibility even when the underlying batch system changes * Abstraction: The code layers for algorithms, experiment definitions and execution are cleanly separated and allow to write readable and maintainable code to manage large scale computer experiments ## Installation Install the stable version from CRAN: ```{R} install.packages("batchtools") ``` For the development version, use [devtools](https://cran.r-project.org/package=devtools): ```{R} devtools::install_github("mllg/batchtools") ``` Next, you need to setup `batchtools` for your HPC (it will run sequentially otherwise). See the [vignette](https://mllg.github.io/batchtools/articles/batchtools.html#setup) for instructions. ## Why batchtools? The development of [BatchJobs](https://github.com/tudo-r/BatchJobs/) and [BatchExperiments](https://github.com/tudo-r/Batchexperiments) is discontinued for the following reasons: * Maintainability: The packages [BatchJobs](https://github.com/tudo-r/BatchJobs/) and [BatchExperiments](https://github.com/tudo-r/Batchexperiments) are tightly connected which makes maintenance difficult. Changes have to be synchronized and tested against the current CRAN versions for compatibility. Furthermore, BatchExperiments violates CRAN policies by calling internal functions of BatchJobs. * Data base issues: Although we invested weeks to mitigate issues with locks of the SQLite data base or file system (staged queries, file system timeouts, ...), `BatchJobs` kept working unreliable on some systems with high latency under certain conditions. 
This made `BatchJobs` unusable for many users. [BatchJobs](https://github.com/tudo-r/BatchJobs/) and [BatchExperiments](https://github.com/tudo-r/Batchexperiments) will remain on CRAN, but new features are unlikely to be ported back. The [vignette](https://mllg.github.io/batchtools/articles/batchtools.html#migration) contains a section comparing the packages. ## Resources * [NEWS](https://mllg.github.io/batchtools/news/) * [Function reference](https://mllg.github.io/batchtools/reference) * [Vignette](https://mllg.github.io/batchtools/articles/batchtools.html) * [JOSS Paper](https://doi.org/10.21105/joss.00135): Short paper on batchtools. Please cite this if you use batchtools. * [Paper on BatchJobs/BatchExperiments](http://www.jstatsoft.org/v64/i11): The described concept still holds for batchtools and most examples work analogously (see the [vignette](https://mllg.github.io/batchtools/articles/batchtools.html#migration) for differences between the packages). ## Citation Please cite the [JOSS paper](https://doi.org/10.21105/joss.00135) using the following BibTeX entry: ``` @article{, doi = {10.21105/joss.00135}, url = {https://doi.org/10.21105/joss.00135}, year = {2017}, month = {feb}, publisher = {The Open Journal}, volume = {2}, number = {10}, author = {Michel Lang and Bernd Bischl and Dirk Surmann}, title = {batchtools: Tools for R to work on batch systems}, journal = {The Journal of Open Source Software} } ``` ## Related Software * The [High Performance Computing Task View](https://cran.r-project.org/view=HighPerformanceComputing) lists the most relevant packages for scientific computing with R. * [clustermq](https://cran.r-project.org/package=clustermq) is a similar approach which also supports multiple schedulers. Uses the ZeroMQ network protocol for communication, and shines if you have millions of fast jobs. * [batch](https://cran.r-project.org/package=batch) assists in splitting and submitting jobs to LSF and MOSIX clusters. 
* [flowr](https://cran.r-project.org/package=flowr) supports LSF, Slurm, TORQUE and Moab and provides a scatter-gather approach to define computational jobs. * [future.batchtools](https://cran.r-project.org/package=future.batchtools) implements `batchtools` as backend for [future](https://cran.r-project.org/package=future.batchtools). * [doFuture](https://cran.r-project.org/package=doFuture) together with [future.batchtools](https://cran.r-project.org/package=future.batchtools) connects `batchtools` to [foreach](https://cran.r-project.org/package=foreach). * [drake](https://cran.r-project.org/package=drake) uses graphs to define computational jobs. `batchtools` is used as a backend via [future.batchtools](https://cran.r-project.org/package=future.batchtools). ## Contributing to batchtools This R package is licensed under the [LGPL-3](https://www.gnu.org/licenses/lgpl-3.0.en.html). If you encounter problems using this software (lack of documentation, misleading or wrong documentation, unexpected behaviour, bugs, ...) or just want to suggest features, please open an issue in the [issue tracker](https://github.com/mllg/batchtools/issues). Pull requests are welcome and will be included at the discretion of the author. If you have customized a template file for your (larger) computing site, please share it: fork the repository, place your template in `inst/templates` and send a pull request. batchtools/man/0000755000176200001440000000000013606041641013170 5ustar liggesusersbatchtools/man/makeSubmitJobResult.Rd0000644000176200001440000000334513606041641017417 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterFunctions.R \name{makeSubmitJobResult} \alias{makeSubmitJobResult} \alias{SubmitJobResult} \title{Create a SubmitJobResult} \usage{ makeSubmitJobResult( status, batch.id, log.file = NA_character_, msg = NA_character_ ) } \arguments{ \item{status}{[\code{integer(1)}]\cr Launch status of job. 
0 means success, codes between 1 and 100 are temporary errors and any error greater than 100 is a permanent failure.} \item{batch.id}{[\code{character()}]\cr Unique id of this job on batch system, as given by the batch system. Must be globally unique so that the job can be terminated using just this information. For array jobs, this may be a vector of length equal to the number of jobs in the array.} \item{log.file}{[\code{character()}]\cr Log file. If \code{NA}, defaults to \code{[job.hash].log}. Some cluster functions set this for array jobs.} \item{msg}{[\code{character(1)}]\cr Optional error message in case \code{status} is not equal to 0. Default is \dQuote{OK}, \dQuote{TEMPERROR}, \dQuote{ERROR}, depending on \code{status}.} } \value{ [\code{\link{SubmitJobResult}}]. A list, containing \code{status}, \code{batch.id} and \code{msg}. } \description{ This function is only intended for use in your own cluster functions implementation. Use this function in your implementation of \code{\link{makeClusterFunctions}} to create a return value for the \code{submitJob} function. } \seealso{ Other ClusterFunctionsHelper: \code{\link{cfBrewTemplate}()}, \code{\link{cfHandleUnknownSubmitError}()}, \code{\link{cfKillJob}()}, \code{\link{cfReadBrewTemplate}()}, \code{\link{makeClusterFunctions}()}, \code{\link{runOSCommand}()} } \concept{ClusterFunctionsHelper} batchtools/man/runHook.Rd0000644000176200001440000000465613435713470015125 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Hooks.R \name{runHook} \alias{runHook} \alias{Hooks} \alias{Hook} \title{Trigger Evaluation of Custom Function} \usage{ runHook(obj, hook, ...) 
} \arguments{ \item{obj}{[\link{Registry} | \link{JobCollection}]\cr Registry which contains the \link{ClusterFunctions} with element \dQuote{hooks} or a \link{JobCollection} which holds the subset of functions which are executed remotely.} \item{hook}{[\code{character(1)}]\cr ID of the hook as string.} \item{...}{[ANY]\cr Additional arguments passed to the function referenced by \code{hook}. See description.} } \value{ Return value of the called function, or \code{NULL} if there is no hook with the specified ID. } \description{ Hooks allow to trigger functions calls on specific events. They can be specified via the \code{\link{ClusterFunctions}} and are triggered on the following events: \describe{ \item{\code{pre.sync}}{\code{function(reg, fns, ...)}: Run before synchronizing the registry on the master. \code{fn} is the character vector of paths to the update files.} \item{\code{post.sync}}{\code{function(reg, updates, ...)}: Run after synchronizing the registry on the master. \code{updates} is the data.table of processed updates.} \item{\code{pre.submit.job}}{\code{function(reg, ...)}: Run before a job is successfully submitted to the scheduler on the master.} \item{\code{post.submit.job}}{\code{function(reg, ...)}: Run after a job is successfully submitted to the scheduler on the master.} \item{\code{pre.submit}}{\code{function(reg, ...)}: Run before any job is submitted to the scheduler.} \item{\code{post.submit}}{\code{function(reg, ...)}: Run after a jobs are submitted to the schedule.} \item{\code{pre.do.collection}}{\code{function(reg, reader, ...)}: Run before starting the job collection on the slave. \code{reader} is an internal cache object.} \item{\code{post.do.collection}}{\code{function(reg, updates, reader, ...)}: Run after all jobs in the chunk are terminated on the slave. \code{updates} is a \code{\link{data.table}} of updates which will be merged with the \code{\link{Registry}} by the master. 
\code{reader} is an internal cache object.} \item{\code{pre.kill}}{\code{function(reg, ids, ...)}: Run before any job is killed.} \item{\code{post.kill}}{\code{function(reg, ids, ...)}: Run after jobs are killed. \code{ids} is the return value of \code{\link{killJobs}}.} } } batchtools/man/loadResult.Rd0000644000176200001440000000140613606041641015576 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loadResult.R \name{loadResult} \alias{loadResult} \title{Load the Result of a Single Job} \usage{ loadResult(id, reg = getDefaultRegistry()) } \arguments{ \item{id}{[\code{integer(1)} or \code{data.table}]\cr Single integer to specify the job or a \code{data.table} with column \code{job.id} and exactly one row.} \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ [\code{ANY}]. The stored result. } \description{ Loads the result of a single job. } \seealso{ Other Results: \code{\link{batchMapResults}()}, \code{\link{reduceResultsList}()}, \code{\link{reduceResults}()} } \concept{Results} batchtools/man/makeClusterFunctionsSGE.Rd0000644000176200001440000000671413606041641020176 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterFunctionsSGE.R \name{makeClusterFunctionsSGE} \alias{makeClusterFunctionsSGE} \title{ClusterFunctions for SGE Systems} \usage{ makeClusterFunctionsSGE( template = "sge", nodename = "localhost", scheduler.latency = 1, fs.latency = 65 ) } \arguments{ \item{template}{[\code{character(1)}]\cr Either a path to a \pkg{brew} template file (with extension \dQuote{tmpl}), or a short descriptive name enabling the following heuristic for the file lookup: \enumerate{ \item \dQuote{batchtools.[template].tmpl} in the path specified by the environment variable \dQuote{R_BATCHTOOLS_SEARCH_PATH}. 
\item \dQuote{batchtools.[template].tmpl} in the current working directory. \item \dQuote{[template].tmpl} in the user config directory (see \code{\link[rappdirs]{user_config_dir}}); on linux this is usually \dQuote{~/.config/batchtools/[template].tmpl}. \item \dQuote{.batchtools.[template].tmpl} in the home directory. \item \dQuote{[template].tmpl} in the package installation directory in the subfolder \dQuote{templates}. }} \item{nodename}{[\code{character(1)}]\cr Nodename of the master host. All commands are send via SSH to this host. Only works iff \enumerate{ \item{Passwordless authentication (e.g., via SSH public key authentication) is set up.} \item{The file directory is shared across machines, e.g. mounted via SSHFS.} \item{Either the absolute path to the \code{file.dir} is identical on the machines, or paths are provided relative to the home directory. Symbolic links should work.} }} \item{scheduler.latency}{[\code{numeric(1)}]\cr Time to sleep after important interactions with the scheduler to ensure a sane state. Currently only triggered after calling \code{\link{submitJobs}}.} \item{fs.latency}{[\code{numeric(1)}]\cr Expected maximum latency of the file system, in seconds. Set to a positive number for network file systems like NFS which enables more robust (but also more expensive) mechanisms to access files and directories. Usually safe to set to \code{0} to disable the heuristic, e.g. if you are working on a local file system.} } \value{ [\code{\link{ClusterFunctions}}]. } \description{ Cluster functions for Univa Grid Engine / Oracle Grid Engine / Sun Grid Engine (\url{http://www.univa.com/}). Job files are created based on the brew template \code{template}. This file is processed with brew and then submitted to the queue using the \code{qsub} command. Jobs are killed using the \code{qdel} command and the list of running jobs is retrieved using \code{qselect}. 
The user must have the appropriate privileges to submit, delete and list jobs on the cluster (this is usually the case). The template file can access all resources passed to \code{\link{submitJobs}} as well as all variables stored in the \code{\link{JobCollection}}. It is the template file's job to choose a queue for the job and handle the desired resource allocations. } \note{ Array jobs are currently not supported. } \seealso{ Other ClusterFunctions: \code{\link{makeClusterFunctionsDocker}()}, \code{\link{makeClusterFunctionsInteractive}()}, \code{\link{makeClusterFunctionsLSF}()}, \code{\link{makeClusterFunctionsMulticore}()}, \code{\link{makeClusterFunctionsOpenLava}()}, \code{\link{makeClusterFunctionsSSH}()}, \code{\link{makeClusterFunctionsSlurm}()}, \code{\link{makeClusterFunctionsSocket}()}, \code{\link{makeClusterFunctionsTORQUE}()}, \code{\link{makeClusterFunctions}()} } \concept{ClusterFunctions} batchtools/man/batchReduce.Rd0000644000176200001440000000365613606041641015702 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/batchReduce.R \name{batchReduce} \alias{batchReduce} \title{Reduce Operation for Batch Systems} \usage{ batchReduce( fun, xs, init = NULL, chunks = seq_along(xs), more.args = list(), reg = getDefaultRegistry() ) } \arguments{ \item{fun}{[\code{function(aggr, x, ...)}]\cr Function to reduce \code{xs} with.} \item{xs}{[\code{vector}]\cr Vector to reduce.} \item{init}{[ANY]\cr Initial object for reducing. See \code{\link[base]{Reduce}}.} \item{chunks}{[\code{integer(length(xs))}]\cr Group for each element of \code{xs}. Can be generated with \code{\link{chunk}}.} \item{more.args}{[\code{list}]\cr A list of additional arguments passed to \code{fun}.} \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ [\code{\link{data.table}}] with ids of added jobs stored in column \dQuote{job.id}. 
} \description{ A parallel and asynchronous \code{\link[base]{Reduce}} for batch systems. Note that this function only defines the computational jobs. Each job reduces a certain number of elements on one slave. The actual computation is started with \code{\link{submitJobs}}. Results and partial results can be collected with \code{\link{reduceResultsList}}, \code{\link{reduceResults}} or \code{\link{loadResult}}. } \examples{ \dontshow{ batchtools:::example_push_temp(1) } # define function to reduce on slave, we want to sum a vector tmp = makeRegistry(file.dir = NA, make.default = FALSE) xs = 1:100 f = function(aggr, x) aggr + x # sum 20 numbers on each slave process, i.e. 5 jobs chunks = chunk(xs, chunk.size = 5) batchReduce(fun = f, 1:100, init = 0, chunks = chunks, reg = tmp) submitJobs(reg = tmp) waitForJobs(reg = tmp) # now reduce one final time on master reduceResults(fun = function(aggr, job, res) f(aggr, res), reg = tmp) } \seealso{ \code{\link{batchMap}} } batchtools/man/chunk.Rd0000644000176200001440000000751213543336703014602 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/chunkIds.R \name{chunk} \alias{chunk} \alias{lpt} \alias{binpack} \title{Chunk Jobs for Sequential Execution} \usage{ chunk(x, n.chunks = NULL, chunk.size = NULL, shuffle = TRUE) lpt(x, n.chunks = 1L) binpack(x, chunk.size = max(x)) } \arguments{ \item{x}{[\code{numeric}]\cr For \code{chunk} an atomic vector (usually the \code{job.id}). For \code{binpack} and \code{lpt}, the weights to group.} \item{n.chunks}{[\code{integer(1)}]\cr Requested number of chunks. The function \code{chunk} distributes the number of elements in \code{x} evenly while \code{lpt} tries to even out the sum of elements in each chunk. If more chunks than necessary are requested, empty chunks are ignored. Mutually exclusive with \code{chunks.size}.} \item{chunk.size}{[\code{integer(1)}]\cr Requested chunk size for each single chunk. 
For \code{chunk} this is the number of elements in \code{x}, for \code{binpack} the size is determined by the sum of values in \code{x}. Mutually exclusive with \code{n.chunks}.} \item{shuffle}{[\code{logical(1)}]\cr Shuffles the groups. Default is \code{TRUE}.} } \value{ [\code{integer}] giving the chunk number for each element of \code{x}. } \description{ Jobs can be partitioned into \dQuote{chunks} to be executed sequentially on the computational nodes. Chunks are defined by providing a data frame with columns \dQuote{job.id} and \dQuote{chunk} (integer) to \code{\link{submitJobs}}. All jobs with the same chunk number will be grouped together on one node to form a single computational job. The function \code{chunk} simply splits \code{x} into either a fixed number of groups, or into a variable number of groups with a fixed number of maximum elements. The function \code{lpt} also groups \code{x} into a fixed number of chunks, but uses the actual values of \code{x} in a greedy \dQuote{Longest Processing Time} algorithm. As a result, the maximum sum of elements in minimized. \code{binpack} splits \code{x} into a variable number of groups whose sum of elements do not exceed the upper limit provided by \code{chunk.size}. See examples of \code{\link{estimateRuntimes}} for an application of \code{binpack} and \code{lpt}. 
} \examples{ \dontshow{ batchtools:::example_push_temp(2) } ch = chunk(1:10, n.chunks = 2) table(ch) ch = chunk(rep(1, 10), chunk.size = 2) table(ch) set.seed(1) x = runif(10) ch = lpt(x, n.chunks = 2) sapply(split(x, ch), sum) set.seed(1) x = runif(10) ch = binpack(x, 1) sapply(split(x, ch), sum) # Job chunking tmp = makeRegistry(file.dir = NA, make.default = FALSE) ids = batchMap(identity, 1:25, reg = tmp) ### Group into chunks with 10 jobs each library(data.table) ids[, chunk := chunk(job.id, chunk.size = 10)] print(ids[, .N, by = chunk]) ### Group into 4 chunks ids[, chunk := chunk(job.id, n.chunks = 4)] print(ids[, .N, by = chunk]) ### Submit to batch system submitJobs(ids = ids, reg = tmp) # Grouped chunking tmp = makeExperimentRegistry(file.dir = NA, make.default = FALSE) prob = addProblem(reg = tmp, "prob1", data = iris, fun = function(job, data) nrow(data)) prob = addProblem(reg = tmp, "prob2", data = Titanic, fun = function(job, data) nrow(data)) algo = addAlgorithm(reg = tmp, "algo", fun = function(job, data, instance, i, ...) 
problem) prob.designs = list(prob1 = data.table(), prob2 = data.table(x = 1:2)) algo.designs = list(algo = data.table(i = 1:3)) addExperiments(prob.designs, algo.designs, repls = 3, reg = tmp) ### Group into chunks of 5 jobs, but do not put multiple problems into the same chunk # -> only one problem has to be loaded per chunk, and only once because it is cached ids = getJobTable(reg = tmp)[, .(job.id, problem, algorithm)] ids[, chunk := chunk(job.id, chunk.size = 5), by = "problem"] ids[, chunk := .GRP, by = c("problem", "chunk")] dcast(ids, chunk ~ problem) } \seealso{ \code{\link{estimateRuntimes}} } batchtools/man/Worker.Rd0000644000176200001440000000246613606041641014740 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Worker.R \docType{class} \name{Worker} \alias{Worker} \title{Create a Linux-Worker} \format{An \code{\link{R6Class}} generator object} \value{ [\code{\link{Worker}}]. } \description{ \code{\link[R6]{R6Class}} to create local and remote linux workers. } \section{Fields}{ \describe{ \item{\code{nodename}}{Host name. Set via constructor.} \item{\code{ncpus}}{Number of CPUs. Set via constructor and defaults to a heuristic which tries to detect the number of CPUs of the machine.} \item{\code{max.load}}{Maximum load average (of the last 5 min). Set via constructor and defaults to the number of CPUs of the machine.} \item{\code{status}}{Status of the worker; one of \dQuote{unknown}, \dQuote{available}, \dQuote{max.cpus} and \dQuote{max.load}.} }} \section{Methods}{ \describe{ \item{\code{new(nodename, ncpus, max.load)}}{Constructor.} \item{\code{update(reg)}}{Update the worker status.} \item{\code{list(reg)}}{List running jobs.} \item{\code{start(reg, fn, outfile)}}{Start job collection in file \dQuote{fn} and output to \dQuote{outfile}.} \item{\code{kill(reg, batch.id)}}{Kill job matching the \dQuote{batch.id}.} } } \examples{ \dontrun{ # create a worker for the local machine and use 4 CPUs. 
Worker$new("localhost", ncpus = 4) } } batchtools/man/makeClusterFunctionsTORQUE.Rd0000644000176200001440000000567513606056247020614 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterFunctionsTORQUE.R \name{makeClusterFunctionsTORQUE} \alias{makeClusterFunctionsTORQUE} \title{ClusterFunctions for OpenPBS/TORQUE Systems} \usage{ makeClusterFunctionsTORQUE( template = "torque", scheduler.latency = 1, fs.latency = 65 ) } \arguments{ \item{template}{[\code{character(1)}]\cr Either a path to a \pkg{brew} template file (with extension \dQuote{tmpl}), or a short descriptive name enabling the following heuristic for the file lookup: \enumerate{ \item \dQuote{batchtools.[template].tmpl} in the path specified by the environment variable \dQuote{R_BATCHTOOLS_SEARCH_PATH}. \item \dQuote{batchtools.[template].tmpl} in the current working directory. \item \dQuote{[template].tmpl} in the user config directory (see \code{\link[rappdirs]{user_config_dir}}); on linux this is usually \dQuote{~/.config/batchtools/[template].tmpl}. \item \dQuote{.batchtools.[template].tmpl} in the home directory. \item \dQuote{[template].tmpl} in the package installation directory in the subfolder \dQuote{templates}. }} \item{scheduler.latency}{[\code{numeric(1)}]\cr Time to sleep after important interactions with the scheduler to ensure a sane state. Currently only triggered after calling \code{\link{submitJobs}}.} \item{fs.latency}{[\code{numeric(1)}]\cr Expected maximum latency of the file system, in seconds. Set to a positive number for network file systems like NFS which enables more robust (but also more expensive) mechanisms to access files and directories. Usually safe to set to \code{0} to disable the heuristic, e.g. if you are working on a local file system.} } \value{ [\code{\link{ClusterFunctions}}]. } \description{ Cluster functions for TORQUE/PBS (\url{https://adaptivecomputing.com/cherry-services/torque-resource-manager/}). 
Job files are created based on the brew template \code{template.file}. This file is processed with brew and then submitted to the queue using the \code{qsub} command. Jobs are killed using the \code{qdel} command and the list of running jobs is retrieved using \code{qselect}. The user must have the appropriate privileges to submit, delete and list jobs on the cluster (this is usually the case). The template file can access all resources passed to \code{\link{submitJobs}} as well as all variables stored in the \code{\link{JobCollection}}. It is the template file's job to choose a queue for the job and handle the desired resource allocations. } \seealso{ Other ClusterFunctions: \code{\link{makeClusterFunctionsDocker}()}, \code{\link{makeClusterFunctionsInteractive}()}, \code{\link{makeClusterFunctionsLSF}()}, \code{\link{makeClusterFunctionsMulticore}()}, \code{\link{makeClusterFunctionsOpenLava}()}, \code{\link{makeClusterFunctionsSGE}()}, \code{\link{makeClusterFunctionsSSH}()}, \code{\link{makeClusterFunctionsSlurm}()}, \code{\link{makeClusterFunctionsSocket}()}, \code{\link{makeClusterFunctions}()} } \concept{ClusterFunctions} batchtools/man/cfHandleUnknownSubmitError.Rd0000644000176200001440000000234313606041641020743 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterFunctions.R \name{cfHandleUnknownSubmitError} \alias{cfHandleUnknownSubmitError} \title{Cluster Functions Helper to Handle Unknown Errors} \usage{ cfHandleUnknownSubmitError(cmd, exit.code, output) } \arguments{ \item{cmd}{[\code{character(1)}]\cr OS command used to submit the job, e.g. qsub.} \item{exit.code}{[\code{integer(1)}]\cr Exit code of the OS command, should not be 0.} \item{output}{[\code{character}]\cr Output of the OS command, hopefully an informative error message. If these are multiple lines in a vector, they are automatically joined.} } \value{ [\code{\link{SubmitJobResult}}]. 
} \description{ This function is only intended for use in your own cluster functions implementation. Simply constructs a \code{\link{SubmitJobResult}} object with status code 101, NA as batch id and an informative error message containing the output of the OS command in \code{output}. } \seealso{ Other ClusterFunctionsHelper: \code{\link{cfBrewTemplate}()}, \code{\link{cfKillJob}()}, \code{\link{cfReadBrewTemplate}()}, \code{\link{makeClusterFunctions}()}, \code{\link{makeSubmitJobResult}()}, \code{\link{runOSCommand}()} } \concept{ClusterFunctionsHelper} batchtools/man/getDefaultRegistry.Rd0000644000176200001440000000156013606041641017276 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/getDefaultRegistry.R \name{getDefaultRegistry} \alias{getDefaultRegistry} \alias{setDefaultRegistry} \title{Get and Set the Default Registry} \usage{ getDefaultRegistry() setDefaultRegistry(reg) } \arguments{ \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \description{ \code{getDefaultRegistry} returns the registry currently set as default (or stops with an exception if none is set). \code{setDefaultRegistry} sets a registry as default. } \seealso{ Other Registry: \code{\link{clearRegistry}()}, \code{\link{loadRegistry}()}, \code{\link{makeRegistry}()}, \code{\link{removeRegistry}()}, \code{\link{saveRegistry}()}, \code{\link{sweepRegistry}()}, \code{\link{syncRegistry}()} } \concept{Registry} batchtools/man/batchtools-package.Rd0000644000176200001440000000263013606041641017213 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zzz.R \docType{package} \name{batchtools-package} \alias{batchtools} \alias{batchtools-package} \title{batchtools: Tools for Computation on Batch Systems} \description{ For bug reports and feature requests please use the tracker: \url{https://github.com/mllg/batchtools}. 
} \section{Package options}{ \describe{ \item{\code{batchtools.verbose}}{ Verbosity. Set to \code{FALSE} to suppress info messages and progress bars. } \item{\code{batchtools.progress}}{ Progress bars. Set to \code{FALSE} to disable them. } \item{\code{batchtools.timestamps}}{ Add time stamps to log output. Set to \code{FALSE} to disable them. } } Furthermore, you may enable a debug mode using the \pkg{debugme} package by setting the environment variable \dQuote{DEBUGME} to \dQuote{batchtools} before loading \pkg{batchtools}. } \seealso{ Useful links: \itemize{ \item \url{https://github.com/mllg/batchtools} \item Report bugs at \url{https://github.com/mllg/batchtools/issues} } } \author{ \strong{Maintainer}: Michel Lang \email{michellang@gmail.com} (\href{https://orcid.org/0000-0001-9754-0393}{ORCID}) Authors: \itemize{ \item Bernd Bischl \email{bernd_bischl@gmx.net} } Other contributors: \itemize{ \item Dirk Surmann \email{surmann@statistik.tu-dortmund.de} (\href{https://orcid.org/0000-0003-0873-137X}{ORCID}) [contributor] } } batchtools/man/makeClusterFunctions.Rd0000644000176200001440000001074413606041641017635 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterFunctions.R \name{makeClusterFunctions} \alias{makeClusterFunctions} \alias{ClusterFunctions} \title{ClusterFunctions Constructor} \usage{ makeClusterFunctions( name, submitJob, killJob = NULL, listJobsQueued = NULL, listJobsRunning = NULL, array.var = NA_character_, store.job.collection = FALSE, store.job.files = FALSE, scheduler.latency = 0, fs.latency = 0, hooks = list() ) } \arguments{ \item{name}{[\code{character(1)}]\cr Name of cluster functions.} \item{submitJob}{[\code{function(reg, jc, ...)}]\cr Function to submit new jobs. Must return a \code{\link{SubmitJobResult}} object. 
The arguments are \code{reg} (\code{\link{Registry}}) and \code{jobs} (\code{\link{JobCollection}}).} \item{killJob}{[\code{function(reg, batch.id)}]\cr Function to kill a job on the batch system. Make sure that you definitely kill the job! Return value is currently ignored. Must have the arguments \code{reg} (\code{\link{Registry}}) and \code{batch.id} (\code{character(1)} as returned by \code{submitJob}). Note that there is a helper function \code{\link{cfKillJob}} to repeatedly try to kill jobs. Set \code{killJob} to \code{NULL} if killing jobs cannot be supported.} \item{listJobsQueued}{[\code{function(reg)}]\cr List all queued jobs on the batch system for the current user. Must return an character vector of batch ids, same format as they are returned by \code{submitJob}. Set \code{listJobsQueued} to \code{NULL} if listing of queued jobs is not supported.} \item{listJobsRunning}{[\code{function(reg)}]\cr List all running jobs on the batch system for the current user. Must return an character vector of batch ids, same format as they are returned by \code{submitJob}. It does not matter if you return a few job ids too many (e.g. all for the current user instead of all for the current registry), but you have to include all relevant ones. Must have the argument are \code{reg} (\code{\link{Registry}}). Set \code{listJobsRunning} to \code{NULL} if listing of running jobs is not supported.} \item{array.var}{[\code{character(1)}]\cr Name of the environment variable set by the scheduler to identify IDs of job arrays. Default is \code{NA} for no array support.} \item{store.job.collection}{[\code{logical(1)}]\cr Flag to indicate that the cluster function implementation of \code{submitJob} can not directly handle \code{\link{JobCollection}} objects. 
If set to \code{FALSE}, the \code{\link{JobCollection}} is serialized to the file system before submitting the job.} \item{store.job.files}{[\code{logical(1)}]\cr Flag to indicate that job files need to be stored in the file directory. If set to \code{FALSE} (default), the job file is created in a temporary directory, otherwise (or if the debug mode is enabled) in the subdirectory \code{jobs} of the \code{file.dir}.} \item{scheduler.latency}{[\code{numeric(1)}]\cr Time to sleep after important interactions with the scheduler to ensure a sane state. Currently only triggered after calling \code{\link{submitJobs}}.} \item{fs.latency}{[\code{numeric(1)}]\cr Expected maximum latency of the file system, in seconds. Set to a positive number for network file systems like NFS which enables more robust (but also more expensive) mechanisms to access files and directories. Usually safe to set to \code{0} to disable the heuristic, e.g. if you are working on a local file system.} \item{hooks}{[\code{list}]\cr Named list of functions which will we called on certain events like \dQuote{pre.submit} or \dQuote{post.sync}. See \link{Hooks}.} } \description{ This is the constructor used to create \emph{custom} cluster functions. Note that some standard implementations for TORQUE, Slurm, LSF, SGE, etc. ship with the package. 
} \seealso{ Other ClusterFunctions: \code{\link{makeClusterFunctionsDocker}()}, \code{\link{makeClusterFunctionsInteractive}()}, \code{\link{makeClusterFunctionsLSF}()}, \code{\link{makeClusterFunctionsMulticore}()}, \code{\link{makeClusterFunctionsOpenLava}()}, \code{\link{makeClusterFunctionsSGE}()}, \code{\link{makeClusterFunctionsSSH}()}, \code{\link{makeClusterFunctionsSlurm}()}, \code{\link{makeClusterFunctionsSocket}()}, \code{\link{makeClusterFunctionsTORQUE}()} Other ClusterFunctionsHelper: \code{\link{cfBrewTemplate}()}, \code{\link{cfHandleUnknownSubmitError}()}, \code{\link{cfKillJob}()}, \code{\link{cfReadBrewTemplate}()}, \code{\link{makeSubmitJobResult}()}, \code{\link{runOSCommand}()} } \concept{ClusterFunctions} \concept{ClusterFunctionsHelper} batchtools/man/makeClusterFunctionsOpenLava.Rd0000644000176200001440000000563513606041641021266 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterFunctionsOpenLava.R \name{makeClusterFunctionsOpenLava} \alias{makeClusterFunctionsOpenLava} \title{ClusterFunctions for OpenLava} \usage{ makeClusterFunctionsOpenLava( template = "openlava", scheduler.latency = 1, fs.latency = 65 ) } \arguments{ \item{template}{[\code{character(1)}]\cr Either a path to a \pkg{brew} template file (with extension \dQuote{tmpl}), or a short descriptive name enabling the following heuristic for the file lookup: \enumerate{ \item \dQuote{batchtools.[template].tmpl} in the path specified by the environment variable \dQuote{R_BATCHTOOLS_SEARCH_PATH}. \item \dQuote{batchtools.[template].tmpl} in the current working directory. \item \dQuote{[template].tmpl} in the user config directory (see \code{\link[rappdirs]{user_config_dir}}); on linux this is usually \dQuote{~/.config/batchtools/[template].tmpl}. \item \dQuote{.batchtools.[template].tmpl} in the home directory. \item \dQuote{[template].tmpl} in the package installation directory in the subfolder \dQuote{templates}. 
}} \item{scheduler.latency}{[\code{numeric(1)}]\cr Time to sleep after important interactions with the scheduler to ensure a sane state. Currently only triggered after calling \code{\link{submitJobs}}.} \item{fs.latency}{[\code{numeric(1)}]\cr Expected maximum latency of the file system, in seconds. Set to a positive number for network file systems like NFS which enables more robust (but also more expensive) mechanisms to access files and directories. Usually safe to set to \code{0} to disable the heuristic, e.g. if you are working on a local file system.} } \value{ [\code{\link{ClusterFunctions}}]. } \description{ Cluster functions for OpenLava. Job files are created based on the brew template \code{template}. This file is processed with brew and then submitted to the queue using the \code{bsub} command. Jobs are killed using the \code{bkill} command and the list of running jobs is retrieved using \code{bjobs -u $USER -w}. The user must have the appropriate privileges to submit, delete and list jobs on the cluster (this is usually the case). The template file can access all resources passed to \code{\link{submitJobs}} as well as all variables stored in the \code{\link{JobCollection}}. It is the template file's job to choose a queue for the job and handle the desired resource allocations. } \note{ Array jobs are currently not supported. 
} \seealso{ Other ClusterFunctions: \code{\link{makeClusterFunctionsDocker}()}, \code{\link{makeClusterFunctionsInteractive}()}, \code{\link{makeClusterFunctionsLSF}()}, \code{\link{makeClusterFunctionsMulticore}()}, \code{\link{makeClusterFunctionsSGE}()}, \code{\link{makeClusterFunctionsSSH}()}, \code{\link{makeClusterFunctionsSlurm}()}, \code{\link{makeClusterFunctionsSocket}()}, \code{\link{makeClusterFunctionsTORQUE}()}, \code{\link{makeClusterFunctions}()} } \concept{ClusterFunctions} batchtools/man/addExperiments.Rd0000644000176200001440000000772413606041641016445 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/addExperiments.R \name{addExperiments} \alias{addExperiments} \title{Add Experiments to the Registry} \usage{ addExperiments( prob.designs = NULL, algo.designs = NULL, repls = 1L, combine = "crossprod", reg = getDefaultRegistry() ) } \arguments{ \item{prob.designs}{[named list of \code{\link[base]{data.frame}}]\cr Named list of data frames (or \code{\link[data.table]{data.table}}). The name must match the problem name while the column names correspond to parameters of the problem. If \code{NULL}, experiments for all defined problems without any parameters are added.} \item{algo.designs}{[named list of \code{\link[data.table]{data.table}} or \code{\link[base]{data.frame}}]\cr Named list of data frames (or \code{\link[data.table]{data.table}}). The name must match the algorithm name while the column names correspond to parameters of the algorithm. If \code{NULL}, experiments for all defined algorithms without any parameters are added.} \item{repls}{[\code{integer(1)}]\cr Number of replications for each experiment.} \item{combine}{[\code{character(1)}]\cr How to combine the rows of a single problem design with the rows of a single algorithm design? Default is \dQuote{crossprod} which combines each row of the problem design which each row of the algorithm design in a cross-product fashion. 
Set to \dQuote{bind} to just \code{\link[base]{cbind}} the tables of problem and algorithm designs where the shorter table is repeated if necessary.} \item{reg}{[\code{\link{ExperimentRegistry}}]\cr Registry. If not explicitly passed, uses the last created registry.} } \value{ [\code{\link{data.table}}] with ids of added jobs stored in column \dQuote{job.id}. } \description{ Adds experiments (parametrized combinations of problems with algorithms) to the registry and thereby defines batch jobs. If multiple problem designs or algorithm designs are provided, they are combined via the Cartesian product. E.g., if you have two problems \code{p1} and \code{p2} and three algorithms \code{a1}, \code{a2} and \code{a3}, \code{addExperiments} creates experiments for all parameters for the combinations \code{(p1, a1)}, \code{(p1, a2)}, \code{(p1, a3)}, \code{(p2, a1)}, \code{(p2, a2)} and \code{(p2, a3)}. } \note{ R's \code{data.frame} converts character vectors to factors by default which frequently resulted in problems using \code{addExperiments}. Therefore, this function will warn about factor variables if the following conditions hold: \enumerate{ \item The design is passed as a \code{data.frame}, not a \code{\link[data.table]{data.table}} or \code{\link[tibble]{tibble}}. \item The option \dQuote{stringsAsFactors} is not set or set to \code{TRUE}. } } \examples{ \dontshow{ batchtools:::example_push_temp(1) } tmp = makeExperimentRegistry(file.dir = NA, make.default = FALSE) # add first problem fun = function(job, data, n, mean, sd, ...) rnorm(n, mean = mean, sd = sd) addProblem("rnorm", fun = fun, reg = tmp) # add second problem fun = function(job, data, n, lambda, ...) rexp(n, rate = lambda) addProblem("rexp", fun = fun, reg = tmp) # add first algorithm fun = function(instance, method, ...) if (method == "mean") mean(instance) else median(instance) addAlgorithm("average", fun = fun, reg = tmp) # add second algorithm fun = function(instance, ...) 
sd(instance) addAlgorithm("deviation", fun = fun, reg = tmp) # define problem and algorithm designs library(data.table) prob.designs = algo.designs = list() prob.designs$rnorm = CJ(n = 100, mean = -1:1, sd = 1:5) prob.designs$rexp = data.table(n = 100, lambda = 1:5) algo.designs$average = data.table(method = c("mean", "median")) algo.designs$deviation = data.table() # add experiments and submit addExperiments(prob.designs, algo.designs, reg = tmp) # check what has been created summarizeExperiments(reg = tmp) unwrap(getJobPars(reg = tmp)) } \seealso{ Other Experiment: \code{\link{removeExperiments}()}, \code{\link{summarizeExperiments}()} } \concept{Experiment} batchtools/man/removeRegistry.Rd0000644000176200001440000000234613606041641016512 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/removeRegistry.R \name{removeRegistry} \alias{removeRegistry} \title{Remove a Registry from the File System} \usage{ removeRegistry(wait = 5, reg = getDefaultRegistry()) } \arguments{ \item{wait}{[\code{numeric(1)}]\cr Seconds to wait before proceeding. This is a safety measure to not accidentally remove your precious files. Set to 0 in non-interactive scripts to disable this precaution.} \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ [\code{character(1)}]: Path of the deleted file directory. } \description{ All files will be erased from the file system, including all results. If you wish to remove only intermediate files, use \code{\link{sweepRegistry}}. 
} \examples{ \dontshow{ batchtools:::example_push_temp(1) } tmp = makeRegistry(file.dir = NA, make.default = FALSE) removeRegistry(0, tmp) } \seealso{ Other Registry: \code{\link{clearRegistry}()}, \code{\link{getDefaultRegistry}()}, \code{\link{loadRegistry}()}, \code{\link{makeRegistry}()}, \code{\link{saveRegistry}()}, \code{\link{sweepRegistry}()}, \code{\link{syncRegistry}()} } \concept{Registry} batchtools/man/killJobs.Rd0000644000176200001440000000256113606041641015234 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/killJobs.R \name{killJobs} \alias{killJobs} \title{Kill Jobs} \usage{ killJobs(ids = NULL, reg = getDefaultRegistry()) } \arguments{ \item{ids}{[\code{\link[base]{data.frame}} or \code{integer}]\cr A \code{\link[base]{data.frame}} (or \code{\link[data.table]{data.table}}) with a column named \dQuote{job.id}. Alternatively, you may also pass a vector of integerish job ids. If not set, defaults to the return value of \code{\link{findOnSystem}}. Invalid ids are ignored.} \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ [\code{\link{data.table}}] with columns \dQuote{job.id}, the corresponding \dQuote{batch.id} and the logical flag \dQuote{killed} indicating success. } \description{ Kill jobs which are currently running on the batch system. In case of an error when killing, the function tries - after a short sleep - to kill the remaining batch jobs again. If this fails three times for some jobs, the function gives up. Jobs that could be successfully killed are reset in the \link{Registry}. 
} \seealso{ Other debug: \code{\link{getErrorMessages}()}, \code{\link{getStatus}()}, \code{\link{grepLogs}()}, \code{\link{resetJobs}()}, \code{\link{showLog}()}, \code{\link{testJob}()} } \concept{debug} batchtools/man/sweepRegistry.Rd0000644000176200001440000000155113606041641016335 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sweepRegistry.R \name{sweepRegistry} \alias{sweepRegistry} \title{Check Consistency and Remove Obsolete Information} \usage{ sweepRegistry(reg = getDefaultRegistry()) } \arguments{ \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \description{ Canceled jobs and jobs submitted multiple times may leave stray files behind. This function checks the registry for consistency and removes obsolete files and redundant data base entries. } \seealso{ Other Registry: \code{\link{clearRegistry}()}, \code{\link{getDefaultRegistry}()}, \code{\link{loadRegistry}()}, \code{\link{makeRegistry}()}, \code{\link{removeRegistry}()}, \code{\link{saveRegistry}()}, \code{\link{syncRegistry}()} } \concept{Registry} batchtools/man/makeClusterFunctionsSocket.Rd0000644000176200001440000000301413606041641020776 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterFunctionsSocket.R \name{makeClusterFunctionsSocket} \alias{makeClusterFunctionsSocket} \title{ClusterFunctions for Parallel Socket Execution} \usage{ makeClusterFunctionsSocket(ncpus = NA_integer_, fs.latency = 65) } \arguments{ \item{ncpus}{[\code{integer(1)}]\cr Number of CPUs. Default is to use all logical cores. The total number of cores "available" can be set via the option \code{mc.cores} and defaults to the heuristic implemented in \code{\link[parallel]{detectCores}}.} \item{fs.latency}{[\code{numeric(1)}]\cr Expected maximum latency of the file system, in seconds. 
Set to a positive number for network file systems like NFS which enables more robust (but also more expensive) mechanisms to access files and directories. Usually safe to set to \code{0} to disable the heuristic, e.g. if you are working on a local file system.} } \value{ [\code{\link{ClusterFunctions}}]. } \description{ Jobs are spawned asynchronously using the package \pkg{snow}. } \seealso{ Other ClusterFunctions: \code{\link{makeClusterFunctionsDocker}()}, \code{\link{makeClusterFunctionsInteractive}()}, \code{\link{makeClusterFunctionsLSF}()}, \code{\link{makeClusterFunctionsMulticore}()}, \code{\link{makeClusterFunctionsOpenLava}()}, \code{\link{makeClusterFunctionsSGE}()}, \code{\link{makeClusterFunctionsSSH}()}, \code{\link{makeClusterFunctionsSlurm}()}, \code{\link{makeClusterFunctionsTORQUE}()}, \code{\link{makeClusterFunctions}()} } \concept{ClusterFunctions} batchtools/man/btlapply.Rd0000644000176200001440000000601313606041641015306 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/btlapply.R \name{btlapply} \alias{btlapply} \alias{btmapply} \title{Synchronous Apply Functions} \usage{ btlapply( X, fun, ..., resources = list(), n.chunks = NULL, chunk.size = NULL, reg = makeRegistry(file.dir = NA) ) btmapply( fun, ..., more.args = list(), simplify = FALSE, use.names = TRUE, resources = list(), n.chunks = NULL, chunk.size = NULL, reg = makeRegistry(file.dir = NA) ) } \arguments{ \item{X}{[\code{\link[base]{vector}}]\cr Vector to apply over.} \item{fun}{[\code{function}]\cr Function to apply.} \item{...}{[\code{ANY}]\cr Additional arguments passed to \code{fun} (\code{btlapply}) or vectors to map over (\code{btmapply}).} \item{resources}{[\code{named list}]\cr Computational resources for the jobs to submit. The actual elements of this list (e.g. something like \dQuote{walltime} or \dQuote{nodes}) depend on your template file, exceptions are outlined in the section 'Resources'. 
Default settings for a system can be set in the configuration file by defining the named list \code{default.resources}. Note that these settings are merged by name, e.g. merging \code{list(walltime = 300)} into \code{list(walltime = 400, memory = 512)} will result in \code{list(walltime = 300, memory = 512)}. Same holds for individual job resources passed as additional column of \code{ids} (c.f. section 'Resources').} \item{n.chunks}{[\code{integer(1)}]\cr Passed to \code{\link{chunk}} before \code{\link{submitJobs}}.} \item{chunk.size}{[\code{integer(1)}]\cr Passed to \code{\link{chunk}} before \code{\link{submitJobs}}.} \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} \item{more.args}{[\code{list}]\cr Additional arguments passed to \code{fun}.} \item{simplify}{[\code{logical(1)}]\cr Simplify the results using \code{\link[base]{simplify2array}}?} \item{use.names}{[\code{logical(1)}]\cr Use names of the input to name the output?} } \value{ [\code{list}] List with the results of the function call. } \description{ This is a set of functions acting as counterparts to the sequential popular apply functions in base R: \code{btlapply} for \code{\link[base]{lapply}} and \code{btmapply} for \code{\link[base]{mapply}}. Internally, jobs are created using \code{\link{batchMap}} on the provided registry. If no registry is provided, a temporary registry (see argument \code{file.dir} of \code{\link{makeRegistry}}) and \code{\link{batchMap}} will be used. After all jobs are terminated (see \code{\link{waitForJobs}}), the results are collected and returned as a list. Note that these functions are one suitable for short and fail-safe operations on batch system. If some jobs fail, you have to retrieve partial results from the registry directory yourself. 
} \examples{ \dontshow{ batchtools:::example_push_temp(1) } btlapply(1:3, function(x) x^2) btmapply(function(x, y, z) x + y + z, x = 1:3, y = 1:3, more.args = list(z = 1), simplify = TRUE) } batchtools/man/runOSCommand.Rd0000644000176200001440000000261113606041641016024 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/runOSCommand.R \name{runOSCommand} \alias{runOSCommand} \title{Run OS Commands on Local or Remote Machines} \usage{ runOSCommand( sys.cmd, sys.args = character(0L), stdin = "", nodename = "localhost" ) } \arguments{ \item{sys.cmd}{[\code{character(1)}]\cr Command to run.} \item{sys.args}{[\code{character}]\cr Arguments for \code{sys.cmd}.} \item{stdin}{[\code{character(1)}]\cr Argument passed to \code{\link[base]{system2}}.} \item{nodename}{[\code{character(1)}]\cr Name of the SSH node to run the command on. If set to \dQuote{localhost} (default), the command is not piped through SSH.} } \value{ [\code{named list}] with \dQuote{sys.cmd}, \dQuote{sys.args}, \dQuote{exit.code} (integer), \dQuote{output} (character). } \description{ This is a helper function to run arbitrary OS commands on local or remote machines. The interface is similar to \code{\link[base]{system2}}, but it always returns the exit status \emph{and} the output. 
} \examples{ \dontrun{ runOSCommand("ls") runOSCommand("ls", "-al") runOSCommand("notfound") } } \seealso{ Other ClusterFunctionsHelper: \code{\link{cfBrewTemplate}()}, \code{\link{cfHandleUnknownSubmitError}()}, \code{\link{cfKillJob}()}, \code{\link{cfReadBrewTemplate}()}, \code{\link{makeClusterFunctions}()}, \code{\link{makeSubmitJobResult}()} } \concept{ClusterFunctionsHelper} batchtools/man/JobNames.Rd0000644000176200001440000000276613234300075015164 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/JobNames.R \name{JobNames} \alias{JobNames} \alias{setJobNames} \alias{getJobNames} \title{Set and Retrieve Job Names} \usage{ setJobNames(ids = NULL, names, reg = getDefaultRegistry()) getJobNames(ids = NULL, reg = getDefaultRegistry()) } \arguments{ \item{ids}{[\code{\link[base]{data.frame}} or \code{integer}]\cr A \code{\link[base]{data.frame}} (or \code{\link[data.table]{data.table}}) with a column named \dQuote{job.id}. Alternatively, you may also pass a vector of integerish job ids. If not set, defaults to all jobs. Invalid ids are ignored.} \item{names}{[\code{character}]\cr Character vector of the same length as provided ids.} \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ \code{setJobNames} returns \code{NULL} invisibly, \code{getJobTable} returns a \code{data.table} with columns \code{job.id} and \code{job.name}. } \description{ Set custom names for jobs. These are passed to the template as \sQuote{job.name}. If no custom name is set (or any of the job names of the chunk is missing), the job hash is used as job name. Individual job names can be accessed via \code{jobs$job.name}. 
} \examples{ \dontshow{ batchtools:::example_push_temp(1) } tmp = makeRegistry(file.dir = NA, make.default = FALSE) ids = batchMap(identity, 1:10, reg = tmp) setJobNames(ids, letters[1:nrow(ids)], reg = tmp) getJobNames(reg = tmp) } batchtools/man/JobExperiment.Rd0000644000176200001440000000621613234300075016233 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Job.R \name{makeJob} \alias{makeJob} \alias{Job} \alias{Experiment} \title{Jobs and Experiments} \usage{ makeJob(id, reader = NULL, reg = getDefaultRegistry()) } \arguments{ \item{id}{[\code{integer(1)} or \code{data.table}]\cr Single integer to specify the job or a \code{data.table} with column \code{job.id} and exactly one row.} \item{reader}{[\code{RDSReader} | \code{NULL}]\cr Reader object to retrieve files. Used internally to cache reading from the file system. The default (\code{NULL}) does not make use of caching.} \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ [\code{Job} | \code{Experiment}]. } \description{ Jobs and Experiments are abstract objects which hold all information necessary to execute a single computational job for a \code{\link{Registry}} or \code{\link{ExperimentRegistry}}, respectively. They can be created using the constructor \code{makeJob} which takes a single job id. Jobs and Experiments are passed to reduce functions like \code{\link{reduceResults}}. Furthermore, Experiments can be used in the functions of the \code{\link{Problem}} and \code{\link{Algorithm}}. Jobs and Experiments hold these information: \describe{ \item{\code{job.id}}{Job ID as integer.} \item{\code{pars}}{ Job parameters as named list. For \code{\link{ExperimentRegistry}}, the parameters are divided into the sublists \dQuote{prob.pars} and \dQuote{algo.pars}. 
} \item{\code{seed}}{Seed which is set via \code{\link{doJobCollection}} as scalar integer.} \item{\code{resources}}{Computational resources which were set for this job as named list.} \item{\code{external.dir}}{ Path to a directory which is created exclusively for this job. You can store external files here. Directory is persistent between multiple restarts of the job and can be cleaned by calling \code{\link{resetJobs}}. } \item{\code{fun}}{Job only: User function passed to \code{\link{batchMap}}.} \item{\code{prob.name}}{Experiments only: Problem id.} \item{\code{algo.name}}{Experiments only: Algorithm id.} \item{\code{problem}}{Experiments only: \code{\link{Problem}}.} \item{\code{instance}}{Experiments only: Problem instance.} \item{\code{algorithm}}{Experiments only: \code{\link{Algorithm}}.} \item{\code{repl}}{Experiments only: Replication number.} } Note that the slots \dQuote{pars}, \dQuote{fun}, \dQuote{algorithm} and \dQuote{problem} lazy-load required files from the file system and construct the object on the first access. The realizations are cached for all slots except \dQuote{instance} (which might be stochastic). Jobs and Experiments can be executed manually with \code{\link{execJob}}. 
} \examples{ \dontshow{ batchtools:::example_push_temp(1) } tmp = makeRegistry(file.dir = NA, make.default = FALSE) batchMap(function(x, y) x + y, x = 1:2, more.args = list(y = 99), reg = tmp) submitJobs(resources = list(foo = "bar"), reg = tmp) job = makeJob(1, reg = tmp) print(job) # Get the parameters: job$pars # Get the job resources: job$resources # Execute the job locally: execJob(job) } batchtools/man/reduceResults.Rd0000644000176200001440000000671013606041641016314 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reduceResults.R \name{reduceResults} \alias{reduceResults} \title{Reduce Results} \usage{ reduceResults(fun, ids = NULL, init, ..., reg = getDefaultRegistry()) } \arguments{ \item{fun}{[\code{function}]\cr A function to reduce the results. The result of previous iterations (or the \code{init}) will be passed as first argument, the result of of the i-th iteration as second. See \code{\link[base]{Reduce}} for some examples. If the function has the formal argument \dQuote{job}, the \code{\link{Job}}/\code{\link{Experiment}} is also passed to the function (named).} \item{ids}{[\code{\link[base]{data.frame}} or \code{integer}]\cr A \code{\link[base]{data.frame}} (or \code{\link[data.table]{data.table}}) with a column named \dQuote{job.id}. Alternatively, you may also pass a vector of integerish job ids. If not set, defaults to the return value of \code{\link{findDone}}. Invalid ids are ignored.} \item{init}{[\code{ANY}]\cr Initial element, as used in \code{\link[base]{Reduce}}. If missing, the reduction uses the result of the first job as \code{init} and the reduction starts with the second job.} \item{...}{[\code{ANY}]\cr Additional arguments passed to function \code{fun}.} \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ Aggregated results in the same order as provided ids. 
Return type depends on the user function. If \code{ids} is empty, \code{reduceResults} returns \code{init} (if available) or \code{NULL} otherwise. } \description{ A version of \code{\link[base]{Reduce}} for \code{\link{Registry}} objects which iterates over finished jobs and aggregates them. All jobs must have terminated, an error is raised otherwise. } \note{ If you have thousands of jobs, disabling the progress bar (\code{options(batchtools.progress = FALSE)}) can significantly increase the performance. } \examples{ \dontshow{ batchtools:::example_push_temp(1) } tmp = makeRegistry(file.dir = NA, make.default = FALSE) batchMap(function(a, b) list(sum = a+b, prod = a*b), a = 1:3, b = 1:3, reg = tmp) submitJobs(reg = tmp) waitForJobs(reg = tmp) # Extract element sum from each result reduceResults(function(aggr, res) c(aggr, res$sum), init = list(), reg = tmp) # Aggregate element sum via '+' reduceResults(function(aggr, res) aggr + res$sum, init = 0, reg = tmp) # Aggregate element prod via '*' where parameter b < 3 reduce = function(aggr, res, job) { if (job$pars$b >= 3) return(aggr) aggr * res$prod } reduceResults(reduce, init = 1, reg = tmp) # Reduce to data.frame() (inefficient, use reduceResultsDataTable() instead) reduceResults(rbind, init = data.frame(), reg = tmp) # Reduce to data.frame by collecting results first, then utilize vectorization of rbind: res = reduceResultsList(fun = as.data.frame, reg = tmp) do.call(rbind, res) # Reduce with custom combine function: comb = function(x, y) list(sum = x$sum + y$sum, prod = x$prod * y$prod) reduceResults(comb, reg = tmp) # The same with neutral element NULL comb = function(x, y) if (is.null(x)) y else list(sum = x$sum + y$sum, prod = x$prod * y$prod) reduceResults(comb, init = NULL, reg = tmp) # Alternative: Reduce in list, reduce manually in a 2nd step res = reduceResultsList(reg = tmp) Reduce(comb, res) } \seealso{ Other Results: \code{\link{batchMapResults}()}, \code{\link{loadResult}()}, 
\code{\link{reduceResultsList}()} } \concept{Results} batchtools/man/makeClusterFunctionsInteractive.Rd0000644000176200001440000000407713606041641022035 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterFunctionsInteractive.R \name{makeClusterFunctionsInteractive} \alias{makeClusterFunctionsInteractive} \title{ClusterFunctions for Sequential Execution in the Running R Session} \usage{ makeClusterFunctionsInteractive( external = FALSE, write.logs = TRUE, fs.latency = 0 ) } \arguments{ \item{external}{[\code{logical(1)}]\cr If set to \code{TRUE}, jobs are started in a fresh R session instead of currently active but still waits for its termination. Default is \code{FALSE}.} \item{write.logs}{[\code{logical(1)}]\cr Sink the output to log files. Turning logging off can increase the speed of calculations but makes it very difficult to debug. Default is \code{TRUE}.} \item{fs.latency}{[\code{numeric(1)}]\cr Expected maximum latency of the file system, in seconds. Set to a positive number for network file systems like NFS which enables more robust (but also more expensive) mechanisms to access files and directories. Usually safe to set to \code{0} to disable the heuristic, e.g. if you are working on a local file system.} } \value{ [\code{\link{ClusterFunctions}}]. } \description{ All jobs are executed sequentially using the current R process in which \code{\link{submitJobs}} is called. Thus, \code{submitJob} blocks the session until the job has finished. The main use of this \code{ClusterFunctions} implementation is to test and debug programs on a local computer. Listing jobs returns an empty vector (as no jobs can be running when you call this) and \code{killJob} is not implemented for the same reasons. 
} \seealso{ Other ClusterFunctions: \code{\link{makeClusterFunctionsDocker}()}, \code{\link{makeClusterFunctionsLSF}()}, \code{\link{makeClusterFunctionsMulticore}()}, \code{\link{makeClusterFunctionsOpenLava}()}, \code{\link{makeClusterFunctionsSGE}()}, \code{\link{makeClusterFunctionsSSH}()}, \code{\link{makeClusterFunctionsSlurm}()}, \code{\link{makeClusterFunctionsSocket}()}, \code{\link{makeClusterFunctionsTORQUE}()}, \code{\link{makeClusterFunctions}()} } \concept{ClusterFunctions} batchtools/man/testJob.Rd0000644000176200001440000000330413606041641015071 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/testJob.R \name{testJob} \alias{testJob} \title{Run Jobs Interactively} \usage{ testJob(id, external = FALSE, reg = getDefaultRegistry()) } \arguments{ \item{id}{[\code{integer(1)} or \code{data.table}]\cr Single integer to specify the job or a \code{data.table} with column \code{job.id} and exactly one row.} \item{external}{[\code{logical(1)}]\cr Run the job in an external R session? If \code{TRUE}, starts a fresh R session on the local machine to execute the with \code{\link{execJob}}. You will not be able to use debug tools like \code{\link[base]{traceback}} or \code{\link[base]{browser}}. If \code{external} is set to \code{FALSE} (default) on the other hand, \code{testJob} will execute the job in the current R session and the usual debugging tools work. However, spotting missing variable declarations (as they are possibly resolved in the global environment) is impossible. Same holds for missing package dependency declarations.} \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ Returns the result of the job if successful. } \description{ Starts a single job on the local machine. 
} \examples{ \dontshow{ batchtools:::example_push_temp(1) } tmp = makeRegistry(file.dir = NA, make.default = FALSE) batchMap(function(x) if (x == 2) xxx else x, 1:2, reg = tmp) testJob(1, reg = tmp) \dontrun{ testJob(2, reg = tmp) } } \seealso{ Other debug: \code{\link{getErrorMessages}()}, \code{\link{getStatus}()}, \code{\link{grepLogs}()}, \code{\link{killJobs}()}, \code{\link{resetJobs}()}, \code{\link{showLog}()} } \concept{debug} batchtools/man/makeRegistry.Rd0000644000176200001440000002201613606056076016136 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Registry.R \name{makeRegistry} \alias{makeRegistry} \alias{Registry} \title{Registry Constructor} \usage{ makeRegistry( file.dir = "registry", work.dir = getwd(), conf.file = findConfFile(), packages = character(0L), namespaces = character(0L), source = character(0L), load = character(0L), seed = NULL, make.default = TRUE ) } \arguments{ \item{file.dir}{[\code{character(1)}]\cr Path where all files of the registry are saved. Default is directory \dQuote{registry} in the current working directory. The provided path will get normalized unless it is given relative to the home directory (i.e., starting with \dQuote{~}). Note that some templates do not handle relative paths well. If you pass \code{NA}, a temporary directory will be used. This way, you can create disposable registries for \code{\link{btlapply}} or examples. By default, the temporary directory \code{\link[base]{tempdir}()} will be used. If you want to use another directory, e.g. a directory which is shared between nodes, you can set it in your configuration file by setting the variable \code{temp.dir}.} \item{work.dir}{[\code{character(1)}]\cr Working directory for R process for running jobs. Defaults to the working directory currently set during Registry construction (see \code{\link[base]{getwd}}). 
\code{loadRegistry} uses the stored \code{work.dir}, but you may also explicitly overwrite it, e.g., after switching to another system. The provided path will get normalized unless it is given relative to the home directory (i.e., starting with \dQuote{~}). Note that some templates do not handle relative paths well.} \item{conf.file}{[\code{character(1)}]\cr Path to a configuration file which is sourced while the registry is created. In the configuration file you can define how \pkg{batchtools} interacts with the system via \code{\link{ClusterFunctions}}. Separating the configuration of the underlying host system from the R code allows to easily move computation to another site. The file lookup is implemented in the internal (but exported) function \code{findConfFile} which returns the first file found of the following candidates: \enumerate{ \item{File \dQuote{batchtools.conf.R} in the path specified by the environment variable \dQuote{R_BATCHTOOLS_SEARCH_PATH}.} \item{File \dQuote{batchtools.conf.R} in the current working directory.} \item{File \dQuote{config.R} in the user configuration directory as reported by \code{rappdirs::user_config_dir("batchtools", expand = FALSE)} (depending on OS, e.g., on linux this usually resolves to \dQuote{~/.config/batchtools/config.R}).} \item{\dQuote{.batchtools.conf.R} in the home directory (\dQuote{~}).} \item{\dQuote{config.R} in the site config directory as reported by \code{rappdirs::site_config_dir("batchtools")} (depending on OS). This file can be used for admins to set sane defaults for a computation site.} } Set to \code{NA} if you want to suppress reading any configuration file. If a configuration file is found, it gets sourced inside the environment of the registry after the defaults for all variables are set. Therefore you can set and overwrite slots, e.g. 
\code{default.resources = list(walltime = 3600)} to set default resources or \dQuote{max.concurrent.jobs} to limit the number of jobs allowed to run simultaneously on the system.} \item{packages}{[\code{character}]\cr Packages that will always be loaded on each node. Uses \code{\link[base]{require}} internally. Default is \code{character(0)}.} \item{namespaces}{[\code{character}]\cr Same as \code{packages}, but the packages will not be attached. Uses \code{\link[base]{requireNamespace}} internally. Default is \code{character(0)}.} \item{source}{[\code{character}]\cr Files which should be sourced on the slaves prior to executing a job. Calls \code{\link[base]{sys.source}} using the \code{\link[base]{.GlobalEnv}}.} \item{load}{[\code{character}]\cr Files which should be loaded on the slaves prior to executing a job. Calls \code{\link[base]{load}} using the \code{\link[base]{.GlobalEnv}}.} \item{seed}{[\code{integer(1)}]\cr Start seed for jobs. Each job uses the (\code{seed} + \code{job.id}) as seed. Default is a random integer between 1 and 32768} \item{make.default}{[\code{logical(1)}]\cr If set to \code{TRUE}, the created registry is saved inside the package namespace and acts as default registry. You might want to switch this off if you work with multiple registries simultaneously. Default is \code{TRUE}.} } \value{ [\code{environment}] of class \dQuote{Registry} with the following slots: \describe{ \item{\code{file.dir} [path]:}{File directory.} \item{\code{work.dir} [path]:}{Working directory.} \item{\code{temp.dir} [path]:}{Temporary directory. Used if \code{file.dir} is \code{NA} to create temporary registries.} \item{\code{packages} [character()]:}{Packages to load on the slaves.} \item{\code{namespaces} [character()]:}{Namespaces to load on the slaves.} \item{\code{seed} [integer(1)]:}{Registry seed. Before each job is executed, the seed \code{seed + job.id} is set.} \item{\code{cluster.functions} [cluster.functions]:}{Usually set in your \code{conf.file}. 
Set via a call to \code{\link{makeClusterFunctions}}. See example.} \item{\code{default.resources} [named list()]:}{Usually set in your \code{conf.file}. Named list of default resources.} \item{\code{max.concurrent.jobs} [integer(1)]:}{Usually set in your \code{conf.file}. Maximum number of concurrent jobs for a single user and current registry on the system. \code{\link{submitJobs}} will try to respect this setting. The resource \dQuote{max.concurrent.jobs} has higher precedence.} \item{\code{defs} [data.table]:}{Table with job definitions (i.e. parameters).} \item{\code{status} [data.table]:}{Table holding information about the computational status. Also see \code{\link{getJobStatus}}.} \item{\code{resources} [data.table]:}{Table holding information about the computational resources used for the job. Also see \code{\link{getJobResources}}.} \item{\code{tags} [data.table]:}{Table holding information about tags. See \link{Tags}.} \item{\code{hash} [character(1)]:}{Unique hash which changes each time the registry gets saved to the file system. Can be utilized to invalidate the cache of \pkg{knitr}.} } } \description{ \code{makeRegistry} constructs the inter-communication object for all functions in \code{batchtools}. All communication transactions are processed via the file system: All information required to run a job is stored as \code{\link{JobCollection}} in a file in the a subdirectory of the \code{file.dir} directory. Each jobs stores its results as well as computational status information (start time, end time, error message, ...) also on the file system which is regular merged parsed by the master using \code{\link{syncRegistry}}. After integrating the new information into the Registry, the Registry is serialized to the file system via \code{\link{saveRegistry}}. Both \code{\link{syncRegistry}} and \code{\link{saveRegistry}} are called whenever required internally. Therefore it should be safe to quit the R session at any time. 
Work can later be resumed by calling \code{\link{loadRegistry}} which de-serializes the registry from the file system. The registry created last is saved in the package namespace (unless \code{make.default} is set to \code{FALSE}) and can be retrieved via \code{\link{getDefaultRegistry}}. Canceled jobs and jobs submitted multiple times may leave stray files behind. These can be swept using \code{\link{sweepRegistry}}. \code{\link{clearRegistry}} completely erases all jobs from a registry, including log files and results, and thus allows you to start over. } \details{ Currently \pkg{batchtools} understands the following options set via the configuration file: \describe{ \item{\code{cluster.functions}:}{As returned by a constructor, e.g. \code{\link{makeClusterFunctionsSlurm}}.} \item{\code{default.resources}:}{List of resources to use. Will be overruled by resources specified via \code{\link{submitJobs}}.} \item{\code{temp.dir}:}{Path to directory to use for temporary registries.} \item{\code{sleep}:}{Custom sleep function. 
See \code{\link{waitForJobs}}.} \item{\code{expire.after}:}{Number of iterations before treating jobs as expired in \code{\link{waitForJobs}}.} \item{\code{compress}:}{Compression algorithm to use via \code{\link{saveRDS}}.} } } \examples{ \dontshow{ batchtools:::example_push_temp(1) } tmp = makeRegistry(file.dir = NA, make.default = FALSE) print(tmp) # Set cluster functions to interactive mode and start jobs in external R sessions tmp$cluster.functions = makeClusterFunctionsInteractive(external = TRUE) # Change packages to load tmp$packages = c("MASS") saveRegistry(reg = tmp) } \seealso{ Other Registry: \code{\link{clearRegistry}()}, \code{\link{getDefaultRegistry}()}, \code{\link{loadRegistry}()}, \code{\link{removeRegistry}()}, \code{\link{saveRegistry}()}, \code{\link{sweepRegistry}()}, \code{\link{syncRegistry}()} } \concept{Registry} batchtools/man/unwrap.Rd0000644000176200001440000000316613543336703015007 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/unwrap.R \name{unwrap} \alias{unwrap} \alias{flatten} \title{Unwrap Nested Data Frames} \usage{ unwrap(x, cols = NULL, sep = NULL) flatten(x, cols = NULL, sep = NULL) } \arguments{ \item{x}{[\code{\link{data.frame}} | \code{\link[data.table]{data.table}}]\cr Data frame to flatten.} \item{cols}{[\code{character}]\cr Columns to consider for this operation. If set to \code{NULL} (default), will operate on all columns of type \dQuote{list}.} \item{sep}{[\code{character(1)}]\cr If \code{NULL} (default), the column names of the additional columns will re-use the names of the nested \code{list}/\code{data.frame}. This may lead to name clashes. If you provide \code{sep}, the variable column name will be constructed as \dQuote{[column name of x][sep][inner name]}.} } \value{ [\code{\link{data.table}}]. 
} \description{ Some functions (e.g., \code{\link{getJobPars}}, \code{\link{getJobResources}} or \code{\link{reduceResultsDataTable}} return a \code{data.table} with columns of type \code{list}. These columns can be unnested/unwrapped with this function. The contents of these columns will be transformed to a \code{data.table} and \code{\link[base]{cbind}}-ed to the input data.frame \code{x}, replacing the original nested column. } \note{ There is a name clash with function \code{flatten} in package \pkg{purrr}. The function \code{flatten} is discouraged to use for this reason in favor of \code{unwrap}. } \examples{ x = data.table::data.table( id = 1:3, values = list(list(a = 1, b = 3), list(a = 2, b = 2), list(a = 3)) ) unwrap(x) unwrap(x, sep = ".") } batchtools/man/cfReadBrewTemplate.Rd0000644000176200001440000000205213606041641017156 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterFunctions.R \name{cfReadBrewTemplate} \alias{cfReadBrewTemplate} \title{Cluster Functions Helper to Parse a Brew Template} \usage{ cfReadBrewTemplate(template, comment.string = NA_character_) } \arguments{ \item{template}{[\code{character(1)}]\cr Path to template file which is then passed to \code{\link[brew]{brew}}.} \item{comment.string}{[\code{character(1)}]\cr Ignore lines starting with this string.} } \value{ [\code{character}]. } \description{ This function is only intended for use in your own cluster functions implementation. This function is only intended for use in your own cluster functions implementation. Simply reads your template file and returns it as a character vector. 
} \seealso{ Other ClusterFunctionsHelper: \code{\link{cfBrewTemplate}()}, \code{\link{cfHandleUnknownSubmitError}()}, \code{\link{cfKillJob}()}, \code{\link{makeClusterFunctions}()}, \code{\link{makeSubmitJobResult}()}, \code{\link{runOSCommand}()} } \concept{ClusterFunctionsHelper} batchtools/man/clearRegistry.Rd0000644000176200001440000000132313606041641016275 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clearRegistry.R \name{clearRegistry} \alias{clearRegistry} \title{Remove All Jobs} \usage{ clearRegistry(reg = getDefaultRegistry()) } \arguments{ \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \description{ Removes all jobs from a registry and calls \code{\link{sweepRegistry}}. } \seealso{ Other Registry: \code{\link{getDefaultRegistry}()}, \code{\link{loadRegistry}()}, \code{\link{makeRegistry}()}, \code{\link{removeRegistry}()}, \code{\link{saveRegistry}()}, \code{\link{sweepRegistry}()}, \code{\link{syncRegistry}()} } \concept{Registry} batchtools/man/batchMap.Rd0000644000176200001440000000541213606041641015200 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/batchMap.R \name{batchMap} \alias{batchMap} \title{Map Operation for Batch Systems} \usage{ batchMap( fun, ..., args = list(), more.args = list(), reg = getDefaultRegistry() ) } \arguments{ \item{fun}{[\code{function}]\cr Function to map over arguments provided via \code{...}. Parameters given via \code{args} or \code{...} are passed as-is, in the respective order and possibly named. If the function has the named formal argument \dQuote{.job}, the \code{\link{Job}} is passed to the function on the slave.} \item{...}{[ANY]\cr Arguments to vectorize over (list or vector). Shorter vectors will be recycled (possibly with a warning any length is not a multiple of the longest length). 
Mutually exclusive with \code{args}. Note that although it is possible to iterate over large objects (e.g., lists of data frames or matrices), this usually hurts the overall performance and thus is discouraged.} \item{args}{[\code{list} | \code{data.frame}]\cr Arguments to vectorize over as (named) list or data frame. Shorter vectors will be recycled (possibly with a warning any length is not a multiple of the longest length). Mutually exclusive with \code{...}.} \item{more.args}{[\code{list}]\cr A list of further arguments passed to \code{fun}. Default is an empty list.} \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ [\code{\link{data.table}}] with ids of added jobs stored in column \dQuote{job.id}. } \description{ A parallel and asynchronous \code{\link[base]{Map}}/\code{\link[base]{mapply}} for batch systems. Note that this function only defines the computational jobs. The actual computation is started with \code{\link{submitJobs}}. Results and partial results can be collected with \code{\link{reduceResultsList}}, \code{\link{reduceResults}} or \code{\link{loadResult}}. For a synchronous \code{\link[base]{Map}}-like execution, see \code{\link{btmapply}}. } \examples{ \dontshow{ batchtools:::example_push_temp(3) } # example using "..." and more.args tmp = makeRegistry(file.dir = NA, make.default = FALSE) f = function(x, y) x^2 + y ids = batchMap(f, x = 1:10, more.args = list(y = 100), reg = tmp) getJobPars(reg = tmp) testJob(6, reg = tmp) # 100 + 6^2 = 136 # vector recycling tmp = makeRegistry(file.dir = NA, make.default = FALSE) f = function(...) list(...) 
ids = batchMap(f, x = 1:3, y = 1:6, reg = tmp) getJobPars(reg = tmp) # example for an expand.grid()-like operation on parameters tmp = makeRegistry(file.dir = NA, make.default = FALSE) ids = batchMap(paste, args = data.table::CJ(x = letters[1:3], y = 1:3), reg = tmp) getJobPars(reg = tmp) testJob(6, reg = tmp) } \seealso{ \code{\link{batchReduce}} } batchtools/man/makeClusterFunctionsMulticore.Rd0000644000176200001440000000323613606041641021517 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterFunctionsMulticore.R \name{makeClusterFunctionsMulticore} \alias{makeClusterFunctionsMulticore} \title{ClusterFunctions for Parallel Multicore Execution} \usage{ makeClusterFunctionsMulticore(ncpus = NA_integer_, fs.latency = 0) } \arguments{ \item{ncpus}{[\code{integer(1)}]\cr Number of CPUs. Default is to use all logical cores. The total number of cores "available" can be set via the option \code{mc.cores} and defaults to the heuristic implemented in \code{\link[parallel]{detectCores}}.} \item{fs.latency}{[\code{numeric(1)}]\cr Expected maximum latency of the file system, in seconds. Set to a positive number for network file systems like NFS which enables more robust (but also more expensive) mechanisms to access files and directories. Usually safe to set to \code{0} to disable the heuristic, e.g. if you are working on a local file system.} } \value{ [\code{\link{ClusterFunctions}}]. } \description{ Jobs are spawned asynchronously using the functions \code{mcparallel} and \code{mccollect} (both in \pkg{parallel}). Does not work on Windows, use \code{\link{makeClusterFunctionsSocket}} instead. 
} \seealso{ Other ClusterFunctions: \code{\link{makeClusterFunctionsDocker}()}, \code{\link{makeClusterFunctionsInteractive}()}, \code{\link{makeClusterFunctionsLSF}()}, \code{\link{makeClusterFunctionsOpenLava}()}, \code{\link{makeClusterFunctionsSGE}()}, \code{\link{makeClusterFunctionsSSH}()}, \code{\link{makeClusterFunctionsSlurm}()}, \code{\link{makeClusterFunctionsSocket}()}, \code{\link{makeClusterFunctionsTORQUE}()}, \code{\link{makeClusterFunctions}()} } \concept{ClusterFunctions} batchtools/man/batchExport.Rd0000644000176200001440000000255513606041641015751 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Export.R \name{batchExport} \alias{batchExport} \title{Export Objects to the Slaves} \usage{ batchExport( export = list(), unexport = character(0L), reg = getDefaultRegistry() ) } \arguments{ \item{export}{[\code{list}]\cr Named list of objects to export.} \item{unexport}{[\code{character}]\cr Vector of object names to unexport.} \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ [\code{data.table}] with name and uri to the exported objects. } \description{ Objects are saved in subdirectory \dQuote{exports} of the \dQuote{file.dir} of \code{reg}. They are automatically loaded and placed in the global environment each time the registry is loaded or a job collection is executed. 
} \examples{ \dontshow{ batchtools:::example_push_temp(1) } tmp = makeRegistry(file.dir = NA, make.default = FALSE) # list exports exports = batchExport(reg = tmp) print(exports) # add a job and required exports batchMap(function(x) x^2 + y + z, x = 1:3, reg = tmp) exports = batchExport(export = list(y = 99, z = 1), reg = tmp) print(exports) submitJobs(reg = tmp) waitForJobs(reg = tmp) stopifnot(loadResult(1, reg = tmp) == 101) # Un-export z exports = batchExport(unexport = "z", reg = tmp) print(exports) } batchtools/man/findConfFile.Rd0000644000176200001440000000226013301520663016003 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/config.R \name{findConfFile} \alias{findConfFile} \title{Find a batchtools Configuration File} \usage{ findConfFile() } \value{ [\code{character(1)}] Path to the configuration file or \code{NA} if no configuration file was found. } \description{ This functions returns the path to the first configuration file found in the following locations: \enumerate{ \item{File \dQuote{batchtools.conf.R} in the path specified by the environment variable \dQuote{R_BATCHTOOLS_SEARCH_PATH}.} \item{File \dQuote{batchtools.conf.R} in the current working directory.} \item{File \dQuote{config.R} in the user configuration directory as reported by \code{rappdirs::user_config_dir("batchtools", expand = FALSE)} (depending on OS, e.g., on linux this usually resolves to \dQuote{~/.config/batchtools/config.R}).} \item{\dQuote{.batchtools.conf.R} in the home directory (\dQuote{~}).} \item{\dQuote{config.R} in the site config directory as reported by \code{rappdirs::site_config_dir("batchtools")} (depending on OS). 
This file can be used for admins to set sane defaults for a computation site.} } } \keyword{internal} batchtools/man/getJobTable.Rd0000644000176200001440000000632613234300075015644 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/JobTables.R \name{getJobTable} \alias{getJobTable} \alias{getJobStatus} \alias{getJobResources} \alias{getJobPars} \alias{getJobTags} \title{Query Job Information} \usage{ getJobTable(ids = NULL, reg = getDefaultRegistry()) getJobStatus(ids = NULL, reg = getDefaultRegistry()) getJobResources(ids = NULL, reg = getDefaultRegistry()) getJobPars(ids = NULL, reg = getDefaultRegistry()) getJobTags(ids = NULL, reg = getDefaultRegistry()) } \arguments{ \item{ids}{[\code{\link[base]{data.frame}} or \code{integer}]\cr A \code{\link[base]{data.frame}} (or \code{\link[data.table]{data.table}}) with a column named \dQuote{job.id}. Alternatively, you may also pass a vector of integerish job ids. If not set, defaults to all jobs. Invalid ids are ignored.} \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ [\code{\link{data.table}}] with the following columns (not necessarily in this order): \describe{ \item{job.id}{Unique Job ID as integer.} \item{submitted}{Time the job was submitted to the batch system as \code{\link[base]{POSIXct}}.} \item{started}{Time the job was started on the batch system as \code{\link[base]{POSIXct}}.} \item{done}{Time the job terminated (successfully or with an error) as \code{\link[base]{POSIXct}}.} \item{error}{Either \code{NA} if the job terminated successfully or the error message.} \item{mem.used}{Estimate of the memory usage.} \item{batch.id}{Batch ID as reported by the scheduler.} \item{log.file}{Log file. 
If missing, defaults to \code{[job.hash].log}.} \item{job.hash}{Unique string identifying the job or chunk.} \item{time.queued}{Time in seconds (as \code{\link[base]{difftime}}) the job was queued.} \item{time.running}{Time in seconds (as \code{\link[base]{difftime}}) the job was running.} \item{pars}{List of parameters/arguments for this job.} \item{resources}{List of computational resources set for this job.} \item{tags}{Tags as joined string, delimited by \dQuote{,}.} \item{problem}{Only for \code{\link{ExperimentRegistry}}: the problem identifier.} \item{algorithm}{Only for \code{\link{ExperimentRegistry}}: the algorithm identifier.} } } \description{ \code{getJobStatus} returns the internal table which stores information about the computational status of jobs, \code{getJobPars} a table with the job parameters, \code{getJobResources} a table with the resources which were set to submit the jobs, and \code{getJobTags} the tags of the jobs (see \link{Tags}). \code{getJobTable} returns all these tables joined. 
} \examples{ \dontshow{ batchtools:::example_push_temp(1) } tmp = makeRegistry(file.dir = NA, make.default = FALSE) f = function(x) if (x < 0) stop("x must be > 0") else sqrt(x) batchMap(f, x = c(-1, 0, 1), reg = tmp) submitJobs(reg = tmp) waitForJobs(reg = tmp) addJobTags(1:2, "tag1", reg = tmp) addJobTags(2, "tag2", reg = tmp) # Complete table: getJobTable(reg = tmp) # Job parameters: getJobPars(reg = tmp) # Set and retrieve tags: getJobTags(reg = tmp) # Job parameters with tags right-joined: rjoin(getJobPars(reg = tmp), getJobTags(reg = tmp)) } batchtools/man/submitJobs.Rd0000644000176200001440000002620413606041641015604 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/submitJobs.R \name{submitJobs} \alias{submitJobs} \title{Submit Jobs to the Batch Systems} \usage{ submitJobs( ids = NULL, resources = list(), sleep = NULL, reg = getDefaultRegistry() ) } \arguments{ \item{ids}{[\code{\link[base]{data.frame}} or \code{integer}]\cr A \code{\link[base]{data.frame}} (or \code{\link[data.table]{data.table}}) with a column named \dQuote{job.id}. Alternatively, you may also pass a vector of integerish job ids. If not set, defaults to the return value of \code{\link{findNotSubmitted}}. Invalid ids are ignored.} \item{resources}{[\code{named list}]\cr Computational resources for the jobs to submit. The actual elements of this list (e.g. something like \dQuote{walltime} or \dQuote{nodes}) depend on your template file, exceptions are outlined in the section 'Resources'. Default settings for a system can be set in the configuration file by defining the named list \code{default.resources}. Note that these settings are merged by name, e.g. merging \code{list(walltime = 300)} into \code{list(walltime = 400, memory = 512)} will result in \code{list(walltime = 300, memory = 512)}. Same holds for individual job resources passed as additional column of \code{ids} (c.f. 
section 'Resources').} \item{sleep}{[\code{function(i)} | \code{numeric(1)}]\cr Parameter to control the duration to sleep between temporary errors. You can pass an absolute numeric value in seconds or a \code{function(i)} which returns the number of seconds to sleep in the \code{i}-th iteration between temporary errors. If not provided (\code{NULL}), tries to read the value (number/function) from the configuration file (stored in \code{reg$sleep}) or defaults to a function with exponential backoff between 5 and 120 seconds.} \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ [\code{\link{data.table}}] with columns \dQuote{job.id} and \dQuote{chunk}. } \description{ Submits defined jobs to the batch system. After submitting the jobs, you can use \code{\link{waitForJobs}} to wait for the termination of jobs or call \code{\link{reduceResultsList}}/\code{\link{reduceResults}} to collect partial results. The progress can be monitored with \code{\link{getStatus}}. } \note{ If you a large number of jobs, disabling the progress bar (\code{options(batchtools.progress = FALSE)}) can significantly increase the performance of \code{submitJobs}. } \section{Resources}{ You can pass arbitrary resources to \code{submitJobs()} which then are available in the cluster function template. Some resources' names are standardized and it is good practice to stick to the following nomenclature to avoid confusion: \describe{ \item{walltime:}{Upper time limit in seconds for jobs before they get killed by the scheduler. Can be passed as additional column as part of \code{ids} to set per-job resources.} \item{memory:}{Memory limit in Mb. If jobs exceed this limit, they are usually killed by the scheduler. Can be passed as additional column as part of \code{ids} to set per-job resources.} \item{ncpus:}{Number of (physical) CPUs to use on the slave. 
Can be passed as additional column as part of \code{ids} to set per-job resources.} \item{omp.threads:}{Number of threads to use via OpenMP. Used to set environment variable \dQuote{OMP_NUM_THREADS}. Can be passed as additional column as part of \code{ids} to set per-job resources.} \item{pp.size:}{Maximum size of the pointer protection stack, see \code{\link[base]{Memory}}.} \item{blas.threads:}{Number of threads to use for the BLAS backend. Used to set environment variables \dQuote{MKL_NUM_THREADS} and \dQuote{OPENBLAS_NUM_THREADS}. Can be passed as additional column as part of \code{ids} to set per-job resources.} \item{measure.memory:}{Enable memory measurement for jobs. Comes with a small runtime overhead.} \item{chunks.as.arrayjobs:}{Execute chunks as array jobs.} \item{pm.backend:}{Start a \pkg{parallelMap} backend on the slave.} \item{foreach.backend:}{Start a \pkg{foreach} backend on the slave.} \item{clusters:}{Resource used for Slurm to select the set of clusters to run \code{sbatch}/\code{squeue}/\code{scancel} on.} } } \section{Chunking of Jobs}{ Multiple jobs can be grouped (chunked) together to be executed sequentially on the batch system as a single batch job. This is especially useful to avoid overburding the scheduler by submitting thousands of jobs simultaneously. To chunk jobs together, job ids must be provided as \code{data.frame} with columns \dQuote{job.id} and \dQuote{chunk} (integer). All jobs with the same chunk number will be executed sequentially inside the same batch job. The utility functions \code{\link{chunk}}, \code{\link{binpack}} and \code{\link{lpt}} can assist in grouping jobs. } \section{Array Jobs}{ If your cluster supports array jobs, you can set the resource \code{chunks.as.arrayjobs} to \code{TRUE} in order to execute chunks as job arrays on the cluster. 
For each chunk of size \code{n}, \pkg{batchtools} creates a \code{\link{JobCollection}} of (possibly heterogeneous) jobs which is submitted to the scheduler as a single array job with \code{n} repetitions. For each repetition, the \code{JobCollection} is first read from the file system, then subsetted to the \code{i}-th job using the environment variable \code{reg$cluster.functions$array.var} (depending on the cluster backend, defined automatically) and finally executed. } \section{Order of Submission}{ Jobs are submitted in the order of chunks, i.e. jobs which have chunk number \code{sort(unique(ids$chunk))[1]} first, then jobs with chunk number \code{sort(unique(ids$chunk))[2]} and so on. If no chunks are provided, jobs are submitted in the order of \code{ids$job.id}. } \section{Limiting the Number of Jobs}{ If requested, \code{submitJobs} tries to limit the number of concurrent jobs of the user by waiting until jobs terminate before submitting new ones. This can be controlled by setting \dQuote{max.concurrent.jobs} in the configuration file (see \code{\link{Registry}}) or by setting the resource \dQuote{max.concurrent.jobs} to the maximum number of jobs to run simultaneously. If both are set, the setting via the resource takes precedence over the setting in the configuration. } \section{Measuring Memory}{ Setting the resource \code{measure.memory} to \code{TRUE} turns on memory measurement: \code{\link[base]{gc}} is called directly before and after the job and the difference is stored in the internal database. Note that this is just a rough estimate and does neither work reliably for external code like C/C++ nor in combination with threading. } \section{Inner Parallelization}{ Inner parallelization is typically done via threading, sockets or MPI. Two backends are supported to assist in setting up inner parallelization. The first package is \pkg{parallelMap}. 
If you set the resource \dQuote{pm.backend} to \dQuote{multicore}, \dQuote{socket} or \dQuote{mpi}, \code{\link[parallelMap]{parallelStart}} is called on the slave before the first job in the chunk is started and \code{\link[parallelMap]{parallelStop}} is called after the last job terminated. This way, the resources for inner parallelization can be set and get automatically stored just like other computational resources. The function provided by the user just has to call \code{\link[parallelMap]{parallelMap}} to start parallelization using the preconfigured backend. To control the number of CPUs, you have to set the resource \code{ncpus}. Otherwise \code{ncpus} defaults to the number of available CPUs (as reported by (see \code{\link[parallel]{detectCores}})) on the executing machine for multicore and socket mode and defaults to the return value of \code{\link[Rmpi]{mpi.universe.size}-1} for MPI. Your template must be set up to handle the parallelization, e.g. request the right number of CPUs or start R with \code{mpirun}. You may pass further options like \code{level} to \code{\link[parallelMap]{parallelStart}} via the named list \dQuote{pm.opts}. The second supported parallelization backend is \pkg{foreach}. If you set the resource \dQuote{foreach.backend} to \dQuote{seq} (sequential mode), \dQuote{parallel} (\pkg{doParallel}) or \dQuote{mpi} (\pkg{doMPI}), the requested \pkg{foreach} backend is automatically registered on the slave. Again, the resource \code{ncpus} is used to determine the number of CPUs. Neither the namespace of \pkg{parallelMap} nor the namespace \pkg{foreach} are attached. You have to do this manually via \code{\link[base]{library}} or let the registry load the packages for you. 
} \examples{ \dontshow{ batchtools:::example_push_temp(3) } ### Example 1: Submit subsets of jobs tmp = makeRegistry(file.dir = NA, make.default = FALSE) # toy function which fails if x is even and an input file does not exists fun = function(x, fn) if (x \%\% 2 == 0 && !file.exists(fn)) stop("file not found") else x # define jobs via batchMap fn = tempfile() ids = batchMap(fun, 1:20, reg = tmp, fn = fn) # submit some jobs ids = 1:10 submitJobs(ids, reg = tmp) waitForJobs(ids, reg = tmp) getStatus(reg = tmp) # create the required file and re-submit failed jobs file.create(fn) submitJobs(findErrors(ids, reg = tmp), reg = tmp) getStatus(reg = tmp) # submit remaining jobs which have not yet been submitted ids = findNotSubmitted(reg = tmp) submitJobs(ids, reg = tmp) getStatus(reg = tmp) # collect results reduceResultsList(reg = tmp) ### Example 2: Using memory measurement tmp = makeRegistry(file.dir = NA, make.default = FALSE) # Toy function which creates a large matrix and returns the column sums fun = function(n, p) colMeans(matrix(runif(n*p), n, p)) # Arguments to fun: args = data.table::CJ(n = c(1e4, 1e5), p = c(10, 50)) # like expand.grid() print(args) # Map function to create jobs ids = batchMap(fun, args = args, reg = tmp) # Set resources: enable memory measurement res = list(measure.memory = TRUE) # Submit jobs using the currently configured cluster functions submitJobs(ids, resources = res, reg = tmp) # Retrive information about memory, combine with parameters info = ijoin(getJobStatus(reg = tmp)[, .(job.id, mem.used)], getJobPars(reg = tmp)) print(unwrap(info)) # Combine job info with results -> each job is aggregated using mean() unwrap(ijoin(info, reduceResultsDataTable(fun = function(res) list(res = mean(res)), reg = tmp))) ### Example 3: Multicore execution on the slave tmp = makeRegistry(file.dir = NA, make.default = FALSE) # Function which sleeps 10 seconds, i-times f = function(i) { parallelMap::parallelMap(Sys.sleep, rep(10, i)) } # Create one job 
with parameter i=4 ids = batchMap(f, i = 4, reg = tmp) # Set resources: Use parallelMap in multicore mode with 4 CPUs # batchtools internally loads the namespace of parallelMap and then # calls parallelStart() before the job and parallelStop() right # after the job last job in the chunk terminated. res = list(pm.backend = "multicore", ncpus = 4) \dontrun{ # Submit both jobs and wait for them submitJobs(resources = res, reg = tmp) waitForJobs(reg = tmp) # If successfull, the running time should be ~10s getJobTable(reg = tmp)[, .(job.id, time.running)] # There should also be a note in the log: grepLogs(pattern = "parallelMap", reg = tmp) } } batchtools/man/saveRegistry.Rd0000644000176200001440000000177513606041641016160 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/saveRegistry.R \name{saveRegistry} \alias{saveRegistry} \title{Store the Registy to the File System} \usage{ saveRegistry(reg = getDefaultRegistry()) } \arguments{ \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ [\code{logical(1)}]: \code{TRUE} if the registry was saved, \code{FALSE} otherwise (if the registry is read-only). } \description{ Stores the registry on the file system in its \dQuote{file.dir} (specified for construction in \code{\link{makeRegistry}}, can be accessed via \code{reg$file.dir}). This function is usually called internally whenever needed. 
} \seealso{ Other Registry: \code{\link{clearRegistry}()}, \code{\link{getDefaultRegistry}()}, \code{\link{loadRegistry}()}, \code{\link{makeRegistry}()}, \code{\link{removeRegistry}()}, \code{\link{sweepRegistry}()}, \code{\link{syncRegistry}()} } \concept{Registry} batchtools/man/loadRegistry.Rd0000644000176200001440000001212513606041641016130 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loadRegistry.R \name{loadRegistry} \alias{loadRegistry} \title{Load a Registry from the File System} \usage{ loadRegistry( file.dir, work.dir = NULL, conf.file = findConfFile(), make.default = TRUE, writeable = FALSE ) } \arguments{ \item{file.dir}{[\code{character(1)}]\cr Path where all files of the registry are saved. Default is directory \dQuote{registry} in the current working directory. The provided path will get normalized unless it is given relative to the home directory (i.e., starting with \dQuote{~}). Note that some templates do not handle relative paths well. If you pass \code{NA}, a temporary directory will be used. This way, you can create disposable registries for \code{\link{btlapply}} or examples. By default, the temporary directory \code{\link[base]{tempdir}()} will be used. If you want to use another directory, e.g. a directory which is shared between nodes, you can set it in your configuration file by setting the variable \code{temp.dir}.} \item{work.dir}{[\code{character(1)}]\cr Working directory for R process for running jobs. Defaults to the working directory currently set during Registry construction (see \code{\link[base]{getwd}}). \code{loadRegistry} uses the stored \code{work.dir}, but you may also explicitly overwrite it, e.g., after switching to another system. The provided path will get normalized unless it is given relative to the home directory (i.e., starting with \dQuote{~}). 
Note that some templates do not handle relative paths well.} \item{conf.file}{[\code{character(1)}]\cr Path to a configuration file which is sourced while the registry is created. In the configuration file you can define how \pkg{batchtools} interacts with the system via \code{\link{ClusterFunctions}}. Separating the configuration of the underlying host system from the R code allows to easily move computation to another site. The file lookup is implemented in the internal (but exported) function \code{findConfFile} which returns the first file found of the following candidates: \enumerate{ \item{File \dQuote{batchtools.conf.R} in the path specified by the environment variable \dQuote{R_BATCHTOOLS_SEARCH_PATH}.} \item{File \dQuote{batchtools.conf.R} in the current working directory.} \item{File \dQuote{config.R} in the user configuration directory as reported by \code{rappdirs::user_config_dir("batchtools", expand = FALSE)} (depending on OS, e.g., on linux this usually resolves to \dQuote{~/.config/batchtools/config.R}).} \item{\dQuote{.batchtools.conf.R} in the home directory (\dQuote{~}).} \item{\dQuote{config.R} in the site config directory as reported by \code{rappdirs::site_config_dir("batchtools")} (depending on OS). This file can be used for admins to set sane defaults for a computation site.} } Set to \code{NA} if you want to suppress reading any configuration file. If a configuration file is found, it gets sourced inside the environment of the registry after the defaults for all variables are set. Therefore you can set and overwrite slots, e.g. \code{default.resources = list(walltime = 3600)} to set default resources or \dQuote{max.concurrent.jobs} to limit the number of jobs allowed to run simultaneously on the system.} \item{make.default}{[\code{logical(1)}]\cr If set to \code{TRUE}, the created registry is saved inside the package namespace and acts as default registry. 
You might want to switch this off if you work with multiple registries simultaneously. Default is \code{TRUE}.} \item{writeable}{[\code{logical(1)}]\cr Loads the registry in read-write mode. Default is \code{FALSE}.} } \value{ [\code{\link{Registry}}]. } \description{ Loads a registry from its \code{file.dir}. Multiple R sessions accessing the same registry simultaneously can lead to database inconsistencies. This is especially dangerous if the same \code{file.dir} is accessed from multiple machines, e.g. via a mount. If you just need to check on the status or peek into some preliminary results while another process is still submitting or waiting for pending results, you can load the registry in a read-only mode. All operations that need to change the registry will raise an exception in this mode. Files communicated back by the computational nodes are parsed to update the registry in memory while the registry on the file system remains unchanged. A heuristic tries to detect if the registry has been altered in the background by an other process and in this case automatically restricts the current registry to read-only mode. However, you should rely on this heuristic to work flawlessly. Thus, set to \code{writeable} to \code{TRUE} if and only if you are absolutely sure that other state-changing processes are terminated. If you need write access, load the registry with \code{writeable} set to \code{TRUE}. 
} \seealso{ Other Registry: \code{\link{clearRegistry}()}, \code{\link{getDefaultRegistry}()}, \code{\link{makeRegistry}()}, \code{\link{removeRegistry}()}, \code{\link{saveRegistry}()}, \code{\link{sweepRegistry}()}, \code{\link{syncRegistry}()} } \concept{Registry} batchtools/man/findJobs.Rd0000644000176200001440000001045013606041641015215 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/findJobs.R \name{findJobs} \alias{findJobs} \alias{findExperiments} \alias{findSubmitted} \alias{findNotSubmitted} \alias{findStarted} \alias{findNotStarted} \alias{findDone} \alias{findNotDone} \alias{findErrors} \alias{findOnSystem} \alias{findRunning} \alias{findQueued} \alias{findExpired} \alias{findTagged} \title{Find and Filter Jobs} \usage{ findJobs(expr, ids = NULL, reg = getDefaultRegistry()) findExperiments( ids = NULL, prob.name = NA_character_, prob.pattern = NA_character_, algo.name = NA_character_, algo.pattern = NA_character_, prob.pars, algo.pars, repls = NULL, reg = getDefaultRegistry() ) findSubmitted(ids = NULL, reg = getDefaultRegistry()) findNotSubmitted(ids = NULL, reg = getDefaultRegistry()) findStarted(ids = NULL, reg = getDefaultRegistry()) findNotStarted(ids = NULL, reg = getDefaultRegistry()) findDone(ids = NULL, reg = getDefaultRegistry()) findNotDone(ids = NULL, reg = getDefaultRegistry()) findErrors(ids = NULL, reg = getDefaultRegistry()) findOnSystem(ids = NULL, reg = getDefaultRegistry()) findRunning(ids = NULL, reg = getDefaultRegistry()) findQueued(ids = NULL, reg = getDefaultRegistry()) findExpired(ids = NULL, reg = getDefaultRegistry()) findTagged(tags = character(0L), ids = NULL, reg = getDefaultRegistry()) } \arguments{ \item{expr}{[\code{expression}]\cr Predicate expression evaluated in the job parameters. 
Jobs for which \code{expr} evaluates to \code{TRUE} are returned.} \item{ids}{[\code{\link[base]{data.frame}} or \code{integer}]\cr A \code{\link[base]{data.frame}} (or \code{\link[data.table]{data.table}}) with a column named \dQuote{job.id}. Alternatively, you may also pass a vector of integerish job ids. If not set, defaults to all jobs. Invalid ids are ignored.} \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} \item{prob.name}{[\code{character}]\cr Exact name of the problem (no substring matching). If not provided, all problems are matched.} \item{prob.pattern}{[\code{character}]\cr Regular expression pattern to match problem names. If not provided, all problems are matched.} \item{algo.name}{[\code{character}]\cr Exact name of the problem (no substring matching). If not provided, all algorithms are matched.} \item{algo.pattern}{[\code{character}]\cr Regular expression pattern to match algorithm names. If not provided, all algorithms are matched.} \item{prob.pars}{[\code{expression}]\cr Predicate expression evaluated in the problem parameters.} \item{algo.pars}{[\code{expression}]\cr Predicate expression evaluated in the algorithm parameters.} \item{repls}{[\code{integer}]\cr Whitelist of replication numbers. If not provided, all replications are matched.} \item{tags}{[\code{character}]\cr Return jobs which are tagged with any of the tags provided.} } \value{ [\code{\link{data.table}}] with column \dQuote{job.id} containing matched jobs. } \description{ These functions are used to find and filter jobs, depending on either their parameters (\code{findJobs} and \code{findExperiments}), their tags (\code{findTagged}), or their computational status (all other functions, see \code{\link{getStatus}} for an overview). 
Note that \code{findQueued}, \code{findRunning}, \code{findOnSystem} and \code{findExpired} are somewhat heuristic and may report misleading results, depending on the state of the system and the \code{\link{ClusterFunctions}} implementation. See \code{\link{JoinTables}} for convenient set operations (unions, intersects, differences) on tables with job ids. } \examples{ \dontshow{ batchtools:::example_push_temp(1) } tmp = makeRegistry(file.dir = NA, make.default = FALSE) batchMap(identity, i = 1:3, reg = tmp) ids = findNotSubmitted(reg = tmp) # get all jobs: findJobs(reg = tmp) # filter for jobs with parameter i >= 2 findJobs(i >= 2, reg = tmp) # filter on the computational status findSubmitted(reg = tmp) findNotDone(reg = tmp) # filter on tags addJobTags(2:3, "my_tag", reg = tmp) findTagged(tags = "my_tag", reg = tmp) # combine filter functions using joins # -> jobs which are not done and not tagged (using an anti-join): ajoin(findNotDone(reg = tmp), findTagged("my_tag", reg = tmp)) } \seealso{ \code{\link{getStatus}} \code{\link{JoinTables}} } batchtools/man/makeClusterFunctionsLSF.Rd0000644000176200001440000000572213606041641020202 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterFunctionsLSF.R \name{makeClusterFunctionsLSF} \alias{makeClusterFunctionsLSF} \title{ClusterFunctions for LSF Systems} \usage{ makeClusterFunctionsLSF( template = "lsf", scheduler.latency = 1, fs.latency = 65 ) } \arguments{ \item{template}{[\code{character(1)}]\cr Either a path to a \pkg{brew} template file (with extension \dQuote{tmpl}), or a short descriptive name enabling the following heuristic for the file lookup: \enumerate{ \item \dQuote{batchtools.[template].tmpl} in the path specified by the environment variable \dQuote{R_BATCHTOOLS_SEARCH_PATH}. \item \dQuote{batchtools.[template].tmpl} in the current working directory. 
\item \dQuote{[template].tmpl} in the user config directory (see \code{\link[rappdirs]{user_config_dir}}); on linux this is usually \dQuote{~/.config/batchtools/[template].tmpl}. \item \dQuote{.batchtools.[template].tmpl} in the home directory. \item \dQuote{[template].tmpl} in the package installation directory in the subfolder \dQuote{templates}. }} \item{scheduler.latency}{[\code{numeric(1)}]\cr Time to sleep after important interactions with the scheduler to ensure a sane state. Currently only triggered after calling \code{\link{submitJobs}}.} \item{fs.latency}{[\code{numeric(1)}]\cr Expected maximum latency of the file system, in seconds. Set to a positive number for network file systems like NFS which enables more robust (but also more expensive) mechanisms to access files and directories. Usually safe to set to \code{0} to disable the heuristic, e.g. if you are working on a local file system.} } \value{ [\code{\link{ClusterFunctions}}]. } \description{ Cluster functions for LSF (\url{https://www.ibm.com/us-en/marketplace/hpc-workload-management}). Job files are created based on the brew template \code{template.file}. This file is processed with brew and then submitted to the queue using the \code{bsub} command. Jobs are killed using the \code{bkill} command and the list of running jobs is retrieved using \code{bjobs -u $USER -w}. The user must have the appropriate privileges to submit, delete and list jobs on the cluster (this is usually the case). The template file can access all resources passed to \code{\link{submitJobs}} as well as all variables stored in the \code{\link{JobCollection}}. It is the template file's job to choose a queue for the job and handle the desired resource allocations. } \note{ Array jobs are currently not supported. 
} \seealso{ Other ClusterFunctions: \code{\link{makeClusterFunctionsDocker}()}, \code{\link{makeClusterFunctionsInteractive}()}, \code{\link{makeClusterFunctionsMulticore}()}, \code{\link{makeClusterFunctionsOpenLava}()}, \code{\link{makeClusterFunctionsSGE}()}, \code{\link{makeClusterFunctionsSSH}()}, \code{\link{makeClusterFunctionsSlurm}()}, \code{\link{makeClusterFunctionsSocket}()}, \code{\link{makeClusterFunctionsTORQUE}()}, \code{\link{makeClusterFunctions}()} } \concept{ClusterFunctions} batchtools/man/execJob.Rd0000644000176200001440000000116013234300075015030 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/execJob.R \name{execJob} \alias{execJob} \title{Execute a Single Jobs} \usage{ execJob(job) } \arguments{ \item{job}{[\code{\link{Job}} | \code{\link{Experiment}}]\cr Job/Experiment to execute.} } \value{ Result of the job. } \description{ Executes a single job (as created by \code{\link{makeJob}}) and returns its result. Also works for Experiments. } \examples{ \dontshow{ batchtools:::example_push_temp(1) } tmp = makeRegistry(file.dir = NA, make.default = FALSE) batchMap(identity, 1:2, reg = tmp) job = makeJob(1, reg = tmp) execJob(job) } batchtools/man/removeExperiments.Rd0000644000176200001440000000222213606041641017176 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/removeExperiments.R \name{removeExperiments} \alias{removeExperiments} \title{Remove Experiments} \usage{ removeExperiments(ids = NULL, reg = getDefaultRegistry()) } \arguments{ \item{ids}{[\code{\link[base]{data.frame}} or \code{integer}]\cr A \code{\link[base]{data.frame}} (or \code{\link[data.table]{data.table}}) with a column named \dQuote{job.id}. Alternatively, you may also pass a vector of integerish job ids. If not set, defaults to no job. Invalid ids are ignored.} \item{reg}{[\code{\link{ExperimentRegistry}}]\cr Registry. 
If not explicitly passed, uses the last created registry.} } \value{ [\code{\link{data.table}}] of removed job ids, invisibly. } \description{ Remove Experiments from an \code{\link{ExperimentRegistry}}. This function automatically checks if any of the jobs to reset is either pending or running. However, if the implemented heuristic fails, this can lead to inconsistencies in the data base. Use with care while jobs are running. } \seealso{ Other Experiment: \code{\link{addExperiments}()}, \code{\link{summarizeExperiments}()} } \concept{Experiment} batchtools/man/estimateRuntimes.Rd0000644000176200001440000001032513543336703017030 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/estimateRuntimes.R \name{estimateRuntimes} \alias{estimateRuntimes} \alias{print.RuntimeEstimate} \title{Estimate Remaining Runtimes} \usage{ estimateRuntimes(tab, ..., reg = getDefaultRegistry()) \method{print}{RuntimeEstimate}(x, n = 1L, ...) } \arguments{ \item{tab}{[\code{\link{data.table}}]\cr Table with column \dQuote{job.id} and additional columns to predict the runtime. Observed runtimes will be looked up in the registry and serve as dependent variable. All columns in \code{tab} except \dQuote{job.id} will be passed to \code{\link[ranger]{ranger}} as independent variables to fit the model.} \item{...}{[ANY]\cr Additional parameters passed to \code{\link[ranger]{ranger}}. Ignored for the \code{print} method.} \item{reg}{[\code{\link{Registry}}]\cr Registry. 
If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} \item{x}{[\code{RuntimeEstimate}]\cr Object to print.} \item{n}{[\code{integer(1)}]\cr Number of parallel jobs to assume for runtime estimation.} } \value{ [\code{RuntimeEstimate}] which is a \code{list} with two named elements: \dQuote{runtimes} is a \code{\link{data.table}} with columns \dQuote{job.id}, \dQuote{runtime} (in seconds) and \dQuote{type} (\dQuote{estimated} if runtime is estimated, \dQuote{observed} if runtime was observed). The other element of the list named \dQuote{model}] contains the fitted random forest object. } \description{ Estimates the runtimes of jobs using the random forest implemented in \pkg{ranger}. Observed runtimes are retrieved from the \code{\link{Registry}} and runtimes are predicted for unfinished jobs. The estimated remaining time is calculated in the \code{print} method. You may also pass \code{n} here to determine the number of parallel jobs which is then used in a simple Longest Processing Time (LPT) algorithm to give an estimate for the parallel runtime. } \examples{ \dontshow{ batchtools:::example_push_temp(1) } # Create a simple toy registry set.seed(1) tmp = makeExperimentRegistry(file.dir = NA, make.default = FALSE, seed = 1) addProblem(name = "iris", data = iris, fun = function(data, ...) nrow(data), reg = tmp) addAlgorithm(name = "nrow", function(instance, ...) nrow(instance), reg = tmp) addAlgorithm(name = "ncol", function(instance, ...) ncol(instance), reg = tmp) addExperiments(algo.designs = list(nrow = data.table::CJ(x = 1:50, y = letters[1:5])), reg = tmp) addExperiments(algo.designs = list(ncol = data.table::CJ(x = 1:50, y = letters[1:5])), reg = tmp) # We use the job parameters to predict runtimes tab = unwrap(getJobPars(reg = tmp)) # First we need to submit some jobs so that the forest can train on some data. # Thus, we just sample some jobs from the registry while grouping by factor variables. 
library(data.table) ids = tab[, .SD[sample(nrow(.SD), 5)], by = c("problem", "algorithm", "y")] setkeyv(ids, "job.id") submitJobs(ids, reg = tmp) waitForJobs(reg = tmp) # We "simulate" some more realistic runtimes here to demonstrate the functionality: # - Algorithm "ncol" is 5 times more expensive than "nrow" # - x has no effect on the runtime # - If y is "a" or "b", the runtimes are really high runtime = function(algorithm, x, y) { ifelse(algorithm == "nrow", 100L, 500L) + 1000L * (y \%in\% letters[1:2]) } tmp$status[ids, done := done + tab[ids, runtime(algorithm, x, y)]] rjoin(sjoin(tab, ids), getJobStatus(ids, reg = tmp)[, c("job.id", "time.running")]) # Estimate runtimes: est = estimateRuntimes(tab, reg = tmp) print(est) rjoin(tab, est$runtimes) print(est, n = 10) # Submit jobs with longest runtime first: ids = est$runtimes[type == "estimated"][order(runtime, decreasing = TRUE)] print(ids) \dontrun{ submitJobs(ids, reg = tmp) } # Group jobs into chunks with runtime < 1h ids = est$runtimes[type == "estimated"] ids[, chunk := binpack(runtime, 3600)] print(ids) print(ids[, list(runtime = sum(runtime)), by = chunk]) \dontrun{ submitJobs(ids, reg = tmp) } # Group jobs into 10 chunks with similar runtime ids = est$runtimes[type == "estimated"] ids[, chunk := lpt(runtime, 10)] print(ids[, list(runtime = sum(runtime)), by = chunk]) } \seealso{ \code{\link{binpack}} and \code{\link{lpt}} to chunk jobs according to their estimated runtimes. 
} batchtools/man/assertRegistry.Rd0000644000176200001440000000237013606041641016513 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Registry.R \name{assertRegistry} \alias{assertRegistry} \title{assertRegistry} \usage{ assertRegistry( reg, class = NULL, writeable = FALSE, sync = FALSE, running.ok = TRUE ) } \arguments{ \item{reg}{[\code{\link{Registry}}]\cr The object asserted to be a \code{Registry}.} \item{class}{[\code{character(1)}]\cr If \code{NULL} (default), \code{reg} must only inherit from class \dQuote{Registry}. Otherwise check that \code{reg} is of class \code{class}. E.g., if set to \dQuote{Registry}, a \code{\link{ExperimentRegistry}} would not pass.} \item{writeable}{[\code{logical(1)}]\cr Check if the registry is writeable.} \item{sync}{[\code{logical(1)}]\cr Try to synchronize the registry by including pending results from the file system. See \code{\link{syncRegistry}}.} \item{running.ok}{[\code{logical(1)}]\cr If \code{FALSE} throw an error if jobs associated with the registry are currently running.} } \value{ \code{TRUE} invisibly. } \description{ Assert that a given object is a \code{batchtools} registry. Additionally can sync the registry, check if it is writeable, or check if jobs are running. If any check fails, throws an error indicting the reason for the failure. } batchtools/man/getErrorMessages.Rd0000644000176200001440000000337013606041641016743 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/getErrorMessages.R \name{getErrorMessages} \alias{getErrorMessages} \title{Retrieve Error Messages} \usage{ getErrorMessages( ids = NULL, missing.as.error = FALSE, reg = getDefaultRegistry() ) } \arguments{ \item{ids}{[\code{\link[base]{data.frame}} or \code{integer}]\cr A \code{\link[base]{data.frame}} (or \code{\link[data.table]{data.table}}) with a column named \dQuote{job.id}. Alternatively, you may also pass a vector of integerish job ids. 
If not set, defaults to the return value of \code{\link{findErrors}}. Invalid ids are ignored.} \item{missing.as.error}{[\code{logical(1)}]\cr Treat missing results as errors? If \code{TRUE}, the error message \dQuote{[not terminated]} is imputed for jobs which have not terminated. Default is \code{FALSE}} \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ [\code{\link{data.table}}] with columns \dQuote{job.id}, \dQuote{terminated} (logical), \dQuote{error} (logical) and \dQuote{message} (string). } \description{ Extracts error messages from the internal data base and returns them in a table. } \examples{ \dontshow{ batchtools:::example_push_temp(1) } tmp = makeRegistry(file.dir = NA, make.default = FALSE) fun = function(i) if (i == 3) stop(i) else i ids = batchMap(fun, i = 1:5, reg = tmp) submitJobs(1:4, reg = tmp) waitForJobs(1:4, reg = tmp) getErrorMessages(ids, reg = tmp) getErrorMessages(ids, missing.as.error = TRUE, reg = tmp) } \seealso{ Other debug: \code{\link{getStatus}()}, \code{\link{grepLogs}()}, \code{\link{killJobs}()}, \code{\link{resetJobs}()}, \code{\link{showLog}()}, \code{\link{testJob}()} } \concept{debug} batchtools/man/makeClusterFunctionsSSH.Rd0000644000176200001440000000365013606041641020211 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterFunctionsSSH.R \name{makeClusterFunctionsSSH} \alias{makeClusterFunctionsSSH} \title{ClusterFunctions for Remote SSH Execution} \usage{ makeClusterFunctionsSSH(workers, fs.latency = 65) } \arguments{ \item{workers}{[\code{list} of \code{\link{Worker}}]\cr List of Workers as constructed with \code{\link{Worker}}.} \item{fs.latency}{[\code{numeric(1)}]\cr Expected maximum latency of the file system, in seconds. 
Set to a positive number for network file systems like NFS which enables more robust (but also more expensive) mechanisms to access files and directories. Usually safe to set to \code{0} to disable the heuristic, e.g. if you are working on a local file system.} } \value{ [\code{\link{ClusterFunctions}}]. } \description{ Jobs are spawned by starting multiple R sessions via \code{Rscript} over SSH. If the hostname of the \code{\link{Worker}} equals \dQuote{localhost}, \code{Rscript} is called directly so that you do not need to have an SSH client installed. } \note{ If you use a custom \dQuote{.ssh/config} file, make sure your ProxyCommand passes \sQuote{-q} to ssh, otherwise each output will end with the message \dQuote{Killed by signal 1} and this will break the communication with the nodes. } \examples{ \dontrun{ # cluster functions for multicore execution on the local machine makeClusterFunctionsSSH(list(Worker$new("localhost", ncpus = 2))) } } \seealso{ Other ClusterFunctions: \code{\link{makeClusterFunctionsDocker}()}, \code{\link{makeClusterFunctionsInteractive}()}, \code{\link{makeClusterFunctionsLSF}()}, \code{\link{makeClusterFunctionsMulticore}()}, \code{\link{makeClusterFunctionsOpenLava}()}, \code{\link{makeClusterFunctionsSGE}()}, \code{\link{makeClusterFunctionsSlurm}()}, \code{\link{makeClusterFunctionsSocket}()}, \code{\link{makeClusterFunctionsTORQUE}()}, \code{\link{makeClusterFunctions}()} } \concept{ClusterFunctions} batchtools/man/makeExperimentRegistry.Rd0000644000176200001440000001456613606041641020202 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ExperimentRegistry.R \name{makeExperimentRegistry} \alias{makeExperimentRegistry} \alias{ExperimentRegistry} \title{ExperimentRegistry Constructor} \usage{ makeExperimentRegistry( file.dir = "registry", work.dir = getwd(), conf.file = findConfFile(), packages = character(0L), namespaces = character(0L), source = character(0L), load = character(0L), 
seed = NULL, make.default = TRUE ) } \arguments{ \item{file.dir}{[\code{character(1)}]\cr Path where all files of the registry are saved. Default is directory \dQuote{registry} in the current working directory. The provided path will get normalized unless it is given relative to the home directory (i.e., starting with \dQuote{~}). Note that some templates do not handle relative paths well. If you pass \code{NA}, a temporary directory will be used. This way, you can create disposable registries for \code{\link{btlapply}} or examples. By default, the temporary directory \code{\link[base]{tempdir}()} will be used. If you want to use another directory, e.g. a directory which is shared between nodes, you can set it in your configuration file by setting the variable \code{temp.dir}.} \item{work.dir}{[\code{character(1)}]\cr Working directory for R process for running jobs. Defaults to the working directory currently set during Registry construction (see \code{\link[base]{getwd}}). \code{loadRegistry} uses the stored \code{work.dir}, but you may also explicitly overwrite it, e.g., after switching to another system. The provided path will get normalized unless it is given relative to the home directory (i.e., starting with \dQuote{~}). Note that some templates do not handle relative paths well.} \item{conf.file}{[\code{character(1)}]\cr Path to a configuration file which is sourced while the registry is created. In the configuration file you can define how \pkg{batchtools} interacts with the system via \code{\link{ClusterFunctions}}. Separating the configuration of the underlying host system from the R code allows to easily move computation to another site. 
The file lookup is implemented in the internal (but exported) function \code{findConfFile} which returns the first file found of the following candidates: \enumerate{ \item{File \dQuote{batchtools.conf.R} in the path specified by the environment variable \dQuote{R_BATCHTOOLS_SEARCH_PATH}.} \item{File \dQuote{batchtools.conf.R} in the current working directory.} \item{File \dQuote{config.R} in the user configuration directory as reported by \code{rappdirs::user_config_dir("batchtools", expand = FALSE)} (depending on OS, e.g., on linux this usually resolves to \dQuote{~/.config/batchtools/config.R}).} \item{\dQuote{.batchtools.conf.R} in the home directory (\dQuote{~}).} \item{\dQuote{config.R} in the site config directory as reported by \code{rappdirs::site_config_dir("batchtools")} (depending on OS). This file can be used for admins to set sane defaults for a computation site.} } Set to \code{NA} if you want to suppress reading any configuration file. If a configuration file is found, it gets sourced inside the environment of the registry after the defaults for all variables are set. Therefore you can set and overwrite slots, e.g. \code{default.resources = list(walltime = 3600)} to set default resources or \dQuote{max.concurrent.jobs} to limit the number of jobs allowed to run simultaneously on the system.} \item{packages}{[\code{character}]\cr Packages that will always be loaded on each node. Uses \code{\link[base]{require}} internally. Default is \code{character(0)}.} \item{namespaces}{[\code{character}]\cr Same as \code{packages}, but the packages will not be attached. Uses \code{\link[base]{requireNamespace}} internally. Default is \code{character(0)}.} \item{source}{[\code{character}]\cr Files which should be sourced on the slaves prior to executing a job. Calls \code{\link[base]{sys.source}} using the \code{\link[base]{.GlobalEnv}}.} \item{load}{[\code{character}]\cr Files which should be loaded on the slaves prior to executing a job. 
Calls \code{\link[base]{load}} using the \code{\link[base]{.GlobalEnv}}.} \item{seed}{[\code{integer(1)}]\cr Start seed for jobs. Each job uses the (\code{seed} + \code{job.id}) as seed. Default is a random integer between 1 and 32768} \item{make.default}{[\code{logical(1)}]\cr If set to \code{TRUE}, the created registry is saved inside the package namespace and acts as default registry. You might want to switch this off if you work with multiple registries simultaneously. Default is \code{TRUE}.} } \value{ [\code{ExperimentRegistry}]. } \description{ \code{makeExperimentRegistry} constructs a special \code{\link{Registry}} which is suitable for the definition of large scale computer experiments. Each experiment consists of a \code{\link{Problem}} and an \code{\link{Algorithm}}. These can be parametrized with \code{\link{addExperiments}} to actually define computational jobs. } \examples{ \dontshow{ batchtools:::example_push_temp(1) } tmp = makeExperimentRegistry(file.dir = NA, make.default = FALSE) # Define one problem, two algorithms and add them with some parameters: addProblem(reg = tmp, "p1", fun = function(job, data, n, mean, sd, ...) rnorm(n, mean = mean, sd = sd)) addAlgorithm(reg = tmp, "a1", fun = function(job, data, instance, ...) mean(instance)) addAlgorithm(reg = tmp, "a2", fun = function(job, data, instance, ...) median(instance)) ids = addExperiments(reg = tmp, list(p1 = data.table::CJ(n = c(50, 100), mean = -2:2, sd = 1:4))) # Overview over defined experiments: tmp$problems tmp$algorithms summarizeExperiments(reg = tmp) summarizeExperiments(reg = tmp, by = c("problem", "algorithm", "n")) ids = findExperiments(prob.pars = (n == 50), reg = tmp) print(unwrap(getJobPars(ids, reg = tmp))) # Submit jobs submitJobs(reg = tmp) waitForJobs(reg = tmp) # Reduce the results of algorithm a1 ids.mean = findExperiments(algo.name = "a1", reg = tmp) reduceResults(ids.mean, fun = function(aggr, res, ...) 
c(aggr, res), reg = tmp) # Join info table with all results and calculate mean of results # grouped by n and algorithm ids = findDone(reg = tmp) pars = unwrap(getJobPars(ids, reg = tmp)) results = unwrap(reduceResultsDataTable(ids, fun = function(res) list(res = res), reg = tmp)) tab = ljoin(pars, results) tab[, list(mres = mean(res)), by = c("n", "algorithm")] } \concept{Registry Experiment} batchtools/man/findTemplateFile.Rd0000644000176200001440000000230413301520663016670 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterFunctions.R \name{findTemplateFile} \alias{findTemplateFile} \title{Find a batchtools Template File} \usage{ findTemplateFile(template) } \arguments{ \item{template}{[\code{character(1)}]\cr Either a path to a \pkg{brew} template file (with extension \dQuote{tmpl}), or a short descriptive name enabling the following heuristic for the file lookup: \enumerate{ \item \dQuote{batchtools.[template].tmpl} in the path specified by the environment variable \dQuote{R_BATCHTOOLS_SEARCH_PATH}. \item \dQuote{batchtools.[template].tmpl} in the current working directory. \item \dQuote{[template].tmpl} in the user config directory (see \code{\link[rappdirs]{user_config_dir}}); on linux this is usually \dQuote{~/.config/batchtools/[template].tmpl}. \item \dQuote{.batchtools.[template].tmpl} in the home directory. \item \dQuote{[template].tmpl} in the package installation directory in the subfolder \dQuote{templates}. }} } \value{ [\code{character}] Path to the file or \code{NA} if no template file was found. } \description{ This function returns the path to a template file on the file system. 
} \keyword{internal} batchtools/man/addAlgorithm.Rd0000644000176200001440000000315513233572741016070 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Algorithm.R \name{addAlgorithm} \alias{addAlgorithm} \alias{Algorithm} \alias{removeAlgorithms} \title{Define Algorithms for Experiments} \usage{ addAlgorithm(name, fun = NULL, reg = getDefaultRegistry()) removeAlgorithms(name, reg = getDefaultRegistry()) } \arguments{ \item{name}{[\code{character(1)}]\cr Unique identifier for the algorithm.} \item{fun}{[\code{function}]\cr The algorithm function. The static problem part is passed as \dQuote{data}, the generated problem instance is passed as \dQuote{instance} and the \code{\link{Job}}/\code{\link{Experiment}} as \dQuote{job}. Therefore, your function must have the formal arguments \dQuote{job}, \dQuote{data} and \dQuote{instance} (or dots \code{...}). If you do not provide a function, it defaults to a function which just returns the instance.} \item{reg}{[\code{\link{ExperimentRegistry}}]\cr Registry. If not explicitly passed, uses the last created registry.} } \value{ [\code{Algorithm}]. Object of class \dQuote{Algorithm}. } \description{ Algorithms are functions which get the \code{data} part as well as the problem instance (the return value of the function defined in \code{\link{Problem}}) and return an arbitrary R object. This function serializes all components to the file system and registers the algorithm in the \code{\link{ExperimentRegistry}}. \code{removeAlgorithms} removes all jobs from the registry which depend on the specific algorithm. \code{reg$algorithms} holds the IDs of already defined algorithms. 
} \seealso{ \code{\link{Problem}}, \code{\link{addExperiments}} } batchtools/man/batchMapResults.Rd0000644000176200001440000000520613606041641016563 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/batchMapResults.R \name{batchMapResults} \alias{batchMapResults} \title{Map Over Results to Create New Jobs} \usage{ batchMapResults( fun, ids = NULL, ..., more.args = list(), target, source = getDefaultRegistry() ) } \arguments{ \item{fun}{[\code{function}]\cr Function which takes the result as first (unnamed) argument.} \item{ids}{[\code{\link[base]{data.frame}} or \code{integer}]\cr A \code{\link[base]{data.frame}} (or \code{\link[data.table]{data.table}}) with a column named \dQuote{job.id}. Alternatively, you may also pass a vector of integerish job ids. If not set, defaults to the return value of \code{\link{findDone}}. Invalid ids are ignored.} \item{...}{[ANY]\cr Arguments to vectorize over (list or vector). Passed to \code{\link{batchMap}}.} \item{more.args}{[\code{list}]\cr A list of further arguments passed to \code{fun}. Default is an empty list.} \item{target}{[\code{\link{Registry}}]\cr Empty Registry where new jobs are created for.} \item{source}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ [\code{\link{data.table}}] with ids of jobs added to \code{target}. } \description{ This function allows you to create new computational jobs (just like \code{\link{batchMap}}) based on the results of a \code{\link{Registry}}. } \note{ The URI to the result files in registry \code{source} is hard coded as parameter in the \code{target} registry. This means that \code{target} is currently not portable between systems for computation. 
} \examples{ \dontshow{ batchtools:::example_push_temp(2) } # Source registry: calculate square of some numbers tmp = makeRegistry(file.dir = NA, make.default = FALSE) batchMap(function(x) list(square = x^2), x = 1:10, reg = tmp) submitJobs(reg = tmp) waitForJobs(reg = tmp) # Target registry: calculate the square root on results of first registry target = makeRegistry(file.dir = NA, make.default = FALSE) batchMapResults(fun = function(x, y) list(sqrt = sqrt(x$square)), ids = 4:8, target = target, source = tmp) submitJobs(reg = target) waitForJobs(reg = target) # Map old to new ids. First, get a table with results and parameters results = unwrap(rjoin(getJobPars(reg = target), reduceResultsDataTable(reg = target))) print(results) # Parameter '.id' points to job.id in 'source'. Use a inner join to combine: ijoin(results, unwrap(reduceResultsDataTable(reg = tmp)), by = c(".id" = "job.id")) } \seealso{ Other Results: \code{\link{loadResult}()}, \code{\link{reduceResultsList}()}, \code{\link{reduceResults}()} } \concept{Results} batchtools/man/JobCollection.Rd0000644000176200001440000000561613606041641016215 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/JobCollection.R \name{makeJobCollection} \alias{makeJobCollection} \alias{JobCollection} \title{JobCollection Constructor} \usage{ makeJobCollection(ids = NULL, resources = list(), reg = getDefaultRegistry()) } \arguments{ \item{ids}{[\code{\link[base]{data.frame}} or \code{integer}]\cr A \code{\link[base]{data.frame}} (or \code{\link[data.table]{data.table}}) with a column named \dQuote{job.id}. Alternatively, you may also pass a vector of integerish job ids. If not set, defaults to all jobs. Invalid ids are ignored.} \item{resources}{[\code{list}]\cr Named list of resources. Default is \code{list()}.} \item{reg}{[\code{\link{Registry}}]\cr Registry. 
If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ [\code{JobCollection}]. } \description{ \code{makeJobCollection} takes multiple job ids and creates an object of class \dQuote{JobCollection} which holds all necessary information for the calculation with \code{\link{doJobCollection}}. It is implemented as an environment with the following variables: \describe{ \item{file.dir}{\code{file.dir} of the \link{Registry}.} \item{work.dir:}{\code{work.dir} of the \link{Registry}.} \item{job.hash}{Unique identifier of the job. Used to create names on the file system.} \item{jobs}{\code{\link[data.table]{data.table}} holding individual job information. See examples.} \item{log.file}{Location of the designated log file for this job.} \item{resources:}{Named list of specified computational resources.} \item{uri}{Location of the job description file (saved with \code{\link[base]{saveRDS}}) on the file system.} \item{seed}{\code{integer(1)} Seed of the \link{Registry}.} \item{packages}{\code{character} with required packages to load via \code{\link[base]{require}}.} \item{namespaces}{\code{character} with required packages to load via \code{\link[base]{requireNamespace}}.} \item{source}{\code{character} with list of files to source before execution.} \item{load}{\code{character} with list of files to load before execution.} \item{array.var}{\code{character(1)} of the array environment variable specified by the cluster functions.} \item{array.jobs}{\code{logical(1)} signaling if jobs were submitted using \code{chunks.as.arrayjobs}.} } If your \link{ClusterFunctions} uses a template, \code{\link[brew]{brew}} will be executed in the environment of such a collection. Thus all variables available inside the job can be used in the template. 
} \examples{ \dontshow{ batchtools:::example_push_temp(1) } tmp = makeRegistry(file.dir = NA, make.default = FALSE, packages = "methods") batchMap(identity, 1:5, reg = tmp) # resources are usually set in submitJobs() jc = makeJobCollection(1:3, resources = list(foo = "bar"), reg = tmp) ls(jc) jc$resources } \seealso{ Other JobCollection: \code{\link{doJobCollection}()} } \concept{JobCollection} batchtools/man/showLog.Rd0000644000176200001440000000262413606041641015105 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Logs.R \name{showLog} \alias{showLog} \alias{getLog} \title{Inspect Log Files} \usage{ showLog(id, reg = getDefaultRegistry()) getLog(id, reg = getDefaultRegistry()) } \arguments{ \item{id}{[\code{integer(1)} or \code{data.table}]\cr Single integer to specify the job or a \code{data.table} with column \code{job.id} and exactly one row.} \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ Nothing. } \description{ \code{showLog} opens the log in the pager. For customization, see \code{\link[base]{file.show}}. \code{getLog} returns the log as character vector. 
} \examples{ \dontshow{ batchtools:::example_push_temp(1) } tmp = makeRegistry(file.dir = NA, make.default = FALSE) # Create some dummy jobs fun = function(i) { if (i == 3) stop(i) if (i \%\% 2 == 1) warning("That's odd.") } ids = batchMap(fun, i = 1:5, reg = tmp) submitJobs(reg = tmp) waitForJobs(reg = tmp) getStatus(reg = tmp) writeLines(getLog(ids[1], reg = tmp)) \dontrun{ showLog(ids[1], reg = tmp) } grepLogs(pattern = "warning", ignore.case = TRUE, reg = tmp) } \seealso{ Other debug: \code{\link{getErrorMessages}()}, \code{\link{getStatus}()}, \code{\link{grepLogs}()}, \code{\link{killJobs}()}, \code{\link{resetJobs}()}, \code{\link{testJob}()} } \concept{debug} batchtools/man/makeClusterFunctionsSlurm.Rd0000644000176200001440000000725313606041641020661 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterFunctionsSlurm.R \name{makeClusterFunctionsSlurm} \alias{makeClusterFunctionsSlurm} \title{ClusterFunctions for Slurm Systems} \usage{ makeClusterFunctionsSlurm( template = "slurm", array.jobs = TRUE, nodename = "localhost", scheduler.latency = 1, fs.latency = 65 ) } \arguments{ \item{template}{[\code{character(1)}]\cr Either a path to a \pkg{brew} template file (with extension \dQuote{tmpl}), or a short descriptive name enabling the following heuristic for the file lookup: \enumerate{ \item \dQuote{batchtools.[template].tmpl} in the path specified by the environment variable \dQuote{R_BATCHTOOLS_SEARCH_PATH}. \item \dQuote{batchtools.[template].tmpl} in the current working directory. \item \dQuote{[template].tmpl} in the user config directory (see \code{\link[rappdirs]{user_config_dir}}); on linux this is usually \dQuote{~/.config/batchtools/[template].tmpl}. \item \dQuote{.batchtools.[template].tmpl} in the home directory. \item \dQuote{[template].tmpl} in the package installation directory in the subfolder \dQuote{templates}. 
}} \item{array.jobs}{[\code{logical(1)}]\cr If array jobs are disabled on the computing site, set to \code{FALSE}.} \item{nodename}{[\code{character(1)}]\cr Nodename of the master host. All commands are sent via SSH to this host. Only works iff \enumerate{ \item{Passwordless authentication (e.g., via SSH public key authentication) is set up.} \item{The file directory is shared across machines, e.g. mounted via SSHFS.} \item{Either the absolute path to the \code{file.dir} is identical on the machines, or paths are provided relative to the home directory. Symbolic links should work.} }} \item{scheduler.latency}{[\code{numeric(1)}]\cr Time to sleep after important interactions with the scheduler to ensure a sane state. Currently only triggered after calling \code{\link{submitJobs}}.} \item{fs.latency}{[\code{numeric(1)}]\cr Expected maximum latency of the file system, in seconds. Set to a positive number for network file systems like NFS which enables more robust (but also more expensive) mechanisms to access files and directories. Usually safe to set to \code{0} to disable the heuristic, e.g. if you are working on a local file system.} } \value{ [\code{\link{ClusterFunctions}}]. } \description{ Cluster functions for Slurm (\url{http://slurm.schedmd.com/}). Job files are created based on the brew template \code{template.file}. This file is processed with brew and then submitted to the queue using the \code{sbatch} command. Jobs are killed using the \code{scancel} command and the list of running jobs is retrieved using \code{squeue}. The user must have the appropriate privileges to submit, delete and list jobs on the cluster (this is usually the case). The template file can access all resources passed to \code{\link{submitJobs}} as well as all variables stored in the \code{\link{JobCollection}}. It is the template file's job to choose a queue for the job and handle the desired resource allocations. 
Note that you might have to specify the cluster name here if you do not want to use the default, otherwise the commands for listing and killing jobs will not work. } \seealso{ Other ClusterFunctions: \code{\link{makeClusterFunctionsDocker}()}, \code{\link{makeClusterFunctionsInteractive}()}, \code{\link{makeClusterFunctionsLSF}()}, \code{\link{makeClusterFunctionsMulticore}()}, \code{\link{makeClusterFunctionsOpenLava}()}, \code{\link{makeClusterFunctionsSGE}()}, \code{\link{makeClusterFunctionsSSH}()}, \code{\link{makeClusterFunctionsSocket}()}, \code{\link{makeClusterFunctionsTORQUE}()}, \code{\link{makeClusterFunctions}()} } \concept{ClusterFunctions} batchtools/man/cfBrewTemplate.Rd0000644000176200001440000000253513606041641016370 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterFunctions.R \name{cfBrewTemplate} \alias{cfBrewTemplate} \title{Cluster Functions Helper to Write Job Description Files} \usage{ cfBrewTemplate(reg, text, jc) } \arguments{ \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} \item{text}{[\code{character(1)}]\cr String ready to be brewed. See \code{\link{cfReadBrewTemplate}} to read a template from the file system.} \item{jc}{[\code{\link{JobCollection})}]\cr Will be used as environment to brew the template file in. See \code{\link{JobCollection}} for a list of all available variables.} } \value{ [\code{character(1)}]. File path to brewed template file. } \description{ This function is only intended for use in your own cluster functions implementation. Calls brew silently on your template, any error will lead to an exception. The file is stored at the same place as the corresponding job file in the \dQuote{jobs}-subdir of your files directory. 
} \seealso{ Other ClusterFunctionsHelper: \code{\link{cfHandleUnknownSubmitError}()}, \code{\link{cfKillJob}()}, \code{\link{cfReadBrewTemplate}()}, \code{\link{makeClusterFunctions}()}, \code{\link{makeSubmitJobResult}()}, \code{\link{runOSCommand}()} } \concept{ClusterFunctionsHelper} batchtools/man/cfKillJob.Rd0000644000176200001440000000315013606041641015315 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterFunctions.R \name{cfKillJob} \alias{cfKillJob} \title{Cluster Functions Helper to Kill Batch Jobs} \usage{ cfKillJob( reg, cmd, args = character(0L), max.tries = 3L, nodename = "localhost" ) } \arguments{ \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} \item{cmd}{[\code{character(1)}]\cr OS command, e.g. \dQuote{qdel}.} \item{args}{[\code{character}]\cr Arguments to \code{cmd}, including the batch id.} \item{max.tries}{[\code{integer(1)}]\cr Number of total times to try execute the OS command in cases of failures. Default is \code{3}.} \item{nodename}{[\code{character(1)}]\cr Name of the SSH node to run the command on. If set to \dQuote{localhost} (default), the command is not piped through SSH.} } \value{ \code{TRUE} on success. An exception is raised otherwise. } \description{ This function is only intended for use in your own cluster functions implementation. Calls the OS command to kill a job via \code{\link[base]{system}} like this: \dQuote{cmd batch.job.id}. If the command returns an exit code > 0, the command is repeated after a 1 second sleep \code{max.tries-1} times. If the command failed in all tries, an error is generated. 
} \seealso{ Other ClusterFunctionsHelper: \code{\link{cfBrewTemplate}()}, \code{\link{cfHandleUnknownSubmitError}()}, \code{\link{cfReadBrewTemplate}()}, \code{\link{makeClusterFunctions}()}, \code{\link{makeSubmitJobResult}()}, \code{\link{runOSCommand}()} } \concept{ClusterFunctionsHelper} batchtools/man/makeClusterFunctionsDocker.Rd0000644000176200001440000000643213606041641020764 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterFunctionsDocker.R \name{makeClusterFunctionsDocker} \alias{makeClusterFunctionsDocker} \title{ClusterFunctions for Docker} \usage{ makeClusterFunctionsDocker( image, docker.args = character(0L), image.args = character(0L), scheduler.latency = 1, fs.latency = 65 ) } \arguments{ \item{image}{[\code{character(1)}]\cr Name of the docker image to run.} \item{docker.args}{[\code{character}]\cr Additional arguments passed to \dQuote{docker} *before* the command (\dQuote{run}, \dQuote{ps} or \dQuote{kill}) to execute (e.g., the docker host).} \item{image.args}{[\code{character}]\cr Additional arguments passed to \dQuote{docker run} (e.g., to define mounts or environment variables).} \item{scheduler.latency}{[\code{numeric(1)}]\cr Time to sleep after important interactions with the scheduler to ensure a sane state. Currently only triggered after calling \code{\link{submitJobs}}.} \item{fs.latency}{[\code{numeric(1)}]\cr Expected maximum latency of the file system, in seconds. Set to a positive number for network file systems like NFS which enables more robust (but also more expensive) mechanisms to access files and directories. Usually safe to set to \code{0} to disable the heuristic, e.g. if you are working on a local file system.} } \value{ [\code{\link{ClusterFunctions}}]. } \description{ Cluster functions for Docker/Docker Swarm (\url{https://docs.docker.com/swarm/}). 
The \code{submitJob} function executes \code{docker [docker.args] run --detach=true [image.args] [resources] [image] [cmd]}. Arguments \code{docker.args}, \code{image.args} and \code{image} can be set on construction. The \code{resources} part takes the named resources \code{ncpus} and \code{memory} from \code{\link{submitJobs}} and maps them to the arguments \code{--cpu-shares} and \code{--memory} (in Megabytes). The resource \code{threads} is mapped to the environment variables \dQuote{OMP_NUM_THREADS} and \dQuote{OPENBLAS_NUM_THREADS}. To reliably identify jobs in the swarm, jobs are labeled with \dQuote{batchtools=[job.hash]} and named using the current login name (label \dQuote{user}) and the job hash (label \dQuote{batchtools}). \code{listJobsRunning} uses \code{docker [docker.args] ps --format=\{\{.ID\}\}} to filter for running jobs. \code{killJobs} uses \code{docker [docker.args] kill [batch.id]} to kill jobs. These cluster functions use a \link{Hook} to remove finished jobs before a new submit and every time the \link{Registry} is synchronized (using \code{\link{syncRegistry}}). This is currently required because docker does not remove terminated containers automatically. Use \code{docker ps -a --filter 'label=batchtools' --filter 'status=exited'} to identify and remove terminated containers manually (or use a cron job). 
} \seealso{ Other ClusterFunctions: \code{\link{makeClusterFunctionsInteractive}()}, \code{\link{makeClusterFunctionsLSF}()}, \code{\link{makeClusterFunctionsMulticore}()}, \code{\link{makeClusterFunctionsOpenLava}()}, \code{\link{makeClusterFunctionsSGE}()}, \code{\link{makeClusterFunctionsSSH}()}, \code{\link{makeClusterFunctionsSlurm}()}, \code{\link{makeClusterFunctionsSocket}()}, \code{\link{makeClusterFunctionsTORQUE}()}, \code{\link{makeClusterFunctions}()} } \concept{ClusterFunctions} batchtools/man/reduceResultsList.Rd0000644000176200001440000000776113606041641017154 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reduceResults.R \name{reduceResultsList} \alias{reduceResultsList} \alias{reduceResultsDataTable} \title{Apply Functions on Results} \usage{ reduceResultsList( ids = NULL, fun = NULL, ..., missing.val, reg = getDefaultRegistry() ) reduceResultsDataTable( ids = NULL, fun = NULL, ..., missing.val, reg = getDefaultRegistry() ) } \arguments{ \item{ids}{[\code{\link[base]{data.frame}} or \code{integer}]\cr A \code{\link[base]{data.frame}} (or \code{\link[data.table]{data.table}}) with a column named \dQuote{job.id}. Alternatively, you may also pass a vector of integerish job ids. If not set, defaults to the return value of \code{\link{findDone}}. Invalid ids are ignored.} \item{fun}{[\code{function}]\cr Function to apply to each result. The result is passed unnamed as first argument. If \code{NULL}, the identity is used. If the function has the formal argument \dQuote{job}, the \code{\link{Job}}/\code{\link{Experiment}} is also passed to the function.} \item{...}{[\code{ANY}]\cr Additional arguments passed to function \code{fun}.} \item{missing.val}{[\code{ANY}]\cr Value to impute as result for a job which is not finished. If not provided and a result is missing, an exception is raised.} \item{reg}{[\code{\link{Registry}}]\cr Registry. 
If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ \code{reduceResultsList} returns a list of the results in the same order as the provided ids. \code{reduceResultsDataTable} returns a \code{\link[data.table]{data.table}} with columns \dQuote{job.id} and additional result columns created via \code{\link[data.table]{rbindlist}}, sorted by \dQuote{job.id}. } \description{ Applies a function on the results of your finished jobs and thereby collects them in a \code{\link[base]{list}} or \code{\link[data.table]{data.table}}. The latter requires the provided function to return a list (or \code{data.frame}) of scalar values. See \code{\link[data.table]{rbindlist}} for features and limitations of the aggregation. If not all jobs are terminated, the respective result will be \code{NULL}. } \note{ If you have thousands of jobs, disabling the progress bar (\code{options(batchtools.progress = FALSE)}) can significantly increase the performance. } \examples{ \dontshow{ batchtools:::example_push_temp(2) } ### Example 1 - reduceResultsList tmp = makeRegistry(file.dir = NA, make.default = FALSE) batchMap(function(x) x^2, x = 1:10, reg = tmp) submitJobs(reg = tmp) waitForJobs(reg = tmp) reduceResultsList(fun = sqrt, reg = tmp) ### Example 2 - reduceResultsDataTable tmp = makeExperimentRegistry(file.dir = NA, make.default = FALSE) # add first problem fun = function(job, data, n, mean, sd, ...) rnorm(n, mean = mean, sd = sd) addProblem("rnorm", fun = fun, reg = tmp) # add second problem fun = function(job, data, n, lambda, ...) rexp(n, rate = lambda) addProblem("rexp", fun = fun, reg = tmp) # add first algorithm fun = function(instance, method, ...) if (method == "mean") mean(instance) else median(instance) addAlgorithm("average", fun = fun, reg = tmp) # add second algorithm fun = function(instance, ...) 
sd(instance) addAlgorithm("deviation", fun = fun, reg = tmp) # define problem and algorithm designs library(data.table) prob.designs = algo.designs = list() prob.designs$rnorm = CJ(n = 100, mean = -1:1, sd = 1:5) prob.designs$rexp = data.table(n = 100, lambda = 1:5) algo.designs$average = data.table(method = c("mean", "median")) algo.designs$deviation = data.table() # add experiments and submit addExperiments(prob.designs, algo.designs, reg = tmp) submitJobs(reg = tmp) # collect results and join them with problem and algorithm parameters res = ijoin( getJobPars(reg = tmp), reduceResultsDataTable(reg = tmp, fun = function(x) list(res = x)) ) unwrap(res, sep = ".") } \seealso{ \code{\link{reduceResults}} Other Results: \code{\link{batchMapResults}()}, \code{\link{loadResult}()}, \code{\link{reduceResults}()} } \concept{Results} batchtools/man/summarizeExperiments.Rd0000644000176200001440000000215513606041641017722 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summarizeExperiments.R \name{summarizeExperiments} \alias{summarizeExperiments} \title{Quick Summary over Experiments} \usage{ summarizeExperiments( ids = NULL, by = c("problem", "algorithm"), reg = getDefaultRegistry() ) } \arguments{ \item{ids}{[\code{\link[base]{data.frame}} or \code{integer}]\cr A \code{\link[base]{data.frame}} (or \code{\link[data.table]{data.table}}) with a column named \dQuote{job.id}. Alternatively, you may also pass a vector of integerish job ids. If not set, defaults to all jobs. Invalid ids are ignored.} \item{by}{[\code{character}]\cr Split the resulting table by columns of \code{\link{getJobPars}}.} \item{reg}{[\code{\link{ExperimentRegistry}}]\cr Registry. If not explicitly passed, uses the last created registry.} } \value{ [\code{\link{data.table}}] of frequencies. } \description{ Returns a frequency table of defined experiments. See \code{\link{ExperimentRegistry}} for an example. 
} \seealso{ Other Experiment: \code{\link{addExperiments}()}, \code{\link{removeExperiments}()} } \concept{Experiment} batchtools/man/syncRegistry.Rd0000644000176200001440000000151713606041641016170 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/syncRegistry.R \name{syncRegistry} \alias{syncRegistry} \title{Synchronize the Registry} \usage{ syncRegistry(reg = getDefaultRegistry()) } \arguments{ \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ [\code{logical(1)}]: \code{TRUE} if the state has changed, \code{FALSE} otherwise. } \description{ Parses update files written by the slaves to the file system and updates the internal data base. } \seealso{ Other Registry: \code{\link{clearRegistry}()}, \code{\link{getDefaultRegistry}()}, \code{\link{loadRegistry}()}, \code{\link{makeRegistry}()}, \code{\link{removeRegistry}()}, \code{\link{saveRegistry}()}, \code{\link{sweepRegistry}()} } \concept{Registry} batchtools/man/Tags.Rd0000644000176200001440000000372413234300075014357 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Tags.R \name{Tags} \alias{Tags} \alias{addJobTags} \alias{removeJobTags} \alias{getUsedJobTags} \title{Add or Remove Job Tags} \usage{ addJobTags(ids = NULL, tags, reg = getDefaultRegistry()) removeJobTags(ids = NULL, tags, reg = getDefaultRegistry()) getUsedJobTags(ids = NULL, reg = getDefaultRegistry()) } \arguments{ \item{ids}{[\code{\link[base]{data.frame}} or \code{integer}]\cr A \code{\link[base]{data.frame}} (or \code{\link[data.table]{data.table}}) with a column named \dQuote{job.id}. Alternatively, you may also pass a vector of integerish job ids. If not set, defaults to all jobs. Invalid ids are ignored.} \item{tags}{[\code{character}]\cr Tags to add or remove as strings. 
Each tag may consist of letters, numbers, underscore and dots (pattern \dQuote{^[[:alnum:]_.]+}).} \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ [\code{\link[data.table]{data.table}}] with job ids affected (invisible). } \description{ Add and remove arbitrary tags to jobs. } \examples{ \dontshow{ batchtools:::example_push_temp(1) } tmp = makeRegistry(file.dir = NA, make.default = FALSE) ids = batchMap(sqrt, x = -3:3, reg = tmp) # Add new tag to all ids addJobTags(ids, "needs.computation", reg = tmp) getJobTags(reg = tmp) # Add more tags addJobTags(findJobs(x < 0, reg = tmp), "x.neg", reg = tmp) addJobTags(findJobs(x > 0, reg = tmp), "x.pos", reg = tmp) getJobTags(reg = tmp) # Submit first 5 jobs and remove tag if successful ids = submitJobs(1:5, reg = tmp) if (waitForJobs(reg = tmp)) removeJobTags(ids, "needs.computation", reg = tmp) getJobTags(reg = tmp) # Grep for warning message and add a tag addJobTags(grepLogs(pattern = "NaNs produced", reg = tmp), "div.zero", reg = tmp) getJobTags(reg = tmp) # All tags where tag x.neg is set: ids = findTagged("x.neg", reg = tmp) getUsedJobTags(ids, reg = tmp) } batchtools/man/JoinTables.Rd0000644000176200001440000000474613234300075015520 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Joins.R \name{JoinTables} \alias{JoinTables} \alias{ijoin} \alias{ljoin} \alias{rjoin} \alias{ojoin} \alias{sjoin} \alias{ajoin} \alias{ujoin} \title{Inner, Left, Right, Outer, Semi and Anti Join for Data Tables} \usage{ ijoin(x, y, by = NULL) ljoin(x, y, by = NULL) rjoin(x, y, by = NULL) ojoin(x, y, by = NULL) sjoin(x, y, by = NULL) ajoin(x, y, by = NULL) ujoin(x, y, all.y = FALSE, by = NULL) } \arguments{ \item{x}{[\code{\link{data.frame}}]\cr First data.frame to join.} \item{y}{[\code{\link{data.frame}}]\cr Second data.frame to join.} \item{by}{[\code{character}]\cr Column name(s) of 
variables used to match rows in \code{x} and \code{y}. If not provided, a heuristic similar to the one described in the \pkg{dplyr} vignette is used: \enumerate{ \item If \code{x} is keyed, the existing key will be used if \code{y} has the same column(s). \item If \code{x} is not keyed, the intersect of common columns names is used if not empty. \item Raise an exception. } You may pass a named character vector to merge on columns with different names in \code{x} and \code{y}: \code{by = c("x.id" = "y.id")} will match \code{x}'s \dQuote{x.id} column with \code{y}\'s \dQuote{y.id} column.} \item{all.y}{[logical(1)]\cr Keep columns of \code{y} which are not in \code{x}?} } \value{ [\code{\link{data.table}}] with key identical to \code{by}. } \description{ These helper functions perform join operations on data tables. Most of them are basically one-liners. See \url{http://rpubs.com/ronasta/join_data_tables} for a overview of join operations in data table or alternatively \pkg{dplyr}'s vignette on two table verbs. 
} \examples{ \dontshow{ batchtools:::example_push_temp(1) } # Create two tables for demonstration tmp = makeRegistry(file.dir = NA, make.default = FALSE) batchMap(identity, x = 1:6, reg = tmp) x = getJobPars(reg = tmp) y = findJobs(x >= 2 & x <= 5, reg = tmp) y$extra.col = head(letters, nrow(y)) # Inner join: similar to intersect(): keep all columns of x and y with common matches ijoin(x, y) # Left join: use all ids from x, keep all columns of x and y ljoin(x, y) # Right join: use all ids from y, keep all columns of x and y rjoin(x, y) # Outer join: similar to union(): keep all columns of x and y with matches in x or y ojoin(x, y) # Semi join: filter x with matches in y sjoin(x, y) # Anti join: filter x with matches not in y ajoin(x, y) # Updating join: Replace values in x with values in y ujoin(x, y) } batchtools/man/grepLogs.Rd0000644000176200001440000000277713606041641015256 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Logs.R \name{grepLogs} \alias{grepLogs} \title{Grep Log Files for a Pattern} \usage{ grepLogs( ids = NULL, pattern, ignore.case = FALSE, fixed = FALSE, reg = getDefaultRegistry() ) } \arguments{ \item{ids}{[\code{\link[base]{data.frame}} or \code{integer}]\cr A \code{\link[base]{data.frame}} (or \code{\link[data.table]{data.table}}) with a column named \dQuote{job.id}. Alternatively, you may also pass a vector of integerish job ids. If not set, defaults to the return value of \code{\link{findStarted}}. Invalid ids are ignored.} \item{pattern}{[\code{character(1L)}]\cr Regular expression or string (see \code{fixed}).} \item{ignore.case}{[\code{logical(1L)}]\cr If \code{TRUE} the match will be performed case insensitively.} \item{fixed}{[\code{logical(1L)}]\cr If \code{FALSE} (default), \code{pattern} is a regular expression and a fixed string otherwise.} \item{reg}{[\code{\link{Registry}}]\cr Registry. 
If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ [\code{\link{data.table}}] with columns \dQuote{job.id} and \dQuote{message}. } \description{ Crawls through log files and reports jobs with lines matching the \code{pattern}. See \code{\link{showLog}} for an example. } \seealso{ Other debug: \code{\link{getErrorMessages}()}, \code{\link{getStatus}()}, \code{\link{killJobs}()}, \code{\link{resetJobs}()}, \code{\link{showLog}()}, \code{\link{testJob}()} } \concept{debug} batchtools/man/resetJobs.Rd0000644000176200001440000000250413606041641015420 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/resetJobs.R \name{resetJobs} \alias{resetJobs} \title{Reset the Computational State of Jobs} \usage{ resetJobs(ids = NULL, reg = getDefaultRegistry()) } \arguments{ \item{ids}{[\code{\link[base]{data.frame}} or \code{integer}]\cr A \code{\link[base]{data.frame}} (or \code{\link[data.table]{data.table}}) with a column named \dQuote{job.id}. Alternatively, you may also pass a vector of integerish job ids. If not set, defaults to no job. Invalid ids are ignored.} \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ [\code{\link{data.table}}] of job ids which have been reset. See \code{\link{JoinTables}} for examples on working with job tables. } \description{ Resets the computational state of jobs in the \code{\link{Registry}}. This function automatically checks if any of the jobs to reset is either pending or running. However, if the implemented heuristic fails, this can lead to inconsistencies in the data base. Use with care while jobs are running. 
} \seealso{ Other debug: \code{\link{getErrorMessages}()}, \code{\link{getStatus}()}, \code{\link{grepLogs}()}, \code{\link{killJobs}()}, \code{\link{showLog}()}, \code{\link{testJob}()} } \concept{debug} batchtools/man/doJobCollection.Rd0000644000176200001440000000235113606041641016531 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/doJobCollection.R \name{doJobCollection} \alias{doJobCollection} \title{Execute Jobs of a JobCollection} \usage{ doJobCollection(jc, output = NULL) } \arguments{ \item{jc}{[\code{\link{JobCollection}}]\cr Either an object of class \dQuote{JobCollection} as returned by \code{\link{makeJobCollection}} or a string with the path to file containing a \dQuote{JobCollection} as RDS file (as stored by \code{\link{submitJobs}}).} \item{output}{[\code{character(1)}]\cr Path to a file to write the output to. Defaults to \code{NULL} which means that output is written to the active \code{\link[base]{sink}}. Do not set this if your scheduler redirects output to a log file.} } \value{ [\code{character(1)}]: Hash of the \code{\link{JobCollection}} executed. } \description{ Executes every job in a \code{\link{JobCollection}}. This function is intended to be called on the slave. 
} \examples{ \dontshow{ batchtools:::example_push_temp(1) } tmp = makeRegistry(file.dir = NA, make.default = FALSE) batchMap(identity, 1:2, reg = tmp) jc = makeJobCollection(1:2, reg = tmp) doJobCollection(jc) } \seealso{ Other JobCollection: \code{\link{makeJobCollection}()} } \concept{JobCollection} batchtools/man/waitForJobs.Rd0000644000176200001440000000570713606041641015721 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/waitForJobs.R \name{waitForJobs} \alias{waitForJobs} \title{Wait for Termination of Jobs} \usage{ waitForJobs( ids = NULL, sleep = NULL, timeout = 604800, expire.after = NULL, stop.on.error = FALSE, stop.on.expire = FALSE, reg = getDefaultRegistry() ) } \arguments{ \item{ids}{[\code{\link[base]{data.frame}} or \code{integer}]\cr A \code{\link[base]{data.frame}} (or \code{\link[data.table]{data.table}}) with a column named \dQuote{job.id}. Alternatively, you may also pass a vector of integerish job ids. If not set, defaults to the return value of \code{\link{findSubmitted}}. Invalid ids are ignored.} \item{sleep}{[\code{function(i)} | \code{numeric(1)}]\cr Parameter to control the duration to sleep between queries. You can pass an absolute numeric value in seconds or a \code{function(i)} which returns the number of seconds to sleep in the \code{i}-th iteration. If not provided (\code{NULL}), tries to read the value (number/function) from the configuration file (stored in \code{reg$sleep}) or defaults to a function with exponential backoff between 5 and 120 seconds.} \item{timeout}{[\code{numeric(1)}]\cr After waiting \code{timeout} seconds, show a message and return \code{FALSE}. This argument may be required on some systems where, e.g., expired jobs or jobs on hold are problematic to detect. If you don't want a timeout, set this to \code{Inf}. 
Default is \code{604800} (one week).} \item{expire.after}{[\code{integer(1)}]\cr Jobs count as \dQuote{expired} if they are not found on the system but have not communicated back their results (or error message). This frequently happens on managed system if the scheduler kills a job because the job has hit the walltime or request more memory than reserved. On the other hand, network file systems often require several seconds for new files to be found, which can lead to false positives in the detection heuristic. \code{waitForJobs} treats such jobs as expired after they have not been detected on the system for \code{expire.after} iterations. If not provided (\code{NULL}), tries to read the value from the configuration file (stored in \code{reg$expire.after}), and finally defaults to \code{3}.} \item{stop.on.error}{[\code{logical(1)}]\cr Immediately cancel if a job terminates with an error? Default is \code{FALSE}.} \item{stop.on.expire}{[\code{logical(1)}]\cr Immediately cancel if jobs are detected to be expired? Default is \code{FALSE}. Expired jobs will then be ignored for the remainder of \code{waitForJobs()}.} \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ [\code{logical(1)}]. Returns \code{TRUE} if all jobs terminated successfully and \code{FALSE} if either the timeout is reached or at least one job terminated with an exception or expired. } \description{ This function simply waits until all jobs are terminated. 
} batchtools/man/getStatus.Rd0000644000176200001440000000520413606041641015443 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/getStatus.R \name{getStatus} \alias{getStatus} \title{Summarize the Computational Status} \usage{ getStatus(ids = NULL, reg = getDefaultRegistry()) } \arguments{ \item{ids}{[\code{\link[base]{data.frame}} or \code{integer}]\cr A \code{\link[base]{data.frame}} (or \code{\link[data.table]{data.table}}) with a column named \dQuote{job.id}. Alternatively, you may also pass a vector of integerish job ids. If not set, defaults to all jobs. Invalid ids are ignored.} \item{reg}{[\code{\link{Registry}}]\cr Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).} } \value{ [\code{\link[data.table]{data.table}}] (with class \dQuote{Status} for printing). } \description{ This function gives an encompassing overview over the computational status on your system. The status can be one or many of the following: \itemize{ \item \dQuote{defined}: Jobs which are defined via \code{\link{batchMap}} or \code{\link{addExperiments}}, but are not yet submitted. \item \dQuote{submitted}: Jobs which are submitted to the batch system via \code{\link{submitJobs}}, scheduled for execution. \item \dQuote{started}: Jobs which have been started. \item \dQuote{done}: Jobs which terminated successfully. \item \dQuote{error}: Jobs which terminated with an exception. \item \dQuote{running}: Jobs which are listed by the cluster functions to be running on the live system. Not supported for all cluster functions. \item \dQuote{queued}: Jobs which are listed by the cluster functions to be queued on the live system. Not supported for all cluster functions. \item \dQuote{system}: Jobs which are listed by the cluster functions to be queued or running. Not supported for all cluster functions. \item \dQuote{expired}: Jobs which have been submitted, but vanished from the live system. 
Note that this is determined heuristically and may include some false positives. } Here, a job which terminated successfully counts towards the jobs which are submitted, started and done. To retrieve the corresponding job ids, see \code{\link{findJobs}}. } \examples{ \dontshow{ batchtools:::example_push_temp(1) } tmp = makeRegistry(file.dir = NA, make.default = FALSE) fun = function(i) if (i == 3) stop(i) else i ids = batchMap(fun, i = 1:5, reg = tmp) submitJobs(ids = 1:4, reg = tmp) waitForJobs(reg = tmp) tab = getStatus(reg = tmp) print(tab) str(tab) } \seealso{ \code{\link{findJobs}} Other debug: \code{\link{getErrorMessages}()}, \code{\link{grepLogs}()}, \code{\link{killJobs}()}, \code{\link{resetJobs}()}, \code{\link{showLog}()}, \code{\link{testJob}()} } \concept{debug} batchtools/man/addProblem.Rd0000644000176200001440000000720613606041641015535 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Problem.R \name{addProblem} \alias{addProblem} \alias{Problem} \alias{removeProblems} \title{Define Problems for Experiments} \usage{ addProblem( name, data = NULL, fun = NULL, seed = NULL, cache = FALSE, reg = getDefaultRegistry() ) removeProblems(name, reg = getDefaultRegistry()) } \arguments{ \item{name}{[\code{character(1)}]\cr Unique identifier for the problem.} \item{data}{[\code{ANY}]\cr Static problem part. Default is \code{NULL}.} \item{fun}{[\code{function}]\cr The function defining the stochastic problem part. The static part is passed to this function with name \dQuote{data} and the \code{\link{Job}}/\code{\link{Experiment}} is passed as \dQuote{job}. Therefore, your function must have the formal arguments \dQuote{job} and \dQuote{data} (or dots \code{...}). If you do not provide a function, it defaults to a function which just returns the data part.} \item{seed}{[\code{integer(1)}]\cr Start seed for this problem. 
This allows the \dQuote{synchronization} of a stochastic problem across algorithms, so that different algorithms are evaluated on the same stochastic instance. If the problem seed is defined, the seeding mechanism works as follows: (1) Before the dynamic part of a problem is instantiated, the seed of the problem + [replication number] - 1 is set, i.e. the first replication uses the problem seed. (2) The stochastic part of the problem is instantiated. (3) From now on the usual experiment seed of the registry is used, see \code{\link{ExperimentRegistry}}. If \code{seed} is set to \code{NULL} (default), the job seed is used to instantiate the problem and different algorithms see different stochastic instances of the same problem.} \item{cache}{[\code{logical(1)}]\cr If \code{TRUE} and \code{seed} is set, problem instances will be cached on the file system. This assumes that each problem instance is deterministic for each combination of hyperparameter setting and each replication number. This feature is experimental.} \item{reg}{[\code{\link{ExperimentRegistry}}]\cr Registry. If not explicitly passed, uses the last created registry.} } \value{ [\code{Problem}]. Object of class \dQuote{Problem} (invisibly). } \description{ Problems may consist of up to two parts: A static, immutable part (\code{data} in \code{addProblem}) and a dynamic, stochastic part (\code{fun} in \code{addProblem}). For example, for statistical learning problems a data frame would be the static problem part while a resampling function would be the stochastic part which creates problem instance. This instance is then typically passed to a learning algorithm like a wrapper around a statistical model (\code{fun} in \code{\link{addAlgorithm}}). This function serialize all components to the file system and registers the problem in the \code{\link{ExperimentRegistry}}. \code{removeProblem} removes all jobs from the registry which depend on the specific problem. 
\code{reg$problems} holds the IDs of already defined problems. } \examples{ \dontshow{ batchtools:::example_push_temp(1) } tmp = makeExperimentRegistry(file.dir = NA, make.default = FALSE) addProblem("p1", fun = function(job, data) data, reg = tmp) addProblem("p2", fun = function(job, data) job, reg = tmp) addAlgorithm("a1", fun = function(job, data, instance) instance, reg = tmp) addExperiments(repls = 2, reg = tmp) # List problems, algorithms and job parameters: tmp$problems tmp$algorithms getJobPars(reg = tmp) # Remove one problem removeProblems("p1", reg = tmp) # List problems and algorithms: tmp$problems tmp$algorithms getJobPars(reg = tmp) } \seealso{ \code{\link{Algorithm}}, \code{\link{addExperiments}} } batchtools/DESCRIPTION0000644000176200001440000000442113606123217014124 0ustar liggesusersPackage: batchtools Title: Tools for Computation on Batch Systems Version: 0.9.12 Authors@R: c( person("Michel", "Lang", NULL, "michellang@gmail.com", role = c("cre", "aut"), comment = c(ORCID = "0000-0001-9754-0393")), person("Bernd", "Bischl", NULL, "bernd_bischl@gmx.net", role = "aut"), person("Dirk", "Surmann", NULL, "surmann@statistik.tu-dortmund.de", role = "ctb", comment = c(ORCID = "0000-0003-0873-137X")) ) Description: As a successor of the packages 'BatchJobs' and 'BatchExperiments', this package provides a parallel implementation of the Map function for high performance computing systems managed by schedulers 'IBM Spectrum LSF' (), 'OpenLava' (), 'Univa Grid Engine'/'Oracle Grid Engine' (), 'Slurm' (), 'TORQUE/PBS' (), or 'Docker Swarm' (). A multicore and socket mode allow the parallelization on a local machines, and multiple machines can be hooked up via SSH to create a makeshift cluster. Moreover, the package provides an abstraction mechanism to define large-scale computer experiments in a well-organized and reproducible way. 
License: LGPL-3 URL: https://github.com/mllg/batchtools BugReports: https://github.com/mllg/batchtools/issues NeedsCompilation: yes ByteCompile: yes Encoding: UTF-8 Depends: R (>= 3.0.0) Imports: backports (>= 1.1.2), base64url (>= 1.1), brew, checkmate (>= 1.8.5), data.table (>= 1.11.2), digest (>= 0.6.9), fs (>= 1.2.0), parallel, progress (>= 1.1.1), R6, rappdirs, stats, stringi, utils, withr (>= 2.0.0) Suggests: debugme, doParallel, doMPI, e1071, foreach, future, future.batchtools, knitr, parallelMap, ranger, rmarkdown, rpart, snow, testthat, tibble VignetteBuilder: knitr RoxygenNote: 7.0.2 Packaged: 2020-01-10 12:29:39 UTC; lang Author: Michel Lang [cre, aut] (), Bernd Bischl [aut], Dirk Surmann [ctb] () Maintainer: Michel Lang Repository: CRAN Date/Publication: 2020-01-10 16:30:07 UTC batchtools/build/0000755000176200001440000000000013606067063013522 5ustar liggesusersbatchtools/build/vignette.rds0000644000176200001440000000030213606067063016054 0ustar liggesusersb```b`fcb`b2 1# 'KJ,I() MABW&˃l*`9 `aBR˚n res[y %in% c("c", "d", "e"), t])) # remaining is suppressed if nothing more to submit, no error res = estimateRuntimes(unwrap(getJobPars(findDone(reg = reg), reg = reg)), reg = reg) expect_output(print(res, n = 2)) }) batchtools/tests/testthat/test_parallelMap.R0000644000176200001440000000356713246270473021055 0ustar liggesuserscontext("parallelMap") silent({ reg = makeTestRegistry() fun = function(i) { fun = function(i) i^2; parallelMap::parallelMap(fun, 1:i)} ids = batchMap(fun, i = 1:4, reg = reg) }) test_that("pm/multicore", { skip_on_os("windows") skip_if_not_installed("parallelMap") skip_on_travis() if (reg$cluster.functions$name %chin% c("Parallel", "Socket")) skip("Nested local parallelization not supported") submitAndWait(reg, ids = ids, resources = list(pm.backend = "multicore", ncpus = 2)) expect_equal(nrow(findDone(reg = reg)), 4L) }) test_that("pm/socket", { skip_if_not_installed("parallelMap") skip_if_not_installed("snow") skip_on_travis() if 
(reg$cluster.functions$name %chin% c("Parallel", "Socket")) skip("Nested local parallelization not supported") submitAndWait(reg, ids = ids, resources = list(pm.backend = "socket", ncpus = 2)) expect_equal(nrow(findDone(reg = reg)), 4L) }) test_that("parallelMap works with batchtools", { skip_if_not_installed("parallelMap") skip_if_not(packageVersion("parallelMap") >= "1.4") requireNamespace("parallelMap") dir = reg$temp.dir %??% fs::path_temp() parallelMap::parallelStartBatchtools(storagedir = dir, show.info = FALSE) dir = getOption("parallelMap.bt.reg.filedir") res = parallelMap::parallelMap(function(x, y) x + y, x = 1:2, y = 1) parallelMap::parallelStop() if (fs::dir_exists(dir)) fs::dir_delete(dir) expect_equal(res, list(2, 3)) }) # test_that("pm/mpi", { # skip_on_os("mac") # skip_on_cran() # skip_if_not_installed("parallelMap") # skip_if_not_installed("snow") # skip_if_not_installed("Rmpi") # skip_on_travis() # if (reg$cluster.functions$name %chin% c("Parallel", "Socket")) # skip("Nested local parallelization not supported") # submitAndWait(reg, ids = ids, resources = list(pm.backend = "mpi", ncpus = 2)) # expect_equal(nrow(findDone(reg = reg)), 4) # }) batchtools/tests/testthat/test_sleep.R0000644000176200001440000000070513200617366017716 0ustar liggesuserscontext("getSleepFunction") test_that("getSleepFunction", { reg = makeTestRegistry() f = getSleepFunction(reg, NULL) expect_function(f) expect_true(any(grepl("Sys.sleep", as.character(body(f))))) f = getSleepFunction(reg, 99) expect_function(f) expect_true(any(grepl("Sys.sleep", as.character(body(f))))) f = getSleepFunction(reg, function(x) x^2) expect_function(f) expect_true(any(grepl("Sys.sleep", as.character(body(f))))) }) batchtools/tests/testthat/test_Job.R0000644000176200001440000000723213432557307017327 0ustar liggesuserscontext("Job") test_that("Job", { reg = makeTestRegistry() fun = function(...) list(...) 
ids = batchMap(fun, i = 1:3, reg = reg, more.args = list(x = 1)) submitAndWait(reg, 1, resources = list(foo = "bar")) job = makeJob(reg = reg, i = 1) expect_is(job, "Job") expect_identical(job$id, 1L) expect_equal(job$pars, list(i = 1L, x = 1)) expect_count(job$seed) expect_list(job$resources, names = "named") expect_equal(job$resources$foo, "bar") expect_function(job$fun) jc = makeJobCollection(reg = reg, resources = list(foo = "bar")) job = getJob(jc, i = 1L) expect_is(job, "Job") expect_identical(job$id, 1L) expect_equal(job$pars, list(i = 1L, x = 1)) expect_count(job$seed) expect_list(job$resources, names = "named") expect_equal(job$resources$foo, "bar") expect_function(job$fun) }) test_that("Experiment", { reg = makeTestExperimentRegistry() addProblem(reg = reg, "p1", fun = function(job, data, ...) list(data = data, ...)) addAlgorithm(reg = reg, "a1", fun = function(job, data, instance, ...) length(instance)) ids = addExperiments(list(p1 = data.table(i = 1:3)), list(a1 = data.table()), reg = reg) job = makeJob(1, reg = reg) expect_is(job, "Experiment") expect_identical(job$id, 1L) expect_equal(job$pars, list(prob.pars = list(i = 1), algo.pars = list())) expect_count(job$repl) expect_count(job$seed) expect_list(job$resources, names = "named") expect_is(job$problem, "Problem") expect_is(job$algorithm, "Algorithm") expect_identical(job$instance, list(data = NULL, i = 1L)) jc = makeJobCollection(reg = reg) job = getJob(jc, i = 1L) expect_is(job, "Experiment") expect_identical(job$id, 1L) expect_equal(job$pars, list(prob.pars = list(i = 1), algo.pars = list())) expect_count(job$seed) expect_list(job$resources, names = "named") expect_is(job$problem, "Problem") expect_is(job$algorithm, "Algorithm") expect_identical(job$instance, list(data = NULL, i = 1L)) }) test_that("External directory is created", { reg = makeTestRegistry() fun = function(..., .job) .job$external.dir ids = batchMap(fun, i = 1:3, reg = reg, more.args = list(x = 1)) submitAndWait(reg) 
expect_directory_exists(unwrap(reduceResultsDataTable(1:3, reg = reg))[[2]]) reg = makeTestExperimentRegistry() addProblem(reg = reg, "p1", fun = function(job, data, ...) list(data = data, ...)) addAlgorithm(reg = reg, "a1", fun = function(job, data, instance, ...) { saveRDS(job$id, file = fs::path(job$external.dir, sprintf("%s.rds", job$id)), version = 2L) job$external.dir }) ids = addExperiments(list(p1 = data.table(i = 1:3)), list(a1 = data.table()), reg = reg) submitAndWait(reg, c(1, 3)) paths = reduceResultsList(1:3, missing.val = NULL, reg = reg) expect_directory_exists(paths[[1]]) expect_true(fs::file_exists(fs::path(reg$file.dir, "external", "1", "1.rds"))) expect_null(paths[[2]]) expect_false(fs::dir_exists(fs::path(reg$file.dir, "external", "2"))) expect_directory_exists(paths[[3]]) expect_true(fs::file_exists(fs::path(reg$file.dir, "external", "3", "3.rds"))) expect_equal(reduceResultsList(1:3, fun = function(job, ...) job$external.dir, reg = reg, missing.val = NULL), paths) resetJobs(3, reg = reg) expect_false(fs::dir_exists(fs::path(reg$file.dir, "external", "3"))) expect_true(fs::dir_exists(fs::path(reg$file.dir, "external", "1"))) # directory is persistent between submits? addAlgorithm(reg = reg, "a1", fun = function(job, data, instance, ...) 
{ list.files(job$external.dir) }) submitAndWait(reg, 1) sweepRegistry(reg = reg) expect_true(fs::file_exists(fs::path(reg$file.dir, "external", "1", "1.rds"))) expect_identical(loadResult(1, reg = reg), "1.rds") }) batchtools/tests/testthat/test_foreach.R0000644000176200001440000000214213246270473020216 0ustar liggesuserscontext("foreach") test_that("foreach/seq", { skip_if_not_installed("foreach") reg = makeTestRegistry(packages = "foreach") fun = function(i) { foreach(j = 1:i, .combine = c) %dopar% { j^2 } } ids = batchMap(fun, i = 1:2, reg = reg) submitAndWait(reg, ids = ids, resources = list(foreach.backend = "seq", ncpus = 2)) expect_equal(nrow(findDone(reg = reg)), 2L) expect_equal(reduceResultsList(reg = reg), list(1, c(1, 4))) }) test_that("foreach/multicore", { skip_if_not_installed("foreach") skip_if_not_installed("doParallel") reg = makeTestRegistry(packages = "foreach") if (reg$cluster.functions$name %chin% c("Parallel", "Socket")) skip("Nested local parallelization not supported") fun = function(i) { foreach(j = 1:2) %dopar% { Sys.sleep(3); i } } ids = batchMap(fun, i = 1, reg = reg) submitAndWait(reg, ids = ids, resources = list(foreach.backend = "parallel", ncpus = 2)) expect_equal(nrow(findDone(reg = reg)), 1L) status = getJobStatus(reg = reg) expect_true(status$time.running < 5.9) expect_equal(reduceResultsList(reg = reg), list(as.list(c(1, 1)))) }) batchtools/tests/testthat/test_joins.R0000644000176200001440000000572613214447313017736 0ustar liggesuserscontext("joins") test_that("joins", { reg = makeTestRegistry() batchMap(identity, x = 1:6, reg = reg) x = unwrap(getJobPars(reg = reg)[1:5]) y = findJobs(x >= 2 & x <= 5, reg = reg) y$extra.col = head(letters, nrow(y)) res = ijoin(x, y) expect_data_table(res, key = "job.id", ncol = 3, any.missing = FALSE) expect_identical(res$job.id, 2:5) expect_copied(res, x) res = ljoin(as.data.frame(x), y) expect_data_table(res, key = "job.id", ncol = 3) expect_identical(res$job.id, 1:5) 
expect_true(anyMissing(res$extra.col)) expect_copied(res, x) res = rjoin(as.data.frame(x), y) expect_data_table(res, key = "job.id", ncol = 3, any.missing = FALSE) expect_identical(res$job.id, 2:5) expect_copied(res, x) res = rjoin(y, x) expect_data_table(res, key = "job.id", ncol = 3) expect_identical(res$job.id, 1:5) expect_true(anyMissing(res$extra.col)) expect_copied(res, x) res = ojoin(x, y) expect_data_table(res, key = "job.id", ncol = 3) expect_identical(res$job.id, 1:5) expect_true(anyMissing(res$extra.col)) expect_copied(res, x) res = sjoin(x, y) expect_data_table(res, key = "job.id", ncol = 2, any.missing = FALSE) expect_identical(res$job.id, 2:5) expect_copied(res, x) res = sjoin(y, x) expect_data_table(res, key = "job.id", ncol = 2, any.missing = FALSE) expect_identical(res$job.id, 2:5) expect_copied(res, x) res = ajoin(x, y) expect_data_table(res, key = "job.id", ncol = 2, any.missing = FALSE) expect_identical(res$job.id, 1L) expect_copied(res, x) res = ijoin(x, data.frame(job.id = 2:4)) expect_data_table(res, key = "job.id", ncol = 2, any.missing = FALSE) expect_identical(res$job.id, 2:4) expect_copied(res, x) res = ijoin(data.frame(job.id = 2:4), x) expect_data_table(res, key = "job.id", ncol = 2, any.missing = FALSE) expect_identical(res$job.id, 2:4) expect_copied(res, x) res = ajoin(as.data.frame(x), y) expect_data_table(res, key = "job.id", ncol = 2, any.missing = FALSE) expect_identical(res$job.id, 1L) expect_copied(res, x) res = ujoin(x, y) expect_equivalent(res, x) expect_copied(res, x) yy = copy(y) yy$x = 10:13 res = ujoin(x, yy) expect_data_table(res, key = "job.id", ncol = ncol(x), any.missing = FALSE) expect_identical(res$job.id, 1:5) expect_identical(res$x, c(1L, 10:13)) expect_copied(res, x) res = ujoin(x, yy, all.y = TRUE) expect_data_table(res, key = "job.id", ncol = 3) expect_identical(res$job.id, 1:5) expect_identical(res$x, c(1L, 10:13)) expect_identical(res$extra.col, c(NA, letters[1:4])) expect_copied(res, x) }) 
test_that("guessBy", { x = data.frame(id = 1:3, x = 1:3) y = data.frame(jid = 1:3, y = 3:1) expect_error(guessBy(x, y), "explicitly") expect_error(guessBy(x, y, by = "id"), "subset of") by = guessBy(x, y, by = c(id = "jid")) expect_equal(unname(by), "jid") expect_equal(names(by), "id") y$id = y$jid by = guessBy(x, y, by = "id") expect_equal(unname(by), "id") expect_equal(names(by), NULL) }) batchtools/tests/testthat/test_getStatus.R0000644000176200001440000000121313335224604020562 0ustar liggesuserscontext("getStatus") test_that("getStatus", { reg = makeTestRegistry() fun = function(i) if (i == 4) stop("4!") else i ids = batchMap(fun, i = 1:10, reg = reg) submitAndWait(reg, 1:5) stat = getStatus(reg = reg) expect_data_table(stat, any.missing = FALSE, types = "integer", nrows = 1L) expect_equal(stat$defined, 10L) expect_equal(stat$submitted, 5L) expect_equal(stat$started, 5L) expect_equal(stat$done, 4L) expect_equal(stat$error, 1L) expect_equal(stat$queued, 0L) expect_equal(stat$running, 0L) expect_equal(stat$system, 0L) expect_equal(stat$expired, 0L) expect_output(print(stat), "Status for 10 jobs") }) batchtools/tests/testthat/test_hooks.R0000644000176200001440000000157413234300075017727 0ustar liggesuserscontext("hooks") test_that("hooks", { reg = makeTestRegistry() if (!is.null(reg$cluster.functions$hooks$pre.do.collection) || !is.null(reg$cluster.functions$hooks$post.sync)) skip("Hooks already defined by Cluster Functions") reg$cluster.functions$hooks = insert(reg$cluster.functions$hooks, list( "pre.do.collection" = function(jc, ...) cat(jc$job.hash, "\n", sep = ""), "post.sync" = function(reg, ...) 
cat("post.syn", file = fs::path(reg$file.dir, "post.sync.txt")) )) jc = makeJobCollection(1, reg = reg) expect_function(jc$hooks$pre.do.collection, args = "jc") fn.ps = fs::path(reg$file.dir, "post.sync.txt") expect_false(fs::file_exists(fn.ps)) batchMap(identity, 1, reg = reg) submitAndWait(reg, 1) syncRegistry(reg = reg) expect_true(fs::file_exists(fn.ps)) lines = getLog(1, reg = reg) expect_true(reg$status[1]$job.hash %chin% lines) }) batchtools/tests/testthat/test_seed.R0000644000176200001440000000617113462045024017525 0ustar liggesuserscontext("Seeds") test_that("with_seed", { set.seed(1) x.1 = runif(5) set.seed(42) x.42 = runif(5) x.next = runif(5) set.seed(42) y.1 = withr::with_seed(1, runif(5)) y.42 = runif(5) y.next = runif(5) expect_identical(x.1, y.1) expect_identical(x.42, y.42) expect_identical(x.next, y.next) expect_error(withr::with_seed(1, print(state))) }) test_that("Problem and Algorithm seed", { reg = makeTestExperimentRegistry(seed = 42) addProblem(reg = reg, "p1", data = iris, fun = function(job, data, ...) runif(1), seed = 1L) addProblem(reg = reg, "p2", data = iris, fun = function(job, data, ...) runif(1)) addAlgorithm(reg = reg, "a1", fun = function(job, data, instance, ...) list(instance = instance, res = runif(1))) addAlgorithm(reg = reg, "a2", fun = function(job, data, instance, ...) 
list(instance = instance, res = runif(1))) prob.designs = list(p1 = data.table(), p2 = data.table()) algo.designs = list(a1 = data.table(), a2 = data.table()) repls = 3 ids = addExperiments(prob.designs, algo.designs, repls = repls, reg = reg) submitAndWait(reg, ids) set.seed(1); p1 = runif(1) set.seed(2); p2 = runif(1) set.seed(3); p3 = runif(1) set.seed(43); a1 = runif(1) set.seed(44); a2 = runif(1) set.seed(45); a3 = runif(1) silent({ ids = findExperiments(algo.name = "a1", prob.name = "p1", reg = reg) results = rbindlist(reduceResultsList(ids, reg = reg), use.names = TRUE) }) expect_true(all(results$instance == c(p1, p2, p3))) expect_true(all(results$res == c(a1, a2, a3))) silent({ ids = findExperiments(prob.name = "p1", repl = 2, reg = reg) results = rbindlist(reduceResultsList(ids, reg = reg), use.names = TRUE) }) expect_true(all(results$instance == p2)) silent({ ids = findExperiments(prob.name = "p2", reg = reg) results = rbindlist(reduceResultsList(ids, reg = reg), use.names = TRUE) }) expect_numeric(results$instance, unique = TRUE) expect_numeric(results$res, unique = TRUE) }) test_that("Seed is correctly reported (#203)", { reg = makeTestRegistry(seed = 1) batchMap(function(x, .job) list(seed = .job$seed), x = 1:3, reg = reg) submitAndWait(reg) res = unwrap(reduceResultsDataTable(reg = reg)) expect_data_table(res, nrow = 3, ncol = 2) expect_identical(res$seed, 2:4) expect_true(any(stri_detect_fixed(getLog(1, reg = reg), "Setting seed to 2"))) expect_true(any(stri_detect_fixed(getLog(2, reg = reg), "Setting seed to 3"))) expect_true(any(stri_detect_fixed(getLog(3, reg = reg), "Setting seed to 4"))) reg = makeTestExperimentRegistry(seed = 1) addProblem(reg = reg, "p1", fun = function(job, ...) job$seed, seed = 100L) addAlgorithm(reg = reg, "a1", fun = function(job, instance, ...) 
list(instance = instance, seed = job$seed)) ids = addExperiments(repls = 2, reg = reg) getStatus(reg = reg) submitAndWait(reg) res = unwrap(reduceResultsDataTable(reg = reg)) expect_data_table(res, nrow = 2, ncol = 3) expect_identical(res$instance, 2:3) expect_identical(res$seed, 2:3) expect_true(any(stri_detect_fixed(getLog(1, reg = reg), "seed = 2"))) expect_true(any(stri_detect_fixed(getLog(2, reg = reg), "seed = 3"))) }) batchtools/tests/testthat/test_unwrap.R0000644000176200001440000000500013341200566020107 0ustar liggesuserscontext("unwrap") test_that("unwrap behaves", { x = data.table( id = 1:3, nested.list = list(list(a = 1), list(a = 2), list(a = 33)), nested.2dlist = list(list(a = 1, b = 2), list(a = 1), list(b = 2)), nested.df = list(data.frame(a = 1, b = 2), data.frame(a = 1), data.frame(b = 2)), multi.row = list(data.frame(a = 1:2, b = 1:2), data.frame(a = 3:4, b = 3:4), data.frame(a = 1:2, b = 3:4)), empty = list(NULL, NULL, NULL) ) cols = "nested.list" res = unwrap(x, cols) expect_data_table(res, nrow = nrow(x), ncol = ncol(x), col.names = "unique", any.missing = FALSE) expect_equal(names(res), c("id", "nested.2dlist", "nested.df", "multi.row", "empty", "a")) expect_numeric(res[["a"]]) cols = "nested.list" res = unwrap(x, cols, sep = ".") expect_data_table(res, nrow = nrow(x), ncol = ncol(x), col.names = "unique", any.missing = FALSE) expect_equal(names(res), c("id", "nested.2dlist", "nested.df", "multi.row", "empty", "nested.list.a")) expect_numeric(res[["nested.list.a"]]) cols = "nested.2dlist" res = unwrap(x, cols) expect_data_table(res, nrow = nrow(x), ncol = ncol(x) + 1L, col.names = "unique", any.missing = TRUE) expect_equal(names(res), c("id", "nested.list", "nested.df", "multi.row", "empty", "a", "b")) expect_numeric(res[["a"]]) expect_numeric(res[["b"]]) cols = "nested.2dlist" res = unwrap(x, cols, sep = "_") expect_data_table(res, nrow = nrow(x), ncol = ncol(x) + 1L, col.names = "unique", any.missing = TRUE) expect_set_equal(names(res), 
c("id", "nested.list", "nested.2dlist_a", "nested.2dlist_b", "nested.df", "multi.row", "empty")) expect_numeric(res[["nested.2dlist_a"]]) expect_numeric(res[["nested.2dlist_b"]]) cols = "nested.df" res = unwrap(x, cols, sep = "_") expect_data_table(res, nrow = nrow(x), ncol = ncol(x) + 1L, col.names = "unique", any.missing = TRUE) expect_set_equal(names(res), c("id", "nested.list", "nested.2dlist", "nested.df_a", "nested.df_b", "multi.row", "empty")) expect_numeric(res[["nested.df_a"]]) expect_numeric(res[["nested.df_b"]]) cols = "empty" res = unwrap(x, cols) expect_data_table(res, nrow = nrow(x), ncol = ncol(x) - 1L, col.names = "unique", any.missing = TRUE) expect_equal(names(res), c("id", "nested.list", "nested.2dlist", "nested.df", "multi.row")) expect_error(unwrap(x), "Name clash") x = data.table(x = list(2, 3, 5), y = 1:3) res = unwrap(x) expect_data_table(res, nrow = 3, ncol = 2, col.names = "unique", any.missing = FALSE) expect_set_equal(names(res), c("y", "x.1")) }) batchtools/tests/testthat/test_showLog.R0000644000176200001440000000277713432537733020251 0ustar liggesuserscontext("showLog/getLog") test_that("showLog/getLog", { reg = makeTestRegistry() batchMap(function(x) print("GREPME"), 1:2, reg = reg) expect_error(showLog(id = 1, reg = reg), "not available") expect_error(readLog(id = data.table(job.id = 1L), reg = reg), "not available") submitAndWait(reg) lines = getLog(id = 1, reg = reg) expect_character(lines, min.len = 3L, any.missing = FALSE) expect_equal(sum(stri_detect_fixed(lines, "GREPME")), 1L) expect_true(any(stri_startswith_fixed(lines, "### [bt"))) expect_identical(sum(stri_endswith_fixed(lines, "[batchtools job.id=1]")), 2L) expect_false(any(stri_endswith_fixed(lines, "[batchtools job.id=2]"))) lines = getLog(id = 2, reg = reg) expect_false(any(stri_endswith_fixed(lines, "[batchtools job.id=1]"))) withr::with_options(list(pager = function(files, header, title, delete.file) files), { x = showLog(id = 2, reg = reg) 
expect_equal(fs::path_file(x), "2.log") expect_equal(sum(stri_detect_fixed(readLines(x), "GREPME")), 1L) }) expect_error(getLog(id = 1:2, reg = reg), "exactly") expect_error(getLog(id = 3, reg = reg), "exactly") }) test_that("empty log files", { reg = makeTestRegistry() batchMap(identity, 1, reg = reg) submitAndWait(reg) # overwrite log file log.file = getLogFiles(reg, 1) fs::file_delete(log.file) fs::file_create(log.file) x = readLog(data.table(job.id = 1), reg = reg) expect_data_table(x, ncol = 2, nrow = 0, index = "job.id") expect_equal(getLog(1, reg = reg), character(0L)) }) batchtools/tests/testthat/test_removeExperiments.R0000644000176200001440000000303413234300075022316 0ustar liggesuserscontext("removeExperiments") test_that("removeExperiments", { reg = makeTestExperimentRegistry() prob = addProblem(reg = reg, "p1", data = iris, fun = function(job, data) nrow(data), seed = 42) prob = addProblem(reg = reg, "p2", data = iris, fun = function(job, data) nrow(data), seed = 42) algo = addAlgorithm(reg = reg, "a1", fun = function(job, data, instance, sq) instance^sq) algo = addAlgorithm(reg = reg, "a2", fun = function(job, data, instance, sq) instance^sq) ids = addExperiments(list(p1 = data.table(), p2 = data.table(x = 1:2)), list(a1 = data.table(sq = 1:3), a2 = data.table(sq = 1:2)), reg = reg) N = nrow(findExperiments(reg = reg)) expect_data_table(removeExperiments(1, reg = reg), nrow = 1, key = "job.id") expect_equal(findExperiments(reg = reg)$job.id, 2:N) expect_data_table(removeExperiments(1, reg = reg), nrow = 0, key = "job.id") expect_equal(findExperiments(reg = reg)$job.id, 2:N) ids = findExperiments(prob.name = "p1", reg = reg) expect_data_table(removeExperiments(ids, reg = reg), nrow = 4, key = "job.id") expect_equal(findExperiments(reg = reg)$job.id, 6:N) expect_true(fs::file_exists(getProblemURI(reg, "p1"))) expect_set_equal(c("p1", "p2"), reg$problems) ids = findExperiments(algo.name = "a2", reg = reg) expect_data_table(removeExperiments(ids, reg = 
reg), nrow = 4, key = "job.id") expect_equal(findExperiments(reg = reg)$job.id, 6:(N-nrow(ids))) expect_true(fs::file_exists(getAlgorithmURI(reg, "a2"))) expect_set_equal(c("a1", "a2"), reg$algorithms) checkTables(reg) }) batchtools/tests/testthat/test_JobNames.R0000644000176200001440000000177513200617367020315 0ustar liggesuserscontext("JobNames") test_that("setJobNames", { reg = makeTestRegistry() fun = function(...) list(...) ids = batchMap(fun, i = 1:3, reg = reg) expect_null(setJobNames(ids, letters[1:3], reg = reg)) x = getJobNames(reg = reg) expect_data_table(x, ncol = 2, nrow = 3, key = "job.id") expect_identical(x$job.name, letters[1:3]) expect_identical(reg$status$job.name, letters[1:3]) expect_data_table(ijoin(getJobNames(1:2, reg = reg), getJobPars(reg = reg)), ncol = 3, nrow = 2, key = "job.id") jc = makeJobCollection(1, reg = reg) expect_identical(jc$job.name, "a") jc = makeJobCollection(1:3, reg = reg) expect_identical(jc$job.name, "a") expect_identical(jc$jobs$job.name, letters[1:3]) expect_null(setJobNames(ids, rep(NA_character_, 3), reg = reg)) x = getJobNames(reg = reg) expect_data_table(x, ncol = 2, nrow = 3, key = "job.id") expect_identical(x$job.name, rep(NA_character_, 3)) jc = makeJobCollection(1:3, reg = reg) expect_identical(jc$job.name, jc$job.hash) }) batchtools/tests/testthat/test_btlapply.R0000644000176200001440000000230513201026450020420 0ustar liggesuserscontext("btlapply") test_that("btlapply", { reg = makeTestRegistry() fun = function(x, y) x^y res = silent(btlapply(1:3, fun, y = 2, n.chunks = 2, resources = list(..dummy = 42), reg = reg)) expect_equal(res, lapply(1:3, fun, y = 2)) expect_equal(uniqueN(reg$status$job.hash), 2) expect_equal(reg$resources$resources[[1L]]$..dummy, 42) }) test_that("btmapply", { fun = function(x, y) paste0(x, y) x = 1:3 y = letters[1:3] reg = makeTestRegistry() res = silent(btmapply(fun, x = x, y = y, chunk.size = 2, use.names = FALSE, reg = reg)) expect_equal(res, mapply(fun, x = x, y = y, SIMPLIFY = 
FALSE, USE.NAMES = FALSE)) expect_equal(uniqueN(reg$status$job.hash), 2) reg = makeTestRegistry() reg$cluster.functions = makeClusterFunctionsInteractive() expect_equal(silent(btmapply(fun, x = x, y = y, n.chunks = 1, use.names = FALSE, simplify = TRUE, reg = reg)), mapply(fun, x = x, y = y, SIMPLIFY = TRUE, USE.NAMES = FALSE)) reg = makeTestRegistry() reg$cluster.functions = makeClusterFunctionsInteractive() expect_equal(silent(btmapply(fun, x = x, y = y, n.chunks = 1, use.names = TRUE, simplify = TRUE, reg = reg)), mapply(fun, x = x, y = y, SIMPLIFY = TRUE, USE.NAMES = TRUE)) }) batchtools/tests/testthat/test_Algorithm.R0000644000176200001440000000321313516316111020523 0ustar liggesuserscontext("addAlgorithm") test_that("addAlgorithm", { reg = makeTestExperimentRegistry() algo = addAlgorithm(reg = reg, "a1", fun = function(job, data, instance, ...) NULL) expect_is(algo, "Algorithm") expect_equal(algo$name, "a1") expect_function(algo$fun) expect_file_exists(getAlgorithmURI(reg, algo$name)) prob = addProblem(reg = reg, "p1", data = iris, fun = function(job, data) nrow(data)) algo = addAlgorithm(reg = reg, "a2", fun = function(...) NULL) ids = addExperiments(list(p1 = data.table()), algo.designs = list(a1 = data.table(), a2 = data.table()), repls = 2, reg = reg) expect_integer(ids$job.id, len = 4L) removeAlgorithms(reg = reg, "a1") expect_integer(reg$status$job.id, len = 2L) expect_set_equal(reg$algorithms, "a2") expect_set_equal(reg$algorithms, "a2") expect_false(fs::file_exists(getAlgorithmURI(reg, "a1"))) expect_true(fs::file_exists(getAlgorithmURI(reg, "a2"))) expect_set_equal(getJobPars(reg = reg)$algorithm, "a2") checkTables(reg) }) test_that("addAlgorithm overwrites old algo", { reg = makeTestExperimentRegistry() prob = addProblem(reg = reg, "p1", data = iris, fun = function(job, data) 2) algo = addAlgorithm(reg = reg, "a1", fun = function(job, data, instance, ...) 
instance * 2) ids = addExperiments(list(p1 = data.table()), list(a1 = data.table()), reg = reg) run = function(id) suppressAll(execJob(makeJob(id, reg = reg))) expect_equal(run(1), 4) prob = addProblem(reg = reg, "p1", data = iris, fun = function(job, data) 4) expect_equal(run(1), 8) algo = addAlgorithm(reg = reg, "a1", fun = function(job, data, instance, ...) instance * 8) expect_equal(run(1), 32) }) batchtools/tests/testthat/test_killJobs.R0000644000176200001440000000075113200617366020360 0ustar liggesuserscontext("killJobs") test_that("killJobs", { reg = makeTestRegistry() if (is.null(reg$cluster.functions$killJob)) skip("Test requires killJobs") ids = batchMap(Sys.sleep, time = 60, reg = reg) silent(submitJobs(1, reg = reg)) expect_equal(findOnSystem(1, reg = reg), findJobs(reg = reg)) batch.id = reg$status[1, batch.id] silent({ res = killJobs(1, reg = reg) }) expect_equal(res$job.id, 1L) expect_equal(res$batch.id, batch.id) expect_true(res$killed) }) batchtools/tests/testthat/test_future.R0000644000176200001440000000062713606041732020121 0ustar liggesuserscontext("future.batchtools") test_that("futures work", { skip_if_not_installed("future.batchtools") path = Sys.getenv("R_FUTURE_CACHE_PATH") if (!nzchar(path)) Sys.setenv(R_FUTURE_CACHE_PATH = fs::path(fs::path_temp(), ".future")) library("future") library("future.batchtools") plan(batchtools_local) pid %<-% { Sys.getpid() } expect_count(pid) expect_false(pid == Sys.getpid()) }) batchtools/tests/testthat/test_getErrorMessages.R0000644000176200001440000000235613200617366022073 0ustar liggesuserscontext("getErrorMessages") test_that("getErrorMessages", { reg = makeTestRegistry() fun = function(i) if (i == 3) stop("foobar") else i ids = batchMap(fun, i = 1:5, reg = reg) submitAndWait(reg, 1:4) tab = getErrorMessages(ids, reg = reg) expect_data_table(tab, nrow = 5, ncol = 4, key = "job.id") expect_set_equal(names(tab), c("job.id", "terminated", "error", "message")) expect_identical(tab$job.id, 1:5) 
expect_equal(tab$terminated, c(rep(TRUE, 4), FALSE)) expect_equal(tab$error, replace(logical(5), 3, TRUE)) expect_character(tab$message) expect_equal(is.na(tab$message), !replace(logical(5), 3, TRUE)) expect_string(tab$message[3], fixed = "foobar") tab = getErrorMessages(ids, missing.as.error = TRUE, reg = reg) expect_data_table(tab, nrow = 5, ncol = 4, key = "job.id") expect_set_equal(names(tab), c("job.id", "terminated", "error", "message")) expect_identical(tab$job.id, 1:5) expect_equal(tab$terminated, c(rep(TRUE, 4), FALSE)) expect_equal(tab$error, replace(logical(5), c(3, 5), TRUE)) expect_character(tab$message) expect_equal(is.na(tab$message), !replace(logical(5), c(3, 5), TRUE)) expect_string(tab$message[3], fixed = "foobar") expect_string(tab$message[5], fixed = "[not terminated]") }) batchtools/tests/testthat/test_memory.R0000644000176200001440000000117113432540150020105 0ustar liggesuserscontext("measure memory") test_that("memory measurements work", { skip_on_os("windows") skip_on_travis() skip_on_cran() reg = makeTestRegistry() reg$cluster.functions = makeClusterFunctionsSSH(list(Worker$new("localhost"))) ids = batchMap(function(n) { m = matrix(runif(n), nrow = 10); m %*% t(m) }, n = c(100, 1e7), reg = reg) submitAndWait(reg, 1:2, resources = list(measure.memory = TRUE)) expect_true(any(stri_detect_fixed(readLog(1L, reg = reg)$lines, "Memory measurement enabled"))) expect_numeric(reg$status$mem.used, any.missing = FALSE) expect_true(reg$status$mem.used[2] > reg$status$mem.used[1]) }) batchtools/tests/testthat/test_ClusterFunctionsSocket.R0000644000176200001440000000100513432542232023257 0ustar liggesuserscontext("cf socket") test_that("cf socket", { skip_if_not_installed("snow") # skip_on_travis() reg = makeTestRegistry() reg$cluster.functions = makeClusterFunctionsSocket(2) ids = batchMap(Sys.sleep, time = c(5, 5), reg = reg) silent({ submitJobs(1:2, reg = reg) expect_equal(findOnSystem(reg = reg), findJobs(reg = reg)) expect_true(waitForJobs(sleep = 
0.5, reg = reg)) }) expect_data_table(findOnSystem(reg = reg), nrow = 0) expect_equal(findDone(reg = reg), findJobs(reg = reg)) }) batchtools/tests/testthat/test_removeRegistry.R0000644000176200001440000000112713234300075021624 0ustar liggesuserscontext("removeRegistry") test_that("removeRegistry", { reg = makeTestRegistry() expect_directory_exists(reg$file.dir) res = removeRegistry(0.01, reg = reg) expect_string(res) expect_false(fs::dir_exists(reg$file.dir)) }) test_that("removeRegistry resets default registry", { prev = batchtools$default.registry reg = makeTestExperimentRegistry(make.default = TRUE) expect_is(batchtools$default.registry, "Registry") res = removeRegistry(0, reg = reg) expect_false(fs::dir_exists(reg$file.dir)) expect_null(batchtools$default.registry) batchtools$default.registry = prev }) batchtools/tests/testthat/test_batchReduce.R0000644000176200001440000000200713272302113021002 0ustar liggesuserscontext("batchReduce") test_that("batchReduce", { reg = makeTestRegistry() xs = 1:20 ids = batchReduce(function(aggr, x) aggr+x, xs, init = 0, chunks = chunk(seq_along(xs), n.chunks = 10), reg = reg) expect_data_table(ids, nrow = 10, key = "job.id") submitAndWait(ids = ids, reg = reg) y = reduceResults(fun = function(aggr, job, res) aggr+res, init = 0, reg = reg) expect_equal(y, sum(1:20)) }) test_that("batchReduce w/ more.args", { reg = makeTestRegistry() xs = 1:20 chunks = sort(chunk(seq_along(xs), n.chunks = 10)) ids = batchReduce(function(aggr, x, y) aggr+x+y, 1:20, init = 100, chunks = chunks, more.args = list(y=1), reg = reg) expect_data_table(ids, nrow = 10, key = "job.id") submitAndWait(reg = reg) expect_equal(as.integer(reduceResultsList(reg = reg)), viapply(split(xs, chunks), function(x) 100L + length(x) + sum(x), use.names = FALSE)) y = reduceResults(fun=function(aggr, job, res) aggr+res, init = 0, reg = reg) expect_equal(y, sum(1:20) + 20 + uniqueN(chunks) * 100) }) 
batchtools/tests/testthat/test_chunk.R0000644000176200001440000001032013436167427017721 0ustar liggesuserscontext("chunk") test_that("chunk", { x = 1:10; n.chunks = 2 expect_integer(chunk(x, n.chunks = n.chunks), len = length(x), lower = 1, upper = n.chunks, any.missing = FALSE) x = 1:10; n.chunks = 1 expect_integer(chunk(x, n.chunks = n.chunks), len = length(x), lower = 1, upper = n.chunks, any.missing = FALSE) x = 1:10; n.chunks = 10 expect_integer(chunk(x, n.chunks = n.chunks), len = length(x), lower = 1, upper = n.chunks, any.missing = FALSE) x = 1:10; n.chunks = 20 expect_integer(chunk(x, n.chunks = n.chunks), len = length(x), lower = 1, upper = n.chunks, any.missing = FALSE) x = integer(0); n.chunks = 20 expect_integer(chunk(x, n.chunks = n.chunks), len = length(x), lower = 1, upper = n.chunks, any.missing = FALSE) x = 1:10; chunk.size = 3 res = chunk(x, chunk.size = chunk.size) expect_integer(res, len = length(x), lower = 1, upper = length(x), any.missing = FALSE) expect_integer(table(res), lower = 1, upper = chunk.size, any.missing = FALSE) x = 1:10; chunk.size = 1 res = chunk(x, chunk.size = chunk.size) expect_integer(res, len = length(x), lower = 1, upper = length(x), any.missing = FALSE) expect_integer(table(res), lower = 1, upper = chunk.size, any.missing = FALSE) expect_equal(chunk(numeric(0), chunk.size = 1), integer(0)) expect_equal(chunk(numeric(0), n.chunks = 1), integer(0)) x = 1:10; n.chunks = 2 res = c(rep(1, 5), rep(2, 5)) expect_equal(chunk(x, n.chunks = n.chunks, shuffle = FALSE), res) }) test_that("binpack", { x = 1:10; chunk.size = 10 res = binpack(x, chunk.size = chunk.size) expect_integer(res, len = length(x), lower = 1, upper = length(x), any.missing = FALSE) expect_numeric(sapply(split(x, res), sum), lower = min(x), upper = chunk.size, any.missing = FALSE) x = 1; chunk.size = 10 res = binpack(x, chunk.size = chunk.size) expect_integer(res, len = length(x), lower = 1, upper = length(x), any.missing = FALSE) expect_numeric(sapply(split(x, 
res), sum), lower = min(x), upper = chunk.size, any.missing = FALSE) x = rep(1, 100); chunk.size = 1 res = binpack(x, chunk.size = chunk.size) expect_integer(res, len = length(x), lower = 1, upper = length(x), any.missing = FALSE) expect_numeric(sapply(split(x, res), sum), lower = min(x), upper = chunk.size, any.missing = FALSE) x = runif(100); chunk.size = 1 res = binpack(x, chunk.size = chunk.size) expect_integer(res, len = length(x), lower = 1, upper = length(x), any.missing = FALSE) expect_numeric(sapply(split(x, res), sum), lower = min(x), upper = chunk.size, any.missing = FALSE) expect_equal(binpack(numeric(0), 1), integer(0)) }) test_that("lpt", { x = 1:10; n.chunks = 2 res = lpt(x, n.chunks) expect_integer(res, len = length(x), lower = 1, upper = n.chunks, any.missing = FALSE) expect_numeric(sapply(split(x, res), sum), len = min(length(x), n.chunks), lower = min(x), any.missing = FALSE) x = runif(100); n.chunks = 3 res = lpt(x, n.chunks) expect_integer(res, len = length(x), lower = 1, upper = n.chunks, any.missing = FALSE) expect_numeric(sapply(split(x, res), sum), len = min(length(x), n.chunks), lower = min(x), any.missing = FALSE) x = 1:10; n.chunks = 1 res = lpt(x, n.chunks) expect_integer(res, len = length(x), lower = 1, upper = n.chunks, any.missing = FALSE) expect_numeric(sapply(split(x, res), sum), len = min(length(x), n.chunks), lower = min(x), any.missing = FALSE) x = 1:10; n.chunks = 12 res = lpt(x, n.chunks) expect_integer(res, len = length(x), lower = 1, upper = n.chunks, any.missing = FALSE) expect_numeric(sapply(split(x, res), sum), len = min(length(x), n.chunks), lower = min(x), any.missing = FALSE) expect_equal(unname(res), 10:1) expect_equal(lpt(numeric(0), 1), integer(0)) }) test_that("caching works", { reg = makeTestExperimentRegistry() p1 = addProblem(reg = reg, "p1", data = iris) p2 = addProblem(reg = reg, "p2", data = data.frame(a = 1:10)) a1 = addAlgorithm(reg = reg, name = "a1", fun = function(data, ...) 
nrow(data)) a2 = addAlgorithm(reg = reg, name = "a2", fun = function(data, ...) 2L * nrow(data)) addExperiments(reg = reg) ids = findJobs(reg = reg) ids$chunk = 1L submitAndWait(reg, ids) expect_identical(unlist(reduceResultsList(ids, reg = reg)), as.integer(c(150, 300, 10, 20))) }) batchtools/tests/testthat/test_convertIds.R0000644000176200001440000000337613201027261020723 0ustar liggesuserscontext("convertIds helper") test_that("convertIds", { reg = makeTestRegistry() batchMap(identity, 1:10, reg = reg) reg$status = reg$status[-3, ] tab = convertIds(reg, NULL) expect_equal(tab, NULL) tab = convertIds(reg, 1:10) expect_data_table(tab, ncol = 1, nrow = 9, key = "job.id") expect_copied(tab, reg$status) tab = convertIds(reg, findJobs(reg = reg)) expect_data_table(tab, ncol = 1, nrow = 9, key = "job.id") expect_copied(tab, reg$status) tab = convertIds(reg, data.table(job.id = 3:4, key = "job.id")) expect_data_table(tab, ncol = 1, nrow = 1, key = "job.id") tab = convertIds(reg, as.data.frame(findJobs(reg = reg))) expect_data_table(tab, ncol = 1, key = "job.id") expect_copied(tab, reg$status) tab = convertIds(reg, 10:8) expect_data_table(tab, ncol = 1, nrow = 3, key = "job.id") expect_equal(tab$job.id, 8:10) expect_copied(tab, reg$status) tab = convertIds(reg, 10:8, keep.order = TRUE) expect_data_table(tab, ncol = 1, nrow = 3) expect_equal(tab$job.id, 10:8) ids = findJobs(reg = reg) ids$chunk = 9:1 tab = convertIds(reg, ids, keep.order = TRUE, keep.extra = "chunk") expect_data_table(tab, ncol = 2, nrow = 9, key = "job.id") # keep index if possible setorderv(ids, "chunk") tab = convertIds(reg, ids, keep.order = TRUE, keep.extra = "chunk") expect_data_table(tab, ncol = 2, nrow = 9) expect_null(key(tab)) expect_equal(tab$job.id, setdiff(10:1, 3L)) expect_error(convertIds(reg, c(2, 2)), "Duplicated ids") expect_error(convertIds(reg, as.character(1:3)), "not recognized") # issue #40 ids = ids[list(5:10), on = "job.id"][, "chunk" := chunk(job.id, chunk.size = 3)] ids = 
convertIds(reg, ids, keep.extra = c("job.id", "chunk")) expect_data_table(ids, any.missing = FALSE) }) batchtools/tests/testthat/test_ClusterFunctionsMulticore.R0000644000176200001440000000273213432541422024002 0ustar liggesuserscontext("cf multicore") test_that("cf multicore", { skip_on_os("windows") reg = makeTestRegistry() reg$cluster.functions = makeClusterFunctionsMulticore(2) ids = batchMap(Sys.sleep, time = c(2, 2), reg = reg) silent({ submitJobs(1:2, reg = reg) expect_equal(findOnSystem(reg = reg), findJobs(reg = reg)) expect_true(waitForJobs(sleep = 0.2, expire.after = 1, reg = reg)) }) expect_data_table(findOnSystem(reg = reg), nrow = 0) expect_equal(findDone(reg = reg), findJobs(reg = reg)) # check that max.concurrent.jobs works reg = makeTestRegistry() reg$cluster.functions = makeClusterFunctionsMulticore(2) reg$max.concurrent.jobs = 1 ids = batchMap(Sys.sleep, time = c(2, 0), reg = reg) submitAndWait(1:2, reg = reg) tab = getJobStatus(reg = reg) expect_true(diff(tab$started) > 1) }) if (FALSE) { # Multicore cleans up finished processes reg = makeTestRegistry() batchMap(Sys.sleep, rep(0.8, 8), reg = reg) parallel::mccollect() p = self = Multicore$new(4) for (i in 1:4) { p$spawn(makeJobCollection(i, reg = reg)) } expect_data_table(p$jobs, ncol = 2) expect_integer(p$jobs$pid, len = 4L, any.missing = FALSE, lower = 0L) expect_integer(p$jobs$count, len = 4L, any.missing = FALSE, lower = 0L, upper = 1L) Sys.sleep(1.5) p$spawn(makeJobCollection(5L, reg = reg)) expect_integer(p$jobs$pid, len = 1L, any.missing = FALSE, lower = 0L) p$collect(3) p$collect(1) x = parallel::mccollect() expect_true(is.null(x)) } batchtools/tests/testthat/test_ExperimentRegistry.R0000644000176200001440000000312413516316250022453 0ustar liggesuserscontext("ExperimentRegistry") test_that("makeTestExperimentRegistry", { reg = makeTestExperimentRegistry() expect_is(reg, "Registry") expect_is(reg, "ExperimentRegistry") expect_true(is.environment(reg)) expect_directory_exists(reg$file.dir, 
access = "rw") expect_directory_exists(reg$work.dir, access = "r") expect_directory_exists(fs::path(reg$file.dir, c("jobs", "results", "updates", "logs"))) expect_file(fs::path(reg$file.dir, "registry.rds")) expect_character(reg$packages, any.missing = FALSE) expect_character(reg$namespaces, any.missing = FALSE) expect_int(reg$seed, na.ok = FALSE) expect_true(reg$writeable) expect_is(reg$cluster.functions, "ClusterFunctions") expect_list(reg$default.resources, names = "strict") checkTables(reg, any.missing = FALSE, nrows = 0L) expect_character(reg$problems, len = 0L) expect_character(reg$algorithms, len = 0L) expect_output(print(reg), "Experiment Registry") }) test_that("Printer works (#170)", { reg = makeTestExperimentRegistry() expect_character(reg$problems, len = 0L) expect_character(reg$algorithms, len = 0L) expect_output(print(reg), "Problems[[:space:]]*:[[:space:]]*0") expect_output(print(reg), "Algorithms[[:space:]]*:[[:space:]]*0") addProblem("iris", data = iris, reg = reg) addAlgorithm("foo", fun = function(...) 
list(...), reg = reg) expect_character(reg$problems, len = 1L, any.missing = FALSE) expect_character(reg$algorithms, len = 1L, any.missing = FALSE) expect_output(print(reg), "Problems[[:space:]]*:[[:space:]]*1") expect_output(print(reg), "Algorithms[[:space:]]*:[[:space:]]*1") }) batchtools/tests/testthat/test_ClusterFunctionsSSH.R0000644000176200001440000000324613435713122022476 0ustar liggesuserscontext("cf ssh") test_that("cf ssh", { skip_on_os("windows") skip_on_travis() skip_on_cran() reg = makeTestRegistry() if (reg$cluster.functions$name == "Interactive") { workers = list(Worker$new("localhost", ncpus = 2, max.load = 9999)) reg$cluster.functions = makeClusterFunctionsSSH(workers) saveRegistry(reg) fun = function(x) { Sys.sleep(x); is(x, "numeric") } ids = batchMap(fun, x = c(5, 5), reg = reg) silent({ submitJobs(1:2, reg = reg) Sys.sleep(0.2) expect_equal(findOnSystem(reg = reg), findJobs(reg = reg)) expect_true(killJobs(2, reg = reg)$killed) expect_true( waitForJobs(1, sleep = 0.5, reg = reg) ) }) expect_equal(findDone(reg = reg), findJobs(ids = 1, reg = reg)) expect_equal(findNotDone(reg = reg), findJobs(ids = 2, reg = reg)) expect_true(loadResult(1, reg = reg)) } }) if (FALSE) { reg = makeTestRegistry() workers = list(Worker$new("129.217.207.53"), Worker$new("localhost", ncpus = 1)) reg$cluster.functions = makeClusterFunctionsSSH(workers) expect_string(workers[[1L]]$script) expect_string(workers[[2L]]$script) expect_equal(workers[[1L]]$ncpus, 4L) expect_equal(workers[[2L]]$ncpus, 1L) fun = function(x) { Sys.sleep(x); is(x, "numeric") } ids = batchMap(fun, x = 20 * c(1, 1), reg = reg) submitJobs(1:2, reg = reg) expect_equal(findOnSystem(reg = reg), findJobs(reg = reg)) expect_true(killJobs(2, reg = reg)$killed) expect_true(waitForJobs(1, reg = reg, sleep = 1)) expect_equal(findDone(reg = reg), findJobs(ids = 1, reg = reg)) expect_equal(findNotDone(reg = reg), findJobs(ids = 2, reg = reg)) expect_true(loadResult(1, reg = reg)) } 
batchtools/tests/testthat/test_addExperiments.R0000644000176200001440000001063513334761364021573 0ustar liggesuserscontext("addExperiments") test_that("addExperiments handles parameters correctly", { reg = makeTestExperimentRegistry() prob = addProblem(reg = reg, "p1", data = iris, fun = function(job, data, x, y, ...) stopifnot(is.numeric(x) && is.character(y)), seed = 42) algo = addAlgorithm(reg = reg, "a1", fun = function(job, data, instance, a, b, ...) { print(str(a)); checkmate::assertList(a, len = 1, names = "named"); checkmate::assertDataFrame(b); } ) prob.designs = list(p1 = data.table(x = 1:2, y = letters[1:2])) algo.designs = list(a1 = data.table(a = list(list(x = 1)), b = list(iris))) repls = 2 ids = addExperiments(prob.designs, algo.designs, repls = repls, reg = reg) expect_data_table(ids, nrow = 4, key = "job.id") ids = addExperiments(prob.designs, algo.designs, repls = repls, reg = reg) expect_data_table(ids, nrow = 0, key = "job.id") ids = addExperiments(prob.designs, algo.designs, repls = repls + 1L, reg = reg) expect_data_table(ids, nrow = 2, key = "job.id") submitAndWait(reg, ids) expect_true(nrow(findErrors(reg = reg)) == 0) }) test_that("addExperiments creates default designs", { reg = makeTestExperimentRegistry() prob = addProblem(reg = reg, "p1", data = iris) prob = addProblem(reg = reg, "p2", data = cars) algo = addAlgorithm(reg = reg, "a1", fun = function(job, data, instance) nrow(data)) algo = addAlgorithm(reg = reg, "a2", fun = function(job, data, instance) ncol(data)) ids = addExperiments(reg = reg) expect_equal(findExperiments(reg = reg)$job.id, 1:4) expect_equal(as.character(reg$defs$problem), rep(c("p1", "p2"), each = 2)) expect_equal(as.character(reg$defs$algorithm), rep(c("a1", "a2"), times = 2)) }) test_that("addExperiments / user provided designs", { reg = makeTestExperimentRegistry() addProblem(reg = reg, "p1", data = iris, fun = function(...) list(...)) addProblem(reg = reg, "p2", data = cars, fun = function(...) 
list(...)) addAlgorithm(reg = reg, "a1", fun = function(...) list(...)) addAlgorithm(reg = reg, "a2", fun = function(...) ncol(data)) prob.designs = list(p1 = data.table(a = 1, b = 2:4)) algo.designs = list(a1 = data.table(c = 3:8), a2 = data.table()) repls = 1 ids = addExperiments(reg = reg, prob.designs = prob.designs, algo.designs = algo.designs, combine = "bind") expect_data_table(ids, nrow = 9, key = "job.id") tab = getJobPars(reg = reg) pars = unwrap(getJobPars(reg = reg)) expect_set_equal(pars$problem, "p1") expect_set_equal(pars$algorithm, c("a1", "a2")) expect_equal(pars$a, rep(1L, 9)) expect_equal(pars$b, rep(2:4, 3)) expect_equal(pars$c, c(3:8, rep(NA, 3))) expect_error(addExperiments(reg = reg, prob.designs = list(p1 = data.table(job = 2))), "reserved keyword 'job'") expect_error(addExperiments(reg = reg, algo.designs = list(a2 = data.table(instance = "foo"))), "reserved keyword 'instance'") prob.designs = c(prob.designs, list(p2 = data.table())) ids = addExperiments(reg = reg, prob.designs = prob.designs, algo.designs = algo.designs, combine = "bind") expect_data_table(ids, nrow = 7, key = "job.id") expect_data_table(unwrap(getJobPars(reg = reg)), nrow = 16) ids = addExperiments(reg = reg, prob.designs = prob.designs, algo.designs = algo.designs, combine = "crossprod") expect_data_table(ids, nrow = 12, key = "job.id") expect_data_table(unwrap(getJobPars(reg = reg)), nrow = 28) pd = list(p1 = data.frame(foo = letters[1:2])) withr::with_options(list(stringsAsFactors = NULL), { expect_warning(addExperiments(reg = reg, prob.designs = pd), "stringsAsFactors") }) withr::with_options(list(stringsAsFactors = TRUE), { expect_warning(addExperiments(reg = reg, prob.designs = pd), "stringsAsFactors") }) withr::with_options(list(stringsAsFactors = FALSE), { addExperiments(reg = reg, prob.designs = pd) }) }) # reg = makeTestExperimentRegistry() # addProblem(reg = reg, "p1", data = iris, fun = function(job, data, ...) 
nrow(data)) # addAlgorithm(reg = reg, "a1", fun = function(job, data, instance, ...) NULL) # addAlgorithm(reg = reg, "a2", fun = function(job, data, instance, ...) NULL) # prob.designs = list(p1 = data.table(x = 1:500)) # algo.designs = list(a1 = data.table(y = 1:50), a2 = data.table(y = 1:20)) # repls = 2 # profvis::profvis(addExperiments(prob.designs, algo.designs = algo.designs, repls = repls, reg = reg)) # ids = findExperiments(reg = reg) # profvis::profvis(submitJobs(ids = s.chunk(ids), reg = reg)) # profvis::profvis(unwrap(getJobPars(reg = reg))) batchtools/tests/testthat/test_waitForJobs.R0000644000176200001440000000216113432541161021031 0ustar liggesuserscontext("waitForJobs") test_that("waitForJobs", { reg = makeTestRegistry() fun = function(x) if (x == 2) stop(x) else x ids = batchMap(reg = reg, fun, 1:2) silent({ submitJobs(ids, reg = reg) expect_true(waitForJobs(ids = ids[1], reg = reg, sleep = 1)) expect_false(waitForJobs(ids = ids, stop.on.error = TRUE, sleep = 1, expire.after = 3, reg = reg)) }) }) test_that("waitForJobs: detection of expired jobs", { reg = makeTestRegistry() if (is.null(reg$cluster.functions$killJob)) skip("Test requires killJobs") ids = batchMap(reg = reg, Sys.sleep, c(20, 20)) ids$chunk = 1L silent({ submitJobs(ids, reg = reg) batch.ids = reg$status$batch.id reg$cluster.functions$killJob(reg, batch.ids[1]) expect_warning(waitForJobs(ids, reg = reg, sleep = 1, stop.on.expire = TRUE), "disappeared") }) }) test_that("waitForJobs: filter out unsubmitted jobs", { reg = makeTestRegistry() ids = batchMap(identity, 1:2, reg = reg) silent({ submitJobs(ids = 1, reg = reg) expect_warning(res <- waitForJobs(ids = ids, reg = reg, sleep = 1), "unsubmitted") expect_true(res) }) }) batchtools/tests/testthat/test_reduceResults.R0000644000176200001440000001522413606055110021432 0ustar liggesuserscontext("reduceResults") suppressMessages({ reg = makeTestRegistry() fun = function(...) list(...) 
ids = batchMap(fun, a = 1:4, b = 4:1, reg = reg) submitAndWait(reg, 1:3) }) test_that("loadResult", { expect_equal(loadResult(reg = reg, 1), list(a = 1, b = 4)) expect_equal(loadResult(reg = reg, 2), list(a = 2, b = 3)) expect_error(loadResult(reg = reg, 4), "not terminated") }) test_that("batchMapResults", { target = makeTestRegistry() x = batchMapResults(target = target, function(x, c, d) x$a+x$b + c + d, c = 11:13, source = reg, more.args = list(d = 2)) expect_data_table(x, nrow = 3, key = "job.id") expect_data_table(target$status, nrow = 3) submitAndWait(target) res = unwrap(reduceResultsDataTable(reg = target)) expect_equal(res[[2L]], 11:13 + rep(5, 3) + 2) }) test_that("reduceResults", { silent({ expect_equal(reduceResults(fun = function(aggr, res, ...) c(aggr, res$a), init = integer(0), reg = reg), 1:3) expect_equal(reduceResults(ids = 1, fun = c, reg = reg), list(a = 1, b = 4)) expect_equal(reduceResults(ids = 1, fun = c, list(c = 1), reg = reg)$c, 1) expect_equal(reduceResults(fun = function(aggr, res, extra.arg, ...) aggr + res$a + extra.arg, init = 0, extra.arg = 1, reg = reg), sum(1:3 + 1)) expect_equal(reduceResults(fun = function(job, aggr, res) c(aggr, job$id), init = integer(0), ids = 2:3, reg = reg), 2:3) expect_list(reduceResults(fun = function(job, aggr, res) c(aggr, list(job)), init = list(), ids = 2:3, reg = reg), types = "Job", len = 2) expect_equal( reduceResults(fun = function(aggr, res, ...) c(aggr, res$a), ids = 3:1, init = integer(0), reg = reg), rev(reduceResults(fun = function(aggr, res, ...) c(aggr, res$a), ids = 1:3, init = integer(0), reg = reg)) ) expect_error(reduceResults(fun = function(aggr, res, ...) 
c(aggr, res$a), ids = 1:4, init = integer(0), reg = reg), "successfully computed") }) }) test_that("reduceResultsList", { silent({ expect_equal(reduceResultsList(reg = reg), Map(fun, a = 1:3, b = 4:2)) expect_equal(reduceResultsList(reg = reg, fun = function(x) x$a), as.list(1:3)) expect_equal(reduceResultsList(reg = reg, fun = function(x, y) x$a + y, y = 1), as.list(1:3 + 1)) expect_list(reduceResultsList(reg = reg, fun = function(job, ...) job), types = "Job", len = 3) expect_equal(reduceResultsList(ids = 2:1, reg = reg), rev(reduceResultsList(ids = 1:2, reg = reg))) }) }) test_that("reduceResultsDataTable", { silent({ tab = unwrap(reduceResultsDataTable(reg = reg)) expect_data_table(tab, nrow = 3, ncol = 3, key = "job.id") expect_null(tab$result) expect_equal(tab$a, 1:3) tab = unwrap(reduceResultsDataTable(reg = reg, fun = function(x) list(a = x$a))) expect_data_table(tab, nrow = 3, ncol = 2, key = "job.id") expect_equal(tab$a, 1:3) tab = unwrap(reduceResultsDataTable(reg = reg, ids = 3:2, fun = function(x) list(a = x$a))) expect_data_table(tab, nrow = 2, ncol = 2, key = "job.id") expect_equal(tab$a, 2:3) tab = unwrap(reduceResultsDataTable(reg = reg, fun = function(x) x$a)) expect_data_table(tab, nrow = 3, ncol = 2, key = "job.id") expect_equal(tab$result.1, 1:3) tab = unwrap(reduceResultsDataTable(reg = reg, fun = function(x, y) x$a + y, y = 1)) expect_data_table(tab, nrow = 3, ncol = 2, key = "job.id") expect_equal(tab$result.1, 1:3 + 1L) }) }) test_that("reduceResultsDataTable/unwrap simple", { silent({ tab = reduceResultsDataTable(reg = reg) expect_data_table(tab, nrow = 3, ncol = 2, key = "job.id") expect_set_equal(names(tab), c("job.id", "result")) expect_list(tab$result[[1]], types = "numeric", len = 2) tab = unwrap(tab) expect_data_table(tab, ncol = 3) expect_equal(tab$job.id, 1:3) expect_equal(tab$a, 1:3) expect_equal(tab$b, 4:2) }) }) suppressMessages({ reg = makeTestExperimentRegistry() prob = addProblem(reg = reg, "p1", fun = function(job, data, 
...) 2, seed = 42) algo = addAlgorithm(reg = reg, "a1", fun = function(job, data, instance, sq) instance^sq) ids = addExperiments(list(p1 = data.table()), list(a1 = data.table(sq = 1:3)), reg = reg) submitAndWait(reg = reg) }) test_that("reduceResults/BatchExperiments", { silent({ expect_equal(reduceResults(fun = function(aggr, res, ...) c(aggr, res), init = integer(0), reg = reg), 2^(1:3)) expect_equal(reduceResults(ids = 2:3, fun = function(aggr, job, res, ...) c(aggr, job$id), init = integer(0), reg = reg), 2:3) expect_list(reduceResults(fun = function(job, aggr, res) c(aggr, list(job)), init = list(), ids = 2:3, reg = reg), types = "Experiment", len = 2) }) }) test_that("reduceResultsList/BatchExperiments", { silent({ expect_equal(reduceResultsList(reg = reg), as.list(2^(1:3))) expect_equal(reduceResultsList(fun = function(job, ...) job$prob.name, reg = reg), as.list(rep("p1", 3))) expect_equal(reduceResultsList(fun = function(job, ...) job$algo.name, reg = reg), as.list(rep("a1", 3))) expect_equal(reduceResultsList(fun = function(job, ...) job$instance, reg = reg), as.list(rep(2, 3))) }) }) test_that("reduceResults with no results reg", { silent({ reg = makeTestRegistry() expect_equal(reduceResults(fun = c, reg = reg), NULL) expect_equal(reduceResults(fun = c, reg = reg, init = 42), 42) expect_equal(reduceResultsList(reg = reg), list()) fun = function(...) list(...) ids = batchMap(fun, a = 1:3, b = 3:1, reg = reg) expect_equal(reduceResults(fun = c, reg = reg), NULL) expect_equal(reduceResults(fun = c, reg = reg, init = 42), 42) expect_equal(reduceResultsList(reg = reg), list()) }) }) test_that("reduceResultsList/NULL", { reg = makeTestRegistry() f = function(...) 
NULL ids = batchMap(f, 1:3, reg = reg) submitAndWait(ids, reg = reg) res = reduceResultsList(ids = ids, reg = reg) expect_equal(res, replicate(3, NULL, simplify = FALSE)) }) test_that("reduceResultsDataTable/multiRowResults", { silent({ reg = makeTestRegistry() fun = function(a) data.frame(y1 = rep(a, 3), y2 = rep(a/2, 3)) ids = batchMap(fun, a = c(10, 100), reg = reg) submitAndWait(reg, ids) res = reduceResultsDataTable(reg = reg) expect_data_frame(res, ncol = 2, nrow = 2) expect_list(res$result, types = "data.frame", len = 2L) }) }) test_that("reduceResultsDataTable/unwrap objects", { silent({ reg = makeTestRegistry() fun = function(...) iris ids = batchMap(fun, i = 1:2, reg = reg) submitAndWait(reg, 1:2) tab = reduceResultsDataTable(reg = reg) expect_data_table(tab, nrow = 2, ncol = 2, key = "job.id") expect_set_equal(names(tab), c("job.id", "result")) expect_list(tab$result, types = "data.frame", names = "unnamed") }) }) batchtools/tests/testthat/test_Problem.R0000644000176200001440000000465013516316111020203 0ustar liggesuserscontext("addProblem") test_that("addProblem / removeProblem", { reg = makeTestExperimentRegistry() prob = addProblem(reg = reg, "p1", data = iris, fun = function(job, data, ...) nrow(data)) expect_is(prob, "Problem") expect_equal(prob$data, iris) expect_equal(prob$name, "p1") expect_function(prob$fun) expect_null(prob$seed) expect_file_exists(getProblemURI(reg, prob$name)) expect_false(prob$cache) expect_false(fs::dir_exists(getProblemCacheDir(reg, "p1"))) prob = addProblem(reg = reg, "p2", fun = function(...) NULL, seed = 42, cache = TRUE) expect_is(prob, "Problem") expect_null(prob$data, NULL) expect_equal(prob$name, "p2") expect_function(prob$fun) expect_identical(prob$seed, 42L) expect_file_exists(getProblemURI(reg, prob$name)) expect_true(prob$cache) expect_directory_exists(getProblemCacheDir(reg, "p2")) algo = addAlgorithm(reg = reg, "a1", fun = function(job, data, instance, ...) 
NULL) prob.designs = list(p1 = data.table(), p2 = data.table()) algo.designs = list(a1 = data.table()) ids = addExperiments(prob.designs, algo.designs, repls = 2, reg = reg) expect_integer(ids$job.id, len = 4L) removeProblems(reg = reg, "p1") expect_directory_exists(getProblemCacheDir(reg, "p2")) expect_integer(reg$status$job.id, len = 2L) expect_set_equal("p2", reg$problems) expect_false(fs::file_exists(getProblemURI(reg, "p1"))) expect_true(fs::file_exists(getProblemURI(reg, "p2"))) expect_set_equal(getJobPars(reg = reg)$problem, "p2") checkTables(reg) removeProblems(reg = reg, "p2") expect_false(fs::dir_exists(getProblemCacheDir(reg, "p2"))) }) test_that("instance caching", { reg = makeTestExperimentRegistry() addProblem(reg = reg, "p1", data = iris, fun = function(job, data, param) param * 10 + runif(1), seed = 1, cache = TRUE) addAlgorithm(reg = reg, "a1", fun = function(job, data, instance, ...) list(result = instance)) addAlgorithm(reg = reg, "a2", fun = function(job, data, instance, ...) 
list(result = instance)) ids = addExperiments(prob.designs = list(p1 = data.table(param = 1:2)), repls = 2, reg = reg) j = makeJob(1, reg = reg) foo = j$instance expect_file_exists(getProblemCacheURI(j)) submitAndWait(reg = reg) tab = unwrap(ljoin(getJobTable(reg = reg)[, c("job.id", "repl", "problem", "prob.pars", "algorithm")], reduceResultsDataTable(reg = reg))) expect_equal(tab[, list(v = var(result)), by = c("param", "problem", "repl")]$v, rep(0, 4)) }) batchtools/tests/testthat/helper.R0000644000176200001440000001433213462045057017031 0ustar liggesuserslibrary("testthat") library("data.table") library("checkmate") library("stringi") requireNamespace("withr") options(datatable.rbindlist.check="error") is_on_ci = function() { identical(Sys.getenv("APPVEYOR"), "True") || identical(Sys.getenv("TRAVIS"), "true") } getSysConf = function() { conf.file = findConfFile() if (!checkmate::testScalarNA(conf.file)) { ee = new.env() sys.source(conf.file, envir = ee) as.list(ee) } else { list() } } makeTestRegistry = function(file.dir = NA, make.default = FALSE, ...) { reg = makeRegistry(file.dir = file.dir, make.default = make.default, ...) # cleanup registry directories if not a subdirectory of R's temp dir if ((is.na(file.dir) && !identical(reg$temp.dir, fs::path_temp()))) reg.finalizer(e = reg, f = function(reg) if (fs::dir_exists(reg$file.dir)) fs::dir_delete(reg$file.dir), onexit = TRUE) return(reg) } makeTestExperimentRegistry = function(file.dir = NA, make.default = FALSE, ...) { reg = makeExperimentRegistry(file.dir = file.dir, make.default = make.default, ...) 
# cleanup registry directories if not a subdirectory of R's temp dir if ((is.na(file.dir) && !identical(reg$temp.dir, fs::path_temp()))) reg.finalizer(e = reg, f = function(reg) if (fs::dir_exists(reg$file.dir)) fs::dir_delete(reg$file.dir), onexit = TRUE) return(reg) } silent = function(expr) { withr::with_options(list(batchtools.progress = FALSE, batchtools.verbose = FALSE), expr) } s.chunk = function(ids) { ids$chunk = 1L ids } submitAndWait = function(reg, ids = NULL, ..., sleep = 1) { ids = if (is.null(ids)) findNotSubmitted(reg = reg) else convertIds(reg, ids, keep.extra = names(ids)) if ("chunk" %chnin% names(ids)) ids = s.chunk(ids) silent({ ids = submitJobs(ids = ids, ..., reg = reg) waitForJobs(ids, expire.after = 10L, reg = reg, sleep = sleep) }) } suppressAll = function (expr) { silent(capture.output({z = suppressWarnings(suppressMessages(suppressPackageStartupMessages(force(expr))))})) invisible(z) } checkTables = function(reg, ...) { checkmate::expect_string(reg$hash) checkmate::expect_posixct(reg$mtime, len = 1L) if (class(reg)[1L] == "Registry") { cols = c("def.id", "job.pars") types = c("integer", "list") } else { cols = c("def.id", "problem", "prob.pars", "algorithm", "algo.pars", "pars.hash") types = c("integer", "character", "list", "character", "list", "character") } expect_is(reg$defs, "data.table") checkmate::expect_data_table(reg$defs, ncols = length(cols), ...) 
checkmate::expect_set_equal(colnames(reg$defs), cols) expect_equal(as.character(reg$defs[, lapply(.SD, class), .SDcols = cols]), types) expect_equal(key(reg$defs), "def.id") expect_equal(anyDuplicated(reg$defs, by = "def.id"), 0L) if (class(reg)[1L] == "Registry") { cols = c("job.id", "def.id", "submitted", "started", "done", "error", "mem.used", "resource.id", "batch.id", "log.file", "job.hash", "job.name") types = c("integer", "integer", "numeric", "numeric", "numeric", "character", "numeric", "integer", "character", "character", "character", "character") } else { cols = c("job.id", "def.id", "submitted", "started", "done", "error", "mem.used", "resource.id", "batch.id", "log.file", "job.hash", "job.name", "repl") types = c("integer", "integer", "numeric", "numeric", "numeric", "character", "numeric", "integer", "character", "character", "character", "character", "integer") } expect_is(reg$status, "data.table") checkmate::expect_data_table(reg$status, ncols = length(cols), ...) checkmate::expect_set_equal(colnames(reg$status), cols) expect_equal(as.character(reg$status[, lapply(.SD, class), .SDcols = cols]), types) expect_equal(key(reg$status), "job.id") expect_equal(anyDuplicated(reg$status, by = "job.id"), 0L) checkStatusIntegrity(reg) cols = c("resource.id", "resource.hash", "resources") types = c("integer", "character", "list") checkmate::expect_data_table(reg$resources, ncols = length(cols), ...) checkmate::expect_set_equal(colnames(reg$resources), cols) expect_equal(as.character(reg$resources[, lapply(.SD, class), .SDcols = cols]), types) expect_equal(key(reg$resources), "resource.id") expect_equal(anyDuplicated(reg$resources, by = "resource.id"), 0L) cols = c("job.id", "tag") types = c("integer", "character") checkmate::expect_data_table(reg$tags, ncols = length(cols), ...) 
checkmate::expect_set_equal(colnames(reg$tags), cols) expect_equal(as.character(reg$tags[, lapply(.SD, class), .SDcols = cols]), types) expect_equal(key(reg$tags), "job.id") if (class(reg)[1L] == "ExperimentRegistry") { checkmate::expect_character(reg$problems, any.missing = FALSE, unique = TRUE) checkmate::expect_character(reg$algorithms, any.missing = FALSE, unique = TRUE) checkmate::expect_integer(reg$status$repl, lower = 1L, any.missing = FALSE) checkmate::expect_subset(reg$defs$problem, reg$problems) checkmate::expect_subset(reg$defs$algorithm, reg$algorithms) } expect_key_set_equal(reg$defs, reg$status, by = "def.id") expect_key_set_equal(reg$status[!is.na(resource.id)], reg$resources, by = "resource.id") if (nrow(reg$status) > 0L) checkmate::expect_data_table(ajoin(reg$tags, reg$status, by = "job.id"), nrow = 0) else expect_equal(nrow(reg$tags), 0) } checkStatusIntegrity = function(reg) { tab = reg$status[, list(job.id, code = (!is.na(submitted)) + 2L * (!is.na(started)) + 4L * (!is.na(done)) + 8L * (!is.na(error)))] # submitted started done error # 2^0 2^1 2^2 2^3 # 1 2 4 8 # ------------------------------------------------------ # 0 0 0 0 -> 0 (unsubmitted) # 1 0 0 0 -> 1 (submitted) # 1 1 0 0 -> 3 (started) # 1 1 1 0 -> 7 (done) # 1 1 1 1 -> 15 (error) checkmate::expect_subset(tab$code, c(0L, 1L, 3L, 7L, 15L), info = "Status Integrity") } expect_copied = function(x, y) { expect_false(data.table:::address(x) == data.table:::address(y)) } expect_key_set_equal = function(x, y, by = NULL) { expect_true(nrow(ajoin(x, y, by = by)) == 0 && nrow(ajoin(y, x, by = by)) == 0) } batchtools/tests/testthat/test_manual.R0000644000176200001440000000133713200617366020065 0ustar liggesuserscontext("manual expensive tests") test_that("rscimark", { skip("manual test") reg = makeTestRegistry(package = "rscimark") reg$cluster.functions = makeClusterFunctionsMulticore(4) batchMap(rscimark, minimum.time = rep(1, 5), reg = reg) submitJobs(reg = reg) waitForJobs(reg = reg, sleep = 
1) tab = getJobTable(reg = reg) expect_true(tab$started[5] >= min(tab$done[1:4])) reg = makeTestRegistry() reg$cluster.functions = makeClusterFunctionsMulticore(4) batchMap(Sys.sleep, rep(3, 4), reg = reg) submitJobs(reg = reg) waitForJobs(reg = reg, sleep = 1) tab = getJobTable(reg = reg) expect_true(all(as.numeric(diff(range(tab$started))) <= 2)) expect_true(all(as.numeric(diff(range(tab$done))) <= 2)) }) batchtools/tests/testthat/test_doJobCollection.R0000644000176200001440000000352213303460717021657 0ustar liggesuserscontext("doJobCollection") test_that("doJobCollection handles bulky log output", { N = 1e5 reg = makeTestRegistry() fun = function(N) print(paste(rep("a", N), collapse = "")) batchMap(fun, N, reg = reg) jc = makeJobCollection(1, reg = reg) fn = fs::file_temp() doJobCollection(jc, output = fn) lines = readLines(fn) expect_true(any(nchar(lines) >= N)) fs::file_delete(fn) }) test_that("doJobCollection truncates error messages", { N = 5000 # R truncates stop() at 2^13 chars reg = makeTestRegistry() fun = function(N) stop(paste(rep("a", N), collapse = "")) batchMap(fun, N, reg = reg) jc = makeJobCollection(1, reg = reg) fn = fs::file_temp() doJobCollection(jc, output = fn) syncRegistry(reg = reg) msg = getErrorMessages(reg = reg)$message expect_true(stri_endswith_fixed(msg, " [truncated]")) fs::file_delete(fn) }) test_that("doJobCollection does not swallow warning messages", { reg = makeTestRegistry() reg$cluster.functions = makeClusterFunctionsInteractive(external = TRUE) fun = function(x) warning("GREPME") batchMap(fun, 1, reg = reg) submitAndWait(reg, 1) expect_data_table(findErrors(reg = reg), nrow = 0L) expect_data_table(grepLogs(pattern = "GREPME", reg = reg), nrow = 1L) }) test_that("doJobCollection signals slave errors", { reg = makeTestRegistry() fn = fs::file_temp(ext = ".R", tmp_dir = reg$temp.dir) reg$source = fn saveRegistry(reg) assign("y_on_master", 2, envir = .GlobalEnv) writeLines("x <- y_on_master", fn) rm(y_on_master, envir = 
.GlobalEnv) expect_error(withr::with_dir(reg$work.dir, loadRegistryDependencies(reg, must.work = TRUE)), "y_on_master") batchMap(identity, 1, reg = reg) submitAndWait(reg, 1) expect_data_table(findErrors(reg = reg), nrow = 1) expect_string(getErrorMessages(reg = reg)$message, fixed = "y_on_master") fs::file_delete(fn) }) batchtools/tests/testthat/test_submitJobs.R0000644000176200001440000000271713305230337020727 0ustar liggesuserscontext("submitJobs") test_that("submitJobs", { reg = makeTestRegistry() fun = function(...) list(...) ids = batchMap(fun, i = 1:3, reg = reg) submitAndWait(reg, 1:2, resources = list(foo = "bar")) checkTables(reg) expect_integer(reg$status[1:2, resource.id], any.missing = FALSE) expect_character(reg$status[1:2, batch.id], any.missing = FALSE) expect_numeric(reg$status[1:2, submitted], any.missing = FALSE) expect_true(is.na(reg$status[3, submitted])) x = reg$resources[1, resources][[1L]] y = insert(reg$default.resources, list(foo = "bar")) if (isTRUE(y$chunks.as.arrayjobs) && is.na(reg$cluster.functions$array.var)) y$chunks.as.arrayjobs = NULL expect_equal(x[order(names2(x))], y[order(names2(y))]) submitAndWait(reg, 3, resources = list(walltime = 100, memory = 500)) res = reg$resources[2, resources][[1L]] expect_equal(res$walltime, 100) expect_equal(res$memory, 500) # should be 2 chunks? expect_equal(uniqueN(reg$status$job.hash), 2) }) test_that("per job resources", { reg = makeTestRegistry() fun = function(...) list(...) 
ids = batchMap(fun, i = 1:3, reg = reg) ids$walltime = as.integer(c(180, 120, 180)) ids$chunk = 1:3 submitAndWait(reg, ids = ids) res = reg$resources expect_data_table(res, nrow = 2) expect_equal(uniqueN(res, by = "resource.hash"), 2L) expect_set_equal(rbindlist(res$resources)$walltime, c(120L, 180L)) ids$chunk = 1L expect_error(submitJobs(ids, reg = reg), "per-job") }) batchtools/tests/testthat/test_getJobTable.R0000644000176200001440000001271013214447313020765 0ustar liggesuserscontext("getJobTable") test_that("getJobTable.Registry", { reg = makeTestRegistry() fun = function(i, j) i + j ids = batchMap(fun, i = 1:4, j = rep(1, 4), reg = reg) tab = getJobTable(reg = reg) expect_data_table(tab, nrows = 4, ncols = 15, key = "job.id") expect_list(tab$job.pars) expect_equal(tab$job.pars[[1]], list(i = 1L, j = 1)) tab = unwrap(tab) expect_data_table(tab, nrows = 4, ncols = 15, key = "job.id") expect_null(tab[["job.pars"]]) expect_equal(tab$i, 1:4) expect_equal(tab$j, rep(1, 4)) expect_is(tab$submitted, "POSIXct") expect_is(tab$started, "POSIXct") expect_is(tab$done, "POSIXct") expect_is(tab$time.queued, "difftime") expect_numeric(tab$time.queued, lower = 0) expect_is(tab$time.running, "difftime") expect_numeric(tab$time.running, lower = 0) expect_character(tab$tags) expect_true(allMissing(tab$tags)) tab = unwrap(getJobTable(reg = reg), sep = ".") expect_null(tab[["job.pars"]]) expect_equal(tab$job.pars.i, 1:4) expect_equal(tab$job.pars.j, rep(1, 4)) # be sure that the original tables are untouched checkTables(reg) submitAndWait(reg = reg, ids = s.chunk(ids), resources = list(my.walltime = 42L)) addJobTags(2:3, "my_tag", reg = reg) tab = getJobTable(reg = reg) expect_data_table(tab, key = "job.id") expect_copied(tab, reg$status) expect_is(tab$submitted, "POSIXct") expect_is(tab$started, "POSIXct") expect_is(tab$done, "POSIXct") expect_is(tab$time.queued, "difftime") expect_numeric(tab$time.queued, lower = 0) expect_is(tab$time.running, "difftime") 
expect_numeric(tab$time.running, lower = 0) expect_character(tab$tags, min.len = 1L) tab = getJobResources(reg = reg) expect_data_table(tab, nrow = 4, ncols = 2, key = "job.id") expect_copied(tab, reg$resources) expect_set_equal(tab$resource.hash[1], tab$resource.hash) expect_list(tab$resources) expect_true(all(vlapply(tab$resources, function(r) r$my.walltime == 42))) tab = unwrap(getJobResources(reg = reg)) expect_null(tab[["resources"]]) expect_integer(tab$my.walltime, any.missing = FALSE) }) test_that("getJobPars", { reg = makeTestRegistry() fun = function(i, j) i + j ids = batchMap(fun, i = 1:4, j = rep(1, 4), reg = reg) tab = getJobPars(reg = reg) expect_data_table(tab, nrow = 4, ncol = 2, key = "job.id") tab = unwrap(tab) expect_copied(tab, reg$defs) expect_null(tab$job.pars) expect_equal(tab$i, 1:4) expect_equal(tab$j, rep(1, 4)) tab = unwrap(getJobPars(reg = reg, ids = 1:2)) expect_data_table(tab, nrow = 2, ncol = 3, key = "job.id") tab = unwrap(getJobPars(reg = reg), sep = ".") expect_data_table(tab, nrow = 4, ncol = 3, key = "job.id") expect_equal(tab$job.pars.i, 1:4) expect_equal(tab$job.pars.j, rep(1, 4)) }) test_that("getJobPars with repls", { reg = makeTestExperimentRegistry() prob = addProblem("prob", data = iris, fun = function(data, job) nrow(data), reg = reg) algo = addAlgorithm("algo", fun = function(job, data, instance, i, ...) 
instance, reg = reg) prob.designs = list(prob = data.table()) algo.designs = list(algo = data.table(i = 1:2)) ids = addExperiments(prob.designs, algo.designs, repls = 3, reg = reg) waitForJobs(reg = reg, sleep = 1) ids[, chunk := chunk(job.id, chunk.size = 2)] submitAndWait(ids = ids, reg = reg) expect_equal(nrow(getJobPars(reg = reg)), nrow(ids)) }) test_that("getJobTable.ExperimentRegistry", { reg = makeTestExperimentRegistry() prob = addProblem(reg = reg, "p1", data = iris, fun = function(job, data) nrow(data), seed = 42) algo = addAlgorithm(reg = reg, "a1", fun = function(job, data, instance, sq) instance^sq) ids = addExperiments(list(p1 = data.table(k = 1)), list(a1 = data.table(sq = 1:3)), reg = reg) tab = getJobTable(reg = reg) expect_data_table(tab, nrows = 3, ncols = 19, key = "job.id") expect_copied(tab, reg$status) expect_null(tab$job.pars) expect_list(tab$prob.pars) expect_list(tab$algo.pars) for (i in 1:3) { expect_equal(tab$prob.pars[[i]], list(k = 1)) expect_equal(tab$algo.pars[[i]], list(sq = i)) } expect_equal(tab$problem[1], "p1") expect_equal(tab$algorithm[1], "a1") tab = unwrap(getJobTable(ids = 1:3, reg = reg), c("prob.pars", "algo.pars")) expect_data_table(tab, nrows = 3, ncols = 19, key = "job.id") expect_null(tab[["job.pars"]]) expect_set_equal(tab$k, rep(1, 3)) expect_set_equal(tab$sq, 1:3) tab = unwrap(getJobPars(reg = reg), sep = ".") expect_null(tab[["job.pars"]]) expect_set_equal(tab$prob.pars.k, rep(1, 3)) expect_set_equal(tab$algo.pars.sq, 1:3) }) test_that("experiment registry with vector parameters", { tmp = makeTestExperimentRegistry() fun = function(job, data, n, mean, sd, ...) rnorm(sum(n), mean = mean, sd = sd) addProblem("rnorm", fun = fun, reg = tmp) fun = function(instance, ...) 
sd(instance) addAlgorithm("deviation", fun = fun, reg = tmp) prob.designs = algo.designs = list() prob.designs$rnorm = data.table(expand.grid(n = list(100, 1:4), mean = 0, sd = 1:2)) algo.designs$deviation = data.table() addExperiments(prob.designs, algo.designs, reg = tmp) submitAndWait(reg = tmp) res = getJobPars(reg = tmp) expect_data_table(res, ncol = 5) expect_list(res$prob.pars, len = 4) res = unwrap(res) expect_data_table(res, ncol = 6, nrow = 4, col.names = "unique") expect_list(res$n, len = 4) expect_numeric(res$mean, len = 4, any.missing = FALSE) expect_numeric(res$sd, len = 4, any.missing = FALSE) res = unwrap(res) expect_data_table(res, ncol = 9, nrow = 4, col.names = "unique") }) batchtools/tests/testthat/test_JobCollection.R0000644000176200001440000000540413516316247021341 0ustar liggesuserscontext("JobCollection") test_that("makeJobCollection", { reg = makeTestRegistry() fun = function(...) list(...) ids = batchMap(fun, i = 1:3, reg = reg, more.args = list(x = 1)) jc = makeJobCollection(ids, resources = list(foo = 42), reg = reg) expect_environment(jc, c("file.dir", "job.hash", "jobs", "log.file", "packages", "resources", "uri", "work.dir")) expect_directory_exists(jc$file.dir) expect_string(jc$job.hash, pattern = "^job[[:alnum:]]{32}$") expect_data_table(jc$jobs, key = "job.id") expect_string(jc$log.file) expect_character(jc$packages, any.missing = FALSE) expect_list(jc$resources, names = "unique") expect_string(jc$uri) expect_directory_exists(jc$work.dir) expect_list(jc$jobs$job.pars) expect_string(jc$array.var, na.ok = TRUE) expect_flag(jc$array.jobs) expect_output(print(jc), "Collection") }) test_that("makeJobCollection does not expand relative paths", { skip_on_os("windows") reg = makeTestRegistry(file.dir = NA, make.default = FALSE) batchMap(identity, 1, reg = reg) reg$file.dir = fs::path_abs("~/foo") reg$work.dir = fs::path_abs("~/bar") expect_string(reg$file.dir, pattern = "^~") expect_string(reg$work.dir, pattern = "^~") jc = 
makeJobCollection(1, reg = reg) expect_true(stri_startswith_fixed(jc$file.dir, "~/foo")) expect_true(stri_startswith_fixed(jc$uri, "~/foo/jobs/")) expect_true(stri_startswith_fixed(jc$log.file, "~/foo/logs")) expect_true(stri_startswith_fixed(jc$work.dir, "~/bar")) }) test_that("makeJobCollection.ExperimentCollection", { reg = makeTestExperimentRegistry() addProblem(reg = reg, "p1", fun = function(job, data, ...) list(data = data, ...)) addAlgorithm(reg = reg, "a1", fun = function(job, data, instance, ...) length(instance)) ids = addExperiments(list(p1 = data.table(i = 1:3)), list(a1 = data.table()), reg = reg) jc = makeJobCollection(ids, resources = list(foo = 42), reg = reg) expect_directory_exists(jc$file.dir) expect_string(jc$job.hash, pattern = "^job[[:alnum:]]{32}$") expect_data_table(jc$jobs, key = "job.id") expect_string(jc$log.file) expect_character(jc$packages, any.missing = FALSE) expect_list(jc$resources, names = "unique") expect_string(jc$uri) expect_directory_exists(jc$work.dir) expect_list(jc$jobs$prob.pars) expect_list(jc$jobs$algo.pars) expect_character(jc$jobs$problem) expect_character(jc$jobs$algorithm) expect_string(jc$array.var, na.ok = TRUE) expect_flag(jc$array.jobs) expect_is(jc, "ExperimentCollection") }) test_that("chunks.as.arrayjobs is stored", { reg = makeTestRegistry(file.dir = NA, make.default = FALSE) ids = batchMap(identity, 1:2, reg = reg) resources = list(chunks.as.arrayjobs = TRUE) jc = makeJobCollection(ids, resources = resources, reg = reg) expect_true(jc$array.jobs) }) batchtools/tests/testthat/test_sweepRegistry.R0000644000176200001440000000550013453602073021457 0ustar liggesuserscontext("sweepRegistry") test_that("sweepRegistry", { reg = makeTestRegistry() array.jobs = isTRUE(reg$default.resources$chunks.as.arrayjobs) batchMap(identity, 1, reg = reg) submitAndWait(reg, 1, resources = list(foo = 1)) submitAndWait(reg, 1, resources = list(foo = 2)) writeRDS(makeJobCollection(1, reg = reg), fs::path(reg$file.dir, "jobs", 
"test.rds")) expect_data_table(reg$resources, nrow = 2) expect_character(list.files(dir(reg, "logs")), len = 2L) expect_character(list.files(fs::path(reg$file.dir, "jobs"), pattern = "\\.rds$"), len = 1L + (array.jobs && reg$cluster.functions$store.job.collection) * 2L) expect_character(list.files(fs::path(reg$file.dir, "jobs"), pattern = "\\.job$"), len = (batchtools$debug && array.jobs) * 2L) expect_true(sweepRegistry(reg)) expect_data_table(reg$resources, nrow = 1) expect_character(list.files(dir(reg, "logs")), len = 1L) if (reg$cluster.functions$store.job.collection) expect_character(list.files(fs::path(reg$file.dir, "jobs")), len = 0L) checkTables(reg) reg = makeTestExperimentRegistry() prob = addProblem(reg = reg, "p1", data = iris, fun = function(job, data, ...) nrow(data)) algo = addAlgorithm(reg = reg, "a1", fun = function(job, data, instance, ...) NULL) addExperiments(prob.designs = list(p1 = data.table(i = 1:10)), reg = reg) addJobTags(6:10, "foo", reg = reg) expect_data_table(reg$tags, nrow = 5, any.missing = FALSE) removeExperiments(ids = 6:10, reg = reg) expect_data_table(reg$tags, nrow = 0) checkTables(reg) }) test_that("relative paths work (#113)", { skip_on_cran() skip_if_not(is.null(getSysConf()$temp.dir)) # we are probably on a system where home is not shared fd = sprintf("~/batchtools-test-%s", fs::path_file(fs::file_temp(""))) reg = makeTestExperimentRegistry(file.dir = fd) problems = list("a", "b") pdes = lapply(problems, function(p) { addProblem(name = p, data = p, fun = function(...) 
list(...), reg = reg) res = data.frame(fold = 1:3) }) names(pdes) = problems algo.rep1 = function(job, data, instance, x) { rep(paste(data, x), instance$fold) } algo.rep2 = function(job, data, instance, x) { rep(paste(data, x), instance$fold) } addAlgorithm(name = "rep1", fun = algo.rep1, reg = reg) addAlgorithm(name = "rep2", fun = algo.rep2, reg = reg) ades = list( rep1 = data.table(x = LETTERS[1:3]), rep2 = data.table(x = letters[1:3]) ) addExperiments(pdes, ades, reg = reg) submitAndWait(reg = reg) ids.rep1 = findExperiments(algo.name = "rep1", reg = reg) ids.rep2 = findExperiments(algo.name = "rep2", reg = reg) removeExperiments(ids.rep2, reg = reg) expect_character(getLog(ids.rep1[1], reg = reg), min.len = 1, any.missing = FALSE) expect_list(reduceResultsList(ids = ids.rep1, reg = reg), len = 18) checkTables(reg) fs::dir_delete(fs::path_expand(fd)) }) batchtools/tests/testthat/test_mergeRegistries.R0000644000176200001440000000227013435714125021746 0ustar liggesuserscontext("mergeRegistries") test_that("mergeRegistries", { target = makeTestRegistry() f = function(.job, x) { if (x %in% c(2, 7)) fs::file_create(fs::path(.job$external.dir, "foo")); x^2 } batchMap(f, 1:10, reg = target) td = fs::path(target$temp.dir, fs::path_file(fs::file_temp())) fs::dir_create(td) file.copy(target$file.dir, td, recursive = TRUE) file.dir = fs::path(td, fs::path_file(target$file.dir)) # FIXME: dir_copy? 
source = loadRegistry(file.dir, writeable = TRUE, make.default = FALSE) submitAndWait(target, data.table(job.id = 1:4, chunk = 1L)) submitAndWait(source, data.table(job.id = 6:9, chunk = c(1L, 1L, 1L, 2L))) expect_data_table(findDone(reg = source), nrow = 4) expect_data_table(findDone(reg = target), nrow = 4) mergeRegistries(source, target) expect_data_table(findDone(reg = source), nrow = 4) expect_data_table(findDone(reg = target), nrow = 8) checkTables(target) expect_set_equal(list.files(dir(target, "external")), as.character(c(2, 7))) expect_equal(unwrap(reduceResultsDataTable(reg = target))$result.1, c(1,2,3,4,6,7,8,9)^2) expect_file_exists(fs::path(target$file.dir, "external", c("2", "7"), "foo")) fs::dir_delete(td) }) batchtools/tests/testthat/test_tags.R0000644000176200001440000000330613435713470017547 0ustar liggesuserscontext("Tags") test_that("tags work", { reg = makeTestRegistry() batchMap(identity, 1:10, reg = reg) expect_equal(getUsedJobTags(reg = reg), character()) expect_data_table(findTagged(tag = "foo", reg = reg), nrow = 0, ncol = 1) expect_data_table(removeJobTags(reg = reg, tags = "foo"), nrow = 0, ncol = 1) expect_data_table(addJobTags(1:4, "walltime", reg = reg), nrow = 4, key = "job.id") expect_data_table(addJobTags(3:7, "broken", reg = reg), nrow = 5, key = "job.id") expect_set_equal(getUsedJobTags(reg = reg), c("walltime", "broken")) expect_set_equal(getUsedJobTags(1:2, reg = reg), c("walltime")) addJobTags(tags = c("foo", "bar"), reg = reg) x = getJobTags(reg = reg) expect_true(all(stri_detect_fixed(x$tags, "foo"))) expect_true(all(stri_detect_fixed(x$tags, "bar"))) x = removeJobTags(tags = c("foo", "bar"), reg = reg) expect_data_table(x, ncol = 1, nrow = 10, key = "job.id") x = getJobTags(reg = reg) expect_false(any(stri_detect_fixed(x$tags, "foo"), na.rm = TRUE)) expect_false(any(stri_detect_fixed(x$tags, "bar"), na.rm = TRUE)) x = getJobTags(reg = reg) expect_data_table(x, nrow = 10, ncol = 2, key = "job.id") expect_character(x$tags, 
min.len = 1L) x = findTagged(tags = "broken", reg = reg) expect_data_table(x, nrow = 5, ncol = 1, key = "job.id") expect_equal(x$job.id, 3:7) x = findTagged(tags = "whoops", reg = reg) expect_data_table(x, nrow = 0, ncol = 1, key = "job.id") x = removeJobTags(9:3, "walltime", reg = reg) expect_data_table(x, ncol = 1, nrow = 2, key = "job.id") expect_equal(x$job.id, 3:4) x = getJobTags(reg = reg) expect_equal(x$tags, c(rep("walltime", 2), rep("broken", 5), rep(NA_character_, 3))) checkTables(reg) }) batchtools/tests/testthat/test_findConfFile.R0000644000176200001440000000055513333246470021140 0ustar liggesuserscontext("findConfFile") test_that("findConfFile", { d = fs::path_real(fs::path_temp()) fn = fs::path(d, "batchtools.conf.R") fs::file_create(fn) withr::with_dir(d, expect_equal(findConfFile(), fs::path_abs(fn)) ) withr::with_envvar(list(R_BATCHTOOLS_SEARCH_PATH = d), expect_equal(findConfFile(), fs::path_abs(fn)) ) fs::file_delete(fn) }) batchtools/tests/testthat/test_testJob.R0000644000176200001440000000314113453602073020214 0ustar liggesuserscontext("testJob") test_that("testJob", { reg = makeTestRegistry() f = function(x) if (x %% 2 == 0) stop("foo") else x^2 batchMap(reg = reg, f, 1:3) expect_equal(testJob(reg = reg, id = 1), 1) expect_equal(testJob(reg = reg, id = 3), 9) expect_error(testJob(reg = reg, id = 2), "foo") expect_equal(suppressAll(testJob(reg = reg, id = 1, external = TRUE)), 1) expect_error(suppressAll(testJob(reg = reg, id = 2, external = TRUE)), "re-run") expect_equal(findSubmitted(reg = reg), data.table(job.id = integer(0L), key = "job.id")) expect_equal(findDone(reg = reg), data.table(job.id = integer(0L), key = "job.id")) expect_equal(findErrors(reg = reg), data.table(job.id = integer(0L), key = "job.id")) }) test_that("testJob.ExperimentRegistry", { reg = makeTestExperimentRegistry() prob = addProblem(reg = reg, "p1", data = iris, fun = function(job, data, ...) 
nrow(data), seed = 42) algo = addAlgorithm(reg = reg, "a1", fun = function(job, data, instance, sq, ...) instance^sq) ids = addExperiments(prob.designs = list(p1 = data.table()), algo.designs = list(a1 = data.table(sq = 1:3)), reg = reg) suppressAll(x <- testJob(id = 1, reg = reg)) expect_equal(x, 150) suppressAll(x <- testJob(id = 2, reg = reg, external = TRUE)) expect_equal(x, 150^2) }) test_that("traceback works in external session", { reg = makeTestRegistry() f = function(x) { g = function(x) findme(x) findme = function(x) h(x) h = function(x) stop("Error in h") g(x) } batchMap(f, 1, reg = reg) expect_output(expect_error(testJob(1, external = TRUE, reg = reg), "external=FALSE"), "findme") }) batchtools/tests/testthat/test_findJobs.R0000644000176200001440000001357013251466762020360 0ustar liggesuserscontext("findJobs") none = noIds() test_that("find[Status]", { reg = makeTestRegistry() expect_equal(findJobs(reg = reg), none) expect_equal(findSubmitted(reg = reg), none) expect_equal(findNotSubmitted(reg = reg), none) expect_equal(findStarted(reg = reg), none) expect_equal(findNotStarted(reg = reg), none) expect_equal(findDone(reg = reg), none) expect_equal(findNotDone(reg = reg), none) expect_equal(findErrors(reg = reg), none) expect_equal(findOnSystem(reg = reg), none) expect_equal(findRunning(reg = reg), none) expect_equal(findQueued(reg = reg), none) expect_equal(findExpired(reg = reg), none) fun = function(i) if (i == 3) stop(i) else i ids = batchMap(fun, i = 1:5, reg = reg) all = reg$status[, "job.id"] expect_equal(findJobs(reg = reg), all) expect_equal(findSubmitted(reg = reg), none) expect_equal(findStarted(reg = reg), none) expect_equal(findNotStarted(reg = reg), all) expect_equal(findDone(reg = reg), none) expect_equal(findNotDone(reg = reg), all) expect_equal(findErrors(reg = reg), none) expect_equal(findOnSystem(reg = reg), none) expect_equal(findRunning(reg = reg), none) expect_equal(findQueued(reg = reg), none) expect_equal(findExpired(reg = reg), 
none) submitAndWait(reg, ids) expect_equal(findJobs(reg = reg), all) expect_equal(findSubmitted(reg = reg), all) expect_equal(findNotSubmitted(reg = reg), none) expect_equal(findStarted(reg = reg), all) expect_equal(findNotStarted(reg = reg), none) expect_equal(findDone(reg = reg), all[-3L]) expect_equal(findNotDone(reg = reg), all[3L]) expect_equal(findErrors(reg = reg), all[3L]) expect_equal(findOnSystem(reg = reg), none) expect_equal(findRunning(reg = reg), none) expect_equal(findQueued(reg = reg), none) expect_equal(findExpired(reg = reg), none) }) test_that("Subsetting", { reg = makeTestRegistry() fun = function(i) if (i == 3) stop(i) else i ids = batchMap(fun, i = 1:5, reg = reg) submitAndWait(reg, ids) all = reg$status[, "job.id"] expect_equal(findJobs(ids = 1:3, reg = reg), all[1:3]) expect_equal(findDone(ids = 3, reg = reg), none) expect_equal(findErrors(ids = 1:2, reg = reg), none) expect_equal(findSubmitted(1:5, reg = reg), all) expect_data_table(findSubmitted(6, reg = reg), ncol = 1L, nrow = 0L) }) test_that("findJobs", { reg = makeTestRegistry() fun = function(i, j) i + j ids = batchMap(fun, i = 1:5, j = c(2, 2, 3, 4, 4), reg = reg) all = reg$status[, "job.id"] expect_equal(findJobs(i == 1, reg = reg), all[1]) expect_equal(findJobs(i >= 3, reg = reg), all[3:5]) expect_equal(findJobs(i >= 3 & j > 3, reg = reg), all[4:5]) xi = 2 expect_equal(findJobs(i == xi, reg = reg), all[2]) }) test_that("findOnSystem", { reg = makeTestRegistry() if (is.null(reg$cluster.functions$listJobsRunning)) skip("Test requires listJobsRunning") silent({ ids = batchMap(reg = reg, Sys.sleep, c(10, 10)) submitJobs(reg = reg, ids = s.chunk(ids)) expect_equal(findOnSystem(reg = reg), findJobs(reg = reg)) expect_equal(findExpired(reg = reg), noIds()) # ensure that the registry is not removed before jobs have finished waitForJobs(reg = reg, sleep = 1) }) }) test_that("findExperiments", { reg = makeTestExperimentRegistry() prob = addProblem(reg = reg, "p1", fun = function(job, data, 
n, ...) mean(runif(n)), seed = 42) prob = addProblem(reg = reg, "p2", data = iris, fun = function(job, data) nrow(data)) algo = addAlgorithm(reg = reg, "a1", fun = function(job, data, instance, sq) instance^sq) prob.designs = list(p1 = data.table(n = c(10, 20)), p2 = data.table()) algo.designs = list(a1 = data.table(sq = 1:3)) repls = 10 addExperiments(prob.designs, algo.designs, repls = repls, reg = reg) tab = findExperiments(reg = reg) expect_data_table(tab, nrow = 90, ncol = 1, key = "job.id") tab = findExperiments(reg = reg, prob.name = "p1") expect_data_table(tab, nrow = 60, ncol = 1, key = "job.id") expect_error(findExperiments(reg = reg, prob.name = c("p1", "p2")), "length 1") tab = findExperiments(reg = reg, prob.pattern = "p.") expect_data_table(tab, nrow = 90, ncol = 1, key = "job.id") tab = findExperiments(reg = reg, prob.pattern = "2$") expect_data_table(tab, nrow = 30, ncol = 1, key = "job.id") tab = findExperiments(reg = reg, prob.pattern = "p1", algo.pattern = "a1", repls = 3:4) expect_data_table(tab, nrow = 12, ncol = 1, key = "job.id") tab = findExperiments(reg = reg, prob.pattern = c("^p")) expect_data_table(tab, nrow = 90, ncol = 1, key = "job.id") tab = findExperiments(reg = reg, prob.name = "p2") expect_data_table(tab, nrow = 30, ncol = 1, key = "job.id") tab = findExperiments(reg = reg, ids = 1:10, prob.name = "p1") expect_data_table(tab, nrow = 10, ncol = 1, key = "job.id") tab = findExperiments(reg = reg, algo.name = "a1") expect_data_table(tab, nrow = 90, ncol = 1, key = "job.id") tab = findExperiments(reg = reg, prob.name = "p1", prob.pars = n == 10) expect_data_table(tab, nrow = 30, ncol = 1, key = "job.id") tab = findExperiments(reg = reg, algo.pars = sq == 2) expect_data_table(tab, nrow = 30, ncol = 1, key = "job.id") tab = findExperiments(reg = reg, algo.name = "a1") expect_data_table(tab, nrow = 90, ncol = 1, key = "job.id") tab = findExperiments(reg = reg, algo.pattern = "a.") expect_data_table(tab, nrow = 90, ncol = 1, key = 
"job.id") tab = findExperiments(reg = reg, prob.name = "p") expect_data_table(tab, nrow = 0, ncol = 1, key = "job.id") tab = findExperiments(reg = reg, algo.name = "a") expect_data_table(tab, nrow = 0, ncol = 1, key = "job.id") tab = findExperiments(reg = reg, prob.name = "xxx") expect_data_table(tab, nrow = 0, ncol = 1, key = "job.id") tab = findExperiments(reg = reg, algo.name = "xxx") expect_data_table(tab, nrow = 0, ncol = 1, key = "job.id") tab = findExperiments(reg = reg, repls = 1:2) expect_data_table(tab, nrow = 18, ncol = 1, key = "job.id") }) batchtools/tests/testthat/test_count.R0000644000176200001440000000061113144310031017714 0ustar liggesuserscontext("count") test_that("count", { expect_identical(count(1:3), 3L) expect_identical(count(integer(0L)), 0L) expect_identical(count(list()), 0L) expect_identical(count(c(TRUE, NA, FALSE)), 2L) expect_identical(count(c(1L, NA, 3L)), 2L) expect_identical(count(c(1., NA, 3.)), 2L) expect_identical(count(c("a", NA, "c")), 2L) expect_identical(count(list(1, NULL, 3)), 2L) }) batchtools/tests/testthat/test_summarizeExperiments.R0000644000176200001440000000161513200617654023047 0ustar liggesuserscontext("summarizeExperiments") test_that("summarizeExperiments", { reg = makeTestExperimentRegistry() prob = addProblem(reg = reg, "p1", data = iris, fun = function(job, data) nrow(data), seed = 42) prob = addProblem(reg = reg, "p2", data = iris, fun = function(job, data) nrow(data), seed = 42) algo = addAlgorithm(reg = reg, "a1", fun = function(job, data, instance, sq) instance^sq) ids = addExperiments(list(p1 = data.table(), p2 = data.table(x = 1:2)), list(a1 = data.table(sq = 1:3)), reg = reg) s = summarizeExperiments(reg = reg) expect_data_table(s, nrows = 2, ncols = 3) expect_equal(s$.count, c(3, 6)) expect_equal(s$problem, c("p1", "p2")) expect_equal(s$algorithm, c("a1", "a1")) s = summarizeExperiments(reg = reg, by = c("problem", "algorithm", "x")) expect_data_table(s, nrows = 3, ncols = 4) expect_equal(s$.count, c(3, 
3, 3)) expect_equal(s$x, c(NA, 1, 2)) }) batchtools/tests/testthat/test_ClusterFunctions.R0000644000176200001440000000747313350415471022130 0ustar liggesuserscontext("clusterFunctions") test_that("clusterFunctions constructor", { check = function(cf) { expect_is(cf, "ClusterFunctions") expect_set_equal(names(cf), c("name", "submitJob", "killJob", "listJobsQueued", "listJobsRunning", "store.job.collection", "store.job.files", "array.var", "scheduler.latency", "fs.latency", "hooks")) expect_output(print(cf), "ClusterFunctions for mode") } reg = makeTestRegistry() check(reg$cluster.functions) fn = fs::path(fs::path_temp(), "dummy.tmpl") writeLines("foo", fn) check(makeClusterFunctionsInteractive()) check(makeClusterFunctionsSGE(template = fn)) check(makeClusterFunctionsTORQUE(template = fn)) check(makeClusterFunctionsSlurm(template = fn)) check(makeClusterFunctionsOpenLava(template = fn)) check(makeClusterFunctionsLSF(template = fn)) check(makeClusterFunctionsTORQUE("torque-lido")) check(makeClusterFunctionsSlurm("slurm-dortmund")) check(makeClusterFunctionsDocker("image")) expect_error(makeClusterFunctionsLSF(), "point to a readable template file") skip_on_os(c("windows", "solaris")) # system2 is broken on solaris check(makeClusterFunctionsSSH(workers = list(Worker$new(nodename = "localhost", ncpus = 1L)))) }) test_that("submitJobResult", { x = makeSubmitJobResult(0, 99) expect_is(x, "SubmitJobResult") expect_identical(x$status, 0L) expect_identical(x$batch.id, 99) expect_identical(x$msg, "OK") x = makeSubmitJobResult(1, 99) expect_is(x, "SubmitJobResult") expect_identical(x$msg, "TEMPERROR") x = makeSubmitJobResult(101, 99) expect_is(x, "SubmitJobResult") expect_identical(x$msg, "ERROR") expect_output(print(x), "submission result") x = cfHandleUnknownSubmitError(cmd = "ls", exit.code = 42L, output = "answer to life") expect_is(x, "SubmitJobResult") expect_true(all(stri_detect_fixed(x$msg, c("ls", "42", "answer to life")))) }) test_that("brew", { fn = fs::file_temp() 
lines = c("####", " ", "!!!", "foo=<%= job.hash %>") writeLines(lines, fn) res = stri_split_fixed(cfReadBrewTemplate(fn), "\n")[[1]] assertCharacter(res, len = 3) expect_equal(sum(stri_detect_fixed(res, "job.hash")), 1) res = stri_split_fixed(cfReadBrewTemplate(fn, comment.string = "###"), "\n")[[1]] assertCharacter(res, len = 2) expect_equal(sum(stri_detect_fixed(res, "job.hash")), 1) reg = makeTestRegistry() ids = batchMap(identity, 1:2, reg = reg) jc = makeJobCollection(1, reg = reg) text = cfReadBrewTemplate(fn, comment.string = "###") fn = cfBrewTemplate(text = text, jc = jc, reg = reg) brewed = readLines(fn) expect_equal(brewed[1], "!!!") expect_equal(brewed[2], sprintf("foo=%s", jc$job.hash)) fs::file_delete(fn) }) test_that("Special chars in directory names", { reg = makeTestRegistry() base.dir = fs::file_temp(pattern = "test", tmp_dir = fs::path_dir(reg$file.dir)) fs::dir_create(base.dir) file.dir = fs::path(base.dir, "test#some_frequently-used chars") reg = makeTestRegistry(file.dir = file.dir) batchMap(identity, 1:2, reg = reg) submitAndWait(reg = reg) Sys.sleep(0.2) expect_equal(reduceResultsList(reg = reg), list(1L, 2L)) expect_equal(testJob(1, external = FALSE, reg = reg), 1L) }) test_that("Export of environment variable DEBUGME", { reg = makeTestRegistry() if (reg$cluster.functions$name == "Socket") skip("Environment variables not exported for CF socket") batchMap(function(i) Sys.getenv("DEBUGME"), i = 1, reg = reg) withr::local_envvar(c("DEBUGME" = "grepme")) submitAndWait(reg, 1) res = loadResult(1, reg = reg) expect_string(res, min.chars = 1, fixed = "grepme") }) test_that("findTemplateFile", { d = fs::path_temp() fn = fs::path(d, "batchtools.slurm.tmpl") fs::file_create(fn) withr::with_envvar(list(R_BATCHTOOLS_SEARCH_PATH = d), expect_equal(findTemplateFile("slurm"), fs::path_abs(fn)) ) fs::file_delete(fn) }) batchtools/tests/testthat/test_grepLogs.R0000644000176200001440000000364013423662247020376 0ustar liggesuserscontext("grepLogs") silent({ 
reg = makeTestRegistry() ids = batchMap(reg = reg, function(x) { if (x == 1) { print("FOOBAR: AAA") } else if (x == 2) { cat("FOOBAR: BBB") } else { if (identical(Sys.getenv("TESTTHAT"), "true")) { # testthat uses muffle restarts which breaks our internal # sink() somehow. # https://github.com/r-lib/testthat/issues/460 cat("FOOBAR: CCC", file = stderr()) } else { message("FOOBAR: CCC") } } invisible(NULL) }, x = 1:5) ids$chunk = as.integer(c(1, 1, 2, 3, 4)) submitAndWait(reg, ids[1:4]) }) test_that("grepLogs", { expect_true(any(grepl("AAA", getLog(1, reg = reg)))) expect_true(any(grepl("BBB", getLog(2, reg = reg)))) expect_true(any(grepl("CCC", getLog(3, reg = reg)))) expect_false(any(grepl("AAA", getLog(2, reg = reg)))) expect_data_table(grepLogs(pattern = "FOOBAR", reg = reg), ncol = 2, key = "job.id") expect_equal(grepLogs(pattern = "FOOBAR", reg = reg)$job.id, 1:4) expect_equal(grepLogs(pattern = "XXX", reg = reg)$job.id, integer(0L)) expect_error(grepLogs(pattern = "", reg = reg), "at least") expect_error(grepLogs(pattern = NA, reg = reg), "not be NA") expect_equal(grepLogs(pattern = "AAA", reg = reg)$job.id, 1L) expect_equal(grepLogs(pattern = "BBB", reg = reg)$job.id, 2L) expect_equal(grepLogs(pattern = "CCC", reg = reg)$job.id, 3:4) expect_equal(grepLogs(pattern = "aaa", reg = reg)$job.id, integer(0L)) expect_equal(grepLogs(pattern = "aaa", ignore.case = TRUE, reg = reg)$job.id, 1L) expect_data_table(grepLogs(pattern = "F..BAR", reg = reg), ncol = 2, nrow = 4, key = "job.id") expect_data_table(grepLogs(pattern = "F..BAR", fixed = TRUE, reg = reg), ncol = 2, nrow = 0, key = "job.id") expect_data_table(grepLogs(1:2, pattern = "CCC", reg = reg), nrow = 0, ncol = 2) expect_data_table(grepLogs(5, pattern = "CCC", reg = reg), nrow = 0, ncol = 2) }) batchtools/tests/testthat/test_export.R0000644000176200001440000000345413234300075020124 0ustar liggesuserscontext("Export") test_that("export works", { reg = makeTestRegistry() x = batchExport(list(exported_obj = 
42L), reg = reg) expect_data_table(x, nrow = 1, ncol = 2) expect_set_equal(names(x), c("name", "uri")) expect_equal(x$name, "exported_obj") expect_file_exists(fs::path(reg$file.dir, "exports", mangle("exported_obj"))) withr::with_dir(reg$work.dir, loadRegistryDependencies(reg)) expect_equal(get("exported_obj", envir = .GlobalEnv), 42L) x = batchExport(reg = reg) expect_data_table(x, nrow = 1, ncol = 2) expect_set_equal(names(x), c("name", "uri")) expect_equal(x$name, "exported_obj") x = batchExport(unexport = "exported_obj", reg = reg) expect_data_table(x, nrow = 0, ncol = 2) expect_set_equal(names(x), c("name", "uri")) expect_false(fs::file_exists(fs::path(reg$file.dir, "exports", mangle("exported_obj")))) x = batchExport(list(exported_obj = 43L), reg = reg) batchMap(function(x) exported_obj + x, 1L, reg = reg) submitAndWait(reg) expect_equal(loadResult(1, reg = reg), 44L) rm("exported_obj", envir = .GlobalEnv) }) test_that("export works with funny variable names", { reg = makeTestRegistry() x = batchExport(list(`%bla%` = function(x, y, ...) 
42), reg = reg) expect_data_table(x, nrow = 1, ncol = 2) expect_set_equal(names(x), c("name", "uri")) expect_equal(x$name, "%bla%") expect_file_exists(fs::path(reg$file.dir, "exports", mangle("%bla%"))) withr::with_dir(reg$work.dir, loadRegistryDependencies(reg)) expect_function(get("%bla%", envir = .GlobalEnv)) expect_equal(1 %bla% 2, 42) x = batchExport(unexport = "%bla%", reg = reg) expect_data_table(x, nrow = 0, ncol = 2) expect_set_equal(names(x), c("name", "uri")) expect_false(fs::file_exists(fs::path(reg$file.dir, "exports", mangle("%bla%")))) rm("%bla%", envir = .GlobalEnv) }) batchtools/tests/testthat/test_runOSCommand.R0000644000176200001440000000231113234566643021156 0ustar liggesuserscontext("runOSCommand") test_that("runOSCommand", { skip_on_os(c("windows", "solaris")) # system2 is broken on solaris x = runOSCommand("ls", find.package("batchtools")) expect_list(x, names = "named", len = 4L) expect_names(names(x), permutation.of = c("sys.cmd", "sys.args", "exit.code", "output")) expect_identical(x$exit.code, 0L) expect_true(all(c("DESCRIPTION", "NAMESPACE", "NEWS.md") %chin% x$output)) }) test_that("command not found", { skip_on_os("solaris") # system2 is broken on solaris res = runOSCommand("notfoundcommand") expect_list(res, len = 4) expect_identical(res$exit.code, 127L) expect_identical(res$output, "command not found") expect_error(OSError("Command not found", res), pattern = "Command not found") expect_error(OSError("Command not found", res), pattern = "'notfoundcommand'") expect_error(OSError("Command not found", res), pattern = "exit code 127") }) test_that("stdin", { skip_on_os(c("windows", "solaris")) # system2 is broken on solaris tf = fs::file_temp() lines = letters writeLines(letters, con = tf) res = runOSCommand("cat", stdin = tf) expect_identical(res$exit.code, 0L) expect_identical(res$output, letters) fs::file_delete(tf) }) batchtools/tests/testthat/test_resetJobs.R0000644000176200001440000000327113453602073020546 0ustar 
liggesuserscontext("resetJobs") test_that("resetJobs", { reg = makeTestRegistry() f = function(x, .job) if (x == 2) stop(2) else .job$external.dir batchMap(f, 1:3, reg = reg) before = list( status = copy(reg$status), defs = copy(reg$defs) ) submitAndWait(reg, 1:3) expect_file_exists(getLogFiles(reg, 3)) expect_false(identical(reg$status$submitted, before$status$submitted)) expect_file_exists(getResultFiles(reg, 1)) expect_equal(unname(fs::dir_exists(fs::path(reg$file.dir, "external", 1:3))), c(TRUE, FALSE, TRUE)) resetJobs(1, reg = reg) expect_true(all.equal(before$status[1], reg$status[1])) expect_false(fs::file_exists(getResultFiles(reg, 1))) expect_true(fs::file_exists(getResultFiles(reg, 3))) expect_file_exists(getLogFiles(reg, 3)) expect_equal(unname(fs::dir_exists(fs::path(reg$file.dir, "external", 1:3))), c(FALSE, FALSE, TRUE)) expect_false(fs::file_exists(getResultFiles(reg, 1))) expect_file_exists(getResultFiles(reg, 3)) resetJobs(2:3, reg = reg) expect_data_table(reg$status, key = "job.id") expect_data_table(reg$defs, key = "def.id") expect_equivalent(before$status, reg$status) expect_false(fs::file_exists(getLogFiles(reg, 3))) expect_false(fs::file_exists(getResultFiles(reg, 3))) expect_equal(unname(fs::dir_exists(fs::path(reg$file.dir, "external", 1:3))), c(FALSE, FALSE, FALSE)) }) test_that("functions produce error after resetting jobs", { reg = makeTestRegistry() f = function(x, .job) if (x == 2) stop(2) else .job$external.dir batchMap(f, 1:3, reg = reg) submitAndWait(reg, 1:3) resetJobs(1, reg = reg) expect_error(getLog(1, reg = reg), "not available") expect_error(loadResult(1, reg = reg), "not terminated") }) batchtools/tests/testthat.R0000644000176200001440000000034613144310031015532 0ustar liggesusers# setting R_TESTS to empty string because of # https://github.com/hadley/testthat/issues/144 # revert this when that issue in R is fixed. 
Sys.setenv("R_TESTS" = "") library(testthat) library(batchtools) test_check("batchtools") batchtools/src/0000755000176200001440000000000013606067063013212 5ustar liggesusersbatchtools/src/init.c0000644000176200001440000000131013435725715014321 0ustar liggesusers#include #include #include // for NULL #include /* .Call calls */ extern SEXP c_binpack(SEXP, SEXP, SEXP); extern SEXP c_lpt(SEXP, SEXP, SEXP); extern SEXP count_not_missing(SEXP); extern SEXP fill_gaps(SEXP); static const R_CallMethodDef CallEntries[] = { {"c_binpack", (DL_FUNC) &c_binpack, 3}, {"c_lpt", (DL_FUNC) &c_lpt, 3}, {"count_not_missing", (DL_FUNC) &count_not_missing, 1}, {"fill_gaps", (DL_FUNC) &fill_gaps, 1}, {NULL, NULL, 0} }; void R_init_batchtools(DllInfo *dll) { R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } batchtools/src/lpt.c0000644000176200001440000000166213436166657014174 0ustar liggesusers#include #include #include #define min(a, b) (((a) < (b)) ? (a) : (b)) SEXP attribute_hidden c_lpt(SEXP x_, SEXP order_, SEXP chunks_) { const double * x = REAL(x_); const R_len_t n = length(x_); const int * order = INTEGER(order_); const int chunks = min(INTEGER(chunks_)[0], n); SEXP res = PROTECT(allocVector(INTSXP, n)); int * bin = INTEGER(res); double * sums = malloc(chunks * sizeof(double)); for (R_len_t i = 0; i < chunks; i++) { R_len_t ii = order[i] - 1; bin[ii] = i + 1; sums[i] = x[ii]; } for (R_len_t i = chunks; i < n; i++) { R_len_t ii = order[i] - 1; R_len_t pos = 0; for (R_len_t j = 1; j < chunks; j++) { if (sums[j] < sums[pos]) pos = j; } bin[ii] = pos + 1; sums[pos] += x[ii]; } free(sums); UNPROTECT(1); return res; } batchtools/src/fill_gaps.c0000644000176200001440000000140113144310031015271 0ustar liggesusers#include #include #include /* similar to last observation carried forward, but resets to NA if the last observation is spotted again */ /* used in log file reading: jobs have a start and stop marker, the lines in between belong to the job 
*/ SEXP attribute_hidden fill_gaps(SEXP x) { const R_len_t n = length(x); int last = NA_INTEGER; const int *xi = INTEGER(x); const int * const xend = xi + n; SEXP y = PROTECT(allocVector(INTSXP, n)); int *yi = INTEGER(y); for(; xi != xend; xi++, yi++) { if (*xi == NA_INTEGER) { *yi = last; } else { *yi = *xi; last = (*xi == last) ? NA_INTEGER : *xi; } } UNPROTECT(1); return y; } batchtools/src/Makevars0000644000176200001440000000003413507344432014701 0ustar liggesusersPKG_CFLAGS=${R_DEBUG_FLAGS} batchtools/src/count_not_missing.c0000644000176200001440000000353513144310031017104 0ustar liggesusers#include #include #include static R_len_t count_not_missing_logical(SEXP x) { const int * xp = LOGICAL(x); const int * const xe = xp + length(x); R_len_t count = 0; for (; xp != xe; xp++) { if (*xp != NA_LOGICAL) count++; } return count; } static R_len_t count_not_missing_integer(SEXP x) { const int * xp = INTEGER(x); const int * const xe = xp + length(x); R_len_t count = 0; for (; xp != xe; xp++) { if (*xp != NA_INTEGER) count++; } return count; } static R_len_t count_not_missing_double(SEXP x) { const double * xp = REAL(x); const double * const xe = xp + length(x); R_len_t count = 0; for (; xp != xe; xp++) { if (!ISNAN(*xp)) count++; } return count; } static R_len_t count_not_missing_string(SEXP x) { const R_len_t nx = length(x); R_len_t count = 0; for (R_len_t i = 0; i < nx; i++) { if (STRING_ELT(x, i) != NA_STRING) count++; } return count; } static R_len_t count_not_missing_list(SEXP x) { const R_len_t nx = length(x); R_len_t count = 0; for (R_len_t i = 0; i < nx; i++) { if (!isNull(VECTOR_ELT(x, i))) count++; } return count; } SEXP attribute_hidden count_not_missing(SEXP x) { switch(TYPEOF(x)) { case LGLSXP: return ScalarInteger(count_not_missing_logical(x)); case INTSXP: return ScalarInteger(count_not_missing_integer(x)); case REALSXP: return ScalarInteger(count_not_missing_double(x)); case STRSXP: return ScalarInteger(count_not_missing_string(x)); case VECSXP: return 
ScalarInteger(count_not_missing_list(x)); case NILSXP: return ScalarInteger(0); default: error("Object of type '%s' not supported", type2char(TYPEOF(x))); } } batchtools/src/binpack.c0000644000176200001440000000227113144310031014746 0ustar liggesusers#include #include #include #include SEXP attribute_hidden c_binpack(SEXP x_, SEXP order_, SEXP capacity_) { const double * x = REAL(x_); const R_len_t n = length(x_); const int * order = INTEGER(order_); const double capacity = REAL(capacity_)[0]; R_len_t ii = order[0] - 1; if (x[ii] > capacity) error("Capacity not sufficient. Largest item does not fit."); SEXP res = PROTECT(allocVector(INTSXP, n)); int * bin = INTEGER(res); double * capacities = malloc(n * sizeof(double)); R_len_t bins = 1; bin[ii] = 1; capacities[0] = capacity - x[ii]; for (R_len_t i = 1; i < n; i++) { ii = order[i] - 1; bool packed = false; for (R_len_t pos = 0; !packed && pos < bins; pos++) { if (capacities[pos] >= x[ii]) { packed = true; bin[ii] = pos + 1; capacities[pos] -= x[ii]; break; } } if (!packed) { capacities[bins] = capacity - x[ii]; bins++; bin[ii] = bins; } } free(capacities); UNPROTECT(1); return res; } batchtools/vignettes/0000755000176200001440000000000013606067063014433 5ustar liggesusersbatchtools/vignettes/tikz_prob_algo_simple.png0000644000176200001440000007517713144310031021517 0ustar liggesusersPNG  IHDRG )v9gAMA a cHRMz&u0`:pQ<bKGD̿ pHYs,,sRtIME'.yRIDATxo#i~}[vk9Խۃ4-Q% pެ wicadP0zUS0L/آoZ ]Wb;OC&8],:Q~ A3L~ 3]Ō =$I$I29@$I$}I$Id/I$IL%I$Iɾ$I$I2ٗ$I$d_$I$K$I$}I$Id/I$IL%I$I2ٗ$I$I&$I$b}-GoY:$I45)4?hw+'&K/I$IsW)8m8Ih ВyM,I$I(Z,_KO=4 xc/I$IC^X!& p{0GL%I$iN{)֟%zƳ{$nspA_2ٗ$IWϞo (sd-9L%I$iV´~m!ÏW?c#I}I$I:r؃?Ag{FC/O&$IZޯeKע>-?}|LM%I$iH?æAZ_ ׬u3ٗ$Iʬ6ϯس&x`ߴd_$I EFÉ?quZ8d_$IҲ&eFuί&K9no/I$i%!$?Pwɾ$IKMZi uN8L%I$-<[)'OΣZi=8@&$Ie' |WI~'ǣFK$Ih[5<^>ɗ2$-o/I$鲭R'vw8桽&$I.C|>x`M9Y~}I$Ig2[/O#e:Lɾ$I*38l?ώC5'ww8jK$IeNK[l vKtL/>\d_$IlOSf擦"q:wK$INOEido/I$ia|]y?o/I$i}}Lu);&&$I&Ut~|ZS' q|۫2d_$Itk݈&cnǟȝG=9/I$Igivçoxa~qgdɾ$I)_yҿTk#zyM 
kb/I$le:CݕL{Q)}I$IV>4>{k2ɾ$II{ N %`ɾ$I$ _Q& -I&$I4h g2_ ATػ_^e#L%I$M"_ NΗz5O$I ݣ+9:B u:@rbvY٢H@qw[Rq]zF 9rV{%I$MJV>-wxmwܠE1m:i78H2p܈ [[c-ӵQF{џUN7Iٗ$I]1xLCD mS7xKƳp؜wx>9կCyv󷢭Ng_iyxMI$I ެ77[o޼yf?Lz&Ӿ(M97Ogy7$ljʛg_]]γN/I$)bVX ,܍^;lje5Jro}+==&$ItF|ʛ0'|N~gy Oo <9a}}I$IمXy|1~ޚE!w+&kz e/7ٗ$Idd?&?[z>Cqh4w'_ޫs:$I:)1':LqH܆ج[M/@+lNtXw+ֲ}R{%I$M"솾mIܳ~8967kne'}/w>=$I&0f;фtJJǕ[s 'aZ/,=2~~PwŞ}I$I:f#NaӺsr7[ld_$Id꽜F4Nevv~g9+~J1dK$IZuw%:X^tyi tVB00JխO}I$In':)S .7OO|SLߎRT¿L%I$M{wO3<$yN7| FdkCMhbQ7>Od_$I>Ϡ/Q˲9CcNx~;C1zZ#>VpR1G.i>|l7G%I$)WW'|1W|EG6u|ZIq&aw(G%{<hN:|9'|'>@"-9ȿAgԹZZGEhስuQN8}bGr٢Cg+ҳ'6_dS1ٗ$I4%xgga-J{eTYi"O9!y@q`Bt-(˭]gQIeK}I$IT;MϜJKU'c~wfC}I$Iӕ/o_1ׅjqw` _o؅fɾ$I_j/7}Х:>8~AJM%I$&~㵿z<[8' dH.7ٗ$I4tw_~ko&";l9_S!?n~$֗LL%I$*_;õjn϶(|uLWO1?ɾ$I٥y~Љw_|P?["r$Z}?o|>n~c/I$i wM /~)l(/EELN8'c׋Ზɾ$IYĻ_`5>ͯ-NspG|OIam/L%I$]D^׆,R6W__zB_>ROwrɾ$IJ|?o>oүI׿>< _npg/I$b5=}?)%E|ep/tW[: ɾ$IH֛T+gw"Z;{S$*m/9d97ٗ$I4? ;kqI?Y3_ -#Ss'Oy2>}I$Iǯ%gMLۿ@ ~:Z#R$'՟o/I$i~RoSR~xOb7ywTo?:pBgd>׿VovZ?Y7ٗ$I4 ;JO?{^ÉZ?]:NΘ?/7?۟ iɾ$IK?}Ow"3eaot8E1_bgj)~p-|}I$IǯQLoؿ*yU>kOOYo'~'HoMs hg}I$ICß4?MF$0 Ռ|?[M%I$-CҟH~_ݯwλn>\/rW!'|ʾE'D׿W[o|>Ojγo/I$ixwҞS{ RA)+ğ>}|K).zMM%I$-aҟC+?y]l(yߓT>QBl=7?ůfܟ~M%I$Bu*^4L>B;pW-g޼E܉L%I$j̯W 1A#ҭߗ_A)gtLM%IZ;OYsH$DK yU8&±{,y%/L}~>37ٗ$i >~9QbHo8,bϾ$ɤޤl/χgxm*ɿ٩# 6}Umݻ3Y_-4'$ͷ5?3_ܫ4/|~ͻL%Ig-J&IZ8!]JI GL&@oJowcd_4ITDӨգ[.^_yk w L%I"o=I^&vg}8Pd/I51'M;K;}vF~KfNW ϦE+L9m$Fh*ϧCQ}e/INz\>Y}S2$JH;b5'ɾ$a’>weI8G\$%Cd_4Z|U27}_ZiA~??;}K2ڇL zHWH9L|_Z-y>2A)ܾL%Ii)F> Ibiz若@_y(rɾ$'k~=J&쿗.ZRz>/--܍bB<!T2ٗ [h_4N?_9I>mN!7]&z7Ɂ|IA K F5ŗ IZ1&4zwKJ7? 
0f((I&ӛk-c~~¨P=NKNP(I&Yއ ےƋ7_Kx¿qn_$-'蓤⯍YT>|IRf-Oc_4U'ә9A$]]9P>|I蟧ZbSi_IK=tQz}.RǨYBaoãtqx س3yc/I$ͪTkaJk_(;D}KZ8;!Ŋ]6PJMY5:\{&}IޱpkoߒNyۊ%2ٗW {ʩJEo#tD:wLe/I[ .\ի%-q#Ζ?kLsoL%2V5~ѓ菤x2x]hZ>oRH*;"5p1G#ݗR|:"*>h䒣Ҫ;/R J%QFo/Ic?:㒤Vd#X'E!}I(yc*8b+I42rJ}ocn34>Չȗ$Ek8"VxC:oe&*Ӵvuɍ[q_TA_XcWO/i>iR4hy5xŢ%㗴zLZP}IҴ慨}PJ+'>[ߊJflXޯ0;&I_Ҫ $A x̾ ZoSɾ$ ʏDSW~94ʾ}_}x0mxaL%)P9<ԉ$i^^R;_&V>`(4͵*k^&VШ.$I۷ѯ?E D&Vr}IZ=1DG̐ɾe9`_8bW0Zy>uL%-W)wxМJxƎ#}I'G}O:>oSO~h~҄e/ii*h4K&1~$d_r|}I1Oې˴5PyKZ[%IWd'WMY;fca2ٗ'KZJ?u TCט#Id/i*Dɫ;`_ض8J|S([<"GCW٨$}IysJ^e+بȳ3d/=)!IZNiMolOV֬d/#'$|5/NԵ0ɾJu&KM%IJ[ה 6|jP&Ң/xx#I<7휥ϑ2ٗL%IΓ?o}oFOjɾ7T|Ih) T§ۣ/}in$I][I8╃fgh+X2ٗۖi$IZYㆃg᧤8T)}imM%I|I3 }Q|XVse/-A6$iROGt[mb5gd_}d/In -?GD_&\ ?M P$-Q龩ޗɾ4$i9`?`rd/}inz/ZM%IrJ/xdurꐖLKޫԵh%IG%e8٢P3=d_$k OKU3"ozrž7aBɾtѡ8i 4$InD[dW2ٗ.^$IZhT㳁?g?[Ɨ9DJJO߲-Cr̉ Pu_ZAu*/xwe?MWx_ᯑ+O6]1@~,il5w:.:ܷL2Dq_Yj1(ÝaڏLѶnu9cH$I_}JAjӤ;}=I$I&y婛/'[ki!/,Iͱ[$I$Ir1ٗ$I$ida"ؤa!H$Iٗ$I$d_$I$K$I 5@$iwwclV-IR_*ؿٗ$I$iɘK$Id/-Qy$I$ {%I5iZdg_V<%l[ Q9k;4vvX˲e^jdOd[赘+}IZU*߯.rdc >!My˻ N=*i>yh26jb<~7s0~IZZ|ѿOb/xDns'd.?`j^S?el<ĺy^OP0sM5a!i&4O KR4%<\`si ck5Zc )\%r PɜGw٫/i.%7>6/}շ^Sl%yʔ͐ycq =6Q9` O'zĺL=IgsaAc+)`kE?E)St+ǬCnGk((0oFp4uَ [;p=;D[v8)B%x>gѼ ֧R|7c4hWXr6WG̏Ѥ_^K\ϼ:ׇv<3ۼL,IgB%SRlgqc߽?շ~,-;;CpYb߾R2_cmo?w^}wA#<.=`^TirݡFf,b]8gTẌ{1wN",95`Œ=Ps*IJE!|S(coUJq!N+j3C<9cWdgfNIHlg,;tfkվV *kx}grr&|6؍lR$~_bQ^_g=@. s*{bwpv͔t{w=gT5 ФN+Up滱ULr>X;d5,N~sÍXJ{CRád2)gM2ERr}w]3>E\c<;CC5U=4ips[O*7r||NΏq糎1wcDܙ*|sch0NCEQٌ Vm\@2֗ϲ*|2 w[|?;CCd|ok>{\HC=5tDmOǷFOC!ϹԸ&m Z’}6{RC۶5qȵ342vE/מt>I ZJ**)ݜTNШV[@O:41f[tA:Fa{~Ƒ KYV? aw4}u|Ã(V\GQEp?^nW'qӛ'?پ3sP^r'[P$ `G0Oz8შ>,z ^3}4i|#GDes4\R^N6ن< 6Ltҁ&zE'l_ړ .z)Ph)hj08l'|N5HA3} H;%49L*/=UT|6ߠA5'TnRKw.w~Fozg>7Y;3G3l}x1*ϵ)G"hVxٷB|jq;m-?>729>O$es4jj ~a ?(~aha=0(}dJ{]ŨzU6RnD~T&\f)7SV{T2eT&X?ʔ}5{AMuF;VFȩldfwv}gNF"D <^d+xo:GY x 7;偵s\X  &广}ɾq-.hĝ9_Kl?Pm^=Pt0ɇ}<  '"f;KfXA*k&y:ьIMRu^F>ty£)̘? 
cRzjfs;WkW{yfywDi1x9IOK)GKrY`?]=zy.kc-13=wq֡D\@ѴaHw90(:INrEnЗ4=7Q!]ji\bu(`̬BT 4&k4FCSRX{gq ų3S2~r_&>~6۳^Kh'mOlX8cZ>V_D'oAq\:TY#>5>ㄽ]7r|0s6ؠFfN +KxE"6©A_ThSקbzrs摣Wjzb27jNlԸ>{03?d~k`2vHRg փ~'} &I܉FҽYcr> &SbC~#%H}ٛԨҥ9Xهe^@2eSɯs),-ٛˤ4Qd]il$2?;g[ -4h'6<wz[d]n񳹸^:3?xֆ&+֐?|_B}tVkhlqBȳF9#p$<L5v('FSӷӇ9ֹ`-rB"[a/:ky u-3O`\=`aKq&GYtt%7|hiw`vkFҥp *ܪseJΪJ1Yi $,Fk;}=Ɯ|#)Ѭ"1w3^㠊Ӝ XDg>%B'oOӵyI;SPQc;O[=nq3<^Y| jQaѽ,z'?智i㔧쿑7s6kug-!?b8h%߷w=Mp'<ΘȣPrH^MZBw|KYHq:;>*^~̝k?i=t)j iT'^E>Lt :xYwi9Ӱ)ԣ y{AlhS&OIk{u.^r\ɭ(zq*XDWy2#~($qz? s}JFzgdbܥƕ|(Vu\UUTb{ Em:y뾊bm蝳]=Ra <}QTxc^@dhmrݙ`0 H{Հ6o<;?.RLCgqEˎ+q7?*K,MxyHe.M[t]&ECs&t) 5ֲlG^s_.eC­UKUr ^CW4;L7Lx|N4sd/I=o R^ߵ& p6OoAzX4zRJ}s7r4yM c&M(M{S`UcޭDiN ƣ]a=|Ur\ap|m 6O~'??x[5.`/#pHmmT<{X1wqp4ß+l RZ=}XH3E tha=Pt2luHi/N>݀)78iV/&;d.~]~ ):z=-\ꉳ+rD{-&{0k|w2اL0y8~MP>=m=`2ħg,,˳s\'%ߦհ:Y%pB'y(m$E Ǖv5#-sJsλ)s'CB-c<ʬA3ϸ!~΢_c"!!At\724><>Ƈ57fv7bDI0_T;])9߈zRn2P/Yā&zE'-0]_jS5G6GY?/~|mLҹƋؿN6טGژ}DZmLxַTczqƤu|c‹!gj.hDIc=c_5,dD;}gpM23K-J=+saؗÃ1d^Xu~)>žhd8-F[}>%,Y}1֓IZgX@SW y@LJE Iem_Mj)B7^?I@o'LP[܈R=p7Hj:'LŽ, sGM9|NG|Yf8|Tν;#s8 G(d/I.GVdgDg& 5-]^~g0gHoSϐp'CJE Ǧn0JwF4p}֝)t{4iw6*&K{:/F6|(uJ7u8y$}I7nsħ#{fWr!K]0r^^8-6q3]^{&AښTn1uoE{i{;(a.=H-NC+jJM{ 5Ul6~No ZG`TC<&;s6(sROK&E`jup".:/RÞ+?~7ois`Ll ֛^:#޻QGw4'a~Zozz؈0މvF6TtD XIUqʼ=~%ʒLE-C)LZ. 
OsQB}doi_w} .gh8M歁$aԛF Fw4hZbנq{i{s֣w|~ox1fKc϶idx(jJQ:xS^`J ֯O>2$Dko%ƕh%cM{7}kDފ?*?MމhH;#MQCE'hv5#+EpyX;e2r}Zc#NS$ 0E{n5}Rℵqz}=LM}G6*yOZP/>XA~ yONtoHO+(gHO0toggg'>$6_Hɾ$popہ2LQG&0%}8r;)lZ:80:]^[)`ބ[fp΁^hqopdž[I&v(sc S'KNO'GV8M.64p8ǰ5vќ2hiɴQ6w%d_[&ΠG|tכ^t8}@=uQ+J8}j=}wtH1L;#R仱9FhG)e0-K2ٗ$A?M8R[ѵ GcAԯIDMYQ40*]{<<{Q4(9m5 VMr>q=ގNzMϏ)k&$9Ysi]~w՟퍾=ۣ7i`pc3;G3O/neԒHo{q#帯}"$]6 )]Z5 x44c7j] eض8>q3~c>h F=>QxM7>]>N >q@*h&Ds(ҥU F)]}d_t=ѿҪ%arvumSbt^c?\3cZlLC;ܦ5&\c~Ƅ:[0hn5ug8NXcHc&Mm]}d_tlLMSa5R<~]O(.sL6Ʉ-2~f:<*E5R3|f!uiD$i6Eh-({ OTݮ-6 &]g1rb"F^)]0~*NٳaC3۴i^ ]`w=Z p}L%Iÿ $] MflR4d;!nۦJ lf4ijP9]:)[ը=`Ub~m }Cl]%R ,V/F^Lv\(fo(Jm'YC|+4iEٔ]VF`wSL%I94D7TNU8ҍثWCouJ|5jKjZx)K*55iT@2U޳whЍibDeՄU:&?աfnؠӞqyp`HhrF&{}G4C#Ӿ/"98JҼYg7 ѕJRvX< ptiBտmoY қ0_"e3<`A5+驺Zt6]6y7E=oAT-ݕN^&Zy1Qr1rGa IHH'=0/N+p[f^^gjz96(Qtd&viФMf@V}pqEf┣p 7nĢۖ*oQK- ]y$X]n%R+8ϺGEl(nTlw»Ok5cDeL=IC$GWƬ,WM3Lwcyz8^J_-9G|+cLf %ITĪ 7 dpzlA*݁{iЦv "IӊFCϾ]5a@ ^h`&5TGtlz_3_TQ@DUΦVrV.qZ ۳W7g_IFFIbQrKch4mҹ>Kye9`ih,Ji #bP[dL$iӬ3Qb*m^:dzf_IFɋF,a Mz_)/m-<~R"8!(IL RO`III6*5(q}*ϴOV^LWJf`o~+tG^sL$iv|TH?Efo&Yw]FObkѷiGoQٔW3c@]u]k&Q4KfenR@Ьؓ~[PME,-L4J6tyNDy:_^]Tu5HQ4+ W2Ut{12K79$hRa~/eTvWpcն{aIZ9(99oQr19T7KH7z5J\O7|=Lh׹P6%Ц^9I6X:DGK߳nu5Mx>~GIRP{|#=5~+Gh%j}OVmڱWOMtM6Ҧ1G7tzr@vKנZTUKC3L_r}9 ^7.iðI#L2F^PV(99Y^WtأU^EDOأ4iƶq/55}mVw˿9,(IJru9lَ8>z:ׇ^M[Ц.+2ԮF{˱}}D˗8c걭fH X4v+kr%"=BЗ$cEiӍL?<{;A)|BNzl޷' '}b}mVw˿&Ge>r5٤ GI|L&JÃr z\ }}0~u/ߵd8ĊҴT FT%t9LZ~ƂuW/Ŝ+ݰ(s4;[qk_,\f(91M7:f~5YRkj\ZX?ƿup$M76pHC., 7~2U&o0ZU6M(b ӌ%?F?QklO}-;/L^QXzѰ4 [Y_սHHc6 {FGHA9Y-3 |XJԩ-#IM*ር%YMeGKҗ##fp!6B*IeY*}vÉ<7.w\vL%I/ɹ$#=g+d_sTI2FzVI$Id/I$IL%I$Iɾ$I$I2ٗ$I$I&$I$K$I94-%it-I$IٓCKI$I0~I$I,IZ5[Z+.3?፿&ͫ_2XfU@D"t~{LH$I4|f_$I%34|jeږj`)=L;mߏ?4ިxM%iXaղ%";[ߵ`oq$IswrZz_-Ku޳$i/m9M>'|fI4{ ?~7>,$i^]t{bIt*Kj"uN$i^+{};jZλWo؁:g㗤EcUK;e!\o-W̿{?^:7$I%*F- CV0?(t a$ͧR04ߞ}Iȋ!I+c+X:{o/I<*ݔۗU [2ٗ$iݔX .'}Iʧ}7W>)I+ag )۷Hd/I(ݖ}nSV-2ٗ$i<-[ݓWNH\~U&$-b⍹lHҒ{Y,2ٗ$i9Äiy2|}INEmk/IgM5da$ZU.k[I<ɾ$Iߎ/m[I<%I9 
عm{/IgƯd&$+}IZE&J0~I$Iɾ$I$I&$I$d_$I$K$I$}I$Id/I$Iɾ$I$I2ٗ$I$I&$I$d_$I$K$Id/I$IL%I$Iɾ$I$I2ٗ$I$I&$I$K$I$}I$Id/I$IL%I$Iɾ$I$I&$I$d_$I$K$I$}I$Id/I$Iɾ$I$I2ٗ$I$I&$I$d_$I$K$Id/I$IL%I$Iɾ$I$I2ٗ$I$I&$I$K$I$}I$Id/I$IL%I$Iɾ$I$I&$I$d_$I$K$I$}I$Id/I$Iɾ$I$I2ٗ$I$I&$I$d_$I$K$Id/I$IL%I$Iɾ$I$IޤґE"f܎طs;S)g_$I%c/I$IҒE Ib ͽ& ASkH b/I:4M5uFd%I$IZ2&$I$K$I$}I$Id/I$I%I.AwL$i]WK$-UiY$//I$IҒ1ٗ$I$d_$I$3ٗ$ hҤح43o=ڼ9/YxX\/&/Z]Od_wH#GSzTD:wrdËIY;K|[Ĭ .%IMu}3Ob[I:OL({|Š3K46>%fڷ4s/$IS1?Qvt|; K7Wd_9q"8^E!i[w3$i  7Aoɾ$IRo>a~9_$Iħe/I$IKE }I$IZ&3Kg%IsK(7WS+^Vip=TAw6x XOFv#[߇L81Vp]j37FiP@.Z7M&W؍L$-6 c1`}*YHR|P9ýmT\;4 3xo0ٗ$-[Ԣ?aCWyIU_TN<(PehkGN>UJhHxj|lPM=9Us}6tc>W)ФIaUj(ex1lP4٣@\X.U>dxk4ئM^d.Iˠ! nrMgpJ?>Tyr?MZĨvqwfyg{ɾ$isnx{M? {G{4>5b=Po*IJ5!|"(c/UJQ!*;&wڤJ:Gz;Uţ*u]QEtʩM96^XI( T{HPe:AoM;sHZT;|KǷw>Q%{wIo7!LKcqqb Ӿ7d_d*GA Nd3V+MFG;\T#l܋oGQL=CH\_M|l{Qirr&^beRU.4YaeIeh`콰sVUߣI:\X&~9p@& iUGqVI3||\;ô 3xo8?'$͡^/LoCKU'V a.h68]ia?@[cF%:S LW6 Ӂ>u;j KTwx5HhRr( }fݡ$[IˢJ*}..5aU&#ڬ Ӿ7\{ɾ$i *aS+^ECG7򱑚_z%M^q8x9HuOS3Wv¨g<oj"ˤAZ*3YY}d&k3Lpw &5.%Vs&m/zkcJLSfc4UUW3G;A/S~>y  ?kT9[/3Ofsg" L%I+`=(dMr{*9wq5|FٶraE;Qz Q0(,g3Is.-7{w}ҫ2UXg4'@5F-LxȠPm Sc.p=͊z ^J^Ҋ oo2?QeMaٳ/IZ$?y5ٚk cXz,4 *Π,6{|2MR63;{%IrU`)y j4iF8>I;4\j|q2? &$]ȟNj1}E/grKMjTҤ:vf*a;3&sFQb?s|Ž;r$i&Ti,M64+M,qrZ&:u>[V! `\Q KT~ϚLo0ٗ$-`B߃I^$3~VqB GhgveyNӞI-mԓٛcg*Ӌb Ϟ}IB NLN&ڞ¦>..cl%4k JIh4mmzyLd_3zPQl0xՄSYooq{\}/lJRmYywdY]9%M0,Q,, /0{w}Id*eqo`#+bPU69%F! l}/^ZI\8Qy+1 at)!Iwaa \JT,tËn)id 7GґEޅ>[#?ᐿC~JcR㘯?+@WyN FǔPV}O3M?孰 [GCo[55B?~Nz_a 2vMReOKg1?/S*ApXpy?姴cqd9n9H_t eX sWĿE %/iߧc#l=o+S5 O#O0{ì xov{/<4MFo>%iQ]0?!!] f8.DUͰ&3O=R %Ҥ6y4RJ~np9VgZ 6gL::O2a3 I4#UrSbdTzgd_$U@QEC}I< Z?KQ;dTd_T^ZKUF<˶J2x=& OFO:%: TٍV$s8]_$I06AZe`SƵp%. 
^-H2x=&"X:lGjذ2'ɨ9KEcj$<g%I$I2ٗ$I$I&$I$d_$I$K$I$}I$IL%I$Iɾ$I$I2ٗ$I$I&yvȡ ,/ɾ$IGA(ӵ $㿌&$ͲA e7ٗ$i6r %㿌&$B ,]TY_F5@4+](Li.g6zRFuirŠ^o7ٗ$iޔyH5dڶecgGKPۤ %QSPK {8Mo_oKfFOmdʱNY%o/It4X~R?-Rx5J2aUWӑU-_Ry-2IAw ^nIɾ$i!iըͦ\bR6.P`1ێW]xw}@8A] uGTGTȱ@%B6؍uwHxvV hgbJ4P |/qɦG;X m*47J0S3o'A|chd;uu#O//gMmo7ٗ$͉]Q!5M Re/]*ԕ'vW(x ĎF-eM&{}.UjUϞAVɋXulx֢ud;zvBo5I%9U:wաl7 Z`@ɾ4Q|џ&٫lP"zJ.<.a%`R˾* l T .M> `]4W6] *b r@P;d3٠D4heBv! fM>zt(<^%.I]4=`7yI6@x},G;\ saX&GQUJ}}{W*WbW~GJ/NYEˏF}Iy9AA.6=^ðZ(rk`mC8X1 MQ <V3r νgt>uE؈fL^ =6 SP RK)VIU*xB(U'*a$G;X %EGv]j*Fsw+}GP cIGYiEglx'f'}chTjU)Vzt$۞ڌP`}x~Jp4*WyrIz3S*smfR}6ůq&gEyF}Iq/s=TªMnීnl{#~D.F%U+aT O,l9jngʝk6!Sg;ڃap>}+dZ,˞}iɢl>d_4G&iPpտ<\ٷ{Wa(!{y"F=] _ mY L|43>':nL9xR_WW%_Og'gg3vsm#iTλmru'XRmgKcÕMt SÄm1RP)}]M=Sg9ܔhҠM X%J\Hu/"_T-G7KJ3jRe6Pp٢{gbJzrZ=C2:NS'u۴(ygF}IYSkDI%r4L$MR:BnXhμʕK}܅UA٦s%k$_To7KX;쭏7Og&hN *xe^6fb65xN.{F]ɯ@yYO4[s*]`{HO_f$ێW 'VQ#.v^ ր΅.pGЂXenpy-BJsFˋYɾ$i2Tj 4εm`%!MtZAJ \6(&PKH™G͂_Jiy?o7ٗ$͑+#ouJ50.xooJ'vr]>vé~@{ߵ;ɪc/ԨUϬv2N~}ٗ#g'ɾ$i.1 HDhU2j7ɶ&ؒK9}}pn_#J+Wy s6l Z5v VK=8}MF-??o]7O $I3P oUm4iSfu&i6rja)ٹ `{.~)Og/3._v7= g˷M_nJz{m+h"31\]yguUimӍu3^9ߢˋ*W{5Esٳ%O%_|ϾKf"6Uj *Lp6]̮jtp=ia/I ζƽ4=OaL$㿌&$M` sZVMsd&=:*0W3YHo/IZvѥiPewX2.'$MY*YѸT])ٳ#F}IflrȽcш5kbr$irܣWZ9슧Td%z%_FdKfD[| myUWw^'ɾ$IT V:Hd_YyՅV%e_u>/I$Iɾ$I$I2ٗ$I$I&$I$d_$I$K$Id/I$IL%I$Iɾ$I$I2ٗ$I$IY,4,iҴ%KR$imZ8_$I$}I$I4/IK#IK!]FWc/I.y߂֢5_Z4/I$Iɾ$I$I2ٗ$I$I&$I$d_$I$l: -7V zcJ$IT/I$Iɾ$I$I2ٗ$I$I&$I$d_$I$[~TA.%tEXtdate:create2017-01-03T14:39:24+01:00%tEXtdate:modify2017-01-03T14:39:24+01:00@otEXtpdf:VersionPDF-1.5 \ 9IENDB`batchtools/vignettes/tikz_prob_algo_simple.pdf0000644000176200001440000007257013156705617021523 0ustar liggesusers%PDF-1.5 % 4 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 100 100] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 5 0 R /Length 15 /Filter /FlateDecode >> stream xP( endstream endobj 7 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 100 100] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 8 0 R /Length 15 /Filter /FlateDecode >> stream xP( endstream endobj 9 0 obj << 
/Type /XObject /Subtype /Form /BBox [0 0 100 100] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 10 0 R /Length 15 /Filter /FlateDecode >> stream xP( endstream endobj 11 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 100 100] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 12 0 R /Length 15 /Filter /FlateDecode >> stream xP( endstream endobj 17 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 100 100] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 18 0 R /Length 15 /Filter /FlateDecode >> stream xP( endstream endobj 20 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 100 100] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 21 0 R /Length 15 /Filter /FlateDecode >> stream xP( endstream endobj 23 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 100 100] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 24 0 R /Length 15 /Filter /FlateDecode >> stream xP( endstream endobj 26 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 100 100] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 27 0 R /Length 15 /Filter /FlateDecode >> stream xP( endstream endobj 31 0 obj << /Length 2192 /Filter /FlateDecode >> stream xZIoW2ު8 FBHjlG1hS_uu-6t^oBJ.`oa9%Q}Z!y2Yxo$֕`VgK4_\/f"sP+r<'IgGKa~:?kץ$ ޜQR\#WL6JlB)G"S,id\~?7GÎa[JVU44W9b7}PwźO9/X|1D+Ҭ"zX࡞c~z lN̳^_>ϧ>tᄰ6c&e]w5Dʶˆ$嫦@lD[,AL K\4`WA|P(o D0"xxsn1?ݏ~FDImYU.M'KWC_S(\B"h)zZrdp a(1!ο "h]cNa4Ǐ7@#,=4po"tl2+S=uW.Sqſ3 0}hMl34g?<~`?!FnP/̤ݑLwR:v,YBOI^aa/$+!sS<EL(H f,5%+_\>\04c;ݡg5 틋anuhC`aXhI;C|dARz:$mZ4`i),3X'05Lk;uk1jZ8vHt59@PK'6"nH\L!Q ӈJ,Tz L2}]@-.4TF%=%s.ɕd[E&}Ƴv(ڲ -=I\л[c4\ķbom.!̎B1"(uZ.Eno- sX7ǜ@ԇBWg-8k1X[AVtG FIrckDt$rk+Hol F$ ZXDT!Yu8ܓevTVλ12T `#lzU:l u+ ')ºˡ;^seN9XW# \J$|گ4T3B[};o1%sˎ&!<&#hF7$Hų4J6r*l/\TB\=A[<@Oekhc th:*(u䲯ʢqeV/4zP5jh c Um&}y;\ :h-joDΪ$mcIEX%!H֣w!!ثB(b`ZL੷PcZKVΙa+5D |[TBlU؜2v, 2Mͤ6wAV O A"yHQ-ynAp%WЁ@y*=],E3Rc:}1R*U:hL qk,Qc:)EYKP5FSaYYK΢ƱZ`m#c a-# I: ^jCHN>Sń#oxYWUhdq9X Sm]t 7LEN ;@hOԉʡba?:5վ:7k6bBW<3> stream xڍPk.7ww+ E[X)NqwwZܥP[ws&3s-_=yi59,NP7'0@F57Qs-`A2q`2YN Pvwp<\\Bc* y@je'( Qbk{N?+V?R`W P@m'+_!XD`0ga ӓj+ ^`k 
A:`Ak;u|rur->W/\ۿ ?or<<]ܘaV NV"aa-UR;bS;Y9|\[qPSX+3B\RX$_<4֠mJj`zbc~o,@[/5:ߣ^FvewAτ= ^zK#fwv+U0J'9bucLs-3gP`hl^WScO/1cy |yf|Vtx:ȍȨ'|SI| E ^dćǹϓ^ y 1ƽc#NZROdJA\lMLf#ricjQh` W`MćہN"^ *[On69;%tȥ}EdPoș=f> r701_Yyz4'iS(e|r;^9twZ"Rz9 l M~WJ ;sQ&!wByzh9FkK][o^"mFI3Rz nCG ,]74h›ntK#iZx\}ϗJ j*?D-tWInոe[c]B7o;M!Msg{nrdvhbסܤUO@/& !vn+ŚKLoU]OE}H\D⣇Uv٤,ML bn&իDW_L: d%"u>dOz)jAP~"߆T2&gM`~5UJcnf%:L:>5&\.mW|fR8ʸ =2ʣpʰR8n__1ƻ;ffg(.%8st|gڑ ~ONZ{01',D3iJҞD8g؝愭BJ H= 4A6Ni}=C}!U[OklwUq듮 2I"zbڛfw ɚIIS+W7JMNU>ZESpQ9WSS6*_#K5 f |%ALԙ4T\V!9TaP)c$ՊACĂͣ5_I׏$ui@m?meaNje<=d>i9o>5bvaxj-^SI&b%09͆3Ilō\x&0X4.j\ώIxtUآt:|zyD˕ispt7~S>1@칱cTڽ0PuϕvU3sUi"x@/R+[G#4<2"Ek]}C; v_ {sIR~^ 5MDŽyUQnXx5~Kߕar-^}==ܖ)l NuA/4b&bt*:>&98Vtf.ZNǮ6Τ l<ށ^iqV m4 }vK$t Rf៖bF|. PmVSbPRfJ܀:{ÿUFר=UA9׈"H \t̴W{k3Wü0 8+XYtG̢R,<7j\qKfb9KqӞLfJdvT+ti4_DM= 70V*F ͗Sjo6NHa9Ns[, s^o-_ `pj\B-;9>5oDŽZ/=EL:SzEؙ iŜB 22=RkIbA7oUsD:~N'9߳`Ǩ_D‹oX.uX%WKFXQ($Z-#gz@#VҨ|fk~h CV ^ޖM9CX7[4؇wݦ_H؄beQ믄3$(qe?!\ ;|ۡ(&f'0]Zxh}EH6J颔g+]萜,B#u7WN=4v-HnxKxdo:OU}3oVcT^W)Ў\Zqx#-Q\ucSjFq1@SC(4+C%PWNkZjMATV%DcIĨQ^N"_tj&r 3B2m IyܴU%G@BWGcs"[ZX͠թS:o,&̋h_R+4N@].R(溥sGk t&UQv$\B,ƀ{΁:v~b>o#.y$<[P~^\ͽ6 ;b.%\E@ؑG76 8 FH+spQ"5Ӄn>J>9keDn dbs*H1<'CȒ,!!%톢 c[Ѵ#+>[RgqfNjZ7 qT-eKqŦH[oC BDLZ(]Ѫ=ͨ_Kf WU"/D5R燯.Z$2S}/ݞ2hd@Q\+)嗷~\0:qM3<.wai[Y;CŢCSj%⧪s{Vmytz[JVpڊLO-H`( ap|ɖR Pw˖;Qm)rjw@޵-:T=f5 ׵ =fAs#m'Oh#q/EV&l*7xT.c0,.1I󥉆i=R7 UuI&eqIZEEpp҇Qc9Gq/_/Y9 xPWX41+9<ݩyȽf,Mk΃G~GN.dI$ fe:5Tp3WHG]. 
Lc^GDM&1!"'4=2!D0ö@RF&W4Kh7KS=jQRUMC&X{fX2#ѓ Zpe?$R O}f,_\QƕQxmrYy]ȇ J\]ȹ*(~jo@W^Ԏo~2j[G.(/NQEt AxÞ g yOT,5Ō]WJ%j n[ڄ/e9ѡ.颷L\mǽwϙH&4^i(-GgkAn{J€ǴbO#?u3>]`$& _)Bg!4K x- =_޹z#dphW14p@(; ڎn:6%mECJ4}%lj}e,/hbW; :*T?<-:ޓHc> 'SP}ɾGF\; PRӍ4p oӋ[vwgDS&[|JWgշVk<^?ғUd?Q$O}LGyɚ"QtS-*S :987&{7fI"i~?_dMzBݧz?2gj~o{{B{Xh x:Πv[ "2jDig<5 rȼCsj3{TL)2թt8=r9CBGHr^tO U۠shkݺ,Jp+Fyz%o:f4POǗF r27>]ק-&u߿ 7,Iӽ:cCu k߆)H+aH [30 >S(ĦO2UkB;Ov«w6-a8~V<*>R7A ޓԖ0jeZxڞr8ե Q2CQA ~Ok-n/LDd(-x~!sǨ Wl#*s+;#R ̡=$='Nx5:I+i 8L?}~@gz(ոhgAc*0~׺s"r M5iD3ϯmz!iuDVТUڸ(F)Q_ӺoBvoþE fŲL#lyx6ޠgiLMMd<H[KK|_At:mJY@fs住}HOdgaU+m#T'ec_`?5 toJO6\72]Y7{Վ2Ae΅#Zf/Sxu 1І1bFT7e# ߩ-=;y* $hr"=FQG>GTz7{ ,mb-J8̹&^ġ }~[=-/WŮRgגNplGH-`C|5kOGō 65?lFI~hvMeiq_~ӖmڦԬGH._0a#,Wn&7ɪ">_ah"18}0y&Rlo˱[k3ds&$Vnv 2*X hƻhI s-|r V}c"e:rI>U rX~1[L${bLdrJUtC Є]] J2em/GHM37/Ot__6?j!mf$q-U[bhf%RaOy ,hN诂BbpdQJED5WbwSb~ߖau$)֪iV4`~\{՛. _wt0? Uj3ߺ:*}Ek7)i(jC^RKB@eQutƨI?uiNʶ( I2Qj@袵KL$.ԧlQQፎd? X,+Eh,=>D}Lp;)RkJD&"8D5U2d{๣JPω,\1 *X9:BЗD#U/LeD FQE`uJ ͻQlj*^;Y *:%ӝaʯvFA?{ۅdMIL :;ƚḎW֯ hݷ, *Ƀhb>tw\- Ĕ`2oxwӆt5h*4 9I-gJ 7\Ѳ1uuXQ,Sz#3^;g2h`M8p-PK=?"*}[EENՏ(Ӣ ]wmr4^CӗY4A23ZeϘa%WfڍÃNHe%Ѭ m܆,,aAs\T%׷ZJ9e.&7*nݤnt?P]i48{uHa)T]xe3eˌ5M3]O4Z =Yo1=Q_Z|川y.h ;u{EFdԔێMP|U/uÙ]>^oϋ!w/Ô~Rw[t[?))>ya{8&|fzC?Suʷ aN(.j"ŒCpXE d>7j=ep/z۫}d9 ]>ƪ4a&% W?ת:by[ Q:Vq\8R88gH2ѶYb?(0܈rmRmgaOlKhXZ&U.ni{-M& ҫTqn}Q#_Ox2nmb3B(x8К=^P&A8)x~67D:Vۗ1w$5AsNnW ʡ:ɓ22,<k[?| mdpdR5tJ9&LVR>.T/ڏ1a9rW }K7)#p.[z*vMKWkZVɽɫcA1RH9mQ|>\Svz) DŽks= Ut*S,(:gUA"g>T${[kUw3ߚDm^Nx=YM뎭"^ ΏD] ʹH&yPl%fLM9œ]4.~g4JV)82fVggQH4~<=?s+OiԂ$ 1uQOʝoVG˂Oy2c&߹f:~7B A,tHXQJ'fr#" : k2!lǖ1RHzwٛ?[yMW;>l̓ps{+PnMZ$Q&OE!襉NczQ75iZʂhXKRhǕZIBoVy8]d#1!=r~FL#;.:LTӨ*guwɠfGā$ٷ_+WAȾ>h[4yKibGcHPЌYh7kysx\ k5Q_L] ;g{<=9oE7۞H1V'UxB"t1R>*qєb7Xëܴm݇9Eh|cd#dg OBzǙ1|y=D/k+-NsF4_OŇ݌lՆ?&g,BŐ1vTeYR^0Di@yZ 9<_XzLJ/FVU}l;_k`8]9'WOXWν=–GzԴH+=A9&"@#Yn&sR0*,Ե-&iX3gmA@)!owگjȴE6eO/]s]&㶉0d7%֝d b;0[ԸeÒ7'&vGRËَ!-9IuQ仞$Q)t}pF51,\M6RꂾswrرK%7Z]׭.Y^4ZwmYgB֎9aEd):w G/ J2})7B/4D?@x&` v7*i>JgZK-~il6^ޯ5O`'aq68ބvrpO*X#a?W9//]:^XBF ]c"#t?Bsng-:1fZsV.\C,aȮة%5w95ooxpΦ!>bIf] 
hN1S7[g/3P*h )Y<Ɗe"C>zVg h1?. k d~e4Զ)&b1[YD1IB7([ǿ$4`PHٕ9ÿBQJ||X-mt!BUUo1cRgI/R_$K@A$^ge~.m/7Nao7ed|W2PvB{_Iiebɵ;?1^6bkd+K2aƎ¡3 ob"kdY[ӆB/G^^v:-Y0zD?x7_QhaDi!^8Tq܃ Y&o%mX&7ckS]ީ0#$X mYq_/xq& QQ"qԪ$WVJyY`']DžGnt7wqqgv8w.aL͜WYK(P49|3&h,M٬esսu>Մ!i!X?&lJ{|83dTqGkkSEXIE WS rHj4r1>RrGw4q]બT+.sڜrQI'%7ZntNxuG<,A6ȅ?pk/Nzk0T/( ]~= A.>*~Z6WoRtG /u%f3RTU"gЗ\6l⡸] W*6zѬR_M_WVNaezAhuv74m.V u s>}[MS̺++;"x'gn˘ۥ-TeK0P].)-,2L9 K^Dz 9NenX0;Z1}u d-Pc j*bibML wFޒ`Ox쾿.'~p?tbv@A `EeK,:aC5jtMtN;h߃3>㡬WSBeR ~BwmwV7~rT&!㽪ʧf;) WR #KS |%LmKik k! endstream endobj 39 0 obj << /Length1 1658 /Length2 10148 /Length3 0 /Length 11209 /Filter /FlateDecode >> stream xڍT.L#H7RCwwwwIIC 0  %-HH t# \q>׺wZ3SkK@ ;7@V]O ŃNO9ӿAYBA'%PPqwpDE<\\:CEr` @ :xCv<0Y3pH;`kK@frzhmu`IstrpJ0<0; [hX:jgvSl󴄂OG5]e5 ڟlOCpvvrx!#,!߆nO`GK'?J(Hk,:?7k(v#0Oce@Pܽ9Z7C6pC elDlA0?/ yYqNC[ԃ ?f +ss`k d I ? p=яO :C1cŜ :z  ;%* qYӔ./0ù[~&.JH=ӟ/'޺Þn@ kjpA@jaO u n `/P ߙ#rv~Y\\{:.k퉒@O!G/B-ѹ~F 89 ΰ'Swg( ?$,=]/'_ p 8OO >IPAA6IO'f92ڡO >yyJ[CRx@ /5⼳h}]i2O mtfvE'[,Utp<Ӆ/Ԉ$;{v/SDEhzR;~\^9 w绺 ai_{)z ީP}~_=V?UP,}U  /ًG*VtXk+z#&6h2+r"\ Qwj}{'\fvt? Tcd`e6 '\BnC{l2Lt{qꝏ'ycd2^3uwcJ($rZkZ)R GWw@\Z[ֺC=(Y-iQQJFKbuE{EIDdMF'c]]blbع84GjTeU+Hd՛ӝ nz~[^|d7iѴ6 }xPuL]D`I>\^vۜ(0y{Gjgż=Q%N/9icG˘mx_..?ݼQ;)io5Nَf ˁ+ͅ?~9)X9iH$[WP P& /(E'(~!4 7K%ر72_pk(]}YX>.kx9&GFzS3ssx>>.iȖ;ψj^`Y@ Dhae ¡ ?-c+ӟ>(;!Fk,]zJ+~|JTtl/MzL#e&ㄛr|<_{ʝ ܼR(9]HMNLG2='%笧ؕ(,.j8?SbUWKin}7k?~E{1 _G*Xwl|C&؋ƈ S6Ȏn´/([>N>8-+=t)RiR21xe1 =NmLNtS^)T̕:Ķ%|7C N;6.Cy~Ș_;r=`(zR%HIzv5ˆȻ,k" ^3=,<-t55 W::Ye̪ ӞK_g9]d>J)a2`aC{W, e&CerKwp^OTd/ݖYHNYP50T08eR}NB 1p6} (lOx+*CBaX3C{\Qzrj*4xw՛or9x`,t\bG0nݹx9]YyyNopDD/!{W.Rgؐ2秌~vv[pgZqeKUh& ih@ϐQ])kՓ:λmgX>⭷u`t\GѹU᎘RO|_LoRXHp1rGIqβJ>/0-qwz~3ā_`!˨L3|ԏ$'C:Z 䳩T-vB!dCUg`<;lkڄAU!>;~,Y>w#2s\")Y]j%vC5yޮ b kšLxbC |y"bN*cw:!g2B(ʣgsc1]E/EBEkqoq" V|zL0z\t/ h6~Cg?' 
/IY8vscym ˶Z2\(KpGa~\čVfyX], 銢r1O6~ ) j6ZY|J&ߕ LF5 ;9Ξ'~Wl++K@ZŴ˄]̗p}ɹ$ojI"Ѣ.n  C/D[ l6(3e&(UtLXQgN{Y07bB|^:~4MB y $*!QO~i)ZAxƩ!%Mej5qWdNX4!'U::f Unm/Z|MC=`Lmc@]{ϹUdLԟQaiSwdt#:sQteq@-SbOÍD:){)mʈl4CY9IAV2yC B~_'smS@ XDV{QNsSK㶦nh){׈$[RlCt;A&>yq0Ȫsڭ=mسо#P x)~4W+kK-ۥVs gAܺ@s8m#0 梞>(\:xS ~dZ*%q0woTdsL!vK0b[5K6fm8hol(V SJ[5Hr2#nitVbt5U1Y.BoXnoa@B,Yx ̒`~ŀ_zZrAhY`9|ԦctCy +/b~UqJ۪$HBxRf"H Ԥ}-{gZGɰ>į|G,WM#ɨK[y#އ B5T'2~+T9T=G>дLkܔ[˴hԟk*F/OGiUԸ$$iJƴO:Zmf :Ğ%UNnѥ"Qւ. 86ڢ^nNU=2-uSi*H5oxaڐ|TY?]j)d{AKzh~AY@tڞqM*PLhrG{2pEGA (BhWG|i_زF,/F E,9md]>mb߼wdӌޅə:OksY5εdF,l8&Gi ٧yKATT7xc 9\%-Π&`JI mXj'bBng$ :2ጴz8WčՃF]l 2{a^XCr 6-5JYnj[MPyjጢaD׌?-374VzRiɒ=]& E/J̊]&N~Wz 10{NFeLcuУ Ԙ{@_j~%* o\nj˙^P1}`FGV@;HU$?cJ:DGk U,veTvh ЌLt{{K8%vF>F$2Wr  Ɇ Kn-5P%,Hoiɻm >9fkq-0£[W!)'̯Uj}J~UI=5ڋNΒף %ۻ5]去Xud7ҳP],Nj^g2Fz8H(K(Dzzy6Yy? YaZ,ڣei :@]G_ZG x~r7/A$!Z%}o+ȹZ|QM}Crվ4}$0s5@opb Z^&4,+<ET8S>4}cU4kG1 T6KJܽftk&7DsݒLG i,Hۚh8{͸VHGI'7 kpI)Ä;n[*~MtC&ӔЏi:D_I'(]~%WIWY/gx$.2Ք=%Xm'Ik(:AY8!0jO͟'W[ p-Njʟ`Gpx(s%Hj%⽚ᬦ'#'ΆR6xS!ex E1u oŃlIfeo?S3US Ɍ|sX@p$js@WmՕ2+xn,>)f b 5Qcu9)zzr K4PrUTnuQk8[5P;x8U\]~|B:o*jGgxfE.(Y]IVdAHò>N6X-+{^Cgh&[ք#w&wfbt5jqq%撕#x* ohkqM,2U `{bspG段s yi^}gp=_#p+7mM%XXFjpZ`NlKoahL&}R~ūpP9 P6QppDoYZTQڕ̺ aPsMuu_UfYu&YZ9f)@C[{'IEm0x WU%Z&&6^=M>GGVk 9\馏&6aʴZH23ނřG|DMtHӦ ʎ13&06i-i;8©BGx9 Xs9F*|n?頬Y_;̡̫N jcڳjIUA ?h8">>"q~lc\W_ؿpJG(nd,F$m"#3*BW2چE}{ ƄWd6abKn_x{pvL'5M{Qc|IA9oƳg3(wKfLAoet9hr|,b2X,I/B[[<$4<#~azRGU^%(۪ Imlj͠#_KM%?fsu]^)K63=r'݌E8c$\A؜ʦJ%Ҫ}Kg%J5$pYuGH,]tEjJ@Ƕ ÎΐGѯ6a~@ F\HAR~LDʋ7$ce䃌/U#wO TՊ*ԌFzWOnE=bLp_9- +j5!k)gRؕ%iL)E /B$ IwUN#/*[ʷ %3p)[pf sML" ٧Ͻn(PO{l:2B,]8'+HQ5_iy"bFGpV{x:I`N)y/}o~pj (> ZCY|\;I8oq :t\~i܅žU坨_9Jp! s"b0H`gW?g09m #[cLG|pړU<ϑ)c[|LĞ Tay}{/TRoPin}ņK0(Sk-Q GQ%pX?`*k^NާUˇ3C3%팃Azr_5KM#s~E/eL?3Jhjt$d]I` @MD֍H_Lv]?"@er q $CLN>H]3 ̰bMf!']\ez'{c:MDR#!=`E=I {qi"Bz4.Oe|XZ۔ogtXoSդU3i  Rжgݛw[.E%ZQxLyJɇ#䱩,Gܷ߰*WBy3&UU+ޥ]TI J+}Mڅ&LQRxljl;70pyα[}i?5S{,]gQE\z~yܸ*SټqW%uYoB0D7דw? 
K.ȪElStlWak(#& m=xq*9D0q,"Ҙy *?J;)FZD;znoљ4; ;2ivq^MLxo ;%5[hc rݓg{1ʲpғ_̗@oPwH PߌA?,J rj" gr-ae㾖VSigH|l85sD/2s^ۍYp }6MTG,AsăPF_]Dj^rׄOʗPw@؝'+Z,o\fDZ|jbގd|2 IxQD8i߻ 9BMw}w$螕:v#Dùąr([ERxNmoo#)$Tl 8F?h"۪ilhZc6Xg/qQ+Zqs5CWuڵ>x$Bz-Oϒ& Q]0-À?ϺU{u6 *F;ί{L~+B&xj>7u8Aqng)!4hGPXp0&AS*M3>mY޼hd4;c*[\]R97{_iԈִ|p{2A0\Z!i:Tlً$j-Tֈ2EyM+?Oce8\VhBm%A]'l4<l0޸(<8\O|N3~PͲYpP`LOU)HyMm=Ra\uϣ ߮vmk /z`T<#Ax$o nYv?MJzpjg *!̝x)p'hJgz1J6.[" d_ l'F^j вt>8K&^4קcf-`L%`(־^Yf[tF:.4n߻zMxӼ<=4I&ܰ&n:6Iz=Jr1IqDB?/Ԕb5b>PnH'=)ʮ2Smn=($W}}ҲwpCR~/#ͰgKWzv?C"|2/㌛(_&JHC#L.ù(rSPIZZh`WuF(,+jySL$n+DGDג꣋t4G[FDG-Sbs~`_IN+F'>NgY !tU&dpG$n:nXѬMbӐFzݺ޺jkf7;FN?dXKHu=S2ޤp`_ڷ}F+ѺlCqPD;mY* \ҁWr0m 0?hT ^$ mK?p6t_"Xثs0sSyjL ;~r}eYMCWXN9qάg)n%1d4$ؾ}$]Ůe--]89cF9h'kA5=oZ6jy_؂8:h9fh;i(WXs@ltӤzʊ#O0&R`Xa^qVGn'x$[LMz8ɠ;n_:MY}L$勂Ҟ׹R{ ^񭰊0nI#^&4BRWB? s IH>p ң=|ݽ*Of/>1n a7t9_k1O|a]g+˴87>%gi ^ A'X0 OZJ3Kۊ= ./f(kA$>GIB&ϋhmc&1ѽC-B!)x5i3)&.nDIad~ -TOzM!գQ"\Չ[t[؛:X\%./2A9d&o˂ap]ihj{R"&VU姬hڝPSz{=y{&:؆PJpn"_=R B'Vư癇r¥ endstream endobj 42 0 obj << /Producer (pdfTeX-1.40.18) /Creator (TeX) /CreationDate (D:20170728153754+02'00') /ModDate (D:20170728153754+02'00') /Trapped /False /PTEX.Fullbanner (This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Arch Linux) kpathsea version 6.2.3) >> endobj 6 0 obj << /Type /ObjStm /N 21 /First 159 /Length 1089 /Filter /FlateDecode >> stream xXYo6~ׯE[xh7l["3[2$ΐr|MDpoȏ#J !ApH4*$Ф cPIAq2 S*сm8HPtCO`Rv8%$@)8@i2x8`Ww٤(p| (w7YU_-;~Xͳijr!1W p eUҬ:.zp凘 0$| Z?bZ!q\ i2&t)߷65\ ''ʶ.0P%Qv*=_i ͎GzcWl:>SOļ86G{l; *7UF[/ Cp Pۅg0wayQF^. 
XJoP9 6ղmCg(NNW)$ %Ǩ4LcF:%!wcf(aںD\Zf3 p,`@:!)k{߮&l1?"RY3v!>y, ~Έ`dõ5F*I|aqtR*TL^r÷U2]5<,tH8DFGH ,RuNb;tQ1g Cz"AJ80udI^{tz\c$R*8:< V?/vsszf.mU"_~Mޡv#H:NxJlqD)zg[LﰛQ 2f"?-3 ?Be5m?ݰMelXflJV[Ц5jg5kX˖ۏ~̃3xc_{kdY  GtM[P) 8G rWyP8E5y`~ip˛uI{YcN[ [ѡ{ iiȠ鷬Psw]䚿лs0ŕy3\j?R endstream endobj 43 0 obj << /Type /XRef /Index [0 44] /Size 44 /W [1 2 1] /Root 41 0 R /Info 42 0 R /ID [<14F32C6AB8AE9E3F9FDA8849061867A4> <14F32C6AB8AE9E3F9FDA8849061867A4>] /Length 125 /Filter /FlateDecode >> stream x @D_c#$$Dp#B*@<@ C᩾ЕDEƋW7Z;6)b+ dl*qaE7/?-{  =mcMY<=bqA/3 endstream endobj startxref 29697 %%EOF batchtools/vignettes/function_overview.pdf0000644000176200001440000006514113156705617020714 0ustar liggesusers%PDF-1.5 % 4 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 100 100] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 5 0 R /Length 15 /Filter /FlateDecode >> stream xP( endstream endobj 7 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 100 100] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 8 0 R /Length 15 /Filter /FlateDecode >> stream xP( endstream endobj 9 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 100 100] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 10 0 R /Length 15 /Filter /FlateDecode >> stream xP( endstream endobj 11 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 100 100] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 12 0 R /Length 15 /Filter /FlateDecode >> stream xP( endstream endobj 17 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 100 100] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 18 0 R /Length 15 /Filter /FlateDecode >> stream xP( endstream endobj 20 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 100 100] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 21 0 R /Length 15 /Filter /FlateDecode >> stream xP( endstream endobj 23 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 100 100] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 24 0 R /Length 15 /Filter /FlateDecode >> stream xP( endstream endobj 26 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 100 100] 
/FormType 1 /Matrix [1 0 0 1 0 0] /Resources 27 0 R /Length 15 /Filter /FlateDecode >> stream xP( endstream endobj 31 0 obj << /Length 2620 /Filter /FlateDecode >> stream x\Ko#7Wi9qdf Hķ$Yn{YYFvlflKHTd=HbM6ߟGg/mX#'θ3J3j}oz3J8q9N\6Z…TYwN1pA)hVqa9If G|&VI;qh-עAg- YI0cAЉhay³G]':HCKy0 y?\qA C'8H7t^+h'H}mԝG&X&Q1L%0L)8h;pnh)#9G0Na2eôd8L |Rsd[Wؕ Cǂr%cYm҅h7ptpv9kʱo@dXP)1 *Gs(j\ aǼTNpGאa>+4"(;O X:8V(41|cƟ*{Dc 1\\7Q,J]\5|yw ap{o?}Z#4x%HpMgB!5dDLd'LX#~^o.EG2,].sY3hti1?6Qξj?ƽ`,$yፅ; Q816{)1F- Je xmUQv3Z= ?,/ ̬ļrs'O~&[UL k-P$IBi->3F#D{j 4YE$A+dW75Ԗ6wK-W!ͬNczWok ;VÈ ي  fR}Y/ogSɨFÓO`"- IBac~"k<*./a"xUDX LMSyMD#0nZu_]2U.2,s2)/:Ac_ͻ?E* o7ʦ#\F3NL=ǡhgG7 2>R&~2oA[(Pc$  ;*p\ri.G,cK't$Fg`'OL#[X;WGT5pdNN1AQ9I\8@ϱ?HNCaH5Lr.矖_|f[$zX59 jVLcٯSy*5Or԰˓hry8gR(dvd) xpNt,qqoZiSͤM䈖5O1+I6BLF֍'TԶੌ쫝P(O(xّ*"LȀ_]G坰 C^Y^]cu1)=J"t7MBSSA@crRW~J"Zz RQ("??\޷=gEUzGj3B2KD̫Aˌ@ajj.JXb#$C}ČR}VJcѣ`t9ij3"Z"Zc{|dѡI Ӆ'󊈕l9~B̈́A@VN֑#VsgпӸLh,[3Pꟈ`J՘x|%#殝x^v(1!ԷnEȠ:w9{jx@SQc7NJtrWIMs.ǘ#SnAo|j422EWE=xNy/܌#]v5Is?߅5eQk ©y"\"# x+)J(.BicP+T\& ,J݌Q-).V[55Ԗ6wK-W!GV;ӧQxQA͠cwݚ6ߒ?d?3Ys{֠GЋ3)7?Q n}^hIS6]׮һ^F/O8oߌH憪Fv`! 
a!#ȡM"] Q"kIdlL_`+'Z&́k; ƶUh #}4Ǥ֟Fc=JW-B?r4!6˫feo(A1JfZkb{(= xc% q1G0)@R-owz@w endstream endobj 37 0 obj << /Length1 1887 /Length2 11920 /Length3 0 /Length 13097 /Filter /FlateDecode >> stream xڍP-CpwB-4@Ҹ{,{pw ;3_^uUw׶s>4f& {;3; @RIBƁLC ǎLrr !B^lR@ Q b` 8CwH]f%F la yzS;??/q[hPB,A/M6 {S0_),!VV7773 AN 3@[_ Yth؛C܀N l s~ q39^4* ?ɊtEN#hjjoY6 " ڙ&m_@ G@2_9: ,`3Nrvf ;3N ӗs`rvf0sq`մ;⼘Y n666^~r7d]'o >^1@>`s38|o0B& ?_ ?;l/cEafv6bV]yUEݷSB `fsx>GbrNi/ Υl\q|??v$bcOxxQ e _vڠ?WW}r.Y}`g;L 1S.5/ j ]/χ&p^KJۙڛ2n "%nn:P1x`nFyxM">?*7^F|V'Uo8_*m^b55|)ewqW _ |)m/R`|io}y~%}Ces1}95ҥ?C8_<_֊b)Ԇ+\u/xI/q84 c_{LfM[o*IܘDŽlk00{-8ܡ#&1Td:]' uc.mJ_-?z6"5'{?ǩOn O=#{ENl[+ C&х]5ƭWֽHNCG(il3f27'ӗW߱3ǟ}~~[㈾\.}IDMGH{=2I%,O0U#oNf4 ҆t䪢X*_&7,؜ 1U۠ڷl'Ϊ-zd7EvVk},p rۼEh K'0ɆwyюaxoN}e퐾_=y7E:DnI)`94IIUO/AFB;b0ks]͊AiV]vp T9}%LEGJ탫y+ 9tčaתdcESP*$''~5rQ^2:f\©22q!ς3W>M&Hi'eH!=Qrޗdԍ t=+˜:'MZح ume [̅coF,|F乭&33B>>ʒ8g\J>-[HO(g>[tZ wF,ٶl6hN?'҄ !h| l^9G,tuC ^x=t2[oSప؀@9PeP_ă#ԫ,"6GsEt'Yg_ѣN hXA`ot;̫XL?I <A}S7( nɁ]H{v8I$[:築Re,2zBS"FcnٹƕK%\U:PW+>uɬa ;^\)m/0k.|TG"p3p gg0uOc#]q;U`v]`ÕrtIv~*ho6Q^cSb̹@.VQߵ',9 q(ET*|d]$է n#q%y`4 oPD BkTba2$f23O!|iM܎}frJ*y'WkdEtf1 sjyړ>ʖoBQ@{82M&#qbuY#Sd4eTq1p'ǨQ YfIs$YtKcNRǻ&/Ϊ}%h,S`kP 2?kmwLo(@b<+BsMx+7fJF<)T 1708!fܰs="˰Ș^w壢gM *y"?^|?*L (/x"h` %4&Y|Os~Ƌ2v v_6}>c 5$Rg>g|sN(۲>xѸF j ֩mC]0=HgvٔӮ(~rQTi6`,Q2PNT4sA푏dyvJ/ Q`]ﶜm.y"bzggx"JJʙ@D*W|v2-0z0K\peW3"kJ-S"ֹgQ?}ZNB1jc{wu r5)x1,BhK 1xg+*xJ:ë&zS>G 4 *kzZ Zs_fԙ zIG;k|fGQW'n;];]qmj.=Lo# ?Dj+D*iFTR4?7f-EɅI1Rd-mH@ /= KWYS+&@ɸ  j:UG_; BG;U0aIM#9e}Aҥ-fuR Lee17/ܴ)rOV#i B|unႣ\(\yu* ïF"h25HN§Eʷ+Ч1%ctU-dger˱,6YjVC24VZ"xRGؤĹDJr#4idMXYU6(2xo9C}$0*˚O|-n#yp[Β]I(Q.HRjQlҒc[:X' 'b豷~h]~\%mp׻NyP==tb8[M7݆|7>^D>r4YZ ޔ͜$V-ПmSz [hmҊ0oj^j?z+Yץ !>k7=\ ݱ[:c'(Z׸M >roVj $$Ea?l$UHfiOiܚ0Q)˰=dsZ{ ,!#=2MMfD2F,r"ӿkT|#cB =U#NOM4g2Dyy+9J>? 
93-4tr1Y=7 C1L5TN!{s:7#5~vdw@R6J/dwx}ị+e94b~+b}֊UJg%5Cїāx:~ S&|'`TcU}E|7h{V\6)twI-aʘE47f]ؗ,\Ewxjx^R<)_CG۴)fg?$ Jԯ[ڀZ0BNWK_bWT-MJ.[&訂4j2C6*%cI,hY3to@ii%.ʎ|!Rx^^]kVzp7X$+Aݍ8Nb&<~d:& C\=GGᄸy49nGw.kH#^mU2eIt3$6V Pi~r93}Λm3i䶭#=qxFAM{xg%{me޴ө\0ttikYx S~{q` nXCʚ"AC'9Ck6E+4잲T:U3%N7.k~r R2=:TkO}Z1 GsX^F5SjLwa=q/ktHZCQMZaa-)mΡ 73mYjvD\D.xټj_3Nq\vhc $Ix,1R 3Kzn&筇AڬI:I>ICq% \ 9#fNxǯ-Sjɭ)k'dDA,``hR$ض~QKҼQd:qșHϨBƁRx:wpn{ɝhbdo?o#GaC8vkJ)^BX I:3-nӔV413* Ǣi;X*f>AyyO@ye;9=ܚ&'?p>Trd~E1[or"B_8bۚʼ4i*QB,pfp#_f}Wzьm `G_(FZx4T\g^ ^[E) 19toufU;I>DT:Jk n'n|P}JLe#c+hB ] iyg35I)`CN:r+0nܰ!L_ uV!K'Bߗ_܉p/olgӢq6K!L{$B;ਛݰ蔞Z"XobYk&;-Җ"_:̌ #?^ny]F3Y门Np)@.>6MQ>"ғQڹm 侧u.9MZ!"겱NYN 7P,-ҁQ8z'(V7 (8k %=SLdoɅՖi헉fO)ݪ]-LqB벩 c.6]1nrƢ?M:~"s%Vx\\;=/UL_u~+# +2.W{,+|gDsp|yN؍OOOW5mוY !og$ qfC,l'^ :9rĉ֌b. OG^rz3 ]7O '5p)rl \VK&]IÊF\uv..~- Az!_#9s"jpS;?;_RGE-Ko"<& pLnКqgHDi/IIJ-WdHެ$\c0+YH+vg(A\o{YDAH13d^KXcuUV`D-P-9 +Ҧ 0paSy盁eWyM^Xdm 't r]u&xG{ΙbDê`9}֚޶'QzʃR*8)f$#[ΡM(EVdcu-RI ^(Gq]ǎ5l bk/|=FIfr([\M=#^Qԯ M(Xry{0r[ ,OL12M? }Bxg|멯s]9XvVà@x|4AN3*bfR( z?4G9Zȕ k\=ϖzhs8 9R6- :v70"Ҷ7Wh@E: 2DgZH Mzj֧a#.JOV5x`yTWcGT-~Mw׿Og޽eKl} V{Vz"0s#na8#>Exx5"trPp+Q#܃z>ȸm)6U*VǙ-x1{7Yغ} .bΥiUEGcR+T_<Yrә0lՃGbѬgPkbո=vT\UopŒb-9jnb =@iJQN'əۗ Lmhl't8k^ h W ^w Y-ZO> _\\BH;*>#$;={;ўvDW tvг'>b")<"oӢ|UDe޺@I\|sM)y3>Z]aKH|>Q@ՒzcݞF }N'6ϝ@v Zd8%.^\wݒgiY xNr+&wʋ&l #f9b:WJI܈LQ$6: ñMt:Sf+@ٯ~&teGX=4 $O~< g?NtD23x =[T{5B ޭu?ʊU5OϤbayfz3k,͂R-2oܹg`ZsvAlkpO<3cXz琛2)$Pr|>3ZD:QO~;KJIʺYvED:Eۂ];]xqugr@A 8Wk#Op=pTC瘈iabaXAra['^^u4Nł,?(V$/ *oyܚeWPB|zqUI)g[\xݙRbk8?|^5:HsQJqNgÈbcOgKi 0NI//<$Δ*W[PX"v`6ne">^70"W@Z䞠 .hgzXWgBfViɪWSJeZbXPLjpZM 3#[n6y#BtJ&o~Ңk*=qa_eVk% ![dEүSUtU:7->t#ܕiǍ*>2'-K5dC94|U'>u R/|:AqhFR>o}?Y ؒ;ɷB)5m0lW~hwW?yx=]kNgvE  vS#XzK(U'a*A~Mɱ7*6 m /'/Z{~Hܼ=-s_TncuS>s!RT D!h^.ss&n*UFUfA.FV@[Ѫ~BC{Czn(Q΄MK_Tyhűن6ILe~8*"L(9[[t9[ʆ/a SI=5!u #&1tw04jTaMR uOǾcy|`f|uJ漬 Iؕ:͎X7a48$V JΜ>eKhsv o\mxVMZ' *ӕ!10=h%*p-Ƙe@WSҳmޙ-Bw(1Ww+A}h 0$![$@,dD/G뮪ѻ#>h~PO!`M9=V7qU'Iop (VL |U>7n0sF)WU2e+PF.<s? 
6F6y+۱[]kH$@Դ)Lb_wЎGjяx#}vCcJJڪ!B"@HptbE٭N]nO?JxA5E&A_o]G#=#=Z-iZGk˩Uu{ih g^Ӈv+>DZ ̤ i8DQ{+%CC5yYzM1yhV-u[w6!r9kg+y$;]$A{ea̞͖?.6le [ͼ:G>O{<փf!ў٬C2_w)S7iWZRRmdvLj,a?EG+vԺ%eI8TMNv/n;R.x[Ds7hƥϐAww&Cu5dhw%/ fg;`'옕Q{mnM+n K"{ xsS$-7@NzHSO' %x=DZS4HijzqQ EhZW@ih+&z^A o8Ƶ_K%.{}b|!&IFQ[{!YVb;,~aGF`'0U|8ޤCctԌsTF*qG:'M;r&\JRWa_"t l2& Dsu>7J`.S-#m^&Zh0oE[7).P*0 ]i"A mbEJK:Vt?.\@1ѬEާ~Dh,𢣐J )duɲHpNJNY_G%C2pfud؄tٜX HvʳzhE?PBOEC͆Ĭ 1#*ceVڸ !S0ЇVMwDk, m7 /j&ni"elHYkuyao["K:=zM; Liu'YUTXLȷ^3{ɠ,bt9VJe{0}[GV]jol^0c7E5%5ԅN OĢypFwsmy1c 9[Y;N]vը[Ft֟*?jU%:C9f;ERY endstream endobj 39 0 obj << /Length1 1921 /Length2 6452 /Length3 0 /Length 7615 /Filter /FlateDecode >> stream xڍ 8O,Yn[}ؗaf cd"!ٷ%!KgKdBlzz_wu|^sιf BQ0-#,&"*"f;_;1 @!PG M; &Iˋȋ⢢r E=M̭ @#]0}}199UA 0E9!`Sp`?|{l`3`NC-a ! _.UDxk!aP0713wFy#~,䆽=-wD:?L\JѐbQl'KIAbi61A0%BxRH@Hi&I@IV82Cas&9ljqMbض9BbXFC`ثcm~;8ɇ=k/ĦX/jq g e:B ?( 2P!VoĞ'vfPJAX)!v/* R:a+%/J ?'4{ "/a0jkmTjU&?AqIݸ}"%MUu=GTV6K?su Ίow(~+Rn&NlGwv /&,잼 >y詒] =l{:u(|-ꨠ`!FsUqe? g܉/ 4@bB\ X'y\>FɴKn':3R<Ʈ|yYc}:LۛOI7ۺ,_gi[ߧhc7l8]ζb5D?}=@/ע+yu>F >&PwSDAl9U2yW2&(Usr鋁q1((/IYO6ٍG/8Q_Zm5]yz}0C4h'w='OYAr~!p_)1N?Ϥf÷[kZP+yBUW3NQ^#>7 k/q4(Te=mzUng0kmHtlPzj!w7yD[Wh2y;w+ 5}U<'Mu[}#?P}\Y;p2]{j"[c.9}C]{x wzq"R^Ϟ0qo`KTvvKip :f ap!`jA؍aޗQf|ƗYRp(>x[^/y2eqPzH1/rdr\}Sa{![) ;^ <νs )Ùl/L#ak$ *Ґf,PeuNic٬;//z;"͚X-՞ etB."@_ %['W{ƭ& L[ ? 
a&gnK~u|uSTa>62!)s -1Gf ̵ScG&T<"Ndw Ng1*LyZ!3.6:ۊaZ[Abb'RHJ]ớtV+;${loD=7V4 'g+fI Nju48sV*FD&bieW8O5=nXgAetMř٘ >Jz(ۃ8Gw^-m7܏^ U&leWKt?{0y#R̻ѯBL>ߎv"}}3ֺ)˄`=G1=rD5bzं#!(/]Qx>"K /uz7X%,}Vj-.mW?D|8YYfwrVK `)l aςo ]ˇʯV ^0]665 ejm/h֟MhLQL[}Optrn#C$E}B$4HC/DcƬ6d1C(&}%ń&84e~ݔjzm(6~T륧MO5Mxfx}`?؟MWy>IP?ތceYI ؓ\l~簸fŒ'RTJe7MU4Tpҹ*u:|v'T%34S l2kNDH4:7ݚXേM^~ W2NބІm~^gh|(E%v:jT#Pg;Fϕ _]j>p{v6 յ5nWEodBqVⴤ%;$m&^Agă2^d(l0AmVAq#S ԺtLZ-s0WD;2MuɃsyh͔^͋6K,E<âϚ&t;Qp[RcvX$ͨfܹ8f5уWdO ez n{#\iGs7&y6vO-]fmߟδu9բY(6i8fnTEJ;N-o3q96nR*EFL9/m=[hx;=ܞ gL1_vhP0HʅZRѾq}ȷ_R;ӟZ=8-AT 8vQHu.er9*ཐ7wH!h_9v-,Ov:y[A ?GI͎%*Ww],^Dx q<5&JߓנfȬtN-5&{yC.QIUȘwkxi$kP [uN;+aΖQ z*٤aI(PIĄO7imu-'W0`?ɣ$:>$ Qbj#+Uߜ#?N(NH3ݻs,`f-?ƺTjY`` ]@R_MG{, DJ. ֈq>$?1:V9^X\@ v,\/Ϣ&o3&4,䮔`˜/&GD*FeǵoFdȮ[ yz3\=u PϥTDx6Z5 5Th^Qf(=ҨO(Ѝnk %K>֚h?ބpSTr2sMu~Xyz0'& kw6qg,eׯ01BZ)h46Zp̊=?`NϚ^|.y826Jk8p {۬:O— OgޜUNc>Y@qXqz!p&ԩQ|9EL2 8y).nQ.wv!I{wAE u<#aU{ *IC }Ruw9l~obJWVU{> u((j*N]#dQewkK4NDp=ꇲC'%+ݖ#?OjvXeXx74Y9.4&eȸA-Yu( dA0mY# W0%C(/*R)T%ᙲJ[>˧ufq"c;Ne9CULHsGb@^׻cI_uBp dRW.ڐ ;h=H$Uՠڐz=#p@%c;33K/_+GA 9P&,]V?ުaoED,x/QMta$0F:j+א2u0 L.8rew4'WFlO`QXtԬ*K\_uro]J(}) cd'x_51S2]z4.5Tҋq+iv)KؓQfd/9:4qV}WUtl>[^߱Id^K%6)#d,Qodh+*9`庋Y "H?}i mUp忕1m WlHᓷXLԩ7:B =B M v둞 M9$>Vow[]Kx;ǟ$wKҵGlΥAFF83R}y~C휐iង [g؊F)yy8| B_Bi}jVm'mlq~c+FJ s3]ލYڰy} %"=/Gp~g;N$vsqуwkΒۗIƻBch0SDSmߔ.}wQ.TӮ˿~( 1~scׁeRw`).n<:tg#[ 9DΌʒz#G.:NF GL B5 %eWvŞI8cDNzI,SV|6M{Zqjy8-A\ĝ H%뫡8 jjL6LOn15\Li"VQPϿw𫋸NһA]G{] VN/g݌m ȭZmho|d%p"Ԕ(=apүmղr-s,9VtJ/WAK/ZU0qjhA yw'ʘ;%5\u'RN.Öp W5HÉ'_ S ]ٴ;褟0&Kpm/LZ=k~w`0$=k=:4R ~R3wn,::7,S5g6pšm^s.q`'᳙@^{UY.'zT-)dŖާמFnG*a+ᳵg~,@qזoP蕌hd~"plV2>V ` zhm.L!g cO/06_߲Z%{J]&N$WONpI|/ﳆ; , |}ר! 
>;.TdžwJy˞0ֶߌMFH5}aNq1Zmt5JQ}HeޤC&*ʾ's{_U]p5b~7Nvk&$ ERGXdCͮr:9*R 11|˯r_e ךdX^.YhږԗoF 1Ƭ+> endobj 6 0 obj << /Type /ObjStm /N 21 /First 159 /Length 1236 /Filter /FlateDecode >> stream xY[O8~ϯ8ZQGBh\F]F!5%TI:sye+}swNl75Ǐ@Q56q cP$8h4r: G6vF΁2'5XB_6As` ,Zz#vzb ۀ Sgߧ~9)iy`kw{H7i^ҰB!ըn;;YEp?i 9PO> G?ak٢5l-^9+C T8EA#Oza&;m|1tR{m`DlP٩o&0ɷp^ϬJ^lo_3E7:nu'%T#:/)nk Ϙzm3A x]\ȋ*fx!z*Mal c0~.uϘ/o>'ɏm A:KM lD|MP̱^y.bdq&%!J*bæU!IVs;XZ0AP8zC#kbgY t|{g. EtΧ*^VKXFyхgaӾv]`MS]CޢUBRU/c~u:!4] .um ےbpbn;)ƺ#ѱ%b';Ӽq8IznNV+'hC$9% Ʃغ$N%ȥtNQguKVɶ.W䧋FwEF[4MF)O fWPδY zKٖ š nʧMYߧ9?>w.$2&J셍x(ؒ)=qޭ3چmtk:1rC-AM:ɳb <9CD7EC48F3937C397FA746C4164C286D>] /Length 129 /Filter /FlateDecode >> stream xͻ PD; Q5$$"h6\mfn [ݞO1Pq-ː!<Ľ].zE7Ϫ lm:'.mt endstream endobj startxref 26854 %%EOF batchtools/vignettes/function_overview.tex0000644000176200001440000000500713156705617020736 0ustar liggesusers\documentclass[crop,tikz,convert]{standalone} \usetikzlibrary{shapes,matrix,positioning,chains,arrows,shadows,decorations.pathmorphing,fit,backgrounds} \begin{document} \begin{tikzpicture}[auto] \tikzstyle{box} = [rectangle, drop shadow, draw=black, fill=white, thick, minimum width=4cm, rounded corners, align=center,font=\ttfamily\large] \tikzstyle{chead} = [font=\large\bfseries] \tikzstyle{rhead} = [chead,align=left, minimum width=4cm] \tikzstyle{bg} = [rectangle, fill=gray!10, inner sep=0.2cm, rounded corners=5mm] \tikzstyle{hl} = [rectangle, draw=red, inner sep=0.2cm, rounded corners=5mm] \matrix [row sep=10mm, column sep=5mm] (mat) { \node (chead0) [minimum width=4cm] {}; \pgfmatrixnextcell \node (chead1) [chead] {Regular Registry}; \pgfmatrixnextcell \node (chead2) [chead] {Common}; \pgfmatrixnextcell \node (chead3) [chead] {Experiment Registry}; \\ \node (registry0) [rhead] {(1) Create Registry}; \pgfmatrixnextcell \node (registry1) [box] {makeRegistry}; \pgfmatrixnextcell \node (registry2) {}; \pgfmatrixnextcell \node (registry3) [box] {makeExperimentRegistry}; \\ \node (define0) [rhead] {(2) Define Jobs}; 
\pgfmatrixnextcell \node (define1) [box] {batchMap \\ batchReduce}; \pgfmatrixnextcell \node (define2) [box] {batchMapResults}; \pgfmatrixnextcell \node (define3) [box] {addProblem \\ addAlgorithm \\ addExperiments}; \\ \node (subsetting0) [rhead] {(3) Subset Jobs}; \pgfmatrixnextcell \node (subsetting1) [box] {findJobs}; \pgfmatrixnextcell \node (subsetting2) [box] {findDone\\ findErrors \\\ldots}; \pgfmatrixnextcell \node (subsetting3) [box] {findExperiments}; \\ \node (submit0) [rhead] {(4) Submit Jobs}; \pgfmatrixnextcell \node (submit1) {}; \pgfmatrixnextcell \node (submit2) [box] {submitJobs}; \pgfmatrixnextcell \node (submit3) {}; \\ \node (status0) [rhead] {(5) Monitor \& Debug}; \pgfmatrixnextcell \node (status1) {}; \pgfmatrixnextcell \node (status2) [box] {getStatus \\ testJob \\ showLog \\ grepLogs}; \pgfmatrixnextcell \node (status3) [box] {summarizeExperiments}; \\ \node (collect0) [rhead] {(6) Collect Results}; \pgfmatrixnextcell \node (collect1) {}; \pgfmatrixnextcell \node (collect2) [box] {loadResult \\ reduceResults \\ reduceResults[List|DataTable]}; \pgfmatrixnextcell \node (collect3) {}; \\ }; \begin{pgfonlayer}{background} \node [bg, fit=(chead0) (collect0)] {}; \node [bg, fit=(chead0) (chead3)] {}; \end{pgfonlayer} \end{tikzpicture} \end{document} batchtools/vignettes/batchtools.Rmd0000644000176200001440000011511613543336703017246 0ustar liggesusers--- title: "batchtools" output: pdf_document: toc: true urlcolor: blue linkcolor: blue vignette: > %\VignetteIndexEntry{batchtools} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r,include = FALSE, cache = FALSE} library(batchtools) library(data.table) # for %chin%, data.table options(batchtools.progress = FALSE, datatable.print.class = TRUE, batchtools.timestamps = FALSE) if (identical(Sys.getenv("IN_PKGDOWN"), "true")) { tmp_dir = fs::path(dirname(tempdir()), "batchtools-vignette") if (fs::dir_exists(tmp_dir)) fs::dir_delete(tmp_dir) fs::file_temp_push(fs::path(tmp_dir, 
letters)) } ``` # Setup ## Cluster Functions The communication with the batch system is managed via so-called cluster functions. They are created with the constructor [makeClusterFunctions](https://mllg.github.io/batchtools/reference/makeClusterFunctions) which defines how jobs are submitted on your system. Furthermore, you may provide functions to list queued/running jobs and to kill jobs. Usually you do not have to start from scratch but can just use one of the cluster functions which ship with the package: * Interactive Cluster Functions (default): [docs](https://mllg.github.io/batchtools/reference/makeClusterFunctionsInteractive), [implementation](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsInteractive.R) * Multicore Cluster Functions: [docs](https://mllg.github.io/batchtools/reference/makeClusterFunctionsMulticore), [implementation](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsMulticore.R) * Socket Cluster Functions: [docs](https://mllg.github.io/batchtools/reference/makeClusterFunctionsSocket), [implementation](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsSocket.R) * Makeshift SSH cluster: [docs](https://mllg.github.io/batchtools/reference/makeClusterFunctionsSSH), [implementation](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsSSH.R) * Docker Swarm: [docs](https://mllg.github.io/batchtools/reference/makeClusterFunctionsDocker), [implementation](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsDocker.R) * IBM Spectrum Load Sharing Facility (LSF): [docs](https://mllg.github.io/batchtools/reference/makeClusterFunctionsLSF), [implementation](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsLSF.R) * OpenLava: [docs](https://mllg.github.io/batchtools/reference/makeClusterFunctionsOpenLava), [implementation](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsOpenLava.R) * Univa Grid Engine / Oracle Grid Engine (OGE) / Sun Grid Engine (SGE): 
[docs](https://mllg.github.io/batchtools/reference/makeClusterFunctionsSGE), [implementation](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsSGE.R) * Slurm: [docs](https://mllg.github.io/batchtools/reference/makeClusterFunctionsSlurm), [implementation](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsSlurm.R) * TORQUE/OpenPBS: [docs](https://mllg.github.io/batchtools/reference/makeClusterFunctionsTORQUE), [implementation](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsTORQUE.R) To use the package with the socket cluster functions, you would call the respective constructor [makeClusterFunctionsSocket()](https://mllg.github.io/batchtools/reference/makeClusterFunctionsSocket): ```{r, message=FALSE} reg = makeRegistry(NA) reg$cluster.functions = makeClusterFunctionsSocket(2) ``` To make this selection permanent for this registry, save the Registry with [saveRegistry()](https://mllg.github.io/batchtools/reference/makeRegistry). To make your cluster function selection permanent for a specific system across R sessions for all new Registries, you can set up a configuration file (see below). If you have trouble debugging your cluster functions, you can enable the debug mode for extra output. To do so, install the [debugme package](https://cran.r-project.org/package=debugme) and set the environment variable `DEBUGME` to `batchtools` before you load the `batchtools` package: ```{r,eval=FALSE} Sys.setenv(DEBUGME = "batchtools") library(batchtools) ``` ## Template Files Many cluster functions require a template file as argument. These templates are used to communicate with the scheduler and contain placeholders to evaluate arbitrary R expressions. Internally, the [brew package](https://cran.r-project.org/package=brew) is used for this purpose. Some exemplary template files can be found [here](https://github.com/mllg/batchtools/tree/master/inst/templates). 
It would be great if you would help expand this collection to cover more exotic configurations. To do so, please send your template via [mail](mailto:michellang@gmail.com) or open a new pull request. Note that all variables defined in a [JobCollection](https://mllg.github.io/batchtools/reference/JobCollection) can be used inside the template. If you need to pass extra variables, you can set them via the argument `resources` of [`submitJobs()`](https://mllg.github.io/batchtools/reference/submitJobs). If the flexibility which comes with templating is not sufficient, you can still construct a custom cluster function implementation yourself using the provided [constructor](https://mllg.github.io/batchtools/reference/makeClusterFunctions). ## Configuration File The configuration file can be used to set system specific options. Its default location depends on the operating system (see [Registry](https://mllg.github.io/batchtools/reference/makeRegistry)), but for the first time setup you can put one in the current working directory (as reported by `getwd()`). In order to set the cluster function implementation, you would generate a file with the following content: ```{r,eval = FALSE} cluster.functions = makeClusterFunctionsInteractive() ``` The configuration file is parsed whenever you create or load a [Registry](https://mllg.github.io/batchtools/reference/makeRegistry). It is sourced inside of your registry which has the advantage that you can (a) access all of the parameters which are passed to [makeRegistry](https://mllg.github.io/batchtools/reference/makeRegistry) and (b) you can also directly change them. 
Lets say you always want your working directory in your home directory and you always want to load the `checkmate` package on the nodes, you can just append these lines: ```{r, eval = FALSE} work.dir = "~" packages = union(packages, "checkmate") ``` See the documentation on [Registry](https://mllg.github.io/batchtools/reference/makeRegistry) for a more complete list of supported configuration options. # Migration from `BatchJobs`/`Batchexperiments` The development of [BatchJobs](https://github.com/tudo-r/BatchJobs/) and [BatchExperiments](https://github.com/tudo-r/Batchexperiments) is discontinued because of the following reasons: * Maintainability: The packages [BatchJobs](https://github.com/tudo-r/BatchJobs/) and [BatchExperiments](https://github.com/tudo-r/Batchexperiments) are tightly connected which makes maintaining difficult. Changes have to be synchronized and tested against the current CRAN versions for compatibility. Furthermore, BatchExperiments violates CRAN policies by calling internal functions of BatchJobs. * Data base issues: Although we invested weeks to mitigate issues with locks of the SQLite data base or file system (staged queries, file system timeouts, ...), BatchJobs kept working unreliable on some systems with high latency or specific file systems. This made BatchJobs unusable for many users. [BatchJobs](https://github.com/tudo-r/BatchJobs/) and [BatchExperiments](https://github.com/tudo-r/Batchexperiments) will remain on CRAN, but new features are unlikely to be ported back. ## Internal Changes * batchtools does not use SQLite anymore. Instead, all the information is stored directly in the registry using [data.tables](https://cran.r-project.org/package=data.table) acting as an in-memory database. As a side effect, many operations are much faster. * Nodes do not have to access the registry. 
[submitJobs()](https://mllg.github.io/batchtools/reference/submitJobs) stores a temporary object of type [JobCollection](https://mllg.github.io/batchtools/reference/JobCollection) on the file system which holds all the information necessary to execute a chunk of jobs via [doJobCollection()](https://mllg.github.io/batchtools/reference/doJobCollection) on the node. This avoids file system locks because each job accesses only one file exclusively. * `ClusterFunctionsMulticore` now uses the parallel package for multicore execution. * `ClusterFunctionsSSH` can still be used to emulate a scheduler-like system which respects the work load on the local machine. Setting the hostname to `"localhost"` just strips out `ssh` of the command issued. ## Interface Changes * batchtools remembers the last created or loaded Registry and sets it as default registry. This way, you do not need to pass the registry around anymore. If you need to work with multiple registries simultaneously on the other hand, you can still do so by explicitly passing registries to the functions. * Most functions now return a [data.table](https://cran.r-project.org/package=data.table) which is keyed with the `job.id`. This way, return values can be joined together easily and efficient (see this [help page](https://mllg.github.io/batchtools/reference/JoinTables) for some examples). * The building blocks of a problem has been renamed from `static` and `dynamic` to the more intuitive `data` and `fun`. Thus, algorithm function should have the formal arguments `job`, `data` and `instance`. * The function `makeDesign` has been removed. Parameters can be defined by just passing a `data.frame` or `data.table` to [addExperiments](https://mllg.github.io/batchtools/reference/addExperiments). For exhaustive designs, use `data.table::CJ()`. 
## Template changes * The scheduler should directly execute the command: ``` Rscript -e 'batchtools::doJobCollection()' ``` There is no intermediate R source file like there was in `BatchJobs`. * All information stored in the object [`JobCollection`](https://mllg.github.io/batchtools/reference/JobCollection) can be accessed while brewing the template. * Extra variables may be passed via the argument `resoures` of [submitJobs](https://mllg.github.io/batchtools/reference/submitJobs). ## New features * Support for Docker Swarm via `ClusterFunctionsDocker`. * Jobs can now be tagged and untagged to provide an easy way to group them. * Some resources like the number of CPUs are now optionally passed to [parallelMap](https://cran.r-project.org/package=parallelMap). This eases nested parallelization, e.g. to use multicore parallelization on the slave by just setting a resource on the master. See [submitJobs()](https://mllg.github.io/batchtools/reference/submitJobs) for an example. * `ClusterFunctions` are now more flexible in general as they can define hook functions which will be called at certain events. [ClusterFunctionsDocker](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsDocker.R) is an example use case which implements a housekeeping routine. This routine is called every time before a job is about to get submitted to the scheduler (in the case: the Docker Swarm) via the hook `pre.submit` and every time directly after the registry synchronized jobs stored on the file system via the hook `post.sync`. * More new features are covered in the [NEWS](https://mllg.github.io/batchtools/news/index.html). ## Porting to `batchtools` The following table assists in porting to batchtools by mapping BatchJobs/BatchExperiments functions to their counterparts in batchtools. The table does not cover functions which are (a) used only internally in BatchJobs and (b) functions which have not been renamed. 
| BatchJobs | batchtools | | ------------- | :-------------: | | `addRegistryPackages` | Set `reg$packages` or `reg$namespaces`, call [saveRegistry()](https://mllg.github.io/batchtools/reference/saveRegistry) | | `addRegistrySourceDirs` | - | | `addRegistrySourceFiles` | Set `reg$source`, call [saveRegistry()](https://mllg.github.io/batchtools/reference/saveRegistry) | | `batchExpandGrid` | [batchMap](https://mllg.github.io/batchtools/reference/batchMap): `batchMap(..., args = CJ(x = 1:3, y = 1:10))` | | `batchMapQuick` | [btmapply](https://mllg.github.io/batchtools/reference/btlapply) | | `batchReduceResults` | - | | `batchUnexport` | [batchExport](https://mllg.github.io/batchtools/reference/batchExport) | | `filterResults` | - | | `getJobIds` | [findJobs](https://mllg.github.io/batchtools/reference/findJobs) | | `getJobInfo` | [getJobStatus](https://mllg.github.io/batchtools/reference/getJobTable) | | `getJob` | [makeJob](https://mllg.github.io/batchtools/reference/JobExperiment) | | `getJobParamDf` | [getJobPars](https://mllg.github.io/batchtools/reference/getJobTable) | | `loadResults` | [reduceResultsList](https://mllg.github.io/batchtools/reference/reduceResultsList) | | `reduceResultsDataFrame` | [reduceResultsDataTable](https://mllg.github.io/batchtools/reference/reduceResultsList) | | `reduceResultsMatrix` | [reduceResultsList](https://mllg.github.io/batchtools/reference/reduceResultsList) + `do.call(rbind, res)` | | `reduceResultsVector` | [reduceResultsDataTable](https://mllg.github.io/batchtools/reference/reduceResultsList) | | `setJobFunction` | - | | `setJobNames` | - | | `showStatus` | [getStatus](https://mllg.github.io/batchtools/reference/getStatus) | # Example 1: Approximation of $\pi$ To get a first insight into the usage of `batchtools`, we start with an exemplary Monte Carlo simulation to approximate $\pi$. For background information, see [Wikipedia](https://en.wikipedia.org/wiki/Monte_Carlo_method). 
First, a so-called registry object has to be created, which defines a directory where all relevant information, files and results of the computational jobs will be stored. There are two different types of registry objects: First, a regular [`Registry`](https://mllg.github.io/batchtools/reference/makeRegistry) which we will use in this example. Second, an [`ExperimentRegistry`](https://mllg.github.io/batchtools/reference/makeExperimentRegistry) which provides an alternative way to define computational jobs and thereby is tailored for a broad range of large scale computer experiments (see, for example, [this vignette](ExampleExperiment.html)). Here, we use a temporary registry which is stored in the temp directory of the system and gets automatically deleted if you close the R session. ```{r, message = FALSE} reg = makeRegistry(file.dir = NA, seed = 1) ``` For a permanent registry, set the `file.dir` to a valid path. It can then be reused later, e.g., when you login to the system again, by calling the function `loadRegistry(file.dir)`. When a registry object is created or loaded, it is stored for the active R session as the default. Therefore the argument `reg` will be ignored in functions calls of this example, assuming the correct registry is set as default. To get the current default registry, [`getDefaultRegistry`](https://mllg.github.io/batchtools/reference/getDefaultRegistry) can be used. To switch to another registry, use [`setDefaultRegistry()`](https://mllg.github.io/batchtools/reference/getDefaultRegistry). First, we create a function which samples $n$ points $(x_i, y_i)$ whereas $x_i$ and $y_i$ are distributed uniformly, i.e. $x_i, y_i \sim \mathcal{U}(0,1)$. Next, the distance to the origin $(0, 0)$ is calculated and the fraction of points in the unit circle ($d \leq 1$) is returned. 
```{r} piApprox = function(n) { nums = matrix(runif(2 * n), ncol = 2) d = sqrt(nums[, 1]^2 + nums[, 2]^2) 4 * mean(d <= 1) } set.seed(42) piApprox(1000) ``` We now parallelize `piApprox()` with `batchtools`: We create 10 jobs, each doing a MC simulation with $10^5$ jobs. We use [`batchMap()`](https://mllg.github.io/batchtools/reference/batchMap) to define the jobs (note that this does not yet start the calculation): ```{r} batchMap(fun = piApprox, n = rep(1e5, 10)) ``` The length of the vector or list defines how many different jobs are created, while the elements itself are used as arguments for the function. The function `batchMap(fun, ...)` works analogously to `Map(f, ...)` of the base package. An overview over the jobs and their IDs can be retrieved with [`getJobTable()`](https://mllg.github.io/batchtools/reference/getJobTable) which returns a data.frame with all relevant information: ```{r} names(getJobTable()) ``` Note that a unique job ID is assigned to each job. These IDs can be used to restrict operations to subsets of jobs. To actually start the calculation, call [`submitJobs()`](https://mllg.github.io/batchtools/reference/submitJobs). The registry and the selected job IDs can be taken as arguments as well as an arbitrary list of resource requirements, which are to be handled by the cluster back end. ```{r} submitJobs(resources = list(walltime = 3600, memory = 1024)) ``` In this example, a cap for the execution time (so-called walltime) and for the maximum memory requirements are set. The progress of the submitted jobs can be checked with [`getStatus()`](https://mllg.github.io/batchtools/reference/getStatus). ```{r} getStatus() ``` The resulting output includes the number of jobs in the registry, how many have been submitted, have started to execute on the batch system, are currently running, have successfully completed, and have terminated due to an R exception. After jobs have successfully terminated, we can load their results on the master. 
This can be done in a simple fashion by using either [`loadResult()`](https://mllg.github.io/batchtools/reference/loadResult), which returns a single result exactly in the form it was calculated during mapping, or by using [`reduceResults()`](https://mllg.github.io/batchtools/reference/reduceResults), which is a version of `Reduce()` from the base package for registry objects. ```{r} waitForJobs() mean(sapply(1:10, loadResult)) reduceResults(function(x, y) x + y) / 10 ``` If you are absolutely sure that your function works, you can take a shortcut and use *batchtools* in an `lapply` fashion using [`btlapply()`](https://mllg.github.io/batchtools/reference/btlapply). This function creates a temporary registry (but you may also pass one yourself), calls [`batchMap()`](https://mllg.github.io/batchtools/reference/reduceResultsList), wait for the jobs to terminate with [`waitForJobs()`](https://mllg.github.io/batchtools/reference/waitForJobs) and then uses [`reduceResultsList()`](https://mllg.github.io/batchtools/reference/reduceResultsList) to return the results. ```{r, R.options=list(batchtools.verbose=FALSE)} res = btlapply(rep(1e5, 10), piApprox) mean(unlist(res)) ``` # Example 2: Machine Learning We stick to a rather simple, but not unrealistic example to explain some further functionalities: Applying two classification learners to the famous iris data set (Anderson 1935), vary a few hyperparameters and evaluate the effect on the classification performance. First, we create a registry, the central meta-data object which records technical details and the setup of the experiments. We use an [`ExperimentRegistry`](https://mllg.github.io/batchtools/reference/makeExperimentRegistry) where the job definition is split into creating problems and algorithms. See the paper on [BatchJobs and BatchExperiments](http://www.jstatsoft.org/article/view/v064i11) for a detailed explanation. Again, we use a temporary registry and make it the default registry. 
```{r, message = FALSE} library(batchtools) reg = makeExperimentRegistry(file.dir = NA, seed = 1) ``` ## Problems and Algorithms By adding a problem to the registry, we can define the data on which certain computational jobs shall work. This can be a matrix, data frame or array that always stays the same for all subsequent experiments. But it can also be of a more dynamic nature, e.g., subsamples of a dataset or random numbers drawn from a probability distribution . Therefore the function [`addProblem()`](https://mllg.github.io/batchtools/reference/addProblem) accepts static parts in its `data` argument, which is passed to the argument `fun` which generates a (possibly stochastic) problem instance. For `data`, any R object can be used. If only `data` is given, the generated instance is `data`. The argument `fun` has to be a function with the arguments `data` and `job` (and optionally other arbitrary parameters). The argument `job` is an object of type [`Job`](https://mllg.github.io/batchtools/reference/JobExperiment) which holds additional information about the job. We want to split the iris data set into a training set and test set. In this example we use use subsampling which just randomly takes a fraction of the observations as training set. We define a problem function which returns the indices of the respective training and test set for a split with `100 * ratio`% of the observations being in the test set: ```{r} subsample = function(data, job, ratio, ...) { n = nrow(data) train = sample(n, floor(n * ratio)) test = setdiff(seq_len(n), train) list(test = test, train = train) } ``` [`addProblem()`](https://mllg.github.io/batchtools/reference/addProblem) files the problem to the file system and the problem gets recorded in the registry. ```{r} data("iris", package = "datasets") addProblem(name = "iris", data = iris, fun = subsample, seed = 42) ``` The function call will be evaluated at a later stage on the workers. 
In this process, the `data` part will be loaded and passed to the function. Note that we set a problem seed to synchronize the experiments in the sense that the same resampled training and test sets are used for the algorithm comparison in each distinct replication. The algorithms for the jobs are added to the registry in a similar manner. When using [`addAlgorithm()`](https://mllg.github.io/batchtools/reference/addAlgorithm), an identifier as well as the algorithm to apply to are required arguments. The algorithm must be given as a function with arguments `job`, `data` and `instance`. Further arbitrary arguments (e.g., hyperparameters or strategy parameters) may be defined analogously as for the function in `addProblem`. The objects passed to the function via `job` and `data` are here the same as above, while via `instance` the return value of the evaluated problem function is passed. The algorithm can return any R object which will automatically be stored on the file system for later retrieval. Firstly, we create an algorithm which applies a support vector machine: ```{r} svm.wrapper = function(data, job, instance, ...) { library("e1071") mod = svm(Species ~ ., data = data[instance$train, ], ...) pred = predict(mod, newdata = data[instance$test, ], type = "class") table(data$Species[instance$test], pred) } addAlgorithm(name = "svm", fun = svm.wrapper) ``` Secondly, a random forest of classification trees: ```{r} forest.wrapper = function(data, job, instance, ...) { library("ranger") mod = ranger(Species ~ ., data = data[instance$train, ], write.forest = TRUE) pred = predict(mod, data = data[instance$test, ]) table(data$Species[instance$test], pred$predictions) } addAlgorithm(name = "forest", fun = forest.wrapper) ``` Both algorithms return a confusion matrix for the predictions on the test set, which will later be used to calculate the misclassification rate. 
Note that using the `...` argument in the wrapper definitions allows us to circumvent naming specific design parameters for now. This is an advantage if we later want to extend the set of algorithm parameters in the experiment. The algorithms get recorded in the registry and the corresponding functions are stored on the file system. Defined problems and algorithms can be queried with: ```{r} reg$problems reg$algorithms ``` The flow to define experiments is summarized in the following figure: ```{r,echo=FALSE} knitr::include_graphics("tikz_prob_algo_simple.png", auto_pdf = TRUE) ``` ## Creating jobs [`addExperiments()`](https://mllg.github.io/batchtools/reference/addExperiments) is used to parametrize the jobs and thereby define computational jobs. To do so, you have to pass named lists of parameters to [`addExperiments()`](https://mllg.github.io/batchtools/reference/addExperiments). The elements of the respective list (one for problems and one for algorithms) must be named after the problem or algorithm they refer to. The data frames contain parameter constellations for the problem or algorithm function where columns must have the same names as the target arguments. When the problem design and the algorithm design are combined in [`addExperiments()`](https://mllg.github.io/batchtools/reference/addExperiments), each combination of the parameter sets of the two designs defines a distinct job. How often each of these jobs should be computed can be determined with the argument `repls`. 
```{r} # problem design: try two values for the ratio parameter pdes = list(iris = data.table(ratio = c(0.67, 0.9))) # algorithm design: try combinations of kernel and epsilon exhaustively, # try different number of trees for the forest ades = list( svm = CJ(kernel = c("linear", "polynomial", "radial"), epsilon = c(0.01, 0.1)), forest = data.table(ntree = c(100, 500, 1000)) ) addExperiments(pdes, ades, repls = 5) ``` The jobs are now available in the registry with an individual job ID for each. The function [`summarizeExperiments()`](https://mllg.github.io/batchtools/reference/summarizeExperiments) returns a table which gives a quick overview over all defined experiments. ```{r} summarizeExperiments() summarizeExperiments(by = c("problem", "algorithm", "ratio")) ``` ## Before Submitting Before submitting all jobs to the batch system, we encourage you to test each algorithm individually. Or sometimes you want to submit only a subset of experiments because the jobs vastly differ in runtime. Another reoccurring task is the collection of results for only a subset of experiments. For all these use cases, [`findExperiments()`](https://mllg.github.io/batchtools/reference/findJobs) can be employed to conveniently select a particular subset of jobs. It returns the IDs of all experiments that match the given criteria. Your selection can depend on substring matches of problem or algorithm IDs using `prob.name` or `algo.name`, respectively. You can also pass R expressions, which will be evaluated in your problem parameter setting (`prob.pars`) or algorithm parameter setting (`algo.pars`). The expression is then expected to evaluate to a Boolean value. Furthermore, you can restrict the experiments to specific replication numbers. To illustrate [`findExperiments()`](https://mllg.github.io/batchtools/reference/findJobs), we will select two experiments, one with a support vector machine and the other with a random forest and the parameter `ntree = 1000`. 
The selected experiment IDs are then passed to testJob. ```{r} id1 = head(findExperiments(algo.name = "svm"), 1) print(id1) id2 = head(findExperiments(algo.name = "forest", algo.pars = (ntree == 1000)), 1) print(id2) testJob(id = id1) testJob(id = id2) ``` If something goes wrong, `batchtools` comes with a bunch of useful debugging utilities (see separate vignette on error handling). If everything turns out fine, we can proceed with the calculation. ## Submitting and Collecting Results To submit the jobs, we call [`submitJobs()`](https://mllg.github.io/batchtools/reference/submitJobs) and wait for all jobs to terminate using [`waitForJobs()`](https://mllg.github.io/batchtools/reference/waitForJobs). ```{r} submitJobs() waitForJobs() ``` After jobs are finished, the results can be collected with [`reduceResultsDataTable()`](https://mllg.github.io/batchtools/reference/reduceResultsList) where we directly extract the mean misclassification error: ```{r} reduce = function(res) list(mce = (sum(res) - sum(diag(res))) / sum(res)) results = unwrap(reduceResultsDataTable(fun = reduce)) head(results) ``` Next, we merge the results table with the table of job parameters using one of the [join helpers](https://mllg.github.io/batchtools/reference/JoinTables) provided by `batchtools` (here, we use an inner join): ```{r} pars = unwrap(getJobPars()) tab = ijoin(pars, results) head(tab) ``` We now aggregate the results group-wise. You can use [`data.table`](https://cran.r-project.org/package=data.table), `base::aggregate()`, or the [`dplyr`](https://cran.r-project.org/package=dplyr) package for this purpose. Here, we use [`data.table`](https://cran.r-project.org/package=data.table) to subset the table to jobs where the ratio is `0.67` and group by algorithm the algorithm hyperparameters: ```{r} tab[ratio == 0.67, list(mmce = mean(mce)), by = c("algorithm", "kernel", "epsilon", "ntree")] ``` # Example: Error Handling In any large scale experiment many things can and will go wrong. 
The cluster might have an outage, jobs may run into resource limits or crash, subtle bugs in your code could be triggered or any other error condition might arise. In these situations it is important to quickly determine what went wrong and to recompute only the minimal number of required jobs. Therefore, before you submit anything you should use [`testJob()`](https://mllg.github.io/batchtools/reference/testJob) to catch errors that are easy to spot because they are raised in many or all jobs. If `external` is set, this function runs the job without side effects in an independent R process on your local machine via `Rscript` similar as on the slave, redirects the output of the process to your R console, loads the job result and returns it. If you do not set `external`, the job is executed is in the currently running R session, with the drawback that you might be unable to catch missing variable declarations or missing package dependencies. By way of illustration here is a small example. First, we create a temporary registry. ```{r, message = FALSE} library(batchtools) reg = makeRegistry(file.dir = NA, seed = 1) ``` Ten jobs are created, one will trow a warning and two of them will raise an exception. ```{r} flakeyFunction <- function(value) { if (value == 5) warning("Just a simple warning") if (value %in% c(2, 9)) stop("Ooops.") value^2 } batchMap(flakeyFunction, 1:10) ``` Now that the jobs are defined, we can test jobs independently: ```{r} testJob(id = 1) ``` In this case, testing the job with ID = 1 provides the appropriate result but testing the job with ID = 2 leads to an error: ```{r} as.character(try(testJob(id = 2))) ``` We ignore the error here, and just assume everything looks fine and submit all jobs. 
```{r} submitJobs() waitForJobs() ``` After you have submitted jobs and suspect that something is going wrong, the first thing to do is to run [`getStatus()`](https://mllg.github.io/batchtools/reference/getStatus) to display a summary of the current state of the system. ```{r} getStatus() ``` The status message shows that two of the jobs could not be executed successfully. To get the IDs of all jobs that failed due to an error we can use [`findErrors()`](https://mllg.github.io/batchtools/reference/findJobs) and to retrieve the actual error message, we can use [`getErrorMessages()`](https://mllg.github.io/batchtools/reference/getErrorMessages). ```{r} findErrors() getErrorMessages() ``` If we want to peek into the R log file of a job to see more context for the error we can use [`showLog()`](https://mllg.github.io/batchtools/reference/showLog) which opens a pager or use [`getLog()`](https://mllg.github.io/batchtools/reference/showLog) to get the log as character vector: ```{r} tail(getLog(id = 9)) ``` You can also grep for messages (output suppressed in this vignette for technical reasons): ```{r,eval=FALSE} grepLogs(pattern = "simple", ignore.case = TRUE) ``` # Workflow ## On the Local System 1. Create a Registry with [`makeRegistry()`](https://mllg.github.io/batchtools/reference/makeRegistry) (or [`makeExperimentRegistry()`](https://mllg.github.io/batchtools/reference/makeExperimentRegistry)) or load an existing from the file system with [`loadRegistry()`](https://mllg.github.io/batchtools/reference/loadRegistry). 2. 
Define computational jobs with [`batchMap()`](https://mllg.github.io/batchtools/reference/batchMap) or [`batchReduce()`](https://mllg.github.io/batchtools/reference/batchReduce) if you used [`makeRegistry()`](https://mllg.github.io/batchtools/reference/makeRegistry) or define with [`addAlgorithm()`](https://mllg.github.io/batchtools/reference/addAlgorithm), [`addProblem()`](https://mllg.github.io/batchtools/reference/addProblem) and [`addExperiments()`](https://mllg.github.io/batchtools/reference/addExperiments) if you started with [`makeExperimentRegistry()`](https://mllg.github.io/batchtools/reference/makeExperimentRegistry). It is advised to test some jobs with [`testJob()`](https://mllg.github.io/batchtools/reference/testJob) in the interactive session and with `testJob(external = TRUE)` in a separate R process. Note that you can add additional jobs if you are using an [`ExperimentRegistry`](https://mllg.github.io/batchtools/reference/makeExperimentRegistry). 3. If required, query the data base for job ids depending on their status, parameters or tags (see [`findJobs()`](https://mllg.github.io/batchtools/reference/findJobs)). The returned tables can easily be combined in a set-like fashion with data base verbs: union ([`ojoin()`](https://mllg.github.io/batchtools/reference/JoinTables) for outer join), intersect ([`ijoin()`](https://mllg.github.io/batchtools/reference/JoinTables) for inner join), difference ([`ajoin()`](https://mllg.github.io/batchtools/reference/JoinTables) for anti join). 4. Submit jobs with [`submitJobs()`](https://mllg.github.io/batchtools/reference/submitJobs). You can specify job resources here. If you have thousands of fast terminating jobs, you want to [`chunk()`](https://mllg.github.io/batchtools/reference/chunk) them first. 
If some jobs already terminated, you can estimate the runtimes with [`estimateRuntimes()`](https://mllg.github.io/batchtools/reference/estimateRuntimes) and chunk jobs into heterogeneous groups with [`lpt()`](https://mllg.github.io/batchtools/reference/chunk) and [`binpack()`](https://mllg.github.io/batchtools/reference/chunk). 5. Monitor jobs. [`getStatus()`](https://mllg.github.io/batchtools/reference/getStatus) gives a summarizing overview. Use [`showLog()`](https://mllg.github.io/batchtools/reference/showLog) and [`grepLogs()`](https://mllg.github.io/batchtools/reference/grepLogs) to investigate log file. Run jobs in the currently running session with [`testJob()`](https://mllg.github.io/batchtools/reference/testJob) to get a `traceback()`. 6. Collect (partial) results. [`loadResult()`](https://mllg.github.io/batchtools/reference/loadResult) retrieves a single result from the file system. [`reduceResults()`](https://mllg.github.io/batchtools/reference/reduceResults) mimics `Reduce()` and allows to apply a function to many files in an iterative fashion. [`reduceResultsList()`](https://mllg.github.io/batchtools/reference/reduceResultsList) and [`reduceResultsDataTable()`](https://mllg.github.io/batchtools/reference/reduceResultsDataTable) collect results into a `list` or `data.table`, respectively. ```{r,echo=FALSE} knitr::include_graphics("function_overview.png", auto_pdf = TRUE) ``` ## On Multiple Systems Most users develop and prototype their experiments on a desktop box in their preferred IDE and later deploy to a large computing cluster. This can be done by prototyping locally ([`testJob()`](https://mllg.github.io/batchtools/reference/testJob) or submit subsets via [`submitJobs()`](https://mllg.github.io/batchtools/reference/submitJobs)). To deploy to the cluster, just copy the file directory (as reported by `reg$file.dir`) to the remote system. 
Next, log in on the cluster (typically via `ssh`), `cd` to the copied directory and call `loadRegistry("<file.dir>", writeable = TRUE)`. This function will (a) source the local configuration file so that you can talk to the cluster (verify by checking the output of `reg$cluster.functions`) and (b) adjust the paths to the new system if argument `update.paths` is set. After loading the Registry, it is advised to test some jobs again with [`testJob()`](https://mllg.github.io/batchtools/reference/testJob) before submitting all of them with `submitJobs(resources = list())` (remember you now need to set resources!). After some jobs are finished, the `file.dir` can be copied back (do not merge with the previous directory!) and loaded again with [`loadRegistry()`](https://mllg.github.io/batchtools/reference/loadRegistry). This approach is totally viable as long as some general rules are followed: 1. Make sure you have all packages installed. Package versions can be synchronized across machines with [`checkpoint`](https://cran.r-project.org/package=checkpoint) or [`packrat`](https://cran.r-project.org/package=packrat). 2. Test jobs on the remote system prior to submitting to ensure that paths are resolved correctly. 3. Make sure you have set the cluster functions in a configuration file, and stick to one backend as long as jobs are running. 4. The status can only be monitored on the remote system (for obvious reasons). 5. Partial results can be inspected both on the remote system and on the local system. For the latter, you need to copy over the **complete** `file.dir` first. Overwriting/merging directories is not advised as this may lead to inconsistencies if you added or removed experiments on the remote. If you have to merge, use `rsync` with option `--delete`. Load the registry locally with [`loadRegistry()`](https://mllg.github.io/batchtools/reference/loadRegistry) and collect results. Do not copy back and forth. 6. 
Avoid accessing the `file.dir` with multiple sessions simultaneously. This includes accessing the registry via a mount! Simultaneous access may lead to inconsistencies and missing results. batchtools/vignettes/function_overview.png0000644000176200001440000031122013156705617020717 0ustar liggesusersPNG  IHDR 0IEUgAMA a cHRMz&u0`:pQ<bKGD̿ pHYs,,sRtIME*9᜘IDATxMl#kpqc[ՍI11[C00xa`L:q)L:Eti#.Rd7kt vS007u.\T7lY6F^` 2$D'yp}J*J|zIR_Ç< )>XjҊvlzZ.דiE}dE`Řey]O /d Jkk(Q',3_6_s-™=ܟr`^Bs%MXką>]gr 46?YhOGyhu4ҞUȻ ;خⓏ0|8MoL1.3,K*0"g@w߫ɻH6S@IRYowqV8X`;"--a7e?w`y_4d+.N`tۧݟ8͒4qj_!b rQoe`\ч ޝ, q;K'f+t/pO86N_ԭ𛴋L$m$Iz'i{zYF[leR>zSL/ *P:ӥyRwY"zw2R3i(x^-Wz鶓FyKʖ"7lf=i5sӺSF `3 љt,Z`s7aqޅY)b@+|J+&H#xG, }JBHt:Z?S=ץ.u;]Rgz/t湕@!aG[YHڵ e[>-EZ'V/.ҟk*]WX7_Rg0ĺ}Je},uQg7TU}N#]#k}6 ̉:SXb-" S\|h[(1_9IE\)2.@ǗxKK\ `l2oui) 5u-EiF`9>ؾOiNEvX wz=^ZU&{JҮu%RbD0 ='|Xz3WY7niXgTv.<[xlK"EuP`><-ќ .=2HRb|H ˻̝F ʞTs3[ F<НFʁ\a[fyRxYJ|~dBda)CQeS #2Qf> `yO%{ʃ aRNcl~&ɔMU3-6iy[*C6uK[-e.}+<+O3ќݝuH/0r>^x8eP#FisIé-||6Ĭĝ:CUCnn?K`}1)x^nB )@TgGn9',cxI߷ukt71תhFhZ|oq[7jKq髹mym`휚o=}9&ӭs&oq/U<7p8{/ĥV t/%IX]뛐̜9J-ݩR#\U=@e@a:=$+=oJֵ:z+HO5֑ʺH#=w/˲i=%ĞQϑt9($MKG oN0X#hC=ӎkI#]-2[9v8hO'Ru0p0@/5u0-<5m)fVx5o4'>x~\ǭ,3yEe96I3n6\^˺,7+'?~dot( U;SN_z. 
F; VI3;oA=O'sG, cut_8QY:2]:!e,q/'WY\S6{ 2&5ItNON#V3M圿ޑ>靻);j&6XMHu:ngKI3΅ÐEWI8̶9VgwN:Vݳӻ@tuWpf[.ZqLjE`$#osۻ4.xx;M./bϒC7Xu-Kdd#]] n[>$ڬ9'gNK%dU#uCc}oB;j&6X^W1iOܸi9ȴ1ZU > l)M/f'$ͻQD7o8㺅?DѪ Of[/Wcu KM^3YOW]Z!MIњ(!׵%oקt-+$/0Ѐ[DO6TnƝGtoo4ބ:Cҝqzo}yuOoF twL"Iϵ.x//V8%y,(캖g1W6<A+ÝuCZ6M]Yo"nop36S3wz=ua1'2p%Ir~ ]׾'Zu}e7 N%BPNgSSTU5Su05re427iN`-hhfc=׹yx0ۦYUu{˙&ie]WI m[sc'y!BZ9{"q^wrf]&9em?%#]RB_y9b-i^܌t->P[Uog=ݘN hS,wYyݧ7[,4{ 8o4GY9H*eYeQֹ2sIK kPs:M-t%dγqg6@Ly>%xR]ّF꨺v~q?I^zߥ#&U:o⥻Ű.N+e;:;i]F,fa9],N E6( OEET\qѼkrKt^4.r~oሥJ 擽mڧTЍ{:4i݊w;ݹjy܁m76a/"НnPYN=/Nol[Uv>GΨZ k{=oYm Wַt=RŸ9n%'9!_0չl5z/t\^7:y=mƧTD4x634Mrl3Yn.ݟE,Qx"U9ǀwi8iE&R3ʮ]"~lls Oc][+g›Zio7S=ץt츚5{w6&CzF'>c\1zJpiGc|-zs=ɏ>OBL/,<1֙hiqؼe|\h)K.pkH J;:K^wz;OGA_ꫜRvs렽)<][vXCwtÌ>OR9K)g8 a@C/Gv[ߜ:i;E;z.DߚX_g:^feKqg˥>@#@lBOeܛ$/N4-+zSu֣S^=ԡ/g`̞kGGh^E`oF#}rI3.$vQ{=tsz7 מU ֩nYzӍX˻&3ӷ;=Ix1mcfin)=c˘_s{|k$C@^EeOu_.θadH:k7suČqy eV٦溥E+B\;Nr+ =_6Bs8 @)|\U{b1\(6Y78"dV٦MZsH`$3/͎.%r3 oTSa3yO[>tKovB~qMfD/#lw"$Y gb%ϑyFZ]3).Mf-{Άg1/4&8]V٦ȣnh)F֋Vx:9—`dFɟ?uhr+y^azƛwdG濗L3GЬ5d[oC6!>eux3w/zy<֙u3sL_Jz=3Vg>O)8礫{8nt|':RQ:xt|;%Lݨ4FM1 |H#}^HFSn7m{Je}HגnTֵ!Iߛa6S ?Γ2?4M,} Sk<G*h@w >0{xs `yF|pKwWgG59ם:zK9њq^3+浑f{oZ>[]e9']/;UO.dxmiv,b?gk'LqLư)=i!T^KjSZOɯ:y&d@إt>/վ: y}Bvt8uf t=YûXg۠Zelu˳O+|+m/B6\23n :RYO5ѵSz^{Ɉ8n{~i{ t1FV5AA7dDwtf+,weOlGۅV/rOw:lEsl4f0IU `1l{zd}2TzfNå^Йk;~5ܩV1 a], _)^" ȅs+:䧼öS4f^X jws] 4TСfH׺;<瓂Tuk$:B+|bt&6Jϼ5Ӂ$iO˻;h?zz X <.%6}SPg`eT#H/HxrQйKueN0?1oS7vZ E B!̂M<:VVYc((#HHU̸5sh,7}r͟ґCIwzQ,^#LgΩ ZKӍX.IWdh:&ߞ ,i OSYS.`{:sEVF!BNdi`.,{*0^X@#U@ ]GãJdVxGI,7!B`sFzq%Qz{&daf01|x"" q 0pt(EғEvXӻ`,JH͇4E@aEo#zЊ8c0pXz$Gm)G_`v"`vWs"8jL.y@{A#{2 /K.IvE.i=|p" KYo F`8"뫙帋^mf`Ze#V\0ۧ.Jm2TW# ꩷`6Y_=[MiM2PR_RO~!`z.X]#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H/.䮦j] `S_ݼyU_= > J]{wf蔆zjw!XCE]bS91o.vׅ>^BM >tz`7+鲨Jy 1Cd_8&+p\=/f1zC,Ӻ/tXR]5.䥥?N\,j$2Xz=y`q#UY$xSj@jM~k#`ytEO+ӱ7`?c0eNxĠoRRjε ^b5}6ɯ6`[JyT s'`ue=(9`[̳:tjW]:SSa0%v^Ty, `\8c=%` ݣi[%AF2i'fn]v>N qyO=*88 u԰CX_H욆="$OzN} 
k`k=U'm}/2bGمrj 9&m?g-&Ʊt#d7*蹞])bG-,5gU|ӼJ$BhkiՌ΀w1%iU댶W֍ԑ2⬲?RQTTUe.Z뢣tgn̩v?Wo^.+R3uRTE}IcIO_u⇰ 8kOpez!Ǝ.t=}o>=Rui%&c/G*ü0ցh/Di9,M6idZz-uXeI$ݫ^%`%n(Žgq9v5}&/?R՛Sp1]ܥt)$ƒR_hq8֝F :ҹtC1FGA:Y@׺v&p?"Tч˱6x.EqI{yn;EUϽe,I c%I$,FnFt'VbK~ؑnFO$=7ۿIu*14t;x:ӥ.y=)֒ts]#g{w:Xz;^jgK=זz]wf%KkR Swi%(K ٜF,eayajkrq W߽ r 0LD:I>idƵK4YF#1NFד:=L2#suf~`-BUO ֛3HMǒ.}T|] zj'>j5)MXM+SI=`x2eFW#nOx̵KeVZԥ5R-w2^$(I$WrlJ\7ؘs{ZlM4djUC^n/W1~KL=ͻ˶j,>a#^BƑz4#pXVUUDoܐ޴C~zAy<œXe.jyg'5Oe}s\e`ޤOHK b vfƺ>aH; -Jt`wk؋{ƌ3TWjzꩯ湭TS#b̅QTEMsiTT(k])%#t/}u5*hjO=I%[u,u5iZ>Jx}ߊg=+0ˣw7Qw8?~?]w;1~+qyD+in x&%QR,=Z}%i-FFfRII&GZtmEq}_{{`t}~=}e:c/y֛CVM8Lug%w&ue? Ĝ[zԸ+5ձyn}]5ick&IS3 ՚FfrW|7^:u8֩{%'<НFS9άυo@SʦtY&٧u۱,%kO5mmuuyTN&P%..\Df kYoÈ ՞{͒G;1n bvPDztB\]3T3mROoCìeۼVe?mmj8)$->)*B>'=t~KѺCYzA`ϒ:s$"fӵf :Ih O3 5FYL磙{M^ֱ^eP#yLGt8alꄔbn1,ax9޿!HRV}VҩJ)lhEszk.Ėz-b!S4s-Dm4TS]R_=5VI1jipHP]u iRzЧ*z.VyꞙfCC5̶zAR}v ߊno]4$iyv[ ӮL`NJ̦UOR;R[ew,AmUSERɔiW9َb׳$uI~׸E> cjYß; l|ſCDkx r`=g7)K{;If~}^G"1+ܟܟ+S{ym>c~G}QWZ#iz=9,U>r4e}8{߻?_v?`ÏSMU^/ot/ov;vbVbkh3UZrӅy=4Qg^Ndt E_:qۼ'$?fuVw`oZ:iw濫1oØ(zʺэxs|-^{9x&t\ߩ,;[,Pw*ZM@SG7qYri:}TugfznzJ҉ʁLsfF'4VYu.#n*4tlj1yfj7:Tgs<t*%+<XJj(խ^I/Ft96OukJڊrW;E&{q[aѵ5[>2I&)o }Sƺ쁜K#/)yi'{ު({foFOx1K_[[:I?Ejs,6p:'`a`Ǿ TU`:*{;j?Vӥ뷞u]n jE2ek0jA3‹izvH>}K?z:{:(K: )]ʓCO ^n՞o"?ƅB.ilg]4+pt^WNюcY4tl4{#|7LdVuu#U*Ms$ ]yk'&U2Uk(2AI6BB)5oX*i_6μY>arKqճ*?Zp)oㆭ_I`nd*4澽 ݶkL[@d2˴K= {&dv~>i:-8ϹՑtvmf4Mj6$<w;|K!e_7+sٗ9W{+LibuWi8؟A;^שKz>Y5(zEM ؍z [};q~+qvb/^0 IIjxhzPYyH/R}B!>>!I^6˲;r?7Kz.S;;]Yg-;H׺ԁxxk=OH79ҵ4sTK/KO@גt4y&Kάgy^ei38^d0C5d)M$S_z]m+1^F<|o=WOHSٖ}KhM?m84ҧg;1Y2Ì5;Leơ. j%mYǻg7YK%M訔#Qu^'z9z2ci4T<;.ǕV%tⒻ-8^.LRO2&q1֘ m9RVi43zz mf-dgTSS}Yj662mj4Ԕb8UKձZq緂ޤF7ự;Lqaώ#6e],hԐy_0~9-og %}0񿭖ځhtIөGEh;1n+qYĖ'SzԖt$YڏA&8i3ͻ*Pڼ',b Wmqjw @vXڰ^DeV\i~ӗt멞Rw:ӁK VFN}8z#VҔ#TΈ‹r3Hv;%9oPϴr|g\4{JNg]CO+J?itHp'^cniNIfn`[{F LL˨K^֡6yOrr 0$6/?"] лT# ]];qV+1vlI?gk!GoZvqVY[$mry ';cq'QZdD'<s[jqÍtgL.ehOeF^7эp`R?B^=oO隣ocwV1Ȱ Z? 
_!+,)lQt[I5My*,dHY?zɓ]f`eNJ̶8=#eל v8훮@ M6m^6>!olH^VՓ58̙F;;@vfrFHZ<+ƞ^YUUU]8t0Xʼz\ȗ̵g =N4+z]KM`+ŞhhɹV؋i7hN8ڦy5]ͭ񒂀uIC3 ,ۉFb̲8KK]9%39uiZladj6ٴyOؼ\9źz1RܰVTp) y5YeZ>~)nҕk#Oش#oi䥩}ܭm.MM훋Kg[gO=!1[>gj6whOAnW0^ߗ=%_Uc]Y[O߄,9[^˴wdֱ}9|)덻$rCzFO#)/Y=ݝޯi7J4ʭ+}iȞoK^@9oKW?M- iKض5?NS!Ei8 un)ȿ]t9H65vbĸ-y-d=ra2ٯ}9EqJWEo[|ksYO,}Bc}߾w9""qIiI*轧襞CLO5֑;3q-z;NOr=7SF띤RiYGq n]Ҕ~?ӁeQiVO,e SqIo]WSsզ³gzq"I3˚hdjT]AK[ 9EV3GI)g:6SX+s泋7d-j'.Jۂ$[OSs(Md[%Nq[o[|k^diڼ'$?f}> JBJ ~rc_Ӊ7$I{]E׵~ -Uu@PfoHS3nN] 3&>sW]jnRt=7%.o$#$~Ym% =4oܗy{i%{pj_=3g`Q0)%tX+mKjs+nij I5-Ц+}[44֧.Uf6/ ː>$oQ D>F {?3<='tA.dcO杪^]>foNcI'xfW*Ftr ('I]H t,LRxyyjg+u3ݤei : 4uR;iOYt#6H} E+a,)d{\M %L]Io74#`' F O`* βJ d9ƹu;ݙ7wsA:ʼ^UNw֝Fs#M2tN邎LPҷ&fS]wzƒl }%+ӲtkM^SԌurzOoT֝:dhtÈ!hy tkIprЖ5HV`{ $&)rU:Θ-Iw Vg6"<#uynAf&oM_Tc u%Yf|/9wnt;=ϰX /b!{:׹Ndi ,=t]:hNtNi̕«ZEŶN$D&?,'cUF'Q9^:Ѐ"2+ocbOi+#` 5TA}g28k`u{]s tDxɆbt?wYX$ ʻrHZb<ϨfUo$a]3}wIX9^mŚk0?3OLk` :hT;{\0-y@?==N809YKQ=7& Iof7`R&G7ik]O_v&/_GFon-KnqI?a~>`L*߄.BN#`tnd1ȍ{t),C7 F3$XM=q4G} LJ@zz>k`k-54_mDh‹]a^|? ~}) vY~EUѕ*/%UQMżK윮yF'ֿWQ./+Sԕ_j)RjvO}TN_!¿ȻB/μBJ*3ue7P-%^Lb?7)!r@ZF7Qhf$}!S7*ՙ(I?OH쮾?_'?aiX'fT.L5/. ]O~i~?c0XuUmP]. 
k.&޿ a~>l JcqKy/I?Ջ$`ߖKB{‹WRe@&޼!ҏ.zAo.y{  d7/~?"FC_|;2.!Fk7/Bb F˴c0^Xu.p;Q@~~PQ7DH2mPQ^=տԏ.Iy窉^䡯 0X 0J~hrf;E¼] ;;vzEVeW?+n^/^s$D?4|]ʆ,!HH#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#0H#"5TQM5.s,uWOü FUQCu.jH%+Ai0qj^rWҩ>w1 *R=UԟR[uuy$]*:lzDXݼK5˃+) vY~ECQS]]` u׶.7D /tKxXߢ;jcS 0jNS}`E`m"w%춢NR@INK]?m*IRZrSM#Q+M&x_ne&mt245s:&}ؖ#h 7'u/u% CeҢt`7F/ݫ] .p1TxӼK> J]{wQ%yce?2X7cJnx 5LU K׊꙱K\7D䦤?`:ֈ5uZj\+% ;B*Pc_y䠋4d'|a~86kkκ=kRPYvlqX3#3gvmΩd.7ݍ{JU_rDIS a#0l&0ZSI=7L+e]uO+'W_:5]HEf>옧yF`\ R0bzH`l9&HP#f*-1 r]wnfI*zP] 5TQ%MefzIzޫ0m**9t_]ss꾺RI^Pm)bFUKRM5I=WIusd,՗Z:m]#j=E[WM5t:#h):إ.mJr&IjmH`ͺSWuSf55dKn$=15lnںRC]w{vBkQaH-v<^$SA쮜fY&lV|_Znxy}zb}BMk~zRޡ8tggKvIb RmKOtk~oou!w" ZI}IEmQuU3Ytc vJ tn#К=6-'כN|QYt YҩM VLٰz+d0;#q1FuzM=vG7eI}jR`UȖ&/*˲ tUniQN2q5SIݑ`')m ,YkerA=jlnv6f[x;Y8>Ϋ|z!z`;dj1Ƞ`x>auid#Vv kxfuѮ]]ߧܪmvE(V AF YMuw@tH g HfUqJ]y]Xm/VH#&!`WUt0`mG L/ vҬ:EY#D:3V'ONsjFĺ"`7bKQ(P}&qJUk <XjKJy^Dث:RSWR/}73aVWWRV `t5N"#j9cFqF]]d #*iY'ï{nh^pf- Ք 6.XQWI\UtÊnI'q~'1&@=3\gU1:L0*r0;fP-ub`k6$ W}duwyXyٙV5jEvvNأCB}=4u/:Y̗yns:v+ 0-u\QoC/˅>85wٱCmXC5 n9=u3N@gĶ{;ΨW3Ͼ,5Mh,}tp?"ѾtStYzH[)0▱XSER_VȸEU0POLJg~x֬ %[ZIIgzSL|F<g^Su& [\͒*G|>Ζ5HS}s 5**BE5)ҖvWO?7E.mgo#eZ[ѝiEX;NRiWQ }`T+wB":͐NogΧ؄7eȾ>̿WېuĚ&{qhഢ+wSuP1Pۭk}t;, SV*'TeBu:u)y|E)_2X3deh]֗wX1*Ixnu)iIN`^Tw>N:N8ҵ@};ʵ /32,ze0t֙lFVa'2NP:IH9D؝l•i}3"ǣU E`԰IWwi6)\jISUJa6H5-? 
pJzylt+jҺwVXg!7Kxש\QRE}I~\Egbi#]QM [I.Њb,N-ad7=_5%ǿ$߽M%.vʲg=\˵.onZQӼ *z$kO[tyw4u)V=O`;5x+Qܣ l$x7 `ՊzZg{F rЩy߳$g宲ܮBGQ''qv&$$f?F1ŒnbkZDE Et_hجXcw)%,E%{XSWzbpʀ.3(9wQz'j->f6T^̺G [aE`myF&4;8s]\'lkZGyW VҾ'VSEE knx+uUsR0qŶ`n3&۱X(Fz^]-X>[S]uߘk?Zc(˩(f뭡cIғ߽}ޥl 0t˺с:Gh).Zk<#]$ ;'o:kŤ† '_2?} a@3_W7/˻<CƱX{u,&ZzOT֡Y#ugɂƒ$R .,QB{A/ 0bB'hЗ󞥶Ë%5|],K*i(fE(|i>3Qei]r"({mnm9KczL+(=&Y}Y r dJu٦6ڙXIzœ޹gEUݪNt :_qFNWfNn(CU ٵ:3%%$;Vts@ggP%ЦPMI+o|-ZxGiYAGIb&SK%I+X݈w}ؒ|GMmI*%KJ:ֈ1p5Z1< $ӾUj﷾ }t]/7T:+MOx4Ц][[=]xx$:?z+h)ɊInu~d@N%]Hl_ǷZI׏}{ _U&]gYIְG?t3кk1ϐΧo "RcIX% : z>^'nIzE1ɼrB EI==־zܼ^n\qTSznxMj'ڟ̓ZhPގ ]HLJ@OԔ4Ծ~t==}6k?"{\o8sҺUӭ{;W?λ #ɱݕǘ ?BNG&(2,f#8 Oyxft—9ԯ tv'FOw?͜G@ЭQ[ʼnlK=S^1f]K]~¦/ٱSð)|}i؛(W}o'@8Y?(}k?t%2&d7y^΃ F0:ݜg;v1dzUfL$.F3M2+rCUu85:#Mcun}N1cQtțBuKnuO’Th&|qTtYӐTkIs)W0"QQQcߌCX'gi`.1Hʞā|s K-Hud2;WO`l(3C25>u eI*&j 'V "BqnKҕF%$YtTjY璘 k3 ٥d7ME3cl0:`jIo^t~4t_E2jn. 'D3Ck[6n)j(K%UTA'_JDFb^~f`Ҝ}ICu}/.M̼Π];+ƭ͌,f햯ʜ"OiR{z֜Ԧo׳Ew&nmVǯJ/ *y.9cUR=V,r(n-|:}zCkM7BW[,K-g.`zC\wIv=}X/WsO濳SR*{0c+> eK7HjwuCxA: <[=Q .}s.{:1NH}'Wfo2Pgo>KuB@U 0v}T3}= 8pneޚ9K=Ԗ ].Ne~ȼɭMC5U.O|שabggf :r:H ƭE"(47x T7Gm@xfnuVH2XSq$ Y=훟&gү1YYO**i^&WTCuUS=󝪮y`RtYR;;Sir^ݾp^blO,v w=_ҩJ&WijSPMYjmyɗXyVHgz{'`,^Б.NѵYzXghO'Rյkkwy8I,Z#]L#Pe% NwhktNIE]yn;+/{'gͺ,Y:-_􄴚fdU):UT7AhX,> W7{qVL;cRe0]7x3ton'FU0}Y-a1d8$(̈́}d~g@$9eorǑ!kLLF`7i^Mx9TSŽ&U$]$ C7dGjY>9k͖%Tޜc51zZ҅{/郊Jz__mIFXF'0}z(iNj29 FڼR֍dU@u&dJҹ7 > sHK_4k7_PvGϞӕw:0CCkSg1>X):ѵklZG:wnG3=m*;^+dMtgz٘c>8+K0q+#2y3nlwd-ꉆZj]l͢!vW\3s%E17�WC.adms\GǮOi*#!; *^{PCŜs]OF1 g:A`QXg^z~>hk8vu{`TUgSաrn|ݡ'?;rʻIx7 nLȰ9[9қ@C7PKݧ_I4BN\04SԅB8Nvܼm UUT lvfRU p,(˔5G}LھLYWij+ynhB!wM/y^]\l=uv/^Lv=f[ ˳M/異k]cq @Iv֬x.ғU ƗnTZqVΓ6ʡ ޅN N=׬A#7l5}rƄ-"^?tkK_{l^Fg?2³*ʒԛy1P0;=SUWם%O~#3mCrXQ5g3TWŝF^~ΤU`;;dƄH,ԚdȲWNj8XI>$^HZ*:놛}V2ӟd`&-G3Jd%>X fI<ͤ#uյ,OP{N*4&V1χ髛Ѽaė#V<;}vMj-efJ9s%eWQEE6pj9t$R?ÿs2:_ؼn:#$8^yi{|4vǖױ˙'YMJ,xN`<=sdT{*둾#sT sF.w-W1YoAm2W rnVysƹ˶QG9+ӕ==o~`XH%52f٦ysKw]>L,56*:,}Ͳ!"BV {`?YmN;J!bMl#֣^ߢ)e'N4KɆm=a*Q3jUҕ9/׳uHG[؆ꫧ;s$ T[o7wj%y '_ûv;dҔl8u۶'!TF/ qluqzM'XЯ72dy bO[& cܽxB8HUIIǒ'Mk~>9k+F԰♼km_xۻ]V/yn<|YXB$+SžRԈN-3 
5V|qu⎼jȫwsFFkyW|,F3K;T7k/{C4s15uܢe9V6QN9bxYjRwK*&e𬋰X7Is٫;\| gR[C,pc4ɚF.CvIGy` r‹GzR$o0t}&*lxyWD|.0钖ex83;h7j=7IX[-LinU̘%;M}e.|̻ Tі79J0cY&|u4k3굚Qy&]t7-m$H\EGO3ޫꍾ0P^l}޹u:{o:S7%o Lrq=yf5ܡFUsӝzBrߙM nҜO{zN|-fs]JN|Ȋ.aF` ߿}/a`5i|bA7|vY&U f",,@_詞DA%zxIi :Թw7ovX7ۋZiew3u3h%hEUU7de[qrHRSzFm^1kE+i-Wo:;Um]<眱3ZT|gqJG^әgfh3o,VgfE[`xʺ cHR|G+[^y:0SoB-$ 1pIDATgYz֦k_TeTLk-얰?Ʈdk{^uu<0뛛,R)Q]ɹdW6Cm7F!o|h.!;6ҜCVfkz2m0+ICf3IR)4ҜOn:(Q5b򌑮nql;O(0f$qvxϲ{p<#o?e*q~vc#x9 ɯ`  Up@y'Amj]o~DZ |a^m/7grYYJ_qrٟ= n_RF@֍9ӳRO=Gf."w&ymLKs0cOfhF_5Y1;,r:N lr;bl,ҎAsE'4;;mk <%.<:c]GL"utZU^3<}R=tCB?INթo]8\p0Refˬh{-85cLVMޜ>Jj-j*yS{5cLEUT:X[wgH uYRөzFqr3bKv*fҦTv=&)_ eEȲ L9+=L`{$K"$2w&ym;ϽYvnHF_M] Ur'vq޳=S^3Щd~OTTRE]uU]+q`Xjَ[8;;&NmUхUቊPQMImuQWԳCNiSnϝϤ/.dϷ5otbU'tKti`73!Q߬!~,S2vŭƥ!Y EݺS|9E5?瓰=PCBI3k (yzE]({{sC5]u94geꫫ_Ggr1Ƭ`lΉԏ|2|z:Z +vn5 "= +=y̌G3@utxQNPܝ@cU$~96G\'oQ'4~%湛[:4ϱS}dMjV2vL3vW ZGrIUT003te;ص#}$s&+kI׵$T2D;/L /|f#w޹5f(> S5u]׽$W?`rT wiw*Etst⌡ɰKrSY0Q;F:$G9j=2zUG_& LI\q3fQy_yϤ_d7-69o^;9y^+:Xo0__=5>:$[ՓOȩ ny44<^oi>voOG:n;ח&WZת|;rCN@,Yޙ{{;n_?,Iy:Xg#KUHwG$J@{ i;_'<҉R֡K 4r'nTeU= d[$(-+]1I[MK_oe_[Ygg_tIZX @qq՗֌ץ>az<(}eZw!qmfVqnmҿӝuܲNJvٗHU]Z gANtkF)c rnwm;s}k]kOUFvu2"옮,Izfj 0`uxՙr:%rbRZp0`u.!X0Pג-543c6,-Vނߤ9ѿaw = ku"^݈8}xܵViґHۙs.l{~+cZymՍxKu1c\ kA}3Z?ʻW5"Ɲ% gFe0:ۦwzHSk;meQWQRO3}eځ FWzF,-_7,k2{jdt ݱJmI] %nі;LK=. 
cArE]除z:5Fڦm]iޅى㞙rI;H -5%YK 6%]Hk( ;=ԓWsZ#-L&v΋ X'u])KC]V$UF\i˥tb~)I:Y`hXOMwceI+G/0m?FX_ UݽE58#;0*HzQ[ԝC WWV&'eQK`QNCqJ /"';(t\7nwzw`X2:}$K0_Q.VV?wId.dy#g;`ʺс<`X5VY7*]=yS\k w軼K KE١b쨜d/ch@x@j(K%UBfxYX!Yff{4T3Liξ팽ƼkS(ݢjG1ְkRTI0JǬ(w׳Y3_3Uk)%٩b|*0=@G:' 'Ɏc44 eijjr_﷑:bS3ٙ:M8.WK3R-d꾞x~kcr3\'=f,EO5G'i`9WP;3ɋk/mUF /ISS :q4 m=|DLb̵kT| Fغ/5zz2gVmV;u֙8d2%MGkHL\l_t;然ޚ𛥞zjmbR ՓݹׯwPMe'/:fUQԦvDF4KMc 5TSԭmR Ӿir[ʚV=ITTE %1K^`im_MT}\Wc4`EQWpZd!Ft֬ɒcEOiq}*+魯dEuu9*^x\cYԍ5` /u=4*z:WTQtoNdqhaM5Ʉl1K^`H+rRrboKC6@0TRSRJ%YSLROÌBRvDIKu˼+فyf\6"BDz9{5][k\IWu̒XZ>]SzO}elE#$W NRԅB\3yzW!ݚJLuKl1TO#krMlK i1;m/% t/aLWt['YF@F0jxerNjLT-i%btFUo'96a391K^`,f_!sGOOܿHG#llɄbȅ4(Эϙ1r; % ='a1=-=2e*)S_93猄&q/ӊtڒZfj/=Q|/P$4uV?jL7RO}u=퀿G]41F2vq: $Nh+BQW[x{jH4lcrĜquV':5sbVK,M  F=A**YskQuHhmNt] D/\]$wy=0JW( ̺=8e_% b]t_]`}?#wQ1`-cyPT."15%k?甫{*`Lz̒X9K?ҟf?1Z$ŶtazS &(Ԗmx񱤆|TPRwR]]IAzzkd7INc.@'iޥu.gEW\vXꪲ3E:`CuCOv0;DzUxj(mv)tE gz "C5eS2;EHMZ% ?ꪡ]n^>"Yj[1xuZs'ԛ.KeI:NQ Ւ$|N*lȀ6RW]]}=v=7LyJ,YW2&k7[tHc.@>^Xz.Pazl(^7z+z9&11 u}IRїXQQVȳFQ,ewӣ<:K7ԒT_4HSIߕ*w۔;bԤR`”֌#6 nlh&?fI_U4f~Pc k[TEY0CQ*Y0rs\-Y=82`c ]o8o͆y'稦Qͼ&rlaa]t/̫m=eL賴VvCQcݝjyjkir%s:{r-5gv2p% d:E &tăDSԩ./"݆:#2`CeǪznj*wjIۙoEUjKjJ"Xo*3aТz|'<@RE5]hcϐ===krCmu'-u25詤 s[=1@+JhAOHvˋWU. "t6>(9I#M[@$Uې¢nM˿Mg|gG61ٳ|{+g۽q B퐮-=I Ox1Nxf;UE? 
]<.飞{QWxK6G-+-Hr KW739j^FON`v)#]We;aNUѭZ`IEھŚjjSLXM3ƌF Ǝ 5R}z W1y~ Iu&{uv- b%d> ";u;jEu*1hٟ֬+1[.Ʈ_9r +/~P_}uW܉L,EM=洊]> J]gB#M<ENe#CMWV7nPc,uӡiNo䇙%X_``; L8{|N.Xzg1Ͼm:cBC !ۧ;d}X=i$ֿz6'~` xyS)C3p0U+\`*Vʼnp L@SI-[3#j8RI5[3'.lUz}h QRޣzu3ijj:VXjj>blY-q9N+$TGw<4ި2uNFf`Qi.қUǶ7Ygxk}v[vȼ: } wKkݛ)qݩ!c;{q s!)Ϭ=A~$Ų=G5_Uv;qtι?2h˶s=0R#V[>cAͧ~l8;ųқ47p*>OYZŕ,W FFU 3+wg#O#.Xfalؿ&3<2{~ٜо}r?;3+if 9 ZM+G=eQٜpE|> F8T VKF毖uEZ;v6zlcwvĤ^ m gTg1KSEbe-"/"S-j;it 3H;;cK9_^XLTl~iggr"/(Δ>]oHUmGV^vWS)d|>ug,aujNدUDk읥Z{CsQP+vs0טz Vu*I;vgr3GIC3ӛ+[dzʹ@+}[,uV즈q,C뤘0iXIޜ8e|H؝1&K;gkHg+OfΏ#SĈ7Ż玡z>Ժn0"5,Lӝ?y_ u>jEo]7݅'qqF{q#pfYxi/nf٭._%9tF|cٟW-Jةv2*:-<uñW'p;:nkdyBDV/uvQMm ^<;bfHv5fY:0Kab;80pqi'6N31{T&TLK;, $3ԼmOy&f* [={?":NƎ|n10#px9HcN[Iã `͵JX.eI`eawbκеWV1cgכzISK[r4Ez3=ZrvԊp~uf7Wlvvuǀho5?j/' [ \f!]2oTf:a,;[&Bq^XjҵE̵e wvq;r%R)Vg~}6#AOI0N#4C_ /fB``w')$[y9*9fI/xZQɞ7_Yl<lff@^FzK#ݏZ3v._jP[;Z/+uy2LvrVմog;4Jʲ[x%/ݱs=Wnjb|eXvoz5 NkeYDv;](*o$oGoj}?CV6R8@\_?תn' 6XvKE+?P92,g$/V6'2(^ĪHTAxjIK3 ,Z_Ue7ۿ˰EX7%=^LR*ܘH5,J6}$31nGk~Yf+I"`\޴r3v=AWvW\Gj3k>\%}9:aq&,RfY4Yc8lf/I-IA9G;0v"Js"kL/UY43 jύ+Aze7`~o`o~&׍f[A3[_{; ܧv'IQdGcChsE8s϶*V7#IS{]>.O3ϖ2Ԋq\Ӝ0th`w0qs}I+{JƯ=5,?Xy{v4;*]eoŹ48h4sm\2 <j٭| /G;ZЩU))O99Ng`jөl]#}uqGPU/[7s3}lg)o*Wݛzgv>q͡vwwsͻp5~&ɏR>J ;K;K⵳٫뫧Nuh_%VnAT%TJַo=[}<45gӹnTS흽kHѫSlYñA_սrJJ^Uֵ0Qw_߽4WRK-뵧JS^ъJjE:os5[y_Y- #A TvYo;˥i$ֿ.evD(`.[0&ɓj~?ayͯIkg`د DFkHUaoͽy~1dv}5 _ggJ_ذÁ?h|k%-zg䈃l2tM_kLw^pVG1cџ/~quFH xJ+_^5wjnQeBj;%ª*íGj5&}nzvxdoN`UĦf|O}ʨJ^gov^TG~>+gkѯ;ez +YjcW#\w--v˻|q[?[v[XWFʨ$uãaukImm`= (G^eO:e7RS<V*d@Ԯ,m?wj9aH-TI-$c5O~eu#`jnJkg/G}d{꜕L;nuGeӘW%50-SO3SVjO}ظ!!&^bM <. 
Z X,p ,"SQ' {v4G_(Aִ>1[W]v#XFi'e1ka5SmT^$HX~9H^x (I0H% "p6]~µg{`G:?1& #`K) ף⯻Lx `}lEɲ0s]ҏ`M`]X㴆;XIck bƮJRpf/;۪N5){vrVZ:=fFY6rԻzԲ]?ԟ@)FX͡0,y=do^_=C3jOla;{-:HIrhH ElB͑Vs)Wh޾$ǹ=fV[zvFD#,ˡ$CCc$񵧮\4)N5VIbovy5ԧUlg+QE ^Zv$ǹ_|z4QO#|7w:kp`%L=۱IR\9tcf*I´vnpd.3 !y=)cnvIjy>0Q z/gg@ Sz"z_=l; [1DZz"  g(<(* ;P]xɉ֬}IvƋGy#`Tmd(p'^UU)_z"sևc]ڗQczS)r1-5ka'?3ae.wS0s 암6G#`Ԭ;v¶IAb NmaPVՋmy<VԬ t¶9h%b<{TJڗvotc0Zr eT'N 'G4r[7v3DZ7`,;zQ;Aas`֍Z3CEgJ_=7zcoO L+s,\Z7k^B88b%$͉=SN~ \!:ߤEQoN5`7<"`b3s/" `u"˫J-Yb:vG<*L 2[M0WvJoǞg ύ"`bބr;pKn%HѰ96Fbf'I]@cnzU0/&7pvrjE7hrq]7 |~%^:Ef*plݼSc0owYln~7uJzzRec?weqoߝ0J~[|~+>]a6mpj75, stW%y6drl=K2K@5*`!Ω?Cy϶ ζlsppJ^DollM9grjsrЊKPhى>_`. FN3I bNy\\/tuWreAܺi4#P(Mf`JW<0Ʋ0#0zji%M;4HsM8S͡hl%9AntxV[HFjR3 #\Fcyu*dR|/•g?=׫˨ֲP#-WVSYv{d?[Yenn F:0a)6WX*xy.^P:هG\_ym`*/p$|wX)yuku= cjr4MlpآvVP-|yj0nv*N-uo`)i87Apez/w;eM|%֌s͆HtAy=YϖxBx* gn/-\4<]ȱ>Ot$vg0}5J^f6Fb^?EsYMxyy=F>G0jc-W 1PѰҶ7'OVx9͝|ʗŽU~$ɭ$NsoXNswyУۧ<_vK'9|sn\ŧ+YG'O6.|i>inox[KOY*l^FѓFTrwMIŝ|ys\뻢v; մFgZ-W9 `UW/>[yRɗ]v\4w&au*;nFnMNqf#./QNl^6A:oryG4w္GyVRtPp*d'j8RI7sֵ+Cvu7'k3վ2tݟ$q~'9Ihj\&,r#?Z-i~7y<4Ŝi/ ׺%]}'>ɋ|#/ګ|AWӁhn^]V.mnwxaUղ&+`+PdCW9[;{9 SGs3$WQvo,^_j_/~u"//4F>'IF|/,t AeG*-o<%ċ]Ω\q_mׄcfnċIrԙ$e(W=]k3]Iaړ/ ]k^xZvOQf#/}/=OIAɝi>LMn}/iG`e7$vR-ãam5}Ͳ[,R;E(z*9qd/nњHHJiq|aj_tvwۦ_. 
k0~Vi` y2;mJ4.4xU1SΜ׫x?͇O<BNRbq5S.};]HNt70VQ7U23vIuL[~<xV_tb|x/7I>Pԭ>Xaz `uۓ5] v~v/D*:wIݦߦy~xe:|kj_뀱[`6gՋ`w6O8ɫ@m9=5/A= g'ܞ<ݞ00c+7KuVq$1=ru%VG' d;WhT@:ƋBߚ6&>3_>My݀ay"P$[ 'O=~Lǹs1CoO~A{%-KpuI*DHjZ]F6{c1nF$y46fhѵ5\Up{tTgD*֊#w~.;V:wI\݉Y$Tԭ>-oNM~i1pgSij?˭gu ;i?sDܳ!Z=&rG]Ҳ20ܫi{ԩ^϶>җZfتWc_M-3zȝ׬ge}Ffg;Gizޔy.y'NgW]idkվT+1gm7 s[#35C^lxyA<ۣӾqϟjߛl[S9$o$^H4Sݫ)^7׽9?_˹0/nޏ*wtH6G|ۃ1󿎛=h,ڪ9qpGح/=ض$cf ?|FN Z9@ '3o;)>:gy3+A׋f;)m8/'hޔ{w5|ៗvxK8퀵  ͫ"I{INgy[34 ~8r'#<ʧM[7q5|6F$'36{C1XEIN5N,u/Mg}?nx/s]&-Uyㇿ\v#ή=r%FYkeqS/VgZ *?Fn5b+;igw$yG+{cB4|,(]ί4ɽʭ$'y^$',<ݜF>šo*'.rmA1y<͋3ʋ<{?.U= Pu`!1җLimSCםCt=;Wyq}]_&i]su4Gyr|7Cqin˾t#7цâǹ擴{J̳Y//i??(O\Xc?-\.:P9 ̶f8?9og[Ӷr3s<þpi;;3̺dZXwfE0u6F%t'[2ܶN8UDkg3w8.j"^΀[mmW`j^cw~29J7e^J{Ex~c'oYn^qMӾ$IfdǹGSC<ӱ۝ 3Gx#5rZ,4xqN{?f=k>yG`p/ɳn˼7cQ.¼ކ /oq-m-3^NbSA}*0 I{2dֽ%s#yn\+7SHc虝 en`P g;[y.7;.q+3:YӀLb"y0G{8oק[z9=wE{yQTi>s/pӑXzأF5yhYx̳y oL(}n_tlq}'LSAV`nryoעO_vg_,p]F*6j1w5/LZi9mM?(cï$c"/b{3\71u݀q^<*w'iO"#9PsޛY#=?Xw8{w5^oM/ΛlO{-c#\)$3Q|ZXGiU;c&7flm_;>Z䟕w\]ܞi7GjM}rw^ Ǭt ձDŢ8٢z}fsۺ=s/Mhw3T.IHo.Ns+׺~M+\l̼Ͻ4ˎFKZ9_ot( |v>b٣.t/Y{ȭ"=s{v [ 3,{lg+QE^Nr M ory_`e/\|wgq/c:IAyÂg@ܛqpnJm97n76zөlN Ng&h~͋2+e+i~15IgSZӀ6yN/s+  NӼINr20&z؍" }s#^>{ φߘxN@gѷun{>ϋ7lec)ofpTv*{.E f] N\,p:T"OwG:@W+]דjgӝWyU.֛|vq; *ʄ5 OpEJ_Q 7r_$'CɋDgyM:~7r'tcq[9Nrz2M|W7\uN-AfpTbQ%(3l\N= gn j.JЯs;<-s/uﮋ .sX"J>lFZiX1˘]k0ޘsqnE;O%08V̏{UtPޘn|4P_yU\fxŮ/fW9Ҥʘ4=m ;s\N=nꭴZ(r-jQ }.r'en ԺҏGwV.EuqW/ߎ%Hކ~Zif:sl{ u`Gy1vx?WDE<_/">]<˃<M\(.o2Vl^ly4jZIc2ۦfs5[ )7\N2b_g"v=عڹ$Yc;H3sј;`h G({>29q rBowa#3ZXӀ񽙶/V5x:6y5C{~];pEz8;%HY`F}R2q6/Nw}9YHӹv l֭b|3<`vs._eo[Y2|Ww s'ν֛| =NZƊSIwVi j+#} zK-784<` 4˚gcӢ齑15{">:wn捖(1 ~oIYyf|۹gaٍ||nNIuo5R+~:Їqw~ƷяD͗w}a}\t;7Om*h`..9yYefxŰV79q3&~b {$InJO~Nl٘d/z<'yOyWS{44 >͇QIHF{>K':ǽ֟INso{%ˁ <"B> 篊Li tۭLkr0Sw }.SAq\Mo݉p/uN;mQ1v*5k}ol9ۧ6Gtԭ3Eu2}ǨfW#[9롫T;Û ^Q+Bқݶ+r0y=39V_x1޲pYnFޤ3urFgyy՛g}УSWwnFi^U>ʭl-?I8[qNꓼF>ț&Aߜ'@ q^^9ɛ(w鄳ɫ[+`̵4z/"w#:?iG}qb[ᣜd#'y[ľi>)Ӿ@w/84tw~^noV^uPv*Ӧ۞at$id_GX:UCLp([c ^褐2%WEs/Uc9Vz3|=vI*;Aud2=OuƶUb˚lg;髪M7+|Ͽ YZLͱZ:1L2+6`L~ZT:y9Bȝܛ0FV^i^ -s2i;ʋʛFLڷ|VĀO~:s^QA^U~>έ& `,Vv&֝$I=[3ag {[pԛ 
\lfٹSA唹K܉p/ƫ,`SUϪXퟙwsywX7IViՁon\q'׽N5p^=ޭţzi{x7ckOǙUo90٫b[%鶦l{.7/~.П]#g'LP|7¼#]R~Z9w1SO5)jϵכy+TN3G^_v6{Gv$cbFNۧ7>{WAyjz08k6~h jyzgT9]=r޻d3{S2l7igg|!lUe9Ygz6hō}D0\"9W^5wqb͆V.`~uݟPəqw_52໡?v^ao=1k%ZS6_O3$tG s~ƷĚ%/r.7Wmn'mZsf;qٳ\{)+Ix0"/Iz.,xV]+[97Y gnaC+UD:I6e6r|ܓ|V>_vYtVhfo-&8(F*tVV[n+Hed5[2yЈ482hgŁa%UN&XK3dw#{f"oݒQ"`FO%Y.4ws"^O^x+~Q|V]{k0v"[(/rrExŋ Vo/F ŤjkԮ`̵ ; ZNs"zä[̞:FX{ݛ`+8@3SVZ@얌sd#rk!Gz{ PVl|:FXͽ/& /e9sﰲz}\RZIRV6s3;9f 8{WGѿL3&:+q7n8[vn%v2$/WS֎ kl'SIse]Ϳ\AxFqS;{`%?/&ˇʌr$I%eO.pfe0W3EX_7w~5WZX+b ã]v#pUu[?i4WäP# 8gի/`wyݿ42 4i=iiy9ݼIg`|} Fɣ$O["?f#(_|GRiy#Y6xq%|Q:\}Hl%IUY^b_.- O9G:w$;IN2L΍u^>΄ƲP,P[7<V\1H:ѓ\v[XOk0~$y0^oW3n9Ӿ*f`Jԙ9.1uE_j; p鶊jjO,=-C|0g؇3zΛ$];[}:fuw+ӴS*^o`Ŏr}]JvSxg ۬F<{[zY;yŏ披~I^Յ"OLC ϗ \J"⹈Vn}WWxvn^i}2ۭFTn"Z >$ɭܘcy:עz]D7`S?/ `NTl?7r#od:-}Sne >G ֫yIڗSw["L=L_/w ^5[3?i>, f?y||>Wcwt ,rWYnMDoYd'Hka+[IK˶Q*9aۃ%iϴkvnc7adzW0~7y4猍-@xnĐzlNfI+-u"9g?GW{m6FʛJƋo=|_~g듼ITzlRpSf8ک=J #Fϊg?ɋʧs{ιZu1g|[;#ڍ#G^ѹ}|y10V]ycg7Ӿ泡јP|9гup&O"붪gꏿF$'#``V?WnbnܬÝ_q6ę΂tBMywr+Ar>|IVn%9*'9gx|G ,/ NU*y[|$O"_94}ƹyP oU^M oޫ9͍|܋U[yEY;@u䯒meZ3(\bܝ̫J*}=Pvu}[NE>iNIsW9罁ҝ U>ɧy}ǿ;ł:_4JqugUf^-Zz/^ÜөA|WM4e1oF1%GO*Ik0`7sGs7ݯb?DIB<ӱڝ 3sxC1Vr/(IgegGINjڛ%i䯆+vC5zs\^xH|f=o8SMm̶zFiԋv{^kp-yoh2f&ak6g翚D%Fbm"Or{~{Y}E4N .?ȋJԙNtd}en,@Yzx̳y oL|W܎J!];{c+>(ⷳH}뾯wiNY\4nvEt9m{l8;ųHv0N/,j`L{/J77d{5Ͽfgf5&\c,Yw~<΍v >=7&<~kR{v`ϮgSބ_q.7ͱk*2۞{ʒ ?~ߙc2*GcA6)皕5טY ƳlyOgZB,9sawU^CzՁ&O{3,zpQ CBwV-ބ-6 ?ߴ"|:sw/7&(~1v;TSK4ثi3Lyn*i{NөÖ[9J3I3_8ZT-J+{i(CQ a[aj>vW6U5lg+(3S956NQNctm;4orCcRՔG);{jOF_ȭ^ӸU:je;7DŽRyGNwNTrm;qYG5T:$,-7sY8܎Ԁ{ExprZEXbFJU -o`Ͽf)|&0"}:v$Ω[Oe}̧C⛾@kUn,eq2g<{w7yǹ'hl, \Lg@cq{U vh;`wVvK;I5/.f<ړYտpM8 b̢#NYbzgu'40z7yU6=]un{0-]`?_f.K\\k95W gYU1*8972/sb8ykvկ&\/k0ni*߮jqF_!$FM Os-^VO-[Iڹ@\U܋onxpuW]_zIgfWj6>iy{lriOR_/WB×/ϯ'>s'o,F~>N "j s3f3{9(V#* q}[n5[WbgEV%غ3U:U=*{7H4?zՒlOnWANfUEO`fV6 BNmztUkbB=޴kW > b%nEt<_wފ V 9z\Yw2jpcWo՟ʮg ^vVS~^QT;{WЎN ;eBmW8,ez?zV6Y5U̟ .:$M]i^=Js^#|z^.יObqw_G?DNS PQr2$.2~nWPW=̣>Iz?5W5 qW0.htYdRF>%OҞ9<[GQ.Kr^mvw(}*M?c\>:f?>,]sBTvE|<ѣl06'~/1-G-w5G^.~ͮen2{xW9=z:q-ݑ$Inii<SlW$i>}{{C{GL#oL{w_ 
{w}ټ4|=6yգ|xl&on"`> 篊αmݑkI^$Sb06p Fј#6o/"ikr=M 1('7pkb+fz5~kKr+7&᭳}2vi/rBϋ_cFFi^U>ʭl-?IݚRAqN~7y7Y<ȋv'@ q^^9ɛ'wȹnߵz:uvf}EƷ|#_('I^(o{9'U 7y>:<|#_q>iNs77r'鄢FfjEW?3~nNnFN&Ƀ([cE`8\[םexْ":fq%Z=0!']D*~qijY1ioAQ6|GyMN28'߭;m;o đ/z2p;'9J;s~k6ϫ wp'I[r?]Y罡M~ݫL=>P% =,/{ۋ&cߣ΄j!5;<׬BGkK^K˛q^%~ IwQ `? Yˀq#_f#hG=gInew`54¨^?jZɭ|Z`IDATxrZ$AgƦaҰã廿_vkX_k0v#ƧŢ,u9/ jw㥁U/ο^v Xk0v"[(/prEaas_Z+ WY〱1~ 8ҝ6_x@E:?5~[p۟nfM/P`.9'VXnoï\v IcM`X_ V/n\'eCՋ\s0ړڲ[$IZ.,ND(`.{RI-?ayͯˇe7"`$ɓJb4\o~fU0" &`J0 @iF4#P(M&`J0 @iF4#P(M&`J0 @iF4#P(M&`J0 @iF4#P(M&`J0 @iF4#P(M&`J0 @iF4#P(M&`J0 @iF4#P(M&`J0 @iF4#P(M&`J0 @iF4#P(M&`J0 @iF4#P(M&`J0 @iF4#P(M&`J0 @iF4#P(M&`J0 @iF4#P(M&`J0 @iF4#P(M&`J0 @i[vX;SKèkFn;͹?\vR۩겛q9J{`GF2 jS_v#$I;;9^v#Xg!,nvE# W_XgG#WAE .VZEry6 V Hi`;4l-=p͵ӿq/Ejճxq+_ayykle-` NQ<7,Vn^[ڲz0P~'è]Slw9\vkXoFʪešEXAv& e0PVQ{6X!EäDF)jX9O5\#PG*U($5ãaUzKow`٭`= (/ªO~QA6n;^\b^X8#eԒ"`\9fH;IAfi8-,AwV%?_>.=#)Z5l`ltWw/X0J;^Qx4$y]RsagpI~\vX/[v+B޺I-Ez%>=l^0СVXگ5ӶŎJ^f{zåz ``-tufwsrUxM`}`-4L^xg;`X#\ e7gn"/PR3ijjJ%I+i٧ނ+FKLv{6guf߹*ekBIiw]536^v}Mɏ~FPB+;Š䎝&9*1IZ9H"VXd?[C mP#?vZi=W,I*q1!ivG띾gcעngows 0ܚE]cw}Vg{lT6jrd/ՑȲy9΄:ɣY _q=-6'cT]Csjb%J-vZrWsj;l%i |Z/Lk\/Vl;bٳuϋitCU󞧖i$iT"V(/S o`L ,;͹?\v$/SOyv,fI*zệETvḭ." 
'`؉*g҃멥>.~TہG9GLFuƽs' 6&T=r8en{0[9:N{<zvQ9_-*7yjj=<)\ظ7#" sw)؈s84x+S֟P`cyc[Ws8zoֳtD=Sօ3m5yϞM1d+ϯi"\>eRmcyFJ'NkΰA sj;lG8J+Q~1x;TS`Lwq\lg+QIˉzRpfOJFDpr/0ldtb91oYhu%[Qv4s0ܕE9^#|FXI;ig4.Fp$iQ̟r,+&}t4c7[ Qq>GEǁE/=ц^`X5/2E VzK!`ԉ&<[)N^·J|7 E}\So+oݼV ;!rڽp1qqN#IqZj+0['܆jZIs@ó{eiQ,S| o@+[7v]Qz[7c5پRSM::a|F<}lb,Pbb= \2.n۵ h1pLCX8jtD#l%2NTF'^t28tr=]#F#Ȉ` RObdyߗJzC{< DP$zM:jpQd4j:Kl⪫+VREnx?iE[#@~kpiR}]`t7q4^fbŪZPOޫ1Z"B+/YPT'Oصj)iyzcɓd0pJlM^\qvkAw`4R鱎b?q%_[V,fmX6Ǐg&`d\@Mu/.do VA%c??8drXS3K F'>OSsLk@y鵒)X@8=['of]J o5*wk"r[u݌{hhbDɸJOtZibȚ OicF΂gn Ub GBOtY/*j~$"ì/̳)c?KO{tL-`ZSeŔiSz } '&\;fxքsM˲$Yzvk+qґN*m뙞-R?{:c6SC9GԋD&rkdV'j=M(/u5X=MmS,a$) B _A`vO^UMcHFJwzK=ӎ,ЍtZnt~}+OGYu@A ՗@M5jhfj,ב?SW]5eKrDkjS_lA`pN$b-k'?lVC̘l2@M"\LY;Β);._X}@>-$_v^ \q/4Ҏ~Sw #ƭ E3+yY#=5QԥlhugmJ_V/rBGS@FO|-LI^lVj:6106 =)d)YzY;1ڲƌZrXzKHa@>o>yenHOu'b)ȕF:Jy|O':ץ!IeW knzwnUҮ P_:g[ut;\;\_nt(( -FM}P?t+ѯPC}c[XjXѽX.T/ ̬P K6R'#lʖl]VHN?xpZx, _fV ںuyeI_g~.]q<_LN2= à[iGϸ/t']ߚ"׾ڋW/^s]Qyk5øRt7 o37iz]hln VSjzXS TXN;Ѹ6 Fي '9[fwiWMs *G[o1G7UNÊKR#yZ8NAnuye;~2 aW/u`~k=K}[iU؞n5 [?t(KHR%Nj5LW ە?w$EOQ&&EJM,ҲMf˴X^c{f]^5՟;e* GfC{iO7^tnI5nuS 6ⵕfKUֶFAwF /uwFU^ 0UȒ~8_H̯'o4UFSWQmt#_+0`.sx)(|fBXr=+0G>ns5S}JjQ_Vv\b#ihƬ=Gn+2'{~{U՚;73W;0k/jx z+4ӥKpuv^D/vD۬vHtՍ F*mouJнgڌWƯ7]i0dKT0uNrM8\tSqI(WxyY˵԰jՍב/%jzҖuvX(zy/Q`]ݙϮ>+PxY$aW3'It^J`siBe$ ?>P4* jvWvFIю$]ו֪.gJ+5۳!׸*Tw:/q{*uuk`t2Kc}~njLlߞvðjVx _<] U%\ڭ]$Eu\rhV[= R;||ej;m@QC-SE/Gj?#n 'KvQHfxx؉{>ime5\#kG^?nAG89@<5dOS,}7i5Rct4vOs[椨Jƻ0Yt-^U0JGzfZiv+fw᪁5pLG?&=˝~U2^,փ۰Fr/r~GcW:ծޕ|U+ڦȶ/ntgaܛzio*;FwBP'fḞ 9Hȳ}}|}yƉxU=U_ 5бyvM\RhO'jBa64m#ҝVt4&DZ<ү7tf"yL%_ 7ACWHQgJ /xeygQ76f]u&v>ūzҪB~R椨JƯ.B+íFJG:kͦXn;]H-%Ύ>ח)JLɾ6;:Ii8{Nux<ܞgr=#%tNOoy^Jzmx7l<?Nѕַu\ jeٗ+FJM#JA S^?7,0H_>ߛGfzx緱1Y?Kv_?ٻ_.}pnƹk^'9՚o {3uMweǰ=3ݿ }r~~u(L7E7e<{k^^{7Ѫ߿?0=}7ؼobj'1?ڊ{>}|'Ww'S{xȣ#W@׺}@uh#fv^׽u~]~j>ֵu9LoIp!u6ш_;6'`ڝ|u|H:=z?.rf[,)>+^g3+]hc||K2~4Qdپj;Ӊ5ҕibe qlѭ[RwXӥ Vi>5ܛĻwRg}yTwzT<+jO{MV/ӃmiO'ԭ~]) SΌ;finp zc::J#Mz  M㴧U7aQ_Gߙܯ\D$rڵvJuUV"J+#lC>Bdh)ÙDUꢓ|A*X0xBkڴZ 51ҵi_/?y+ʒ Ws̕شmb\L}ӛXe6 P4SAX,껓^DߚFOr\Rwc2/ [vx˩SG:78T[HTӃ!TV6. 
̠[ Wa:mNN{9zǁe{[+>x%G3Qk1|F8Cqx%ةwZQ_#tV;e:8^H"5.֯hklr,WgV5Y1jR+l`*,\TȒo1ݶ~mij`ajّ.cGvnYrJ.S*#]HT݃׾݋q^/[rn"hZl} UU-dy-*譥ZU>5s<2xevg:KH6+bgTSmF :PROW=x<}b}ӛ.\Tû=uA(1sҸvʽqsxԨqMߧGl'jf+MTՃm}7:7sTmn?Q-YjmNnǏJ>UZa: 1C=RWX,dlblyA׬]jkWR7S Sk#~ܓd8hl%Wl,vy}d2+wh"{gQ3Iި*yf[ۑ4ƪz?`;{EF"gbb4xl{xl yV,ٲeM Rvc9}S/za8ՋW1O3jVJe~y?UۗUEcm7\Os㨂fFj.Wխn"Ud+8jy"Nu=6=H:RtőۢD]J++3 o|m: XV_065j/:Ѿe0q[wfiZj?-]5~8nY}Y+Gu"\AXu*Y*5gAUoR#ɬBOg;Q?Orj=rE.&v|>_Nx>aMu\mh(Wn.UW}M3xIjn.b~yZiձK>WOkq?;#QZE<ʃ7 QjBu=fKS]z`^]DV<^>HͬZ{}Yƕ7粒]„-73nfێD~xˉ31:`__GᾎDZ#= KKz+Z[-AHOLѩdnI}qsM]˒4,2yKs8/J&'i$=%uԅ5ctbb*u荫Gz<粒1O}q+baVK_,}we26ٍ(zm>viN/uoB{.u:׍u7ċ@:NFL\1Xcbj8P7Y5Y{:8P2 9Ju&N4Zq1}Y:1w粒q,d%7*pUouaN|ȳN *tԃ5RN'5~ױgv#]OӥvG}wzw7%>x0bU]AS=Р,zP=~mɸ݌ٚ= 44q޼<J.]{3OkǼ+./WL/4WD҂"_O߉vdw18.U#؟ӃVFQzjM$`^!8'wm+T[֙яeLVX'חg{;ʑ_C4b5x~u%[g8^r_3E^V$^u~^tՑԕ/u>"AcNe9v{QrSIvEY4#5e0Ej/Z+SyfzI8#5VFdSCe$`5QǴڰ#]F_-/#W>u)ґг|eޗ1crhg<$vpjRun,}.ّ};fv*ioQ.#A^J;|'3ǥ)ܴmn-@p*OOT1nkWvy V`3_T!dsR k4 x&4L4͔jq?!; PA=<⇁VxNrgLe:\#KmPjcugTBD"}L~þxl]D!%&bui6GƑ<8vc9vYzYW/Z+Sي,_o)=q,uPJ LŮՊQE?PTO]*]q71.u+;]]x|u#=ML i%lYQϞ=YG6a& ?8w[)#Lֵ:k4ч)ve9&sRzt:scN ѯ'bQ_L8T7SZi[ɘ[7zу4#=V%};}bn3}nQKԣn>oQ7Hnϼ׾I:IGz^\ҁ>g]iӋO(q̑tt:xD_;=ɱ^Gy_jw۱DX'e-Z뚪֛Gj@%3sJ *{&bQesT@~z@]RՈ,7m]457x}?oo'JHKfjwy854'*GmW֗:Qk=_.RݵϦN]]}H{G`[otK2EփH;vZ'WS|z xdi7 {J765O_3LW2Kg`z=}+79|c˙N5RtޚOU7%mr% JfEଂI݂ŲXuS})g9iӼHU0]} 'Q#Fwpl)_Ѓnu2[r-[WE60.x,,vmd#)٠5;yX*`\dHPT0n@1u/U Jҭr9e;S#Ҧ:`[{ٙj`lϱqQn$gp'jEdi}]w6`S·u[gC^* L5 v=: VU);G&XWү-MlR?X(Lywn*X`ބkXnV1ޘrJF2xȕ![m4TK:ȕjeɒ#Z)=O+3eY=M+\ 4д-X Fnu;/`KII,z&Izי|z nA2- M'V+BXc%F"O,.t{}е:rύLK./ lG*R\Lz,O"i҇,$ 9cz#`:ND 5umfGn@3<NjuݲoC=ҾH&8y/@R@c)9=z `ɾe)m`!Yؐٲ Dy~Obi L8u*U?R(ÛCUwt -Փ2-Ժþ$1\C\S(5ɑa8חxb $Q_6Ջ@:WD96q<`y0%"k}muIz⹡&G5p=FbEKz `Pkn]F4F0(@iJ#`P#F4F0(@iJny5Xu@-0q8QKdi" / F .JȕԔ-Ij2E"`Ж$9r%:[us@1E@iJ#`P#\yjVK$W 5Plh9VS@I i\嘯,m vl"Fo%[7bQ1Zc #?VGR_^|Oҙ=Nօ$OCI=rz:S+H. 
}Pu* ucwؘDf$0rc^Qi܌]uxl$W&L窛ҚzkjWwezr65,>b X6`*㚰RGuH-HGdKu4؄mm]Zu: {{LD؏s]v5/Le^o^Azkz7L Xċm{݇_To0@epbM5eXFL S]sd+ zS~ljʾZzD.pJ4zʏ5evXi:71Q؈Ia?(&EPT5+ Ţ ?|XW1,]3{6q u'G7v´+~2#60MJi"+ATď1w' Ax|jr,=-5#p2Y9o|k2#6m"tUH"d<2;`lkR ͪFlOyt8&)~xL0%s>ˌXTR{Q:F<[sGg-9۩}$xi 'QlhӡA,t61,˿h ?< l33\)KC$gEGBi?ے-[S HS=kyھ# *9&{D܈YV_0u)jUb4P`qQׄ[~-_qGzĦ5z\IL ̙% pCyWBP7N*щ<-s36z#j(G}]3Y%"`*~C=RS~{[ kVk{̩n=cƶڦq}b0L-ud[ڗ#O=tU,vV*4P<uY,Ě# `*8fxCLj)FT`?(G؇`S0hɒ4Աcc9 Xf9:cyluVZ`?^u҅XjFvGv۔غvʓoߴlå(y_be$ih7R{P_#b4$!bLulJԅ> ,#ucYӰ~#jПi-l&*PgTWݒڱeL+5[KW= 3k6_7O6#eμXnša%z6SPϺ{ |J6#J` G=I_e F09qi@|P Cr0* 0M_{7n `s04i;HeyAb// 6ܰ~nު[(/&}C FzzwW f!`,I+K YUx@~}U7, #(4F4F0(@iJ#`P#F4F0(@iJ#`P#F4F0(@iJ#`P#F4F0(@iJ#`P#F4F0(@iJ#`P#F4F0(@iJ#`P#F4F0(@iJ#`P#F4F0(@iJ#`P#F4F0(@iJ#`P#F4F0(@iJ#`P#F4F0(@iJ#`P#F4F0(@iJ#`P#F4F0(@iJ#`P#F4F0(ǫnhɖFP`KjJh᪛'Khz;}~@1ET_+zi?uD\q34ԣo%GLؖ8%.tu W.fz5,0@&zVK< PPH*< 5T_~X>q}UL.vՓ+oVoO]ӂqCU[M$9/WR_ =Lԡ Eja&m;7ںP7Hvd5UBvxwZ3}l@e\/zkAK-̴_G 9(EoP<z,]D3j xff_,];ڲ6+E]9&"Ʀ S[QhEЮ _L `*ӕ'1Qؖt,xw.t(i(wѿG2Ɠ. 2 #!3cL%GMn7c! 
T5Eʒmm,&#@KgH J+tR9Ta';ۨ MEj\1}g˳+6XO@E诙2X [#}\' +soivG㓘uzFG-V:`#1E53m!Wpj ʕ'GY'Ⱥ¬ v̡@J"=>M %9#l4C5Ԓ-;Rh!|J=wkd͙ǥ"g6QW}I=S0jQz< jϖ-K)#Pm ˨WLr+U҅r%V0Wp&xP[`0&隲e;ߍϭ.k') ʍ?ꪯT2Jnf jOMhTKXp" ޕ9C+s;RjU}]1qX$9+$樱y,uW;%BR{S;!6PSP]9B[^+~EqZŀ{$&/?X::h,ɬX/qFl@E/}EC?LlD*Nj=8R >YR}!0:5m낀HbaC0FD[=u왆:zqEK'AgNL#=u,Km}P+ܚJ퉕YD,ԇh /Rzd)Ջtb,/2iwk:5yQ``n`DFd8/jqtb礷tiܸ7>G5uF5Wp7V^DW4"l"U**wVZKg_'ϝAgѶ-'X\lLP#F4F0(@iJ#`P#F4F0(@iJ#`P#j0jTUV`:` =eWB:j0TW:Izm=ӳUw 0?^uFJwzKj?%;y,iwsvDԶ^,/uU#Nԑ^(hwӍg:ЉvVTˀTN&'2kۄxi^+|j YΥN#8D՜0F/w'[_|~nܣ1*Zwxoּ;FԥN"ȫXQÀJԣF:5Bo@ڑF:(sk=K}[a4n$שr;z#=ӭV<^tn_{B:7F4b*.o@#/I}ܒUأ vy 2qsUQNs!<;jTp&Ӕgtb%s}YyzsmLqy4q߮"1ncu Wna D+UfVFbL9Vڋu[g~|}:7LWtuhf8w@Ijeip՗cn6Nj W,5 aj |Z3YwRx=)]ƯDjEέff"L;?1AuWvtx3Ha߮vs#ҁmo+ ߤ>DM:2S=Jw:ץ\0#=3_R#]*ziB}]Ytg w>תGBݩ]ёS/g޵wZ)6h3q⑶.$y %v" t՛Xm32;fV!q݋&I]]g{~i3E9P18I}Jm0(` an3Ot#陎r~홰t81iFzjzxבj jSEueWkK=5RFYHn{;eX TkTRf;=(v?(2:w<*]㇙_ cp6 \I'&'S:4!夡'걚3[2뫛1Ma$|7W]W4`jxul3E4\εtQ/GY ǶlMZ7 _9q:N4j\;ti[#1VK)ѮrI3NjMf񝕘h"g̊"Ahg3*q`F^o; F&mnwageejEGWE^xd;uހqT۱;a**~s;&h_T&弶uYKNO#ʌDp䍞BU Wˊul3fnjW+ F'bZV}Ԝq|&ko4MgtZڿM ըM8qL]4*8&xK=vcyr#y;]\O BjHߘfWY\{*G$N2pֱÚUvݜ) $1]U& JOlR.W/A/\˱vZ:٤PLqg9YXS<"1'8ϻU7w9FHle#=kmBHi#|W#ܥz3cuz<ǝVcjSǧ]ɛ:ҳr~/V31uZ]洉ĞZ_V%eF䪭cX:pz͙ٴ#\7CjG}]M M߀wS`2+ñ+ <Қbױ;?-rUІK]@G\i#1&J_SAX5un3er?c: ;⽙]'OX_j)iyz.˕xַ/,JmHȦ7+pEQ|Y(pJLSyw*y܍\F,tݾ㾮p$Nބe,K|ף, OdB34LWm{SFS_ǩ5u-K2>֋Gzh)0.返:jʥ!xh{jm|;!$rN;uju]jY9κZWf$FelrT&sSΫn3 C Rr=?g,PM^]>8L9)Z׾8 5yӂqi(jVI^<9:W1iUQQX;jg3/uZZ+]o:5Zf$j2E* &h:nF@CcgXNJj.0SU{hkYd.8վcd#cqߥ:F )3f5LK=rb-)6[ ~>Ut/xN'&~~bj7h$mHt'S+ND#le&fDŜmEq,e"_a#}&|ۋ}Hd@NL~6挻p6W^Sg$GԋKv̄GǑ>O]u6!UǙUp}=U ڲ2ו f+~bwk"Ѳr} ` ~H1 F/'bmeaqOL46+,J7a lہro+ݘHk|g)sF<-:^FW ad^2r|@x(2fGyGbK;z]2t0eJl$sJsGl粿5"Dk̛?W5YRst–d~$tH lC-uM=۷Vk$ٽ%&Z0;Wu&jY_ǒ>5M-;fӧ en-SG #5_hv_HvMo*:kb[,#]7ҭn#?hpyx)-/zqKTVw˃=7gӜ[܆1*i=󵹤ny_a\u_f5V#f+g&fۘ.ƫ8kB1+#[vo ꎀ+`DR <_ϜJ `^[P#_oLpb]`4Dh¾ОD/%I'tPi6^ ƅW%Fے^FrPP.`\jzmҫꥤ]^lX\8XtvNۺ0bFb) O}ˊvnel-tVP*Rw3]Upg"L/ 8`#کJ{zċx[<?.wږ⛷Js"^([H|`t5\F/pYur`@ުeq/wPC-x0`J @30R`<(U0Jǫ~e΀qkV/"/[(u孺jSwM5w媿N/ؔX/P+ Wݰ%WCU7KnoZu 
`tkkǦOՌkp#]`e|fSFX-#+1U7~l۶.Vݠ3ZjM +Gҟ)ZPzOIl*pJ(gzU>]=ܔ K%k0xK9ޔg_ż}SV H7 U>AzzYX?ZZ{G?T9EZҖP׫%򴿵>?y%)0r.WZ3X44CO56Y'6uq.*4 ֶYZ}TEWtMj_o+]; L~[obA5x3uTx_eFcCg VCchi(Qw~l$&_魧$6mGoܖic濒~逸$Op"W1핝'`ZjIf)Ҿ@61{GԵkßbiʅ\͘Y:3!W7򀑈x XMluu|`cx'^$WG%~qS<0sM1F2맶Ϳa{~u?j ~F?E+ۗ՘զXӴ>+}Udsy=WV}h `m >:`a` thuB혯nmjHڗ4  Z }jL+IM3aUO]įY&JMӋq_Z\P7iNy?b_5g˰)ҁ,4`=xǽ{Q:ɀ?3R7<#~+/z?:jtW7??a8zqK &׌\V[u6/ݛZ)_aYb-a Ƹ-G{lOGsgł::TMoc_}ԗ:E]U#=pխW&e{@Ԙzk^O˹Kة̶89+}|K%iǮje7 *9kho^0$Ue9T7+r:/D|S.;=sG[,5M:&hP^,.U6<>X婧G[-j@}+G<幦H uHR[싫ԙs{>fl.ԗTu"f S7r+ڢj9gѪj2AWugxGWYؓա+ޛ&]E0n;K_wƼ︪{<,F$, Fo7KgYZs>#MeV7#i<8Ò?tq 2 ~՞ZV7٢(ף0N.=+H ]􈤚EUYNtl ޱUdoq4ݧrc^F `Ÿh?%9:=4Mݛ2A:+.N NO1wON.ڛlv~2]S1a7eƬiG?W+җ+ޛ2cVck&iQ3s񂼽.{sM_ `i=R/벧%93qqSLL7 w8ά OFqjoh‚D5RdFڕe?1N7]iˍY9Ͱc&ɞP/hoʍ|<( ד<~"w,{3]>CG~Y<,߷?*59۹7ÿ['po϶;vJs:;dˎy߹oLkY\`7[c։3V;q2cvzDOu!uh3Uw]ݜԬ|ӛcԜyJ)ccs'Ez3])GN&s>6>oܩgw":ͫV'7|?ODO\3?.Mw(ku64cg֔ImYcʯz3كΔ靝z!V1˺^NW+3,ڗ8Ȭq9c<ު7,iV4Y>}'qNsT.YkV [>ާiO_f~[3EpQh9.Yʨ,.l!+dX}g>ܷc}63ęݛYoG#w]tQښo1Kbje2]~1=O¼7fI~UMi>MW N>S>c!)6>3Ek{0@^o͟ ?YukIv]`|yr$Yph› gVQ+֛󇚽Z8腻cV\pE~ 7y,n_C-۳l'orzoZM3^S7fcK vOFŖ޻Q'UR|0D315RͶ@84T0TWrgKU7F P v7-ɪq,^5Ӫ FZ~ΘAޯ+v*5jA]u`Pe~ǿqU #@%`U #@+WDxꚯ- F8Sr̖_$`:zn `IU#@ұ?=IeF( Y`y:GSڷVB;FI]?Y.k1Qth330 x鱿(#`x:afN&`ҸrEW_U?RW )kU?Pw=a-X3uTSU 0OfE߸ûFVy M4S$`օU7(/~7>R0cl=WF'Fjxv߹?ۘn+#@azVE'"d"`(տc5Fx@7Xu#V1ԟV"C#@n~&UIg)jɪX(GC`:F9:FPyPsz3~.kTA.@>xegU07O~F^P\#~X_J#`P#1@<%tEXtdate:create2017-08-07T17:42:57+02:008%tEXtdate:modify2017-08-07T17:42:57+02:00黀3tEXtpdf:VersionPDF-1.5 \ 9IENDB`batchtools/vignettes/tikz_prob_algo_simple.tex0000644000176200001440000000347513144310031021523 0ustar liggesusers\documentclass[crop,tikz,convert]{standalone} \usetikzlibrary{shapes,matrix,positioning,chains,arrows,shadows,decorations.pathmorphing,fit,backgrounds} \begin{document} \begin{tikzpicture}[auto] \tikzstyle{userinput}=[rectangle, drop shadow, draw=black, fill=black!10, thick, minimum width=4cm, align=center] \tikzstyle{internal}=[rectangle, drop shadow, draw=black, fill=white, thick, minimum width=4cm, rounded corners, 
align=center] \tikzstyle{result}=[ellipse, drop shadow, draw=black, fill=white, thick, align=center, minimum width=3cm] \tikzstyle{line} = [draw, thick, -latex'] \tikzstyle{sline} = [draw, thick, -latex',decorate, decoration={snake, segment length=2mm,post length=2mm}] \matrix [row sep=10mm, column sep=20mm] { % first row \node {}; & \node {}; & \node [result] (result) {result}; \\ % second row \node [userinput] (static_problem_part) { static problem part\\\texttt{data} }; & \node [userinput] (dynamic_problem_part) { dynamic problem function\\ \texttt{fun(data, ...)} }; & \node [userinput] (algorithm) { algorithm function\\ \texttt{fun(data, instance, ...)} }; \\ % third row \node {}; & \node [userinput] (problem_design) { problem design\\ (\texttt{addExperiments}) }; & \node [userinput] (algorithm_design) { algorithm design\\ (\texttt{addExperiments}) }; \\ }; \draw [sline] (algorithm) to (result) ; \draw [line] (static_problem_part) to (dynamic_problem_part); \draw [sline] (dynamic_problem_part) to node {\texttt{instance}} (algorithm) ; \draw [line] (static_problem_part) to [out=0, in=0, bend left=20] (algorithm); \draw [line] (problem_design) to node {\texttt{...}} (dynamic_problem_part); \draw [line] (algorithm_design) to node {\texttt{...}} (algorithm); \end{tikzpicture} \end{document} batchtools/R/0000755000176200001440000000000013606056241012620 5ustar liggesusersbatchtools/R/clusterFunctionsSlurm.R0000644000176200001440000001102513357106165017343 0ustar liggesusers#' @title ClusterFunctions for Slurm Systems #' #' @description #' Cluster functions for Slurm (\url{http://slurm.schedmd.com/}). #' #' Job files are created based on the brew template \code{template.file}. This #' file is processed with brew and then submitted to the queue using the #' \code{sbatch} command. Jobs are killed using the \code{scancel} command and #' the list of running jobs is retrieved using \code{squeue}. 
The user must #' have the appropriate privileges to submit, delete and list jobs on the #' cluster (this is usually the case). #' #' The template file can access all resources passed to \code{\link{submitJobs}} #' as well as all variables stored in the \code{\link{JobCollection}}. #' It is the template file's job to choose a queue for the job and handle the desired resource #' allocations. #' #' Note that you might have to specify the cluster name here if you do not want to use the default, #' otherwise the commands for listing and killing jobs will not work. #' #' @template template #' @param array.jobs [\code{logical(1)}]\cr #' If array jobs are disabled on the computing site, set to \code{FALSE}. #' @template nodename #' @inheritParams makeClusterFunctions #' @return [\code{\link{ClusterFunctions}}]. #' @family ClusterFunctions #' @export makeClusterFunctionsSlurm = function(template = "slurm", array.jobs = TRUE, nodename = "localhost", scheduler.latency = 1, fs.latency = 65) { # nocov start assertFlag(array.jobs) assertString(nodename) template = findTemplateFile(template) if (testScalarNA(template)) stopf("Argument 'template' (=\"%s\") must point to a readable template file", template) template = cfReadBrewTemplate(template, "##") quote = if (isLocalHost(nodename)) identity else shQuote getClusters = function(reg) { clusters = filterNull(lapply(reg$resources$resources, "[[", "cluster")) if (length(clusters)) return(stri_flatten(unique(as.character(clusters)), ",")) return(character(0L)) } submitJob = function(reg, jc) { assertRegistry(reg, writeable = TRUE) assertClass(jc, "JobCollection") if (jc$array.jobs) { logs = sprintf("%s_%i", fs::path_file(jc$log.file), seq_row(jc$jobs)) jc$log.file = stri_join(jc$log.file, "_%a") } outfile = cfBrewTemplate(reg, template, jc) res = runOSCommand("sbatch", shQuote(outfile), nodename = nodename) output = stri_flatten(stri_trim_both(res$output), "\n") if (res$exit.code > 0L) { temp.errors = c( "Batch job submission failed: 
Job violates accounting policy (job submit limit, user's size and/or time limits)", "Socket timed out on send/recv operation", "Submission rate too high, suggest using job arrays" ) i = wf(stri_detect_fixed(output, temp.errors)) if (length(i) == 1L) return(makeSubmitJobResult(status = i, batch.id = NA_character_, msg = temp.errors[i])) return(cfHandleUnknownSubmitError("sbatch", res$exit.code, res$output)) } id = stri_split_fixed(output[1L], " ")[[1L]][4L] if (jc$array.jobs) { if (!array.jobs) stop("Array jobs not supported by cluster function") makeSubmitJobResult(status = 0L, batch.id = sprintf("%s_%i", id, seq_row(jc$jobs)), log.file = logs) } else { makeSubmitJobResult(status = 0L, batch.id = id) } } listJobs = function(reg, args) { assertRegistry(reg, writeable = FALSE) args = c(args, "--noheader", "--format=%i") if (array.jobs) args = c(args, "-r") clusters = getClusters(reg) if (length(clusters)) args = c(args, sprintf("--clusters=%s", clusters)) res = runOSCommand("squeue", args, nodename = nodename) if (res$exit.code > 0L) OSError("Listing of jobs failed", res) if (length(clusters)) tail(res$output, -1L) else res$output } listJobsQueued = function(reg) { args = c(quote("--user=$USER"), "--states=PD") listJobs(reg, args) } listJobsRunning = function(reg) { args = c(quote("--user=$USER"), "--states=R,S,CG") listJobs(reg, args) } killJob = function(reg, batch.id) { assertRegistry(reg, writeable = TRUE) assertString(batch.id) cfKillJob(reg, "scancel", c(sprintf("--clusters=%s", getClusters(reg)), batch.id), nodename = nodename) } makeClusterFunctions(name = "Slurm", submitJob = submitJob, killJob = killJob, listJobsRunning = listJobsRunning, listJobsQueued = listJobsQueued, array.var = "SLURM_ARRAY_TASK_ID", store.job.collection = TRUE, store.job.files = !isLocalHost(nodename), scheduler.latency = scheduler.latency, fs.latency = fs.latency) } # nocov end batchtools/R/testJob.R0000644000176200001440000000416713606043144014363 0ustar liggesusers#' @title Run 
Jobs Interactively #' #' @description #' Starts a single job on the local machine. #' #' @template id #' @param external [\code{logical(1)}]\cr #' Run the job in an external R session? If \code{TRUE}, starts a fresh R #' session on the local machine to execute the with \code{\link{execJob}}. #' You will not be able to use debug tools like \code{\link[base]{traceback}} #' or \code{\link[base]{browser}}. #' #' If \code{external} is set to \code{FALSE} (default) on the other hand, #' \code{testJob} will execute the job in the current R session and the usual #' debugging tools work. However, spotting missing variable declarations (as they #' are possibly resolved in the global environment) is impossible. #' Same holds for missing package dependency declarations. #' #' @template reg #' @return Returns the result of the job if successful. #' @export #' @family debug #' @examples #' \dontshow{ batchtools:::example_push_temp(1) } #' tmp = makeRegistry(file.dir = NA, make.default = FALSE) #' batchMap(function(x) if (x == 2) xxx else x, 1:2, reg = tmp) #' testJob(1, reg = tmp) #' \dontrun{ #' testJob(2, reg = tmp) #' } testJob = function(id, external = FALSE, reg = getDefaultRegistry()) { assertRegistry(reg) assertFlag(external) id = convertId(reg, id) if (external) { td = fs::path_abs(fs::path_temp()) fn.r = fs::path(td, "batchtools-testJob.R") fn.jc = fs::path(td, "batchtools-testJob.jc") fn.res = fs::path(td, "batchtools-testJob.rds") writeRDS(makeJobCollection(id, reg = reg), file = fn.jc, compress = reg$compress) brew(file = system.file(fs::path("templates", "testJob.tmpl"), package = "batchtools", mustWork = TRUE), output = fn.r, envir = list2env(list(jc = fn.jc, result = fn.res))) res = runOSCommand(Rscript(), fn.r) writeLines(res$output) if (res$exit.code == 0L) return(readRDS(fn.res)) stopf("testJob() failed for job with id=%i. 
To properly debug, re-run with external=FALSE", id$job.id) } else { with_dir(reg$work.dir, { loadRegistryDependencies(reg, must.work = TRUE) execJob(job = makeJob(id, reg = reg)) }) } } batchtools/R/clusterFunctionsOpenLava.R0000644000176200001440000000572413301520663017747 0ustar liggesusers#' @title ClusterFunctions for OpenLava #' #' @description #' Cluster functions for OpenLava. #' #' Job files are created based on the brew template \code{template}. This #' file is processed with brew and then submitted to the queue using the #' \code{bsub} command. Jobs are killed using the \code{bkill} command and the #' list of running jobs is retrieved using \code{bjobs -u $USER -w}. The user #' must have the appropriate privileges to submit, delete and list jobs on the #' cluster (this is usually the case). #' #' The template file can access all resources passed to \code{\link{submitJobs}} #' as well as all variables stored in the \code{\link{JobCollection}}. #' It is the template file's job to choose a queue for the job and handle the desired resource #' allocations. #' #' @note #' Array jobs are currently not supported. #' #' @template template #' @inheritParams makeClusterFunctions #' @return [\code{\link{ClusterFunctions}}]. #' @family ClusterFunctions #' @export makeClusterFunctionsOpenLava = function(template = "openlava", scheduler.latency = 1, fs.latency = 65) { # nocov start template = findTemplateFile(template) if (testScalarNA(template)) stopf("Argument 'template' (=\"%s\") must point to a readable template file", template) template = cfReadBrewTemplate(template) # When LSB_BJOBS_CONSISTENT_EXIT_CODE = Y, the bjobs command exits with 0 only # when unfinished jobs are found, and 255 when no jobs are found, # or a non-existent job ID is entered. 
Sys.setenv(LSB_BJOBS_CONSISTENT_EXIT_CODE = "Y") submitJob = function(reg, jc) { assertRegistry(reg, writeable = TRUE) assertClass(jc, "JobCollection") outfile = cfBrewTemplate(reg, template, jc) res = runOSCommand("bsub", stdin = shQuote(outfile)) if (res$exit.code > 0L) { cfHandleUnknownSubmitError("bsub", res$exit.code, res$output) } else { batch.id = stri_extract_first_regex(stri_flatten(res$output, " "), "\\d+") makeSubmitJobResult(status = 0L, batch.id = batch.id) } } listJobs = function(reg, args) { assertRegistry(reg, writeable = FALSE) res = runOSCommand("bjobs", args) if (res$exit.code > 0L) { if (res$exit.code == 255L || any(stri_detect_regex(res$output, "No (unfinished|pending|running) job found"))) return(character(0L)) OSError("Listing of jobs failed", res) } stri_extract_first_regex(tail(res$output, -1L), "\\d+") } listJobsQueued = function(reg) { listJobs(reg, c("-u $USER", "-w", "-p")) } listJobsRunning = function(reg) { listJobs(reg, c("-u $USER", "-w", "-r")) } killJob = function(reg, batch.id) { assertRegistry(reg, writeable = TRUE) assertString(batch.id) cfKillJob(reg, "bkill", batch.id) } makeClusterFunctions(name = "OpenLava", submitJob = submitJob, killJob = killJob, listJobsQueued = listJobsQueued, listJobsRunning = listJobsRunning, store.job.collection = TRUE, scheduler.latency = scheduler.latency, fs.latency = fs.latency) } # nocov end batchtools/R/batchMap.R0000644000176200001440000001026113606043537014466 0ustar liggesusers#' @title Map Operation for Batch Systems #' #' @description #' A parallel and asynchronous \code{\link[base]{Map}}/\code{\link[base]{mapply}} for batch systems. #' Note that this function only defines the computational jobs. #' The actual computation is started with \code{\link{submitJobs}}. #' Results and partial results can be collected with \code{\link{reduceResultsList}}, \code{\link{reduceResults}} or #' \code{\link{loadResult}}. 
#' #' For a synchronous \code{\link[base]{Map}}-like execution, see \code{\link{btmapply}}. #' #' @param fun [\code{function}]\cr #' Function to map over arguments provided via \code{...}. #' Parameters given via \code{args} or \code{...} are passed as-is, in the respective order and possibly named. #' If the function has the named formal argument \dQuote{.job}, the \code{\link{Job}} is passed to the function #' on the slave. #' @param ... [ANY]\cr #' Arguments to vectorize over (list or vector). #' Shorter vectors will be recycled (possibly with a warning any length is not a multiple of the longest length). #' Mutually exclusive with \code{args}. #' Note that although it is possible to iterate over large objects (e.g., lists of data frames or matrices), this usually #' hurts the overall performance and thus is discouraged. #' @param args [\code{list} | \code{data.frame}]\cr #' Arguments to vectorize over as (named) list or data frame. #' Shorter vectors will be recycled (possibly with a warning any length is not a multiple of the longest length). #' Mutually exclusive with \code{...}. #' @template more.args #' @template reg #' @return [\code{\link{data.table}}] with ids of added jobs stored in column \dQuote{job.id}. #' @export #' @seealso \code{\link{batchReduce}} #' @examples #' \dontshow{ batchtools:::example_push_temp(3) } #' # example using "..." and more.args #' tmp = makeRegistry(file.dir = NA, make.default = FALSE) #' f = function(x, y) x^2 + y #' ids = batchMap(f, x = 1:10, more.args = list(y = 100), reg = tmp) #' getJobPars(reg = tmp) #' testJob(6, reg = tmp) # 100 + 6^2 = 136 #' #' # vector recycling #' tmp = makeRegistry(file.dir = NA, make.default = FALSE) #' f = function(...) list(...) 
#' ids = batchMap(f, x = 1:3, y = 1:6, reg = tmp) #' getJobPars(reg = tmp) #' #' # example for an expand.grid()-like operation on parameters #' tmp = makeRegistry(file.dir = NA, make.default = FALSE) #' ids = batchMap(paste, args = data.table::CJ(x = letters[1:3], y = 1:3), reg = tmp) #' getJobPars(reg = tmp) #' testJob(6, reg = tmp) batchMap = function(fun, ..., args = list(), more.args = list(), reg = getDefaultRegistry()) { list2dt = function(x) { # converts a list to a data.table, but avoids creating column names nn = names(x) if (is.null(nn)) names(x) = rep.int("", length(x)) as.data.table(x) } assertRegistry(reg, class = "Registry", writeable = TRUE) if (nrow(reg$defs) > 0L) stop("Registry must be empty") assertFunction(fun) assert(checkList(args), checkDataFrame(args)) assertList(more.args, names = "strict") if (length(args) > 0L) { if (...length() > 0L) stop("You may only provide arguments via '...' *or* 'args'") ddd = list2dt(args) } else { ddd = list2dt(list(...)) } if (".job" %chin% names(ddd)) stop("Name '.job' not allowed as parameter name (reserved keyword)") if (any(dim(ddd) == 0L)) return(noIds()) info("Adding %i jobs ...", nrow(ddd)) writeRDS(fun, file = fs::path(reg$file.dir, "user.function.rds"), compress = reg$compress) if (length(more.args) > 0L) writeRDS(more.args, file = fs::path(reg$file.dir, "more.args.rds"), compress = reg$compress) ids = seq_row(ddd) reg$defs = data.table( def.id = ids, job.pars = .mapply(list, dots = ddd, MoreArgs = list()), key = "def.id") reg$status = data.table( job.id = ids, def.id = ids, submitted = NA_real_, started = NA_real_, done = NA_real_, error = NA_character_, mem.used = NA_real_, resource.id = NA_integer_, batch.id = NA_character_, log.file = NA_character_, job.hash = NA_character_, job.name = NA_character_, key = "job.id") saveRegistry(reg) invisible(allIds(reg)) } batchtools/R/mergeRegistries.R0000644000176200001440000000647713453602073016121 0ustar liggesusers# @title Merge the computational status of 
two registries # # @description # Merges the computational status of jobs found in the registries located at # \code{file.dir} into the registry \code{reg}. # Both registries must have the same jobs defined and may only differ w.r.t. # the computational status of the jobs. # This function is intended to be applied in the following context: # \enumerate{ # \item Define all jobs locally (and ensure they work as intended by testing them). # \item Copy the \code{file.dir} to remote systems. # \item Submit a subset of jobs on each system, # \item After all jobs are terminated, copy both registries back to the local file system. Remember to keep backups. # \item Load one registry with \code{\link{loadRegistry}}, merge the second with this function. # } # # @param file.dir [\code{character(1)}]\cr # Path to first registry. # @template reg # @return [\code{\link{Registry}}]. # @export # @examples # \dontshow{ batchtools:::example_push_temp(2) } # target = makeRegistry(NA, make.default = FALSE) # batchMap(identity, 1:10, reg = target) # td = tempdir() # file.copy(target$file.dir, td, recursive = TRUE) # file.dir = file.path(td, basename(target$file.dir)) # source = loadRegistry(file.dir, update.paths = TRUE) # # submitJobs(1:5, reg = target) # submitJobs(6:10, reg = source) # # new = mergeRegistries(source, target) mergeRegistries = function(source, target = getDefaultRegistry()) { assertRegistry(source, writeable = TRUE, sync = TRUE, running.ok = FALSE) assertRegistry(target, writeable = TRUE, sync = TRUE, running.ok = FALSE) if (fs::path_real(source$file.dir) == fs::path_real(target$file.dir)) stop("You must provide two different registries (using different file directories") hash = function(x) unlist(.mapply(function(...) 
digest(list(...)), x[, !"def.id"], list())) # update only jobs which are not already computed and only those which are terminated status = source$status[.findNotDone(target), ][.findSubmitted(source)] # create a hash of parameters to match on status$hash = hash(sjoin(source$defs, status)) # create temp table for target with the same hashes tmp = data.table(def.id = status$def.id, hash = hash(sjoin(target$defs, status))) # filter status to keep only jobs with matching ids and hashes # in status there are now only jobs which have an exact match in target$status # perform an updating join status = status[tmp, nomatch = 0L, on = c("def.id", "hash")] info("Merging %i jobs ...", nrow(status)) src = getResultFiles(source, status) dst = fs::path(dir(target, "results"), fs::path_file(src)) info("Copying %i result files ...", length(src)) fs::file_copy(src, dst, overwrite = TRUE) src = getLogFiles(source, status) dst = fs::path(dir(target, "logs"), fs::path_file(src)) info("Copying %i log files ...", length(src)) fs::file_copy(src, dst, overwrite = TRUE) ext.dirs = as.integer(chintersect(list.files(dir(source, "external")), as.character(status$job.id))) if (length(ext.dirs) > 0L) { src = getExternalDirs(source, ext.dirs) dst = getExternalDirs(target, ext.dirs) info("Copying %i external directories ...", length(ext.dirs)) fs::dir_delete(dst[fs::dir_exists(dst)]) fs::dir_copy(src, fs::path_dir(dst)) } target$status = ujoin(target$status, status, by = "job.id") saveRegistry(reg = target) } batchtools/R/Export.R0000644000176200001440000000420213606043272014222 0ustar liggesusers#' @title Export Objects to the Slaves #' #' @description #' Objects are saved in subdirectory \dQuote{exports} of the #' \dQuote{file.dir} of \code{reg}. #' They are automatically loaded and placed in the global environment #' each time the registry is loaded or a job collection is executed. #' #' @param export [\code{list}]\cr #' Named list of objects to export. 
#' @param unexport [\code{character}]\cr #' Vector of object names to unexport. #' @template reg #' @return [\code{data.table}] with name and uri to the exported objects. #' @export #' @examples #' \dontshow{ batchtools:::example_push_temp(1) } #' tmp = makeRegistry(file.dir = NA, make.default = FALSE) #' #' # list exports #' exports = batchExport(reg = tmp) #' print(exports) #' #' # add a job and required exports #' batchMap(function(x) x^2 + y + z, x = 1:3, reg = tmp) #' exports = batchExport(export = list(y = 99, z = 1), reg = tmp) #' print(exports) #' #' submitJobs(reg = tmp) #' waitForJobs(reg = tmp) #' stopifnot(loadResult(1, reg = tmp) == 101) #' #' # Un-export z #' exports = batchExport(unexport = "z", reg = tmp) #' print(exports) batchExport = function(export = list(), unexport = character(0L), reg = getDefaultRegistry()) { assertRegistry(reg, writeable = TRUE) assertList(export, names = "named") assertCharacter(unexport, any.missing = FALSE, min.chars = 1L) path = fs::path(reg$file.dir, "exports") if (length(export) > 0L) { nn = names(export) fn = fs::path(path, mangle(nn)) found = fs::file_exists(fn) if (any(!found)) info("Exporting new objects: '%s' ...", stri_flatten(nn[!found], "','")) if (any(found)) info("Overwriting previously exported object: '%s'", stri_flatten(nn[found], "','")) Map(writeRDS, object = export, file = fn, compress = reg$compress) } if (length(unexport) > 0L) { fn = fs::path(path, mangle(unexport)) found = fs::file_exists(fn) if (any(found)) info("Un-exporting exported objects: '%s' ...", stri_flatten(unexport[found], "','")) file_remove(fn[found]) } fns = list.files(path, pattern = "\\.rds") invisible(data.table(name = unmangle(fns), uri = fs::path(path, fns))) } batchtools/R/getDefaultRegistry.R0000644000176200001440000000113213144310031016540 0ustar liggesusers#' @title Get and Set the Default Registry #' @description #' \code{getDefaultRegistry} returns the registry currently set as default (or #' stops with an exception if 
none is set). \code{setDefaultRegistry} sets #' a registry as default. #' #' @template reg #' @family Registry #' @export getDefaultRegistry = function() { if (is.null(batchtools$default.registry)) stop("No default registry defined") batchtools$default.registry } #' @export #' @rdname getDefaultRegistry setDefaultRegistry = function(reg) { if (!is.null(reg)) assertRegistry(reg) batchtools$default.registry = reg } batchtools/R/syncRegistry.R0000644000176200001440000000300313462044773015453 0ustar liggesusers#' @title Synchronize the Registry #' #' @description #' Parses update files written by the slaves to the file system and updates the #' internal data base. #' #' @template reg #' @return [\code{logical(1)}]: \code{TRUE} if the state has changed, \code{FALSE} otherwise. #' @family Registry #' @export syncRegistry = function(reg = getDefaultRegistry()) { assertRegistry(reg) altered = sync(reg) if (altered) saveRegistry(reg) altered } sync = function(reg) { "!DEBUG [syncRegistry]: Triggered syncRegistry" fns = list.files(dir(reg, "updates"), full.names = TRUE) if (length(fns) == 0L) return(invisible(FALSE)) runHook(reg, "pre.sync", fns = fns) updates = lapply(fns, function(fn) { x = try(readRDS(fn), silent = TRUE) if (is.error(x)) { if (reg$writeable && difftime(Sys.time(), fs::file_info(fn)$modification_time, units = "mins") > 60) { info("Removing unreadable update file '%s'", fn) file_remove(fn) } else { info("Skipping unreadable update file '%s'", fn) } return(NULL) } return(x) }) failed = vlapply(updates, is.null) updates = rbindlist(updates, fill = TRUE, use.names = TRUE) # -> fill = TRUE for #135 if (nrow(updates) > 0L) { expr = quote(`:=`(started = i.started, done = i.done, error = i.error, mem.used = i.mem.used)) reg$status[updates, eval(expr), on = "job.id"] if (reg$writeable) file_remove(fns[!failed]) } runHook(reg, "post.sync", updates = updates) invisible(nrow(updates) > 0L) } 
batchtools/R/clusterFunctionsMulticore.R0000644000176200001440000000633413606041641020205 0ustar liggesusersif (getRversion() < "3.3.2" && .Platform$OS.type != "windows") { # Provided patch for upstream which is shipped with R >= 3.3.2: # https://stat.ethz.ch/pipermail/r-devel/2016-August/073035.html selectChildren = getFromNamespace("selectChildren", "parallel") readChild = getFromNamespace("readChild", "parallel") mccollect = function(pids, timeout = 0) { if (!length(pids)) return (NULL) if (!is.integer(pids)) stop("invalid 'jobs' argument") s = selectChildren(pids, timeout) if (is.logical(s) || !length(s)) return(NULL) res = lapply(s, function(x) { r = readChild(x) if (is.raw(r)) unserialize(r) else NULL }) names(res) = as.character(pids)[match(s, pids)] res } } else { mccollect = function(jobs, timeout = 0) { parallel::mccollect(jobs, wait = FALSE, timeout = timeout) } } Multicore = R6Class("Multicore", cloneable = FALSE, public = list( jobs = NULL, ncpus = NA_integer_, initialize = function(ncpus) { self$jobs = data.table(pid = integer(0L), count = integer(0L)) self$ncpus = ncpus reg.finalizer(self, function(e) mccollect(self$procs$pid, timeout = 1), onexit = FALSE) }, spawn = function(jc) { force(jc) repeat { self$collect(0) if (nrow(self$jobs) < self$ncpus) break Sys.sleep(1) } pid = parallel::mcparallel(doJobCollection(jc, output = jc$log.file), mc.set.seed = FALSE)$pid self$jobs = rbind(self$jobs, data.table(pid = pid, count = 0L)) invisible(as.character(pid)) }, list = function() { self$collect(0) as.character(self$jobs$pid) }, collect = function(timeout) { repeat { res = mccollect(self$jobs$pid, timeout = timeout) if (is.null(res)) break pids = as.integer(names(res)) self$jobs[pid %in% pids, count := count + 1L] self$jobs = self$jobs[count < 1L] } } ) ) #' @title ClusterFunctions for Parallel Multicore Execution #' #' @description #' Jobs are spawned asynchronously using the functions \code{mcparallel} and \code{mccollect} (both in \pkg{parallel}). 
#' Does not work on Windows, use \code{\link{makeClusterFunctionsSocket}} instead. #' #' @template ncpus #' @inheritParams makeClusterFunctions #' @return [\code{\link{ClusterFunctions}}]. #' @family ClusterFunctions #' @export makeClusterFunctionsMulticore = function(ncpus = NA_integer_, fs.latency = 0) { if (testOS("windows")) stop("ClusterFunctionsMulticore do not support Windows. Use makeClusterFunctionsSocket instead.") if (is.na(ncpus)) { ncpus = max(as.numeric(getOption("mc.cores")), parallel::detectCores(), 1L, na.rm = TRUE) info("Auto-detected %i CPUs", ncpus) } ncpus = asCount(ncpus, na.ok = FALSE, positive = TRUE) p = Multicore$new(ncpus) submitJob = function(reg, jc) { force(jc) pid = p$spawn(jc) makeSubmitJobResult(status = 0L, batch.id = pid) } listJobsRunning = function(reg) { assertRegistry(reg, writeable = FALSE) p$list() } makeClusterFunctions(name = "Multicore", submitJob = submitJob, listJobsRunning = listJobsRunning, store.job.collection = FALSE, fs.latency = fs.latency, hooks = list(pre.sync = function(reg, fns) p$collect(1))) } batchtools/R/RDSReader.R0000644000176200001440000000171313432464672014530 0ustar liggesusersRDSReader = R6Class("RDSReader", cloneable = FALSE, public = list( cache = list(), use.cache = NA, initialize = function(use.cache = FALSE) { self$use.cache = use.cache }, get = function(uri, slot = NA_character_) { read = function(uri) if (fs::file_exists(uri)) readRDS(uri) else NULL # no cache used, read object from disk and return if (!self$use.cache) return(read(uri)) # not slotted: # look up object in cache. If not found, add to cache. Return cached object if (is.na(slot)) { if (! 
uri %chin% names(self$cache)) self$cache[[uri]] = read(uri) return(self$cache[[uri]]) } # slotted: # object is stored in cache[[slot]] as list(obj = [cached obj], uri = uri) if (is.null(self$cache[[slot]]) || self$cache[[slot]]$uri != uri) self$cache[[slot]] = list(obj = read(uri), uri = uri) return(self$cache[[slot]]$obj) } ) ) batchtools/R/findJobs.R0000644000176200001440000002234613453602073014510 0ustar liggesusers#' @title Find and Filter Jobs #' #' @description #' These functions are used to find and filter jobs, depending on either their parameters (\code{findJobs} and #' \code{findExperiments}), their tags (\code{findTagged}), or their computational status (all other functions, #' see \code{\link{getStatus}} for an overview). #' #' Note that \code{findQueued}, \code{findRunning}, \code{findOnSystem} and \code{findExpired} are somewhat heuristic #' and may report misleading results, depending on the state of the system and the \code{\link{ClusterFunctions}} implementation. #' #' See \code{\link{JoinTables}} for convenient set operations (unions, intersects, differences) on tables with job ids. #' #' @param expr [\code{expression}]\cr #' Predicate expression evaluated in the job parameters. #' Jobs for which \code{expr} evaluates to \code{TRUE} are returned. #' @templateVar ids.default all #' @template ids #' @template reg #' @return [\code{\link{data.table}}] with column \dQuote{job.id} containing matched jobs. 
#' @seealso \code{\link{getStatus}} \code{\link{JoinTables}} #' @export #' @examples #' \dontshow{ batchtools:::example_push_temp(1) } #' tmp = makeRegistry(file.dir = NA, make.default = FALSE) #' batchMap(identity, i = 1:3, reg = tmp) #' ids = findNotSubmitted(reg = tmp) #' #' # get all jobs: #' findJobs(reg = tmp) #' #' # filter for jobs with parameter i >= 2 #' findJobs(i >= 2, reg = tmp) #' #' # filter on the computational status #' findSubmitted(reg = tmp) #' findNotDone(reg = tmp) #' #' # filter on tags #' addJobTags(2:3, "my_tag", reg = tmp) #' findTagged(tags = "my_tag", reg = tmp) #' #' # combine filter functions using joins #' # -> jobs which are not done and not tagged (using an anti-join): #' ajoin(findNotDone(reg = tmp), findTagged("my_tag", reg = tmp)) findJobs = function(expr, ids = NULL, reg = getDefaultRegistry()) { assertRegistry(reg, sync = TRUE) ids = convertIds(reg, ids) if (missing(expr)) return(ids %??% allIds(reg)) expr = substitute(expr) ee = parent.frame() fun = function(pars) eval(expr, pars, enclos = ee) job.pars = NULL setkeyv(mergedJobs(reg, ids, c("job.id", "job.pars"))[vlapply(job.pars, fun), "job.id"], "job.id") } #' @export #' @rdname findJobs #' @param prob.name [\code{character}]\cr #' Exact name of the problem (no substring matching). #' If not provided, all problems are matched. #' @param prob.pattern [\code{character}]\cr #' Regular expression pattern to match problem names. #' If not provided, all problems are matched. #' @param algo.name [\code{character}]\cr #' Exact name of the problem (no substring matching). #' If not provided, all algorithms are matched. #' @param algo.pattern [\code{character}]\cr #' Regular expression pattern to match algorithm names. #' If not provided, all algorithms are matched. #' @param prob.pars [\code{expression}]\cr #' Predicate expression evaluated in the problem parameters. #' @param algo.pars [\code{expression}]\cr #' Predicate expression evaluated in the algorithm parameters. 
#' @param repls [\code{integer}]\cr #' Whitelist of replication numbers. If not provided, all replications are matched. findExperiments = function(ids = NULL, prob.name = NA_character_, prob.pattern = NA_character_, algo.name = NA_character_, algo.pattern = NA_character_, prob.pars, algo.pars, repls = NULL, reg = getDefaultRegistry()) { assertRegistry(reg, class = "ExperimentRegistry", sync = TRUE) assertString(prob.name, na.ok = TRUE, min.chars = 1L) assertString(prob.pattern, na.ok = TRUE, min.chars = 1L) assertString(algo.name, na.ok = TRUE, min.chars = 1L) assertString(algo.pattern, na.ok = TRUE, min.chars = 1L) ee = parent.frame() tab = mergedJobs(reg, convertIds(reg, ids), c("job.id", "problem", "algorithm", "prob.pars", "algo.pars", "repl")) if (!is.na(prob.name)) { problem = NULL tab = tab[problem == prob.name] } if (!is.na(prob.pattern)) { problem = NULL tab = tab[stri_detect_regex(problem, prob.pattern)] } if (!is.na(algo.name)) { algorithm = NULL tab = tab[algorithm == algo.name] } if (!is.na(algo.pattern)) { algorithm = NULL tab = tab[stri_detect_regex(algorithm, algo.pattern)] } if (!is.null(repls)) { repls = asInteger(repls, any.missing = FALSE) repl = NULL tab = tab[repl %in% repls] } if (!missing(prob.pars)) { expr = substitute(prob.pars) fun = function(pars) eval(expr, pars, enclos = ee) prob.pars = NULL tab = tab[vlapply(prob.pars, fun)] } if (!missing(algo.pars)) { expr = substitute(algo.pars) fun = function(pars) eval(expr, pars, enclos = ee) algo.pars = NULL tab = tab[vlapply(algo.pars, fun)] } setkeyv(tab[, "job.id"], "job.id")[] } #' @export #' @rdname findJobs findSubmitted = function(ids = NULL, reg = getDefaultRegistry()) { assertRegistry(reg, sync = TRUE) .findSubmitted(reg, convertIds(reg, ids)) } .findSubmitted = function(reg, ids = NULL) { submitted = NULL filter(reg$status, ids, c("job.id", "submitted"))[!is.na(submitted), "job.id"] } #' @export #' @rdname findJobs findNotSubmitted = function(ids = NULL, reg = getDefaultRegistry()) { 
  assertRegistry(reg, sync = TRUE)
  .findNotSubmitted(reg, convertIds(reg, ids))
}

# Internal: jobs without a 'submitted' timestamp in the status table.
.findNotSubmitted = function(reg, ids = NULL) {
  submitted = NULL # NSE variable declaration for data.table / R CMD check
  filter(reg$status, ids, c("job.id", "submitted"))[is.na(submitted), "job.id"]
}

#' @export
#' @rdname findJobs
findStarted = function(ids = NULL, reg = getDefaultRegistry()) {
  assertRegistry(reg, sync = TRUE)
  .findStarted(reg, convertIds(reg, ids))
}

# Internal: jobs with a 'started' timestamp OR whose batch.id the scheduler
# currently reports as running.
.findStarted = function(reg, ids = NULL, batch.ids = getBatchIds(reg, status = "running")) {
  started = batch.id = status = NULL # NSE declarations
  bids = batch.ids[status == "running"]$batch.id
  filter(reg$status, ids, c("job.id", "started", "batch.id"))[!is.na(started) | batch.id %in% bids, "job.id"]
}

#' @export
#' @rdname findJobs
findNotStarted = function(ids = NULL, reg = getDefaultRegistry()) {
  assertRegistry(reg, sync = TRUE)
  .findNotStarted(reg, convertIds(reg, ids))
}

# Internal: complement of .findStarted (no 'started' stamp and not running).
.findNotStarted = function(reg, ids = NULL, batch.ids = getBatchIds(reg, status = "running")) {
  started = batch.id = status = NULL # NSE declarations
  bids = batch.ids[status == "running"]$batch.id
  filter(reg$status, ids, c("job.id", "started", "batch.id"))[is.na(started) & !batch.id %chin% bids, "job.id"]
}

#' @export
#' @rdname findJobs
findDone = function(ids = NULL, reg = getDefaultRegistry()) {
  assertRegistry(reg, sync = TRUE)
  .findDone(reg, convertIds(reg, ids))
}

# Internal: terminated successfully ('done' set, no 'error' recorded).
.findDone = function(reg, ids = NULL) {
  done = error = NULL # NSE declarations
  filter(reg$status, ids, c("job.id", "done", "error"))[!is.na(done) & is.na(error), "job.id"]
}

#' @export
#' @rdname findJobs
findNotDone = function(ids = NULL, reg = getDefaultRegistry()) {
  assertRegistry(reg, sync = TRUE)
  .findNotDone(reg, convertIds(reg, ids))
}

# Internal: not terminated, or terminated with an error.
.findNotDone = function(reg, ids = NULL) {
  done = error = NULL # NSE declarations
  filter(reg$status, ids, c("job.id", "done", "error"))[is.na(done) | !is.na(error), "job.id"]
}

#' @export
#' @rdname findJobs
findErrors = function(ids = NULL, reg = getDefaultRegistry()) {
  assertRegistry(reg, sync = TRUE)
  .findErrors(reg, convertIds(reg, ids))
}

# Internal: jobs with a recorded error message.
.findErrors = function(reg, ids = NULL) {
  error = NULL # NSE declaration
  filter(reg$status, ids, c("job.id", "error"))[!is.na(error), "job.id"]
}

# used in waitForJobs: find jobs which are done or error
.findTerminated = function(reg, ids = NULL) {
  done = NULL # NSE declaration
  filter(reg$status, ids, c("job.id", "done"))[!is.na(done), "job.id"]
}

#' @export
#' @rdname findJobs
findOnSystem = function(ids = NULL, reg = getDefaultRegistry()) {
  assertRegistry(reg, sync = TRUE)
  .findOnSystem(reg, convertIds(reg, ids))
}

# Internal: submitted, not yet done, and batch.id still known to the scheduler.
# 'batch.ids' is parameterized so callers can restrict to running/queued jobs.
.findOnSystem = function(reg, ids = NULL, cols = "job.id", batch.ids = getBatchIds(reg, status = "all")) {
  if (length(batch.ids) == 0L)
    return(noIds())
  submitted = done = batch.id = NULL # NSE declarations
  filter(reg$status, ids, c("job.id", "submitted", "done", "batch.id"))[!is.na(submitted) & is.na(done) & batch.id %in% batch.ids$batch.id, cols, with = FALSE]
}

#' @export
#' @rdname findJobs
findRunning = function(ids = NULL, reg = getDefaultRegistry()) {
  assertRegistry(reg, sync = TRUE)
  .findOnSystem(reg, convertIds(reg, ids), batch.ids = getBatchIds(reg, status = "running"))
}

#' @export
#' @rdname findJobs
findQueued = function(ids = NULL, reg = getDefaultRegistry()) {
  assertRegistry(reg, sync = TRUE)
  .findOnSystem(reg, convertIds(reg, ids), batch.ids = getBatchIds(reg, status = "queued"))
}

#' @export
#' @rdname findJobs
findExpired = function(ids = NULL, reg = getDefaultRegistry()) {
  assertRegistry(reg, sync = TRUE)
  .findExpired(reg, convertIds(reg, ids))
}

# Internal: submitted and unfinished, but vanished from the scheduler
# (batch.id no longer listed) -- i.e. presumably killed or walltime-expired.
# %chnin% is the package-local negated %chin%.
.findExpired = function(reg, ids = NULL, batch.ids = getBatchIds(reg)) {
  submitted = done = batch.id = NULL # NSE declarations
  filter(reg$status, ids, c("job.id", "submitted", "done", "batch.id"))[!is.na(submitted) & is.na(done) & batch.id %chnin% batch.ids$batch.id, "job.id"]
}

#' @export
#' @rdname findJobs
#' @param tags [\code{character}]\cr
#'   Return jobs which are tagged with any of the tags provided.
findTagged = function(tags = character(0L), ids = NULL, reg = getDefaultRegistry()) {
  assertRegistry(reg)
  ids = convertIds(reg, ids, default = allIds(reg))
  assertCharacter(tags, any.missing = FALSE, pattern = "^[[:alnum:]_.]+$", min.len = 1L)
  tag = NULL # NSE declaration
  ids[unique(reg$tags[tag %chin% tags, "job.id"], by = "job.id")]
}
batchtools/R/Worker.R0000644000176200001440000000742213354623710014222 0ustar liggesusers
#' @title Create a Linux-Worker
#' @docType class
#' @format An \code{\link{R6Class}} generator object
#'
#' @description
#' \code{\link[R6]{R6Class}} to create local and remote linux workers.
#'
#' @field nodename Host name. Set via constructor.
#' @field ncpus Number of CPUs. Set via constructor and defaults to a heuristic which tries to detect the number of CPUs of the machine.
#' @field max.load Maximum load average (of the last 5 min). Set via constructor and defaults to the number of CPUs of the machine.
#' @field status Status of the worker; one of \dQuote{unknown}, \dQuote{available}, \dQuote{max.cpus} and \dQuote{max.load}.
#' @section Methods:
#' \describe{
#'  \item{\code{new(nodename, ncpus, max.load)}}{Constructor.}
#'  \item{\code{update(reg)}}{Update the worker status.}
#'  \item{\code{list(reg)}}{List running jobs.}
#'  \item{\code{start(reg, fn, outfile)}}{Start job collection in file \dQuote{fn} and output to \dQuote{outfile}.}
#'  \item{\code{kill(reg, batch.id)}}{Kill job matching the \dQuote{batch.id}.}
#' }
#' @return [\code{\link{Worker}}].
#' @export
#' @examples
#' \dontrun{
#' # create a worker for the local machine and use 4 CPUs.
#' Worker$new("localhost", ncpus = 4)
#' }
Worker = R6Class("Worker",
  cloneable = FALSE,
  public = list(
    nodename = NULL,
    ncpus = NULL,
    max.load = NULL,
    script = NULL,   # path to the 'linux-helper' shell script on the worker
    status = "unknown",

    # Constructor: locates the helper script (locally via system.file(), or
    # remotely by running Rscript over SSH and parsing the marked output),
    # then queries the CPU count if not provided.
    initialize = function(nodename, ncpus = NULL, max.load = NULL) {
      if (testOS("windows"))
        stop("Windows is not supported by the Worker Class")
      self$nodename = assertString(nodename)
      if (!is.null(ncpus))
        ncpus = asCount(ncpus)
      if (!is.null(max.load))
        assertNumber(max.load)
      if (nodename == "localhost") {
        self$script = system.file("bin", "linux-helper", package = "batchtools")
      } else {
        # print the remote helper path between BOF/EOF markers so that noise
        # on stdout (e.g. shell profiles) can be filtered out reliably
        args = c("-e", shQuote("message(\"[bt] --BOF--\\n\", \"[bt] \", system.file(\"bin/linux-helper\", package = \"batchtools\"), \"\\n[bt] --EOF--\\n\")"))
        res = runOSCommand("Rscript", args, nodename = nodename)
        script = private$filter_output(res)$output
        self$script = assertString(script, min.chars = 1L)
      }
      self$ncpus = ncpus %??% as.integer(private$run("number-of-cpus")$output)
      self$max.load = max.load %??% self$ncpus
    },

    # List batch ids of jobs running for this registry, prefixed "<node>#".
    list = function(reg) {
      stri_join(self$nodename, "#", stri_trim_both(private$run(c("list-jobs", reg$file.dir))$output))
    },

    # Launch a job collection file 'fn', redirecting output to 'outfile'.
    start = function(reg, fn, outfile) {
      private$run(c("start-job", fn, outfile))
    },

    # Kill a job; the PID is the part of the batch id after the '#'.
    kill = function(reg, batch.id) {
      pid = stri_split_fixed(batch.id, "#", n = 2L)[[1L]][2L]
      cfKillJob(reg, self$script, c("kill-job", pid))
    },

    # Query load/process counts from the helper and derive self$status.
    update = function(reg) {
      "!DEBUG [Worker]: Updating Worker '`self$nodename`'"
      res = private$run(c("status", reg$file.dir))
      res = as.numeric(stri_split_regex(res$output, "\\s+")[[1L]])
      names(res) = c("load", "n.rprocs", "n.rprocs.50", "n.jobs")
      self$status = if (res["load"] > self$max.load) {
        "max.load"
      } else if (res["n.jobs"] >= self$ncpus) {
        "max.cpus"
      } else {
        "available"
      }
      return(res)
    }
  ),

  private = list(
    # Keep only "[bt] "-prefixed lines between the BOF/EOF markers and strip
    # the prefix; errors out if the markers are not found exactly twice.
    filter_output = function(res) {
      output = stri_trim_both(res$output)
      marker = stri_detect_regex(output, "^\\[bt\\] --[BE]OF--$")
      if (sum(marker) != 2L) {
        stopf("runOSCommand failed: Expected BOF+EOF markers for '%s %s', but got:\n %s",
          res$sys.cmd, stri_flatten(res$sys.args, " "), stri_flatten(res$output, "\n") %??% "")
      }
      info = stri_startswith_fixed(output, "[bt]") & !marker
      res$output = stri_trim_left(stri_sub(output[info], 5L))
      res
    },

    # Run the helper script (locally or via SSH) and filter its output.
    run = function(args) {
      private$filter_output(runOSCommand(self$script, args, nodename = self$nodename))
    }
  )
)
batchtools/R/Logs.R0000644000176200001440000000776413432537725013675 0ustar liggesusers
#' @useDynLib batchtools fill_gaps
# Internal: read the (possibly chunked) log file of a job and attribute each
# line to a job id via the trailing "[batchtools job.id=NN]" marker; lines
# without a marker inherit the preceding id (C routine fill_gaps).
readLog = function(id, missing.as.empty = FALSE, reg = getDefaultRegistry()) {
  log.file = getLogFiles(reg, id)
  # the log may lag behind on network file systems -> wait up to fs.latency
  if (is.na(log.file) || !waitForFile(log.file, timeout = reg$cluster.functions$fs.latency, must.work = FALSE)) {
    if (missing.as.empty)
      return(data.table(job.id = integer(0L), lines = character(0L)))
    stopf("Log file '%s' for job with id %i not available", log.file, id$job.id)
  }
  lines = readLines(log.file)
  if (length(lines) > 0L) {
    job.id = as.integer(stri_match_last_regex(lines, c("\\[batchtools job\\.id=([0-9]+)\\]$"))[, 2L])
    job.id = .Call(fill_gaps, job.id)
  } else {
    job.id = integer(0L)
  }
  setkeyv(data.table(job.id = job.id, lines = lines), "job.id", physical = FALSE)
}

# Internal: lines of 'log' belonging to job 'id' (plus unattributed lines).
extractLog = function(log, id) {
  job.id = NULL # NSE declaration
  log[is.na(job.id) | job.id %in% id$job.id]$lines
}

#' @title Grep Log Files for a Pattern
#'
#' @description
#' Crawls through log files and reports jobs with lines matching the \code{pattern}.
#' See \code{\link{showLog}} for an example.
#'
#' @templateVar ids.default findStarted
#' @template ids
#' @param pattern [\code{character(1L)}]\cr
#'   Regular expression or string (see \code{fixed}).
#' @param ignore.case [\code{logical(1L)}]\cr
#'   If \code{TRUE} the match will be performed case insensitively.
#' @param fixed [\code{logical(1L)}]\cr
#'   If \code{FALSE} (default), \code{pattern} is a regular expression and a fixed string otherwise.
#' @template reg
#' @export
#' @family debug
#' @return [\code{\link{data.table}}] with columns \dQuote{job.id} and \dQuote{message}.
grepLogs = function(ids = NULL, pattern, ignore.case = FALSE, fixed = FALSE, reg = getDefaultRegistry()) {
  assertRegistry(reg, sync = TRUE)
  assertString(pattern, min.chars = 1L)
  assertFlag(ignore.case)
  assertFlag(fixed)
  job.id = job.hash = log.file = matches = NULL # NSE declarations
  ids = convertIds(reg, ids, default = .findStarted(reg = reg))

  # one physical log file can contain several (chunked) jobs; build a key from
  # hash + file so the file only has to be parsed once per group
  tab = filter(reg$status[!is.na(job.hash)], ids)[, list(job.id = job.id, hash = sprintf("%s-%s", job.hash, log.file))]
  if (nrow(tab) == 0L)
    return(data.table(job.id = integer(0L), matches = character(0L)))
  setorderv(tab, "hash") # group jobs sharing a log file together

  res = data.table(job.id = tab$job.id, matches = NA_character_)
  hash.before = ""
  matcher = if (fixed) stri_detect_fixed else stri_detect_regex
  for (i in seq_row(tab)) {
    # re-read the log only when we cross into the next hash group
    if (hash.before != tab$hash[i]) {
      log = readLog(tab[i], missing.as.empty = TRUE, reg = reg)
      hash.before = tab$hash[i]
    }
    if (nrow(log) > 0L) {
      lines = extractLog(log, tab[i])
      m = matcher(lines, pattern, case_insensitive = ignore.case)
      if (any(m))
        set(res, i, "matches", stri_flatten(lines[m], "\n"))
    }
  }
  # keep only jobs with at least one matching line
  setkeyv(res[!is.na(matches)], "job.id")[]
}

#' @title Inspect Log Files
#'
#' @description
#' \code{showLog} opens the log in the pager. For customization, see \code{\link[base]{file.show}}.
#' \code{getLog} returns the log as character vector.
#' @template id
#' @template reg
#' @export
#' @family debug
#' @return Nothing.
#' @examples
#' \dontshow{ batchtools:::example_push_temp(1) }
#' tmp = makeRegistry(file.dir = NA, make.default = FALSE)
#'
#' # Create some dummy jobs
#' fun = function(i) {
#'   if (i == 3) stop(i)
#'   if (i %% 2 == 1) warning("That's odd.")
#' }
#' ids = batchMap(fun, i = 1:5, reg = tmp)
#' submitJobs(reg = tmp)
#' waitForJobs(reg = tmp)
#' getStatus(reg = tmp)
#'
#' writeLines(getLog(ids[1], reg = tmp))
#' \dontrun{
#' showLog(ids[1], reg = tmp)
#' }
#'
#' grepLogs(pattern = "warning", ignore.case = TRUE, reg = tmp)
showLog = function(id, reg = getDefaultRegistry()) {
  assertRegistry(reg, sync = TRUE)
  id = convertId(reg, id)
  # pull the job's log lines and hand them to the pager via a temporary file
  log.lines = extractLog(readLog(id, reg = reg), id)
  target = fs::path(fs::path_temp(), sprintf("%i.log", id$job.id))
  writeLines(text = log.lines, con = target)
  # file.show() removes the temporary file after the pager is closed
  file.show(target, delete.file = TRUE)
}

#' @export
#' @rdname showLog
getLog = function(id, reg = getDefaultRegistry()) {
  assertRegistry(reg, sync = TRUE)
  id = convertId(reg, id)
  # return the job's log lines as a plain character vector
  log = readLog(id, reg = reg)
  extractLog(log, id)
}
batchtools/R/zzz.R0000644000176200001440000000404413606055213013600 0ustar liggesusers
#' @description
#' For bug reports and feature requests please use the tracker:
#' \url{https://github.com/mllg/batchtools}.
#'
#' @section Package options:
#' \describe{
#'  \item{\code{batchtools.verbose}}{
#'    Verbosity. Set to \code{FALSE} to suppress info messages and progress bars.
#'  }
#'  \item{\code{batchtools.progress}}{
#'    Progress bars. Set to \code{FALSE} to disable them.
#'  }
#'  \item{\code{batchtools.timestamps}}{
#'    Add time stamps to log output. Set to \code{FALSE} to disable them.
#'  }
#' }
#' Furthermore, you may enable a debug mode using the \pkg{debugme} package by
#' setting the environment variable \dQuote{DEBUGME} to \dQuote{batchtools} before
#' loading \pkg{batchtools}.
#' @import utils
#' @import checkmate
#' @import stringi
#' @import data.table
#' @importFrom R6 R6Class
#' @importFrom digest digest
#' @importFrom brew brew
#' @importFrom progress progress_bar
#' @importFrom rappdirs user_config_dir site_config_dir
#' @importFrom stats runif predict pexp
#' @importFrom base64url base32_encode base32_decode
#' @importFrom withr with_dir with_seed local_options local_dir
"_PACKAGE"

# Package-internal state: debug flag, supported hook names and the resource
# names recognized per job / per chunk.
batchtools = new.env(parent = emptyenv())
batchtools$debug = FALSE
batchtools$hooks = list(
  remote = c("pre.do.collection", "post.do.collection"),
  local = c("pre.sync", "post.sync", "pre.submit.job", "post.submit.job", "pre.submit", "post.submit", "pre.kill", "post.kill")
)
batchtools$resources = list(
  per.job = c("walltime", "memory", "ncpus", "omp.threads", "blas.threads"),
  per.chunk = c("measure.memory", "chunks.as.arrayjobs", "pm.backend", "foreach.backend")
)

.onLoad = function(libname, pkgname) { # nocov start
  # enable debug output if DEBUGME contains "batchtools"
  if (requireNamespace("debugme", quietly = TRUE) && "batchtools" %in% strsplit(Sys.getenv("DEBUGME"), ",", fixed = TRUE)[[1L]]) {
    debugme::debugme()
    batchtools$debug = TRUE
  }
  backports::import(pkgname, "...length")
  backports::import(pkgname, "hasName", force = TRUE)
} # nocov end

.onUnload = function (libpath) { # nocov start
  library.dynam.unload("batchtools", libpath)
} # nocov end
batchtools/R/reduceResults.R0000644000176200001440000002151313543336703015602 0ustar liggesusers
#' @title Reduce Results
#'
#' @description
#' A version of \code{\link[base]{Reduce}} for \code{\link{Registry}} objects
#' which iterates over finished jobs and aggregates them.
#' All jobs must have terminated, an error is raised otherwise.
#'
#' @note
#' If you have thousands of jobs, disabling the progress bar (\code{options(batchtools.progress = FALSE)})
#' can significantly increase the performance.
#'
#' @templateVar ids.default findDone
#' @template ids
#' @param fun [\code{function}]\cr
#'   A function to reduce the results. The result of previous iterations (or
#'   the \code{init}) will be passed as first argument, the result of the
#'   i-th iteration as second. See \code{\link[base]{Reduce}} for some
#'   examples.
#'   If the function has the formal argument \dQuote{job}, the \code{\link{Job}}/\code{\link{Experiment}}
#'   is also passed to the function (named).
#' @param init [\code{ANY}]\cr
#'   Initial element, as used in \code{\link[base]{Reduce}}.
#'   If missing, the reduction uses the result of the first job as \code{init} and the reduction starts
#'   with the second job.
#' @param ... [\code{ANY}]\cr
#'   Additional arguments passed to function \code{fun}.
#' @return Aggregated results in the same order as provided ids.
#'   Return type depends on the user function. If \code{ids}
#'   is empty, \code{reduceResults} returns \code{init} (if available) or \code{NULL} otherwise.
#' @template reg
#' @family Results
#' @export
#' @examples
#' \dontshow{ batchtools:::example_push_temp(1) }
#' tmp = makeRegistry(file.dir = NA, make.default = FALSE)
#' batchMap(function(a, b) list(sum = a+b, prod = a*b), a = 1:3, b = 1:3, reg = tmp)
#' submitJobs(reg = tmp)
#' waitForJobs(reg = tmp)
#'
#' # Extract element sum from each result
#' reduceResults(function(aggr, res) c(aggr, res$sum), init = list(), reg = tmp)
#'
#' # Aggregate element sum via '+'
#' reduceResults(function(aggr, res) aggr + res$sum, init = 0, reg = tmp)
#'
#' # Aggregate element prod via '*' where parameter b < 3
#' reduce = function(aggr, res, job) {
#'   if (job$pars$b >= 3)
#'     return(aggr)
#'   aggr * res$prod
#' }
#' reduceResults(reduce, init = 1, reg = tmp)
#'
#' # Reduce to data.frame() (inefficient, use reduceResultsDataTable() instead)
#' reduceResults(rbind, init = data.frame(), reg = tmp)
#'
#' # Reduce to data.frame by collecting results first, then utilize vectorization of rbind:
#' res = reduceResultsList(fun = as.data.frame, reg = tmp)
#' do.call(rbind, res)
#'
#' # Reduce with custom combine function:
#' comb = function(x, y) list(sum = x$sum + y$sum, prod = x$prod * y$prod)
#' reduceResults(comb, reg = tmp)
#'
#' # The same with neutral element NULL
#' comb = function(x, y) if (is.null(x)) y else list(sum = x$sum + y$sum, prod = x$prod * y$prod)
#' reduceResults(comb, init = NULL, reg = tmp)
#'
#' # Alternative: Reduce in list, reduce manually in a 2nd step
#' res = reduceResultsList(reg = tmp)
#' Reduce(comb, res)
reduceResults = function(fun, ids = NULL, init, ..., reg = getDefaultRegistry()) {
  assertRegistry(reg, sync = TRUE)
  ids = convertIds(reg, ids, default = .findDone(reg = reg), keep.order = TRUE)
  fun = match.fun(fun)
  # all requested jobs must have terminated successfully
  if (nrow(.findNotDone(reg, ids)) > 0L)
    stop("All jobs must have been successfully computed")
  if (nrow(ids) == 0L)
    return(if (missing(init)) NULL else init)

  fns = getResultFiles(reg, ids)
  if (missing(init)) {
    # seed the reduction with the first result and continue with the rest;
    # drop the first id as well to keep ids aligned with fns, otherwise
    # makeJob() below would construct the job belonging to the previous result
    init = readRDS(fns[1L])
    fns = fns[-1L]
    ids = ids[-1L]
    if (length(fns) == 0L)
      return(init)
  }

  pb = makeProgressBar(total = length(fns), format = "Reduce [:bar] :percent eta: :eta")
  if ("job" %chin% names(formals(fun))) {
    # user function wants the Job object -> construct it per iteration
    for (i in seq_along(fns)) {
      init = fun(init, readRDS(fns[i]), job = makeJob(ids[i], reg = reg), ...)
      pb$tick()
    }
  } else {
    for (i in seq_along(fns)) {
      init = fun(init, readRDS(fns[i]), ...)
      pb$tick()
    }
  }
  return(init)
}
#'
#' @templateVar ids.default findDone
#' @template ids
#' @param fun [\code{function}]\cr
#'   Function to apply to each result. The result is passed unnamed as first argument. If \code{NULL}, the identity is used.
#'   If the function has the formal argument \dQuote{job}, the \code{\link{Job}}/\code{\link{Experiment}} is also passed to the function.
#' @param ... [\code{ANY}]\cr
#'   Additional arguments passed to function \code{fun}.
#' @template missing.val
#' @template reg
#' @return \code{reduceResultsList} returns a list of the results in the same order as the provided ids.
#'   \code{reduceResultsDataTable} returns a \code{\link[data.table]{data.table}} with columns \dQuote{job.id} and additional result columns
#'   created via \code{\link[data.table]{rbindlist}}, sorted by \dQuote{job.id}.
#' @seealso \code{\link{reduceResults}}
#' @family Results
#' @export
#' @examples
#' \dontshow{ batchtools:::example_push_temp(2) }
#' ### Example 1 - reduceResultsList
#' tmp = makeRegistry(file.dir = NA, make.default = FALSE)
#' batchMap(function(x) x^2, x = 1:10, reg = tmp)
#' submitJobs(reg = tmp)
#' waitForJobs(reg = tmp)
#' reduceResultsList(fun = sqrt, reg = tmp)
#'
#' ### Example 2 - reduceResultsDataTable
#' tmp = makeExperimentRegistry(file.dir = NA, make.default = FALSE)
#'
#' # add first problem
#' fun = function(job, data, n, mean, sd, ...) rnorm(n, mean = mean, sd = sd)
#' addProblem("rnorm", fun = fun, reg = tmp)
#'
#' # add second problem
#' fun = function(job, data, n, lambda, ...) rexp(n, rate = lambda)
#' addProblem("rexp", fun = fun, reg = tmp)
#'
#' # add first algorithm
#' fun = function(instance, method, ...) if (method == "mean") mean(instance) else median(instance)
#' addAlgorithm("average", fun = fun, reg = tmp)
#'
#' # add second algorithm
#' fun = function(instance, ...) sd(instance)
#' addAlgorithm("deviation", fun = fun, reg = tmp)
#'
#' # define problem and algorithm designs
#' library(data.table)
#' prob.designs = algo.designs = list()
#' prob.designs$rnorm = CJ(n = 100, mean = -1:1, sd = 1:5)
#' prob.designs$rexp = data.table(n = 100, lambda = 1:5)
#' algo.designs$average = data.table(method = c("mean", "median"))
#' algo.designs$deviation = data.table()
#'
#' # add experiments and submit
#' addExperiments(prob.designs, algo.designs, reg = tmp)
#' submitJobs(reg = tmp)
#'
#' # collect results and join them with problem and algorithm paramters
#' res = ijoin(
#'   getJobPars(reg = tmp),
#'   reduceResultsDataTable(reg = tmp, fun = function(x) list(res = x))
#' )
#' unwrap(res, sep = ".")
reduceResultsList = function(ids = NULL, fun = NULL, ..., missing.val, reg = getDefaultRegistry()) {
  assertRegistry(reg, sync = TRUE)
  assertFunction(fun, null.ok = TRUE)
  ids = convertIds(reg, ids, default = .findDone(reg = reg), keep.order = TRUE)
  .reduceResultsList(ids, fun, ..., missing.val = missing.val, reg = reg)
}

#' @export
#' @rdname reduceResultsList
reduceResultsDataTable = function(ids = NULL, fun = NULL, ..., missing.val, reg = getDefaultRegistry()) {
  assertRegistry(reg, sync = TRUE)
  ids = convertIds(reg, ids, default = .findDone(reg = reg))
  assertFunction(fun, null.ok = TRUE)
  results = .reduceResultsList(ids = ids, fun = fun, ..., missing.val = missing.val, reg = reg)
  if (length(results) == 0L)
    return(noIds())
  ids[, "result" := results][]
}

# Internal work horse for both list/data.table variants.
# 'worker' normalizes the user function so it always accepts (.res, .job, ...);
# jobs without a result get 'missing.val' (or trigger an error if unset).
.reduceResultsList = function(ids, fun = NULL, ..., missing.val, reg = getDefaultRegistry()) {
  if (is.null(fun)) {
    worker = function(.res, .job, ...) .res
  } else {
    fun = match.fun(fun)
    if ("job" %chin% names(formals(fun)))
      worker = function(.res, .job, ...) fun(.res, job = .job, ...)
    else
      worker = function(.res, .job, ...) fun(.res, ...)
  }

  results = vector("list", nrow(ids))
  # positions (in 'ids') of jobs which terminated successfully
  done = ids[.findDone(reg, ids), nomatch = 0L, which = TRUE, on = "job.id"]
  if (missing(missing.val)) {
    if (length(done) != nrow(ids))
      stop("All jobs must have been successfully computed")
  } else {
    results[setdiff(seq_row(ids), done)] = list(missing.val)
  }

  if (length(done) > 0L) {
    fns = getResultFiles(reg, ids)
    pb = makeProgressBar(total = length(fns), format = "Reducing [:bar] :percent eta: :eta")
    # share one RDS reader so cached problem/algorithm files are reused
    reader = RDSReader$new(TRUE)
    for (i in done) {
      res = worker(readRDS(fns[i]), makeJob(ids$job.id[i], reader = reader, reg = reg), ...)
      if (!is.null(res))
        results[[i]] = res
      rm(res)
      pb$tick()
    }
  }
  return(results)
}
batchtools/R/unwrap.R0000644000176200001440000000664213543337002014264 0ustar liggesusers
#' @title Unwrap Nested Data Frames
#'
#' @description
#' Some functions (e.g., \code{\link{getJobPars}}, \code{\link{getJobResources}} or \code{\link{reduceResultsDataTable}}
#' return a \code{data.table} with columns of type \code{list}.
#' These columns can be unnested/unwrapped with this function.
#' The contents of these columns will be transformed to a \code{data.table} and \code{\link[base]{cbind}}-ed
#' to the input data.frame \code{x}, replacing the original nested column.
#'
#' @note
#' There is a name clash with function \code{flatten} in package \pkg{purrr}.
#' The function \code{flatten} is discouraged to use for this reason in favor of \code{unwrap}.
#'
#' @param x [\code{\link{data.frame}} | \code{\link[data.table]{data.table}}]\cr
#'   Data frame to flatten.
#' @param cols [\code{character}]\cr
#'   Columns to consider for this operation. If set to \code{NULL} (default),
#'   will operate on all columns of type \dQuote{list}.
#' @param sep [\code{character(1)}]\cr
#'   If \code{NULL} (default), the column names of the additional columns will re-use the names
#'   of the nested \code{list}/\code{data.frame}.
#'   This may lead to name clashes.
#'   If you provide \code{sep}, the variable column name will be constructed as
#'   \dQuote{[column name of x][sep][inner name]}.
#' @return [\code{\link{data.table}}].
#' @export
#' @examples
#' x = data.table::data.table(
#'   id = 1:3,
#'   values = list(list(a = 1, b = 3), list(a = 2, b = 2), list(a = 3))
#' )
#' unwrap(x)
#' unwrap(x, sep = ".")
unwrap = function(x, cols = NULL, sep = NULL) {
  assertDataFrame(x)
  if (!is.data.table(x))
    x = as.data.table(x)
  if (is.null(cols)) {
    # default: operate on all list columns
    cols = names(x)[vlapply(x, is.list)]
  } else {
    assertNames(cols, "unique", subset.of = names(x))
    qassertr(x[, cols, with = FALSE], "l")
  }
  assertString(sep, null.ok = TRUE)

  # '.row' keeps track of the original row so unnested columns can be merged back
  res = data.table(.row = seq_row(x), key = ".row")
  extra.cols = chsetdiff(names(x), cols)
  if (length(extra.cols))
    res = cbind(res, x[, extra.cols, with = FALSE])

  for (col in cols) {
    xc = x[[col]]
    new.cols = lapply(xc, function(x) {
      if (!is.null(x)) {
        # wrap non-scalar elements in a list so rbindlist() keeps one row per input row
        ii = !vlapply(x, qtest, c("l", "d", "v1")) # FIXME: add parameter `which` to qtestr
        x[ii] = lapply(x[ii], list)
        # generate names "<col>.<i>" for unnamed elements
        na = which(is.na(names2(x)))
        if (length(na) > 0L)
          names(x)[na] = sprintf("%s.%i", col, seq_along(na))
      }
      x
    })
    new.cols = rbindlist(new.cols, fill = TRUE, idcol = ".row", use.names = TRUE)

    if (ncol(new.cols) > 1L) {
      # each input row must map to at most one unnested row
      if (nrow(new.cols) > nrow(x) || anyDuplicated(new.cols, by = ".row") > 0L)
        stopf("Some rows are unsuitable for unnesting. Unwrapping row in column '%s' leads to multiple rows", col)
      if (!is.null(sep)) {
        nn = setdiff(names(new.cols), ".row")
        setnames(new.cols, nn, stri_paste(col, nn, sep = sep))
      }
      clash = chsetdiff(chintersect(names(res), names(new.cols)), ".row")
      if (length(clash) > 0L)
        stopf("Name clash while unwrapping data.table: Duplicated column names: %s", stri_flatten(clash, ", "))
      res = merge(res, new.cols, all.x = TRUE, by = ".row")
    }
  }
  res[, ".row" := NULL]

  # restore the key of the input if all key columns survived
  kx = key(x)
  if (!is.null(kx) && all(kx %chin% names(res)))
    setkeyv(res, kx)
  res[]
}

#' @rdname unwrap
#' @export
flatten = function(x, cols = NULL, sep = NULL) { #nocov start
  "!DEBUG Call of soon-to-be deprecated function flatten. Use unwrap() instead!"
  unwrap(x, cols, sep)
} #nocov end
batchtools/R/JobCollection.R0000644000176200001440000001036013606051520015464 0ustar liggesusers
#' @title JobCollection Constructor
#'
#' @description
#' \code{makeJobCollection} takes multiple job ids and creates an object of class \dQuote{JobCollection} which holds all
#' necessary information for the calculation with \code{\link{doJobCollection}}. It is implemented as an environment
#' with the following variables:
#' \describe{
#'  \item{file.dir}{\code{file.dir} of the \link{Registry}.}
#'  \item{work.dir:}{\code{work.dir} of the \link{Registry}.}
#'  \item{job.hash}{Unique identifier of the job. Used to create names on the file system.}
#'  \item{jobs}{\code{\link[data.table]{data.table}} holding individual job information. See examples.}
#'  \item{log.file}{Location of the designated log file for this job.}
#'  \item{resources:}{Named list of of specified computational resources.}
#'  \item{uri}{Location of the job description file (saved with \code{link[base]{saveRDS}} on the file system.}
#'  \item{seed}{\code{integer(1)} Seed of the \link{Registry}.}
#'  \item{packages}{\code{character} with required packages to load via \code{\link[base]{require}}.}
#'  \item{namespaces}{code{character} with required packages to load via \code{\link[base]{requireNamespace}}.}
#'  \item{source}{\code{character} with list of files to source before execution.}
#'  \item{load}{\code{character} with list of files to load before execution.}
#'  \item{array.var}{\code{character(1)} of the array environment variable specified by the cluster functions.}
#'  \item{array.jobs}{\code{logical(1)} signaling if jobs were submitted using \code{chunks.as.arrayjobs}.}
#' }
#' If your \link{ClusterFunctions} uses a template, \code{\link[brew]{brew}} will be executed in the environment of such
#' a collection. Thus all variables available inside the job can be used in the template.
#'
#' @templateVar ids.default all
#' @template ids
#' @param resources [\code{list}]\cr
#'   Named list of resources. Default is \code{list()}.
#' @template reg
#' @return [\code{JobCollection}].
#' @family JobCollection
#' @aliases JobCollection
#' @rdname JobCollection
#' @export
#' @examples
#' \dontshow{ batchtools:::example_push_temp(1) }
#' tmp = makeRegistry(file.dir = NA, make.default = FALSE, packages = "methods")
#' batchMap(identity, 1:5, reg = tmp)
#'
#' # resources are usually set in submitJobs()
#' jc = makeJobCollection(1:3, resources = list(foo = "bar"), reg = tmp)
#' ls(jc)
#' jc$resources
makeJobCollection = function(ids = NULL, resources = list(), reg = getDefaultRegistry()) {
  # S3 dispatch on the registry class (Registry vs. ExperimentRegistry)
  UseMethod("makeJobCollection", reg)
}

# Internal: populate the collection environment shared by both registry types.
# 'jobs' is the merged defs/status table restricted to the requested ids.
createCollection = function(jobs, resources = list(), reg = getDefaultRegistry()) {
  jc = new.env(parent = emptyenv())
  jc$jobs = setkeyv(jobs, "job.id")
  jc$job.hash = rnd_hash("job")
  # fall back to the hash if any job in the chunk is unnamed
  jc$job.name = if (anyMissing(jobs$job.name)) jc$job.hash else jobs$job.name[1L]
  jc$file.dir = reg$file.dir
  jc$work.dir = reg$work.dir
  jc$seed = reg$seed
  jc$uri = getJobFiles(reg, hash = jc$job.hash)
  jc$log.file = fs::path(reg$file.dir, "logs", sprintf("%s.log", jc$job.hash))
  jc$packages = reg$packages
  jc$namespaces = reg$namespaces
  jc$source = reg$source
  jc$load = reg$load
  jc$resources = resources
  jc$array.var = reg$cluster.functions$array.var
  jc$array.jobs = isTRUE(resources$chunks.as.arrayjobs)
  jc$compress = reg$compress

  # attach only hooks which run on the remote side
  hooks = chintersect(names(reg$cluster.functions$hooks), batchtools$hooks$remote)
  if (length(hooks) > 0L)
    jc$hooks = reg$cluster.functions$hooks[hooks]
  return(jc)
}

#' @export
makeJobCollection.Registry = function(ids = NULL, resources = list(), reg = getDefaultRegistry()) {
  jc = createCollection(mergedJobs(reg, convertIds(reg, ids), c("job.id", "job.name", "job.pars")), resources, reg)
  setClasses(jc, "JobCollection")
}

#' @export
makeJobCollection.ExperimentRegistry = function(ids = NULL, resources = list(), reg = getDefaultRegistry()) {
  # experiments additionally carry problem/algorithm designs and the replication
  jc = createCollection(mergedJobs(reg, convertIds(reg, ids), c("job.id", "job.name", "problem", "algorithm", "prob.pars", "algo.pars", "repl")), resources, reg)
  setClasses(jc, c("ExperimentCollection", "JobCollection"))
}

#' @export
print.JobCollection = function(x, ...) {
  catf("Collection of %i jobs", nrow(x$jobs))
  catf("  Hash    : %s", x$job.hash)
  catf("  Log file: %s", x$log.file)
}
batchtools/R/Problem.R0000644000176200001440000001316713606043260014350 0ustar liggesusers
#' @title Define Problems for Experiments
#'
#' @description
#' Problems may consist of up to two parts: A static, immutable part (\code{data} in \code{addProblem})
#' and a dynamic, stochastic part (\code{fun} in \code{addProblem}).
#' For example, for statistical learning problems a data frame would be the static problem part while
#' a resampling function would be the stochastic part which creates problem instance.
#' This instance is then typically passed to a learning algorithm like a wrapper around a statistical model
#' (\code{fun} in \code{\link{addAlgorithm}}).
#'
#' This function serialize all components to the file system and registers the problem in the \code{\link{ExperimentRegistry}}.
#'
#' \code{removeProblem} removes all jobs from the registry which depend on the specific problem.
#' \code{reg$problems} holds the IDs of already defined problems.
#'
#' @param name [\code{character(1)}]\cr
#'   Unique identifier for the problem.
#' @param data [\code{ANY}]\cr
#'   Static problem part. Default is \code{NULL}.
#' @param fun [\code{function}]\cr
#'   The function defining the stochastic problem part.
#'   The static part is passed to this function with name \dQuote{data} and the \code{\link{Job}}/\code{\link{Experiment}}
#'   is passed as \dQuote{job}.
#'   Therefore, your function must have the formal arguments \dQuote{job} and \dQuote{data} (or dots \code{...}).
#'   If you do not provide a function, it defaults to a function which just returns the data part.
#' @param seed [\code{integer(1)}]\cr
#'   Start seed for this problem. This allows the \dQuote{synchronization} of a stochastic
#'   problem across algorithms, so that different algorithms are evaluated on the same stochastic instance.
#'   If the problem seed is defined, the seeding mechanism works as follows:
#'   (1) Before the dynamic part of a problem is instantiated,
#'   the seed of the problem + [replication number] - 1 is set, i.e. the first
#'   replication uses the problem seed. (2) The stochastic part of the problem is
#'   instantiated. (3) From now on the usual experiment seed of the registry is used,
#'   see \code{\link{ExperimentRegistry}}.
#'   If \code{seed} is set to \code{NULL} (default), the job seed is used to instantiate the problem and
#'   different algorithms see different stochastic instances of the same problem.
#' @param cache [\code{logical(1)}]\cr
#'   If \code{TRUE} and \code{seed} is set, problem instances will be cached on the file system.
#'   This assumes that each problem instance is deterministic for each combination of hyperparameter setting
#'   and each replication number.
#'   This feature is experimental.
#' @template expreg
#' @return [\code{Problem}]. Object of class \dQuote{Problem} (invisibly).
#' @aliases Problem
#' @seealso \code{\link{Algorithm}}, \code{\link{addExperiments}}
#' @export
#' @examples
#' \dontshow{ batchtools:::example_push_temp(1) }
#' tmp = makeExperimentRegistry(file.dir = NA, make.default = FALSE)
#' addProblem("p1", fun = function(job, data) data, reg = tmp)
#' addProblem("p2", fun = function(job, data) job, reg = tmp)
#' addAlgorithm("a1", fun = function(job, data, instance) instance, reg = tmp)
#' addExperiments(repls = 2, reg = tmp)
#'
#' # List problems, algorithms and job parameters:
#' tmp$problems
#' tmp$algorithms
#' getJobPars(reg = tmp)
#'
#' # Remove one problem
#' removeProblems("p1", reg = tmp)
#'
#' # List problems and algorithms:
#' tmp$problems
#' tmp$algorithms
#' getJobPars(reg = tmp)
addProblem = function(name, data = NULL, fun = NULL, seed = NULL, cache = FALSE, reg = getDefaultRegistry()) {
  assertRegistry(reg, class = "ExperimentRegistry", writeable = TRUE)
  assertString(name, min.chars = 1L)
  # the name is used to construct file names -> restrict the character set
  if (!stri_detect_regex(name, "^[[:alnum:]_.-]+$"))
    stopf("Illegal characters in problem name: %s", name)
  if (is.null(fun)) {
    # default: the instance is simply the static data part
    fun = function(job, data, ...) data
  } else {
    assert(checkFunction(fun, args = c("job", "data")), checkFunction(fun, args = "..."))
  }
  if (is.null(seed)) {
    # caching requires deterministic instantiation, i.e. a problem seed
    cache = FALSE
  } else {
    seed = asCount(seed, positive = TRUE)
    cache = assertFlag(cache)
  }

  info("Adding problem '%s'", name)
  prob = setClasses(list(name = name, seed = seed, cache = cache, data = data, fun = fun), "Problem")
  writeRDS(prob, file = getProblemURI(reg, name), compress = reg$compress)
  reg$problems = union(reg$problems, name)
  # re-defining a problem invalidates previously cached instances
  cache.dir = getProblemCacheDir(reg, name)
  if (fs::dir_exists(cache.dir))
    fs::dir_delete(cache.dir)
  if (cache)
    fs::dir_create(cache.dir)
  saveRegistry(reg)
  invisible(prob)
}

#' @export
#' @rdname addProblem
removeProblems = function(name, reg = getDefaultRegistry()) {
  assertRegistry(reg, class = "ExperimentRegistry", writeable = TRUE, running.ok = FALSE)
  assertCharacter(name, any.missing = FALSE)
  assertSubset(name, reg$problems)

  problem = NULL # NSE declaration
  for (nn in name) {
    # drop all defs and status rows depending on this problem
    def.ids = reg$defs[problem == nn, "def.id"]
    job.ids = filter(def.ids, reg$status, "job.id")

    info("Removing Problem '%s' and %i corresponding jobs ...", nn, nrow(job.ids))
    file_remove(getProblemURI(reg, nn))
    reg$defs = reg$defs[!def.ids]
    reg$status = reg$status[!job.ids]
    reg$problems = chsetdiff(reg$problems, nn)
    cache = getProblemCacheDir(reg, nn)
    if (fs::dir_exists(cache))
      fs::dir_delete(cache)
  }

  sweepRegistry(reg)
  invisible(TRUE)
}

# Internal: path of the serialized Problem object on the file system.
getProblemURI = function(reg, name) {
  fs::path(dir(reg, "problems"), mangle(name))
}

# Internal: directory holding cached instances of a problem; the name is
# base32-encoded to be file-system safe.
getProblemCacheDir = function(reg, name) {
  fs::path(dir(reg, "cache"), "problems", base32_encode(name, use.padding = FALSE))
}

# Internal: cache file for one instance, keyed by problem name, parameters
# and replication number.
getProblemCacheURI = function(job) {
  fs::path(getProblemCacheDir(job, job$prob.name), sprintf("%s.rds", digest(list(job$prob.name, job$prob.pars, job$repl))))
}
batchtools/R/Tags.R0000644000176200001440000000513413435713470013650 0ustar liggesusers
#' @title Add or Remove Job Tags
#' @name Tags
#' @rdname Tags
#'
#' @description
#' Add and remove arbitrary tags to jobs.
#' #' @templateVar ids.default all #' @template ids #' @param tags [\code{character}]\cr #' Tags to add or remove as strings. Each tag may consist of letters, numbers, underscore and dots (pattern \dQuote{^[[:alnum:]_.]+}). #' @return [\code{\link[data.table]{data.table}}] with job ids affected (invisible). #' @template reg #' @export #' @examples #' \dontshow{ batchtools:::example_push_temp(1) } #' tmp = makeRegistry(file.dir = NA, make.default = FALSE) #' ids = batchMap(sqrt, x = -3:3, reg = tmp) #' #' # Add new tag to all ids #' addJobTags(ids, "needs.computation", reg = tmp) #' getJobTags(reg = tmp) #' #' # Add more tags #' addJobTags(findJobs(x < 0, reg = tmp), "x.neg", reg = tmp) #' addJobTags(findJobs(x > 0, reg = tmp), "x.pos", reg = tmp) #' getJobTags(reg = tmp) #' #' # Submit first 5 jobs and remove tag if successful #' ids = submitJobs(1:5, reg = tmp) #' if (waitForJobs(reg = tmp)) #' removeJobTags(ids, "needs.computation", reg = tmp) #' getJobTags(reg = tmp) #' #' # Grep for warning message and add a tag #' addJobTags(grepLogs(pattern = "NaNs produced", reg = tmp), "div.zero", reg = tmp) #' getJobTags(reg = tmp) #' #' # All tags where tag x.neg is set: #' ids = findTagged("x.neg", reg = tmp) #' getUsedJobTags(ids, reg = tmp) addJobTags = function(ids = NULL, tags, reg = getDefaultRegistry()) { assertRegistry(reg, writeable = TRUE) ids = convertIds(reg, ids, default = allIds(reg)) assertCharacter(tags, any.missing = FALSE, pattern = "^[[:alnum:]_.]+$", min.len = 1L) for (cur in tags) { ids[, ("tag") := cur] reg$tags = rbind(reg$tags, ids) } reg$tags = setkeyv(unique(reg$tags, by = NULL), "job.id") saveRegistry(reg) invisible(ids[, "job.id"]) } #' @export #' @rdname Tags removeJobTags = function(ids = NULL, tags, reg = getDefaultRegistry()) { assertRegistry(reg, writeable = TRUE) ids = convertIds(reg, ids) assertCharacter(tags, any.missing = FALSE, pattern = "^[[:alnum:]_.]+$", min.len = 1L) job.id = tag = NULL if (is.null(ids)) { i = reg$tags[tag %in% 
tags, which = TRUE] } else { i = reg$tags[job.id %in% ids$job.id & tag %in% tags, which = TRUE] } if (length(i) > 0L) { ids = unique(reg$tags[i, "job.id"], by = "job.id") reg$tags = reg$tags[-i] saveRegistry(reg) } else { ids = noIds() } invisible(ids) } #' @export #' @rdname Tags getUsedJobTags = function(ids = NULL, reg = getDefaultRegistry()) { assertRegistry(reg) ids = convertIds(reg, ids) unique(filter(reg$tags, ids), by = "tag")$tag } batchtools/R/helpers.R0000644000176200001440000000742313422307502014406 0ustar liggesusersmergedJobs = function(reg, ids, cols) { if (is.null(ids)) reg$defs[reg$status, cols, on = "def.id", nomatch = 0L, with = missing(cols)] else reg$defs[reg$status[ids, nomatch = 0L, on = "job.id"], cols, on = "def.id", nomatch = 0L, with = missing(cols)] } auto_increment = function(ids, n = 1L) { if (length(ids) == 0L) seq_len(n) else max(ids) + seq_len(n) } ustamp = function() { round(as.numeric(Sys.time()), 4L) } names2 = function (x, missing.val = NA_character_) { n = names(x) if (is.null(n)) return(rep.int(missing.val, length(x))) replace(n, is.na(n) | !nzchar(n), missing.val) } insert = function(x, y) { x[names2(y)] = y x[order(names2(x))] } makeProgressBar = function(...) { if (!batchtools$debug && getOption("batchtools.verbose", TRUE) && getOption("batchtools.progress", TRUE) && getOption("width") >= 20L) { progress_bar$new(...) 
} else { list(tick = function(len = 1, tokens = list()) NULL, update = function(ratio, tokens) NULL) } } seq_row = function(x) { seq_len(nrow(x)) } vlapply = function (x, fun, ..., use.names = TRUE) { vapply(X = x, FUN = fun, ..., FUN.VALUE = NA, USE.NAMES = use.names) } viapply = function (x, fun, ..., use.names = TRUE) { vapply(X = x, FUN = fun, ..., FUN.VALUE = NA_integer_, USE.NAMES = use.names) } vnapply = function (x, fun, ..., use.names = TRUE) { vapply(X = x, FUN = fun, ..., FUN.VALUE = NA_real_, USE.NAMES = use.names) } vcapply = function (x, fun, ..., use.names = TRUE) { vapply(X = x, FUN = fun, ..., FUN.VALUE = NA_character_, USE.NAMES = use.names) } is.error = function(x) { inherits(x, "try-error") } # formating info message info = function(...) { if (getOption("batchtools.verbose", TRUE)) message(sprintf(...)) } # formating cat() catf = function(..., con = "") { cat(stri_flatten(sprintf(...), "\n"), "\n", sep = "", file = con) } # formating message() messagef = function(..., con = "") { message(sprintf(...)) } # formating waring() warningf = function(...) { warning(simpleWarning(sprintf(...), call = sys.call(sys.parent()))) } # formating stop() stopf = function(...) 
{ stop(simpleError(sprintf(...), call = NULL)) } `%nin%` = function(x, y) { !match(x, y, nomatch = 0L) } `%chnin%` = function(x, y) { !chmatch(x, y, nomatch = 0L) } setClasses = function(x, cl) { setattr(x, "class", cl) x } #' @useDynLib batchtools count_not_missing count = function(x) { .Call(count_not_missing, x) } filterNull = function(x) { x[!vlapply(x, is.null)] } stri_trunc = function(str, length, append = "") { if (is.na(str)) return(str) if (stri_length(str) > length) { if (is.na(append) || !nzchar(append)) return(stri_sub(str, 1L, length)) return(stri_join(stri_sub(str, 1L, length - stri_length(append)), append)) } return(str) } Rscript = function() { fs::path(R.home("bin"), ifelse(testOS("windows"), "Rscript.exe", "Rscript")) } getSeed = function(start.seed, id) { if (id > .Machine$integer.max - start.seed) start.seed - .Machine$integer.max + id else start.seed + id } chsetdiff = function(x, y) { # Note: assumes that x has no duplicates x[chmatch(x, y, 0L) == 0L] } chintersect = function(x, y) { # Note: assumes that x has no duplicates x[chmatch(y, x, 0L)] } rnd_hash = function(prefix = "") { stri_join(prefix, digest(list(runif(1L), as.numeric(Sys.time())))) } now = function() { if (isTRUE(getOption("batchtools.timestamps", FALSE))) sprintf(" %s", strftime(Sys.time())) else "" } example_push_temp = function(i = 1L) { if (identical(Sys.getenv("IN_PKGDOWN"), "true")) { base = fs::path(dirname(tempdir()), "batchtools-example") dirs = if (i == 1L) fs::path(base, "reg") else fs::path(base, sprintf("reg%i", seq_len(i))) fs::dir_delete(dirs[fs::dir_exists(dirs)]) fs::file_temp_push(dirs) } } batchtools/R/chunkIds.R0000644000176200001440000001210713543336703014520 0ustar liggesusers#' @title Chunk Jobs for Sequential Execution #' #' @description #' Jobs can be partitioned into \dQuote{chunks} to be executed sequentially on the computational nodes. 
#' Chunks are defined by providing a data frame with columns \dQuote{job.id} and \dQuote{chunk} (integer) #' to \code{\link{submitJobs}}. #' All jobs with the same chunk number will be grouped together on one node to form a single #' computational job. #' #' The function \code{chunk} simply splits \code{x} into either a fixed number of groups, or #' into a variable number of groups with a fixed number of maximum elements. #' #' The function \code{lpt} also groups \code{x} into a fixed number of chunks, #' but uses the actual values of \code{x} in a greedy \dQuote{Longest Processing Time} algorithm. #' As a result, the maximum sum of elements in minimized. #' #' \code{binpack} splits \code{x} into a variable number of groups whose sum of elements do #' not exceed the upper limit provided by \code{chunk.size}. #' #' See examples of \code{\link{estimateRuntimes}} for an application of \code{binpack} and \code{lpt}. #' #' @param x [\code{numeric}]\cr #' For \code{chunk} an atomic vector (usually the \code{job.id}). #' For \code{binpack} and \code{lpt}, the weights to group. #' @param chunk.size [\code{integer(1)}]\cr #' Requested chunk size for each single chunk. #' For \code{chunk} this is the number of elements in \code{x}, for \code{binpack} the size #' is determined by the sum of values in \code{x}. #' Mutually exclusive with \code{n.chunks}. #' @param n.chunks [\code{integer(1)}]\cr #' Requested number of chunks. #' The function \code{chunk} distributes the number of elements in \code{x} evenly while #' \code{lpt} tries to even out the sum of elements in each chunk. #' If more chunks than necessary are requested, empty chunks are ignored. #' Mutually exclusive with \code{chunks.size}. #' @param shuffle [\code{logical(1)}]\cr #' Shuffles the groups. Default is \code{TRUE}. #' @return [\code{integer}] giving the chunk number for each element of \code{x}. 
#' @seealso \code{\link{estimateRuntimes}} #' @export #' @examples #' \dontshow{ batchtools:::example_push_temp(2) } #' ch = chunk(1:10, n.chunks = 2) #' table(ch) #' #' ch = chunk(rep(1, 10), chunk.size = 2) #' table(ch) #' #' set.seed(1) #' x = runif(10) #' ch = lpt(x, n.chunks = 2) #' sapply(split(x, ch), sum) #' #' set.seed(1) #' x = runif(10) #' ch = binpack(x, 1) #' sapply(split(x, ch), sum) #' #' # Job chunking #' tmp = makeRegistry(file.dir = NA, make.default = FALSE) #' ids = batchMap(identity, 1:25, reg = tmp) #' #' ### Group into chunks with 10 jobs each #' library(data.table) #' ids[, chunk := chunk(job.id, chunk.size = 10)] #' print(ids[, .N, by = chunk]) #' #' ### Group into 4 chunks #' ids[, chunk := chunk(job.id, n.chunks = 4)] #' print(ids[, .N, by = chunk]) #' #' ### Submit to batch system #' submitJobs(ids = ids, reg = tmp) #' #' # Grouped chunking #' tmp = makeExperimentRegistry(file.dir = NA, make.default = FALSE) #' prob = addProblem(reg = tmp, "prob1", data = iris, fun = function(job, data) nrow(data)) #' prob = addProblem(reg = tmp, "prob2", data = Titanic, fun = function(job, data) nrow(data)) #' algo = addAlgorithm(reg = tmp, "algo", fun = function(job, data, instance, i, ...) 
problem) #' prob.designs = list(prob1 = data.table(), prob2 = data.table(x = 1:2)) #' algo.designs = list(algo = data.table(i = 1:3)) #' addExperiments(prob.designs, algo.designs, repls = 3, reg = tmp) #' #' ### Group into chunks of 5 jobs, but do not put multiple problems into the same chunk #' # -> only one problem has to be loaded per chunk, and only once because it is cached #' ids = getJobTable(reg = tmp)[, .(job.id, problem, algorithm)] #' ids[, chunk := chunk(job.id, chunk.size = 5), by = "problem"] #' ids[, chunk := .GRP, by = c("problem", "chunk")] #' dcast(ids, chunk ~ problem) chunk = function(x, n.chunks = NULL, chunk.size = NULL, shuffle = TRUE) { assertAtomicVector(x) if (!xor(is.null(n.chunks), is.null(chunk.size))) stop("You must provide either 'n.chunks' (x)or 'chunk.size'") assertCount(n.chunks, positive = TRUE, null.ok = TRUE) assertCount(chunk.size, positive = TRUE, null.ok = TRUE) assertFlag(shuffle) n = length(x) if (n == 0L) return(integer(0L)) if (is.null(n.chunks)) n.chunks = (n %/% chunk.size + (n %% chunk.size > 0L)) chunks = as.integer((seq.int(0L, n - 1L) %% min(n.chunks, n))) + 1L if (shuffle) chunks = sample(chunks) else chunks = sort(chunks) return(chunks) } #' @rdname chunk #' @useDynLib batchtools c_lpt #' @export lpt = function(x, n.chunks = 1L) { assertNumeric(x, lower = 0, any.missing = FALSE, finite = TRUE) assertCount(n.chunks, positive = TRUE) .Call(c_lpt, as.numeric(x), order(x, decreasing = TRUE), as.integer(n.chunks)) } #' @rdname chunk #' @useDynLib batchtools c_binpack #' @export binpack = function(x, chunk.size = max(x)) { assertNumeric(x, lower = 0, any.missing = FALSE, finite = TRUE) assertNumber(chunk.size, lower = 0) if (length(x) == 0L) return(integer(0L)) .Call(c_binpack, as.numeric(x), order(x, decreasing = TRUE), as.double(chunk.size)) } batchtools/R/updateRegisty.R0000644000176200001440000000721713606043745015610 0ustar liggesusers# returns TRUE if the state possibly changed updateRegistry = function(reg = 
getDefaultRegistry()) { # nocov start "!DEBUG [updateRegistry]: Running updateRegistry" pv = packageVersion("batchtools") if (identical(pv, reg$version)) return(FALSE) if (is.null(reg$version) || reg$version < "0.9.0") stop("Your registry is too old.") if (reg$version < "0.9.1-9000") { ### hotfix for timestamps if (is.integer(reg$status$submitted)) { info("Converting timestamps to numeric") for (x in c("submitted", "started", "done")) reg$status[[x]] = as.numeric(reg$status[[x]]) } ### hotfix for log.file column if ("log.file" %chnin% names(reg$status)) { info("Adding column 'log.file'") reg$status[, ("log.file") := rep(NA_character_, .N)] } } if (reg$version < "0.9.1-9001") { ### hotfix for base32 encoding of exports fns = list.files(fs::path(reg$file.dir, "exports"), pattern = "\\.rds$", all.files = TRUE, no.. = TRUE) if (length(fns)) { info("Renaming export files") fs::file_move( fs::path(reg$file.dir, fns), fs::path(reg$file.dir, mangle(stri_sub(fns, to = -5L))) ) } } if (reg$version < "0.9.1-9002" && inherits(reg, "ExperimentRegistry")) { info("Renaming problems and algorithm files") getProblemIds = function(reg) levels(reg$defs$problem) getAlgorithmIds = function(reg) levels(reg$defs$algorithm) for (prob in getProblemIds(reg)) fs::file_move(fs::path(reg$file.dir, "problems", sprintf("%s.rds", digest(prob))), getProblemURI(reg, prob)) for (algo in getAlgorithmIds(reg)) fs::file_move(fs::path(reg$file.dir, "algorithms", sprintf("%s.rds", digest(algo))), getAlgorithmURI(reg, algo)) } if (reg$version < "0.9.4-9001") { if ("job.name" %chnin% names(reg$status)) { info("Adding column 'job.name'") reg$status[, ("job.name") := rep(NA_character_, .N)] } } if (reg$version < "0.9.6-9001") { info("Updating registry internals") if (!inherits(reg, "ExperimentRegistry")) { setnames(reg$defs, "pars", "job.pars") } else { alloc.col(reg$defs, ncol(reg$defs) + 1L) reg$problems = levels(reg$defs$problem) reg$algorithms = levels(reg$defs$algorithm) reg$defs$problem = 
as.character(reg$defs$problem) reg$defs$algorithm = as.character(reg$defs$algorithm) reg$defs$prob.pars = lapply(reg$defs$pars, `[[`, "prob.pars") reg$defs$algo.pars = lapply(reg$defs$pars, `[[`, "algo.pars") reg$defs$pars = NULL info("Recalculating job hashes") reg$defs$pars.hash = calculateHash(reg$defs) } } if (reg$version < "0.9.7-9001") { if (inherits(reg, "ExperimentRegistry")) { info("Updating problems") for (id in reg$problems) { uri = getProblemURI(reg, id) p = readRDS(uri) p$cache = FALSE saveRDS(p, file = uri, version = 2L) } } } if (reg$version < "0.9.7-9002") { if (hasName(reg$status, "memory")) { info("Renaming memory column in data base") setnames(reg$status, "memory", "mem.used") } fns = list.files(dir(reg, "updates"), full.names = TRUE) if (length(fns) > 0L) { info("Renaming memory column in update files") updates = lapply(fns, function(fn) { x = try(readRDS(fn), silent = TRUE) if (is.error(x)) { fs::file_delete(x) } else { if (hasName(x, "memory")) { setnames(x, "memory", "mem.used") saveRDS(x, file = fn, version = 2L) } } }) } } if (is.null(reg$compress)) { reg$compress = "gzip" } reg$version = pv return(TRUE) } # nocov end batchtools/R/getErrorMessages.R0000644000176200001440000000270313533162535016231 0ustar liggesusers#' @title Retrieve Error Messages #' #' @description #' Extracts error messages from the internal data base and returns them in a table. #' #' @templateVar ids.default findErrors #' @template ids #' @param missing.as.error [\code{logical(1)}]\cr #' Treat missing results as errors? If \code{TRUE}, the error message \dQuote{[not terminated]} is imputed #' for jobs which have not terminated. Default is \code{FALSE} #' @template reg #' @return [\code{\link{data.table}}] with columns \dQuote{job.id}, \dQuote{terminated} (logical), #' \dQuote{error} (logical) and \dQuote{message} (string). 
#' @family debug #' @export #' @examples #' \dontshow{ batchtools:::example_push_temp(1) } #' tmp = makeRegistry(file.dir = NA, make.default = FALSE) #' fun = function(i) if (i == 3) stop(i) else i #' ids = batchMap(fun, i = 1:5, reg = tmp) #' submitJobs(1:4, reg = tmp) #' waitForJobs(1:4, reg = tmp) #' getErrorMessages(ids, reg = tmp) #' getErrorMessages(ids, missing.as.error = TRUE, reg = tmp) getErrorMessages = function(ids = NULL, missing.as.error = FALSE, reg = getDefaultRegistry()) { assertRegistry(reg, sync = TRUE) assertFlag(missing.as.error) ids = convertIds(reg, ids, default = .findErrors(reg = reg)) job.id = done = error = NULL tab = reg$status[ids, list(job.id, terminated = !is.na(done), error = !is.na(error), message = error)] if (missing.as.error) tab[!tab$terminated, c("error", "message") := list(TRUE, "[not terminated]")] tab[] } batchtools/R/config.R0000644000176200001440000000443213606055703014215 0ustar liggesusers#' @title Find a batchtools Configuration File #' #' @description #' This functions returns the path to the first configuration file found in the following locations: #' \enumerate{ #' \item{File \dQuote{batchtools.conf.R} in the path specified by the environment variable \dQuote{R_BATCHTOOLS_SEARCH_PATH}.} #' \item{File \dQuote{batchtools.conf.R} in the current working directory.} #' \item{File \dQuote{config.R} in the user configuration directory as reported by \code{rappdirs::user_config_dir("batchtools", expand = FALSE)} (depending on OS, e.g., on linux this usually resolves to \dQuote{~/.config/batchtools/config.R}).} #' \item{\dQuote{.batchtools.conf.R} in the home directory (\dQuote{~}).} #' \item{\dQuote{config.R} in the site config directory as reported by \code{rappdirs::site_config_dir("batchtools")} (depending on OS). This file can be used for admins to set sane defaults for a computation site.} #' } #' @return [\code{character(1)}] Path to the configuration file or \code{NA} if no configuration file was found. 
#' @keywords internal #' @export findConfFile = function() { x = Sys.getenv("R_BATCHTOOLS_SEARCH_PATH") if (nzchar(x)) { x = fs::path(x, "batchtools.conf.R") if (fs::file_access(x, "read")) return(fs::path_abs(x)) } x = "batchtools.conf.R" if (fs::file_access(x, "read")) return(fs::path_abs(x)) x = fs::path(user_config_dir("batchtools", expand = FALSE), "config.R") if (fs::file_access(x, "read")) return(x) x = fs::path("~", ".batchtools.conf.R") if (fs::file_access(x, "read")) return(fs::path_abs(x)) x = fs::path(site_config_dir("batchtools"), "config.R") if (fs::file_access(x, "read")) return(x) return(NA_character_) } setSystemConf = function(reg, conf.file) { reg$cluster.functions = makeClusterFunctionsInteractive() reg$default.resources = list() reg$temp.dir = fs::path_temp() reg$compress = "gzip" if (!is.na(conf.file)) { assertString(conf.file) info("Sourcing configuration file '%s' ...", conf.file) sys.source(conf.file, envir = reg, keep.source = FALSE) assertClass(reg$cluster.functions, "ClusterFunctions") assertList(reg$default.resources, names = "unique") fs::dir_create(reg$temp.dir) } else { info("No readable configuration file found") } } batchtools/R/waitForFiles.R0000644000176200001440000000232213453602073015340 0ustar liggesusers# use list.files() here as this seems to trick the nfs cache # see https://github.com/mllg/batchtools/issues/85 waitForFiles = function(path, fns, timeout = 0) { if (timeout == 0) return(TRUE) fns = fns[!fs::file_exists(fns)] if (length(fns) == 0L) return(TRUE) "!DEBUG [waitForFiles]: `length(fns)` files not found via 'file.exists()'" fns = chsetdiff(fns, list.files(path, all.files = TRUE)) if (length(fns) == 0L) return(TRUE) timeout = timeout + Sys.time() repeat { Sys.sleep(0.5) fns = chsetdiff(fns, list.files(path, all.files = TRUE)) if (length(fns) == 0L) return(TRUE) if (Sys.time() > timeout) stopf("Timeout while waiting for %i files, e.g. 
'%s'", length(fns), fns[1L]) } } waitForFile = function(fn, timeout = 0, must.work = TRUE) { if (timeout == 0 || fs::file_exists(fn)) return(TRUE) "!DEBUG [waitForFile]: `fn` not found via 'file.exists()'" timeout = timeout + Sys.time() path = fs::path_dir(fn) repeat { Sys.sleep(0.5) if (fn %chin% list.files(path, all.files = TRUE)) return(TRUE) if (Sys.time() > timeout) { if (must.work) stopf("Timeout while waiting for file '%s'", fn) return(FALSE) } } } batchtools/R/Registry.R0000644000176200001440000003621213606055770014565 0ustar liggesusers#' @title Registry Constructor #' #' @description #' \code{makeRegistry} constructs the inter-communication object for all functions in \code{batchtools}. #' All communication transactions are processed via the file system: #' All information required to run a job is stored as \code{\link{JobCollection}} in a file in the #' a subdirectory of the \code{file.dir} directory. #' Each jobs stores its results as well as computational status information (start time, end time, error message, ...) #' also on the file system which is regular merged parsed by the master using \code{\link{syncRegistry}}. #' After integrating the new information into the Registry, the Registry is serialized to the file system via \code{\link{saveRegistry}}. #' Both \code{\link{syncRegistry}} and \code{\link{saveRegistry}} are called whenever required internally. #' Therefore it should be safe to quit the R session at any time. #' Work can later be resumed by calling \code{\link{loadRegistry}} which de-serializes the registry from #' the file system. #' #' The registry created last is saved in the package namespace (unless \code{make.default} is set to #' \code{FALSE}) and can be retrieved via \code{\link{getDefaultRegistry}}. #' #' Canceled jobs and jobs submitted multiple times may leave stray files behind. #' These can be swept using \code{\link{sweepRegistry}}. 
#' \code{\link{clearRegistry}} completely erases all jobs from a registry, including log files and results, #' and thus allows you to start over. #' #' @details #' Currently \pkg{batchtools} understands the following options set via the configuration file: #' \describe{ #' \item{\code{cluster.functions}:}{As returned by a constructor, e.g. \code{\link{makeClusterFunctionsSlurm}}.} #' \item{\code{default.resources}:}{List of resources to use. Will be overruled by resources specified via \code{\link{submitJobs}}.} #' \item{\code{temp.dir}:}{Path to directory to use for temporary registries.} #' \item{\code{sleep}:}{Custom sleep function. See \code{\link{waitForJobs}}.} #' \item{\code{expire.after}:}{Number of iterations before treating jobs as expired in \code{\link{waitForJobs}}.} #' \item{\code{compress}:}{Compression algorithm to use via \code{\link{saveRDS}}.} #' } #' #' @param file.dir [\code{character(1)}]\cr #' Path where all files of the registry are saved. #' Default is directory \dQuote{registry} in the current working directory. #' The provided path will get normalized unless it is given relative to the home directory #' (i.e., starting with \dQuote{~}). Note that some templates do not handle relative paths well. #' #' If you pass \code{NA}, a temporary directory will be used. #' This way, you can create disposable registries for \code{\link{btlapply}} or examples. #' By default, the temporary directory \code{\link[base]{tempdir}()} will be used. #' If you want to use another directory, e.g. a directory which is shared between nodes, #' you can set it in your configuration file by setting the variable \code{temp.dir}. #' @param work.dir [\code{character(1)}]\cr #' Working directory for R process for running jobs. #' Defaults to the working directory currently set during Registry construction (see \code{\link[base]{getwd}}). 
#' \code{loadRegistry} uses the stored \code{work.dir}, but you may also explicitly overwrite it, #' e.g., after switching to another system. #' #' The provided path will get normalized unless it is given relative to the home directory #' (i.e., starting with \dQuote{~}). Note that some templates do not handle relative paths well. #' @param conf.file [\code{character(1)}]\cr #' Path to a configuration file which is sourced while the registry is created. #' In the configuration file you can define how \pkg{batchtools} interacts with the system via \code{\link{ClusterFunctions}}. #' Separating the configuration of the underlying host system from the R code allows to easily move computation to another site. #' #' The file lookup is implemented in the internal (but exported) function \code{findConfFile} which returns the first file found of the following candidates: #' \enumerate{ #' \item{File \dQuote{batchtools.conf.R} in the path specified by the environment variable \dQuote{R_BATCHTOOLS_SEARCH_PATH}.} #' \item{File \dQuote{batchtools.conf.R} in the current working directory.} #' \item{File \dQuote{config.R} in the user configuration directory as reported by \code{rappdirs::user_config_dir("batchtools", expand = FALSE)} (depending on OS, e.g., on linux this usually resolves to \dQuote{~/.config/batchtools/config.R}).} #' \item{\dQuote{.batchtools.conf.R} in the home directory (\dQuote{~}).} #' \item{\dQuote{config.R} in the site config directory as reported by \code{rappdirs::site_config_dir("batchtools")} (depending on OS). This file can be used for admins to set sane defaults for a computation site.} #' } #' Set to \code{NA} if you want to suppress reading any configuration file. #' If a configuration file is found, it gets sourced inside the environment of the registry after the defaults for all variables are set. #' Therefore you can set and overwrite slots, e.g. 
\code{default.resources = list(walltime = 3600)} to set default resources or \dQuote{max.concurrent.jobs} to #' limit the number of jobs allowed to run simultaneously on the system. #' @param packages [\code{character}]\cr #' Packages that will always be loaded on each node. #' Uses \code{\link[base]{require}} internally. #' Default is \code{character(0)}. #' @param namespaces [\code{character}]\cr #' Same as \code{packages}, but the packages will not be attached. #' Uses \code{\link[base]{requireNamespace}} internally. #' Default is \code{character(0)}. #' @param source [\code{character}]\cr #' Files which should be sourced on the slaves prior to executing a job. #' Calls \code{\link[base]{sys.source}} using the \code{\link[base]{.GlobalEnv}}. #' @param load [\code{character}]\cr #' Files which should be loaded on the slaves prior to executing a job. #' Calls \code{\link[base]{load}} using the \code{\link[base]{.GlobalEnv}}. #' @param seed [\code{integer(1)}]\cr #' Start seed for jobs. Each job uses the (\code{seed} + \code{job.id}) as seed. #' Default is a random integer between 1 and 32768 #' @param make.default [\code{logical(1)}]\cr #' If set to \code{TRUE}, the created registry is saved inside the package #' namespace and acts as default registry. You might want to switch this #' off if you work with multiple registries simultaneously. #' Default is \code{TRUE}. #' @return [\code{environment}] of class \dQuote{Registry} with the following slots: #' \describe{ #' \item{\code{file.dir} [path]:}{File directory.} #' \item{\code{work.dir} [path]:}{Working directory.} #' \item{\code{temp.dir} [path]:}{Temporary directory. Used if \code{file.dir} is \code{NA} to create temporary registries.} #' \item{\code{packages} [character()]:}{Packages to load on the slaves.} #' \item{\code{namespaces} [character()]:}{Namespaces to load on the slaves.} #' \item{\code{seed} [integer(1)]:}{Registry seed. 
Before each job is executed, the seed \code{seed + job.id} is set.} #' \item{\code{cluster.functions} [cluster.functions]:}{Usually set in your \code{conf.file}. Set via a call to \code{\link{makeClusterFunctions}}. See example.} #' \item{\code{default.resources} [named list()]:}{Usually set in your \code{conf.file}. Named list of default resources.} #' \item{\code{max.concurrent.jobs} [integer(1)]:}{Usually set in your \code{conf.file}. Maximum number of concurrent jobs for a single user and current registry on the system. #' \code{\link{submitJobs}} will try to respect this setting. The resource \dQuote{max.concurrent.jobs} has higher precedence.} #' \item{\code{defs} [data.table]:}{Table with job definitions (i.e. parameters).} #' \item{\code{status} [data.table]:}{Table holding information about the computational status. Also see \code{\link{getJobStatus}}.} #' \item{\code{resources} [data.table]:}{Table holding information about the computational resources used for the job. Also see \code{\link{getJobResources}}.} #' \item{\code{tags} [data.table]:}{Table holding information about tags. See \link{Tags}.} #' \item{\code{hash} [character(1)]:}{Unique hash which changes each time the registry gets saved to the file system. 
Can be utilized to invalidate the cache of \pkg{knitr}.} #' } #' @aliases Registry #' @family Registry #' @export #' @examples #' \dontshow{ batchtools:::example_push_temp(1) } #' tmp = makeRegistry(file.dir = NA, make.default = FALSE) #' print(tmp) #' #' # Set cluster functions to interactive mode and start jobs in external R sessions #' tmp$cluster.functions = makeClusterFunctionsInteractive(external = TRUE) #' #' # Change packages to load #' tmp$packages = c("MASS") #' saveRegistry(reg = tmp) makeRegistry = function(file.dir = "registry", work.dir = getwd(), conf.file = findConfFile(), packages = character(0L), namespaces = character(0L), source = character(0L), load = character(0L), seed = NULL, make.default = TRUE) { assertString(file.dir, na.ok = TRUE) if (!is.na(file.dir)) assertPathForOutput(file.dir, overwrite = FALSE) assertString(work.dir) assertDirectoryExists(work.dir, access = "r") assertString(conf.file, na.ok = TRUE) assertCharacter(packages, any.missing = FALSE, min.chars = 1L) assertCharacter(namespaces, any.missing = FALSE, min.chars = 1L) assertCharacter(source, any.missing = FALSE, min.chars = 1L) assertCharacter(load, any.missing = FALSE, min.chars = 1L) assertFlag(make.default) seed = if (is.null(seed)) as.integer(runif(1L, 1, 32768)) else asCount(seed, positive = TRUE) reg = new.env(parent = asNamespace("batchtools")) reg$file.dir = file.dir reg$work.dir = work.dir reg$packages = packages reg$namespaces = namespaces reg$source = source reg$load = load reg$seed = seed reg$writeable = TRUE reg$version = packageVersion("batchtools") reg$defs = data.table( def.id = integer(0L), job.pars = list(), key = "def.id") reg$status = data.table( job.id = integer(0L), def.id = integer(0L), submitted = double(0L), started = double(0L), done = double(0L), error = character(0L), mem.used = double(0L), resource.id = integer(0L), batch.id = character(0L), log.file = character(0L), job.hash = character(0L), job.name = character(0L), key = "job.id") 
reg$resources = data.table( resource.id = integer(0L), resource.hash = character(0L), resources = list(), key = "resource.id") reg$tags = data.table( job.id = integer(0L), tag = character(0L), key = "job.id") setSystemConf(reg, conf.file) if (is.na(file.dir)) reg$file.dir = fs::file_temp("registry", tmp_dir = reg$temp.dir) "!DEBUG [makeRegistry]: Creating directories in '`reg$file.dir`'" fs::dir_create(c(reg$file.dir, reg$work.dir)) reg$file.dir = fs::path_abs(reg$file.dir) reg$work.dir = fs::path_abs(reg$work.dir) fs::dir_create(fs::path(reg$file.dir, c("jobs", "results", "updates", "logs", "exports", "external"))) with_dir(reg$work.dir, loadRegistryDependencies(reg)) class(reg) = "Registry" saveRegistry(reg) reg$mtime = file_mtime(fs::path(reg$file.dir, "registry.rds")) reg$hash = rnd_hash() info("Created registry in '%s' using cluster functions '%s'", reg$file.dir, reg$cluster.functions$name) if (make.default) batchtools$default.registry = reg return(reg) } #' @export print.Registry = function(x, ...) { cat("Job Registry\n") catf(" Backend : %s", x$cluster.functions$name) catf(" File dir : %s", x$file.dir) catf(" Work dir : %s", x$work.dir) catf(" Jobs : %i", nrow(x$status)) catf(" Seed : %i", x$seed) catf(" Writeable: %s", x$writeable) } #' @title assertRegistry #' #' @description #' Assert that a given object is a \code{batchtools} registry. #' Additionally can sync the registry, check if it is writeable, or check if jobs are running. #' If any check fails, throws an error indicting the reason for the failure. #' #' @param reg [\code{\link{Registry}}]\cr #' The object asserted to be a \code{Registry}. #' @param class [\code{character(1)}]\cr #' If \code{NULL} (default), \code{reg} must only inherit from class \dQuote{Registry}. #' Otherwise check that \code{reg} is of class \code{class}. #' E.g., if set to \dQuote{Registry}, a \code{\link{ExperimentRegistry}} would not pass. #' @param writeable [\code{logical(1)}]\cr #' Check if the registry is writeable. 
#' @param sync [\code{logical(1)}]\cr
#'   Try to synchronize the registry by including pending results from the file system.
#'   See \code{\link{syncRegistry}}.
#' @param running.ok [\code{logical(1)}]\cr
#'   If \code{FALSE} throw an error if jobs associated with the registry are currently running.
#' @return \code{TRUE} invisibly.
#' @export
assertRegistry = function(reg, class = NULL, writeable = FALSE, sync = FALSE, running.ok = TRUE) {
  # In debug mode, verify that the data.table keys of the internal tables
  # have not been dropped by a previous operation (keys are required for
  # the fast keyed joins used throughout the package).
  if (batchtools$debug) {
    # FIX: error message previously named the non-existent table
    # 'reg$job.id'; the check is on the key of reg$status.
    if (!identical(key(reg$status), "job.id"))
      stop("Key of reg$status lost")
    if (!identical(key(reg$defs), "def.id"))
      stop("Key of reg$defs lost")
    if (!identical(key(reg$resources), "resource.id"))
      stop("Key of reg$resources lost")
  }

  if (is.null(class)) {
    assertClass(reg, "Registry")
  } else {
    assertString(class)
    assertClass(reg, class, ordered = TRUE)
  }
  assertFlag(writeable)
  assertFlag(sync)
  assertFlag(running.ok)

  # If the registry file on disk is newer than the copy we read, another
  # session has written to it; degrade to read-only to avoid clobbering its
  # changes. The "+ 1" accounts for coarse mtime resolution on some file systems.
  if (reg$writeable && file_mtime(fs::path(reg$file.dir, "registry.rds")) > reg$mtime + 1) {
    warning("Registry has been altered since last read. Switching to read-only mode in this session. See ?loadRegistry.")
    reg$writeable = FALSE
  }

  if (writeable && !reg$writeable)
    stop("Registry must be writeable. See ?loadRegistry.")
  if (!running.ok && nrow(.findOnSystem(reg = reg)) > 0L)
    stop("This operation is not allowed while jobs are running on the system")
  # Note: sync(reg) resolves to the *function* sync(); the logical flag
  # `sync` does not shadow it because R looks up a function in call position.
  if (sync && sync(reg))
    saveRegistry(reg)

  invisible(TRUE)
}

# Load everything jobs depend on into the current session: packages
# (attached), namespaces (loaded), script files (sourced into the global
# environment), RData files (loaded into the global environment) and objects
# exported via batchExport() from the registry's "exports" directory.
# If must.work is TRUE failures raise errors, otherwise only warnings.
loadRegistryDependencies = function(x, must.work = FALSE) {
  "!DEBUG [loadRegistryDependencies]: Starting ..."
  pkgs = union(x$packages, "methods")
  handler = if (must.work) stopf else warningf
  ok = vlapply(pkgs, require, character.only = TRUE)
  if (!all(ok))
    handler("Failed to load packages: %s", stri_flatten(pkgs[!ok], ", "))

  ok = vlapply(x$namespaces, requireNamespace)
  if (!all(ok))
    handler("Failed to load namespaces: %s", stri_flatten(x$namespaces[!ok], ", "))

  if (length(x$source) > 0L) {
    for (fn in x$source) {
      ok = try(sys.source(fn, envir = .GlobalEnv), silent = TRUE)
      if (is.error(ok))
        handler("Failed to source file '%s': %s", fn, as.character(ok))
    }
  }

  if (length(x$load) > 0L) {
    for (fn in x$load) {
      ok = try(load(fn, envir = .GlobalEnv), silent = TRUE)
      if (is.error(ok))
        handler("Failed to load file '%s': %s", fn, as.character(ok))
    }
  }

  # Exported objects are stored as base32-mangled file names (see mangle()).
  path = fs::path(x$file.dir, "exports")
  fns = list.files(path, pattern = "\\.rds$")
  if (length(fns) > 0L) {
    ee = .GlobalEnv
    Map(function(name, fn) {
      assign(x = name, value = readRDS(fn), envir = ee)
    }, name = unmangle(fns), fn = fs::path(path, fns))
  }

  invisible(TRUE)
}
batchtools/R/execJob.R0000644000176200001440000000414613432557726014332 0ustar liggesusers#' @title Execute a Single Job
#'
#' @description
#' Executes a single job (as created by \code{\link{makeJob}}) and returns
#' its result. Also works for Experiments.
#'
#' @param job [\code{\link{Job}} | \code{\link{Experiment}}]\cr
#'   Job/Experiment to execute.
#' @return Result of the job.
#' @export
#' @examples
#' \dontshow{ batchtools:::example_push_temp(1) }
#' tmp = makeRegistry(file.dir = NA, make.default = FALSE)
#' batchMap(identity, 1:2, reg = tmp)
#' job = makeJob(1, reg = tmp)
#' execJob(job)
execJob = function(job) {
  UseMethod("execJob")
}

#' @export
execJob.character = function(job) {
  # A file path was given: deserialize the stored object, then dispatch again.
  execJob(readRDS(job))
}

#' @export
execJob.JobCollection = function(job) {
  # Only singleton collections can be executed directly.
  if (nrow(job$jobs) != 1L)
    stop("You must provide a JobCollection with exactly one job")
  execJob(getJob(job, i = 1L))
}

#' @export
execJob.Job = function(job) {
  # Print a traceback on error; the user's error option is restored on exit.
  opts = options("error")
  options(error = function(e) traceback(2L))
  on.exit(options(opts))
  # this needs to be cat, message outputs to stderr which R cannot capture properly
  catf("### [bt%s]: Setting seed to %i ...", now(), job$seed)
  # If the user function declares the special argument '.job', pass the job
  # object along. The seed is set only for the duration of the call.
  if (".job" %chin% names(formals(job$fun))) {
    with_seed(job$seed, do.call(job$fun, c(job$pars, list(.job = job)), envir = .GlobalEnv))
  } else {
    with_seed(job$seed, do.call(job$fun, job$pars, envir = .GlobalEnv))
  }
}

#' @export
execJob.Experiment = function(job) {
  # Same error/traceback handling as for plain Jobs.
  opts = options("error")
  options(error = function(e) traceback(2L))
  on.exit(options(opts))
  # this needs to be cat, message outputs to stderr which R cannot capture properly
  catf("### [bt%s]: Generating problem instance for problem '%s' ...", now(), job$prob.name)
  # Force generation of the (possibly lazy) problem instance now, then lock
  # access so the algorithm cannot trigger instance generation again.
  instance = job$instance
  force(instance)
  job$allow.access.to.instance = FALSE
  wrapper = function(...) job$algorithm$fun(job = job, data = job$problem$data, instance = instance, ...)
  # this needs to be cat, message outputs to stderr which R cannot capture properly
  catf("### [bt%s]: Applying algorithm '%s' on problem '%s' for job %i (seed = %i) ...", now(), job$algo.name, job$prob.name, job$id, job$seed)
  with_seed(job$seed, do.call(wrapper, job$algo.pars, envir = .GlobalEnv))
}
batchtools/R/submitJobs.R0000644000176200001440000004234713606051736015072 0ustar liggesusers#' @title Submit Jobs to the Batch Systems
#'
#' @description
#' Submits defined jobs to the batch system.
#'
#' After submitting the jobs, you can use \code{\link{waitForJobs}} to wait for the
#' termination of jobs or call \code{\link{reduceResultsList}}/\code{\link{reduceResults}}
#' to collect partial results.
#' The progress can be monitored with \code{\link{getStatus}}.
#'
#' @section Resources:
#' You can pass arbitrary resources to \code{submitJobs()} which then are available in the cluster function template.
#' Some resources' names are standardized and it is good practice to stick to the following nomenclature to avoid confusion:
#' \describe{
#' \item{walltime:}{Upper time limit in seconds for jobs before they get killed by the scheduler. Can be passed as additional column as part of \code{ids} to set per-job resources.}
#' \item{memory:}{Memory limit in Mb. If jobs exceed this limit, they are usually killed by the scheduler. Can be passed as additional column as part of \code{ids} to set per-job resources.}
#' \item{ncpus:}{Number of (physical) CPUs to use on the slave. Can be passed as additional column as part of \code{ids} to set per-job resources.}
#' \item{omp.threads:}{Number of threads to use via OpenMP. Used to set environment variable \dQuote{OMP_NUM_THREADS}. Can be passed as additional column as part of \code{ids} to set per-job resources.}
#' \item{pp.size:}{Maximum size of the pointer protection stack, see \code{\link[base]{Memory}}.}
#' \item{blas.threads:}{Number of threads to use for the BLAS backend. Used to set environment variables \dQuote{MKL_NUM_THREADS} and \dQuote{OPENBLAS_NUM_THREADS}. Can be passed as additional column as part of \code{ids} to set per-job resources.}
#' \item{measure.memory:}{Enable memory measurement for jobs. Comes with a small runtime overhead.}
#' \item{chunks.as.arrayjobs:}{Execute chunks as array jobs.}
#' \item{pm.backend:}{Start a \pkg{parallelMap} backend on the slave.}
#' \item{foreach.backend:}{Start a \pkg{foreach} backend on the slave.}
#' \item{clusters:}{Resource used for Slurm to select the set of clusters to run \code{sbatch}/\code{squeue}/\code{scancel} on.}
#' }
#'
#' @section Chunking of Jobs:
#' Multiple jobs can be grouped (chunked) together to be executed sequentially on the batch system as a single batch job.
#' This is especially useful to avoid overburdening the scheduler by submitting thousands of jobs simultaneously.
#' To chunk jobs together, job ids must be provided as \code{data.frame} with columns \dQuote{job.id} and \dQuote{chunk} (integer).
#' All jobs with the same chunk number will be executed sequentially inside the same batch job.
#' The utility functions \code{\link{chunk}}, \code{\link{binpack}} and \code{\link{lpt}}
#' can assist in grouping jobs.
#'
#' @section Array Jobs:
#' If your cluster supports array jobs, you can set the resource \code{chunks.as.arrayjobs} to \code{TRUE} in order
#' to execute chunks as job arrays on the cluster.
#' For each chunk of size \code{n}, \pkg{batchtools} creates a \code{\link{JobCollection}} of (possibly heterogeneous) jobs which is
#' submitted to the scheduler as a single array job with \code{n} repetitions.
#' For each repetition, the \code{JobCollection} is first read from the file system, then subsetted to the \code{i}-th job using
#' the environment variable \code{reg$cluster.functions$array.var} (depending on the cluster backend, defined automatically) and finally
#' executed.
#'
#' @section Order of Submission:
#' Jobs are submitted in the order of chunks, i.e. jobs which have chunk number
#' \code{sort(unique(ids$chunk))[1]} first, then jobs with chunk number \code{sort(unique(ids$chunk))[2]}
#' and so on. If no chunks are provided, jobs are submitted in the order of \code{ids$job.id}.
#'
#' @section Limiting the Number of Jobs:
#' If requested, \code{submitJobs} tries to limit the number of concurrent jobs of the user by waiting until jobs terminate
#' before submitting new ones.
#' This can be controlled by setting \dQuote{max.concurrent.jobs} in the configuration file (see \code{\link{Registry}})
#' or by setting the resource \dQuote{max.concurrent.jobs} to the maximum number of jobs to run simultaneously.
#' If both are set, the setting via the resource takes precedence over the setting in the configuration.
#'
#' @section Measuring Memory:
#' Setting the resource \code{measure.memory} to \code{TRUE} turns on memory measurement:
#' \code{\link[base]{gc}} is called directly before and after the job and the difference is
#' stored in the internal database. Note that this is just a rough estimate and does
#' neither work reliably for external code like C/C++ nor in combination with threading.
#'
#' @section Inner Parallelization:
#' Inner parallelization is typically done via threading, sockets or MPI.
#' Two backends are supported to assist in setting up inner parallelization.
#'
#' The first package is \pkg{parallelMap}.
#' If you set the resource \dQuote{pm.backend} to \dQuote{multicore}, \dQuote{socket} or \dQuote{mpi},
#' \code{\link[parallelMap]{parallelStart}} is called on the slave before the first job in the chunk is started
#' and \code{\link[parallelMap]{parallelStop}} is called after the last job terminated.
#' This way, the resources for inner parallelization can be set and get automatically stored just like other computational resources.
#' The function provided by the user just has to call \code{\link[parallelMap]{parallelMap}} to start parallelization using the preconfigured backend.
#'
#' To control the number of CPUs, you have to set the resource \code{ncpus}.
#' Otherwise \code{ncpus} defaults to the number of available CPUs (as reported by (see \code{\link[parallel]{detectCores}}))
#' on the executing machine for multicore and socket mode and defaults to the return value of \code{\link[Rmpi]{mpi.universe.size}-1} for MPI.
#' Your template must be set up to handle the parallelization, e.g. request the right number of CPUs or start R with \code{mpirun}.
#' You may pass further options like \code{level} to \code{\link[parallelMap]{parallelStart}} via the named list \dQuote{pm.opts}.
#'
#' The second supported parallelization backend is \pkg{foreach}.
#' If you set the resource \dQuote{foreach.backend} to \dQuote{seq} (sequential mode), \dQuote{parallel} (\pkg{doParallel}) or
#' \dQuote{mpi} (\pkg{doMPI}), the requested \pkg{foreach} backend is automatically registered on the slave.
#' Again, the resource \code{ncpus} is used to determine the number of CPUs.
#'
#' Neither the namespace of \pkg{parallelMap} nor the namespace \pkg{foreach} are attached.
#' You have to do this manually via \code{\link[base]{library}} or let the registry load the packages for you.
#'
#' @note
#' If you have a large number of jobs, disabling the progress bar (\code{options(batchtools.progress = FALSE)})
#' can significantly increase the performance of \code{submitJobs}.
#'
#' @templateVar ids.default findNotSubmitted
#' @template ids
#' @param resources [\code{named list}]\cr
#'   Computational resources for the jobs to submit. The actual elements of this list
#'   (e.g. something like \dQuote{walltime} or \dQuote{nodes}) depend on your template file, exceptions are outlined in the section 'Resources'.
#'   Default settings for a system can be set in the configuration file by defining the named list \code{default.resources}.
#'   Note that these settings are merged by name, e.g. merging \code{list(walltime = 300)} into \code{list(walltime = 400, memory = 512)}
#'   will result in \code{list(walltime = 300, memory = 512)}.
#'   Same holds for individual job resources passed as additional column of \code{ids} (c.f. section 'Resources').
#' @param sleep [\code{function(i)} | \code{numeric(1)}]\cr
#'   Parameter to control the duration to sleep between temporary errors.
#'   You can pass an absolute numeric value in seconds or a \code{function(i)} which returns the number of seconds to sleep in the \code{i}-th
#'   iteration between temporary errors.
#'   If not provided (\code{NULL}), tries to read the value (number/function) from the configuration file (stored in \code{reg$sleep}) or defaults to
#'   a function with exponential backoff between 5 and 120 seconds.
#' @template reg
#' @return [\code{\link{data.table}}] with columns \dQuote{job.id} and \dQuote{chunk}.
#' @export
#' @examples
#' \dontshow{ batchtools:::example_push_temp(3) }
#' ### Example 1: Submit subsets of jobs
#' tmp = makeRegistry(file.dir = NA, make.default = FALSE)
#'
#' # toy function which fails if x is even and an input file does not exist
#' fun = function(x, fn) if (x %% 2 == 0 && !file.exists(fn)) stop("file not found") else x
#'
#' # define jobs via batchMap
#' fn = tempfile()
#' ids = batchMap(fun, 1:20, reg = tmp, fn = fn)
#'
#' # submit some jobs
#' ids = 1:10
#' submitJobs(ids, reg = tmp)
#' waitForJobs(ids, reg = tmp)
#' getStatus(reg = tmp)
#'
#' # create the required file and re-submit failed jobs
#' file.create(fn)
#' submitJobs(findErrors(ids, reg = tmp), reg = tmp)
#' getStatus(reg = tmp)
#'
#' # submit remaining jobs which have not yet been submitted
#' ids = findNotSubmitted(reg = tmp)
#' submitJobs(ids, reg = tmp)
#' getStatus(reg = tmp)
#'
#' # collect results
#' reduceResultsList(reg = tmp)
#'
#' ### Example 2: Using memory measurement
#' tmp = makeRegistry(file.dir = NA, make.default = FALSE)
#'
#' # Toy function which creates a large matrix and returns the column sums
#' fun = function(n, p) colMeans(matrix(runif(n*p), n, p))
#'
#' # Arguments to fun:
#' args = data.table::CJ(n = c(1e4, 1e5), p = c(10, 50)) # like expand.grid()
#' print(args)
#'
#' # Map function to create jobs
#' ids = batchMap(fun, args = args, reg = tmp)
#'
#' # Set resources: enable memory measurement
#' res = list(measure.memory = TRUE)
#'
#' # Submit jobs using the currently configured cluster functions
#' submitJobs(ids, resources = res, reg = tmp)
#'
#' # Retrieve information about memory, combine with parameters
#' info = ijoin(getJobStatus(reg = tmp)[, .(job.id, mem.used)], getJobPars(reg = tmp))
#' print(unwrap(info))
#'
#' # Combine job info with results -> each job is aggregated using mean()
#' unwrap(ijoin(info, reduceResultsDataTable(fun = function(res) list(res = mean(res)), reg = tmp)))
#'
#' ### Example 3: Multicore execution on the slave
#' tmp = makeRegistry(file.dir = NA, make.default = FALSE)
#'
#' # Function which sleeps 10 seconds, i-times
#' f = function(i) {
#'   parallelMap::parallelMap(Sys.sleep, rep(10, i))
#' }
#'
#' # Create one job with parameter i=4
#' ids = batchMap(f, i = 4, reg = tmp)
#'
#' # Set resources: Use parallelMap in multicore mode with 4 CPUs
#' # batchtools internally loads the namespace of parallelMap and then
#' # calls parallelStart() before the job and parallelStop() right
#' # after the last job in the chunk terminated.
#' res = list(pm.backend = "multicore", ncpus = 4)
#'
#' \dontrun{
#' # Submit the job and wait for it
#' submitJobs(resources = res, reg = tmp)
#' waitForJobs(reg = tmp)
#'
#' # If successful, the running time should be ~10s
#' getJobTable(reg = tmp)[, .(job.id, time.running)]
#'
#' # There should also be a note in the log:
#' grepLogs(pattern = "parallelMap", reg = tmp)
#' }
submitJobs = function(ids = NULL, resources = list(), sleep = NULL, reg = getDefaultRegistry()) {
  assertRegistry(reg, writeable = TRUE, sync = TRUE)
  assertList(resources, names = "strict")
  # Merge user resources over the configured defaults (user wins on conflict),
  # then validate the standardized resource names.
  resources = insert(reg$default.resources, resources)
  if (hasName(resources, "pm.backend")) assertChoice(resources$pm.backend, c("local", "multicore", "socket", "mpi"))
  if (hasName(resources, "foreach.backend")) assertChoice(resources$foreach.backend, c("seq", "parallel", "mpi"))
  if (hasName(resources, "pm.opts")) assertList(resources$pm.opts, names = "unique")
  if (hasName(resources, "ncpus")) assertCount(resources$ncpus, positive = TRUE)
  if (hasName(resources, "measure.memory")) assertFlag(resources$measure.memory)
  sleep = getSleepFunction(reg, sleep)
  # Keep the "chunk" column and any per-job resource columns attached to ids.
  ids = convertIds(reg, ids, default = .findNotSubmitted(reg = reg), keep.extra = c("chunk", batchtools$resources$per.job))
  if (nrow(ids) == 0L)
    return(noIds())

  # handle chunks
  use.chunking = hasName(ids, "chunk") && anyDuplicated(ids, by = "chunk") > 0L
  if (use.chunking) {
    ids$chunk = asInteger(ids$chunk, any.missing = FALSE)
    chunks = sort(unique(ids$chunk))
  } else {
    # no chunking: every job becomes its own chunk, in job.id order
    chunks = ids$chunk = seq_row(ids)
  }

  # check for jobs already on system
  on.sys = .findOnSystem(reg = reg, cols = c("job.id", "batch.id"))
  ids.on.sys = on.sys[ids, nomatch = 0L, on = "job.id"]
  if (nrow(ids.on.sys) > 0L)
    stopf("Some jobs are already on the system, e.g. %i", ids.on.sys[1L, ]$job.id)

  # handle max.concurrent.jobs; resource setting takes precedence over config
  max.concurrent.jobs = assertCount(resources$max.concurrent.jobs, null.ok = TRUE) %??% assertCount(reg$max.concurrent.jobs, null.ok = TRUE) %??% NA_integer_

  # handle chunks.as.arrayjobs
  chunks.as.arrayjobs = FALSE
  if (hasName(resources, "chunks.as.arrayjobs")) {
    assertFlag(resources$chunks.as.arrayjobs)
    if (resources$chunks.as.arrayjobs) {
      if (is.na(reg$cluster.functions$array.var)) {
        info("Ignoring resource 'chunks.as.arrayjobs', not supported by cluster functions '%s'", reg$cluster.functions$name)
      } else {
        chunks.as.arrayjobs = TRUE
      }
    }
  }

  # Disable throttling if the submission cannot exceed the limit anyway.
  if (!is.na(max.concurrent.jobs)) {
    if (uniqueN(on.sys, by = "batch.id") + (!chunks.as.arrayjobs) * length(chunks) + chunks.as.arrayjobs * nrow(ids) > max.concurrent.jobs) {
      "!DEBUG [submitJobs]: Limiting the number of concurrent jobs to `max.concurrent.jobs`"
    } else {
      max.concurrent.jobs = NA_integer_
    }
  }

  # handle job resources (per-job resources passed as extra columns of ids)
  per.job.resources = chintersect(names(ids), batchtools$resources$per.job)
  if (length(per.job.resources) > 0L) {
    if (use.chunking)
      stopf("Combining per-job resources with chunking is not supported")
    ids$resource.id = addResources(reg, .mapply(function(...) insert(resources, list(...)), ids[, per.job.resources, with = FALSE], MoreArgs = list()))
    ids[, (per.job.resources) := NULL]
  } else {
    ids$resource.id = addResources(reg, list(resources))
  }

  info("Submitting %i jobs in %i chunks using cluster functions '%s' ...", nrow(ids), length(chunks), reg$cluster.functions$name)
  on.exit(saveRegistry(reg))
  chunk = NULL # silence data.table NSE note
  runHook(reg, "pre.submit")
  pb = makeProgressBar(total = length(chunks), format = ":status [:bar] :percent eta: :eta")
  pb$tick(0, tokens = list(status = "Submitting"))
  for (ch in chunks) {
    ids.chunk = ids[chunk == ch, c("job.id", "resource.id")]
    jc = makeJobCollection(ids.chunk, resources = reg$resources[ids.chunk, on = "resource.id"]$resources[[1L]], reg = reg)
    if (reg$cluster.functions$store.job.collection)
      writeRDS(jc, file = jc$uri, compress = jc$compress)

    # do we have to wait for jobs to get terminated before proceeding?
    if (!is.na(max.concurrent.jobs)) {
      # count chunks or job.id
      i = 1L
      repeat {
        n.on.sys = uniqueN(getBatchIds(reg), by = "batch.id")
        "!DEBUG [submitJobs]: Detected `n.on.sys` batch jobs on system (`max.concurrent.jobs` allowed concurrently)"
        if (n.on.sys < max.concurrent.jobs)
          break
        pb$tick(0, tokens = list(status = "Waiting "))
        sleep(i)
        i = i + 1L
      }
    }

    # remove old result files
    fns = getResultFiles(reg, ids.chunk)
    file_remove(fns)

    # Retry loop: temporary errors (status 1..99) are retried with backoff,
    # fatal errors (status 101..200) abort the submission.
    # NOTE(review): a status of exactly 100 matches neither branch and would
    # busy-loop here without sleeping — confirm cluster functions never return 100.
    i = 1L
    repeat {
      runHook(reg, "pre.submit.job")
      now = ustamp()
      submit = reg$cluster.functions$submitJob(reg = reg, jc = jc)

      if (submit$status == 0L) {
        if (!testCharacter(submit$batch.id, any.missing = FALSE, min.len = 1L)) {
          stopf("Cluster function did not return valid batch ids:\n%s", stri_flatten(capture.output(str(submit$batch.id)), "\n"))
        }
        # Reset the status row(s) of the chunk and record the new batch id.
        reg$status[ids.chunk, c("submitted", "started", "done", "error", "mem.used", "resource.id", "batch.id", "log.file", "job.hash") := list(now, NA_real_, NA_real_, NA_character_, NA_real_, ids.chunk$resource.id, submit$batch.id, submit$log.file, jc$job.hash)]
        runHook(reg, "post.submit.job")
        break
      } else if (submit$status > 0L && submit$status < 100L) {
        # temp error
        pb$tick(0, tokens = list(status = submit$msg))
        sleep(i)
        i = i + 1L
      } else if (submit$status > 100L && submit$status <= 200L) {
        # fatal error
        stopf("Fatal error occurred: %i. %s", submit$status, submit$msg)
      }
    }
    pb$tick(len = 1, tokens = list(status = "Submitting"))
  }

  Sys.sleep(reg$cluster.functions$scheduler.latency)
  runHook(reg, "post.submit")
  # return ids, registry is saved via on.exit()
  return(invisible(ids))
}

# Deduplicate and register resource specifications in reg$resources.
# Each distinct resource list is hashed; new hashes are appended with a fresh
# auto-incremented resource.id. Returns the resource.id for every element of
# `resources` (a list of named lists), in order.
addResources = function(reg, resources) {
  ai = function(tab, col) {
    # auto increment by reference
    i = tab[is.na(get(col)), which = TRUE]
    if (length(i) > 0L) {
      ids = seq_along(i)
      if (length(i) < nrow(tab))
        ids = ids + max(tab[, max(col, na.rm = TRUE), with = FALSE][[1L]], na.rm = TRUE)
      tab[i, (col) := ids]
      setkeyv(tab, col)[]
    }
  }

  tab = data.table(resources = resources, resource.hash = vcapply(resources, digest))
  new.tab = unique(tab, by = "resource.hash")[!reg$resources, on = "resource.hash"]
  if (nrow(new.tab)) {
    reg$resources = rbindlist(list(reg$resources, new.tab), fill = TRUE, use.names = TRUE)
    ai(reg$resources, "resource.id")
  }
  # Map each input spec back to its (possibly pre-existing) resource.id.
  reg$resources[tab, "resource.id", on = "resource.hash"][[1L]]
}
batchtools/R/batchReduce.R0000644000176200001440000000453613276253713015162 0ustar liggesusers#' @title Reduce Operation for Batch Systems
#'
#' @description
#' A parallel and asynchronous \code{\link[base]{Reduce}} for batch systems.
#' Note that this function only defines the computational jobs.
#' Each job reduces a certain number of elements on one slave.
#' The actual computation is started with \code{\link{submitJobs}}.
#' Results and partial results can be collected with \code{\link{reduceResultsList}}, \code{\link{reduceResults}} or
#' \code{\link{loadResult}}.
#'
#' @param fun [\code{function(aggr, x, ...)}]\cr
#'   Function to reduce \code{xs} with.
#' @param xs [\code{vector}]\cr
#'   Vector to reduce.
#' @param init [ANY]\cr
#'   Initial object for reducing. See \code{\link[base]{Reduce}}.
#' @param chunks [\code{integer(length(xs))}]\cr
#'   Group for each element of \code{xs}. Can be generated with \code{\link{chunk}}.
#' @param more.args [\code{list}]\cr
#'   A list of additional arguments passed to \code{fun}.
#' @template reg
#' @return [\code{\link{data.table}}] with ids of added jobs stored in column \dQuote{job.id}.
#' @export
#' @seealso \code{\link{batchMap}}
#' @examples
#' \dontshow{ batchtools:::example_push_temp(1) }
#' # define function to reduce on slave, we want to sum a vector
#' tmp = makeRegistry(file.dir = NA, make.default = FALSE)
#' xs = 1:100
#' f = function(aggr, x) aggr + x
#'
#' # sum 20 numbers on each slave process, i.e. 5 jobs
#' chunks = chunk(xs, chunk.size = 5)
#' batchReduce(fun = f, 1:100, init = 0, chunks = chunks, reg = tmp)
#' submitJobs(reg = tmp)
#' waitForJobs(reg = tmp)
#'
#' # now reduce one final time on master
#' reduceResults(fun = function(aggr, job, res) f(aggr, res), reg = tmp)
batchReduce = function(fun, xs, init = NULL, chunks = seq_along(xs), more.args = list(), reg = getDefaultRegistry()) {
  assertRegistry(reg, class = "Registry", writeable = TRUE)
  # batchReduce defines the complete job set; refuse to mix with prior jobs.
  if (nrow(reg$defs) > 0L)
    stop("Registry must be empty")
  assertFunction(fun, c("aggr", "x"))
  assertAtomicVector(xs)
  assertIntegerish(chunks, len = length(xs), any.missing = FALSE, lower = 0L)
  assertList(more.args, names = "strict")
  # Smuggle fun/init to the slave via more.args (dot-prefixed to avoid
  # clashing with user-supplied argument names), one job per chunk of xs.
  more.args = c(more.args, list(.fun = fun, .init = init))
  batchMap(batchReduceWrapper, unname(split(xs, chunks)), more.args = more.args, reg = reg)
}

# Executed on the slave: reduce one block of xs with the user function,
# forwarding any extra arguments captured in `...`.
batchReduceWrapper = function(xs.block, .fun, .init, ...) {
  fun = function(aggr, x) .fun(aggr, x, ...)
  Reduce(fun, xs.block, init = .init)
}
batchtools/R/getStatus.R0000644000176200001440000000660113361631777014732 0ustar liggesusers#' @title Summarize the Computational Status
#'
#' @description
#' This function gives an encompassing overview over the computational status on your system.
#' The status can be one or many of the following:
#' \itemize{
#' \item \dQuote{defined}: Jobs which are defined via \code{\link{batchMap}} or \code{\link{addExperiments}}, but are not yet submitted.
#' \item \dQuote{submitted}: Jobs which are submitted to the batch system via \code{\link{submitJobs}}, scheduled for execution.
#' \item \dQuote{started}: Jobs which have been started.
#' \item \dQuote{done}: Jobs which terminated successfully.
#' \item \dQuote{error}: Jobs which terminated with an exception.
#' \item \dQuote{running}: Jobs which are listed by the cluster functions to be running on the live system. Not supported for all cluster functions.
#' \item \dQuote{queued}: Jobs which are listed by the cluster functions to be queued on the live system. Not supported for all cluster functions.
#' \item \dQuote{system}: Jobs which are listed by the cluster functions to be queued or running. Not supported for all cluster functions.
#' \item \dQuote{expired}: Jobs which have been submitted, but vanished from the live system. Note that this is determined heuristically and may include some false positives.
#' }
#' Here, a job which terminated successfully counts towards the jobs which are submitted, started and done.
#' To retrieve the corresponding job ids, see \code{\link{findJobs}}.
#'
#' @templateVar ids.default all
#' @template ids
#' @template reg
#' @return [\code{\link[data.table]{data.table}}] (with class \dQuote{Status} for printing).
#' @seealso \code{\link{findJobs}}
#' @export
#' @family debug
#' @examples
#' \dontshow{ batchtools:::example_push_temp(1) }
#' tmp = makeRegistry(file.dir = NA, make.default = FALSE)
#' fun = function(i) if (i == 3) stop(i) else i
#' ids = batchMap(fun, i = 1:5, reg = tmp)
#' submitJobs(ids = 1:4, reg = tmp)
#' waitForJobs(reg = tmp)
#'
#' tab = getStatus(reg = tmp)
#' print(tab)
#' str(tab)
getStatus = function(ids = NULL, reg = getDefaultRegistry()) {
  assertRegistry(reg, sync = TRUE)
  stats = getStatusTable(convertIds(reg, ids), reg = reg)
  # "Status" class only controls pretty-printing; the data stays a data.table.
  setClasses(stats, c("Status", class(stats)))
}

# Aggregate reg$status (optionally restricted to `ids`) into a one-row table
# of counters, joined against the live batch ids reported by the scheduler.
# Note: the default for batch.ids lazily references the later argument `reg`,
# which is fine due to R's lazy evaluation of default arguments.
getStatusTable = function(ids = NULL, batch.ids = getBatchIds(reg = reg), reg = getDefaultRegistry()) {
  submitted = started = done = error = status = NULL # silence data.table NSE notes
  stats = merge(filter(reg$status, ids), batch.ids, by = "batch.id", all.x = TRUE, all.y = FALSE, sort = FALSE)[, list(
    defined = .N,
    submitted = count(submitted),
    # started if we recorded a start time or the scheduler reports it running
    started = sum(!is.na(started) | !is.na(status) & status == "running"),
    done = count(done),
    error = count(error),
    queued = sum(status == "queued", na.rm = TRUE),
    running = sum(status == "running", na.rm = TRUE),
    # heuristic: submitted, not finished, and no longer visible on the system
    expired = sum(!is.na(submitted) & is.na(done) & is.na(status))
  )]
  # "done" excludes errors; a job with an error also has a "done" timestamp.
  stats$done = stats$done - stats$error
  stats$system = stats$queued + stats$running
  return(stats)
}

#' @export
print.Status = function(x, ...) {
  # Right-align all counters to the width of the total job count.
  fmt = sprintf(" %%-13s: %%%ii (%%5.1f%%%%)", stri_width(x$defined))
  pr = function(label, h) catf(fmt, label, h, h / x$defined * 100)

  catf("Status for %i jobs at %s:", x$defined, strftime(Sys.time()))
  pr("Submitted", x$submitted)
  pr("-- Queued", x$queued)
  pr("-- Started", x$started)
  pr("---- Running", x$running)
  pr("---- Done", x$done)
  pr("---- Error", x$error)
  pr("---- Expired", x$expired)
}
batchtools/R/ids.R0000644000176200001440000000413713361632161013526 0ustar liggesusers# All job ids currently defined in the registry, as a keyed one-column table.
allIds = function(reg) {
  reg$status[, "job.id"]
}

# Canonical empty result: zero rows, keyed on job.id.
noIds = function() {
  data.table(job.id = integer(0L), key = "job.id")
}

# Coerce user-provided ids (data.table / data.frame / integerish vector) into
# a data.table with an integer column "job.id", optionally keyed. Copies are
# made only when a cast or re-keying is actually required.
castIds = function(ids, setkey = TRUE) {
  if (is.data.table(ids)) {
    qassert(ids$job.id, "X", .var.name = "column 'job.id'")
    if (!is.integer(ids$job.id)) {
      "!DEBUG [castIds]: Casting ids in data.table to integer"
      ids = copy(ids)
      ids$job.id = as.integer(ids$job.id)
    }
    if (setkey && !identical(key(ids), "job.id")) {
      "!DEBUG [castIds]: Setting missing key for ids table"
      ids = copy(ids)
      setkeyv(ids, "job.id")
    }
    return(ids)
  }

  if (is.data.frame(ids)) {
    "!DEBUG [castIds]: Casting ids from data.frame to data.table"
    ids$job.id = asInteger(ids$job.id, .var.name = "column 'job.id'")
    ids = as.data.table(ids)
    if (setkey)
      setkeyv(ids, "job.id")
    return(ids)
  }

  if (qtest(ids, "X")) {
    "!DEBUG [castIds]: Casting ids from vector to data.table"
    return(data.table(job.id = as.integer(ids), key = if (setkey) "job.id" else NULL))
  }

  stop("Format of 'ids' not recognized. Must be a data.frame with column 'job.id' or an integerish vector")
}

# Normalize user-provided ids against the registry:
# * NULL returns `default`
# * unknown ids are dropped with an informational message
# * extra columns listed in keep.extra (e.g. "chunk") are retained
# Returns a data.table with column "job.id" (+ kept extras).
convertIds = function(reg, ids, default = NULL, keep.extra = character(0L), keep.order = FALSE) {
  if (is.null(ids))
    return(default)

  ids = castIds(ids, setkey = !keep.order)
  if (anyDuplicated(ids, by = "job.id"))
    stop("Duplicated ids provided")

  invalid = ids[!reg$status, on = "job.id", which = TRUE]
  if (length(invalid) > 0L) {
    # FIX: pluralization previously tested length(ids), which is the number
    # of *columns* of the ids table, not the number of ignored ids.
    info("Ignoring %i invalid job id%s", length(invalid), if (length(invalid) > 1L) "s" else "")
    ids = ids[-invalid]
  }

  cols = if (length(keep.extra)) union("job.id", chintersect(keep.extra, names(ids))) else "job.id"
  ids[, cols, with = FALSE]
}

# Like convertIds, but enforces exactly one valid id.
convertId = function(reg, id) {
  id = convertIds(reg, id)
  if (nrow(id) != 1L)
    stopf("You must provide exactly one valid id (%i provided)", nrow(id))
  return(id)
}

# Subset table `x` by the ids in `y` (NULL means "all rows"), optionally
# restricted to `cols`. Joins on the key of `x`, dropping non-matches.
filter = function(x, y, cols) {
  if (is.null(y)) {
    if (missing(cols))
      return(x)
    return(x[, cols, with = FALSE])
  }
  return(x[y, cols, on = key(x), nomatch = 0L, with = missing(cols)])
}
batchtools/R/sweepRegistry.R0000644000176200001440000000424413453602073015623 0ustar liggesusers#' @title Check Consistency and Remove Obsolete Information
#'
#' @description
#' Canceled jobs and jobs submitted multiple times may leave stray files behind.
#' This function checks the registry for consistency and removes obsolete files
#' and redundant data base entries.
#'
#' @template reg
#' @family Registry
#' @export
sweepRegistry = function(reg = getDefaultRegistry()) {
  assertRegistry(reg, writeable = TRUE, sync = TRUE, running.ok = FALSE)
  "!DEBUG [sweepRegistry]: Running sweepRegistry"

  # Only files belonging to currently-submitted jobs are kept; everything
  # else in results/ and logs/ is residue from canceled or resubmitted jobs.
  submitted = reg$status[.findSubmitted(reg = reg), c("job.id", "job.hash")]

  obsolete = chsetdiff(
    list.files(dir(reg, "results"), full.names = TRUE),
    getResultFiles(reg, submitted)
  )
  if (length(obsolete)) {
    info("Removing %i obsolete result files ...", length(obsolete))
    fs::file_delete(obsolete)
  }

  obsolete = chsetdiff(
    list.files(dir(reg, "logs"), full.names = TRUE),
    getLogFiles(reg, submitted)
  )
  if (length(obsolete)) {
    info("Removing %i obsolete log files ...", length(obsolete))
    fs::file_delete(obsolete)
  }

  # Job collection files are only needed while jobs run; remove all of them.
  obsolete = list.files(dir(reg, "jobs"), pattern = "\\.rds", full.names = TRUE)
  if (length(obsolete)) {
    info("Removing %i obsolete job collection files ...", length(obsolete))
    fs::file_delete(obsolete)
  }

  obsolete = list.files(dir(reg, "jobs"), pattern = "\\.job$", full.names = TRUE)
  if (length(obsolete)) {
    info("Removing %i job description files ...", length(obsolete))
    fs::file_delete(obsolete)
  }

  obsolete = chsetdiff(
    list.files(dir(reg, "external"), pattern = "^[0-9]+$", full.names = TRUE),
    getExternalDirs(reg, submitted)
  )
  if (length(obsolete)) {
    info("Removing %i external directories of unsubmitted jobs ...", length(obsolete))
    fs::dir_delete(obsolete)
  }

  # Prune data base rows no longer referenced by any job.
  obsolete = reg$resources[!reg$status, on = "resource.id", which = TRUE]
  if (length(obsolete)) {
    info("Removing %i resource specifications ...", length(obsolete))
    reg$resources = reg$resources[-obsolete]
  }

  obsolete = reg$tags[!reg$status, on = "job.id", which = TRUE]
  if (length(obsolete)) {
    info("Removing %i tags ...", length(obsolete))
    reg$tags = reg$tags[-obsolete]
  }

  saveRegistry(reg)
}
batchtools/R/files.R0000644000176200001440000000244113606043466014053 0ustar liggesusers# Absolute path of a subdirectory of the registry's file.dir.
dir = function(reg, what) {
  fs::path(fs::path_expand(reg$file.dir), what)
}

# Result file path(s) for the given ids (vector or table with $job.id).
getResultFiles = function(reg, ids) {
  fs::path(dir(reg, "results"), sprintf("%i.rds", if (is.atomic(ids)) ids else ids$job.id))
}

# Log file path per job: explicit log.file if recorded, otherwise derived
# from the job hash; NA for jobs which were never submitted.
getLogFiles = function(reg, ids) {
  job.hash = log.file = NULL # silence data.table NSE notes
  tab = reg$status[list(ids), c("job.id", "job.hash", "log.file")]
  tab[is.na(log.file) & !is.na(job.hash), log.file := sprintf("%s.log", job.hash)]
  tab[!is.na(log.file), log.file := fs::path(dir(reg, "logs"), log.file)]$log.file
}

# Job collection file path for a given job hash.
getJobFiles = function(reg, hash) {
  fs::path(reg$file.dir, "jobs", sprintf("%s.rds", hash))
}

# Per-job scratch directories below "external".
getExternalDirs = function(reg, ids) {
  fs::path(dir(reg, "external"), if (is.atomic(ids)) ids else ids$job.id)
}

# Encode an arbitrary object name as a safe .rds file name (base32).
mangle = function(x) {
  sprintf("%s.rds", base32_encode(x, use.padding = FALSE))
}

# Inverse of mangle(): strip the ".rds" suffix and decode.
unmangle = function(x) {
  base32_decode(stri_sub(x, to = -5L), use.padding = FALSE)
}

# Delete files and block until they are really gone; the retry loop
# presumably copes with latency on network file systems — NOTE(review): confirm.
file_remove = function(x) {
  fs::file_delete(x[fs::file_exists(x)])
  while(any(i <- fs::file_exists(x))) {
    Sys.sleep(0.5)
    fs::file_delete(x[i])
  }
}

# Modification time of a file (POSIXct).
file_mtime = function(x) {
  fs::file_info(x)$modification_time
}

# Atomically-ish replace `file` with a serialized copy of `object`
# (version 2 for backward compatibility) and wait until it is visible.
writeRDS = function(object, file, compress = "gzip") {
  file_remove(file)
  saveRDS(object, file = file, version = 2L, compress = compress)
  waitForFile(file, 300)
  invisible(TRUE)
}
batchtools/R/summarizeExperiments.R0000644000176200001440000000147513214447313017211 0ustar liggesusers#' @title Quick Summary over Experiments
#'
#' @description
#' Returns a frequency table of defined experiments.
#' See \code{\link{ExperimentRegistry}} for an example.
#'
#' @templateVar ids.default all
#' @template ids
#' @param by [\code{character}]\cr
#'   Split the resulting table by columns of \code{\link{getJobPars}}.
#' @template expreg
#' @return [\code{\link{data.table}}] of frequencies.
#' @export
#' @family Experiment
summarizeExperiments = function(ids = NULL, by = c("problem", "algorithm"), reg = getDefaultRegistry()) {
  assertRegistry(reg, class = "ExperimentRegistry")
  assertCharacter(by, any.missing = FALSE, min.chars = 1L, min.len = 1L, unique = TRUE)
  tab = getJobPars(ids = ids, reg = reg)
  # unwrap() flattens the parameter list columns; only needed if the user
  # groups by something other than the plain problem/algorithm columns
  if (!setequal(by, c("problem", "algorithm")))
    tab = unwrap(tab)
  tab[, list(.count = .N), by = by]
}
batchtools/R/clearRegistry.R0000644000176200001440000000117413234300075015557 0ustar liggesusers#' @title Remove All Jobs
#' @description
#' Removes all jobs from a registry and calls \code{\link{sweepRegistry}}.
#'
#' @template reg
#' @family Registry
#' @export
clearRegistry = function(reg = getDefaultRegistry()) {
  assertRegistry(reg, writeable = TRUE, sync = TRUE, running.ok = FALSE)
  info("Removing %i jobs ...", nrow(reg$status))
  # truncate all data base tables (keeps columns/keys, drops all rows)
  reg$status = reg$status[FALSE]
  reg$defs = reg$defs[FALSE]
  reg$resources = reg$resources[FALSE]
  user.fun = fs::path(reg$file.dir, "user.function.rds")
  if (fs::file_exists(user.fun)) {
    info("Removing user function ...")
    file_remove(user.fun)
  }
  # sweepRegistry() also persists the registry via saveRegistry()
  sweepRegistry(reg = reg)
}
batchtools/R/Job.R0000644000176200001440000001722313606052202013453 0ustar liggesusers# Common state of Job and Experiment: location, id, seed and resources.
# 'reader' is an RDSReader used to (lazily) fetch files from the registry.
BaseJob = R6Class("BaseJob", cloneable = FALSE,
  public = list(
    file.dir = NULL,
    id = NULL,
    seed = NULL,
    resources = NULL,
    reader = NULL,
    initialize = function(file.dir, reader, id, seed, resources) {
      self$file.dir = file.dir
      self$reader = reader
      self$id = id
      self$seed = seed
      self$resources = resources
    }
  ),
  active = list(
    job.id = function() {
      # alias for id. This is confusing not to have.
      self$id
    },
    external.dir = function() {
      # created on first access; dir_create returns the path
      fs::dir_create(fs::path(self$file.dir, "external", self$id))
    }
  )
)

# A job created via batchMap(): user function + per-job parameters.
Job = R6Class("Job", cloneable = FALSE, inherit = BaseJob,
  public = list(
    job.pars = NULL,
    initialize = function(file.dir, reader, id, job.pars, seed, resources) {
      self$job.pars = job.pars
      super$initialize(file.dir, reader, id, seed, resources)
    }
  ),
  active = list(
    fun = function() {
      # lazily loads the serialized user function from the registry
      self$reader$get(fs::path(self$file.dir, "user.function.rds"))
    },
    pars = function() {
      # job parameters combined with the constant 'more.args' of batchMap()
      c(self$job.pars, self$reader$get(fs::path(self$file.dir, "more.args.rds")))
    }
  )
)

# A job of an ExperimentRegistry: problem + algorithm + replication.
Experiment = R6Class("Experiment", cloneable = FALSE, inherit = BaseJob,
  public = list(
    repl = NA_integer_,
    prob.name = NULL,
    algo.name = NULL,
    prob.pars = NULL,
    algo.pars = NULL,
    compress = NULL,
    # guard flag: unset while generating the instance to avoid recursion
    allow.access.to.instance = TRUE,
    initialize = function(file.dir, reader, id, prob.pars, algo.pars, repl, seed, resources, prob.name, algo.name, compress = "gzip") {
      super$initialize(file.dir, reader, id,seed, resources)
      self$repl = repl
      self$prob.name = as.character(prob.name)
      self$prob.pars = prob.pars
      self$algo.name = as.character(algo.name)
      self$algo.pars = algo.pars
      self$compress = compress
    }
  ),
  active = list(
    problem = function() {
      self$reader$get(getProblemURI(self, self$prob.name), slot = "..problem..")
    },
    algorithm = function() {
      self$reader$get(getAlgorithmURI(self, self$algo.name))
    },
    pars = function() {
      list(prob.pars = self$prob.pars, algo.pars = self$algo.pars)
    },
    instance = function() {
      # Generate (or restore from cache) the problem instance.
      # Deliberately NOT cached in the object: generation may be stochastic.
      if (!self$allow.access.to.instance)
        stop("You cannot access 'job$instance' in the problem generation or algorithm function")
      p = self$problem
      if (p$cache) {
        cache.file = getProblemCacheURI(self)
        if (fs::file_exists(cache.file)) {
          # try(): a corrupt/half-written cache file falls through to re-generation
          result = try(readRDS(cache.file))
          if (!inherits(result, "try-error"))
            return(result)
        }
      }
      # problem seed (offset by replication) takes precedence over the job seed
      seed = if (is.null(p$seed)) self$seed else getSeed(p$seed, self$repl - 1L)
      wrapper = function(...) p$fun(job = self, data = p$data, ...)
      result = with_seed(seed, do.call(wrapper, self$prob.pars, envir = .GlobalEnv))
      if (p$cache)
        writeRDS(result, file = cache.file, compress = self$compress)
      return(result)
    }
  )
)

#' @title Jobs and Experiments
#'
#' @description
#' Jobs and Experiments are abstract objects which hold all information necessary to execute a single computational
#' job for a \code{\link{Registry}} or \code{\link{ExperimentRegistry}}, respectively.
#'
#' They can be created using the constructor \code{makeJob} which takes a single job id.
#' Jobs and Experiments are passed to reduce functions like \code{\link{reduceResults}}.
#' Furthermore, Experiments can be used in the functions of the \code{\link{Problem}} and \code{\link{Algorithm}}.
#' Jobs and Experiments hold these information:
#' \describe{
#'  \item{\code{job.id}}{Job ID as integer.}
#'  \item{\code{pars}}{
#'    Job parameters as named list.
#'    For \code{\link{ExperimentRegistry}}, the parameters are divided into the sublists \dQuote{prob.pars} and \dQuote{algo.pars}.
#'  }
#'  \item{\code{seed}}{Seed which is set via \code{\link{doJobCollection}} as scalar integer.}
#'  \item{\code{resources}}{Computational resources which were set for this job as named list.}
#'  \item{\code{external.dir}}{
#'    Path to a directory which is created exclusively for this job. You can store external files here.
#'    Directory is persistent between multiple restarts of the job and can be cleaned by calling \code{\link{resetJobs}}.
#' } #' \item{\code{fun}}{Job only: User function passed to \code{\link{batchMap}}.} #' \item{\code{prob.name}}{Experiments only: Problem id.} #' \item{\code{algo.name}}{Experiments only: Algorithm id.} #' \item{\code{problem}}{Experiments only: \code{\link{Problem}}.} #' \item{\code{instance}}{Experiments only: Problem instance.} #' \item{\code{algorithm}}{Experiments only: \code{\link{Algorithm}}.} #' \item{\code{repl}}{Experiments only: Replication number.} #' } #' #' Note that the slots \dQuote{pars}, \dQuote{fun}, \dQuote{algorithm} and \dQuote{problem} #' lazy-load required files from the file system and construct the object on the first access. #' The realizations are cached for all slots except \dQuote{instance} (which might be stochastic). #' #' Jobs and Experiments can be executed manually with \code{\link{execJob}}. #' #' @template id #' @param reader [\code{RDSReader} | \code{NULL}]\cr #' Reader object to retrieve files. Used internally to cache reading from the file system. #' The default (\code{NULL}) does not make use of caching. #' @template reg #' @return [\code{Job} | \code{Experiment}]. 
#' @aliases Job Experiment
#' @rdname JobExperiment
#' @export
#' @examples
#' \dontshow{ batchtools:::example_push_temp(1) }
#' tmp = makeRegistry(file.dir = NA, make.default = FALSE)
#' batchMap(function(x, y) x + y, x = 1:2, more.args = list(y = 99), reg = tmp)
#' submitJobs(resources = list(foo = "bar"), reg = tmp)
#' job = makeJob(1, reg = tmp)
#' print(job)
#'
#' # Get the parameters:
#' job$pars
#'
#' # Get the job resources:
#' job$resources
#'
#' # Execute the job locally:
#' execJob(job)
makeJob = function(id, reader = NULL, reg = getDefaultRegistry()) {
  # dispatches on the class of the registry, not on 'id'
  UseMethod("makeJob", object = reg)
}

#' @export
makeJob.Registry = function(id, reader = NULL, reg = getDefaultRegistry()) {
  row = mergedJobs(reg, convertId(reg, id), c("job.id", "job.pars", "resource.id"))
  # resources stored for the job's resource.id; empty list if never submitted
  resources = reg$resources[row, "resources", on = "resource.id", nomatch = NA]$resources[[1L]] %??% list()
  Job$new(file.dir = reg$file.dir, reader %??% RDSReader$new(FALSE), id = row$job.id,
    job.pars = row$job.pars[[1L]], seed = getSeed(reg$seed, row$job.id), resources = resources)
}

#' @export
makeJob.ExperimentRegistry = function(id, reader = NULL, reg = getDefaultRegistry()) {
  row = mergedJobs(reg, convertId(reg, id), c("job.id", "problem", "prob.pars", "algorithm", "algo.pars", "repl", "resource.id"))
  resources = reg$resources[row, "resources", on = "resource.id", nomatch = NA]$resources[[1L]] %??% list()
  Experiment$new(file.dir = reg$file.dir, reader %??% RDSReader$new(FALSE), id = row$job.id, prob.pars = row$prob.pars[[1L]], algo.pars = row$algo.pars[[1L]],
    seed = getSeed(reg$seed, row$job.id), repl = row$repl, resources = resources, prob.name = row$problem, algo.name = row$algorithm)
}

# Internal: construct the i-th job of a JobCollection on the slave.
# Unlike makeJob(), this works without access to the registry itself.
getJob = function(jc, i, reader = NULL) {
  UseMethod("getJob")
}

getJob.JobCollection = function(jc, i, reader = RDSReader$new(FALSE)) {
  row = jc$jobs[i]
  Job$new(file.dir = jc$file.dir, reader = reader, id = row$job.id, job.pars = row$job.pars[[1L]],
    seed = getSeed(jc$seed, row$job.id), resources = jc$resources)
}
getJob.ExperimentCollection = function(jc, i, reader = RDSReader$new(FALSE)) {
  row = jc$jobs[i]
  Experiment$new(file.dir = jc$file.dir, reader = reader, id = row$job.id, prob.pars = row$prob.pars[[1L]], algo.pars = row$algo.pars[[1L]],
    seed = getSeed(jc$seed, row$job.id), repl = row$repl, resources = jc$resources, prob.name = row$problem, algo.name = row$algorithm,
    compress = jc$compress)
}
batchtools/R/clusterFunctionsSocket.R0000644000176200001440000000434213432537710017473 0ustar liggesusers# Pool of local snow socket workers.
# 'pids' tracks which worker slot is busy: element i holds the job hash of the
# collection running on worker i, or "" if the slot is free.
Socket = R6Class("Socket", cloneable = FALSE,
  public = list(
    cl = NULL,
    pids = NULL,
    initialize = function(ncpus) {
      loadNamespace("snow")
      self$cl = snow::makeSOCKcluster(rep.int("localhost", ncpus))
      self$pids = character(ncpus)
      # make sure workers are shut down when the pool is garbage collected
      reg.finalizer(self, function(e) if (!is.null(e$cl)) { snow::stopCluster(e$cl); self$cl = NULL }, onexit = TRUE)
    },
    spawn = function(jc, ...) {
      force(jc)
      # all slots busy: block until one worker finishes, then free its slot
      if (all(nzchar(self$pids))) {
        res = snow::recvOneResult(self$cl)
        self$pids[self$pids == res$tag] = ""
      }
      i = wf(!nzchar(self$pids))
      # fire-and-forget: return = FALSE, result collected later via recvOneResult
      snow::sendCall(self$cl[[i]], doJobCollection, list(jc = jc, output = jc$log.file), return = FALSE, tag = jc$job.hash)
      self$pids[i] = jc$job.hash
      invisible(jc$job.hash)
    },
    list = function() {
      # return hashes of collections still running; reap finished workers first
      if (is.null(self$cl))
        return(character(0L))
      sl = lapply(self$cl, function(x) x$con)
      finished = which(socketSelect(sl, write = FALSE, timeout = 1))
      for (i in seq_along(finished)) {
        res = snow::recvOneResult(self$cl)
        self$pids[self$pids == res$tag] = ""
      }
      self$pids[nzchar(self$pids)]
    }
  )
)

#' @title ClusterFunctions for Parallel Socket Execution
#'
#' @description
#' Jobs are spawned asynchronously using the package \pkg{snow}.
#'
#' @template ncpus
#' @inheritParams makeClusterFunctions
#' @return [\code{\link{ClusterFunctions}}].
#' @family ClusterFunctions
#' @export
makeClusterFunctionsSocket = function(ncpus = NA_integer_, fs.latency = 65) {
  assertCount(ncpus, positive = TRUE, na.ok = TRUE)
  if (is.na(ncpus)) {
    # fall back to the locally available cores, but use at least one
    ncpus = max(getOption("mc.cores", parallel::detectCores()), 1L, na.rm = TRUE)
    info("Auto-detected %i CPUs", ncpus)
  }
  pool = Socket$new(ncpus)

  # Hand the job collection over to the worker pool; spawn() blocks while
  # all workers are busy. The job hash doubles as the batch id.
  submitJob = function(reg, jc) {
    assertRegistry(reg, writeable = TRUE)
    assertClass(jc, "JobCollection")
    pool$spawn(jc)
    makeSubmitJobResult(status = 0L, batch.id = jc$job.hash)
  }

  # A job counts as running while its hash is still registered in the pool.
  listJobsRunning = function(reg) {
    assertRegistry(reg, writeable = FALSE)
    pool$list()
  }

  makeClusterFunctions(
    name = "Socket",
    submitJob = submitJob,
    listJobsRunning = listJobsRunning,
    store.job.collection = FALSE,
    fs.latency = fs.latency,
    # reap finished workers before the registry is synchronized
    hooks = list(pre.sync = function(reg, fns) pool$list())
  )
}
batchtools/R/clusterFunctionsInteractive.R0000644000176200001440000000333313305515204020510 0ustar liggesusers#' @title ClusterFunctions for Sequential Execution in the Running R Session
#'
#' @description
#' All jobs are executed sequentially using the current R process in which \code{\link{submitJobs}} is called.
#' Thus, \code{submitJob} blocks the session until the job has finished.
#' The main use of this \code{ClusterFunctions} implementation is to test and debug programs on a local computer.
#'
#' Listing jobs returns an empty vector (as no jobs can be running when you call this)
#' and \code{killJob} is not implemented for the same reasons.
#'
#' @param external [\code{logical(1)}]\cr
#'   If set to \code{TRUE}, jobs are started in a fresh R session instead of currently active but still
#'   waits for its termination.
#'   Default is \code{FALSE}.
#' @param write.logs [\code{logical(1)}]\cr
#'   Sink the output to log files. Turning logging off can increase the speed of
#'   calculations but makes it very difficult to debug.
#'   Default is \code{TRUE}.
#' @inheritParams makeClusterFunctions
#' @return [\code{\link{ClusterFunctions}}].
#' @family ClusterFunctions
#' @export
makeClusterFunctionsInteractive = function(external = FALSE, write.logs = TRUE, fs.latency = 0) {
  assertFlag(external)
  assertFlag(write.logs)

  submitJob = function(reg, jc) {
    assertRegistry(reg, writeable = TRUE)
    assertClass(jc, "JobCollection")
    if (external) {
      # spawn a fresh R session; the external process always writes the log file
      runOSCommand(Rscript(), sprintf("-e \"batchtools::doJobCollection('%s', output = '%s')\"", jc$uri, jc$log.file))
    } else {
      # FIX: honor 'write.logs' -- it was previously asserted but never used,
      # so logging could not actually be disabled. With output = NULL the
      # output is printed to the console instead of sunk to the log file.
      doJobCollection(jc, output = if (write.logs) jc$log.file else NULL)
    }
    makeSubmitJobResult(status = 0L, batch.id = "cfInteractive")
  }

  makeClusterFunctions(name = "Interactive", submitJob = submitJob, store.job.collection = external, fs.latency = fs.latency)
}
batchtools/R/batchMapResults.R0000644000176200001440000000540413276253713016055 0ustar liggesusers#' @title Map Over Results to Create New Jobs
#'
#' @description
#' This function allows you to create new computational jobs (just like \code{\link{batchMap}} based on the results of
#' a \code{\link{Registry}}.
#'
#' @note
#' The URI to the result files in registry \code{source} is hard coded as parameter in the \code{target} registry.
#' This means that \code{target} is currently not portable between systems for computation.
#'
#' @templateVar ids.default findDone
#' @param fun [\code{function}]\cr
#'   Function which takes the result as first (unnamed) argument.
#' @template ids
#' @param ... [ANY]\cr
#'   Arguments to vectorize over (list or vector). Passed to \code{\link{batchMap}}.
#' @template more.args
#' @param target [\code{\link{Registry}}]\cr
#'   Empty Registry where new jobs are created for.
#' @param source [\code{\link{Registry}}]\cr
#'   Registry. If not explicitly passed, uses the default registry (see \code{\link{setDefaultRegistry}}).
#' @return [\code{\link{data.table}}] with ids of jobs added to \code{target}.
#' @export
#' @family Results
#' @examples
#' \dontshow{ batchtools:::example_push_temp(2) }
#' # Source registry: calculate square of some numbers
#' tmp = makeRegistry(file.dir = NA, make.default = FALSE)
#' batchMap(function(x) list(square = x^2), x = 1:10, reg = tmp)
#' submitJobs(reg = tmp)
#' waitForJobs(reg = tmp)
#'
#' # Target registry: calculate the square root on results of first registry
#' target = makeRegistry(file.dir = NA, make.default = FALSE)
#' batchMapResults(fun = function(x, y) list(sqrt = sqrt(x$square)), ids = 4:8,
#'   target = target, source = tmp)
#' submitJobs(reg = target)
#' waitForJobs(reg = target)
#'
#' # Map old to new ids. First, get a table with results and parameters
#' results = unwrap(rjoin(getJobPars(reg = target), reduceResultsDataTable(reg = target)))
#' print(results)
#'
#' # Parameter '.id' points to job.id in 'source'. Use a inner join to combine:
#' ijoin(results, unwrap(reduceResultsDataTable(reg = tmp)), by = c(".id" = "job.id"))
batchMapResults = function(fun, ids = NULL, ..., more.args = list(), target, source = getDefaultRegistry()) {
  assertRegistry(source, sync = TRUE)
  assertRegistry(target, writeable = TRUE, sync = TRUE)
  assertFunction(fun)
  ids = convertIds(source, ids, default = .findDone(reg = source))
  assertList(more.args, names = "strict")
  if (nrow(target$status) > 0L)
    stop("Target registry 'target' must be empty")

  # Result file paths of 'source' are baked into the target jobs as a named
  # vector (names = source job ids); see the portability note above.
  fns = getResultFiles(source, ids)
  names(fns) = ids$job.id
  more.args = c(list(.fn = fns, .fun = fun), more.args)
  # '.id' is vectorized over, so each new job knows its source job id
  args = c(list(.id = ids$job.id), list(...))
  batchMap(batchMapResultsWrapper, args = args, more.args = more.args, reg = target)
}

# Internal worker: load the source result for '.id' and apply the user function.
batchMapResultsWrapper = function(.fun, .fn, .id, ...) {
  .fun(readRDS(.fn[[as.character(.id)]]), ...)
}
batchtools/R/Hooks.R0000644000176200001440000000544213435713470014037 0ustar liggesusers#' @title Trigger Evaluation of Custom Function
#'
#' @description
#' Hooks allow to trigger function calls on specific events.
#' They can be specified via the \code{\link{ClusterFunctions}} and are triggered on the following events: #' \describe{ #' \item{\code{pre.sync}}{\code{function(reg, fns, ...)}: Run before synchronizing the registry on the master. \code{fn} is the character vector of paths to the update files.} #' \item{\code{post.sync}}{\code{function(reg, updates, ...)}: Run after synchronizing the registry on the master. \code{updates} is the data.table of processed updates.} #' \item{\code{pre.submit.job}}{\code{function(reg, ...)}: Run before a job is successfully submitted to the scheduler on the master.} #' \item{\code{post.submit.job}}{\code{function(reg, ...)}: Run after a job is successfully submitted to the scheduler on the master.} #' \item{\code{pre.submit}}{\code{function(reg, ...)}: Run before any job is submitted to the scheduler.} #' \item{\code{post.submit}}{\code{function(reg, ...)}: Run after a jobs are submitted to the schedule.} #' \item{\code{pre.do.collection}}{\code{function(reg, reader, ...)}: Run before starting the job collection on the slave. #' \code{reader} is an internal cache object.} #' \item{\code{post.do.collection}}{\code{function(reg, updates, reader, ...)}: Run after all jobs in the chunk are terminated on the slave. #' \code{updates} is a \code{\link{data.table}} of updates which will be merged with the \code{\link{Registry}} by the master. #' \code{reader} is an internal cache object.} #' \item{\code{pre.kill}}{\code{function(reg, ids, ...)}: Run before any job is killed.} #' \item{\code{post.kill}}{\code{function(reg, ids, ...)}: Run after jobs are killed. \code{ids} is the return value of \code{\link{killJobs}}.} #' } #' #' @param obj [\link{Registry} | \link{JobCollection}]\cr #' Registry which contains the \link{ClusterFunctions} with element \dQuote{hooks} #' or a \link{JobCollection} which holds the subset of functions which are executed #' remotely. #' @param hook [\code{character(1)}]\cr #' ID of the hook as string. #' @param ... 
#'   [ANY]\cr
#'   Additional arguments passed to the function referenced by \code{hook}.
#'   See description.
#' @return Return value of the called function, or \code{NULL} if there is no hook
#'   with the specified ID.
#' @aliases Hooks Hook
#' @export
runHook = function(obj, hook, ...) {
  UseMethod("runHook")
}

#' @export
runHook.Registry = function(obj, hook, ...) {
  # hooks of a registry live in its cluster functions
  hook.fun = obj$cluster.functions$hooks[[hook]]
  if (!is.null(hook.fun)) {
    "!DEBUG [runHook]: Running hook '`hook`'"
    return(hook.fun(obj, ...))
  }
  NULL
}

#' @export
runHook.JobCollection = function(obj, hook, ...) {
  # on the slave, only the relevant subset of hooks ships with the collection
  hook.fun = obj$hooks[[hook]]
  if (!is.null(hook.fun)) {
    "!DEBUG [runHook]: Running hook '`hook`'"
    return(hook.fun(obj, ...))
  }
  NULL
}

# batchtools/R/JobNames.R0000644000176200001440000000263413263666045014452 0ustar liggesusers (archive header)
#' @title Set and Retrieve Job Names
#' @name JobNames
#'
#' @description
#' Set custom names for jobs. These are passed to the template as \sQuote{job.name}.
#' If no custom name is set (or any of the job names of the chunk is missing),
#' the job hash is used as job name.
#' Individual job names can be accessed via \code{jobs$job.name}.
#'
#' @templateVar ids.default all
#' @template ids
#' @param names [\code{character}]\cr
#'   Character vector of the same length as provided ids.
#' @template reg
#' @return \code{setJobNames} returns \code{NULL} invisibly, \code{getJobTable}
#'   returns a \code{data.table} with columns \code{job.id} and \code{job.name}.
#' @export
#' @examples
#' \dontshow{ batchtools:::example_push_temp(1) }
#' tmp = makeRegistry(file.dir = NA, make.default = FALSE)
#' ids = batchMap(identity, 1:10, reg = tmp)
#' setJobNames(ids, letters[1:nrow(ids)], reg = tmp)
#' getJobNames(reg = tmp)
setJobNames = function(ids = NULL, names, reg = getDefaultRegistry()) {
  assertRegistry(reg, writeable = TRUE)
  ids = convertIds(reg, ids, default = noIds())
  assertCharacter(names, min.chars = 1L, len = nrow(ids))
  # update by reference, then persist the registry on disk
  reg$status[ids, "job.name" := names]
  saveRegistry(reg)
  invisible(NULL)
}

#' @export
#' @rdname JobNames
getJobNames = function(ids = NULL, reg = getDefaultRegistry()) {
  assertRegistry(reg)
  ids = convertIds(reg, ids, default = allIds(reg))
  reg$status[ids, c("job.id", "job.name")]
}
batchtools/R/removeRegistry.R0000644000176200001440000000237013245733511015774 0ustar liggesusers#' @title Remove a Registry from the File System
#'
#' @description
#' All files will be erased from the file system, including all results.
#' If you wish to remove only intermediate files, use \code{\link{sweepRegistry}}.
#'
#' @param wait [\code{numeric(1)}]\cr
#'   Seconds to wait before proceeding. This is a safety measure to not
#'   accidentally remove your precious files. Set to 0 in
#'   non-interactive scripts to disable this precaution.
#' @template reg
#'
#' @return [\code{character(1)}]: Path of the deleted file directory.
#' @export
#' @family Registry
#' @examples
#' \dontshow{ batchtools:::example_push_temp(1) }
#' tmp = makeRegistry(file.dir = NA, make.default = FALSE)
#' removeRegistry(0, tmp)
removeRegistry = function(wait = 5, reg = getDefaultRegistry()) {
  assertRegistry(reg, writeable = TRUE, sync = TRUE, running.ok = FALSE)
  assertNumber(wait, lower = 0)
  if (wait > 0) {
    # grace period so an interactive user can still abort
    info("This deletes all files in '%s'. Proceeding in %g seconds ...", reg$file.dir, wait)
    Sys.sleep(wait)
  }
  # unset the default registry if it points to the directory being removed
  if (identical(batchtools$default.registry$file.dir, reg$file.dir)) {
    info("Unsetting registry as default")
    setDefaultRegistry(NULL)
  }
  info("Recursively removing files in '%s' ...", reg$file.dir)
  # dir_delete() returns the deleted path, matching the documented return value
  fs::dir_delete(reg$file.dir)
}
batchtools/R/estimateRuntimes.R0000644000176200001440000001355413543336703016315 0ustar liggesusers#' @title Estimate Remaining Runtimes
#'
#' @description
#' Estimates the runtimes of jobs using the random forest implemented in \pkg{ranger}.
#' Observed runtimes are retrieved from the \code{\link{Registry}} and runtimes are
#' predicted for unfinished jobs.
#'
#' The estimated remaining time is calculated in the \code{print} method.
#' You may also pass \code{n} here to determine the number of parallel jobs which is then used
#' in a simple Longest Processing Time (LPT) algorithm to give an estimate for the parallel runtime.
#'
#' @param tab [\code{\link{data.table}}]\cr
#'   Table with column \dQuote{job.id} and additional columns to predict the runtime.
#'   Observed runtimes will be looked up in the registry and serve as dependent variable.
#'   All columns in \code{tab} except \dQuote{job.id} will be passed to \code{\link[ranger]{ranger}} as
#'   independent variables to fit the model.
#' @param ... [ANY]\cr
#'   Additional parameters passed to \code{\link[ranger]{ranger}}. Ignored for the \code{print} method.
#' @template reg
#' @return [\code{RuntimeEstimate}] which is a \code{list} with two named elements:
#'   \dQuote{runtimes} is a \code{\link{data.table}} with columns \dQuote{job.id},
#'   \dQuote{runtime} (in seconds) and \dQuote{type} (\dQuote{estimated} if runtime is estimated,
#'   \dQuote{observed} if runtime was observed).
#'   The other element of the list named \dQuote{model} contains the fitted random forest object.
#' @export
#' @seealso \code{\link{binpack}} and \code{\link{lpt}} to chunk jobs according to their estimated runtimes.
#' @examples
#' \dontshow{ batchtools:::example_push_temp(1) }
#' # Create a simple toy registry
#' set.seed(1)
#' tmp = makeExperimentRegistry(file.dir = NA, make.default = FALSE, seed = 1)
#' addProblem(name = "iris", data = iris, fun = function(data, ...) nrow(data), reg = tmp)
#' addAlgorithm(name = "nrow", function(instance, ...) nrow(instance), reg = tmp)
#' addAlgorithm(name = "ncol", function(instance, ...) ncol(instance), reg = tmp)
#' addExperiments(algo.designs = list(nrow = data.table::CJ(x = 1:50, y = letters[1:5])), reg = tmp)
#' addExperiments(algo.designs = list(ncol = data.table::CJ(x = 1:50, y = letters[1:5])), reg = tmp)
#'
#' # We use the job parameters to predict runtimes
#' tab = unwrap(getJobPars(reg = tmp))
#'
#' # First we need to submit some jobs so that the forest can train on some data.
#' # Thus, we just sample some jobs from the registry while grouping by factor variables.
#' library(data.table)
#' ids = tab[, .SD[sample(nrow(.SD), 5)], by = c("problem", "algorithm", "y")]
#' setkeyv(ids, "job.id")
#' submitJobs(ids, reg = tmp)
#' waitForJobs(reg = tmp)
#'
#' # We "simulate" some more realistic runtimes here to demonstrate the functionality:
#' # - Algorithm "ncol" is 5 times more expensive than "nrow"
#' # - x has no effect on the runtime
#' # - If y is "a" or "b", the runtimes are really high
#' runtime = function(algorithm, x, y) {
#'   ifelse(algorithm == "nrow", 100L, 500L) + 1000L * (y %in% letters[1:2])
#' }
#' tmp$status[ids, done := done + tab[ids, runtime(algorithm, x, y)]]
#' rjoin(sjoin(tab, ids), getJobStatus(ids, reg = tmp)[, c("job.id", "time.running")])
#'
#' # Estimate runtimes:
#' est = estimateRuntimes(tab, reg = tmp)
#' print(est)
#' rjoin(tab, est$runtimes)
#' print(est, n = 10)
#'
#' # Submit jobs with longest runtime first:
#' ids = est$runtimes[type == "estimated"][order(runtime, decreasing = TRUE)]
#' print(ids)
#' \dontrun{
#' submitJobs(ids, reg = tmp)
#' }
#'
#' # Group jobs into chunks with runtime < 1h
#' ids = est$runtimes[type == "estimated"]
#' ids[, chunk := binpack(runtime, 3600)]
#' print(ids)
#' print(ids[, list(runtime = sum(runtime)), by = chunk])
#' \dontrun{
#' submitJobs(ids, reg = tmp)
#' }
#'
#' # Group jobs into 10 chunks with similar runtime
#' ids = est$runtimes[type == "estimated"]
#' ids[, chunk := lpt(runtime, 10)]
#' print(ids[, list(runtime = sum(runtime)), by = chunk])
estimateRuntimes = function(tab, ..., reg = getDefaultRegistry()) {
  assertRegistry(reg, sync = TRUE)
  # copy(): all further := operations must not modify the user's table
  data = copy(convertIds(reg, tab, keep.extra = names(tab)))
  if (!requireNamespace("ranger", quietly = TRUE))
    stop("Please install package 'ranger' for runtime estimation")
  # observed runtimes serve as the dependent variable; NA = not finished yet
  data[, "runtime" := as.numeric(getJobStatus(tab, reg)$time.running)]
  i = is.na(data$runtime)
  if (all(i))
    stop("No training data available. Some jobs must be finished before estimating runtimes.")
  # train on finished jobs, predict for unfinished ones
  rf = ranger::ranger(runtime ~ ., data = data[!i, !"job.id"], ...)
  data[i, "runtime" := predict(rf, .SD)$predictions, .SDcols = chsetdiff(names(data), c("job.id", "runtime"))]
  data$type = factor(ifelse(i, "estimated", "observed"), levels = c("observed", "estimated"))
  setClasses(list(runtimes = data[, c("job.id", "type", "runtime")], model = rf), c("RuntimeEstimate", class(data)))
}

#' @rdname estimateRuntimes
#' @param x [\code{RuntimeEstimate}]\cr
#'   Object to print.
#' @param n [\code{integer(1)}]\cr
#'   Number of parallel jobs to assume for runtime estimation.
#' @export
print.RuntimeEstimate = function(x, n = 1L, ...) {
  # format seconds as "<days>d <hh>h <mm>m <s.s>s"; 'nc' pads the day field
  ps = function(x, nc = 2L) {
    sprintf(paste0("%0", nc, "id %02ih %02im %.1fs"),
      floor(x / 86400),
      floor((x / 3600) %% 24L),
      floor((x / 60) %% 60L),
      x %% 60L
    )
  }

  assertCount(n, positive = TRUE)
  # silence R CMD check NOTEs about data.table NSE variables
  runtime = type = NULL
  calculated = x$runtimes[type == "observed", sum(runtime)]
  remaining = x$runtimes[type == "estimated", sum(runtime)]
  total = calculated + remaining
  nc = max(1L, nchar(total %/% 86400))

  catf("Runtime Estimate for %i jobs with %i CPUs", nrow(x$runtimes), n)
  catf(" Done : %s", ps(calculated, nc = nc))
  if (x$runtimes[type == "estimated", .N] > 0L) {
    catf(" Remaining: %s", ps(remaining, nc = nc))
    if (n >= 2L) {
      # parallel estimate: makespan of an LPT schedule over n machines
      rt = x$runtimes[type == "estimated"]$runtime
      catf(" Parallel : %s", ps(max(vnapply(split(rt, lpt(rt, n)), sum)), nc = nc))
    }
  }
  catf(" Total : %s", ps(total, nc = nc))
}
batchtools/R/JobTables.R0000644000176200001440000001107113252221002014602 0ustar liggesusers#' @title Query Job Information
#'
#' @description
#' \code{getJobStatus} returns the internal table which stores information about the computational
#' status of jobs, \code{getJobPars} a table with the job parameters, \code{getJobResources} a table
#' with the resources which were set to submit the jobs, and \code{getJobTags} the tags of the jobs
#' (see \link{Tags}).
#'
#' \code{getJobTable} returns all these tables joined.
#'
#' @templateVar ids.default all
#' @template ids
#' @template reg
#' @return [\code{\link{data.table}}] with the following columns (not necessarily in this order):
#' \describe{
#'   \item{job.id}{Unique Job ID as integer.}
#'   \item{submitted}{Time the job was submitted to the batch system as \code{\link[base]{POSIXct}}.}
#'   \item{started}{Time the job was started on the batch system as \code{\link[base]{POSIXct}}.}
#'   \item{done}{Time the job terminated (successfully or with an error) as \code{\link[base]{POSIXct}}.}
#'   \item{error}{Either \code{NA} if the job terminated successfully or the error message.}
#'   \item{mem.used}{Estimate of the memory usage.}
#'   \item{batch.id}{Batch ID as reported by the scheduler.}
#'   \item{log.file}{Log file. If missing, defaults to \code{[job.hash].log}.}
#'   \item{job.hash}{Unique string identifying the job or chunk.}
#'   \item{time.queued}{Time in seconds (as \code{\link[base]{difftime}}) the job was queued.}
#'   \item{time.running}{Time in seconds (as \code{\link[base]{difftime}}) the job was running.}
#'   \item{pars}{List of parameters/arguments for this job.}
#'   \item{resources}{List of computational resources set for this job.}
#'   \item{tags}{Tags as joined string, delimited by \dQuote{,}.}
#'   \item{problem}{Only for \code{\link{ExperimentRegistry}}: the problem identifier.}
#'   \item{algorithm}{Only for \code{\link{ExperimentRegistry}}: the algorithm identifier.}
#' }
#' @export
#' @examples
#' \dontshow{ batchtools:::example_push_temp(1) }
#' tmp = makeRegistry(file.dir = NA, make.default = FALSE)
#' f = function(x) if (x < 0) stop("x must be > 0") else sqrt(x)
#' batchMap(f, x = c(-1, 0, 1), reg = tmp)
#' submitJobs(reg = tmp)
#' waitForJobs(reg = tmp)
#' addJobTags(1:2, "tag1", reg = tmp)
#' addJobTags(2, "tag2", reg = tmp)
#'
#' # Complete table:
#' getJobTable(reg = tmp)
#'
#' # Job parameters:
#' getJobPars(reg = tmp)
#'
#' # Set and retrieve tags:
#' getJobTags(reg = tmp)
#'
#' # Job parameters with tags right-joined:
#' rjoin(getJobPars(reg = tmp), getJobTags(reg = tmp))
getJobTable = function(ids = NULL, reg = getDefaultRegistry()) {
  assertRegistry(reg)
  ids = convertIds(reg, ids)
  # chained keyed joins on 'job.id' combine status, pars, resources and tags
  getJobStatus(ids, reg = reg)[getJobPars(ids, reg = reg)][getJobResources(ids = ids, reg = reg)][getJobTags(ids = ids, reg = reg)]
}

#' @export
#' @rdname getJobTable
getJobStatus = function(ids = NULL, reg = getDefaultRegistry()) {
  assertRegistry(reg, sync = TRUE)
  # silence R CMD check NOTEs about data.table NSE variables
  submitted = started = done = NULL
  cols = chsetdiff(names(reg$status), c("def.id", "resource.id"))
  tab = filter(reg$status, convertIds(reg, ids), cols)
  # timestamps are stored as epoch seconds; convert to POSIXct for the user
  tab[, "submitted" := as.POSIXct(submitted, origin = "1970-01-01")]
  tab[, "started" := as.POSIXct(started, origin = "1970-01-01")]
  tab[, "done" := as.POSIXct(done, origin = "1970-01-01")]
  tab[, "time.queued" := difftime(started, submitted, units = "secs")]
  tab[, "time.running" := difftime(done, started, units = "secs")]
  tab[]
}

#' @export
#' @rdname getJobTable
getJobResources = function(ids = NULL, reg = getDefaultRegistry()) {
  assertRegistry(reg)
  ids = convertIds(reg, ids)
  # all.x = TRUE keeps jobs which were never submitted (resources = NA)
  tab = merge(filter(reg$status, ids, c("job.id", "resource.id")), reg$resources, all.x = TRUE, by = "resource.id")[, c("job.id", "resources")]
  setkeyv(tab, "job.id")[]
}

#' @export
#' @rdname getJobTable
getJobPars = function(ids = NULL, reg = getDefaultRegistry()) {
  assertRegistry(reg)
  UseMethod("getJobPars", object = reg)
}

#' @export
getJobPars.Registry = function(ids = NULL, reg = getDefaultRegistry()) {
  ids = convertIds(reg, ids)
  tab = mergedJobs(reg, ids, c("job.id", "job.pars"))
  setkeyv(tab, "job.id")[]
}

#' @export
getJobPars.ExperimentRegistry = function(ids = NULL, reg = getDefaultRegistry()) {
  ids = convertIds(reg, ids)
  tab = mergedJobs(reg, ids, c("job.id", "problem", "prob.pars", "algorithm", "algo.pars"))
  setkeyv(tab, "job.id")[]
}

#' @export
#' @rdname getJobTable
getJobTags = function(ids = NULL, reg = getDefaultRegistry()) {
  assertRegistry(reg)
  ids = convertIds(reg, ids, default = allIds(reg))
  tag = NULL
  # one row per job, tags collapsed into a single comma-separated string
  reg$tags[ids, on = "job.id"][, list(tags = stri_flatten(sort(tag, na.last = TRUE), ",")), by = "job.id"]
}
batchtools/R/sleep.R0000644000176200001440000000071113334504325014051 0ustar liggesusers# Resolve the 'sleep' argument of waiting functions into function(i) which
# sleeps depending on the iteration counter 'i'.
# Precedence: user-supplied 'sleep' > reg$sleep > exponential backoff default
# (5s growing towards 120s as iterations increase).
getSleepFunction = function(reg, sleep) {
  if (is.null(sleep)) {
    if (is.null(reg$sleep))
      return(function(i) { Sys.sleep(5 + 115 * pexp(i - 1, rate = 0.01)) })
    sleep = reg$sleep
  }

  if (is.numeric(sleep)) {
    # constant sleep duration
    assertNumber(sleep, lower = 0)
    return(function(i) Sys.sleep(sleep))
  }

  if (is.function(sleep)) {
    # user-supplied function of the iteration counter
    return(function(i) Sys.sleep(sleep(i)))
  }

  stop("Argument 'sleep' must be either a numeric value or function(i)")
}
batchtools/R/resetJobs.R0000644000176200001440000000231113453602073014700 0ustar liggesusers#' @title Reset the Computational State of Jobs
#'
#' @description
#' Resets the computational state of jobs in the \code{\link{Registry}}.
#' This function automatically checks if any of the jobs to reset is either pending or running.
#' However, if the implemented heuristic fails, this can lead to inconsistencies in the data base.
#' Use with care while jobs are running.
#'
#' @templateVar ids.default none
#' @template ids
#' @template reg
#' @return [\code{\link{data.table}}] of job ids which have been reset.
#'   See \code{\link{JoinTables}} for examples on working with job tables.
#' @family debug #' @export resetJobs = function(ids = NULL, reg = getDefaultRegistry()) { assertRegistry(reg, writeable = TRUE, sync = TRUE, running.ok = FALSE) ids = convertIds(reg, ids, default = noIds()) info("Resetting %i jobs in DB ...", nrow(ids)) cols = c("submitted", "started", "done", "error", "mem.used", "resource.id", "batch.id", "log.file", "job.hash") reg$status[ids, (cols) := list(NA_real_, NA_real_, NA_real_, NA_character_, NA_real_, NA_integer_, NA_character_, NA_character_, NA_character_), on = "job.id"] fns = getResultFiles(reg, ids) file_remove(fns) sweepRegistry(reg) invisible(ids) } batchtools/R/Algorithm.R0000644000176200001440000000547213606051742014702 0ustar liggesusers#' @title Define Algorithms for Experiments #' #' @description #' Algorithms are functions which get the code{data} part as well as the problem instance (the return value of the #' function defined in \code{\link{Problem}}) and return an arbitrary R object. #' #' This function serializes all components to the file system and registers the algorithm in the \code{\link{ExperimentRegistry}}. #' #' \code{removeAlgorithm} removes all jobs from the registry which depend on the specific algorithm. #' \code{reg$algorithms} holds the IDs of already defined algorithms. #' #' @param name [\code{character(1)}]\cr #' Unique identifier for the algorithm. #' @param fun [\code{function}]\cr #' The algorithm function. The static problem part is passed as \dQuote{data}, the generated #' problem instance is passed as \dQuote{instance} and the \code{\link{Job}}/\code{\link{Experiment}} as \dQuote{job}. #' Therefore, your function must have the formal arguments \dQuote{job}, \dQuote{data} and \dQuote{instance} (or dots \code{...}). #' #' If you do not provide a function, it defaults to a function which just returns the instance. #' @template expreg #' @return [\code{Algorithm}]. Object of class \dQuote{Algorithm}. 
#' @aliases Algorithm #' @seealso \code{\link{Problem}}, \code{\link{addExperiments}} #' @export addAlgorithm = function(name, fun = NULL, reg = getDefaultRegistry()) { assertRegistry(reg, class = "ExperimentRegistry", writeable = TRUE) assertString(name, min.chars = 1L) if (!stri_detect_regex(name, "^[[:alnum:]_.-]+$")) stopf("Illegal characters in problem name: %s", name) if (is.null(fun)) { fun = function(job, data, instance, ...) instance } else { assert(checkFunction(fun, args = c("job", "data", "instance")), checkFunction(fun, args = "...")) } info("Adding algorithm '%s'", name) algo = setClasses(list(fun = fun, name = name), "Algorithm") writeRDS(algo, file = getAlgorithmURI(reg, name), compress = reg$compress) reg$algorithms = union(reg$algorithms, name) saveRegistry(reg) invisible(algo) } #' @export #' @rdname addAlgorithm removeAlgorithms = function(name, reg = getDefaultRegistry()) { assertRegistry(reg, class = "ExperimentRegistry", writeable = TRUE, running.ok = FALSE) assertCharacter(name, any.missing = FALSE) assertSubset(name, reg$algorithms) algorithm = NULL for (nn in name) { def.ids = reg$defs[algorithm == nn, "def.id"] job.ids = filter(def.ids, reg$status, "job.id") info("Removing Algorithm '%s' and %i corresponding jobs ...", nn, nrow(job.ids)) file_remove(getAlgorithmURI(reg, nn)) reg$defs = reg$defs[!def.ids] reg$status = reg$status[!job.ids] reg$algorithms = chsetdiff(reg$algorithms, nn) } sweepRegistry(reg) invisible(TRUE) } getAlgorithmURI = function(reg, name) { fs::path(dir(reg, "algorithms"), mangle(name)) } batchtools/R/loadRegistry.R0000644000176200001440000000733313301520663015415 0ustar liggesusers#' @title Load a Registry from the File System #' #' @description #' Loads a registry from its \code{file.dir}. #' #' Multiple R sessions accessing the same registry simultaneously can lead to database inconsistencies. #' This is especially dangerous if the same \code{file.dir} is accessed from multiple machines, e.g. via a mount. 
#' #' If you just need to check on the status or peek into some preliminary results while another process is still submitting or waiting #' for pending results, you can load the registry in a read-only mode. #' All operations that need to change the registry will raise an exception in this mode. #' Files communicated back by the computational nodes are parsed to update the registry in memory while the registry on the file system remains unchanged. #' #' A heuristic tries to detect if the registry has been altered in the background by an other process and in this case automatically restricts the current registry to read-only mode. #' However, you should rely on this heuristic to work flawlessly. #' Thus, set to \code{writeable} to \code{TRUE} if and only if you are absolutely sure that other state-changing processes are terminated. #' #' If you need write access, load the registry with \code{writeable} set to \code{TRUE}. #' #' @param writeable [\code{logical(1)}]\cr #' Loads the registry in read-write mode. Default is \code{FALSE}. #' @inheritParams makeRegistry #' @family Registry #' @return [\code{\link{Registry}}]. #' @export loadRegistry = function(file.dir, work.dir = NULL, conf.file = findConfFile(), make.default = TRUE, writeable = FALSE) { assertString(file.dir) assertDirectory(file.dir) assertString(work.dir, null.ok = TRUE) assertString(conf.file, na.ok = TRUE) assertFlag(make.default) assertFlag(writeable) # read registry if (writeable) info("Reading registry in read-write mode") else info(stri_paste( "Reading registry in read-only mode.", "You can inspect results and errors, but cannot add, remove, submit or alter jobs in any way.", "If you need write-access, re-load the registry with `loadRegistry([...], writeable = TRUE)`." 
)) file.dir = fs::path_abs(file.dir) reg = readRegistry(file.dir) # re-allocate stuff which has not been serialized reg$file.dir = file.dir reg$writeable = writeable reg$mtime = file_mtime(fs::path(reg$file.dir, "registry.rds")) alloc.col(reg$status, ncol(reg$status)) alloc.col(reg$defs, ncol(reg$defs)) alloc.col(reg$resources, ncol(reg$resources)) alloc.col(reg$tags, ncol(reg$tags)) if (!is.null(work.dir)) reg$work.dir = fs::path_abs(work.dir) updated = updateRegistry(reg = reg) # try to load dependencies relative to work.dir if (fs::dir_exists(reg$work.dir)) { with_dir(reg$work.dir, loadRegistryDependencies(reg)) } else { warningf("The work.dir '%s' does not exist, jobs might fail to run on this system.", reg$work.dir) loadRegistryDependencies(reg) } # source system config setSystemConf(reg, conf.file) if (make.default) batchtools$default.registry = reg if (sync(reg = reg) || updated) saveRegistry(reg) return(reg) } readRegistry = function(file.dir) { fn.old = fs::path(file.dir, "registry.rds") fn.new = fs::path(file.dir, "registry.new.rds") if (fs::file_exists(fn.new)) { reg = try(readRDS(fn.new), silent = TRUE) if (!is.error(reg)) { fs::file_move(fn.new, fn.old) return(reg) } else { warning("Latest version of registry seems to be corrupted, trying backup ...") } } if (fs::file_exists(fn.old)) { reg = try(readRDS(fn.old), silent = TRUE) if (!is.error(reg)) return(reg) stop("Could not load the registry, files seem to be corrupt") } stopf("No registry found in '%s'", file.dir) } batchtools/R/waitForJobs.R0000644000176200001440000001225713453602073015203 0ustar liggesusers#' @title Wait for Termination of Jobs #' #' @description #' This function simply waits until all jobs are terminated. #' #' @templateVar ids.default findSubmitted #' @template ids #' @param sleep [\code{function(i)} | \code{numeric(1)}]\cr #' Parameter to control the duration to sleep between queries. 
#' You can pass an absolute numeric value in seconds or a \code{function(i)} which returns #' the number of seconds to sleep in the \code{i}-th iteration. #' If not provided (\code{NULL}), tries to read the value (number/function) from the configuration file #' (stored in \code{reg$sleep}) or defaults to a function with exponential backoff between #' 5 and 120 seconds. #' @param timeout [\code{numeric(1)}]\cr #' After waiting \code{timeout} seconds, show a message and return #' \code{FALSE}. This argument may be required on some systems where, e.g., #' expired jobs or jobs on hold are problematic to detect. If you don't want #' a timeout, set this to \code{Inf}. Default is \code{604800} (one week). #' @param expire.after [\code{integer(1)}]\cr #' Jobs count as \dQuote{expired} if they are not found on the system but have not communicated back #' their results (or error message). This frequently happens on managed system if the scheduler kills #' a job because the job has hit the walltime or request more memory than reserved. #' On the other hand, network file systems often require several seconds for new files to be found, #' which can lead to false positives in the detection heuristic. #' \code{waitForJobs} treats such jobs as expired after they have not been detected on the system #' for \code{expire.after} iterations. #' If not provided (\code{NULL}), tries to read the value from the configuration file (stored in \code{reg$expire.after}), #' and finally defaults to \code{3}. #' @param stop.on.error [\code{logical(1)}]\cr #' Immediately cancel if a job terminates with an error? Default is #' \code{FALSE}. #' @param stop.on.expire [\code{logical(1)}]\cr #' Immediately cancel if jobs are detected to be expired? Default is \code{FALSE}. #' Expired jobs will then be ignored for the remainder of \code{waitForJobs()}. #' @template reg #' @return [\code{logical(1)}]. 
Returns \code{TRUE} if all jobs terminated #' successfully and \code{FALSE} if either the timeout is reached or at least #' one job terminated with an exception or expired. #' @export waitForJobs = function(ids = NULL, sleep = NULL, timeout = 604800, expire.after = NULL, stop.on.error = FALSE, stop.on.expire = FALSE, reg = getDefaultRegistry()) { waitForResults = function(ids) { waitForFiles( fs::path(reg$file.dir, "results"), sprintf("%i.rds", .findDone(reg, ids)$job.id), reg$cluster.functions$fs.latency ) } assertRegistry(reg, sync = TRUE) assertNumber(timeout, lower = 0) assertFlag(stop.on.error) assertFlag(stop.on.expire) expire.after = assertCount(expire.after, positive = TRUE, null.ok = TRUE) %??% reg$expire.after %??% 3L sleep = getSleepFunction(reg, sleep) ids = convertIds(reg, ids, default = .findSubmitted(reg = reg)) if (nrow(.findNotSubmitted(ids = ids, reg = reg)) > 0L) { warning("Cannot wait for unsubmitted jobs. Removing from ids.") ids = ids[.findSubmitted(ids = ids, reg = reg), nomatch = 0L] } if (nrow(ids) == 0L) { return(TRUE) } terminated = on.sys = expire.counter = NULL ids$terminated = FALSE ids$on.sys = FALSE ids$expire.counter = 0L timeout = Sys.time() + timeout pb = makeProgressBar(total = nrow(ids), format = "Waiting (Q::queued R::running D::done E::error ?::expired) [:bar] :percent eta: :eta") i = 0L repeat { ### case 1: all jobs terminated or expired -> nothing on system ids[.findTerminated(reg, ids), "terminated" := TRUE] if (ids[!(terminated) & expire.counter <= expire.after, .N] == 0L) { "!DEBUG [waitForJobs]: All jobs terminated" pb$update(1) waitForResults(ids) return(nrow(.findDone(reg, ids)) == nrow(ids)) } ### case 2: there are errors and stop.on.error is TRUE if (stop.on.error && nrow(.findErrors(reg, ids)) > 0L) { "!DEBUG [waitForJobs]: Errors found and stop.on.error is TRUE" pb$update(1) return(FALSE) } batch.ids = getBatchIds(reg) ids[, "on.sys" := FALSE][.findOnSystem(reg, ids, batch.ids = batch.ids), "on.sys" := TRUE] 
ids[(on.sys), "expire.counter" := 0L] ids[!(on.sys) & !(terminated), "expire.counter" := expire.counter + 1L] stats = getStatusTable(ids = ids, batch.ids = batch.ids, reg = reg) pb$update(mean(ids$terminated), tokens = as.list(stats)) "!DEBUG [waitForJobs]: batch.ids: `stri_flatten(batch.ids$batch.id, ',')`" ### case 3: jobs disappeared, we cannot find them on the system after [expire.after] iterations if (stop.on.expire && ids[!(terminated) & expire.counter > expire.after, .N] > 0L) { warning("Jobs disappeared from the system") pb$update(1) waitForResults(ids) return(FALSE) } # case 4: we reach a timeout sleep(i) i = i + 1L if (Sys.time() > timeout) { pb$update(1) warning("Timeout reached") return(FALSE) } if (suppressMessages(sync(reg = reg))) saveRegistry(reg) } } batchtools/R/btlapply.R0000644000176200001440000000656613243531011014575 0ustar liggesusers#' @title Synchronous Apply Functions #' #' @description #' This is a set of functions acting as counterparts to the sequential popular apply functions in base R: #' \code{btlapply} for \code{\link[base]{lapply}} and \code{btmapply} for \code{\link[base]{mapply}}. #' #' Internally, jobs are created using \code{\link{batchMap}} on the provided registry. #' If no registry is provided, a temporary registry (see argument \code{file.dir} of \code{\link{makeRegistry}}) and \code{\link{batchMap}} #' will be used. #' After all jobs are terminated (see \code{\link{waitForJobs}}), the results are collected and returned as a list. #' #' Note that these functions are one suitable for short and fail-safe operations #' on batch system. If some jobs fail, you have to retrieve partial results from the #' registry directory yourself. #' #' @param X [\code{\link[base]{vector}}]\cr #' Vector to apply over. #' @param fun [\code{function}]\cr #' Function to apply. #' @param more.args [\code{list}]\cr #' Additional arguments passed to \code{fun}. #' @param ... 
[\code{ANY}]\cr #' Additional arguments passed to \code{fun} (\code{btlapply}) or vectors to map over (\code{btmapply}). #' @inheritParams submitJobs #' @param n.chunks [\code{integer(1)}]\cr #' Passed to \code{\link{chunk}} before \code{\link{submitJobs}}. #' @param chunk.size [\code{integer(1)}]\cr #' Passed to \code{\link{chunk}} before \code{\link{submitJobs}}. #' @template reg #' @return [\code{list}] List with the results of the function call. #' @export #' @examples #' \dontshow{ batchtools:::example_push_temp(1) } #' btlapply(1:3, function(x) x^2) #' btmapply(function(x, y, z) x + y + z, x = 1:3, y = 1:3, more.args = list(z = 1), simplify = TRUE) btlapply = function(X, fun, ..., resources = list(), n.chunks = NULL, chunk.size = NULL, reg = makeRegistry(file.dir = NA)) { assertVector(X) assertFunction(fun) assertRegistry(reg, class = "Registry", writeable = TRUE) ids = batchMap(fun, X, more.args = list(...), reg = reg) if (!is.null(n.chunks) || !is.null(chunk.size)) ids$chunk = chunk(ids$job.id, n.chunks = n.chunks, chunk.size = chunk.size) submitJobs(ids = ids, resources = resources, reg = reg) waitForJobs(ids = ids, reg = reg) reduceResultsList(ids = ids, reg = reg) } #' @export #' @param simplify [\code{logical(1)}]\cr #' Simplify the results using \code{\link[base]{simplify2array}}? #' @param use.names [\code{logical(1)}]\cr #' Use names of the input to name the output? 
#' @rdname btlapply btmapply = function(fun, ..., more.args = list(), simplify = FALSE, use.names = TRUE, resources = list(), n.chunks = NULL, chunk.size = NULL, reg = makeRegistry(file.dir = NA)) { assertFunction(fun) assertFlag(simplify) assertFlag(use.names) assertRegistry(reg, class = "Registry", writeable = TRUE) ids = batchMap(fun, ..., more.args = more.args, reg = reg) if (!is.null(n.chunks) || !is.null(chunk.size)) ids$chunk = chunk(ids$job.id, n.chunks = n.chunks, chunk.size = chunk.size) submitJobs(ids = ids, resources = resources, reg = reg) waitForJobs(ids = ids, reg = reg) res = reduceResultsList(ids = ids, reg = reg) if (use.names) { x = head(list(...), 1L) if (length(x) > 0L) { x = x[[1L]] if (is.null(names(x))) { if(is.character(x)) names(res) = x } else { names(res) = names(x) } } } if (simplify) simplify2array(res) else res } batchtools/R/clusterFunctionsSSH.R0000644000176200001440000000514413240541606016675 0ustar liggesusers#' @title ClusterFunctions for Remote SSH Execution #' #' @description #' Jobs are spawned by starting multiple R sessions via \code{Rscript} over SSH. #' If the hostname of the \code{\link{Worker}} equals \dQuote{localhost}, #' \code{Rscript} is called directly so that you do not need to have an SSH client installed. #' #' @param workers [\code{list} of \code{\link{Worker}}]\cr #' List of Workers as constructed with \code{\link{Worker}}. #' @inheritParams makeClusterFunctions #' #' @note #' If you use a custom \dQuote{.ssh/config} file, make sure your #' ProxyCommand passes \sQuote{-q} to ssh, otherwise each output will #' end with the message \dQuote{Killed by signal 1} and this will break #' the communication with the nodes. #' #' @return [\code{\link{ClusterFunctions}}]. 
#' @family ClusterFunctions #' @export #' @examples #' \dontrun{ #' # cluster functions for multicore execution on the local machine #' makeClusterFunctionsSSH(list(Worker$new("localhost", ncpus = 2))) #' } makeClusterFunctionsSSH = function(workers, fs.latency = 65) { # nocov start assertList(workers, types = "Worker") names(workers) = vcapply(workers, "[[", "nodename") if (anyDuplicated(names(workers))) stop("Duplicated hostnames found in list of workers") submitJob = function(reg, jc) { assertRegistry(reg, writeable = TRUE) assertClass(jc, "JobCollection") lapply(workers, function(w) w$update(reg)) rload = vnapply(workers, function(w) w$max.load / w$ncpus) worker = Find(function(w) w$status == "available", sample(workers, prob = 1 / (rload + 0.1)), nomatch = NULL) if (!is.null(worker) && worker$status == "available") { pid = try(worker$start(reg, jc$uri, jc$log.file)) if (is.error(pid)) { makeSubmitJobResult(status = 101L, batch.id = NA_character_, msg = "Submit failed.") } else { makeSubmitJobResult(status = 0L, batch.id = sprintf("%s#%s", worker$nodename, pid$output)) } } else { makeSubmitJobResult(status = 1L, batch.id = NA_character_, msg = sprintf("Busy: %s", workers[[1L]]$status)) } } killJob = function(reg, batch.id) { assertRegistry(reg, writeable = TRUE) assertString(batch.id) nodename = stri_split_fixed(batch.id, "#", n = 2L)[[1L]][1L] workers[[nodename]]$kill(reg, batch.id) } listJobsRunning = function(reg) { assertRegistry(reg, writeable = FALSE) unlist(lapply(workers, function(w) w$list(reg)), use.names = FALSE) } makeClusterFunctions(name = "SSH", submitJob = submitJob, killJob = killJob, listJobsRunning = listJobsRunning, store.job.collection = TRUE, fs.latency = fs.latency) } # nocov end batchtools/R/clusterFunctions.R0000644000176200001440000003426513437462512016333 0ustar liggesusers#' @title ClusterFunctions Constructor #' #' @description #' This is the constructor used to create \emph{custom} cluster functions. 
#' Note that some standard implementations for TORQUE, Slurm, LSF, SGE, etc. ship #' with the package. #' #' @param name [\code{character(1)}]\cr #' Name of cluster functions. #' @param submitJob [\code{function(reg, jc, ...)}]\cr #' Function to submit new jobs. Must return a \code{\link{SubmitJobResult}} object. #' The arguments are \code{reg} (\code{\link{Registry}}) and \code{jobs} (\code{\link{JobCollection}}). #' @param killJob [\code{function(reg, batch.id)}]\cr #' Function to kill a job on the batch system. Make sure that you definitely kill the job! Return #' value is currently ignored. Must have the arguments \code{reg} (\code{\link{Registry}}) and #' \code{batch.id} (\code{character(1)} as returned by \code{submitJob}). #' Note that there is a helper function \code{\link{cfKillJob}} to repeatedly try to kill jobs. #' Set \code{killJob} to \code{NULL} if killing jobs cannot be supported. #' @param listJobsQueued [\code{function(reg)}]\cr #' List all queued jobs on the batch system for the current user. #' Must return an character vector of batch ids, same format as they #' are returned by \code{submitJob}. #' Set \code{listJobsQueued} to \code{NULL} if listing of queued jobs is not supported. #' @param listJobsRunning [\code{function(reg)}]\cr #' List all running jobs on the batch system for the current user. #' Must return an character vector of batch ids, same format as they #' are returned by \code{submitJob}. It does not matter if you return a few job ids too many (e.g. #' all for the current user instead of all for the current registry), but you have to include all #' relevant ones. Must have the argument are \code{reg} (\code{\link{Registry}}). #' Set \code{listJobsRunning} to \code{NULL} if listing of running jobs is not supported. #' @param array.var [\code{character(1)}]\cr #' Name of the environment variable set by the scheduler to identify IDs of job arrays. #' Default is \code{NA} for no array support. 
#' @param store.job.collection [\code{logical(1)}]\cr #' Flag to indicate that the cluster function implementation of \code{submitJob} can not directly handle \code{\link{JobCollection}} objects. #' If set to \code{FALSE}, the \code{\link{JobCollection}} is serialized to the file system before submitting the job. #' @param store.job.files [\code{logical(1)}]\cr #' Flag to indicate that job files need to be stored in the file directory. #' If set to \code{FALSE} (default), the job file is created in a temporary directory, otherwise (or if the debug mode is enabled) in #' the subdirectory \code{jobs} of the \code{file.dir}. #' @param scheduler.latency [\code{numeric(1)}]\cr #' Time to sleep after important interactions with the scheduler to ensure a sane state. #' Currently only triggered after calling \code{\link{submitJobs}}. #' @param fs.latency [\code{numeric(1)}]\cr #' Expected maximum latency of the file system, in seconds. #' Set to a positive number for network file systems like NFS which enables more robust (but also more expensive) mechanisms to #' access files and directories. #' Usually safe to set to \code{0} to disable the heuristic, e.g. if you are working on a local file system. #' @param hooks [\code{list}]\cr #' Named list of functions which will we called on certain events like \dQuote{pre.submit} or \dQuote{post.sync}. #' See \link{Hooks}. 
#' @export #' @aliases ClusterFunctions #' @family ClusterFunctions #' @family ClusterFunctionsHelper makeClusterFunctions = function(name, submitJob, killJob = NULL, listJobsQueued = NULL, listJobsRunning = NULL, array.var = NA_character_, store.job.collection = FALSE, store.job.files = FALSE, scheduler.latency = 0, fs.latency = 0, hooks = list()) { assertList(hooks, types = "function", names = "unique") assertSubset(names(hooks), unlist(batchtools$hooks, use.names = FALSE)) setClasses(list( name = assertString(name, min.chars = 1L), submitJob = assertFunction(submitJob, c("reg", "jc"), null.ok = TRUE), killJob = assertFunction(killJob, c("reg", "batch.id"), null.ok = TRUE), listJobsQueued = assertFunction(listJobsQueued, "reg", null.ok = TRUE), listJobsRunning = assertFunction(listJobsRunning, "reg", null.ok = TRUE), array.var = assertString(array.var, na.ok = TRUE), store.job.collection = assertFlag(store.job.collection), store.job.files = assertFlag(store.job.files), scheduler.latency = assertNumber(scheduler.latency, lower = 0), fs.latency = assertNumber(fs.latency, lower = 0), hooks = hooks), "ClusterFunctions") } #' @export print.ClusterFunctions = function(x, ...) { catf("ClusterFunctions for mode: %s", x$name) catf(" List queued Jobs : %s", !is.null(x$listJobsQueued)) catf(" List running Jobs: %s", !is.null(x$listJobsRunning)) catf(" Kill Jobs : %s", !is.null(x$killJob)) catf(" Hooks : %s", if (length(x$hooks)) stri_flatten(names(x$hooks), ",") else "-") } #' @title Create a SubmitJobResult #' #' @description #' This function is only intended for use in your own cluster functions implementation. #' #' Use this function in your implementation of \code{\link{makeClusterFunctions}} to create a return #' value for the \code{submitJob} function. #' #' @param status [\code{integer(1)}]\cr #' Launch status of job. 0 means success, codes between 1 and 100 are temporary errors and any #' error greater than 100 is a permanent failure. 
#' @param batch.id [\code{character()}]\cr #' Unique id of this job on batch system, as given by the batch system. #' Must be globally unique so that the job can be terminated using just this information. #' For array jobs, this may be a vector of length equal to the number of jobs in the array. #' @param log.file [\code{character()}]\cr #' Log file. If \code{NA}, defaults to \code{[job.hash].log}. #' Some cluster functions set this for array jobs. #' @param msg [\code{character(1)}]\cr #' Optional error message in case \code{status} is not equal to 0. Default is \dQuote{OK}, #' \dQuote{TEMPERROR}, \dQuote{ERROR}, depending on \code{status}. #' @return [\code{\link{SubmitJobResult}}]. A list, containing #' \code{status}, \code{batch.id} and \code{msg}. #' @family ClusterFunctionsHelper #' @aliases SubmitJobResult #' @export makeSubmitJobResult = function(status, batch.id, log.file = NA_character_, msg = NA_character_) { status = asInt(status) if (is.na(msg)) { msg = if (status == 0L) "OK" else if (status <= 100L) "TEMPERROR" else "ERROR" } "!DEBUG [makeSubmitJobResult]: Result for batch.id '`paste0(batch.id, sep = ',')`': `status` (`msg`)" setClasses(list(status = status, batch.id = batch.id, log.file = log.file, msg = msg), "SubmitJobResult") } #' @export print.SubmitJobResult = function(x, ...) { cat("Job submission result\n") catf(" ID : %s", stri_flatten(x$batch.id, ",")) catf(" Status: %i", x$status) catf(" Msg : %s", x$msg) } #' @title Cluster Functions Helper to Parse a Brew Template #' #' @description #' This function is only intended for use in your own cluster functions implementation. #' #' This function is only intended for use in your own cluster functions implementation. #' Simply reads your template file and returns it as a character vector. #' #' @param template [\code{character(1)}]\cr #' Path to template file which is then passed to \code{\link[brew]{brew}}. 
#' @param comment.string [\code{character(1)}]\cr #' Ignore lines starting with this string. #' @return [\code{character}]. #' @family ClusterFunctionsHelper #' @export cfReadBrewTemplate = function(template, comment.string = NA_character_) { "!DEBUG [cfReadBrewTemplate]: Parsing template file '`template`'" lines = stri_trim_both(readLines(template)) lines = lines[!stri_isempty(lines)] if (!is.na(comment.string)) lines = lines[!stri_startswith_fixed(lines, comment.string)] if (length(lines) == 0L) stopf("Error reading template '%s' or empty template", template) return(stri_flatten(lines, "\n")) } #' @title Cluster Functions Helper to Write Job Description Files #' #' @description #' This function is only intended for use in your own cluster functions implementation. #' #' Calls brew silently on your template, any error will lead to an exception. #' The file is stored at the same place as the corresponding job file in the \dQuote{jobs}-subdir #' of your files directory. #' #' @template reg #' @param text [\code{character(1)}]\cr #' String ready to be brewed. See \code{\link{cfReadBrewTemplate}} to read a template from the file system. #' @param jc [\code{\link{JobCollection})}]\cr #' Will be used as environment to brew the template file in. See \code{\link{JobCollection}} for a list of all #' available variables. #' @return [\code{character(1)}]. File path to brewed template file. 
#' @family ClusterFunctionsHelper #' @export cfBrewTemplate = function(reg, text, jc) { assertString(text) outfile = fs::path(dir(reg, "jobs"), sprintf("%s.job", jc$job.hash)) parent.env(jc) = asNamespace("batchtools") on.exit(parent.env(jc) <- emptyenv()) "!DEBUG [cfBrewTemplate]: Brewing template to file '`outfile`'" z = try(brew(text = text, output = outfile, envir = jc), silent = TRUE) if (is.error(z)) stopf("Error brewing template: %s", as.character(z)) waitForFile(outfile, reg$cluster.functions$fs.latency) return(outfile) } #' @title Cluster Functions Helper to Handle Unknown Errors #' #' @description #' This function is only intended for use in your own cluster functions implementation. #' #' Simply constructs a \code{\link{SubmitJobResult}} object with status code 101, NA as batch id and #' an informative error message containing the output of the OS command in \code{output}. #' #' @param cmd [\code{character(1)}]\cr #' OS command used to submit the job, e.g. qsub. #' @param exit.code [\code{integer(1)}]\cr #' Exit code of the OS command, should not be 0. #' @param output [\code{character}]\cr #' Output of the OS command, hopefully an informative error message. #' If these are multiple lines in a vector, they are automatically joined. #' @return [\code{\link{SubmitJobResult}}]. #' @family ClusterFunctionsHelper #' @export cfHandleUnknownSubmitError = function(cmd, exit.code, output) { assertString(cmd, min.chars = 1L) exit.code = asInt(exit.code) assertCharacter(output, any.missing = FALSE) msg = sprintf("Command '%s' produced exit code %i. Output: '%s'", cmd, exit.code, stri_flatten(output, "\n")) makeSubmitJobResult(status = 101L, batch.id = NA_character_, msg = msg) } #' @title Cluster Functions Helper to Kill Batch Jobs #' #' @description #' This function is only intended for use in your own cluster functions implementation. #' #' Calls the OS command to kill a job via \code{\link[base]{system}} like this: \dQuote{cmd batch.job.id}. 
If the #' command returns an exit code > 0, the command is repeated after a 1 second sleep #' \code{max.tries-1} times. If the command failed in all tries, an error is generated. #' #' @template reg #' @param cmd [\code{character(1)}]\cr #' OS command, e.g. \dQuote{qdel}. #' @param args [\code{character}]\cr #' Arguments to \code{cmd}, including the batch id. #' @param max.tries [\code{integer(1)}]\cr #' Number of total times to try execute the OS command in cases of failures. #' Default is \code{3}. #' @inheritParams runOSCommand #' @return \code{TRUE} on success. An exception is raised otherwise. #' @family ClusterFunctionsHelper #' @export cfKillJob = function(reg, cmd, args = character(0L), max.tries = 3L, nodename = "localhost") { assertString(cmd, min.chars = 1L) assertCharacter(args, any.missing = FALSE) assertString(nodename) max.tries = asCount(max.tries) for (i in seq_len(max.tries)) { res = runOSCommand(cmd, args, nodename = nodename) if (res$exit.code == 0L) return(TRUE) Sys.sleep(1) } stopf("Really tried to kill job, but failed %i times with '%s'.\nMessage: %s", max.tries, stri_flatten(c(cmd, args), " "), stri_flatten(res$output, "\n")) } getBatchIds = function(reg, status = "all") { cf = reg$cluster.functions tab = data.table(batch.id = character(0L), status = character(0L)) batch.id = NULL if (status %chin% c("all", "running") && !is.null(cf$listJobsRunning)) { "!DEBUG [getBatchIds]: Getting running Jobs" x = unique(cf$listJobsRunning(reg)) if (length(x) > 0L) tab = rbind(tab, data.table(batch.id = x, status = "running")) } if (status %chin% c("all", "queued") && !is.null(cf$listJobsQueued)) { "!DEBUG [getBatchIds]: Getting queued Jobs" x = chsetdiff(cf$listJobsQueued(reg), tab$batch.id) if (length(x) > 0L) tab = rbind(tab, data.table(batch.id = unique(x), status = "queued")) } submitted = done = batch.id = NULL batch.ids = reg$status[!is.na(submitted) & is.na(done) & !is.na(batch.id), unique(batch.id)] tab[batch.id %in% batch.ids] } #' @title Find a 
batchtools Template File
#'
#' @description
#' This function returns the path to a template file on the file system.
#' @template template
#' @return [\code{character}] Path to the file or \code{NA} if no template file was found.
#' @keywords internal
#' @export
findTemplateFile = function(template) {
  assertString(template, min.chars = 1L)

  # an explicit "*.tmpl" path must exist and is returned as-is (absolute)
  if (stri_endswith_fixed(template, ".tmpl")) {
    assertFileExists(template, access = "r")
    return(fs::path_abs(template))
  }

  # search order for a template named e.g. "slurm":
  # 1) directory pointed to by env var R_BATCHTOOLS_SEARCH_PATH
  x = Sys.getenv("R_BATCHTOOLS_SEARCH_PATH")
  if (nzchar(x)) {
    x = fs::path(x, sprintf("batchtools.%s.tmpl", template))
    if (fs::file_access(x, "read"))
      return(fs::path_abs(x))
  }

  # 2) current working directory
  x = sprintf("batchtools.%s.tmpl", template)
  if (fs::file_access(x, "read"))
    return(fs::path_abs(x))

  # 3) user config directory, e.g. ~/.config/batchtools on Linux
  x = fs::path(user_config_dir("batchtools", expand = FALSE), sprintf("%s.tmpl", template))
  if (fs::file_access(x, "read"))
    return(x)

  # 4) hidden file in the home directory
  x = fs::path("~", sprintf(".batchtools.%s.tmpl", template))
  if (fs::file_access(x, "read"))
    return(fs::path_abs(x))

  # 5) site-wide config directory
  x = fs::path(site_config_dir("batchtools"), sprintf("%s.tmpl", template))
  if (fs::file_access(x, "read"))
    return(x)

  # 6) fall back to the template shipped with the package itself
  x = system.file("templates", sprintf("%s.tmpl", template), package = "batchtools")
  if (fs::file_access(x, "read"))
    return(x)

  return(NA_character_)
}
batchtools/R/removeExperiments.R0000644000176200001440000000206313453602073016465 0ustar liggesusers#' @title Remove Experiments
#'
#' @description
#' Remove Experiments from an \code{\link{ExperimentRegistry}}.
#' This function automatically checks if any of the jobs to reset is either pending or running.
#' However, if the implemented heuristic fails, this can lead to inconsistencies in the data base.
#' Use with care while jobs are running.
#'
#' @templateVar ids.default none
#' @template ids
#' @template expreg
#' @return [\code{\link{data.table}}] of removed job ids, invisibly.
#' @export
#' @family Experiment
removeExperiments = function(ids = NULL, reg = getDefaultRegistry()) {
  # only writeable registries without running jobs may be altered
  assertRegistry(reg, class = "ExperimentRegistry", writeable = TRUE, running.ok = FALSE)
  ids = convertIds(reg, ids, default = noIds())

  info("Removing %i Experiments ...", nrow(ids))
  # data.table anti-join: drop the status rows of the selected jobs
  reg$status = reg$status[!ids]

  # job definitions no longer referenced by any status row are dropped as well
  i = reg$defs[!reg$status, on = "def.id", which = TRUE]
  if (length(i) > 0L) {
    info("Removing %i job definitions ...", length(i))
    reg$defs = reg$defs[-i]
  }

  # remove stored result files of the removed jobs from the file system
  fns = getResultFiles(reg, ids)
  file_remove(fns)

  # sweepRegistry() cleans up obsolete files and persists the registry
  sweepRegistry(reg)
  invisible(ids)
}
batchtools/R/addExperiments.R0000644000176200001440000002031113543336703015720 0ustar liggesusers#' @title Add Experiments to the Registry
#'
#' @description
#' Adds experiments (parametrized combinations of problems with algorithms) to the registry and thereby defines batch jobs.
#'
#' If multiple problem designs or algorithm designs are provided, they are combined via the Cartesian product.
#' E.g., if you have two problems \code{p1} and \code{p2} and three algorithms \code{a1}, \code{a2} and \code{a3},
#' \code{addExperiments} creates experiments for all parameters for the combinations \code{(p1, a1)}, \code{(p1, a2)},
#' \code{(p1, a3)}, \code{(p2, a1)}, \code{(p2, a2)} and \code{(p2, a3)}.
#'
#' @note
#' R's \code{data.frame} converts character vectors to factors by default which frequently resulted in problems using \code{addExperiments}.
#' Therefore, this function will warn about factor variables if the following conditions hold:
#' \enumerate{
#'   \item The design is passed as a \code{data.frame}, not a \code{\link[data.table]{data.table}} or \code{\link[tibble]{tibble}}.
#'   \item The option \dQuote{stringsAsFactors} is not set or set to \code{TRUE}.
#' }
#'
#' @param prob.designs [named list of \code{\link[base]{data.frame}}]\cr
#'   Named list of data frames (or \code{\link[data.table]{data.table}}).
#'   The name must match the problem name while the column names correspond to parameters of the problem.
#' If \code{NULL}, experiments for all defined problems without any parameters are added.
#' @param algo.designs [named list of \code{\link[data.table]{data.table}} or \code{\link[base]{data.frame}}]\cr
#'   Named list of data frames (or \code{\link[data.table]{data.table}}).
#'   The name must match the algorithm name while the column names correspond to parameters of the algorithm.
#'   If \code{NULL}, experiments for all defined algorithms without any parameters are added.
#' @param repls [\code{integer(1)}]\cr
#'   Number of replications for each experiment.
#' @param combine [\code{character(1)}]\cr
#'   How to combine the rows of a single problem design with the rows of a single algorithm design?
#'   Default is \dQuote{crossprod} which combines each row of the problem design which each row of the algorithm design
#'   in a cross-product fashion. Set to \dQuote{bind} to just \code{\link[base]{cbind}} the tables of
#'   problem and algorithm designs where the shorter table is repeated if necessary.
#' @template expreg
#' @return [\code{\link{data.table}}] with ids of added jobs stored in column \dQuote{job.id}.
#' @export
#' @family Experiment
#' @examples
#' \dontshow{ batchtools:::example_push_temp(1) }
#' tmp = makeExperimentRegistry(file.dir = NA, make.default = FALSE)
#'
#' # add first problem
#' fun = function(job, data, n, mean, sd, ...) rnorm(n, mean = mean, sd = sd)
#' addProblem("rnorm", fun = fun, reg = tmp)
#'
#' # add second problem
#' fun = function(job, data, n, lambda, ...) rexp(n, rate = lambda)
#' addProblem("rexp", fun = fun, reg = tmp)
#'
#' # add first algorithm
#' fun = function(instance, method, ...) if (method == "mean") mean(instance) else median(instance)
#' addAlgorithm("average", fun = fun, reg = tmp)
#'
#' # add second algorithm
#' fun = function(instance, ...) sd(instance)
#' addAlgorithm("deviation", fun = fun, reg = tmp)
#'
#' # define problem and algorithm designs
#' library(data.table)
#' prob.designs = algo.designs = list()
#' prob.designs$rnorm = CJ(n = 100, mean = -1:1, sd = 1:5)
#' prob.designs$rexp = data.table(n = 100, lambda = 1:5)
#' algo.designs$average = data.table(method = c("mean", "median"))
#' algo.designs$deviation = data.table()
#'
#' # add experiments and submit
#' addExperiments(prob.designs, algo.designs, reg = tmp)
#'
#' # check what has been created
#' summarizeExperiments(reg = tmp)
#' unwrap(getJobPars(reg = tmp))
addExperiments = function(prob.designs = NULL, algo.designs = NULL, repls = 1L, combine = "crossprod", reg = getDefaultRegistry()) {
  # Sanitize user-provided designs: warn about accidental factor columns,
  # convert to data.table, and reject reserved column names.
  convertDesigns = function(type, designs, keywords) {
    check.factors = default.stringsAsFactors()
    Map(function(id, design) {
      if (check.factors && identical(class(design)[1L], "data.frame")) {
        i = which(vlapply(design, is.factor))
        if (length(i) > 0L) {
          # BUG FIX: the separator "','" was previously passed as a stray extra
          # argument to warningf() instead of as the collapse argument of
          # stri_flatten(), so multiple column names were glued together
          # without any separator in the warning message.
          warningf("%s design '%s' passed as 'data.frame' and 'stringsAsFactors' is TRUE. Column(s) '%s' may be encoded as factors accidentally.",
            type, id, stri_flatten(names(design)[i], "','"))
        }
      }
      if (!is.data.table(design))
        design = as.data.table(design)
      i = wf(keywords %chin% names(design))
      if (length(i) > 0L)
        stopf("%s design %s contains reserved keyword '%s'", type, id, keywords[i])
      design
    }, id = names(designs), design = designs)
  }

  # next 'n' ids following the maximum of the already used ids
  increment = function(ids, n = 1L) {
    if (length(ids) == 0L) seq_len(n) else max(ids) + seq_len(n)
  }

  assertRegistry(reg, class = "ExperimentRegistry", writeable = TRUE)
  if (is.null(prob.designs)) {
    # default: all defined problems, each without parameters
    prob.designs = replicate(length(reg$problems), data.table(), simplify = FALSE)
    names(prob.designs) = reg$problems
  } else {
    assertList(prob.designs, types = "data.frame", names = "named")
    assertSubset(names(prob.designs), reg$problems)
    prob.designs = convertDesigns("Problem", prob.designs, c("job", "data"))
  }
  if (is.null(algo.designs)) {
    algo.designs = replicate(length(reg$algorithms), data.table(), simplify = FALSE)
    names(algo.designs) = reg$algorithms
  } else {
    assertList(algo.designs, types = "data.frame", names = "named")
    assertSubset(names(algo.designs), reg$algorithms)
    algo.designs = convertDesigns("Algorithm", algo.designs, c("job", "data", "instance"))
  }
  repls = asCount(repls)
  assertChoice(combine, c("crossprod", "bind"))

  all.ids = integer(0L)

  for (i in seq_along(prob.designs)) {
    pn = names(prob.designs)[i]
    pd = prob.designs[[i]]
    n.pd = max(nrow(pd), 1L)

    for (j in seq_along(algo.designs)) {
      an = names(algo.designs)[j]
      ad = algo.designs[[j]]
      n.ad = max(nrow(ad), 1L)

      if (combine == "crossprod") {
        n.jobs = n.pd * n.ad * repls
        info("Adding %i experiments ('%s'[%i] x '%s'[%i] x repls[%i]) ...", n.jobs, pn, n.pd, an, n.ad, repls)
        idx = CJ(.i = seq_len(n.pd), .j = seq_len(n.ad))
      } else {
        n.jobs = max(n.pd, n.ad) * repls
        info("Adding %i experiments (('%s'[%i] | '%s'[%i]) x repls[%i]) ...", n.jobs, pn, n.pd, an, n.ad, repls)
        idx = data.table(.i = rep_len(seq_len(n.pd), n.jobs), .j = rep_len(seq_len(n.ad), n.jobs))
      }

      # create temp tab with prob name, algo name and pars as list
      tab = data.table(
        problem = pn,
        algorithm = an,
        prob.pars = if (nrow(pd) > 0L) .mapply(list, pd[idx$.i], list()) else list(list()),
        algo.pars = if (nrow(ad) > 0L) .mapply(list, ad[idx$.j], list()) else list(list())
      )

      # create hash of each row of tab
      tab$pars.hash = calculateHash(tab)

      # merge with already defined experiments to get def.ids
      if (nrow(reg$defs) == 0L) {
        # this is no optimization, but fixes an strange error on r-devel/windows for merging empty data.tables
        tab$def.id = NA_integer_
      } else {
        tab = merge(reg$defs[, !c("problem", "algorithm", "prob.pars", "algo.pars")], tab,
          by = "pars.hash", all.x = FALSE, all.y = TRUE, sort = FALSE)
      }

      # generate def ids for new experiments
      w = which(is.na(tab$def.id))
      if (length(w) > 0L) {
        tab[w, "def.id" := increment(reg$defs$def.id, length(w))]
        reg$defs = rbind(reg$defs, tab[w])
      }

      # create rows in status table for new defs and each repl and filter for defined
      tab = CJ(def.id = tab$def.id, repl = seq_len(repls))[!reg$status, on = c("def.id", "repl")]
      if (nrow(tab) < n.jobs)
        info("Skipping %i duplicated experiments ...", n.jobs - nrow(tab))

      if (nrow(tab) > 0L) {
        # rbind new status
        tab$job.id = increment(reg$status$job.id, nrow(tab))
        reg$status = rbind(reg$status, tab, fill = TRUE)
      }

      all.ids = c(all.ids, tab$job.id)
    }
  }

  setkeyv(reg$defs, "def.id")
  setkeyv(reg$status, "job.id")
  saveRegistry(reg)
  invisible(data.table(job.id = all.ids, key = "job.id"))
}

# digest() the (problem, algorithm, parameters) combination of each row so
# that identical experiment definitions can be recognized across calls
calculateHash = function(tab) {
  cols = c("problem", "algorithm", "prob.pars", "algo.pars")
  unlist(.mapply(function(...) digest(list(...)), tab[, cols, with = FALSE], list()))
}
batchtools/R/ExperimentRegistry.R0000644000176200001440000000654213543336703016627 0ustar liggesusers#' @title ExperimentRegistry Constructor
#'
#' @description
#' \code{makeExperimentRegistry} constructs a special \code{\link{Registry}} which
#' is suitable for the definition of large scale computer experiments.
#'
#' Each experiment consists of a \code{\link{Problem}} and an \code{\link{Algorithm}}.
#' These can be parametrized with \code{\link{addExperiments}} to actually define computational
#' jobs.
#'
#' @inheritParams makeRegistry
#' @aliases ExperimentRegistry
#' @return [\code{ExperimentRegistry}].
#' @export
#' @family Registry Experiment
#' @examples
#' \dontshow{ batchtools:::example_push_temp(1) }
#' tmp = makeExperimentRegistry(file.dir = NA, make.default = FALSE)
#'
#' # Define one problem, two algorithms and add them with some parameters:
#' addProblem(reg = tmp, "p1",
#'   fun = function(job, data, n, mean, sd, ...) rnorm(n, mean = mean, sd = sd))
#' addAlgorithm(reg = tmp, "a1", fun = function(job, data, instance, ...) mean(instance))
#' addAlgorithm(reg = tmp, "a2", fun = function(job, data, instance, ...) median(instance))
#' ids = addExperiments(reg = tmp, list(p1 = data.table::CJ(n = c(50, 100), mean = -2:2, sd = 1:4)))
#'
#' # Overview over defined experiments:
#' tmp$problems
#' tmp$algorithms
#' summarizeExperiments(reg = tmp)
#' summarizeExperiments(reg = tmp, by = c("problem", "algorithm", "n"))
#' ids = findExperiments(prob.pars = (n == 50), reg = tmp)
#' print(unwrap(getJobPars(ids, reg = tmp)))
#'
#' # Submit jobs
#' submitJobs(reg = tmp)
#' waitForJobs(reg = tmp)
#'
#' # Reduce the results of algorithm a1
#' ids.mean = findExperiments(algo.name = "a1", reg = tmp)
#' reduceResults(ids.mean, fun = function(aggr, res, ...) c(aggr, res), reg = tmp)
#'
#' # Join info table with all results and calculate mean of results
#' # grouped by n and algorithm
#' ids = findDone(reg = tmp)
#' pars = unwrap(getJobPars(ids, reg = tmp))
#' results = unwrap(reduceResultsDataTable(ids, fun = function(res) list(res = res), reg = tmp))
#' tab = ljoin(pars, results)
#' tab[, list(mres = mean(res)), by = c("n", "algorithm")]
makeExperimentRegistry = function(file.dir = "registry", work.dir = getwd(), conf.file = findConfFile(), packages = character(0L), namespaces = character(0L),
  source = character(0L), load = character(0L), seed = NULL, make.default = TRUE) {
  # start from a plain Registry and extend it with experiment-specific tables
  reg = makeRegistry(file.dir = file.dir, work.dir = work.dir, conf.file = conf.file, packages = packages, namespaces = namespaces,
    source = source, load = load, seed = seed, make.default = make.default)

  # problems/ and algorithms/ hold the serialized problem and algorithm objects
  fs::dir_create(fs::path(reg$file.dir, c("problems", "algorithms")))
  reg$problems = character(0L)
  reg$algorithms = character(0L)
  # experiments additionally track a replication number per job ...
  reg$status$repl = integer(0L)
  # ... and split job parameters into problem and algorithm parameters
  reg$defs$problem = character(0L)
  reg$defs$algorithm = character(0L)
  reg$defs$job.pars = NULL
  reg$defs$prob.pars = list()
  reg$defs$algo.pars = list()
  reg$defs$pars.hash = character(0L)
  class(reg) = c("ExperimentRegistry", "Registry")

  saveRegistry(reg)
  return(reg)
}

#' @export
print.ExperimentRegistry = function(x, ...) {
  # compact human-readable summary of the registry state
  cat("Experiment Registry\n")
  catf("  Backend   : %s", x$cluster.functions$name)
  catf("  File dir  : %s", x$file.dir)
  catf("  Work dir  : %s", x$work.dir)
  catf("  Jobs      : %i", nrow(x$status))
  catf("  Problems  : %i", length(x$problems))
  catf("  Algorithms: %i", length(x$algorithms))
  catf("  Seed      : %i", x$seed)
  catf("  Writeable : %s", x$writeable)
}
batchtools/R/runOSCommand.R0000644000176200001440000000477613606041641015324 0ustar liggesusers#' @title Run OS Commands on Local or Remote Machines
#'
#' @description
#' This is a helper function to run arbitrary OS commands on local or remote machines.
#' The interface is similar to \code{\link[base]{system2}}, but it always returns the exit status
#' \emph{and} the output.
#'
#' @param sys.cmd [\code{character(1)}]\cr
#'   Command to run.
#' @param sys.args [\code{character}]\cr
#'   Arguments for \code{sys.cmd}.
#' @param stdin [\code{character(1)}]\cr
#'   Argument passed to \code{\link[base]{system2}}.
#' @param nodename [\code{character(1)}]\cr
#'   Name of the SSH node to run the command on. If set to \dQuote{localhost} (default), the command
#'   is not piped through SSH.
#' @return [\code{named list}] with \dQuote{sys.cmd}, \dQuote{sys.args}, \dQuote{exit.code} (integer), \dQuote{output} (character).
#' @export
#' @family ClusterFunctionsHelper
#' @examples
#' \dontrun{
#' runOSCommand("ls")
#' runOSCommand("ls", "-al")
#' runOSCommand("notfound")
#' }
runOSCommand = function(sys.cmd, sys.args = character(0L), stdin = "", nodename = "localhost") {
  assertCharacter(sys.cmd, any.missing = FALSE, len = 1L)
  assertCharacter(sys.args, any.missing = FALSE)
  assertString(nodename, min.chars = 1L)

  if (!isLocalHost(nodename)) {
    # rewrite the call so that the command is executed via ssh on the remote node
    command = sprintf("%s %s", sys.cmd, stri_flatten(sys.args, " "))
    if (getRversion() < "4.0.0") {
      # NOTE(review): quoting differs by R version here -- presumably to work
      # around a system2() quoting change in R 4.0.0; verify before touching
      command = shQuote(command)
    }
    # undo escaping of '$' so shell variables (e.g. $USER) expand remotely
    command = stri_replace_all_fixed(command, "\\$", "$")
    sys.args = c("-q", nodename, command)
    sys.cmd = "ssh"
  }

  "!DEBUG [runOSCommand]: cmd: `sys.cmd` `stri_flatten(sys.args, ' ')`"

  if (nzchar(Sys.which(sys.cmd))) {
    res = suppressWarnings(system2(command = sys.cmd, args = sys.args, stdin = stdin, stdout = TRUE, stderr = TRUE, wait = TRUE))
    output = as.character(res)
    # system2() only attaches a 'status' attribute for non-zero exit codes
    exit.code = attr(res, "status") %??% 0L
  } else {
    # command not found on PATH: emulate the conventional shell exit code
    output = "command not found"
    exit.code = 127L
  }

  "!DEBUG [runOSCommand]: OS result (stdin '`stdin`', exit code `exit.code`):"
  "!DEBUG [runOSCommand]: `paste0(output, sep = '\n')`"

  return(list(sys.cmd = sys.cmd, sys.args = sys.args, exit.code = exit.code, output = output))
}

# TRUE if 'nodename' addresses the local machine (then no ssh hop is needed)
isLocalHost = function(nodename) {
  is.null(nodename) || nodename %chin% c("localhost", "127.0.0.1", "::1")
}

# raise a formatted error for a failed runOSCommand() result list
OSError = function(msg, res) {
  cmd = stri_flatten(c(res$sys.cmd, res$sys.args), collapse = " ") %??% NA_character_
  exit.code = res$exit.code %??% NA_integer_
  output = stri_flatten(res$output, "\n") %??% ""
  stopf("%s (exit code %i);\ncmd: '%s'\noutput:\n%s", msg, exit.code, cmd, output)
}
batchtools/R/clusterFunctionsLSF.R0000644000176200001440000000613113335230703016657 0ustar liggesusers#' @title ClusterFunctions for LSF Systems
#'
#' @description
#' Cluster functions for LSF (\url{https://www.ibm.com/us-en/marketplace/hpc-workload-management}).
#'
#' Job files are created based on the brew template \code{template.file}. This
#' file is processed with brew and then submitted to the queue using the
#' \code{bsub} command. Jobs are killed using the \code{bkill} command and the
#' list of running jobs is retrieved using \code{bjobs -u $USER -w}. The user
#' must have the appropriate privileges to submit, delete and list jobs on the
#' cluster (this is usually the case).
#'
#' The template file can access all resources passed to \code{\link{submitJobs}}
#' as well as all variables stored in the \code{\link{JobCollection}}.
#' It is the template file's job to choose a queue for the job and handle the desired resource
#' allocations.
#'
#' @note
#' Array jobs are currently not supported.
#'
#' @template template
#' @inheritParams makeClusterFunctions
#' @return [\code{\link{ClusterFunctions}}].
#' @family ClusterFunctions
#' @export
makeClusterFunctionsLSF = function(template = "lsf", scheduler.latency = 1, fs.latency = 65) { # nocov start
  template = findTemplateFile(template)
  if (testScalarNA(template))
    stopf("Argument 'template' (=\"%s\") must point to a readable template file or contain the template itself as string (containing at least one newline)", template)
  template = cfReadBrewTemplate(template)

  # When LSB_BJOBS_CONSISTENT_EXIT_CODE = Y, the bjobs command exits with 0 only
  # when unfinished jobs are found, and 255 when no jobs are found,
  # or a non-existent job ID is entered.
  Sys.setenv(LSB_BJOBS_CONSISTENT_EXIT_CODE = "Y")

  # brew the template to a file, pipe it into 'bsub' and extract the batch id
  # (first number in the scheduler's response)
  submitJob = function(reg, jc) {
    assertRegistry(reg, writeable = TRUE)
    assertClass(jc, "JobCollection")

    outfile = cfBrewTemplate(reg, template, jc)
    res = runOSCommand("bsub", stdin = shQuote(outfile))

    if (res$exit.code > 0L) {
      cfHandleUnknownSubmitError("bsub", res$exit.code, res$output)
    } else {
      batch.id = stri_extract_first_regex(stri_flatten(res$output, " "), "\\d+")
      makeSubmitJobResult(status = 0L, batch.id = batch.id)
    }
  }

  # list batch ids via 'bjobs'; an exit code of 255 or a "no ... job found"
  # message simply means there is nothing on the system
  listJobs = function(reg, args) {
    assertRegistry(reg, writeable = FALSE)
    res = runOSCommand("bjobs", args)

    if (res$exit.code > 0L) {
      if (res$exit.code == 255L || any(stri_detect_regex(res$output, "No (unfinished|pending|running) job found")))
        return(character(0L))
      OSError("Listing of jobs failed", res)
    }

    # drop the header line and extract the numeric job ids
    stri_extract_first_regex(tail(res$output, -1L), "\\d+")
  }

  listJobsQueued = function(reg) {
    # -p restricts to pending jobs
    listJobs(reg, c("-u $USER", "-w", "-p"))
  }

  listJobsRunning = function(reg) {
    # -r restricts to running jobs
    listJobs(reg, c("-u $USER", "-w", "-r"))
  }

  killJob = function(reg, batch.id) {
    assertRegistry(reg, writeable = TRUE)
    assertString(batch.id)
    cfKillJob(reg, "bkill", batch.id)
  }

  makeClusterFunctions(name = "LSF", submitJob = submitJob, killJob = killJob, listJobsQueued = listJobsQueued,
    listJobsRunning = listJobsRunning, store.job.collection = TRUE, scheduler.latency = scheduler.latency, fs.latency = fs.latency)
} # nocov end
batchtools/R/clusterFunctionsSGE.R0000644000176200001440000000546413435713470016671 0ustar liggesusers#' @title ClusterFunctions for SGE Systems
#'
#' @description
#' Cluster functions for Univa Grid Engine / Oracle Grid Engine /
#' Sun Grid Engine (\url{http://www.univa.com/}).
#'
#' Job files are created based on the brew template \code{template}. This
#' file is processed with brew and then submitted to the queue using the
#' \code{qsub} command. Jobs are killed using the \code{qdel} command and the
#' list of running jobs is retrieved using \code{qselect}. The user must have
#' the appropriate privileges to submit, delete and list jobs on the cluster
#' (this is usually the case).
#'
#' The template file can access all resources passed to \code{\link{submitJobs}}
#' as well as all variables stored in the \code{\link{JobCollection}}.
#' It is the template file's job to choose a queue for the job and handle the desired resource
#' allocations.
#'
#' @note
#' Array jobs are currently not supported.
#'
#' @template template
#' @inheritParams makeClusterFunctions
#' @template nodename
#' @return [\code{\link{ClusterFunctions}}].
#' @family ClusterFunctions
#' @export
makeClusterFunctionsSGE = function(template = "sge", nodename = "localhost", scheduler.latency = 1, fs.latency = 65) { # nocov start
  assertString(nodename)
  template = findTemplateFile(template)
  if (testScalarNA(template))
    stopf("Argument 'template' (=\"%s\") must point to a readable template file", template)
  template = cfReadBrewTemplate(template)
  # NOTE(review): 'quote' is computed but never used below -- submitJob always
  # applies shQuote() directly, even for remote nodenames. Presumably
  # 'quote(outfile)' was intended in submitJob; confirm against upstream.
  quote = if (isLocalHost(nodename)) identity else shQuote

  # brew the template, submit it with 'qsub' (possibly via ssh) and extract
  # the numeric batch id from the scheduler's response
  submitJob = function(reg, jc) {
    assertRegistry(reg, writeable = TRUE)
    assertClass(jc, "JobCollection")

    outfile = cfBrewTemplate(reg, template, jc)
    res = runOSCommand("qsub", shQuote(outfile), nodename = nodename)

    if (res$exit.code > 0L) {
      cfHandleUnknownSubmitError("qsub", res$exit.code, res$output)
    } else {
      batch.id = stri_extract_first_regex(stri_flatten(res$output, " "), "\\d+")
      makeSubmitJobResult(status = 0L, batch.id = batch.id)
    }
  }

  # list batch ids via 'qstat'; the first two lines are header/separator
  listJobs = function(reg, args) {
    assertRegistry(reg, writeable = FALSE)
    res = runOSCommand("qstat", args, nodename = nodename)
    if (res$exit.code > 0L)
      OSError("Listing of jobs failed", res)
    stri_extract_first_regex(tail(res$output, -2L), "\\d+")
  }

  listJobsQueued = function(reg) {
    # -s p restricts to pending jobs
    listJobs(reg, c("-u $USER", "-s p"))
  }

  listJobsRunning = function(reg) {
    # -s rs restricts to running/suspended jobs
    listJobs(reg, c("-u $USER", "-s rs"))
  }

  killJob = function(reg, batch.id) {
    assertRegistry(reg, writeable = TRUE)
    assertString(batch.id)
    cfKillJob(reg, "qdel", batch.id, nodename = nodename)
  }

  makeClusterFunctions(name = "SGE", submitJob = submitJob, killJob = killJob, listJobsQueued = listJobsQueued,
    listJobsRunning = listJobsRunning, store.job.collection = TRUE, scheduler.latency = scheduler.latency, fs.latency = fs.latency)
} # nocov end
batchtools/R/clusterFunctionsDocker.R0000644000176200001440000001350613305515241017446 0ustar liggesusers#' @title ClusterFunctions for Docker
#'
#' @description
#' Cluster functions for Docker/Docker Swarm (\url{https://docs.docker.com/swarm/}).
#'
#' The \code{submitJob} function executes
#' \code{docker [docker.args] run --detach=true [image.args] [resources] [image] [cmd]}.
#' Arguments \code{docker.args}, \code{image.args} and \code{image} can be set on construction.
#' The \code{resources} part takes the named resources \code{ncpus} and \code{memory}
#' from \code{\link{submitJobs}} and maps them to the arguments \code{--cpu-shares} and \code{--memory}
#' (in Megabytes). The resource \code{threads} is mapped to the environment variables \dQuote{OMP_NUM_THREADS}
#' and \dQuote{OPENBLAS_NUM_THREADS}.
#' To reliably identify jobs in the swarm, jobs are labeled with \dQuote{batchtools=[job.hash]} and named
#' using the current login name (label \dQuote{user}) and the job hash (label \dQuote{batchtools}).
#'
#' \code{listJobsRunning} uses \code{docker [docker.args] ps --format=\{\{.ID\}\}} to filter for running jobs.
#'
#' \code{killJobs} uses \code{docker [docker.args] kill [batch.id]} to filter for running jobs.
#'
#' These cluster functions use a \link{Hook} to remove finished jobs before a new submit and every time the \link{Registry}
#' is synchronized (using \code{\link{syncRegistry}}).
#' This is currently required because docker does not remove terminated containers automatically.
#' Use \code{docker ps -a --filter 'label=batchtools' --filter 'status=exited'} to identify and remove terminated
#' containers manually (or usa a cron job).
#'
#' @param image [\code{character(1)}]\cr
#'   Name of the docker image to run.
#' @param docker.args [\code{character}]\cr
#'   Additional arguments passed to \dQuote{docker} *before* the command (\dQuote{run}, \dQuote{ps} or \dQuote{kill}) to execute (e.g., the docker host).
#' @param image.args [\code{character}]\cr
#'   Additional arguments passed to \dQuote{docker run} (e.g., to define mounts or environment variables).
#' @inheritParams makeClusterFunctions
#' @return [\code{\link{ClusterFunctions}}].
#' @family ClusterFunctions
#' @export
makeClusterFunctionsDocker = function(image, docker.args = character(0L), image.args = character(0L), scheduler.latency = 1, fs.latency = 65) { # nocov start
  assertString(image)
  assertCharacter(docker.args, any.missing = FALSE)
  assertCharacter(image.args, any.missing = FALSE)
  user = Sys.info()["user"]

  # assemble and run 'docker run --detach=true ...'; the container id reported
  # on stdout becomes the batch id (truncated to the short 12 char form)
  submitJob = function(reg, jc) {
    assertRegistry(reg, writeable = TRUE)
    assertClass(jc, "JobCollection")
    assertIntegerish(jc$resources$ncpus, lower = 1L, any.missing = FALSE, .var.name = "resources$ncpus")
    assertIntegerish(jc$resources$memory, lower = 1L, any.missing = FALSE, .var.name = "resources$memory")

    # optional walltime is enforced via the 'timeout' shell utility
    timeout = if (is.null(jc$resources$walltime)) character(0L) else sprintf("timeout %i", asInt(jc$resources$walltime, lower = 0L))
    cmd = c("docker", docker.args, "run", "--detach=true", image.args,
      sprintf("-e DEBUGME='%s'", Sys.getenv("DEBUGME")),
      sprintf("-e OMP_NUM_THREADS=%i", jc$resources$omp.threads %??% jc$resources$threads),
      sprintf("-e OPENBLAS_NUM_THREADS=%i", jc$resources$blas.threads %??% jc$resources$threads),
      sprintf("-e MKL_NUM_THREADS=%i", jc$resources$blas.threads %??% jc$resources$threads),
      sprintf("-c %i", jc$resources$ncpus),
      sprintf("-m %im", jc$resources$memory),
      sprintf("--memory-swap %im", jc$resources$memory),
      sprintf("--label batchtools=%s", jc$job.hash),
      sprintf("--label user=%s", user),
      sprintf("--name=%s_bt_%s", user, jc$job.hash),
      image, timeout, "Rscript",
      stri_join("-e", shQuote(sprintf("batchtools::doJobCollection('%s', '%s')", jc$uri, jc$log.file)), sep = " "))
    res = runOSCommand(cmd[1L], cmd[-1L])

    if (res$exit.code > 0L) {
      housekeeping(reg)
      no.res.msg = "no resources available"
      if (res$exit.code == 1L && any(stri_detect_fixed(res$output, no.res.msg)))
        return(makeSubmitJobResult(status = 1L, batch.id = NA_character_, msg = no.res.msg))
      return(cfHandleUnknownSubmitError(stri_flatten(cmd, " "), res$exit.code, res$output))
    } else {
      # BUG FIX: this previously read 'length(res$output != 1L)', i.e. the
      # length of a comparison vector, which is truthy for any non-empty
      # output. Intended check: exactly one output line.
      if (length(res$output) != 1L) {
        # docker may print informational lines; locate the single line that
        # looks like a 64 char hex container id
        matches = which(stri_detect_regex(res$output, "^[[:alnum:]]{64}$"))
        if (length(matches) != 1L)
          stopf("Command '%s' did not return a long UUID identifier", stri_flatten(cmd, " "))
        res$output = res$output[matches]
      }
      return(makeSubmitJobResult(status = 0L, batch.id = stri_sub(res$output, 1L, 12L)))
    }
  }

  listJobs = function(reg, filter = character(0L)) {
    assertRegistry(reg, writeable = FALSE)
    # use a workaround for DockerSwarm: docker ps does not list all jobs correctly, only
    # docker inspect reports the status correctly
    args = c(docker.args, "ps", "--format={{.ID}}", "--filter 'label=batchtools'", filter)
    res = runOSCommand("docker", args)
    if (res$exit.code > 0L)
      OSError("Listing of jobs failed", res)
    # BUG FIX: '!nzchar(res$output)' is vectorized; used bare inside '||' it
    # errors for multi-line output in R >= 4.3. Treat all-empty output as
    # "no jobs" via all().
    if (length(res$output) == 0L || all(!nzchar(res$output)))
      return(character(0L))
    res$output
  }

  # docker does not remove terminated containers automatically: collect and
  # 'docker rm' exited containers belonging to this registry
  housekeeping = function(reg, ...) {
    batch.ids = chintersect(listJobs(reg, "--filter 'status=exited'"), reg$status$batch.id)
    if (length(batch.ids) > 0L)
      runOSCommand("docker", c(docker.args, "rm", batch.ids))
    invisible(TRUE)
  }

  killJob = function(reg, batch.id) {
    assertRegistry(reg, writeable = TRUE)
    assertString(batch.id)
    cfKillJob(reg, "docker", c(docker.args, "kill", batch.id))
  }

  listJobsRunning = function(reg) {
    assertRegistry(reg, writeable = FALSE)
    listJobs(reg, sprintf("--filter 'user=%s'", user))
  }

  makeClusterFunctions(name = "Docker", submitJob = submitJob, killJob = killJob, listJobsRunning = listJobsRunning,
    store.job.collection = TRUE, scheduler.latency = scheduler.latency, fs.latency = fs.latency,
    hooks = list(post.submit = housekeeping, post.sync = housekeeping))
} # nocov end
batchtools/R/doJobCollection.R0000644000176200001440000001677313606051724016033 0ustar liggesusers#' @title Execute Jobs of a JobCollection
#'
#' @description
#' Executes every job in a \code{\link{JobCollection}}.
#' This function is intended to be called on the slave.
#' #' @param jc [\code{\link{JobCollection}}]\cr #' Either an object of class \dQuote{JobCollection} as returned by #' \code{\link{makeJobCollection}} or a string with the path to file #' containing a \dQuote{JobCollection} as RDS file (as stored by \code{\link{submitJobs}}). #' @param output [\code{character(1)}]\cr #' Path to a file to write the output to. Defaults to \code{NULL} which means #' that output is written to the active \code{\link[base]{sink}}. #' Do not set this if your scheduler redirects output to a log file. #' @return [\code{character(1)}]: Hash of the \code{\link{JobCollection}} executed. #' @family JobCollection #' @export #' @examples #' \dontshow{ batchtools:::example_push_temp(1) } #' tmp = makeRegistry(file.dir = NA, make.default = FALSE) #' batchMap(identity, 1:2, reg = tmp) #' jc = makeJobCollection(1:2, reg = tmp) #' doJobCollection(jc) doJobCollection = function(jc, output = NULL) { UseMethod("doJobCollection") } #' @export doJobCollection.character = function(jc, output = NULL) { obj = readRDS(jc) force(obj) if (!batchtools$debug && !obj$array.jobs) { fs::file_delete(jc) job = fs::path_ext_set(jc, "job") if (fs::file_exists(job)) fs::file_delete(job) } doJobCollection.JobCollection(obj, output = output) } #' @export doJobCollection.JobCollection = function(jc, output = NULL) { error = function(msg, ...) 
{ now = ustamp() updates = data.table(job.id = jc$jobs$job.id, started = now, done = now, error = stri_trunc(stri_trim_both(sprintf(msg, ...)), 500L, " [truncated]"), mem.used = NA_real_, key = "job.id") writeRDS(updates, file = fs::path(jc$file.dir, "updates", sprintf("%s.rds", jc$job.hash)), compress = jc$compress) invisible(NULL) } # signal warnings immediately opts = options("warn") options(warn = 1L) on.exit(options(opts)) # setup output connection if (!is.null(output)) { if (!testPathForOutput(output, overwrite = TRUE)) return(error("Cannot create output file for logging")) fp = file(output, open = "wt") sink(file = fp) sink(file = fp, type = "message") on.exit({ sink(type = "message"); sink(type = "output"); close(fp) }, add = TRUE) } # subset array jobs if (jc$array.jobs) { i = as.integer(Sys.getenv(jc$array.var)) if (!testInteger(i, any.missing = FALSE, lower = 1L, upper = nrow(jc$jobs))) return(error("Failed to subset JobCollection using array environment variable '%s' [='%s']", jc$array.var, i)) jc$jobs = jc$jobs[i] } # say hi n.jobs = nrow(jc$jobs) s = now() catf("### [bt%s]: This is batchtools v%s", s, packageVersion("batchtools")) catf("### [bt%s]: Starting calculation of %i jobs", s, n.jobs) catf("### [bt%s]: Setting working directory to '%s'", s, jc$work.dir) # set work dir if (!fs::dir_exists(jc$work.dir)) return(error("Work dir does not exist")) local_dir(jc$work.dir) # load registry dependencies: packages, source files, ... 
# note that this should happen _before_ parallelMap or foreach is initialized ok = try(loadRegistryDependencies(jc, must.work = TRUE), silent = TRUE) if (is.error(ok)) return(error("Error loading registry dependencies: %s", as.character(ok))) # setup inner parallelization with parallelMap if (hasName(jc$resources, "pm.backend")) { if (!requireNamespace("parallelMap", quietly = TRUE)) return(error("parallelMap not installed")) pm.opts = filterNull(insert(list(mode = jc$resources$pm.backend, cpus = jc$resources$ncpus, show.info = FALSE), jc$resources$pm.opts)) do.call(parallelMap::parallelStart, pm.opts) on.exit(parallelMap::parallelStop(), add = TRUE) pm.opts = parallelMap::parallelGetOptions()$settings catf("### [bt%s]: Using %i CPUs for parallelMap/%s on level '%s'", s, pm.opts$cpus, pm.opts$mode, if (is.na(pm.opts$level)) "default" else pm.opts$level) } # setup inner parallelization with foreach if (hasName(jc$resources, "foreach.backend")) { if (!requireNamespace("foreach", quietly = TRUE)) return(error("Package 'foreach' is not installed")) backend = jc$resources$foreach.backend ncpus = jc$resources$ncpus if (backend == "seq") { foreach::registerDoSEQ() } else if (backend == "parallel") { if (!requireNamespace("doParallel", quietly = TRUE)) return(error("Package 'doParallel' is not installed")) doParallel::registerDoParallel(cores = ncpus) } else if (backend == "mpi") { if (!requireNamespace("doMPI", quietly = TRUE)) return(error("Package 'doMPI' is not installed")) cl = doMPI::startMPIcluster(count = ncpus) doMPI::registerDoMPI(cl) on.exit(doMPI::closeCluster(cl), add = TRUE) } else { return(error("Unknwon foreach backend: '%s'", backend)) } catf("### [bt%s]: Using %i CPUs for foreach/%s", s, ncpus, backend) } # setup memory measurement measure.memory = isTRUE(jc$resources$measure.memory) catf("### [bt%s]: Memory measurement %s", s, ifelse(measure.memory, "enabled", "disabled")) if (measure.memory) { memory.mult = c(if (.Machine$sizeof.pointer == 4L) 28L else 
56L, 8L) } # try to pre-fetch some objects from the file system reader = RDSReader$new(n.jobs > 1L) buf = UpdateBuffer$new(jc$jobs$job.id) runHook(jc, "pre.do.collection", reader = reader) for (i in seq_len(n.jobs)) { job = getJob(jc, i, reader = reader) id = job$id update = list(started = ustamp(), done = NA_integer_, error = NA_character_, mem.used = NA_real_) catf("### [bt%s]: Starting job [batchtools job.id=%i]", now(), id) if (measure.memory) { gc(reset = TRUE) result = try(execJob(job)) update$mem.used = sum(gc()[, 1L] * memory.mult) / 1000000L } else { result = try(execJob(job)) } update$done = ustamp() if (is.error(result)) { catf("\n### [bt%s]: Job terminated with an exception [batchtools job.id=%i]", now(), id) update$error = stri_trunc(stri_trim_both(as.character(result)), 500L, " [truncated]") } else { catf("\n### [bt%s]: Job terminated successfully [batchtools job.id=%i]", now(), id) writeRDS(result, file = getResultFiles(jc, id), compress = jc$compress) } buf$add(i, update) buf$flush(jc) } runHook(jc, "post.do.collection", updates = buf$updates, reader = reader) buf$save(jc) catf("### [bt%s]: Calculation finished!", now()) invisible(jc$job.hash) } UpdateBuffer = R6Class("UpdateBuffer", cloneable = FALSE, public = list( updates = NULL, next.update = NA_real_, initialize = function(ids) { self$updates = data.table(job.id = ids, started = NA_real_, done = NA_real_, error = NA_character_, mem.used = NA_real_, written = FALSE, key = "job.id") self$next.update = Sys.time() + runif(1L, 60, 300) }, add = function(i, x) { set(self$updates, i, names(x), x) }, save = function(jc) { i = self$updates[!is.na(started) & (!written), which = TRUE] if (length(i) > 0L) { first.id = self$updates$job.id[i[1L]] writeRDS(self$updates[i, !"written"], file = fs::path(jc$file.dir, "updates", sprintf("%s-%i.rds", jc$job.hash, first.id)), compress = jc$compress) set(self$updates, i, "written", TRUE) } }, flush = function(jc) { now = Sys.time() if (now > self$next.update) { 
        self$save(jc)
        # schedule the next flush with random jitter (60-300s)
        self$next.update = now + runif(1L, 60, 300)
      }
    }
  )
)
batchtools/R/saveRegistry.R0000644000176200001440000000216713606051760015441 0ustar liggesusers
#' @title Store the Registry to the File System
#'
#' @description
#' Stores the registry on the file system in its \dQuote{file.dir} (specified
#' for construction in \code{\link{makeRegistry}}, can be accessed via
#' \code{reg$file.dir}).
#' This function is usually called internally whenever needed.
#'
#' @template reg
#' @return [\code{logical(1)}]: \code{TRUE} if the registry was saved,
#'   \code{FALSE} otherwise (if the registry is read-only).
#' @family Registry
#' @export
saveRegistry = function(reg = getDefaultRegistry()) {
  if (!reg$writeable) {
    # registry opened read-only: never overwrite the persisted state
    "!DEBUG [saveRegistry]: Skipping saveRegistry (read-only)"
    return(FALSE)
  }
  "!DEBUG [saveRegistry]: Saving Registry"

  # fresh hash marks the registry as altered (external caches can key on it)
  reg$hash = rnd_hash()
  fn = fs::path(reg$file.dir, c("registry.new.rds", "registry.rds"))
  ee = new.env(parent = asNamespace("batchtools"))
  # session-local state is excluded from serialization
  exclude = c("cluster.functions", "default.resources", "temp.dir", "mtime", "writeable")
  list2env(mget(chsetdiff(ls(reg), exclude), reg), ee)
  class(ee) = class(reg)
  # write to "registry.new.rds" first, then move over the old file --
  # avoids leaving a torn registry.rds behind if the write is interrupted
  writeRDS(ee, file = fn[1L], compress = reg$compress)
  fs::file_move(fn[1L], fn[2L])
  reg$mtime = file_mtime(fn[2L])
  return(TRUE)
}
batchtools/R/clusterFunctionsTORQUE.R0000644000176200001440000000610413606056241017256 0ustar liggesusers
#' @title ClusterFunctions for OpenPBS/TORQUE Systems
#'
#' @description
#' Cluster functions for TORQUE/PBS (\url{https://adaptivecomputing.com/cherry-services/torque-resource-manager/}).
#'
#' Job files are created based on the brew template \code{template.file}. This file is processed
#' with brew and then submitted to the queue using the \code{qsub} command. Jobs are killed using
#' the \code{qdel} command and the list of running jobs is retrieved using \code{qselect}. The user
#' must have the appropriate privileges to submit, delete and list jobs on the cluster (this is
#' usually the case).
#'
#' The template file can access all resources passed to \code{\link{submitJobs}}
#' as well as all variables stored in the \code{\link{JobCollection}}.
#' It is the template file's job to choose a queue for the job and handle the desired resource
#' allocations.
#'
#' @template template
#' @inheritParams makeClusterFunctions
#' @return [\code{\link{ClusterFunctions}}].
#' @family ClusterFunctions
#' @export
makeClusterFunctionsTORQUE = function(template = "torque", scheduler.latency = 1, fs.latency = 65) { # nocov start
  template = findTemplateFile(template)
  if (testScalarNA(template))
    stopf("Argument 'template' (=\"%s\") must point to a readable template", template)
  # "##" is the comment prefix inside the brew template
  template = cfReadBrewTemplate(template, "##")

  # brew the template, pass it to qsub and translate the outcome into a SubmitJobResult
  submitJob = function(reg, jc) {
    assertRegistry(reg, writeable = TRUE)
    assertClass(jc, "JobCollection")
    outfile = cfBrewTemplate(reg, template, jc)
    res = runOSCommand("qsub", shQuote(outfile))
    output = stri_flatten(stri_trim_both(res$output), "\n")

    if (res$exit.code > 0L) {
      # a full queue is a temporary error (status 1L) -- the caller may retry later
      max.jobs.msg = "Maximum number of jobs already in queue"
      if (stri_detect_fixed(output, max.jobs.msg) || res$exit.code == 228L)
        return(makeSubmitJobResult(status = 1L, batch.id = NA_character_, msg = max.jobs.msg))
      return(cfHandleUnknownSubmitError("qsub", res$exit.code, res$output))
    }

    if (jc$array.jobs) {
      # expand the single array id "xxx[]" into one batch id and log file per array element
      logs = sprintf("%s-%i", fs::path_file(jc$log.file), seq_row(jc$jobs))
      makeSubmitJobResult(status = 0L, batch.id = stri_replace_first_fixed(output, "[]",
        stri_paste("[", seq_row(jc$jobs), "]")), log.file = logs)
    } else {
      makeSubmitJobResult(status = 0L, batch.id = output)
    }
  }

  # kill a single batch job via qdel
  killJob = function(reg, batch.id) {
    assertRegistry(reg, writeable = TRUE)
    assertString(batch.id)
    cfKillJob(reg, "qdel", batch.id)
  }

  # list batch ids via qselect; `args` selects user and job states
  listJobs = function(reg, args) {
    assertRegistry(reg, writeable = FALSE)
    res = runOSCommand("qselect", args)
    if (res$exit.code > 0L)
      OSError("Listing of jobs failed", res)
    res$output
  }

  listJobsQueued = function(reg) {
    # states: Q (queued), W (waiting)
    args = c("-u $USER", "-s QW")
    listJobs(reg, args)
  }

  listJobsRunning = function(reg) {
    # states: E (exiting), H (held), R (running), T (being moved)
    args = c("-u $USER", "-s EHRT")
    listJobs(reg, args)
  }

  makeClusterFunctions(name = "TORQUE", submitJob = submitJob, killJob = killJob, listJobsQueued = listJobsQueued,
    listJobsRunning = listJobsRunning, array.var = "PBS_ARRAYID", store.job.collection = TRUE,
    scheduler.latency = scheduler.latency, fs.latency = fs.latency)
} # nocov end
batchtools/R/Joins.R0000644000176200001440000001113613246272667014033 0ustar liggesusers
#' @title Inner, Left, Right, Outer, Semi and Anti Join for Data Tables
#' @name JoinTables
#'
#' @description
#' These helper functions perform join operations on data tables.
#' Most of them are basically one-liners.
#' See \url{http://rpubs.com/ronasta/join_data_tables} for an overview of join operations in
#' data table or alternatively \pkg{dplyr}'s vignette on two table verbs.
#'
#' @param x [\code{\link{data.frame}}]\cr
#'   First data.frame to join.
#' @param y [\code{\link{data.frame}}]\cr
#'   Second data.frame to join.
#' @param by [\code{character}]\cr
#'   Column name(s) of variables used to match rows in \code{x} and \code{y}.
#'   If not provided, a heuristic similar to the one described in the \pkg{dplyr} vignette is used:
#'   \enumerate{
#'     \item If \code{x} is keyed, the existing key will be used if \code{y} has the same column(s).
#'     \item If \code{x} is not keyed, the intersect of common columns names is used if not empty.
#'     \item Raise an exception.
#'   }
#'   You may pass a named character vector to merge on columns with different names in \code{x} and
#'   \code{y}: \code{by = c("x.id" = "y.id")} will match \code{x}'s \dQuote{x.id} column with \code{y}\'s
#'   \dQuote{y.id} column.
#' @return [\code{\link{data.table}}] with key identical to \code{by}.
#' @export
#' @examples
#' \dontshow{ batchtools:::example_push_temp(1) }
#' # Create two tables for demonstration
#' tmp = makeRegistry(file.dir = NA, make.default = FALSE)
#' batchMap(identity, x = 1:6, reg = tmp)
#' x = getJobPars(reg = tmp)
#' y = findJobs(x >= 2 & x <= 5, reg = tmp)
#' y$extra.col = head(letters, nrow(y))
#'
#' # Inner join: similar to intersect(): keep all columns of x and y with common matches
#' ijoin(x, y)
#'
#' # Left join: use all ids from x, keep all columns of x and y
#' ljoin(x, y)
#'
#' # Right join: use all ids from y, keep all columns of x and y
#' rjoin(x, y)
#'
#' # Outer join: similar to union(): keep all columns of x and y with matches in x or y
#' ojoin(x, y)
#'
#' # Semi join: filter x with matches in y
#' sjoin(x, y)
#'
#' # Anti join: filter x with matches not in y
#' ajoin(x, y)
#'
#' # Updating join: Replace values in x with values in y
#' ujoin(x, y)
ijoin = function(x, y, by = NULL) {
  x = as.data.table(x)
  y = as.data.table(y)
  by = guessBy(x, y, by)
  # nomatch = 0L drops rows of x without a partner in y (inner join semantics)
  setKey(x[y, nomatch = 0L, on = by], by)
}

#' @rdname JoinTables
#' @export
ljoin = function(x, y, by = NULL) {
  x = as.data.table(x)
  y = as.data.table(y)
  by = guessBy(x, y, by)
  # data.table's y[x] keeps all rows of x -> left join
  setKey(y[x, on = by], by)
}

#' @rdname JoinTables
#' @export
rjoin = function(x, y, by = NULL) {
  x = as.data.table(x)
  y = as.data.table(y)
  by = guessBy(x, y, by)
  # x[y] keeps all rows of y -> right join
  setKey(x[y, on = by], by)
}

#' @rdname JoinTables
#' @export
ojoin = function(x, y, by = NULL) {
  x = as.data.table(x)
  y = as.data.table(y)
  by = guessBy(x, y, by)
  # named `by` maps columns of x (names) to columns of y (values)
  res = if (is.null(names(by)))
    merge(x, y, all = TRUE, by = by)
  else
    merge(x, y, all = TRUE, by.x = names(by), by.y = by)
  setKey(res, by)
}

#' @rdname JoinTables
#' @export
sjoin = function(x, y, by = NULL) {
  x = as.data.table(x)
  y = as.data.table(y)
  by = guessBy(x, y, by)
  # which = TRUE returns matching row numbers of x; unique() discards duplicates
  # introduced by multiple matches (allow.cartesian permits them)
  w = unique(x[y, on = by, nomatch = 0L, which = TRUE, allow.cartesian = TRUE])
  setKey(x[w], by)
}

#' @rdname JoinTables
#' @export
ajoin = function(x, y, by = NULL) {
  x = as.data.table(x)
  y = as.data.table(y)
  by = guessBy(x, y, by)
  # x[!y] selects rows of x with no match in y (anti join)
  setKey(x[!y, on = by], by)
}

#' @rdname JoinTables
#' @param all.y [logical(1)]\cr
#'   Keep columns of \code{y} which are not in \code{x}?
#' @export
ujoin = function(x, y, all.y = FALSE, by = NULL) {
  assertFlag(all.y)
  # copy() so the update-by-reference below does not alter the caller's table
  x = if (is.data.table(x)) copy(x) else as.data.table(x)
  y = as.data.table(y)
  by = guessBy(x, y, by)
  cn = chsetdiff(names(y), by)
  if (!all.y)
    cn = chintersect(names(x), cn)
  if (length(cn) == 0L)
    return(x)
  # build `:=`(col = i.col, ...) to overwrite x's columns with y's values on match
  expr = parse(text = stri_join("`:=`(", stri_flatten(sprintf("%1$s=i.%1$s", cn), ","), ")"))
  setKey(x[y, eval(expr), on = by], by)
}

# Heuristic to determine the join columns when `by` is not given:
# use x's key if shared with y, otherwise the common column names; error if none.
# An explicit `by` (possibly named, x-name -> y-name) is validated against both tables.
guessBy = function(x, y, by = NULL) {
  assertDataFrame(x, min.cols = 1L)
  assertDataFrame(y, min.cols = 1L)
  if (is.null(by)) {
    res = key(x)
    if (!is.null(res) && all(res %chin% names(y)))
      return(res)
    res = chintersect(names(x), names(y))
    if (length(res) > 0L)
      return(res)
    stop("Unable to guess columns to match on. Please specify them explicitly or set keys beforehand.")
  } else {
    if (is.null(names(by))) {
      assertSubset(by, names(x))
    } else {
      assertSubset(names(by), names(x))
    }
    assertSubset(by, names(y))
    return(by)
  }
}

# Set the key of `res` to the join columns (x-side names for a named `by`)
# unless it is already keyed accordingly; returns the table (with print trigger).
setKey = function(res, by) {
  by = names(by) %??% unname(by)
  if (!identical(key(res), by))
    setkeyv(res, by)
  res[]
}
batchtools/R/killJobs.R0000644000176200001440000000436213263664734014524 0ustar liggesusers
#' @title Kill Jobs
#'
#' @description
#' Kill jobs which are currently running on the batch system.
#'
#' In case of an error when killing, the function tries - after a short sleep - to kill the remaining
#' batch jobs again. If this fails three times for some jobs, the function gives up. Jobs that could be
#' successfully killed are reset in the \link{Registry}.
#'
#' @templateVar ids.default findOnSystem
#' @template ids
#' @template reg
#' @return [\code{\link{data.table}}] with columns \dQuote{job.id}, the corresponding \dQuote{batch.id} and
#'   the logical flag \dQuote{killed} indicating success.
#' @family debug
#' @export
killJobs = function(ids = NULL, reg = getDefaultRegistry()) {
  assertRegistry(reg, writeable = TRUE, sync = TRUE)
  kill = reg$cluster.functions$killJob
  if (is.null(kill))
    stop("ClusterFunctions implementation does not support the killing of jobs")
  ids = convertIds(reg, ids, default = .findSubmitted(reg = reg))

  # restrict to jobs actually present on the system right now
  tab = reg$status[.findOnSystem(ids = ids, reg = reg), c("job.id", "started", "batch.id")]
  if (nrow(tab) == 0L)
    return(data.table(job.id = integer(0L), batch.id = character(0L), killed = logical(0L)))
  runHook(reg, "pre.kill", tab)

  info("Trying to kill %i jobs ...", nrow(tab))

  # kill queued jobs first, otherwise they might get started while killing running jobs
  setorderv(tab, "started", na.last = FALSE)
  tab[, "killed" := FALSE]
  batch.ids = unique(tab$batch.id)
  info("Killing %i real batch jobs ...", length(batch.ids))

  # up to 3 rounds: retry only the batch ids that failed, with a short pause in between
  for (i in seq_len(3L)) {
    tab[!tab$killed, "killed" := !is.error(try(kill(reg, .BY$batch.id), silent = TRUE)), by = "batch.id"]
    if (all(tab$killed))
      break
    Sys.sleep(2)
  }
  if (!all(tab$killed))
    warningf("Could not kill %i jobs", sum(!tab$killed))

  # reset killed jobs
  sync(reg = reg)
  cols = c("submitted", "started", "done", "error", "mem.used", "resource.id", "batch.id", "log.file", "job.hash")
  reg$status[tab[tab$killed], (cols) := list(NA_real_, NA_real_, NA_real_, NA_character_, NA_real_,
    NA_integer_, NA_character_, NA_character_, NA_character_)]
  saveRegistry(reg)

  tab = setkeyv(tab[, c("job.id", "batch.id", "killed")], "job.id")
  # give the scheduler time to remove the jobs before the hook/caller queries again
  Sys.sleep(reg$cluster.functions$scheduler.latency)
  runHook(reg, "post.kill", tab)
  return(tab)
}
batchtools/R/loadResult.R0000644000176200001440000000073013432556661015071 0ustar liggesusers
#' @title Load the Result of a Single Job
#'
#' @description
#' Loads the result of a single job.
#'
#' @template id
#' @template reg
#' @return [\code{ANY}]. The stored result.
#' @family Results
#' @export
loadResult = function(id, reg = getDefaultRegistry()) {
  assertRegistry(reg, sync = TRUE)
  id = convertId(reg, id)
  # only terminated ("done") jobs have a result file on disk
  if (nrow(.findDone(reg, id)) == 0L)
    stopf("Job with id %i not terminated", id$job.id)
  fn = getResultFiles(reg, id)
  return(readRDS(fn))
}
batchtools/NEWS.md0000644000176200001440000002031113606056072013514 0ustar liggesusers
# batchtools 0.9.12

* Moved `data.table` from `Depends` to `Imports`. User scripts might need to explicitly attach `data.table` via `library()` now.
* Fixes for `ClusterFunctionsMulticore`.
* Removed a workaround for `system2()` for R-devel (to be released as R-4.0.0).
* New configuration option `compress` to select the compression algorithm (passed down to `saveRDS()`).

# batchtools 0.9.11

* Removed deprecated function `chunkIds()`.
* New default for argument `fs.timeout` in the cluster function constructor is `0` (was `NA` before).
* Fixed a unit test for OSX.
* Improved stability and documentation.
* Fixed memory usage calculation.

# batchtools 0.9.10

* Exported functions `findConfFile()` and `findTemplateFile()`.
* Dropped support for providing a template file directly as string. A valid file is now always required.
* Fixed writing to `TMPDIR` instead of the R session's temporary directory.

# batchtools 0.9.9

* RDS files are explicitly stored in version 2 to ensure backward compatibility with R versions prior to 3.5.0.
* Package `fs` is now used internally for all file system operations.
* Support for per-site configuration files and templates to be set up by system administrators.
* The print of `getStatus()` now includes a time stamp.
* `chunk()` now optionally shuffles the ids before chunking.
* Support for setting per-job resources in `submitJobs()`.
* Example templates now include resources for `blas.threads` and `omp.threads`.
* Some bug fixes regarding read-only registries.
# batchtools 0.9.8

* Renamed column "memory" in the status table to "mem.used" to avoid name clashes with the resource specification.
* Exported function `assertRegistry()`.
* New function `unwrap()` as alias to `flatten()`. The latter causes a name clash with package `purrr` and will be deprecated in a future version.
* Registries now contain a unique hash which is updated each time the registry is altered. Can be utilized to invalidate caches, e.g. the cache of knitr.

# batchtools 0.9.7

* Added a workaround for a test to be compatible with testthat v2.0.0.
* Better and more customizable handling of expired jobs in `waitForJobs()`.
* Package `foreach` is now supported for nested parallelization as an alternative to `parallelMap`.
* Deprecated argument flatten has been removed.
* New helper function `flatten()` to manually unnest/unwrap lists in data frames.
* Removed functions `getProblemIds()` and `getAlgorithmIds()`. Instead, you can just access `reg$problems` or `reg$algorithms`, respectively.
* The number of the maximum concurrent jobs can now also be controlled via setting resources.
* Internal data base changes to speed up some operations. Old registries are updated on first load by `loadRegistry()`.
* Fixed a bug where the sleep mechanism between queries was not working.
* Fixed a bug where submit errors on SLURM and TORQUE were not detected as temporary.

# batchtools 0.9.6

* Fixed a bug where the wrong problem was retrieved from the cache. This was only triggered for chunked jobs in combination with an `ExperimentRegistry`.

# batchtools 0.9.5

* Added a missing routine to upgrade registries created with batchtools prior to v0.9.3.
* Fixed a bug where the registry could not be synced if jobs failed during initialization (#135).
* The sleep duration for `waitForJobs()` and `submitJobs()` can now be set via the configuration file.
* A new heuristic will try to detect if the registry has been altered by a simultaneously running R session.
If this is detected, the registry in the current session will be set to a read-only state. * `waitForJobs()` has been reworked to allow control over the heuristic to detect expired jobs. Jobs are treated as expired if they have been submitted but are not detected on the system for `expire.after` iterations (default 3 iterations, before 1 iteration). * New argument `writeable` for `loadRegistry()` to allow loading registries explicitly as read-only. * Removed argument `update.paths` from `loadRegistry()`. Paths are always updated, but the registry on the file system remains unchanged unless loaded in read-write mode. * `ClusterFunctionsSlurm` now come with an experimental nodename argument. If set, all communication with the master is handled via SSH which effectively allows you to submit jobs from your local machine instead of the head node. Note that mounting the file system (e.g., via SSHFS) is mandatory. # batchtools 0.9.4 * Fixed handling of `file.dir` with special chars like whitespace. * All backward slashes will now be converted to forward slashes on windows. * Fixed order of arguments in `findExperiments()` (argument `ids` is now first). * Removed code to upgrade registries created with versions prior to v0.9.0 (first CRAN release). * `addExperiments()` now warns if a design is passed as `data.frame` with factor columns and `stringsAsFactors` is `TRUE`. * Added functions `setJobNames()` and `getJobNames()` to control the name of jobs on batch systems. Templates should be adapted to use `job.name` instead of `job.hash` for naming. * Argument `flatten` of `getJobResources()`, `getJobPars()` and `getJobTable()` is deprecated and will be removed. Future versions of the functions will behave like `flatten` is set to `FALSE` explicitly. Single resources/parameters must be extracted manually (or with `tidyr::unnest()`). # batchtools 0.9.3 * Running jobs now are also included while querying for status "started". 
This affects `findStarted()`, `findNotStarted()` and `getStatus()`. * `findExperiments()` now performs an exact string match (instead of matching substrings) for patterns specified via `prob.name` and `algo.name`. For substring matching, use `prob.pattern` or `algo.pattern`, respectively. * Changed arguments for `reduceResultsDataTable()` * Removed `fill`, now is always `TRUE` * Introduced `flatten` to control if the result should be represented as a column of lists or flattened as separate columns. Defaults to a backward-compatible heuristic, similar to `getJobPars`. * Improved heuristic to lookup template files. Templates shipped with the package can now be used by providing just the file name (w/o extension). * Updated CITATION # batchtools 0.9.2 * Full support for array jobs on Slurm and TORQUE. * Array jobs have been disabled for SGE and LSF (due to missing information about the output format) but will be re-enable in a future release. Note that the variable `n.array.jobs` has been removed from `JobCollection` in favor of the new variable `array.jobs` (logical). * `findExperiments()` now has two additional arguments to match using regular expressions. The possibility to prefix a string with "~" to enable regular expression matching has been removed. * New function `batchReduce()`. * New function `estimateRuntimes()`. * New function `removeRegistry()`. * Missing result files are now handled more consistently, raising an exception in its defaults if the result is not available. The argument `missing.val` has been added to `reduceResultsList()` and `reduceResultsDataTable()` and removed from `loadResult()` and `batchMapResults()`. * Timestamps are now stored with sub-second accuracy. * Renamed Torque to TORQUE. This especially affects the constructor `makeClusterFunctionsTorque` which now must be called via `makeClusterFunctionsTORQUE()` * `chunkIds()` has been deprecated. Use `chunk()`, `lpt()` or `binpack()` instead. 
* Fixed listing of jobs for `ClusterFunctionsLSF` and `ClusterFunctionsOpenLava` (thanks to @phaverty). * Job hashes are now prefixed with the literal string 'job' to ensure they start with a letter as required by some SGE systems. * Fixed handling of `NULL` results in `reduceResultsList()` * Fixed key lookup heuristic join functions. * Fixed a bug where `getJobTable()` returned `difftimes` with the wrong unit (e.g., in minutes instead of seconds). * Deactivated swap allocation for `ClusterFunctionsDocker`. * The package is now more patient while communicating with the scheduler or file system by using a timeout-based approach. This should make the package more reliable and robust under heavy load. # batchtools 0.9.0 Initial CRAN release. See the vignette for a brief comparison with [BatchJobs](https://cran.r-project.org/package=BatchJobs)/[BatchExperiments](https://cran.r-project.org/package=BatchExperiments). batchtools/MD50000644000176200001440000003011513606123217012725 0ustar liggesusersbfe411109598db33c115c071e27ef360 *DESCRIPTION 278452724142efa5429a2520386633f9 *NAMESPACE 8ae287ccd3d8ab493f7202172ef179d2 *NEWS.md 73ee8aa6814890a049796a489c7ca342 *R/Algorithm.R 58caa9998340656afaca8ef0596ecae4 *R/ExperimentRegistry.R 37eb8f8adcd0c664233edbab468c70e6 *R/Export.R c921cd9128dfb1e22a5099a76722e787 *R/Hooks.R cd44b5a40eebd677ee615ba72f091d2b *R/Job.R 9d956435290b06a2e2a4d5d9eb6321b3 *R/JobCollection.R 8ae73adb0f856d49cc715aed97246cfd *R/JobNames.R a93f90806e25ca5b7059e3ae52a7f43e *R/JobTables.R 5191a38673db4879f45410854a9e2395 *R/Joins.R 22a183e7710d432d2176ac188e6cd2b7 *R/Logs.R 02dd286b258051f4656f75ff04138649 *R/Problem.R ad6dac8f0e11d25d6570cecb01ed56a7 *R/RDSReader.R 4574b6ddecacfb3da4af6e4d787e2fdb *R/Registry.R d805630d4fc456d7b0599c3f39814764 *R/Tags.R a1cd5efab2ae2c138d6a13876b40cf43 *R/Worker.R 227d5948726652f39cc97cd823380cc0 *R/addExperiments.R 6ae6bc76768c86ce013fc94d77e5343e *R/batchMap.R c27de0e58a5efe2c55b19cc590298142 *R/batchMapResults.R 
02a7fe83c74a0aaba3a2c0c5c7835c71 *R/batchReduce.R 0f27174556c02dbf3918c91a71a9ce99 *R/btlapply.R 7ba36511702fcf5acfc948ee51ca21e5 *R/chunkIds.R 50488faa176371ed2ec350947d5c7572 *R/clearRegistry.R ffc706907e449dad0a9d9ca7f7a92bac *R/clusterFunctions.R 1323fe0acb934dbf813891a2577d9272 *R/clusterFunctionsDocker.R 5b48b0f95ef0032cb4f83c94ee9b4a5c *R/clusterFunctionsInteractive.R a4553a5dc48c9f20598c1714588d425a *R/clusterFunctionsLSF.R f1f5f5ae858df5e8e606f1950f6b1d84 *R/clusterFunctionsMulticore.R 925ab366e52219637af424362db95979 *R/clusterFunctionsOpenLava.R 9f50a2252226321055180f6efe6b3467 *R/clusterFunctionsSGE.R 01bced9930a3a5864de11b2991b17571 *R/clusterFunctionsSSH.R 68ee516e8eddcae03f734f7edcd57435 *R/clusterFunctionsSlurm.R a4f5ab48f2f92dc41cb673d29f472955 *R/clusterFunctionsSocket.R 874bfd4c7ab3dd7561f85b73be98b252 *R/clusterFunctionsTORQUE.R 849b049d66d6e529c2c00b3281fff350 *R/config.R 7deec7a01f3aeb978e11f89127045a1a *R/doJobCollection.R ddae5d1e79fbf708699281d358a23b15 *R/estimateRuntimes.R 844aa63578332e8b40356d22ef83c317 *R/execJob.R 58df10643a9a87d483d5029556e318da *R/files.R 64ea4d24046549e1111a963a0dea3fc1 *R/findJobs.R 5422e5ea94a864be0bf7a0001ca675de *R/getDefaultRegistry.R b671529418dde718ba3c338fae2f99d7 *R/getErrorMessages.R 4b1a6fe7ee2fab26e47ee63fdc29ce64 *R/getStatus.R 655e23470d04bd090944374982fc2165 *R/helpers.R 221b8bb1c46cdbe28c6dbec3a61d5d20 *R/ids.R a7c42835cade8f3dbae5352052826ac9 *R/killJobs.R a6ca48438a40b710b39c6755292aa4c1 *R/loadRegistry.R 51ce9bfd45c5b641f01a6e99b5198436 *R/loadResult.R 545064657b2d3de3ec883462199e74df *R/mergeRegistries.R 89f186c15cffb605afa896cb876ab2f1 *R/reduceResults.R 11d38ad7a3b9e27e5bbb7b2112cb3b3d *R/removeExperiments.R c5b5e5d23be3b7850519ed5d1fb0a0b2 *R/removeRegistry.R 9c4cd91d3c23ccd6801f2c2e0d039a5c *R/resetJobs.R 4a126ec450a6b88e24056790003b9a2d *R/runOSCommand.R 0f8f45ef73176079386bf2bd56175fec *R/saveRegistry.R 117a9805e885afb3e33278bf35d1f2e9 *R/sleep.R 24c3f122bcbd1c6006dc2e9aa342dda7 
*R/submitJobs.R c2210a00013c784983b630aa44783094 *R/summarizeExperiments.R 271b1afd398826360ab65c6e40cd683c *R/sweepRegistry.R 6a081e98e20c3bb100d19392ce830a82 *R/syncRegistry.R 35194cec64ae920b40135d0412f6d5c8 *R/testJob.R 1af1e1de8441d732c020b5fe440b3ee0 *R/unwrap.R 8e48227d264b3de2b92f9a0f6985982e *R/updateRegisty.R ccc39fcc11c06375026710add9cae98e *R/waitForFiles.R d15b0797f8f18140e82628dd38d855f6 *R/waitForJobs.R ec0dc6faa801e8629b218e62a3c9d6f1 *R/zzz.R 779566af8395e36fffa43e756c48095f *README.md bbe21ea93091a508ab677ed3b4ae5155 *build/vignette.rds f5d92960f829818426ba27a29243ff2f *inst/CITATION acb36fe62ca1068e7c348924830ae47d *inst/bin/linux-helper 488a5885a787b089656085cb86160a1f *inst/doc/batchtools.R 12d7beb0e2e159b1a8de88f1d81b7173 *inst/doc/batchtools.Rmd 52f38a9c331aba1e3cf2e17b0bf05efc *inst/doc/batchtools.pdf 2a1381800c8423ff55a9489fd898a321 *inst/templates/lsf-simple.tmpl d1e95d582ab1b45dc7c01f2ce22d77f2 *inst/templates/openlava-simple.tmpl aedaa713eacced98333e2cec782b5fc7 *inst/templates/sge-simple.tmpl ac9b7fd69ef71c1dc04b2ce98217b08d *inst/templates/slurm-dortmund.tmpl 144733207bf996da5e4ed301463dcef3 *inst/templates/slurm-lido3.tmpl 58bf997d8fe9113e0630576fe1460fc6 *inst/templates/slurm-simple.tmpl 1a2675606230261aeb75e6f8fea800f6 *inst/templates/testJob.tmpl c7bf481b0b1e2f393a6c3f1d66317402 *inst/templates/torque-lido.tmpl 26c135db8438c888e8c8f6653e6c23ca *man/JobCollection.Rd d2d6dd877cc948326d3ac010df468cd7 *man/JobExperiment.Rd 11b32f9bc13343d556d71796bd901eac *man/JobNames.Rd e70252fdb3d8b338dedc5b32b7c85cef *man/JoinTables.Rd e7c41cfc3bbbf478dabc8a0ef6166a85 *man/Tags.Rd a388ccce3f6f1d26aae533880f803855 *man/Worker.Rd 3eeb548162fa201c167a5c568b1f0b63 *man/addAlgorithm.Rd 8964822702271f5522b845e67754ffae *man/addExperiments.Rd 186d620e56b4d1cd3e8fad6ad2d689ce *man/addProblem.Rd e7b050041fbf4d6e7a1065598672dee1 *man/assertRegistry.Rd 7f8ab8bbf15d9a9ab0ebb0ada102e512 *man/batchExport.Rd 6abb0d4d8416f3d4b1840207c16bb9a5 *man/batchMap.Rd 
0b5a6ed8056e78f0fe3b98c82b5a0acd *man/batchMapResults.Rd 417e88a3882000c80199b7ea059d4dab *man/batchReduce.Rd ec2619f59855e71f5077dbe54deb5a68 *man/batchtools-package.Rd 3ce5e16db1dc1720f9897989694e6754 *man/btlapply.Rd 5b04d072f5a95dc9ce7b2142c1ba1975 *man/cfBrewTemplate.Rd d8d38d2ebfb9aff9f34d5422f88e59f1 *man/cfHandleUnknownSubmitError.Rd 930521ad4669c7ccef7cff1f26a66080 *man/cfKillJob.Rd 623cacf03001939e275722cfef896841 *man/cfReadBrewTemplate.Rd 64114406bdf4e9ee61ec796102b288a8 *man/chunk.Rd 6d7268ee2e16cb1436277603125032fd *man/clearRegistry.Rd f4a81ca4e12dbc4185734eb868633986 *man/doJobCollection.Rd b262ef3d5e9330f6dbf4af439f0d5070 *man/estimateRuntimes.Rd c2761c7410f381cc952c077d2bda1ee3 *man/execJob.Rd 87794aafe59cc99e8efe17214e5b8de5 *man/findConfFile.Rd 93b82b4b70789a9a68e484b62651c725 *man/findJobs.Rd ba2c29edc24acb90b2f9c73628fab773 *man/findTemplateFile.Rd 2ee444928345c159fbe7bd6a0921bdc3 *man/getDefaultRegistry.Rd cbac5510b43f5d7ab79899aa0e465760 *man/getErrorMessages.Rd 22f98550c425f772284e181d77a2c5b4 *man/getJobTable.Rd d694d39176eba0b0c235865d6ffb0c46 *man/getStatus.Rd 215c77e283364cd77b022e262bd56ba5 *man/grepLogs.Rd 0ca6132bfe1d0eab520b74edb51790dc *man/killJobs.Rd b253327ebfeee3904c3e747135547e3f *man/loadRegistry.Rd 541726d94a83b0bc47b7fa64530e8b05 *man/loadResult.Rd 7691058b6230583fcde0d43e085998ce *man/makeClusterFunctions.Rd b356526d6a89fabbca13a45e6bcb3b49 *man/makeClusterFunctionsDocker.Rd 78560539070eefdeee6439e0cdd12a3a *man/makeClusterFunctionsInteractive.Rd a9e4d96943492ce242999a69f4c73077 *man/makeClusterFunctionsLSF.Rd 9677e20ad303d6c0c1c649f959d83e08 *man/makeClusterFunctionsMulticore.Rd dd19ac0d149f899d6bc3be2ebfe42bc3 *man/makeClusterFunctionsOpenLava.Rd 829fb641371b5849f33905cb516dfbc0 *man/makeClusterFunctionsSGE.Rd 16da9125c27e58c4570e30e42720e5a8 *man/makeClusterFunctionsSSH.Rd f33a08fd81b85d5a73b4c331d72e5c8f *man/makeClusterFunctionsSlurm.Rd 9f8a5afd59befad28b40a83da67cb6df *man/makeClusterFunctionsSocket.Rd 
bed6162930f78622d790726dafd985a8 *man/makeClusterFunctionsTORQUE.Rd 98172d4522fdc504321a333873f2e1ea *man/makeExperimentRegistry.Rd 4c09d48798feb37ca2aa8c02b2cf11e3 *man/makeRegistry.Rd d4598d79306bb1598cbe20e109561be8 *man/makeSubmitJobResult.Rd 1675239684357484112305d37fdc1026 *man/reduceResults.Rd e0e641bb425fbfbefd226cfa43ebeecb *man/reduceResultsList.Rd 726c758fdaa46b49c08dd9997753b230 *man/removeExperiments.Rd 55bc319ea0d72710e607c2165935842c *man/removeRegistry.Rd d60618786b86968c550cf7b3e637da94 *man/resetJobs.Rd 2463cfa47db89e0c84f89e183871524b *man/runHook.Rd a58c0c22cf7efdf262a9f54e3914cedc *man/runOSCommand.Rd ef5b83a3b228e37a354dae75c49fddcb *man/saveRegistry.Rd 4e30b258f4d67d02318f6f8877ebf4b1 *man/showLog.Rd 996e7e338e3d5b3e30d82b9160d91908 *man/submitJobs.Rd c63b95b588e92b456c8bfdf46614f65f *man/summarizeExperiments.Rd 70ee70b36705a13165a603d3346671e9 *man/sweepRegistry.Rd cbe2c2d93057bbfea37c64dfb1a44af7 *man/syncRegistry.Rd 2492bb6fc230f72f9201a4df78205f43 *man/testJob.Rd d9b815d3b142889bf8c157f2fd06aa4a *man/unwrap.Rd ac962b5fbbb36a8a97aea4672fcc160b *man/waitForJobs.Rd 26588a23da5a7210df8ceacba40ba4dd *src/Makevars 88417c4efaae231650659d9da71beaa7 *src/binpack.c 3f4cca3378e8eda0f9be19ae6b3b220c *src/count_not_missing.c dd81e051edf05fa147c51d7a1be399ec *src/fill_gaps.c de0bd67e7e560ee90200147f6a0830eb *src/init.c 4d8da4ccac75c571f52cd1a09e4f7907 *src/lpt.c a424a8813407241cfe11b401833363cf *tests/testthat.R 246226aec79992a0e25d6f4385a96db9 *tests/testthat/helper.R fdfe9c6673040bac782c6b3508a9ff28 *tests/testthat/test_Algorithm.R cdbc140959afdae66c9cc786f14cc928 *tests/testthat/test_ClusterFunctions.R 321c5d181768a3cbd261f076a9ee4349 *tests/testthat/test_ClusterFunctionsMulticore.R 53a752f84fe6ebaa469db2f42fa375a1 *tests/testthat/test_ClusterFunctionsSSH.R b34d6141e03ffd980f8981d037d4fc96 *tests/testthat/test_ClusterFunctionsSocket.R 6b58e130863ef57841da42b8f1e344b9 *tests/testthat/test_ExperimentRegistry.R 3b6774cbb73d17540d0f7dbdf0226db7 
*tests/testthat/test_Job.R ebadbd18eca5f6a05fb0c7f813e5bfd5 *tests/testthat/test_JobCollection.R cda6a6c34d60e2fd56bf366a0b79a669 *tests/testthat/test_JobNames.R 3c24b0b9e2b60d27ba50746e3fe3496b *tests/testthat/test_Problem.R f60eb9543e5d814144d02bcee673a66f *tests/testthat/test_Registry.R 28d217811f9d143e2b408988c9803237 *tests/testthat/test_addExperiments.R 5974b6c403a850c9bc1447c4e02870fa *tests/testthat/test_batchMap.R 225a0fdaaca843154d1a8c2b9b2a312b *tests/testthat/test_batchReduce.R 327ec255267feb4d98ce8fc5b0b48ccf *tests/testthat/test_btlapply.R 869e5e59218ffeeaabe296893aae39ba *tests/testthat/test_chunk.R d5ca93ac3ba3cd5c91ce80374dd3b216 *tests/testthat/test_convertIds.R ba297e77a9ab1e06520e51f46a03451e *tests/testthat/test_count.R 9f9f545b6708939af0612ee21770be54 *tests/testthat/test_doJobCollection.R 6625e651984af86f16ed5ec0ee7718fc *tests/testthat/test_estimateRuntimes.R 6ef9ae56f83f491388f27786273ebb49 *tests/testthat/test_export.R e84d068f709a128a6cd02bb114b5b055 *tests/testthat/test_findConfFile.R 73a1092b5ee27ef86e0eea19e76756c0 *tests/testthat/test_findJobs.R cb706803c0eb245b803428a3d90e2fc4 *tests/testthat/test_foreach.R dcab5e9bdf482363b3f00abc591d29c9 *tests/testthat/test_future.R 9a6cabe522f7b9cc0de868cc91325ba6 *tests/testthat/test_getErrorMessages.R bf6e876a462e5020e7404a18777a00d3 *tests/testthat/test_getJobTable.R 61979e645acd24efb63edf46e7fb3e7f *tests/testthat/test_getStatus.R 12b6287c0e1e28d1fc077f1422811c86 *tests/testthat/test_grepLogs.R 0050fa7d958084330d45fac1e93e5589 *tests/testthat/test_hooks.R e4710bf086afccfe07056c4fbd0f2f8c *tests/testthat/test_joins.R 8cb58c271931bedfc204d41c090d4670 *tests/testthat/test_killJobs.R 67ebdb6b7f2725752f83eb89fe52a605 *tests/testthat/test_manual.R 793412ea5e4b5f8cd2af13a09d9760fe *tests/testthat/test_memory.R 619c7604edf321977ec3fd416baee416 *tests/testthat/test_mergeRegistries.R dcea2b99f913e5fc9c99207198154693 *tests/testthat/test_parallelMap.R b19af1987653cb961014a4152f239336 
*tests/testthat/test_reduceResults.R 4566f21ed5cd722dd24ee795559c40cf *tests/testthat/test_removeExperiments.R b2361bd3c144d3a301cbf5ac25c05aea *tests/testthat/test_removeRegistry.R 650c943fed70f122307ae8b599d638c7 *tests/testthat/test_resetJobs.R e3e617494425a27d9a50d1f131287a69 *tests/testthat/test_runOSCommand.R 484eca9b321b7117078c23f85000f981 *tests/testthat/test_seed.R 93f12020a93fecb22aa828cda5187267 *tests/testthat/test_showLog.R 3231969f63ae5f46f6cd3f343ea6fdfa *tests/testthat/test_sleep.R 7c8dd5e80dcadf40d2ffadd44c6777b5 *tests/testthat/test_submitJobs.R 31b3d431de329be819b31f1ad76466f8 *tests/testthat/test_summarizeExperiments.R 89484744efd66276197972c000eba63e *tests/testthat/test_sweepRegistry.R d5413774badde26d1c0ffc8232d800b5 *tests/testthat/test_tags.R a9e1d289fe81fc0c21211baf18dd9707 *tests/testthat/test_testJob.R d7a898fae7f78153fd2a8f0c17232d6a *tests/testthat/test_unwrap.R 26ca243de466af283a9eed0f016bc1b2 *tests/testthat/test_waitForJobs.R 12d7beb0e2e159b1a8de88f1d81b7173 *vignettes/batchtools.Rmd a98f013026e5193936f275ad1ef56aac *vignettes/function_overview.pdf 7bd5346afd47b0dac70556cc79cb2300 *vignettes/function_overview.png 4c66119ba75de50ee6156b11ed2fe954 *vignettes/function_overview.tex b54e6a305617fde2ce8d37255b33155b *vignettes/tikz_prob_algo_simple.pdf 49b176be252716084fd6107e99768c16 *vignettes/tikz_prob_algo_simple.png 3b50a7f56b4cfd55c619db869420c132 *vignettes/tikz_prob_algo_simple.tex batchtools/inst/0000755000176200001440000000000013606067063013400 5ustar liggesusersbatchtools/inst/templates/0000755000176200001440000000000013435720123015367 5ustar liggesusersbatchtools/inst/templates/slurm-lido3.tmpl0000644000176200001440000000404013435713470020444 0ustar liggesusers#!/bin/bash ## Job Resource Interface Definition ## ## ncpus [integer(1)]: Number of required cpus per task, ## Set larger than 1 if you want to further parallelize ## with multicore/parallel within each task. ## walltime [integer(1)]: Walltime for this job, in seconds. 
## Must be at least 1 minute. ## memory [integer(1)]: Memory in megabytes for each cpu. ## Must be at least 100 (when I tried lower values my ## jobs did not start at all). ## ## Default resources can be set in your .batchtools.conf.R by defining the variable ## 'default.resources' as a named list. <% # queue walltime = asInt(resources$walltime, lower = 60L, upper = 31L * 24L * 60L * 60L) memory = asInt(resources$memory, lower = 100L, upper = 1024L * 1024L) walltimes = c(2L, 8L, 48L, 672L) * 3600L queue = c("short", "med", "long", "ultralong")[wf(walltime <= walltimes)] ncpus = if (!is.null(resources$ncpus)) ncpus = assertInt(resources$ncpus, lower = 1L) else 1L # modules modules = paste(resources$modules, resources$R) # cli args cli.args = "" if (!is.null(resources$pp.size)) cli.args = sprintf("--max-ppsize=%i", assertInt(pp.size, upper = 500000L)) -%> #SBATCH --job-name=<%= job.name %> #SBATCH --output=<%= log.file %> #SBATCH --error=<%= log.file %> #SBATCH --time=<%= ceiling(walltime / 60L) %> #SBATCH --partition=<%= queue %> #SBATCH --cpus-per-task=<%= ncpus %> #SBATCH --mem-per-cpu=<%= memory %> <%= if (array.jobs) sprintf("#SBATCH --array=1-%i", nrow(jobs)) else "" %> ## Initialize work environment like module add <%= modules %> ## Export value of DEBUGME environemnt var to slave export DEBUGME=<%= Sys.getenv("DEBUGME") %> ## Use /scratch on the node, TMPDIR is mounted as tmpfs export TMPDIR=/scratch/${USER}/${SLURM_JOBID} mkdir -p ${TMPDIR} ## Run R: ## we merge R output with stdout from SLURM, which gets then logged via --output option Rscript <%= cli.args -%> -e 'batchtools::doJobCollection("<%= uri %>")' batchtools/inst/templates/sge-simple.tmpl0000644000176200001440000000161613261472257020347 0ustar liggesusers#!/bin/bash ## The name of the job, can be anything, simply used when displaying the list of running jobs #$ -N <%= job.name %> ## Combining output/error messages into one file #$ -j y ## Giving the name of the output log file #$ -o <%= log.file %> 
## One needs to tell the queue system to use the current directory as the working directory ## Or else the script may fail as it will execute in your top level home directory /home/username #$ -cwd ## Use environment variables #$ -V ## Use correct queue #$ -q <%= resources$queue %> ## Export value of DEBUGME environemnt var to slave export DEBUGME=<%= Sys.getenv("DEBUGME") %> <%= sprintf("export OMP_NUM_THREADS=%i", resources$omp.threads) -%> <%= sprintf("export OPENBLAS_NUM_THREADS=%i", resources$blas.threads) -%> <%= sprintf("export MKL_NUM_THREADS=%i", resources$blas.threads) -%> Rscript -e 'batchtools::doJobCollection("<%= uri %>")' exit 0 batchtools/inst/templates/testJob.tmpl0000644000176200001440000000053513435720123017702 0ustar liggesusersoptions(warn = 1L) Sys.setenv(DEBUGME = "<%= Sys.getenv('DEBUGME') %>") requireNamespace("batchtools", quietly = TRUE) jc = readRDS("<%= jc %>") setwd(jc$work.dir) batchtools:::loadRegistryDependencies(jc, must.work = TRUE) res = batchtools::execJob(jc) saveRDS(res, file = "<%= result %>", version = 2L) quit(save = "no", status = 0L) # vim: ft=r batchtools/inst/templates/slurm-simple.tmpl0000644000176200001440000000376213435713470020735 0ustar liggesusers#!/bin/bash ## Job Resource Interface Definition ## ## ntasks [integer(1)]: Number of required tasks, ## Set larger than 1 if you want to further parallelize ## with MPI within your job. ## ncpus [integer(1)]: Number of required cpus per task, ## Set larger than 1 if you want to further parallelize ## with multicore/parallel within each task. ## walltime [integer(1)]: Walltime for this job, in seconds. ## Must be at least 60 seconds for Slurm to work properly. ## memory [integer(1)]: Memory in megabytes for each cpu. ## Must be at least 100 (when I tried lower values my ## jobs did not start at all). ## ## Default resources can be set in your .batchtools.conf.R by defining the variable ## 'default.resources' as a named list. 
<% # relative paths are not handled well by Slurm log.file = fs::path_expand(log.file) -%> #SBATCH --job-name=<%= job.name %> #SBATCH --output=<%= log.file %> #SBATCH --error=<%= log.file %> #SBATCH --time=<%= ceiling(resources$walltime / 60) %> #SBATCH --ntasks=1 #SBATCH --cpus-per-task=<%= resources$ncpus %> #SBATCH --mem-per-cpu=<%= resources$memory %> <%= if (!is.null(resources$partition)) sprintf(paste0("#SBATCH --partition='", resources$partition, "'")) %> <%= if (array.jobs) sprintf("#SBATCH --array=1-%i", nrow(jobs)) else "" %> ## Initialize work environment like ## source /etc/profile ## module add ... ## Export value of DEBUGME environemnt var to slave export DEBUGME=<%= Sys.getenv("DEBUGME") %> <%= sprintf("export OMP_NUM_THREADS=%i", resources$omp.threads) -%> <%= sprintf("export OPENBLAS_NUM_THREADS=%i", resources$blas.threads) -%> <%= sprintf("export MKL_NUM_THREADS=%i", resources$blas.threads) -%> ## Run R: ## we merge R output with stdout from SLURM, which gets then logged via --output option Rscript -e 'batchtools::doJobCollection("<%= uri %>")' batchtools/inst/templates/torque-lido.tmpl0000644000176200001440000000631713337734730020552 0ustar liggesusers#!/bin/bash <% ## Check some resources and set sane defaults resources$walltime = asInt(resources$walltime, lower = 60L, upper = 3600L * 672L) resources$memory = asInt(resources$memory, lower = 100L, upper = 64L * 1024L) resources$ncpus = if (is.null(resources$ncpus)) 1L else asInt(resources$ncpus, lower = 1L) resources$modules = if (is.null(resources$modules)) character(0L) else assertCharacter(resources$modules, any.missing = FALSE) resources$R = if (is.null(resources$R)) "R/3.4.1-gcc49-base" else assertString(resources$R) resources$omp.threads = if (is.null(resources$omp.threads)) 1L else asInt(resources$omp.threads, lower = 1L) resources$blas.threads = if (is.null(resources$blas.threads)) 1L else asInt(resources$blas.threads, lower = 1L) if (!is.null(resources$type)) assertString(resources$type) 
if (resources$memory > 15000) resources$type = "quad" use.mpi = (resources$pm.backend %??% "default") == "mpi" ## first string of queue, selected by walltime walltimes = 3600L * c(1L, 8L, 48L, 672L) queue = c("short", "med", "long", "ultralong")[wf(resources$walltime <= walltimes)] ## check default modules modules.default = c(binutils = "binutils/2.25", gcc = "gcc/4.9.3", openblas = "openblas/0.2.17") modules = resources$modules for (i in seq_along(modules.default)) { if (!any(grepl(paste0("^", names(modules.default[i]), "/?[0-9\\.]*$"), modules))) { modules = paste(modules, modules.default[i]) } } ## add R modules = paste(modules, resources$R) ## add mpi if (use.mpi) modules = paste(modules, "openmpi/gcc4.9.x") ## very ugly hack because we cannot log to data (nobackup) filesystem on lido, ## only home fs is available ## unfortunately there seems to be no generic solution ## does log path start with /data/? log.file = log.file if (length(grep("^/data/", log.file)) > 0L) { ## strip that log.file = substr(log.file, 7L, nchar(log.file)) ## find next forward slash i = regexpr("/", log.file) if (i != -1) { ## this must be "user": e.g. /data/bischl/... 
user = substr(log.file, 1L, i-1L) ## put together log.file = sprintf("/home/%s/nobackup%s", user, substr(log.file, i, nchar(log.file))) } } -%> #PBS -N <%= job.name %> #PBS -o <%= log.file %> #PBS -l walltime=<%= resources$walltime %>,nodes=<%= if (use.mpi) resources$ncpus else 1 %>:ppn=<%= if (use.mpi) 1 else resources$ncpus %><%= if (!is.null(resources$type)) paste0(":", resources$type) %>,vmem=<%= resources$memory %>M #PBS -q <%= queue %> #PBS -j oe <%= if (array.jobs) sprintf("#PBS -t 1-%i", nrow(jobs)) else "" %> ## setup modules source /sysdata/shared/sfw/Modules/default/init/bash module add <%= modules %> ## create our own temp dir (and clean it up later), lido does not do this automatically mkdir /scratch/${USER}-${PBS_JOBID} export TMPDIR=/scratch/${USER}-${PBS_JOBID} <%= sprintf("export OMP_NUM_THREADS=%i", resources$omp.threads) -%> <%= sprintf("export OPENBLAS_NUM_THREADS=%i", resources$blas.threads) -%> <%= sprintf("export MKL_NUM_THREADS=%i", resources$blas.threads) -%> ## export value of DEBUGME environemnt var to slave export DEBUGME=<%= Sys.getenv("DEBUGME") %> ## run R <%= if (use.mpi) "mpirun -np 1 " else "" %>Rscript -e 'batchtools::doJobCollection("<%= uri %>")' ## Cleanup rm -rf /scratch/${USER}-${PBS_JOBID} batchtools/inst/templates/lsf-simple.tmpl0000644000176200001440000000172313261506463020351 0ustar liggesusers## Default resources can be set in your .batchtools.conf.R by defining the variable ## 'default.resources' as a named list. 
#BSUB-j <%= job.name %> # Name of the job #BSUB-o <%= log.file %> # Output is sent to logfile, stdout + stderr by default #BSUB-q <%= resources$queue %> # Job queue #BSUB-W <%= round(resources$walltime / 60, 1) %> # Walltime (LSF requires minutes, batchtools uses seconds) #BSUB-M <%= resources$memory %> # Memory requirements in KBytes; depends on setting LSF_UNIT_FOR_LIMITS in lsf.conf ## Export value of DEBUGME environemnt var to slave export DEBUGME=<%= Sys.getenv("DEBUGME") %> <%= sprintf("export OMP_NUM_THREADS=%i", resources$omp.threads) -%> <%= sprintf("export OPENBLAS_NUM_THREADS=%i", resources$blas.threads) -%> <%= sprintf("export MKL_NUM_THREADS=%i", resources$blas.threads) -%> Rscript -e 'batchtools::doJobCollection("<%= uri %>")' batchtools/inst/templates/openlava-simple.tmpl0000644000176200001440000000170113240567421021364 0ustar liggesusers## Default resources can be set in your .batchtools.conf.R by defining the variable ## 'default.resources' as a named list. ## Remove [*] if arrayjobs are not supported #BSUB-J <%= job.name %>[1-<%= nrow(jobs) %>] # name of the job / number of jobs in chunk #BSUB-o <%= log.file %> # output is sent to logfile, stdout + stderr by default #BSUB-q <%= resources$queue %> # Job queue #BSUB-W <%= resources$walltime %> # Walltime in minutes #BSUB-M <%= resources$memory %> # Memory requirements in Kbytes ## Export value of DEBUGME environemnt var to slave export DEBUGME=<%= Sys.getenv("DEBUGME") %> <%= sprintf("export OMP_NUM_THREADS=%i", resources$omp.threads) -%> <%= sprintf("export OPENBLAS_NUM_THREADS=%i", resources$blas.threads) -%> <%= sprintf("export MKL_NUM_THREADS=%i", resources$blas.threads) -%> Rscript -e 'batchtools::doJobCollection("<%= uri %>")' batchtools/inst/templates/slurm-dortmund.tmpl0000644000176200001440000000241213240567535021272 0ustar liggesusers#!/bin/bash <% backend = resources$pm.backend %??% "local" ncpus = resources$ncpus %??% 1L walltime = asInt(resources$walltime, lower = 1L, upper = 172800L) 
memory = asInt(resources$memory, lower = 100L, upper = 64000L) if (backend == "mpi") { cmd = "mpirun -np 1 Rscript" mincpus = 2L } else { cmd = "Rscript" mincpus = 1L } # relative paths are not handled well by Slurm log.file = fs::path_expand(log.file) -%> #SBATCH --job-name=<%= job.name %> #SBATCH --output=<%= log.file %> #SBATCH --error=<%= log.file %> #SBATCH --time=<%= ceiling(walltime / 60L)%> #SBATCH --ntasks=<%= if (backend == "mpi") ncpus else 1L %> #SBATCH --mincpus=<%= mincpus %> #SBATCH --cpus-per-task=<%= if (backend == "mpi") 1L else ncpus %> #SBATCH --mem-per-cpu=<%= memory %> #SBATCH --partition=all mkdir /tmp/${USER}-${SLURM_JOBID} export TMPDIR=/tmp/${USER}-${SLURM_JOBID} ## Export value of DEBUGME environemnt var to slave export DEBUGME=<%= Sys.getenv("DEBUGME") %> <%= sprintf("export OMP_NUM_THREADS=%i", resources$omp.threads) -%> <%= sprintf("export OPENBLAS_NUM_THREADS=%i", resources$blas.threads) -%> <%= sprintf("export MKL_NUM_THREADS=%i", resources$blas.threads) -%> source /etc/profile <%= cmd %> -e 'batchtools::doJobCollection("<%= uri %>")' rm -rf /tmp/${USER}-${SLURM_JOBID} batchtools/inst/doc/0000755000176200001440000000000013606067063014145 5ustar liggesusersbatchtools/inst/doc/batchtools.Rmd0000644000176200001440000011511613543336703016760 0ustar liggesusers--- title: "batchtools" output: pdf_document: toc: true urlcolor: blue linkcolor: blue vignette: > %\VignetteIndexEntry{batchtools} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r,include = FALSE, cache = FALSE} library(batchtools) library(data.table) # for %chin%, data.table options(batchtools.progress = FALSE, datatable.print.class = TRUE, batchtools.timestamps = FALSE) if (identical(Sys.getenv("IN_PKGDOWN"), "true")) { tmp_dir = fs::path(dirname(tempdir()), "batchtools-vignette") if (fs::dir_exists(tmp_dir)) fs::dir_delete(tmp_dir) fs::file_temp_push(fs::path(tmp_dir, letters)) } ``` # Setup ## Cluster Functions The communication with the batch system is 
managed via so-called cluster functions. They are created with the constructor [makeClusterFunctions](https://mllg.github.io/batchtools/reference/makeClusterFunctions) which defines how jobs are submitted on your system. Furthermore, you may provide functions to list queued/running jobs and to kill jobs. Usually you do not have to start from scratch but can just use one of the cluster functions which ship with the package: * Interactive Cluster Functions (default): [docs](https://mllg.github.io/batchtools/reference/makeClusterFunctionsInteractive), [implementation](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsInteractive.R) * Multicore Cluster Functions: [docs](https://mllg.github.io/batchtools/reference/makeClusterFunctionsMulticore), [implementation](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsMulticore.R) * Socket Cluster Functions: [docs](https://mllg.github.io/batchtools/reference/makeClusterFunctionsSocket), [implementation](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsSocket.R) * Makeshift SSH cluster: [docs](https://mllg.github.io/batchtools/reference/makeClusterFunctionsSSH), [implementation](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsSSH.R) * Docker Swarm: [docs](https://mllg.github.io/batchtools/reference/makeClusterFunctionsDocker), [implementation](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsDocker.R) * IBM Spectrum Load Sharing Facility (LSF): [docs](https://mllg.github.io/batchtools/reference/makeClusterFunctionsLSF), [implementation](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsLSF.R) * OpenLava: [docs](https://mllg.github.io/batchtools/reference/makeClusterFunctionsOpenLava), [implementation](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsOpenLava.R) * Univa Grid Engine / Oracle Grid Engine (OGE) / Sun Grid Engine (SGE): [docs](https://mllg.github.io/batchtools/reference/makeClusterFunctionsSGE), 
[implementation](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsSGE.R) * Slurm: [docs](https://mllg.github.io/batchtools/reference/makeClusterFunctionsSlurm), [implementation](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsSlurm.R) * TORQUE/OpenPBS: [docs](https://mllg.github.io/batchtools/reference/makeClusterFunctionsTORQUE), [implementation](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsTORQUE.R) To use the package with the socket cluster functions, you would call the respective constructor [makeClusterFunctionsSocket()](https://mllg.github.io/batchtools/reference/makeClusterFunctionsSocket): ```{r, message=FALSE} reg = makeRegistry(NA) reg$cluster.functions = makeClusterFunctionsSocket(2) ``` To make this selection permanent for this registry, save the Registry with [saveRegistry()](https://mllg.github.io/batchtools/reference/makeRegistry). To make your cluster function selection permanent for a specific system across R sessions for all new Registries, you can set up a configuration file (see below). If you have trouble debugging your cluster functions, you can enable the debug mode for extra output. To do so, install the [debugme package](https://cran.r-project.org/package=debugme) and set the environment variable `DEBUGME` to `batchtools` before you load the `batchtools` package: ```{r,eval=FALSE} Sys.setenv(DEBUGME = "batchtools") library(batchtools) ``` ## Template Files Many cluster functions require a template file as argument. These templates are used to communicate with the scheduler and contain placeholders to evaluate arbitrary R expressions. Internally, the [brew package](https://cran.r-project.org/package=brew) is used for this purpose. Some exemplary template files can be found [here](https://github.com/mllg/batchtools/tree/master/inst/templates). It would be great if you would help expand this collection to cover more exotic configurations. 
To do so, please send your template via [mail](mailto:michellang@gmail.com) or open a new pull request. Note that all variables defined in a [JobCollection](https://mllg.github.io/batchtools/reference/JobCollection) can be used inside the template. If you need to pass extra variables, you can set them via the argument `resources` of [`submitJobs()`](https://mllg.github.io/batchtools/reference/submitJobs). If the flexibility which comes with templating is not sufficient, you can still construct a custom cluster function implementation yourself using the provided [constructor](https://mllg.github.io/batchtools/reference/makeClusterFunctions). ## Configuration File The configuration file can be used to set system specific options. Its default location depends on the operating system (see [Registry](https://mllg.github.io/batchtools/reference/makeRegistry)), but for the first time setup you can put one in the current working directory (as reported by `getwd()`). In order to set the cluster function implementation, you would generate a file with the following content: ```{r,eval = FALSE} cluster.functions = makeClusterFunctionsInteractive() ``` The configuration file is parsed whenever you create or load a [Registry](https://mllg.github.io/batchtools/reference/makeRegistry). It is sourced inside of your registry which has the advantage that you can (a) access all of the parameters which are passed to [makeRegistry](https://mllg.github.io/batchtools/reference/makeRegistry) and (b) you can also directly change them. Lets say you always want your working directory in your home directory and you always want to load the `checkmate` package on the nodes, you can just append these lines: ```{r, eval = FALSE} work.dir = "~" packages = union(packages, "checkmate") ``` See the documentation on [Registry](https://mllg.github.io/batchtools/reference/makeRegistry) for a more complete list of supported configuration options. 
# Migration from `BatchJobs`/`Batchexperiments` The development of [BatchJobs](https://github.com/tudo-r/BatchJobs/) and [BatchExperiments](https://github.com/tudo-r/Batchexperiments) is discontinued because of the following reasons: * Maintainability: The packages [BatchJobs](https://github.com/tudo-r/BatchJobs/) and [BatchExperiments](https://github.com/tudo-r/Batchexperiments) are tightly connected which makes maintaining difficult. Changes have to be synchronized and tested against the current CRAN versions for compatibility. Furthermore, BatchExperiments violates CRAN policies by calling internal functions of BatchJobs. * Data base issues: Although we invested weeks to mitigate issues with locks of the SQLite data base or file system (staged queries, file system timeouts, ...), BatchJobs kept working unreliable on some systems with high latency or specific file systems. This made BatchJobs unusable for many users. [BatchJobs](https://github.com/tudo-r/BatchJobs/) and [BatchExperiments](https://github.com/tudo-r/Batchexperiments) will remain on CRAN, but new features are unlikely to be ported back. ## Internal Changes * batchtools does not use SQLite anymore. Instead, all the information is stored directly in the registry using [data.tables](https://cran.r-project.org/package=data.table) acting as an in-memory database. As a side effect, many operations are much faster. * Nodes do not have to access the registry. [submitJobs()](https://mllg.github.io/batchtools/reference/submitJobs) stores a temporary object of type [JobCollection](https://mllg.github.io/batchtools/reference/JobCollection) on the file system which holds all the information necessary to execute a chunk of jobs via [doJobCollection()](https://mllg.github.io/batchtools/reference/doJobCollection) on the node. This avoids file system locks because each job accesses only one file exclusively. * `ClusterFunctionsMulticore` now uses the parallel package for multicore execution. 
* `ClusterFunctionsSSH` can still be used to emulate a scheduler-like system which respects the work load on the local machine. Setting the hostname to `"localhost"` just strips out `ssh` of the command issued. ## Interface Changes * batchtools remembers the last created or loaded Registry and sets it as default registry. This way, you do not need to pass the registry around anymore. If you need to work with multiple registries simultaneously on the other hand, you can still do so by explicitly passing registries to the functions. * Most functions now return a [data.table](https://cran.r-project.org/package=data.table) which is keyed with the `job.id`. This way, return values can be joined together easily and efficient (see this [help page](https://mllg.github.io/batchtools/reference/JoinTables) for some examples). * The building blocks of a problem has been renamed from `static` and `dynamic` to the more intuitive `data` and `fun`. Thus, algorithm function should have the formal arguments `job`, `data` and `instance`. * The function `makeDesign` has been removed. Parameters can be defined by just passing a `data.frame` or `data.table` to [addExperiments](https://mllg.github.io/batchtools/reference/addExperiments). For exhaustive designs, use `data.table::CJ()`. ## Template changes * The scheduler should directly execute the command: ``` Rscript -e 'batchtools::doJobCollection()' ``` There is no intermediate R source file like there was in `BatchJobs`. * All information stored in the object [`JobCollection`](https://mllg.github.io/batchtools/reference/JobCollection) can be accessed while brewing the template. * Extra variables may be passed via the argument `resoures` of [submitJobs](https://mllg.github.io/batchtools/reference/submitJobs). ## New features * Support for Docker Swarm via `ClusterFunctionsDocker`. * Jobs can now be tagged and untagged to provide an easy way to group them. 
* Some resources like the number of CPUs are now optionally passed to [parallelMap](https://cran.r-project.org/package=parallelMap). This eases nested parallelization, e.g. to use multicore parallelization on the slave by just setting a resource on the master. See [submitJobs()](https://mllg.github.io/batchtools/reference/submitJobs) for an example. * `ClusterFunctions` are now more flexible in general as they can define hook functions which will be called at certain events. [ClusterFunctionsDocker](https://github.com/mllg/batchtools/blob/master/R/clusterFunctionsDocker.R) is an example use case which implements a housekeeping routine. This routine is called every time before a job is about to get submitted to the scheduler (in the case: the Docker Swarm) via the hook `pre.submit` and every time directly after the registry synchronized jobs stored on the file system via the hook `post.sync`. * More new features are covered in the [NEWS](https://mllg.github.io/batchtools/news/index.html). ## Porting to `batchtools` The following table assists in porting to batchtools by mapping BatchJobs/BatchExperiments functions to their counterparts in batchtools. The table does not cover functions which are (a) used only internally in BatchJobs and (b) functions which have not been renamed. 
| BatchJobs | batchtools | | ------------- | :-------------: | | `addRegistryPackages` | Set `reg$packages` or `reg$namespaces`, call [saveRegistry()](https://mllg.github.io/batchtools/reference/saveRegistry) | | `addRegistrySourceDirs` | - | | `addRegistrySourceFiles` | Set `reg$source`, call [saveRegistry()](https://mllg.github.io/batchtools/reference/saveRegistry) | | `batchExpandGrid` | [batchMap](https://mllg.github.io/batchtools/reference/batchMap): `batchMap(..., args = CJ(x = 1:3, y = 1:10))` | | `batchMapQuick` | [btmapply](https://mllg.github.io/batchtools/reference/btlapply) | | `batchReduceResults` | - | | `batchUnexport` | [batchExport](https://mllg.github.io/batchtools/reference/batchExport) | | `filterResults` | - | | `getJobIds` | [findJobs](https://mllg.github.io/batchtools/reference/findJobs) | | `getJobInfo` | [getJobStatus](https://mllg.github.io/batchtools/reference/getJobTable) | | `getJob` | [makeJob](https://mllg.github.io/batchtools/reference/JobExperiment) | | `getJobParamDf` | [getJobPars](https://mllg.github.io/batchtools/reference/getJobTable) | | `loadResults` | [reduceResultsList](https://mllg.github.io/batchtools/reference/reduceResultsList) | | `reduceResultsDataFrame` | [reduceResultsDataTable](https://mllg.github.io/batchtools/reference/reduceResultsList) | | `reduceResultsMatrix` | [reduceResultsList](https://mllg.github.io/batchtools/reference/reduceResultsList) + `do.call(rbind, res)` | | `reduceResultsVector` | [reduceResultsDataTable](https://mllg.github.io/batchtools/reference/reduceResultsList) | | `setJobFunction` | - | | `setJobNames` | - | | `showStatus` | [getStatus](https://mllg.github.io/batchtools/reference/getStatus) | # Example 1: Approximation of $\pi$ To get a first insight into the usage of `batchtools`, we start with an exemplary Monte Carlo simulation to approximate $\pi$. For background information, see [Wikipedia](https://en.wikipedia.org/wiki/Monte_Carlo_method). 
First, a so-called registry object has to be created, which defines a directory where all relevant information, files and results of the computational jobs will be stored. There are two different types of registry objects: First, a regular [`Registry`](https://mllg.github.io/batchtools/reference/makeRegistry) which we will use in this example. Second, an [`ExperimentRegistry`](https://mllg.github.io/batchtools/reference/makeExperimentRegistry) which provides an alternative way to define computational jobs and thereby is tailored for a broad range of large scale computer experiments (see, for example, [this vignette](ExampleExperiment.html)). Here, we use a temporary registry which is stored in the temp directory of the system and gets automatically deleted if you close the R session. ```{r, message = FALSE} reg = makeRegistry(file.dir = NA, seed = 1) ``` For a permanent registry, set the `file.dir` to a valid path. It can then be reused later, e.g., when you login to the system again, by calling the function `loadRegistry(file.dir)`. When a registry object is created or loaded, it is stored for the active R session as the default. Therefore the argument `reg` will be ignored in functions calls of this example, assuming the correct registry is set as default. To get the current default registry, [`getDefaultRegistry`](https://mllg.github.io/batchtools/reference/getDefaultRegistry) can be used. To switch to another registry, use [`setDefaultRegistry()`](https://mllg.github.io/batchtools/reference/getDefaultRegistry). First, we create a function which samples $n$ points $(x_i, y_i)$ whereas $x_i$ and $y_i$ are distributed uniformly, i.e. $x_i, y_i \sim \mathcal{U}(0,1)$. Next, the distance to the origin $(0, 0)$ is calculated and the fraction of points in the unit circle ($d \leq 1$) is returned. 
```{r} piApprox = function(n) { nums = matrix(runif(2 * n), ncol = 2) d = sqrt(nums[, 1]^2 + nums[, 2]^2) 4 * mean(d <= 1) } set.seed(42) piApprox(1000) ``` We now parallelize `piApprox()` with `batchtools`: We create 10 jobs, each doing a MC simulation with $10^5$ jobs. We use [`batchMap()`](https://mllg.github.io/batchtools/reference/batchMap) to define the jobs (note that this does not yet start the calculation): ```{r} batchMap(fun = piApprox, n = rep(1e5, 10)) ``` The length of the vector or list defines how many different jobs are created, while the elements itself are used as arguments for the function. The function `batchMap(fun, ...)` works analogously to `Map(f, ...)` of the base package. An overview over the jobs and their IDs can be retrieved with [`getJobTable()`](https://mllg.github.io/batchtools/reference/getJobTable) which returns a data.frame with all relevant information: ```{r} names(getJobTable()) ``` Note that a unique job ID is assigned to each job. These IDs can be used to restrict operations to subsets of jobs. To actually start the calculation, call [`submitJobs()`](https://mllg.github.io/batchtools/reference/submitJobs). The registry and the selected job IDs can be taken as arguments as well as an arbitrary list of resource requirements, which are to be handled by the cluster back end. ```{r} submitJobs(resources = list(walltime = 3600, memory = 1024)) ``` In this example, a cap for the execution time (so-called walltime) and for the maximum memory requirements are set. The progress of the submitted jobs can be checked with [`getStatus()`](https://mllg.github.io/batchtools/reference/getStatus). ```{r} getStatus() ``` The resulting output includes the number of jobs in the registry, how many have been submitted, have started to execute on the batch system, are currently running, have successfully completed, and have terminated due to an R exception. After jobs have successfully terminated, we can load their results on the master. 
This can be done in a simple fashion by using either [`loadResult()`](https://mllg.github.io/batchtools/reference/loadResult), which returns a single result exactly in the form it was calculated during mapping, or by using [`reduceResults()`](https://mllg.github.io/batchtools/reference/reduceResults), which is a version of `Reduce()` from the base package for registry objects. ```{r} waitForJobs() mean(sapply(1:10, loadResult)) reduceResults(function(x, y) x + y) / 10 ``` If you are absolutely sure that your function works, you can take a shortcut and use *batchtools* in an `lapply` fashion using [`btlapply()`](https://mllg.github.io/batchtools/reference/btlapply). This function creates a temporary registry (but you may also pass one yourself), calls [`batchMap()`](https://mllg.github.io/batchtools/reference/reduceResultsList), wait for the jobs to terminate with [`waitForJobs()`](https://mllg.github.io/batchtools/reference/waitForJobs) and then uses [`reduceResultsList()`](https://mllg.github.io/batchtools/reference/reduceResultsList) to return the results. ```{r, R.options=list(batchtools.verbose=FALSE)} res = btlapply(rep(1e5, 10), piApprox) mean(unlist(res)) ``` # Example 2: Machine Learning We stick to a rather simple, but not unrealistic example to explain some further functionalities: Applying two classification learners to the famous iris data set (Anderson 1935), vary a few hyperparameters and evaluate the effect on the classification performance. First, we create a registry, the central meta-data object which records technical details and the setup of the experiments. We use an [`ExperimentRegistry`](https://mllg.github.io/batchtools/reference/makeExperimentRegistry) where the job definition is split into creating problems and algorithms. See the paper on [BatchJobs and BatchExperiments](http://www.jstatsoft.org/article/view/v064i11) for a detailed explanation. Again, we use a temporary registry and make it the default registry. 
```{r, message = FALSE} library(batchtools) reg = makeExperimentRegistry(file.dir = NA, seed = 1) ``` ## Problems and Algorithms By adding a problem to the registry, we can define the data on which certain computational jobs shall work. This can be a matrix, data frame or array that always stays the same for all subsequent experiments. But it can also be of a more dynamic nature, e.g., subsamples of a dataset or random numbers drawn from a probability distribution. Therefore the function [`addProblem()`](https://mllg.github.io/batchtools/reference/addProblem) accepts static parts in its `data` argument, which is passed to the argument `fun` which generates a (possibly stochastic) problem instance. For `data`, any R object can be used. If only `data` is given, the generated instance is `data`. The argument `fun` has to be a function with the arguments `data` and `job` (and optionally other arbitrary parameters). The argument `job` is an object of type [`Job`](https://mllg.github.io/batchtools/reference/JobExperiment) which holds additional information about the job. We want to split the iris data set into a training set and test set. In this example we use subsampling which just randomly takes a fraction of the observations as training set. We define a problem function which returns the indices of the respective training and test set for a split with `100 * ratio`% of the observations being in the training set: ```{r} subsample = function(data, job, ratio, ...) { n = nrow(data) train = sample(n, floor(n * ratio)) test = setdiff(seq_len(n), train) list(test = test, train = train) } ``` [`addProblem()`](https://mllg.github.io/batchtools/reference/addProblem) files the problem to the file system and the problem gets recorded in the registry. ```{r} data("iris", package = "datasets") addProblem(name = "iris", data = iris, fun = subsample, seed = 42) ``` The function call will be evaluated at a later stage on the workers. 
In this process, the `data` part will be loaded and passed to the function. Note that we set a problem seed to synchronize the experiments in the sense that the same resampled training and test sets are used for the algorithm comparison in each distinct replication. The algorithms for the jobs are added to the registry in a similar manner. When using [`addAlgorithm()`](https://mllg.github.io/batchtools/reference/addAlgorithm), an identifier as well as the algorithm to apply to are required arguments. The algorithm must be given as a function with arguments `job`, `data` and `instance`. Further arbitrary arguments (e.g., hyperparameters or strategy parameters) may be defined analogously as for the function in `addProblem`. The objects passed to the function via `job` and `data` are here the same as above, while via `instance` the return value of the evaluated problem function is passed. The algorithm can return any R object which will automatically be stored on the file system for later retrieval. Firstly, we create an algorithm which applies a support vector machine: ```{r} svm.wrapper = function(data, job, instance, ...) { library("e1071") mod = svm(Species ~ ., data = data[instance$train, ], ...) pred = predict(mod, newdata = data[instance$test, ], type = "class") table(data$Species[instance$test], pred) } addAlgorithm(name = "svm", fun = svm.wrapper) ``` Secondly, a random forest of classification trees: ```{r} forest.wrapper = function(data, job, instance, ...) { library("ranger") mod = ranger(Species ~ ., data = data[instance$train, ], write.forest = TRUE) pred = predict(mod, data = data[instance$test, ]) table(data$Species[instance$test], pred$predictions) } addAlgorithm(name = "forest", fun = forest.wrapper) ``` Both algorithms return a confusion matrix for the predictions on the test set, which will later be used to calculate the misclassification rate. 
Note that using the `...` argument in the wrapper definitions allows us to circumvent naming specific design parameters for now. This is an advantage if we later want to extend the set of algorithm parameters in the experiment. The algorithms get recorded in the registry and the corresponding functions are stored on the file system. Defined problems and algorithms can be queried with: ```{r} reg$problems reg$algorithms ``` The flow to define experiments is summarized in the following figure: ```{r,echo=FALSE} knitr::include_graphics("tikz_prob_algo_simple.png", auto_pdf = TRUE) ``` ## Creating jobs [`addExperiments()`](https://mllg.github.io/batchtools/reference/addExperiments) is used to parametrize the jobs and thereby define computational jobs. To do so, you have to pass named lists of parameters to [`addExperiments()`](https://mllg.github.io/batchtools/reference/addExperiments). The elements of the respective list (one for problems and one for algorithms) must be named after the problem or algorithm they refer to. The data frames contain parameter constellations for the problem or algorithm function where columns must have the same names as the target arguments. When the problem design and the algorithm design are combined in [`addExperiments()`](https://mllg.github.io/batchtools/reference/addExperiments), each combination of the parameter sets of the two designs defines a distinct job. How often each of these jobs should be computed can be determined with the argument `repls`. 
```{r} # problem design: try two values for the ratio parameter pdes = list(iris = data.table(ratio = c(0.67, 0.9))) # algorithm design: try combinations of kernel and epsilon exhaustively, # try different number of trees for the forest ades = list( svm = CJ(kernel = c("linear", "polynomial", "radial"), epsilon = c(0.01, 0.1)), forest = data.table(ntree = c(100, 500, 1000)) ) addExperiments(pdes, ades, repls = 5) ``` The jobs are now available in the registry with an individual job ID for each. The function [`summarizeExperiments()`](https://mllg.github.io/batchtools/reference/summarizeExperiments) returns a table which gives a quick overview over all defined experiments. ```{r} summarizeExperiments() summarizeExperiments(by = c("problem", "algorithm", "ratio")) ``` ## Before Submitting Before submitting all jobs to the batch system, we encourage you to test each algorithm individually. Or sometimes you want to submit only a subset of experiments because the jobs vastly differ in runtime. Another reoccurring task is the collection of results for only a subset of experiments. For all these use cases, [`findExperiments()`](https://mllg.github.io/batchtools/reference/findJobs) can be employed to conveniently select a particular subset of jobs. It returns the IDs of all experiments that match the given criteria. Your selection can depend on substring matches of problem or algorithm IDs using `prob.name` or `algo.name`, respectively. You can also pass R expressions, which will be evaluated in your problem parameter setting (`prob.pars`) or algorithm parameter setting (`algo.pars`). The expression is then expected to evaluate to a Boolean value. Furthermore, you can restrict the experiments to specific replication numbers. To illustrate [`findExperiments()`](https://mllg.github.io/batchtools/reference/findJobs), we will select two experiments, one with a support vector machine and the other with a random forest and the parameter `ntree = 1000`. 
The selected experiment IDs are then passed to testJob. ```{r} id1 = head(findExperiments(algo.name = "svm"), 1) print(id1) id2 = head(findExperiments(algo.name = "forest", algo.pars = (ntree == 1000)), 1) print(id2) testJob(id = id1) testJob(id = id2) ``` If something goes wrong, `batchtools` comes with a bunch of useful debugging utilities (see separate vignette on error handling). If everything turns out fine, we can proceed with the calculation. ## Submitting and Collecting Results To submit the jobs, we call [`submitJobs()`](https://mllg.github.io/batchtools/reference/submitJobs) and wait for all jobs to terminate using [`waitForJobs()`](https://mllg.github.io/batchtools/reference/waitForJobs). ```{r} submitJobs() waitForJobs() ``` After jobs are finished, the results can be collected with [`reduceResultsDataTable()`](https://mllg.github.io/batchtools/reference/reduceResultsDataTable) where we directly extract the mean misclassification error: ```{r} reduce = function(res) list(mce = (sum(res) - sum(diag(res))) / sum(res)) results = unwrap(reduceResultsDataTable(fun = reduce)) head(results) ``` Next, we merge the results table with the table of job parameters using one of the [join helpers](https://mllg.github.io/batchtools/reference/JoinTables) provided by `batchtools` (here, we use an inner join): ```{r} pars = unwrap(getJobPars()) tab = ijoin(pars, results) head(tab) ``` We now aggregate the results group-wise. You can use [`data.table`](https://cran.r-project.org/package=data.table), `base::aggregate()`, or the [`dplyr`](https://cran.r-project.org/package=dplyr) package for this purpose. Here, we use [`data.table`](https://cran.r-project.org/package=data.table) to subset the table to jobs where the ratio is `0.67` and group by algorithm and the algorithm hyperparameters: ```{r} tab[ratio == 0.67, list(mmce = mean(mce)), by = c("algorithm", "kernel", "epsilon", "ntree")] ``` # Example: Error Handling In any large scale experiment many things can and will go wrong. 
The cluster might have an outage, jobs may run into resource limits or crash, subtle bugs in your code could be triggered or any other error condition might arise. In these situations it is important to quickly determine what went wrong and to recompute only the minimal number of required jobs. Therefore, before you submit anything you should use [`testJob()`](https://mllg.github.io/batchtools/reference/testJob) to catch errors that are easy to spot because they are raised in many or all jobs. If `external` is set, this function runs the job without side effects in an independent R process on your local machine via `Rscript` similar as on the slave, redirects the output of the process to your R console, loads the job result and returns it. If you do not set `external`, the job is executed in the currently running R session, with the drawback that you might be unable to catch missing variable declarations or missing package dependencies. By way of illustration here is a small example. First, we create a temporary registry. ```{r, message = FALSE} library(batchtools) reg = makeRegistry(file.dir = NA, seed = 1) ``` Ten jobs are created, one will throw a warning and two of them will raise an exception. ```{r} flakeyFunction <- function(value) { if (value == 5) warning("Just a simple warning") if (value %in% c(2, 9)) stop("Ooops.") value^2 } batchMap(flakeyFunction, 1:10) ``` Now that the jobs are defined, we can test jobs independently: ```{r} testJob(id = 1) ``` In this case, testing the job with ID = 1 provides the appropriate result but testing the job with ID = 2 leads to an error: ```{r} as.character(try(testJob(id = 2))) ``` We ignore the error here, and just assume everything looks fine and submit all jobs. 
```{r} submitJobs() waitForJobs() ``` After you have submitted jobs and suspect that something is going wrong, the first thing to do is to run [`getStatus()`](https://mllg.github.io/batchtools/reference/getStatus) to display a summary of the current state of the system. ```{r} getStatus() ``` The status message shows that two of the jobs could not be executed successfully. To get the IDs of all jobs that failed due to an error we can use [`findErrors()`](https://mllg.github.io/batchtools/reference/findJobs) and to retrieve the actual error message, we can use [`getErrorMessages()`](https://mllg.github.io/batchtools/reference/getErrorMessages). ```{r} findErrors() getErrorMessages() ``` If we want to peek into the R log file of a job to see more context for the error we can use [`showLog()`](https://mllg.github.io/batchtools/reference/showLog) which opens a pager or use [`getLog()`](https://mllg.github.io/batchtools/reference/showLog) to get the log as character vector: ```{r} tail(getLog(id = 9)) ``` You can also grep for messages (output suppressed in this vignette for technical reasons): ```{r,eval=FALSE} grepLogs(pattern = "simple", ignore.case = TRUE) ``` # Workflow ## On the Local System 1. Create a Registry with [`makeRegistry()`](https://mllg.github.io/batchtools/reference/makeRegistry) (or [`makeExperimentRegistry()`](https://mllg.github.io/batchtools/reference/makeExperimentRegistry)) or load an existing from the file system with [`loadRegistry()`](https://mllg.github.io/batchtools/reference/loadRegistry). 2. 
Define computational jobs with [`batchMap()`](https://mllg.github.io/batchtools/reference/batchMap) or [`batchReduce()`](https://mllg.github.io/batchtools/reference/batchReduce) if you used [`makeRegistry()`](https://mllg.github.io/batchtools/reference/makeRegistry) or define with [`addAlgorithm()`](https://mllg.github.io/batchtools/reference/addAlgorithm), [`addProblem()`](https://mllg.github.io/batchtools/reference/addProblem) and [`addExperiments()`](https://mllg.github.io/batchtools/reference/addExperiments) if you started with [`makeExperimentRegistry()`](https://mllg.github.io/batchtools/reference/makeExperimentRegistry). It is advised to test some jobs with [`testJob()`](https://mllg.github.io/batchtools/reference/testJob) in the interactive session and with `testJob(external = TRUE)` in a separate R process. Note that you can add additional jobs if you are using an [`ExperimentRegistry`](https://mllg.github.io/batchtools/reference/makeExperimentRegistry). 3. If required, query the data base for job ids depending on their status, parameters or tags (see [`findJobs()`](https://mllg.github.io/batchtools/reference/findJobs)). The returned tables can easily be combined in a set-like fashion with data base verbs: union ([`ojoin()`](https://mllg.github.io/batchtools/reference/JoinTables) for outer join), intersect ([`ijoin()`](https://mllg.github.io/batchtools/reference/JoinTables) for inner join), difference ([`ajoin()`](https://mllg.github.io/batchtools/reference/JoinTables) for anti join). 4. Submit jobs with [`submitJobs()`](https://mllg.github.io/batchtools/reference/submitJobs). You can specify job resources here. If you have thousands of fast terminating jobs, you want to [`chunk()`](https://mllg.github.io/batchtools/reference/chunk) them first. 
If some jobs already terminated, you can estimate the runtimes with [`estimateRuntimes()`](https://mllg.github.io/batchtools/reference/estimateRuntimes) and chunk jobs into heterogeneous groups with [`lpt()`](https://mllg.github.io/batchtools/reference/chunk) and [`binpack()`](https://mllg.github.io/batchtools/reference/chunk). 5. Monitor jobs. [`getStatus()`](https://mllg.github.io/batchtools/reference/getStatus) gives a summarizing overview. Use [`showLog()`](https://mllg.github.io/batchtools/reference/showLog) and [`grepLogs()`](https://mllg.github.io/batchtools/reference/grepLogs) to investigate log file. Run jobs in the currently running session with [`testJob()`](https://mllg.github.io/batchtools/reference/testJob) to get a `traceback()`. 6. Collect (partial) results. [`loadResult()`](https://mllg.github.io/batchtools/reference/loadResult) retrieves a single result from the file system. [`reduceResults()`](https://mllg.github.io/batchtools/reference/reduceResults) mimics `Reduce()` and allows to apply a function to many files in an iterative fashion. [`reduceResultsList()`](https://mllg.github.io/batchtools/reference/reduceResultsList) and [`reduceResultsDataTable()`](https://mllg.github.io/batchtools/reference/reduceResultsDataTable) collect results into a `list` or `data.table`, respectively. ```{r,echo=FALSE} knitr::include_graphics("function_overview.png", auto_pdf = TRUE) ``` ## On Multiple Systems Most users develop and prototype their experiments on a desktop box in their preferred IDE and later deploy to a large computing cluster. This can be done by prototyping locally ([`testJob()`](https://mllg.github.io/batchtools/reference/testJob) or submit subsets via [`submitJobs()`](https://mllg.github.io/batchtools/reference/submitJobs)). To deploy to the cluster, just copy the file directory (as reported by `reg$file.dir`) to the remote system. 
Next, log in on the cluster (typically via `ssh`), `cd` to the copied directory and call `loadRegistry("<file.dir>", writeable = TRUE)`. This function will (a) source the local configuration file so that you can talk to the cluster (verify by checking the output of `reg$cluster.functions`) and (b) adjust the paths to the new system if argument `update.paths` is set. After loading the Registry, it is advised to test some jobs again with [`testJob()`](https://mllg.github.io/batchtools/reference/testJob) before submitting all of them with `submitJobs(resources = list())` (remember you now need to set resources!). After some jobs are finished, the `file.dir` can be copied back (do not merge with the previous directory!) and loaded again with [`loadRegistry()`](https://mllg.github.io/batchtools/reference/loadRegistry). This approach is totally viable as long as some general rules are followed: 1. Make sure you have all packages installed. Package versions can be synchronized across machines with [`checkpoint`](https://cran.r-project.org/package=checkpoint) or [`packrat`](https://cran.r-project.org/package=packrat). 2. Test jobs on the remote system prior to submit to ensure that paths are resolved correctly. 3. Make sure you have set the cluster functions in a configuration file, and stick to one backend as long as jobs are running. 4. The status can only be monitored on the remote system (for obvious reasons). 5. Partial results can be inspected both on the remote system and on the local system. For the latter, you need to copy over the **complete** `file.dir` first. Overwriting/merging directories is not advised as this may lead to inconsistencies if you added or removed experiments on the remote. If you have to merge, use `rsync` with option `--delete`. Load the registry locally with [`loadRegistry()`](https://mllg.github.io/batchtools/reference/loadRegistry) and collect results. Do not copy back and forth. 6. 
Avoid accessing the `file.dir` with multiple sessions simultaneously. This includes accessing the registry via a mount! Simultaneous access may lead to inconsistencies and missing results. batchtools/inst/doc/batchtools.R0000644000176200001440000001511013606067061016426 0ustar liggesusers## ----include = FALSE, cache = FALSE------------------------------------------- library(batchtools) library(data.table) # for %chin%, data.table options(batchtools.progress = FALSE, datatable.print.class = TRUE, batchtools.timestamps = FALSE) if (identical(Sys.getenv("IN_PKGDOWN"), "true")) { tmp_dir = fs::path(dirname(tempdir()), "batchtools-vignette") if (fs::dir_exists(tmp_dir)) fs::dir_delete(tmp_dir) fs::file_temp_push(fs::path(tmp_dir, letters)) } ## ---- message=FALSE----------------------------------------------------------- reg = makeRegistry(NA) reg$cluster.functions = makeClusterFunctionsSocket(2) ## ----eval=FALSE--------------------------------------------------------------- # Sys.setenv(DEBUGME = "batchtools") # library(batchtools) ## ----eval = FALSE------------------------------------------------------------- # cluster.functions = makeClusterFunctionsInteractive() ## ---- eval = FALSE------------------------------------------------------------ # work.dir = "~" # packages = union(packages, "checkmate") ## ---- message = FALSE--------------------------------------------------------- reg = makeRegistry(file.dir = NA, seed = 1) ## ----------------------------------------------------------------------------- piApprox = function(n) { nums = matrix(runif(2 * n), ncol = 2) d = sqrt(nums[, 1]^2 + nums[, 2]^2) 4 * mean(d <= 1) } set.seed(42) piApprox(1000) ## ----------------------------------------------------------------------------- batchMap(fun = piApprox, n = rep(1e5, 10)) ## ----------------------------------------------------------------------------- names(getJobTable()) ## ----------------------------------------------------------------------------- 
submitJobs(resources = list(walltime = 3600, memory = 1024)) ## ----------------------------------------------------------------------------- getStatus() ## ----------------------------------------------------------------------------- waitForJobs() mean(sapply(1:10, loadResult)) reduceResults(function(x, y) x + y) / 10 ## ---- R.options=list(batchtools.verbose=FALSE)-------------------------------- res = btlapply(rep(1e5, 10), piApprox) mean(unlist(res)) ## ---- message = FALSE--------------------------------------------------------- library(batchtools) reg = makeExperimentRegistry(file.dir = NA, seed = 1) ## ----------------------------------------------------------------------------- subsample = function(data, job, ratio, ...) { n = nrow(data) train = sample(n, floor(n * ratio)) test = setdiff(seq_len(n), train) list(test = test, train = train) } ## ----------------------------------------------------------------------------- data("iris", package = "datasets") addProblem(name = "iris", data = iris, fun = subsample, seed = 42) ## ----------------------------------------------------------------------------- svm.wrapper = function(data, job, instance, ...) { library("e1071") mod = svm(Species ~ ., data = data[instance$train, ], ...) pred = predict(mod, newdata = data[instance$test, ], type = "class") table(data$Species[instance$test], pred) } addAlgorithm(name = "svm", fun = svm.wrapper) ## ----------------------------------------------------------------------------- forest.wrapper = function(data, job, instance, ...) 
{ library("ranger") mod = ranger(Species ~ ., data = data[instance$train, ], write.forest = TRUE) pred = predict(mod, data = data[instance$test, ]) table(data$Species[instance$test], pred$predictions) } addAlgorithm(name = "forest", fun = forest.wrapper) ## ----------------------------------------------------------------------------- reg$problems reg$algorithms ## ----echo=FALSE--------------------------------------------------------------- knitr::include_graphics("tikz_prob_algo_simple.png", auto_pdf = TRUE) ## ----------------------------------------------------------------------------- # problem design: try two values for the ratio parameter pdes = list(iris = data.table(ratio = c(0.67, 0.9))) # algorithm design: try combinations of kernel and epsilon exhaustively, # try different number of trees for the forest ades = list( svm = CJ(kernel = c("linear", "polynomial", "radial"), epsilon = c(0.01, 0.1)), forest = data.table(ntree = c(100, 500, 1000)) ) addExperiments(pdes, ades, repls = 5) ## ----------------------------------------------------------------------------- summarizeExperiments() summarizeExperiments(by = c("problem", "algorithm", "ratio")) ## ----------------------------------------------------------------------------- id1 = head(findExperiments(algo.name = "svm"), 1) print(id1) id2 = head(findExperiments(algo.name = "forest", algo.pars = (ntree == 1000)), 1) print(id2) testJob(id = id1) testJob(id = id2) ## ----------------------------------------------------------------------------- submitJobs() waitForJobs() ## ----------------------------------------------------------------------------- reduce = function(res) list(mce = (sum(res) - sum(diag(res))) / sum(res)) results = unwrap(reduceResultsDataTable(fun = reduce)) head(results) ## ----------------------------------------------------------------------------- pars = unwrap(getJobPars()) tab = ijoin(pars, results) head(tab) ## 
----------------------------------------------------------------------------- tab[ratio == 0.67, list(mmce = mean(mce)), by = c("algorithm", "kernel", "epsilon", "ntree")] ## ---- message = FALSE--------------------------------------------------------- library(batchtools) reg = makeRegistry(file.dir = NA, seed = 1) ## ----------------------------------------------------------------------------- flakeyFunction <- function(value) { if (value == 5) warning("Just a simple warning") if (value %in% c(2, 9)) stop("Ooops.") value^2 } batchMap(flakeyFunction, 1:10) ## ----------------------------------------------------------------------------- testJob(id = 1) ## ----------------------------------------------------------------------------- as.character(try(testJob(id = 2))) ## ----------------------------------------------------------------------------- submitJobs() waitForJobs() ## ----------------------------------------------------------------------------- getStatus() ## ----------------------------------------------------------------------------- findErrors() getErrorMessages() ## ----------------------------------------------------------------------------- tail(getLog(id = 9)) ## ----eval=FALSE--------------------------------------------------------------- # grepLogs(pattern = "simple", ignore.case = TRUE) ## ----echo=FALSE--------------------------------------------------------------- knitr::include_graphics("function_overview.png", auto_pdf = TRUE) batchtools/inst/doc/batchtools.pdf0000644000176200001440000122021113606067063017001 0ustar liggesusers%PDF-1.5 % 46 0 obj << /Length 1603 /Filter /FlateDecode >> stream xZKs8Wpdrve2N!@cVm!:s! u_wK֛v ,!J\n-,a乖>"\XfT؉S32f;"1.W#NP3F:M7ѵ*S}uԸ,/7+$x7roZ.AK_WLH^7~3/+fԇX 0&wRQ`dm|l3 0'pPDp%#O_k qX7X\S?ONj0pP5LR}2T.D?]Qs5‹uC^دW+Yyq\h\2X ylJX0$,A?(({G$~^PA. 
F+,Mu,eZ'ǽ]X}݉핎ʑ ǂoe?^d9UՊ|9S"l\ۛbʡԑ-h?So=]\$xeG+9 C6h^o\ 7fqjOu׳#]s{~Sԁn?o;?%tY] xasE7Uf] 7O.7;3 \U-f`'lz,n\X[Xv99vLߎ/ { d"ZqP~j#u^"XU{n&ؓMQLQ&^Yz2x(TocՀɣ}_I+ȕcKѪx/[5q[јQX/Me,\bF`|:uI2{wJapS&"aH!Pf3Y K]|/B,H^j-*r‡|;7.bkU5Pc$EÇiQ_k}1UvmʠΔFpi VZE}Kxn*8 U.ێ:qF> stream xZK6WR{Ty`_CSNf<9{H5E*|x<{߾ r$9&)!A *\}$\}~٫$_"Odݮ2-4YQ(Bn7w{Wn\Uxk")R;Ǜ+?ُǫkx4?)h"|ݵR*4v꺛m^^W9싾l9P9/J#hRDa= W`k` >A74r,HC/eO<' :fNLO3Y۫(5= 4^ }[Uا5KHv/TvXM{IZIp(5^EN>rdtNyu)4ٽ48Roo2W˿ۙO[/Oa\)(;]V(:U0?[T 3퓟~/3Ҟ? -$`=5XY&,"D(iوGԡ`P X zcU$T&7͍ٕ`  ʼ 'ɤ#ֶYߙ:u!\!0$Njbcۥ3B*7QlB 2_%쀯7oBRAa2Vl$4e&@aͣ!1JmM(,Qʎ;S 9cjEAƷMKnB"3M0Ʀ* l#CFo6ɜp. /^8>7 (̋Q*dQK#Yr+*Jn3\ZJ &nSI5,/߇Ju9źmox:d0UFX@disNv9de6a{W6[59 mNHeh*3rG=IwB-_oa)̞}>␡m_6nJdn7άr'iQNZ$) Ǣb܁ڭh@xSHS4Ѓ)8DS)"\{HcYAt sW!ϳCyOYfj"sQ#⑹5' Ȁ?mS[|tL/)TOϳٴhC|؀'¯&{X7UyJD)_&xzeيeWs9{@Yi*+sgҊR:N!6=P^rr:i/C/nXYnx]%!"Wcqp7- G(k3zA$]a— )Xn:1f3TtPɱ0c8/ʚ7yi;6]iXѲLoxcPJ^TL[C{)ﱇIrEit&Wu973;![x&gMglVۆ)OVt7 XF+f]zsİpo`N@ZuOf- 2+zz+#FX7u [(w4"5H!B9κaЎQT\&B̤ jM9s7/!#J3%[抲:eW񳽆XTlhj(to#}Q&tM?ì&A b8a Ơ1gy8,t4{abLȳt%҂aM]3'ky-9nd`LH5Dyt}FOq^ \%v|uc͞D*E(d mOEVrl*6fs9=n{ +@De9AH(h2rk2mpBۇڹ=>sT\lS|xУ1 pm1TLjl^{|miޝәTo5Nqcɓn L2sa֖ޗUdziɤJ IjS3pڜ?0voja`a:tэԓVr1j6mo.1 H +H&"?|(Tq PZCx]sP[VF:um&/SoSH9<&/ITZ(t> R{l-#\?_F/S-RD .rd yJ/!t𺆷śM/ۀ0'"4|TFCǢeeN0Cawl׌J0\媀* 9EȋI։1ZL22"ē"Q- @v}=+ B:QYrh0hZ0tlmDDp}sA݋Xc~Z ("8`D ">"AUe'# N|B ^1Ŭ$x`fiT8rDM> +lX,A,k:fa_s/)d Tt "IG0~5HG̙\d.r]vnx-7K#TS! jJVcU2r͗oʝ_n[))U2 vHR@5c)re>M 5Xݟ\G90j,׏.bT#9~}Úmcޝdbsv)~Q:NPԌsEi N\ܯV4[EFϏ*():PK?N⯣TJ4Bu,OD3~(;ʺ 0܎,GҀ,^%Z~'ȪIO5> stream xڵZY~N0Ѽ fL0b]H/w"/ yyE,j˅/i*]rͦ򮨫?wf{ҀXoGo]67q6ڍIw(Zݭa;4z*+i\m{>yQvѧ q #մYO[]c؂+_> ^B[?Kw(:YzxM'V w @)=qE^{^ۯQWвlKQ?fܜ>Ad$Ttw\W) }O JT͢ŁrזJ?tvr,p,iŚs(ЎƩs+Z`0L߹_Oٷ!a=7lM3Flu(퍟Y}̧7SWs6_0I2ȴ,yZ4׼|}FdW%" F)` T Ո=WM FG@8 2K4MYV?WWhjΒyִP @Obz0 ?K!4z1k轕8ɸ@&DΚQH;~[ֈ#ڃ΁2=h[F Vd!F58zv0ZQ4&Ge6F'-h riˀÀErACB;CYc;*\Q A4GB$D Zvq-@xaKY[M6`DdE7g+UXf#gE맢q`VKZ˷W9PLRC;<~@šrA6k)wlRrCae׃Nmk-׼Ζܚ{궫 =?YVL"T0/-hÈO\XVv8s g8ΚD{'nnQ(^d_I2O_J)L8f~}ڟX+i^? (;#Ϡ\;-G^Zdeh. 
#2ȕ >hIε5ɾc s}{Џdj= 3'h|1lxA#|nK11mQ2־5_h }OW-5Q_"b^)1=8P,*CD`9A3Kʠ"׏@/& 2Dłjqxѫy(jC'"y[r[paL>LU?fyq4x_Iڇ,F͏;SLOf3DRċ {ghg@UT2'%בesCxLvd>r!LNOSJ"3#;_u|{?f&hy>L Jſ,@ 2cܭ+i|ڣa>%)m9GN ‰!%\uf{2h>Os*1Os3 fLÏ>I28  $ VcJ$^n˿Haaǽ] x7 t󷆗gZz._yCn3/=:I¿q#AÍ@ȟyr in\_n<Ш ݎdvơQ(hQbEogx; Pѵ?_ *pSْqyɋaaJ⫣!E0`D}71Ӵ<7E&>_mzuOPD9i%Qfj̏siY 7qD=nz;*g$XAN{z߷<|؍9oT*$pݳTrn endstream endobj 2 0 obj << /Type /ObjStm /N 100 /First 844 /Length 2970 /Filter /FlateDecode >> stream x[moG_1:;$I_Т=;.st%C)ohvm8]fCp8#Fx`$MLlx2ŐKX?Ȑ0#63HJ?4焋dBE\01 '3f . 9h$d$)lx/x'̄O(b|E&UN|hBɄM(,DELL 3:3x1q39q3DM.z10S3LuI-:¾%=Kh'gft߮My&X[G{>7_}g?.{2?gݣjۯWG8洿R;W~ׯ3 2g &rpZ3,b}bTXgځځۡJ5 HPA5*ܨp 7*ܨp 7*ܨp"4*ҨH"4*ҨH"4*QoTޙ:Ed[Xu_7gN{}}=zFjtk%rڱ(6Sİ=v,jl lrb1dS(7!ӳ;]Tlvf6l\d+LFi^cg"N?T,e Ì#"[Ҁp2ָhBP c<9I*14h]2=8r -"&lRE5#c{>h}.=E1`/8%[?d4l@"iAӻQ$=c>Z`nH-ar>9ܹ"<)MJ%AHMfAp GL% #g9HJ1yz>Di9fޙ -X:$Px)`{S!`% ) agg1(#Tw0m*h`vTAt볻ף՗]w\]v^^u=i]_W~zU)]}~HgHH)d;q[?Bmz- tf' 8' $Q!أ1$$Є|Z.c5+ܝz~^ngNQ`YVkaF&k >a/pO_ BNAFt I2F)ec1cvVE_v5 ݕNֵB.p)Ww+-y;\!ߨ]mX_ M`}4AFy"֊dj9tw*B!>\1F-@)j^@Xp}|ٯ~6!H˛ rʇ r>Qg9L[EՉ!1N%aB{" zQ>V@1XP!Dz;!*8\*;x K!!e^.` eG -7b2 JZU{^mku n_n<`);D ~aw?q]w9Y0 =_m {o{w ww6&4r&6,_nSU_iJRҨF4*%ݰ1؜yY8cH ӟ1 ^xtK;kt #s?f@$hSl hS*X_uYʾ^,߅B{p ;4<I ya?8NGN, endstream endobj 154 0 obj << /Length 3236 /Filter /FlateDecode >> stream xZKs6WF &$g3gsHrEJfF"$5vLv2U{I_?(QE_\|vT9K&HDE2 nate]]D]gױ/5<vm֭ﺺ޵4AX8(™Q&⛻8M[pWD]vs-ۮ Hl@ Crw+a8\PJK<>;z_U&{[߶O<,)Ee)N#]YWm/Xu^uIT]Q68YkX]++C]tT־NHP R:ԃTӤαQHp7 ]YZY(N9=MS4UYsl',&2̪#[TZi{QՐ2hi.a=6E틜\\ g4 ̤*X/~# rx6X&$1?822SVFq扣Nj3NtZN(&$<Wl5?eٶX:BR!_t$y¸6~NSlq8M1Yd\7KQ<[EdB/i֨dfۋG n,ֻ8 uID:JF%JL>6M, /`ZZ0Ǖsu[B3#kDqz1$W,,b&<Q,)hfa}`$TLB!,Dc`-4@趥8a2?nEK+gO|/ݶB|~r`o8Wvfx.0wTgg>X֟R-)kKC60"dsf vu bGJOnBMe`1, nVILQϊL3C5 e)q(%if$pNffś۲r7E;d1gTO({RODG>)ٻ~1ϞUHqQ]4[BŦEQžƖJ2C?J-PEᗇCScP3 u!dLQpk.GpOTC]22t2؄*ጞ14mkRiBo]WW{rOI'P(Jl"RH7T:]}I@"TnzmP<YjWۯfk=l մ*Z`dv7M/؎F&Lƶ1`TxmwqUmf+ۆzh.!z7΍8R8CZyS_;Hi(!\9$u j+ɳ-])uS؎N;f^شh'eSFMlqYѸ'N]q5ws96מ\;SpӸlP+Yñ  I-/wnl0< 6.4ƽNHA~\Qӛ$ݫg/; QSlݵ Lq4yBq @%#wKx+}8 u3 :OiGG,w 
a+#-=v/R(׵MF?zF3Jj6vTX%PC5~NaP˜bY)41RETh=1x)+A&$۬.w,aBdjͤ'6wyiJ_,@;ؚT/ffbkn}0l"L5Ҵ;ǒ!;k[ *kgbA^ی)rj- Nqq>6q;sTS"ۋpϖkHn.C:rex<Ù*p7ޣ}ڗTTPw}~C%uv㵮[aZ](骫&;zֹCb}llv[LVt'z*C_KqBm#"YP/!>dUy\_^>!H;deOz7uHAqI@(e\iI@{W}(Ů6X+^|l a5ޘ{|L)%|Q_C ƄπN.ypEC?Xl 5 ~hVdr*6(!X]83)KO'}?oVxVMvnkk9)HLiaxֳ]bbm!$G>/5 ĥ ka`g2 0WL,?-STΩQcG2i_\ud.!OD3%t͇6M悅5ȆF]u{0GGT!6ø.Υ'\-Ԕ/x23=#B:PY2cw:2$lS'O _ 2c .7VY endstream endobj 170 0 obj << /Length 3051 /Filter /FlateDecode >> stream xZs۸_MtkA6w7%C.IHn{w E(lIbۅd9ᓟɲDj=]^?vևg112cRMfg' Ʀ԰$Zvxc?xH3\L9Iam-^luus6RG?T)3㒖 z^(t ~AԈ@ EUʞӱ`isKpd,f2eȲ4_Q0egsuqsL,%J<өX[Vky`Y?eXˀJ44M z>Uhf2/=rߠ=g:8:'ojCT&! P!5Ӱ(&F_>%x9PGGf Dy)l9Z2G˓wiOF<7Bnف Z`؏ NOϽ`tz&T$*#}}:ml^:9쎆o?\[;SLj}c%4e)13>f'?B 8 !8Gx LD MF ʷ5 ,KeuX0yG`ɑ(/UWߺf8x|M_Gw?-5|F.bu)<M5H0|tٱwR\;H|Jwx (( j>l_d}>Ieu_QѶA| U^d뫽k|GW5TV?#G2(Sxm ͷd o1)@TbڏBfJ]ܙdh.Aᡛ? wQNio.ڑsՈ\s 7lާS|Myȥ Ȟ܏F~&7[pS$-d +YMvk]K;wemc.}\u_mE+Vep-3y9\A܌i|^/oڋY= CJ mS] 4uZi=Tk'k⃠J& ȝ{4;]]6{^bR`6ҰXkbrVTw|mhդM`sWD[x B &XLJHp>YCIs< ){qGuxP0z`L6^Wz1G兀Kv͘Y%RXՆ8:P-) ˸tv6,LNLl25xCA1> stream xZoCsŗ C-nqWpw(d[+K>I$(wJc;q6X$!g8 N'/BXh=m'dž?u/8N$JY$I:Yl^~Kx9pbM|zK)'y=/Fr4uVX"drdEnvջ:LSi: FBdFKg?LgZW;>\כYI\?(L }5 E S1Gn:uV||$߇[Y3!{BmݖwDX8⿄^, s=;빿[2}G= 0У=e-}1K]3!YG,I2HÚb2T 10F)6rO(fi˪ծZtE}ҞgoI[VbPƘ?$coӾ1ލ,|,J{R/'=/F⤲VΥ~|ˣnaDo:?3)n*a#krټ]GҠ[g7j7ۋ -2z>NEV[M:Yć]#irI%9OnjTR ~TgjA3u,8yt|VIӺ@178X vw{>]5tytࡂ[ǣB(8O,GTN< ы=zgдw&aR3p%vMt :3p0qi=% z(ȅpJpJVHj1LdrNx>I swagɍ|{jU~8'0"මnv <#Wo?(SU%y*(U 1؎ A)LHX .JTdfmi-B,7۬6y﬏9P1`igay̮M选poj>$}2ـ`^# k|[4m7876h^;0cb"Ŀd&+D͜6?w5yMR9nE0-Jy[  r:HBTYo\wZAAE8RWsαQ)&qoy NpPA_{-s%OtÖEqsMYQ !{=B1]Xmn.g'X893`,LI'>y9xꑢwDO^D>wk*H\FdbWR#MG,JH !X"Nt5 zuJAk bґxi+5ƽsppuMq{ ``e`vb؀ M3BWnlq&E4h{όmǎb ׻ B!IlQI@ހ ě*+bgH'@lt7eoADp)rOO^F,$ΟGy1ES- yhLijEXSRf0/:ZϝYfKA~vjǰEw- jRՊJUdw10%_ٍ u$`WܠU2u{6)c6˓򖸯%"|S+QӼ1q?FA3b.9ByGU* ^8~(r~n j;L ҏ UOS!mIL7))TѽP\0Gi8B"ːW7Z8C+$$;*9(#WE+VL_~;`.c@I]벰X pF3>nD_Tcw(tEHWUSô<މ[ǑnYVƟS6?e^p5g}C*i<-g|Dq謇*Svg{=cAMfd{Ə0I26O9R endstream endobj 192 0 obj << /Length 2851 /Filter /FlateDecode >> stream x]o}qW 
s/}z{=6EAGwͦE;C%Qv.I 9gYk?7,OD2;b2-gbkSXOޟԃ|'ILXiORʨ4uణ|uk\n;?7ÑtOcu44h1LȆTh@z|pa~8{''99 s=[lT*a~6)gfP$0.r[ CUHrŸNˢ+&TG΂1W K8 y0oʦl )ٻ:_; Ch;4a84 Lo˜*6ӈL3nrovwnkЋZlك2OÑGQ=ICB=[~n,koe{t m]^~fZȄQTX AX% <6zjX]YW(k׺*]KG'sE4|<:*ֻ~4vG`o}Nhz"4-;\Fk#a4Ztv.50J$KDLRY@˶EӅH,;eO[ ENaۢm kY 0Yi ;Td -8I>Xۏӝj/*e&ۛO[4rc*Fxوr2kZh,Cuj o 84)ʚ(´>6}ܵ~E{AF^M]nހP#}b 7pBm&$lp"mR&ՏLN%C BZ -͵4$`HVO7(2J쀥 Wܔ6ei@4W?/ ޵wI)Ʀ4$PΓxtv7-3;mM)&#ig$-1n- hxqTlЎPј魼Yz! >gZ2BgU~&^r /#Za+ :7y.< zxk8gBDp͒4~k b6 IVYA8^21J4XFoN2ЗswՠƎ&b6vip0zbH\!^Ɓ5g`` aάtFf6ϵk>Z4h?UW]@k0FN|7_)K2f", W[sdNL'R lEL+ѓWXnjDC0o5f \x+N4LπCr$gd:7Y7' m{|ļT' ?zAT{7 (̐~ܰbp5qݖdB}Wߩ<-O.c?Gjgs3*s"5Egn=i뮷^!yCzt$*2vwq3y=Z2HQTu&20 (ޫw8PM̑|֊ DLn)n ӳ\X(RT|rKmOJ\hs\J2|Opڭ G ʕӬDZWvx][a̘HE&Wœxa13$Y&zy6eK{D|e(wH0y0s퉒 0 ׮|-jAUPW*x"]S~r\A$׺1&qZA4?Ã'1BgⳌ%5 !J]bح]d\tݔp8NFx_d,OKdɄ^yేT* !B~@cxFpY!:{x5Wjgեupk PY6'^dQ NKWVs|qopkjJ懋m>uf7>2[9e5HV\0d _&YCCJDlb\ܓ vބyspBT-$bџS[Z2D8ʚ\77"EA5~A?s6 9"~Hsk@, L_^al<*02gW}{oHp; ѐ1糹y+@BG endstream endobj 202 0 obj << /Length 2109 /Filter /FlateDecode >> stream xnF_A\B6%@%yK` {EJb¢|}fv)Kd+A [g쬹r7Wȗst|-Ws8ݛu:+_ {khf͍&U= #n[ш+l=fl^wqJ[;YA iiW *ӖP [%6ȩ i9ǂfej$βWL7X?ym-4^ k鈩IXh% ³ Fj00.O}K:!R;m)"ҧq#R#wkuZ6dYHsve@ Rm*Gb”l tR,:u<>c '9t>%rv(t~2{z6BN[?}E8P?m|EbF+@ɤc&AfEH$2'#2BV(Z;!&J|\BzW r_.ٔ磭 6FC c{)H/>`<_E> OFeБ`xbA00Ӧks)c~IVП.܂`td9F/!zGO2?z,ɖAPni=EFv3Ϙ[KPCO8ί~8F RzJmNw~Z?`ո~uȇPjSeUdq~`u < /0c]rg@ ՉZWD˿#֗]^pb.—Lh1UQ*q6M!ľ1~c1Azcc}m<ӝm.}{ɭnL>@[{BYA|b/>kڲ J[`*Ǣ{cM7@i! Âqi !/1?:{@gTLTCHY}0EJeN:Q4' cL4-!aQ*R&ahD:]Qo8@E$%]tRaO_\/׾l.tķsR'Hܚ}5\mWۂqTٶܕ}-F+[[ܴ(>Ѷ8ϧ/ɣ'@=P>ƇQpR0 <=}za&Dkm59~pA/YymfZ!ܩPEՕ-=L݇%o;qfe&zcQMG`^s] J"\+<|.Dn.Yx5䴒 \Hܫc;- ɵ/Vp۲+^avSG{#0? 
:vޱQșkNDŽ^f endstream endobj 195 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (./tikz_prob_algo_simple.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 206 0 R /BBox [0 0 489.212 139.805] /Resources << /ColorSpace << /pgfprgb [/Pattern/DeviceRGB] >>/Pattern << >>/ExtGState << /pgf#40CA.5 << /CA 0.5 >> /pgf#40ca.5 << /ca 0.5 >> >>/Font << /F8 209 0 R /F15 212 0 R >> /ProcSet [ /PDF /Text ] >> /Length 2192 /Filter /FlateDecode >> stream xZIoW2ު8 FBHjlG1hS_uu-6t^oBJ.`oa9%Q}Z!y2Yxo$֕`VgK4_\/f"sP+r<'IgGKa~:?kץ$ ޜQR\#WL6JlB)G"S,id\~?7GÎa[JVU44W9b7}PwźO9/X|1D+Ҭ"zX࡞c~z lN̳^_>ϧ>tᄰ6c&e]w5Dʶˆ$嫦@lD[,AL K\4`WA|P(o D0"xxsn1?ݏ~FDImYU.M'KWC_S(\B"h)zZrdp a(1!ο "h]cNa4Ǐ7@#,=4po"tl2+S=uW.Sqſ3 0}hMl34g?<~`?!FnP/̤ݑLwR:v,YBOI^aa/$+!sS<EL(H f,5%+_\>\04c;ݡg5 틋anuhC`aXhI;C|dARz:$mZ4`i),3X'05Lk;uk1jZ8vHt59@PK'6"nH\L!Q ӈJ,Tz L2}]@-.4TF%=%s.ɕd[E&}Ƴv(ڲ -=I\л[c4\ķbom.!̎B1"(uZ.Eno- sX7ǜ@ԇBWg-8k1X[AVtG FIrckDt$rk+Hol F$ ZXDT!Yu8ܓevTVλ12T `#lzU:l u+ ')ºˡ;^seN9XW# \J$|گ4T3B[};o1%sˎ&!<&#hF7$Hų4J6r*l/\TB\=A[<@Oekhc th:*(u䲯ʢqeV/4zP5jh c Um&}y;\ :h-joDΪ$mcIEX%!H֣w!!ثB(b`ZL੷PcZKVΙa+5D |[TBlU؜2v, 2Mͤ6wAV O A"yHQ-ynAp%WЁ@y*=],E3Rc:}1R*U:hL qk,Qc:)EYKP5FSaYYK΢ƱZ`m#c a-# I: ^jCHN>Sń#oxYWUhdq9X Sm]t 7LEN ;@hOԉʡba?:5վ:7k6bBW<3> stream xZKsܸWևp4X<teS]K*]R=Ph !}dYv6 F?nãmģw?\]|6MLθ2&zZkj-qUo޴Zpcr)zՏ߾MHk-2Ұ,ˈWketz_ CliHg %L@T19JG*kj㡥agʕ4Sv/*[,mScWl8=wp(aUn}\e2fubO~*,۽Yy;|)xq_zj ,Z M&E{rMe18=bTەqRoRUCnlc ǜA$`Qr%9}z*5; oe9v@#ÇN=d4.ۺP k'1zAque~*)+[*K8oWYzO9CIEoKze4JE0Ttïf?R̟h= D0)%Ͳh|bEy m3/xŭT^32֦Vn#)9ˍ::}S? y~E.b2ˣr)1+Ma胛Mf1gJoys$QyNB tã6M g?}FkZ*(!;Q 6R 7W~N1)(P&o( xB#?pD{r8Bb C~ IER(G|s_R,5:(8PyZi"JS }h~WZ_?H2 r S|@[-A[%I.~UTR ]`ه!_Yxǘr.BkjM;t1Cє|A&'ݙ<3DY()0F{{(L`P9'CqxPt%|`P#Fq<.W) &?sS,-u)sr.tJMfNA4Jq-rk*:0y[u۪! 
B=QH Tssӓ]q svG!z AkL!2P 2P/o#_&ܧܛ7y ͓^\z?&_/udXOC+çxtsdӚxҍJf{صj&H*mq2PCOc{CY|=6t#0B6ӄ^$8Tu5Tpwzf~.DʸFJvVBŻ԰^|KW 2wqүuIOR@_|%η5& 8$2l@ѐky1Jb n endstream endobj 234 0 obj << /Length 2071 /Filter /FlateDecode >> stream xYYs~ׯ@ys]M֛+QJmy50hE>=@ DՃD`fg8X8ߟn.~< EB6P4P#Tp3ҺNx”>ȲdNMVW7DdHRYN0b" &$D,,ۛqFP26^&bZ]G>pgq-@r_@;J"5M~c86V#Kp(Z+3'Q8Qmv[vF=xuR<J8G*lESQ-BQʀR)*`g u7 N8!LGVFzCIp{s6" ) fkguf/@ {SwA"ybR'D7&`J{0.<S3&AlSڗM]ΥC6hA5ρk ܾ'; ‡xA@⒴^13w^1 ZZ3 jntB" HJspxk,ccZeb~njiL\8lb)]2gqnG |dfS^2zyd w3D\&,qq,9&w4%*2inr~%HkfD(MWkeU*fY\U SYcK"] 7)SHy7.} #]lS FȉaBXC 9?D'|0v ڀIwrH ĭHO-f5$HFdC}L vwod<;~j r: "z=d<lw}*5 TnMqł7]$ٝ@`t d'C $%Z-φ܎J0 P„hm,wi^z}oVLj xD9H9Ƽ& !-" !#e <[LNbpE}\w63\[+6ڸӰ6؁u\ƫ0y?4d4]#x+S{in.lmyYQϯB='sTowuꙔV]Ӹ-v${*Φr)v'O<)=vׁT {i"+b^21$av[$< $hΗTԸI`G_إ~:=6n9=_&=)Tٕ}EQB(v㷤̓>'* WL=X&ϼ-;P @fuWZ:kH`Y}_YO*>K$.Z0݆3PSmx4x F%OѨyPGEglH ێ<!rCjkB;/e06/ج'wiDFtfWlEƒ!JtwGde*$<. u7>F/o,FNlpFdMAICέkX: ,c7'ͮu±h,N0vJ2@o^&nEs*E$zmPsشUe5 68Fc(0;T`\8(x%xqoªr<iUz-h#VX$)5WԀgFV5%ExRA+C E]i564~3s&^QseR˃#CzB"Y;/czr43[9d|Mp~:νB!!@-8g~e jᲵT˪`.&!z3Ih9eTם_g#w>FwQ`0,(tVp#XTE1a@CGp,so;%_aѠNCC{#Cr8P( (X"Ry]a0+$1g‰H2[c2 / .0x XF;cn  Jtp- p)*VrdRr>%/>(DcH; 576\?Ļy6`RS<ƞ)wwmL"FϲV3SSާG&$z/_LD5 sE&6G;Α':Zcgk (NYr&'9oW+Yq ;m?CV/qV}*\0֤让} 1]]-$#zi% EoRDL2Xeu|tnY| ,ϯio_1:;7כ~yDDI 8%5>96fiN] .Y}F-og@}VIvU%Ϫj|l_{`AL3vBG,P.oJ"H@><02Ӻ@;ÁPdIMR%)JߗQ„  P)>{.T.+ad,#,3(1I,L?m_|?qf^@ݦ\]]zl"-um⬤m7>'sbCp]D/:쫻ml\aȲ~Y9](zھ%Ԋ<6dHL7Yf;ݙzM[\'_4a;;5_A}T-e1Z]Tл[c`.~Ԣ|H(&P)$w> Co endstream endobj 131 0 obj << /Type /ObjStm /N 100 /First 911 /Length 3144 /Filter /FlateDecode >> stream x[mo_owA{|rqHCisa-m]dIX_g(ڑizYr8<3BZiHxp\g\N`Qo w'ȚQɏ (0E$l LA PYBpIY͠ LY5 ] Y.DEtK>o"c$|J;*)F>1$'6(&$XFDCfs1 :Dad0db$o+QҨ%V›ĤوdD2,_9(Z$Y `0+:=d̴N0+2O5Nd-!Ghe`-6K;qd) _:0/,eČʌ8B.̲e!0<Ԡ9LQY_$(6 < . 
s%*zOcp&]0]0-nA 0g7t\YK,Bmb7-hH7P/_GAzLt6{;謷-HK^7ѥ$=4ż @ݚ)&/Z&^(g(ZAmUۋ7BTc/λl9jG1l zٮݸ][^ixQ }nߊP 3fN={Ty}']k{TۣG=Qm.I=R?.Iە!z- e ide :ʄĂفA6zaA=\R{o~^HM9]lpǭZ5ۗtwX'edJNɅݕg %: Cty^^ZHRaTh۳ a EleJxϚr6@pjaI2B!rҲ$ޥ|\.~L^kiMsHr /q{ɮ,0a~s0kd$;ir^jʘ#m(JMOxH=c-@%jL˽zÎX&!H];9ÐNg94X0M!ɈAo 5\* Q-K%`D6EΖOyÂXoE@K&D7D(q\tG ?~Gywx,5GbuCϞ@/Pp;M Cd9M' D!7^ G;׹6 %@!\ 98 Kr/$.! r7҅%DBйΠt]Bfs>KI +Lg3(GH[&m)De.ag2(irzp2FvpDXFCm1L ObbgTL=E3Y!j:4 Qcvkى; fw\oKL]9]M=mSϱZX-p8V k8q)[ 飕dQb]pc 5}LHʹhr&.ssz$o8w Dbc9_K1xIyy-]7Çjqؗx֪zMFQ3;Ol'ۑ_o&tɛA5~3bVpRo|ҍtE`_7|/|6 +zlxoo_v٩<#|DCGi|9/#Bh<] A@>C.1hv@uriqd-xo(ph+[*\X0W,+̗ķ~;[IY6oP}kV|zh$mнXLn" VOO^˧A33/ӕߚ? ] ' ࿳2*~fEq~ݲWm#Foypq.xϕPJvq>@G -g蹅v5Kq]cj*YS'|LG Dop̉W8 91挓7@C#B$lm/U&zzgTHBWz`bu^dZW_x5{k<˥ xS. *uF*"]|^S0e&dzr!_loLgYqO-~]G=^''-&"dDj!ͫ)gu|&Qujzu [}/~#+ܥ:8QcR3S-hԧ$& U3g;$%ayeL9>bփ &o c0SO!Wٯw|6!2<{#2XGUt nD7yNTLu`M9\oƠz+ [~vw§]lOy{ywKj @dz" 5P('TDyQKgRx#]j۷_61w%coZaB`}_yݰS&J TŬjXPGƤsƽ3NtsZژKJK졃t>d7˝ 'Y]?Cvh)v5}Mt U VHtq[ endstream endobj 262 0 obj << /Length 2003 /Filter /FlateDecode >> stream xYmo6_XqR.kC[M}8Ai[]Yer(Yrp8"g3C:V^,[JE~[>]?w( =) )dKR/ߜ'Dث<,IU̽1/=0 핮fqnhZ$A/voa_ Q"# _#W߽ҨsE㑝̝$fp qL|r[nYfvY~{Dl ݮ'uSA}&xVHyȪZHABvNvf_FUjj ɤX<,\ڷO S F~T"܉M@Vd(&{ȫN$*^T=^e.]ʰ1U{8d?v@G|{ 7:>Uic2ץ+,]e쪼-9+;eʳ6H*v<`dVmQ#:ΤIIzS0=)[MWHKnƵĻ퉒F];*՛A0ؽ`V*bA$rꘄ:02>u.rcP.kUWawB\m$rC5G.Hbjmݝ9lM60<PߙٞUE$ P]$$DP^kR8/c{#8!Ŝg)} v.gp/Ard;1LD Gi?:I#QYj*(nPrvH}GcqDV@6xX?K;U}JD-ӿ-ڵ{FrTjKn^!>dYwewYAH8Q@%:]Y@)f3ֲڼn",& unnkoVmr Jړ*L[U-j_]k&} N j# Z 2%4 ZmN?R\Q>^r*B1g!>A0IuyUua3 cR?Ж:[mUw ޲?oԒ$)0?, ׊K1Ñ0AtnL( -:8 endstream endobj 291 0 obj << /Length 2722 /Filter /FlateDecode >> stream xڥY[o~ϯ0}P WR]LٶCl6'%ґ'Jo߹2]lNjtƳELd"W0˖] Z d25A=qYv{S#v'0/d zGfCRLI CvݯLT4܆p)ѷv=UeLew(oݺkf.?+r$+t>[/?u3yʰL1sW0'<{;#N1VͼS%~/G۸v.I1ED ^$ xbʦ#|q1@#sp"fcJi|.,oO9׶af[,}႙a\>?˹GvrU}vnFM~YZ˸'^LnƠ esMqe29Y<8xWND?7׿Б/Ct,)駵Dz);Kk1ɱŊ"Yѱ H~i9YYlH~>}ͰJpL0\J9۷gY SUàOU 2qĹ_3iК&oN|㪂'(vw]"[`SB)Z]I]˲ }t$؅:{ܺ=MVk/6G.N]my8X@^] =י0L3:Uk\UpT UZdIcJ<-TӖ?PDfЄ/] 2ɦlwh%~M}w~YlFIl}E4(]r U/O)2-ܷP2K=.֮|rН 
t(f10ʆOc.,&R>5dϒdLS#pY#m`8x"hƯZt05ao=WP)\1tq*e먪SrTXmPcۺoV6mgHuŮS0KlEnW-@K WPz"b'ţnCW&{l#G e2W}%" Prr=_j;H'p<}徱43 FD;`$<.Rй,{8q:3s2';Dn i3 2I k!pOt ;ttw}뭭,M_}ZUHf8BZ|M QZ2@|\F~-"3Oeߴ} پc]nRl& εWd4|B̑ A^p(_Xo`9{\@vfR Z嘓vW\oZ0)&#e&G`h5&@ r| 2EېKd16R7 ,Wrdr3R%4=m\|AᔓאCvoMS0#6ʇ6Ni2\:pB)* n՞dVyRG6Ea[ڈxS<Q-ժ4/Kꅩ$@ʅeXz\ L=MJ(\,AO]; l-$4\a7ϭklL[E-WGcą Dv;J 89#X'@/&_`(t>^K)Kou`iLQD , <"ع fE$`83dd~(. }8ŻՀ`SBF]pC T$wsF'.h W{^>HJ*"FyXViCԕKL8}1op]yE>2NE4_ˆ.z҈NڶC[/4 in,Dk}d.-tKЄt! p'{Ds'>XޗR=FP0$7;z19BUy9, :'/\ fd)GZp$˳ -P8}k S8&ΕGD^]j}S Re.yFćWB?E pY=@LVɻ M>/Pattern << >>/ExtGState << /pgf#40CA.5 << /CA 0.5 >> /pgf#40ca.5 << /ca 0.5 >> >>/Font << /F16 303 0 R /F18 306 0 R >> /ProcSet [ /PDF /Text ] >> /Length 2620 /Filter /FlateDecode >> stream x\Ko#7Wi9qdf Hķ$Yn{YYFvlflKHTd=HbM6ߟGg/mX#'θ3J3j}oz3J8q9N\6Z…TYwN1pA)hVqa9If G|&VI;qh-עAg- YI0cAЉhay³G]':HCKy0 y?\qA C'8H7t^+h'H}mԝG&X&Q1L%0L)8h;pnh)#9G0Na2eôd8L |Rsd[Wؕ Cǂr%cYm҅h7ptpv9kʱo@dXP)1 *Gs(j\ aǼTNpGאa>+4"(;O X:8V(41|cƟ*{Dc 1\\7Q,J]\5|yw ap{o?}Z#4x%HpMgB!5dDLd'LX#~^o.EG2,].sY3hti1?6Qξj?ƽ`,$yፅ; Q816{)1F- Je xmUQv3Z= ?,/ ̬ļrs'O~&[UL k-P$IBi->3F#D{j 4YE$A+dW75Ԗ6wK-W!ͬNczWok ;VÈ ي  fR}Y/ogSɨFÓO`"- IBac~"k<*./a"xUDX LMSyMD#0nZu_]2U.2,s2)/:Ac_ͻ?E* o7ʦ#\F3NL=ǡhgG7 2>R&~2oA[(Pc$  ;*p\ri.G,cK't$Fg`'OL#[X;WGT5pdNN1AQ9I\8@ϱ?HNCaH5Lr.矖_|f[$zX59 jVLcٯSy*5Or԰˓hry8gR(dvd) xpNt,qqoZiSͤM䈖5O1+I6BLF֍'TԶੌ쫝P(O(xّ*"LȀ_]G坰 C^Y^]cu1)=J"t7MBSSA@crRW~J"Zz RQ("??\޷=gEUzGj3B2KD̫Aˌ@ajj.JXb#$C}ČR}VJcѣ`t9ij3"Z"Zc{|dѡI Ӆ'󊈕l9~B̈́A@VN֑#VsgпӸLh,[3Pꟈ`J՘x|%#殝x^v(1!ԷnEȠ:w9{jx@SQc7NJtrWIMs.ǘ#SnAo|j422EWE=xNy/܌#]v5Is?߅5eQk ©y"\"# x+)J(.BicP+T\& ,J݌Q-).V[55Ԗ6wK-W!GV;ӧQxQA͠cwݚ6ߒ?d?3Ys{֠GЋ3)7?Q n}^hIS6]׮һ^F/O8oߌH憪Fv`! 
a!#ȡM"] Q"kIdlL_`+'Z&́k; ƶUh #}4Ǥ֟Fc=JW-B?r4!6˫feo(A1JfZkb{(= xc% q1G0)@R-owz@w endstream endobj 316 0 obj << /Length 1805 /Filter /FlateDecode >> stream xڝXK6pd`͈ MРEԷ٦mfe#[wʒd{YKy}3Ƴ,<HE:[ng1+3>,1e6[f"wǾ/,"|mF9R Ǵ{||˱)Z7oM4םuLӣb &"=U~'v᠛Ypѱ_vPS;ۜ^r eJΓ#/ NM TyQ@)|OeA So`9X!ܛ㱱s{dɿ|5"?XYaEEGF;]리=@tlVEGo~6%P'zs63.*EʊR :G0h!z:/ķXiՀgi8n-,ڸB+wti]ot+b`T{76kYԚV^^{bH)zpn*IX Lwʅ\Sȇ)F,rs`3HI\H8ZsQ#jfsj >؎^ϔk:: o.IQFx\y\PY $h| M<} b~1!ytY:p*zL&C.iUzq=R1Ӄն3>H19[kszXPzʸ _d9l?k^d@$=~W!z.t Jp69Ju>PB_GHr1 N K߂Sm8% { ,θQ(U;Y+\n~ /,MF;88pN,O.ET SXl*fRLrn]>/sς{ݛK 1"/cTR0;!'W$,+ 霢.x$pD U^M N7s%Qk*[W_Y\nrw*ȧG=XQ=^5GFpEk{D9氩5Ł.AAn5= a@EVS a@Cs) {hK1b ~/j 3)H4`l %{ s聓C 33j?Wɮ_wh endstream endobj 242 0 obj << /Type /ObjStm /N 100 /First 923 /Length 3074 /Filter /FlateDecode >> stream xZms۸_o=ޤ758n6Z%ղς-q䳪 &6YBq$S@ũ(* r$K)L( # $>!`H (F-xj H FE+13ZRRMQ-R \T3T__7CM2Fg28{b ڈ_@TcK2DG':pъdXEVN2,h#@ 4iLS|@#.2q<с /, =޴%~-`Q'y6.juO`l:\ ɜ:2xlP 'QW!~]CnAb#QBEbU""``S2Du.X+꽒*aR׾e[~EeoFơ}]ʎCݕ5$+=)f`9D.ZĀ4e cǑI;h-#M[5Fo3]}VǫYFz&4N 7Kl[Q{_+O06$4/OkkjA09뢤jdUzőx΂0kE)޲1"8yL˳VYOFcb P*T\ĜQhĒGgq)~\}-ĸHb6dV9lլJ40&gKٚցtЂ)JFhc*oHK[rBD|Gȹ!ң8h97)rk[>.)x%wxm=p '%_(x9Ce' O:;-yLiwvvc:M@<}jXy^kK, *|<l'c6ho;}Bo_wjLHN=Fhh %еWG:tP,l6dɒ;{,yڷwN{+A vV}_#Ź#Vol)rh(~R1W1|/&ȇa tI"Its) A:Mntr+Ԗl2V:8msw˥(f|ѡF>yyvqqxxh(|GEIECŒY,Q1^2ۚJzfT z Ub&^E*6lT6G&k*ކ+0Mo6MYOZ}ztC1z~Wzȕ5Lze6/70!Mu{C+zѝ g û?-\ɛm(fvIꦤ՛}JzӓZNo+Ҫ︯" ð[{|ܜ#[s2g> stream xڍP-CpwB-4@Ҹ{,{pw ;3_^uUw׶s>4f& {;3; @RIBƁLC ǎLrr !B^lR@ Q b` 8CwH]f%F la yzS;??/q[hPB,A/M6 {S0_),!VV7773 AN 3@[_ Yth؛C܀N l s~ q39^4* ?ɊtEN#hjjoY6 " ڙ&m_@ G@2_9: ,`3Nrvf ;3N ӗs`rvf0sq`մ;⼘Y n666^~r7d]'o >^1@>`s38|o0B& ?_ ?;l/cEafv6bV]yUEݷSB `fsx>GbrNi/ Υl\q|??v$bcOxxQ e _vڠ?WW}r.Y}`g;L 1S.5/ j ]/χ&p^KJۙڛ2n "%nn:P1x`nFyxM">?*7^F|V'Uo8_*m^b55|)ewqW _ |)m/R`|io}y~%}Ces1}95ҥ?C8_<_֊b)Ԇ+\u/xI/q84 c_{LfM[o*IܘDŽlk00{-8ܡ#&1Td:]' uc.mJ_-?z6"5'{?ǩOn O=#{ENl[+ C&х]5ƭWֽHNCG(il3f27'ӗW߱3ǟ}~~[㈾\.}IDMGH{=2I%,O0U#oNf4 ҆t䪢X*_&7,؜ 1U۠ڷl'Ϊ-zd7EvVk},p rۼEh K'0ɆwyюaxoN}e퐾_=y7E:DnI)`94IIUO/AFB;b0ks]͊AiV]vp T9}%LEGJ탫y+ 
9tčaתdcESP*$''~5rQ^2:f\©22q!ς3W>M&Hi'eH!=Qrޗdԍ t=+˜:'MZح ume [̅coF,|F乭&33B>>ʒ8g\J>-[HO(g>[tZ wF,ٶl6hN?'҄ !h| l^9G,tuC ^x=t2[oSప؀@9PeP_ă#ԫ,"6GsEt'Yg_ѣN hXA`ot;̫XL?I <A}S7( nɁ]H{v8I$[:築Re,2zBS"FcnٹƕK%\U:PW+>uɬa ;^\)m/0k.|TG"p3p gg0uOc#]q;U`v]`ÕrtIv~*ho6Q^cSb̹@.VQߵ',9 q(ET*|d]$է n#q%y`4 oPD BkTba2$f23O!|iM܎}frJ*y'WkdEtf1 sjyړ>ʖoBQ@{82M&#qbuY#Sd4eTq1p'ǨQ YfIs$YtKcNRǻ&/Ϊ}%h,S`kP 2?kmwLo(@b<+BsMx+7fJF<)T 1708!fܰs="˰Ș^w壢gM *y"?^|?*L (/x"h` %4&Y|Os~Ƌ2v v_6}>c 5$Rg>g|sN(۲>xѸF j ֩mC]0=HgvٔӮ(~rQTi6`,Q2PNT4sA푏dyvJ/ Q`]ﶜm.y"bzggx"JJʙ@D*W|v2-0z0K\peW3"kJ-S"ֹgQ?}ZNB1jc{wu r5)x1,BhK 1xg+*xJ:ë&zS>G 4 *kzZ Zs_fԙ zIG;k|fGQW'n;];]qmj.=Lo# ?Dj+D*iFTR4?7f-EɅI1Rd-mH@ /= KWYS+&@ɸ  j:UG_; BG;U0aIM#9e}Aҥ-fuR Lee17/ܴ)rOV#i B|unႣ\(\yu* ïF"h25HN§Eʷ+Ч1%ctU-dger˱,6YjVC24VZ"xRGؤĹDJr#4idMXYU6(2xo9C}$0*˚O|-n#yp[Β]I(Q.HRjQlҒc[:X' 'b豷~h]~\%mp׻NyP==tb8[M7݆|7>^D>r4YZ ޔ͜$V-ПmSz [hmҊ0oj^j?z+Yץ !>k7=\ ݱ[:c'(Z׸M >roVj $$Ea?l$UHfiOiܚ0Q)˰=dsZ{ ,!#=2MMfD2F,r"ӿkT|#cB =U#NOM4g2Dyy+9J>? 93-4tr1Y=7 C1L5TN!{s:7#5~vdw@R6J/dwx}ị+e94b~+b}֊UJg%5Cїāx:~ S&|'`TcU}E|7h{V\6)twI-aʘE47f]ؗ,\Ewxjx^R<)_CG۴)fg?$ Jԯ[ڀZ0BNWK_bWT-MJ.[&訂4j2C6*%cI,hY3to@ii%.ʎ|!Rx^^]kVzp7X$+Aݍ8Nb&<~d:& C\=GGᄸy49nGw.kH#^mU2eIt3$6V Pi~r93}Λm3i䶭#=qxFAM{xg%{me޴ө\0ttikYx S~{q` nXCʚ"AC'9Ck6E+4잲T:U3%N7.k~r R2=:TkO}Z1 GsX^F5SjLwa=q/ktHZCQMZaa-)mΡ 73mYjvD\D.xټj_3Nq\vhc $Ix,1R 3Kzn&筇AڬI:I>ICq% \ 9#fNxǯ-Sjɭ)k'dDA,``hR$ض~QKҼQd:qșHϨBƁRx:wpn{ɝhbdo?o#GaC8vkJ)^BX I:3-nӔV413* Ǣi;X*f>AyyO@ye;9=ܚ&'?p>Trd~E1[or"B_8bۚʼ4i*QB,pfp#_f}Wzьm `G_(FZx4T\g^ ^[E) 19toufU;I>DT:Jk n'n|P}JLe#c+hB ] iyg35I)`CN:r+0nܰ!L_ uV!K'Bߗ_܉p/olgӢq6K!L{$B;ਛݰ蔞Z"XobYk&;-Җ"_:̌ #?^ny]F3Y门Np)@.>6MQ>"ғQڹm 侧u.9MZ!"겱NYN 7P,-ҁQ8z'(V7 (8k %=SLdoɅՖi헉fO)ݪ]-LqB벩 c.6]1nrƢ?M:~"s%Vx\\;=/UL_u~+# +2.W{,+|gDsp|yN؍OOOW5mוY !og$ qfC,l'^ :9rĉ֌b. OG^rz3 ]7O '5p)rl \VK&]IÊF\uv..~- Az!_#9s"jpS;?;_RGE-Ko"<& pLnКqgHDi/IIJ-WdHެ$\c0+YH+vg(A\o{YDAH13d^KXcuUV`D-P-9 +Ҧ 0paSy盁eWyM^Xdm 't r]u&xG{ΙbDê`9}֚޶'QzʃR*8)f$#[ΡM(EVdcu-RI ^(Gq]ǎ5l bk/|=FIfr([\M=#^Qԯ M(Xry{0r[ ,OL12M? 
}Bxg|멯s]9XvVà@x|4AN3*bfR( z?4G9Zȕ k\=ϖzhs8 9R6- :v70"Ҷ7Wh@E: 2DgZH Mzj֧a#.JOV5x`yTWcGT-~Mw׿Og޽eKl} V{Vz"0s#na8#>Exx5"trPp+Q#܃z>ȸm)6U*VǙ-x1{7Yغ} .bΥiUEGcR+T_<Yrә0lՃGbѬgPkbո=vT\UopŒb-9jnb =@iJQN'əۗ Lmhl't8k^ h W ^w Y-ZO> _\\BH;*>#$;={;ўvDW tvг'>b")<"oӢ|UDe޺@I\|sM)y3>Z]aKH|>Q@ՒzcݞF }N'6ϝ@v Zd8%.^\wݒgiY xNr+&wʋ&l #f9b:WJI܈LQ$6: ñMt:Sf+@ٯ~&teGX=4 $O~< g?NtD23x =[T{5B ޭu?ʊU5OϤbayfz3k,͂R-2oܹg`ZsvAlkpO<3cXz琛2)$Pr|>3ZD:QO~;KJIʺYvED:Eۂ];]xqugr@A 8Wk#Op=pTC瘈iabaXAra['^^u4Nł,?(V$/ *oyܚeWPB|zqUI)g[\xݙRbk8?|^5:HsQJqNgÈbcOgKi 0NI//<$Δ*W[PX"v`6ne">^70"W@Z䞠 .hgzXWgBfViɪWSJeZbXPLjpZM 3#[n6y#BtJ&o~Ңk*=qa_eVk% ![dEүSUtU:7->t#ܕiǍ*>2'-K5dC94|U'>u R/|:AqhFR>o}?Y ؒ;ɷB)5m0lW~hwW?yx=]kNgvE  vS#XzK(U'a*A~Mɱ7*6 m /'/Z{~Hܼ=-s_TncuS>s!RT D!h^.ss&n*UFUfA.FV@[Ѫ~BC{Czn(Q΄MK_Tyhűن6ILe~8*"L(9[[t9[ʆ/a SI=5!u #&1tw04jTaMR uOǾcy|`f|uJ漬 Iؕ:͎X7a48$V JΜ>eKhsv o\mxVMZ' *ӕ!10=h%*p-Ƙe@WSҳmޙ-Bw(1Ww+A}h 0$![$@,dD/G뮪ѻ#>h~PO!`M9=V7qU'Iop (VL |U>7n0sF)WU2e+PF.<s? 6F6y+۱[]kH$@Դ)Lb_wЎGjяx#}vCcJJڪ!B"@HptbE٭N]nO?JxA5E&A_o]G#=#=Z-iZGk˩Uu{ih g^Ӈv+>DZ ̤ i8DQ{+%CC5yYzM1yhV-u[w6!r9kg+y$;]$A{ea̞͖?.6le [ͼ:G>O{<փf!ў٬C2_w)S7iWZRRmdvLj,a?EG+vԺ%eI8TMNv/n;R.x[Ds7hƥϐAww&Cu5dhw%/ fg;`'옕Q{mnM+n K"{ xsS$-7@NzHSO' %x=DZS4HijzqQ EhZW@ih+&z^A o8Ƶ_K%.{}b|!&IFQ[{!YVb;,~aGF`'0U|8ޤCctԌsTF*qG:'M;r&\JRWa_"t l2& Dsu>7J`.S-#m^&Zh0oE[7).P*0 ]i"A mbEJK:Vt?.\@1ѬEާ~Dh,𢣐J )duɲHpNJNY_G%C2pfud؄tٜX HvʳzhE?PBOEC͆Ĭ 1#*ceVڸ !S0ЇVMwDk, m7 /j&ni"elHYkuyao["K:=zM; Liu'YUTXLȷ^3{ɠ,bt9VJe{0}[GV]jol^0c7E5%5ԅN OĢypFwsmy1c 9[Y;N]vը[Ft֟*?jU%:C9f;ERY endstream endobj 407 0 obj << /Length1 1676 /Length2 11842 /Length3 0 /Length 12920 /Filter /FlateDecode >> stream xڍPk.7ww+ E[X)NqwwZܥP[ws&3s-_=yi59,NP7'0@F57Qs-`A2q`2YN Pvwp<\\Bc* y@je'( Qbk{N?+V?R`W P@m'+_!XD`0ga ӓj+ ^`k A:`Ak;u|rur->W/\ۿ ?or<<]ܘaV NV"aa-UR;bS;Y9|\[qPSX+3B\RX$_<4֠mJj`zbc~o,@[/5:ߣ^FvewAτ= ^zK#fwv+U0J'9bucLs-3gP`hl^WScO/1cy |yf|Vtx:ȍȨ'|SI| E ^dćǹϓ^ y 1ƽc#NZROdJA\lMLf#ricjQh` W`MćہN"^ *[On69;%tȥ}EdPoș=f> r701_Yyz4'iS(e|r;^9twZ"Rz9 l M~WJ ;sQ&!wByzh9FkK][o^"mFI3Rz nCG ,]74h›ntK#iZx\}ϗJ 
j*?D-tWInոe[c]B7o;M!Msg{nrdvhbסܤUO@/& !vn+ŚKLoU]OE}H\D⣇Uv٤,ML bn&իDW_L: d%"u>dOz)jAP~"߆T2&gM`~5UJcnf%:L:>5&\.mW|fR8ʸ =2ʣpʰR8n__1ƻ;ffg(.%8st|gڑ ~ONZ{01',D3iJҞD8g؝愭BJ H= 4A6Ni}=C}!U[OklwUq듮 2I"zbڛfw ɚIIS+W7JMNU>ZESpQ9WSS6*_#K5 f |%ALԙ4T\V!9TaP)c$ՊACĂͣ5_I׏$ui@m?meaNje<=d>i9o>5bvaxj-^SI&b%09͆3Ilō\x&0X4.j\ώIxtUآt:|zyD˕ispt7~S>1@칱cTڽ0PuϕvU3sUi"x@/R+[G#4<2"Ek]}C; v_ {sIR~^ 5MDŽyUQnXx5~Kߕar-^}==ܖ)l NuA/4b&bt*:>&98Vtf.ZNǮ6Τ l<ށ^iqV m4 }vK$t Rf៖bF|. PmVSbPRfJ܀:{ÿUFר=UA9׈"H \t̴W{k3Wü0 8+XYtG̢R,<7j\qKfb9KqӞLfJdvT+ti4_DM= 70V*F ͗Sjo6NHa9Ns[, s^o-_ `pj\B-;9>5oDŽZ/=EL:SzEؙ iŜB 22=RkIbA7oUsD:~N'9߳`Ǩ_D‹oX.uX%WKFXQ($Z-#gz@#VҨ|fk~h CV ^ޖM9CX7[4؇wݦ_H؄beQ믄3$(qe?!\ ;|ۡ(&f'0]Zxh}EH6J颔g+]萜,B#u7WN=4v-HnxKxdo:OU}3oVcT^W)Ў\Zqx#-Q\ucSjFq1@SC(4+C%PWNkZjMATV%DcIĨQ^N"_tj&r 3B2m IyܴU%G@BWGcs"[ZX͠թS:o,&̋h_R+4N@].R(溥sGk t&UQv$\B,ƀ{΁:v~b>o#.y$<[P~^\ͽ6 ;b.%\E@ؑG76 8 FH+spQ"5Ӄn>J>9keDn dbs*H1<'CȒ,!!%톢 c[Ѵ#+>[RgqfNjZ7 qT-eKqŦH[oC BDLZ(]Ѫ=ͨ_Kf WU"/D5R燯.Z$2S}/ݞ2hd@Q\+)嗷~\0:qM3<.wai[Y;CŢCSj%⧪s{Vmytz[JVpڊLO-H`( ap|ɖR Pw˖;Qm)rjw@޵-:T=f5 ׵ =fAs#m'Oh#q/EV&l*7xT.c0,.1I󥉆i=R7 UuI&eqIZEEpp҇Qc9Gq/_/Y9 xPWX41+9<ݩyȽf,Mk΃G~GN.dI$ fe:5Tp3WHG]. Lc^GDM&1!"'4=2!D0ö@RF&W4Kh7KS=jQRUMC&X{fX2#ѓ Zpe?$R O}f,_\QƕQxmrYy]ȇ J\]ȹ*(~jo@W^Ԏo~2j[G.(/NQEt AxÞ g yOT,5Ō]WJ%j n[ڄ/e9ѡ.颷L\mǽwϙH&4^i(-GgkAn{J€ǴbO#?u3>]`$& _)Bg!4K x- =_޹z#dphW14p@(; ڎn:6%mECJ4}%lj}e,/hbW; :*T?<-:ޓHc> 'SP}ɾGF\; PRӍ4p oӋ[vwgDS&[|JWgշVk<^?ғUd?Q$O}LGyɚ"QtS-*S :987&{7fI"i~?_dMzBݧz?2gj~o{{B{Xh x:Πv[ "2jDig<5 rȼCsj3{TL)2թt8=r9CBGHr^tO U۠shkݺ,Jp+Fyz%o:f4POǗF r27>]ק-&u߿ 7,Iӽ:cCu k߆)H+aH [30 >S(ĦO2UkB;Ov«w6-a8~V<*>R7A ޓԖ0jeZxڞr8ե Q2CQA ~Ok-n/LDd(-x~!sǨ Wl#*s+;#R ̡=$='Nx5:I+i 8L?}~@gz(ոhgAc*0~׺s"r M5iD3ϯmz!iuDVТUڸ(F)Q_ӺoBvoþE fŲL#lyx6ޠgiLMMd<H[KK|_At:mJY@fs住}HOdgaU+m#T'ec_`?5 toJO6\72]Y7{Վ2Ae΅#Zf/Sxu 1І1bFT7e# ߩ-=;y* $hr"=FQG>GTz7{ ,mb-J8̹&^ġ }~[=-/WŮRgגNplGH-`C|5kOGō 65?lFI~hvMeiq_~ӖmڦԬGH._0a#,Wn&7ɪ">_ah"18}0y&Rlo˱[k3ds&$Vnv 2*X hƻhI s-|r V}c"e:rI>U rX~1[L${bLdrJUtC Є]] J2em/GHM37/Ot__6?j!mf$q-U[bhf%RaOy ,hN诂BbpdQJED5WbwSb~ߖau$)֪iV4`~\{՛. _wt0? 
Uj3ߺ:*}Ek7)i(jC^RKB@eQutƨI?uiNʶ( I2Qj@袵KL$.ԧlQQፎd? X,+Eh,=>D}Lp;)RkJD&"8D5U2d{๣JPω,\1 *X9:BЗD#U/LeD FQE`uJ ͻQlj*^;Y *:%ӝaʯvFA?{ۅdMIL :;ƚḎW֯ hݷ, *Ƀhb>tw\- Ĕ`2oxwӆt5h*4 9I-gJ 7\Ѳ1uuXQ,Sz#3^;g2h`M8p-PK=?"*}[EENՏ(Ӣ ]wmr4^CӗY4A23ZeϘa%WfڍÃNHe%Ѭ m܆,,aAs\T%׷ZJ9e.&7*nݤnt?P]i48{uHa)T]xe3eˌ5M3]O4Z =Yo1=Q_Z|川y.h ;u{EFdԔێMP|U/uÙ]>^oϋ!w/Ô~Rw[t[?))>ya{8&|fzC?Suʷ aN(.j"ŒCpXE d>7j=ep/z۫}d9 ]>ƪ4a&% W?ת:by[ Q:Vq\8R88gH2ѶYb?(0܈rmRmgaOlKhXZ&U.ni{-M& ҫTqn}Q#_Ox2nmb3B(x8К=^P&A8)x~67D:Vۗ1w$5AsNnW ʡ:ɓ22,<k[?| mdpdR5tJ9&LVR>.T/ڏ1a9rW }K7)#p.[z*vMKWkZVɽɫcA1RH9mQ|>\Svz) DŽks= Ut*S,(:gUA"g>T${[kUw3ߚDm^Nx=YM뎭"^ ΏD] ʹH&yPl%fLM9œ]4.~g4JV)82fVggQH4~<=?s+OiԂ$ 1uQOʝoVG˂Oy2c&߹f:~7B A,tHXQJ'fr#" : k2!lǖ1RHzwٛ?[yMW;>l̓ps{+PnMZ$Q&OE!襉NczQ75iZʂhXKRhǕZIBoVy8]d#1!=r~FL#;.:LTӨ*guwɠfGā$ٷ_+WAȾ>h[4yKibGcHPЌYh7kysx\ k5Q_L] ;g{<=9oE7۞H1V'UxB"t1R>*qєb7Xëܴm݇9Eh|cd#dg OBzǙ1|y=D/k+-NsF4_OŇ݌lՆ?&g,BŐ1vTeYR^0Di@yZ 9<_XzLJ/FVU}l;_k`8]9'WOXWν=–GzԴH+=A9&"@#Yn&sR0*,Ե-&iX3gmA@)!owگjȴE6eO/]s]&㶉0d7%֝d b;0[ԸeÒ7'&vGRËَ!-9IuQ仞$Q)t}pF51,\M6RꂾswrرK%7Z]׭.Y^4ZwmYgB֎9aEd):w G/ J2})7B/4D?@x&` v7*i>JgZK-~il6^ޯ5O`'aq68ބvrpO*X#a?W9//]:^XBF ]c"#t?Bsng-:1fZsV.\C,aȮة%5w95ooxpΦ!>bIf] hN1S7[g/3P*h )Y<Ɗe"C>zVg h1?. k d~e4Զ)&b1[YD1IB7([ǿ$4`PHٕ9ÿBQJ||X-mt!BUUo1cRgI/R_$K@A$^ge~.m/7Nao7ed|W2PvB{_Iiebɵ;?1^6bkd+K2aƎ¡3 ob"kdY[ӆB/G^^v:-Y0zD?x7_QhaDi!^8Tq܃ Y&o%mX&7ckS]ީ0#$X mYq_/xq& QQ"qԪ$WVJyY`']DžGnt7wqqgv8w.aL͜WYK(P49|3&h,M٬esսu>Մ!i!X?&lJ{|83dTqGkkSEXIE WS rHj4r1>RrGw4q]બT+.sڜrQI'%7ZntNxuG<,A6ȅ?pk/Nzk0T/( ]~= A.>*~Z6WoRtG /u%f3RTU"gЗ\6l⡸] W*6zѬR_M_WVNaezAhuv74m.V u s>}[MS̺++;"x'gn˘ۥ-TeK0P].)-,2L9 K^Dz 9NenX0;Z1}u d-Pc j*bibML wFޒ`Ox쾿.'~p?tbv@A `EeK,:aC5jtMtN;h߃3>㡬WSBeR ~BwmwV7~rT&!㽪ʧf;) WR #KS |%LmKik k! endstream endobj 408 0 obj << /Length1 1658 /Length2 10148 /Length3 0 /Length 11209 /Filter /FlateDecode >> stream xڍT.L#H7RCwwwwIIC 0  %-HH t# \q>׺wZ3SkK@ ;7@V]O ŃNO9ӿAYBA'%PPqwpDE<\\:CEr` @ :xCv<0Y3pH;`kK@frzhmu`IstrpJ0<0; [hX:jgvSl󴄂OG5]e5 ڟlOCpvvrx!#,!߆nO`GK'?J(Hk,:?7k(v#0Oce@Pܽ9Z7C6pC elDlA0?/ yYqNC[ԃ ?f +ss`k d I ? 
p=яO :C1cŜ :z  ;%* qYӔ./0ù[~&.JH=ӟ/'޺Þn@ kjpA@jaO u n `/P ߙ#rv~Y\\{:.k퉒@O!G/B-ѹ~F 89 ΰ'Swg( ?$,=]/'_ p 8OO >IPAA6IO'f92ڡO >yyJ[CRx@ /5⼳h}]i2O mtfvE'[,Utp<Ӆ/Ԉ$;{v/SDEhzR;~\^9 w绺 ai_{)z ީP}~_=V?UP,}U  /ًG*VtXk+z#&6h2+r"\ Qwj}{'\fvt? Tcd`e6 '\BnC{l2Lt{qꝏ'ycd2^3uwcJ($rZkZ)R GWw@\Z[ֺC=(Y-iQQJFKbuE{EIDdMF'c]]blbع84GjTeU+Hd՛ӝ nz~[^|d7iѴ6 }xPuL]D`I>\^vۜ(0y{Gjgż=Q%N/9icG˘mx_..?ݼQ;)io5Nَf ˁ+ͅ?~9)X9iH$[WP P& /(E'(~!4 7K%ر72_pk(]}YX>.kx9&GFzS3ssx>>.iȖ;ψj^`Y@ Dhae ¡ ?-c+ӟ>(;!Fk,]zJ+~|JTtl/MzL#e&ㄛr|<_{ʝ ܼR(9]HMNLG2='%笧ؕ(,.j8?SbUWKin}7k?~E{1 _G*Xwl|C&؋ƈ S6Ȏn´/([>N>8-+=t)RiR21xe1 =NmLNtS^)T̕:Ķ%|7C N;6.Cy~Ș_;r=`(zR%HIzv5ˆȻ,k" ^3=,<-t55 W::Ye̪ ӞK_g9]d>J)a2`aC{W, e&CerKwp^OTd/ݖYHNYP50T08eR}NB 1p6} (lOx+*CBaX3C{\Qzrj*4xw՛or9x`,t\bG0nݹx9]YyyNopDD/!{W.Rgؐ2秌~vv[pgZqeKUh& ih@ϐQ])kՓ:λmgX>⭷u`t\GѹU᎘RO|_LoRXHp1rGIqβJ>/0-qwz~3ā_`!˨L3|ԏ$'C:Z 䳩T-vB!dCUg`<;lkڄAU!>;~,Y>w#2s\")Y]j%vC5yޮ b kšLxbC |y"bN*cw:!g2B(ʣgsc1]E/EBEkqoq" V|zL0z\t/ h6~Cg?' /IY8vscym ˶Z2\(KpGa~\čVfyX], 銢r1O6~ ) j6ZY|J&ߕ LF5 ;9Ξ'~Wl++K@ZŴ˄]̗p}ɹ$ojI"Ѣ.n  C/D[ l6(3e&(UtLXQgN{Y07bB|^:~4MB y $*!QO~i)ZAxƩ!%Mej5qWdNX4!'U::f Unm/Z|MC=`Lmc@]{ϹUdLԟQaiSwdt#:sQteq@-SbOÍD:){)mʈl4CY9IAV2yC B~_'smS@ XDV{QNsSK㶦nh){׈$[RlCt;A&>yq0Ȫsڭ=mسо#P x)~4W+kK-ۥVs gAܺ@s8m#0 梞>(\:xS ~dZ*%q0woTdsL!vK0b[5K6fm8hol(V SJ[5Hr2#nitVbt5U1Y.BoXnoa@B,Yx ̒`~ŀ_zZrAhY`9|ԦctCy +/b~UqJ۪$HBxRf"H Ԥ}-{gZGɰ>į|G,WM#ɨK[y#އ B5T'2~+T9T=G>дLkܔ[˴hԟk*F/OGiUԸ$$iJƴO:Zmf :Ğ%UNnѥ"Qւ. 86ڢ^nNU=2-uSi*H5oxaڐ|TY?]j)d{AKzh~AY@tڞqM*PLhrG{2pEGA (BhWG|i_زF,/F E,9md]>mb߼wdӌޅə:OksY5εdF,l8&Gi ٧yKATT7xc 9\%-Π&`JI mXj'bBng$ :2ጴz8WčՃF]l 2{a^XCr 6-5JYnj[MPyjጢaD׌?-374VzRiɒ=]& E/J̊]&N~Wz 10{NFeLcuУ Ԙ{@_j~%* o\nj˙^P1}`FGV@;HU$?cJ:DGk U,veTvh ЌLt{{K8%vF>F$2Wr  Ɇ Kn-5P%,Hoiɻm >9fkq-0£[W!)'̯Uj}J~UI=5ڋNΒף %ۻ5]去Xud7ҳP],Nj^g2Fz8H(K(Dzzy6Yy? 
YaZ,ڣei :@]G_ZG x~r7/A$!Z%}o+ȹZ|QM}Crվ4}$0s5@opb Z^&4,+<ET8S>4}cU4kG1 T6KJܽftk&7DsݒLG i,Hۚh8{͸VHGI'7 kpI)Ä;n[*~MtC&ӔЏi:D_I'(]~%WIWY/gx$.2Ք=%Xm'Ik(:AY8!0jO͟'W[ p-Njʟ`Gpx(s%Hj%⽚ᬦ'#'ΆR6xS!ex E1u oŃlIfeo?S3US Ɍ|sX@p$js@WmՕ2+xn,>)f b 5Qcu9)zzr K4PrUTnuQk8[5P;x8U\]~|B:o*jGgxfE.(Y]IVdAHò>N6X-+{^Cgh&[ք#w&wfbt5jqq%撕#x* ohkqM,2U `{bspG段s yi^}gp=_#p+7mM%XXFjpZ`NlKoahL&}R~ūpP9 P6QppDoYZTQڕ̺ aPsMuu_UfYu&YZ9f)@C[{'IEm0x WU%Z&&6^=M>GGVk 9\馏&6aʴZH23ނřG|DMtHӦ ʎ13&06i-i;8©BGx9 Xs9F*|n?頬Y_;̡̫N jcڳjIUA ?h8">>"q~lc\W_ؿpJG(nd,F$m"#3*BW2چE}{ ƄWd6abKn_x{pvL'5M{Qc|IA9oƳg3(wKfLAoet9hr|,b2X,I/B[[<$4<#~azRGU^%(۪ Imlj͠#_KM%?fsu]^)K63=r'݌E8c$\A؜ʦJ%Ҫ}Kg%J5$pYuGH,]tEjJ@Ƕ ÎΐGѯ6a~@ F\HAR~LDʋ7$ce䃌/U#wO TՊ*ԌFzWOnE=bLp_9- +j5!k)gRؕ%iL)E /B$ IwUN#/*[ʷ %3p)[pf sML" ٧Ͻn(PO{l:2B,]8'+HQ5_iy"bFGpV{x:I`N)y/}o~pj (> ZCY|\;I8oq :t\~i܅žU坨_9Jp! s"b0H`gW?g09m #[cLG|pړU<ϑ)c[|LĞ Tay}{/TRoPin}ņK0(Sk-Q GQ%pX?`*k^NާUˇ3C3%팃Azr_5KM#s~E/eL?3Jhjt$d]I` @MD֍H_Lv]?"@er q $CLN>H]3 ̰bMf!']\ez'{c:MDR#!=`E=I {qi"Bz4.Oe|XZ۔ogtXoSդU3i  Rжgݛw[.E%ZQxLyJɇ#䱩,Gܷ߰*WBy3&UU+ޥ]TI J+}Mڅ&LQRxljl;70pyα[}i?5S{,]gQE\z~yܸ*SټqW%uYoB0D7דw? K.ȪElStlWak(#& m=xq*9D0q,"Ҙy *?J;)FZD;znoљ4; ;2ivq^MLxo ;%5[hc rݓg{1ʲpғ_̗@oPwH PߌA?,J rj" gr-ae㾖VSigH|l85sD/2s^ۍYp }6MTG,AsăPF_]Dj^rׄOʗPw@؝'+Z,o\fDZ|jbގd|2 IxQD8i߻ 9BMw}w$螕:v#Dùąr([ERxNmoo#)$Tl 8F?h"۪ilhZc6Xg/qQ+Zqs5CWuڵ>x$Bz-Oϒ& Q]0-À?ϺU{u6 *F;ί{L~+B&xj>7u8Aqng)!4hGPXp0&AS*M3>mY޼hd4;c*[\]R97{_iԈִ|p{2A0\Z!i:Tlً$j-Tֈ2EyM+?Oce8\VhBm%A]'l4<l0޸(<8\O|N3~PͲYpP`LOU)HyMm=Ra\uϣ ߮vmk /z`T<#Ax$o nYv?MJzpjg *!̝x)p'hJgz1J6.[" d_ l'F^j вt>8K&^4קcf-`L%`(־^Yf[tF:.4n߻zMxӼ<=4I&ܰ&n:6Iz=Jr1IqDB?/Ԕb5b>PnH'=)ʮ2Smn=($W}}ҲwpCR~/#ͰgKWzv?C"|2/㌛(_&JHC#L.ù(rSPIZZh`WuF(,+jySL$n+DGDג꣋t4G[FDG-Sbs~`_IN+F'>NgY !tU&dpG$n:nXѬMbӐFzݺ޺jkf7;FN?dXKHu=S2ޤp`_ڷ}F+ѺlCqPD;mY* \ҁWr0m 0?hT ^$ mK?p6t_"Xثs0sSyjL ;~r}eYMCWXN9qάg)n%1d4$ؾ}$]Ůe--]89cF9h'kA5=oZ6jy_؂8:h9fh;i(WXs@ltӤzʊ#O0&R`Xa^qVGn'x$[LMz8ɠ;n_:MY}L$勂Ҟ׹R{ ^񭰊0nI#^&4BRWB? 
s IH>p ң=|ݽ*Of/>1n a7t9_k1O|a]g+˴87>%gi ^ A'X0 OZJ3Kۊ= ./f(kA$>GIB&ϋhmc&1ѽC-B!)x5i3)&.nDIad~ -TOzM!գQ"\Չ[t[؛:X\%./2A9d&o˂ap]ihj{R"&VU姬hڝPSz{=y{&:؆PJpn"_=R B'Vư癇r¥ endstream endobj 409 0 obj << /Length1 1921 /Length2 6452 /Length3 0 /Length 7615 /Filter /FlateDecode >> stream xڍ 8O,Yn[}ؗaf cd"!ٷ%!KgKdBlzz_wu|^sιf BQ0-#,&"*"f;_;1 @!PG M; &Iˋȋ⢢r E=M̭ @#]0}}199UA 0E9!`Sp`?|{l`3`NC-a ! _.UDxk!aP0713wFy#~,䆽=-wD:?L\JѐbQl'KIAbi61A0%BxRH@Hi&I@IV82Cas&9ljqMbض9BbXFC`ثcm~;8ɇ=k/ĦX/jq g e:B ?( 2P!VoĞ'vfPJAX)!v/* R:a+%/J ?'4{ "/a0jkmTjU&?AqIݸ}"%MUu=GTV6K?su Ίow(~+Rn&NlGwv /&,잼 >y詒] =l{:u(|-ꨠ`!FsUqe? g܉/ 4@bB\ X'y\>FɴKn':3R<Ʈ|yYc}:LۛOI7ۺ,_gi[ߧhc7l8]ζb5D?}=@/ע+yu>F >&PwSDAl9U2yW2&(Usr鋁q1((/IYO6ٍG/8Q_Zm5]yz}0C4h'w='OYAr~!p_)1N?Ϥf÷[kZP+yBUW3NQ^#>7 k/q4(Te=mzUng0kmHtlPzj!w7yD[Wh2y;w+ 5}U<'Mu[}#?P}\Y;p2]{j"[c.9}C]{x wzq"R^Ϟ0qo`KTvvKip :f ap!`jA؍aޗQf|ƗYRp(>x[^/y2eqPzH1/rdr\}Sa{![) ;^ <νs )Ùl/L#ak$ *Ґf,PeuNic٬;//z;"͚X-՞ etB."@_ %['W{ƭ& L[ ? a&gnK~u|uSTa>62!)s -1Gf ̵ScG&T<"Ndw Ng1*LyZ!3.6:ۊaZ[Abb'RHJ]ớtV+;${loD=7V4 'g+fI Nju48sV*FD&bieW8O5=nXgAetMř٘ >Jz(ۃ8Gw^-m7܏^ U&leWKt?{0y#R̻ѯBL>ߎv"}}3ֺ)˄`=G1=rD5bzं#!(/]Qx>"K /uz7X%,}Vj-.mW?D|8YYfwrVK `)l aςo ]ˇʯV ^0]665 ejm/h֟MhLQL[}Optrn#C$E}B$4HC/DcƬ6d1C(&}%ń&84e~ݔjzm(6~T륧MO5Mxfx}`?؟MWy>IP?ތceYI ؓ\l~簸fŒ'RTJe7MU4Tpҹ*u:|v'T%34S l2kNDH4:7ݚXേM^~ W2NބІm~^gh|(E%v:jT#Pg;Fϕ _]j>p{v6 յ5nWEodBqVⴤ%;$m&^Agă2^d(l0AmVAq#S ԺtLZ-s0WD;2MuɃsyh͔^͋6K,E<âϚ&t;Qp[RcvX$ͨfܹ8f5уWdO ez n{#\iGs7&y6vO-]fmߟδu9բY(6i8fnTEJ;N-o3q96nR*EFL9/m=[hx;=ܞ gL1_vhP0HʅZRѾq}ȷ_R;ӟZ=8-AT 8vQHu.er9*ཐ7wH!h_9v-,Ov:y[A ?GI͎%*Ww],^Dx q<5&JߓנfȬtN-5&{yC.QIUȘwkxi$kP [uN;+aΖQ z*٤aI(PIĄO7imu-'W0`?ɣ$:>$ Qbj#+Uߜ#?N(NH3ݻs,`f-?ƺTjY`` ]@R_MG{, DJ. 
ֈq>$?1:V9^X\@ v,\/Ϣ&o3&4,䮔`˜/&GD*FeǵoFdȮ[ yz3\=u PϥTDx6Z5 5Th^Qf(=ҨO(Ѝnk %K>֚h?ބpSTr2sMu~Xyz0'& kw6qg,eׯ01BZ)h46Zp̊=?`NϚ^|.y826Jk8p {۬:O— OgޜUNc>Y@qXqz!p&ԩQ|9EL2 8y).nQ.wv!I{wAE u<#aU{ *IC }Ruw9l~obJWVU{> u((j*N]#dQewkK4NDp=ꇲC'%+ݖ#?OjvXeXx74Y9.4&eȸA-Yu( dA0mY# W0%C(/*R)T%ᙲJ[>˧ufq"c;Ne9CULHsGb@^׻cI_uBp dRW.ڐ ;h=H$Uՠڐz=#p@%c;33K/_+GA 9P&,]V?ުaoED,x/QMta$0F:j+א2u0 L.8rew4'WFlO`QXtԬ*K\_uro]J(}) cd'x_51S2]z4.5Tҋq+iv)KؓQfd/9:4qV}WUtl>[^߱Id^K%6)#d,Qodh+*9`庋Y "H?}i mUp忕1m WlHᓷXLԩ7:B =B M v둞 M9$>Vow[]Kx;ǟ$wKҵGlΥAFF83R}y~C휐iង [g؊F)yy8| B_Bi}jVm'mlq~c+FJ s3]ލYڰy} %"=/Gp~g;N$vsqуwkΒۗIƻBch0SDSmߔ.}wQ.TӮ˿~( 1~scׁeRw`).n<:tg#[ 9DΌʒz#G.:NF GL B5 %eWvŞI8cDNzI,SV|6M{Zqjy8-A\ĝ H%뫡8 jjL6LOn15\Li"VQPϿw𫋸NһA]G{] VN/g݌m ȭZmho|d%p"Ԕ(=apүmղr-s,9VtJ/WAK/ZU0qjhA yw'ʘ;%5\u'RN.Öp W5HÉ'_ S ]ٴ;褟0&Kpm/LZ=k~w`0$=k=:4R ~R3wn,::7,S5g6pšm^s.q`'᳙@^{UY.'zT-)dŖާמFnG*a+ᳵg~,@qזoP蕌hd~"plV2>V ` zhm.L!g cO/06_߲Z%{J]&N$WONpI|/ﳆ; , |}ר! >;.TdžwJy˞0ֶߌMFH5}aNq1Zmt5JQ}HeޤC&*ʾ's{_U]p5b~7Nvk&$ ERGXdCͮr:9*R 11|˯r_e ךdX^.YhږԗoF 1Ƭ+> stream xڴeT\[5; ݝ! w\r>{|`@1{g]XAN(fgLD U1eb6032‘ ; -lE gsG#  :~8MFY= @ePsr32tpm,l)vfj'[ ehlede05H>*;[`g PjTEJ ]8-@DPNET*oF S']VTEPESA5@G'?m3>RMlj2wvf`pss7sqrs4 %ɇ$ cOݿ6R~$}؝CC?5p_2 2C [gG/7Є_aG?=drOS2k/C]1C['hߗmlgd@5{?kfaMVPNRLTYNcld>Աwvw+O=An'#;1&v66'b񡓳LZM-lMLnbϠjk633tݍkR?$񲷳Z;},L/p^N@ 럎FpL c!(pU5p߮[~6)5M?=?\ mT-FXX{wP$[8YM,ΆC/hkf XL~:,+>f`WC!IU)QIDmL,llCGGC8Ə`fcx1} !09]}vp /``0H 2#' AoS@AlC[ &L?%ӿ!Gx\A`qL?Y~02}mvv,>< C|>z;o=_ ֿ ƿ8aegG;+!Ύڌt?ܽX9t\l&ٙXX|+__7s@w11OeRChhT)$9i9 Xԩ69X?X9};`Av-[-Mia9rW52喦`e׽~60=8cM`r5z,gPna\Q` &*O -0P* B6 @bAR9{ݞR7E5])-_6#;2_%R[rI`m">BDG֙ѐF\- aR8Rd>+=$h;_Jt Oq WJz zr7KGT  #*sJ.ܸͻJT y~|yw%3Pu,LJ3V8n3nSwxQHIXo Gz< ,PEH؆y'sÁ^b}.Iv:~ #/k}qF`S1=2Rosr?sNcTs{(orCcKVXvūx]o GcZ?*(*wh?]ȍFUyPHᥐmd78M=RS%?%#MȪ~(<ԃ/@j@Id",^/QI ?4 wmfy, b o|-=A btpY{e]qݘW9pM#[7,56S9>tpg7ԖZ@!gzDtb-z )"HBYSU<]$'/u"6‚.dGVTྉe)BhHH[‹jӞщZa^eT wQ^x1q@L]V|&ŘfiU`qzZM@?, Ev^5WJ[inş^ JDc#iP^[#5Jm/"Y8*mЋ2MNAyL? 
ކ;ˬI쵬74y*fǡvn_ʋ*cy VBڶt*/}*sQJ]P/z咨qѴ뭄Ss-j#$'!<3Qi>.,3,x7˹%D$=JP[ֹ+tu|fAbo6b PY"e^M-k.,ݮZmc^\sVnEu괄"i $}~ oS&VA/G{n #ޫ>ꈳAWH`Z]ܢ4*TQ-NBM0{{L{DZզe@D%oso+x`LVHJV0#3˷xB0zPsP}%>к%Z- RWE?@.T=sR&K= ~NM7ؠبaL0'&!>s9"uE %cP'\{N&̤4cMM(%S(VPac.2ޱ܂_\dݜ388/0.CDx C iϊ |i "c>e]9XT`Qf $x<;ݬjVSo،r"8d2|R͂!?Q7@5&gfYs^/^He]}EgIx OAd䫈G9N%(DZTuJqFE:uC,Pi3rN=6\jLQwZZ lrg|HO~7+| t ;;FwbRn0I(N@Ӌ&C:gY](ʝm&EVt$T=R(Ok{ <O8jWwidօq1, uܗߔI( 1Jta|=XA+!q7^[@/@wt% /ęa.Mcr3Wؿ(4_NH5ʐA*bphq6= uM M)&]pNdKFޝEZ@aΛ?E:i\[7|r5oW ,ải@z5o/1IF`AoO?.+!64JR:\iEJQש6 Z\n把aհ(֖OȭB3 Zȗ=y{7}[ r+X%!l8[bȻy"E{aa_op!(wnq.!?8*C9]   Ayi <N xO/Ax~;o4~dxLZoZP@ AU;צ[1LCwl\qFPNsCb+MjRkmQT+9;C4c'464 MU:{"fAqغ@!^*}T˷:H[e53E)uFb!޹:=E0p,$2dQyݟL1w\EFWm-FR-aח耸[=<($0[2*iǹU)|VmY['N 󝄢'|h/{xgJo2=zNcqݵ|B;=&meb1OZ$,t܂} pT} gF76Gqil/L" W_M:9% gUŻDQ]i٢&yH~a~i#]q>^FN*]D} 68niq0YjnK-xfgɱŏǾlj6@!C|#Q* :Y*l2" &q_ z)LD ;vY+YƺkBٗ('TuO1OZ"do mGWu7삝KKһ (][2tqG3"廉\54n t7fИ#Sc!Y n@Ml4Q{hInkts8}@MsLh+´yn]f-<tئAE`] G!,L2M 4"6=#;mT܅:TU>U}IB̕UK"*͠3%),wb+MwܰkH2T3k{9i;ŲC9䮮asN^,ɮ–W3zQGRC06 'Iʾh_iߖnmšOSy%>D*ɞ Rx W"TCr͒oQw2Eӿӄ㸵:>|RpHLZ}WR+ş$]N+чUJzs[` -:?+ܴ+#PƋ_(Q/] %Y= ?pӉ6 a[O Z*.G1h~6}rl`,yG-dz=њVE?dK<oueI ^E,etn`07lAmp8K(J9.1)d~;~zrgSDĴyɭ+4uPߑCHH.8mqiJ2 X55hGew[,)_ a%:U Z%#4>N6U7M4:mGo,7U:NVWK #f@|P] I3ZwAnOT3Ǫ>7qj2f1$= 5fȹ\c(fW%GL.>W68ļey[+Z^sMi-AF @bښ)Y=fjo?O@| Fy o VMG+w4-Ud "QqBgpҘھQ{Fq;.$iMTSY\ЖӺi[,SML[Eݤ{;aOߘ+5ML9"hR6&iıTZO$beȸ U)F2ku,h5kPVLFA8l(>*A A +$g^ 5%X`[{u y3G;eT)o }$8[=k%y,s~5ZL8T-=N䛮Cܚacpiv{sqigRHnIXi{,ɢİ ]6Z5r~Dղnˮ1լϏ,OdDfάx*dg8~kga1Տٖe (qGԳ_֘. yQ)c=uC#A;Gi܈m EoHIibݻhlz+5< dILNEp]Xy -94WJ,&,Bp7ԗ0-`bI*ϣFppƥ{8vIDlirJ]M\zi66 X3%#g㒌֞byYa2.[ZT| {~~F ZK"e."lQ[2zj}v4* ɏ02E1̗>2ߘ"fĔQ`pśl,1s δd\_Ʃ1˜HɵkU)J 4B|x5Jlhظ$y )by @;a}`")xTz{zp%VUN$'h,.1myίZK Z1`mNE=K%'|~=DD|1@Td!c0+f/zSrL~KlJuP ra=:a$DAWVjT,71w JMigEF|E&Rc:T*v%57೪[+ۏ69T9-6G̨Pշݱϟ- ? xUaPINLN "λ0u Ik&cR/p~Z<7WL7q~5!`ڦcMvm>hWs|`K C2دeGf4v36屍þ\ 9$.}? 
tDyNKȏ&ȓ*)u)e-F x̀BVvA*By9?M`7S ^6"m뫡1y^R%!!UT,6nso$Rٜ h 8>4T[L )V؆.2@f:+-LШ.xژ;Q!%Sg$5 [Ts?S+ݙF&R!́"VV?\^XK%|Tދ0AE!ޱuTT ρƢh5ؼƬ#f_t9,kILߚ(.}PlSe*r&4^&ۉs8yRD? r1@c&mT[ $c,pp1B4();pS;*# "A*}I:MaށϸRz֑ Ox1 2;ffta?`g`*p/4Nmxz-@~O|c5K~5e./ᓕ֢h*CvY^%|+n+;}H]/ɺSsҲe?C9m2lA0d|1Hhb1w@AСqnX)QshGNpeӫ0UߝIS=2O#6bЮ4[,*wߦfXmuTTٴ, 퐍J? "cj]lGۡz9R}[f1IAMHIܕ7HKOV ܃;TZ𤟄 9 -k<8",s<YݩK%T\aŜ]z6nm~y am me2_s)"a=)}=紭=<~1]P>Fe!kKלO^.ÞDדI?ho6-T~əM&cbģmuQv,NRd֒oLe=VdƛD-۬>';Q1H a+`uc(eEQhd`4>Fn9fF}B du $~]aDE4;9%CS5!牘/aWe#~ O$Cv3 C6 hc%2pT5I  OFV Ɔ#.ܑ=_>7\t;R-d)cC7]G]-Wi?Ek3!Ҕ{ik-nю˔nbx9ڒŵˡY[T4$t4}i Jvk",[3ݫO|հN?uզ .N gc/F -/[Q</;~c}p/h QܫbA֤ʞ`ɈmV)7x=@*W9CiG " k@zH5xTaV#IML@U7AP#ndܒkfRpwj;Bn֋A̷ c3Meˢrř(5i};c:[ǭ}uĹwO+U#{;hoLs\X@؈>CW]) f%]ZLcS-Ee"fd}-gP]sU;-RΣ',Mݟ?+К3%6x{8\\-{%MJ`7\ &"-X4(ŧ SF]־=i(}UĽ|mr>]wXxX<5s%?G&%Kx[7Hs[LfH*2@>͍GZ ?묗Dl,|Feu%!R8>2p(>b́ q Tu5џtj_#Ճqگ,yvt ׽60jH6}zm/[r-S"N}n"d~E<8 unO:'J? tr'P`6Ufe揚9 tYeh/8饗VD>ٞeMWٝvUn~c>;Hv@uTY8~RCgF '|Gw4摒KuIȵ+3#b@jQ|ƊwPe18H@'ku=Dӊ PltQCZy#X=4D[aEI1YC /@hXd9Z6UPJ6O td==uG6є0u,<Lx5{w:8~pRϧm!E۾K?I ln|xeDY ,0|TQ1 `D\M 3Y  Fq&EwI Iڑgm{7PmF aiUCe ,fgSI}e8rN,>ʰxװƴɜD;Nn=4Dd];P"/ 7%#%.")6HZڮMwP?jpK8֧c ݃S@w !q'xj .-QoR8˯ۥW@a!I})xrQwe,e6@W0l"3)Dln@*mU,x!at3^Ɖ OS D"47{vaI{:p{j''%633":NYӵԙR?EVoc* #7M]ٽ@ lTuɄp.ޝq Ȩ#zXJZj1(0V=_n۰>$- ih9)V{VLQai~FxΓ &iDc*!84滛!*.>bm1Wg܊SZ=£c68Z#!2O!O1v U}[*3k1+&DNdhKB|P6#m-LP q7wA)xPMT09) If*`^| V=tJ\Rx 7~H&'Die̤˃t3w)l<*0Ԗ\D1{ 聳&m A9;݋ĢaCx bnO"X@Sr5py8U xGCk8)n͛9pEvܷςnȑE3*Kac@p|Ų=2X(Lc];ndfSɸ;Śi7xة4nX uY9ˆ huA v!E,L@HVDbTFVpQh2ӓ+Ε!'I^PTԅYffәm1=0=y^EsƩ:qcqm~."xlluQf< oiSc{MАQ^=V s=#x0Aύd3:&En0:^lr4K KIU0h{Ėm_z*.SĈ32}˒Z]cp 5m$E^-y77S+{- mʾ'a>RyP@2+:ޱ#{1굡ݘ]C^m4+Rp:EqE$uzUh2GEL*PnۆD]|L:MBqF׻k=# /^FqoKZp溾(p#Jج3 !Q1˥*"6iB+YU*9Vd/4śtG N_pB6wtL_t߀1fZ$*a9}WkDAؼCʭEzR' . 
l O/iB%,}"ncdrIlOVX=1̳A*{ʴE}O H쏨F/r ~j%t9vkXe:yǿT#[,Wcٿ)c;IWOV@^vH| =J".va@]wQcnlk$wJ,]E*`bGJ/p|=j~";ik/SK1/;pֵ¢5FŤP?9bPlM'Lҗ`RVݭYrh]E >UU!N @dL}(xk^9NSnLE {Om)vz錽K/݃u:sr\ W WQʇ1nXwiTPoZ^YVwd ֕.,(ũox+ !TȔ8g=T<'?nWsVk-/jTS`Ѓp8ň@)_/s~+42|9,I:#Zu6~&IU:}yVPL]dؠׁ4nԳ3,Tcl$?˼se16amܬ~ǔGĪ3%UO"Kla!^1`$bS`AYQ,rXmP|| }?+mW!coRx+䐖0ckUh?wδ*v.†}cI"nփH0doxqG:KtzcG`*?u\5x_# (\/Xb>L/Ub*ě8&R'H;ap@9֪;wZ;E^ri6Ywj7Lvz]Z=4~ nbo+&F=S֎GCV} $d 4߷i"; JK4jg h?Սfџ;( r bIYv7`9rU^~^I>jAסh oeqMv11d2D'0Iw'vf\M%TJ+$:b~ 9Ā3e#mM|4kkEN/<.Ւ$7̯n`ED9Ir%tJh^4ȇ2@CrVjJR~;\#<@I㣎5(FO)y4!2$uh!;Sm|y*a_ CL6(w"../,%EKZ\4Ri,SU@(JgDXU%c2icJ[H ^I;ԩs`,!S3EZ]L,CA )dw &,qo_Cck0nayD{+ue(l!]M5&qI~ydTr1?x(GI cxqd<Dt)H_qKj}ı68qͯzhbHP'eq\`b"W 渉{ф":ޒu(_V6Fu>O<WA\`ZW]ř$ߢaca˥|k*FVeGHy5*8Cr롃; T7:vT.HR9^΅ ү_BCDbIU^jt"&7nd^X{t;Cl3^|K~RǦF'MCJrc&[R(.^ZGۄ*mu*XRt:J~d%dO:jf]*6@MI@֓1S|OH7Nt{21o8ch?ؚ<ä$&*^ZUp%M{-@!TJ"S֪WWil)'o'#nyղ.I%%r `b$(\{c$Z7zPL2qc0R㡠ۘ].С'[RǓ(U/QXeH]XAS}[e1ҍ2ۘHra2wR'q#;>8 w5mscf;"mzzR J<|~eR&9_AX\ Cps^AkyԕŐIAˋlH1HYhcOϭl<^Vq5p! .~E86o=lYY}4-ĵТק+h6vnRL4Y'1݋2"B!E")<9.>9}a0ܮЪy[kՒvR(X`V,ۂe37L?r5~ŭXfΡ&\iAzMsps\X0X96~BZ%UvooFEg pUTPe!"vt3yfʞeX^3ZoI!U4K G @: 9/F'\2j9)e-XqP᩹>\kf9FQ0o `@hLUJ#nea2~TRP ܐbPG֜ s\m61| Yȓ2N_f1(l=jwmͪ2EJ{ u6t qƻےnuZOP:/sW:4B2>73>wMИ ~koX (((1xa8CR~!Bi!=  Z00<lb~Zx5t~7;iz`,f( hQdT#Y5Fb)^v= \[o(. 
K̵gBG{$>lΰ WЛ6vgס6x _ihA%MʢƢ{vI>bq^f}SIhM\c z`%ϗ!5E!,ҘAv.=Hd[Y^0hӢm1D tՔU/؊2Jll:͐#c([ 7h32x 4#!zW-PV3m?HJIJ:6y#)Sɛ-~ ,Lg7HRar=(f"B6 J+2q RB.96G^9V&ŻZB̏6djRBOyrS"@aַ{OeTG:WlNM̆/irZ\& kmU_;Ne8~3rC8Gݤ(*ȵwXT' C`4f; ơ^Zh5gy7LD3qo 7chcNxwmX: 6};zŕЕ8x;.[!?,!wDFOT|,,zэ{DfgUo x_e=;f̩yC߉?, Is$G__("^E D K-syU)L̙hsv[YCj7Dm1?&V'wum%" E6sL C} mWo"*5M'[eGg#7k 3Z$ʧ, ^33P"_YHgdM20viEkROz=cӪK]98"ʱ|)>[ط"A3ṿ 5I[D'ظI$˭WelE颤C+{FvY$z %'.(_,@iyq6do9GaOnGBŗv\DΌ\ke iDBn]'5҄DD H."")Ȱԩ 2g?nBpxD8y{-$ G9pu2~qfd zm[kpL>' X"0> Gw~s/gQdm$[WE)\D4GӘEo /)ɣ™٧JA-,ţ4LJX` QMyph3uSYl2 ?YA]i#~0?GDuVG=#aSw}d-(; ]0ĤGh_,۟4vIӘ}ً;ͽYFg^yiy8< @mB_XSN^nX0Jtgxw>.ذ.z'|p Q8ez}-X2d|Lң ᘂ[c?BdnA >5a787}"R 8 *(J z+e5ZEvև0FN Y{y^$T ~:j~`{#kByx *Fy-"u{=UI # # zL KJ%b=1B[uJrƧE;r[q%J@.VPV $w͋HK1?5#Aю\b| *nl,iOR Xz4G^ذA׶"^BPT$։T|+08!B[Ak+m] (PDJA/TuVV()At918aV nMg-@}i3Ƿ~0],. ӈ RGR-TdgBvvm^3! 1XO v qk΋W"iH=Tq wRQ{J>!=!X->½ db#kO%V_?l6EPA8=R}38ţu-μ-Zz>R2hھ X]X D9I١3Yp~IrŔ/m{hb,vD-x|xSBĤșb%va%ʝ-jJ[C']?N 4i'{x A*`m3sl8C"x^6q C`)^ݧ`Lp iN*2ӊ̤.jv[->(ΣK~0iJ @f7 '6:Ov'M>-:LT:4]*_s.(:4h>+VԽXu6>u,&O<+Hy4uVqGBfIiTgGR>lq_VQW`|@K<<̑{%JSϝxuq2Go~YS<V֫k©xɨڢW$y/ɹF3yc} yȝ6m2yR(F"xt./|lzer/ t LVq'֩Uw2뵤GN_Dc  5$}OUGL[/[= 8a yl|y n5X|m{T5Z~D1 ςNa'G dNL$?TbՑfvi@q aALژ.hk4#Z%{>[JVL>2+:'+zx L雟u0rɕ[<5,޼/M3v[İ{FOQiƘ)f3Y<$T"pٞ/!ަ6''8m{S!eqɅRk#+chW6@Muefx VAyq,PհƝ[^5JM6xˬov}eՋ9t $n^?eSvdirfe-hIBZCWJjH)2Uf`CfUQ:P&ryQ@_N7o3VEM2>/4G|'H{[q! -i~uenK4Ɯ X}#W'ɼU# v_Nư2ݧx ÷D 5aid\YY/J< Ȣ".i C,AS^POJ.U컷E ?ƫ8Tp߿}%EkAie.hJ~tA {Z=P `rP>8P*#rG|F'&j>A)6d'VBt$| Ta@;VfהpR/ca8OiUrgL.Φ\ G)/)lE\qG\YVjuf /oqt&pRP1?>%ԎK\J` ;vB>-ɉdzG'^qPcTU>OFez'#=7O>Ь{~.gW csKXB7p/Ҧ>Eeix7wnG5~s2McVጦrn<øAKd= (5pxasl'd\Y̟ƫR}zE9>rW4ut0gxKӰT=8ŊMBɖW#3GH:4fs+2Y==".,ux: ϵܮUxexCFNu/3ZiU JD0Cy,LLfNȤ2vsgryØƠknzv!ȥw8x]rLQVLOs2fo9Uhl Sá˟e*`@6Gx%vGG!E$mI4 ntyhRyn,@qE@uuаip%79\w'48w2>-2JK㔜Ac4 `]JsΔ&gRrߒ_tޔkif_u=7@ǽNDAr$*M:`O_EӎXH&}^8Hj2޼ :o4*ڦ$MsYT98*[5i{[D1KxvcLzߏDst*B/1=3 q5 @9X&MP]oɎbLMT ,DYE+MP/p{҄BL`!O(>܅B}|䉀y-):+LŌԨ"\|ޕ &_k+accfVpq̼$W/ )qjz[jfm^BoSP:2KZ}⎢;2goӓvH>G%.gd. 
\צxi&QD:NzԪrM@/rJ샥P90?b=er h 37e}>fAxl9PLlޏc@54FE߰0hδ.}mSܵeG$AwDT'B UD, UwNT=#5,C翰`ʂj>!R61?[,f$U9)"IRH7&;L\;iZum_q bdz#҅Ikbso0a&~vR(b[ɩ7(\"GH!θڂ0&B¼jO'Jq"ê!" .>/W/3HB]5UEkp:JoIѱH5:v^-{kP\,ߝ[Tn3K3!`c穁V?dnKa XUC7Lu.0ŦpAz}N;UC욼RPATFLqJ(H:_:C'zVkTOpG 8 ]e^o%̞(CIH]@4=GYܟ0}@d±^]vy4̹?t=U~<Z(TǖL`njY ~%ߚm8ЧlKiC@ʺq:CkHɯmriƽ`_2T]_n9Eiѕ,X{Yby)܆yUu[j,' I|/'pWdg__J"#jP"ՓbsAO9i9d[DhAWL1> stream xڴeTJ5LҸC;4!Hn;޿E7ST1(qcgH( lAVfzFFV822{9F p49;@`:(y#ûhcjnzwٺٛ9BG'oaz%``c Ȃ\ޅJ hf`e%1E%{`%'[[qQRV * %?@wYw<eĔ5Řp;I?ߙjb+іŅdoJok?e3s kU'r:ϖ͍6?N)K.wB8i/s_i 򕖗X8m l  "8!o0}eV^.c6N/d`@{?{fnLFH2{ɀޫcCxBNFv+IlE@ON {7jKJMmMTɖA UlEpLFt52cN#f#~/-`b`27y88N@/*1q͍ߛ}PɿU )l@8Y{3P3cKJH_+ks+5gsqsWٿ/WG1_"?sdް3 @?^4:8Rk?| -AIꋚ8_Fb6F csS3; Ll tI 6 wdg# BDBAo``0 _F,2#N={{ſ{+AoA?@Vo ;@&ߐ=ߘMI?UXSgCX;k -`z_?;-wZ;p2pow= w.(ٻ?;Sρ /h a"cho~81?O}[XA޾t̜\&ebb/_]Ro@# #@2or(2.J,~u8ȥN|@BV Bn$?bu@L׵Īkc-oo|$1Uz Enbɜ|ٌvBġWgCe2vyJK {+4E.o1BKsXж}=(HH>/U zzvOzWc25aV˓}HRYZTwy#m)Q !;肁# N /ʭ8|6\{]P|A)fr=;wJzA|| I <4pުbm]UL4yj!/=lTct7ҁv'Ul "\''zX\~P)Dzb?l =:6!nE)f4 mW)B}SIԶs0by8\= x$ ǘZ=Cڟ)"0#RBjDY3u{HW9}SΉFZ::Y)wWב %qlrn#(iP\u؞zM1~$qF BP;c%]@rx\T]Kax8]&G@`pϒN~6ɲ^osPnyw(N, DOK/kf0b>?GDeU (ܷ" ؝I64$$RN'] M.%="#e0'U^&qwY9&X肗)MORWm59k"k`{$A=3X\ֻix OID,#yfl[[ίޡ?x(koՇ&+v(E4+xjch&=ET=ܱiź[kh OPC'}- =j ̡".jDBÆPnS#| 52&9],-0}ϫt2'>!(H&pqXmՆ}4o̩KMDnt4属hd$ p/*>Ӕ3ps6<صbP4nuK p,o9:TA' 'dAdhBWfzvf Ïa,jΥX7Kk+VZ\YzyH"&I]T 9mQċVF<.G|(ŒùOk%?Q$*b EN:Z]\,m7~8[?ۇeſUO g陌65a%ywj̥k:0a ;@q|ף{ooFZzbDQڲ 2de}[k(갞l( -]w+ M"^XͿaZ4z2軠MMЕ^v'ٮS<mcbM 6t2Μ5^Æt^m. 
DPWK6 (IpRT2P8C (Df A:+Ma;[|N4[.ky/6JCAgP_,0wS!_O8B5ŷ'z}kSfԘK/bqYj 7 iLk=(FTH h( %\]xo٪SPa#O/(zȂվD:IcRwlEB2z 7$[6FrÌ}&s:*ζG sB8A[Mh rhNҲLv$pp&K9+&1)E=klx@$ZU9[^| d4Wy&baFssGWr vpRu$+֔"0D*殴6z(7Wcdj?e9pC-=h"t+mKVV48.ۑ3ku/';ӳd.^Ǒʱq—-l4 r^} \&,˩dPIq 3Kg@AF8~ z[5iQ+J3P!C~m"Ù{\S~7L[om5sW^ ߞ{HdqinZbGQS&]xXT\kT 9a#||8N~JǺ5szgUM&Е'"0SS??&دXJݿ#iInvpֈS.ʏ4J([gx#/xP G7&7avF2nqE~]Q$goy3u^~@L }ޱ/T0|sy8D$n5Ԗ 4py4(^" KeK:t5QCrvfSbKaxu+[M~xķtr,~jʦϲwt(Nyg?YOUxħYf<k6)BDX* '*%%ʂ~o+Vj8[XhKWAbJ7%[?^Ym81'VЪ+Wt0BEAgbO̚2ih=F_ ~՚#¨[YUĸPQ~XNx3^syygkif? Ԓ0*}lcPh=>}֥mm&6;g44E\;Ljj-U w.]8Ģ4OX[\vQ!ߣ%Q#־=ûR*6qīKB+x"@y`,BEwgN*ÆkBB-ҹ! nl ͼusBϷd8rd3>FZp&I]:>CV).(JqhډTF?c,4)|=Bc~$YkB䕡!갤&6A&.׭`Z)0nph`<^YxyxᏖ61_ʴ O.MЉtF?.P<\/ . z42P]V[s/cN-PJ_LklK]Ph2=Žސzdr+=m2 M2=ܱ&*ʥC֢*a[4~kBSxf|DV ^YaITh]S3k4gb [-([V" ڛU}Q#ɕ,I /@MV0BiBz/㎬qN 0bD9?x̸YRHPMt[ 3MמwT>KܲFzHm38kY gK/'6xSK%u&▻w2}#tY &U˵((Wu4?M6TUD |Y.ٳOuǥmasK/ qawGqt"z:o,(Cۨ[FMdc%yAitS+pc_A)8ZkG4#t:|KGTiDq:xG'Pdܔ$"̂5l<`!pj#xͣ咖w!r]0z \%>K"1XuJ~^r{Yn12fFQ%2ȅ/C燓9)MqZ&a&*#AOLFI: blU^>bk*@>:M )}ڭqH*hF,e]1_BKTR:Wh8oA?NPZ7P^*kB E9٤.5h(qfd|HAKpx5ɇ *#Pd%0vR&YCԔxcѝQ>P;bVTB5&k,(}~U׽!P| ̨2g2!VVy4 s v)Fu Y}eЁ ~MZ8@9 \r<6S SASKEs~~Fe|6!,)c\APT4خ: Vλ/~ˋE=h#--k/>L$°-j4 T' ݯ.s4F 7ŠOKGnܼV/kYU甹6t=*+a3!Pvk"g)nfM22ޛ,炊JF4tl80L#(>pfou.ؚv`kӕt?}yRʅOښAkU3sZv\wErGjc$BW8 R,xwD,RW%17Ļ賂EA/;p}ew^?a t .;4t"$%t7+ ܤcBLNfZ,Iw>BS{y՘SQظ9`R%tTkҸR즺 \(IդE]|.0L5R6G!r; 8ax?FHVDRA %VT.cŹQF7hb,\?NVy%j̡[NcZsJv foe"]qЏJۚs='n "XkHG&ǜDIVv{EnAưeHS;lNp7WFQ!,=;I%F)6h$Lfx+l0yaUfnROeoY9b.@hXw]("5U0p`N _=PS 5d [ml]xi+·b32/2/ 9h/0D 颲Qץ, H-cwv(f}O+,c*8.JEG7MS\>L{Fb݋o{F (9šH!+h+O,y`l>>%^ y+dU`>/ϥ<5آamߩXBcMdZ4Id/hi,)8/{s89+OȪwx,i> %-?u:U q'M4˴ETv+0m!!L5gnCoG| XPV5I`!J2(lH@= Q \At6䀜~\F8:7) /mX RRo]m$W=R$z߻Oՙ(<\7gsĊ$XKC: _S!,ך' E9Q`?WK].Aa \Nsͥ- "ֲJBjas,ld٬ބWچk=` kʽbbBFD̦TzՈ3IgD]MZ[^uUGeb̩1DE'0٧ 10jn}vǑ#6Ӆ^r¸r&~Ml to;왣;N±53CA_mۚt셳Ggigl= ǔEu3#Ď5t̗K[PӲԛ#=Hh'ktG,VA[$3%G:":^X,fW "Hs& PQ&¬E2GvAï#Ƞ mIfZֳŘM~FH0#:ম .V[>"-,%_g^^f)@޼.lG)LJRf!;&w68~Im l`dLN }N@<c9hgɿ[*oH(+n18#1bGRwkӊWR*S{5?If0_xW>hCژ;;Ѹ8#7I,uXGWvijz [8x 
QXVbޖ?yNw6= :̟m]@=>]v$Z/Kǎ'ٯّ.#SY ~AOZiogD,&, I* K/+ixiA% |D"FK,-?.B3 -$ZnFJw{D?-ɮ(F\'\a AJM $Lt"UvFYyl̤r{IDOMaB$Z~v~*w*$po!P;ĵփ]JX+0W[1~CU~%Ar; | 68}epWcч,cki%""FdnlˋF"QHYqX,G}ԋ>ڬؑZYd^HCc yjb Syeߙ EP]|H66Sҽx3xp z)v(Ndt8 ,Vtok@_Bc#~>KPZa^ 5mFl˘r w 3И̼ccaMʒ񅛮 c%Sξ1C~Į mvgNe.g5\vG?9eyO#bjJIj:W)v]+#ÄqRanߧBlMO^lG=h8HS  o.v4Lw>4 a$?oB,i"9q.ԇ#QvtUP[TG^&8//¨+L1/$yk'%þӳ&_Tkt)7q̡W"{΃/<? Xl DXU A}[A#qIXJό8yb Lu$L >3M*e)E*p"P4"" ͬ&`SN" uw<"wph4tO#3ߊfkt*顊\th]zOR SurD懸L@tVHoSuӽa#퀳(g며C`g6N:Idھy2X;m% c3Տg5m*.bUtSjSJ)RUQܶӸ5,©m޹_SԱ LG)T5 )و;|H (~'!w5(o0_7uEqLfя4x95G^`xGoRr~` %C*ʬkuFocOgosY/z7)O*܌;28{S786eCuR:P /#ʳJ,5XtGg8+Y"Co?MmRd}.kFo5>LnK$Do%*>p2{gVg4$O'擅vBJɶ]{Ʋ`O4)嚰j`EUV9ɇ&9]8RI,Pf| \SDX1,[Gg=OX;0j)QJb]<${ zuTsމBؒ3ZMCJ׬l1nQB$:*rvg~v)pCbҍ~!&lq_.7\$m +fY>O'LçM}j$-4-z;RX'|okϤ݅tSF70t$)ωIH35:6鑦f ā89A' (VZcdY.Y%,_#r΅.)~+l%Iu!l$nu|F"y DX{bF,1?_p۹=] Ood4"Hq12-$ ɉ[A{_:x}+wg *Bt4U%u4ѵuh1ܖI}2,#rj_OLȋ_  hbWaTTм>0nK:b {%<\jQ]tAbU'87 X3RS~PP `bg\eiR _iaxW"ɐ[ਁuSU)K:]4>zzi#,thدa=Qꓺmܖ6X>f*sT?wΉo?}0o8|lTU3 BS1[sA`gIΎ'/gSd{*IﻅCNh!RsV++vh=̉uu3BhCtW`3ec{ˢQèƝୟDɡ=&f>VRmYiK()|2 M7tMWw+PRjn[C q6Cmz^iPd\V*{G曭t4 =bVM^_[nW܉ %a?3xtyeoH8k~-L[Vb׳%ߕȶY<~8Oqx B2FUgC/ctxhBUX/M3ATB!)4i*ö'|iz4ll=Ug1-x#ӏDqT RHzLvÆgq뻎hǣ;),T?(˂X*\Cw[a8bBB1=!&vM$8/t ~,,"LTgs+R G#ܡYΝ;.&r[б tOğk (&_KS7v6P)AJ vgY-sn\7yucqKٮ PZxm!6K\: mm|s+t{ "iO_%#="輄@$Sn. S̮q*a!@㘸2v@H<.o!$,Αho esy$Z‹`BfѲ"JҡL}[w7 o#R#Waأp5)T LH4^7. -C En6IyA?!8>W+|CϵbyWΗJiHZpo+m V!03Tn}$G.\xhyHq9͠Jsm2g7wL#۸CJ,UL­P bp|4Zr//^k+wZl3 >Hٓ2d v()wRmDyw2 |r6jW}N誩pgHB`1[ꔛ%gOeCc *T|7&MFJ VMZ&vXmixfGѓu)mqW}3z T2)11a㰑E|_|c{&ImlXJmx|BiFJWnsNKdB-MyLy 7aڪ̟ч7ZH?.F Oce~ Y+uCoIl9鉕S#l13`/ Kֽ\~#mʲ5 caS9O¿A<$rIzj9s\"gFΊ8G!:ĈUŌŽX76O!Vԃ%3&׼4?ؾzpfҗ*H$6 g? O \d&Nu]>hjZJ dtMܢ~ oTuՅ:r|;ŤH1voK{~ ȓaK-)cH\cH:4רZ%}Z 2ԑ1"Be-Ol(5$18ن wA-!$(@S !' 
`S9 Ql>~ ճfTWՁe+C'ִ!FGѦ@qRF9 ɒˁ춈424?%0E)}>{z= יGiL~JqӥFc;Ob_=Uɮ'i偎ӄ25t:&}e3e!mJ@f;u~ Q{-t3 >5Ϟ {9*ҨLEr0eQto"dGL>"ĥ>tjη֔ۋrp5ZJ~@Ycj[W`u{q(ɇ#Nt Xx"Kiь-"ˆ/jd-Y$~fjZ^.V72cofo|H]t0;RI<<xzϞFkgVB۶@si3G݄֟<Yl4qD:gco%4{jFU'@d ^]~7i*zf=?>{,+r)=ַ oۯe5 9ʦ t|R 0 $*D?_F/Oh8R:`'-@>6WWPA % B+0dRgL<6V6%?7z-fޮT,X{祭H*z]aHT{<뗻wͨY DM*߸hϵ1&rڴI/0I4O3wwOH\'C#v^`>7{e -ӹUl[_>V)P(1DJ\ kE<>y1)ZA_Һjo͒h{@v ;Syh85M|Iw= E|EQe{,Eb*SabDVnx&G}U*0LC%t8S%k{,$ڶmm۶}۶mmO۶}rJN ob8&ed_v -&B. ܏\=ukzɣZaph_;Lu9<2(̒ϐ1F>x}HWbCjb#4d2Xaw.f#cn7S'B1:;ځ%D  ,t3 3ynmi~[A,Ci sMux4?|7]ʙ#JhEiՑ aD{71hՆ{×:p>2,0"q"u 2=Fb,`Bt<6gdܾ]OY VQ٬rLQ$h+)=kVdGυߚ%hnu joQmLl}G'k?暰fAxBj-cW,3u:!ˤ]Yg >h!«,Ii *kJhZ2Lӯ*-@f(F#6 $ڨ3wvB١CZL^`$ZoGkDm*;l=BqA{rgkJ 9i(Ty9QI#+أd. {DlapsA u]>clw)Q$4YT>] R~Yy9VA4B}֏u~/3f+3m|ɍϝkيUTj%C];5iX-&B< xg>0QɈ xoBgbZ~ 5f}F< ŌxPq}U)^>Fe 4ۣzy{!+n%KD{tq!vRI3|Mk_/aܼ G3=f,3^Uz(ϟg{]0IŸP,δ~DK*ILV+PLk s( E.Qs?}iaDhq֯ b8>@Em40-9 ڵ.,Y6 lHxf2J;n9Ymh XZa W͔U5[$CWFfХM nWelHSzРmHrSJ/غo$0JOqIpx+]g}L(.Z,+J(1>ZO(qrNO^:vBO*_ʏ"-eBOKV~& vWXrީJeMU"\!$8CЩwT:q`řHJ Q#WiOߜJk%4SMNROË}V*3>5$5j"Y?sU9֖命|f-̋p$`[C/jdtm稀N|y-ȭ#djv4 W" |]{+]M"<9,xpNE3: qc@fjh#ն4S Z~z;A@ymKu\ve$=$J=.B!~\GqnlV ֶ- `{@ hR lDQT}4ϪWr_wq5vZVg_5MZ A0r@ -"} ak}87dBܰ/ͭzWz=`O}WG*Ң*漻 nћGt5TD{1RVj1&;Sn_ܾu H7-s2;짤jbr4=82VGT69Qn=ڬ:1?Q-ҡ'Uu|mJ0C6PM:mZ:.˾/( g!ײih~n3A16V1FPj%Q|Aww !=xo*Up)y<JZGʹgRZ v:\g5  P);1eOYsW jߪb)ua(:]*0DRI՚ܜ i*`*TRצy`etEE"l(8v¼1(ےrMsUwL,iƦuse aKuHT_܋#rqaHX레쫟OqJ8(~ntb#N G^e1]:vUw-Q s߅e{dwG 73p8JY S:0(bނW.#gh|q$҅HA>X8 ʵj?6zr7`8\eNub#]xNo1y^Љ܀'Sγ_w(<dZ6mU=K33J&J勭BeK04^٬d U8kl 7QoE(F qaM,Α)Sq} 1ETHspffzװ*ai ?s]% {Z/E@z|o;A)/" \1`-,> CKZ;X*a/QUb*ʽvtHc%']d 2KPjjJGgM̚:i>bn tux'P`bPl7#Jvtj}da rOO|uwV^6*k~<ۥ2Hz|`-Q=n?wl%)+}κ_Cua)Z%Y k> Ir \\22+  rGhFG૫g^2gw]o@4-ɒ vXgV+A,#*I,]єڵhCw$7f:{04~рxw9BR"sU]O!YrVR5,׋֌_]7{T,YsdWީ))>>mp8wSd}c={li$ " x5$ކZ`&$:+:VT{NVZ1*x&.^4-ȗ|Q,!Nq+MD\1 'қ3Wg@E$a. 
'a!'<x{3rc2<.~rRK20!Wxگ6?Y2{Y^ زD3:8ͅFjkH5%jYC+ۢQpp_YkkVDo)xR _W52 X.V51P4ܐfDS³^XL8v빴FA8 /lE{T^BlaK\;ޫr|1ީɋჭ"!@Ym@g8UtC_ڸ`_}5#JY yTO- ^ϡ^1ZA\7{/!ndb+|<T!o8}:612s`5z 28O0NQlb À}JBR'h-D\!F9pgv?ݬ!}k?| e=ﵝ FУRC[R%UVsheuVno ]?4|y x; J۪CG(oAs`r.KvbPgS6:n>~t/ s1_Jzա t`L/:AdBJXA зmp1ݕeCV6ũ#ï3#rK- BP#q/]#/.+Yƍ7ztM1ȴ~\aPD.yzӟ 8,k/p !umŀ;f&SD% f

*5X)P-&/ ӊ$?8-ǷBJ.3LSQ呐- HU #on9 LևuR228yCݐ-\)oZ< bˉ O7gRum@٠ $kP^R$I=GGd^n&vw+tpdqF Aގ`=BA3ʿn2v]/lL+E׶:"`wjgTufAcȸֹ>/ `"߲Lv :H^o'')3'ukѭ;\%>ܥ;ECWoS?& 0o+񵡞~В[I\Йf摧<\jaiXwNT6~=O$&E>*k{smL\D2ˏ>9I➥& _ 7gǫ<-EuHQ`3x˕j0oXWCB"t&]49.*%a-;mJdE%Ov{K9,{C(?Ҙ &I+f`>A0SέZL1/9[/}Ga|y$dM]bw=C/\'hX:<[UxE }?OeQ:5N3ʒ E0=܆͇v_ٷ#v_rnV\>- NBI#DC9ֿuѥ]2xuPZB 2% |0΄:W;l>9·e@}PW"+'eũxQV(ᦌ3٨3gBW # 1a>^&oLHLɒkɄt#>kg@m͍ lwALhG By-KZ;a' íߡ@(DOthAx1Y,'EsuGPGk} 6Qtj>?x8S0ďhBQ|7C~]9%'Z"-;If NFN@d3<,gnt#?dM^}rlaD0LIYsZkShN-<3:a{p^x 2:&;2}VTO񻼍G^Œh;hj%=*$PIBnx-ʔHkC9Cşu~ϴ%Df ]>,+h&7Uī-ԧĢ-Uv*1뽦b⠆1C|ZP5]}SDϦ!Ưӿ/OmѤO#N}l$5N`Vq)BS+w#$Z#i|4^w61:/0:^o0;o]++i\Q-Ӆ_} +-|rn?YNNV jS_P+ND*-̦Y_KʽH_+UyYlw =݈,nAOzeVƊ՛8G].ӿ|p 󄣣h^ln]~sKqJU@:7bP,4O2)D%512*_ޔŇ~ˆ-J_ֽ; ay 3ԅA膶<%:=6e-?\GzHd%m|yjw9'@PXlk3#P͇C`Xۨ{Ǭ{C)V t̺uovG=$&^ɩ<<"`%f7Z0|`iDǗ"΂ȹ ]XMkҩF,1x/j:0=u"yc_޸#[ꟑ}c~׫]jD9ר/B2IŽ1OlxƵ9<}j4>~Kk FFzT7&5̻Z(<sns.)w՝7_2;l4>\Fz+U@&T։Z|DCv&ՍG67,-胱Еe٠i/i}KO^ssUf(t C[1|k?tUGc!|/3H+>(Q,?ޤ >]1n`U*O D/+L !|,Kă)H5T4ug+hKNj?*P|%p(yq>|^]s(\r,mܺ NmVqkǺxK@Ky|:x;`j|%pȶ"2shHpeWt{_[@2@G~N0s3a 1 j4F<"Vnp|H "eiBRq-|n`)Y;fbR8=i]|^nKjj_7^Zևz4lıH3./$6b{Mh7_-V :@ *$<5^<?gMؽ2gB/7s* !9(\Bm176͐8Έo65/c!IYB 9P>mH&Zby8gEC(E? 40yub-{1$un.F[Kߑ">fDOkҜ(x}ԍ\9J7R8JM{}g̓aƥᘂ&_Sj1@-a\N_$9vPp{-Gm CPՆ}d%~̄V.XU6!{ЗrkM+nØ>[å>= %!lp|b/b"2! To~pW09 `:(s_;\lڪePҬ7)Y^ȏm'K hwOGi^5%ں5JVp)`WUN\ FSރDx8T!&V=my1jvtUvR4Yp$Ŗ9~_A&)w>:_tr`)RՈμWXC<pEm#I8fʿSqӌa*֍&E96%V@oz{|cB:*_l& &S_DjW8:FRm*S)t$U|1yZ xVg.ܞM%ufT>Vfxbӽ-!fq<M%B!ׂO&Vc{V cTjh/jQ)fW͡g{j;A18 5o9AMpfC|+n$nٔ>|՜vkSb kE>c$`;Lt1INjwYjYO1ӎ^ݾIW) endstream endobj 414 0 obj << /Length1 1759 /Length2 2822 /Length3 0 /Length 3922 /Filter /FlateDecode >> stream xڵUyH8́jC#;p5`'mᦍ0SCR<.g0s.""·\^6=ˏ\^(y L8@ sĻ I9IBĐAp? gs]dWCcA0 :p>}և< Cg ؅xn ޿mQ"+ŽEc4(l~^6 Y|?W,6(f ٸ8:PWD{ƍLx6Z\063#֩İr8k>J<>{2'C7/ɂL.0p^kG8bOB}0(ĭg#vnd$Qa0IchD^(>m'p0:Gg E0,:98. ͹&ߠ&bZb Dܹ0"ϙc8N=_i[?ż\cYAf!J@Al xc%>mlD_nb Q,=|? 
d~al;Cn_O_lD2XP@42h<M# !QD P0+!0CYp&1M"Xa" ~0 N8k&*}܅6qCA'!Y8%،GG@ۯ?5~r~ȶ 0DC4B74L?e.Y#{ @: nQt6!Qvrk_RtG+UOvk_i:o9j%*g:oexlŸŬ\`SJTjO>]OxؙO%6|U>S2-e<}jGRh6뚷NZ^K-y)@o$S}\wq7 θ<ԻXJv ]INd$A82oZ5m>g e{+P%T##UfL#,8<vF?]ץ%yލSUw/T+z=청ipރqf1=pȳJV˶zw8.ػ$_7(+FA^y Awl#8UGyR݂;Yx7(ݾqdžTa6~/$THmWK2K+ŧ/m {ogMn(3Bӥ$-6 Eosl`|7e:BY0ЅW*VTQY?;*L+4yuکN>lxp|:iQ]ZK K7_MӋNDQ4u5_Y*i( toTi^C~8tRqYnZtd1/"/UU~Wz,t1qfJ$K*;p@7Jdt Rj}CϺOhSKӓw,+=PrpSݡxs5^JځY^rM Xi~$?g=40cw56,LjRN/hS3?iJH i26}+,KQszlǥNWs]$\(; SoKP4h8UgnHr֝'P?y^Bl+&<) {-)nIk>!+?y8GR3DƤ&(P :[N{?{`Ս.Yk0I&ޢww>Cq *%ԩM}o/:[?i_]G>Oe]tw8f'缼6K UUF=&)Czcţےo89H.]?uLfx*nRFdd,zkZejϗG_yQlȾ}eӐǺ^unk+j)mLؾWi%jSazDM_`F/QJN}괗=:.zԍD,#w1"ND|Kk"zX8<ד=ck)_JsPIo"屄/]*)_8$i'ʝh؀<u- D֞dGn|εzb}QmT xd!lrס5wHo֥YL̊#<+l^ Wl-ݯ,7 endstream endobj 416 0 obj << /Length1 1681 /Length2 1818 /Length3 0 /Length 2856 /Filter /FlateDecode >> stream xڵT{ߣ&;p`Cq2BL&Mp HN" up-P8,E&7E !(!LF|89n APؘHq„Q"$$0#edَq°q <(L "#0á0embSlP>`qb{g/oo6n>loٿ7CL7!eLoo *HNf@ <&7F8.R#""(!1ND!!_;Lrb$( Nx"ò$Wl) $;70\V?0EPH,d<BPF!C. x`@8ID"YG6;b1PP83n6CňOU2bٙ!trwua{= d&FRH\-iVtr0@-&sFpLEXo^rySJT r- Reʑ23AIlñ&^1H|rE[. 
CWwGy2H>>A!ɘZ.摨^N3szJ|/H3ӣ!iq &@i>DD\sBHOr@C0 )43KƔG6m|BՄ.7oe1G(b107`iaTgW7W<`\  KDPF(aabp\BJA1HB  x$;fBDf$}%Rh`}k6.`_KݟPEH!:a'~hIe;:b1d3: 3,}ʂE&gK`Z6#a6Ymb֔gydkZ|O=XBe|1k@iNzO?y B܍!)SšfOb>+Fϸp*ɺv4ۤ\?~q٥VW CTfy˵N}jwhEpא6:ezR)=iAUpe_c7S(3f':ݝWznyIuYUtENiJ}f֢f&m ЉΉ֣aۋS" ˢ- |[}Cܲd·Y $Ho {aiC,m.[ eC*Ik9?Q_Ng .ݰzڳÜ2@iw}6S DvI7w onT8ۨdn]_!U•r2n ѻ&]*~kG5Z}EMөGsd2%=frrɐvB|gxzJķd'VT*+PJU%/j[{kJS/D449w'.gԎlqǥ랸)1>d?g endstream endobj 418 0 obj << /Length1 1673 /Length2 1427 /Length3 0 /Length 2455 /Filter /FlateDecode >> stream xڵT{\SeL 7TlCpK@jr؞#9L6AC0DBxA QDĔYJX*s~g]"#TPc$GLHѡE0ɤC +!a0Z $'BP!A $($Z H Hr#XAo*%7X4IK58Q*O1& +9BQ1Np q@-^ "-T*ThOG(#fP#p 1'Ot8F xE!ϡQ$ <^JJ 7d$85lbD2A1&LMIj`pUA$ >STRIEI #/"F[T=b$LEi2AA!{"i zNl.-Izbf2V5hP3C1M&GJ1)=#)v0.i&mt=H g_ u*^O62hD(޿;SԠZCd)1t FP&s[$UZئ,!i4Q >4# 0/@HJ0l#1 gb`riFVc: PC 'IJ %1trD`D,>$,Ҩr#!>(AP@IvA`%!#}=Jztk /6ԪJƠm.H2%egԀ\/d4p|]f ~KæRgkzxfbt⪐5+e[ş U2EusuwM%}ݫ^f'I/sߚ3bY]Nj-*.kϬ86FTѧԺ巽Au>mzp'oҊvH}Ap B7nrԵa䓁77!'>'|6jcspk+ HxGskeFhskˮf2R@e&k }vO؛~iK͕NX-9Ox7 k$G h1IEQ 7'h :_Jh1O EuNI zleZCI" 5T-15S'GYeH~$Ό4^zXPxr˄a( 1G;D:vr&NB];46`sgus<]O r֜9g=x! 
]Bkd, Z,_(Z~##.XUYMϦsٝ7=pwLy{:r<}Yqb\@vѓ.w.~4^}k[\7J+R⍩c,nm?ZalC޲?轙nbHaTgǖ)GYBݺ:rgr5r7Wcsʫ2SOfU>)]R++gF~=V ;#:>nDϴU6MYfK75F1/v_up{jU c DmuzYuPÛb7tJ9̅9##aTMv{0-xI6Xg?ď+b2m'to,u+~7-׎,ڂ.8Oj ]fkpb7We fpXcų%O_iylW+0Fˏ|Xs~Q.& '<~ӲQlaxg_v`⣾QnGYG*!ntkb}<ccBZE=z`ibUai蘉[Ea0ؚ[ѿ.ouTi_>IuwQѩ'R-#zV,gCޭ%&a߹9oyAZ9=7৬'o>"ӵQQ.ho| թ#'Wry=e{SCկGd~?34eagTj-;yp7+xκHyMpc9p-gwid9WDTy0^NWskny!*H`uDYǙڷt;{)sKkn- γg2 1U endstream endobj 420 0 obj << /Length1 2776 /Length2 33338 /Length3 0 /Length 34880 /Filter /FlateDecode >> stream xڴuT=LHwHnKK@b6 ҩHw#ݝ9{7ļr{{\UYl ۻ0 v&l@KW Z hb 4q x]*f.\H?25@ht8%@gP;08C@{Kk{ =$Edmi'3JY&f`wg[k9@E vt`{)d[4- )u = Hhhj0$Ŕ5@m&ﯚ@{K&&$w;5n@'gm 5H:+VVwwwKWg%~Vw- #9DN+ ;I/DJHob!\~+ G+rUUv&.@{{3H3?6'М_ W'=6K] Y;_.XX;3klJbrR̊gVCԱgqp'w=1IE R){s 3o$!:kgikտ]2b d[1i~@م?ֿ/3; cifktvp+ C&!?qRf`sk{K7 2 ov`=+=ppuXo(7U_*X% >`7eJAV?*qX .A+AʿAAQBt !>#.H)dl'B߈ o Uc߈oAe~[4bg/Y(OxL-wt5 Y>d@&vUз9 ,T a焐Sw:鯂˿ dVV hWf,// k1r*sCR!O_~$?d pC )`@@?r?r:sE;5ϦqAqA?=!ǿ Ddh'7%7$O} g_% ,pwr58eJp Bw BDsk^ AHyϿ DP? Nj?B` '-PWb!4s-.f悌43?,2k@9+!/΁Cl>VIMTQWՕOx9ND I)lȢ)+ ڗR=$׌_n)K ihe)-TvRqMe&%ۻc8^.S)U(s/aX$ ZvyǍ6[d~_7,Ӆ ]"+n;KGquI ¹ğR@=O ?6|:ع5PnB67M?畏rPr~-@zVeaw)@|V|ÃhO׻}};aMĻɤYG],0EORhT։nY-1`Hlay.ϤC  9iu2*]NRM;P{dե?zy/0[Z[HщugB?\$Q^,y|U7m1*r2 kΩ9C^l ʧ//3|gv8 w=oQ|m#L.8>Ws}`xaT_=_p4Н~~|d0CIq>J{1ts^dJPo*4X/ʖuT4zbD 5t$ڥ}j;g"/(X4 #I+z).o 1 Y *0ug_JmMufd#h>vt_lg\T#L%7Kppf5@9$yEk#r 3N6|j̞4VGh7Hp}PGEAQQr)%d&~jsb)ӹfWԉKU*>{tv̥oKz'?r1:P&+M?n^[c͢ g!p:KK ?p22G\ML]N9ޜ/9S4@PiU}9cЁSH{fBESSg: ݎd ܹ٭u9bxf4LUD4΂L?{ۘ˶8nb^`¢A)KiB?T{Jb_.Vgz@D%;Ncbqj b q ʂ"AH`,`L,j]gJ*BcYx)0;/hd{{ FP|]f(9@ nW֧n\_?$R4) ?x!Uv#Eq~:}JzAq%)SD Z8 Q|כt[K.Wvysql=gsmuؖ!:Ż;kN|܌z ]t=0t|z~S¨i|ݒr5.D1Hp +|NeJ֓-b*&$Tlh2;Mȴ0?Ob0ē.k ,vK/ϴ&9wsTJE2 +SfZ&Թl}Aܑ.w2Ȟ < YR>1778Q "}-AM!&;Q׽WfffɫYߑ+!zX΢`.;n3j7&J0VDk&GrUa̵xN?iCLMl"55xtL.댂tCsL3N4FܾTnH%GۘGW}HZ`1*OeK!;ƥdM'S[. 
G(:` S\XB"BD }C:u&+?%w|g΢|૯RK/7h0Mu|nQ%;&mϨ Q'V%ƛ;U5ǽ<--0mASDkAjth`hu{ẙ/jw;]:3 ȷO]BHx,*2=y;3^w!<7dgeido+v\pWHBTb9"eg9KF._UӞܒqEgr1~(eJΊD(%pNU+nl[CB7jO;EBO}|}wة1Tۘ.(S&#w7c<_)ݎc2Ǖև g6mDlVH*oEQՁ7}*|f|x㌎pѣin2|;¶iX+PYtT)og&ڿUy1}tk>WIBRJa(8Y060#Ӕ(we(Xr H~P .KnSV{"FBp%bfOeѡ35$\S,~Fմ8fuIe)*Ns4,]$]slǹv,ڤ7fm>aªs@Ҭ}ei.lHze'P sz=loG؟>Ҹ%?ͬ)ުa}7p{t}ў RYDhu+( 6leJcXހ/4Oo=xO؝zvRՀvt^AEFȴ5j?dVO !SNFQɀ+tƱ`a.avODDjR}W^ä Jk):Jў3&{M|XHyn=]ނ>X^9uY2#@m̑m=վ@hXN8.r>,ҞJ3B2.4z2#Oei^>1Úi[mk68xxOb]> !YG>VI7.ijxS'm@$Q'+KD<9#P̏1P.cz)grUvPV͢Iَpqdy&ZAXDO!Tӓ=ȌyT{mGi&ޒ_JCOXx4^V[J )ܚ9?UZս&e uk\ɾa>QMG`"oyBN16rwKE /XGݭa޸MD>;,mVM%aEbXA!8AiXPP$KLnV7qeQ؞ukB5SO[B} ;Zk~pc\IK+*HVЙW(M'94Rj',)0@~v"oGB< ݛ;XOSdk!{PPf]B?WYo*Bf?eM;9@N*Gx>ffM`ia})? []xmʴu;Pna:u5YK}cS.VUɍ?z!G+7;W Z#TiW~Oy0ybrzW ve/n!.Z_g_X^Z;883/yhd.Xꀙ<Ua,{k*<#Ea᭡aP` [+D?쟰X9=$_`ђxIԜ-SXXfߪ01x7J x{tfEq-yZF:̂3Oӹgo{Y _[ݓ/5zyzSfZE*ƿT6Yy˵uHrUpkqC4*]Zӱ3rSϻD r2OxBZJ|%0'q>K7a+TEX 3^9LqVewKˣ0֜׼vva@oĿ㶦܁{ڑ(<1T6 qumuO}=O@FF3s]3vZOT.\̧Uky5F4|Y OhwYg'nEbK\/ UwWC*kKLh< 03V#oeFa46K^+f(d]7ŵ F[ϸ?s6a>B S#o΃R6{ņ"-yS(r6[=u,V-Y6M^5d7TԤHEk ϩ1xJJP67j}P7>0+`{dDZ,q*w9<n'8dyɊ/ /}oRC-4DU!h7IgatH3_ 9QUPJJV0W:qlj*r5kl~!&~Jv":fk#U1Bܪ<19=G>\?해s!fOO` IpHa^%++([]89-kXºfcbڀ:1cna4d$ͼXl[? 
6"G0s>P~ᬑ.AF8|aF̷mEwޫY+&s+hSؕߒD@ .ǔe5l,#m䴷U .5ₖE I]UN#ǯe8 Dکiސn ")8ÖLɟomɌe\UZ儑s܇VRZ#'#ԎMa+E\}}Q]Ad;mA RV/xn^gm15aqֈg(d=[-+=`p!mU2L'(JWg ɖ!(b jʾ;Ʃ=k~5Ja0G_ow+nz]o^J&Q;?|vVh8 rX,QC{Pa9Ɇs6yfgb]rWdUJ"~Mxpœ9qPoK~5wtn22C$d)C*W zW-3$ &2K2iޏ&TvQ =ZFe~6GO5 ?H `\ l՗Jֆ3 ':NssN7*ӧ$%%d!s3_tMeEzF "[VQً vݺIzy>="Dq<ߌo{f`B-&:h YMCqRu7ك5XtEV߷[Q{¥ٔOǹ[F͚tluvD5hӆs#q8uQ4FvҰ[EJ<̪$6'ֶs&RCF͞*˧:6a|{>2->2iI"z1Лm+HJme6h}ӷUaҒF#փ$!QA?oA$Gjx}ZKWoԾQ~`Daao9Ů̜-G($xsFkm;He~6r#?Bs.6ڷk&Ŋd(5'[YY2 R9x+t9`H%re?}`d_|()а2.3#~`V,J=7#ȏ\\; ysBM|hH=ÏnBn'.6֖(5T^GBݥ _/ dc7>R2vGцy+_@˜ O%J2TqmpV rڪrW[> aw$чAZn|w/ٰk Ub B 46e7E̯NU|UM@st$YFsAT'S1M%e8o¾"&3ouȸ/b퓴-$eriox&N#|D⛟\D:}MÑ̅(7g^ Wj\O' 28N<8ƩF27A%|.bzB^cn,wA5q z`<#N (7#Mv3ztVݳ2n̈́ˀa瀊yqm搹Dل;F/ lJ̢V!58ճ'Kų0ߋwd∏b+8RסEdvc-Cy |9Gb\;Y$2~ FM0ԩp#3zp6qz(>5%J/bi}P%B_k !۟/Q~]\67C52z 76P۞WU˵+OՕt8cRa}4^NJORHD4~FVi{guΛ;yg&,٨6ĵFź*c1DxUl1|UVC-2/=65hϳә!i;yqխZ qS|9b2/4XC=$rFf{@aBX %-Mc0Q~g5EyCWbnʜ I׵v[AD"yѢ ꄥzbk~;~P&}۳AWD 1I4sP(HyF ~]gxTwDj}S}sW .d4n NIduF9Jo^+Jr9j;!dxc5m9Z|+^kdǯ;LcI a0n2^A|B_xkwKuwTbJJ%h+ϝ1 wš"u9e)rdy ?ONЋ2?֥\}o].55*?dsy_T<._s˾G.ّKs7;ͳr54FxC|a<4D:HJl񱔿-<éngQFpc@-1b|{j HmJie^oD#n@ *(w$ ٰN8$SrDZ DEZKVwb6z{+?kJj j=QFuIF1 % 5%4~|zN3`[,^mڏh]TLGNβFL&k1@9"%*"bl11z"@dgs/dvsxa+YGaJe_=mq.4>l**+RY⤌RwdoPK޵Ė"YRtuLj7d%@&fmUUSNg%0Dz9rUܸ^wDp2LU5$Կ2c] ll6vC(ē1t=m#yDùZLVlO&-nq8z>z˚ CM7יQL\̫w58%.fatWj?pv"E~M Jۙ\AHK/̏| <*h >,P䨥`x= |8n~?O= c $SF)ϔ!#geBCH==81r-@ࢥmd5R~  Bϓ@D`I ׯo܍)Zȹ[Z6^! ٛPQp vЩ- ܕ~}ߦӖ(c`ŻQBT) Wxĵ.mio7 E0þP ׈2re 78@wMTIĊBJ| By_lvkýi싾=yf>~Uws螐Ơ#c</ʪD"mMj?aH8eyVPm ݯԹn. 
G>{QwՏ4fbjC>FޘXad(I8˦KF,:3ztz-a}z\(8Uscֱ{{QӼ"qcvcU8}Tiu}{A.7w fQR50_uJ:]e~۫[X̪;m7"_w Ȭ/¸X=S$+ 41t4l^O#BfHG|(!O3:zt-YX nw'0&O+#Q*]&>cR4(Se !0E{iWrWS A;R<`husqnhB1k^+MWF׉=zaR8 -G0љͧxϧج;i‚S6=Efsԏ8TBPKqg}Œ[L#K6+a{R^^t$Pd[OZ6VRwIJz7z#'c̾׮¢.t6Yz0u-=+_`싽J3~U:ld|⦏!k~Kwx{QߢJIoRORs Dv eb#~=kZI o{( Qʢzs^2r1Ih|* qky^6LEgj6Me՗*Zh/5Af2gA8_ KBcF~ ksuWQ-#Vy?g&[]H|VCAmW6hZ%uatOQJir-/Xx6 htԡ&VpbUoʭ ڧ(DZ9YYG^^nClW_(T}v0cȳ(rfF=(p>r&JT:q!\2Zrԣc#vtZp evAVїȆ $*R}:aT=vh׮(FV>2hW[glja`zG{T9kq&oU2,-O= R8=~a"/ӗ!St!ij x&5Io+a{ FVքg9zcg .ȽVώz0lsTgӶ@@R箕E^)We[$U7>\Xpu{x-^;qmVK[ lU'W5(ViB|Dʉ#NM\]HL+:{ 1bXgQd=B2k_1;8Lk&R§S)hܚ#ٛe> шȧ{*&]А/oq|s`/&#J#Fxb"Ww)_r^5M.U;Exk"\~vvuE[U<^6JOQOENxCʡ2$^y) υg}CZ-bIP㕕NVBƁWZɩwUcZC v[cjCy1.yOnߘU,ȧQZ;Ͷaqly~41x[CɧHm<#8ńS6 ]AߎbnORۑ5ve:aT[=(U#V'`v\s(APh"ي3(2#:S'Dۯ>7'4}-U=ǩܙ#ʿu~ 5F=و*ʌ0wk$sxӑ+ %UΎZЯ`t k_z?yK8Ҏ`·Q9eͲ0kwrZ=hsՑcSkA _Zgi^߽AL*qkqRN0T uEM섉iF^'7054l[wH5w5oEdA ROZZO/(e@oW'tP`D(]jۣ_Ac`T~] ۯu-8F;"Q+?GQvz *a4]|LEvxvI :U_4X\|Pp[$[9P'YS2U̵ɾ>;'A~3;ZT^0u TG;8⊨9J=Y:^uRdyoQ:,U_gJ\lN2FPO5-#T">vm dS,2'c\6S2u$$tĹphHpe{ 5ǸW,+y-Ϭ졂PmH닰+hfZdDҽF{\`"=-TmUX4ҜިDh`+,pR5TZj|tEUxcQF۹X bXUEMP' }"LD!S Q,e_]g)ڿbâm^.P_]fNFF춑=s,sµk(ѐ _4jrvւi_zY 5ILQwsb3dq[uVA5ȑIDch>`0p6](+ۂg>3f/Ɛh=f~EͿ{'g_ם޻92/>%Ε3FiF<!':(;RĴ$>9plJ]D2/J]. 
_E`Qe8y[#w?tyVA"]܃ CKYyJRr>>k{uDQ DLT3vPTՋXW={dʰb"Pw]Җ9'}k=;Ukпz*J)ΠcwO65[u#mj8}N7ξlr\n2Ew,5 s=Pʋʱԃp^a~H.ܒ[hŵ\la;QAc#XE T,ýbnsX>1+C^KO] 3jE4eu"|?{)[lhKo)8bp}H1ZvGu}"ՏWg==s0G^Z9"ν'R#^Gk 20zQM(eYCsޥ+8NSbW/_ r"սj hh 억@K>Y7:A02c^ۥ,JybT5_ gHY߸eJ TCM(.9Nw, vAoI P}UHpGk@3̳{jB<ŋ{Hz33"BA1{XvgV^06oʚWz&R eKKN}?<@ ?;R($}q |q%sL Gs M,+S#?6iGEm۶m۶mm۶m۶m9/va%+T c D2 @ eN5n]EE{^f55ל8JLgBWQS=2aigtUZwW"E N@U4\5ayn=X/Ѱ{>(((yV aK?&˖#Խ/ Hg(ȯ4%[з){a׹k`}AHљx#!jt,wA&Hxs:x=20)]D R}B|`ƶ0&&:W㤓ՎG >$Ċ]]9&r:cE6[JAxðA)v&\n~}*#w1H 񟊨3+׃)h; -Rꡒ{Al<:qQ-jA6"R蝢?:-Rmr5Th| ^-:K %ꅦ2yYp:ڕwH =JkO(=p; <-0?&r'x/sz31A]bct=Ag_J.61[cMXv3i/}Wk7c^'gIWn6ͣ>tb\`8帪|{`khs/]܌Dz AW2B 5!}գӕCm>UB'#MHpP3Tienf`4f'C~DH"cCTR |uRp~ʿO ;F*n:t 8:y>1WO3i nxS5@@zx]XY.)!4v@!8yvE˨+A5.@'x_,"Ew*:eI<@و"Y]a]beEmG\paߵieX[Js@05`}/az8:wI׎mۨs$LQܭzAK tނ>GxWx?s 4Dy*#Gשk.(rdRƃ&+Y?.9,mPM7< &?}`${X{*XjR&4Ұi7XPZ{hRGxaC01gHvS@X6IiS ^^OVrީy8 ڭ}h+ y/BQ,VL77PE b*ԣh38†?[Qq"v"I a"4;aaz4 &zI4ow<GAד 0EhyZv}N~WSZCmYWy{~Jb;g4I*U[9c)}{vאzfu8qQ/r+}t'LPUZh}򕩱lE!b7N_4ژPxJ_]E@U?Qk*t oF' Fo5ۡ~LZs&U)SKIҞ :8CZ`m!ɜ=TNZj봻1f싡dX(z'h= S30K@xq"wKS6^7{W0֛r5Ei/oi:S8߃f2{V$y0tb_>=%SJbO^ѹN*LHI ]ρaA$oϺnK5 yfy7*[zЃEzzSC"-ƏϤG?&H /7.ZQQor|ifDV4Q0ntk? ڥފQ9{NҐM,[ˑp-SK#mbI[OHm/t8?ODZq[Ē{(fzejĭc6:T]'F5#K'D+jx1/{u'F =HE3ɺ;ҡyݗ٨{2ʻ&kX q~![xm5ԖnR$.yʉ◖ټ n^nd`?-y˂fêS_>w_Up-;ǐ(-Rmf#)Qopf 27d-_cpmy*N E8u6mCPRp..()ih@tM=ۓ#Ǽ!Dh.k0ռ<%h |),N 2$wuLY{^2LF 29bj6Oܚ[ ;ϭ,99*6 Lt~ĭdgw`tIMq68%[}:g8܏n3p\.F ׫ECyd+(_3ܽMHg0vƕo>QiIX^m(~jҟ|9)Jfp"ʥͫˊrO0m,ZH-(%Hlf)Xy'REZ(@faYl%*q Skhwv.[ l!۪4B]\\JdšA˅ #'/PN﯑4uF+oYߣ Xx+! 
<8K+J{^" U/;y&O˄%HvXW6Cx2)F57ޟ, &bs$++0{oWwƒ e i}R~Vm5I6ɻf4:Fq 3sjf8R3 U>3s[jȔ>Y®C;\KWj1 m3J`%6Ya[ J1ύK$(nh<9`Efu>\rۭ֝%5w"63GMG |j 8u/^(pChĔG&[:a ;M?+qj l;%u!r_ﱸUT>vWܱFS%P HSEXkܽ=F]`dsc0yAȄB+Y;;-wx!Nsp׏Bi 3e}qm-7~x剙;Jg;m87If29D7 j:!4SOg½&6B "hGWbdUPl,gf,,EY.U2ˍŃ`zg^|A'!l wL:E \'f plUz YCFƛgAP;ȘsqR;eZA< |# QXFAuLLgT#@o,d"|&8_]8ͷ(`ׯN|-֥10;ofpPo~WRkFe_HܜƧIK"Zl32w ~}Z~_ll!0#݅ndrV龙=% NO(lܸ“Vheid쀧g_Dm:[ySh/\gwqb˚SG2ۺ ?|1,y N :5_?Tvb q&I6_;BI4HG}ݐHl %V~њZҚWCا|=9y~-袋pvڐ)~TKe-uwvᙦg^rOUEySŢnUU oul!)%D atB9TSoMH_VNPѠp Kq0Lَs2^<.͆E#8ȩY'^0^_ݐ!ʤ"1HH/V uzz0?l  WP_XO>ebb:=-(jd| Hߘϝn%hz2 %P#^[O* O3^xU9~'ٴp sK.QN2O3,߉FY]|P d.8Ge/2WzJk;RFp|s# TD݇ũ مM͋rp9;O+*fNRiVa=hldK\v&Gj/h?f}7]k=%gj;< ߣOF׻)A"q( Gk]dG˗`0wv`>G*Pu)'Vq@0$jG}/Ɵ|“1hR>L^5`FnBo\g([:6R*(wiƘeZa6oa|`BsE,c-*Oمk<)UI *>U'sits `X|,@`-Tۉvåظg̢g ՓthrA#0FٕGedq O:݁=8-9*'0IKs$ņKopGʲG?)ĦV^dk%"&)>@qkס{ŋwVSa^22x8@lP'XℓڄDC+5ܮt48u+(Kw3A_m>HYb3l?G[/>071cm;bv&?*.b #{Y!b n̑kB9;"B] <^; R^@[SMٴO|\mA}ΛTMʂODX_Tdx2z*j;iq(fr6r+c4&ݏYb n-ޞߪB 9\ ~뤏0n#XR^LS=%f#Q)6q7rWP#lD@zFnʹI/}M 7 je1|l4VT6Zhk2s1\!Yl] :dI&F $뫤' ;+p"5%]ZKr &4_g%VM{kTGM,.$v z8mn/&N%A`5qVgBG xX[ֻސ&k]A׏YtipO^rr/l֞[LS 8=î;ĉxGMU## > 2QɷEkz+7i/VWM&"y,)\IZ} Ü+K 4#25((u#lDZjx:ZlҔ!)dpj*h|b1 Hgez,9J1"eQe+@PQ'$%s8oir_X D6Ua8Qw?"Oz]lcJ8Vv Q >opx q2, pji@bޟ:;Lqt_mk`R/sT[HFͣ '\HY]NQzK FK 1,Ax>횈=zWoTZ(׆|C0,ҰƁ _Pk,V\m`k3 !UWb}3~! 
qݹ+7l`` 't4ƛT 4"朌q4Ǜ˲1x\yY7`mcݷ@*Hw1S!jYwqeۯ|b:ż-j?][&KGF#A-#}ٱ/X3UCm^|ATjqfPM+U[*fqWXڋõzrRQ/@u,ob0遐Xv /|D=c"_::㾺H8Ayn:XM;'uvT#`>dņzYx|uw~ga,<ڵNhlĒ=_K\缶ud^2{dH ?rM)YÝǼ]Tô1 fqӖf$+µ/ 8SPUVOnsIz'!~CS5Nv>:ಙ3썮 ?գcxh4T,8c ߮:!Jb5M<8#|ۨNmi?g/%lTTEy M>Wh}prS#M) %,g2&̡ ,J\BT1h>ŃAsC`_;?J-%{u rX5jx&HmMjB,.ʏ':.><ؘ8͙ks * mAr\aMg%NMՠY8lXIoAX3i @<`"=א(-.i}!Q`wL~,G=e=^s;k.5/!!G5b0VꔭPuu(608^ } =G.2|ʝ}E^M@m[ŐY8 +%4X|%!e;0j 1围 @gLpi@?LX; q2-$@-S1J^퍧4wOn{H#EQiIYk7i23*LJD|ȿ64Zr%QFwY%E=x ~&ފ3H66ȸR; bC&xּ|9zd*M24N|^5l)f|ݗݬT5Jؤ|Q]iЍh€(mʕOc7TCoj #Z)1a1fO;Q\qń[89%5k-n{n?~$a%B~"~ʦ!U)cia9SkmOI8r Y ?9I?J<61Rg3 lfE|6awOTDNM:,݆zF֥>'$Cޖ?6P_ O#l @3ַkm [(Ɨ;4/(Q_xFQVK~5sdOш$-6אҴ'k ¹TWB3aMKpf*{Bk-mQ`aT)[5Dр?g-w}- ̑"i~?(74R&]B;ֈ5= GF~Bp\z,"u}ujBu-FmH&୛Xչ7zeѣl d*iρ2C-4j=$ems9(6b. )eVYTTx)"`̄M&JGok-nLضYUC_?Vci²nvl}l_ $@˭:) RW|7YE%9O$?GUuEhY.)h3vƑ[CO$#62$ޟE|nB(4 ,6'*W`屎iG0&EY;12S^%U3S."\7/!ҍ}u H|:%Qxb-!as%v4Yyc>Fۃ۹EDȞO|~.W Fq6霫cMz6Z[bBU607ta|i]&= ۉV Tqhsޥ4 Xty>x>d6[8mWB^(Xh֯"Je鸋l%)B MoeN &yUgpaS%o!F؋ Q0hSBuOѡMV##Ku CZ}ΙCU($r=3!N_(),8ŕ"X$щrQ7uȘ sONӀA'*uWA ml7U"0 cD̞I3"JؔUΌ!jǿ0KA4U6,T>aiS6C^dnl%>۶g%^ Qqa !G W.e=:A،梅,{jgM~.IՔ43z~NVl^mð+Б`z[vŞHA_BD, &僕TY 7U:34-@ C}>`,eIy)QohS4=QQLt0Y~-^ՠ$4SQĴ mTU\h}Wˣ47| 0\" On+oAw$+n6xDK8M3`jèC7‹y7V>]`.H{"BaSWU/`;W:w^Ky!t-R IEtMKy\jh?OjҔ(#@Ϫa):xbh: [ }^TI4 ѥv67 >"a~pn, C Qc;,yp??u3x r K@q'Ɂx=yތNV8Xsh:cai|f 2+ L o|$R ]E5^6 cM,n~K{^<`v'6FO+E:l/SH_CyNAp~Z%5^9xIEeHi$,%Sm-/h;Wiޓe*m(4&GVe\E,oQ}ff 4NjJ5r b nMQ%Gd%L%ª\$IRsgV)8~X uMώ1h*4P2V6oytݦdc4s_/҄d3 Ҳ%,g蘱Hߵ2%|]r}N#vZ:3>%Q6ab+7J=BJ드R\!]gYe=.4HohepܴTOWʴW+Cz_o6(총zl0B@& 獪U: Ļ HzVdBGn H eh4^cg9t ;} !/XS2 N5d$ c)XnH`]zwySD1cCUm.Pɼ1Q=GאmĚ"'֬ETڋS*q/*snW64M@QcC.IE,?6թRp>* g(uEmXl\F$cwd`^RuQ .'瓬pp+_)&0Hd~ېBVfQwLIq;w>` #DNBN*rx{!Z#|JqIO#ZX0 |'ItJ>gӃurڢΔ(,!q oڏ~###4-]J^BhJ]QĸbT:UT~ =໕FXB?osΛjq5&-D;R괠_mY*nLUĬN-_PNaX'za5^1\~96L4 #_l X;#˺j-tcsb]z?G'ϵjp2`A M%|7ȸWLyrBVr)/EqpB^`lFG lVhی~]aV?Q^&6V *do Pc>Mh ;! ݅aq,E ^*f< nbY$5+bX{}>O0'<4s4PzkrqZ͔ Ydb<ϮE':}t*C[Z@eb.=GWt]yO̜y!U|. 
hӤ#r'[,m 0jW?\ ,#1&w߹>7 l0אc' RßOQ*uEʉ;DrI_:OֻގS!BHh2mآ|cbZD 8lܑĝ $ [W@7>8[c*ԷN [!2&{JQ:@Є7ar_+9۶ӧTw(P$5Q&`q$KMrq.\:htz ]@N+UARF{*\<S[G&Y2ܬo4AGLۛظ_Pa&4lFXNIE`M?;Ui$qg #uXR0p\l}l)?|Cow /;8S E8ᝁ`rEv$"% sR͓,?%A &yyѓL)r6~<-EfⲿŔaTc6!2MDrLo"%zf&Uj*%=iӱ g+"܄mkyWVT!IW)u1N] O(+{VGxΆ6t]xKc֦; %Hv Ƚv_hL_88y{"dثzo4ikZg2mVz>{"mI=E3tA֍td!\*K\* /\ zx"$ Y{P<4_RMbSNŅP3n^qЀ2Fo4`iKT :D)e}1]yND]zRZ [#|&jq~4,<CeP/-qĝdlyvXAIZl< 4O_wx.&Ώ؄ !^k|6ٌu!elyRLغ̳icr:>)ƻMF# fαطz b^Cd9BjZIݧW.*@M]oX3Tit$-zWpq!^bNo[:=lEʀ.H(Dn9aR050d@aἚ}}K!<,fzIDw . [SHWoE#lNcC w]uz9`LJ}z`QWloE] V \!@QӥGC ޽JA x-S_-~Mlh$F-Օ9q`N~uO*%ڗ;ЭqπOE51y.L;;M(07ZOX=ڑP0\hm\O$,Roy~u" {ےKA!"߉,.u%{W8)΢jԃ%$t &^u 0(RlWұ u!.j|ʬq(rf~>s`b'rRftĎ0)D:<ݿrg` b}49"`ej^DO~PWHAe 4-Mܲtײ[E> pʼn`mBHM/PL a*rǤ`X;&u ʝܒ" 1y‰h5-t^iW PI 9}R824- JԁBfMh3'@?Sw<5ү u .m]yb5 BU ڻţ dUn@W9f 0VLtn<> | בokȥ&j%cy˪I 1mD/K#a5+Y8YHe;QƎJZDCb}EcFSXr{N_"f;㨆, >OTWۛ#XV.uNU%dΗ4u<GXܜ-=XX"U|kt${5g`i,8-4,xtZFW,^s"Qhx{]{t5>8 )q{MM޸x$D7ں }W= *P22. [  DN/%%w?Z!vqD6j^Rz_*Q oczsl1Xue%tYSfP0`mM,30i4I^1Dm|6 _neXk0e!6[Tv)2g)v>fEIa 3zc.pgMF\onDtŽNS֔K ed 3.S,Skۃl憱{:XA*|Kll o$#/H )ނ|uS>ǝؗi: ,}(bvġ?yT }L[td/G~Z7Gv9|&,dVLVatDSx>\F&ly\LbK%21  7|:n&ilH]^yGok2MƼmВS!vaG*aŸ ɔc5}R lFIQz["֦&:@tfHtRIV$s#u5,hs mHv")ՋW$6=Q(ed{& 7+nqc> w)i endstream endobj 422 0 obj << /Length1 1744 /Length2 20842 /Length3 0 /Length 21952 /Filter /FlateDecode >> stream xڴuX[߶.+Npww-V;G{=ysDIA dٻ002ȫYTNVFffv 1'^ r(|~x03 P@ t1Vt@. 
&f=#D dea7LEƦ6 wg+@QrPZA9dPjU%TTR*J4U]@NELUM] .&jU~?[>|8 PQV`a l`@hN  -]\x-\]ANSrlW'-Ƹڛ}be w hʏ9mpWKcb()}[ٻM?]]\F>@3\֐o)EA+ӳ5vωۻ:{[oMAV.[w;3+t" 2j _>g =?p3sXx8$7}vF>q>].coKME*>5z̀L JP:YzopEKr3L_^"Rl?qX= a=0G#G0)((IiߴOdfeo`;9{"0pAl3?d01ڃ\>B.s߁p%qLG0do⇯,,&п&9L.{Jw?fejZ}"od?뿿u-ZT1nN DVkG",΁LCK$&J)xOʱdc&qijHB_)A_y (lZ*o̔Q$DF4vfjOEP9i|:*TJ{ K-^;D;#("F#0ݝh)Ec3;jjFo͝'+Hg/a(qj مƯ6XfZE'Էؑ2K'̵ZR&.IRTelyAr{F @ZBxQW,ת$8ͳ;|n/'hFJ³P&"ԴfRyӓk~حW˪WfiG!++o>/{ŷ1i8SԳn%,R];\zF]xÚjC|e6:e55OT7wYW[Qa'E'd#?ոE$o򳿠=o>DE;cIY}dVTHIM0ȟgj:LT30m[L,YE{ ?*Y>NxvVRSUT3Yd>g ,}hٵM7'fV,#h`K=}֛N*AroR 'NlvdMk|$)"MLFx& dF4t3ɨ Rsb'<O|;V]Vq#}CЩ.E+/m!>{i3(9RozqP: Z549ww70}tY-5=@=А:6*]"'ҟ&bJzœ|(&1(엌rv <e&83n_4!_bfͱ*Hh;{+'V,rӖ2inH[4 V5Aoa7z`PR I檫T8_V4 eNHo M `R)-55߻5jH={n6jl`Ds[$v=os.d5-/<~8}:a3~׸Bn|)0ó!)cT!{' 2X|L ep;f5n=tJ=Lxr<o&`̋ acs:\H~GաE&,Q *BK6|(]n!njj]5󞴾@?M덼Cz9="taٓPWo_EVN?,3OZ2`}@niѴmN/FE$lABBL |L.h6$F73E8yf4y9AyPqkTfC]é~\ 즩b]r?O[2&˄+dzڕuv2@w)Bhg{r^ E&ˌm!VG9r.gɝx%ag7Х\Y/) fI 岀9vΫ]fU2${!E{bu#2G)zc -6n^I4Vh+sX@f~vFUOt"c P~7I5NAEm\9K]>EyQZfľTQW%J$+IDv&i?v,4 5'j\i<`Mj3wL]iZx{M=^ZƚY!rk\#5!Λ= E].iCz3L_*F[QUSS^fS ~R#,l`w#|-dC9ϧzh`6ذa`Kq*Wdf5,b\gӻPM7F TN { qpg *PS%E>K˒'xev9>4+YUkWNzM1C|fn頁@k w&ɈnO%9RQf$ rz 7ȗ%pd0*drO Jng 1/pd$9& :T,S%k̫*Hwᜇ>DlP+ց\j@c`XyNkLH?wd@qBc,ȳgۖgQk>GD%*7m8"R-R KqD E,>1 `e7%McH}G2|陙~eI rV:"Z~"n4LӜʎ׆ bO/UT@#ǻ_jOb{HP`'^ =<¤#)lCgQwj6yiaK 媌A*mn6$$TJ ض,Q!8D%FkZ)TGbŖ-)xn&Bs'r4hi"tqbbylbDJh]^kk}S3 bN̉R:M{v@CsF L"†-*({BrkJ+󏕛N*T9 |&fΰĠ,,PXд>1/9^+%chw b k%_x29C y=c#/pv¶H oWhD~sBe#&>~y!ND撂o!Wet0J  P;w)n|,ѱI4zx]>Zi@7 'yمa!\./yFC^ "__"]F,%Aё: V+&5안ͩrNokN\hI͟5v;i tdI1 @Iqa5uܓ #6s uLMw(17 ʾ>,{G3t I8^@q{^e3^韨>žaJo0;$¯xC9DO]_Og<%a 7/a2M+|e/sA> 謚8\H/~[RD2{xj6R|P˱',r?,oJ r2\k0N[$9|%pָ̗k'eQz # IYNIzjAѩn«Op;K 0jVT)'H -HE2PZ04+&ΝNKiog1;x}{"JJ}`]"c|0}VVoj5ٌ{gk E&ol9nOvӠz=$a*c7D8{$521}JVx ϝ`u~,.Ž#>Y-ҠEK ,({(ӹ.6#Na)ȳ$ڄ*qc1~]'Gw Z +d -KlUTl>\ݜ|[B$ -cc*f~Ъ"۩Ym#um fȿ{RH;]jX}Ãl\(2AԲNSLqE\F-hB:4!&x Ggn>qR%ò(Ģv.{ =̼-[WK- aqQ\E`Wąd漮{r 0of0U\N'مcQY[%h2-~`\!⹿|ӣ쐝ōRy6v q5QlRF7uW9\4Yʺ#رQ38q&$EE*:0E][[v 
:U.}AnT4u4,O|LHL69էnL6u` gR?uB+<6yZM>kP؃>,NfRn\><ȫ[O --o\ ( }b>U豧dvٝa spV唩 3P3Y8s4}Pb&1].pq :P15LݕnE}y,P'M76{<н ;QlW8v'nb%?ނdTpľVAq:R=CM啨z]MJY?\aPt' ;YF+\eKO\+o^hD6\̶Ie]ɡɜwòM>砞r2k, 帔̧}k*o#BTu=]6Z\S6= T O{Gw")kV(|[ŦE1LKs"ȋgbHpQ6ތ+=Uw*Vano+wt'pڛPӈ ȞV AO*mx^ TBlӯ67i\"[Tz k[O(_6ĵYJ_Wm?aﱓS=fIn2Di\G2 hWq=n:!'T#ˠVI1v Z@IlP/M}!Zj`h`_Bv -Ґyۺ{W[W!RCzIHWiAiZj_K-\v:夡 ֓ G6!^.TQ,y* A#m?RQn;`'17 C,Lߟے J|f t_}%/ң.cwaKnM)Q wJE Osq~)s-lN["|sAFukk/]Z'F=0z;N*?MQ֍zLBUD/zj'/l.׭*/G͔2H ͮg49~Ek7W/a&h)TrkIE] zKy5 cők'BB0_z2B>W "f<+U) >QoXD_-w0roQOOsMy/tu/>pl`/AHX$vd[͖h2xqy!'xVs\/KRtfŎi #_ԅ[d=>ͷcsژc;A%Yw2n[&Q*lM0ؙ8sfw$kMD E!ۤ!X^"tFEwBm4FE8M"(02Qbt4 e;awqg|f0wFdGh_iJ5꜕A/P"{).$4 HO5{jxN7^Gz[!U|]47ef) hHc?6U`Tj\rOJbC%c͇' EKN4 Y~;zkҋpbN -ol񧁯9|,ߖ㩇/Kv˿#=+TnBT;BsE~Om eb?N%pPܫCއ4BJ6S=<#Em6"kV,y?}X}xN9c_u tzї:ڬ"c3 yYȳ! #íi{WKHQ.P~ <)qݻlN>ۺˢ#=X=No2zM#2[ن.'\7 אk 3$LX.ئ=)vO;8K{ݧс9xe;9:T&TdBFz( cAE>iC \8ԨE6 `{IC>{2R 6>.;ލ* r;%-1ۨ)zz5M͵7T9Ul!b?䛛 eQ-Lk%i?Ĝ@d.`Ε QؔQ1/2¬WR0IB Q9m#ܘEϖo,M˼tG Ӆ8=jf _raL6pjG!gl{܊Ř!v+TPr5Ha-d7;PB5Qkh^b.H?'eN%uL6+ ^Fu`ekA4l8X;fcO\j-q}v8}$tKj[J@kea}RJw痵Wjƅ7%qYu{~B$VHY\a?) A'Yȏ-dWE}_n2u..ɰ橯Zj{[Ky]s,W k3|_PޯЌf:UA0E]Ro0 7I4KH7W$g~whu״r=8m(r?Rl #r (Deӝ~qTlnp6b U Dd/#;?=sG0&绰[i#50b()Oca!Q}˝#κ%>~9]3MWLЃw(z$ >?iqӠ0ЄOU4nusxLZ+"D>L߄ olm²dR]ke\_sr1d1@*P`ֻi(x=i.IWE^V=V('C;&rˮYWh{!dg/ctHfT1.D-G}k8ɦ7J&v--^L;ڠӃ4=D9#S+Zf V 3i6WNbxoW4$Y3ʔ&CH}lm>ck+߻w9 ;YßRz3 ռm\ALj_"6%l4h,oyjyݣI[. 
Q)S\UF:n7$ot7gJfR1SKz킇S_m)li?\fЮq9zX?8*lim!4 &M m"`!n ֎;M4߾-Ce'E_% h4[SCdn8wp,bD01ǁ?#=7Of<&Q=wwzw߀>(E2>\ @}d^^_s\Vx/dzvq:4>?_tb0 z3u Zz(?$YFqvXL…ZYq 2PU =.0ZLW`B~:m?ݦ31-5@I A!=s%ER( !cz7NVhվ#f|nvxSX3ٖw_SVhՓF&IV&qJ*c~]\R'|)=J(kIye>Y՝9ciaK3̤URm̻KH4_2\DTpqjMkJv`o6ݩVςBuifrC4:ߗm.mlvuu* ۰%V/wb` qCoOpap͎ޓ(5߾YtD; M?E_pXS,%fWcKg8,+ƆnN< ׂXqm }Ax;yM~]0ዥV1f_ H>eI3z8oM[tB񪎖&y[ƕ-5ןRewR`T/>C=$E> io M;X(ŐqQ T+{PI_ytUup_n2U :bGA)_Պ VkLB.SR}Rô֊$zO¸'~O<2֌8v)D$S5Oд o jPX~Ub_C?ni@*cs'l2͞#zw?W~$z(=HZdyu8Heo;[E XݖKNkZᐗllATtWZ `[V*|+_4-Y6M\\?CtdKl/ixLY{9-*[)mg.U#$hq~LùLA "UrɓU W>/UQhr^Bt8JҬ\7Citٛdpm k_/l1_bA{Dָaղr63ZYL)sRHe;D$zʶ0MBrc@XZebZJO&Cװ >qf:VK i=>nq5h^OLMK-Y*Z.U$527 /u"|zF7}W#_ZjhEjg~#<@IgNαK s]8RH K$󈘈5m`.v@V 59TÔ{ӆ|gD^5Ť%PH!GCNq=$/9݂ùw"՗g nE ǺYCDGgbhϔ*iz[ŶN3Vx$IPν%lxB$9+C|U%xl3ii 2TضT'($]}k}ˑ]+9C9w qr^Jͤ%u<}*IO*?۞t4x #?‡2j.s0Yy!Yy]FjS1],d[+כO@ɒE/(?1ѕ*z-cfj$69MqW~%(|79uLܼp.P$EIailnDW\bh5ύNbm_ٝ4C{wxy l >m"0Dήfd7ɻk jh` eˈo-9[IźeOPCsGW ܒv(eY /;A9&s=Bp8<{2h3O{Mj{&4/VBMڵYH/WbJqZ |xy+E.|S WPShItӳ}] D괞G [L'!3.ph:?"O=63t_ į* O<Ieݩ)(nu[q#[¦t c # v1ﻉJ ϶ bgn~}* gi;w6|`E@nۿ`e$.,4?*f|=Mջ۶ b0Ga6t 9=N(\zx܇ptJ2ę(a(܋\p$G5 P=e;KQ}ՐׯINE)çxeƢ ;JL¥\݅哹WQTuѱ~J&9`۹(В>#jv6)hj;-nB$XKQ8܊d.5J4s~# zw8 Ec9%OD>A'RrD)h? 
-;iEwf1nמqagp?ffli*#)ŒI/YRvMWX녪T3WȘ̿&͓{ҿRjܞw+fc$L)|1r_dqty+WVHt'o oUpw[xQ̱N}V BC Jn7'$jKVl:(@[67W,kn3VsHH)Y)~~&Y+Bk/}mN{F;8McT O2RRM4l'iw-)K->}ɯ ܽw/_2א_"ٲ$J:M!T7z<ڵ6lUG#Nsp-y8ٙKV`C!ھT&ADե$sWpG's;R|Vb;/Ύw0k;|ɟX&^XF5.;z3YO(p$†I"Om+cʯ۹jBqAd]/4m&3%҅VEyj{ĆC "MI kn>M4l,|2j,ȗ9;0=@ =X>w+D-StkmPb7yOLppIJ#|֜M+ra0'nsc*&[$ںvS*kE/WH!6= ƹ&f/ 3cJV22F#&niz3˃9ߴ5Ѯa# Fiٌ Z 3ɴ,& :8Fiu7zzk{A@@\O -І5 $A+~e;} ȼj'~Po4oPnW+L]ɥ_a#Gf 東mnJTed =+hmxX5_5vcyznI턯։JFJq"䓬)S2#aS|ğNˮFb\!ƌIqQMq/?g|= Dyyw'sKGVےjjԝ` +4 y#/$}˽1 3Ćui+6 /2sDA5ҡD*AMmH)R#ٺfiKzݫy)*%,~,KOȞ#NR]T)EэeP#){Xo ?a6c;P*'1,?d/#`G7u" LbP H3pΕ>xV1<ׅv[#Q _)h\B+Xn(3z;KSW a؛^0 d&2>qvX]4|?D|f]XCъ;> "ҭl˔;Pul T"aevkbL|KF&]=s;չt=zso5hh@\CRS$~F8hTԵmSTr8TW籽RO}r 3Ib^` MQڠJtDӓ |^\<|U~ xMxp՞oۭ}m#xn̢^trKĜGA/!nٌ);ų$52GʃHyB$Sj,n$w8<[7R<\dv=E:$Ej^ڻQM(&Ea żD2荋`iF__P<,4'Ϫ*͔MO*\F9RpDd͎^7ϧ-v&Ǚ a>`WJz+-[$NfӘQ]-C8(E-JH JtCJw5 %"9Rttt7Cw u_Z{mVv:>€4"ƿ/;17kC CdH]?ITס~v,yCQnU)I&ވ=)og!+ rSxp(=pxUESClEH?TN)r1ǯOfWi]߽ Q1?d}˅O+ݬa%D(}θ6+=-LmSBMF\/moA uf>3jI4#{JP2^I>QѼCS|1xRa|]у:/]5Qd.wG^zɜN+Nس`)VQ T de=?/ S\6%ݭXO"psɵR%IWi-ݢ~ g x17_"b?l~މ]袾\VPKfL\w O6h_V曢?M ĶW" SZdw mNS,H /Pa؆hG!~!TtzC_\?L"X&K]gt]7(A[l=!zPpd( ͆ naiP̈́pڵ2 WGžg!S4m!>7;*geqJV18,RIcO VWc*Xs)2^ QԟK١ᇊpܨ[*e*<鬅g 5t4ӻ_y۷+Tr;Thو˓m+`j+3E-[ y484(W~UQ#)/@?Ch LQ|e]#pwˢ+pU~;g/B+*$0;M̛q K+dTOrή:i:l2:i\5ć݀˞7~YJn8 +P_&2JcD-@4g}q%[5oNɻcZp\:VUیgG٘˲ T:NK_Lƞ;"1``8G@[չč1Ģ'e(-JcnѕZOLD_٨ =wTfkFyU{7Z8Xۀp=}_ajQ)xEM.}1 {O3]ut.c HZDg/v7]@?{'W8Gx0tg~v.S7$ǷY\\FDK9~֥HVVC1h1MH8 Nx(r2N){מ6lBrMɲ]ܓGydr_uWiOAp%5KB|5eFP4oY9F6LYT4<֖qStpg\xS@?22S+ߒu4Ғq}#nW  /_V&G"jܺ SY}9 ݵ募fhzHl2˒ަ2YTD2k׉ C t${;\bkM/`˦MkYXDު +~{xAw6HkѸdA ?zZ/Gʥ/P9ɔ+lG|\J~(Kڃ=Bd5(,g_+06`<ZjLClXYs5 m)ҥ*knuk,BA;=7YMhyFYoɆv· %1NKZj%~/~rzISckZ^ղ-罢6Z0!d!_-_!*z^elJ;T5CqZTˊͽ6zfP퍒0t#mһeBexenvO4KK#n:MbCe o+)u DKp*$ v(okY2۫Nwp%65,tW j38(HAZ6x$kS<±@"wUyy\:3G*1:-'{nq:(r^ꀖ|8fq)+Aq^(V:E`SYYWnQS״(vfOuhѩU۴4@d)_ieϗR<+Rb]`$a8/*%Q&\O/| ]N]1 \,96NJz#?6԰vI6*o,:79$iƋR꺒9Xn$v PU#{8 y{_C\O^98s驭w)'}j;ѝuIGv)m9)PT(Nw'ց\_:jM;#0z'fj յ Ti .mH'(=f!#ɥx/F9gRzғ7r3*`7=TT/ѓZΆnx3X{ 
,}Öi4_D'N&r⿉fLG"G,ܩ)/!-.KLeAAlfxkKFONRP +ExsFT  &"gަHAV pW<\!ޓ7y 3\{mtRi E-'arHSxA*%" Uѐ&ALHCEi-*歠3kb G,D(M>LZ0\`s}`ѧ}Xʣ`#|q}V *HB` hq;]%ukm}#Olill#X&lj+ߩH+EUh(oMbD={% 4z`e\P/\[<&1kbJ D7U8{ m.qcM4C9!q7$Z_е)Kgg6 inVTb3]]W.쳾՗v#‚Cy1Lmxu9]B f >T5r̼(۱hXnqL@ п~)[MK'g xļ^K$@? DmԑaRQ3z WP\`ʸ? Y[dYl1HPj:^'WhjmpO֏yv3W4{tb|]Dۡ㫛_HJRWN88'i3÷9LϽtz  eџ6 $LWcΥPo3X}.9)xETffiQZ%쑿<˗WIݍZ2.o6GMO.SH9bˎ\ћx;XY)ԟ˕[˺X&yyQ@ܨgo8p0 Az':[V!j͠^k/i 8.'+vEPK;_[ `ͤ[7s vSz!wa(1>:?騔G\-o:g0P^jJ'KTB;"]xDt]5U:-iJfm&CVnï)7h|+7|toLVdMjvHzoRv*|䮄1"̍F9)N@F&Þǵl%}RPv#g 1TʐLYg=8wH.Bbymuv(`ۼY;|֏^&bϫ`kqV+?3x?$qNZ6%ΉHzV"h8"8bv).u |_Br*8o̺9*i;Y&m뾲rHɺuzMk6҂j|2PRC"v  ͂j2ϽySbQaݣ:Xmh;'87pY)r߼KbfZEKuE%G|-KulP>.IMoY/uoFU^bN}]E5Q3k.)|gPF2^M qzlhe`y,uՊd\@j9aLL0F)'T#>_M$ L.BoJ%Hqtߪ8 70ʫ~T81"Zw6 +;e4cb(o)e}^!pp3qעGmFJ0J2SfmRgU{" 3Dmݘl9'ݻzc҉ڧjk~NDO>Y`'^_m-|VW bxexx&1|| kӔel̜LG2}9GyJ{.2ys'/J?R'٣l7}`/$Tټ JYH} ޼w%nEM`M46kQx̷lm~im$KY@_@eDCߔa#r1Z.Lf$(6c &PeY1.?R *Ey?Y~6n [UOEЅL=@YM<د:gS Ͱ:[S sߝ ta[|.jF)xSqhmyw!%x-xPBp]l#'|#7oMu endstream endobj 424 0 obj << /Length1 1638 /Length2 16442 /Length3 0 /Length 17525 /Filter /FlateDecode >> stream xڴuT[-(E{Cqw+b]>sod$+ѹ;we:AP d cg*l m9蔀fNֆfzFFVDrra!d+br8yc{{##"9@h txw@ @eωLgdښYSAvnf5XV-D248ZY mMR9˻@ M S P, WWUP/dgr/,*A9QP Uhߌ =o(k06wdj f`pqq7srӃbnp9Xޏ@k?8ٚ 6EXm@rڼSn`DִW86憎((l -l@[C[@!` 4@ @ov96 ]~e:^.bNlc#_S k_ ̻ldA҃]D'(" dd0qE*jk" yG>w 7KV [njakby';U[ {'Ef@{؜oN`jh0= ; `ba ~ S]KT *l&@SD9]Ts3R'ghcaoJ`ch> G1 W_. 6|׾5}Q1'wݾ=.#%ile tt߹  ,?a  [33; ]llwM]Q ~O9 Ŀ`0xƻ(?kg\[+D`. w[zбr蘹Ll/L̬^kMQS'@;h=3L/LW6Kg~Y :x:˕na;cK{O(Ų"Rw H1<ۻ-˸@HP4UpesmMc\wZxph. 
KLdQIwJDKB!aZ3UJv\dR~^Á!Ě}gf=uw˔g\Oe5,)@@+4<_B*e[zVsE+\ !.e _αr dic1RQE|Е 1:d}Y|^ٟ(h{9"` \C0 JÖlE|'m/P=a<ŽmE0bŮj6_cߐ*=1) Kx-Ca*㫥[3EK؜yw28%+BR EA8~lP턍ZL^_sxx('ڞ ~iёb-ʪΑ` Cah^e{{' rN4fQ ^vQ`r8aEP뾴ܤζVw" [O AcMͥ/ʒ.iHa~WF)4^~?T Bݛ6EM1S.I_vX;3vwʵp X[#B6م$B^3?.RD92ɡtS6&쫝RCBļ@gZә(jqaU;g`SΆ+}ᨈ).6^oLO{1/B/:W^zE7CA XT2,˥d|$\:hr.`IJj.i["/K;mw>fE4A{MRme=>"xjͥ7Ԕ 9,օ&%FPd̋|^NUZ"_&1="m)]@>ps$D Eís a' fn6S Ă$ĥ˜[l%ўŴ|8VODhܬ()FN.EQ+`Gd e n\n"kK[ ;1OQp8#*ڧ۸KU`Swh'ymҨF`ֹ~YVr60+ϟvn+9Ix4"d K̯.jjh벋,3:(tb_,;Jx}|u@h< yL<| g1;| rR;[0WSDC1k3\(/Öf1j'u:F ~4*H.jfl~ScM6s7+YUajm$ZUcz .|QERmFL*;B_$/[|ƥt-Sv+gnH;Kn4"g4%`hש/)ߎtÍ HHz^}T_;wV4`\KG0̯5$2Y@h >.+WK$"2殫6F>b9h\:9: (+O82ŸW/? h Q6d?8".ж4BӇӍ}.G[7Svs$QafC$9~.!Xyfn1aBʟEADOLvhoLj3|b]v;82)`T&/L775+#疎A^345.FxA~2*X.$ $n~eX~vl)ݏբWmZڶD=^ {(^7cS#>y](-'5 (0_c+E?AWR4~1~F x}7GOâ9=wW~‚Q%`2FYrN᫽ 7ɐq%=Fv0xiNPҖ|)O֢[[?OғSq"h`uOkBPZ˒t:(z=~:EMFWgrLy0~K8%8?kVjc!ZBI *,L  py+WT"|8p~XT'Ҷm3xXijj$+uU [Tk?-u}jI(Ao.Y gхo[7eϻ<WqF P^>wTҭ ڦ簔1w.oBMm@> |9('6R,t*0kT<=i#g±Nz]s,:e=͞HsW^ag8y*.MpnjQєiݎ*H֏:ߪyBX9`V[JK{wP( 50 EYZp,|z3nɃJ AkӊfW0>:Үg7}CO3mlb?Knv `ڇ Q'@Qݟ`mFj< CҺH%ndYP*NL}ӑ! 
T+r[&nǏ@`o FZHC?)=u9W0rLUDKzhjsEDX7o6w4h?kʢ*(REaΦ*pkd|LKjϡyRUaS *@SX%Ω͡D#˝0Ea E\eM~ pydԶQm܌צup(;=:3I#᧱*Ka}ΚN AWC&v\:z+IQ)mPz{/]Re;f]"7Te꽹 Hx@D& %>, \16{TnJ;{uDzkn#RU$yyKg 92 i-},Fr_ha9x™Mi -miuߎ|֧2J0ȅi?.^F(6"wGo&Y"}9]XN+'n|wsFFcO8'(7=| *,!|MD{hP>!uWbe_ v ?|sڅ ]xmEO6L(|~̌{0,#eZig6䊁][?^tބEq^MHQa.DCXԜlԧ7t-@q'3%AZt# פxV^t5'<U҃_ ؔSs{X2P*^x] *L5o IQ׺ L$}{#,ĭj*%{t"(MU+  UO.B@r=B+r xzDZxODEL՟* 1ظ'<&W F_X qD}2 G}Fix*0wnQi ^J)M 1/ QVzKN-/&=Rj>&9 .B-GIn5Ƞ`pL0(zu([@(߹ NVg疽kh+j]_(sӦ+ ƛ_Z/9-|GE #=>A}s׭˒`@g֦n]OU!٪uYUأH4Rqq <>)J=Ư| k(~cG+ݾ/%F@uHHz]F}R/h-+U=P"!mm0]܌[y,Qە^ E£^Kiw ן&1djr^ jn)AŝG`=}4|gfCu d]XH<£r-A\ZDR򇖉 k MIr*Ǩq&% {,}vXJN$i,jIqsbMӴRuӽP.5Bu }|Y׿=z1O8vĜ) yr0M-Oz?UCQ "Pc8s%@xle&Mv{ nu9ܽa/߉FɵXO`8M=n`E>3 ;?lUU#Pbԟ:XVb|B !Rvk{5cTyC6VtC*́t"QMf:♧FWN \{W vo9OX3VIX5{;&?`B-fӬG^![Ά(A{$,d_#?"8ESƢ.3I^~=݅9K %5F"H˹AiXs2S O7Y%nSf?%?az PZu.pCW۟HoCĉ~qBaԈ\^h~TƱL{=LY:-2A+ ",6πVcM͓ b) |IOƴGs.^qj'C2 Yo8lbZ,_F,ρoA61` q{vfSICQ8q(3 6:wA|c=B[0/N'ot1sPt?J-yDp]Mj{t9e1k&Yaqx^b4,ǔV/0x>'F&3aCqJ(e&hRǐct5| 8:2JOP85Z#a$&0ºnWv P}I.vdOiC c( ƏqP5\6|dxdM@-Ud[L^z 4M^z y2`92Fa '!y&$1o_]P)pzMF"RK\p57:qY,vzsh '±9p a8'wp%Y oW?[ E|+5`7G-xj;5Mފm<'W19yaJ&zOw'Oʬ@ؐ&KCȄ D2S"ŇգGԕĭaF7,|8qj|RTl?h/Ż8֌9W+4YQg%Ҁ+i3T`fz/BK9ҭ^%5Ls/5̑\Umrj?d3yFH,շn"+1ĭZ̏~6C*Oɞw}61 EM3Tɬ|}mқV/Y=o¦)H4GfT䈎~BX-9 Viu3ր)ٕSe6)sOmj\U_tk9Da(WR0l>}dFI5m|tέj6a"%{] =aSBh#AaK>o`'\Թ,K!SF r- iz4m 5{,; jGOp.X O kJ6LD{6H3 MVx!>^P Lq`75rӠ)!obw?sA'IJyK*f5c޿߲98:>tb(Nǚ 5y:7Nh]33ul-2P;UEnMx/Ѥ>6_5R.-V/C.QoCpO4I + >DAm'l6ejM\c40'&"K4~8#MG)q1u$m6#9_19݇⪚ 'y!ȯOGI_S%OΩdSZ8ܵt*}B]lYMSy.Kܼ0bNr@!zX1OxiWsBv=«9UwT*HAv/&xAu/ˣ1>$??X$RY>96RaHd R!w)rkRPǾ.蟮l3Ã_Em^O-'܋nlDaUX@Xť2un^1Ϥ3FG~m ^͊;p&]n|yu5%QGTӂʑ?9[#kIv&>a 6=. rLY|dm=%=uA&a&QjgSD%&ﺻ~vϣ^ {kIKUdⱥ}?dSd+ׂ`IGYb 4tW>/\W[ Pt1Ye㜼T3j*c}J1WɌO㭟AL%"%O;|p KLvcjmp|俌Usc,U;k <兦jUZj=lmyBMR㗯<*; }5/`|3=5\`7gG{FXg.POr);\"+/:XpϽ_4@A_\f6t#x*pQKN\2HϏ\JӡƜq2‰ԅڌ ) 0W_hHc VHޝg2R|];kB]aG*H9;tRc>D<%J(F~Ad)Pn /1a=? 
hcO!?Yk\ ;a(2.+"_VQHPRDs>biP@yNy,GY Oo9bgĥM/ cvh<5[p8f䆦ġUVM1+IJpWOD PZȖ1˅S"6}6c$̫]o {lM!xLo6 XIjج_{J,7[p2B!Cw,jyv%uӷ0##-%A}߂a⭕}*l LO:,,ȥ%a~gmr$bGl}ROUq&l7K}9Q)}q=L2ys-+1&m"Zn ɤ@R<\-~Sޢyg*tfԘɈS5&}V_ō]cq:{޳Bȯ/l!|qlv\!uASТүƻWV*kԐMh Ry8cCӉQY3_0l U;(\\$0H6 " p9W&Au TnOҴ2+ƣi98M}J;% s~]-Yw9O<QN,UX0ߣ6KUVyyv0d;jSsS9mR^w˴ g@c m@@&5^ؘ/Tx%lX趼+⾢}+ O '('[ahNC|4Gc9 j+ eoNِwuZ knox # $D-gbJ2C@;jBNW4f~Q$k WCօjet0ڌ'hk}XT놝uFn6f\*ʋm%,. A~?S+6{m%rH_zcPy4FICpQqő\G=@K+BoaT88Դqox=wC:6HtS|J[pC ~vNg=D0'$BvA ^YV{f/d =XV*jTux("-ZG>n$g-ҪϡpOA A"_?zG.g>xu]z0=;,ty` /A7-c(\}ztkc \HÙ3,j?R|JJG~(̵!땊-*S:\Eٔ9f$T%[AXXLݸ@VRIӇܵnz_aMEӳ%`~z/թMnf DGҮX+N*P5qYa.ZS}/xq5H&BԤ(Prl}Q: w5-ipkow!ua%ay HG]->*5p͙O˙EY+^eDd2;u-xU-/$;|ux_ER^/cC{Vv|•DK|$>I45,2v0#ii[vBv`A$ќE#$. x '9opr{ޑ&t13٧a8|5xfKlP;SKh[XkkAeJ¼mO, =oZ`0 S#&~VW$VGlhۍ UA;k. 1`p ͳU!w)'uYpOZOy&4QUSwV\8P{Ll]cH-Ps {\Cg/d]ƎGF4ptZ;ބW0& <al (B\ruq5Ws7@ĝLNDM V-J1{>{BSh\bQ6ϊOԍ푒0 (lT]d{4V~ObIUmga.?+ECkZ"u.~a=k\P);͖p"jnPpXWԡS"UGl&R5q̡W,w΃וּ0ӥ6fP$v~iM_*XU5 d܆|SJSxXЭ4 ŝ8Vo3s}s3tfЮ$be ()K!I9Ψᕯ%ݘn <()+ݵLf5 |N+CYWtN=*Ͳ-R&OW{~A'Χr_'F&lG[qfa6l_ >DH*@ch8vhvn3@ꦅ74 jSĕɟ/;LXTP856Qi]J$tqKв֧=sX[ u&)GXAzô (nĒ<+d,.JV]'n2#[}̷tH-[GLGrgw$-=/fqIv7\R>dEM H-̝!$cCq QOI''~,7SX8caxifz5Jc/ǞpGm/Ix!|$wvjNJxP.gl\e.3>i]MiNa5-'MbYOȧpmY[wpֺ|T\#eϩ0L3wDzIӳsZ 37R﯋(XDnIT~ֲ֠{Jf\:3Nʫ`Fvg(2ۊktX~Gwҹ SEr\kSDrTBn;+ Kx`쩚oKC! AI5[4M+zOjQw/(T4=i\%|Y0h=쫇,!N!=4z)=PU䤱 EW ٕ`WRk_l0ޚy~h[@[T )ڢn{3=xQٜ/?0L".ڊ;w j5L; sjUBUNl@gvAiqC۞SWkQ7gC64:ZB^Ǭx d96veIm #2ΗeI._RW2CXv8`8Z&jE7Og^6W<#SPe2S+>& (3wIJ_izH@>bM4Чͭй֧RAøgN@_[8@",<6GEr)> qsЧc;СAMqSş_mVdG?횥F)X{c!e{j;ˢ5t{T| Gչڡ|6'=c0ov7Esvrįү$0/n{Օ{7[?RS?piR˵rvSbEc0,XaX!cT‚;n,ˉ 84) eieVȲt0VAts=oO҇h g-xD:=1ds]C~Xwj{qM~w` nR^\wmHM!ᲅ[®c q{7^*ߠynKor0f>A -V t_pIܴ=0{UޫCĆ/AF=Ȑ~!h RIJv03ijyL"ʶ f>l?iڙ8s^Ԛ!Ј4`gqE i磑X`֚Yj!*8F ?&c'RR0\Xj'YEdm*O?+ #-0$R_wr'wz ;g0B:HFY I%eNptf&2 sY30U,Q%lEܙ<;7=ܚX0Ib!O<,yCl]52 QC Ԋv7PNX_LRCs܍Yϧ^d5,QtrE]2IŀȻH+ul,ev۱`:] i8&@+Ů5) qת8ֆ"cR[[C烸=YL{荠_G#_'jjJ_lxTM]><; 0-3i輪tCs vwyC9 s5lr8=z>C5?@䑦Cp&SJKEr/SDk@c,M_5uM.̮QӋiiu[C~vC|9!),f? 
endstream endobj 426 0 obj << /Length1 1746 /Length2 9554 /Length3 0 /Length 10696 /Filter /FlateDecode >> stream xڵwuX6H#)ҍtww 03 ! -% )Hwk7g{=s 65$ٹ9*ZP' ]t[x8]A@8 A@  bȃ G5 uA&A [af 1?HC\v9xg-PZ9@=``b PPA=`mP- RfxL uim]y6,ω!n0濏m08쯌 =?:UI5E9YmvGAU݁p=瓔Qrp I* :9=an Opz0wݜ9u!`7̿UXق.][~ ua ? pW7 -aq VG?. ֟(@/#E?Pdũ?Ϟ ϖvo~eR:a k 0$`}EZ9>~_HM+q13 ,MbCl܏ܶya ?~+ 8UINHBNv}#rs8/?R1N[ w:֏sQ]\$~? 0g-%a=vWqy+ <yfV"!)ޔ2q'}e2>BI&F/ j `,( 08/}N;֔W|"+9ǡ-y[)'߰kFS|3ЎPKu4I2IylHNҍKFXx PL BğLH.'b/+3X 3FlVs˩M*")ś—<4YYRS1* 9blksqlfeB=I׵Tr\hH0nFMqS#n="n)sutnq0\=ωR')}Sda@nb.ﭮR K˃ _zB!@F}㗞AZvg>+_}'UT) Yƴ!U^xps Cf/[1N3hol Ѥm"$>3P7-hդ4sLX,OяU"诏HGXmx89|%W.<ۅJyS2fcu3[{N) [{/خ6EѧJf@z~7W G3OOݟ_cQ H4ƖuH9 6㾣M=,q:J߰CR{/UcVߑb8$n5IuoB[eA=hda_F2rPFؔ*O}m%&CD$H'd DePv>{/ZN |dqL–!rOC BBQ Ȇig1}xd6f(I%Y0yh,)a)&}jDGRI|@f9bpU'q;QXěkQLzX*)j)'<7g:@mô*C=öʸi8ZOa9oJMEsL5?KvnƮΆW$ l,y"J;p͹pڥ:ӎV|4}[~F!J3"`ZMw\KyT,\_Å/]A̢  93 CLYҙS3纵]W?cهDPQp z:v#!B~woq┕NcHOYOil!t<6hK/a:-ފރM< ^U.L\ʽ2X{{_VeiVUϢ]Qd n=̫=ED2e,-ŦK_3c o/S5bŇ3ԜR3^|ܝ*.x6=l0VN8FAm},<1]'xbZ8fD٦Q9虏$LkV4=䎡( Dl:燧_hFa^c3gx9_D'^5C,YMLPU=b/92z@m67[{Y><]SޱpT1'ϯTRDLEsa|?Av41w bfWo!@Xxq]{&jR'ryo;4FC<[|ȯ䅒ġy$6Y%o UʎN*Bhxu]o?|-bF!w.vpSIIKe>tL*v]kPܟ~,rVyn;2=X! 
0EL﷟sn ]B'SckEڻ9s0SꎯC~5@$RHhr@&AFTG9W=9?}4ʮ" BvS73$ifp"`z|#p:O38SVghqwC8VH3KƄ)$-I'ﯸ* Q0ŮYz r:O>i,+[lى6'9}Q c;{r<c!4и{Gg?oRI}Kt)b԰wW6H 8^x6G2|sL jY3GPPu{>A9|zpZU_⧊C[Fq}>dJBg( "Wލ<1颃P֥gЋSq}vop3*/)zk8ӥ$c^di[/0Ы׈ &xNʕa T;.m?TS,oO ߠ( +nxm7Qq|OsqO'ۺ؝RhՋ@-NzI"o$P?U'z=(98X6k%g ".7gJ2ף帽ػZV=!?`njldӝ ]cr#{`$v9,W idwư&$h\}*fsݳ"C>qgmr1Ypv)ئ>chj{wO%_"Ni {c Ag>R2u6i:' ;Qde@j'_q0Dwp#=*ެv&jAbMdadY\#lVìBX8Iǵ"&Q,VDC5dV| /Hf^keFK878 hN6/2;EhO:6x,)i ,$K밇BPx]ؘL|h<_g>{WrRN<ԐnO=_nahi-H'vPzv6Ш+#G5`_=x kO{;qU^&~VW6ۇ3 iسNB40O~DzkWYԫ݋Tgv9u"D#R wTIQ yp`e_^H2q"~~L'UBչ[_ QTlBNj[/'p# 0j)is91YbImBKc(EPH ˿ZlI!UqZ]מ0!w$c'pƎ+ W $[/Fiԙ%C<聍-,N+< S`Q*KJ?lƷ|y2*zm"YzjQƟ>Jw'җliZ3-89P-2kWH/+awksOo$-l3)#~66VMNv/׺Q]`8ܴ5v$YMvGsflDƕY TXߠE4j\[vf&d8(2qzh/>-D4,K:|q)Vƻ&c ,~qq"1'-Pتisww~U-!/ϨY6&d+?Z SE+Ww7ǔw&"' y_] 6 \-XFuwXOM"WxOq:o/C+P"zvdmŕ^e =|2[t'\rT*+Xo@0\(G]>R+rfϻ]6Ȋк0[5G}%fW(~G 1_RYPs ޵vo[Lƴ\݌>2?w# _æv4*<|t"2Eߏ6s|`/g/31;=NCCc8O^AZN3a:P֞y3@0ݝƂ+ҖQ:o_v$ЖVl ?•+a滧 gz{E^6/p67mW?U"覂01T9 ҫӒ >TC8}j4UUQ sM#Qa7خݯ?7I^a VC(D u]Gbޮ+xc#a4]&\|4ƝipHpDUCڕ$-!Hwmwz^FʨC):[(?$Yݓwoċ$Puߘ%L3ӽF2? iqyS S̱:1{m۹DϺe[G" |wYž^h ޼yFVD}nGny Y0'>5DKe{;z]XSb:dih\Wֳ_F_ ![|W'V'{LLDnDsBnDzfFkHև <761vRAOՐG%ʅ&F@0$:7[ 3}oz$d)J֎8T:pn?|*-ڦTjܴmM_Cr4nنwHqQTһ w#_w$#&6QYTo656 ƐbW_ 쒾8( '[=zkuyU[M7SEuG9gi/w ͡4<4N7tТo&`+;kmoBb܃~x d.$iָGy}}gDg5WHl ~Ơc{Z;ek:)׵ˌS'c}*YZGFB]kTc?DPq[4"x o 8)<ƞqXri' `3(@(Bk5 I Ya?Am Ǜ ®Wj61D︎Wss^v8Dmaeoy:ۯS{">\$' U\>lЙRXKswO^@Ë&ol%ԓăOX&xm8Q[T}_c&L.YP;ʬ']oB}쯲% RO_bOeIqW"9ɻ"X ƮDeЦ%|9ruJ5c =23\r΄c^,ۥ <-shsԼT#mqٌpi>A30kAIh n8. 
:f߽}(_T/G[#s.?G~raA{B({»veT_Kx H|\;Tj-7/'WZTGe%–#9YU3vb Ċ$4/:Q诙dM_}m݀U9>?Je endstream endobj 428 0 obj << /Length1 1729 /Length2 1983 /Length3 0 /Length 3055 /Filter /FlateDecode >> stream xڵTyF/< Yh8_@x< |Pd;9XfG k>~&::& ̄@F@ {@y ` {?x vώg ~ͷF`>GA bB|AFvvz0N`؁h4*ֳs R2uJYAO(‹!}O0"`VXQ\' EFv0-D0H;B3YhH"\ Ppq|n@yQ`|׎oW8QLD̛1$]mH&J[c#9#(&-ef;39w]@ "vFx&{C Bi;Ll.P6D#Sypؘ ^pBc ĸY;!xI9zQ'Q4FX L3Dbhbđ1hFP,F@ Oz Fhf >de$>u*i@ e'Z{<$XUU+gٱ節o W66Ht(؎dS$[Gkk  z{-'.T$PKU)ĿN+Zl?.֛{aMd6DmHJ|ɫs2taY0 +֭^D+5u^X[nܙW^l}fK4sİTe}.r-pJ{ я3 G3Z,IQl.Ҵo5Zevwk2㊔cU'2~,Nǿ w]>x'Ń.y2 G`$lw^zk o!,ECe_Lw5|#*19NtOOO:|5/mJrSgytZtWIUeMK6ˤ?ȔS@fSDj 4zBxM mr7i\)+=ȯ~`HR#t^)&ʪz4"#-9 K>j(lqmĶʓ¼ic ^CG-46;^~a;sztwKfwϔB`McȉL{//Utnhؑ"cͷ@(h8 D Cw0MH פ/.>(ν cΰھFM[P->6y$|Ql 0_7ظ>'Hrq 1o 0hZXedqtmbiQs;S+Adž۹VmVаKΫWWG%W> y8d8tcV勁VvG?ɕ;7%W\v1+X~ Te80z8;P£ȞtQFNty7waiᎋy ?AU B4ˮN3.רVnmLYtjQ*%%rR^&Qe$oݯeGzžײ]u}=|S,Y+ ὠG?{83}E~Ogt;O~_pV}]UU髙&jSfH/MxޜX$ӽѿ_欱، c2r}&U2N)~-E7^It/+P 9w?}ƪ=ĖYOh'w'Uܾm~ϝʂJ;N~ !_d%߆_"ҫ]}𺻆 r!=m,&E |vuNy\Wrg_ }KMɶW=6&iS7*Lk:˰zRӤ>9֬J"]cP^v]~Q&BH endstream endobj 430 0 obj << /Length1 2424 /Length2 28604 /Length3 0 /Length 30016 /Filter /FlateDecode >> stream xڴeTͶ5Cpwi!qhݝ5{Ȼ͜dUrRezA#P d@Yd@F&&6xrra;ZD`b`fb`ab'vo tWq2 {z}e5EdjgfbxJO;oo! 
ombeȁH3`4շ4*@ 2@\I^UA- +DTD@5:*@k @Nmw7** ̌k0vf67e?\@V$P:802:;;38;0Ll,ѧbjfpY^퀖hm8nd Nb-ZwF8i/s=iLQPXY; zpo@# ~%_Bt,={m7wWD [=3UVy9kzY[w\OPD`a00M=dma rvfFƿnhèjmf7 gt0)do7Ow Xif |{ww/'g:Q'1/M.Ps@N`4g9?r9ZZ[moef_e-63s)9ђos/hmb |ے(Gmfhobg քj" =0XZ̬M,};;}Wx)`ag3102X\6c`0 8B'QzqE8b+Qb0JAod F\oQ SzoE߈-[-X~@;3{?oEAoE,ߡ%2YZe `7uԷM_.[a$m)o o_M_xSo{l2M=/Oolv<  GloX: UWcR/7 ?ϓdN]oevThes^VvY͌L6w3sfz4N?ӳYv q{῞oߏ.4_'5{~,&fY-!:F+1Ϸ'"$#k]Ae_>qmiGK֋YTp$KA/Mvѧ=TVf!tZKL 1@uH!ee{hYFL;KtETvvp_=K43zA#R06ݝZI*1wyZU&:1IzWGϧHm İxk{2oBo!hQit>-VyֳhAI56Jixf`2fZĔ"b^}JY=w2"D^wmOgTXl4"Қ{VSPtyѧwH(J`]Vz?A,8pAغu=Az&g:6.Z/G3|NHEco3]Z2=&3r+q 7pI.l)]1{Gnd8Lwxk̲S"XBx;߽8LXNžK rwxd`y0hXVq 6.$HΑx?K+\u$Ї۾./LG#}n-N߬vUgWqx'=X Fq`둕F`.`"ChP9<hgQ8VXTp`m!'ZvyODh ˜#ӔkK6zӝ{g|PߓA [-c56BPfFd/}=ܵt63l|{'Y#etY:;Is% jjWC]O,ru6hџ4e"&iAc{9GJ `+o]j Ѥ@1BGKv Swu.OU]\t=Aq~u㣭5izﳥR *h1ڵ(FL ֊ %d,1RP^,]e'ڐAl*//ױIy3fb69 @ {IYPCJ7&dJ{Vޢ|M<6u3iܲ)FLa$Wݒ?94ص $wMcL9,j샖gZKGIi/6?vD~Sf` 6%_e6x>Wp(J*OSYQa O p̬lsJUW#}{9X)KYhs9ƓnWFD=@pܻDN"H*s%_gf͒|cfF0t9 {^\]__0ꉫ24YAmR~yfvf46U bSZ3ث͡V== dkr@?Ae'=o~>fH̡w4\Xxџ(yT;A}U1uXsW~^3GP3ؽEhS1NįA/RB^6_ruOl[\*h7VtGTW_1l1ahLy j(2 +},Y=Վv"UyIz*F7"5ql=LL㫅dl kV#\67 4͂C4<]Sa ܶ=*c-%OUـ6 dR9լ|wfٙt>2L_FSyϛCp,%$H!|הo(FFCڥ(Z0Uܒ:G94TьRXEVRhQെZLWI֙vY 6N^,F_mEhǿH>63/!aۻǰbXMhkDG~$[Z^z Ht8z;i6p6]I\Vw )Sr˃>G)ƺ6r2x^?ZD]{F4 FE&t vzj9%? 9cӭ+heo 'aNBEG׷3)t̓Ovrdƿ(^g!6F`2 QM|&N!a[2_B o*`! f qM@%{xد&&~@h*Nt#~UF"$U 6^ͯoB:_A|0lʃ^ٽ~DSf=Xpa ˧w8.R5um}1h[3F6vOU#-z+g:< ]w/opqByиa#' >@%1: l( j/ h869 T._ =6Nw;7-_? r#9)Iiq"C/hI(Ū8[֪—?ڼ/}V$l>{I! 5B5wڗы:g?>a k 7!voԫQ,Bpd7i/J ޖ-/v$VxKSD1ܧcN{ bez62<ޠ/<5D.7uv9&7)*+o|W=9ID%B&!\Į!q(( 0k),K殶۾KBxoe`-KIvSx _/e+cR,y7f#U9gԐFcl}hAU!We0k&99o{SlU 'iP",VJ0v;Ǵ';|Gm0"ͺ-6lu~L] > U܄2PC)$ȝKQ|zptg`YBڨ.&<DrD-aQ4,%jDnih w& |g+,p&'d+IN/nm-^C!dYgV/՝܂! 
q^ {1t(xvХiXФCGIF O㇥1'w}`F,/{2E|:`>˚Xv% vkW8Rv i˂T{Is̒t"<3Jugc6NS )]Ha,D<2?%ŗI_5ji*FZzȎ+=5Q)hl4vAe{(e:v̫̑lU*m"|݅бuǔMKa$}@&SJ+yZO6!msKk v?Z;UcA s_n3Ikpج~whOHiJùt`+;I+Ij]Ui8BVum륛T`}U\nUGy4[ %\=YȹRh^El bCk{ꘆ~khg$p OԺQ@P|[+ܫ"upO\4hIt!^ٍ͙AԀQR}:|X'L1Ͷ.7NC2ԅ% ![40KAOH#-b1LwI(m 16`LaĵP^7g[Ԟl|ٮ4nK7V$7c]`;ʆb}t5XEMtDz /4'>j%!˼fּ=_N[xP̸ WXz y8BT^*D vumgXZddT|T 9ِܭ2)PNZcT)*NjR~ Ƽ$yƬ6Xynﱚ P,5OޮcG2z=_X$kqǤPH| $ҚLJ@GҮ^4!!S2!: "S+ C[9[pϵrƍvz5t<ϙO`KC/V8kJo'K/;(^ܳ\ n~C|Tg"\hD{#\=mEЯ/ p9̸9ž-a❶,5g3M?5-v =*{ϕIۋuHF})MQrlu`HN})^OJNR[4o?a(V쓝)\P1||O?5Wi_,8Fd -5c2O<>ULj@z/9`vH̷?=U!;㖦hyHvX"oD//~Tnq,-C[ZM7{2nѡlВlyN^<|t헗9k8 f w$2mk T۫9)@ҺT;t|Z7bX"-ZO6}TBJ5"k#]vI].AMﹰ5~p'S >t~N;rJqo*m);zb$:x?1 -57zffaOBMN 8[KT"2.H@eT@HXRv=`'-0tM[r,C(-L~ :/+%E8zUEY?0(gQ鼫_N:^ä9P蕱ǎB"5LŊT2P !L#iI65뜛^2Ll'C%t;t0]yP<ݍc'AcweC\ mF<;9 /)npQmm0˽yZ.Av/G}'Xƻ;;hB_Se?ikHL-a;_% 簤E?PH:)+[A@*R)S3 JrVuПF>dBcƻ徔D7uq߯A#eq͏44] |aҵ} HKeFn"_ZUufФ0n/2z9~aQ 9n{^>c'O$V-IP8x6X.+~Ŏ7C+wqeQ۝U/{!"sپZOGw Vd.?PXĻF1GVc,>:&ШO ?#c"Xĭ\@V]W衝=LrAeQQ8"S+\^= fnB\˟ִVzkVocG*-LZJv`"2WAK?[Gd0Y,26)Ћ7{{$KG+V)I-'wia(w*¨=^\?!C32fCϵ>uPiA\d`2 m@?`nkmJP%.s]͞jHoSdY@t?r:7Ȁ=|ms*ʕQ40%Y,(,8-dRVCԎnD9R%UZg̍ؖc/ZdDNު3i<«ڷg5/~vj+=8M8LTe0z/Y-A QE|E?j8\ciY$E DӝR5S] *Oԫ!fQvǻ-xCj{M!ҶbTϖ%eRftX)• dz]m_e:.Ɠ ~Zz{:Rb|ů@Y!L: b;k T<*t67FľB7!]jEGk,@I?vnhtDG{|UR#[{zr^M6ݺrF&v!]? 2Ҥr͓n9^1vu[aGٙnKfhΓL%SMX::+h,4ʥ!#.H, *FJ;[}_ X2Ἆ:N9=6jJ`3wgNIQ_*Y!sA\TAvJkoO%uX7!oԀi6N 檛E%\ LG(|n&L /WS( bVE#565@1CiQwH^<|}C9 z 04,&ӦUl_'{^:Gؔ:"WhnP?5lS3ėFyu%WGQ4\Bf>V):a3sxdIjWwjyzN|PZREmq` `&=H\=2P X#EKP|1XN*Yo a < ,'f 6lJB$6v@4:zUVga555Ukl^k'QJTC78cwS4#I$2Y}Z( wo#KX^<"R:A-Dl2XHlBuTOYRr_vO{7C}s=9E3A\2lHƳE %ngx&~` gz kt/`#fKS: ,+Z]hcF;5`/ ~?\%$^<Л4~MqU)1x1GZн҉Srȑ2]{Ի_T /}Y^q4;N4ǯ0#o\/ :dC @ 9; (]ER^ ˞eq6DZjOk)= u_p;9ò3]iikAho~/og/2-$}^3{}0m8kK,N!ړc1n\[b<Ss2ZGo6į#bH S ^2 G`\Ixb%ޓ}?v2!*%61cњly mA2BVh}@[3>8Ҩ4 @_EVcIY0_Vɠ.DkA`é"Hd'V%`QM{fv ߺPeW>el`ns/HޱKqZܓ6S%f#r>}j@FPkW*CV-FkzUqRzdw5o-̨ Y:3]% 0<}T6T .!o2cEiG|Jas\1ES/8z2eI8mO/qg}9v}NŐQoeNX$'h! 
i0IY>::vC@zU~qMk,mʲvxYATGDoۀN0Q?xxsf?h)Qq^z3 VÂ兊vdzX?tH, iG%ґuuRvaA@J+}aU,]Y>{ B#Hzu2wCHNv̲`i?#4b;bBL*n)mbLWQ1 gՑB T B־^,p#Z^3wLBN'#ظٗ-)Fɾ~cꇶv[Φx4޳Q4`ݛЮ؅Yzi0`b ٦ n 5b;{|]Z?)}!mmLEON=of0I:)ZLMRŎH}jȝTBjǃKc7nAxfs\2B4MQkr =XJ@!,G&%gq)ykו|k.E5lKxlchЩ7)Iڎ&t/vEqE \Q;YGvx1Iwuk.P`8ѥ~~?sXD^P-;6|J#]mQA4-gFp-4|YQl "W6\@KWPbɟ"Rӥ-$z{Q}ߟpq{T|i3(O}[$~"ҫbI'&,̴Һg,IUtXnsU7 "פ_n4gpEעn=B mn9|kL+xTk$,Z{a,2_gh!q^K~ִ{d gr/[ƋƸu$ muXe4A"'1v !)ZGoxM9Fg6¥F~85N@ҷZ?6ff>lmh~ IkQ`PvbS194O@[o5b?pY@)Qc泰|&r?y\u,n?C^lnk>E)zj#@8_%r:G]q, ]^Qzx@nh3X;}Fǥ\)e "#{ ql/Ks"F#T"Zq#+sE'/RW:>2;9a欭ZpjzJ7V"ux-FdӉլ ً">jtI#$+)wGנLE8{U+ɖCy:2 E =N^Pw j~Ta~ࢡ+75#$0ܧs,D PQ09_}89}X\9}vYuK]`E>78"& ֡B>gD/_4ς=RO?&CdxzO )() fV"ܒ> ;RhI^{zR$ďBm4 (<5\Q˺"{ކ~V&JiM˜""}xArM8BoA? \ivV)d4=<N v-iN=۹ou2BTb bpŊtR)V7t\0°?;2]D^GH~ xA vy#y1B 6Tָx0bh&ӊ"E$})BtF1Qq>!4Ÿ&Cx 3gF^;m+&; t uPVD,R,ݴ R^18&LzEIU͉ kz`W&eE9D'.gBJr|-A%yB|ѸKޮ=IsYau\|}O3vo)r 0͏P]ieELÉZN Z?IMl$J{1܉ftcۮ`ymz"9.s~L :PY×[ {}T78Sb:Zkºݬ?? uSY=ېqMKryї%&/oHI [)u:`"~ß>H Q]9T){ѻ(JܓBl=IXZ0b߭Bz}p5Qb!6-K1 |q ~ը:%ᕰ#stEG&wY8H5HTiKw"t4d)pE\?k(oX< N䭌.z]ґ܉^pf:eFe O,h Bl]𷰴ESyRfmpFoeHƃjSpBLD3,ÛO˦s, L4 NuROeT+~ox|39 Y#&\Bw"4(MNRt|-[_A)Ud/xrkgn͕d}RKHG|h!hXFjG{!S|HŻw)try,B֤ZXH C^<`P#IBFa"Ϯ_߭敥7T}%0Sk?|.Բ+Êxwgep7zTt 8P|v&8*I!Frk/ S趀`,oy1F((x5(PAz"T<_٫KeāP0NבX\ hc'ԟ=eø}.XOB$M4̐\=K9u0NVhi=d޵Xdغ⋓Y%™ь< Oy?V1ros T%ǭOd7 B$My&4*C,(]3zC<"XR86FƋBsGNdG/B$ǢO5.U !|xW(ZU4{0EWx]5ŠJmMyw4!dftT`ChEw'4 5)\qWP8JY|T:!\ 8!q}8f#w)FX~H ?6᭢k:-s$R,<)]͸H[߳g Dc{~d,g}C Z/W'+ ˼ ?u=%p nѰ]0ZYv˒Vy;Sx uUO{eN-pGUW4Ro;+cHE# ߍQ~elLb,xtAX1+?HE*oqX~^`x`"h$i͠IG#" ol餙 Q4snv!л20YG]X =l& g{ \HBon7BgwtY oaG;q/C !N]Sz5K> [~Yq0r3bY_&R[lS\V{nYjPLyL%`X*K -0lvߍz!J/g,N:b?%n>.#C2:BnKkBZ q 浝k;E'-)Tݪs K030d`~&SQcesb-4>y~H*#?@D>d$\$^'Mz}@Fs9d5Q e'^i2H(Y#|7P*ث\+{l֚O*)?-#MÙ`me'yu݂\Io'Vvtq`$ZЦDt1"I 鹆~"ǸCO%(fqQ>8ː}ѡ{~W9,:4MqzWI$#U Ԕf=N' KmڑS#L64̄6:pJQx4/^='Nk;>Ⱎ}j CJ[~.yfBU*I+kZ^w m)F(kVE+Ӈ|棐W`K 3n9"Llq6q: ͧ7#T%"/_5{smy! 
iQ?PIN~TLor*Ы_Nml %LIWF kWy|De>!R NoE9K2ڗ72^yb" \$EIxvEHY#xRc' s?}߸Χ5gɮ m]QN ɓηaIJvrd'c*=  Z`tJLWUT ҥԓsVJ nDE ]M⾧+*VVNt ?c)/II w>cہyhdq@DUNuR Y "nF8"մ4 @QKRi`0*q2^vk˓.>A'y PWPMc}чViC*:U(]ҿV#E֌ֈ2OjBy"@x>}.Qe !RcmPC@0Œ0G}Ȥ,/>b%Ks6-~ZzVRsZdQu2^.<\|-0)$#|)Mƈm0>1`_^'lvV> +CzR*OMp2zW[-/;9@TSL #_F!s8 ɖ-y{buux&߉S\H/ŕUXאwB$SdYˣ51N6u kT>Cj&[<.zQFŰ,+&3A['X !j;jVMHJo,^})lu"v S!IC[R_&Qy>:WQ-_+ܰw>a>qwX |DJU(Lţ|lF$ BwqxP?Sc'CϚ]_\ qa?J1r 5Dua.e3Xy&Hjs|WDEm u2ulOyA>{PB5#vC J}fQ"we aPf[`B˜u]71P_ɇX@6 9r\o74Lqq:JlM"ǟ 3%d}3%AOAj8f9٦-7F mP0燖r,\fy, ]O^cv:1=3c^VP3EpJTj!&ݷү"Z˝O3L;Dl(ﲌkȨI|]r<*iO{ݑof0CϽSS\)+#`X%&DvQo[e>QLr   ;*H%;a TQ4HT?ȟO'B/R4?(nP$QVY(8 fS|Cϋׂ@'EEdrP嵀/Dҥ ~yo{ic̃KR<8|1_Alyu~CT/J;YycR7;̿&-fؐen}AA~d>n(|]$\&c"Q7Ta&c:[}P[x@E|c1j46oL &#dp @%&ᚌѽv'Υс ";aBĎgny8RaA-/2$okfw/q_2^}s<g@Va(GSEj)VsԾ ZlC ~jfꦉ#LlA: ,wR(*K|%sfA:=K|9V[g/U3{$91alZ.}*įq0}#ol=j,3KG`3i-Q4])H>:?F@i4!mF 58r w#1! Xai&qQ1;(CJH D*_ [*xAq3<`eۃTˈSova=5r-lv_Ã#rpg]??,nq8fXZ!C1ȒѻQMCG0 @Bn=\,#pPMcZ%&e$ A1zU!J%jJ 3vœ7MoiTي% WPqrZKUqA?)0$Ϫ}%q϶ 9NbB: "MITEFuSr\ܙ jΓm@J{ !L7XH{یZ2&n'eID- [4jqmo*bjPwW~`,rDAiIf/y\mtM|doQ\*$ncZ+s+锋ʼ/yp3翇OCIXq+f|^'@#wuzp)P%a1')x?㌥3\ԷQn8FQ\ִѫfjb-~/xb~5u(=޾@̃|AH9h9w|FǧP=9apIYuX,? 
P7 uD0i%n}M\'9ǝiךQ^$o\BMnDՊi 8GI؎ 0TW~y :\Іn{2J"֎R}l,*tw>l@4}PR@Gkm2)y.3L@ 8:iqsYא<4P|B`#4\S$;*uZ˨7Z[( r{p(I![RLyvLZϧU~};J2 [pk ڡ2EtS hh%%' p+!Fbb!$vBr֘z`it;J&ދY=>)] Q|i>@)#7'E͍)_:݉;*CSv5Tr20*>Y E9p`"TJ/ }fx%vEoW j:!|fEj3oc/ƟƐ1@g,dEunNCʊ4Ni݁沓[&ӉI.Qj{n(xy-ds`R%n>wLEHh3}Q jFh-vRkX[\1dr#XMeNlUww֤"D˕5#,po+Ez0E'1?N)JBQьG\ ۠03/l 9:ˊ?e@la*vx2bie.>sS M,m \у&=] d1ګ<ʘn"6|"OQ> owGoIڜ4Z}%]}aUF'ZYxu *O<0w2 S˘?p)UOUb4N+-P~V$/mQOJFj`Y X, 0Zр9_Qh(d1ۛ{~r&ͦ7#@*7vD<ܬ1_Fw2o/7%`Ϣ7w4YUۀET<~}̽Y>_՝ N@!‚ @ j_w_{zu' 5qCt)cBl¸@DNL0$(cDϵk\waWdJHDqoJXr8:'3 BId/)F{,B.m=Tlo$ijj]DB+g<(J~hiKfC~Ⱥ',v  f5"|,f:`>Dn&)EF"y P6PxφN:Bwtω> o {8HP4,l.:#Pp!6H mt|=-UĴױo6`|] XomVzg/=w۳"v̆Rn 5#Ka bw0ϰChDHC}k)oIִw~akЃ 6Ƨ˪\ cЇ:3p8{OqӞrkaIPzOѲ ^ @ bDpD &r=H.HxUx*FR` ˥VJʩqMެ=#kK$g۽̧KԮAJnReboD;8?w)/l;C $½ؐ!T]+7w6+*  |k6IYivz3H0kڨM.<ɝOFI[7#(^ee>Ư@em`P>eHA #WhJTrVŧBj9TTjYM'<7bj*8UT88/6(O@jzTF.y Qhan|s}W>#E5{98(xfBJ*ڈ4{mKwȍˆT=zUmf5mX!x!xy<,򀙄U{M&ae}_eΨ^D"ed+JڠoVuڵ/$Jr miU J&6TؖEUn3CtJ PO*@Ӹ'[ h :&`cH(HjG*/̝hH?''ށ Q#jjo[<\lr (u$nQ ko(+OL==+[d3(~7{QD-VGe}$O>n?fK!*!QuKS?UWM*}2ij}8A1 lh>,Æ=n`eL7~\IHnf؝8d|%slo%&xH*3_ =c[%rFc*J~K&`Yv:/mj>(%TiO4HŤUs|NJ xBKCcI8f8kRLJZ~kB \ȸ҆ia*S!u3r=mxze:Ou4G"&^+Lg.OvȲ5vd6]!$-^;JV^×Fe'f,/5tRr=kGBU' u Fxy3M9&ە菱ѻ%ҙπ&X~ *uc& V}i`זrxFQu5^F:mG ċ">,>Sv..Z@rI=Άm5P>cN9G<B(8ES 3aQ\BU^Z8_lyeDii:7&%@ b]BL\<0N)1E?FH8Jjoc%`ajOE'+zyAhh,ERB 7{&̕͠2=\b8Z .x章, ZɺhܴnV}@n|BabZ8$+3`B?ek5b7dj_ƒg't\<ݗ ÑĬ 6wU{z۞!W-~|:{ۿ2t|QF$@%'PHZإ$ cG<^*8BgKX4/!TSvXaD6*A? 
*ӏ*}בِ!3FMuqw\VҙWau^,WarӞFGTl0,Ëk0xvw׀ L*Z?xKFyƚZײ;0Imi>Lڴ8gxRcTff\K(S?=dTkO^{s6OT]֐(jůf')o M>~61"toPVTu.NgvG`"-Y5A8CV#0T ޾8bX@eOIk&ec; s?,~eÛ(vj['Ս !pcv$|b@*alHHqlph&ϰ"D!-;1sQEW,FC&[ ʺ%WAXIk'ݮj;#''˘.%Qy y4C܃"U mR]F<+Js5.e8iP >M(]Enhd}%{TQL$kgU-IQNf+z2_BV :O(K .~m0 Rgx-UZS}<c!zEN} \@@2!8m-#u'goka>XNl̓ynhs8Nl)DmX3Q{xk100# /?{Q/_kθF4TF\,:("eN ,ا~41VjKf_k~q( L!a@$jWdx!1K##T KjBf$]Z ~v XS%5&?>EsLSG;}4NsKH= TrLN7}="f (g.gr-"K[a<b.z#bvvt)zSCJaPeCƋ47rMEK4(}(rmBL*N ""qg(7F`ӢN p~+r2 HTa(g2udQ {3K<,6K>tЧE5I˙0ث`40|gMP6p歍C=v?V鎋 ["8IFd~j{qAkd'yJv(@[i߀D)S:NcF~xB}o'1jږ-l6LV ,Y*uh1B_MPԑU_{&.PByh@g֣2Xdj߁xZ$+`.6\Vi/sXJJ@քW d}3%•`y$3{\'8FC6[>u \`/@uẖq*K'ƁdzNv%Hf[1 a:zW*Buv m rS{үYvc {M|eb'3{M5'$' JL!$MLD`f -Ј\?u`Z6(j-R UDw VbZL:_ 8i7(Y|fınCVF_3-ޜbw` x+88V"#7G,YK ~A5F.Џ|7}ՉϓVDqxt1.rS{9z ày]1p ҋuk13e%~W6AS 9<n Y'1~-ʡ!3>|qؿ?Fljnʪ<XH5ORǏR9>]i];.E4s`e; !o],RPy}E"ʔ lda_*uذ׏>Y ft )0c9iTA9K@,ض2 O1PZRKі/`]6 #]?隒cr={u=]?CQ;jl&]dæC& G%<_BSd5LTزo|IuG(&Mًݖmݢ?] !`2|)D_q(Nƫ7swm8]Bƽ[SFWد-f]l \,9AXƜ0h)t+E(x%lMbpovY: Zp/)(V0+"`fRpn0}V84-(!|-yL0) O1׃#ZjY}Y T)C=!@]W(Ǘ:te//jw^UQv` ;yfw:[b~ 3@Waf^(EjdlGh醁Y76qjl$RHLZI'&$hEM|mHB['aSRA{dE'KtsK`V_!t-C“ė2oVFPʀXh6*klh&&Z\/V5.t>J8 \D=T9ga?xugc \hz{ZP='aaޢi-EE#攍Þofi n> Hdޔd FʠM]?RS_ؕݍrn@s6ulo9=>nٕ}Rz[M?~䗶CjWmHq 4(5" nU wkS?yOia~IɌ f.#6ɦx7WJl-C׀w*&'4;4l2̌O 4 Vs6~;;o]7G3Db-CJ \aۥO:FP )'O0i GGUdgX.aNmMtlB %7:#a>gzsi-T"~h]_R,u@,$uf0q;Hkk2i&wi$z0\=d9яóvdı abs<0M!Yc.F~ز6joO 5ˠk,+N:xSKv%wQECԼ2ݳ.[0zsʱ{a´Cۿu_$f@(%RUZQEdF)IJ.K%-*9j3 }G-:(eaK4LPۆt2^q-Uo*|VIjg?`"s%Dܯ{zT֟KbhD" iBFDfk t =S\Fw 9oF>ƃBgA]24(j]*P{͗a1go{h>ZeB$Z$ǸQ$WϗWЗۉv] R!,#[Mi}+G[3zU{!fKtK/?YsC3cpi4LW;CPN5`IՄ5=QAM{ݬXG& .}؀bmD}.GH[s&&e֍E$"6qf:> #=ĈɅ;xHzr %ktFmX9ӉBl$+K*hy & TkI;4xV/eɖW1}(:d[ጞF5,X]"^u-NzOKI׸× k~0 ?G[nXHr?ՏІp6;$1挆s@Jx=(+h[Ns!WE{UD$C jU&4xG/~M!|RR=.Lsc8ln1\SixX y-;1,pݯCGG #^`jKZhن`~VK7.zo6[.Gw@d rX C`ȎQE Rkd|9Jȏޕqݣi'}]{j-(aH7P L,mE/tN !2?`M>|G$PصG!Ч =*`vTR[Q!\- PS0/@sSU^PZ3fj楞;ؙt!c,7Yz%!сlHX FuOr3AF^1d<|fL endstream endobj 432 0 obj << /Length1 2923 /Length2 30477 /Length3 0 /Length 32136 /Filter 
/FlateDecode >> stream xڴeTL5[ Xpk=i<,ݹgd} }Ȯ]% PWPٳ0-\mlL,,HbN@c+{ b P2u=XXx(R@ q+N.NO_ۀ@^ df[y3Wf +PF`:̿-ͬ`|ƶ@+s # {+7͂Ov=_f0w Qid0#1+ڻ}?$]mm4+YzxhsQw31+gI+w?*,b n}-&4~&[pۂ'ij:;xU`0K)k(j?- d`;9{  ni3?`fٻC.>s{'ߋ `m0Af?,%Y̒+`2YW pu?\]?\]WWU pu?\]+hA`.ZA`dv5v6/?vf͟P4L m-Jfidl >]2s-$2]˟? g fdjo nmk,`6݂<,܊*{O0?pc,,`s=lwYş"q.,%f W/$5#VßR\ #4Yj%V\Hs ßap c_o>;[?\ج*VOXDGW{-ibҬQ7~KhN_Xp X:Zo\ w _,_W16pz XB? 3yUaϐnX?g`5'{m1x߹벀GVW?7_Ѣ^`M2qc\>j?73?݁H A) !eLGBڲq0 m⹛d@OMT|I@ڔAX/͉_ǮTo*dP/AF{ dFK\ 1@cP3+E2^Yr[4k;'[ y4!]^Dw,M`9tЧkvx6Op e UV=!(2I'*э.tΏI7fn hk:=V}V| .VH Y7VI[P LLt9&"D3[5 iI[cv%,!St|qcOh(8xU򙣯pzl`ZCT~u˕V /_Yh6Xpdx38hYL-% zA)RqY jxS,f%Gq e?= :m0bn'x5HbzOw9Z=mT \:tRC6.Pu r$"Gzy/?!*١f^7 pVUZ7`/'֞U6-#ˏa\tFo^Y'\̣Zd?FՃwn9Tl>3]pRGme?_PgA(g,4P粯H~{@_ gϥ{<&YK=k"$S~п3>Jmi'?ƳnXE^-{ͻ˭o%Dbu>W}tIJ@24a򩶤\)cыKYETMrV\ʹ"g1r-kUAv`uϟxX| W?D {YkG(<&~qo rk &N)C K'+t4Q!rbI#OBJ>ͷ L~z{֖̃H\7F 0~-q<9LT]XP%|VDž>ӬsǗGCۏ6LQUr=\nsyCJg؋3f6K,3.E(Ɓ#t(oJK .vXUG -c$]IƢ յ$> E=;?eEP3JP1d85YBಒ:6S?'p&Y)OM8 qB9w bAuK]b G$Pl,_I,WE"OKZ뿠8Ǐ a/\RL[_4zT8L1~z;sÎu:]6TC4&4הkq55Vٴsa.(×I ڳMJm:K(|Vv9fׅR=.ŗ/ s kJm}liZW0ʦY Q3mQ;g>b/GvlLaelɹuf-Ե@c1ܶ>u n;f۷/}(;t2(3Δ8xjUUU*/6޺tV s6F/߄(eQ2U8&Us51RQ2BF {>ٱL, w+=Լ|1O-:,f41Qk$L""հseh%SBԮ 5z;qlf -eoaāZy;qAቂşuB_H8L~NZΞ |2KnTbP5,uX CHvQ=[G0۾:8IJ+~(Sڳ i,Ӂ`N?Մ:[͹%歯}F L-֕!ꈱ^kRwڥ󪤜6\>謅#8CG'B+?i\$J7b񝭗F۝e(~ ^"(wa$ Ցr[UWf/?oBh|c5a0U)fŏ#@2`Mwqb{Ou.94_0-ae@*ݛ)ښ'fOUn΋+Bz>ɔVMHhHO zڳPڔKK,B  A3-0KWN;L_Cg´:e^8BWك1yf/*_Bj.`"C4hb&u'DONʿdHHu~*l dcb([XE[~\T%PKhL]dvzЪ|mT\- ^<%C(+F\%v9jQy+t2GY÷>u[XY 3O(#!Y(<ƽ.x o@JՍI3ѠџLp7L~Ru_PF.e eP9l'O Z_o;v[o8{ Vzݔ Laxtn<>c6W''ϳ^'6c%~/ E^ݑeD9yeidԥds4Qu6n S+&@=H|甹_wZem_\D@3wQ4IEЧsqy ђa3G>㯒yG"0s"yV}UMز4<ARl=hQ*ITIK#(̻*$ ~dFAcȋVibVZVsշ~bcˮ a, j*|-eyF[QNS@#p?Íd'Zç|8!p9aTUD„ ƜXҔ'>aah[/cũֈc/#t rH+eK)0\E3f&(0pGsΏf*Dfkaž:8h9o[31sH*҂=g>V.s:޹@Q$jX QWRgx!$BEwb^>K8#(I@zN㲜o㔵XA+Yv'KRcS.U ^vJEs7e|2fʶAOɏoY]h̜QFB4oĘ@ 3M-*c;7P惂Q nlz# a|y1ڊw.65cX$V/:M1s˯VMi^`?] 
bsxC$/ Tu`o.įEm ,5"+i 2(W %ie~3@lG%BK5SNZq* Ugm!ۡ=%DOM_ꡒQ~Jojv.xcc=Ks8K((fh%B|_[]dtWxD=lrǏ,y>Kv$cGhq51"3|ze(,Z4 M1D?el$b/"z3y -7QeeD&Ҷ  fBBfzm' 1խZ=);Cjm_7}#lnJg%cmn+<);VT%k~VǔOQ)՜\|9w sեׄ% 0W+3o۸UBMJգ,ێLvRCӭ)_л;ʯ p#΋SY- [.(ˌ4hE_ Lm(A{+/.~/P|m`j&&˲+^CPVuʋGΆORc.]wk7I=6׹w@?dT]X4N^Ϛ:,<"lJ!=czE*bU,k6,NA)o…n\Ga+skKGuQ ڃ Mq޻v@ZlPD_nj-8"loD L)R gu(ݣć>wU( ^0D )Jq 4E]PLJ\nd7xs~[I4*xcm4imCoċE)a+(J^UpIjUhoȺ}'ty,qlޭ ws'sgRޡ1g[4mnwiM ;iiu"$ fRZ0>~o& ֺN|Be‹@aM\( BN]17?T(~ 7~3Y#Iԥ)`KlrBXFӽ*VA:ZED 4n^8NikU|yr.ˤ$!->>A̴ `i*JcKvgo_7VM$:?8RVi)٠ ZڦIeNfǜU&d0 }'E¼[Fr-nY_UIwyu3byH "O Fh[6JyVP4-[0H}C: 3JULP5BCɽs+(zzмSfPdn0lGuqnZ3 +R,5^"n)xpIk#ʧb7ʝѤ@dO說}+qYw(Z v[kfM>\O"_>.]Z9w`ɰca) UOeϝ+|;Z )͊EA%` 2\DPsFZnfhr^V*> . Aj*heBI8M[=3u"HqYiV|V:pS!D'=Cg/7*[Pi_- cօcJ3ON9<2FEnS^bFu=K,2wEPpӌPPeje쐗sP5&G[aM ҈z4lF-Xȱq oOA L! &T BޜOƜ~*a~ wB̋]F\ˆry[N,BFhS$vK>KޙX5/D"rw.jҘ7\5 3߱Y) p!Wq\*Fd}ʬ(͹!O* y%tIٺyrykj 6%@KýoZQdیl\Ȝ5*PrCS8+ z=XB€BhIڨMGz@kPC^j@({ƟTaYAJ֌]dKkY߈FB!_oȖǔkR`Jvxϲ}ub:zF8]5 -6xV{9QKwUIru0?T`,lwDsxXG#;3ГVbъ,MD}tr>)Ȝt^kuyzfN d|ېeGZ/̱T*;v x"ʸ6fMѰK]x blVY;S;:,yp8'tLEQgFLy |-_,J8s{\-^UN׃濔#=wA>{tQpqEM,4E0ZKL`8f+}H^lH$f4Ok0\z80Yz83|W͙4[tK9Q3ͮt>/%餞B{pGEiKh]tǐ}ajˇ-G!QAԫFV-d?%GFQ;!@#* L"*Y2iȟѹ|A({9'AE ~yY|YY hdx" c]6X'Ȓ|r\anzj17kh]lV}Nf5rB_]0L09=y>6 u+ȧTrccqG"j$EF}@\,g{-㎈|MNľ1D5|zh_wvGZU3g9=o"z!\n#ƴKxTrḦ́9WFԹAl"2FyF7џF•nϻs\r8j$EyWFU36>IOԒǸ cx(zIm膨I.h>MvA̹}*DLl8 ;]9j[na%R&Y)Wur*T- T.s|{J S疓O16qlk|sQO6u^΁Ptܬ+ev@qe1P}6jN;"oJhypa1mu{,NnU`8u/e0 +0o(u4|yϭQpݐGՅDn`3tHXW}69>M9(#?AqV^yc T\V.o̠Qͽ ౵7)C$&p](USN frp_Fۋl>~L(],uF!v+ݰxvϻWX8R43aXg Odje# ^ȃNn A$çc-[vD?'mwS<_c,ǏH46q{.h_,ZMέ[P_D!ݛзgembAcƫHCN4 A_UFxWʱ3X^MYwI[MvPE=3iIX_z3x]QnJD8m Va(Tb)mYvj=^~HY*O^!ynie 2צ$%;;k:Rb*lV᭘glthܴKQ3#e\T!CdLWuzՂ F۹nTݓtLͯU_)}pqj+;3u2*׋ysiq\@ĥHkb ;LLKX :`ڱ=0~ Ϩ`.D;| Ƹ.7'}uk]Z­:nԞIt~dOBˤ =bnq׽(o||" #͆2GXd߄ ܾS)ru} -v2YFc6ě0ǹ3N9]u45ۗK)mymIrgpsm\dYHPm/nM`q#ZZ71j ZJn2lTCze TCN$H6*wQH2>"e7C?CPeDMbԧ0F cŽu]=0R+_࿪7/HRA !C>p&Ap'y3Cj{5ЩGoVLP6cM3qr+s^)|P"4־޷L/ p _,.ӓS"XC |A>V4,tPIQ@^YʞaBSGomE^C׽*&^Ić{ Φ޹QڽTm#q 펂ؓ2>ǰXţ8ݷ^;.wj1H-Kwͯ'C% 
nzB,8Nm9R9u),+so?ҫY愠U$}4g?:Þ=Db)|,Yum!_W~2;$Zܩd\Xe?BV}a7'~1oӖyҲҘNtǯ1b%yc연.o_yp6uSO;9n秱-m<KN6EWi|ŶEl"[^ q]0# ^23w؏nQ$m{˅_i;TY(Q3l6%Q~yEimw2) U*' \uj5o"u)]z:2J\ $2HV)yP87=!}y*k6t8{|Al ?0ƆҔ%>sVN!fGO t>xqڇ}k@ŕ˄}E0G~w9*Y:;i*2VaՖ6[hGX}cn&4}L(X<8Tq@eA[Ӓx;35^ 2V0On&&d[lQl C.D\ VUץbݙ/5w\RBYs3cZêp ܰ<2D ¬(mh+6eT1Ȭb!R7ReCh'){a>uljDvY:{w  L'yjɶ*TU Xw<"r:}hb IˠiUZ#qXedc_ gqtl1 AUH}d=mWB !Y)c j'/zy9PUX.*/RGAJD@^J&:X'` 7#J~]o Cww'fܫϭ\am\.3^d/ݫXd֯n /}vFCL_i8QsPe/:Af+L6=7ѤZlQ3Q,޿ǡ{}e%~G]bFzQĵDy~Qד@b9'4$0`P1~[:{e9[^vbKkƿ`odDs^2Vs ksPՓ,g$ZY5'HU#B@4ƉCĹ(amq >i4e .'{W#.XZby1g9Cl !y)=]'$ t]8""ǧd'+A.85gM:{uHl_󵠥uR>0k.}->\ '`$`wI)'Űouٟt| ,J"=]" lsؒ2'3Rsc e9Y>d]Š/J bFE anG\0iUo39b6>ڗ eyZr`. /"g&TMwf^),|2Bfףz$nx;R];r06 և%naQa$fI$ɳ"kH =652L6#3kw{8]Wbn%,ծPQV۔rBF#SZ!f0M/*7b}Jcydv6!w1몡h//r]<憄~B]PnDz=fGkzW [q(ʰm6\^$`p Z?bx88Rm#S@gHӏ"VB2q2`wu0nw3jBg?)P/@ҡa'{$7c@y_yυkN`ÿ5of9&i6;CsAw}TUۧf /[93j-m<snG=f×Y,h_JD6F&2HO[v&rNf0@8\QIiF57 ; 6<, ZV/iLWr20)ˑ9Bn%ılR,g:x%X yQ<)DX ad RɷYmw&[˃m{$Z =TL]NPGgbg#B~R8). xz> ĖXtɸY1١?2wtflY@Yf%ΈX8}{[K$3^pPFk4B>S!ylU8wuba{<),fT4|<ǃX~SZbԖ rJѵU ֥> 8Ǯo_l,,rԼPZn 3Utac @d俎d8DJwd˄}v.a{3cq}jǯcik}~I Qwg;%B)DE)]} 򰌿0eq!zgkS{4NU |=c3•fMź4 __v|/wLt U7Q+Y^ WJIȉm7BIc#hOO4W\8 zs,7ؿW{'ȟi)f2 ~6M6J; ֡N ,{h`ܽ2](Z}=Uc8}ӞDraDDD54BrA٨zb7bu"|BgwIo6VV,K~!x7wKqGLG5Hf$j\2l?35*Q%«}fAXӶׄdS:t߬^ ^P*>\bTxDZw~KrO)a)u&czf5q _eؐÞ m؀HPyPj6&Ã}ѻm[R*$2P%/.7.N iU)|U51(/j $I'e8'֒:~u~xYf}<6>\g G@ë]Ri]cwg/XyD5".rwϩ0aj>{+j"U~7,C+>?_FPSa[lKxJǴOUŹ_?$d }WAn=$x2hN~g3xO//+vT2[5bv2w&tᤔ,c4̒ BZ9BxTX㕸b&1b.s)S2fflz;R!YBHyQ)IG'Ԩ[SNbà0.2cge):`P %΍ RJXmM~!o~u~=/jȔ %emY,*$ C*S/KR[ta :Q^+nڳ1f(}^Єe3[3u^j},Þ51X˄Vo{Ͽ@:ˆEͺ0wet3EYR./m5 NzFgbT.+W{;PW (WxOzp5{i4PS]= Ntgfi"xSK;*<:&+3n*l: ^ȥzȒd ig&&>y$8Po(60AfYV'%x5$Cd|_ 4. Zb>Ėm@}=ME[@G>'+ߓbv`hyT/JAFhb~MբiG;/Hf8ΗA$p,R',nՋ-]P|ˆ kpP=TEFCC2?/:DϝymB;C(Ć8[C'm7pnŒ٭iwz:T~̾>/^FiS>. 
^\3fA t 07ZL&:;q2 C~O6x4ܫhqPMd `Dz*ջ\!Y ;l'8:gΚ͊Z)/ N:; Q9hﲰt#:P--qE28+gHhN]2e|3I2Tۖ5Qiv~͋8=ƜR ʌ[׵ h$ΝobH"-оn~5[ƙW!-~23rI^ G?BݞAe zqDm'd C7fA;@,'akjvD:Z3J+B9{?gt&``f2*4] Ii <{[q"tsrc,D@Z m4(r6'j]#R<* ax/GVRR5NQyA#7 *|Q$ETdS=U db!'xz/J"a8|{CGϹn(T+;9~/YXn|&JגGK2N3}zn^1A$BZambX0JAIJ}QĊ2-?:<#꒮3 sڝ P+(@ e LԫgSrv4{+~Qbs.n-N le-L /zS: FPh{޲ X&Gy6ƙ~=ə /ъ[)U/-bu0-ۺ YP9 'ϐIG樏) d :>AL|) Nj_8rӻK4_ƸpW5B74gE JxsIZ+Ĺ6A_Pa۾uOVrm'B{{&':!v_NOoi>8 'ȟnPr0$dޓi;Tzqd>/MlwΧE:-͑ !Ywmx53{ '? ` eg7૵Lwkoc/i@cU: XH.?ycQ/f>wjsiE2Aab!:XD93ZÖGwW;~>l^V~H=)sJ}U3 ?ː}*2Y2b^RSkNoC7Az턆bIJE֐S@~|ݱn|'mlWueǹ$'7]xoQm LH]uI(3 0PM469Y9%_VR+DB6Fs,>AR<O: a1tId=~&E·{*#]1!I:!*AAWL{J6[>DhŖUŌj%zPL<&jxIgl*QPV^ 2/z_[M67ћH{7Jp3s-lRXns_>b g-ozEGp#쉝 xi*s4tFm`K Ө癐 mA ]W~ <kB"K{9++KubbWpȵܻLa~icPz~ሠbV@z nDy9mg"w Dw7V4-n3e*bDԿW=a͹C,-d}GG>Τ.joD<>`vLHsNr$"1kiZn=ܠkp'@o߂k۷ΓZnjwt@Mc۳+wNM+_VF+/w*ԩH_.wpd Y9qG],ˡ]Nn$5G߆ܞ I{Gž6)~lH@F^;N.2hjr>O,+ff ώW CAul32]8uɯ`b9LYn?UAx9)bt(æsi@ѯM|Kz?+]_Pa͋"S)IZ'sg[޺US?09}]*ߴg)MǴ& wלES~-?,qv$: \z3_uy=O&/[R}Ը}XeE<ЧD'Z`$oz S߸]i'3+&/m/MMµ)3nYXe-}voUW_j*]}c墕٭G-Nk T=kO|^FQt6 .BDX/B 37:EҌclU^TIy:w((2OF\cR`4sg݀dh4}\3e^,כ;䳔s?wq, NI*qj G.]k5,׭8r% pl5Q2Ʈ8 תMoD%4(7!YDwu[ 5u{Q~k9/aU#&4ǨUN`sZn[juW!aZgLȶ]Yy_BO3NC³9 ZR~U Ȇ}1/aGeDzKfI?o ?9< ZiȔstOEŸqc?驾Fʣ hۗU9I&7Q/(`/,M˖Sql4-9s?x=ɧ%Ÿ#,~{\ $;]ͯG-1ղ24*B춚oպ'dS͍_A!ۣ<=0yP9@TCjLT6=>m` *s>p yE.0ҴⅪ8Cm97#Ҹ5&ǐ7, `7[B{9 *};oZ'+yA|b͈Zei.,IsGbI^I-fdlҚ:gR${IBVM*wKmT-&=rZǣٽѫLL^P~j0K"u ڈA&vFx3x~`YTnT]./R=24rrt ]|u- 2n$=E: @Cl&A3ObuF1' 9Yjշc'q)@̐C(#GxO nOYgj1~S[ۗw))x5x  f3 A*_eG$l d}o..tjdD,EbdG/DYǡ {5U![{|i=ŒT=Д׺>rshX>vo|oOv!:_ۂqgMM04}.8.f*,f*S8x64T٦:l&,[Ju=\td=\fmlQ++1h6YIiZqEs_,МbFszF0yK]4a 6{2~\XJ%!h,u?akB2j͎eYD~=^>rr*Fy-"u{< k2M찆6F܌!&+WZ OF$mapʀ>V"qa>[D,YNk!jQhsgOoWO6.d@!,ostHr c.zEpEݨ^]$Gt[B,,cmqfؓF&hFuq,4r+V^-6K]q'N_-2NX"-ʜ&ZIMCYG`y]65 ?~Y"_*ԥQ;,KGI<9V]҉p?n?rf QJ,,?‰qw"=f_ͮY7!Bs]yZ ,*h__QPb$LbF(q U@iЀr% 1j,b(պ\{\'ȫxbjdoXJ{Mz|A a3Z>u73N7=!с[U7̛IdBzd cZF-z0jD/Q eVzKzԞPr=џ>+ZPјjҙgS _^ݓH՘>ڹjcL޶ŅeU}Mm0XA"Je ;YBTEyrfH=C05J``?(U5AŤpy}ӟ 0k8MAkV>&88ݎ$5dZaE>ĬMjZyTkNo~TL 
,9L+:?ȋCGZoaK cD&"~ɢ6`0(D 0tFs"m>aɖ:G؋S2ca:2"LH"щn>(3;BXK "P9u¶.cWJT!3bU-߿~SǎQ~z}bD󔷞Mnb@ bQHTQS0[: 2T69_~g= re^C*wst_+x*^'OVWPTVvEA}&szYGA@4 x^z|>pVZ)^/ܙo; TnjjREּH6!:>߭S=uxRX ,~Y7zxwuQvcT14V%vD55~I{ywEbnIdjL;^7o@M;\U 7G{c:@(mީdD+|!(3|E+Y۪((7C1c͊rY4%ʴDgUæ/81 q=DZvbz~yɷQ\_%DUq*cx=d0>0C=" )`wғ;HT*)4` 'vyΔ`n??OQ,'oaجo=I=_ U@pIz#b 4#ND Siڗ+5ȦXiWeK+镤>Ò4[} It["ű=M=Vl B Ma\e7ޚ\ÿ9g/< 5 )^k ,:TY-#~ _(UH94!4I9E(ϥ(`h:ү,o|BulSmu'NkR2r:R88Aݾwv ,ۓ\9CYu OZc#$Go"O0#-stQ>s* j|XݡAw&^/ c~l-n ሻpݘul㫼H}`v%8a"*ԅ?؛!#rN"Vћw5t3]),9H&o:1~2^r#LIfˮٸshE\<_`=/7/20*>X%^ih:VRf\ڣd{\d^ \;Dp10呂j_ TnM#(1O'͞/rR}YE+ErgBcSE%yTE );- Q9E̓?B/wO 5c^N-N7!)sϬ Q ~thһ+ m]琯S9fV񫒯g>2;= )?_VNb2l:BzV=&&WV̓ՑfӐfDVc+5?"`Fܔ}En_ B:|.eFAa[ ~sNkglmtqοd6x,q&#3_ۧ\5K|K$3`2DOgFT"k9+BBUi3}̀WNY؍ fhom8aڕ˴&(svX`&v zH"un`Z'ZaRyN2 _Vɭ7IV; | O -A8\Ctj $O0]bU`|M }5UvcЩC'LҊX2PvcϞDD:lY^G"v?|׆RhP 0CvKߕOUu9#Kҽ`?w.1P:!t v;h }R,xHV$0A^&B%R:G{ղG77]S~EތcK Dc~5qv75gh9UNYF1@Zn1ak'bc&OGNWSiG\ }L>ٸICfZL=aHE/ ͺ`|UQ~`* SPU |C+JAh N^coJ6k ieXX‰B Mzr"?&[^,^ j!EkGqQ'~p N@w3[AB a$-b,CٗރN.@h$TagClWȓ-b6^_Xs38q''Uڹz9<* }Be:{7drK=L֪]HmTuVӺX=utE_.F%!b|t8fBiFZVZf{$ත\ @z‡ERy`c9z(Ky|.n 䫙 oɂV[]Z4c1FpКy':&cEzuA+/KnWmBuZy\;Y@LKGKC)0K GГV91;c{xT'R^&qLwNjRj-bRe H';VOEh=2#*h"GI{p5qD`%OVI rbV ^CZi>S|%˴ #XR+~3IѶZ/ഉRl]uS VϰXR;tKwd גKB{w޸Py܍@:P83Ȋ3r`^ YiKį<2"e.J51^Ii}궟%)9VeΡN`@d"JHAV^4Ԭ8Z#2Kr*haW%nbh.uʆkUfCWp _Mc\2Zћ3!OCX Fb.NEkЋJeIbdY!ZZšD=p$6ϵ'wˉnj'd˫#5# 1Al ma8Q]ᅨ,{@TZ*~3?4\).vWjt,7ڳ aF-׹TUK_7\L鵩A"UTFBًM+w @9NB5o/u:M .,;2Qpq):qx9ȿC-p\.ykk\uY~h~Iwr}dSёtMا&MT#Oi](&4{9:kYWZ3Gw¿`CY 벏DֈP ##Ic|nmْ P.&Cf,&gEZF K`]ZWiUG!ӢSeڸ8'w^vX?0pۃ Dr/t'yQ}FT`V JUy ɎM_r^B H_&_PSJcaPAc鴪"m޴Vƾq.ɰnث$9D/U9ʊGFQɚ"iց2K.`SlO%IwE4aq^4=j>:\km{>Kb,C@cqֽb65ɽ0d{#;lgFA46U[&hČ:(< g(gM\}3#3U~7ɕq*/Mmؐsӵ)<[fԷLcꬭ4 -:O&`X 0, X.5ws&q4Oiߜ;NUhHq]pSǔ{ ,eIr+竆T΃R+\a$}TmDf"G!PmO++[b:. u!k-kF VZ?HI4S K04N,-@rsV +Li^Cpzg9ɦ2pUpcd=v/^(NCb V#}0)M&fE_(n)zҧHG. 
iV[tm)aIS<׵h-I3%bjo k`HE-yaII~h^Ά>"%2ZCB<; O$^mP*`\<}%jS03ϴb-\"WjjOW}#$%'<K2Q'ָ33\eYI#D24_JH֮Mr$ӧsv9Eݧf1>rԀ]n=ecN.ۈcI*A34[=p2Ӏ!,cTes;Ry@ z11ϫU|Qv~ H?;>\'*Ĥ:3RsPz稢xms&q;fmTV㙟Y5+ g [16GҰxByzM1҄$PFXR;$X)CPQ*cg~lat\OW],E [V:%},\4d[<'\1 ol^#yԴ-qC+he3(FeoQZU^ḛ\j;ai ' 9dC\q%D]s7}Qlx{^ 7rv(eNޤ _bZS:4UBa_QJ.!2XO=esq_n@xC?O5@]#J?K{m,O}ǫT=X75H :Y澆A4?+m,c B7Z ;ǭ+Y:=8, IwEl_ARwG츫6L0sZe)NK^߉J{N,՘$ZK$c 7YHȇi!AQ,j">B]Z ܻ"jX|s[ -{σ,H$|zwHqcT P+O_V-EQ \QL+JْE5qEFh6hYKѷO@`KkpBD| "|(i˙g*[ ѥy1FJ&\7cՈS&Du9| . Sݲgziny\ښUUo;Ď+aca;nO+ݜpd1;aSs^Oc~x;Ey9}u)Gd/iKoͷI-rw/Lj1;n{r߿Ըx=A!vF+ Qvt0"`b]nXewo1$ A: (̀N*?wĞ Cy=f|w)۞30V+i&IﲘQ!M`TcxpUk 4/_ia g0s&ŕǠO_-?@-?qdK3F&Oe-''u0:ɼD<'BZQ)2޻l66F0޵fJ"`p.>~$/UGBL(&PY,@2j.-Dӧ̽tVfmu~XGi-߿,]"?X|[X8#X_-؋-I]n;@ȀTi@P&j _š~/c}VX/¬0 *,t" Mu4[/Y]X^@s Sonw؋&'uPUbI(_~u^ZJ2+ڗ,fO (XS|7g%^?Vp{ԵVh}&%Ol$̋"~eEYHT1k-!prd|1R(F8]_ VݨBmNuIJR>0J|= s%RVÚaG@B'T&X^ yNJ1E[>g**N3pnryQ8)78E鲗B;ѭ~)_@9u-=CVVD`sE{A"xb$Jj*?@z0u6sbp]9JVckd UoL<􌢦3+V',uIV!w3Q endstream endobj 434 0 obj << /Length1 2022 /Length2 14519 /Length3 0 /Length 15792 /Filter /FlateDecode >> stream xڵeT5 %Xwҍ{pwwwww} KgU=FS+  M@@Gzf& bft40001SPۛ8Z"&<NGs{= n4w 2&n&jJ% Kz= ^p=,l{)wM4%B>KD2̞zQ_Do \n/`MM:ցdn xO6[t$:v咆)9W}Y$ȍNEzťR7`gt!T- N\e={L%-|ja^~)r3KbХcJ{5 ($2֢`vNvĤl/pyg9o0Yz ؽmx.[" JMחILKwwU7R!,UvL>|}gj۝]#:9b"C灶YS6o.EAP__XSJ2vP~)3@%C (۵գ6!8cJvIPeVeE"#'zsy9WGOy~+ p8NvitJmk!ez"+}+Ť/׶ _+CHZ4B+6EhH)y3OCKAvYj਋8~ަ6υwB(3gvn,q1GlC~:`+\ 5\A4U+~; eS-|kG~3pJ>f2K5-> S8atKCUJGS_3=|>Nu~$ ~q:@;CTԌezY4l3q@W/m~nVy+0Rܯ38KGdQ>Jr˜0AXO ^y}lino5,6KV:Y+_kyXdm]|3+PBm__:@otpV[U6vPON~FHōF+@ zȺ 3omeIأ)ul58ٕJ)?"Ԋg.P8%q jG o5V*l*}nm9`G'Au\hΘ_g\[|Z|L̂p39WGdd4c[xX*8 Gr`NOU_ne}sHI 8TŌ@=uryQ(;9RܷdQS o?f A5$j^'^D5 )h]]6JR%2J[*6=iҋ хnވ&l' XR7ܘ6W_WYhh ;範W;ic S~|cS88%pVK m:[D}Xa0'`9#P#rk!U62]V)#?ɋ0~ ѢU Xi n3~~!Z '5M|7qʧa)<({c!)ׄykYf iĒUd%#f2Ƃ<6Tvk><G2uA%*"iId1 [K,;Vľdq{-_4w! 
* eIU]>Y?>/JIÆtz.._yvM*V}sI[7[P_a8D' !~Nn+()ebGݥUk֗a@}F䱍.qSڽo_=iFB1I c24(WF;X@, YWrC`̽힋*GHy b [`8P2pGI>cXtޖ4U )8r&{Z2 qhb )W9JْH+4ч1]% d˯4V-=j 0 zL,܈w!8|[1|1 E~r%W"ý*lr2w{k [XQtFœF0s ?+`D9n #< J| VxfPbH$һLIN-X$Ri-)+ܩ2Cq:hΓ+v/u}S^RKMP&l0'L2[R'U XZ̐4nM5̝-L/(r%6D~"f\O^ ʁ߮ azRsWxMk;Zdz'І>ֺ{x~ mV*JCNoaUqВa=S4Y2i(qcᕩ)hI(YFN Hv2|%l.#z!1]&oXj6tQB8S-VXYwt<d[=?{5%*)I$)x bv-cBɦ|IN)OD=rY7$fĺ؇m6\SZl5Q1'B&j(! آ_ƹZlYUi#S'x Z;=.6WWfkO|<5yaonrc;IH?4EUQU=Ͷ-ywB>t@0V\]1<. 'N^D$j~93w?_ImiFP5 `oƓK{'E*8B̽Bp <}|[E"9?<Ư RVL [-$Iwf[oSoY 8 )K>B3M2 NJkLpUZ.>,a?=}4?GfB150*@D)y|?tܒwp}/|l[MTE]YX`]F+{';XGUuewn~fxBÕ/ Dz1+V65{α굂I? q̬ SY;O@5R9szWSxCp {ѩX+ZvL4Nߏh*|(P` ejlϒ8rGZuJ>Pp1)Hp}z#6bY$^(Bp֚,:~VTea׋Lj%]-O' !cO~s*F+ud'4vC^zniuQ9?(j2/e1/RL3^_{"y9W`֧\Ve8ɾ|ξ"cyYcgTrPEFt9 70p慖 ̾4SJ %3¼+""Or/P6̦_2_lt-NH*fuL<>3&ф(!ouYJ͸.M}[̰Y}{|[(_gxXɘISΊ .(v'eՍ |=  Nm;d*ye7hKұjFh/hXLEOs. ]Tq.L-XO1ݕFEwVab%d YFXš 0)mBsuv M!?HAZa}QxE66[I6YZlǸ&BE(tC|g<#0Y(BfPMFDkΡJAϖ*D[Lnϰ4^|ɮ{B.9Yl(QvxOZzA0w!q7Ԣđ :NGYwZkuQ Ӱ+ \{lɿb+M<ڬGށr(&K@D23~hƖd/x6\ҵ/i]I_Q1JϑCۗ&OL`WC5`Q)+aVw#kKeFMJ! !*1h 7kqdqB8H#0n"bL}l`cWmPqA@,Zŏ"yx⋎tko e;9]f_LOh88ZbDP&6.eob/>E$%f?`y@ hl겞Mߞ1[$e =,dc4P5hj7]tgr=Yv7!Qv&Iim@]0}NA݆A.*E/::d9Y]:+h gmxLSoȒp'cuT6̡ |oaWsI6C>]aLVv5L?BӬfwCj#&캮7&!mTG`EZ(L2kŝV︺E#o:Z|UZm @Plv>DSIsruZןH>0V&/&(?* L9m5C4L鼝Buܾ;yZMN$1?T*R(-@?1;Jsp#Ua5x0auXvcw|\cO ³(wʾlq!-)#:ZD~.ې]ނ=:㱺8[ёZ~&<,&7^P#_9ܷJY˱ٶM>r_D(sp ':5wǟŁ@AC㷸6P@M Y3;G! Z#Ix\%r$tuΟiN2* #" q)~JBRZR];0 Uw'Y0VByX䝙b=?:B^l@"^#_^ bV '#u!cw4XPәW]OO]x \|Z̀J7 lt=t!CI '9!|Ð'ѐRv^\0x@j !uԽPS&v[d3kV` C7$LOD<63CU矎IF^ï5uR*@ݥ [SsNYm#:Ko)d`]w<0pZ-=`p{ Sv}\QB_2{ )z4ƞ6r7yG /U/6rZ!9Sr9aFL;DZMu!|rIetafer ĠAkY#V+TV^r>T)B;Na*7~ ^a#jfJvSZ,[/=^:!?778׭f2G5e8KZfY.6=e8;W)7z@jz@+gM^669,MGhrR7S9IgH+y}Vhyk!.Z'GIֹ M ZR@?-buqteڷz&kQnCbj: w U1l;v )\4s~qSy͗i S"r;$oUiʝ>.&‚Կԅ[6()b \~Gj`%>o?jRk3$KP )}y\%ӯf7?NʓOr2 ,ُocn-h 3z_D-퓮.\1RAúb봈̦s֎"hC >W k6OL"v1.*|{.E+M g٧ӬyY9D;|(M6_2y}[> ~t(61`pU ن{QxwýK8=K% ;ɖ+9dA[>" :\ik$EQؙRus#:,-t%O6y,:l'OCk1GEg5cQ Rneej|Hhݱ\,?3 f[nQ烜{຾+tg8v'cY4};JZ;0-&wBu!8ձ_lƒ%T5U_m,u 襆37VT{(BE9FRVl! 
"^y_؎wFE. |tJǔ>Ff ~b5 FW  bf.Unɛj{_eٗS43!D_:FhёXO<3ezA4haOo>Kl4,gLB3;ҩCȋ+Rn@U^Р5w4'Mh&BgU\u.~@Է3,<@8pUbmġ}1%Y#Al +w8U%eʀv+&4^4etP!;Pp6׀c:N[}p`Q />ÏR":O5gݫ\UC$ &5_%VMSmԥ_XGG*A@ڡMTI;N?hgft> zF>;EC[KKcN˦@yMظzZh+'`'rovy{Ng)pB0QRƥzߊ 0WݮP$W#脕WoSg_YҎ*PYi0AIy%u3$t!Xy/o>o.fjyu$kJᵒ7i&nj ,>uT6q%E +Ql ǂقڦPernszh=s݇Ҡ=Kg;+Â.V>; 9۝s lAG\#mBЍ^|m!pLeMv*:gudNIi wsg!̬J.`cvFO9z!.B"31%(w~XerLK#x/! MN$8#+ f)v|lh72?GV/IrOzL'ndkySf=t+-UPuDy~/$&33;S;~m!a3zhkZs+%ۻf7&g$H>Ao^$(v8p/w[A9`H$avacoHer=p9!p}BWݾl&,wV:Xn{**pՔlt]9:]ˋr56Nӆ%5VN>H~VXbw_KsÑH}9vN ?0͂FuIp2Qv-Zl;>֤%dP.Z66$ǿB֫,KxvN)fd i.e䱋蘎`tf Gq~`.F^%+ȹ4EQi`Ća&W"B0CwU} os̚)dw`>jՑpWC}̨1?IĠ,¿3ϠJ5ǎ4J ?H6s'i~xRGً,$o^%*A:k$WM/tKLAb?XL:݅%}̣j…]pgߜSۯB6<=P, իEFb$@v׻C{|\ljTbao",S ly+$QG SKsw4Ԍ]4]$p ċ=}^d@7|h0qZll$Zss4Dw472߮B -T(94m꬇Z]7gKHs~bPdL:?"ʊP)v@!";nj#3^ܬS-ZG U`Ğ PsO_KpzJѼŭY_qukUj`{l9]K(E{NԤ;ɿ5wG 1 Ɍ,Wˡڷ,1TdEdB$[gIL:!PJu~m$E-SqwpprlmÂ#\Oƺ\nb$F38iuN&AN,詈~TbUYίahҕ+nfd3 @-23K w V)U7k3ӧyCqwr+4^LDy2aQJF>/4X% ϘѨT@׎}4S99_2$B*s`@h|[ZP{GA,v>0e7jbJӊM,rAkh13 1'HrsZW :D '#p!'U1ˠRIY/\()Z()]0>,FNq;lđۜ襬96q(f:(\}93np݂ UѮ#O"ZWvtn1'Fnm{`Ű_ꊭ _DBqgt—r͉+MàrDaB@ɷrhx V!(ո~ÐnT-9L"o>aA=A v t#R"| _ή J+;g ZzŋpУv#O#^/qp( E!'j3`G[~]3eun%k ߮6_؏I#SwN]cT߃D&1|g?]V6e%QVuv1f蜬8V=\%U7#׆1yA@ 9gO9"\mǢ)bH!q&!_IL"; !*~9`":#OtH2'J >H +QTS}FZI~a&xҰchh϶>rQe?&lG>#ÆyE"ho } ',v9/p;9~lغ 37*Jo%xpXMo~D *aw=j煊5`bQE",NKᔙZ">mhB@ /1z2X<=|ͺi<IBADү8&^94C1p[ #9P Qga{d[t廯Rβ&t y',B鍿=Meu 7O9ϲI|? -v ʡIfܰ?KN{t^">%!> stream x[[W~ftZ!@.BRy00&ImI챱[ھt%5#i_}0U!4+R߲[B1|B y!LU( |Faх4΀q8X08X^0ʂ s.``&=`/V9'  t8 YXt:B nKV N:V=YB2pGB3RRbX**[(bʠ#P(K"@iHs|4  xH -56[2UK`mg \s Ԭ$)ֶ'+R1AQ8ũoe! [9 ]Y0֢c"t%6=؛1H{tH|hʘ0& S0I G3&q! D@ ךjY,60=AIb Κ4`sσ1CerN60l~[CW$k݆r}i1kCuHl^uw&%zO _yt͛>함I.Wj*AA%8+0i#m ۼESN2^vNFř Bh+SP]6 ֆM ܌K+Mpq1l&3%*N)1|):@V^IGbXU^<x\X/U !vDST%kN(oW@CA&yBE((ݱzoeVp(ቌ\}ҵ < G@IF]a!gIr+Z)7?J;(q fJG4 JB e s'eDhTBۢ"QYb +}CjT 5rQkxhXݙtR]a?ז; &<`d4( Z\*CO2IXJг%mIvhiF:&>m}襭jC * YHLr0mJI*_VxXhC5}i $)0f,TvA ߚ9B;. 
o.f6 `KҐR[ (GvTVyuA~z?oZ&31ȟi#EF7Z:_Rk\#d> , 7H47h+5_\ 4 .759\p Eڔ#Ph^w}j-o镯E?͞Ӌv6Q#rؿ2.~zDgxzQft+ou$64FKz?\<] XvݟֳI+ʓz2.ݤ*尼)GOѰ0 RNi9|.gzRqyW~.|T@ %Io{G 6.[ЉPjn:$uLf@a`k +QF@np)@0 x V / >@/6 |E'ԢG a2 * T C/E>^{9OIzG=EN{õ >LJo嫓MĪG z;E2tD˱r V̭Š1P,p<|NQH=c21-~_!/xLNt NWⴋLWCQ]kg1e /]-#ޞv' >O)Z:ŸK!)r]=l?וߣ+J]ovz~|dL;^n̰-fQNZUB:Yn jIC iv鯴 v?l]X֟/ ЖuIr{]|1 ]YMvx7-?R,G'V^OƙMVIvK2:ԦbUyۦ6k9}wz|@G㗳E9^D3P%\b*Wk̬pWc<{Tl0:\|֟*r<Dg=dr(!ReLwn3dfUr|vh{;^M4VŒbf_n{0#U'9s{dkέ2yq5`~ݜ~VxV_Cg: 8wfSnvWOwn6%k%*? 9_]{2bixlpmΟ:5cw]/9|PL'a_pk'BU1vѳvi ~k7'z"]ԗXE{O!_ Ъ+ Wba]p,,1V8$™Ged;>. ܄5XvofB<1`KOKz5^'!R=rMVo6 iJxnFPŔAC;KXOkY(Mor0(R }ُ@W .gHs7;3:V=YL, f3X"|(dˠL6B5EٖpslL'muѿ(X4Ap]L,4g{Vr-zs%SKJ织=%3Ȓ|- ,qQ@V@hS!=OsOmT@{^(K"~HhSU!SѤKD[޾~(R\4E!$3I]N$[Ko&N+%l"qx4g#=\OBdbpI5!]!?QeUto?€@ȃm÷fxkKhA2,Qtk qDFߢyd:5`vc}Qf/fjW՟gYjc:[M? Lx]#J/=LTDcl*1]V;_hi۱Zf6kWLEKZZ<-hinZqXZYoUnҹeYfRȲU^u,E惪Ky*iI׵jVKv$iiDJS.FkiU-Eqi]1.kcgT2="6E ]%պ> endobj 439 0 obj << /Type /ObjStm /N 17 /First 145 /Length 918 /Filter /FlateDecode >> stream xڅVɮ8+xh,R\$ L.Af,Amk"LTkqh~9Q*v7),g)2e\eX9EU0% 3*YKq m#?$HK(@R1s>'MmF|" > n\Cu oMLڟH*wcUg;N[*OAX%D7<!1ͪB7Fь#yb1:Qcb1y"5?&7 ib5c51Ȧ:VL{-qmf: X2(Kgd5 9Wn%ڕϕ~H+>7LE񦝅|vCz`+I9_{rhѴf*_qo9TW3ˡSOi|^[gXoeK_6a7/12W=?gí݇_ endstream endobj 457 0 obj << /Type /XRef /Index [0 458] /Size 458 /W [1 3 1] /Root 455 0 R /Info 456 0 R /ID [ ] /Length 1059 /Filter /FlateDecode >> stream x%oe{@9b[Ji)iC)-"Rhb0&...NNag-*j 4ܮ&]˕;=}oK)ߪD9G Ph l`3m`;:@'`'`}`?8C08^G%p R:0l9!`̂xK`x< u 61DьhA'$^o6a.63S,O⊋` `yq63΃ ".`Ei2*8m]7MV6Xw*Xwxܧg)JO_~!`OXFOpM&\ ׄkb`515pM&\ ׄk5pM((+QOX|%p\J' zB=aE=wD``Iؤ0 5eh>ePY%k*Wy] |-OsIo&Zgf<(DܺmsTheC+/ReK_agH!E"|-僵-Q>& 8RF3sJs9c1kv -f_b5\E@-M6hA6l6d;@:$ @v,Ӛ}  hM?S"_41q~n| _jy56gu= 50% ## - number of R processes by current user ## which match $FILEDIR/jobs in the cmd call of R ## ## list-jobs FILEDIR ## Return the PIDs of running R jobs operating on $FILEDIR/jobs. echo "[bt] --BOF--" CMD="$1"; shift export LC_ALL=C ### Avoid any localization issues. 
shopt -s nocasematch ### Case insensitive regular expressions case $CMD in number-of-cpus) if [[ `uname` =~ "Linux" ]]; then NCPU=`cat /proc/cpuinfo | grep '^processor' | wc -l` else ## darwin NCPU=`sysctl -n hw.ncpu` fi echo "[bt]" $NCPU ;; start-job) Rscript -e "batchtools::doJobCollection('$1')" > "$2" 2>&1 & echo "[bt]" $! ;; kill-job) kill -TERM $1 > /dev/null 2> /dev/null sleep 1 kill -KILL $1 > /dev/null 2> /dev/null exit 0 ;; status) # remove everyting till load average(s), then delete commas LOAD=$(uptime | awk '{gsub(/.*:/,""); {gsub(/,/,"")}; print $1}') JOBDIR="$1/jobs" # print 3 columns for all processes # use ww for unlimited width in ps for command output # we count all R procs, all R50, and all where JOBDIR was in the call args ps -e -ww -o pcpu= -o ucomm= -o command= | \ awk -v j=$JOBDIR -v sysload=$LOAD ' BEGIN {rprocs=0;rprocs_50=0;njobs=0} $2 != "R" {next} {rprocs++} $1 > 50.0 {rprocs_50++} $0 ~ j {njobs++} END {print "[bt] " sysload " " rprocs " " rprocs_50 " " njobs}' ;; list-jobs) JOBDIR="$1/jobs" ps -e -ww -o pid= -o ucomm= -o command= | awk -v j=$JOBDIR '$2 == "R" && $0 ~ j { print "[bt] " $1 }' ;; *) esac echo "[bt] --EOF--" batchtools/inst/CITATION0000644000176200001440000000332013606041641014525 0ustar liggesusers## -*- mode: r -*- citHeader("To cite BatchJobs, BatchExperiments or batchtools in publications use:") citEntry(entry = "Article", title = "batchtools: Tools for R to work on batch systems", author = personList(as.person("Michel Lang"), as.person("Bernd Bischl"), as.person("Dirk Surmann")), journal = "The Journal of Open Source Software", year = "2017", month = "feb", number = "10", doi = "10.21105/joss.00135", url = "https://doi.org/10.21105/joss.00135", textVersion = paste("Michel Lang, Bernd Bischl, Dirk Surmann (2017).", "batchtools: Tools for R to work on batch systems.", "The Journal of Open Source Software, 2(10).", "URL https://doi.org/10.21105/joss.00135.") ) citEntry(entry = "Article", title = "{BatchJobs} and 
{BatchExperiments}: Abstraction Mechanisms for Using {R} in Batch Environments", author = personList(as.person("Bernd Bischl"), as.person("Michel Lang"), as.person("Olaf Mersmann"), as.person("J{\\\"o}rg Rahnenf{\\\"u}hrer"), as.person("Claus Weihs")), journal = "Journal of Statistical Software", year = "2015", volume = "64", number = "11", pages = "1--25", url = "http://www.jstatsoft.org/v64/i11/", textVersion = paste("Bernd Bischl, Michel Lang, Olaf Mersmann, Joerg Rahnenfuehrer, Claus Weihs (2015).", "BatchJobs and BatchExperiments: Abstraction Mechanisms for Using R in Batch Environments.", "Journal of Statistical Software, 64(11), 1-25.", "URL http://www.jstatsoft.org/v64/i11/.") )