snowfall/0000755000175100001440000000000014530647156012130 5ustar hornikuserssnowfall/NAMESPACE0000644000175100001440000000250014261002026013322 0ustar hornikusersexport( "sfInit", "sfStop", "sfParallel", "sfNodes", "sfCpus", "sfType", "sfIsRunning", ## New 1.83 "sfGetCluster", "sfSession", "sfSocketHosts", ## New 1.6 "sfSetMaxCPUs", "sfLibrary", "sfSource", "sfExport", "sfExportAll", "sfRemove", "sfRemoveAll", "sfCat", ## Snow wrappers "sfClusterMap", ## Currently not implemented. "sfClusterApply", "sfClusterApplyLB", ## snowfall addition: intermediate save of results (with auto-restore) "sfClusterApplySR", "sfRestore", "sfLapply", "sfSapply", "sfApply", "sfRapply", ## Currently not implemented. "sfCapply", ## Currently not implemented. "sfMM", "sfClusterSplit", "sfClusterCall", "sfClusterEval", "sfClusterEvalQ", "sfClusterSetupRNG", "sfClusterSetupRNGstream", "sfClusterSetupSPRNG", ## Unit tests "sfTest" ) import("snow") importFrom("stats", "na.omit", "runif") importFrom("utils", "assignInNamespace", "data", "getFromNamespace", "packageDescription") snowfall/data/0000755000175100001440000000000012254301027013022 5ustar hornikuserssnowfall/data/test.rda0000644000175100001440000002311114530626460014501 0ustar hornikusersBZh91AY&SYh@|F>?߿+\D@hiMW:{{|@ﻢ@( yzWm!=Q!T v@uKDhh &MMSTSD'fT&SMO2G@2hiz4h2=M L#ASFOSSL&h4CF2d4hiCLL4JdjxJ{S h=A=@i d00@hѡC@dhd"M3E6)GL@ h12 hhhdh "IP0 2 4i 2hh4@42d4h  @ &L4h&SɓFSI#F)➧ƣdFFPh1Izc-1q89?9{w즃Ex)IhRC*vJ ;)5!m1d3 ѝ%˰)cfj]1ABLţDV3Fդ*&6lf19U;mH1ga56PU[$ӲASmA:$:s`ؘسjlҚ z=aNIQ8,P&/NJ 2DW*]I @Er I*bl%`""# /J(7'YZ_3d61~ Ta> M()*)()b(*QST<*(6SLE4Ƀ+5<)ۨŇ6Pa)' h.>;RPok4+6cd/ yo~弯d:CT['")+}q{ gw}dN9آJ(=x܌=(*&6dβlCmgKYLr LAoE̓K望_87RkKx(.: 1YCK+9P]%,! FJd mc~AGKC Vt:T_#$˖ߨДugI*"M4)boc7f;-F-TP5w0i{9P0PrZs\˙_Rj<񀋢)E8qSXJ낃úF{.;|v.+=.9d@|@IQc=` X!dJW8 , s䲝YN@`efCIJeL(B BPp.cS8K YA&Ie ДLDQ2d45$%AA !D@B *g8'(-("89@布MP KP\X-W',4 b!%;{x.=_;s6.EOlZ]VthK#h輀]ޞ?;#Q*K Չ`?k Ujb.i.bF#&.#r0X:?߅{W1=PYF" G&O_Kf37V0TaRl ? 
$;]\Y=OK 6"$43~ߘqqr5U}eBU-%l$cur8He9Mc (J%=:(w6ǖhSa W(,3o>!$}PW<5FЁGu 86C|ύ bĥ%+z\rXy"*AUfEHCD& %,T.O4(1TOߎ&" "b"*j*( "&J `t HR@RD!2(CR*0 H LE#DD 2 MTL12 !2,ETԋS)RDb Dz)0@T*|'4xA@-HDsPBR)@PP0 i^J, T*A,!N !R ^!`[A.BxP 0"vYĀ" q܌ X!HPh_-֮廱&>=QkkC ӎ7DXJ+SyX7#&%pqa]I8ˤ`  =im)Lʡ7$6lZGde6Uh'²3y)cEҕLb Bqp֥L.σmQ .%TצDl7ҢcV9JS1m2  1Eu((Ŕ&X,|zCag0;p#r<*EТDI[U_N"r ,e)4I7]DGz"ifRS\u%)X03˗I.q=*ˮS;+?$Тh^轟Fi*u)2Dv¨*6L"+AQn Z5 tt74c\1nvZ4h0J:QcbPy̗Q4,L2Q^I¥V`+U~U(Ә.y-e \a)dQPlF} <my,Ù%N%UUUI$I$EtQkw2= tZE+QֻF8DrH ?-NNO:&uPMuQlƆ)_^ć()R)'EY) , 6I37*p*L̸\PqȂ 5i#abD,dnոбفbFfES565..^IjJbdTkAl(`n52(Pɚ7 pbk&Pfr00SQFglh^hT#`ʒ$ YJFgIn LTP2f#115 d2 qylDyb e8XD#  ##YS<13182/6\v>~ˈ$n&n3.1(PybċB3CHHȸ0&\pʗ 24,jfB L ʅ409D$Jn9efЂD3M,^T145(^o,hhn*j7;MFGeCXs32 E Kq2/(\Am2(m *HbX4(p[%)HfF& 4(T#f|5FX,Ife ̱̃x,AbىqIMeFoy#-ACyA#͆҅jL(\]RfYБm5 F"*1>7F07 ݸydqiqhb^k.(X^p\+(q"Q+ 2e J&j12.^0f2 &PAL2f…C1MeD1hhl3,X&h3Bf$,;T_qyîۧ K|~Ox$n/<1X<1l4F1(8jtIABKWɞPI*z2W;CHZ$%Av8h&Exqfe5\# |?x D1K~ѐH=D$*P$ȤxC+)832uu )FhV) 2P\|)]?@  GI$rJK"CuLHp>P$5I D fD2$ʘI,7gM&0; ܽ~(#xSpýBFbv] $h)|ISx܃g܃p\䝯ig\wPkd%!8&'dJ5Ha9=1VJ1WoϨ <1m&CEv BMΙ|nd-XcwrGdB~97":INgx:I:nj5|q.&y ȼP5P2ƽe慊,Ay2EMǵ(Y*钒MN$+BiJSce(n5bl=n9nvAm_CN7YnAv'<}.!\OGrP-f0acrp1hEHEQ*@".V "ӽ,dPc*\Lahm,!wF`Px};!8FVl6w+| R`aQCH4б &b|| g;϶@IAyQLgO)%Bt%H:.g7ʛKRdJm2]P& m"Ģ"?BrbΌLT  d @r\c3 dIp xԴE9l aJCfr">M6 0,h3abӘă"&FD yđB z)Cs=36TqJ/y 0< 帨Lu VVS/&Y+e) ||@N-9DD0 /-N]vj-Ǫ =d"D0XQT- V*CXpz9"ͥ/kκ6[t=:RmniϽ? mwcKH(?E"xfn֩RO3CAt7$vPp)d:a״]Jѓ #[J֏ɸY<sy(TXqbdzł 26&LI "eiÇ2:PϢJl6]94ˡ\aI`S,'SϽ5" -Ӎ|ϯ לT8&k9*@$/&ۑ;Ȉ]\&7gw`vy>ͺF^g}ܞEKU0ʟ 2W@H.6ydI"'.&~ |[ o$g&sɑY|c (l!Rpʶ3 vȂb'Q0ؘk ͯu b3\g QAw!\DR I)/w>.>M" i$ *)Ppר <=| ,o^ ӎ_tH\zx_`BQU=2ӿҚJ*dDwnPmq I3fD.~8l3^eYP2-ܬilrPyO`řӋ !\[Zbm*E- wĐeG+kkкVHS^D7^FE5yH2@6%G6[zƵ9ad0' cHq7K;AQm^Wnl|vQ`C'n||{̅ T5|Rr֜-c'i(ron~fU413(`e!VB7-o#Kҿ\{BiQLӟ@=BTwDg j"8D))ZItNcx)m95FlDqs}צV^->I, cD(LrFKZBJ\%T&ČY[qQmᏍŗuZĻXik$qdcP:nG3JS$'T2:ɺ $d4\. 
cIxξ+Yz$u>Џ@鞯z*9l(ы[8(TK'$U!,~j9qvRD֏ZaS #<&qFMJ:»u4\y"Mh,RʍjeKP)U;5?05+k` 1RĨAQ(V1,82ܗԢCЀL8bFBuWmvoD`pI٩@g$/o:z^ȿ8),Z~zDǖj&~%8d{\;]tzͰj[RlC:Yw/5XYha*LBvp@v<|x%iw _R䠂ĥ$Rf1ACnNP;C_.ZP&hQgW堡vX@J ދ@X\"(3 d{tRnh2ii$9L3Z%)7$]+MU3#@ !Oe%G_::ɢBl5il "//N^Ջ2d{r͒;~{H57|H?dD#w~]Tm}>~u:mⳆCWݛ4C% ĎYl_~:`:uUv%48 |UĦ.ץtz ϸ)QQ1z*wgR%[em>|r,C}r$1/B@I SSI RH_>)Ph D()T @) 'O4KE M RR5:DNP4J%ER+JkgW(z>/ꊽUDPrIwbiTqY-[)><5:֞ze}&-*,)h Ӑγ$]JG]q8>oPyWm8*2 g;9 F*y/x~X[2뉥<9>TcR(3$FBU&@#+.DC1m Q:s9qŠz3$VITΙ<;[N+hp.6M.QѨQP=uDJs{l.I>FbQHR;b3}*5qL*ܧ(Հ&ZR! Q}& d;l{xJ zc :Pv*ͭHR6Lev v4I/sEIvG楉ooӶnX(z;Rd9w/6p* a =%UMZ<}Pι_nڂYeH)„Esnowfall/data/config.txt.gz0000644000175100001440000000012012254301027015440 0ustar hornikusersTs u v s T  !.`_?`3 P%7RAvZFsnowfall/man/0000755000175100001440000000000012254301027012664 5ustar hornikuserssnowfall/man/snowfall-b-init.Rd0000644000175100001440000002155212254300762016172 0ustar hornikusers\name{snowfall-init} \alias{snowfall-init} \alias{sfInit} \alias{sfStop} \alias{sfParallel} \alias{sfCpus} \alias{sfNodes} \alias{sfType} \alias{sfIsRunning} \alias{sfSocketHosts} \alias{sfGetCluster} \alias{sfSession} \alias{sfSetMaxCPUs} \title{Initialisation of cluster usage} \usage{ sfInit( parallel=NULL, cpus=NULL, type=NULL, socketHosts=NULL, restore=NULL, slaveOutfile=NULL, nostart=FALSE, useRscript=FALSE ) sfStop( nostop=FALSE ) sfParallel() sfIsRunning() sfCpus() sfNodes() sfGetCluster() sfType() sfSession() sfSocketHosts() sfSetMaxCPUs( number=32 ) } \arguments{ \item{parallel}{Logical determinating parallel or sequential execution. If not set values from commandline are taken.} \item{cpus}{Numerical amount of CPUs requested for the cluster. If not set, values from the commandline are taken.} \item{nostart}{Logical determinating if the basic cluster setup should be skipped. Needed for nested use of \pkg{snowfall} and usage in packages.} \item{type}{Type of cluster. Can be 'SOCK', 'MPI', 'PVM' or 'NWS'. 
Default is 'SOCK'.} \item{socketHosts}{Host list for socket clusters. Only needed for socketmode (SOCK) and if using more than one machines (if using only your local machine (localhost) no list is needed).} \item{restore}{Globally set the restore behavior in the call \code{sfClusterApplySR} to the given value.} \item{slaveOutfile}{Write R slave output to this file. Default: no output (Unix: \code{/dev/null}, Windows: \code{:nul}). If using sfCluster this argument has no function, as slave logs are defined using sfCluster.} \item{useRscript}{Change startup behavior (snow>0.3 needed): use shell scripts or R-script for startup (R-scripts beeing the new variant, but not working with sfCluster.} \item{nostop}{Same as noStart for ending.} \item{number}{Amount of maximum CPUs useable.} } \description{ Initialisation and organisation code to use \pkg{snowfall}. } \details{ \code{sfInit} initialisise the usage of the \pkg{snowfall} functions and - if running in parallel mode - setup the cluster and \pkg{snow}. If using \code{sfCluster} management tool, call this without arguments. If \code{sfInit} is called with arguments, these overwrite \code{sfCluster} settings. If running parallel, \code{sfInit} set up the cluster by calling \code{makeCluster} from \pkg{snow}. If using with \code{sfCluster}, the initialisation also contains management of lockfiles. If this function is called more than once and current cluster is yet running, \code{sfStop} is called automatically. Note that you should call \code{sfInit} before using any other function from \pkg{snowfall}, with the only exception \code{sfSetMaxCPUs}. If you do not call \code{sfInit} first, on calling any \pkg{snowfall} function \code{sfInit} is called without any parameters, which is equal to sequential mode in \pkg{snowfall} only mode or the settings from sfCluster if used with sfCluster. 
This also means, you cannot check if \code{sfInit} was called from within your own program, as any call to a function will initialize again. Therefore the function \code{sfIsRunning} gives you a logical if a cluster is running. Please note: this will not call \code{sfInit} and it also returns true if a previous running cluster was stopped via \code{sfStop} in the meantime. If you use \pkg{snowfall} in a package argument \code{nostart} is very handy if mainprogram uses \pkg{snowfall} as well. If set, cluster setup will be skipped and both parts (package and main program) use the same cluster. If you call \code{sfInit} more than one time in a program without explicit calling \code{sfStop}, stopping of the cluster will be executed automatically. If your R-environment does not cover required libraries, \code{sfInit} automatically switches to sequential mode (with a warning). Required libraries for parallel usage are \pkg{snow} and depending on argument \code{type} the libraries for the cluster mode (none for socket clusters, \pkg{Rmpi} for MPI clusters, \pkg{rpvm} for PVM clusters and \pkg{nws} for NetWorkSpaces). If using Socket or NetWorkSpaces, \code{socketHosts} can be used to specify the hosts you want to have your workers running. Basically this is a list, where any entry can be a plain character string with IP or hostname (depending on your DNS settings). Also for real heterogenous clusters for any host pathes are setable. Please look to the acccording \pkg{snow} documentation for details. If you are not giving an socketlist, a list with the required amount of CPUs on your local machine (localhost) is used. This would be the easiest way to use parallel computing on a single machine, like a laptop. Note there is limit on CPUs used in one program (which can be configured on package installation). The current limit are 32 CPUs. If you need a higher amount of CPUs, call \code{sfSetMaxCPUs} \emph{before} the first call to \code{sfInit}. 
The limit is set to prevent inadvertently request by single users affecting the cluster as a whole. Use \code{slaveOutfile} to define a file where to write the log files. The file location must be available on all nodes. Beware of taking a location on a shared network drive! Under *nix systems, most likely the directories \code{/tmp} and \code{/var/tmp} are not shared between the different machines. The default is no output file. If you are using \code{sfCluster} this argument have no meaning as the slave logs are always created in a location of \code{sfClusters} choice (depending on it's configuration). \code{sfStop} stop cluster. If running in parallel mode, the LAM/MPI cluster is shut down. \code{sfParallel}, \code{sfCpus} and \code{sfSession} grant access to the internal state of the currently used cluster. All three can be configured via commandline and especially with \code{sfCluster} as well, but given arguments in \code{sfInit} always overwrite values on commandline. The commandline options are \option{--parallel} (empty option. If missing, sequential mode is forced), \option{--cpus=X} (for nodes, where X is a numerical value) and \option{--session=X} (with X a string). \code{sfParallel} returns a logical if program is running in parallel/cluster-mode or sequential on a single processor. \code{sfCpus} returns the size of the cluster in CPUs (equals the CPUs which are useable). In sequential mode \code{sfCpus} returns one. \code{sfNodes} is a deprecated similar to \code{sfCpus}. \code{sfSession} returns a string with the session-identification. It is mainly important if used with the \code{sfCluster} tool. \code{sfGetCluster} gets the \pkg{snow}-cluster handler. Use for direct calling of \pkg{snow} functions. \code{sfType} returns the type of the current cluster backend (if used any). The value can be SOCK, MPI, PVM or NWS for parallel modes or "- sequential -" for sequential execution. 
\code{sfSocketHosts} gives the list with currently used hosts for socket clusters. Returns empty list if not used in socket mode (means: \code{sfType() != 'SOCK'}). \code{sfSetMaxCPUs} enables to set a higher maximum CPU-count for this program. If you need higher limits, call \code{sfSetMaxCPUs} before \code{sfInit} with the new maximum amount. } \keyword{package} \seealso{ See snow documentation for details on commands: \code{link[snow]{snow-cluster}} } \examples{ \dontrun{ # Run program in plain sequential mode. sfInit( parallel=FALSE ) stopifnot( sfParallel() == FALSE ) sfStop() # Run in parallel mode overwriting probably given values on # commandline. # Executes via Socket-cluster with 4 worker processes on # localhost. # This is probably the best way to use parallel computing # on a single machine, like a notebook, if you are not # using sfCluster. # Uses Socketcluster (Default) - which can also be stated # using type="SOCK". sfInit( parallel=TRUE, cpus=4 ) stopifnot( sfCpus() == 4 ) stopifnot( sfParallel() == TRUE ) sfStop() # Run parallel mode (socket) with 4 workers on 3 specific machines. sfInit( parallel=TRUE, cpus=4, type="SOCK", socketHosts=c( "biom7", "biom7", "biom11", "biom12" ) ) stopifnot( sfCpus() == 4 ) stopifnot( sfParallel() == TRUE ) sfStop() # Hook into MPI cluster. # Note: you can use any kind MPI cluster Rmpi supports. sfInit( parallel=TRUE, cpus=4, type="MPI" ) sfStop() # Hook into PVM cluster. sfInit( parallel=TRUE, cpus=4, type="PVM" ) sfStop() # Run in sfCluster-mode: settings are taken from commandline: # Runmode (sequential or parallel), amount of nodes and hosts which # are used. sfInit() # Session-ID from sfCluster (or XXXXXXXX as default) session <- sfSession() # Calling a snow function: cluster handler needed. parLapply( sfGetCluster(), 1:10, exp ) # Same using snowfall wrapper, no handler needed. 
sfLapply( 1:10, exp ) sfStop() } } snowfall/man/snowfall-d-tools.Rd0000644000175100001440000002165514261002646016375 0ustar hornikusers\name{snowfall-tools} \alias{snowfall-tools} \alias{sfLibrary} \alias{sfSource} \alias{sfExport} \alias{sfExportAll} \alias{sfRemove} \alias{sfRemoveAll} \alias{sfCat} \alias{sfClusterSplit} \alias{sfClusterCall} \alias{sfClusterEval} \alias{sfClusterEvalQ} \alias{sfClusterSetupRNG} \alias{sfClusterSetupRNGstream} \alias{sfClusterSetupSPRNG} \alias{sfTest} \title{Cluster tools} \usage{ sfLibrary( package, pos=2, lib.loc=NULL, character.only=FALSE, warn.conflicts=TRUE, keep.source=NULL, verbose=getOption("verbose"), version, stopOnError=TRUE ) sfSource( file, encoding = getOption("encoding"), stopOnError = TRUE ) sfExport( ..., list=NULL, local=TRUE, namespace=NULL, debug=FALSE, stopOnError = TRUE ) sfExportAll( except=NULL, debug=FALSE ) sfRemove( ..., list=NULL, master=FALSE, debug=FALSE ) sfRemoveAll( except=NULL, debug=FALSE, hidden=TRUE ) sfCat( ..., sep=" ", master=TRUE ) sfClusterSplit( seq ) sfClusterCall( fun, ..., stopOnError=TRUE ) sfClusterEval( expr, stopOnError=TRUE ) sfClusterSetupRNG( type="RNGstream", ... ) sfClusterSetupRNGstream( seed=rep(12345,6), ... ) sfClusterSetupSPRNG( seed=round(2^32*runif(1)), prngkind="default", para=0, ... ) sfTest() } \arguments{ \item{expr}{expression to evaluate} \item{seq}{vector to split} \item{fun}{function to call} \item{list}{character vector with names of objects to export} \item{local}{a logical indicating if variables should taken from local scope(s) or only from global.} \item{namespace}{a character given a namespace where to search for the object.} \item{debug}{a logical indicating extended information is given upon action to be done (e.g. 
print exported variables, print context of local variables etc.).} \item{except}{character vector with names of objects not to export/remove} \item{hidden}{also remove hidden names (starting with a dot)?} \item{sep}{a character string separating elements in x} \item{master}{a logical indicating if executed on master as well} \item{...}{additional arguments to pass to standard function} \item{package}{name of the package. Check \code{library} for details.} \item{pos}{position in search path to load library.} \item{warn.conflicts}{warn on conflicts (see "library").} \item{keep.source}{see "library". Please note: this argument has only effect on R-2.x, starting with R-3.0 it will only be a placeholder for backward compatibility.} \item{verbose}{enable verbose messages.} \item{version}{version of library to load (see "library").} \item{encoding}{encoding of library to load (see "library").} \item{lib.loc}{a character vector describing the location of the R library trees to search through, or 'NULL'. Check \code{library} for details.} \item{character.only}{a logical indicating package can be assumed to be a character string. Check \code{library} for details.} \item{file}{filename of file to read. Check \code{source} for details} \item{stopOnError}{a logical indicating if function stops on failure or still returns. Default is \code{TRUE}.} \item{type}{a character determine which random number generator should be used for clusters. Allowed values are "RNGstream" for L'Ecuyer's RNG or "SPRNG" for Scalable Parallel Random Number Generators.} \item{para}{additional parameters for the RNGs.} \item{seed}{Seed for the RNG.} \item{prngkind}{type of RNG, see snow documentation.} } \description{ Tools for cluster usage. Allow easier handling of cluster programming. } \details{ The current functions are little helpers to make cluster programming easier. All of these functions also work in sequential mode without any further code changes. 
\code{sfLibrary} loads an R-package on all nodes, including master. Use this function if slaves need this library, too. Parameters are identically to the R-build in funtion \code{\link{library}}. If a relative path is given in \code{lib.loc}, it is converted to an absolute path.\\ As default \code{sfLibrary} stops on any error, but this can be prevented by setting \code{stopOnError=FALSE}, the function is returning \code{FALSE} then. On success \code{TRUE} is returned. \code{sfSource} loads a sourcefile on all nodes, including master. Use this function if the slaves need the code as well. Make sure the file is accessible on all nodes under the same path. The loading is done on slaves using \code{source} with fixes parameters: \code{local=FALSE, chdir=FALSE, echo=FALSE}, so the files is loaded global without changing of directory.\\ As default \code{sfSource} stops on any error, but this can be prevented by setting \code{stopOnError=FALSE}, the function is returning \code{FALSE} then. On success \code{TRUE} is returned. \code{sfExport} exports variables from the master to all slaves. Use this function if slaves need acccess to these variables as well. \code{sfExport} features two execution modes: local and global. If using local mode (default), variables for export are searched backwards from current environment to \code{globalenv()}. Use this mode if you want to export local variables from functions or other scopes to the slaves. In global mode only global variables from master are exported.\\ \emph{Note: all exported variables are \bold{global} on the slaves!}\\ If you have many identical named variables in different scopes, use argument \code{debug=TRUE} to view the context the exported variable is coming from.\\ Variables are given as their names or as a character vector with their names using argument \code{list}. \code{sfExportAll} exports all global variables from the master to all slaves with exception of the given list. 
Use this functions if you want to export mostly all variables to all slaves.\\Argument \code{list} is a character vector with names of the variables \emph{not} to export. \code{sfRemove} removes a list of global (previous exported or generated) variables from slaves and (optional) master. Use this function if there are large further unused variables left on slave. Basically this is only interesting if you have more than one explicit parallel task in your program - where the danger is slaves memory usage exceed.\\ If argument \code{master} is given, the variables are removed from master as well (default is FALSE).\\ Give names of variables as arguments, or use argument \code{list} as a character vector with the names. For deep cleaning of slave memory use \code{sfRemoveAll}. \code{sfRemoveAll} removes all global variables from the slaves. Use this functions if you want to remove mostly all variables on the slaves. Argument \code{list} is a character vector with names of the variables \emph{not} to remove. \code{sfCat} is a debugging function printing a message on all slaves (which appear in the logfiles). \code{sfClusterSplit} splits a vector into one consecutive piece for each cluster and returns as a list with length equal to the number of cluster nodes. Wrapper for \pkg{snow} function \code{clusterSplit}. \code{sfClusterCall} calls a function on each node and returns list of results. Wrapper for \pkg{snow} function \code{clusterCall}. \code{sfClusterEvalQ} evaluates a literal expression on all nodes. Wrapper for \pkg{snow} function \code{clusterEvalQ}. \code{sfTest} is a simple unit-test for most of the build in functions. It runs tests and compares the results for the correct behavior. Note there are some warnings if using, this is intended (as behavior for some errors is tested, too). use this if you are not sure all nodes are running your R-code correctly (but mainly it is implemented for development). 
} \keyword{package} \seealso{ See \pkg{snow} documentation for details on wrapper-commands: \code{\link[snow]{snow-parallel}} } \examples{ \dontrun{ sfInit( parallel=FALSE ) ## Now works both in parallel as in sequential mode without ## explicit cluster handler. sfClusterEval( cat( "yummie\n" ) ); ## Load a library on all slaves. Stop if fails. sfLibrary( tools ) sfLibrary( "tools", character.only=TRUE ) ## Alternative. ## Execute in cluster or sequential. sfLapply( 1:10, exp ) ## Export global Var gVar <- 99 sfExport( "gVar" ) ## If there are local variables with same name which shall not ## be exported. sfExport( "gVar", local=FALSE ) ## Export local variables var1 <- 1 ## Define global var2 <- "a" f1 <- function() { var1 <- 2 var3 <- "x" f2 <- function() { var1 <- 3 sfExport( "var1", "var2", "var3", local=TRUE ) sfClusterCall( var1 ) ## 3 sfClusterCall( var2 ) ## "a" sfClusterCall( var3 ) ## "x" } f2() } f1() ## Init random number streams (snows functions, build upon ## packages rlecuyer/rsprng). sfClusterCall( runif, 4 ) sfClusterSetupRNG() ## L'Ecuyer is default. sfClusterCall( runif, 4 ) sfClusterSetupRNG( type="SPRNG", seed = 9876) sfClusterCall( runif, 4 ) ## Run unit-test on main functions. sfTest() } } snowfall/man/snowfall-c-calculation.Rd0000644000175100001440000001114311320650361017515 0ustar hornikusers\name{snowfall-calculation} %% Separate alias for cross-references. \alias{snowfall-calculation} \alias{sfClusterMap} \alias{sfClusterApply} \alias{sfClusterApplyLB} \alias{sfClusterApplySR} \alias{sfLapply} \alias{sfSapply} \alias{sfApply} \alias{sfRapply} \alias{sfCapply} \alias{sfMM} \alias{sfRestore} \title{Parallel calculation functions} \usage{ sfClusterApply( x, fun, ... ) sfClusterApplyLB( x, fun, ... ) sfClusterApplySR( x, fun, ..., name="default", perUpdate=NULL, restore=sfRestore() ) sfClusterMap( fun, ..., MoreArgs = NULL, RECYCLE = TRUE ) sfLapply( x, fun, ... 
) sfSapply( x, fun, ..., simplify = TRUE, USE.NAMES = TRUE ) sfApply( x, margin, fun, ... ) sfRapply( x, fun, ... ) sfCapply( x, fun, ... ) sfMM( a, b ) sfRestore() } \arguments{ \item{x}{vary depending on function. See function details below.} \item{fun}{function to call} \item{margin}{vector speficying the dimension to use} \item{...}{additional arguments to pass to standard function} \item{simplify}{logical; see \code{sapply}} \item{USE.NAMES}{logical; see \code{sapply}} \item{a}{matrix} \item{b}{matrix} \item{RECYCLE}{see snow documentation} \item{MoreArgs}{see snow documentation} \item{name}{a character string indicating the name of this parallel execution. Naming is only needed if there are more than one call to \code{sfClusterApplySR} in a program.} \item{perUpdate}{a numerical value indicating the progress printing. Values range from 1 to 100 (no printing). Value means: any X percent of progress status is printed. Default (on given value \sQuote{NULL}) is 5).} \item{restore}{logical indicating whether results from previous runs should be restored or not. Default is coming from sfCluster. If running without sfCluster, default is FALSE, if yes, it is set to the value coming from the external program.} } \description{ Parallel calculation functions. Execution is distributed automatically over the cluster.\cr Most of this functions are wrappers for \pkg{snow} functions, but all can be used directly in sequential mode. } \details{ \code{sfClusterApply} calls each index of a given list on a seperate node, so length of given list must be smaller than nodes. Wrapper for \pkg{snow} function \code{clusterApply}. \code{sfClusterApplyLB} is a load balanced version of \code{sfClusterApply}. If a node finished it's list segment it immidiately starts with the next segment. Use this function in infrastructures with machines with different speed. Wrapper for \pkg{snow} function \code{clusterApplyLB}. 
\code{sfClusterApplySR} saves intermediate results and is able to restore them on a restart. Use this function on very long calculations or it is (however) foreseeable that cluster will not be able to finish it's calculations (e.g. because of a shutdown of a node machine). If your program use more than one parallised part, argument \code{name} must be given with a unique name for each loop. Intermediate data is saved depending on R-filename, so restore of data must be explicit given for not confusing changes on your R-file (it is recommended to only restore on fully tested programs). If restores, \code{sfClusterApplySR} continues calculation after the first non-null value in the saved list. If your parallized function can return null values, you probably want to change this. \code{sfLapply}, \code{sfSapply} and \code{sfApply} are parallel versions of \code{lapply}, \code{sapply} and \code{apply}. The first two use an list or vector as argument, the latter an array. \code{parMM} is a parallel matrix multiplication. Wrapper for \pkg{snow} function \code{parMM}. \emph{\code{sfRapply} and \code{sfCapply} are not implemented atm.} } \keyword{package} \seealso{ See snow documentation for details on commands: \code{\link[snow]{snow-parallel}} } \examples{ \dontrun{ restoreResults <- TRUE sfInit(parallel=FALSE) ## Execute in cluster or sequential. sfLapply(1:10, exp) ## Execute with intermediate result saving and restore on wish. sfClusterApplySR(1:100, exp, name="CALC_EXP", restore=restoreResults) sfClusterApplySR(1:100, sum, name="CALC_SUM", restore=restoreResults) sfStop() ## ## Small bootstrap example. 
## sfInit(parallel=TRUE, cpus=2) require(mvna) data(sir.adm) sfExport("sir.adm", local=FALSE) sfLibrary(cmprsk) wrapper <- function(a) { index <- sample(1:nrow(sir.adm), replace=TRUE) temp <- sir.adm[index, ] fit <- crr(temp$time, temp$status, temp$pneu, failcode=1, cencode=0) return(fit$coef) } result <- sfLapply(1:100, wrapper) mean( unlist( rbind( result ) ) ) sfStop() } }snowfall/man/snowfall-a-package.Rd0000644000175100001440000000744011320650215016613 0ustar hornikusers\name{snowfall-package} \alias{snowfall-package} \alias{snowfall} \docType{package} \title{Toplevel useability wrapper for snow to make parallel programming even more easy and comfortable. All functions are able to run without cluster in sequential mode. Also snowfall works as connector to the cluster management program sfCluster, but can also run without it.} \description{ \pkg{snowfall} is designed to make setup and usage of \pkg{snow} more easier. It also is made ready to work together with \code{sfCluster}, a ressource management and runtime observation tool for R-cluster usage. } \details{ \tabular{ll}{ Package: \tab snowfall\cr Type: \tab Package\cr Version: \tab 1.61\cr Date: \tab 2008-11-01\cr License: \tab GPL\cr } } \section{Initialisation}{Initalisation via \code{sfInit} must be called before the usage of any of the \pkg{snowfall} internal functions. \code{sfStop} stopps the current cluster. Some additional functions give access to build-in functions (like \code{sfParallel}, \code{sfCpus} etc.). } \section{Calculations}{The are plenty of function to execute parallel calculations via \pkg{snowfall}. Most of them are wrappers to the according \pkg{snow} functions, but there are additional functions as well. Most likely the parallel versions of the R-buildin applies are interesting: \code{sfLapply}, \code{sfSapply} and \code{sfApply}. For better cluster take a look at the load balanced \code{sfClusterApplyLB} and the function with restore possibilities: \code{sfClusterApplySR}. 
} \section{Tools}{Various tools allow an easier access to parallel computing: \code{sfLibrary} and \code{sfSource} for loading code on the cluster, \code{sfExport}, \code{sfExportAll}, \code{sfRemoveAll} and \code{sfRemoveAll} for variable sperading on the cluster. And some more. } \section{sfCluster}{\pkg{snowfall} is also the R-connector to the cluster management program \code{sfCluster}. Mostly all of the communication to this tool is done implicit and directly affecting the initialisation via \code{sfInit}. Using \code{sfCluster} makes the parallel programming with \pkg{snowfall} even more practicable in real life environments. For futher informations about the usage of \code{sfCluster} look at its documentation. } \author{ Jochen Knaus Maintainer: Jochen Knaus , } \references{ \pkg{snow} (Simple Network of Workstations):\cr http://cran.r-project.org/src/contrib/Descriptions/snow.html\cr\cr \code{sfCluster} (Unix management tool for \pkg{snowfall} clusters):\cr http://www.imbi.uni-freiburg.de/parallel\cr } \keyword{package} \seealso{ Snowfall Initialisation: \code{\link{snowfall-init}}\cr Snowfall Calculation: \code{\link{snowfall-calculation}}\cr Snowfall Tools: \code{\link{snowfall-tools}}\cr Optional links to other man pages, e.g. \code{\link[snow]{snow-cluster}} } \examples{ \dontrun{ # Init Snowfall with settings from sfCluster ##sfInit() # Init Snowfall with explicit settings. sfInit( parallel=TRUE, cpus=2 ) if( sfParallel() ) cat( "Running in parallel mode on", sfCpus(), "nodes.\n" ) else cat( "Running in sequential mode.\n" ) # Define some global objects. globalVar1 <- c( "a", "b", "c" ) globalVar2 <- c( "d", "e" ) globalVar3 <- c( 1:10 ) globalNoExport <- "dummy" # Define stupid little function. calculate <- function( x ) { cat( x ) return( 2 ^ x ) } # Export all global objects except globalNoExport # List of exported objects is listed. # Work both parallel and sequential. sfExportAll( except=c( "globalNoExport" ) ) # List objects on each node. 
sfClusterEvalQ( ls() ) # Calc something with parallel sfLappy cat( unlist( sfLapply( globalVar3, calculate ) ) ) # Remove all variables from object. sfRemoveAll( except=c( "calculate" ) ) } } snowfall/man/snowfall-e-data.Rd0000644000175100001440000000054711320631665016146 0ustar hornikusers\name{snowfall-data} \docType{data} \alias{config} \alias{sfOption} \alias{f1} \alias{f2} \title{Internal configuration and test data} \description{ Internal configuration and test data. Only used for internal setup and testing. } \usage{ config f1 f2 sfOption } \format{A matrix containing basic predefined configuration informations.} \keyword{datasets}snowfall/DESCRIPTION0000644000175100001440000000142614530647156013641 0ustar hornikusersPackage: snowfall Type: Package Title: Easier Cluster Computing (Based on 'snow') Version: 1.84-6.3 Date: 2013-12-18 Author: Jochen Knaus Maintainer: Jochen Knaus Description: Usability wrapper around snow for easier development of parallel R programs. This package offers e.g. extended error checks, and additional functions. All functions work in sequential mode, too, if no cluster is present or wished. Package is also designed as connector to the cluster management tool sfCluster, but can also used without it. Depends: R (>= 2.10), snow Suggests: Rmpi License: GPL Encoding: UTF-8 Packaged: 2023-11-26 11:34:08 UTC; hornik Repository: CRAN Date/Publication: 2023-11-26 13:55:58 UTC NeedsCompilation: no LazyData: yes snowfall/build/0000755000175100001440000000000014530626457013230 5ustar hornikuserssnowfall/build/vignette.rds0000644000175100001440000000042614530626457015571 0ustar hornikusersuJ06N7Ľ@^]x{lOK1MJQɝk".b 9_9/.c1IdIˌ-͈q6'{m*2}QCP_)9PA# y=@6o`Z/*S 1$ switch to parallel execution. \\ type=X & Type of cluster. Allowed values are SOCK, MPI, PVM and NWS. \\ session=X & Session number. snowfall logfiles contain number, but only needed with sfCluster. 
\\ restoreSR & Enables restoring of previously saved results from \texttt{sfClusterApplySR} calls. \\ hosts=X & List of hosts for Socket (SOCK) or NetWorkSpaces (NWS) clusters. Entries are comma seperated. Any entry may contain colon seperated value for the amount of processors on this machine. Example: \texttt{{-}{-}hosts=machine1:4,machine2,123.123.12.13:2} (this spawns 4 workers on machine1, one on machine2 and two on 123.123.12.13). \\ tmpdir=X & Specify temporary directory for logfiles and R-output. \\ \end{tabular} For using these arguments, just add these after an \texttt{--args} on the commandline (which forces R not to treat these arguments as R ones). \begin{center} \texttt{R --no-save --args --parallel --cpus=2 < program.R} \end{center} Starts R and forces snowfall to start in parallel mode with 2 CPUs (in this case: using a Socket-cluster, as this is the default). \textit{Note}: arguments on the command line have lower priority as settings from the \texttt{sfInit} call. That means that the above example only works if initialisation is done via \texttt{sfInit()}, but not with \texttt{sfInit( parallel=FALSE )}, as then sequential execution is forced. Further examples should explan the feature: \begin{itemize} \item \texttt{R --no-save --args --parallel --type=MPI --cpus=4 < program.R} (start using 4 workers in an existing MPI cluster. If no MPI cluster exists, a plain one is started on your local machine only. Beware of this, as you have to shutdown this cluster afterwards manually.). \item \texttt{R --no-save --args --parallel --type=SOCK --hosts=localhost:3,singlema,othmach:4 < program.R} (Starts a socket cluster with two machines and 7 CPUs: 3 on \texttt{localhost}, 4 on \texttt{othmach} and one worker on \texttt{singlema}). \end{itemize} \subsection{Traps, Internals} \texttt{snowfall} limits the amount of CPUs by default (to 40). If you need more CPUs, call \texttt{sfSetMaxCPUs()} \emph{before} calling \texttt{sfInit()}. 
Beware of requesting more CPUs than you have resources: there are as many R processes spawned as CPUs wanted.
This is done via a default temporary (10 minutes) sequential run to determine the maximum usage of RAM on a slave. This is important for allocating resources on slaves.
nohup sfCluster -b --cpus=8 --mem=500M myRprogram.R --quiet ## Start R interactive shell with 4 cores. With 300MB memory (MB is default unit) ## No R-file is given for interactive mode. sfCluster -i --cpus=4 --mem=300 \end{verbatim} For all possible options and further examples for \emph{sfCluster} usage, see \texttt{sfCluster {-}{-}help}. \subsection{The snowfall-side of \emph{sfCluster}} If you start an R program using \texttt{snowfall} with \emph{sfCluster}, the latter waits until \texttt{sfInit()} is called and then starts the observation of the execution. The default behavior if using \emph{sfCluster} is just to call \texttt{sfInit()} without any argument. Use arguments only if you want to explicitly overwrite given settings by \emph{sfCluster}. \subsection{Proposed development cycle} The following development cycle is of course a proposal. You can skip or replace any step depending on your own needs. \begin{enumerate} \item Develop program in sequential mode (start using option \texttt{-s}). \item Test in parallel mode using interactive mode to detect directly problems on parallelisation (start using option \texttt{-i}). \item Try larger test runs using monitoring mode, observing the cluster and probably side effects during parallel execution (start using option \texttt{-m}). Problems arise on single nodes will be visible (like non correct working libraries). \item Do real runs using silent batch mode (start using options \texttt{-b {-}{-}quiet}). Probably you want to run these runs in the background of your Unix shell using \texttt{nohup}. \end{enumerate} \subsection{Future sfCluster} These additions are planned for the future: \begin{itemize} \item Port to OpenMPI \item Faster SSH connections for observing \item Extended scheduler for system ressources \end{itemize} %% History. 
\section{History of snowfall changes} You can also call: RShowDoc("NEWS", package="snowfall") \begin{itemize} \item 1.83 (API changes: minor additions) \begin{itemize} \item sfIsRunning: new function giving a logical is sfInit() was called or not. Needed, as all other snowfall functions implicitely call sfInit() if it was not called. \end{itemize} \item 1.82 \begin{itemize} \item Internal refactorings. \end{itemize} \item 1.81 \begin{itemize} \item Change in sfInit() MPI startup so sfCluster can run with snow > 0.3 now. \item sfExport now also works in sequential mode (writing to global environment). This prevented sequential execution in some cases. \end{itemize} \item 1.80 (API changes: minor additions) \begin{itemize} \item snowfall passes packages checks of R 2.10.1 without warning or error. Internal state is now only saved in the namespace itself (thanks to Uwe Ligges for the tipp). \item sfExport can now also export objects in a specific namespace (argument 'namespace') \item sfExport: behavior in error case manageable (stopOnError) \item sfExport: smaller bugfixes. \item sfRemoveAll can now also remove hidden names (argument 'hidden') \item sfRemoveAll is more robust now (some minor bugfixes, more checks) \item sfRemoveAll bugfix for multiple removals (thanks to Greggory Jefferis) \item Bugfix on exception list on sfExportAll \item Refactorings in sfTest() \item snowfall now has a NEWS doc ;) \item No warning on Mac OS because of default Mac-R command line arg 'gui' (thanks to Michael Siegel). \end{itemize} \item 1.71 (API changes: none) \begin{itemize} \item Exporting of objects using \texttt{sfExport} is speed up (round 30%) \item Fixed a bug on Windows in \texttt{sfSource} \end{itemize} \item 1.70 (API changes: minor additions, BEHAVIOR CHANGES: logging) \begin{itemize} \item Behavior change: new default: no logging of slave/worker output. \item API change: new argument \texttt{slaveOutfile} on \texttt{sfInit()}. 
\item API change: new argument \texttt{restore} on \texttt{sfInit()}. \item API change: new argument \texttt{master} on \texttt{sfCat}. \item Windows startup fixed. \item NWS startup fixed. \item sfSapply is working as intended. \item Changing CPU amount during runtime (with multiple sfInit() calls with different settings in a single program) is now possible using socket and NWS clusters. \item Dozens of small glitches inside snowfall fixed (also messages are made more precisly). \item Package vignette slightly extended. \end{itemize} \end{itemize} \bibliographystyle{plain} \bibliography{all-bib} \end{document} snowfall/vignettes/all-bib.bib0000644000175100001440000000457412254102616016116 0ustar hornikusers% This file was created with JabRef 2.3.1. % Encoding: UTF-8 @Article{Knau:Porz:Bind:Schw:easi:2009, author = {Jochen Knaus and Christine Porzelius and Harald Binder and Guido Schwarzer}, title = {Easier parallel computing in {R} with snowfall and {sfCluster}}, journal = {The R Journal}, volume = {1}, pages = {54--59}, year = {2009}} @ARTICLE{TIERNEY08, author = {A.J. Rossini AND Luke Tierney AND Na Li}, title = {Snow : A Parallel Computing Framework for the {R} System}, journal = {International Journal of Parallel Programming}, year = {2008}, note = {Online Publication: \url{http://www.springerlink.com/content/3v37mg0k63053567} (2008-12-23)}, } @ARTICLE{HANA_STAT04, author = {{\v{S}}ev{\v{c}}{\'{\i}}kov{\'a}, Hana}, title = {Statistical simulations on parallel computers}, journal = {Journal of Computational and Graphical Statistics}, year = {2004}, volume = {13}, pages = {886--906}, number = {4}, fjournal = {Journal of Computational and Graphical Statistics}, issn = {1061-8600}, mrclass = {Database Expansion Item}, mrnumber = {MR2113277} } @ARTICLE{HANA_04, author = {Hana {\v{S}}ev{\v{c}}{\'{\i}}kov{\'a} AND A.J. Rossini}, title = {Pragmatic Parallel Computing. 
Submitted to Journal of Statistical Software}, year = {2004}, owner = {jo}, timestamp = {2008.04.21} } @TECHREPORT{burns94:_lam, author = {Greg Burns and Raja Daoud and James Vaigl}, title = {{LAM}: {A}n {O}pen {C}luster {E}nvironment for {MPI}}, year = {1994}, note = {\url{http://www.lam-mpi.org/download/files/lam-papers.tar.gz}}, booktitle = {Proceedings of Supercomputing Symposium}, pages = {379--386} } @ARTICLE{ROSS_07, author = {A.J. Rossini AND Luke Tierney AND Na Li}, title = {Simple Parallel Statistical Computing in {R}}, journal = {Journal of Computational and Graphical Statistics}, year = {2007}, volume = {16}, pages = {399--420}, number = {2}, fjournal = {Journal of Computational and Graphical Statistics}, issn = {1061-8600} } @OTHER{SNOW_PACK, author = {A.J. Rossini AND Luke Tierney AND Na Li}, note = {\url{http://cran.rproject.org/doc/packages/snow.pdf}}, owner = {jo}, timestamp = {2008.04.22}, title = {The Snow package}, year = {2006} } @comment{jabref-meta: selector_journal:} @comment{jabref-meta: selector_author:} @comment{jabref-meta: selector_keywords:} @comment{jabref-meta: selector_publisher:} snowfall/NEWS0000644000175100001440000000276212254102205012614 0ustar hornikusersChanges in snowfall 1.84 =========================== o Bugfix sfExport: if exporting a NULL value, no error is raised. Changes in snowfall 1.83 =========================== o sfIsRunning: new function giving a logical is sfInit() was called or not. Needed, as all other snowfall functions implicitely call sfInit() if it was not called. Changes in snowfall 1.82 =========================== o Internal refactorings. Changes in snowfall 1.81 =========================== o Change in sfInit() MPI startup so sfCluster can run with snow > 0.3 now. o sfExport now also works in sequential mode (writing to global environment). This prevented sequential execution in some cases. 
Changes in snowfall 1.80 =========================== o snowfall passes packages checks of R 2.10.1 without warning or error. Internal state is now only saved in the namespace itself (thanks to Uwe Ligges for the tipp). o sfExport can now also export objects in a specific namespace (argument 'namespace') o sfExport: behavior in error case manageable (stopOnError) o sfExport: smaller bugfixes. o sfRemoveAll can now also remove hidden names (argument 'hidden') o sfRemoveAll is more robust now (some minor bugfixes, more checks) o sfRemoveAll bugfix for multiple removals (thanks to Greggory Jefferis) o Bugfix on exception list on sfExportAll o Refactorings in sfTest() o snowfall now has a NEWS doc ;) o No warning on Mac OS because of default Mac-R command line arg 'gui' (thanks to Michael Siegel). snowfall/R/0000755000175100001440000000000014530607632012324 5ustar hornikuserssnowfall/R/sysdata.rda0000644000175100001440000000054314530626460014466 0ustar hornikusersQN0lĨo~]"ȓ!2ZFM] V\{ν9=7̯6xxOj@Kߍ]*'L\+NFIXTQrAy"/(*.6b\GHfl# RqahEu01ai|+ph ^^b? L>gVK:ghLd'֎(Zd:-ϣA<0')ZQ^h}j?`)x+~:m FV a5HY*vr傪;f/BWsnowfall/R/socketRequest.R0000644000175100001440000000332212254102065015277 0ustar hornikusers#***************************************************************************** # Functions for handling the connection to the controlling webserver. # # Base socketrequest follows code from post on R-help: # http://tolstoy.newcastle.edu.au/R/devel/06/07/6196.html # # PARAMETER: Array|List Arguments (all Entries must be in format "name=val") # RETURN: Array result from webserver # THROWS: Exception Connection Error #***************************************************************************** connectWebserver <- function( call.args ) { host <- "www.imbi.uni-freiburg.de" path <- "/bib/bib.pl" # Parameters for the call. 
dat <- paste( call.args, collapse="&", sep="" ) len <- length( strsplit(dat,"")[[1]] ) request <- paste( "POST ", path, " HTTP/1.0\nHost: ", host, "\nReferer:\n", "Content-type: application/x-www-form-urlencoded\n", "Content-length: ", len, "\nConnection: Keep-Alive\n\n", dat, sep="" ) sock <- NULL # Needed in catch-Block for disconnect readSock <- "" # Connect. Catch exceptions regarding to connection errors. exception <- try( { sock <- socketConnection( host=host, port=80, server=FALSE, blocking=TRUE ) write( request, sock ) socketSelect( list( sock ) ) readSock <- readLines( sock ) close( sock ) }, silent=FALSE ) if( inherits( exception, "try-error" ) ) { cat( "Error connecting to: ", host, path, dat, "\n" ) # If socket exists, close it. if( sock != NULL ) close( sock ) stop() } return( readSock ) } ##connectWebserver( c( "kat=ben", "cmd=list", "usr=jo", "usr_sel=JO" ) ) snowfall/R/init.R0000644000175100001440000010026114530607632013412 0ustar hornikusers##***************************************************************************** ## Function for initialisizing the Cluster. ## ## Also the predefinition of (internal) global variables is done here, ## mainly because of the code check of R. ## ## Compability issue: snowfall needs to know if sfCluster is working (also ## old versions of sfCluster). ## --session could be set by other solutions. ## So, --lockfile is decided to be the sfCluster indicator, as this most ## likely will have no use if not used with sfCluster. ## Therefore setting of --lockfile (LOCKFILE) can cause troubles. ##***************************************************************************** ## These variables are used by internal function. Need to declared for ## the compiler warnings. As these cannot be altered directly, setOption() does ## this (in the namespace). So no global objects are set, internal state is ## kept inside the namespace. 
DEBUG <- FALSE ## Static switch for debugging messages .sfOption <- list() ## Configuration of this master .sfPresetCPUs <- 0 ## Presetted CPU amount (max. allocatable). ## Some vars needed at specific points, which can handled correctly otherwise, ## but would raise R CMD check warnings if not defined before. .sfPars <- '' ## Tmp. var for sfLibrary. .sfLoadError <- '' ## Tmp. var for loading. .sfTestVar5 <- 0 ## Exporting test in sfTest(). ##***************************************************************************** ## Function for initialisizing the Cluster. ## ## Attention: this package does nasty things with explicit sideeffects (not ## only using "require" and "options" etc.)... ## ## "Nodes" and "CPUs" are used identical (unbeautiful). ## ## PARAMETER: [Boolean parallel - overwrite CMDline settings], ## [Int nodes - overwrites commandline settings / DEPRECATED] ## [Int cpus - overwrites commandline settings] ## [Boolean nostart - If set, no cluster start will be run. ## Needed for nested usage of snowfall] ## [Boolean restore - Globally set restore] ## [String type - {'MPI','SOCK', 'PVM', 'NWS'} ## [Vector socketHosts - List of all hosts used in socketmode] ## [slaveOutfile - filename for output on slaves] ## [useRscript - Startup via R Script or shellscript. Only snow>0.3] ## RETURN: Boolean TRUE ##***************************************************************************** sfInit <- function( parallel=NULL, cpus=NULL, type=NULL, socketHosts=NULL, restore=NULL, slaveOutfile=NULL, nostart=FALSE, useRscript=FALSE ## snow: Default is TRUE. ) { ## Flag for detection of reconnect (means: non-first calls to sfInit()) reconnect <- FALSE ## Get rid of that stupid data load to global env. initEnv <- new.env() ## Saves users from many own if-clauses probably. if( nostart ) return( TRUE ) ## Are options setted? if( length( .sfOption ) == 0 ) { debug( "Setup sfOption..." 
) ## Add 1.62: list from sysdata cleared and created again setOption( "parallel", FALSE ) setOption( "session", NULL ) setOption( "priority", 1 ) setOption( "nodes", 1 ) setOption( "stopped", FALSE ) setOption( "init", FALSE ) ## Load configuration file: delivered with package and changeable by user. data( "config", package="snowfall", envir=initEnv ) configM <- as.matrix( t( config ) ) config <- as.list( configM ) names( config ) <- dimnames( configM )[[2]] ## Node count are limited in snowfall as well (as it is useable without ## sfCluster) and you probably don't want an arbitrary amount of CPUs ## requested by a DAU. ## If changed preset exists, take this number. if( .sfPresetCPUs > 0 ) setOption( "MAXNODES", .sfPresetCPUs ) else setOption( "MAXNODES", as.numeric( config[["MAXNODES"]] ) ) ## Startup lockfile (only coming from sfCluster and if available ## signalling that snowfall is started through sfCluster). ## LOCKFILE can only be set through commandline --lockfile setOption( "LOCKFILE", "" ) ## Temporary directory (for logfiles, esp. on the slaves) ## Only if set, if not, take default. if( as.character( config[["TMPDIR"]] ) != "-" ) setOption( "TMPDIR", path.expand( as.character( config[["TMPDIR"]] ) ) ) else { ## Default tempdir on Unix systems is R session tempdir if( .Platform$OS.type == "unix" ) setOption( "TMPDIR", file.path( Sys.getenv( "R_SESSION_TMPDIR" ), "sfCluster" ) ) ## On any non *nix system: take local dir (R_SESSION_TMPDIR unset on Win) else setOption( "TMPDIR", "" ) } ## Addition variables for save/restore (only used in sfClusterApplySR). setOption( "RESTOREFILES", NULL ) ## List with restore files (for cleanup) setOption( "RESTOREUPDATE", 5 ) ## Updates percent output any 5% setOption( "RESTORE", FALSE ) ## Restore previous results? setOption( "CURRENT", NULL ) ## Currently executed R-File ## Default cluster type (unchangeable by config to ensure runnability ## of a specific code in any setting). 
setOption( "type", "SOCK" ) setOption( "sockHosts", NULL ) ## Restore file directory (for saved intermediate results) - not neccessary ## under/in TMPDIR. ## (As log files prob woul be set in global dir, restore files should be ## stored under users home - as they don't contain a session-ID or something ## generic unique thing to differ them. if( as.character( config[["RESTDIR"]] ) != "-" ) setOption( "RESTDIR", path.expand( as.character( config[["RESTDIR"]] ) ) ) else setOption( "RESTDIR", file.path( Sys.getenv( "HOME" ), ".sfCluster", "restore" ) ) ## Remove config (as data() writes it as global variable). rm( config, envir=initEnv ) #pos=globalenv() ) } ## If .sfOption exists, sfInit() was called before: restart. ## (sfCluster should be able to handle this - although slaves are iterated and ## slave killing is only done through snow). else { reconnect <- TRUE if( .sfOption$stopped && !.sfOption$init ) debug( "Irregluar init state (error on previous init)..." ) ## If not stopped, but initialised. if( !.sfOption$stopped && .sfOption$init ) { message( "Explicit sfStop() is missing: stop now." ) sfStop() } } ##************************************************************************** ## Values for parallel/session can be in the commandline or the environment. ## Function parameters overwrite commandline. ##************************************************************************** searchCommandline( parallel, cpus=cpus, type=type, socketHosts=socketHosts, restore=restore ) if( getOption( 'verbose' ) && !reconnect ) print( .sfOption ) ## If given restore-directory does not exist, create it. ## if( !file.exists( .sfOption$RESTDIR ) ) { ## ## 1.62: removed ## ## .sfOption$RESTDIR <<- path.expand( "~/.sfCluster/restore" ) ## dirCreateStop( .sfOption$RESTDIR ) ## } ## Running in parallel mode? That means: Cluster setup. ## Will be blocked if argument "nostart" is set (for usage of snowfall ## inside of packages). if( .sfOption$parallel && !nostart ) { ## Internal stopper. 
Running in parallel mode a session-ID is needed. ## For testing purposes can be anything (mainly used for pathnames ## of logfiles). if( startedWithSfCluster() && is.null( .sfOption$session ) ) stop( "No session-ID but parallel run with sfCluster (something went wrong here?)..." ) ## @TODO regenerate session id if missing. ## If amount of nodes not set via commandline, then it will be 2 if( is.null( .sfOption$nodes ) || is.na( as.numeric( .sfOption$nodes ) ) ) setOption( "nodes", 2 ) else setOption( "nodes", as.numeric( .sfOption$nodes ) ) ## Preload required libraries if needed (as an extended error check). libList <- list( "PVM"="rpvm", "MPI"="Rmpi", "NWS"="nws", "SOCK"="" ) if( libList[[.sfOption$type]] != "" ) { if( !require( libList[[.sfOption$type]], character.only=TRUE ) ) { message( paste( "Failed to load required library:", libList[[.sfOption$type]], "for parallel mode", .sfOption$type, "\nFallback to sequential execution" ) ) ## Fallback to sequential mode. return( sfInit( parallel=FALSE ) ) } else message( paste( "Library", libList[[.sfOption$type]], "loaded." ) ) } ## In any parallel mode, load snow if needed. ## CHG 131217: not needed because of package depends. ## if( !require( snow ) ) { ## message( paste( "Failed to load library 'snow' required for parallel mode.\n", ## "Switching to sequential mode (1 cpu only)!." ) ); ## ## Fallback to sequential mode. ## return( sfInit( parallel=FALSE ) ) ## } ## Chg. 1.62 ## Temporary file for output. ## If sfCluster is running (LOCKFILE given): session is taken. ## If sfCluster not running but user setted slaveOutfile option: take arg. ## Else (default): no slave outfiles (writing to /dev/null|nul). if( startedWithSfCluster() ) { tmp <- file.path( .sfOption$TMPDIR, paste( "rout_", .sfOption$session, sep="" ) ) ## Only create temporary directory once and if needed. ## Only needed if running with sfCluster. If user sets it's own ## slaveOutfile, he has to ensure himself about existing pathes. 
## If needed create temporary path. Problem: this is executed only on ## master, not on slaves. The clusterstarter needs to manage this. if( !reconnect ) dirCreateStop( .sfOption$TMPDIR ) } else tmp <- ifelse( is.null( slaveOutfile ), '/dev/null', slaveOutfile ) ## @TODO Exception handler. ## @TODO Timeout on init. ## Ebenso: Timeout - das ist extrem hässlich, wenn das Cluster nicht ## korrekt startet und hängen bleibt (z.B. wenn zuviele CPUs für das ## Cluster angefordert werden - was PVM schluckt, macht MPI anscheinend ## Kopfzerbrechen). setDefaultClusterOptions( type = .sfOption$type ) setDefaultClusterOptions( homogenous = FALSE ) ## On socket connections the list of hosts needs to be given. ## If no is set, use localhost with default R. if( .sfOption$type == "SOCK" ) { ## No host information given: use localhost with wished CPUs. ## Else: host settings overwrite wished CPUs (important for error checks!). if( is.null( .sfOption$sockHosts ) || ( length( .sfOption$sockHosts ) == 0 ) ) setOption( "sockHosts", c( rep( "localhost", .sfOption$nodes ) ) ) else setOption( "nodes", length( .sfOption$sockHosts ) ) setOption( "cluster", try( makeCluster( .sfOption$sockHosts, type = "SOCK", outfile = tmp, homogenous = TRUE ) ) ) } # PVM cluster else if( .sfOption$type == "PVM" ) { setOption( "cluster", try( makeCluster( .sfOption$nodes, outfile = tmp ) ) ) } ## # Network Spaces ## else if( .sfOption$type == "NWS" ) { ## if( is.null( .sfOption$sockHosts ) || ( length( .sfOption$sockHosts ) == 0 ) ) ## setOption( "sockHosts", c( rep( "localhost", .sfOption$nodes ) ) ) ## else ## setOption( "nodes", length( .sfOption$sockHosts ) ) ## ## Patch Markus Schmidberger (Mail 11/25/2008). ## setOption( "cluster", try( makeNWScluster( ## .sfOption$sockHosts[1:.sfOption$nodes], ## type = "NWS", ## outfile = tmp ## ) ) ) ## } # MPI cluster (also default for irregular type). else { ## 1.81: useRScript must be FALSE. 
Else sfCluster wont work ## with snow > 0.3 (on older snow Versions this option ## is ignored. Also homogenous is always on. ## 1.83: But for non-sfCluster usage at least it has to be modifyable. setOption( "cluster", try( makeMPIcluster( .sfOption$nodes, outfile = tmp, homogenous = TRUE, useRscript = useRscript ) ) ) } ## Startup successfull? If not: stop. if( is.null( .sfOption$cluster ) || inherits( .sfOption$cluster, "try-error" ) ) stop( paste( "Starting of snow cluster failed!", geterrmessage(), .sfOption$cluster ) ) ## Cluster setup finished. Set flag (used in error handlers and stop). ## Also: no function can be called if init is not set. setOption( "init", TRUE ) setOption( "stopped", FALSE ) if( !reconnect ) { ## As Snow Init spawn all the requires R-processes, the proprietary ## lockfile can be deleted now (if it exists). ## Problem: now all R procs are spawned, but the observer most ## likely didn't catch them until the next time of his observing ## loop. if( !is.null( .sfOption$LOCKFILE ) && file.exists( .sfOption$LOCKFILE ) ) { if( unlink( .sfOption$LOCKFILE ) != 0 ) warning( "Unable to remove startup lockfile: ", .sfOption$LOCKFILE ) else message( "Startup Lockfile removed: ", .sfOption$LOCKFILE ) } if( getOption( 'verbose' ) ) { if( tmp == '/dev/null' ) message( "Slave output suppressed. Use 'slaveOutfile' to activate." ) else message( paste( "Temporary log for STDOUT/STDERR (on each node): ", tmp, "\n", "Cluster started with", .sfOption$nodes, "CPUs.", "\n" ) ) } else debug( paste( "Temporary log for STDOUT/STDERR (on each node): ", tmp, "\n", "Cluster started with", .sfOption$nodes, "CPUs.", "\n" ) ) ## Write R-Version and Time in (slave-)logfiles. .startInfo <- strsplit( Sys.info(), "\n" ); .startMsg <- paste( sep="", "JOB STARTED AT ", date(), # Global Var! 
" ON ", .startInfo$nodename, " (OS", .startInfo$sysname, ") ", .startInfo$release, "\n" ) sfExport( ".sfOption", ".startMsg", local=TRUE, namespace="snowfall", debug=DEBUG ) sfCat( .startMsg, "\n", master=FALSE ) ## No master sfCat( paste( "R Version: ", R.version$version.string, "\n\n" ) ) ## Remove starting message. sfRemove( ".startMsg" ) } ## @TODO Checken, ob dieser Export wirklich noch benötigt ist (jan 10) else sfExport( ".sfOption", local=FALSE, namespace="snowfall" ) } ## Sequential mode or option "nostart": ## init will be set. If someone calls sfInit with nostart and aims ## it to be started, it's his or her problem. else { ## Cluster setup finished. Set flag (used in error handlers and stop). ## Also: no function can be called if init is not set. setOption( "init", TRUE ) setOption( "stopped", FALSE ) setOption( "cluster", NULL ) } ## Print init Message (esp. print State of parallel and snowfall ## version. if( sfParallel() ) { message( paste( "snowfall ", packageDescription( "snowfall" )$Version, " initialized (using snow ", packageDescription( "snow" )$Version, "): parallel execution on ", sfCpus(), " CPUs.\n", sep="" ) ); } else { message( paste( "snowfall", packageDescription( "snowfall" )$Version, "initialized: sequential execution, one CPU.\n" ) ); } return( invisible( TRUE ) ) } ##***************************************************************************** ## Check if sfInit() was called. ## This function is called before any function which need initialised cluster. ## ## Previous it stops with error, now it calls sfInit() without parameters, ## so sfInit() does not have to be called explicitely (requested from Harald). ## ## (Not exported to namespace). ##***************************************************************************** sfCheck <- function() { if( !sfIsRunning() ) { message( paste( "Calling a snowfall function without calling 'sfInit'", "first or after sfStop().\n'sfInit()' is called now." 
) ) return( invisible( sfInit() ) ) } return( invisible( TRUE ) ) } ##***************************************************************************** ## Exported as userfunction. ## Give the user information if sfInit() was called and cluster is not stopped. ## (Maybe helpful inside of packages etc.). ##***************************************************************************** sfIsRunning <- function() { ## Add 1.62: stopped as argument if( ( length( .sfOption ) == 0 ) || !.sfOption$init || .sfOption$stopped ) return( FALSE ) else return( TRUE ) } ##***************************************************************************** ## Stop the (snow)-Cluster. Just calls Snows stopCluster. ## ## PARAMETER: [Boolean nostop: don't stop] ##***************************************************************************** sfStop <- function( nostop=FALSE ) { ## Saves users from many own if-clauses probably. if( nostop ) return( TRUE ); if( exists( ".sfOption" ) && ( length( .sfOption ) > 0 ) ) { ## Only stop if initialisized and running parallel. if( !.sfOption$stopped && .sfOption$init && .sfOption$parallel ) { message( "\nStopping cluster\n" ) ## Stopping snow cluster. ## NO call to sfGetCluster() here, as sfGetCluster sfCheck()s again. stopCluster( .sfOption$cluster ) } ## Reset default values. ##.sfOption$init <<- FALSE setOption( "stopped", TRUE ) setOption( "parallel", FALSE ) ## Delete probably stored resultfiles (can also be used in sequential mode!) deleteRestoreFiles() } invisible( NULL ) } ##***************************************************************************** ## Is programm running parallel? Wrapper for internal Optionblock (therefore ## exported of course). ## Also: get cluster Handler (prob. not exported in the final). 
##
## RETURN: Boolean Running in parallel mode
##*****************************************************************************
sfParallel <- function()
{
  sfCheck()

  ## Plain accessor on the internal option block.
  .sfOption$parallel
}

##*****************************************************************************
## Shall sfClusterApplySR restore results?
##*****************************************************************************
sfRestore <- function()
{
  sfCheck()

  .sfOption$RESTORE
}

##*****************************************************************************
## Receive snow cluster handler (for direct calls to snow functions).
##*****************************************************************************
sfGetCluster <- function()
{
  sfCheck()

  .sfOption$cluster
}

##*****************************************************************************
## Receive amount of currently used CPUs (sequential: 1).
##*****************************************************************************
sfCpus <- function()
{
  sfCheck()

  .sfOption$nodes
}

## getter for amount of nodes. Wrapper for sfCPUs.
sfNodes <- function() sfCpus()

##*****************************************************************************
## Receive type of current cluster.
##*****************************************************************************
sfType <- function()
{
  sfCheck()

  ## In sequential mode there is no cluster type to report.
  if( !sfParallel() )
    return( "- sequential -" )

  .sfOption$type
}

##*****************************************************************************
## Receive list with all socket hosts.
##*****************************************************************************
sfSocketHosts <- function()
{
  ## Host lists only exist for socket clusters.
  if( sfType() != "SOCK" ) {
    warning( paste( "No socket cluster used:", sfType() ) )

    return( invisible( NULL ) )
  }

  sfCheck()

  .sfOption$sockHosts
}

##*****************************************************************************
## getter for session-ID.
##*****************************************************************************
sfSession <- function() { sfCheck(); return( .sfOption$session ) }

##*****************************************************************************
## Increase max. numbers of CPUs used per process.
## No check for sensefull values (if user wants 1000, you get 1000 :)).
##*****************************************************************************
sfSetMaxCPUs <- function( number=32 ) {
  setVar( ".sfPresetCPUs", number )
}

##*****************************************************************************
## Internal function:
##
## Search commandline arguments for Parallel and Session values.
## If there are arguments on function call, these overwrites the values on the
## commandline.
##
## Basically the arguments on the commandline come from sfCluster, but of
## course set manually or via another load- or sessionmanager.
##
## Commandline arguments: --parallel(=[01])*
##                        --session=\d{8}
##                        --nodes=\d{1,2}
##                        --tmpdir=\/[a-z_].*
##                        --hosts=((\s+:\d+))+
##                        --restoreDir=\/[a-z_].*
##                        --restoreSR
##                        --lockfile
## Results will be saved in options .parallel (bool) and .session (8 chars)
##*****************************************************************************
searchCommandline <- function( parallel=NULL, cpus=NULL, socketHosts=NULL,
                               type=NULL, restore=NULL ) {
#  if( !exists( ".sfOption", envir=globalenv() ) )
#    stop( "Global options missing. Internal error." )

  ## If set, copy to sfCluster data structure.
  if( !is.null( cpus ) ) {
    setOption( "nodes", max( 1, cpus ) )

    ## For socket/NWS clusters: force rebuild of hostlist (as probably changed).
    ## (If not overwritten later by users own arguments).
    setOption( "sockHosts", NULL )

    ## If more than one CPU is wanted, parallel mode is forced.
    ## Probably this is not an intended behavior.
#    if( .sfOption$nodes > 1 ) {
#      ## Potential misuse of argument: inform user.
#      if( !is.null( parallel ) && ( parallel == FALSE ) )
#        warning( "Explicit parallel=FALSE, but required >1 CPUs ==> parallel mode forced." )
#
#      parallel = TRUE
#    }
  }

  ## Defaults come from calling arguments on sfInitCluster.
  if( !is.null( parallel ) ) {
    setOption( "parallel", parallel )

    if( parallel ) {
      ## There is a slightly problem: as many users can use sfCluster without
      ## session-ID, the session number "XXXXXXXX" is not good enough.
      ## Problem: we need the filename on clusterinit so we cannot use cluster
      ## here.
      ## Win: USERNAME, *nix: LOGNAME
      ## LOGNAME/USER ist not set under Windows (tried Win Server 2003)
      uname <- ifelse( Sys.getenv( "LOGNAME" ) != "",
                       Sys.getenv( "LOGNAME" ),
                       Sys.getenv( "USERNAME" ) )

      if( uname == "" )
        uname <- "___"

      ## Add R for RunSnowMode heterogenous mode.
      ## XXX Check R version and fill in correct version.
      setOption( "session", paste( sep="_",
                                   "XXXXXXXXR", uname,
                                   format( Sys.time(), "%H%M%S_%m%d%y" ) ) )

      ## message( "Forced parallel. Using session: ", .sfOption$session, " \n" )
    }
    ## Sequential mode: reduce to one CPU.
    else {
      setOption( "nodes", 1 )

      ## message( "Forced to sequential mode.\n" )
    }
  }

  ## If socket hosts are set, take them.
  if( !is.null( socketHosts ) || is.vector( socketHosts ) )
    setOption( "sockHosts", socketHosts )

  ## Type of the cluster ({SOCK|PVM|MPI|NWS} are allowed).
  if( !is.null( type ) ) {
    if( length( grep( "PVM|MPI|SOCK|NWS", type ) ) > 0 )
      setOption( "type", type )
    else {
      warning( paste( "Unknown cluster type:", type,
                      "Allowed are: {PVM,MPI,SOCK,NWS}. Fallback to SOCKet." ) )
      setOption( "type", "SOCK" )
    }
  }
  ## Default value: socket cluster.
  else
    setOption( "type", "SOCK" )

  ## Global restore setting (for sfClusterApplySR).
  if( !is.null( restore ) )
    setOption( "RESTORE", restore )

  arguments <- commandArgs()

  ## Search for currently executed R-file (if there is any). Detected by
  ## argument followed to option "-f" ("R CMD BATCH" adds -f implicitely).
  ## Save filename for options (for save/restore)
  ## @todo Find a better way to detect R-file (is there any?)
  ## Last argument to be ignored (as no follow-up exists).
  if( length( arguments ) >= 2 ) {
    for( entry in seq( 1, length( arguments ) - 1 ) ) {
      if( !is.null( arguments[entry] ) && ( arguments[entry] == '-f' ) ) {
        ## Switch to next entry and check if this is valid.
        entry <- entry + 1;

        ## If yes, take it as filename.
        if( !is.null( arguments[entry] ) && ( arguments[entry] != "" ) ) {
          setOption( "CURRENT", arguments[entry] )
          break
        }
      }
    }
  }

  ## No R-file given: set to DEFAULT filename (always occurs in interactive
  ## mode).
  if( is.null( .sfOption$CURRENT ) )
    setOption( "CURRENT", "DEFAULT" )

  ## Go through all arguments from commandline.
  for( arg in arguments ) {
    ## Non sfCluster-like argument? Skip.
    ## (Only empty argument are '--parallel' and '--restoreSR')
    if( ( length( grep( "=", arg ) ) == 0 ) &&
        !( ( arg == "--parallel" ) || ( arg == "--restoreSR" ) ||
           ( arg == "--restore" ) ) )
      next;

    ## Arguments in form "--name=value"
    args <- strsplit( arg, "=" )

    ## Marker for parallel execution.
    ## If parallel was set via function arguments, commandline is ignored.
    if( args[[1]][1] == "--parallel" ) {
      if( !is.null( args[[1]][2] ) && !is.na( as.numeric( args[[1]][2] ) ) )
        cmdParallel <- ifelse( ( as.numeric( args[[1]][2] ) > 0 ), TRUE, FALSE )
      ## --parallel is allowed to use without value (means: true).
      else
        cmdParallel <- TRUE

      ## Ask here, instead there will be a warning if used with commandline arg
      ## --parallel and sfInit( parallel=TRUE ).
      ## Rise warning if command arguments are overwritten by sfInit() arguments.
      if( is.null( parallel ) )
        setOption( "parallel", cmdParallel )
      else if( parallel != cmdParallel )
        warning( paste( "Commandline argument --parallel",
                        "overwritten with sfInit argument parallel=",
                        parallel ) )
    }
    ## Marker for general restore (only used in sfClusterApplySR).
    ## Both --restoreSR/--restore are allowed.
    else if( ( args[[1]][1] == "--restoreSR" ) ||
             ( args[[1]][1] == "--restore" ) ) {
      if( is.null( restore ) )
        setOption( "RESTORE", TRUE )
      else if( !restore )
        ## FIX: message previously blamed "--parallel" and claimed
        ## "restore=TRUE"; the overwriting sfInit() argument here is
        ## restore=FALSE.
        warning( paste( "Commandline argument --restoreSR/--restore",
                        "overwritten with sfInit argument restore=FALSE" ) )
    }
    ## Marker for Session-ID.
    else if( args[[1]][1] == "--session" ) {
      ## Session-ID is allways 8 Chars long.
      ## Not anymore since sfCluster >=0.23
      if( !is.null( args[[1]][2] ) ) {  ##&& ( nchar( args[[1]][2] ) == 8 ) ) {
        setOption( "session", args[[1]][2] )
      }
      else
        warning( paste( "Empty or irregular Session-ID: '", args[[1]][2],
                        "'\n" ) )
    }
    ## Amount of CPUs (formerly called "nodes", kept for backward
    ## compatibility).
    ## If set via function arguments, commandline is ignored.
    else if( ( args[[1]][1] == "--nodes" ) || ( args[[1]][1] == "--cpus" ) ) {
      nodes <- try( as.numeric( args[[1]][2] ) )

      if( !is.null( nodes ) && !is.na( nodes ) ) {
        if( nodes > .sfOption$MAXNODES ) {
          stop( paste( "Too much CPUs allocated:", nodes, "Max.:",
                       .sfOption$MAXNODES,
                       "\n - Call sfSetMaxCPUs() before sfInit() if you need more." ) )
        }
        else
          nodes <- max( 1, nodes )

        ## Really set amount of CPUs? Rise overwrite warning if needed.
        if( is.null( cpus ) )
          setOption( "nodes", nodes )
        else if( cpus != nodes )
          warning( paste( "Commandline --cpus=", nodes,
                          " overwritten by sfInit() argument cpus=",
                          cpus, sep="" ) )
      }
      else
        warning( paste( "Empty or irregular nodes amount: '", nodes, "'\n" ) )
    }
    ## Type of the network.
    else if( args[[1]][1] == "--type" ) {
      if( !is.null( args[[1]][2] ) && ( nchar( args[[1]][2] ) > 0 ) ) {
        if( length( grep( "PVM|MPI|SOCK|NWS", args[[1]][2] ) ) > 0 ) {
          if( is.null( type ) )
            setOption( "type", args[[1]][2] )
          else if( type != args[[1]][2] )
            warning( paste( "Commandline --type=", args[[1]][2],
                            " overwritten by sfInit() argument type=",
                            type, sep="" ) )
        }
        else {
          warning( paste( "Unknown cluster type on commandline:",
                          args[[1]][2], "Allowed are: {PVM,MPI,SOCK,NWS}" ) )
        }
      }
      else
        warning( "No cluster-type is given as value for argument --type" )
    }
    ## Hosts for socket mode.
    ## Arguments come in format:
    ##   nodename:cpus  -> On node X are Y cpus used.
    ##   nodename       -> On node X one cpu is used.
    ## Any entries are comma seperated (no whitespace allowed!):
    ##   node1:3,node2,node3:2
    else if( args[[1]][1] == "--hosts" ) {
      if( !is.null( args[[1]][2] ) && ( nchar( args[[1]][2] ) > 0 ) ) {
        cmdHosts <- c()

        hosts <- unlist( strsplit( args[[1]][2], "," ) )

        ## Examine single host
        for( host in hosts ) {
          info <- unlist( strsplit( host, ":" ) )

          ## No CPU amount given: assume 1.
          if( is.null( info[2] ) || is.na( info[2] ) )
            info[2] <- 1

          offset <- as.integer( info[2] )

          ## FIX: a non-numeric CPU count (e.g. "node:abc") yields NA from
          ## as.integer() and previously crashed the comparison below;
          ## fall back to one CPU instead. (The former
          ## !is.numeric(offset) stop was unreachable: as.integer()
          ## always returns an integer.)
          if( is.na( offset ) || ( offset <= 0 ) )
            offset <- 1

          len <- length( cmdHosts ) + 1

          ## Insert Host n-times where n is amount of CPUs
          ## (required for snows argument format).
          cmdHosts[seq(len,len+offset-1)] <- rep( as.character( info[1] ),
                                                  offset )
        }

        if( is.null( socketHosts ) )
          setOption( "sockHosts", cmdHosts )
        else if( paste( cmdHosts, collapse="" ) !=
                 paste( socketHosts, collapse="" ) ) {
          warning( paste( "Commandline --hosts=", args[[1]][2],
                          " overwritten by sfInit() argument hosts=",
                          paste( socketHosts, collapse="," ), sep="" ) )
        }
      }
      else
        warning( "No hosts are given as value for --hosts" )
    }
    ## Temporary directory: slave logs.
    else if( args[[1]][1] == "--tmpdir" ) {
      if( !is.null( args[[1]][2] ) && ( nchar( args[[1]][2] ) > 0 ) )
        setOption( "TMPDIR", args[[1]][2] )
      else
        warning( "No temporary directory given as value for --tmpdir" )
    }
    ## Restore directory: intermediate results are lawn here.
    else if( args[[1]][1] == "--restdir" ) {
      if( !is.null( args[[1]][2] ) && ( nchar( args[[1]][2] ) > 0 ) )
        setOption( "RESTDIR", args[[1]][2] )
      else
        warning( "No restore/result directory given as value for --restdir" )
    }
    ## Startup lock.
    ## Add 1.62:
    ## should only used from sfCluster => is the marker snowfall is started
    ## though sfCluster!
    else if( args[[1]][1] == "--lockfile" ) {
      if( !is.null( args[[1]][2] ) && ( nchar( args[[1]][2] ) > 0 ) )
        setOption( "LOCKFILE", args[[1]][2] )
      else
        warning( "No lockfile given as value for --lockfile" )
    }
    ## Unknown option
    ## Add 1.62
    ## Add 1.72: patch from Michael Siegel for Mac OS X, which sets --gui.
    else if( args[[1]][1] != "--gui" ) {
      warning( paste( "Unknown option on commandline:", args[[1]][1] ) )
    }
  }

  invisible( NULL )
}
snowfall/R/snowfall-internal.R0000644000175100001440000002443212254102065016102 0ustar hornikusers##*****************************************************************************
## Unordered internal helper functions.
##*****************************************************************************

##*****************************************************************************
## Helpers for managing the internal variables in the package namespace without
## awake the R CMD check for later R versions (which basically blaims many
## global assignings).
##
## The given solution has an advantage: only writing is affected.
## Reading of the
## objects can remain the same (thanks to Uwe Ligges for the tipp):
##   reading:  .sfOption$parallel
##   writing:  setOption("parallel", TRUE)
##*****************************************************************************

##*****************************************************************************
## Set an option in the snowfall option list.
## (Basically this is the setting of a list entry).
## key - character: object name
## val - object (everything is allowed, even NULL)
##*****************************************************************************
setOption <- function( key=NULL, val=NULL ) {
  if( !is.null(key) && is.character( key ) ) {
    option <- getVar( ".sfOption" )  ## Get from NS

    option[[key]] <- val

    setVar( ".sfOption", option )    ## Write to NS

    return( invisible( TRUE ) )
  }

  ## FIX: message previously blamed "val" as well, but a NULL val is
  ## explicitly allowed (used e.g. to clear "sockHosts"). Only the key
  ## can be invalid here.
  stop( "key is NULL or not a character string." )
}

##*****************************************************************************
## Get a specific variable from the snowfall namespace.
## var - character: object name
##*****************************************************************************
getVar <- function( var=NULL ) {
  if( !is.null( var ) && is.character( var ) ) {
    ## silent=TRUE: the clean stop() below is the user-visible error;
    ## without it try() additionally prints the raw internal error.
    tmp <- try( getFromNamespace( var, "snowfall" ), silent=TRUE )

    if( inherits( tmp, "try-error" ) )
      stop( paste( "Object", var, "not found in package" ) )

    return( tmp )
  }

  stop( "var is NULL or not a string." )
}

##*****************************************************************************
## Write a specific variable to the snowfall namespace.
## var - character: object name
## arg - object (NULL allowed)
##*****************************************************************************
setVar <- function( var=NULL, arg=NULL ) {
  if( !is.null( var ) && is.character( var ) ) {
    assignInNamespace( var, arg, "snowfall" )

    return( invisible( TRUE ) )
  }

  stop( "var is NULL or no character" );
}

##*****************************************************************************
## Replaces the tilde operator in file/directory names with the system
## depending counterpart.
## Used for configuration files mainly.
##
## PARAMETER: String directory
## RETURN:    String directory replaced
##*****************************************************************************
fetchDirName <- function( dir ) {
  return( gsub( "~", Sys.getenv( "HOME" ), dir ) )
}

##*****************************************************************************
## Is this snowfall session started through sfCluster?
## As a backward compatible solution there is only the LOCKFILE option open
## (as there is no default for it and setable through commandline).
##
## PARAMETER: -
## RETURN:    Boolean True (running with sfCluster), False
##*****************************************************************************
startedWithSfCluster <- function() {
  if( !exists( ".sfOption" ) )
    return( FALSE )
  else
    return( !is.null( .sfOption$LOCKFILE ) && ( .sfOption$LOCKFILE != '' ) )
}

##*****************************************************************************
## Creates a directory (recursive) if needed and stops on failure.
##
## PARAMETER: String directory
## RETURN:    Boolean success (true, on fail, execution stops)
##*****************************************************************************
dirCreateStop <- function( dir=NULL ) {
  if( !is.null( dir ) && !file.exists( dir ) ) {
    if( dir.create( dir, recursive=TRUE ) ) {
      message( "Created directory: ", dir )

      return( invisible( TRUE ) );
    }
    else
      stop( "UNABLE to create directory: ", dir )
  }

  ## Never reached.
  return( invisible( FALSE ) );
}

##***************************************************************************
## Add a file (with absolute path) to remove list after sfStop().
## Used for save/restore-files.
##
## PARAMETER: file String abs. filepath
## RETURN:    invisible: amount of registered restore files
##***************************************************************************
addRestoreFile <- function( file=NULL ) {
  ## FIX: the previous version had a dangling else: the "initialise list"
  ## branch was attached to the duplicate check, so adding an already
  ## known file RESET the whole list to that single file, and an empty
  ## (NULL) list was never initialised at all (is.vector(NULL) is FALSE).
  ## Also %in% replaces grep(), which treated the path as a regular
  ## expression and matched partially.
  if( !is.null( file ) ) {
    if( is.vector( .sfOption$RESTOREFILES ) ) {
      ## Check if file is already in the list. If yes: no add.
      if( !( file %in% .sfOption$RESTOREFILES ) )
        setOption( "RESTOREFILES", c( .sfOption$RESTOREFILES, file ) )
    }
    else
      setOption( "RESTOREFILES", c( file ) )

    debug( paste( "Added file for delete: ", file, "\n" ) )
  }

  return( invisible( length( .sfOption$RESTOREFILES ) ) )
}

##***************************************************************************
## Clean up save/restore files after successfull cluster shutdown.
##***************************************************************************
deleteRestoreFiles <- function() {
  if( !is.null( .sfOption$RESTOREFILES ) ) {
    ## File names are absolute: just unlink all.
    ##    lapply( .sfOption$RESTOREFILES, unlink )
    for( file in .sfOption$RESTOREFILES ) {
      ## Does file exist?
      if( file.exists( file ) ) {
        if( unlink( file ) != 0 )
          cat( "Unable to delete save/restore file:", file, "\n" )
        else
          cat( "Deleted save/restore file:", file, "\n" )
      }
    }

    setOption( "RESTOREFILES", NULL )
  }
}

##***************************************************************************
## Check if any element of a given list produced a stop or try-error.
## RETURN: Vector of logicals (true: ok, false: try error caught).
##***************************************************************************
checkTryErrorAny <- function( res ) {
  return( sapply( res,
                  function( x ) {
                    if( inherits( x, "try-error" ) )
                      return( FALSE )
                    else
                      return( TRUE )
                  } ) )
}

##***************************************************************************
## Check if given argument is a function.
##***************************************************************************
checkFunction <- function( fun, stopOnError=TRUE ) {
  ## NOTE(review): the whole validation below is deliberately disabled by
  ## this early return - every caller always gets TRUE (= "is a function").
  ## The dead code beneath also looks inverted ("1.84-3 typo" below:
  ## state is set TRUE on FAILURE, yet !state triggers the stop), which is
  ## presumably why it was switched off. Do not re-enable without fixing.
  return( TRUE )

  state <- FALSE

  ## 1.84-3 typo
  try( if( !exists( as.character( substitute( fun ) ), inherits=TRUE ) ||
           !is.function( fun ) ||
           is.null( get( as.character( substitute( fun ) ), inherits=TRUE ) ) ||
           !is.function( fun ) )
         state <- TRUE )

  if( !state ) {
##    if( !is.function( fun ) ) cat( "FAIL SYMBOL\n" )
##    if( !exists( as.character( substitute( fun ) ), inherit=TRUE ) ) cat( "FAIL EXIST\n" )
##    if( is.null( get( as.character( substitute( fun ) ), inherit=TRUE ) ) ) cat( "FAIL GET\n" )
##    if( !is.function( fun ) ) cat( "FAIL FUNCTION\n" )

    if( stopOnError )
      stop( paste( "Not a function in sfCluster function call: '", fun, "'" ) )
  }

  return( state )
}

## Fallback error handler (only wired up via the commented line in .onLoad).
errHandler <- function( ... ) {
  print( "ERROR IN HANDLING STUFF!\n" )
}

##***************************************************************************
## Treat given three dot arguments as strings (for names listings
## like in sfExport).
## Ripped from buildin R function rm (by XXX).
## Returns list with names, stops on errors.
##***************************************************************************
fetchNames <- function( ... ) {
  ## Dot argument to list of characters: ripped from rm()...
  ## match.call() keeps the arguments unevaluated, so bare symbols
  ## (sfExport(x, y)) work as well as strings (sfExport("x", "y")).
  dots <- match.call(expand.dots = FALSE)$...

  if( length(dots) &&
      !all( sapply( dots,
                    function(x) is.symbol(x) || is.character(x) ) ) )
    stop( "... must contain names or character strings in function ",
          as.character( sys.call( -1 ) ) )
  ## end ripp.

  return( sapply(dots, as.character) )
}

##***************************************************************************
## Create named list with all parameters from an function call.
## Idea somewhere from R-help (not tracked).
## This does not work if above env is not global env!
##***************************************************************************
getNamedArguments <- function( ...
) {
  ## substitute({...}) captures the unevaluated call arguments; [-1] drops
  ## the `{` symbol itself.
  pars <- as.list( substitute( {...} )[-1] )

##  pars <- as.list( substitute( {...} )[-1] )
##  pars <- lapply( pars, function( x ) {
##    if( is.atomic( x ) )
##      return( x )
##    else
##      return( deparse( x ) )
##  } )

  return( pars )
}

##***************************************************************************
## Ensure a given filename contains an absolute path.
## Kind of silly and lame. But works in most cases.
##***************************************************************************
absFilePath <- function( file ) {
  ## If not starting with separator, path is most likely relative.
  ## Make it absolute then.
  ## On Windows absolute path can contain drive chars.
  if( .Platform$OS.type == "windows" ) {
    ## Neither "/..." nor "X:..." => treat as relative to getwd().
    if( ( substr( file, 1, 1 ) != .Platform$file.sep ) &&
        ( substr( file, 2, 2 ) != ":" ) )
      file <- file.path( getwd(), file )
  }
  else if( substr( file, 1, 1 ) != .Platform$file.sep )
    file <- file.path( getwd(), file )

  return( file )
}

## Assign a value under a given name into the current frame.
## NOTE(review): writes with pos=sys.nframe(), i.e. into the CALLING frame
## of this helper, not the global env (see 1.84-4 comment below).
simpleAssign <- function( name=NULL, value ) {
  message( paste( "simpleAssign called: ", name, "VAL:", value ) )

  if( is.null( name ) || !is.character( name ) || ( nchar( name ) == 0 ) ) {
    warning( "NULL assign on simpleAssign()" )
    return( NULL )
  }
  else {
    ## 1.84-4
    ## Problem: it is required to write to global env!
    ## Comment censored :)
#    assign( name, value, envir = globalenv() )
    assign( name, value, pos=sys.nframe() )

    return( NULL )
  }
}

##***************************************************************************
## Internal debug printer (globally disable using package variable DEBUG).
##***************************************************************************
debug <- function( txt='' ) {
  if( DEBUG )
    message( txt )
}

.onLoad <- function( lib, pkg ) {
  ##  options( "error"=errHandler )
}
snowfall/R/snowWrappers.R0000644000175100001440000002257112254102065015157 0ustar hornikusers## Wrappers for Snow function.
##
## The wrappers do the following: decide whether we run in parallel or
## sequential mode.
## In parallel mode the according Snow functions are used.
## In sequential mode, if it makes sense, the sequential counterparts
## of the Snow functions are used.

##****************************************************************************
## Wrapper for: clusterSplit
##****************************************************************************
sfClusterSplit <- function( seq ) {
  sfCheck();

  if( sfParallel() )
    return( clusterSplit( sfGetCluster(), seq ) )
  ## In sequential mode return a list with everything in element 1 (means:
  ## everything is run on one node).
  else
    return( list( seq ) )
}

##****************************************************************************
## Wrapper for: clusterCall
##
## Catches for errors. Return them or stop immidiately.
##****************************************************************************
sfClusterCall <- function( fun, ..., stopOnError=TRUE ) {
  sfCheck();

  if( !checkFunction( fun, stopOnError=FALSE ) ) {
    if( stopOnError )
      stop( "No function or not defined object in sfClusterCall" )
    else {
      warning( "No function or not defined object in sfClusterCall" )
      return( NULL )
    }
  }

  if( sfParallel() ) {
    ## Exec via Snow.
    result <- clusterCall( sfGetCluster(), fun, ... )

    ## Not enough results?
    ## @TODO Check if this test is needed
    if( length( result ) != sfCpus() ) {
      if( stopOnError )
        stop( paste( "Error in sfClusterCall (not all slaves responded).\n",
                     "Call from: ", as.character( sys.call( -1 ) ) ) )
      else {
        message( paste( "Error in sfClusterCall (not all slaves responded).\n",
                        "Call from: ", as.character( sys.call( -1 ) ) ) )
        return( result );
      }
    }

    ## Check if snow throw an exception on any of the slaves.
    if( !all( checkTryErrorAny( result ) ) ) {
      ## FIX: previously which( inherits( result, "try-error" ) ) was used,
      ## but inherits() is NOT vectorized over a list - it returned a single
      ## FALSE, so errorsTxt was always empty and the slave exception texts
      ## were silently lost. Reuse the vectorized helper instead.
      errorsTxt <- sapply( which( !checkTryErrorAny( result ) ),
                           function(x) result[[x]] )

      message( "EXCEPTION INFOS:" )
      message( paste( errorsTxt, collapse="\n" ) )

      if( stopOnError ) {
        stop( paste( "Error in sfClusterCall (catched TRY-ERROR).\n",
                     "Call from: ", as.character( sys.call( -1 ) ) ) )
      }
      else {
        message( paste( "Error in sfClusterCall (catched TRY-ERROR).\n",
                        "Call from: ", as.character( sys.call( -1 ) ) ) )
        return( result )
      }
    }

    return( result )
  }
  ## Sequential mode.
  else
    return( do.call( fun, list( ... ) ) )
}

##****************************************************************************
## Wrapper for: clusterEvalQ - renamed as indeed "eval" is executed and not
## "evalq".
##****************************************************************************
sfClusterEval <- function( expr, stopOnError=TRUE ) {
  sfCheck();

  if( sfParallel() ) {
    return( sfClusterCall( eval, substitute( expr ), env=globalenv(),
                           stopOnError=stopOnError ) )
  }
  else {
    ## Problems can arise through "enclos", which is default set to parent
    ## and therefore here, too: on this way local variables (higher environments
    ## are visible, which badly are not visible in parallel runs...).
    ## There should be a fix or something.
    return( eval( expr, envir=globalenv(), enclos=parent.frame() ) )
  }
}

## Snows clusterEvalQ uses "eval" and not "evalq", so this wrapper is an alias.
sfClusterEvalQ <- function( expr ) return( sfClusterEval( expr ) )

##****************************************************************************
## Wrapper for: clusterMap.
## Currently not used.
##****************************************************************************
sfClusterMap <- function( fun, ..., MoreArgs=NULL, RECYCLE=TRUE )
  stop( "Currently no wrapper for clusterMap" )

##****************************************************************************
## Wrapper for: clusterApply (snow parallel) - lapply (sequential)
## Adds additional warnings before the execution (esp. in sequential mode,
## where exec works fine but can cause problems runnin in parallel).
##
## PARAMETERS: Parameters like clusterApply
## RETURN:     Result
##****************************************************************************
sfClusterApply <- function( x, fun, ... ) {
  sfCheck();
  checkFunction( fun )

  ## However snow limits list size to cluster nodes in "normal"
  ## execution.
  ## This is a fatal error in parallel mode and a warning in sequential.
  if( length( x ) > sfCpus() ) {
    if( sfParallel() )
      stop( "More list entries as nodes => use sfClusterApplyLB instead. See Snow/Snowfall documentation." )
    else
      warning( "More list entries as nodes => causes error in parallel mode. use sfClusterApplyLB instead." )
  }

  if( sfParallel() )
    return( clusterApply( sfGetCluster(), x, fun, ... ) )
  else
    return( lapply( x, fun, ... ) )
}

##****************************************************************************
## Wrapper for: clusterApplyLB (snow parallel) - lapply (sequential)
##
## PARAMETERS: Parameters like clusterApply
## RETURN:     Result
##****************************************************************************
sfClusterApplyLB <- function( x, fun, ... ) {
  sfCheck();
  checkFunction( fun )

  if( sfParallel() )
    return( clusterApplyLB( sfGetCluster(), x, fun, ... ) )
  else
    ## array... korrigieren.
    return( lapply( x, fun, ... ) )
}

##****************************************************************************
## Also snow-Handler handling is hidden to the user.
##
## Wrapper for: parLappy (snow parallel) - lapply (sequential)
##
## As lapply parameters were inkonsitent ("x"/"fun") they were corrected to
## ""x"/"fun".
##
## PARAMETERS: Parameters like lapply
## RETURN:     Result
##****************************************************************************
sfLapply <- function( x, fun, ... ) {
  sfCheck()
  checkFunction( fun )

  if( sfParallel() )
    return( parLapply( sfGetCluster(), x, fun, ... ) )
  else
    return( lapply( x, fun, ...
) )
}

##****************************************************************************
## Wrapper for: parSapply (snow parallel) - sapply (sequential)
##
## PARAMETERS: Parameters like sapply
## RETURN:     Result
##****************************************************************************
sfSapply <- function( x, fun, ..., simplify=TRUE, USE.NAMES=TRUE ) {
  sfCheck()
  checkFunction( fun )

  if( sfParallel() )
    return( parSapply( sfGetCluster(), x, fun, ...,
                       simplify=simplify, USE.NAMES=USE.NAMES ) )
  else
    return( sapply( x, fun, ..., simplify=simplify, USE.NAMES=USE.NAMES ) )
}

##****************************************************************************
## Wrapper for: parApply (snow parallel) - apply (sequential)
##
## PARAMETERS: Parameters like apply
## RETURN:     Result
##****************************************************************************
sfApply <- function( x, margin, fun, ... ) {
  sfCheck()
  checkFunction( fun )

  if( sfParallel() )
    return( parApply( sfGetCluster(), x, margin, fun, ... ) )
  else
    return( apply( x, margin, fun, ... ) )
}

## Placeholder: not implemented - always stops.
## (FIX: removed the unreachable return() after stop().)
sfRapply <- function( x, fun, ... ) {
  stop( "sfRapply does not exists yet. Use Snow's parRapply instead." )
}

## Placeholder: not implemented - always stops.
## (FIX: removed the unreachable return() after stop().)
sfCapply <- function( x, fun, ... ) {
  stop( "sfCapply does not exists yet. Use Snow's parCapply instead." )
}

##****************************************************************************
## Wrapper for: parMM (snow parallel) - %*% (sequential)
##
## PARAMETERS: Matrix a, Matrix b
## RETURN:     Result
##****************************************************************************
sfMM <- function( a, b ) {
  sfCheck();

  if( sfParallel() )
    return( parMM( sfGetCluster(), a, b ) )
  else
    return( a %*% b )
}

##****************************************************************************
## Wrappers for the two uniform RNGs used in snow.
## Basically, at the moment these are not used in sequential (means: none
## of the two is included here for sequential execution).
## @TODO Sequential use of the RNGs.
##****************************************************************************
## Initialise SPRNG streams on the cluster; in sequential mode only a plain
## set.seed() is done (streams differ from parallel runs - see warning).
sfClusterSetupSPRNG <- function( seed = round( 2^32 * runif(1) ),
                                 prngkind = "default", para = 0, ... ) {
  sfCheck();

  if( sfParallel() )
    clusterSetupSPRNG( sfGetCluster(), seed, prngkind, para, ... )
  else {
    warning( paste( "Uniform random number streams (currently) not available in serial execution.",
                    "Random numbers may differ in serial & parallel execution." ) )

    set.seed( seed )
  }
}

## Initialise L'Ecuyer RNGstream on the cluster; sequential fallback seeds
## with the first element of the 6-element seed vector only.
sfClusterSetupRNGstream <- function( seed=rep( 12345, 6 ), ... ) {
  sfCheck();

  if( sfParallel() )
    clusterSetupRNGstream( sfGetCluster(), seed=seed, ... )
  else {
    warning( paste( "Uniform random number streams (currently) not available in serial execution.",
                    "Random numbers may differ in serial & parallel execution." ) )

    set.seed( seed[1] )
  }
}

## Generic snow RNG setup dispatcher (type: "RNGstream" or "SPRNG").
## Sequential mode: warning only, no seeding at all here.
sfClusterSetupRNG <- function( type="RNGstream", ... ) {
  sfCheck();

  if( sfParallel() )
    clusterSetupRNG( sfGetCluster(), type=type, ... )
  else {
    warning( paste( "Uniform random number streams (currently) not available in serial execution.",
                    "Random numbers may differ in serial & parallel execution." ) )
  }
}
snowfall/R/clusterFunctions.R0000644000175100001440000012374112254102616016022 0ustar hornikusers##*****************************************************************************
## Functions which extend Snow usage or implement some higher level usage.
## Wrappers for Snow functions are in snowWrappers.R
##
## Functions:
##    sfLoadLib    - Load library in cluster (path conversion, flags...)
##    sfSource     - Load source (path conversion...)
##
##    sfExport     - Export local and global objects to cluster
##    sfExportAll  - Export all global objects (with given exception list)
##    sfRemove     - Remove objects from nodes
##    sfRemoveAll  - Remove all objects from nodes (with given excpetion list)
##
##    sfCat        - Cat something on cluster
##    sfPrint      - Print something on cluster
##*****************************************************************************

##*****************************************************************************
## Load a library depending on sequential or parallel execution.
##
## Should behave most likely the build-in library() function.
##
## Running in sequential mode: normal "library()" command.
## Running in parallel mode: the library is loaded on ALL nodes.
##
## PARAMETERS:
## RETURN: Logical TRUE/FALSE on success. On failure, if noStopOnError is not
##         set, stop immidiately.
##*****************************************************************************
sfLibrary <- function( package,
                       pos = 2,
                       lib.loc = NULL,
                       character.only = FALSE,
                       warn.conflicts = TRUE,
                       # keep.source is removed in R3, but kept here for back-compatible API
                       # keep.source = getOption("keep.source.pkgs"),
                       keep.source = NULL,
                       verbose = getOption("verbose"),
                       version,
                       stopOnError = TRUE ) {
  sfCheck();

  ## Generate (global) names list with all parameters.
#  setVar( ".sfPars", list() )
  sfPars <- list()

  ## Help does not make sense.
##  if( !missing( help ) )
##    stop( "Help is not allowed in sfLibrary. Use 'library' instead." )

  if( !missing( package ) ) {
    if( character.only ) {
      if( is.character( package ) )
        sfPars$package <- package
      else
        stop( paste( "Package", package, "is no character string." ) )
    }
    else
      ## Bare symbol given (sfLibrary(tools)): take its name unevaluated.
      sfPars$package <- deparse( substitute( package ) )
  }

  ## package is now a string in any case.
  sfPars$character.only <- TRUE

  sfPars$pos            <- pos
  sfPars$lib.loc        <- lib.loc
  sfPars$warn.conflicts <- warn.conflicts

  # Raw quickfix for R 3: simply remove argument if we are on version 3.
  # But keep it to not break R 2.x (although default argument changed
  # there).
  if( as.integer(R.version$major) < 3 ) {
    # As new default for keep source is "NULL" (from 1.84-2), we need
    # to rebuild the old default behavior of the functions arguments.
    if( is.null( keep.source ) )
      keep.source = getOption("keep.source.pkgs")

    sfPars$keep.source <- keep.source
  }

  sfPars$verbose <- verbose

  ## All libraries are loaded internally with logical.return.
  sfPars$logical.return <- TRUE

  if( !missing( version ) )
    sfPars$version <- version

  if( sfParallel() ) {
    ## On Nodes load location with absolute path.
    if( !is.null( sfPars$lib.loc ) )
      sfPars$lib.loc <- absFilePath( sfPars$lib.loc )

    ## Export to namespace.
    setVar( ".sfPars", sfPars )

    ## Weird enough ".sfPars" need to be exorted (else it would not be found
    ## on slave, although it is a parameter)
    sfExport( ".sfPars", local=FALSE, namespace="snowfall" )

    ## Load libs using require as Exception on nodes doesn't help us here.
    ## @todo Check on correct execution via logical.return
    ## @todo Exporting of .sfPars needed?
    ## CHANGE FOR R-3: attribute 'keep-source' is removed.
    result <- try( sfClusterEval( do.call( "library", .sfPars ) ) )
#    result <- try( sfClusterEval( do.call( "library", .sfPars ) ) )

    if( inherits( result, "try-error" ) ||
        ( length( result ) != sfCpus() ) ||
        !all( checkTryErrorAny( result ) ) ||
        !all( unlist( result ) ) ) {
      if( stopOnError )
        stop( paste( "Stop: error loading library on slave(s):",
                     .sfPars$package ) )
      else {
        warning( paste( "Error loading library on slave(s):",
                        .sfPars$package ) )
        return( invisible( FALSE ) )
      }
    }
    else {
      ## Load message in slave logs.
      sfCat( paste( "Library", .sfPars$package, "loaded.\n" ) )

      ## Message in masterlog.
      message( paste( "Library", .sfPars$package, "loaded in cluster.\n" ) )
    }
  }

  ## Load the library on the master as well (also in sequential mode).
  result <- try( do.call( "library", sfPars ) )

  ## Remove global var from cluster (and local).
  ## Do before exception checks, as there might by a stop.
  sfRemove( ".sfPars" )

  if( inherits( result, "try-error" ) || !result ) {
    ## FIX: these two branches were swapped - stopOnError=TRUE previously
    ## only warned and returned FALSE, while stopOnError=FALSE hard-stopped.
    ## Now consistent with the parallel branch above.
    ## Also use sfPars$package for the message: evaluating the raw `package`
    ## promise errors itself when a bare symbol was given.
    if( stopOnError )
      stop( paste( "Unable to load library:", sfPars$package ) )
    else {
      warning( paste( "Unable to load library:", sfPars$package ) )
      return( invisible( FALSE ) )
    }
  }
  else {
    if( verbose )
      message( paste( "Library", sfPars$package, "loaded.\n" ) )

    ## If logical return is requested here it comes.
    ## In clustermode the programm immidiately stops it a library couldn't be
    ## load on a slave. In sequentially mode it behaves like library().
    return( invisible( TRUE ) )
  }
}

##*****************************************************************************
## Include a source file.
## @todo Include complete source() parameter list.
##
## Should behave most likely the build-in source() function.
##
## Running in sequential mode: normal "source()" command (relative path)
## Running in parallel mode: the sourcefile is loaded on ALL nodes
##                           (abs. path and no echo).
##
## PARAMETERS: filename
## RETURN: Logical true on success, false else. is stopOnError=TRUE => stop
##*****************************************************************************
sfSource <- function( file, encoding = getOption("encoding"),
                      stopOnError = TRUE ) {
  sfCheck();

  absFile <- absFilePath( file )

  if( file.exists( absFile ) ) {
    if( sfParallel() ) {
      ## Load source on all nodes (with globalized arguments)
      success <- sfClusterCall( source, file=absFile, encoding=encoding,
                                echo=FALSE, local=FALSE, chdir=FALSE )

      if( inherits( success, "try-error" ) ) {
        if( stopOnError )
          stop( paste( "Try error on cluster source call: 'source ",
                       absFile, "'", sep="" ) )
        else {
          message( paste( "Try error on cluster source call: 'source ",
                          absFile, "'", sep="" ) )
          return( FALSE )
        }
      }
      else {
        sfCat( paste( "Source", file, "loaded.\n" ) )
        message( paste( "Source", file, "loaded in cluster.\n" ) )
      }
    }

    ## Same as in sfLibrary(): include file on master as well (with
    ## original filename and echo setting).
res <- try( source( file=absFile, encoding=encoding, echo=TRUE, local=FALSE, chdir=FALSE ) ) if( inherits( res, "try-error" ) ) { if( stopOnError ) stop( paste( "Try error loading on master: '", file, "'", sep="" ) ) else { message( paste( "Try error loading on master: '", file, "'", sep="" ) ) return( invisible( FALSE ) ) } } return( invisible( TRUE ) ) } ## File not found? else { if( stopOnError ) stop( paste( "File does not exist:", file ) ) else { message( paste( "File does not exist:", file ) ) return( invisible( FALSE ) ) } } } ##**************************************************************************** ## Export a single variable. ## On slaves, ALL exported variables are global! ## From the master, either global or local variables can be exported. ## ## PARAMETERS: ... - Names or Variables ## local - if TRUE, local vars will be exported ## namespace - also exports from namespace ## debug - with local=TRUE, prints where vars are found ## list - List of variable names (like snows clusterExport) ## RETURN: - ##**************************************************************************** sfExport <- function( ..., list=NULL, local=TRUE, namespace=NULL, debug=FALSE, stopOnError=TRUE ) { sfCheck(); ## Export is only done if running parallel, on master all vars are visible. ## @TODO: Although all vars are visible, they are not surely in global env! ## => export to global env. ## Test if global object of this name exists. If yes: warning, ## if not: assign in global space. if( !sfParallel() ) { warning( "sfExport() writes to global environment in sequential mode.\n" ) ## return( invisible( TRUE ) ) } ## List of given names in dot arguments. names <- fetchNames( ... ) ## If extra list is given (calling style of clusterExport()), just add this ## list. if( !is.null( list ) ) { ## Test from rm, see fetchNames for details. 
if( !length( list ) || !all( sapply( list, function(x) is.symbol(x) || is.character(x) ) ) ) { if( stopOnError ) stop( "'list' must contain names or character strings" ) else { warning( "Error in sfExport: 'list' must contain names or character strings" ) return( invisible( FALSE ) ) } } names <- c( names, list ) } for( name in names ) { ## Also examine namespace (from snowfall package?). Only needed for internal ## functions. if( !is.null( namespace ) && is.character( namespace ) ) { ## On some strange behavior, this only works with given error ## function. Else errors on not found objects are not caught. val <- tryCatch( getFromNamespace( name, namespace ), ##, pos=-1 error = function(x) { NULL } ) if( !is.null( val ) && !inherits( val, "try-error" ) ) { res <- sfClusterCall( assign, name, val, env = globalenv(), stopOnError = FALSE ) ## Error on export? if( is.null( res ) || !all( checkTryErrorAny( res ) ) ) { if( stopOnError ) stop( paste( "Error exporting '", name, "': ", geterrmessage(), sep="" ) ) else { warning( paste( "Error exporting '", name, "': ", geterrmessage(), sep="" ) ) return( invisible(FALSE) ) } } ## Skip local tests. next } } ## Check if exists before exporting. if( local ) { found <- FALSE # Traverse back through scopes (get() with inherit only finds # global presence of variables). # sys.nframe() at least 1 at this point, globalenv() to check last. for( pframe in seq( 1, sys.nframe() ) ) { ## Var exists in this frame? if( exists( name, inherits=FALSE, envir=sys.frame( -pframe ) ) ) { found <- TRUE ## If debug Messages are wanted, print these (especially for local ## mode with nested functions important, to locate probably ## overwriting variables. if( debug ) { definedIn <- gsub( "\n", "|", as.character( sys.call( -pframe ) ) ) cat( "Export '", name, "' defined in '", definedIn, "'", "\n", sep="" ) print( get( name, envir=sys.frame( -pframe ) ) ) } ## Export it. 
## Direct call to assign is far slower as a call to a function ## doing it (however...) # res <- sfClusterCall( simpleAssign, name, # get( name, # envir=sys.frame( -pframe ) ), # stopOnError = FALSE ) ## <= 1.70 res <- sfClusterCall( assign, name, get( name, envir=sys.frame( -pframe ) ), env = globalenv(), stopOnError = FALSE ) ## Error on export? ## 1.84: object can be null if source variable was null, too. if( ( is.null( res ) && !is.null( get( name, envir=sys.frame( -pframe ) ) ) ) || !all( checkTryErrorAny( res ) ) ) { if( stopOnError ) stop( paste( "Error exporting '", name, "': ", geterrmessage(), sep="" ) ) else { message( paste( "Error exporting '", name, "': ", geterrmessage(), sep="" ) ) return( invisible(FALSE) ) } } break } } ## If variable to export is not found. if( !found ) { if( stopOnError ) stop( paste( "Unknown/unfound variable ", name, " in export. (local=", local, ")", sep="" ) ) else { message( paste( "Unknown/unfound variable ", name, " in export. (local=", local, ")", sep="" ) ) return( invisible( FALSE ) ) } } } ## Global export only. else { ## 1.84-3 typo if( exists( name, inherits=FALSE, envir=globalenv() ) ) { # res <- sfClusterCall( simpleAssign, name, # get( name, inherit=FALSE, # envir=globalenv() ), # stopOnError = FALSE ) ## <= 1.70 ## 1.84-3 typo res <- sfClusterCall( assign, name, get( name, inherits=FALSE, envir=globalenv() ), env = globalenv(), stopOnError = FALSE ) if( is.null( res ) || !all( checkTryErrorAny( res ) ) ) { if( stopOnError ) stop( paste( "Error exporting global '", name, "': ", geterrmessage(), sep="" ) ) else { warning( paste( "Error exporting global '", name, "': ", geterrmessage(), sep="" ) ) return( invisible( TRUE ) ) } } } else { if( stopOnError ) stop( paste( "Unknown variable ", name, " in export." ) ) else { warning( paste( "Unknown variable ", name, " in export." 
) ) return( invisible( TRUE ) ) } } } } invisible( TRUE ) } ##**************************************************************************** ## Export all GLOBAL variables and functions to the whole cluster. ## Aware of memory usage. ## ## PARAMETERS: [Vector/List Names or variables NOT to export] ## RETURN: Logical Success ##**************************************************************************** sfExportAll <- function( except=NULL, debug=FALSE ) { sfCheck(); if( sfParallel() ) { ## Vector with all global variables. expList <- as.list( objects( pos = globalenv() ) ) ## Now remove all variables which are listed in list except from ## parameters. if( !is.null( except ) ) { if( is.list( except ) ) except <- unlist( except ) if( !is.vector( except ) ) { warning( "sfExportAll: except is not a vector.\n" ) return( invisible( FALSE ) ) } ## Remove those elements which are included in except. ## Reverse matches for correct indices after removing of ## single elements (start removing on right border). ## for( i in rev( match( except, expList ) ) ) ## expList <- expList[-i] ## Nicer version, proposal Greggory Jefferis (1.7.2) ## na.omit is not a must here though. expList <- expList[-na.omit(match(except, expList))] } ## Exporting mode with explicit global mode. sfExport( list=expList, local=FALSE ) if( debug ) { message( "sfExportAll: Following variables are exported:" ) message( paste( expList, collapse=", " ) ) } } else { message( "sfExportAll() ignored in sequential mode.\n" ) return( invisible( TRUE ) ) } invisible( TRUE ) } ##**************************************************************************** ## Remove objects from global environment (on whole cluster cluster) ## or at least from master (sequentially mode). ## ## PARAMETERS: List with Names(!) of the variables. 
## RETURN: - ##**************************************************************************** sfRemove <- function( ..., list=NULL, master=FALSE, debug=FALSE ) { sfCheck(); ## List of given names in dot arguments. .sfNames <- fetchNames( ... ) ## If extra list is given (calling style of clusterExport()), just add this ## list. if( !is.null( list ) ) { ## Test from rm, see fetchNames for details. if( !length( list ) || !all( sapply( list, function(x) is.symbol(x) || is.character(x) ) ) ) stop( "list must contain names or character strings" ) .sfNames <- c( .sfNames, list ) } ## If running parallel, remove objects from slaves. if( sfParallel() ) { if( debug ) for( name in .sfNames ) cat( "REMOVE:", name, "\n" ) sfExport( ".sfNames", local=TRUE ) sfClusterEval( rm( list=.sfNames, pos=globalenv() ) ) sfClusterEval( rm( .sfNames, pos=globalenv() ) ) } ## Remove on master as well? if( master ) rm( list=.sfNames, pos=globalenv() ) invisible( NULL ) } ##**************************************************************************** ## Remove all variables from nodes (important: only global vars from nodes ## - NOT the master R process - are deleted). ## To delete on master as well, use sfRemove(). ## ## PARAMETERS: [Vector/List Names of objects NOT to remove]. ## RETURN: Boolean Success (invisible) ##**************************************************************************** sfRemoveAll <- function( except=NULL, debug=FALSE, hidden=TRUE ) { sfCheck(); if( sfParallel() ) { ## @TODO Also hidden vars? if( hidden ) sfTmpAll <- sfClusterEval( ls( pos=globalenv(), all.names=TRUE ) ) else sfTmpAll <- sfClusterEval( ls( pos=globalenv(), all.names=FALSE ) ) if( length( sfTmpAll ) == 0 ) { message( "sfRemoveAll: problems fetching variables from nodes (or none existant)...\n" ) return( invisible( FALSE ) ) } ## Only take result from one node. ## We assume all nodes have exactly the same variables in global space. 
## It may be the case, that there are different variables on each node ## (like a node-routine writes different vars on different cases). ## Take that node with the most variables in object space. ## @todo: Merge result lists from all nodes. sfTmp <- sfTmpAll[[which.max(sapply(sfTmpAll,length))]] ## If there are any variables on nodes. if( length( sfTmp ) > 0 ) { ## Now remove all variables which are listed in list except from ## parameters. if( !is.null( except ) ) { if( is.list( except ) ) except <- unlist( except ) if( !is.vector( except ) ) { warning( "sfRemoveAll: except is not a vector.\n" ) return( invisible( FALSE ) ) } ## Remove those elements which are included in except. ## Not very elegant... However. ## Bugfix see sfExportAll for( i in match( except, sfTmp ) ) sfTmp[i] <- NA ## sfTmp <- sfTmp[-i] would fail for multiple removals ## Remove NAs sfTmp <- sort( sfTmp, na.last = NA ) } ## Create a new namespace vector (temporary). # setVar( ".sfTmpList", sfTmp ) if( debug ) { message( "sfRemoveAll: Remove variables from nodes:" ) message( paste( sfTmp, collapse=", " ) ) } ## Export the list to cluster. sfExport( "sfTmp", local=TRUE ) ## Delete all variables in the list. sfClusterEval( rm( list=sfTmp, pos=globalenv() ) ) sfClusterEval( rm( "sfTmp", pos=globalenv() ) ) } else { message( "sfRemoveAll: no variables on nodes.\n" ) return( invisible( FALSE ) ) } return( invisible( TRUE ) ) } ## In sequential mode nothing is done and it counts as success ;) else { message( "sfRemoveAll() ignored in sequential mode.\n" ) return( invisible( TRUE ) ) } } ##**************************************************************************** ## Fast messages on the cluster (all nodes!). ## For testing proposes mainly. 
## ## PARAMETER: Vector x Objects to print, ## String sep Separator ## Boolean master Print on master as well ##**************************************************************************** sfCat <- function( ..., sep=" ", master=TRUE ) { sfCheck(); .sfTmpX <- c( ... ) .sfTmpSEP <- sep if( length( .sfTmpX ) == 0 ) return( invisible( NULL ) ) ## ...it's unbelievable... if( sfParallel() ) { sfExport( ".sfTmpSEP", ".sfTmpX", local=TRUE ) sfClusterCall( cat, .sfTmpX, sep=.sfTmpSEP ) sfRemove( ".sfTmpX", ".sfTmpSEP", master=FALSE ) } ## Master&sequential mode. if( master ) cat( .sfTmpX, sep=.sfTmpSEP ) invisible( TRUE ) } ##**************************************************************************** ## Mainly an parallised lapply with intermediate result savings and restore ## on later callback. ## Resultfiles are saved on each step, where a step is defined by the amount ## of CPUs given (e.g. 4 cpus, 100 steps => 25 savings). ## ## Characteristic of the called functions: they are not allowed to return NULL ## values, as these indicate uncalculated potions in the result file. Please ## use NA or any other marker for undefined values. ## ## Files are saved under directory .sfOption$RESTDIR with the form: ## SAVE_file_name ## ## where file : name of current R-file or "DEFAULT" in interactive mode ## name : usergiven name for this current calculation (default: "default") ## If a program uses more than one call to sfClusterApplySR(), ## then name MUST be set! ## ## As this function itself calls sfLappy(), there is no explicit sequential form ## here. ## ## To disable printing of progress, set perupdate to 100. ## ## PARAMETERS: List x, \ Like lapply ## Function fun, | ## ... 
/ ## [String name Name for this call of sfClusterApplySR], ## [perupdate int Percent Update frequency for process report], ## [Logical restore: restore previous results or don't restore] ## RETURN: List ##**************************************************************************** sfClusterApplySR <- function( x, fun, ..., name="default", perUpdate=NULL, restore=sfRestore() ) { sfCheck(); checkFunction( fun ) ## If none or no regular update frequency is given. if( is.null( perUpdate ) || !is.numeric( perUpdate ) || ( perUpdate < 0 ) || ( perUpdate > 100 ) ) perUpdate <- .sfOption$RESTOREUPDATE ## Ensure destination directory's existing, if not: create. if( !file.exists( .sfOption$RESTDIR ) ) dirCreateStop( .sfOption$RESTDIR ) ## No R-file given? if( is.null( .sfOption$CURRENT ) ) setOption( "CURRENT", "DEFAULT" ) ## Abs. file path file <- file.path( .sfOption$RESTDIR, paste( "SAVE_", .sfOption$CURRENT, "_", name, sep="" ) ) ## Mark this file for deletion on (regular) cluster stop - even if the file ## itself does not exist atm. addRestoreFile( file ) ## Resultfile is present: try to load it, check if variable result is included ## and check how many results are present in the file. ## If it seems that the results are ok, take them and continue at their end. if( file.exists( file ) && restore ) { ## Temp global var for saving possible loading errors (in namespace). setVar( ".sfLoadError", "" ) ## Load in current environment. tryCatch( load( file ), error=function( x ) { setVar( ".sfLoadError", x ) } ) if( .sfLoadError != "" ) stop( paste( "Loading error:", .sfLoadError ) ) cat( "Restoring previous made results from file:", file, "\n" ) errMsg <- "\nPlease remove file manually.\n" ## First check the contents of the file. ## If these don't match, do NOT remove file or overwrite it automatically, ## as (due to the weak filenames) mistakes could be done. ## Commit removal to user (with message). ## Variable "result" is loaded? 
if( length( ls( pattern="^result$" ) ) == 0 ) stop( paste( "Result variable not found in datafile:", file, errMsg ) ) ## Check if variable result is present. if( !is.list( result ) ) stop( paste( "Variable result is no list in datafile:", file, errMsg ) ) ## Check if variable result has correct length. if( length( result ) != length( x ) ) stop( paste( "Variable result from resultfile has different length to data:", length( result ), "<->", length( x ), errMsg ) ) ## Set marker to NA. startIndex <- NA ## Fetch the last non-NULL value in result (which declares the last result ## value which does not have to be recalculated). for( index in seq( length( result ), 1 ) ) { if( !is.null( result[[index]] ) ) { ## Flip to first NULL value => means the first unprocessed value. startIndex <- index + 1 break } } ## Complete unprocessed resultset found? Then start at first element. if( is.na( startIndex ) ) { startIndex <- 1 perCent <- 0 } ## At least some parts in the resultset are given. else { ## Complete processed result? Can happen in programs with more than one ## parallised call. if( startIndex >= length( result ) ) { return( result ) } ## Message for user where restore begins. perCent <- ( ( startIndex - 1 ) * 100 ) / length( result ) cat( "Starting calculation at ", round( perCent, 1 ), "% (", startIndex, "/", length( result ), ")\n" ) } } ## No resultfile given/present: generate clear result with all NULL fields. else { if( !restore ) message( "Restore is not active! No results are loaded." ) message( paste( "Saving results to: ", file, "\n" ) ) ## Resultlist: init with NULL in any element. result <- lapply( 1:length( x ), function( x ) NULL ) startIndex <- 1 # Start at the beginning perCent <- 0 # Nothing done yet } lastPrintPercent <- 0 ## Calculating list parts in cluster. for( sIndex in seq( startIndex, length( x ), by=sfCpus() ) ) { ## Endindex. eIndex <- sIndex + sfCpus() - 1 ## End out of bounds? 
if( eIndex > length( x ) ) eIndex <- length( x ) ## cat( "Calculating Indizes: ", sIndex, eIndex, "\n" ) newResult <- sfLapply( x[sIndex:eIndex], fun, ... ) ## Fill cells with new results. result[sIndex:eIndex] <- newResult[1:length( newResult )] ## Intermediate save of current results. save( result, file=file ) ## Calculated percentage. perCent <- eIndex * 100 / length( result ) ## If message about process is wanted, print it (also is a connector for ## sfCluster to show the calculation process). ## Also catch the case where mod rarely matches (if amount of CPUs is ## bigger than perUpdate). if( ( ( round( perCent, 0 ) - round( sIndex * 100 / length( result ), 0 ) ) >= perUpdate ) || ( ( ( round( perCent, 0 ) %% perUpdate ) == 0 ) && ( ( round( perCent, 0 ) - lastPrintPercent ) >= perUpdate ) ) ) { cat( "SR '", name, "' processed: ", round( perCent, 1 ), "%\n", sep="" ) lastPrintPercent <- round( perCent, 0 ) } ## cat( "Finished Indizes: ", sIndex, eIndex, "\n" ) } return( result ) } ##**************************************************************************** ## Complete "unit test" or most of the buildin functions. ## Mainly integrated for development, but can be used for testing the ## R functionality on all nodes, too. ## ## PARAMETER: - ## RETURN: Int amount of errors (0: everything is ok). ##**************************************************************************** sfTest <- function() { sfCheck(); if( !sfParallel() ) { message( "Tests only work in parallel mode." ) return( invisible( FALSE ) ) } ##*************************************************************************** ## Basic checks for Calls/Evals. ##*************************************************************************** checkResultBasic <- function( result ) { if( is.null( result ) ) return( c( FALSE, "Result was NULL" ) ) if( !is.list( result ) ) return( c( FALSE, "No proper return type (no list)." 
) ) if( length( result ) != sfCpus() ) return( c( FALSE, "No proper return type (wrong length)." ) ) if( inherits( result, "try-error" ) ) return( c( FALSE, "TRY-ERROR raised on result." ) ) if( !all( sapply( result, function( x ) if( inherits( x, "try-error" ) ) return( FALSE ) else return( TRUE ) ) ) ) return( c( FALSE, "Result elements raised TRY-ERROR(s)." ) ) return( c( TRUE, "" ) ) } ##*************************************************************************** ## Checks if each element of a given list is equal to a certain value != ## NA/NULL). ##*************************************************************************** checkAllEqual <- function( result, equal ) { if( !all( sapply( unlist( result ), function( x ) return( x == equal ) ) ) ) return( FALSE ) return( TRUE ) } ##*************************************************************************** ## Test a list of lists against a vector (each sublist must be equal to the ## given list). ##*************************************************************************** checkAllEqualList <- function( result, equal ) { if( is.list( equal ) ) equal <- sort( unlist( equal ) ) else equal <- sort( equal ) for( res in result ) { ## res <- sort( res ) if( ( length( res ) != length( equal ) ) || ( length( which( sort( res ) == equal ) ) < length( equal ) ) ) return( FALSE ) ## i <- 1 ## while( i <= length( res ) ) { ## if( res[i] != equal[i] ) ## return( FALSE ) ## i <- i + 1 ## } } return( TRUE ) } ##*************************************************************************** ## Compare vectors. ##*************************************************************************** checkVecCmp <- function( x, y ) { if( length( x ) != length( y ) ) return( FALSE ) for( i in seq( 1, length( x ) ) ) { ## If NULL or NA, nothing is to compare. 
But only if both vals are NA/NULL ## If not, compare (throws exception/stop) if( ( is.na( x[i] ) && is.na( y[i] ) ) || ( is.null( x[i] ) && is.null( y[i] ) ) ) next if( x[i] != y[i] ) return( FALSE ) } return( TRUE ) } ##*************************************************************************** ## Testing sfLibrary. ##*************************************************************************** testLib <- function() { ## Package always be installed. if( !sfLibrary( "boot", character.only=TRUE, stopOnError=FALSE ) ) return( c( FALSE, "Unable to load library 'tools'" ) ) ## calcium is a dataframe. result <- sfClusterEval( as.matrix( get('calcium') )[,2] ) ## Compare if all nodes delivered the same data for variable "calcium" ## get needed to avoid R CMD check warnings. for( res in result ) if( !checkVecCmp( res, as.matrix( get( "calcium" ) )[,2] ) ) return( c( FALSE, "Wrong data delivered..." ) ) ## Load surely uninstalled package to test if lib call fail safely. if( try( sfLibrary( "xxxyyyzzz", character.only=TRUE, stopOnError=FALSE ), silent=TRUE ) ) return( c( FALSE, "Irregular return on loading inexisting library." ) ) return( c( TRUE, "ok" ) ) } ##*************************************************************************** ## testing sfSource. ##*************************************************************************** testSource <- function() { sfRemoveAll() ## Find path of the installed snowfall Package. res <- NULL res <- try( find.package( "snowfall" ) ) ## CHG 131712 from .find.package if( inherits( res, "try-error" ) ) return( c( FALSE, paste( "Exception: cannot locate package snowfall.", geterrmessage() ) ) ) if( is.null( res ) ) return( c( FALSE, "Cannot locate package snowfall." 
) ) res <- file.path( res, "data", "test.R" ) cat( "PACKAGE...: ", res, "\n" ) con <- file( res, "r", blocking=FALSE ) a <- readLines( con, n=-1 ) debug( "test.R content:" ) debug( a ) result <- sfSource( res, stopOnError=FALSE ) if( inherits( result, "try-error" ) ) return( c( FALSE, paste( "Exception: cannot source on slaves.", geterrmessage() ) ) ) ## get to satisfy R CMD check result <- sfClusterEval( get("f1")(), stopOnError=FALSE ) resBasic <- checkResultBasic( result ) if( resBasic[1] == FALSE ) return( resBasic ) if( !checkAllEqual( result, 999 ) ) return( c( FALSE, "Wrong results on sourced function f1." ) ) ## get to satisfy R CMD check result <- sfClusterEval( get("f2")( 99, 1 ), stopOnError=FALSE ) resBasic <- checkResultBasic( result ) if( resBasic[1] == FALSE ) return( resBasic ) if( !checkAllEqual( result, 100 ) ) return( c( FALSE, "Wrong results on sourced function f2." ) ) return( c( TRUE, "ok" ) ) } ##*************************************************************************** ## Testing sfClusterCall. Allways first test. ##*************************************************************************** testCall <- function() { # Test 1 on Call result <- sfClusterCall( paste, "a", "b", "c", sep="", stopOnError=FALSE ) resBasic <- checkResultBasic( result ) if( resBasic[1] == FALSE ) return( resBasic ) if( !checkAllEqual( result, "abc" ) ) return( c( FALSE, "Wrong results on paste." ) ) # Test 2 on Call sums <- c( 99, 7, 3.4 ) result <- sfClusterCall( sum, sums, stopOnError=FALSE ) if( !checkAllEqual( result, sum( sums ) ) ) return( c( FALSE, "Wrong result on sum." 
) ) return( c( TRUE, "ok" ) ) } ##*************************************************************************** ## Testing sfClusterEval ##*************************************************************************** testEval <- function() { # Test 1 on Eval result <- sfClusterEval( sum( sapply( 1:10, exp ) ) ) resBasic <- checkResultBasic( result ) if( resBasic[1] == FALSE ) return( resBasic ) if( !checkAllEqual( result, sum( sapply( 1:10, exp ) ) ) ) return( c( FALSE, "Wrong results on sum." ) ) return( c( TRUE, "ok" ) ) } ##*************************************************************************** ## Testing Export Funktion ##*************************************************************************** ## testExport <- function() { ## ## Needed to have a clean comparison global env. ## sfRemoveAll( hidden=TRUE ) ## ## vars <- sfClusterEval( ls( all.names=TRUE, envir=globalenv() ) ) ## ## print( vars ) ## ## if( length( vars ) != 0 ) ## if( !all( sapply( vars, ## function( x ) return( length( x ) == 0 ) ) ) ) ## return( c( FALSE, "sfRemoveAll() didn't kill everything" ) ) ## ## Setting global variable via assign, as <<- invokes warnings on ## ## package check. ## assign( "var1", 99, pos=globalenv() ) ## assign( "var2", 101, pos=globalenv() ) ## # var1 <<- 99 # Global ## # var2 <<- 101 ## var3 <- 103 # Local ## var4 <- 7 ## ## Setting var in namespace ("snowfall"). ## setVar( ".sfTestVar5", 77 ) ## if( getVar( ".sfTestVar5" ) != 77 ) ## return( c( FALSE, "Access to namespace failed." ) ) ## iTest <- function() { ## var3 <- 88 ## res <- FALSE ## res <- sfExport( "var1", "var2", ".sfTestVar5", ## list=list( "var3", "var4" ), ## local=TRUE, namespace="snowfall", stopOnError=FALSE ) ## if( inherits( res, "try-error" ) ) ## return( c( FALSE, "Exception on export." ) ) ## if( !res ) ## return( c( FALSE, "Unexpected Exception on export." ) ) ## print( "GLOBALENV..." 
) ## print( sfClusterCall( ls, envir=globalenv() ) ) ## if( !checkAllEqualList( sfClusterCall( ls, all.names=TRUE, ## envir=globalenv() ), ## c( "var1", "var3", "var2", "var4", ## ".sfTestVar5" ) ) ) ## return( c( FALSE, "Not all vars exported." ) ) ## ## get to satisfy R CMD check ## if( !checkAllEqual( sfClusterEval( get("var1") ), 99 ) || ## !checkAllEqual( sfClusterEval( get("var2") ), 101 ) ) ## return( c( FALSE, "Error exporting global var." ) ) ## ## get to satisfy R CMD check ## if( !checkAllEqual( sfClusterEval( get("var3") ), 88 ) || ## !checkAllEqual( sfClusterEval( get("var4") ), 7 ) ) ## return( c( FALSE, "Error exporting local var." ) ) ## if( !checkAllEqual( sfClusterEval( get(".sfTestVar5") ), 77 ) ) ## return( c( FALSE, "Error exporting namespace var." ) ) ## ## Test removeAll with Exception-List ## sfRemoveAll( except=list( "var2", "var3" ) ) ## if( !checkAllEqualList( sfClusterCall( ls, envir=globalenv() ), ## list( "var2", "var3" ) ) ) ## return( c( FALSE, "Error on removeAll except-list." ) ) ## sfRemoveAll() ## return( c( TRUE, "ok" ) ) ## } ## return( iTest() ) ## } ##*************************************************************************** ## Testing Calculation Function Part 1 ##*************************************************************************** testCalc1 <- function() { size <- 50 mat <- matrix( 0, size, size ) for( var in 1:nrow( mat ) ) mat[var,] = runif( nrow( mat ) ) rSum <- function( row, mat ) { s <- 0 for( col in 1:ncol( mat ) ) s <- s + mat[row,col] return( s ) } cmp <- unlist( lapply( seq( 1, ncol( mat ) ), rSum, mat ) ) sfExport( "rSum", local=TRUE, debug=TRUE ) # Test 1 on Eval result <- sfLapply( seq( 1, ncol( mat ) ), rSum, mat ) ## cat( "FINISHED...\n" ) ## print( result ) if( !checkVecCmp( unlist( result ), cmp ) ) return( c( FALSE, "Wrong results on sfLapply." 
) ) ## Testing sfClusterApplyLB result <- sfClusterApplyLB( seq( 1, ncol( mat ) ), rSum, mat ) if( !checkVecCmp( unlist( result ), cmp ) ) return( c( FALSE, "Wrong results on sfClusterApplyLB." ) ) ## Testing sfClusterApplySR result <- sfClusterApplySR( seq( 1, ncol( mat ) ), rSum, mat, name="TEST", restore=FALSE, perUpdate=100 ) if( !checkVecCmp( unlist( result ), cmp ) ) return( c( FALSE, "Wrong results on sfClusterApplySR." ) ) ## As clusterApply only works for #nodes samples, reduce data size depending ## on it. result <- sfClusterApply( seq( 1, min( sfCpus(), ncol( mat ) ) ), rSum, mat ) if( !checkVecCmp( unlist( result ), unlist( lapply( seq( 1, min( sfCpus(), ncol( mat ) ) ), rSum, mat ) ) ) ) return( c( FALSE, "Wrong results on sfClusterLapply." ) ) return( c( TRUE, "ok" ) ) } ##*************************************************************************** ## Testing Calculation Function Part 2 ## Further snow Wrappers. ##*************************************************************************** testCalc2 <- function() { size <- 50 mat1 <- matrix( 0, size, size ) mat2 <- matrix( 0, size, size ) for( var in 1:nrow( mat1 ) ) mat1[var,] = runif( nrow( mat1 ) ) for( var in 1:nrow( mat2 ) ) mat2[var,] = runif( nrow( mat2 ) ) matRes1 <- sfMM( mat1, mat2 ) matRes2 <- mat1 %*% mat2 for( row in seq( 1, size ) ) if( !checkVecCmp( matRes1[row,], matRes2[row,] ) ) return( c( FALSE, "Wrong results on sfParMM." 
) ) return( c( TRUE, "ok" ) ) } ##*************************************************************************** ## Run single test (with given functionname) ##*************************************************************************** runTest <- function( fun ) { cat( "Run test: ", fun, "\n" ) res <- c( NA, "" ) res <- try( do.call( fun, list() ) ) if( inherits( res, "try-error" ) ) return( c( FALSE, paste( "TRY-ERROR on testCall()", geterrmessage() ) ) ) if( is.null( res ) || !is.vector( res ) || is.na( res[1] ) ) return( c( FALSE, paste( "Hidden exception on test.", geterrmessage() ) ) ) return( res ) } complete <- list( errors=0, warnings=0 ) ## @todo - Bibliotheken / Source ## @todo - anderen Applies / parMM ## @todo - exportAll ## testExport removed because of a R 3.0.0 warning (not error!) tests <- c( "testCall", "testEval", ## "testExport", "testCalc1", "testCalc2", "testLib", "testSource" ) ## Run tests. for( test in tests ) complete[[test]] <- runTest( test ) ## Print results. cat( "\n\nRESULTS ON TEST:\n\n" ) errors <- 0 for( test in tests ) { ## Align names to same length. if( as.logical( complete[[test]][1] ) ) cat( test, sapply( seq( 1, 13 - nchar( test ) ), function( x ) return( " " ) ), ": ok", "\n", sep="" ) else { cat( test, sapply( seq( 1, 13 - nchar( test ) ), function( x ) return( " " ) ), ": FAILED! 
(", complete[[test]][2], ")\n", sep="" ) errors <- errors + 1 } } cat( "\n----------------------------\n", errors, "tests failed.\n\n" ) return( invisible( errors ) ) } snowfall/MD50000644000175100001440000000212514530647156012440 0ustar hornikusers814ed49ab3c8d743ab2820624fbc9885 *DESCRIPTION 1ca6f22ce30bf544334fa0dcb8ab1094 *NAMESPACE d48d163e534e0d5c4de94fab2f3ddce1 *NEWS 0b51233b7236ffc20c8fc68ecd6896d9 *R/clusterFunctions.R 20f95184799876bedaa62ada9c56b335 *R/init.R a35e31fe9533fc6f5e892d04df46f352 *R/snowWrappers.R 3bc4f0aa308c788e8a463e00054c5feb *R/snowfall-internal.R d33347446f21ac8b53d84d4b09ef3f15 *R/socketRequest.R 93c797d506613cb993847daf7b3afacb *R/sysdata.rda b0cd9e2f371fb685637e09aa43f1c52c *build/vignette.rds ca74773497d5ff4ad4ba4ecd641b9e97 *data/config.txt.gz 6f59703731bb8165ff2e0b4de528c8de *data/test.rda 0a19586969fcac3238a562b5b8a489f3 *inst/doc/snowfall.Snw 2d5e14393e8ec84364e66f75ad1f14c7 *inst/doc/snowfall.pdf c81c6d6b643b77aaaa91d02de0cf6919 *man/snowfall-a-package.Rd f7de67205fce4f4738aff1e53e712fa3 *man/snowfall-b-init.Rd ccc4dcebbe555aae1f5d2474989e4c8f *man/snowfall-c-calculation.Rd 0858e802fc8f509ee61339e944b71331 *man/snowfall-d-tools.Rd 2fad2ee0868985596b79c766c33036a6 *man/snowfall-e-data.Rd 41f93382f016a0396de56fcaf069cfe7 *vignettes/all-bib.bib 0a19586969fcac3238a562b5b8a489f3 *vignettes/snowfall.Snw snowfall/inst/0000755000175100001440000000000012254301027013066 5ustar hornikuserssnowfall/inst/doc/0000755000175100001440000000000014530626457013653 5ustar hornikuserssnowfall/inst/doc/snowfall.pdf0000644000175100001440000033031314530626460016170 0ustar hornikusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 4350 /Filter /FlateDecode /N 92 /First 776 >> stream x\[s۶~?omӒg:qb/qNuH$i %(J90A`] Kd3ŸCIaaJkfU9eeK!Oיc &xzɄxҠ0 [&ijc*)'2&Rs´,6)$S@h$43Fa6i1j3$` e"2wLO*)j1!(SN T@;` )FQf-Vg.$ d0ʨ@d`ʼn(S"КhYr&^di@ Y0 n3FWAgY)`r- ḲBUkYsj 1b)ڂ,7)F#9r8mZ@6pdhlA9H;@]ɋFc'@+nx2xgxsΒMسg4̇Xw+0#`)MѲ$E>'d<%_ D fmKf2G}/K2Hy= mx7~h g30_Oo Q%}ibr"xq0HӞ&d 
2.RA[[HDOԋE*L<-~S]SLRR2y9%INǿ??c/Mqi?N<5\O'u* )d +(E6E+buh)hn#Z/^]xoӬhX8d-ɭN!0>.t4Ǡ`Fb-ë 2ts0u o0E:ɈNꗌ,b__a ߢShr-hձx׷׬S3BV d4'wZ蚪O;/n`̡vƌj~F}rE%yZ,$z-zkeiB⦛籧aVEYPY"j{uOZy~p D72Aae(#205u"eW%V—\|K*+LuqEjQբn(9=MXQ:588,U`:o2=H'ӉVU%`o団u)&ߛ?2zӚMN/)(SiA,t XeWX63Hkz(V0ITMC>c@I@C"OIVXW!uQ3ʻR-Y $*Sȏ#uRjӥVeʥ1o NGTj.sc ?:N^x#*9]03WɵIowK<_cBǦ$<ϧ(ɿގk%/%i՘ YXe173o(Chܰ|0aׅ;Yg ˘G^Z O* o2ϛ,5B[FګS[eDnyA)4 wpLLܲL֢lYYP ҦAYeršR,޳NWfKL[ ]*F y+i":ކ;Ք8)e<(8vk}rk. 2#RlPU.V_ 2]xmyF@ wd"w;lE..? 5Yf<b3yX/yjg+ 'MvrI1̛+SQdEL5uҖ#Pa>Gg`oS3)!Zt]YGtkM֐d:VBEv[IFvf+iJh&mVY%eipKb/$ɛ!i. gYDJ ek7FxVd6o3,v='yqm ||uN/tkIQ>|:>zD4]CNysI{gm '^j<+ !ڐhf;ڻ9}C 褡6Lňwܚ~?ZT&2[}OBGGb`DS1ٷI8&Yy "tIK ۙ˓gg!lx߬wRFo_o 7kjGJ*3!pw,_퓑wChteųL E^}BE઄iӬIhpLJ6wP@-2]$-P-p tEGBYҎ@Yt=-v圎Fv:svxU:| <>(Q+щ/C +^.frE_K.?5.Gn?k@A#WwNGKdN)ߦxP>Po-n#?~".O҈0:`u#Q~82Z7E3rèJ0]|'gF: sA%vP|^7蕙m+湙^礪YWد/v?cӝt3%G`[i:3*zԲJ5Ej"DP['JS V[H'o{v~֕Y5nSRqAOQ>_A2R@4(YcX7\U%\bYfV Ocy-{6|t>rQdp(]?>k^0U5{ADhC&G?KM-}qpUK@"VUORWᣫq/륈XU>?H!RYh35gL-_zqضƥ0ZL!jiEY )ýsȮ,e0b#i0Gc/]0g}|l$%L)ʖc෺2H9?9ڐK7{nCN<q_+%w> stream 2023-11-26T12:34:08+01:00 2023-11-26T12:34:08+01:00 TeX Untitled endstream endobj 95 0 obj << /Filter /FlateDecode /Length 2188 >> stream xXM6_T,h  7{wSf+L(#q"eD5kC|7^~YYNjw6w$nI)i]X!g`ug4v{/>_Y^h_*+\JQsksW>LN# RZ#|`!ULQbjݤEq ֫[-C?]yyP8%xm+ǟy+#sE7F1gZgt5|^b˖`a?Kwuo%.Cg̔awl݅e"/5R\˒=10+,P8_O/$s/ˬ|`tz67E,61|ne@} p0Kiqr'MMS ۋ~ M=I` #vljkد)xhb oҐwd}0zC6_X 8* Ý&/h?%O͸ms0e5 iR/2/H:(K1dI JQ ARG(C|24}hv{d #txaPcs9 G:Q )Ե&lQ{PfĺAh؃PySXWusʃչ>&\{rM0ZVJ)p!AHN]ۊ(Jlĺ 8ƺ[xT"j[؝@9!\GT| b2oD@8; ղ/12["M{_HJgZbB Z3uH2W}8H%'TqE.-GqIJ4#]Ӱ(Fgi8d$T$KI^MmsKhvoY$#8I]W e5μ..YdnPS&sk2PQ1B3)& bdD$2eV䥾"c!=5p܋Dn 08 KÁ9aRի9E0HGt~%"Bal}[ kzPVm[ɯN;jHNtْn>$ uo,P h0ۧKE5DEKpV&G9VkX))CQx` UZ?94:%в !t|,ǃ.@\D$brH_ ! 
ي^a~rBP!uVD5/5b0 P sy X1uTH-,%e?il,,^ yfDfzv %l,xM*iyOCoQs\N4/ܫ@L1]EPI ȯ$:kFh?݊*WH=TR== ?Txv9xIF82%4xmm2wVE˶9l6 Vq>dg,~e l@]h=y*4F._`ƶnq' C{Tx[pne(A[Du2esq`wj&eOߪW(w~2nqק)kv&;v SIPeA5&[ׯ2nSpYg")dߺ$򏇻_,endstream endobj 96 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2054 >> stream xUyp^YB]V! IB2C1u8-cX%[%ۺ}+ɶ,K,9 .$eR$iKtxr]$S,5p83mZ˫)Mt.LOm~ :9/%ʪJerEIiYBal+&`۰*l {[m6cs$D]Ni)rlRϧIҾ F?,JPbd%V $>jjAK|p5%_V1aMDjD8B:Ҩ$ߵnw7Sfzƹqk')8K(1I&n<:3E?'P: e(tk JZEZX`XVT-*WXY cJWx# ,o&w3LWdhS evtǴB{1n m̮"l{b"b75]Awp%!'mRd|GLdiw)5#w,Tcp^j@/B3]i:pH:NOgIצ$~pMd6/DC]8瀝ƃϒXVprVaZ;k^9;Yћ1$ VPUzbE rޢp4Dhoe{0Jؔ)MbopsowuOOxvM&ko gGcKz{} ޾TvhD :lg"%V*XWUٯ.u ]kץ׬. Ɲے,ay= əĿ P:΢,\&Y&PܒL#AvhN}$IFqCT< ?=ND֑`!d89ZS5дd#mc8ͣFC`cQ~v0)#l!VTL~*YYcfK} گ)BC &%nZcM⾳d+Q> Pwiw \>8oLjn"]qxd$Љ7ZKDȊ;:Θ8N} o\jk|&'<+zX?}®]'?y$”s%O$qk 18z8vO^ RWyTj v@I* =0Ht" |( W ˜N}q܉/D/s{x~Q.Y*vHCAw)š~ziv\sLE FH5ٌfjqG%hn (zZ 4Z}ҥp*XɼvrߞR&&pg._F[|hӆZ qd|ԌXI`g:=Sd\&|8Hd5zPSÚb`7 dp_$j?nR?]ݞp{;ɧ}\ejkT,/= b.㱇?jےK0P^m+U-=82lp%y4\)9u] I'fr=+g`4endstream endobj 97 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1893 >> stream x}TSo R9@msuexZ*uX߫X|%A$\ (&\E 2[}9u*Gq:Or.;=ws|##"2lꥫV$̚93<HH"ᙄMk'mF|krz3f&b%XGiDPQ"PE A-/WD)t "7)Dԋ1-ㅉ~&vaJ ۩LmY人L t>y}+Wb ]SE|;Of2XOA0TaqGRuy0t)}BWAsV秮հV;թ93ji-iI0O&~a>O(.aJ۸j;%9SЊuSrnrn0;i/֪͑o/; pӋg3kWX_]]3ggUJ'Pr0b_a q+/Q}k>DˆvnrYQJuI8TR71F ۪Mg>I /SOzB(iQґU3Tg'N+i)7ˣ] p <4<î?1INRo8twj:gBcF\pexmTG8`eF멌,kVI RxJvfHz7[Awi҄geUB؋w86_N-B~Ð+:V8+PPP5@NCo2 Lii|oQmyÓݯN@9$\9 ѠZړW%*D$R_IqV (uPPTn2۠^ hvʝ}M26]1y81]Q&}mcM͖f@p=ppVĖ]ls>Iݵ԰!7{ \BO5xɝM?O%Kh¦[$4Q{X-= $6ƕ=8Ӽ+&oʼn*|`[ɸ}y"dR6WYz6DX$ʓĈb}ZlWa g@B.̋YZwGsA\=(PH|3(nIt-Wt"xN$ ".hc"9:5LEO t_v7wIf1}:Elbz̬YS =^3  Kz'C*#ÚQXAğJ:}q0u^m[bi qׅR;a_?=}sZ<%̒8kY q`l" [p",KؔZصӟ:+kyC$ښ#VqOf,`K[G^k> 7T=7tG0%'CcF+bIs {5%ka|c"Q>%9%xiSM|mt,%Aǘx%VO;#.{O{{?~kBWoi66hrpL=N}/r,A4Mendstream endobj 98 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1041 >> stream x}LSgW !$.!ư87Ȩ`P@ʀ^ʥoˇmiPvvYC3زeq8 Nrߓ<IH$V_+=}cI]k81zq^`w\K@C]XZE(Q:ltQ"GкȦAuq !%)֋ W 
:5P&;`!=#-546'7`Ho@^(4 bZ6Z{Ln>ܘhWq:lFS`DSRY]iڀ<{RDI/"y q:p}D2[@ʘ ]JыN/Ծ^yC $t![8ji5k/=qEiwWO<ɦP C{يޥ {>ҌN3U|,IWc n8kc[& uTCVЊv576%,kc'+/0U)y%%(2y*ђ_y#ue W0uPQ)Y#b HjjRDCj@> Uxb*ٮ91uNkik|<}c1f@goG;}6qmsTDS OR`&{]z'}SV^7(9_?3{nrc9 rlE:^0C}97l]vƁ,1N/^10Ie`-[ ;.Lz:dxatI NZďF[{ iOO17 N˕V5Ԗ*0E|V'a!Ƚ5S-bPL=d v~_`X#)LN}6QsywO8syg;@$,?? !F5FYCCIIS|l?'Nu{#̊Fendstream endobj 99 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2519 >> stream xVyp^[v .7N:A!$H1uc0ƖuúmYOCaݾ9Lqc E3ibj yiWihtggg4z}/L$%%l\ ?D&;[; `R礡3/N?3H\J%RWp={Kv>A'6sD2&'V+Ujb "ӈ "/M&I<3.+<)89sr/xJ.VSc)kRSrD\:}ɩ׈i|;e.C,a#yXjE@^a,7,}4g2B7DYZ]'csrvZAhr"\Y-/ZsHu ]% BcdIWғXNFiɮ_8H$XtB % +m]e[BXұqApo@ؓx q/Go[Y&b^ڱ R5 t5<n;srQso{cTW] ȢŜ.,G. x:}'\$' &fLayo[AWI8OE)_@S1Su` ^/4g>R/mnľF֟9{RN-z fvB qu'֎6/Љ 6>m K^Ã+(*Kw9mu"6S<2 |DkC7Q*SVFaC{ξڻzSmeT5wbKhA C?,RZ[VWmɋ =#YânG8ľM~yeI[8 N\0x%-mnEaggP㕲t)' h8, xX[/CEj^=ig-M;Y푻(:MԍG:'\F:}KwpGEFGaC_71$Lf1*HNi 7ku tHZ -E=-G HJfh3k-vm]I"2V//Y[4C34*,D,kye <^k-|\8øi\Mm?w⧁3^;31{HB?"v,cкdO31x+.a,qQ:geZ-r5,=Z8qF' R5H<Hx!U5fdVPJI=v1QEG CߴXDѸ,ǔn:Q(@ X<+9g8~/s4JY:NvJ f7PTm;uEzO_S2`MF4"hZB\wendstream endobj 100 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5879 >> stream xX XSWھ{ݕ4NVǎSU(uE@B kBl,%@HXUĽj];*VkkX;LG;'O:3@9'~~'8n鳧O3 އ04 h{z|Z,L݈c#Y lbQN2qdg^R)WYn}ocf«yqoyz֗v_ h s].Iytz `x!ħrT攺jBa)6ܠrV.\XފG7q+$'nl8ބ͘?aWp%kcXl t[J6lbX9<܋C!v.\J xɨ,}:SD-_GfeڸPLAؓL`ER7'"`\X8/['p]n ԎTC G!!Ʈ"x*KJO.n<ޟXPh*ˆOc4뻿?9krhRPhc!z&Y˷xץ2#qqQ' y z$6l3 5lD˦ S70܅ 8d3n֠bQ9b@sLD^ukRw yɅ)`zݟ'.'b\5aFTM]GB*)laXE IЌqjIݟs '~WȆL8=.Q{uH싀N;ŅQe': i֩SZJLR.łmqX 9ǪTU<4YʳB{vpم{).5,H]cyT!T 2@d<+wh8~ϟҾRgӖJ๨fT$f%Pz (rwQ =̺y6cm?Ue?Srj` km j2FȠي`qB0;$sбF,ɷr):e`݌c=a <6oԁ: vXh^dO$^(-VM Ƀ_?oc(<0U<.&+n1kT@ -| <(MvXUVVx꼵>M{R-2%P"RjypW( 9Ԯ.E%nϡ}akf+OVK%I(֓͒6`&றqUUD#WA57b (mm`,u{8,lqFߖc^ݢٙ*ؕv$NҁECUջ((-y*㞷ƨ(z4&N`iyP,2>'oo:,텖b^WX)$2=d:},GE*T9ɲ,9ko}OEhζ`?H+ú  3\e hQenc:;˿+7рh_&\h pK;oʬ*0d+ѶY!48~+}bLCtVM;ntQLM [oEw4'[4),5Qlz;=YںF;saUbIƞAE/FɱHJ)Ui4Z\Wkp5WuZkp2~%J2xn8Hk6;5ͧ$:y!yt^QW*n3| '972a덆"WsQ oKfȉDeTυ?׸N=J=B_?2'5){jJ Ra#%(TrA6 
RkDvmix!'שỤ2,7FK*mR (uz$ߠgF{}JCJ}*uGeOQN[0,2uZ- 9\ޕ_1FFChH6Fϣ[l|z:yh,2EFJ^D(b%6&f]fBJmjↇPk`u;[+eDAF֛^EL?1遨S pavS\]ҙsX|srEd]Qɶ 9$?|aT˪hs!*LɨjdˁmEUJb`lX- 'qUhA!ڝ[d-{&LF)2E4-jkoxy07w1}8B@p]uT-R*|||Z ,Zs9s{#vn sSjԕVǤ1dy ߆oBԅBM![ҸW6/\K]"qd۵Tj{ LqZ}WK7"k;ka^-ݓH]W8"URk VGb`kN؜g)hهb( zc)2j"`qp*zz*+8&^jΨTnVgrNVϖbJO`TtAduX>qv]g > zM@S)ON{1}@Y+ˋƥqSnjPy9P4z^s;јhB&9O삃9ނB 7q7woat2)CKYsYDu2-_M 卵Z{qN="Mb1GԨ 1tSͺ AyD>>+Lຘ={L>YYYKh5ZV |QmT1 & 3JMy*SuiUSd=1^{qF5cng7T-qG|@4k$I^ĹO F/Ȱ% uY5ƣ/Ȯ7CUm ttLGg Xh"|y|JOJ.)cQY5Z7:zNu\QQ~]~.#K/Xu|4P.dh-VCkKhEkZ1!D9^8"p&8j)/A FoYMͧA>H/BZT- kkuh&D>4z3i̝!BmWKoެժ"W4J_[pNՙ,afn'_<荮Ũo3nN(C6*Va9'BB A+OGCQGF=g.u[^z+~EW_fRU)]fQ(OvPZHf,)ܙ.:w紝yu9t >9]/ ?%5}Jǂ6~xVKjGjwL/NgĭmFlF1r<)"['ᅨ2@{>@YXD_3oD ]endstream endobj 101 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1290 >> stream xkLSgO-^/nT6M7M6u,L0D7@P(-s(E :ЁMg3&ۇmf}|Ky'yeDB&=PTsgeRÙlX YK-^t}1Tܢn=~ D q(%.bn3"H>}3DX*z<+k\O,$g&e?3ͩYE{Ij\2wRބWwp^~A{xxeVp6Xu@"8B8$\ a /y.} gne} n!u#>cl꽍gIKJSUo5j cCD|rpI1-e !s_z:`Gb(ϣB^jZU4;i(As!&&67t\/uxje\_(XlN5>i}[O8SpWjD~8ЈKizhMŧvm;xX=mRTz#1cB P(˴2\*PX:k}=50#+܍7yyiUjQC j Qtun忤E OSa_]DD5L'mT48$ v>6 tV@ښָ<愢>2nI Y@rMԴDu%ńj [9֮ ZtF픢iں @;cUDhh~ZP,bN݋9ssiM]MGJ_vW<27 508ޣib vƩ )Ϙ/!CF`.OP.77vXYp"@[oO\NE .KƃP-mPkwyJiBVZ*ܒ(!44U>^3:8,פ2p8D^wo^rfޞ.^_.RwtXR"@=+֕@2u"׺Tx^4q@T)0> stream xywtT !sf(DA4A ГI&m&IL{zˤg26)A@^U𢀢+'n~> r޻k}:a3g~ywG]ݖZQF,?kzD]G+=b'7 X>-m532ff)%͚-=Wy^Ζ &.ںxI˒L]5f?ڀ _^S7lpވ^y@_@,"&/ċb D,#^"b%2J&È5 b8I 7,bKB&b1O#㉅k !!b>хMD}D?&("`nS$"xB=Ne_ _̿;W}i뻶~j{>}|x|[T:ࠋKY+7!\7tưg t a5zIDCO!2l)66mu{= J8*=e ғ*Q) PZ q"/TKūpHQq1O(J KѤE|m6 K]_`WxFQxCS|4rnJ ~CxOPL61ܒnHg29"[ Z>c&ɤ4er22mM L'A)>muz,YDq5)7xp:*[ V\WmZey0O (TȌbU?z/D\,&T'V'mZh x}.Abh?[w[U(3p]vg`?l).) Ur*zT+JJteZ?|DQU ~|7:90+וpw,GmI\uxQLL&fWV*a/QՉ6>NR I;{%SN<ش^;0? )Fz,j6=:8'z-@ 6#NP+rĽc6j-ی0 oaHjZ.秭KK[SiTz%e=Z֝zN )+ih2qRv'CC m'xr'lnZYPh9zM@qh9µ/}lNy NU=A2E`NcKw%;{vgS>DXJ@Y:Iɭ1НV0xH0Xt'r΀q%"e,ETmcӪް7ӰLj8іmy"=O7=a(l WED Ö:vQLКt:n׻羋l#KBϜ8[AI(֟oď(TOBTUj,'+r$px6x bWZmv`\ ! 
ܤ,JJbQJl`w F6Ol6[^r/t&ń=Ǒ- l lsR ,W7-Vs@ބFNS=8uke}}# rҡpI.v)_AI ; f&m"Zu>޻*=UW^ܝ߶xJr x0wƾcCd3jMm?&XV$6'RnXj6p*,8nqgxBkZC(ْJIt!^31m'h҄$pejSWu6Mn^aЫYg5MSJZ 5JMX>hB[sJNqsХ*뱹^zL?P b(<^ds<%чp1p KNच-mt`\b.&rrjI#N>ƅ֥'N7E8$m6翰OOVZ[CqD)D5RshαCp$+ fgb]dKrI?@^>,,"$RFKU0!4. G=<:z`(ܜa2xImt22]L8Djp4|&ũ<(h"73HB%ɶnŊ@8f}Nt:H9WHll:)?=G@oTHJ&Z1Xz!`EƓbCNKڲXq}DRԯ^d㸋A6t[GMdb6Y*59DR;pdo@!\0Qz#BJr$z~IbPʯ܆ `H-ɘoiPTFVS5U*Uop]Eٰ}R "/’P _:lo;[ ̣y"Fm\CD[MV`!SU"܍{>.-8t,QdStgDX;#*뤵" ΪU?oSX Ʋc*ًE"\Gn"lUU6M^j᎓p4lѭLT[`My)MR|bgA+YE N<RPFn/ɢڤ) h4_˫uik~5г]bwa\YG{nb]pa`wgŏH4yaԤkW8;#ulيv4Qׅ AnW _'; G}5k_Ns*skK &A$|smn9Iw0s@d7/.RRi] BOIqIIIq% D%yTfe"59B=!ē떕K6/d ?6Y C@ #(Kmpjԙ2PzN{QcGUr_4w\'X/‖Q,*0Ҿ@=6K#ڠ_4k#6D(ˊ* _v;Zo95@CU8 Cd1K]ny8m{[roJp8V[h'_*iйXR24*i6jJnJA3wV -,!nGְSJXy J{,@)alz} aܪ„ #6n$fY{tbB' Q'ux3>f 6edmRO! 4u-4"*ܽu&ǤVo(M܏UT0VJTڴ@ $ya j)- ?W`c\; o_-'SbUUPLZlJTuRbʟy^pA2KxLV`p0xUXJ 2 (ϩ/Ry}\` gGP'yRXv7f ήeZE$*$aC8-`N֩;ĺ_a%.n fsmH\WЂkh54Ϧ[[\\r.vJeGS)Ne"N's |4J VX:3ecx_hlTciXwcFr_AύD=uZ-VJ4xh9. & Uf Pc8׿}w׵[mUR!s( OY)!kK*ڽr=nzy=C=%`,y z #3߻YCW042܎*36]"Pl,gMǥRk RQ5݆νٴqV%>O{Xav*$xN7~xފ,usGHK:p?N$ <;~Q>ȄO*e6(+8Zslōr1 -- l)5 ۔wcϗv* E#j$?`_&W^ͅ3SFzGna? y/cәt}Qңgx$d@ŽQ\r7-zr, #VYp>=|w=t#`H7Yp$Ry6', )P7Cy-OQst\CeV+;eqkXwUsAWLj|Nm-x8 Že}_uF/x0DRkzs7ZHƃm`!Qij .=Q^It1pb_+`*\ڸSn܆t[TJon5'j'*l 43)ue C9{NS ?c&k*vYM*VdTFOߞT Τ ^o jJqTeRN|T/":/ΨI4BlE["?Ez9jsF_R2ނx>wۢ89SXϣƀ!@f6r&^l`l1,`J&1CN:'b>(hdPcW=r ts [7;7tEIʵ\yX 47+Yo k,wV]'9S.~P;7G69x\$ή=}s}~u7^odW6ZA#W<*5dkvJ:NT.SZLz=fjX}دJ]RwBv 0.)_z$%mܔjQQh\Zv1waG:LGS6 r͹ Z+ʹztTRudEΈ8w> 8U1ׇy9Hvz5|ҺPANM-Hbx6b%=CD?+賈डi{(:Rr:)O\0,t :cC,P'Uh'A@;')d%r*WbT仲|JapGf#|"n`O;E;ا(H]!fR32STXx89qjuf3oau!}= '¨?˝rmt39r3uQB+ZL;N ~arcA@'ۇJ:d23@ȶi'/d.}SPnqB/Eѫ|ܮm.;.?*. }M6YZ1&^mȒĂix$˶˷I[d v1aZ7Lv`g@]?XۉЭ_Ͼ<眳F*r[1ٜ k3Ibl5e6nrǃ`ebƒ<0>{kq؜Ug-͛x-9AKV! jm#pmuYؚE,G>D!f?|Ʀq)ր,\p.:yMV ɬ @k*!?oiβ*]N/hRT4[[`WځCTPCne,+2bӧ$L:L,KV%a3BD|^E6Eꃿn+ e8psƵwp"lW qd:%ñr <ϨQhhp4@+Qk-UqCCzD>~&c> lv̹-ndO`ʺڬ̌?|?E2lݞLUd@MT! 
PpBzAVA.M% T5_gYuftzTY5Bnkч V?nJb:אQ4pY46<6x=Oc=J ( 7m5:܌H`-Q^Hwn;k^X,CeE΢Bn˟zLnvmx#~ F6lߟ\W4FYE@eH23+$5eMcC4m3 u_T071xRrf{!K֢zxy@v{͘f܏}/cq5kT5',9+g,yQxJ-NQey0*ʋʿ|`PGu?}#O^1xLUj.`/^phXS((;g1=q>Ș=f7η%$L;O]Yoؓķ F=$XB)HNϜ@)HP_BEԍK9pOqz;ᯎXlǽg3l}Ǐ7,v'>LKmxƚgL.poxc $N-/ٰqS dVW5ΫӼxQxFF;}vqR2@*r,9gL N׷f{F~PfR@;cX"eQfnVG7s)pxg怱!LON@+ ҳEFEN*݄(L܊f4w"ޛLCzFD8m9T1m\6R?S0t ^PCՋ[v2ŮR[)xЀe̯H OcraRyxVjj+C4ZR‚AntoW fwIee Ɩ,:AlʒqF@Z{QN}S[RYI95%v u?|Ï{>3gh=qt{/kq3Jʦ:N+hh%Og40ٴNqz.ҦM@O!K|Gq|*6k;鵔wvLZ?UߌgЊ?3FàZzOt U_.|JPfLz ƭ!\!ŰǧG ~i8V MA-GN @4v+Cb @S Ј?? uv$hg)uIkֈdZL]. !;A1(s 8^Zڡʧw*q?= =:w9oOҮr%r9zAS МSh| =e78tI7Rc^=SkF8;80vqI&SKY-T= qWy?^ ^hl2zLmήsfNFp `_A6v-0I9 /Fخ"C^|O>-pt񢐈imm8V(MJd3f}~ж $*B9 `{#NRVeQ QyVMUeI83Bx%evZm, GuVpJv#E=E. /FqķY<^ ,%;.<FbX~n ul:1*;Yc-IX8EUjh :Z5 ğ3rIg6ZA[9^YvN W]Ug|lh 8hvoz,v>˫>]>V #\(촆.p7]0F?IW_-aW_z>::|R955U!q0 b2O>RB'NӭO y`QT>7^Y_y)z~.;ix4btw>ފn?x N3JmZ&̲"[ReyޠӲbC9DfJzl@3()J%= >tVmZ5r 2- Twjn}<$endstream endobj 103 0 obj << /Filter /FlateDecode /Length 1361 >> stream xYMs6WvJz=mqm98=0mC2I%ͿIH;U طow$}.V3<̞f> b\gg7TnjKgD@$L]1+T!ñ Amcƕ!ձ bVHJMb1u5wHi*b%j*iӢ#8!:D{&` "I| {#1"R*kۼ|#>XmZ2$a$0lW_P |w VǞ [ fMd"&TM^g x! BFUDQO#E13jFh-Q7"[lh%+Loc6BuZq?e"mEc@Q0\g0!o[>_9SaHb}7go&&z#:֐0KcUC}r=`A`Sr$K-IM:]l6JHCʜ;|}G&1!_HT)ȭ5lnjtzG;$߅xQ0hF\?c$6fljD1U Gã"T5̌Fh5hr9ҮWҐQIPLRLf$|&MOTTrƇHH0EP*_|uܭ5%ᤛpIB&EY2婉YFP0rulM񘆽HsYtV>yH = Plۛ) )7!1#"hb ^,*+OZGxHUǥ\4>#m5 fcVQ0삶Ub>ЯNx.˴XRqPO݈D}p5bt#yĈ7 ܑNրD@0Vݠ-16;seՑ~*ZO`Āhɧ-Tj1,VZ]*^:;5qGMMH@ Rthne'~xU^&IDc!n^DnK8Dw HJdo^ӼW`DMꉰoC7tq};,B;,}}]WkxMl٫G죭l.+2֡h^lٻZƒClХ:H:Tci7ebS)"?֍l,M e&$'{Ggo?M!3蔣Ǵ|L6W6|+ 4endstream endobj 104 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2684 >> stream xmVkT jQS/e*V-CPCy # !/# $"hQRZmkꭶuUҞp?흨;fu{?2xnA~k–,v>D8r|t|+G] /O/Lj驄zqڴ ظ!K\JD1"b.F! 
_XOl 6Kƒ'W d~w% \;0r n6g7O uLzyA'}IjY(7I:1Y=7{z00O^$m`c$ZQR ]1_y[ `r \/B7?/ALHl(k:5ڛ:+;U4a\U&HJ'Q:0)&=)VbО kj'-9JΟ3>]]|;`SR䃖Yڻ@u2̿D:RmRj*抔"87.>nWGhq{;?-sp{?!pCc,W-;(Z4 4E.gKǵ 8+x%Z=:୅>||OJs>+ޕ #`E(J\KV{gcK۩PKԖ@0QհBf9ֶzt{̨'}q#~XTID OH.Oщ k+5YsCF)kJ;(KHS,)\$jٚlDy]Wa\1@}j7"іr[UD-.;TŨ*VTUWAo N`:lNL&$e,S=/}HMTt(-w^1$kHFC+h$}ų!t1 ,f4]k'pSp zvx<1܈oAK>B*5LVgbvesr32:CmðW``e^nkm_i49<AEXCF"QP]%!,*$xxcQָu%<0l`_|=Pf5<-KI80O^虫 u" 'a4IL ]Yda`yE<܄y-6ksS9FzPWܴ=|Cv_D\G :.GF'vAT.cA,8I]!J^N2's{RbnB5! elcY( <e+sf| /UJsQ,^&ԠRSs% YK4ؼ:qJnwH>`baNB^UfkbEjV +e=ZOabxVFkW #eR2)'¼̈z`ZssIIo̍ aXk1>́L!\T/ %G!?0M`zh˴2ũr%;,30sڀ^ڇz) X4Xl7_@9ݟ`a 8*ֈ>!Swu?~g4켂ξ k{c88}M HGi#୻ =G{ 's>.HK@ (659pO,0+o| x>WM}rRSW6E^&IajZPPKѿ$bg}sۂLw iQaKJ[u_p[qaU$`ayt?Ea6YoZNԕ6*$\z!ut$Ŋ|NcϣũDGkJoʨ- r/H[,Xe=8z'-u欫x.u{br Iװ1ˑP  oMj]uYѓ:2hheem/B`蚪3>,CQ])pN%9%}zw9FQcD}$YoAnv,|cPwF%NCK Q]) 7ϜQ3$Y.=:P1'sV{qm?OCfm3jvi(iPeL%gG`~?<+0yjxʧ#8&dҘ.5./zrkB{?Ʃ{'﹏I+~E♎k$*:XlI%,&$Ϳ(=_ :;%R0tZ4qvԚ> stream xX xSe>"KH'AwM@YYKBJ5$|If^Іj ʢ23 .8rq߹?-03y?y{;f(F ]z< h'q-ϊ5^g'2ǼU1c%O~3EVځM!Cö3z |_giyf+2;?fbv3=\f/ìa1kuf=&{f2,f630")f L`&2/Fx350cd~ԏ:cuya'Rn5g~?w̢1]1/Oƍ_`pwI+'%MK+!l>Wѽ|꘼7ArfoK_p='E24:mPZ8X"V 4[ U+K#99l~ q.doZueq%nYruѠKT"[^%o&[!$]"$'B%͞}ŕM";Yx׵E$℟6ꢥcNx{ NVxa-&_ޢ};zw68ZvzX o!Sd p_r+^8')Fx|q~׃)m\븟'沂rC9oГL|wBG-WyR^PQ \}zcF_ɢ E؋:WI~]Yy>2a.u9t}-x ˊ.hAoVAqoE+`W#dzU 7T)yT.pklmm/n8kZa;9؟#`RlhV@HYrSbG99kbZ<:jh5 kFU'/lό+'R5H:zL.m!'霹SIWqkՠ֫?:N҄AfZ/"F l2'53M!;v\vldqbybz-Q';bk%lk1' 9KOɊW&@$D4Du5}gnڔ/.l.9RDWA)Z2J[6|yINv^:Kmmm7o*{3Ke4הܫ \,2Z@M`L|rA8d:U_-KD~rUV+ M2x%#%8Pry[UMenf]Sz[wuԷo9W"1e7bw\֖d3 xa)5Ř2Ex#SOcrO5)8s]qǃNj9م:|*_8Fp,nɲ+֟)= q\rI{O{gq4CV VEcjFwTJ"cB OP[qx HR:Wg=반-f2D_ʜkDђ spNMbg1pg_~2}IeAlz=c$3їhιb*4Jgm_]xt=z纍J *wHRNN hǰ&p^_G5*o{d61ő\sacd% aF)}PM =lk=#h*K\tsvhoVf8 m";\ ,=s| V8 UdQ Ew2M*^AregA~+2$Q$٦ ;>MWz}EZv4Ǩhg&:&nX 5*ѣ_Ս wzM~=\ZjZe6@AC2qِ>"Zjp ӭiON}I ,*_@3R1κ8U\`1YbR~X)(P+S( PRfj3Wl.˭6FXEG/>#|Po>P^[Ff{ ]WōTz%Irs%!Q~J:"1iԙxSk5{bH? 
7] j)]/ ]\m};pyL0HJ ¢YJy]il;I-RCm-:th7$qd&{}!|8{|cE[aT䏣MȤ{6\{n-b}qNa.$C~"=.pG e?W%OqB7:ʿa* n}΢ g 5N,'X(՗B)?P7B=%Y Mlmo8ޖ#~'<$Q$@Ĺt}.kgɳԖ[pdۘ/ anka>L'pמhx>mhpLxr>Fw"sW?u= 0~9Y#2WK\DIpS*hAm6ܮx;\^cO%;-?F5B od79'=AArd&6Hj y*VU4EGJhtZY hLZz9Xy 0hJ&P7{?̄{ZSy;ir}]e}_ؓkG/2Bҏ.Q wv.C)ɒxU)&:ww \w y 'FwH:4 =۹As~K摘wv›&A,Ϛ$$x 'hN%Y,ժkx`B3GhYn\L\#4iecDÆDSr8C/^oj'ctmwڣd3X_])$*&Lq[f>LHSޮ.:H-N/@Eji{`7,>134'rS*ᗼyp<2=$zަ\I) ;iE`ZTh-4)Qç|˭A¸Ģv8FI}ZûdjK/d̖")z{zFRIZ~=khyy<q$ºRjߺB}K>/MrMJd۰DB["[)!ʼn>dZ\.]58M3G `9nZ+CV_z[PzSF)My0gj8^|pL8O^[S[&0Em@^ln.8Jw$:Ct:N,7#`A@J7yb r6˵3읇xqo| }юI.mjd]>!lf8:{[q zpT/ 6֬n\_T\ bZ jlU,Υx{T]PM2#R櫤͉}`Ӛ<h'odwS L٦l{P:8eTftnt-IuJRWѧ&#pФ)N\ " es6 ~pvbKv$M<'\%i4"'YVqs|[H,HwuBߞwT4:!|sf*ʐ8\^_9\h1,y*F2e= Fuڠu,eȨ*.)4;RaȤ̀GQ7qrmB ?| o?w]qpW2+?Ԕ_3r/+RiUbEN$V PU"0Y-8 B-C鎣VVە6D8> stream xzxU(q`tl`AA "{^^HB HwHQ) b-kss])9?y$OdY7zzM;ÇsyȊM}Aߞ0o=g+'TV=fRm!udx4ɲwf,bUsV-|#^xᑏ =xǟxrO-{zgU=MboaaOc 0l6+Ğ&ccaS4l:6<,DzXca4 `cJ vv/6QX1v6 #]YNcS=.g $fHg^]>C+.]׻ߕ{޼{2IN?`y|~(0>x߃z=$)z3dpq="M>գ%^{lc_<^ޣݙ!J?,ʥ:΢@&*NU9]fWA?cΘ/@7ޢ ~eMu\gU~3QC.*._[vwd9 E2vrvh 0 g7+ιit RsS>8lb7۝GNm)RaHdH!N>13䅽wӬl!!Te>Π%CDDU WlυoKWѷM`q*n(~ 靼rB(׊F`185I7_?4 pZ!͜u !9#߶HZ-~LddyEP革UkDL )I*I؝ -i3G.m۹TB{FP"lIjB, J,(4D%)=c #v.kDjU i -__yԏd#oIJ1x#*KaFZmኤߟ'ϣurN}n[ E ko 8 ON=TLrǥ?~yl09TJ l?nz`޵:]fYGA=cqueVeW]9<ɾϝ1uP|0!<@RkU T^P^'0s)(FgUٍT'Y-/ſ>rI[C$,P} - յB=]ָ*&;d_u6׭{`?Q|>pVakyŽh%OEłE;hhւ*W[eNXp̃r7\RiL*U5jqVef;{k\fV !h=jCL( R6J[&V][9sAkmA6ц[:(oyl=*,HEҽ` [LRbWsd'3 %fN8R4l h"L+RYPB&!j4Jܣ3 Oz_089x7 i2+vB*o>,*L"CK{*g1x* GWjp*sոO&<, ::/eTȶ^~ xD,c}~'  E<qd_kj)82VtT}3Eu|{DH 5OKcF@hu!<$R7*pH a|ac˹`[R(K4Ký?CEn#bh M08hn(AQñ>-nEx$ e +ػ= jҊ;@P0tV@6zCQF#  M)Yc "2N!wSO8ɕ# +҆Xц3jWUoL;Mt^谭%n; k9G5}uV>b.SnN+VSt5ǯa֖RZ% c. Ɠo_ (}ZE %D +Ui(M? ,jZ}tǘZ J4%oGl.+wP(R[ n)f|3bq>dlIC<-p|.X7^{s/#dp+ ʠOUµ2Q0MDV4ʒA:>k䤚 o5+`1|in2Hh .FA)+]&G}ҳ^OO$o9ҕueu3 5.] 
%⍜cU%w }%w e8׹x~3#h Y ' @4YxKBƤ1kzԬ^k̗m[+9 #:IjOoF+>(/VZV H[LsXΜ&nXlz~ D WK#%YV2 D[IkׂJkzǏsrmI _ȱDYRo{nۇ:;"%lo@ BS5_౨;{E̛ k@2L%R$þԮ8O@;=ŮP[T@EN9yOWB<_ӚR#3oLQDKǒ[Y_omţLJp 2 ӑ9o K >@u0 .ΐDNe< TVSِsyp:nfV7/>4g3l%l߇Gw-@LH\r3b ZX\/ر]iu}_`3Eײyc;,̲Όostزv|Jl6S,KA2yF`cž.lhl]6 40d`&xf`Br"ⲾӺcszKVR)DQW6"ͨod.< DJ?_fS#a Qu,62 {k|KBU9i" 9hW|eY]RД!Sw0#b|N߽'.oqw* M2`Mq~P/_@vtƷ;7n3pbZ'^e͚zX.Yߤj`&O+jX$Ħ+ޖD4k lí;|>3{z4`eImTnXV8W}cvm<7w} izws#/Ymdg>d7B#@8&nٰ@}==`C캳IPu[/g >j^; +Q tk7;qYHrڬA7mmf>'ִYKR;v'"]8;É2Pn8.Ѐ-RnhUbUoYGbZmY_⏢(7#Q~zI+]["6'_e6I6ku Fqr8u)O ue"  ^qئ8 qis7>QXy2^ %,2ĩ Qc45џm( D̼HTuXصX*!zdOAmtp uLyYؗf1ʸƒ/j4%fqr?j;v -@ޮ n[tft .}b4p8H·ϛH=b}FBV3x.{ҏL?+2QPuEEPyp`-f1'v OGG֞ lm f cD2A]OȰ ^C]aiqr3(Zð"9!)D@kAW#-)Tx&0p_s򨓻oztvW+-V6zևRT e8 u uHl jr+,zދ]{DP WLS'3YT Q7 ( /eyMPJCU8i~~[H(B:3BL,$?K9J{9~!:_df^w3+h[lkj;br!:Ďz"O+Rjb;_#hA 'b8ݞjs3q{ٞ#RY_vz95n}NsH! _Wt DH H D5n ȋ&O|/ 0FkP{ j|'`_$~-۳dVkgՕԄcWx_>}aO6)zP3U dc]awO=7MݼY:`>9iG܉3`/i4A'b3f_g̅ʹ5Kז$ ס}/a;6P)@)y37.jGaޠž.yn$ʐTU4` |@_Si ڀ.lP$0߯kDN r3j](Ju<6Or%]0J ~YM%Uu?xHZ 3 4bYPD+o!M[qKo4]x-n6;;^TϾv )%#gAW=F E.yl }@ f9n"#JT&/Gt ꁎPkU@ 1mHlbZHY@~ms?0>ICbt?osnl(y5 YH)I갞i7*e͚DVVJ5mH[ rvf%2TIjAPkr'"t;6qM|3@,A$bBph6-+ChVD7R\FZ.9d@FP h fQYpィP.\+Hjpwm |@F*$pJi53F R(1amaӺspjUFG&j)PԾQhnN*P]p]x}-wIcfa4!})~o_y3!o H*2hCj_oz amh6`Ur а-z]0+ J5 6) ("OZ Od۾MCx~g@%M%ŠKlQuR ۆT ;XV!;>Eq8 E}8hqv{$Iߙmk7ҎZcx+˧֔.5HR^p}vN\x#?| K1B ~{Fm|?kTYDㅆIv2e[JѠj*^{6ڮj!٧>-w^<DY)c]$y 6 .6Z6T#NhLxG %jTjjv ,KXLeN7 mPXkKxeERMVTNe h7>vӃ?N?c5{>s'=,qgج"62*hi'C* 5F]-LëPA$ JPI'oGy+-E~QX7Nj56_E8 c 珮" W%^|d. 
x ⑮)⧗;ؾ*|$I]eVXXIqD97}oRKphg!7u:'jg^hYi,DXm3W wamF\^/ 8VS]TJ>V_mOʠLYKRvul{䁫>%O mMh1پq}{a_endstream endobj 107 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1410 >> stream x{LSwo/?7_P0Q$:PmFAd* RNmo[B<*0#œi1:5Lfl]<篓|s=|!d2[7m[>; `f H"bfe =D6Cl%$vseD<mݏacǾu/uz5Y4.kqZ&rڷĄ՟f @|;r< ?R B(0vPF`K-!?ɸ!E7NKpHsL=[z G9u8wWb fDTLc4.x-,q=^x(QG'UePA@-{jRSs8ac,`E+棂 :Zi  \5Hդ鉴 iL;vD.pIv%xNk$7Ody.^y5=贮Љ L]IAt *+ 3TDWYJU35ՠDU.<14p;c%Uk4ϽZs,_H+A Fw%e:IGCn85Ti-7W9t6cՙ6-b#Q~>7Ku08Ԁimh>_fS^l!gqxԩmQ1TϪjYm*izM q@,YwCMۑD5ӂO5I8-@~ÜЖN(_շJm,N&8{дkLjd;I ![`ws#ʋpLN>( Y#*,[P~]m[K .@ (flvk,ƪ$΢ĸrRZjxX&O>g)W$%.qTnZ-4_Ex^JHS6d ,54FRxIF-ފo7 L8endstream endobj 108 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4636 >> stream xytSeLoJU\ HҤfH $`5UjZ%K$bpl &!4 ͤ9r&^d>d:3sKH><;7,߸r… soZϧ6aL9Ʀ[Vw\UrMՎ;7loxL&[++/[' ([ {MIQ2ًd/~,[.[![$lllISWekde? d'{o?y q|w-{7gEqC/1y{U^ըlW3P,eK~2l>SMq TŁ׸ j͵Rin1Bi|/:ZuKw4/_ھ[,۸'c*tצq9UۀrE"ADJ??_(`mdU[U[_׎f\F/5 A?俫'%zu> *R|ţW;OLP0$ 8ӝ !&k* \n/ec<ȧSSy <~'\#}4}i>z)Q!*yCC@M㑇f9D"Mj<5s/Gb v"2'n.'Q!SevH-j:הy5'?^?@X^ CQɿ^dquMg[ e+@󀵒u w w . vkHKVhYFZvg ^HAR #]^- B0 Z- |qODNԭp5Ku K;e(. 
#>'ЗD@|ȃw@4OTgafVt8]Ajd:+M-*UC :c9OѨW^^UxqfځriH؜V>dSNDR$1o|T>qQI'ᮝzeGuC7)8m^nǎnأv0-)>KήKm}gkhej˕vl0cnj"}&4O)|}U;ʉ_G=\pa_ @kʠU5SAh#@&.-u(kz-@J+oAϠ/EWs$O}?$ks$$^^aAKoBK9/ ~k+Z̠-yhs~H7m(jQmyxu n;TIvG"A \"JOa #r |(H{4MBc1<d,vZz[vjhq(g&g(o E:ݶNCț/r>LWթVkVULGw3b:^B0j ZB0Tm\-}4Fڣ4Z%(y`h)@a!M9 d7Fcm*XMp GGGsRcQ˸Y=uofi*4i]AU?ћbuZ˩ٷUfj(p)o۞qQCfHy )yO涨ڈV.OI ʪ#'}POC@=&\;c5Ѽw^փǙܕ7vmPb—%Q~vRׇ+""(a[ mڋ]H޳nw}H ǭipZ|ޒ)O;YUPr_X}yHv1E\OדSi=c2*ڟnRC'z_;Ȏ 0tFMKՑQyI(ojϪK-Wa8w< 4,5.#hAg2J*͡{?=f<їHqx[S_~:M/BM![tx~ nRp641Fρz9 u$SzyiRij {~ ^w56Т WֺSu&`:5NE ܣ\Jh_oX /̹Mcb7kÛ{b;OX?8 ~<F̠o6˶ohX $Ml dApjڥC@RHb v]w]U(Kp򚁖p#lUiXw5?^[eDV446(x0R~v]\H{lv;NUxcb8"^GH!x$)W\VV!=tiKM*m&u[AkRcv8L$h  ?*1cDIt1遷|fwG& #y֧/ٿ+|Зb^R9uxi'UmnHih )Z[r%cvZp/| ֔r; ] aO^4P("!T֛Ux4e_TFtP =?*YX@= >KL; EmCtX[zViT"8E}rߟؔzU(}bMϛwBY PK ֏_TDmbۍNeU z:ɄT@t蠐a7WUc2K2u/d^t,3/b{>"@yLE֮si)Km/ݵ'EqVn,%m@zfu2||hT0 h&|b/)A"dtZ:ܲ=/|0DOGD=Ѡm4DG3"磎 |({KˑI`v8yݼ䵎55TEEqJWXك*"Z3iGDq|][-MvJDV[ 5dEO @\wܽN'SHf`}|U:DRRo0Ey# 7ѷc-^?SR{ /"U=p|7>

ZIΆ0dKzZiܱJ 97}s$4(!>\h ]Z')w0VζyB耗Ay tѷSx*r(x=^{\.eN.bJ|췿QXiNH?WV>İ=$F˕WL:qJ.Qv5DZ ,֪_4x+3 p/qocSsZehYSg#Xhd0!E^(+^Ҏm$V5*C' IR3*RLb& .uL2endstream endobj 109 0 obj << /Filter /FlateDecode /Length 2511 >> stream xXMs/)|cr:ɖSZRv I ?fRw]-D=3=Yˬ_J,WWͤɵq*]IsY+mꃐ더ERz?ѯuxS۪m,C&eET 뷫¬9ͫqcUY/ij:z֡[obawJ_,䰾uez^Ef2/ /K4B~K-FVZLMG ]ܩ 瞔Nl!(k9+^Qt^Aw+t@tz0,CYр(-+'O;ۉ-t (W < v Z jj"xFaicd h}5 ua"-ʕ7g )" G/pů3V)2m 驇Id~aʋ{z ukjk07,{,c;1MLE]]|JV,@$\E9UpyFLȱY9E>]w$.pa|y =!{Z I\nLf#qS"Z~+X!f !k]dLO-)[;r.é?Az;ĜZ!Rru_f#,TԟTV3(~}^i_%8 azֵL ia%ΒteQh*x{L|\_D5W/kk; MmEj#֨\4Pd4 װP%X;SѡATqB˺mƑXTF@[ؖ{Vsqb0?Y9s<밶asP+1j9gz`?8ѣ ?< sE8ӌp ћ$hA7(?rGD]5PYZZH!FteQGNsOe'_;!c I(e%ݯaWHsqnJfpXf^_̪|.0ݬ SS8:շձz7xRw˄w Wq UcΌoZXυ!ƕa|^bG9GA8GY$HX.at}|RT&iAᙟkLV C!ݫTC[_Ⓜi~u뫯WsxVP8/B:Wê~\$7+!vv~Ѯͫ`c2&J=h3LPċ$me|S`.-Z^JU&xQї.Ű#"7;]3NCss[d.p1ef$ +H >ÑбO=77@7RS̈́gd3 o3ۢ=vfVqk3N&:f2P)%tAJ/5eF3EX(.A_VþiM~FoR|'-RB`2%>$%TH|Lޖ'`0xU3Dn%%6|=Oz2[ny_TB$ocCh^00~(T9endstream endobj 110 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5191 >> stream xXixS!ޛBr& (ȠGK@i-t:͜s:0,ʠAAQAR?B9ssvGt~zZo9 ^9s?O֐Fsa|n`,45myzfp֪iLlnܮ7 ߜu7ޜ6}ٯϝGQ/Q* R4j3Am"er*ZAUTGGͧPoPBO HʏPQG"S ehFyk\Ooo)v#=~_7֏<1|Ա|T؀~^1bek?Os$1sC)q{ޣ|1R0C`2Ef {n8 zpTݯꦼhJxZR-PW9Eh)xJq!o&[ ")#B,>;H%?Wԟ"F@޽# |qûܨM ^] FP/rlbw.]wnGHXKbrBX==b8w;wq:?縨_X=8sWvڍc#?w.g'y`;r`qJ`\ㆳ]]&V)cSء OKN/ `WevG kzYb &xVm/P,_ښ6n-D@3p٬qEB9]L.=} aڡ{gTWC`v?[P?є+0: TA2ƩsSRaqNi!HeK5(AJ=; F/<](sZVˋV{AW p+Xq|}F~oMN2E^@1O{Ok="EDc~\?#LZ8ywq\CN/'\&X4Jg o!h\e}M&lxڊMdVGvyeq%Q4g@ Xq0}\Kq( _VJXL+Wof2sQ3<3j=Aai>Fj6 S*#Nia-ϡMm"(Sv^s;~@ZGX7^$f*Uo)au &'R Dȳ%xr θa٫4x 7zO1ɬ20fOacZK/~c0: b{yeKp*'α[OhKNjד ioaқKX-)Li$ԐUF=nG|4羃#1j|y /f/\gC -&Ki?Oe Db\s[TJ2 vV 5ڤ.`r\m)E fSòLέPhפ"Sw:Yc`=jdʡFy1./uѓ_#qٟ/'W zq}<.|;D?1]iIYݹ;o{#tQrP0p+ Mp4=<%iWMsOn4R*HlKZV[ %kdmһ2jSw祦o9x;3@~՝P]g{K*JwJҝ^$GXpڋ$Ppz?F,*RK:U>7)vݑ`КGbJ?OΏM6{8R"B-CJ3.c R|u:Yߤ7`\H+JqOO#@L|m>xԻx&rz.(~v[l!9TVy4w .2]a?xo OmX!,|WnV3[rٹ2aZ6I$IMQu^]v۾kZPBr(gk7tF4z)|>=#_BoO}{> n<ʪ_ܱzP^w?%ֈU,Nd+Ahqhq^~8I'J$`~$tC PVü;)74dV 
3_ӊ!ᅛgd8k5zmop8dr$6zrR;Żr^ld$nB&[o\(נ),__Q6dmLڑnUe܂,XV4]ۨ(v*9LJR܂άn iYn\l9uœ1Mx11t۽,L&}( Cb!Gxj7pDoy^ž$"w%A0>-FwZZM,!΅ooCևpՖ=`̹7ŇU%ʾ4Uj} &[u2a ]Vp"cP4Vohlj[ 8J Ep ~@#ܦ4[)<:K:VcBh,6(+ueVső!+RravFQ HOPoט6:ü yKdjo28/%nt#1'w g=O_Yd^^O6W}%L)~ 7 ej6*q*C%VDeF5wt<؉Ni"Ϝ_9"iQ/ Gq"jw3 Ծ.;_Dcl$CC̗羺{4r7=w-.݁ܶm8-ߕ%7V5CD$5yDShHP j GƔIm;3g3xY<8إތČ!{@8dqssG><ƙ& ԢkMFkhէWY\gYb3Mƪ3xx';7,F;3ɭ>[mIe!I^.*$o%))?|~_@F~@sӘ#F3=óTZ t6($uU[@MlgTCW}UfŇ?\?#Ni[҇Ґ ו75wU d J ̊x],7]5b'katVYE\)EPlHQU&A j~c,u֗ .;HtsAMjEq7$Qے?OٺhtF%t6#((Z tPs]WYB^&=LiHnk4 _V^6[!y asNpőm$}F!m$Bg~BD<͘/v޹MT7a5x $Yɲ`|@4\S"M/wILXNfS 4@c}p$i;-` lw*~:y@iw@V8iiiʅ$^qB)QCr~z_av-^ x&4V]K4n53ND46-4мNdV鍖vIՂ^'p3:4PX(~Ǻ"/L!x,R(Aʝ:LWJxD_D! [YQZdTtAМ$/W'm"\(rH@X98d6:3awlGa!JTY+˽<]}^h,Q #&9wvRPHas< 6WLݐ̀,ߠO;vgg:hxZ:3Zʗ$>.z EGa/6w4vxcɢDN'E6]{0R+P lp;˚=Ndu'H45w߻:-_9Th.4o^z<9'L/)--YIht"&%endstream endobj 111 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 601 >> stream xN SFRM0700x7  4123}^9Nnw5UO95ejg~~g}{}inYzsty(/)|v9NwggptnmngQywx`e~{|zZlQR9Nſ͋F0$9;EHUWTUWWxx0Kim[z{yihhi$Ϳˋ (Givol9Nx> stream xa SFRM0600v8  4123h bkw799fih{|~cև||bm U|tt{[L1 4-bvv-w cgntlklpcSpwsv_f}y~{|XkNPbéʋZ!0=AHSYRWUX||xuGOfn^y{yxhDEDϿNj#' Khuqmbv<,Ϙn>IdBH|\VmN*He11J> stream xX TW't2VG[QVQ @B!$!}Bސ@Rjatڙ5uv̉xW άY򇬰8goj*""bWwl>>><83ΜxaE4ZЗQΡB?WgoH$o.Lr(ێ!LȲ (j:zNޠ]j7HPk':j9 *OFZOm6RRj+!{t*?eܹsk"R\D(1LFtw 25)ncus]_Z] C *qkS:%e[),ewDlj GW^^F;סMQ ^m/lƿބ];6oCJQAPuf0e"ڮ}шq5 dԁ6~?@rig~y ._4dYqh36r٪=5dR:bxy? 
G-J0`, —27n6<Ⳕ/sHd `\<ċ<yĠi_$XxDzx1 .@oks2 x ԷY)E)p5꺱z e4p@r=:?O뉇8Uxb>P"z--9T*Fj O[8^WU{/ %y+))Se89dNSJY g )@}h4e  Ed$#C9!8γvsfbCE0,EO# v$HV_Cfڑ!@ =̙k}; :5KS>X-3M{qx󞅇h*q-TPiRBF(D~GD F(klo8=cۮ*cGϛ|N 5GwB*)-mh*{jZ8Μ'9Rc`w&}s yPi*ˊ 1BN,Gs}FK8-/:uEaY%79ޤ:RRrUmnw6ǼPbC(@[(b<ӏ=5z*x_.ψcE&LX#jJ>Qt2޼ugXl.m s'XEOh #-8f9>Q FLK{2gW}_1Á.'Za^ [dCpv>~i]'u!"2w@oE{U%4{ ݦM7CBmF 9eM`'K%i4\+fTЖ^l+5Wۍ' o5&ZxkWX=DGGS)T< :Էb=A>M K<ɨEزkyVXhkxO6?psа5oB]fodo{T6B&_-yʲy;wHwJF5cuMdtB$R _-JiAK}у1|tT\&s(EN .?ܝ\h>™]'>6ʎe9ÙB}OK ̷˄_{wy[6OWv%֛a=kE-٘0&e"l\zSV푹_*B̬I[VL&ZxӄpHfeVM@omS&i/w:Jjb^*%I Ao '&,@eNQr<ݤS Z36_eF6$yo{ћN%,YeRP `3Z_-u{g5NyO'C\C7#;Cu6ɰ% Fm$@Ӗ" v:ġ7 I$QZvAId=;ICSY!̮ 5SC&tC]GL+HUJ3!ό`?hC^&~F(%dLȼHKd6. zL?IwZMؾxWAllzLZƠŊ^]خW;a ,J __y߹ 5?C[DZ,ȸuRFV*nR6V-fD-l2#>Gÿ .HKUEPʈ :|ޏje4~xa~GݍCt_qew-H lDy,;F5}$ :' t,?42 dtA-wlخMbC-P v8?pˡ3vi)7\~MaԚjk|6&zl^X1LMf!\ 5*utnv@;,}[ml2d WsJmH_$*.8D{Is֣)C'ﺉ:EOCG-mNG{wCGaR,QYf6wsz4"e`t(Cƻl^8kN,z q囻WdU*cNvI%4=}B~Hw{O I#OC}rc3[B N T܃{r]0ї3"3 A*(?[~aUk`BZ*˔dU!?$:G{ %g_ #xJdڎ:bj¦a59ʘ (%xkL*'7ȍD˔xsU ^ c(M7H~4TʌO :-(d'%_ YlGhfG>Un4Xyi 1:iK[McȮ/CxP]0Yw }=K(h]mKW&( ą2$.ҳR=1Ÿwa{x:Z"5I? Phy?ПH^cH'W/#E[6w7hVz_2,B+W?CO6Eyu?gSrݜ ݶ:WkfEFV^Z1cVїB5#*xj#) izvh {O)\˄nac.gDu,-Q 0Ej1Y>x?VhLٗ)RgѧM021} aA8[{]e1'[ f?RDl^CG et>Dc8iJaA8><O/lK>𮪽p\̈́%}g >"J ǐ&m'ǐLA8tg4AvnY'̔bT,\hZ}Bo0 ª"yJKy~vr5+ݒfIgIy yP=9r &#Od(m+wU13#xA_c{ f08 h7%rB"4% $=dHZ{SB'W١W _zH0%!w}szVaKBy-I=opwU嬼-9W5n[.M̒{"C^dt+ K"n/*{CcK}V5=8W<יy/w?Mnͭ32Nb*д~ĐS5O՝r\>o)j(8bH:E]x~/(){hF a(/ ? }DDTg4I)v cez7kdZ0p\*Z,W NBRQ[hjB+> # |dM6FapC̣N@kAGeq:['sӵ#| ʱvsA r/CuħjY;+)a&S'V^U[kjݶrpM*a4~IJo9m1?׫pd޽:V 5D 5vQs$a+.ne|R1+U^4 % E3yì7M?U$v9$r"rH GCra?F.?] 
%oECQH4 m(l{NjH8*e]WjJasl9fFI?9:9 0(`b SX7`tNۤ(T0\j 3Yi1fendstream endobj 114 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2689 >> stream xVypU1lF.nXD P  !$I`92W2dL=,FBHv DZJyQJXϪNU[WUWb("%%eL-[<`Џ K^ǧ6O:5< W(_Qߓ %D1Dl!#^ !^$L$V #cb 1  ߧJJMK5m7_z?V<E3=zT+e(k '3DaC,$֛,}+ NM,Z b!wWPp}jWh/)+.Qȃq-ཅ9o{۫tZN*ss(3s7ni"iI&'E-;; exnb8S)<:D7]%8H5=URy9STihw#wlF/[jHEɸˌ2i$+&м:sfCxd34MU%rY)ҟ%$| *xИ/DOO! /QIsYdǛḒ+Cjwv/c@wzu-YQl#Yk3h&~%}th7u@w_lbW<3|:~׶Ї9g&iG؋de2f_0Ǒf,/ &3+x Ýu .|ɫVN>D*h]YKK 4I мda |coad]q@TRTW$hhog$C~ZUj? hѸxT8('ve:Ahv{s=.ؕ)cہ*}r#x7`TQ0tfJucl؆ߴ:~S/{iȸ۰tMq|Vtd$2g$U1Z{ _5y~c <F$Wa`D)KȆ҅Ν@2A$Î0:/+Wx4V omyogC6HA|HYfusEϓ]Kj!K恰1i$Vɢ!rrr=껻ɴVVBW?'SF0]'M J\k丐1]5.S W*5L(ڸ1᪸W0x(;kH(.Tklf'cͶ24 ^XXpgi."CTn2d)C˵zNH]&bWIP͔Y-BsrqZnW*F$mFN4NX_H)΢DȵBk} "AO4pBɽUC.p~eơ?\uz]Uk@,D^ ʦݩ*VQW[K,Fh*ZMNI)Ce/F 15°hy4.\OEbI`t0'G0DEcE.+}j<Jc1_ (t /Tԕ dFk)B{5 tFNUDpT둱Q5aX<h?C٨yB_g'vA%/e)*K S"T*jEBS`i-;%_Zt\'ҝb)^( \gb>"al2v _k/-鶮Kūͯ6UaU~:>o=,rN "'2CN}[ʾ\-+/ۤMʈҹX&2%SD O  bwP./& MpV2XNR$LfoV^Y@Q)zEX?Y+lVxetr*UԵ=H}2؃&^[>҆m'Ϟo=ӯ 2"ȵ hE)$;j8Yɗ1[\]))ԟ毝3=?Z~5jȃ]?40^c:Hd<2aŝHoݐ^@!b_ZcK5Tm6Eax^nxqЅd,T+P#V3tǰIuz\[s rΰ5g& NGaF)a" ]Mz2oLQ6Z.` R~a}7l=QWYf2#{=__}ݾ  t jļgL?/T1a.ieJnS&gVf~ AG1endstream endobj 115 0 obj << /Filter /FlateDecode /Length 2539 >> stream xXM6oan,AتnUR;ljkG$&)#IiKht~ݍϷaoC&|monu~y%Youi&6 >o:N ԑRJjGgzyLgj c5^ƭ˵:u+46bSMn_c_~TG.LX1Ԧʏ4_j86&! `s%~Q``n6؋$C{Su$)Y4gQUS0dٹm;?7_:~^1[җxW5yUޱfT,,Z#}Qꇢ-}Uu'K2è5aC#P!)}O;.Jql(OuZH*Nv3 |5]n$ՒXZG:IK*T]6C9f8;d۔iL-NtmEFF 9zU9a/ɌNfg2/K, $T'V,(4C1Ե ,9 :' 77(sSE߿'X>,dkyR/DeR䩢[ba/ÌAY|@gAWǧ^BiaWW%KH=YqpAmm=HμcFL] CA4ioc5.01xG,S8sfԩY/)V GGy*z=Ѿ݃ȁy y(S:Jpn .uE=\1JXkU"&e Ә-fgc̆d[_ N6lF׻v?QаNHTtO_(EQF_×)0wkKjL8k\M820 VzϽ ::ˮ*ӼJ! 
-g@bKZ 8u+D2B*\:z#d^P <4p_P ~(~rdA9;a|PFe`%l!qb&@OEf>af[ jA|-witsO @(j#*P㗨ptHNZG:fXwo(@OԚp XtRFlư`7AEo}ydC GS%%0S 3»]Z&HILTa2yf?)H%L3!+ЁEӆZ7u_~+lDZާMώLgR6jA{ˇki/,j 3RfPlI]9"~h&P09aprr Ptr9&x"R3sƩ^꧍%CQۨe=wJ :6 7dO.DL6.`f>Ie>ɷh<@a=C>ͪX]_E?]$\ Ծ)C}uш)d6t&$cRCEs#L ,SA%ؗQ+ʅ%x͗~]Wp'^yt(xۇԦUY]+h%w>԰r rL:ιGl9؎{YK pjSsS}De=Un~)3LCu]߱Uci&龊"~J֋ df%if_fFixBZ`JQ Z=]%Wݔ? &_sz%WLyE-+]9K*䮭k[sxv~ru*A< h(endstream endobj 116 0 obj << /Filter /FlateDecode /Length 3161 >> stream xYKs/dLX 36o)k);%в(AZ"ytc3w*[߭]f]<_Vn[3R+:W:BoDŽ0&oj_1V] YfPw I5Sٯ |J>m67?AnЏ l4OIwɌMNG+cx_&viޏy)ض,6+hav™,-e˸N _T*'l5U9s8;Ő.6WJ/]Ԋ7n@hzVu8{#ʼnEN&'ltJ)T3ST2D[2JbG͸N>oB2weШdk^ CLE3i$6Z le e^|omfɜ'*AQBxoBI*nre+_I~/wF[# 3a|]OC]8rKr8URz,̂V7I&#]Cp5dc#pmisT~/;)z3rh6~opClCu\gܷ3d lJlX55m/( 9ة:@ߜ1ȁWsu@z9P^J!9qGפ"3|uj߶Zn }V8xR%I֏NvkԹ.I+>C0mj?/w_1 icX16X^lW}EibZReҟv&M7>PQD,Ȕ+v2fH苂YBP~ =8WQqpwt18`A/~Zt'<08I?(,'~ӨGj,vBN\yC,OYΓ8Fq˾~|foCp*Y%\8ψqh&' ,K"A稹Z;yWAꮅrn^cAbeJסN wIx6 F H;y4ΐ d.2~xB1}㰀 KCy;px*&%0 0LOe%"ӰGGp| Rv16UhMw>T< t~k9>Ⱥr=Eb?)YEt49P(4 b EWň@jcДkWqGL}-N4T~YUO;PCt/篑3,1^'510pIw"~OL=V; h0C p >Y%&YC|_Ci`I3.# [< ~ Kƌ8#a8_wH*5 vELogexQ},CWfkc\jr6)vrBYɬmB h.K9; ""on]oir$yXH[Mke0*TQځ1|ܗ@#AFPz |r' |s[ܢLJz~z>syM"uN9>Dž{eg̫o_h@¦6><1 q6 vhIzQj 0_"1 2[<9S:i#|l?k\6BK:!s3|o@Xs4c+&9?Ly!t(İX&Ad:睵!0~QQk[Xw`0Y]L9`O qќF0{Ӌ]=dӼ|:˩/ ]q]Yj0K苕\(VB"$B}9] <ͳP.VO ,>0.QPKiZrU$\5P+sJ1+kt:vu{Oe}B ,a {k"* (gu.ӠKpPT5F#3 D4Q˖]qڕ)LA7FLRᶘZHna "<7Cs3,fV"2:oajj ܊| VЈTpGl3oyjPP(,iW]0jX{_ךنLu ?rw {OSLBeS@Jy\.*j(X.ДuTcv] Vx%G(sUP﹎ݩ žl?];,}endstream endobj 117 0 obj << /Filter /FlateDecode /Length 2376 >> stream xXK۸ϯPF lŇMM9l6rɁ@cIjd RÈx4/<!_ ZgueUYgJAۏSzO?ba_]NOb*qjpp7Ky-X#(\j97+4j.U0wd%~/Hv+~jBKZcisX}ğE&Nh%B+0Jŕȍ82ڃTYQ3VyMqsk.z5^1]t. ;d% bs߹41H%7ɝ2n}s:h1e龞[`@XVU&e!C>fTQ鮟˕~#c~k yl1W~z؞]1V4WaOy qM49^;vۦ=vw%XW r+"+Z%GVkJzٷL\D 3>)8hi2S~x=晞1MACڀ:(߼ UcG,*ԀLR_ r uN@2?Kv?8Ip*OЎtW֩mafXeڑkedl@%hydjEhH4ȗ uV([kSXwgC,>ݾ!1Z@mER++~(a;|]vmVirCWojXTrZ7cX,$lpjF/K~+%VNE"=”A2χgO7Fc ܻ #5 "d"i~K,~#A?/Qc hmv. 
j˝,wD% ڑ׵2cmV*b.eVĥ*ſU  2bF,akaU>q}nO:#MvaUZ뗚Se+O :pf=.,`(b3iaA>c{tf*5G r-s*I 0*!)]AKQŠq?2$+UfuVPܨ5, \9ߡ1T@Yå?sÁN~VNդ!k٨ѱON϶cHI"XZ#@-p(?8=rك Go~ E>A@'? ~*e|Qfh:Z;D!O,Uer`OXĹ9eCO$'r^-=LB`Ap[+HuSnz G__ h(z!X?JdB q2@ vFXt5# q^Mh7WAK_:lXn-r7sl:m@;jIVm-[H(q*iI~g^ QoTׇklCxYB ";C=7fDz&*;:ukx'fwUy- ^~$cϴ)\^R쵘c#dsᱧ=qz]7 6ceM8>+*F964]O3/i]pn8.rN??p`vC c8OȫT%w|bcf1} TmCaP2Xة;|ag j3&5l'^u"IJE]B{EHגjSx'ܜB^@51̇6cK>[A#q`_MW!5L[|hK ڽ;pp$>S|*yKUBiʊ2eLy8q<ę 2.x^?C0/'S3%SK`?cendstream endobj 118 0 obj << /Filter /FlateDecode /Length 2388 >> stream xYKs/؇M*>uKUvr uw\DX`hU*=\[Tť~|uiGr?w{Ww.$]?+rKT_ZGiFJBt7FX6Z)v3X#c="R*%YDn(t$jL8t**2om_65xB\./Nӫ|#QEX?\Hm~>X*1ʔy =?HHzҌEd0@q8|xZ3[=]X?cQU-֩?ٺY7f>cƒW6rb#%2ԬcxmV۪؝O&F_\(_Ⱦ6r6yrY瀳\:T-'@VI{LX^KqG]\ZE?Źl@r<63rj*.&]K%F,(s_ hE=}ΫOG3waKW"k-#n8uzѤ " U?Z| pfA^*kBc3.6'|"B?Cds҇ƞ22''^] xخ7:Clj>)xy۲u:|B_B8uWi_^2|&@ZDkZo QK=iQ)[+5y  +~S]hPK@G7-%* QA>f-Z&b+MD؉8 (۪!sc Fl@)΁YvRN7IRx~.Jz:c0b [ƳYiˌKXvȨfUDX V{x]/xVS7 -ⁱd %PgNI uOQP&St0 { PMkzMOk.11S֊/v~E2RC4IĖST~uv\z׈X,eؘʾKh>-IJاP@Κ&u3ud}uS~_֐Kc8̅ffBڒ܄F`6FR4FN;11=f*iq=xfS2GXR[>1JiC.?AHBX BZhI:Pe@/Du6c*} 6*N=@-]%)-~o<$X *Xp{̷@o3 #3w 6Eqi}Ye&  xI , 3J, 7Ae@r}(X)DA_Rη}_݁-d$OWU. 
R06\}aR)!cSR=k Ϋ֌Ofi{(85?(\&App8 \A*u>PT#;-vöRy|ڒ#:4gQ!7 @@vwm#ሔ$VD!``E/M%x!4^M]sN'=ѱǓTx߃*DߕSE+I47t<¤q-xhxֲʶXyL'`Z?T00=eFD FZc8r՚N 402UQ"k"Nwr~h 7MDl-z5O_Ǫhdԗ3lZ9(lLUUL,4!€…`if_y(DkfeLmNSgBXnm>4ISϱfYc|ӻn@c,p1;bѝxpʮfvކ{#heWDn4iV߄lB:f4}<Q%,+vmQ O5{Dq jqͻr` =9?Ȟ|g!9uhmOC><0CC*/y;)-<7WO1endstream endobj 119 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 156 >> stream xcd`ab`dd v 1400q$2黲!{@Q!=?}>0c]Bs۟~}O&ђ >S{6us,]@~*}&3{ry%|r\y8T5Kendstream endobj 120 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 597 >> stream xKܢrR>d(JV ͩ퇷~vsέlVZ"Q ^~QTܭP ||IB$f;ެ\&cJy`}f {i(lxLZZuTJjk׆mxȅ05:留t)EKQe5YV7-I5&fW>}(3c }WV.i2a9Z3h zz++wp7V M3$KQT0Vr5²W#$+cy^ʗE`#VArs%`H̻ҭjo7h{*sj) ccCY !%2 Lplm^kzi& (P .LPs|"4dt!mg"r>"ncgɴ m%endstream endobj 121 0 obj << /Filter /FlateDecode /Length 2603 >> stream xYK_H{_&`B@$@pvs1!=c52d# WUu=u-+]R4v˛Ջk$wڭo+>DkvOI7;6T$ϝv>CWmT"s^LKj~gHiN,y 8!'\m7ɔ/8D70k&~B@xd~TPXu~Kʈo]ݹL'hvm=Y yМ+ϸ\inٹ+c]vN|blf/&U.%06}lW箙$7@[%Dގ6ۯĿ77Zܸ+s=LJY\]Mȝ~(oOˉZ) 2+z2C@Оҗ ~ F&& V) [r d.s,C4%u[#vX4O m-)~Zڙ$IDMy{}q9ķM;ܕ&#=\̊>5Nd4q87j -|T-kd+xbbqƈx d&qf|ypW6ځR/ 8]pB>h /+Jt '3bhI>:l*)OBhkzw$MfD 6FmjQϬ<7elP6~;޼PI ѳm+$%FoiZQO ؞Vb ګăOw?Կ$-PCZ>!k(z1%~g^/C~ \&KE{dcgjCx8Z(ΧQ]-koY /q>XB3H%5J}DeǞj&:՚|XT̡MJDo y$xh"%膸}xDGw!#WU5F뢧H$C24) )%=%e_4fΓ7]kFO a;5Na߲T<4eFf.EF_oEt k.21lP;vto/HD&W!p "hx~rEчs?2,Y咋a))й.Uq#CW`ҀjPc;sMQL#d" "YX'X֐tdþ-68d, p!B܁A:'T/zj[ެY<;OmWtOAo }2&E2Woo~Iuap9P2rC1x !a)B&q5KG+ٲBAc9'r,N091-1*ˏO-R+F2r&±R˼w1Ԕ՛pz5DHC] e_3E>Ov-O0yP *:SF&P;Gr4JN}\9UC_Կ -RjRZι0otGtGNS]a=Y Ym͍0NWtyM#G I(uTT]tcoXYw\=éѫ/I&Tr \Ll@@1F9hJ*7cjȔfx-]ܿ.J=h]ekU]j؁7 <qJ @z\iNljz?XG?d_+ ؄Y?`DA)ZN&0yA" _| KM4`Y| !Fj^aǩP@j >i(Q(Z(R*" 9o\4EcV`ChZXEJU,P(drWٻendstream endobj 122 0 obj << /Filter /FlateDecode /Length 1984 >> stream xXK_f˰FragiFrv'ccYc5 {>}_mmw/NOgھmVϽf;lRָ6Row{å_fZ{v YK^7as{t^pdžT7aGk=ʱ,yﶻ6g ~CZp fXvSώX [DUf~~h y_v?}M/q\wp;?CHw0"TgvW `mHw|h>HV]cH+XiMź9(NP]YGc+sY=J`,+pDg rWm,[P8v} *BBk!r* 7FbtnJ{!D?NBC< teU<1RVC#ޜE ] پlbsaB8\4%6"^OOFE 
P=s#*>HbյSJkWU^]ϛX*y/AlxcuUeC2aٯpd<6:[kcΐoJăO0p6\^ƪ[X>bNeL(%m0X:;%Y!1h'Cv~<YHP$B/!riEKP! R.ks%M2π`(,#𚃹Lcε`T[UEyaˤvEUck}h>5VԾlr~w3HST`DB7u`G{kʁ/"Mk ;]`$C6`’~]DZd)eh8.DSXFy7؝N+Wq1ueS!i)>yI3YhjIP0T}(cjFy"fB @qX86p1J 6EyR 'r}gOeP0)gZ2⡁YO@{VН>ݪiѓGE`0C?ohIՆvAR.jR1G_7} 12߮YM },=`&l| &dzdFz"ėrq k}26(9 R|%'D~q! aᄲ7=}ZEwKѵ+jNDBA)l($3CE}a~sïhhܪ KC |Y߇1]pU}G. 7U-alb82ܨy'>75Yߐ7ovh{endstream endobj 123 0 obj << /Filter /FlateDecode /Length 3152 >> stream xY[~Ao+s H 8SwWm d@KԊD*$egAvelC0Lrf̜ws"M"ſx.o~u߮o~o]w7B.3va<ɕ_7?SCyXTXʓ4ͤ/WZR ?2/R-WiKUܜs+·>Ou\KeaU1졇=MZFg\ܬ؜7 </͙%z`rhv̢{dR9xp(jzqe ^6~}s'VK *jv$XQDZ|XfZ,hqZ%%\՝K'id`-MN dn \+V H/V>xnpixU Z ?Mhzy.J&IX?>;pxt) ֋erV]48-Gce0p. (Ɇf6J}:F[x^z;=+ ݸ+z4ۋ{ LFFc|-{zW*It5g;aW G>hCݙ>hiBdj% S@d<rtǰ2o6IuYnAN}Gi \^Y@[v}Ӗw&pWu)M s>ɣ1dS[~`՜;/DWSރO6SQ?Pu*vms qzEۗ s9Z*@v NË$g7]O)A7UxmS x>#05-dᐔ"`3ߣPqLp+0~xXFvZ;M Zܟ Z6h}a1^Mѿ;:JR m+Irbop:u>[t_zt.` )!ugԝ_kr9lr3@e;ft;* 1(%ΓdϭQ8$''c dք@UbeFCl1feH۪%LPu=T6XHƤwFL~9f9Yy\S6m:;@8F&l/m42F j3#7E{bu*`a$Xr}u1fD"e1 UHxRe A+`:+ '>IF'nV]T%(}T?b VS\|QWd}lcrw V}_xuyb.<cI/zP3:r)!qW!ˬPz.q04IǐrߤPч+N{I#vStMmދa[0ADLu8XgTimt=|vb=^9lon]JiqBe$2eF t" jM<,̅ё7P[F\ekJZY$ i"8 BNmtRS3r{0Sj&Ɠ^ UP*X7@M.H.rVzO(b,0Ckj k^ GJ v$K`*8fC~v3 uAhڅ \*UWP#膄ޣ~[96rM⚞Ol<-hAԲ3-cHKUvP.0#J߈U@߿|s*/Xv%^$ggږ] ޲onl?1L($ejE4gc'J1s@~7LW\z7nFזřYb/(Bs2vb#$&a 8O46gߠJVZ#Į1@%Xfߖ+sTjP`孑cOO7y_ }gȮ(j='seRÔ,&D?heF JNFV1 U JD*pbзTX6q(ϣvo[(0_hDG{rFWt(dg2^zMmT'piHynPy.#]/5 Śf,DΡLUтˮ//.`eAXQ}GEy_:P{=|i]4Ya/*$jmփ6&E=r2>$#7՘Pҏ&}9vx#.ɼ3S؋)Xʭ m_K#/{6'l3j+yy`C}n }6ETM0r]{^)cX Pyohؗr<,qp2x-)o_]endstream endobj 124 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 154 >> stream xcd`ab`ddds 24caaa]^~= h GGVm*]?EDwzwgws>ֽ)ǟYr9luoG:(f7옶AO|>'q2endstream endobj 125 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 142 >> stream xcd`ab`dd v 5400q$baa]H~= Pׁ? 
7~H/(>GhXKdiVB~FM|w> stream xYI}-P6qTR=nW*5΁-Qj\d.Ӟ\eǩ>4Ioi")fvÿS{$TxK}f]?4Q9:Xumr8eTNr&4ũhBS4Zx8Iju.U&^f ˕bKr8SՖ0D.{~IyfsФ2AzƲɔ˟ M0VJ $?w^L{ (njy(5bD|e4ė|G@ծ#觶X`9SXl:~e1LMX]80E0e|*\x`1p%WZ+xg]uL ӉOr0|ִ5M;wu ijy:6i E#BkJ~ØZ"| d&w4Qf֯/kMц'~$ClWC؋,%Zm^k=BdE_`eI 'z( p|RMfZӔ/j͝I1E?.SA3{U)L͢SJdh=yhdP Ss'ݢbۍJg^be] KIX (3nc2&A RĀg-zAaX@dqdOq(2o2IgTCDجi.E<ۿYZI用a}~Nl8͠BVp A70Dz>CHy @bZoXL뱑GRo T2s>/1Pu 0=XTGx:Klki& es`WQGD)o!f4Ğ5rAqkq %:˳\PeZ7E@qJ|9ڵisR>zT14u c!Z6%BV4,)a6"z*@"H"Dir+zز:V?B4ȣ^1k_E6x X|WT-0K(>Se9C564DOBTߠRr9 ɋsAezEew Qu+G"< .ns~ݛw-R9G^23%09ЦAF"ԃ j%7ox 8qRATխ.rU"@";2}TAbў Ȣ}yiBi/ ,u & hAr-AI}El' qW֯+Mן9T0*q E6b l0B(\MCA թ=Q|1 q;DXŬӅƊA?Tpkţ~g2n:\Kխt99(TPsjqC1G[3 *EIPr4_!ƒ PL>bb zn=|X%@`-f܁Xvaŏtv&kݵҁڰΉW>Bi s85e[q1a]'{n}nvfU[t_ޥB.^f씃SCf3fig64X}<4M:$^аD(<宕RMYM'&$B%5r Th<㼜H7a12Vjendstream endobj 127 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1362 >> stream x{LSWoyCp[Խ:cdNu#"G(">~m!X0̉s[\&.dn-,N]n|~9GFD2,.{ǶO߰a]dD/; #J|**5%&bMIqC&2E޼zHN!jG#.9¢*桎Lc@DG7mJMA\8nވbO4L:xVo=Gb5X%Vo\Fzc8aPCxmw:06#og4MI8p tmU@QfG ¯7>.Qy+ /UU4ʠZz{ӝSR}$/u|  ,0.+oNTa)~hbR,&FJw`MO\? E0"P1ތwCҟ/Vz.EAC쩂Կ?? 
"m6hX-41vqֶZFH٥/\I]&0 xoiم8JJpjcUF5gLYT$VרiǓ=%9&k&:Mik-9]y@rR cV![m$yՇ|b2D7Jk_D߹j9=Z5>^FT%f.V> dž3Ub#\hg-YGLݾ&&m;K-4iSyp /X z͂srRR.Iv89K)g۟I*MUǁ_HXUR|˾]-A%ܾ""o\NQr555 mǡ(r%{e=W)-+^,~كmf:)çv*lI"@UUϨ C3&KQoRW_;48ql-.N (].=Pkl[ Vh<9e2r*ZYU`gt6b{4z ɹ ]ȩz\CD -8}C!P=6>4 \Sm^k@.nGaendstream endobj 128 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 997 >> stream xuLeǯޔK3ɽј82ˢSF6P疬1F[zҾڃ+me08C6H213FM&O$|o>"Jm:lڵG *գo,n`+>ޤCaLA.P*N(-No_SG|T;ڠ>뗶1yS+5bhb'$w{l]|2̐1`w:9aN |:侜Y(h`t])aqToioIS7nǽs&BއID7q+ƺ, v$П߾ՇUջꝛ^v֌IxSŊƦ3YDO?Z#–jMU̯L\ l; +eb@B(;.m⍈~[v0r*p_S|oyy#g$ek-ebc)ߘE۲^/qR$zMʢI$ ʊxi8!8⯉/*> [ZjM\ '/C& @Qc6`!>d@q7M"ΪpL( a13}?pT,7 lJ5F\WHu>EcT|?.2: ,T8L;bVH ) 毤r,iXdSwp3:jZQ7rG :ܱGt:c=}EdhĤѮ0ǒ퀳-Row{K7Q_T6+endstream endobj 129 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1352 >> stream x5kLSgS WI:a.CMsn*N (q)8=}z9=Jm*!ʼndD[0ӹ\3ayV\|{<{DDd!e|֭[x^21bR,̒ɥB/d5uE b5Ad"H#ĸ(]sI*錊"v^Pͽ2.г}Æl!FHa7vafxZX`ausVQMڤIdJ[W{o[N 6F'Am~[(8كEvm=e"Pѯt c=L(v^KN*\ڎBoKJT`chw{8*/. !dk5Y&h5LN%=`.3>n4_i&^S|FzTktPqq]TܰB ^uo+,p‹dx,qqUIԩ TˊR3IZ}KgKaMM NZF-ZׯhWu99p})%P tƉGVmZU!_A(~ ZBc8G!GC{$[IPM7:>3'F%+7*62JҨDѼffg(+:nrfP AQ,: HnKA.cGiz= mvfz04aVO~zzNm~M)jQ;S]0Gg9ݿylakFs E.an{7%.@Ke 4Zs3Ӣ(-./5ʩ?=0w@':49+9]M#S~f; e2u)]!M (V #?l*r\-%#ة30f  a};k<=?4vCh[4:2P~npv9KA?tOVZ0M%O !51׈2(;.S)t0^_t fiDw0ǎ@YQeIqdğ|Ay&.o^G$ R6j^vuy n˴jFSC,EO^*H5ekՂA id{67hxg1'=V[7'Qg]J函N)-†:[ HcoA]S!Ly:!+Cf [h!#Ûv\FK-;,",endstream endobj 130 0 obj << /Filter /FlateDecode /Length 2444 >> stream xXrPs0Ѥ $,ÎiX#TEK5e YRux:,\^|]]BIvǛ7vne+2Wƽ!wRgq]J]{>%vQq"K->)s5T0o /+6U:owpJfp +'*4M#YqeH%VIg?{7Jhaǽ*q $y%qI+aTv{qei:ĈR~2`юoe9km~X-?NB&1!@%ISYRPF"\̅8NdRX]%]=C7ؓ IR"'Z&$?㹩x9"J}`\eT e( _5v\; a % ѹ.|-nEUݑӞHFl̔w#&>qF-idhϮڇ$꺱_jZD&MV0%?C'dT6kFuj\vuƓK2⻇X}'hͩw Y%*qk;u7 <ʍ`|nKC ۷».1(< ųd-_@'BfH!dQ`& :fE?iU^|yԟIvpY1!d>dF[+Kx$ҪdZg(y0!zD7vY%J_NV7gpS%綣`&-y2#vW'T/3G-Z[u Qy.:ӡ.M4ȃK*5zZ4$W4lj_q=zߪؔ쌤A Ay C,͘i!쀂Uwk ?TXyS7@WXPT~/IHD\^ls`Xi>E~|5J4o]hIK08ANuڼaFإ>,I~?Bm}z3 CwU>tW1aa?+^rIEá!ߚt--'bI,Ocaxr9÷WH¹m~9#6lk;%t8S:ޮ?u]k/eJV*d';X\ S5L/pWwA1 
߰|@\"_if'}z^nMh\5 il[~T7ˇ*E >=+"UW?(KӪ)@rs-S}, _ !K.#>OWFZN/V%e\D2r kyA{[B+RP9);sn.endstream endobj 131 0 obj << /Filter /FlateDecode /Length 2231 >> stream xXM۸_EB)!>IlUI[TJ<9p$H"e~xvt7g]F|Ze)_eoO7pU=;iWVkoR)VF穐z9|`"UsLP9sD* }6kZe],aI_h 4~_=_wT ]{S*6UsC\ l}6??\aeP G?v0"բܚ,R"F5x*2r}8YR]8?lð5>nհoCU_]"|p@nmݭR\Kp!bl qq;n-ȇo稟=}kaQ[UC[DiB#NKiIt:)%78[Qrsru"02w,!}kH˅aUxMW{o ]"\T+^c²aN\K%(2lT n%_ʭID<n!l1XJ@XXx"8›(xr~EgS&,O=_9DO,f"xh0RJ3>c$ɰіcQ \/Y1Q #<͹sj@5H:6jpʇ(w)?v&/bDёI 5 !FSBRbUsmמ `mO5(v>crf̿`&6F`e=Ab+0Zwm:w˭{kN!y4#/"}"tC(ۉj–"}nxC3SoܮOoE}У.|ӄF'0/Q+50d1@|gIf3VI+1@ h ~G NfwcpG/0@O2YL1,*_HxgS/$K..y ̎¤f$I NO3s~:e㕟/ejʾt,,YGB:*IC瀶xa6hRv\a&@pu`pԘ vVeM1Q^@-e`_ձ7k][5gSGC(f$ x.RMG2$4VnM@, _M,۩lH)@%Pt "yjyaKⱖ 2B:C(V}D'F6\w@Eͤ5ޱW?WgS b&{諻8gXܱ?A%W8N Dz9~Sմ <T"7ln ;^endstream endobj 132 0 obj << /Filter /FlateDecode /Length 2047 >> stream xXK6뾷t0ăxv&Nv2n=p$H E|X?؟ XNiK@?Σ#nͷ3Ns_.gnD:סND2_nfvDYIBn,yQF,׳ݼno*`pFQ*s :lW+79EF1W`d9>rP׊v|VEwnAC 2O`wivuU*Yg㌋P?͖~`U݅ *%1kFj 9[,dC^!#i-K(2MCAU4`fASƊݾ,VEg?dUT9 ^LKԨ1)<1D|D_/HM6B0K3=_@!2)B.aC9e\k2^Wxi*O'p&y|#&] EemXDX/BD陱xuW[WhH63Dԁ Nh$˛BUψUDثo)>1MY @ܛ#UF9`m?FŢPuI(- J0',bX _r4Nh5 T1eo ]ΉWPQr0-qj,w9ʳ0}8Anc'yp18oƞCc2 aS;hNgND˯-,\DY+(g3ԕ> stream xXrFa~1;NLl0h&~>Y =Ö1==_݃gIg Y2ۜxlf/g"af3n\lB٢:c/{n*i@e:gW7"vNe$ <=f&JIx|R2NaYUy~&7.amMF£Aȱ)[n.08HJ1ƃiBX_snb% 0UY(8D𛫃Jl,W չD}qPx4Y!0hĉ]a*9c0?>Ӎo 㝝vV3wwv[QkΗE>,v`FRUd[>N.65`65ň$c={~ˬ% 7)ߵ 26}]6b]yw1D͢Co"qA7N!Cm ׏mA@@0PAl?e1HSE6+Wd_蔷dCƱ/wUެs i4R|sr F@I$-n]%ԵMS+,;mxT59$/NP4KX ^JyTEE-r;I%aHu4>F#pOJmh<5OES\cW4YvM<_1葦0},Fr0 p lk 7L ڋg @* ׉4!'8߀(KZ 1jnvJ(RĐԋe1p 4wsQ&`$dGĉ:|xsp[(.ާSO*䫬JG-O'6p#`m&aW')EfWTIhWRlg*? 
gcԖ )FiSyvRYwBiَ4xcr&9Kx$ O<K?SHdT_eIu> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 135 /ID [<8d5308e870f7288e85e96f69f72c76f6><1533f4040456a33612eaf4dd239b9509>] >> stream xcb&F~0 $8J8 KlGyDr$ %DrH ɚ "UA$l}DFH%])\+ "EIF 61;`5 w1 "@$7ɴ*.3 ,Lհ̆v L2IO0 endstream endobj startxref 110350 %%EOF snowfall/inst/doc/snowfall.Snw0000644000175100001440000006503712254102616016167 0ustar hornikusers% \VignetteIndexEntry{An R Package for easier cluster programming based on snow} % \VignetteKeyword{Parallel Computing} % \VignetteKeyword{Cluster} % \VignetteKeyword{HPC} % \VignetteKeyword{snow} % \VignetteKeyword{LAM} % \VignetteKeyword{MPI} \documentclass[10pt,oneside]{article} \usepackage{url} \begin{document} \pagestyle{empty} \setlength{\baselineskip}{1.25em} \setlength{\parskip}{0.5em} \setlength{\parindent}{0.0em} \begin{titlepage} \title{Developing parallel programs using snowfall} \author{Jochen Knaus} \date{2010-03-04} \maketitle \begin{abstract} \texttt{snowfall} is an R package for easier parallel programming using clusters. Basically it is build upon the package \texttt{snow} \cite{TIERNEY08} using it's network and cluter abilities and therefore offering use of Socket, MPI, PVM and NetWorkSpaces support and can be seen as an "usability wrapper". \texttt{snow} functions can used from within \texttt{snowfall} as well. \texttt{snowfall} offers additional support for implicit sequential execution (e.g. for distributing packages using optional parallel support), additional calculation functions, extended error handling, and many functions for more comfortable programming. Also, \texttt{snowfall} can be configured via command line arguments, making the change of cluster settings easier without program change. This can be used to connect to batch- and workloadmanagers. Finally \texttt{snowfall} can be directly connected to the R-specific cluster manager \emph{sfCluster}. \texttt{snowfall} does not add an technical layer of abstraction to \texttt{snow}. 
But beside from the connector to \texttt{sfCluster}, it builds an extra layer of usability on the top of \texttt{snow}. It is not thought as an replacement for \texttt{snow}, but an addition for inexperienced users or those who seek more comfort using parallel computing and R. A further introduction to snowfall is published in the R-Journal \cite{Knau:Porz:Bind:Schw:easi:2009}. For additional documentation, help and examples please visit our website: \url{http://www.imbi.uni-freiburg.de/parallel} \end{abstract} \end{titlepage} %% Inhaltsverzeichnis \tableofcontents \newpage \section{snowfall} \subsection{Getting started} \subsubsection{Requirements for sequential execution} Basically, \texttt{snowfall} is able to run without any external library. In this case, it is not possible to use parallel execution of commands. All potential calls to parallel functions will be executed sequentially. Programs written in sequential use with \texttt{snowfall} calls can be running in parallel without any code change. \subsubsection{Requirements for parallel execution: Basics} If you just want to use parallel computing on your local PC or laptop you are just fine with basically installation of \texttt{snowfall} and \texttt{snow}. You can use then a so called socket cluster, for which no additional software needs to be installed. If you are just wanting to use parallel programming on your local workstation, PC or laptop, you are fine. \subsubsection{Requirements for parallel execution: MPI} You have a running MPI cluster (OpenMPI or any other kind of MPI cluster) available. Although snowfall is useable with OpenMPI as well, the management software sfCluster can currently only used with LAM/MPI. \subsubsection{Requirements for parallel execution: LAM/MPI} For using sfCluster with snowfall, currently LAM/MPI is needed. 
If you are using Debian/Ubuntu Linux, just call\\ \texttt{aptitude install xmpi lam4-dev}\footnote{On other Linux distributions there are similar packages with probably different name. It is important that you install the development version of the LAM package, as the \texttt{Rmpi} package need these files for installation.} Further you need to install the R-packages \texttt{snow} and \texttt{Rmpi}. If your program uses libraries, ensure that these are available on all nodes. If they are not present in R-default path (on given machine), ensure that they are accessible in the same location on all machines (for example \texttt{/home/xy/R.libs}). If you want to run programs only on your (multi core) computer without any cluster of many machines, you do not have to setup the cluster yourself, it will be started implicitly in \texttt{snowfall}s initialisation. Using two or more machines for cluster calculations, you need to setup a LAM/MPI cluster and start cluster explicitely. This is no big thing at all. For example, edit a small textfile like this one: \texttt{machine1.yourdomain.com cpu=4 sched=yes\\ machine2.yourdomain.com cpu=2 sched=yes} Just enter the machines for your cluster and the amount of CPUs. You start a LAM/MPI cluster using\\ \texttt{lamboot hostfile}\\ where \texttt{hostfile} is the little configuration file edited above. To shutdown just call \texttt{lamhalt}. For further details upon LAM/MPI setup, see \cite{burns94:_lam}. Note: All parallel programs you start are running in this cluster. If your program requests 100 CPUs on your private dual-core machine, you get that amount and 100 R processes are spawn, independent or available ressources (memory, cpus). For workgroups or larger clusters, management solutions like \emph{sfCluster} are strongly recommended. \subsubsection{Requirements for parallel execution: PVM/NWS} PVM and NetWorkSpaces/Sleight are supported in snowfall as these are useable with snow. 
But both are less supported by sfCluster (but at least a managed start can be done using sfCluster), so there is no further documentation about their usage here. \subsection{(Short) introduction to parallel programming} The general goal of paralleling your R program is to vectorize the data or calculation loops (probably with wrapper functions), as all calculation functions of \texttt{snowfall} are kind of reimplementations of R-list/vector functions. A good introduction to parallel programming for statistical purposes can be found in \cite{ROSS_07} and \cite{HANA_STAT04}. \subsection{Introduction to usage of snowfall} Basically, usage of \texttt{snowfall} always works with the following scheme: \begin{enumerate} \item Initialization using \texttt{sfInit()}. Set up the cluster (if needed) and the internal functions. \texttt{sfInit} must be called before using any function of the \texttt{snowfall} package.\footnote{The only exception is the function \texttt{sfSetMaxCPUs()}, which raises or limits the configured maximum CPU count.} \item Export needed variables/objects to all slaves. \item Do some parallel calculations using \texttt{snowfall} calculation functions. Repeat as many times as needed. \item End parallel execution using \texttt{sfStop()}. \end{enumerate} The initialisation differs if you use \texttt{snowfall} alone or with the management tool \emph{sfCluster}. In this chapter we only cover a standalone usage of \texttt{snowfall}. For usage with \emph{sfCluster}, see chapter 2. If you are firm on using the R package \texttt{snow}, starting with or porting your program to \texttt{snowfall} is easy. The complete initialisation is done with a single call to \texttt{sfInit()}. The main arguments are \texttt{parallel}, \texttt{cpus} and \texttt{type}, giving the running mode (parallel execution or sequential execution), the amount of CPUs if executing in parallel mode and the type of the underlying cluster. 
If running in sequential mode, \texttt{cpus} is ignored (and set to one). Without a given \texttt{type} a socket cluster is started, which does not need any further software installed and therefore most likely runs anywhere immidiately. This is the desired choice for executing on a laptop or single multicore machine, too. Please note, that on Windows an installed Personal Firewall may alert the network access, please allow this. %On calling \texttt{sfInit( parallel=TRUE )} without a running LAM %cluster (but LAM installed), a \emph{local} cluster will be started, %which only contains your local machine. This can be handy on single %multi-core machines. But note you \texttt{sfStop} will not shutdown %this cluster, so you have to stop it yourself manually (if wished). Sequential mode can be useful for developing the program, probably on a single core laptop without installed cluster or running Windows operating system. Also sequential mode is needed to deploy a package using \texttt{snowfall} safely, where you cannot assume a user have an useable cluster installed. Other arguments for \texttt{sfCluster} are \texttt{restore}, \texttt{socketHosts}, \texttt{slaveOutfile} and \texttt{nostart}. See package help for description. If the initialisation fails, probably because of missing base libraries \texttt{Rmpi} and \texttt{snow}, \texttt{snowfall} falls back to sequential mode with a warning message. In sequential and parallel execution, all functions are useable in both modes in the same way and returning the same results. \begin{verbatim} sfInit( parallel=FALSE ) sfLapply( 1:10, exp ) sfStop() sfInit( parallel=TRUE, cpus=5 ) ## Now, index 1 is calculated on CPU1, 2 on CPU2 and so on. ## Index 6 is again on CPU1. ## So the whole call is done in two steps on the 5 CPUs. sfLapply( 1:10, exp ) sfStop() \end{verbatim} Please note: Most of the \texttt{snowfall} functions are stopping the program on failure by default (by calling \texttt{stop()}). 
This is much safer for unexperienced users. If you want own failure handling, install your own handler \texttt{options(error = ...)} to prevent snowfall from stopping in general. Also most of the functions feature an argument \texttt{stopOnError} which set to \texttt{FALSE} prevents the functions from stopping. Do not forget to handle potential errors in your program if using this feature. The given behavior is not only better for unexperienced users, any other behavior would be very nasty on package deployment. \subsection{Writing parallel programs with snowfall} \subsubsection{General notes and simple example} If you detected parts of your program which can be parallelised (loops etc) it is in most cases a fast step to give them a parallel run. First, rewrite them using Rs list operators (lapply, apply) instead of loops (if they are not yet calculated by list operators). Then write a wrapper function to be called by the list operators and manage a single parallel step. Note there are no local variables, only the data from the list index will be given as argument. If you need more than one variable argument, you need to make the required variables global (assign to global environment) and export them to all slaves. \texttt{snowfall} provides some functions to make this process easier (take a look at the package help). \begin{verbatim} sfInit( parallel=TRUE, cpus=4 ) b <- c( 3.4, 5.7, 10.8, 8, 7 ) ## Export a and b in their current state to all slaves. sfExport( ''b'' ) parWrapper <- function( datastep, add1, add2 ) { cat( ''Data: '', datastep, ''ADD1:'', add1, ''ADD2:'', add2, ''\n'' ) ## Only possible as ''b'' is exported! cat( ''b:'', b[datastep] ) ## Do something return( datastep ) } ## Calls parWrapper with each value of a and additional ## arguments 2 and 3. 
result <- sfLapply( 1:5, parWrapper, 2, 3 ) sfStop() \end{verbatim} \subsubsection{Basic load balancing using \texttt{sfClusterApplyLB}} All parallel wrappers around the R-list operators are executed in blocks: On one step the first $n$ indices are calculated, then the next $n$ indices, where $n$ is the number of CPUs in the cluster. This behavior is quite ok in a homogenous cluster, where all or mostly all machines are built with equal hardware and therefore offer the same speed. In heterogenous infrastructures, speed is depending on the slowest machine in the cluster, as the faster machines have to wait for it to finish its calculation. If your parallel algorithm is using different time for different problems, load balancing will reduce overall time in homogenous clusters greatly. \texttt{snow} and so \texttt{snowfall} feature a simple load balanced method to avoid waiting times in such environments. If calling \texttt{sfClusterApplyLB} the faster machines get further indices to calculate without waiting for the slowest to finish its step. \texttt{sfClusterApplyLB} is called like \texttt{lapply}. If your local infrastructure is such an heterogenous structure, this function is the way to go. It can also be handy in homogenous clusters where other users spawn processes, too, so sometimes load differs temporarily. A visualisation of basic load balacing can be found in \cite{ROSS_07}. 
\begin{verbatim} sfInit( parallel=TRUE, cpus=2 ) calcPar <- function( x ) { x1 <- matrix( 0, x, x ) x2 <- matrix( 0, x, x ) for( var in 1:nrow( x1 ) ) x1[var,] = runif( ncol( x1 ) ) for( var in 1:nrow( x2 ) ) x2[var,] = runif( ncol( x1 ) ) b <- sum( diag( ( x1 %*% x2 ) %*% x1 ) ) return( b ) } result <- sfClusterApplyLB( 50:100, calcPar ) sfStop() \end{verbatim} \subsubsection{Intermediate result saving and restoring using \texttt{sfClusterApplySR}} Another helpful function for long running clusters is \texttt{sfClusterApplySR}, which saves intermediate results after processing $n$-indices (where $n$ is the amount of CPUs). If it is likely you have to interrupt your program (probably because of server maintenance) you can start using \texttt{sfClusterApplySR} and restart your program without the results produced up to the shutdown time. Please note: Only complete $n$-blocks are saved, as the function \texttt{sfLapply} is used internally.\footnote{This function is an addition to \texttt{snow} and therefore could not be integrated in the load balanced version.} The result files are saved in the temporary folder \texttt{~/.sfCluster/RESTORE/x}, where x is a string with a given name and the name of the input R-file. \texttt{sfClusterApplySR} is called like \texttt{sfClusterApplyLB} and therefore like \texttt{lapply}. If using the function \texttt{sfClusterApplySR} result are always saved in the intermediate result file. But, if cluster stopped and results could be restored, restore itself is only done if explicitly stated. This aims to prevent false results if a program was interrupted by intend and restarted with different internal parameters (where with automatical restore probably results from previous runs would be inserted). So handle with care if you want to restore! 
If you only use one call to \texttt{sfClusterApplySR} in your program, the parameter \texttt{name} does not need to be changed, it only is important if you use more than one call to \texttt{sfClusterApplySR}. \begin{center} \begin{verbatim} sfInit( parallel=TRUE, cpus=2 ) # Saves under Name default resultA <- sfClusterApplySR( somelist, somefunc ) # Must be another name. resultB <- sfClusterApplySR( someotherlist, someotherfunc, name="CALC_TWO" ) sfStop() \end{verbatim} \end{center} If cluster stops probably during run of \texttt{someotherfunc} and restarted with restore-Option, the complete result of \texttt{resultA} is loaded and therefore no calculation on \texttt{somefunc} is done. \texttt{resultB} is restored with all the data available at shutdown and calculation begins with the first undefined result. \emph{Note on restoring errors}: If restoration of data fails (probably because list size is different in saving and current run), \texttt{sfClusterApplySR} stops. For securely reason it does not delete the RESTORE-files itself, but prompt the user the complete path to delete manually and explicitly. \subsection{Fault tolerance} Differing from \texttt{snowFT}, the fault tolerance extension for \texttt{snow}, \texttt{snowfall} does not feature fault tolerance (see \cite{HANA_04}). This is due to the lack of an MPI implementation of \texttt{snowFT}. \subsection{Controlling snowfall using the command line} snowfall can be widely controlled via command line arguments. This is useful for fast changing of cluster parameters (e.g. changing the host names in a Socket cluster) on a raw installation and it serves as connection to sfCluster. Of course it can be used as connection to any other workload- or batch managing software, too. On the commandline there are the following parameters: \begin{tabular}{lp{10cm}} parallel & Switch to parallel execution. Default is sequential execution \\ cpus=X & Amount of CPUs wanted. 
Without {-}{-}parallel, a value $X > 1$ switch to parallel execution. \\ type=X & Type of cluster. Allowed values are SOCK, MPI, PVM and NWS. \\ session=X & Session number. snowfall logfiles contain number, but only needed with sfCluster. \\ restoreSR & Enables restoring of previously saved results from \texttt{sfClusterApplySR} calls. \\ hosts=X & List of hosts for Socket (SOCK) or NetWorkSpaces (NWS) clusters. Entries are comma seperated. Any entry may contain colon seperated value for the amount of processors on this machine. Example: \texttt{{-}{-}hosts=machine1:4,machine2,123.123.12.13:2} (this spawns 4 workers on machine1, one on machine2 and two on 123.123.12.13). \\ tmpdir=X & Specify temporary directory for logfiles and R-output. \\ \end{tabular} For using these arguments, just add these after an \texttt{--args} on the commandline (which forces R not to treat these arguments as R ones). \begin{center} \texttt{R --no-save --args --parallel --cpus=2 < program.R} \end{center} Starts R and forces snowfall to start in parallel mode with 2 CPUs (in this case: using a Socket-cluster, as this is the default). \textit{Note}: arguments on the command line have lower priority as settings from the \texttt{sfInit} call. That means that the above example only works if initialisation is done via \texttt{sfInit()}, but not with \texttt{sfInit( parallel=FALSE )}, as then sequential execution is forced. Further examples should explan the feature: \begin{itemize} \item \texttt{R --no-save --args --parallel --type=MPI --cpus=4 < program.R} (start using 4 workers in an existing MPI cluster. If no MPI cluster exists, a plain one is started on your local machine only. Beware of this, as you have to shutdown this cluster afterwards manually.). 
\item \texttt{R --no-save --args --parallel --type=SOCK --hosts=localhost:3,singlema,othmach:4 < program.R} (Starts a socket cluster with two machines and 7 CPUs: 3 on \texttt{localhost}, 4 on \texttt{othmach} and one worker on \texttt{singlema}). \end{itemize} \subsection{Traps, Internals} \texttt{snowfall} limits the amount of CPUs by default (to 40). If you need more CPUs, call \texttt{sfSetMaxCPUs()} \emph{before} calling \texttt{sfInit()}. Beware of requesting more CPUs as you have ressources: there are as many R processes spawned as CPUs wanted. They are distributed across your cluster like in the given scheme of the LAM host configuration. You can easily kill all machines in your cluster by requesting huge amounts of CPUs or running very memory consuming functions across the cluster. To avoid such common problems use \emph{sfCluster}. For some functions of \texttt{snowfall} it is needed to create global variables on the master. All these variables start with prefix ``\texttt{.sf}'', please do not delete them. The internal control structure of \texttt{snowfall} is saved in the variable \texttt{.sfOptions}, which should be accessed through the wrapper functions as the structure may change in the future.\section{Using \emph{sfCluster} with \texttt{snowfall}} \subsection{About \emph{sfCluster}} \emph{sfCluster} is a small management tool, helping to run parallel R-programs using \texttt{snowfall}. Mainly, it exculpates the user from setting up a LAM/MPI cluster on his own. Further, it allows multiple clusters per user and therefore executes any parallel R program in a single cluster. These clusters are built according to the current load and usage of your cluster (this means: only machines are taken with free ressources). Also, execution is observed and if problems arise, the cluster is shut down. \emph{sfCluster} can be used with R-interactive shell or batch mode and also feature a special batch mode with visual logfile and process-displaying. 
For further details about installation, administration and configuration of \emph{sfCluster}, please visit \url{http://www.imbi.uni-freiburg.de/parallel} or run \texttt{sfCluster {-}{-}help} if you installed it yet. \subsection{Starting R using \emph{sfCluster}} An \emph{sfCluster} execution is following these steps: \begin{enumerate} \item Test memory usage of program if not explicitly given. This is done via a default temporary (10 minutes) sequential run to determinate the maximum usage of RAM on a slave. This is important for allocating ressources on slaves. \item Detect free ressources in cluster universe.\footnote{Which are all potentially useable machines.} Take machines with free ressources matching users request. \item Start LAM/MPI cluster with previous built setting. \item Run R with parameters for \texttt{snowfall} control. \item LOOP: Observe execution (check processes, memory usage, and machine state). In monitoring mode: Display state of cluster and logfiles on screen. \item On interruption or regular end: shutdown cluster. \end{enumerate} \subsection{Using \emph{sfCluster}} The most common parameters of \emph{sfCluster} are \texttt{{-}{-}cpus}, with which you request a certain amount of CPUs among the cluster (default is 2 in parallel and 1 in sequential mode). There is a builtin limit for the amount of CPUs, which is changeable using the \emph{sfCluster} configuration. There are four execution modes: \begin{tabular}{lp{3cm}p{9cm}} -b & Batchmode (Default) & Run silent on terminal.\\ -i & Interactive R-shell & Ability to use interactive R-shell with cluster.\\ -m & Monitoring mode & Visual processmonitor and logfile viewer.\\ -s & Sequential execution (no cluster usage) & Run without cluster on single CPU.\\ \end{tabular} To avoid the (time consuming) memory test, you can specify a maximum amount of memory usable per slave via option \texttt{{-}{-}mem}. The behavior on excessing this memory usage is configurable (default: cluster stop). 
The memory usage limit is very important for not getting your machines into swapping (means: shortage of physical RAM), which would hurt performance badly. So, simple calls to \emph{sfCluster} could be \begin{verbatim} ## Run a given R program with 8 cpus and max. 500MB (0.5 gigabytes) in monitoring mode sfCluster -m --cpus=8 --mem=0.5G myRprogram.R ## Run nonstopping cluster with real quiet output. nohup sfCluster -b --cpus=8 --mem=500M myRprogram.R --quiet ## Start R interactive shell with 4 cores. With 300MB memory (MB is default unit) ## No R-file is given for interactive mode. sfCluster -i --cpus=4 --mem=300 \end{verbatim} For all possible options and further examples for \emph{sfCluster} usage, see \texttt{sfCluster {-}{-}help}. \subsection{The snowfall-side of \emph{sfCluster}} If you start an R program using \texttt{snowfall} with \emph{sfCluster}, the latter waits until \texttt{sfInit()} is called and then starts the observation of the execution. The default behavior if using \emph{sfCluster} is just to call \texttt{sfInit()} without any argument. Use arguments only if you want to explicitly overwrite given settings by \emph{sfCluster}. \subsection{Proposed development cycle} The following development cycle is of course a proposal. You can skip or replace any step depending on your own needs. \begin{enumerate} \item Develop program in sequential mode (start using option \texttt{-s}). \item Test in parallel mode using interactive mode to detect directly problems on parallelisation (start using option \texttt{-i}). \item Try larger test runs using monitoring mode, observing the cluster and probably side effects during parallel execution (start using option \texttt{-m}). Problems arise on single nodes will be visible (like non correct working libraries). \item Do real runs using silent batch mode (start using options \texttt{-b {-}{-}quiet}). Probably you want to run these runs in the background of your Unix shell using \texttt{nohup}. 
\end{enumerate} \subsection{Future sfCluster} These additions are planned for the future: \begin{itemize} \item Port to OpenMPI \item Faster SSH connections for observing \item Extended scheduler for system ressources \end{itemize} %% History. \section{History of snowfall changes} You can also call: RShowDoc("NEWS", package="snowfall") \begin{itemize} \item 1.83 (API changes: minor additions) \begin{itemize} \item sfIsRunning: new function giving a logical is sfInit() was called or not. Needed, as all other snowfall functions implicitely call sfInit() if it was not called. \end{itemize} \item 1.82 \begin{itemize} \item Internal refactorings. \end{itemize} \item 1.81 \begin{itemize} \item Change in sfInit() MPI startup so sfCluster can run with snow > 0.3 now. \item sfExport now also works in sequential mode (writing to global environment). This prevented sequential execution in some cases. \end{itemize} \item 1.80 (API changes: minor additions) \begin{itemize} \item snowfall passes packages checks of R 2.10.1 without warning or error. Internal state is now only saved in the namespace itself (thanks to Uwe Ligges for the tipp). \item sfExport can now also export objects in a specific namespace (argument 'namespace') \item sfExport: behavior in error case manageable (stopOnError) \item sfExport: smaller bugfixes. \item sfRemoveAll can now also remove hidden names (argument 'hidden') \item sfRemoveAll is more robust now (some minor bugfixes, more checks) \item sfRemoveAll bugfix for multiple removals (thanks to Greggory Jefferis) \item Bugfix on exception list on sfExportAll \item Refactorings in sfTest() \item snowfall now has a NEWS doc ;) \item No warning on Mac OS because of default Mac-R command line arg 'gui' (thanks to Michael Siegel). 
\end{itemize} \item 1.71 (API changes: none) \begin{itemize} \item Exporting of objects using \texttt{sfExport} is speed up (round 30%) \item Fixed a bug on Windows in \texttt{sfSource} \end{itemize} \item 1.70 (API changes: minor additions, BEHAVIOR CHANGES: logging) \begin{itemize} \item Behavior change: new default: no logging of slave/worker output. \item API change: new argument \texttt{slaveOutfile} on \texttt{sfInit()}. \item API change: new argument \texttt{restore} on \texttt{sfInit()}. \item API change: new argument \texttt{master} on \texttt{sfCat}. \item Windows startup fixed. \item NWS startup fixed. \item sfSapply is working as intended. \item Changing CPU amount during runtime (with multiple sfInit() calls with different settings in a single program) is now possible using socket and NWS clusters. \item Dozens of small glitches inside snowfall fixed (also messages are made more precisly). \item Package vignette slightly extended. \end{itemize} \end{itemize} \bibliographystyle{plain} \bibliography{all-bib} \end{document}