clue/0000755000175000017500000000000014131007713011331 5ustar nileshnileshclue/MD50000644000175000017500000001136314131007713011645 0ustar nileshnilesh0842958a97c6f15b9a4dda5eb81b8baf *DESCRIPTION d9dfb1c0aac7ba6fae4e921fd35698a5 *NAMESPACE 281c7577f564a5acbecf046c5d1b8e64 *R/AAA.R 1956f4cd36476e3223d028f743f41cad *R/addtree.R 5ca7bd63f1ed85f171358c12fcf08e53 *R/agreement.R 61f26eec5666c409d3a7369f9cc0c99a *R/bag.R cd56914218fa9922aba0f76ff8b94909 *R/boot.R 74c617065ccf4f72df1353534f85da75 *R/classes.R c6a0f185f75ff0e903eb69c1e4d3a5b8 *R/consensus.R 6920384b112acd8962490a58131f04ab *R/dissimilarity.R c8a21520e911951d95d7ebd74e113265 *R/ensemble.R f4bbabdccc0b0dc31dbf14373ded5d11 *R/fuzziness.R 5999d867614d17cd53a043cbd99703c9 *R/hierarchy.R d67f188882f5aae752df482d3473fbd0 *R/lattice.R 285f76623c207f44342d7d1ca33d07e8 *R/lsap.R d10944e1875825af11bcea65571432fc *R/margin.R 6197a2bae482c876b955d9a418e2720a *R/medoid.R a3dccf831a311ed068578f817f38161e *R/membership.R 27369e3ebfc5ade758ebb2e49bb213fc *R/objects.R 4b8e8ee574a015903622e264e7560aa8 *R/partition.R 00c4dfcc2d401d810d70f246b7628f6b *R/pava.R 6131a8ffa97003764405701373a3bd48 *R/pclust.R 480eb9d4a5ec63da63f059511b5e4dd4 *R/predict.R 44b2e289ec1ed4ea042eccd8816080c5 *R/prototypes.R 5eabc5a234802b0f5a9f4c244ebe9aa9 *R/proximity.R f1a133ffc362372bc24ff24581964b1e *R/registration.R 69049e632bf64e2a11ed5b4f0c276570 *R/sumt.R 8cfa16132f28d693fcd03d396a678deb *R/tabulate.R f415cbecc8d1694bca125998db11a2ae *R/ultrametric.R b43e58c5919eab710f6d598830027845 *R/utilities.R 9c6964efc6762066184463e4c363a2d3 *R/validity.R 917bc66daf9b7bcacad5fefba35de6be *build/partial.rdb 95373ef811d3fa5f5aa61f90bc1d588c *build/vignette.rds d58bfd8bf54da08c9810a7d2613eacb7 *data/CKME.rda 190fc58a9d194601f968f0c96b12109d *data/Cassini.rda 40f22a96339e385b5526e63797b73bfb *data/GVME.rda b648708acb6475d3e6fcae9d89ac9d75 *data/GVME_Consensus.rda 571eecaf854ae748c28e8ba7abb1f6e9 *data/Kinship82.rda 4307b909ad1dd68edb3d0f88b02516af *data/Kinship82_Consensus.rda 0545712f5c31edc86462a507f8f7df64 *data/Phonemes.rda adf865dd114dae54c3db24060e42d4a5 *inst/CITATION 4ce2ff29ebfc819444d6c7eb2f09ff6b *inst/doc/clue.R ec5243c6beee816b6e93e5cbda9f722a *inst/doc/clue.Rnw b8387524e283e0757339e06ca5f80365 *inst/doc/clue.pdf fc5c32ebcb85203fa533853fb18d18d3 *inst/po/en@quot/LC_MESSAGES/R-clue.mo 6b382525256a1a059e58ce576eff7106 *man/CKME.Rd 0d61696816866774419c7fda98f05d5f *man/Cassini.Rd b18fd96f640a1001be51feae0fe8e66d *man/GVME.Rd 1b6144d910daf97b691b547c3bcf2d51 *man/GVME_Consensus.Rd b5b6f767a686a52c78824712442fa8f5 *man/Kinship82.Rd 0b423e42f1f2cfba9b9d52e163c0abf8 *man/Kinship82_Consensus.Rd 45e11496f6cac656142f7d6f01022de1 *man/Phonemes.Rd 50375af82b3d133984605c006831a07d *man/addtree.Rd f55d433cb1f20ffa39a7f0dbc9e75c02 *man/cl_agreement.Rd 6b582254f38651979fa27fcd15297bd0 *man/cl_bag.Rd 5dca26838651ac5caca862e459b4920f *man/cl_boot.Rd d4081e72f3447131afc6a61d0af7f3d2 *man/cl_classes.Rd 488ccf099c041a9cdc00b973935b0f25 *man/cl_consensus.Rd 6e672adfe90c3da3a6ed084d610e1aeb *man/cl_dissimilarity.Rd 872ecad639c4ade222bba29873cb5465 *man/cl_ensemble.Rd 4cabe55c90e4c148ee3bc274d970ebf0 *man/cl_fuzziness.Rd af83eebbfd3d600999346facaa4308d5 *man/cl_margin.Rd d4a61b7498b939cd372cf4b181378e11 *man/cl_medoid.Rd e26070e22290e167ec900cdeea0567ac *man/cl_membership.Rd 2ddf43cfa7b4809e1b211e2f89080d5c *man/cl_object_names.Rd 818d072c048b86741b39db9967dae2b2 *man/cl_pam.Rd d9486d40bc389102f8d0b5dbf4830b0c *man/cl_pclust.Rd 1eb04a9edb42f0c3ad50321b36475d6a *man/cl_predict.Rd 
9e88e1119f27cc732e7b865471521f1f *man/cl_prototypes.Rd 931b58a667da8aab28dc441fd0c630f7 *man/cl_tabulate.Rd a79724c42916ad2db16343e6539e53b4 *man/cl_ultrametric.Rd 0c0e58d6062025f81c6c34ecf026a3e4 *man/cl_validity.Rd ffe8dcd2639eb402c485d2ae30ff7b55 *man/fit_ultrametric_target.Rd 3cbae2b63263993d541d67892e307696 *man/hierarchy.Rd 7175d60e57286b9735d26ff996592517 *man/kmedoids.Rd d1b212bcbf61720cc380d2aeb01c95e3 *man/l1_fit_ultrametric.Rd 9e257bcb7df23ccbca37778f11f52901 *man/lattice.Rd c393ed09b16d0d60bab37dedd95731a2 *man/ls_fit_addtree.Rd 9b0469edf996e2e47e8d3bb00dbb5ea4 *man/ls_fit_sum_of_ultrametrics.Rd 4de00e99c87ae942624b33a73fc10bbd *man/ls_fit_ultrametric.Rd 115623ffe35fcef24928738206942374 *man/n_of_classes.Rd e4822d78d50337d163d881298c234bb1 *man/n_of_objects.Rd da27a64e2cd173b00f81361e10bcab81 *man/partition.Rd 1bc099d43549aa2805afe8e5c5912696 *man/pclust.Rd ebf265d86f6eac729e616a40ef94ac08 *man/solve_LSAP.Rd f9e7119e8be0492354b6605b81fb5ff1 *man/sumt.Rd 6985140eb3d61464356b3a4ad86ec71c *po/R-clue.pot c1cb790e0fd0e4d3f38106f10318b585 *src/assignment.c 914912fa18b8403e505ac5d8e1f4ee29 *src/assignment.h e2f17003f4c681661ea31175a99503cf *src/clue.c 815d26eb16b0115f4a1e73c71c7d9402 *src/clue.h 1d83eaf5af08f3fc312d6dd0363e5c49 *src/init.c 76301856024f2491f73fee44641b5c86 *src/lsap.c 1db06fea8e5ba8856f5def041c22bf54 *src/trees.c ec5243c6beee816b6e93e5cbda9f722a *vignettes/clue.Rnw edc556826aaae2949798b09d5b188a61 *vignettes/cluster.bib clue/DESCRIPTION0000644000175000017500000000151314131007713013037 0ustar nileshnileshPackage: clue Version: 0.3-60 Encoding: UTF-8 Title: Cluster Ensembles Description: CLUster Ensembles. Authors@R: c(person("Kurt", "Hornik", role = c("aut", "cre"), email = "Kurt.Hornik@R-project.org", comment = c(ORCID = "0000-0003-4198-9911")), person("Walter", "Böhm", role = "ctb")) License: GPL-2 Depends: R (>= 3.2.0) Imports: stats, cluster, graphics, methods Suggests: e1071, lpSolve (>= 5.5.7), quadprog (>= 1.4-8), relations Enhances: RWeka, ape, cba, cclust, flexclust, flexmix, kernlab, mclust, movMF, modeltools NeedsCompilation: yes Packaged: 2021-10-11 08:28:41 UTC; hornik Author: Kurt Hornik [aut, cre] (), Walter Böhm [ctb] Maintainer: Kurt Hornik Repository: CRAN Date/Publication: 2021-10-11 10:19:55 UTC clue/po/0000755000175000017500000000000012213262407011751 5ustar nileshnileshclue/po/R-clue.pot0000644000175000017500000001223713142031604013624 0ustar nileshnileshmsgid "" msgstr "" "Project-Id-Version: clue 0.3-54\n" "POT-Creation-Date: 2017-08-07 11:31\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=CHARSET\n" "Content-Transfer-Encoding: 8bit\n" msgid "Argument 'weights' must be compatible with 'x'." msgstr "" msgid "Argument 'weights' has negative elements." msgstr "" msgid "Argument 'weights' has no positive elements." msgstr "" msgid "Non-identical weights currently not supported." msgstr "" msgid "All given orders must be valid permutations." msgstr "" msgid "Iterative projection run: %d" msgstr "" msgid "Iterative reduction run: %d" msgstr "" msgid "Cannot coerce to 'cl_addtree'." msgstr "" msgid "Cannot mix partitions and hierarchies." msgstr "" msgid "All clusterings must have the same number of objects." msgstr "" msgid "Can only handle hard partitions." msgstr "" msgid "Can only determine classes of partitions or hierarchies." msgstr "" msgid "Cannot compute consensus of empty ensemble." 
msgstr "" msgid "AOS run: %d" msgstr "" msgid "Iteration: 0 *** value: %g" msgstr "" msgid "Iteration: %d *** value: %g" msgstr "" msgid "Minimum: %g" msgstr "" msgid "AOG run: %d" msgstr "" msgid "Parameter 'p' must be in [1/2, 1]." msgstr "" msgid "Cannot compute prototype distances." msgstr "" msgid "All elements must have the same number of objects." msgstr "" msgid "Generic '%s' not defined for \"%s\" objects." msgstr "" msgid "Wrong class." msgstr "" msgid "Plotting not available for elements %s of the ensemble." msgstr "" msgid "Value '%s' is not a valid abbreviation for a fuzziness method." msgstr "" msgid "Unary '%s' not defined for \"%s\" objects." msgstr "" msgid "Hierarchies must have the same number of objects." msgstr "" msgid "Dendrograms must have the same number of objects." msgstr "" msgid "Arguments 'x' and 'y' must have the same number of objects." msgstr "" msgid "Cannot compute meet of given clusterings." msgstr "" msgid "Cannot compute join of given clusterings." msgstr "" msgid "Join of given n-trees does not exist." msgstr "" msgid "x must be a matrix with nonnegative entries." msgstr "" msgid "x must not have more rows than columns." msgstr "" msgid "Argument 'x' must be a partition." msgstr "" msgid "Cannot compute medoid of empty ensemble." msgstr "" msgid "Cannot compute medoid partition of empty ensemble." msgstr "" msgid "Class ids:" msgstr "" msgid "Criterion:" msgstr "" msgid "Medoid ids:" msgstr "" msgid "k cannot be less than the number of classes in x." msgstr "" msgid "Cannot extract object dissimilarities" msgstr "" msgid "Cannot infer class ids from given object." msgstr "" msgid "A hard partition of %d objects." msgstr "" msgid "A partition of %d objects." msgstr "" msgid "Partitions must have the same number of objects." msgstr "" msgid "Class ids must be atomic." msgstr "" msgid "Not a valid membership matrix." msgstr "" msgid "Cannot coerce to 'cl_hard_partition'." msgstr "" msgid "No information on exponent in consensus method used." msgstr "" msgid "No information on dissimilarity in consensus method used." msgstr "" msgid "A hard partition of a cluster ensemble with %d elements into %d classes." msgstr "" msgid "A soft partition (degree m = %f) of a cluster ensemble with %d elements into %d classes." msgstr "" msgid "Cannot determine how to modify prototypes." msgstr "" msgid "Invalid function to modify prototypes." msgstr "" msgid "Cannot determine how to subset prototypes." msgstr "" msgid "Invalid function to subset prototypes." msgstr "" msgid "Pclust run: %d" msgstr "" msgid "A hard partition of %d objects into %d classes." msgstr "" msgid "A soft partition (degree m = %f) of %d objects into %d classes." msgstr "" msgid "Cannot make new predictions." msgstr "" msgid "Standardization is currently not supported." msgstr "" msgid "Cannot determine prototypes." msgstr "" msgid "Invalid consensus method '%s'." msgstr "" msgid "Invalid dissimilarity method '%s'." msgstr "" msgid "Invalid agreement method '%s'." msgstr "" msgid "SUMT run: %d" msgstr "" msgid "Iteration: 0 Rho: %g P: %g" msgstr "" msgid "Iteration: %d Rho: %g P: %g" msgstr "" msgid "Not a valid ultrametric." msgstr "" msgid "Given ensemble contains no dissimilarities." 
msgstr "" msgid "Outer iteration: %d" msgstr "" msgid "Change: u: %g L: %g" msgstr "" msgid "Iteration: %d" msgstr "" msgid "Term: %d" msgstr "" msgid "Change: %g" msgstr "" msgid "Overall change: u: %g L: %g" msgstr "" msgid "An object of virtual class '%s', with representation:" msgstr "" msgid "An ensemble of %d partition of %d objects." msgid_plural "An ensemble of %d partitions of %d objects." msgstr[0] "" msgstr[1] "" msgid "An ensemble of %d dendrogram of %d objects." msgid_plural "An ensemble of %d dendrograms of %d objects." msgstr[0] "" msgstr[1] "" msgid "An ensemble of %d hierarchy of %d objects." msgid_plural "An ensemble of %d hierarchies of %d objects." msgstr[0] "" msgstr[1] "" msgid "An ensemble with %d element." msgid_plural "An ensemble with %d elements." msgstr[0] "" msgstr[1] "" clue/man/0000755000175000017500000000000012734172047012116 5ustar nileshnileshclue/man/cl_ensemble.Rd0000644000175000017500000000462311547637750014672 0ustar nileshnilesh\name{cl_ensemble} \alias{cl_ensemble} \alias{as.cl_ensemble} \alias{is.cl_ensemble} \title{Cluster Ensembles} \description{Creation and manipulation of cluster ensembles.} \usage{ cl_ensemble(..., list = NULL) as.cl_ensemble(x) is.cl_ensemble(x) } \arguments{ \item{\dots}{R objects representing clusterings of or dissimilarities between the same objects.} \item{list}{a list of R objects as in \code{\dots}.} \item{x}{for \code{as.cl_ensemble}, an R object as in \code{\dots}; for \code{is.cl_ensemble}, an arbitrary R object.} } \details{ \code{cl_ensemble} creates \dQuote{cluster ensembles}, which are realized as lists of clusterings (or dissimilarities) with additional class information, always inheriting from \code{"cl_ensemble"}. All elements of the ensemble must have the same number of objects. If all elements are partitions, the ensemble has class \code{"cl_partition_ensemble"}; if all elements are dendrograms, it has class \code{"cl_dendrogram_ensemble"} and inherits from \code{"cl_hierarchy_ensemble"}; if all elements are hierarchies (but not always dendrograms), it has class \code{"cl_hierarchy_ensemble"}. Note that empty or \dQuote{mixed} ensembles cannot be categorized according to the kind of elements they contain, and hence only have class \code{"cl_ensemble"}. The list representation makes it possible to use \code{lapply} for computations on the individual clusterings in (i.e., the components of) a cluster ensemble. Available methods for cluster ensembles include those for subscripting, \code{c}, \code{rep}, and \code{print}. There is also a \code{plot} method for ensembles for which all elements can be plotted (currently, additive trees, dendrograms and ultrametrics). } \value{ \code{cl_ensemble} returns a list of the given clusterings or dissimilarities, with additional class information (see \bold{Details}). } \examples{ d <- dist(USArrests) hclust_methods <- c("ward", "single", "complete", "average", "mcquitty") hclust_results <- lapply(hclust_methods, function(m) hclust(d, m)) names(hclust_results) <- hclust_methods ## Now create an ensemble from the results. hens <- cl_ensemble(list = hclust_results) hens ## Subscripting. hens[1 : 3] ## Replication. rep(hens, 3) ## Plotting. plot(hens, main = names(hens)) ## And continue to analyze the ensemble, e.g. 
round(cl_dissimilarity(hens, method = "gamma"), 4) } \keyword{cluster} clue/man/cl_membership.Rd0000644000175000017500000000462312211412501015201 0ustar nileshnilesh\name{cl_membership} \alias{cl_membership} \alias{as.cl_membership} \title{Memberships of Partitions} \description{ Compute the memberships values for objects representing partitions. } \usage{ cl_membership(x, k = n_of_classes(x)) as.cl_membership(x) } \arguments{ \item{x}{an R object representing a partition of objects (for \code{cl_membership}) or raw memberships or class ids (for \code{as.cl_membership}).} \item{k}{an integer giving the number of columns (corresponding to class ids) to be used in the membership matrix. Must not be less, and default to, the number of classes in the partition.} } \value{ An object of class \code{"cl_membership"} with the matrix of membership values. } \details{ \code{cl_membership} is a generic function. The methods provided in package \pkg{clue} handle the partitions obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself). \code{as.cl_membership} can be used for coercing \dQuote{raw} class ids (given as atomic vectors) or membership values (given as numeric matrices) to membership objects. } \seealso{ \code{\link{is.cl_partition}} } \examples{ ## Getting the memberships of a single soft partition. d <- dist(USArrests) hclust_methods <- c("ward", "single", "complete", "average", "mcquitty") hclust_results <- lapply(hclust_methods, function(m) hclust(d, m)) names(hclust_results) <- hclust_methods ## Now create an ensemble from the results. hens <- cl_ensemble(list = hclust_results) ## And add the results of agnes and diana. require("cluster") hens <- c(hens, list(agnes = agnes(d), diana = diana(d))) ## Create a dissimilarity object from this. d1 <- cl_dissimilarity(hens) ## And compute a soft partition. party <- fanny(d1, 2) round(cl_membership(party), 5) ## The "nearest" hard partition to this: as.cl_hard_partition(party) ## (which has the same class ids as cl_class_ids(party)). ## Extracting the memberships from the elements of an ensemble of ## partitions. pens <- cl_boot(USArrests, 30, 3) pens mems <- lapply(pens, cl_membership) ## And turning these raw memberships into an ensemble of partitions. pens <- cl_ensemble(list = lapply(mems, as.cl_partition)) pens pens[[length(pens)]] } \keyword{cluster} clue/man/cl_pclust.Rd0000644000175000017500000001036512734173132014376 0ustar nileshnilesh\name{cl_pclust} \alias{cl_pclust} \title{Prototype-Based Partitions of Clusterings} \description{ Compute prototype-based partitions of a cluster ensemble by minimizing \eqn{\sum w_b u_{bj}^m d(x_b, p_j)^e}, the sum of the case-weighted and membership-weighted \eqn{e}-th powers of the dissimilarities between the elements \eqn{x_b} of the ensemble and the prototypes \eqn{p_j}, for suitable dissimilarities \eqn{d} and exponents \eqn{e}. 
} \usage{ cl_pclust(x, k, method = NULL, m = 1, weights = 1, control = list()) } \arguments{ \item{x}{an ensemble of partitions or hierarchies, or something coercible to that (see \code{\link{cl_ensemble}}).} \item{k}{an integer giving the number of classes to be used in the partition.} \item{method}{the consensus method to be employed, see \code{\link{cl_consensus}}.} \item{m}{a number not less than 1 controlling the softness of the partition (as the \dQuote{fuzzification parameter} of the fuzzy \eqn{c}-means algorithm). The default value of 1 corresponds to hard partitions obtained from a generalized \eqn{k}-means problem; values greater than one give partitions of increasing softness obtained from a generalized fuzzy \eqn{c}-means problem.} \item{weights}{a numeric vector of non-negative case weights. Recycled to the number of elements in the ensemble given by \code{x} if necessary.} \item{control}{a list of control parameters. See \bold{Details}.} } \value{ An object of class \code{"cl_partition"} representing the obtained \dQuote{secondary} partition by an object of class \code{"cl_pclust"}, which is a list containing at least the following components. \item{prototypes}{a cluster ensemble with the \eqn{k} prototypes.} \item{membership}{an object of class \code{"\link{cl_membership}"} with the membership values \eqn{u_{bj}}.} \item{cluster}{the class ids of the nearest hard partition.} \item{silhouette}{Silhouette information for the partition, see \code{\link[cluster]{silhouette}}.} \item{validity}{precomputed validity measures for the partition.} \item{m}{the softness control argument.} \item{call}{the matched call.} \item{d}{the dissimilarity function \eqn{d = d(x, p)} employed.} \item{e}{the exponent \eqn{e} employed.} } \details{ Partitioning is performed using \code{\link{pclust}} via a family constructed from \code{method}. The dissimilarities \eqn{d} and exponent \eqn{e} are implied by the consensus method employed, and inferred via a registration mechanism currently only made available to built-in consensus methods. The default methods compute Least Squares Euclidean consensus clusterings, i.e., use Euclidean dissimilarity \eqn{d} and \eqn{e = 2}. For \eqn{m = 1}, the partitioning procedure was introduced by Gaul and Schader (1988) for \dQuote{Clusterwise Aggregation of Relations} (with the same domains), containing equivalence relations, i.e., hard partitions, as a special case. Available control parameters are as for \code{\link{pclust}}. The fixed point approach employed is a heuristic which cannot be guaranteed to find the global minimum (as this is already true for the computation of consensus clusterings). Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the base algorithm. } \references{ J. C. Bezdek (1981). \emph{Pattern recognition with fuzzy objective function algorithms}. New York: Plenum. W. Gaul and M. Schader (1988). Clusterwise aggregation of relations. \emph{Applied Stochastic Models and Data Analysis}, \bold{4}:273--282. \doi{10.1002/asm.3150040406}. } \examples{ ## Use a precomputed ensemble of 50 k-means partitions of the ## Cassini data. data("CKME") CKME <- CKME[1 : 30] # for saving precious time ... diss <- cl_dissimilarity(CKME) hc <- hclust(diss) plot(hc) ## This suggests using a partition with three classes, which can be ## obtained using cutree(hc, 3). 
Could use cl_consensus() to compute ## prototypes as the least squares consensus clusterings of the classes, ## or alternatively: set.seed(123) x1 <- cl_pclust(CKME, 3, m = 1) x2 <- cl_pclust(CKME, 3, m = 2) ## Agreement of solutions. cl_dissimilarity(x1, x2) table(cl_class_ids(x1), cl_class_ids(x2)) } \keyword{cluster} clue/man/cl_consensus.Rd0000644000175000017500000003671212734174375015122 0ustar nileshnilesh\name{cl_consensus} \alias{cl_consensus} \title{Consensus Partitions and Hierarchies} \description{ Compute the consensus clustering of an ensemble of partitions or hierarchies. } \usage{ cl_consensus(x, method = NULL, weights = 1, control = list()) } \arguments{ \item{x}{an ensemble of partitions or hierarchies, or something coercible to that (see \code{\link{cl_ensemble}}).} \item{method}{a character string specifying one of the built-in methods for computing consensus clusterings, or a function to be taken as a user-defined method, or \code{NULL} (default value). If a character string, its lower-cased version is matched against the lower-cased names of the available built-in methods using \code{\link{pmatch}}. See \bold{Details} for available built-in methods and defaults.} \item{weights}{a numeric vector with non-negative case weights. Recycled to the number of elements in the ensemble given by \code{x} if necessary.} \item{control}{a list of control parameters. See \bold{Details}.} } \value{ The consensus partition or hierarchy. } \details{ Consensus clusterings \dQuote{synthesize} the information in the elements of a cluster ensemble into a single clustering, often by minimizing a criterion function measuring how dissimilar consensus candidates are from the (elements of) the ensemble (the so-called \dQuote{optimization approach} to consensus clustering). The most popular criterion functions are of the form \eqn{L(x) = \sum w_b d(x_b, x)^p}, where \eqn{d} is a suitable dissimilarity measure (see \code{\link{cl_dissimilarity}}), \eqn{w_b} is the case weight given to element \eqn{x_b} of the ensemble, and \eqn{p \ge 1}. If \eqn{p = 1} and minimization is over all possible base clusterings, a consensus solution is called a \emph{median} of the ensemble; if minimization is restricted to the elements of the ensemble, a consensus solution is called a \emph{medoid} (see \code{\link{cl_medoid}}). For \eqn{p = 2}, we obtain \emph{least squares} consensus partitions and hierarchies (generalized means). See also Gordon (1999) for more information. If all elements of the ensemble are partitions, the built-in consensus methods compute consensus partitions by minimizing a criterion of the form \eqn{L(x) = \sum w_b d(x_b, x)^p} over all hard or soft partitions \eqn{x} with a given (maximal) number \eqn{k} of classes. Available built-in methods are as follows. \describe{ \item{\code{"SE"}}{a fixed-point algorithm for obtaining \emph{soft} least squares Euclidean consensus partitions (i.e., for minimizing \eqn{L} with Euclidean dissimilarity \eqn{d} and \eqn{p = 2} over all soft partitions with a given maximal number of classes). This iterates between individually matching all partitions to the current approximation to the consensus partition, and computing the next approximation as the membership matrix closest to a suitable weighted average of the memberships of all partitions after permuting their columns for the optimal matchings of class ids. The following control parameters are available for this method. 
\describe{ \item{\code{k}}{an integer giving the number of classes to be used for the least squares consensus partition. By default, the maximal number of classes in the ensemble is used.} \item{\code{maxiter}}{an integer giving the maximal number of iterations to be performed. Defaults to 100.} \item{\code{nruns}}{an integer giving the number of runs to be performed. Defaults to 1.} \item{\code{reltol}}{the relative convergence tolerance. Defaults to \code{sqrt(.Machine$double.eps)}.} \item{\code{start}}{a matrix with number of rows equal to the number of objects of the cluster ensemble, and \eqn{k} columns, to be used as a starting value, or a list of such matrices. By default, suitable random membership matrices are used.} \item{\code{verbose}}{a logical indicating whether to provide some output on minimization progress. Defaults to \code{getOption("verbose")}.} } In the case of multiple runs, the first optimum found is returned. This method can also be referred to as \code{"soft/euclidean"}. } \item{\code{"GV1"}}{the fixed-point algorithm for the \dQuote{first model} in Gordon and Vichi (2001) for minimizing \eqn{L} with \eqn{d} being GV1 dissimilarity and \eqn{p = 2} over all soft partitions with a given maximal number of classes. This is similar to \code{"SE"}, but uses GV1 rather than Euclidean dissimilarity. Available control parameters are the same as for \code{"SE"}. } \item{\code{"DWH"}}{an extension of the greedy algorithm in Dimitriadou, Weingessel and Hornik (2002) for (approximately) obtaining soft least squares Euclidean consensus partitions. The reference provides some structure theory relating finding the consensus partition to an instance of the multiple assignment problem, which is known to be NP-hard, and suggests a simple heuristic based on successively matching an individual partition \eqn{x_b} to the current approximation to the consensus partition, and compute the memberships of the next approximation as a weighted average of those of the current one and of \eqn{x_b} after permuting its columns for the optimal matching of class ids. The following control parameters are available for this method. \describe{ \item{\code{k}}{an integer giving the number of classes to be used for the least squares consensus partition. By default, the maximal number of classes in the ensemble is used.} \item{\code{order}}{a permutation of the integers from 1 to the size of the ensemble, specifying the order in which the partitions in the ensemble should be aggregated. Defaults to using a random permutation (unlike the reference, which does not permute at all).} } } \item{\code{"HE"}}{a fixed-point algorithm for obtaining \emph{hard} least squares Euclidean consensus partitions (i.e., for minimizing \eqn{L} with Euclidean dissimilarity \eqn{d} and \eqn{p = 2} over all hard partitions with a given maximal number of classes.) Available control parameters are the same as for \code{"SE"}. This method can also be referred to as \code{"hard/euclidean"}. } \item{\code{"SM"}}{a fixed-point algorithm for obtaining \emph{soft} median Manhattan consensus partitions (i.e., for minimizing \eqn{L} with Manhattan dissimilarity \eqn{d} and \eqn{p = 1} over all soft partitions with a given maximal number of classes). Available control parameters are the same as for \code{"SE"}. This method can also be referred to as \code{"soft/manhattan"}. 
} \item{\code{"SM"}}{a fixed-point algorithm for obtaining \emph{hard} median Manhattan consensus partitions (i.e., for minimizing \eqn{L} with Manhattan dissimilarity \eqn{d} and \eqn{p = 1} over all hard partitions with a given maximal number of classes). Available control parameters are the same as for \code{"SE"}. This method can also be referred to as \code{"hard/manhattan"}. } \item{\code{"GV3"}}{a \acronym{SUMT} algorithm for the \dQuote{third model} in Gordon and Vichi (2001) for minimizing \eqn{L} with \eqn{d} being co-membership dissimilarity and \eqn{p = 2}. (See \code{\link{sumt}} for more information on the \acronym{SUMT} approach.) This optimization problem is equivalent to finding the membership matrix \eqn{m} for which the sum of the squared differences between \eqn{C(m) = m m'} and the weighted average co-membership matrix \eqn{\sum_b w_b C(m_b)} of the partitions is minimal. Available control parameters are \code{method}, \code{control}, \code{eps}, \code{q}, and \code{verbose}, which have the same roles as for \code{\link{sumt}}, and the following. \describe{ \item{\code{k}}{an integer giving the number of classes to be used for the least squares consensus partition. By default, the maximal number of classes in the ensemble is used.} \item{\code{nruns}}{an integer giving the number of runs to be performed. Defaults to 1.} \item{\code{start}}{a matrix with number of rows equal to the size of the cluster ensemble, and \eqn{k} columns, to be used as a starting value, or a list of such matrices. By default, a membership based on a rank \eqn{k} approximation to the weighted average co-membership matrix is used.} } In the case of multiple runs, the first optimum found is returned. } \item{\code{"soft/symdiff"}}{a \acronym{SUMT} approach for minimizing \eqn{L = \sum w_b d(x_b, x)} over all soft partitions with a given maximal number of classes, where \eqn{d} is the Manhattan dissimilarity of the co-membership matrices (coinciding with symdiff partition dissimilarity in the case of hard partitions). Available control parameters are the same as for \code{"GV3"}. } \item{\code{"hard/symdiff"}}{an exact solver for minimizing \eqn{L = \sum w_b d(x_b, x)} over all hard partitions (possibly with a given maximal number of classes as specified by the control parameter \code{k}), where \eqn{d} is symdiff partition dissimilarity (so that soft partitions in the ensemble are replaced by their closest hard partitions), or equivalently, Rand distance or pair-bonds (Boorman-Arabie \eqn{D}) distance. The consensus solution is found via mixed linear or quadratic programming. } } By default, method \code{"SE"} is used for ensembles of partitions. If all elements of the ensemble are hierarchies, the following built-in methods for computing consensus hierarchies are available. \describe{ \item{\code{"euclidean"}}{an algorithm for minimizing \eqn{L(x) = \sum w_b d(x_b, x) ^ 2} over all dendrograms, where \eqn{d} is Euclidean dissimilarity. This is equivalent to finding the best least squares ultrametric approximation of the weighted average \eqn{d = \sum w_b u_b} of the ultrametrics \eqn{u_b} of the hierarchies \eqn{x_b}, which is attempted by calling \code{\link{ls_fit_ultrametric}} on \eqn{d} with appropriate control parameters. This method can also be referred to as \code{"cophenetic"}. } \item{\code{"manhattan"}}{a \acronym{SUMT} for minimizing \eqn{L = \sum w_b d(x_b, x)} over all dendrograms, where \eqn{d} is Manhattan dissimilarity. 
Available control parameters are the same as for \code{"euclidean"}. } \item{\code{"majority"}}{a hierarchy obtained from an extension of the majority consensus tree of Margush and McMorris (1981), which minimizes \eqn{L(x) = \sum w_b d(x_b, x)} over all dendrograms, where \eqn{d} is the symmetric difference dissimilarity. The unweighted \eqn{p}-majority tree is the \eqn{n}-tree (hierarchy in the strict sense) consisting of all subsets of objects contained in more than \eqn{100 p} percent of the \eqn{n}-trees \eqn{T_b} induced by the dendrograms, where \eqn{1/2 \le p < 1} and \eqn{p = 1/2} (default) corresponds to the standard majority tree. In the weighted case, it consists of all subsets \eqn{A} for which \eqn{\sum_{b: A \in T_b} w_b > W p}, where \eqn{W = \sum_b w_b}. We also allow for \eqn{p = 1}, which gives the \emph{strict consensus tree} consisting of all subsets contained in each of the \eqn{n}-trees. The majority dendrogram returned is a representation of the majority tree where all splits have height one. The fraction \eqn{p} can be specified via the control parameter \code{p}. } } By default, method \code{"euclidean"} is used for ensembles of hierarchies. If a user-defined consensus method is to be employed, it must be a function taking the cluster ensemble, the case weights, and a list of control parameters as its arguments, with formals named \code{x}, \code{weights}, and \code{control}, respectively. Most built-in methods use heuristics for solving hard optimization problems, and cannot be guaranteed to find a global minimum. Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the methods. } \references{ E. Dimitriadou, A. Weingessel and K. Hornik (2002). A combination scheme for fuzzy clustering. \emph{International Journal of Pattern Recognition and Artificial Intelligence}, \bold{16}, 901--912. \cr \doi{10.1142/S0218001402002052}. A. D. Gordon and M. Vichi (2001). Fuzzy partition models for fitting a set of partitions. \emph{Psychometrika}, \bold{66}, 229--248. \doi{10.1007/BF02294837}. A. D. Gordon (1999). \emph{Classification} (2nd edition). Boca Raton, FL: Chapman & Hall/CRC. T. Margush and F. R. McMorris (1981). Consensus \eqn{n}-trees. \emph{Bulletin of Mathematical Biology}, \bold{43}, 239--244. \doi{10.1007/BF02459446}. } \seealso{ \code{\link{cl_medoid}}, \code{\link[ape]{consensus}} } \examples{ ## Consensus partition for the Rosenberg-Kim kinship terms partition ## data based on co-membership dissimilarities. data("Kinship82") m1 <- cl_consensus(Kinship82, method = "GV3", control = list(k = 3, verbose = TRUE)) ## (Note that one should really use several replicates of this.) ## Value for criterion function to be minimized: sum(cl_dissimilarity(Kinship82, m1, "comem") ^ 2) ## Compare to the consensus solution given in Gordon & Vichi (2001). data("Kinship82_Consensus") m2 <- Kinship82_Consensus[["JMF"]] sum(cl_dissimilarity(Kinship82, m2, "comem") ^ 2) ## Seems we get a better solution ... ## How dissimilar are these solutions? cl_dissimilarity(m1, m2, "comem") ## How "fuzzy" are they? cl_fuzziness(cl_ensemble(m1, m2)) ## Do the "nearest" hard partitions fully agree? cl_dissimilarity(as.cl_hard_partition(m1), as.cl_hard_partition(m2)) ## Consensus partition for the Gordon and Vichi (2001) macroeconomic ## partition data based on Euclidean dissimilarities. data("GVME") set.seed(1) ## First, using k = 2 classes. 
m1 <- cl_consensus(GVME, method = "GV1", control = list(k = 2, verbose = TRUE)) ## (Note that one should really use several replicates of this.) ## Value of criterion function to be minimized: sum(cl_dissimilarity(GVME, m1, "GV1") ^ 2) ## Compare to the consensus solution given in Gordon & Vichi (2001). data("GVME_Consensus") m2 <- GVME_Consensus[["MF1/2"]] sum(cl_dissimilarity(GVME, m2, "GV1") ^ 2) ## Seems we get a slightly better solution ... ## But note that cl_dissimilarity(m1, m2, "GV1") ## and that the maximal deviation of the memberships is max(abs(cl_membership(m1) - cl_membership(m2))) ## so the differences seem to be due to rounding. ## Do the "nearest" hard partitions fully agree? table(cl_class_ids(m1), cl_class_ids(m2)) ## And now for k = 3 classes. m1 <- cl_consensus(GVME, method = "GV1", control = list(k = 3, verbose = TRUE)) sum(cl_dissimilarity(GVME, m1, "GV1") ^ 2) ## Compare to the consensus solution given in Gordon & Vichi (2001). m2 <- GVME_Consensus[["MF1/3"]] sum(cl_dissimilarity(GVME, m2, "GV1") ^ 2) ## This time we look much better ... ## How dissimilar are these solutions? cl_dissimilarity(m1, m2, "GV1") ## Do the "nearest" hard partitions fully agree? table(cl_class_ids(m1), cl_class_ids(m2)) } \keyword{cluster} clue/man/kmedoids.Rd0000644000175000017500000000414612734173215014207 0ustar nileshnilesh\name{kmedoids} \alias{kmedoids} \title{K-Medoids Clustering} \description{ Compute a \eqn{k}-medoids partition of a dissimilarity object. } \usage{ kmedoids(x, k) } \arguments{ \item{x}{a dissimilarity object inheriting from class \code{"\link{dist}"}, or a square matrix of pairwise object-to-object dissimilarity values.} \item{k}{an integer giving the number of classes to be used in the partition.} } \value{ An object of class \code{"kmedoids"} representing the obtained partition, which is a list with the following components. \item{cluster}{the class ids of the partition.} \item{medoid_ids}{the indices of the medoids.} \item{criterion}{the value of the criterion function of the partition.} } \details{ Let \eqn{d} denote the pairwise object-to-object dissimilarity matrix corresponding to \code{x}. A \eqn{k}-medoids partition of \code{x} is defined as a partition of the numbers from 1 to \eqn{n}, the number of objects in \code{x}, into \eqn{k} classes \eqn{C_1, \ldots, C_k} such that the criterion function \eqn{L = \sum_l \min_{j \in C_l} \sum_{i \in C_l} d_{ij}} is minimized. This is an NP-hard optimization problem. PAM (Partitioning Around Medoids, see Kaufman & Rousseeuw (1990), Chapter 2) is a very popular heuristic for obtaining optimal \eqn{k}-medoids partitions, and provided by \code{\link[cluster]{pam}} in package \pkg{cluster}. \code{kmedoids} is an exact algorithm based on a binary linear programming formulation of the optimization problem (e.g., Gordon & Vichi (1998), [P4']), using \code{\link[lpSolve]{lp}} from package \pkg{lpSolve} as solver. Depending on available hardware resources (the number of constraints of the program is of the order \eqn{n^2}), it may not be possible to obtain a solution. } \references{ L. Kaufman and P. J. Rousseeuw (1990). \emph{Finding Groups in Data: An Introduction to Cluster Analysis}. Wiley, New York. A. D. Gordon and M. Vichi (1998). Partitions of partitions. \emph{Journal of Classification}, \bold{15}, 265--285. \doi{10.1007/s003579900034}. 
} \keyword{cluster} \keyword{optimize} clue/man/cl_tabulate.Rd0000644000175000017500000000105311304023137014646 0ustar nileshnilesh\name{cl_tabulate} \alias{cl_tabulate} \title{Tabulate Vector Objects} \description{Tabulate the unique values in vector objects.} \usage{ cl_tabulate(x) } \arguments{ \item{x}{a vector.} } \value{ A data frame with components: \item{values}{the unique values.} \item{counts}{an integer vector with the number of times each of the unique values occurs in \code{x}.} } \examples{ data("Kinship82") tab <- cl_tabulate(Kinship82) ## The counts: tab$counts ## The most frequent partition: tab$values[[which.max(tab$counts)]] } \keyword{utilities} clue/man/cl_pam.Rd0000644000175000017500000000515312734173110013634 0ustar nileshnilesh\name{cl_pam} \alias{cl_pam} \title{K-Medoids Partitions of Clusterings} \description{ Compute \eqn{k}-medoids partitions of clusterings. } \usage{ cl_pam(x, k, method = "euclidean", solver = c("pam", "kmedoids")) } \arguments{ \item{x}{an ensemble of partitions or hierarchies, or something coercible to that (see \code{\link{cl_ensemble}}).} \item{k}{an integer giving the number of classes to be used in the partition.} \item{method}{a character string or a function, as for argument \code{method} of function \code{\link{cl_dissimilarity}}.} \item{solver}{a character string indicating the \eqn{k}-medoids solver to be employed. May be abbreviated. If \code{"pam"} (default), the Partitioning Around Medoids (Kaufman & Rousseeuw (1990), Chapter 2) heuristic \code{\link[cluster]{pam}} of package \pkg{cluster} is used. Otherwise, the exact algorithm of \code{\link{kmedoids}} is employed.} } \value{ An object of class \code{"cl_pam"} representing the obtained \dQuote{secondary} partition, which is a list with the following components. \item{cluster}{the class ids of the partition.} \item{medoid_ids}{the indices of the medoids.} \item{prototypes}{a cluster ensemble with the \eqn{k} prototypes (medoids).} \item{criterion}{the value of the criterion function of the partition.} \item{description}{a character string indicating the dissimilarity method employed.} } \details{ An optimal \eqn{k}-medoids partition of the given cluster ensemble is defined as a partition of the objects \eqn{x_i} (the elements of the ensemble) into \eqn{k} classes \eqn{C_1, \ldots, C_k} such that the criterion function \eqn{L = \sum_{l=1}^k \min_{j \in C_l} \sum_{i \in C_l} d(x_i, x_j)} is minimized. Such secondary partitions (e.g., Gordon & Vichi, 1998) are obtained by computing the dissimilarities \eqn{d} of the objects in the ensemble for the given dissimilarity method, and applying a dissimilarity-based \eqn{k}-medoids solver to \eqn{d}. } \references{ L. Kaufman and P. J. Rousseeuw (1990). \emph{Finding Groups in Data: An Introduction to Cluster Analysis}. Wiley, New York. A. D. Gordon and M. Vichi (1998). Partitions of partitions. \emph{Journal of Classification}, \bold{15}, 265--285. \doi{10.1007/s003579900034}. } \seealso{ \code{\link{cl_pclust}} for more general prototype-based partitions of clusterings. } \examples{ data("Kinship82") party <- cl_pam(Kinship82, 3, "symdiff") ## Compare results with tables 5 and 6 in Gordon & Vichi (1998). 
party lapply(cl_prototypes(party), cl_classes) table(cl_class_ids(party)) } \keyword{cluster} clue/man/pclust.Rd0000644000175000017500000001551611430740706013722 0ustar nileshnilesh\name{pclust} \alias{pclust} \alias{pclust_family} \alias{pclust_object} \title{Prototype-Based Partitioning} \description{ Obtain prototype-based partitions of objects by minimizing the criterion \eqn{\sum w_b u_{bj}^m d(x_b, p_j)^e}, the sum of the case-weighted and membership-weighted \eqn{e}-th powers of the dissimilarities between the objects \eqn{x_b} and the prototypes \eqn{p_j}, for suitable dissimilarities \eqn{d} and exponents \eqn{e}. } \usage{ pclust(x, k, family, m = 1, weights = 1, control = list()) pclust_family(D, C, init = NULL, description = NULL, e = 1, .modify = NULL, .subset = NULL) pclust_object(prototypes, membership, cluster, family, m = 1, value, ..., classes = NULL, attributes = NULL) } \arguments{ \item{x}{the object to be partitioned.} \item{k}{an integer giving the number of classes to be used in the partition.} \item{family}{an object of class \code{"pclust_family"} as generated by \code{pclust_family}, containing the information about \eqn{d} and \eqn{e}.} \item{m}{a number not less than 1 controlling the softness of the partition (as the \dQuote{fuzzification parameter} of the fuzzy \eqn{c}-means algorithm). The default value of 1 corresponds to hard partitions obtained from a generalized \eqn{k}-means problem; values greater than one give partitions of increasing softness obtained from a generalized fuzzy \eqn{c}-means problem.} \item{weights}{a numeric vector of non-negative case weights. Recycled to the number of elements given by \code{x} if necessary.} \item{control}{a list of control parameters. See \bold{Details}.} \item{D}{a function for computing dissimilarities \eqn{d} between objects and prototypes.} \item{C}{a \sQuote{consensus} function with formals \code{x}, \code{weights} and \code{control} for computing a consensus prototype \eqn{p} minimizing \eqn{\sum_b w_b d(x_b, p) ^ e}.} \item{init}{a function with formals \code{x} and \code{k} initializing an object with \eqn{k} prototypes from the object \code{x} to be partitioned.} \item{description}{a character string describing the family.} \item{e}{a number giving the exponent \eqn{e} of the criterion.} \item{.modify}{a function with formals \code{x}, \code{i} and \code{value} for modifying a single prototype, or \code{NULL} (default).} \item{.subset}{a function with formals \code{x} and \code{i} for subsetting prototypes, or \code{NULL} (default).} \item{prototypes}{an object representing the prototypes of the partition.} \item{membership}{an object of class \code{"\link{cl_membership}"} with the membership values \eqn{u_{bj}}.} \item{cluster}{the class ids of the nearest hard partition.} \item{value}{the value of the criterion to be minimized.} \item{...}{further elements to be included in the generated pclust object.} \item{classes}{a character vector giving further classes to be given to the generated pclust object in addition to \code{"pclust"}, or \code{NULL} (default).} \item{attributes}{a list of attributes, or \code{NULL} (default).} } \value{ \code{pclust} returns the partition found as an object of class \code{"pclust"} (as obtained by calling \code{pclust_object}) which in addition to the \emph{default} components contains \code{call} (the matched call) and a \code{converged} attribute indicating convergence status (i.e., whether the maximal number of iterations was reached). 
\code{pclust_family} returns an object of class \code{"pclust_family"}, which is a list with components corresponding to the formals of \code{pclust_family}. \code{pclust_object} returns an object inheriting from class \code{"pclust"}, which is a list with components corresponding to the formals (up to and including \code{...}) of \code{pclust_object}, and additional classes and attributes specified by \code{classes} and \code{attributes}, respectively. } \details{ For \eqn{m = 1}, a generalization of the Lloyd-Forgy variant of the \eqn{k}-means algorithm is used, which iterates between reclassifying objects to their closest prototypes (according to the dissimilarities given by \code{D}), and computing new prototypes as the consensus for the classes (using \code{C}). For \eqn{m > 1}, a generalization of the fuzzy \eqn{c}-means recipe (e.g., Bezdek (1981)) is used, which alternates between computing optimal memberships for fixed prototypes, and computing new prototypes as the suitably weighted consensus clusterings for the classes. This procedure is repeated until convergence occurs, or the maximal number of iterations is reached. Currently, no local improvement heuristics are provided. It is possible to perform several runs of the procedure via control arguments \code{nruns} or \code{start} (the default is to perform a single run), in which case the first partition with the smallest value of the criterion is returned. The dissimilarity and consensus functions as well as the exponent \eqn{e} are specified via \code{family}. In principle, arbitrary representations of objects to be partitioned and prototypes (which do not necessarily have to be \dQuote{of the same kind}) can be employed. In addition to \code{D} and \code{C}, what is needed are means to obtain an initial collection of \eqn{k} prototypes (\code{init}), to modify a single prototype (\code{.modify}), and subset the prototypes (\code{.subset}). By default, list and (currently, only dense) matrix (with the usual convention that the rows correspond to the objects) are supported. Otherwise, the family has to provide the functions needed. Available control parameters are as follows. \describe{ \item{\code{maxiter}}{an integer giving the maximal number of iterations to be performed. Defaults to 100.} \item{\code{nruns}}{an integer giving the number of runs to be performed. Defaults to 1.} \item{\code{reltol}}{the relative convergence tolerance. Defaults to \code{sqrt(.Machine$double.eps)}.} \item{\code{start}}{a list of prototype objects to be used as starting values.} \item{\code{verbose}}{a logical indicating whether to provide some output on minimization progress. Defaults to \code{getOption("verbose")}.} \item{\code{control}}{control parameters to be used in the consensus function.} } The fixed point approach employed is a heuristic which cannot be guaranteed to find the global minimum, in particular if \code{C} is not an exact minimizer. Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the base algorithm. } \references{ J. C. Bezdek (1981). \emph{Pattern recognition with fuzzy objective function algorithms}. New York: Plenum. } \seealso{ \code{\link[stats]{kmeans}}, \code{\link[e1071]{cmeans}}. 
} clue/man/sumt.Rd0000644000175000017500000000670012734174226013401 0ustar nileshnilesh\name{sumt} \alias{sumt} \title{Sequential Unconstrained Minimization Technique} \description{ Solve constrained optimization problems via the Sequential Unconstrained Minimization Technique (\acronym{SUMT}). } \usage{ sumt(x0, L, P, grad_L = NULL, grad_P = NULL, method = NULL, eps = NULL, q = NULL, verbose = NULL, control = list()) } \arguments{ \item{x0}{a list of starting values, or a single starting value.} \item{L}{a function to minimize.} \item{P}{a non-negative penalty function such that \eqn{P(x)} is zero iff the constraints are satisfied.} \item{grad_L}{a function giving the gradient of \code{L}, or \code{NULL} (default).} \item{grad_P}{a function giving the gradient of \code{P}, or \code{NULL} (default).} \item{method}{a character string, or \code{NULL}. If not given, \code{"CG"} is used. If equal to \code{"nlm"}, minimization is carried out using \code{\link[stats]{nlm}}. Otherwise, \code{\link[stats]{optim}} is used with \code{method} as the given method.} \item{eps}{the absolute convergence tolerance. The algorithm stops if the (maximum) distance between successive \code{x} values is less than \code{eps}. Defaults to \code{sqrt(.Machine$double.eps)}.} \item{q}{a double greater than one controlling the growth of the \eqn{\rho_k} as described in \bold{Details}. Defaults to 10.} \item{verbose}{a logical indicating whether to provide some output on minimization progress. Defaults to \code{getOption("verbose")}.} \item{control}{a list of control parameters to be passed to the minimization routine in case \code{optim} is used.} } \details{ The Sequential Unconstrained Minimization Technique is a heuristic for constrained optimization. To minimize a function \eqn{L} subject to constraints, one employs a non-negative function \eqn{P} penalizing violations of the constraints, such that \eqn{P(x)} is zero iff \eqn{x} satisfies the constraints. One iteratively minimizes \eqn{L(x) + \rho_k P(x)}, where the \eqn{\rho} values are increased according to the rule \eqn{\rho_{k+1} = q \rho_k} for some constant \eqn{q > 1}, until convergence is obtained in the sense that the Euclidean distance between successive solutions \eqn{x_k} and \eqn{x_{k+1}} is small enough. Note that the \dQuote{solution} \eqn{x} obtained does not necessarily satisfy the constraints, i.e., has zero \eqn{P(x)}. Note also that there is no guarantee that global (approximately) constrained optima are found. Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the algorithm. The unconstrained minimizations are carried out by either \code{\link[stats]{optim}} or \code{\link[stats]{nlm}}, using analytic gradients if both \code{grad_L} and \code{grad_P} are given, and numeric ones otherwise. If more than one starting value is given, the solution with the minimal augmented criterion function value is returned. } \value{ A list inheriting from class \code{"sumt"}, with components \code{x}, \code{L}, \code{P}, and \code{rho} giving the solution obtained, the value of the criterion and penalty function at \code{x}, and the final \eqn{\rho} value used in the augmented criterion function. } \references{ A. V. Fiacco and G. P. McCormick (1968). \emph{Nonlinear programming: Sequential unconstrained minimization techniques}. New York: John Wiley & Sons. 
} \keyword{optimize} clue/man/cl_agreement.Rd0000644000175000017500000002370713761714616015050 0ustar nileshnilesh\name{cl_agreement} \alias{cl_agreement} \title{Agreement Between Partitions or Hierarchies} \description{Compute the agreement between (ensembles) of partitions or hierarchies. } \usage{ cl_agreement(x, y = NULL, method = "euclidean", \dots) } \arguments{ \item{x}{an ensemble of partitions or hierarchies and dissimilarities, or something coercible to that (see \code{\link{cl_ensemble}}).} \item{y}{\code{NULL} (default), or as for \code{x}.} \item{method}{a character string specifying one of the built-in methods for computing agreement, or a function to be taken as a user-defined method. If a character string, its lower-cased version is matched against the lower-cased names of the available built-in methods using \code{\link{pmatch}}. See \bold{Details} for available built-in methods.} \item{\dots}{further arguments to be passed to methods.} } \value{ If \code{y} is \code{NULL}, an object of class \code{"cl_agreement"} containing the agreements between the all pairs of components of \code{x}. Otherwise, an object of class \code{"cl_cross_agreement"} with the agreements between the components of \code{x} and the components of \code{y}. } \details{ If \code{y} is given, its components must be of the same kind as those of \code{x} (i.e., components must either all be partitions, or all be hierarchies or dissimilarities). If all components are partitions, the following built-in methods for measuring agreement between two partitions with respective membership matrices \eqn{u} and \eqn{v} (brought to a common number of columns) are available: \describe{ \item{\code{"euclidean"}}{\eqn{1 - d / m}, where \eqn{d} is the Euclidean dissimilarity of the memberships, i.e., the square root of the minimal sum of the squared differences of \eqn{u} and all column permutations of \eqn{v}, and \eqn{m} is an upper bound for the maximal Euclidean dissimilarity. See Dimitriadou, Weingessel and Hornik (2002).} \item{\code{"manhattan"}}{\eqn{1 - d / m}, where \eqn{d} is the Manhattan dissimilarity of the memberships, i.e., the minimal sum of the absolute differences of \eqn{u} and all column permutations of \eqn{v}, and \eqn{m} is an upper bound for the maximal Manhattan dissimilarity.} \item{\code{"Rand"}}{the Rand index (the rate of distinct pairs of objects both in the same class or both in different classes in both partitions), see Rand (1971) or Gordon (1999), page 198. For soft partitions, (currently) the Rand index of the corresponding nearest hard partitions is used.} \item{\code{"cRand"}}{the Rand index corrected for agreement by chance, see Hubert and Arabie (1985) or Gordon (1999), page 198. Can only be used for hard partitions.} \item{\code{"NMI"}}{Normalized Mutual Information, see Strehl and Ghosh (2002). For soft partitions, (currently) the NMI of the corresponding nearest hard partitions is used.} \item{\code{"KP"}}{the Katz-Powell index, i.e., the product-moment correlation coefficient between the elements of the co-membership matrices \eqn{C(u) = u u'} and \eqn{C(v)}, respectively, see Katz and Powell (1953). For soft partitions, (currently) the Katz-Powell index of the corresponding nearest hard partitions is used. 
(Note that for hard partitions, the \eqn{(i,j)} entry of \eqn{C(u)} is one iff objects \eqn{i} and \eqn{j} are in the same class.)} \item{\code{"angle"}}{the maximal cosine of the angle between the elements of \eqn{u} and all column permutations of \eqn{v}.} \item{\code{"diag"}}{the maximal co-classification rate, i.e., the maximal rate of objects with the same class ids in both partitions after arbitrarily permuting the ids.} \item{\code{"FM"}}{the index of Fowlkes and Mallows (1983), i.e., the ratio \eqn{N_{xy} / \sqrt{N_x N_y}}{N_xy / sqrt(N_x N_y)} of the number \eqn{N_{xy}}{N_xy} of distinct pairs of objects in the same class in both partitions and the geometric mean of the numbers \eqn{N_x} and \eqn{N_y} of distinct pairs of objects in the same class in partition \eqn{x} and partition \eqn{y}, respectively. For soft partitions, (currently) the Fowlkes-Mallows index of the corresponding nearest hard partitions is used.} \item{\code{"Jaccard"}}{the Jaccard index, i.e., the ratio of the numbers of distinct pairs of objects in the same class in both partitions and in at least one partition, respectively. For soft partitions, (currently) the Jaccard index of the corresponding nearest hard partitions is used.} \item{\code{"purity"}}{the purity of the classes of \code{x} with respect to those of \code{y}, i.e., \eqn{\sum_j \max_i n_{ij} / n}, where \eqn{n_{ij}} is the joint frequency of objects in class \eqn{i} for \code{x} and in class \eqn{j} for \code{y}, and \eqn{n} is the total number of objects.} \item{\code{"PS"}}{Prediction Strength, see Tibshirani and Walter (2005): the minimum, over all classes \eqn{j} of \code{y}, of the maximal rate of objects in the same class for \code{x} and in class \eqn{j} for \code{y}.} } If all components are hierarchies, available built-in methods for measuring agreement between two hierarchies with respective ultrametrics \eqn{u} and \eqn{v} are as follows. \describe{ \item{\code{"euclidean"}}{\eqn{1 / (1 + d)}, where \eqn{d} is the Euclidean dissimilarity of the ultrametrics (i.e., the square root of the sum of the squared differences of \eqn{u} and \eqn{v}).} \item{\code{"manhattan"}}{\eqn{1 / (1 + d)}, where \eqn{d} is the Manhattan dissimilarity of the ultrametrics (i.e., the sum of the absolute differences of \eqn{u} and \eqn{v}).} \item{\code{"cophenetic"}}{The cophenetic correlation coefficient. (I.e., the product-moment correlation of the ultrametrics.)} \item{\code{"angle"}}{the cosine of the angle between the ultrametrics.} \item{\code{"gamma"}}{\eqn{1 - d}, where \eqn{d} is the rate of inversions between the associated ultrametrics (i.e., the rate of pairs \eqn{(i,j)} and \eqn{(k,l)} for which \eqn{u_{ij} < u_{kl}} and \eqn{v_{ij} > v_{kl}}). (This agreement measure is a linear transformation of Kruskal's \eqn{\gamma}{gamma}.)} } The measures based on ultrametrics also allow computing agreement with \dQuote{raw} dissimilarities on the underlying objects (R objects inheriting from class \code{"dist"}). If a user-defined agreement method is to be employed, it must be a function taking two clusterings as its arguments. Symmetric agreement objects of class \code{"cl_agreement"} are implemented as symmetric proximity objects with self-proximities identical to one, and inherit from class \code{"cl_proximity"}. They can be coerced to dense square matrices using \code{as.matrix}. 
It is possible to use 2-index matrix-style subscripting for such objects; unless this uses identical row and column indices, this results in a (non-symmetric agreement) object of class \code{"cl_cross_agreement"}. } \references{ E. Dimitriadou, A. Weingessel and K. Hornik (2002). A combination scheme for fuzzy clustering. \emph{International Journal of Pattern Recognition and Artificial Intelligence}, \bold{16}, 901--912. \cr \doi{10.1142/S0218001402002052}. E. B. Fowlkes and C. L. Mallows (1983). A method for comparing two hierarchical clusterings. \emph{Journal of the American Statistical Association}, \bold{78}, 553--569. \cr \doi{10.1080/01621459.1983.10478008}. A. D. Gordon (1999). \emph{Classification} (2nd edition). Boca Raton, FL: Chapman & Hall/CRC. L. Hubert and P. Arabie (1985). Comparing partitions. \emph{Journal of Classification}, \bold{2}, 193--218. \doi{10.1007/bf01908075}. W. M. Rand (1971). Objective criteria for the evaluation of clustering methods. \emph{Journal of the American Statistical Association}, \bold{66}, 846--850. \doi{10.2307/2284239}. L. Katz and J. H. Powell (1953). A proposed index of the conformity of one sociometric measurement to another. \emph{Psychometrika}, \bold{18}, 249--256. \doi{10.1007/BF02289063}. A. Strehl and J. Ghosh (2002). Cluster ensembles --- A knowledge reuse framework for combining multiple partitions. \emph{Journal of Machine Learning Research}, \bold{3}, 583--617. \cr \url{https://www.jmlr.org/papers/volume3/strehl02a/strehl02a.pdf}. R. Tibshirani and G. Walter (2005). Cluster validation by Prediction Strength. \emph{Journal of Computational and Graphical Statistics}, \bold{14}/3, 511--528. \doi{10.1198/106186005X59243}. } \seealso{ \code{\link{cl_dissimilarity}}; \code{\link[e1071]{classAgreement}} in package \pkg{e1071}. } \examples{ ## An ensemble of partitions. data("CKME") pens <- CKME[1 : 20] # for saving precious time ... summary(c(cl_agreement(pens))) summary(c(cl_agreement(pens, method = "Rand"))) summary(c(cl_agreement(pens, method = "diag"))) cl_agreement(pens[1:5], pens[6:7], method = "NMI") ## Equivalently, using subscripting. cl_agreement(pens, method = "NMI")[1:5, 6:7] ## An ensemble of hierarchies. d <- dist(USArrests) hclust_methods <- c("ward", "single", "complete", "average", "mcquitty") hclust_results <- lapply(hclust_methods, function(m) hclust(d, m)) names(hclust_results) <- hclust_methods hens <- cl_ensemble(list = hclust_results) summary(c(cl_agreement(hens))) ## Note that the Euclidean agreements are *very* small. ## This is because the ultrametrics differ substantially in height: u <- lapply(hens, cl_ultrametric) round(sapply(u, max), 3) ## Rescaling the ultrametrics to [0, 1] gives: u <- lapply(u, function(x) (x - min(x)) / (max(x) - min(x))) shens <- cl_ensemble(list = lapply(u, as.cl_dendrogram)) summary(c(cl_agreement(shens))) ## Au contraire ... summary(c(cl_agreement(hens, method = "cophenetic"))) cl_agreement(hens[1:3], hens[4:5], method = "gamma") } \keyword{cluster} clue/man/n_of_objects.Rd0000644000175000017500000000221212211412717015023 0ustar nileshnilesh\name{n_of_objects} \alias{n_of_objects} \title{Number of Objects in a Partition or Hierarchy} \description{Determine the number of objects from which a partition or hierarchy was obtained.} \usage{ n_of_objects(x) } \arguments{ \item{x}{an \R object representing a (hard or soft) partition or a hierarchy of objects, or dissimilarities between objects.} } \value{ An integer giving the number of objects. } \details{ This is a generic function.
The methods provided in package \pkg{clue} handle the partitions and hierarchies obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{ape}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself). There is also a method for object dissimilarities which inherit from class \code{"\link{dist}"}. } \seealso{ \code{\link{is.cl_partition}}, \code{\link{is.cl_hierarchy}} } \examples{ data("Cassini") pcl <- kmeans(Cassini$x, 3) n_of_objects(pcl) hcl <- hclust(dist(USArrests)) n_of_objects(hcl) } \keyword{cluster} clue/man/Kinship82.Rd0000644000175000017500000000352312734174662014164 0ustar nileshnilesh\name{Kinship82} \alias{Kinship82} \title{Rosenberg-Kim Kinship Terms Partition Data} \description{ Partitions of 15 kinship terms given by 85 female undergraduates at Rutgers University who were asked to sort the terms into classes \dQuote{on the basis of some aspect of meaning}. } \usage{data("Kinship82")} \format{ A cluster ensemble of 85 hard partitions of the 15 kinship terms. } \details{ Rosenberg and Kim (1975) describe an experiment where perceived similarities of the kinship terms were obtained from six different \dQuote{sorting} experiments. These \dQuote{original} Rosenberg-Kim kinship terms data were published in Arabie, Carroll and de Sarbo (1987), and are also contained in file \file{indclus.data} in the shell archive \url{http://www.netlib.org/mds/indclus.shar}. For one of the experiments, partitions of the terms were printed in Rosenberg (1982). Comparison with the original data indicates that the partition data have the \dQuote{nephew} and \dQuote{niece} columns interchanged, which is corrected in the data set at hand. } \source{ Table 7.1 in Rosenberg (1982), with the \dQuote{nephew} and \dQuote{niece} columns interchanged. } \references{ P. Arabie, J. D. Carroll and W. S. de Sarbo (1987). \emph{Three-way scaling and clustering}. Newbury Park, CA: Sage. S. Rosenberg and M. P. Kim (1975). The method of sorting as a data-gathering procedure in multivariate research. \emph{Multivariate Behavioral Research}, \bold{10}, 489--502. \cr \doi{10.1207/s15327906mbr1004_7}. S. Rosenberg (1982). The method of sorting in multivariate research with applications selected from cognitive psychology and person perception. In N. Hirschberg and L. G. Humphreys (eds.), \emph{Multivariate Applications in the Social Sciences}, 117--142. Hillsdale, NJ: Erlbaum. } \keyword{datasets} clue/man/solve_LSAP.Rd0000644000175000017500000000331511304023137014341 0ustar nileshnilesh\name{solve_LSAP} \encoding{UTF-8} \alias{solve_LSAP} \title{Solve Linear Sum Assignment Problem} \description{ Solve the linear sum assignment problem using the Hungarian method. } \usage{ solve_LSAP(x, maximum = FALSE) } \arguments{ \item{x}{a matrix with nonnegative entries and at least as many columns as rows.} \item{maximum}{a logical indicating whether to minimize or maximize the sum of assigned costs.} } \details{ If \eqn{nr} and \eqn{nc} are the numbers of rows and columns of \code{x}, \code{solve_LSAP} finds an optimal \emph{assignment} of rows to columns, i.e., a one-to-one map \code{p} of the numbers from 1 to \eqn{nr} to the numbers from 1 to \eqn{nc} (a permutation of these numbers in case \code{x} is a square matrix) such that \eqn{\sum_{i=1}^{nr} x[i, p[i]]} is minimized or maximized.
This assignment can be found using a linear program (and package \pkg{lpSolve} provides a function \code{lp.assign} for doing so), but typically more efficiently and provably in polynomial time \eqn{O(n^3)} using primal-dual methods such as the so-called Hungarian method (see the references). } \value{ An object of class \code{"solve_LSAP"} with the optimal assignment of rows to columns. } \references{ C. Papadimitriou and K. Steiglitz (1982), \emph{Combinatorial Optimization: Algorithms and Complexity}. Englewood Cliffs: Prentice Hall. } \author{ Walter Böhm \email{Walter.Boehm@wu-wien.ac.at} kindly provided C code implementing the Hungarian method. } \examples{ x <- matrix(c(5, 1, 4, 3, 5, 2, 2, 4, 4), nrow = 3) solve_LSAP(x) solve_LSAP(x, maximum = TRUE) ## To get the optimal value (for now): y <- solve_LSAP(x) sum(x[cbind(seq_along(y), y)]) } \keyword{optimize} clue/man/Phonemes.Rd0000644000175000017500000000216212734171677014174 0ustar nileshnilesh\name{Phonemes} \alias{Phonemes} \title{Miller-Nicely Consonant Phoneme Confusion Data} \description{ Miller-Nicely data on the auditory confusion of 16 consonant phonemes. } \usage{data("Phonemes")} \format{ A symmetric matrix of the misclassification probabilities of 16 English consonant phonemes. } \details{ Miller and Nicely (1955) obtained the confusions by exposing female subjects to a series of syllables consisting of one of the 16 consonants followed by the vowel \samp{a} under 17 different experimental conditions. The data provided are obtained from aggregating the six so-called flat-noise conditions in which only the speech-to-noise ratio was varied into a single matrix of misclassification frequencies. } \source{ The data set is also contained in file \file{mapclus.data} in the shell archive \url{http://www.netlib.org/mds/mapclus.shar}. } \references{ G. A. Miller and P. E. Nicely (1955). An analysis of perceptual confusions among some English consonants. \emph{Journal of the Acoustical Society of America}, \bold{27}, 338--352. \doi{10.1121/1.1907526}. } \keyword{datasets} clue/man/cl_ultrametric.Rd0000644000175000017500000000440311304023137015402 0ustar nileshnilesh\name{cl_ultrametric} \alias{cl_ultrametric} \alias{as.cl_ultrametric} \title{Ultrametrics of Hierarchies} \description{ Compute the ultrametric distances for objects representing (total indexed) hierarchies. } \usage{ cl_ultrametric(x, size = NULL, labels = NULL) as.cl_ultrametric(x) } \arguments{ \item{x}{an R object representing a (total indexed) hierarchy of objects.} \item{size}{an integer giving the number of objects in the hierarchy.} \item{labels}{a character vector giving the names of the objects in the hierarchy.} } \value{ An object of class \code{"cl_ultrametric"} containing the ultrametric distances. } \details{ If \code{x} is not an ultrametric or a hierarchy with an ultrametric representation, \code{cl_ultrametric} uses \code{\link[stats]{cophenetic}} to obtain the ultrametric (also known as cophenetic) distances from the hierarchy, which in turn by default calls the S3 generic \code{\link[stats]{as.hclust}} on the hierarchy. Support for a class which represents hierarchies can thus be added by providing \code{as.hclust} methods for this class. In R 2.1.0 or better, \code{cophenetic} is an S3 generic as well, and one can also more directly provide methods for this if necessary. 
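As a purely illustrative sketch of this mechanism (the class \code{"myhier"} and its \code{tree} component are hypothetical and not part of \pkg{clue}), support could be added along the lines of
\preformatted{
    ## Hypothetical class "myhier" wrapping an hclust tree:
    as.hclust.myhier <- function(x, ...) x$tree
    h <- structure(list(tree = hclust(dist(USArrests))),
                   class = "myhier")
    cl_ultrametric(h)    # ultrametric obtained via cophenetic()/as.hclust()
}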
\code{as.cl_ultrametric} is a generic function which can be used for coercing \emph{raw} (non-classed) ultrametrics, represented as numeric vectors (of the lower-half entries) or numeric matrices, to ultrametric objects. Ultrametric objects are implemented as symmetric proximity objects with a dissimilarity interpretation so that self-proximities are zero, and inherit from classes \code{"\link{cl_dissimilarity}"} and \code{"cl_proximity"}. See section \bold{Details} in the documentation for \code{\link{cl_dissimilarity}} for implications. Ultrametric objects can also be coerced to classes \code{"\link[stats]{dendrogram}"} and \code{"\link[stats]{hclust}"}, and hence in particular use the \code{plot} methods for these classes. By default, plotting an ultrametric object uses the plot method for dendrograms. } \seealso{ \code{\link{is.cl_hierarchy}} } \examples{ hc <- hclust(dist(USArrests)) u <- cl_ultrametric(hc) ## Subscripting. u[1 : 5, 1 : 5] u[1 : 5, 6 : 7] ## Plotting. plot(u) } \keyword{cluster} clue/man/cl_object_names.Rd0000644000175000017500000000176712211412557015520 0ustar nileshnilesh\name{cl_object_names} \alias{cl_object_names} \title{Find Object Names} \description{ Find the names of the objects from which a taxonomy (partition or hierarchy) or proximity was obtained. } \usage{ cl_object_names(x) } \arguments{ \item{x}{an \R object representing a taxonomy or proximity.} } \value{ A character vector of length \code{\link{n_of_objects}(x)} in case the names of the objects could be determined, or \code{NULL}. } \details{ This is a generic function. The methods provided in package \pkg{clue} handle the partitions and hierarchies obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{ape}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself), in as much as possible. There is also a method for object dissimilarities which inherit from class \code{"\link{dist}"}. } \keyword{cluster} clue/man/cl_boot.Rd0000644000175000017500000000455211304023137014017 0ustar nileshnilesh\name{cl_boot} \alias{cl_boot} \title{Bootstrap Resampling of Clustering Algorithms} \description{ Generate bootstrap replicates of the results of applying a \dQuote{base} clustering algorithm to a given data set. } \usage{ cl_boot(x, B, k = NULL, algorithm = if (is.null(k)) "hclust" else "kmeans", parameters = list(), resample = FALSE) } \arguments{ \item{x}{the data set of objects to be clustered, as appropriate for the base clustering algorithm.} \item{B}{an integer giving the number of bootstrap replicates.} \item{k}{\code{NULL} (default), or an integer giving the number of classes to be used for a partitioning base algorithm.} \item{algorithm}{a character string or function specifying the base clustering algorithm.} \item{parameters}{a named list of additional arguments to be passed to the base algorithm.} \item{resample}{a logical indicating whether the data should be resampled in addition to \dQuote{sampling from the algorithm}. 
If resampling is used, the class memberships of the objects given in \code{x} are predicted from the results of running the base algorithm on bootstrap samples of \code{x}.} } \value{ A cluster ensemble of length \eqn{B}, with either (if resampling is not used, default) the results of running the base algorithm on the given data set, or (if resampling is used) the memberships for the given data predicted from the results of running the base algorithm on bootstrap samples of the data. } \details{ This is a rather simple-minded function with limited applicability, and mostly useful for studying the effect of (uncontrolled) random initializations of fixed-point partitioning algorithms such as \code{\link[stats]{kmeans}} or \code{\link[e1071]{cmeans}}, see the examples. To study the effect of varying control parameters or explicitly providing random starting values, the respective cluster ensemble has to be generated explicitly (most conveniently by using \code{\link{replicate}} to create a list \code{lst} of suitable instances of clusterings obtained by the base algorithm, and using \code{cl_ensemble(list = lst)} to create the ensemble). } \examples{ ## Study e.g. the effect of random kmeans() initializations. data("Cassini") pens <- cl_boot(Cassini$x, 15, 3) diss <- cl_dissimilarity(pens) summary(c(diss)) plot(hclust(diss)) } \keyword{cluster} clue/man/cl_dissimilarity.Rd0000644000175000017500000003665512734174403015766 0ustar nileshnilesh\name{cl_dissimilarity} \encoding{UTF-8} \alias{cl_dissimilarity} \title{Dissimilarity Between Partitions or Hierarchies} \description{Compute the dissimilarity between (ensembles) of partitions or hierarchies.} \usage{ cl_dissimilarity(x, y = NULL, method = "euclidean", \dots) } \arguments{ \item{x}{an ensemble of partitions or hierarchies and dissimilarities, or something coercible to that (see \code{\link{cl_ensemble}}).} \item{y}{\code{NULL} (default), or as for \code{x}.} \item{method}{a character string specifying one of the built-in methods for computing dissimilarity, or a function to be taken as a user-defined method. If a character string, its lower-cased version is matched against the lower-cased names of the available built-in methods using \code{\link{pmatch}}. See \bold{Details} for available built-in methods.} \item{\dots}{further arguments to be passed to methods.} } \value{ If \code{y} is \code{NULL}, an object of class \code{"cl_dissimilarity"} containing the dissimilarities between all pairs of components of \code{x}. Otherwise, an object of class \code{"cl_cross_dissimilarity"} with the dissimilarities between the components of \code{x} and the components of \code{y}. } \details{ If \code{y} is given, its components must be of the same kind as those of \code{x} (i.e., components must either all be partitions, or all be hierarchies or dissimilarities). If all components are partitions, the following built-in methods for measuring dissimilarity between two partitions with respective membership matrices \eqn{u} and \eqn{v} (brought to a common number of columns) are available: \describe{ \item{\code{"euclidean"}}{the Euclidean dissimilarity of the memberships, i.e., the square root of the minimal sum of the squared differences of \eqn{u} and all column permutations of \eqn{v}. 
See Dimitriadou, Weingessel and Hornik (2002).} \item{\code{"manhattan"}}{the Manhattan dissimilarity of the memberships, i.e., the minimal sum of the absolute differences of \eqn{u} and all column permutations of \eqn{v}.} \item{\code{"comemberships"}}{the Euclidean dissimilarity of the elements of the co-membership matrices \eqn{C(u) = u u'} and \eqn{C(v)}, i.e., the square root of the sum of the squared differences of \eqn{C(u)} and \eqn{C(v)}.} \item{\code{"symdiff"}}{the cardinality of the symmetric set difference of the sets of co-classified pairs of distinct objects in the partitions. I.e., the number of distinct pairs of objects in the same class in exactly one of the partitions. (Alternatively, the cardinality of the symmetric set difference between the (binary) equivalence relations corresponding to the partitions.) For soft partitions, (currently) the symmetric set difference of the corresponding nearest hard partitions is used.} \item{\code{"Rand"}}{the Rand distance, i.e., the rate of distinct pairs of objects in the same class in exactly one of the partitions. (Related to the Rand index \eqn{a} via the linear transformation \eqn{d = (1 - a) / 2}.) For soft partitions, (currently) the Rand distance of the corresponding nearest hard partitions is used.} \item{\code{"GV1"}}{the square root of the dissimilarity \eqn{\Delta_1}{Delta_1} used for the first model in Gordon and Vichi (2001), i.e., the square root of the minimal sum of the squared differences of the \emph{matched} non-zero columns of \eqn{u} and \eqn{v}.} \item{\code{"BA/\var{d}"}}{distance measures for hard partitions discussed in Boorman and Arabie (1972), with \var{d} one of \samp{A}, \samp{C}, \samp{D}, or \samp{E}. For soft partitions, the distances of the corresponding nearest hard partitions are used. \code{"BA/A"} is the minimum number of single element moves (move from one class to another or a new one) needed to transform one partition into the other. Introduced in Rubin (1967). \code{"BA/C"} is the minimum number of lattice moves for transforming one partition into the other, where partitions are said to be connected by a lattice move if one is \emph{just} finer than the other (i.e., there is no other partition between them) in the partition lattice (see \code{\link{cl_meet}}). Equivalently, with \eqn{z} the join of \code{x} and \code{y} and \eqn{S} giving the number of classes, this can be written as \eqn{S(x) + S(y) - 2 S(z)}. Attributed to David Pavy. \code{"BA/D"} is the \dQuote{pair-bonds} distance, which can be defined as \eqn{S(x) + S(y) - 2 S(z)}, with \eqn{z} the meet of \code{x} and \code{y} and \eqn{S} the \emph{supervaluation} (i.e., non-decreasing with respect to the partial order on the partition lattice) function \eqn{\sum_i (n_i (n_i - 1)) / (n (n - 1))}, where the \eqn{n_i} are the numbers of objects in the respective classes of the partition (such that \eqn{n_i (n_i - 1) / 2} are the numbers of pair bonds in the classes), and \eqn{n} the total number of objects. \code{"BA/E"} is the normalized information distance, defined as \eqn{1 - I / H}, where \eqn{I} is the average mutual information between the partitions, and \eqn{H} is the average entropy of the meet \eqn{z} of the partitions. Introduced in Rajski (1961). 
(Boorman and Arabie also discuss a distance measure (\eqn{B}) based on the minimum number of set moves needed to transform one partition into the other, which, differently from the \eqn{A} and \eqn{C} distance measures is hard to compute (Day, 1981) and (currently) not provided.)} \item{\code{"VI"}}{Variation of Information, see Meila (2003). If \code{\dots} has an argument named \code{weights}, it is taken to specify case weights.} \item{\code{"Mallows"}}{the Mallows-type distance by Zhou, Li and Zha (2005), which is related to the Monge-Kantorovich mass transfer problem, and given as the \eqn{p}-th root of the minimal value of the transportation problem \eqn{\sum w_{jk} \sum_i |u_{ij} - v_{ik}| ^ p} with constraints \eqn{w_{jk} \ge 0}, \eqn{\sum_j w_{jk} = \alpha_j}, \eqn{\sum_k w_{jk} = \beta_k}, where \eqn{\sum_j \alpha_j = \sum_k \beta_k}. The parameters \eqn{p}, \eqn{\alpha} and \eqn{\beta} all default to one (in this case, the Mallows distance coincides with the Manhattan dissimilarity), and can be specified via additional arguments named \code{p}, \code{alpha}, and \code{beta}, respectively.} \item{\code{"CSSD"}}{the Cluster Similarity Sensitive Distance of Zhou, Li and Zha (2005), which is given as the minimal value of \eqn{\sum_{k,l} (1 - 2 w_{kl} / (\alpha_k + \beta_l)) L_{kl}}, where \eqn{L_{kl} = \sum_i u_{ik} v_{il} d(p_{x;k}, p_{y;l})} with \eqn{p_{x;k}} and \eqn{p_{y;l}} the prototype of the \eqn{k}-th class of \code{x} and the \eqn{l}-th class of \code{y}, respectively, \eqn{d} is the distance between these, and the \eqn{w_{kl}} as for Mallows distance. If prototypes are matrices, the Euclidean distance between these is used as default. Using the additional argument \code{L}, one can give a matrix of \eqn{L_{kl}} values, or the function \eqn{d}. Parameters \eqn{\alpha} and \eqn{\beta} all default to one, and can be specified via additional arguments named \code{alpha} and \code{beta}, respectively.} } For hard partitions, both Manhattan and squared Euclidean dissimilarity give twice the \emph{transfer distance} (Charon et al., 2005), which is the minimum number of objects that must be removed so that the implied partitions (restrictions to the remaining objects) are identical. This is also known as the \emph{\eqn{R}-metric} in Day (1981), i.e., the number of augmentations and removals of single objects needed to transform one partition into the other, and the \emph{partition-distance} in Gusfield (2002), and equals twice the number of single element moves distance of Boorman and Arabie. For hard partitions, the pair-bonds (Boorman-Arabie \eqn{D}) distance is identical to the Rand distance, and can also be written as the Manhattan distance between the co-membership matrices corresponding to the partitions, or equivalently, their symdiff distance, normalized by \eqn{n (n - 1)}. If all components are hierarchies, available built-in methods for measuring dissimilarity between two hierarchies with respective ultrametrics \eqn{u} and \eqn{v} are as follows. 
\describe{ \item{\code{"euclidean"}}{the Euclidean dissimilarity of the ultrametrics (i.e., the square root of the sum of the squared differences of \eqn{u} and \eqn{v}).} \item{\code{"manhattan"}}{the Manhattan dissimilarity of the ultrametrics (i.e., the sum of the absolute differences of \eqn{u} and \eqn{v}).} \item{\code{"cophenetic"}}{\eqn{1 - c^2}, where \eqn{c} is the cophenetic correlation coefficient (i.e., the product-moment correlation of the ultrametrics).} \item{\code{"gamma"}}{the rate of inversions between the ultrametrics (i.e., the rate of pairs \eqn{(i,j)} and \eqn{(k,l)} for which \eqn{u_{ij} < u_{kl}} and \eqn{v_{ij} > v_{kl}}).} \item{\code{"symdiff"}}{the cardinality of the symmetric set difference of the sets of classes (hierarchies in the strict sense) induced by the dendrograms. I.e., the number of sets of objects obtained by a split in exactly one of the hierarchies.} \item{\code{"Chebyshev"}}{the Chebyshev (maximal) dissimilarity of the ultrametrics (i.e., the maximum of the absolute differences of \eqn{u} and \eqn{v}).} \item{\code{"Lyapunov"}}{the logarithm of the product of the maximal and minimal ratios of the ultrametrics. This is also known as the \dQuote{Hilbert projective metric} on the cone represented by the ultrametrics (e.g., Jardine & Sibson (1971), page 107), and only defined for \emph{strict} ultrametrics (which are strictly positive for distinct objects).} \item{\code{"BO"}}{the \eqn{m_\delta} family of tree metrics by Boorman and Olivier (1973), which are of the form \eqn{m_\delta = \int_0^\infty \delta(p(h), q(h)) dh}, where \eqn{p(h)} and \eqn{q(h)} are the hard partitions obtaining by cutting the trees (dendrograms) at height \eqn{h}, and \eqn{\delta} is a suitably dissimilarity measure for partitions. In particular, when taking \eqn{\delta} as symdiff or Rand dissimilarity, \eqn{m_\delta} is the Manhattan dissimilarity of the hierarchies. If \code{\dots} has an argument named \code{delta} it is taken to specify the partition dissimilarity \eqn{\delta} to be employed.} \item{\code{"spectral"}}{the spectral norm (2-norm) of the differences of the ultrametrics, suggested in Mérigot, Durbec, and Gaertner (2010).} } The measures based on ultrametrics also allow computing dissimilarity with \dQuote{raw} dissimilarities on the underlying objects (R objects inheriting from class \code{"dist"}). If a user-defined dissimilarity method is to be employed, it must be a function taking two clusterings as its arguments. Symmetric dissimilarity objects of class \code{"cl_dissimilarity"} are implemented as symmetric proximity objects with self-proximities identical to zero, and inherit from class \code{"cl_proximity"}. They can be coerced to dense square matrices using \code{as.matrix}. It is possible to use 2-index matrix-style subscripting for such objects; unless this uses identical row and column indices, this results in a (non-symmetric dissimilarity) object of class \code{"cl_cross_dissimilarity"}. Symmetric dissimilarity objects also inherit from class \code{"\link{dist}"} (although they currently do not \dQuote{strictly} extend this class), thus making it possible to use them directly for clustering algorithms based on dissimilarity matrices of this class, see the examples. } \references{ S. A. Boorman and P. Arabie (1972). Structural measures and the method of sorting. In R. N. Shepard, A. K. Romney, & S. B. Nerlove (eds.), \emph{Multidimensional Scaling: Theory and Applications in the Behavioral Sciences, 1: Theory} (pages 225--249). 
New York: Seminar Press. S. A. Boorman and D. C. Olivier (1973). Metrics on spaces of finite trees. \emph{Journal of Mathematical Psychology}, \bold{10}, 26--59. \doi{10.1016/0022-2496(73)90003-5}. I. Charon, L. Denoeud, A. Guénoche and O. Hudry (2006). \emph{Maximum Transfer Distance Between Partitions}. \emph{Journal of Classification}, \bold{23}, 103--121. \doi{10.1007/s00357-006-0006-2}. W. E. H. Day (1981). The complexity of computing metric distances between partitions. \emph{Mathematical Social Sciences}, \bold{1}, 269--287. \doi{10.1016/0165-4896(81)90042-1}. E. Dimitriadou, A. Weingessel and K. Hornik (2002). A combination scheme for fuzzy clustering. \emph{International Journal of Pattern Recognition and Artificial Intelligence}, \bold{16}, 901--912. \cr \doi{10.1142/S0218001402002052}. A. D. Gordon and M. Vichi (2001). Fuzzy partition models for fitting a set of partitions. \emph{Psychometrika}, \bold{66}, 229--248. \doi{10.1007/BF02294837}. D. Gusfield (2002). Partition-distance: A problem and class of perfect graphs arising in clustering. \emph{Information Processing Letters}, \bold{82}, 159--164. \doi{10.1016/S0020-0190(01)00263-0}. N. Jardine and E. Sibson (1971). \emph{Mathematical Taxonomy}. London: Wiley. M. Meila (2003). Comparing clusterings by the variation of information. In B. Schölkopf and M. K. Warmuth (eds.), \emph{Learning Theory and Kernel Machines}, pages 173--187. Springer-Verlag: Lecture Notes in Computer Science 2777. B. Mérigot, J.-P. Durbec and J.-C. Gaertner (2010). On goodness-of-fit measure for dendrogram-based analyses. \emph{Ecology}, \bold{91}, 1850—-1859. \doi{10.1890/09-1387.1}. C. Rajski (1961). A metric space of discrete probability distributions, \emph{Information and Control}, \bold{4}, 371--377. \doi{10.1016/S0019-9958(61)80055-7}. J. Rubin (1967). Optimal classification into groups: An approach for solving the taxonomy problem. \emph{Journal of Theoretical Biology}, \bold{15}, 103--144. \doi{10.1016/0022-5193(67)90046-X}. D. Zhou, J. Li and H. Zha (2005). A new Mallows distance based metric for comparing clusterings. In \emph{Proceedings of the 22nd international Conference on Machine Learning} (Bonn, Germany, August 07--11, 2005), pages 1028--1035. ICML '05, volume 119. ACM Press, New York, NY. \doi{10.1145/1102351.1102481}. } \seealso{ \code{\link{cl_agreement}} } \examples{ ## An ensemble of partitions. data("CKME") pens <- CKME[1 : 30] diss <- cl_dissimilarity(pens) summary(c(diss)) cl_dissimilarity(pens[1:5], pens[6:7]) ## Equivalently, using subscripting. diss[1:5, 6:7] ## Can use the dissimilarities for "secondary" clustering ## (e.g. obtaining hierarchies of partitions): hc <- hclust(diss) plot(hc) ## Example from Boorman and Arabie (1972). P1 <- as.cl_partition(c(1, 2, 2, 2, 3, 3, 2, 2)) P2 <- as.cl_partition(c(1, 1, 2, 2, 3, 3, 4, 4)) cl_dissimilarity(P1, P2, "BA/A") cl_dissimilarity(P1, P2, "BA/C") ## Hierarchical clustering. 
d <- dist(USArrests) x <- hclust(d) cl_dissimilarity(x, d, "cophenetic") cl_dissimilarity(x, d, "gamma") } \keyword{cluster} clue/man/lattice.Rd0000644000175000017500000001235614021342636014033 0ustar nileshnilesh\name{lattice} \encoding{UTF-8} \alias{cl_meet} \alias{cl_join} \alias{Ops.cl_partition} \alias{Summary.cl_partition} \alias{Ops.cl_dendrogram} \alias{Ops.cl_hierarchy} \alias{Summary.cl_hierarchy} \title{Cluster Lattices} \description{ Computations on the lattice of all (hard) partitions, or the lattice of all dendrograms, or the meet semilattice of all hierarchies (\eqn{n}-trees) of/on a set of objects: meet, join, and comparisons. } \usage{ cl_meet(x, y) cl_join(x, y) } \arguments{ \item{x}{an ensemble of partitions or dendrograms or hierarchies, or an R object representing a partition or dendrogram or hierarchy.} \item{y}{an R object representing a partition or dendrogram or hierarchy. Ignored if \code{x} is an ensemble.} } \details{ For a given finite set of objects \eqn{X}, the set \eqn{H(X)} of all (hard) partitions of \eqn{X} can be partially ordered by defining a partition \eqn{P} to be \dQuote{finer} than a partition \eqn{Q}, i.e., \eqn{P \le Q}, if each class of \eqn{P} is contained in some class of \eqn{Q}. With this partial order, \eqn{H(X)} becomes a bounded \dfn{lattice}, with intersection and union of two elements given by their greatest lower bound (\dfn{meet}) and their least upper bound (\dfn{join}), respectively. Specifically, the meet of two partitions computed by \code{cl_meet} is the partition obtained by intersecting the classes of the partitions; the classes of the join computed by \code{cl_join} are obtained by joining all elements in the same class in at least one of the partitions. Obviously, the least and greatest elements of the partition lattice are the partitions where each object is in a single class (sometimes referred to as the \dQuote{splitter} partition) or in the same class (the \dQuote{lumper} partition), respectively. Meet and join of an arbitrary number of partitions can be defined recursively. In addition to computing the meet and join, the comparison operations corresponding to the above partial order as well as \code{min}, \code{max}, and \code{range} are available at least for R objects representing partitions inheriting from \code{"\link{cl_partition}"}. The summary methods give the meet and join of the given partitions (for \code{min} and \code{max}), or a partition ensemble with the meet and join (for \code{range}). If the partitions specified by \code{x} and \code{y} are soft partitions, the corresponding nearest hard partitions are used. Future versions may optionally provide suitable \dQuote{soft} (fuzzy) extensions for computing meets and joins. The set of all dendrograms on \eqn{X} can be ordered using pointwise inequality of the associated ultrametric dissimilarities: i.e., if \eqn{D} and \eqn{E} are the dendrograms with ultrametrics \eqn{u} and \eqn{v}, respectively, then \eqn{D \le E} if \eqn{u_{ij} \le v_{ij}} for all pairs \eqn{(i, j)} of objects. This again yields a lattice (of dendrograms). The join of \eqn{D} and \eqn{E} is the dendrogram with ultrametrics given by \eqn{\max(u_{ij}, v_{ij})} (as this gives an ultrametric); the meet is the dendrogram with the maximal ultrametric dominated by \eqn{\min(u_{ij}, v_{ij})}, and can be obtained by applying single linkage hierarchical clustering to the minima. 
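For instance, a small sketch of the dendrogram case (assuming, as elsewhere in \pkg{clue}, that \code{\link[stats]{hclust}} trees are accepted as dendrogram representations):
\preformatted{
    d <- dist(USArrests[1 : 10, ])
    D <- hclust(d, "single")
    E <- hclust(d, "complete")
    cl_ultrametric(cl_join(D, E))  # pointwise maximum of the ultrametrics
    cl_ultrametric(cl_meet(D, E))  # single-linkage fit to the pointwise minima
}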
The set of all hierarchies on \eqn{X} can be ordered by set-wise inclusion of the classes: i.e., if \eqn{H} and \eqn{G} are two hierarchies, then \eqn{H \le G} if all classes of \eqn{H} are also classes of \eqn{G}. This yields a meet semilattice, with meet given by the classes contained in both hierarchies. The join only exists if the union of the classes is a hierarchy. In each case, a modular semilattice is obtained, which allows for a natural metrization via least element (semi)lattice move distances, see Barthélémy, Leclerc and Monjardet (1981). These latticial metrics are given by the BA/C (partitions), Manhattan (dendrograms), and symdiff (hierarchies) dissimilarities, respectively (see \code{\link{cl_dissimilarity}}). } \value{ For \code{cl_meet} and \code{cl_join}, an object of class \code{"\link{cl_partition}"} or \code{"\link{cl_dendrogram}"} with the class ids or ultrametric dissimilarities of the meet and join of the partitions or dendrograms, respectively. } \references{ J.-P. Barthélémy, B. Leclerc and B. Monjardet (1981). On the use of ordered sets in problems of comparison and consensus of classification. \emph{Journal of Classification}, \bold{3}, 187--224. \doi{10.1007/BF01894188}. } \examples{ ## Two simple partitions of 7 objects. A <- as.cl_partition(c(1, 1, 2, 3, 3, 5, 5)) B <- as.cl_partition(c(1, 2, 2, 3, 4, 5, 5)) ## These disagree on objects 1-3, A splits objects 4 and 5 into ## separate classes. Objects 6 and 7 are always in the same class. (A <= B) || (B <= A) ## (Neither partition is finer than the other.) cl_meet(A, B) cl_join(A, B) ## Meeting with the lumper (greatest) or joining with the splitter ## (least) partition does not make a difference: C_lumper <- as.cl_partition(rep(1, n_of_objects(A))) cl_meet(cl_ensemble(A, B, C_lumper)) C_splitter <- as.cl_partition(seq_len(n_of_objects(A))) cl_join(cl_ensemble(A, B, C_splitter)) ## Another way of computing the join: range(A, B, C_splitter)$max } \keyword{cluster} clue/man/Cassini.Rd0000644000175000017500000000237411304023137013767 0ustar nileshnilesh\name{Cassini} \alias{Cassini} \title{Cassini Data} \description{ A Cassini data set with 1000 points in 2-dimensional space which are drawn from the uniform distribution on 3 structures. The two outer structures are banana-shaped; the \dQuote{middle} structure in between them is a circle. } \usage{data("Cassini")} \format{ A classed list with components \describe{ \item{\code{x}}{a matrix with 1000 rows and 2 columns giving the coordinates of the points.} \item{\code{classes}}{a factor indicating which structure the respective points belong to.} } } \details{ Instances of Cassini data sets can be created using function \code{\link[mlbench]{mlbench.cassini}} in package \pkg{mlbench}. The data set at hand was obtained using \preformatted{ library("mlbench") set.seed(1234) Cassini <- mlbench.cassini(1000) } } \examples{ data("Cassini") op <- par(mfcol = c(1, 2)) ## Plot the data set: plot(Cassini$x, col = as.integer(Cassini$classes), xlab = "", ylab = "") ## Create a "random" k-means partition of the data: set.seed(1234) party <- kmeans(Cassini$x, 3) ## And plot that. plot(Cassini$x, col = cl_class_ids(party), xlab = "", ylab = "") ## (We can see the problem ...) 
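## A possible follow-up (illustrative sketch): quantify the mismatch with
## the true classes via the Rand index corrected for chance:
cl_agreement(as.cl_partition(as.integer(Cassini$classes)), party, "cRand")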
par(op) } \keyword{datasets} clue/man/fit_ultrametric_target.Rd0000644000175000017500000000621411304023137017136 0ustar nileshnilesh\name{fit_ultrametric_target} \alias{ls_fit_ultrametric_target} \alias{l1_fit_ultrametric_target} \title{Fit Dissimilarities to a Hierarchy} \description{ Find the ultrametric from a target equivalence class of hierarchies which minimizes weighted Euclidean or Manhattan dissimilarity to a given dissimilarity object. } \usage{ ls_fit_ultrametric_target(x, y, weights = 1) l1_fit_ultrametric_target(x, y, weights = 1) } \arguments{ \item{x}{a dissimilarity object inheriting from class \code{"\link{dist}"}.} \item{y}{a target hierarchy.} \item{weights}{a numeric vector or matrix with non-negative weights for obtaining a weighted fit. If a matrix, its numbers of rows and columns must be the same as the number of objects in \code{x}. Otherwise, it is recycled to the number of elements in \code{x}.} } \value{ An object of class \code{"\link{cl_ultrametric}"} containing the optimal ultrametric distances. } \details{ The target equivalence class consists of all dendrograms for which the corresponding \eqn{n}-trees are the same as the one corresponding to \code{y}. I.e., all splits are the same as for \code{y}, and optimization is over the height of the splits. The criterion function to be optimized over all ultrametrics from the equivalence class is \eqn{\sum w_{ij} |x_{ij} - u_{ij}|^p}, where \eqn{p = 2} in the Euclidean and \eqn{p = 1} in the Manhattan case, respectively. The optimum can be computed as follows. Suppose split \eqn{s} joins object classes \eqn{A} and \eqn{B}. As the ultrametric dissimilarities of all objects in \eqn{A} to all objects in \eqn{B} must be the same value, say, \eqn{u_{A,B} = u_s}, the contribution from the split to the criterion function is of the form \eqn{f_s(u_s) = \sum_{i \in A, j \in B} w_{ij} |x_{ij} - u_s|^p}. We need to minimize \eqn{\sum_s f_s(u_s)} under the constraint that the \eqn{u_s} form a non-decreasing sequence, which is accomplished by using the Pool Adjacent Violators Algorithm (\acronym{PAVA}) with the weighted mean (\eqn{p = 2}) or weighted median (\eqn{p = 1}) for solving the blockwise optimization problems. } \seealso{ \code{\link{ls_fit_ultrametric}} for finding the ultrametric minimizing Euclidean dissimilarity (without fixing the splits). } \examples{ data("Phonemes") ## Note that the Phonemes data set has the consonant misclassification ## probabilities, i.e., the similarities between the phonemes. d <- as.dist(1 - Phonemes) ## Find the maximal dominated and minimal dominating ultrametrics by ## hclust() with single and complete linkage: y1 <- hclust(d, "single") y2 <- hclust(d, "complete") ## Note that these are quite different: cl_dissimilarity(y1, y2, "gamma") ## Now find the L2 optimal members of the respective dendrogram ## equivalence classes. u1 <- ls_fit_ultrametric_target(d, y1) u2 <- ls_fit_ultrametric_target(d, y2) ## Compute the L2 optimal ultrametric approximation to d. u <- ls_fit_ultrametric(d) ## And compare ... cl_dissimilarity(cl_ensemble(Opt = u, Single = u1, Complete = u2), d) ## The solution obtained via complete linkage is quite close: cl_agreement(u2, u, "cophenetic") } \keyword{cluster} \keyword{optimize} clue/man/cl_prototypes.Rd0000644000175000017500000000265311304023137015304 0ustar nileshnilesh\name{cl_prototypes} \alias{cl_prototypes} \title{Partition Prototypes} \description{ Determine prototypes for the classes of an R object representing a partition.
} \usage{ cl_prototypes(x) } \arguments{ \item{x}{an R object representing a partition of objects.} } \details{ Many partitioning methods are based on prototypes (\dQuote{centers}, \dQuote{centroids}, \dQuote{medoids}, \dots). In typical cases, these are points in the feature space for the measurements on the objects to be partitioned, such that one can quantify the distance between the objects and the prototypes, and, e.g., classify objects to their closest prototype. This is a generic function. The methods provided in package \pkg{clue} handle the partitions obtained from clustering functions in the base R distribution, as well as packages \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{kernlab}, and \pkg{mclust} (and of course, \pkg{clue} itself). } \examples{ ## Show how prototypes ("centers") vary across k-means runs on ## bootstrap samples from the Cassini data. data("Cassini") nr <- NROW(Cassini$x) out <- replicate(50, { kmeans(Cassini$x[sample(nr, replace = TRUE), ], 3) }, simplify = FALSE) ## Plot the data points in light gray, and the prototypes found. plot(Cassini$x, col = gray(0.8)) points(do.call("rbind", lapply(out, cl_prototypes)), pch = 19) } \keyword{cluster} clue/man/l1_fit_ultrametric.Rd0000644000175000017500000001111313140644252016165 0ustar nileshnilesh\name{l1_fit_ultrametric} \alias{l1_fit_ultrametric} \title{Least Absolute Deviation Fit of Ultrametrics to Dissimilarities} \description{ Find the ultrametric with minimal absolute distance (Manhattan dissimilarity) to a given dissimilarity object. } \usage{ l1_fit_ultrametric(x, method = c("SUMT", "IRIP"), weights = 1, control = list()) } \arguments{ \item{x}{a dissimilarity object inheriting from or coercible to class \code{"\link{dist}"}.} \item{method}{a character string indicating the fitting method to be employed. Must be one of \code{"SUMT"} (default) or \code{"IRIP"}, or a unique abbreviation thereof.} \item{weights}{a numeric vector or matrix with non-negative weights for obtaining a weighted least squares fit. If a matrix, its numbers of rows and columns must be the same as the number of objects in \code{x}, and the lower diagonal part is used. Otherwise, it is recycled to the number of elements in \code{x}.} \item{control}{a list of control parameters. See \bold{Details}.} } \value{ An object of class \code{"\link{cl_ultrametric}"} containing the fitted ultrametric distances. } \details{ The problem to be solved is minimizing \deqn{L(u) = \sum_{i,j} w_{ij} |x_{ij} - u_{ij}|} over all \eqn{u} satisfying the ultrametric constraints (i.e., for all \eqn{i, j, k}, \eqn{u_{ij} \le \max(u_{ik}, u_{jk})}). This problem is known to be NP hard (Krivanek and Moravek, 1986). We provide two heuristics for solving this problem. Method \code{"SUMT"} implements a \acronym{SUMT} (Sequential Unconstrained Minimization Technique, see \code{\link{sumt}}) approach using the sign function for the gradients of the absolute value function. Available control parameters are \code{method}, \code{control}, \code{eps}, \code{q}, and \code{verbose}, which have the same roles as for \code{\link{sumt}}, and the following. \describe{ \item{\code{nruns}}{an integer giving the number of runs to be performed. 
Defaults to 1.} \item{\code{start}}{a single dissimilarity, or a list of dissimilarities to be employed as starting values.} } Method \code{"IRIP"} implements a variant of the Iteratively Reweighted Iterative Projection approach of Smith (2001), which attempts to solve the \eqn{L_1} problem via a sequence of weighted \eqn{L_2} problems, determining \eqn{u(t+1)} by minimizing the criterion function \deqn{\sum_{i,j} w_{ij} (x_{ij} - u_{ij})^2 / \max(|x_{ij} - u_{ij}(t)|, m)} with \eqn{m} a \dQuote{small} non-zero value to avoid zero divisors. We use the \acronym{SUMT} method of \code{\link{ls_fit_ultrametric}} for solving the weighted least squares problems. Available control parameters are as follows. \describe{ \item{\code{maxiter}}{an integer giving the maximal number of iteration steps to be performed. Defaults to 100.} \item{\code{eps}}{a nonnegative number controlling the iteration, which stops when the maximal change in \eqn{u} is less than \code{eps}. Defaults to \eqn{10^{-6}}.} \item{\code{reltol}}{the relative convergence tolerance. Iteration stops when the relative change in the \eqn{L_1} criterion is less than \code{reltol}. Defaults to \eqn{10^{-6}}.} \item{\code{MIN}}{the cutoff \eqn{m}. Defaults to \eqn{10^{-3}}.} \item{\code{start}}{a dissimilarity object to be used as the starting value for \eqn{u}.} \item{\code{control}}{a list of control parameters to be used by the method of \code{\link{ls_fit_ultrametric}} employed for solving the weighted \eqn{L_2} problems.} } One may need to adjust the default control parameters to achieve convergence. It should be noted that all methods are heuristics which can not be guaranteed to find the global minimum. } \seealso{ \code{\link{cl_consensus}} for computing least absolute deviation (Manhattan) consensus hierarchies; \code{\link{ls_fit_ultrametric}}. } \references{ M. Krivanek and J. Moravek (1986). NP-hard problems in hierarchical tree clustering. \emph{Acta Informatica}, \bold{23}, 311--323. \doi{10.1007/BF00289116}. T. J. Smith (2001). Constructing ultrametric and additive trees based on the \eqn{L_1} norm. \emph{Journal of Classification}, \bold{18}, 185--207. \url{https://link.springer.com/article/10.1007/s00357-001-0015-0}. %% The above web page has %% \doi{10.1007/s00357-001-0015-0}. %% which does not work. Reported to the responsible DOI Registration %% Agency on 2017-08-03, let's use the URL instead of the DOI for now. } \keyword{cluster} \keyword{optimize} clue/man/GVME.Rd0000644000175000017500000000207412734171420013140 0ustar nileshnilesh\name{GVME} \alias{GVME} \title{Gordon-Vichi Macroeconomic Partition Ensemble Data} \description{ Soft partitions of 21 countries based on macroeconomic data for the years 1975, 1980, 1985, 1990, and 1995. } \usage{data("GVME")} \format{ A named cluster ensemble of 5 soft partitions of 21 countries into 2 or 3 classes. The names are the years to which the partitions correspond. } \details{ The partitions were obtained using fuzzy \eqn{c}-means on measurements of the following variables: the annual per capita gross domestic product (GDP) in USD (converted to 1987 prices); the percentage of GDP provided by agriculture; the percentage of employees who worked in agriculture; and gross domestic investment, expressed as a percentage of the GDP. See Gordon and Vichi (2001), page 230, for more details. } \source{ Table 1 in Gordon and Vichi (2001). } \references{ A. D. Gordon and M. Vichi (2001). Fuzzy partition models for fitting a set of partitions. \emph{Psychometrika}, \bold{66}, 229--248. 
\doi{10.1007/BF02294837}. } \keyword{datasets} clue/man/cl_validity.Rd0000644000175000017500000000674113140644223014710 0ustar nileshnilesh\name{cl_validity} \alias{cl_validity} \alias{cl_validity.default} \title{Validity Measures for Partitions and Hierarchies} \description{ Compute validity measures for partitions and hierarchies, attempting to measure how well these clusterings capture the underlying structure in the data they were obtained from. } \usage{ cl_validity(x, ...) \method{cl_validity}{default}(x, d, ...) } \arguments{ \item{x}{an object representing a partition or hierarchy.} \item{d}{a dissimilarity object from which \code{x} was obtained.} \item{\dots}{arguments to be passed to or from methods.} } \value{ A list of class \code{"cl_validity"} with the computed validity measures. } \details{ \code{cl_validity} is a generic function. For partitions, its default method gives the \dQuote{dissimilarity accounted for}, defined as \eqn{1 - a_w / a_t}, where \eqn{a_t} is the average total dissimilarity, and the \dQuote{average within dissimilarity} \eqn{a_w} is given by \deqn{\frac{\sum_{i,j} \sum_k m_{ik}m_{jk} d_{ij}}{ \sum_{i,j} \sum_k m_{ik}m_{jk}}}{% \sum_{i,j} \sum_k m_{ik}m_{jk} d_{ij} / \sum_{i,j} \sum_k m_{ik}m_{jk}} where \eqn{d} and \eqn{m} are the dissimilarities and memberships, respectively, and the sums are over all pairs of objects and all classes. For hierarchies, the validity measures computed by default are \dQuote{variance accounted for} (VAF, e.g., Hubert, Arabie & Meulman, 2006) and \dQuote{deviance accounted for} (DEV, e.g., Smith, 2001). If \code{u} is the ultrametric corresponding to the hierarchy \code{x} and \code{d} the dissimilarity \code{x} was obtained from, these validity measures are given by \deqn{\mathrm{VAF} = \max\left(0, 1 - \frac{\sum_{i,j} (d_{ij} - u_{ij})^2}{ \sum_{i,j} (d_{ij} - \mathrm{mean}(d)) ^ 2}\right)}{ max(0, 1 - sum_{i,j} (d_{ij} - u_{ij})^2 / sum_{i,j} (d_{ij} - mean(d))^2)} and \deqn{\mathrm{DEV} = \max\left(0, 1 - \frac{\sum_{i,j} |d_{ij} - u_{ij}|}{ \sum_{i,j} |d_{ij} - \mathrm{median}(d)|}\right)}{ max(0, 1 - sum_{i,j} |d_{ij} - u_{ij}| / sum_{i,j} |d_{ij} - median(d)|)} respectively. Note that VAF and DEV are not invariant under rescaling \code{u}, and may be \dQuote{arbitrarily small} (i.e., 0 using the above definitions) even though \code{u} and \code{d} are \dQuote{structurally close} in some sense. For the results of using \code{\link[cluster]{agnes}} and \code{\link[cluster]{diana}}, the agglomerative and divisive coefficients are provided in addition to the default ones. } \references{ L. Hubert, P. Arabie and J. Meulman (2006). \emph{The structural representation of proximity matrices with MATLAB}. Philadelphia, PA: SIAM. T. J. Smith (2001). Constructing ultrametric and additive trees based on the \eqn{L_1} norm. \emph{Journal of Classification}, \bold{18}/2, 185--207. \url{https://link.springer.com/article/10.1007/s00357-001-0015-0}. %% The above web page has %% \doi{10.1007/s00357-001-0015-0}. %% which does not work. Reported to the responsible DOI Registration %% Agency on 2017-08-03, let's use the URL instead of the DOI for now. } \seealso{ \code{\link[fpc]{cluster.stats}} in package \pkg{fpc} for a variety of cluster validation statistics; \code{\link[e1071]{fclustIndex}} in package \pkg{e1071} for several fuzzy cluster indexes; \code{\link[cclust:Rindexes]{clustIndex}} in package \pkg{cclust}; \code{\link[cluster]{silhouette}} in package \pkg{cluster}. 
} \keyword{cluster} clue/man/cl_bag.Rd0000644000175000017500000000642014021342067013606 0ustar nileshnilesh\name{cl_bag} \alias{cl_bag} \title{Bagging for Clustering} \description{ Construct partitions of objects by running a base clustering algorithm on bootstrap samples from a given data set, and \dQuote{suitably} aggregating these primary partitions. } \usage{ cl_bag(x, B, k = NULL, algorithm = "kmeans", parameters = NULL, method = "DFBC1", control = NULL) } \arguments{ \item{x}{the data set of objects to be clustered, as appropriate for the base clustering algorithm.} \item{B}{an integer giving the number of bootstrap replicates.} \item{k}{\code{NULL} (default), or an integer giving the number of classes to be used for a partitioning base algorithm.} \item{algorithm}{a character string or function specifying the base clustering algorithm.} \item{parameters}{a named list of additional arguments to be passed to the base algorithm.} \item{method}{a character string indicating the bagging method to use. Currently, only method \code{"DFBC1"} is available, which implements algorithm \emph{BagClust1} in Dudoit & Fridlyand (2003).} \item{control}{a list of control parameters for the aggregation. Currently, not used.} } \value{ An R object representing a partition of the objects given in \code{x}. } \details{ Bagging for clustering is really a rather general conceptual framework than a specific algorithm. If the primary partitions generated in the bootstrap stage form a cluster ensemble (so that class memberships of the objects in \code{x} can be obtained), consensus methods for cluster ensembles (as implemented, e.g., in \code{\link{cl_consensus}} and \code{\link{cl_medoid}}) can be employed for the aggregation stage. In particular, (possibly new) bagging algorithms can easily be realized by directly running \code{\link{cl_consensus}} on the results of \code{\link{cl_boot}}. In BagClust1, aggregation proceeds by generating a reference partition by running the base clustering algorithm on the whole given data set, and averaging the ensemble memberships after optimally matching them to the reference partition (in fact, by minimizing Euclidean dissimilarity, see \code{\link{cl_dissimilarity}}). If the base clustering algorithm yields prototypes, aggregation can be based on clustering these. This is the idea underlying the \dQuote{Bagged Clustering} algorithm introduced in Leisch (1999) and implemented by function \code{\link[e1071]{bclust}} in package \pkg{e1071}. } \references{ S. Dudoit and J. Fridlyand (2003). Bagging to improve the accuracy of a clustering procedure. \emph{Bioinformatics}, \bold{19}/9, 1090--1099. \doi{10.1093/bioinformatics/btg038}. F. Leisch (1999). \emph{Bagged Clustering}. Working Paper 51, SFB \dQuote{Adaptive Information Systems and Modeling in Economics and Management Science}. \url{https://epub.wu.ac.at/1272/}. } \examples{ set.seed(1234) ## Run BagClust1 on the Cassini data. 
data("Cassini") party <- cl_bag(Cassini$x, 50, 3) plot(Cassini$x, col = cl_class_ids(party), xlab = "", ylab = "") ## Actually, using fuzzy c-means as a base learner works much better: if(require("e1071", quietly = TRUE)) { party <- cl_bag(Cassini$x, 20, 3, algorithm = "cmeans") plot(Cassini$x, col = cl_class_ids(party), xlab = "", ylab = "") } } \keyword{cluster} clue/man/ls_fit_sum_of_ultrametrics.Rd0000644000175000017500000000603012116170572020025 0ustar nileshnilesh\name{ls_fit_sum_of_ultrametrics} \alias{ls_fit_sum_of_ultrametrics} \title{Least Squares Fit of Sums of Ultrametrics to Dissimilarities} \description{ Find a sequence of ultrametrics with sum minimizing square distance (Euclidean dissimilarity) to a given dissimilarity object. } \usage{ ls_fit_sum_of_ultrametrics(x, nterms = 1, weights = 1, control = list()) } \arguments{ \item{x}{a dissimilarity object inheriting from or coercible to class \code{"\link{dist}"}.} \item{nterms}{an integer giving the number of ultrametrics to be fitted.} \item{weights}{a numeric vector or matrix with non-negative weights for obtaining a weighted least squares fit. If a matrix, its numbers of rows and columns must be the same as the number of objects in \code{x}, and the lower diagonal part is used. Otherwise, it is recycled to the number of elements in \code{x}.} \item{control}{a list of control parameters. See \bold{Details}.} } \details{ The problem to be solved is minimizing the criterion function \deqn{L(u(1), \dots, u(n)) = \sum_{i,j} w_{ij} \left(x_{ij} - \sum_{k=1}^n u_{ij}(k)\right)^2}{ L(u(1), \dots, u(n)) = \sum_{i,j} w_{ij} \left(x_{ij} - \sum_{k=1}^n u_{ij}(k)\right)^2} over all \eqn{u(1), \ldots, u(n)} satisfying the ultrametric constraints. We provide an implementation of the iterative heuristic suggested in Carroll & Pruzansky (1980) which in each step \eqn{t} sequentially refits the \eqn{u(k)} as the least squares ultrametric fit to the \dQuote{residuals} \eqn{x - \sum_{l \ne k} u(l)} using \code{\link{ls_fit_ultrametric}}. Available control parameters include \describe{ \item{\code{maxiter}}{an integer giving the maximal number of iteration steps to be performed. Defaults to 100.} \item{\code{eps}}{a nonnegative number controlling the iteration, which stops when the maximal change in all \eqn{u(k)} is less than \code{eps}. Defaults to \eqn{10^{-6}}.} \item{\code{reltol}}{the relative convergence tolerance. Iteration stops when the relative change in the criterion function is less than \code{reltol}. Defaults to \eqn{10^{-6}}.} \item{\code{method}}{a character string indicating the fitting method to be employed by the individual least squares fits.} \item{\code{control}}{a list of control parameters to be used by the method of \code{\link{ls_fit_ultrametric}} employed. By default, if the \acronym{SUMT} method is used, 10 inner \acronym{SUMT} runs are performed for each refitting.} } It should be noted that the method used is a heuristic which can not be guaranteed to find the global minimum. } \value{ A list of objects of class \code{"\link{cl_ultrametric}"} containing the fitted ultrametric distances. } \references{ J. D. Carroll and S. Pruzansky (1980). Discrete and hybrid scaling models. In E. D. Lantermann and H. Feger (eds.), \emph{Similarity and Choice}. Bern (Switzerland): Huber.
} \keyword{cluster} \keyword{optimize} clue/man/addtree.Rd0000644000175000017500000000364312037226501014012 0ustar nileshnilesh\name{addtree} \encoding{UTF-8} \alias{as.cl_addtree} \title{Additive Tree Distances} \description{ Objects representing additive tree distances. } \usage{ as.cl_addtree(x) } \arguments{ \item{x}{an R object representing additive tree distances.} } \value{ An object of class \code{"cl_addtree"} containing the additive tree distances. } \details{ Additive tree distances are object dissimilarities \eqn{d} satisfying the so-called \emph{additive tree conditions}, also known as \emph{four-point conditions} \eqn{d_{ij} + d_{kl} \le \max(d_{ik} + d_{jl}, d_{il} + d_{jk})} for all quadruples \eqn{i, j, k, l}. Equivalently, for each such quadruple, the largest two values of the sums \eqn{d_{ij} + d_{kl}}, \eqn{d_{ik} + d_{jl}}, and \eqn{d_{il} + d_{jk}} must be equal. Centroid distances are additive tree distances where the inequalities in the four-point conditions are strengthened to equalities (such that all three sums are equal), and can be represented as \eqn{d_{ij} = g_i + g_j}, i.e., as sums of distances from a \dQuote{centroid}. See, e.g., Barthélémy and Guénoche (1991) for more details on additive tree distances. \code{as.cl_addtree} is a generic function. Its default method can handle objects representing ultrametric distances and raw additive distance matrices. In addition, there is a method for coercing objects of class \code{"\link[ape:as.phylo]{phylo}"} from package \pkg{ape}. Functions \code{\link{ls_fit_addtree}} and \code{\link{ls_fit_centroid}} can be used to find the additive tree distance or centroid distance minimizing least squares distance (Euclidean dissimilarity) to a given dissimilarity object. There is a \code{\link{plot}} method for additive tree distances. } \references{ J.-P. Barthélémy and A. Guénoche (1991). \emph{Trees and proximity representations}. Chichester: John Wiley & Sons. ISBN 0-471-92263-3. } \keyword{cluster} clue/man/cl_margin.Rd0000644000175000017500000000165011304023137014325 0ustar nileshnilesh\name{cl_margin} \alias{cl_margin} \title{Membership Margins} \description{ Compute the \emph{margin} of the memberships of a partition, i.e., the difference between the largest and second largest membership values of the respective objects. } \usage{ cl_margin(x) } \arguments{ \item{x}{an \R object representing a partition of objects.} } \details{ For hard partitions, the margins are always 1. For soft partitions, the margins may be taken as an indication of the \dQuote{sureness} of classifying an object to the class with maximum membership value. } \examples{ data("GVME") ## Look at the classes obtained for 1980: split(cl_object_names(GVME[["1980"]]), cl_class_ids(GVME[["1980"]])) ## Margins: x <- cl_margin(GVME[["1980"]]) ## Add names, and sort: names(x) <- cl_object_names(GVME[["1980"]]) sort(x) ## Note the "uncertainty" of assigning Egypt to the "intermediate" class ## of nations. } \keyword{cluster} clue/man/GVME_Consensus.Rd0000644000175000017500000000343512734174635015215 0ustar nileshnilesh\name{GVME_Consensus} \alias{GVME_Consensus} \title{Gordon-Vichi Macroeconomic Consensus Partition Data} \description{ The soft (\dQuote{fuzzy}) consensus partitions for the macroeconomic partition data given in Gordon and Vichi (2001). } \usage{data("GVME_Consensus")} \format{ A named cluster ensemble of eight soft partitions of 21 countries terms into two or three classes. 
} \details{ The elements of the ensemble are consensus partitions for the macroeconomic partition data in Gordon and Vichi (2001), which are available as data set \code{\link{GVME}}. Element names are of the form \code{"\var{m}/\var{k}"}, where \var{m} indicates the consensus method employed (one of \samp{MF1}, \samp{MF2}, \samp{JMF}, and \samp{S&S}, corresponding to the application of models 1, 2, and 3 in Gordon and Vichi (2001) and the approach in Sato and Sato (1994), respectively), and \var{k} denotes the number of classes (2 or 3). } \source{ Tables 4 and 5 in Gordon and Vichi (2001). } \references{ A. D. Gordon and M. Vichi (2001). Fuzzy partition models for fitting a set of partitions. \emph{Psychometrika}, \bold{66}, 229--248. \doi{10.1007/BF02294837}. M. Sato and Y. Sato (1994). On a multicriteria fuzzy clustering method for 3-way data. \emph{International Journal of Uncertainty, Fuzziness and Knowledge-Based Systems}, \bold{2}, 127--142. \cr \doi{10.1142/S0218488594000122}. } \examples{ ## Load the consensus partitions. data("GVME_Consensus") ## Pick the partitions into 2 classes. GVME_Consensus_2 <- GVME_Consensus[1 : 4] ## Fuzziness using the Partition Coefficient. cl_fuzziness(GVME_Consensus_2) ## (Corresponds to 1 - F in the source.) ## Dissimilarities: cl_dissimilarity(GVME_Consensus_2) cl_dissimilarity(GVME_Consensus_2, method = "comem") } \keyword{datasets} clue/man/CKME.Rd0000644000175000017500000000071011304023137013110 0ustar nileshnilesh\name{CKME} \alias{CKME} \title{Cassini Data Partitions Obtained by K-Means} \description{ A cluster ensemble of 50 \eqn{k}-means partitions of the Cassini data into three classes. } \usage{data("CKME")} \format{ A cluster ensemble of 50 (\eqn{k}-means) partitions. } \details{ The ensemble was generated via \preformatted{ require("clue") data("Cassini") set.seed(1234) CKME <- cl_boot(Cassini$x, 50, 3) } } \keyword{datasets} clue/man/ls_fit_addtree.Rd0000644000175000017500000000676612734173577015371 0ustar nileshnilesh\name{ls_fit_addtree} \encoding{UTF-8} \alias{ls_fit_addtree} \alias{ls_fit_centroid} \title{Least Squares Fit of Additive Tree Distances to Dissimilarities} \description{ Find the additive tree distance or centroid distance minimizing least squares distance (Euclidean dissimilarity) to a given dissimilarity object. } \usage{ ls_fit_addtree(x, method = c("SUMT", "IP", "IR"), weights = 1, control = list()) ls_fit_centroid(x) } \arguments{ \item{x}{a dissimilarity object inheriting from class \code{"\link{dist}"}.} \item{method}{a character string indicating the fitting method to be employed. Must be one of \code{"SUMT"} (default), \code{"IP"}, or \code{"IR"}, or a unique abbreviation thereof.} \item{weights}{a numeric vector or matrix with non-negative weights for obtaining a weighted least squares fit. If a matrix, its numbers of rows and columns must be the same as the number of objects in \code{x}, and the lower diagonal part is used. Otherwise, it is recycled to the number of elements in \code{x}.} \item{control}{a list of control parameters. See \bold{Details}.} } \value{ An object of class \code{"cl_addtree"} containing the optimal additive tree distances. } \details{ See \code{\link{as.cl_addtree}} for details on additive tree distances and centroid distances. With \eqn{L(d) = \sum w_{ij} (x_{ij} - d_{ij})^2}, the problem to be solved by \code{ls_fit_addtree} is minimizing \eqn{L} over all additive tree distances \eqn{d}. This problem is known to be NP hard. We provide three heuristics for solving this problem.
Method \code{"SUMT"} implements the \acronym{SUMT} (Sequential Unconstrained Minimization Technique, Fiacco and McCormick, 1968) approach of de Soete (1983). Incomplete dissimilarities are currently not supported. Methods \code{"IP"} and \code{"IR"} implement the Iterative Projection and Iterative Reduction approaches of Hubert and Arabie (1995) and Roux (1988), respectively. Non-identical weights and incomplete dissimilarities are currently not supported. See \code{\link{ls_fit_ultrametric}} for details on these methods and available control parameters. It should be noted that all methods are heuristics which can not be guaranteed to find the global minimum. Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the base algorithm. \code{ls_fit_centroid} finds the centroid distance \eqn{d} minimizing \eqn{L(d)} (currently, only for the case of identical weights). This optimization problem has a closed-form solution. } \references{ A. V. Fiacco and G. P. McCormick (1968). \emph{Nonlinear programming: Sequential unconstrained minimization techniques}. New York: John Wiley & Sons. L. Hubert and P. Arabie (1995). Iterative projection strategies for the least squares fitting of tree structures to proximity data. \emph{British Journal of Mathematical and Statistical Psychology}, \bold{48}, 281--317. \doi{10.1111/j.2044-8317.1995.tb01065.x}. M. Roux (1988). Techniques of approximation for building two tree structures. In C. Hayashi and E. Diday and M. Jambu and N. Ohsumi (Eds.), \emph{Recent Developments in Clustering and Data Analysis}, pages 151--170. New York: Academic Press. G. de Soete (1983). A least squares algorithm for fitting additive trees to proximity data. \emph{Psychometrika}, \bold{48}, 621--626. \doi{10.1007/BF02293884}. } \keyword{cluster} \keyword{optimize} clue/man/cl_medoid.Rd0000644000175000017500000000331211726357356014333 0ustar nileshnilesh\name{cl_medoid} \alias{cl_medoid} \title{Medoid Partitions and Hierarchies} \description{ Compute the medoid of an ensemble of partitions or hierarchies, i.e., the element of the ensemble minimizing the sum of dissimilarities to all other elements. } \usage{ cl_medoid(x, method = "euclidean") } \arguments{ \item{x}{an ensemble of partitions or hierarchies, or something coercible to that (see \code{\link{cl_ensemble}}).} \item{method}{a character string or a function, as for argument \code{method} of function \code{\link{cl_dissimilarity}}.} } \value{ The medoid partition or hierarchy. } \details{ Medoid clusterings are special cases of \dQuote{consensus} clusterings characterized as the solutions of an optimization problem. See Gordon (2001) for more information. The dissimilarities \code{d} for determining the medoid are obtained by calling \code{cl_dissimilarity} with arguments \code{x} and \code{method}. The medoid can then be found as the (first) row index for which the row sum of \code{as.matrix(d)} is minimal. Modulo possible differences in the case of ties, this gives the same results as (the medoid obtained by) \code{\link[cluster]{pam}} in package \pkg{cluster}. } \references{ A. D. Gordon (1999). \emph{Classification} (2nd edition). Boca Raton, FL: Chapman & Hall/CRC. } \seealso{ \code{\link{cl_consensus}} } \examples{ ## An ensemble of partitions. data("CKME") pens <- CKME[1 : 20] m1 <- cl_medoid(pens) diss <- cl_dissimilarity(pens) require("cluster") m2 <- pens[[pam(diss, 1)$medoids]] ## Agreement of medoid consensus partitions. 
cl_agreement(m1, m2) ## Or, more straightforwardly: table(cl_class_ids(m1), cl_class_ids(m2)) } \keyword{cluster} clue/man/ls_fit_ultrametric.Rd0000644000175000017500000002076712734173740016315 0ustar nileshnilesh\name{ls_fit_ultrametric} \encoding{UTF-8} \alias{ls_fit_ultrametric} \title{Least Squares Fit of Ultrametrics to Dissimilarities} \description{ Find the ultrametric with minimal square distance (Euclidean dissimilarity) to given dissimilarity objects. } \usage{ ls_fit_ultrametric(x, method = c("SUMT", "IP", "IR"), weights = 1, control = list()) } \arguments{ \item{x}{a dissimilarity object inheriting from or coercible to class \code{"\link{dist}"}, or an ensemble of such objects.} \item{method}{a character string indicating the fitting method to be employed. Must be one of \code{"SUMT"} (default), \code{"IP"}, or \code{"IR"}, or a unique abbreviation thereof.} \item{weights}{a numeric vector or matrix with non-negative weights for obtaining a weighted least squares fit. If a matrix, its numbers of rows and columns must be the same as the number of objects in \code{x}, and the lower diagonal part is used. Otherwise, it is recycled to the number of elements in \code{x}.} \item{control}{a list of control parameters. See \bold{Details}.} } \value{ An object of class \code{"\link{cl_ultrametric}"} containing the fitted ultrametric distances. } \details{ For a single dissimilarity object \code{x}, the problem to be solved is minimizing \deqn{L(u) = \sum_{i,j} w_{ij} (x_{ij} - u_{ij})^2} over all \eqn{u} satisfying the ultrametric constraints (i.e., for all \eqn{i, j, k}, \eqn{u_{ij} \le \max(u_{ik}, u_{jk})}). This problem is known to be NP hard (Krivanek and Moravek, 1986). For an ensemble of dissimilarity objects, the criterion function is \deqn{L(u) = \sum_b w_b \sum_{i,j} w_{ij} (x_{ij}(b) - u_{ij})^2,} where \eqn{w_b} is the weight given to element \eqn{x_b} of the ensemble and can be specified via control parameter \code{weights} (default: all ones). This problem reduces to the above basic problem with \eqn{x} as the \eqn{w_b}-weighted mean of the \eqn{x_b}. We provide three heuristics for solving the basic problem. Method \code{"SUMT"} implements the \acronym{SUMT} (Sequential Unconstrained Minimization Technique, Fiacco and McCormick, 1968) approach of de Soete (1986) which in turn simplifies the suggestions in Carroll and Pruzansky (1980). (See \code{\link{sumt}} for more information on the \acronym{SUMT} approach.) We then use a final single linkage hierarchical clustering step to ensure that the returned object exactly satisfies the ultrametric constraints. The starting value \eqn{u_0} is obtained by \dQuote{random shaking} of the given dissimilarity object (if not given). If there are missing values in \code{x}, i.e., the given dissimilarities are \emph{incomplete}, we follow a suggestion of de Soete (1984), imputing the missing values by the weighted mean of the non-missing ones, and setting the corresponding weights to zero. Available control parameters are \code{method}, \code{control}, \code{eps}, \code{q}, and \code{verbose}, which have the same roles as for \code{\link{sumt}}, and the following. \describe{ \item{\code{nruns}}{an integer giving the number of runs to be performed. Defaults to 1.} \item{\code{start}}{a single dissimilarity, or a list of dissimilarities to be employed as starting values.} } The default optimization using conjugate gradients should work reasonably well for medium to large size problems. 
For \dQuote{small} ones, using \code{nlm} is usually faster. Note that the number of ultrametric constraints is of the order \eqn{n^3}, where \eqn{n} is the number of objects in the dissimilarity object, suggesting to use the \acronym{SUMT} approach in favor of \code{\link[stats]{constrOptim}}. If starting values for the \acronym{SUMT} are provided via \code{start}, the number of starting values gives the number of runs to be performed, and control option \code{nruns} is ignored. Otherwise, \code{nruns} starting values are obtained by random shaking of the dissimilarity to be fitted. In the case of multiple \acronym{SUMT} runs, the (first) best solution found is returned. Method \code{"IP"} implements the Iterative Projection approach of Hubert and Arabie (1995). This iteratively projects the current dissimilarities to the closed convex set given by the ultrametric constraints (3-point conditions) for a single index triple \eqn{(i, j, k)}, in fact replacing the two largest values among \eqn{d_{ij}, d_{ik}, d_{jk}} by their mean. The following control parameters can be provided via the \code{control} argument. \describe{ \item{\code{nruns}}{an integer giving the number of runs to be performed. Defaults to 1.} \item{\code{order}}{a permutation of the numbers from 1 to the number of objects in \code{x}, specifying the order in which the ultrametric constraints are considered, or a list of such permutations.} \item{\code{maxiter}}{an integer giving the maximal number of iterations to be employed.} \item{\code{tol}}{a double indicating the maximal convergence tolerance. The algorithm stops if the total absolute change in the dissimilarities in an iteration is less than \code{tol}.} \item{\code{verbose}}{a logical indicating whether to provide some output on minimization progress. Defaults to \code{getOption("verbose")}.} } If permutations are provided via \code{order}, the number of these gives the number of runs to be performed, and control option \code{nruns} is ignored. Otherwise, \code{nruns} randomly generated orders are tried. In the case of multiple runs, the (first) best solution found is returned. Non-identical weights and incomplete dissimilarities are currently not supported. Method \code{"IR"} implements the Iterative Reduction approach suggested by Roux (1988), see also Barthélémy and Guénoche (1991). This is similar to the Iterative Projection method, but modifies the dissimilarities between objects proportionally to the aggregated change incurred from the ultrametric projections. Available control parameters are identical to those of method \code{"IP"}. Non-identical weights and incomplete dissimilarities are currently not supported. It should be noted that all methods are heuristics which can not be guaranteed to find the global minimum. Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the base algorithm. } \references{ J.-P. Barthélémy and A. Guénoche (1991). \emph{Trees and proximity representations}. Chichester: John Wiley & Sons. ISBN 0-471-92263-3. J. D. Carroll and S. Pruzansky (1980). Discrete and hybrid scaling models. In E. D. Lantermann and H. Feger (eds.), \emph{Similarity and Choice}. Bern (Switzerland): Huber. L. Hubert and P. Arabie (1995). Iterative projection strategies for the least squares fitting of tree structures to proximity data. \emph{British Journal of Mathematical and Statistical Psychology}, \bold{48}, 281--317. \doi{10.1111/j.2044-8317.1995.tb01065.x}. M. Krivanek and J. Moravek (1986). 
NP-hard problems in hierarchical tree clustering. \emph{Acta Informatica}, \bold{23}, 311--323. \doi{10.1007/BF00289116}. M. Roux (1988). Techniques of approximation for building two tree structures. In C. Hayashi and E. Diday and M. Jambu and N. Ohsumi (Eds.), \emph{Recent Developments in Clustering and Data Analysis}, pages 151--170. New York: Academic Press. G. de Soete (1984). Ultrametric tree representations of incomplete dissimilarity data. \emph{Journal of Classification}, \bold{1}, 235--242. \doi{10.1007/BF01890124}. G. de Soete (1986). A least squares algorithm for fitting an ultrametric tree to a dissimilarity matrix. \emph{Pattern Recognition Letters}, \bold{2}, 133--137. \doi{10.1016/0167-8655(84)90036-9}. } \seealso{ \code{\link{cl_consensus}} for computing least squares (Euclidean) consensus hierarchies by least squares fitting of average ultrametric distances; \code{\link{l1_fit_ultrametric}}. } \examples{ ## Least squares fit of an ultrametric to the Miller-Nicely consonant ## phoneme confusion data. data("Phonemes") ## Note that the Phonemes data set has the consonant misclassification ## probabilities, i.e., the similarities between the phonemes. d <- as.dist(1 - Phonemes) u <- ls_fit_ultrametric(d, control = list(verbose = TRUE)) ## Cophenetic correlation: cor(d, u) ## Plot: plot(u) ## ("Basically" the same as Figure 1 in de Soete (1986).) } \keyword{cluster} \keyword{optimize} clue/man/n_of_classes.Rd0000644000175000017500000000347612211412701015035 0ustar nileshnilesh\name{n_of_classes} \alias{n_of_classes} \alias{cl_class_ids} \alias{as.cl_class_ids} \title{Classes in a Partition} \description{Determine the number of classes and the class ids in a partition of objects.} \usage{ n_of_classes(x) cl_class_ids(x) as.cl_class_ids(x) } \arguments{ \item{x}{an object representing a (hard or soft) partition (for \code{n_of_classes} and \code{cl_class_ids}), or raw class ids (for \code{as.cl_class_ids}).} } \value{ For \code{n_of_classes}, an integer giving the number of classes in the partition. For \code{cl_class_ids}, a vector of integers with the corresponding class ids. For soft partitions, the class ids returned are those of the \emph{nearest hard partition} obtained by taking the class ids of the (first) maximal membership values. } \details{ These function are generic functions. The methods provided in package \pkg{clue} handle the partitions obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself). Note that the number of classes is taken as the number of distinct class ids actually used in the partition, and may differ from the number of columns in a membership matrix representing the partition. \code{as.cl_class_ids} can be used for coercing \dQuote{raw} class ids (given as atomic vectors) to class id objects. } \seealso{ \code{\link{is.cl_partition}} } \examples{ data("Cassini") party <- kmeans(Cassini$x, 3) n_of_classes(party) ## A simple confusion matrix: table(cl_class_ids(party), Cassini$classes) ## For an "oversize" membership matrix representation: n_of_classes(cl_membership(party, 6)) } \keyword{cluster} clue/man/cl_fuzziness.Rd0000644000175000017500000000500414021342416015110 0ustar nileshnilesh\name{cl_fuzziness} \alias{cl_fuzziness} \title{Partition Fuzziness} \description{ Compute the fuzziness of partitions. 
} \usage{ cl_fuzziness(x, method = NULL, normalize = TRUE) } \arguments{ \item{x}{a cluster ensemble of partitions, or an R object coercible to such.} \item{method}{a character string indicating the fuzziness measure to be employed, or \code{NULL} (default), or a function to be taken as a user-defined method. Currently available built-in methods are \code{"PC"} (Partition Coefficient) and \code{"PE"} (Partition Entropy), with the default corresponding to the first one. If \code{method} is a character string, its lower-cased version is matched against the lower-cased names of the available built-in methods using \code{\link{pmatch}}.} \item{normalize}{a logical indicating whether the fuzziness measure should be normalized in a way that hard partitions have value 0, and \dQuote{completely fuzzy} partitions (where for all objects, all classes get the same membership) have value 1.} } \details{ If \eqn{m} contains the membership values of a partition, the (unnormalized) Partition Coefficient and Partition Entropy are given by \eqn{\sum_{n,i} m_{n,i}^2} and \eqn{\sum_{n,i} H(m_{n,i})}, respectively, where \eqn{H(u) = u \log u - (1-u) \log(1-u)}{u log(u) - (1-u) log(1-u)}. Note that the normalization used here is different from the normalizations typically found in the literature. If a user-defined fuzziness method is to be employed, it must be a function taking a matrix of membership values and a logical to indicate whether normalization is to be performed as its arguments (in that order; argument names are not used). } \value{ An object of class \code{"cl_fuzziness"} giving the fuzziness values. } \references{ J. C. Bezdek (1981). \emph{Pattern Recognition with Fuzzy Objective Function Algorithms}. New York: Plenum. } \seealso{ Function \code{\link[e1071]{fclustIndex}} in package \pkg{e1071}, which also computes several other \dQuote{fuzzy cluster indexes} (typically based on more information than just the membership values). } \examples{ if(require("e1071", quietly = TRUE)) { ## Use an on-line version of fuzzy c-means from package e1071 if ## available. data("Cassini") pens <- cl_boot(Cassini$x, B = 15, k = 3, algorithm = "cmeans", parameters = list(method = "ufcl")) pens summary(cl_fuzziness(pens, "PC")) summary(cl_fuzziness(pens, "PE")) } } \keyword{cluster} clue/man/cl_classes.Rd0000644000175000017500000000126711304023137014511 0ustar nileshnilesh\name{cl_classes} \alias{cl_classes} \title{Cluster Classes} \description{ Extract the classes in a partition or hierarchy. } \usage{ cl_classes(x) } \arguments{ \item{x}{an R object representing a partition or hierarchy of objects.} } \value{ A list inheriting from \code{"cl_classes_of_objects"} of vectors indicating the classes. } \details{ For partitions, the classes are the equivalence classes (\dQuote{clusters}) of the partition; for soft partitions, the classes of the nearest hard partition are used. For hierarchies represented by trees, the classes are the sets of objects corresponding to (joined at or split by) the nodes of the tree. } \keyword{cluster} clue/man/Kinship82_Consensus.Rd0000644000175000017500000000307212734171653016230 0ustar nileshnilesh\name{Kinship82_Consensus} \alias{Kinship82_Consensus} \title{Gordon-Vichi Kinship82 Consensus Partition Data} \description{ The soft (\dQuote{fuzzy}) consensus partitions for the Rosenberg-Kim kinship terms partition data given in Gordon and Vichi (2001).
} \usage{data("Kinship82_Consensus")} \format{ A named cluster ensemble of three soft partitions of the 15 kinship terms into three classes. } \details{ The elements of the ensemble are named \code{"MF1"}, \code{"MF2"}, and \code{"JMF"}, and correspond to the consensus partitions obtained by applying models 1, 2, and 3 in Gordon and Vichi (2001) to the kinship terms partition data in Rosenberg (1982), which are available as data set \code{\link{Kinship82}}. } \source{ Table 6 in Gordon and Vichi (2001). } \references{ A. D. Gordon and M. Vichi (2001). Fuzzy partition models for fitting a set of partitions. \emph{Psychometrika}, \bold{66}, 229--248. \doi{10.1007/BF02294837}. S. Rosenberg (1982). The method of sorting in multivariate research with applications selected from cognitive psychology and person perception. In N. Hirschberg and L. G. Humphreys (eds.), \emph{Multivariate Applications in the Social Sciences}, 117--142. Hillsdale, NJ: Erlbaum. } \examples{ ## Load the consensus partitions. data("Kinship82_Consensus") ## Fuzziness using the Partition Coefficient. cl_fuzziness(Kinship82_Consensus) ## (Corresponds to 1 - F in the source.) ## Dissimilarities: cl_dissimilarity(Kinship82_Consensus) cl_dissimilarity(Kinship82_Consensus, method = "comem") } \keyword{datasets} clue/man/hierarchy.Rd0000644000175000017500000000735412211412651014360 0ustar nileshnilesh\name{hierarchy} \alias{cl_hierarchy} % class ... \alias{is.cl_hierarchy} \alias{as.cl_hierarchy} \alias{cl_dendrogram} % class ... \alias{is.cl_dendrogram} \alias{as.cl_dendrogram} \alias{plot.cl_dendrogram} \title{Hierarchies} \description{ Determine whether an R object represents a hierarchy of objects, or coerce to an R object representing such.} \usage{ is.cl_hierarchy(x) is.cl_dendrogram(x) as.cl_hierarchy(x) as.cl_dendrogram(x) } \arguments{ \item{x}{an R object.} } \value{ For the testing functions, a logical indicating whether the given object represents a clustering of objects of the respective kind. For the coercion functions, a container object inheriting from \code{"cl_hierarchy"}, with a suitable representation of the hierarchy given by \code{x}. } \details{ These functions are generic functions. The methods provided in package \pkg{clue} handle the partitions and hierarchies obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{ape}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself). The hierarchies considered by \pkg{clue} are \emph{\eqn{n}-trees} (hierarchies in the strict sense) and \emph{dendrograms} (also known as valued \eqn{n}-trees or total indexed hierarchies), which are represented by the virtual classes \code{"cl_hierarchy"} and \code{"cl_dendrogram"} (which inherits from the former), respectively. \eqn{n}-trees on a set \eqn{X} of objects correspond to collections \eqn{H} of subsets of \eqn{X}, usually called \emph{classes} of the hierarchy, which satisfy the following properties: \itemize{ \item \eqn{H} contains all singletons with objects of \eqn{X}, \eqn{X} itself, but not the empty set; \item The intersection of two sets \eqn{A} and \eqn{B} in \eqn{H} is either empty or one of the sets. } The classes of a hierarchy can be obtained by \code{\link{cl_classes}}. 
Dendrograms are \eqn{n}-trees where additionally a height \eqn{h} is associated with each of the classes, so that for two classes \eqn{A} and \eqn{B} with non-empty intersection we have \eqn{h(A) \le h(B)} iff \eqn{A} is a subset of \eqn{B}. For each pair of objects one can then define \eqn{u_{ij}} as the height of the smallest class containing both \eqn{i} and \eqn{j}: this results in a dissimilarity on \eqn{X} which satisfies the ultrametric (3-point) conditions \eqn{u_{ij} \le \max(u_{ik}, u_{jk})} for all triples \eqn{(i, j, k)} of objects. Conversely, an ultrametric dissimilarity induces a unique dendrogram. The ultrametric dissimilarities of a dendrogram can be obtained by \code{\link{cl_ultrametric}}. \code{as.cl_hierarchy} returns an object of class \code{"cl_hierarchy"} \dQuote{containing} the given object \code{x} if this already represents a hierarchy (i.e., \code{is.cl_hierarchy(x)} is true), or the ultrametric obtained from \code{x} via \code{\link{as.cl_ultrametric}}. \code{as.cl_dendrogram} returns an object which has class \code{"cl_dendrogram"} and inherits from \code{"cl_hierarchy"}, and contains \code{x} if it represents a dendrogram (i.e., \code{is.cl_dendrogram(x)} is true), or the ultrametric obtained from \code{x}. Conceptually, hierarchies and dendrograms are \emph{virtual} classes, allowing for a variety of representations. There are group methods for comparing dendrograms and computing their minimum, maximum, and range based on the meet and join operations, see \code{\link{cl_meet}}. There is also a \code{plot} method. } \examples{ hcl <- hclust(dist(USArrests)) is.cl_dendrogram(hcl) is.cl_hierarchy(hcl) } \keyword{cluster} clue/man/cl_predict.Rd0000644000175000017500000000411412211412617014503 0ustar nileshnilesh\name{cl_predict} \alias{cl_predict} \title{Predict Memberships} \description{ Predict class ids or memberships from R objects representing partitions. } \usage{ cl_predict(object, newdata = NULL, type = c("class_ids", "memberships"), ...) } \arguments{ \item{object}{an R object representing a partition of objects.} \item{newdata}{an optional data set giving the objects to make predictions for. This must be of the same \dQuote{kind} as the data set employed for obtaining the partition. If omitted, the original data are used.} \item{type}{a character string indicating whether class ids or memberships should be returned. May be abbreviated.} \item{\dots}{arguments to be passed to and from methods.} } \value{ Depending on \code{type}, an object of class \code{"cl_class_ids"} with the predicted class ids, or of class \code{"cl_membership"} with the matrix of predicted membership values. } \details{ Many algorithms resulting in partitions of a given set of objects can be taken to induce a partition of the underlying feature space for the measurements on the objects, so that class memberships for \dQuote{new} objects can be obtained from the induced partition. Examples include partitions based on assigning objects to their \dQuote{closest} prototypes, or providing mixture models for the distribution of objects in feature space. This is a generic function. The methods provided in package \pkg{clue} handle the partitions obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself). 
} \examples{ ## Run kmeans on a random subset of the Cassini data, and predict the ## memberships for the "test" data set. data("Cassini") nr <- NROW(Cassini$x) ind <- sample(nr, 0.9 * nr, replace = FALSE) party <- kmeans(Cassini$x[ind, ], 3) table(cl_predict(party, Cassini$x[-ind, ]), Cassini$classes[-ind]) } \keyword{cluster} clue/man/partition.Rd0000644000175000017500000000525312734174303014420 0ustar nileshnilesh\name{partition} \alias{cl_partition} % class ... \alias{is.cl_partition} \alias{is.cl_hard_partition} \alias{is.cl_soft_partition} \alias{cl_hard_partition} % class ... \alias{as.cl_partition} \alias{as.cl_hard_partition} \title{Partitions} \description{ Determine whether an R object represents a partition of objects, or coerce to an R object representing such.} \usage{ is.cl_partition(x) is.cl_hard_partition(x) is.cl_soft_partition(x) as.cl_partition(x) as.cl_hard_partition(x) } \arguments{ \item{x}{an R object.} } \value{ For the testing functions, a logical indicating whether the given object represents a clustering of objects of the respective kind. For the coercion functions, a container object inheriting from \code{"cl_partition"}, with a suitable representation of the partition given by \code{x}. } \details{ \code{is.cl_partition} and \code{is.cl_hard_partition} are generic functions. The methods provided in package \pkg{clue} handle the partitions obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself). \code{is.cl_soft_partition} gives true iff \code{is.cl_partition} is true and \code{is.cl_hard_partition} is false. \code{as.cl_partition} returns an object of class \code{"cl_partition"} \dQuote{containing} the given object \code{x} if this already represents a partition (i.e., \code{is.cl_partition(x)} is true), or the memberships obtained from \code{x} via \code{\link{as.cl_membership}}. \code{as.cl_hard_partition(x)} returns an object which has class \code{"cl_hard_partition"} and inherits from \code{"cl_partition"}, and contains \code{x} if it already represents a hard partition (i.e., provided that \code{is.cl_hard_partition(x)} is true), or the class ids obtained from \code{x}, using \code{x} if this is an atomic vector of raw class ids, or, if \code{x} represents a soft partition or is a raw matrix of membership values, using the class ids of the \emph{nearest hard partition}, defined by taking the class ids of the (first) maximal membership values. Conceptually, partitions and hard partitions are \emph{virtual} classes, allowing for a variety of representations. There are group methods for comparing partitions and computing their minimum, maximum, and range based on the meet and join operations, see \code{\link{cl_meet}}. 
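For illustration, the coercions could be exercised on a small made-up membership matrix as follows (a sketch only; the values are purely for exposition): \preformatted{
m <- matrix(c(0.7, 0.3,
              0.2, 0.8,
              0.5, 0.5), nrow = 3, byrow = TRUE)
as.cl_partition(m)                     ## a (soft) partition
cl_class_ids(as.cl_hard_partition(m))  ## ids of the nearest hard partition
} with the class ids of the nearest hard partition obtained by taking the (first) maximal membership in each row.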
} \examples{ data("Cassini") pcl <- kmeans(Cassini$x, 3) is.cl_partition(pcl) is.cl_hard_partition(pcl) is.cl_soft_partition(pcl) } \keyword{cluster} clue/src/0000755000175000017500000000000014130772671012133 5ustar nileshnileshclue/src/lsap.c0000644000175000017500000000033311304023137013217 0ustar nileshnilesh#include #include "assignment.h" #include "clue.h" void solve_LSAP(double *c, Sint *n, Sint *p) { AP *ap; ap = ap_create_problem(c, *n); ap_hungarian(ap); ap_assignment(ap, p); ap_free(ap); } clue/src/assignment.h0000644000175000017500000000334511623271704014454 0ustar nileshnilesh#include #include #include #include /* INT_MAX */ #include /* DBL_MAX */ #include #include /* constants used for improving readability of code */ #define COVERED 1 #define UNCOVERED 0 #define ASSIGNED 1 #define UNASSIGNED 0 #define TRUE 1 #define FALSE 0 #define MARKED 1 #define UNMARKED 0 #define REDUCE 1 #define NOREDUCE 0 typedef struct{ int n; /* order of problem */ double **C; /* cost matrix */ double **c; /* reduced cost matrix */ int *s; /* assignment */ int *f; /* column i is assigned to f[i] */ int na; /* number of assigned items; */ int runs; /* number of iterations */ double cost; /* minimum cost */ time_t rtime; /* time */ } AP; /* public interface */ /* constructors and destructor */ AP *ap_create_problem(double *t, int n); AP *ap_create_problem_from_matrix(double **t, int n); AP *ap_read_problem(char *file); void ap_free(AP *p); int ap_assignment(AP *p, int *res); int ap_costmatrix(AP *p, double **m); int ap_datamatrix(AP *p, double **m); int ap_iterations(AP *p); void ap_hungarian(AP *p); double ap_mincost(AP *p); void ap_print_solution(AP *p); void ap_show_data(AP *p); int ap_size(AP *p); int ap_time(AP *p); /* error reporting */ void ap_error(char *message); /* private functions */ void preprocess(AP *p); void preassign(AP *p); int cover(AP *p, int *ri, int *ci); void reduce(AP *p, int *ri, int *ci); clue/src/clue.c0000644000175000017500000000135211304023137013212 0ustar nileshnilesh#include #include #include "clue.h" double **clue_vector_to_square_matrix(double *x, Sint n) { double **data, *val; Sint i, j; data = (double **) R_alloc(n, sizeof(double)); for(i = 0; i < n; i++) { data[i] = (double *) R_alloc(n, sizeof(double)); val = x + i; for(j = 0; j < n; j++, val += n) data[i][j] = *val; } return(data); } static int clue_sign(double x) { if(x == 0) return(0); return((x > 0) ? 
1 : -1); } void clue_dissimilarity_count_inversions(double *x, double *y, Sint *n, double *count) { Sint i, j; for(i = 0; i < *n; i++) for(j = 0; j < *n; j++) if((clue_sign(x[i] - x[j]) * clue_sign(y[i] - y[j])) < 0) (*count)++; } clue/src/clue.h0000644000175000017500000000205611304023137013221 0ustar nileshnilesh#ifndef _CLUE_H #define _CLUE_H #include void solve_LSAP(double *c, Sint *n, Sint *p); double **clue_vector_to_square_matrix(double *x, Sint n); void clue_dissimilarity_count_inversions(double *x, double *y, Sint *n, double *count); void deviation_from_ultrametricity(double *x, int *n, double *v, int *max); void deviation_from_ultrametricity_gradient(double *x, int *n, double *out); void deviation_from_additivity(double *x, int *n, double *v, int *max); void deviation_from_additivity_gradient(double *x, int *n, double *out); void ls_fit_ultrametric_by_iterative_reduction(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose); void ls_fit_ultrametric_by_iterative_projection(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose); void ls_fit_addtree_by_iterative_reduction(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose); void ls_fit_addtree_by_iterative_projection(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose); #endif clue/src/trees.c0000644000175000017500000002401211304023137013402 0ustar nileshnilesh#include #include #include "clue.h" static int iwork3[3]; static int iwork4[4]; static void isort3(int *i, int *j, int *k) { iwork3[0] = *i; iwork3[1] = *j; iwork3[2] = *k; R_isort(iwork3, 3); *i = iwork3[0]; *j = iwork3[1]; *k = iwork3[2]; } static void isort4(int *i, int *j, int *k, int *l) { iwork4[0] = *i; iwork4[1] = *j; iwork4[2] = *k; iwork4[3] = *l; R_isort(iwork4, 4); *i = iwork4[0]; *j = iwork4[1]; *k = iwork4[2]; *l = iwork4[3]; } void deviation_from_ultrametricity(double *x, int *n, double *v, int *max) { double **D, p, delta, A, B, C; int i, j, k; D = clue_vector_to_square_matrix(x, *n); p = 0; for(i = 0; i < *n - 2; i++) for(j = i + 1; j < *n - 1; j++) { A = D[i][j]; for(k = j + 1; k < *n; k++) { B = D[i][k]; C = D[j][k]; if((A <= B) && (A <= C)) delta = C - B; else if(B <= C) delta = A - C; else delta = B - A; if(*max) p = fmax2(p, fabs(delta)); else p += delta * delta; } } *v = p; } void deviation_from_ultrametricity_gradient(double *x, int *n, double *out) { double **D, **G, A, B, C, delta; int i, j, k; D = clue_vector_to_square_matrix(x, *n); G = clue_vector_to_square_matrix(out, *n); for(i = 0; i < *n - 2; i++) for(j = i + 1; j < *n - 1; j++) { A = D[i][j]; for(k = j + 1; k < *n; k++) { B = D[i][k]; C = D[j][k]; if((A <= B) && (A <= C)) { delta = 2 * (B - C); G[i][k] += delta; G[j][k] -= delta; } else if(B <= C) { delta = 2 * (C - A); G[j][k] += delta; G[i][j] -= delta; } else { delta = 2 * (A - B); G[i][j] += delta; G[i][k] -= delta; } } } for(i = 0; i < *n; i++) for(j = 0; j < *n; j++) *out++ = G[i][j]; } void deviation_from_additivity(double *x, int *n, double *v, int *max) { double **D, p, delta, A, B, C; int i, j, k, l; D = clue_vector_to_square_matrix(x, *n); p = 0; for(i = 0; i < *n - 3; i++) for(j = i + 1; j < *n - 2; j++) for(k = j + 1; k < *n - 1; k++) for(l = k + 1; l < *n; l++) { A = D[i][j] + D[k][l]; B = D[i][k] + D[j][l]; C = D[i][l] + D[j][k]; if((A <= B) && (A <= C)) delta = (C - B); else if(B <= C) delta = (A - C); else delta = (B - A); if(*max) p = fmax2(p, fabs(delta)); else p += delta * delta; } *v = p; } void 
deviation_from_additivity_gradient(double *x, int *n, double *out) { double **D, **G, A, B, C, delta; int i, j, k, l; D = clue_vector_to_square_matrix(x, *n); G = clue_vector_to_square_matrix(out, *n); for(i = 0; i < *n - 3; i++) for(j = i + 1; j < *n - 2; j++) for(k = j + 1; k < *n - 1; k++) for(l = k + 1; l < *n; l++) { A = D[i][j] + D[k][l]; B = D[i][k] + D[j][l]; C = D[i][l] + D[j][k]; if((A <= B) && (A <= C)) { delta = 2 * (B - C); G[i][l] -= delta; G[j][k] -= delta; G[i][k] += delta; G[j][l] += delta; } else if(B <= C) { delta = 2 * (C - A); G[i][l] += delta; G[j][k] += delta; G[i][j] -= delta; G[k][l] -= delta; } else { delta = 2 * (A - B); G[i][k] -= delta; G[j][l] -= delta; G[i][j] += delta; G[k][l] += delta; } } for(i = 0; i < *n; i++) for(j = 0; j < *n; j++) *out++ = G[i][j]; } void ls_fit_ultrametric_by_iterative_reduction(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose) { double A, B, C, **D, DQ, delta, tmp; int i, i1, j, j1, k, k1, N3; D = clue_vector_to_square_matrix(d, *n); /* And initialize the upper half of D ("work array") to 0. (Yes, this could be done more efficiently by just propagating the veclh dist representation.) */ for(i = 0; i < *n - 1; i++) for(j = i + 1; j < *n; j++) D[i][j] = 0; N3 = (*n - 2); for(*iter = 0; *iter < *maxiter; (*iter)++) { if(*verbose) Rprintf("Iteration: %d, ", *iter); for(i1 = 0; i1 < *n - 2; i1++) for(j1 = i1 + 1; j1 < *n - 1; j1++) for(k1 = j1 + 1; k1 < *n; k1++) { i = order[i1]; j = order[j1]; k = order[k1]; isort3(&i, &j, &k); A = D[j][i]; B = D[k][i]; C = D[k][j]; /* B & G have a divisor of 2 for case 1 and 4 for cases 2 and 3 ... clearly, we should use the same in all cases, but should it be 2 or 4? */ if((A <= B) && (A <= C)) { /* Case 1: 5080 */ DQ = (C - B) / 2; D[i][k] += DQ; D[j][k] -= DQ; } else if(B <= C) { /* Case 2: 5100 */ DQ = (C - A) / 2; D[i][j] += DQ; D[j][k] -= DQ; } else { /* Case 3: 5120 */ DQ = (B - A) / 2; D[i][j] += DQ; D[i][k] -= DQ; } } delta = 0; for(i = 0; i < *n - 1; i++) for(j = i + 1; j < *n; j++) { tmp = D[i][j] / N3; D[j][i] += tmp; D[i][j] = 0; delta += fabs(tmp); } if(*verbose) Rprintf("change: %f\n", delta); if(delta < *tol) break; } /* And now write results back. Could make this more efficient, of course ... */ for(j = 0; j < *n; j++) for(i = 0; i < *n; i++) *d++ = D[i][j]; } void ls_fit_ultrametric_by_iterative_projection(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose) { double A, B, C, **D, delta; int i, i1, j, j1, k, k1; D = clue_vector_to_square_matrix(d, *n); for(*iter = 0; *iter < *maxiter; (*iter)++) { if(*verbose) Rprintf("Iteration: %d, ", *iter); delta = 0; for(i1 = 0; i1 < *n - 2; i1++) for(j1 = i1 + 1; j1 < *n - 1; j1++) for(k1 = j1 + 1; k1 < *n; k1++) { i = order[i1]; j = order[j1]; k = order[k1]; isort3(&i, &j, &k); A = D[i][j]; B = D[i][k]; C = D[j][k]; if((A <= B) && (A <= C)) { D[i][k] = D[j][k] = (B + C) / 2; delta += fabs(B - C); } else if(B <= C) { D[i][j] = D[j][k] = (C + A) / 2; delta += fabs(C - A); } else { D[i][j] = D[i][k] = (A + B) / 2; delta += fabs(A - B); } } if(*verbose) Rprintf("change: %f\n", delta); if(delta < *tol) break; } for(i = 0; i < *n - 1; i++) for(j = i + 1; j < *n; j++) D[j][i] = D[i][j]; /* And now write results back. Could make this more efficient, of course ... 
*/ for(j = 0; j < *n; j++) for(i = 0; i < *n; i++) *d++ = D[i][j]; } void ls_fit_addtree_by_iterative_reduction(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose) { /* Once we have ls_fit_ultrametric_by_iterative_reduction() we can always do this as well ... See page 67f in Barthelemy and Guenoche. */ double A, B, C, **D, DQ, delta, tmp, N3; int i, i1, j, j1, k, k1, l, l1; D = clue_vector_to_square_matrix(d, *n); /* And initialize the upper half of D ("work array") to 0. (Yes, this could be done more efficiently by just propagating the veclh dist representation.) */ for(i = 0; i < *n - 1; i++) for(j = i + 1; j < *n; j++) D[i][j] = 0; N3 = (*n - 2) * (*n - 3) / 2; for(*iter = 0; *iter < *maxiter; (*iter)++) { if(*verbose) Rprintf("Iteration: %d, ", *iter); for(i1 = 0; i1 < *n - 3; i1++) for(j1 = i1 + 1; j1 < *n - 2; j1++) for(k1 = j1 + 1; k1 < *n - 1; k1++) for(l1 = k1 + 1; l1 < *n; l1++) { i = order[i1]; j = order[j1]; k = order[k1]; l = order[l1]; isort4(&i, &j, &k, &l); A = D[j][i] + D[l][k]; B = D[k][i] + D[l][j]; C = D[l][i] + D[k][j]; if((A <= B) && (A <= C)) { /* Case 1: 5090 */ DQ = (C - B) / 4; D[i][l] -= DQ; D[j][k] -= DQ; D[i][k] += DQ; D[j][l] += DQ; } else if(B <= C) { /* Case 2: 5120 */ DQ = (A - C) / 4; D[i][l] += DQ; D[j][k] += DQ; D[i][j] -= DQ; D[k][l] -= DQ; } else { /* Case 3: 5150 */ DQ = (B - A) / 4; D[i][k] -= DQ; D[j][l] -= DQ; D[i][j] += DQ; D[k][l] += DQ; } } delta = 0; for(i = 0; i < *n - 1; i++) for(j = i + 1; j < *n; j++) { tmp = D[i][j] / N3; D[j][i] += tmp; D[i][j] = 0; delta += fabs(tmp); } if(*verbose) Rprintf("change: %f\n", delta); if(delta < *tol) break; } /* And now write results back. Could make this more efficient, of course ... */ for(j = 0; j < *n; j++) for(i = 0; i < *n; i++) *d++ = D[i][j]; } void ls_fit_addtree_by_iterative_projection(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose) { double A, B, C, **D, DQ, delta; int i, i1, j, j1, k, k1, l, l1; D = clue_vector_to_square_matrix(d, *n); for(*iter = 0; *iter < *maxiter; (*iter)++) { delta = 0; if(*verbose) Rprintf("Iteration: %d, ", *iter); for(i1 = 0; i1 < *n - 3; i1++) for(j1 = i1 + 1; j1 < *n - 2; j1++) for(k1 = j1 + 1; k1 < *n - 1; k1++) for(l1 = k1 + 1; l1 < *n; l1++) { i = order[i1]; j = order[j1]; k = order[k1]; l = order[l1]; isort4(&i, &j, &k, &l); A = D[i][j] + D[k][l]; B = D[i][k] + D[j][l]; C = D[i][l] + D[j][k]; if((A <= B) && (A <= C)) { DQ = (C - B) / 4; D[i][l] -= DQ; D[j][k] -= DQ; D[i][k] += DQ; D[j][l] += DQ; delta += fabs(C - B); } else if(B <= C) { DQ = (A - C) / 4; D[i][l] += DQ; D[j][k] += DQ; D[i][j] -= DQ; D[k][l] -= DQ; delta += fabs(A - C); } else { DQ = (B - A) / 4; D[i][k] -= DQ; D[j][l] -= DQ; D[i][j] += DQ; D[k][l] += DQ; delta += fabs(B - A); } } if(*verbose) Rprintf("change: %f\n", delta); if(delta < *tol) break; } for(i = 0; i < *n - 1; i++) for(j = i + 1; j < *n; j++) D[j][i] = D[i][j]; /* And now write results back. Could make this more efficient, of course ... 
*/ for(j = 0; j < *n; j++) for(i = 0; i < *n; i++) *d++ = D[i][j]; } clue/src/assignment.c0000644000175000017500000002370411623271641014450 0ustar nileshnilesh#include /* error() */ #include "assignment.h" /* main routine */ void ap_hungarian(AP *p) { int n; /* size of problem */ int *ri; /* covered rows */ int *ci; /* covered columns */ time_t start, end; /* timer */ int i, j, ok; start = time(0); n = p->n; p->runs = 0; /* allocate memory */ p->s = calloc(1 + n, sizeof(int)); p->f = calloc(1 + n, sizeof(int)); ri = calloc(1 + n, sizeof(int)); ci = calloc(1 + n, sizeof(int)); if(ri == NULL || ci == NULL || p->s == NULL || p->f == NULL) error("ap_hungarian: could not allocate memory!"); preprocess(p); preassign(p); while(p->na < n){ if(REDUCE == cover(p, ri, ci)) reduce(p, ri, ci); ++p->runs; } end = time(0); p->rtime = end - start; /* check if assignment is a permutation of (1..n) */ for(i = 1; i <= n; i++){ ok = 0; for(j = 1; j <= n; j++) if(p->s[j] == i) ++ok; if(ok != 1) error("ap_hungarian: error in assigment, is not a permutation!"); } /* calculate cost of assignment */ p->cost = 0; for(i = 1; i <= n; i++) p->cost+= p->C[i][p->s[i]]; /* reset result back to base-0 indexing */ for(i = 1; i <= n; i++) p->s[i - 1] = p->s[i] - 1; /* free memory */ free(ri); free(ci); } /* abbreviated interface */ int ap_assignment(AP *p, int *res) { int i; if(p->s == NULL) ap_hungarian(p); for(i = 0; i < p->n; i++) res[i] = p->s[i]; return p->n; } /*******************************************************************/ /* constructors */ /* read data from file */ /*******************************************************************/ AP *ap_read_problem(char *file) { FILE *f; int i,j,c; int m,n; double x; double **t; int nrow,ncol; AP *p; f = fopen(file,"r"); if(f==NULL) return NULL; t = (double **)malloc(sizeof(double*)); m = 0; n = 0; nrow = 0; ncol = 0; while(EOF != (i = fscanf(f, "%lf", &x))){ if(i == 1){ if(n == 0){ t = (double **) realloc(t,(m + 1) * sizeof(double *)); t[m] = (double *) malloc(sizeof(double)); }else t[m] = (double *) realloc(t[m], (n + 1) * sizeof(double)); t[m][n++] = x; ncol = (ncol < n) ? n : ncol; c=fgetc(f); if(c == '\n'){ n = 0; ++m; nrow = (nrow < m) ? 
m : nrow; } } } fclose(f); /* prepare data */ if(nrow != ncol){ /* fprintf(stderr,"ap_read_problem: problem not quadratic\nrows =%d, cols = %d\n",nrow,ncol); */ warning("ap_read_problem: problem not quadratic\nrows = %d, cols = %d\n", nrow, ncol); return NULL; } p = (AP*) malloc(sizeof(AP)); p->n = ncol; p->C = (double **) malloc((1 + nrow)*sizeof(double *)); p->c = (double **) malloc((1 + nrow)*sizeof(double *)); if(p->C == NULL || p->c == NULL) return NULL; for(i = 1; i <= nrow; i++){ p->C[i] = (double *) calloc(ncol + 1, sizeof(double)); p->c[i] = (double *) calloc(ncol + 1, sizeof(double)); if(p->C[i] == NULL || p->c[i] == NULL) return NULL; } for(i = 1; i <= nrow; i++) for( j = 1; j <= ncol; j++){ p->C[i][j] = t[i-1][j-1]; p->c[i][j] = t[i-1][j-1]; } for(i = 0; i < nrow; i++) free(t[i]); free(t); p->cost = 0; p->s = NULL; p->f = NULL; return p; } AP *ap_create_problem_from_matrix(double **t, int n) { int i,j; AP *p; p = (AP*) malloc(sizeof(AP)); if(p == NULL) return NULL; p->n = n; p->C = (double **) malloc((n + 1) * sizeof(double *)); p->c = (double **) malloc((n + 1) * sizeof(double *)); if(p->C == NULL || p->c == NULL) return NULL; for(i = 1; i <= n; i++){ p->C[i] = (double *) calloc(n + 1, sizeof(double)); p->c[i] = (double *) calloc(n + 1, sizeof(double)); if(p->C[i] == NULL || p->c[i] == NULL) return NULL; } for(i = 1; i <= n; i++) for( j = 1; j <= n; j++){ p->C[i][j] = t[i-1][j-1]; p->c[i][j] = t[i-1][j-1]; } p->cost = 0; p->s = NULL; p->f = NULL; return p; } /* read data from vector */ AP *ap_create_problem(double *t, int n) { int i,j; AP *p; p = (AP*) malloc(sizeof(AP)); if(p == NULL) return NULL; p->n = n; p->C = (double **) malloc((n + 1) * sizeof(double *)); p->c = (double **) malloc((n + 1) * sizeof(double *)); if(p->C == NULL || p->c == NULL) return NULL; for(i = 1; i <= n; i++){ p->C[i] = (double *) calloc(n + 1, sizeof(double)); p->c[i] = (double *) calloc(n + 1, sizeof(double)); if(p->C[i] == NULL || p->c[i] == NULL) return NULL; } for(i = 1; i <= n; i++) for( j = 1; j <= n; j++){ p->C[i][j] = t[n*(j - 1) + i - 1]; p->c[i][j] = t[n*(j - 1) + i - 1]; } p->cost = 0; p->s = NULL; p->f = NULL; return p; } /* destructor */ void ap_free(AP *p) { int i; free(p->s); free(p->f); for(i = 1; i <= p->n; i++){ free(p->C[i]); free(p->c[i]); } free(p->C); free(p->c); free(p); } /* set + get functions */ /* void ap_show_data(AP *p) { int i, j; for(i = 1; i <= p->n; i++){ for(j = 1; j <= p->n; j++) printf("%6.2f ", p->c[i][j]); printf("\n"); } } */ double ap_mincost(AP *p) { if(p->s == NULL) ap_hungarian(p); return p->cost; } int ap_size(AP *p) { return p->n; } int ap_time(AP *p) { return (int) p->rtime; } int ap_iterations(AP *p) { return p->runs; } /* void ap_print_solution(AP *p) { int i; printf("%d itertations, %d secs.\n",p->runs, (int)p->rtime); printf("Min Cost: %10.4f\n",p->cost); for(i = 0; i < p->n; i++) printf("%4d",p->s[i]); printf("\n"); } */ int ap_costmatrix(AP *p, double **m) { int i,j; for(i = 0; i < p->n; i++) for(j = 0; j < p->n; j++) m[i][j] = p->C[i + 1][j + 1]; return p->n; } int ap_datamatrix(AP *p, double **m) { int i,j; for(i = 0; i < p->n; i++) for(j = 0; j < p->n; j++) m[i][j] = p->c[i + 1][j + 1]; return p->n; } /* error reporting */ /* void ap_error(char *message) { fprintf(stderr,"%s\n",message); exit(1); } */ /*************************************************************/ /* these functions are used internally */ /* by ap_hungarian */ /*************************************************************/ int cover(AP *p, int *ri, int *ci) { int *mr, i, r; int 
n; n = p->n; mr = calloc(1 + p->n, sizeof(int)); /* reset cover indices */ for(i = 1; i <= n; i++){ if(p->s[i] == UNASSIGNED){ ri[i] = UNCOVERED; mr[i] = MARKED; } else ri[i] = COVERED; ci[i] = UNCOVERED; } while(TRUE){ /* find marked row */ r = 0; for(i = 1; i <= n; i++) if(mr[i] == MARKED){ r = i; break; } if(r == 0) break; for(i = 1; i <= n; i++) if(p->c[r][i] == 0 && ci[i] == UNCOVERED){ if(p->f[i]){ ri[p->f[i]] = UNCOVERED; mr[p->f[i]] = MARKED; ci[i] = COVERED; }else{ if(p->s[r] == UNASSIGNED) ++p->na; p->f[p->s[r]] = 0; p->f[i] = r; p->s[r] = i; free(mr); return NOREDUCE; } } mr[r] = UNMARKED; } free(mr); return REDUCE; } void reduce(AP *p, int *ri, int *ci) { int i, j, n; double min; n = p->n; /* find minimum in uncovered c-matrix */ min = DBL_MAX; for(i = 1; i <= n; i++) for(j = 1; j <= n; j++) if(ri[i] == UNCOVERED && ci[j] == UNCOVERED){ if(p->c[i][j] < min) min = p->c[i][j]; } /* subtract min from each uncovered element and add it to each element */ /* which is covered twice */ for(i = 1; i <= n; i++) for(j = 1; j <= n; j++){ if(ri[i] == UNCOVERED && ci[j] == UNCOVERED) p->c[i][j]-= min; if(ri[i] == COVERED && ci[j] == COVERED) p->c[i][j]+= min; } } void preassign(AP *p) { int i, j, min, r, c, n, count; int *ri, *ci, *rz, *cz; n = p->n; p->na = 0; /* row and column markers */ ri = calloc(1 + n, sizeof(int)); ci = calloc(1 + n, sizeof(int)); /* row and column counts of zeroes */ rz = calloc(1 + n, sizeof(int)); cz = calloc(1 + n, sizeof(int)); for(i = 1; i <= n; i++){ count = 0; for(j = 1; j <= n; j++) if(p->c[i][j] == 0) ++count; rz[i] = count; } for(i = 1; i <= n; i++){ count = 0; for(j = 1; j <= n; j++) if(p->c[j][i] == 0) ++count; cz[i] = count; } while(TRUE){ /* find unassigned row with least number of zeroes > 0 */ min = INT_MAX; r = 0; for(i = 1; i <= n; i++) if(rz[i] > 0 && rz[i] < min && ri[i] == UNASSIGNED){ min = rz[i]; r = i; } /* check if we are done */ if(r == 0) break; /* find unassigned column in row r with least number of zeroes */ c = 0; min = INT_MAX; for(i = 1; i <= n; i++) if(p->c[r][i] == 0 && cz[i] < min && ci[i] == UNASSIGNED){ min = cz[i]; c = i; } if(c){ ++p->na; p->s[r] = c; p->f[c] = r; ri[r] = ASSIGNED; ci[c] = ASSIGNED; /* adjust zero counts */ cz[c] = 0; for(i = 1; i <= n; i++) if(p->c[i][c] == 0) --rz[i]; } } /* free memory */ free(ri); free(ci); free(rz); free(cz); } void preprocess(AP *p) { int i, j, n; double min; n = p->n; /* subtract column minima in each row */ for(i = 1; i <= n; i++){ min = p->c[i][1]; for(j = 2; j <= n; j++) if(p->c[i][j] < min) min = p->c[i][j]; for(j = 1; j <= n; j++) p->c[i][j]-= min; } /* subtract row minima in each column */ for(i = 1; i <= n; i++){ min = p->c[1][i]; for(j = 2; j <= n; j++) if(p->c[j][i] < min) min = p->c[j][i]; for(j = 1; j <= n; j++) p->c[j][i]-= min; } } clue/src/init.c0000644000175000017500000000373613340544526013251 0ustar nileshnilesh#include #include #include #include "clue.h" static R_NativePrimitiveArgType solve_LSAP_t[3] = { REALSXP, INTSXP, INTSXP }; static R_NativePrimitiveArgType clue_dissimilarity_count_inversions_t[4] = { REALSXP, REALSXP, INTSXP, REALSXP }; static R_NativePrimitiveArgType deviation_from_ultrametricity_t[4] = { REALSXP, INTSXP, REALSXP, LGLSXP }; static R_NativePrimitiveArgType deviation_from_ultrametricity_gradient_t[3] = { REALSXP, INTSXP, REALSXP }; static R_NativePrimitiveArgType deviation_from_additivity_t[4] = { REALSXP, INTSXP, REALSXP, LGLSXP }; static R_NativePrimitiveArgType deviation_from_additivity_gradient_t[3] = { REALSXP, INTSXP, REALSXP }; static 
R_NativePrimitiveArgType ls_fit_ultrametric_by_iterative_reduction_t[7] = { REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, LGLSXP }; static R_NativePrimitiveArgType ls_fit_ultrametric_by_iterative_projection_t[7] = { REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, LGLSXP }; static R_NativePrimitiveArgType ls_fit_addtree_by_iterative_reduction_t[7] = { REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, LGLSXP }; static R_NativePrimitiveArgType ls_fit_addtree_by_iterative_projection_t[7] = { REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, LGLSXP }; #define CDEF(name) {#name, (DL_FUNC) &name, sizeof(name ## _t)/sizeof(name ## _t[0]), name ##_t} static const R_CMethodDef cMethods[] = { CDEF(solve_LSAP), CDEF(clue_dissimilarity_count_inversions), CDEF(deviation_from_ultrametricity), CDEF(deviation_from_ultrametricity_gradient), CDEF(deviation_from_additivity), CDEF(deviation_from_additivity_gradient), CDEF(ls_fit_ultrametric_by_iterative_reduction), CDEF(ls_fit_ultrametric_by_iterative_projection), CDEF(ls_fit_addtree_by_iterative_reduction), CDEF(ls_fit_addtree_by_iterative_projection), {NULL, NULL, 0} }; void R_init_clue(DllInfo *dll) { R_registerRoutines(dll, cMethods, NULL, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } clue/vignettes/0000755000175000017500000000000014130772671013354 5ustar nileshnileshclue/vignettes/clue.Rnw0000644000175000017500000016521512734170652015005 0ustar nileshnilesh\documentclass[fleqn]{article} \usepackage[round,longnamesfirst]{natbib} \usepackage{graphicx,keyval,hyperref,doi} \newcommand\argmin{\mathop{\mathrm{arg min}}} \newcommand\trace{\mathop{\mathrm{tr}}} \newcommand\R{{\mathbb{R}}} \newcommand{\pkg}[1]{{\normalfont\fontseries{b}\selectfont #1}} \newcommand{\sQuote}[1]{`{#1}'} \newcommand{\dQuote}[1]{``{#1}''} \let\code=\texttt \newcommand{\file}[1]{\sQuote{\textsf{#1}}} \newcommand{\class}[1]{\code{"#1"}} \SweaveOpts{strip.white=true} \AtBeginDocument{\setkeys{Gin}{width=0.6\textwidth}} \date{2007-06-28} \title{A CLUE for CLUster Ensembles} \author{Kurt Hornik} %% \VignetteIndexEntry{CLUster Ensembles} \sloppy{} \begin{document} \maketitle \begin{abstract} Cluster ensembles are collections of individual solutions to a given clustering problem which are useful or necessary to consider in a wide range of applications. The R package~\pkg{clue} provides an extensible computational environment for creating and analyzing cluster ensembles, with basic data structures for representing partitions and hierarchies, and facilities for computing on these, including methods for measuring proximity and obtaining consensus and ``secondary'' clusterings. \end{abstract} <>= options(width = 60) library("clue") @ % \section{Introduction} \label{sec:introduction} \emph{Cluster ensembles} are collections of clusterings, which are all of the same ``kind'' (e.g., collections of partitions, or collections of hierarchies), of a set of objects. Such ensembles can be obtained, for example, by varying the (hyper)parameters of a ``base'' clustering algorithm, by resampling or reweighting the set of objects, or by employing several different base clusterers. Questions of ``agreement'' in cluster ensembles, and obtaining ``consensus'' clusterings from it, have been studied in several scientific communities for quite some time now. A special issue of the Journal of Classification was devoted to ``Comparison and Consensus of Classifications'' \citep{cluster:Day:1986} almost two decades ago. 
The recent popularization of ensemble methods such as Bayesian model averaging \citep{cluster:Hoeting+Madigan+Raftery:1999}, bagging \citep{cluster:Breiman:1996} and boosting \citep{cluster:Friedman+Hastie+Tibshirani:2000}, typically in a supervised learning context, has also furthered the research interest in using ensemble methods to improve the quality and robustness of cluster solutions. Cluster ensembles can also be utilized to aggregate base results over conditioning or grouping variables in multi-way data, to reuse existing knowledge, and to accommodate the needs of distributed computing, see e.g.\ \cite{cluster:Hornik:2005a} and \cite{cluster:Strehl+Ghosh:2003a} for more information. Package~\pkg{clue} is an extension package for R~\citep{cluster:R:2005} providing a computational environment for creating and analyzing cluster ensembles. In Section~\ref{sec:structures+algorithms}, we describe the underlying data structures, and the functionality for measuring proximity, obtaining consensus clusterings, and ``secondary'' clusterings. Four examples are discussed in Section~\ref{sec:examples}. Section~\ref{sec:outlook} concludes the paper. A previous version of this manuscript was published in the \emph{Journal of Statistical Software} \citep{cluster:Hornik:2005b}. \section{Data structures and algorithms} \label{sec:structures+algorithms} \subsection{Partitions and hierarchies} Representations of clusterings of objects greatly vary across the multitude of methods available in R packages. For example, the class ids (``cluster labels'') for the results of \code{kmeans()} in base package~\pkg{stats}, \code{pam()} in recommended package~\pkg{cluster}~\citep{cluster:Rousseeuw+Struyf+Hubert:2005, cluster:Struyf+Hubert+Rousseeuw:1996}, and \code{Mclust()} in package~\pkg{mclust}~\citep{cluster:Fraley+Raftery+Wehrens:2005, cluster:Fraley+Raftery:2003}, are available as components named \code{cluster}, \code{clustering}, and \code{classification}, respectively, of the R objects returned by these functions. In many cases, the representations inherit from suitable classes. (We note that for versions of R prior to 2.1.0, \code{kmeans()} only returned a ``raw'' (unclassed) result, which was changed alongside the development of \pkg{clue}.) We deal with this heterogeneity of representations by providing getters for the key underlying data, such as the number of objects from which a clustering was obtained, and predicates, e.g.\ for determining whether an R object represents a partition of objects or not. These getters, such as \code{n\_of\_objects()}, and predicates are implemented as S3 generics, so that there is a \emph{conceptual}, but no formal class system underlying the predicates. Support for classed representations can easily be added by providing S3 methods. \subsubsection{Partitions} The partitions considered in \pkg{clue} are possibly soft (``fuzzy'') partitions, where for each object~$i$ and class~$j$ there is a non-negative number~$\mu_{ij}$ quantifying the ``belongingness'' or \emph{membership} of object~$i$ to class~$j$, with $\sum_j \mu_{ij} = 1$. For hard (``crisp'') partitions, all $\mu_{ij}$ are in $\{0, 1\}$. We can gather the $\mu_{ij}$ into the \emph{membership matrix} $M = [\mu_{ij}]$, where rows correspond to objects and columns to classes. The \emph{number of classes} of a partition, computed by function \code{n\_of\_classes()}, is the number of $j$ for which $\mu_{ij} > 0$ for at least one object~$i$.
This may be less than the number of ``available'' classes, corresponding to the number of columns in a membership matrix representing the partition. The predicate functions \code{is.cl\_partition()}, \code{is.cl\_hard\_partition()}, and \code{is.cl\_soft\_partition()} are used to indicate whether R objects represent partitions of objects of the respective kind, with hard partitions as characterized above (all memberships in $\{0, 1\}$). (Hence, ``fuzzy clustering'' algorithms can in principle also give a hard partition.) \code{is.cl\_partition()} and \code{is.cl\_hard\_partition()} are generic functions; \code{is.cl\_soft\_partition()} gives true iff \code{is.cl\_partition()} is true and \code{is.cl\_hard\_partition()} is false. For R objects representing partitions, function \code{cl\_membership()} computes an R object with the membership values, currently always as a dense membership matrix with additional attributes. This is obviously rather inefficient for computations on hard partitions; we are planning to add ``canned'' sparse representations (using the vector of class ids) in future versions. Function \code{as.cl\_membership()} can be used for coercing \dQuote{raw} class ids (given as atomic vectors) or membership values (given as numeric matrices) to membership objects. Function \code{cl\_class\_ids()} determines the class ids of a partition. For soft partitions, the class ids returned are those of the \dQuote{nearest} hard partition obtained by taking the class ids of the (first) maximal membership values. Note that the cardinality of the set of the class ids may be less than the number of classes in the (soft) partition. Many partitioning methods are based on \emph{prototypes} (``centers''). In typical cases, these are points~$p_j$ in the same feature space the measurements~$x_i$ on the objects~$i$ to be partitioned are in, so that one can measure distance between objects and prototypes, and e.g.\ classify objects to their closest prototype. Such partitioning methods can also induce partitions of the entire feature space (rather than ``just'' the set of objects to be partitioned). Currently, package \pkg{clue} has only minimal support for this ``additional'' structure, providing a \code{cl\_prototypes()} generic for extracting the prototypes, and is mostly focused on computations on partitions which are based on their memberships. Many algorithms resulting in partitions of a given set of objects can be taken to induce a partition of the underlying feature space for the measurements on the objects, so that class memberships for ``new'' objects can be obtained from the induced partition. Examples include partitions based on assigning objects to their ``closest'' prototypes, or providing mixture models for the distribution of objects in feature space. Package~\pkg{clue} provides a \code{cl\_predict()} generic for predicting the class memberships of new objects (if possible). Function \code{cl\_fuzziness()} computes softness (fuzziness) measures for (ensembles) of partitions. Built-in measures are the partition coefficient \label{PC} and partition entropy \citep[e.g.,][]{cluster:Bezdek:1981}, with an option to normalize in a way that hard partitions and the ``fuzziest'' possible partition (where all memberships are the same) get fuzziness values of zero and one, respectively. Note that this normalization differs from ``standard'' ones in the literature. 
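By way of illustration, the following small sketch (not run here) applies some of these functions to a base $k$-means partition; the numeric data matrix \code{x} and the choice of three classes are hypothetical.
<<eval=FALSE>>=
## Hypothetical sketch: getters applied to a k-means partition of x.
p <- kmeans(x, 3)
n_of_objects(p)   # number of objects from which p was obtained
n_of_classes(p)   # number of (non-empty) classes
cl_class_ids(p)   # class ids of the partition
cl_membership(p)  # dense membership matrix representation
@ %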
In the sequel, we shall also use the concept of the \emph{co-membership matrix} $C(M) = M M'$, where $'$ denotes matrix transposition, of a partition. For hard partitions, an entry $c_{ij}$ of $C(M)$ is 1 iff the corresponding objects $i$ and $j$ are in the same class, and 0 otherwise. \subsubsection{Hierarchies} The hierarchies considered in \pkg{clue} are \emph{total indexed hierarchies}, also known as \emph{$n$-valued trees}, and hence correspond in a one-to-one manner to \emph{ultrametrics} (distances $u_{ij}$ between pairs of objects $i$ and $j$ which satisfy the ultrametric constraint $u_{ij} \le \max(u_{ik}, u_{jk})$ for all triples $i$, $j$, and $k$). See e.g.~\citet[Pages~69--71]{cluster:Gordon:1999}. Function \code{cl\_ultrametric(x)} computes the associated ultrametric from an R object \code{x} representing a hierarchy of objects. If \code{x} is not an ultrametric, function \code{cophenetic()} in base package~\pkg{stats} is used to obtain the ultrametric (also known as cophenetic) distances from the hierarchy, which in turn by default calls the S3 generic \code{as.hclust()} (also in \pkg{stats}) on the hierarchy. Support for classes which represent hierarchies can thus be added by providing \code{as.hclust()} methods for these classes. In R~2.1.0 or better (again as part of the work on \pkg{clue}), \code{cophenetic} is an S3 generic as well, and one can also more directly provide methods for this if necessary. In addition, there is a generic function \code{as.cl\_ultrametric()} which can be used for coercing \emph{raw} (non-classed) ultrametrics, represented as numeric vectors (of the lower-half entries) or numeric matrices, to ultrametric objects. Finally, the generic predicate function \code{is.cl\_hierarchy()} is used to determine whether an R object represents a hierarchy or not. Ultrametric objects can also be coerced to classes~\class{dendrogram} and \class{hclust} (from base package~\pkg{stats}), and hence can in particular use the \code{plot()} methods for these classes. By default, plotting an ultrametric object uses the plot method for dendrograms. Obtaining a hierarchy on a given set of objects can be thought of as transforming the pairwise dissimilarities between the objects (which typically do not yet satisfy the ultrametric constraints) into an ultrametric. Ideally, this ultrametric should be as close as possible to the dissimilarities. In some important cases, explicit solutions are possible (e.g., ``standard'' hierarchical clustering with single or complete linkage gives the optimal ultrametric dominated by or dominating the dissimilarities, respectively). On the other hand, the problem of finding the closest ultrametric in the least squares sense is known to be NP-hard \citep{cluster:Krivanek+Moravek:1986,cluster:Krivanek:1986}. One important class of heuristics for finding least squares fits is based on iterative projection on convex sets of constraints \citep{cluster:Hubert+Arabie:1995}. \label{SUMT} Function \code{ls\_fit\_ultrametric()} follows \cite{cluster:DeSoete:1986} to use an SUMT \citep[Sequential Unconstrained Minimization Technique;][]{cluster:Fiacco+McCormick:1968} approach in turn simplifying the suggestions in \cite{cluster:Carroll+Pruzansky:1980}. Let $L(u)$ be the function to be minimized over all $u$ in some constrained set $\mathcal{U}$---in our case, $L(u) = \sum (d_{ij}-u_{ij})^2$ is the least squares criterion, and $\mathcal{U}$ is the set of all ultrametrics $u$.
One iteratively minimizes $L(u) + \rho_k P(u)$, where $P(u)$ is a non-negative function penalizing violations of the constraints such that $P(u)$ is zero iff $u \in \mathcal{U}$. The $\rho$ values are increased according to the rule $\rho_{k+1} = q \rho_k$ for some constant $q > 1$, until convergence is obtained in the sense that e.g.\ the Euclidean distance between successive solutions $u_k$ and $u_{k+1}$ is small enough. Optionally, the final $u_k$ is then suitably projected onto $\mathcal{U}$. For \code{ls\_fit\_ultrametric()}, we obtain the starting value $u_0$ by \dQuote{random shaking} of the given dissimilarity object, and use the penalty function $P(u) = \sum_{\Omega} (u_{ij} - u_{jk}) ^ 2$, where $\Omega$ contains all triples $i, j, k$ for which $u_{ij} \le \min(u_{ik}, u_{jk})$ and $u_{ik} \ne u_{jk}$, i.e., for which $u$ violates the ultrametric constraints. The unconstrained minimizations are carried out using either \code{optim()} or \code{nlm()} in base package~\pkg{stats}, with analytic gradients given in \cite{cluster:Carroll+Pruzansky:1980}. This ``works'', even though we note that $P$ is not even a continuous function, which seems to have gone unnoticed in the literature! (Consider a dissimilarity $u$ for which $u_{ij} = u_{ik} < u_{jk}$ for some $i, j, k$ and define $u(\delta)$ by changing the $u_{ij}$ to $u_{ij} + \delta$. For $u$, both $(i,j,k)$ and $(j,i,k)$ are in the violation set $\Omega$, whereas for all $\delta$ sufficiently small, only $(j,i,k)$ is in the violation set for $u(\delta)$. Hence, $\lim_{\delta\to 0} P(u(\delta)) = P(u) + (u_{ij} - u_{ik})^2$. This shows that $P$ is discontinuous at all non-constant $u$ with duplicated entries. On the other hand, it is continuously differentiable at all $u$ with unique entries.) Hence, we need to turn off checking analytical gradients when using \code{nlm()} for minimization. The default optimization using conjugate gradients should work reasonably well for medium to large size problems. For \dQuote{small} ones, using \code{nlm()} is usually faster. Note that the number of ultrametric constraints is of the order $n^3$, suggesting the use of the SUMT approach rather than \code{constrOptim()} in \pkg{stats}. It should be noted that the SUMT approach is a heuristic which cannot be guaranteed to find the global minimum. Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the base algorithm. \subsubsection{Extensibility} The methods provided in package~\pkg{clue} handle the partitions and hierarchies obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}~\citep{cluster:Hornik+Hothorn+Karatzoglou:2006}, \pkg{cba}~\citep{cluster:Buchta+Hahsler:2005}, \pkg{cclust}~\citep{cluster:Dimitriadou:2005}, \pkg{cluster}, \pkg{e1071}~\citep{cluster:Dimitriadou+Hornik+Leisch:2005}, \pkg{flexclust}~\citep{cluster:Leisch:2006a}, \pkg{flexmix}~\citep{cluster:Leisch:2004}, \pkg{kernlab}~\citep{cluster:Karatzoglou+Smola+Hornik:2004}, and \pkg{mclust} (and of course, \pkg{clue} itself). Extending support to other packages is straightforward, provided that clusterings are instances of classes. Suppose e.g.\ that a package has a function \code{glvq()} for ``generalized'' (i.e., non-Euclidean) Learning Vector Quantization which returns an object of class~\class{glvq}, in turn being a list with component \code{class\_ids} containing the class ids.
To integrate this into the \pkg{clue} framework, all that is necessary is to provide the following methods. <<>>= cl_class_ids.glvq <- function(x) as.cl_class_ids(x$class_ids) is.cl_partition.glvq <- function(x) TRUE is.cl_hard_partition.glvq <- function(x) TRUE @ % $ \subsection{Cluster ensembles} Cluster ensembles are realized as lists of clusterings with additional class information. All clusterings in an ensemble must be of the same ``kind'' (i.e., either all partitions as known to \code{is.cl\_partition()}, or all hierarchies as known to \code{is.cl\_hierarchy()}, respectively), and have the same number of objects. If all clusterings are partitions, the list realizing the ensemble has class~\class{cl\_partition\_ensemble} and inherits from \class{cl\_ensemble}; if all clusterings are hierarchies, it has class~\class{cl\_hierarchy\_ensemble} and inherits from \class{cl\_ensemble}. Empty ensembles cannot be categorized according to the kind of clusterings they contain, and hence only have class~\class{cl\_ensemble}. Function \code{cl\_ensemble()} creates a cluster ensemble object from clusterings given either one-by-one, or as a list passed to the \code{list} argument. As unclassed lists could be used to represent single clusterings (in particular for results from \code{kmeans()} in versions of R prior to 2.1.0), we prefer not to assume that an unnamed given list is a list of clusterings. \code{cl\_ensemble()} verifies that all given clusterings are of the same kind, and all have the same number of objects. (By the notion of cluster ensembles, we should in principle verify that the clusterings come from the \emph{same} objects, which of course is not always possible.) The list representation makes it possible to use \code{lapply()} for computations on the individual clusterings in (i.e., the components of) a cluster ensemble. Available methods for cluster ensembles include those for subscripting, \code{c()}, \code{rep()}, \code{print()}, and \code{unique()}, where the last is based on a \code{unique()} method for lists added in R~2.1.1, and makes it possible to find unique and duplicated elements in cluster ensembles. The elements of the ensemble can be tabulated using \code{cl\_tabulate()}. Function \code{cl\_boot()} generates cluster ensembles with bootstrap replicates of the results of applying a \dQuote{base} clustering algorithm to a given data set. Currently, this is a rather simple-minded function with limited applicability, and mostly useful for studying the effect of (uncontrolled) random initializations of fixed-point partitioning algorithms such as \code{kmeans()} or \code{cmeans()} in package~\pkg{e1071}. To study the effect of varying control parameters or explicitly providing random starting values, the respective cluster ensemble has to be generated explicitly (most conveniently by using \code{replicate()} to create a list \code{lst} of suitable instances of clusterings obtained by the base algorithm, and using \code{cl\_ensemble(list = lst)} to create the ensemble). Resampling the training data is possible for base algorithms which can predict the class memberships of new data using \code{cl\_predict} (e.g., by classifying the out-of-bag data to their closest prototype). 
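Returning to the explicit construction just described, a small sketch (not run here; the data matrix \code{x} and the use of $k$-means with three classes are hypothetical) is:
<<eval=FALSE>>=
## Hypothetical sketch: an ensemble of 20 k-means partitions of x
## obtained from (uncontrolled) random initializations.
lst <- replicate(20, kmeans(x, 3), simplify = FALSE)
kens <- cl_ensemble(list = lst)
## Inspect duplicated solutions and tabulate the elements.
unique(kens)
cl_tabulate(kens)
@ %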
In fact, we believe that for unsupervised learning methods such as clustering, \emph{reweighting} is conceptually superior to resampling, and have therefore recently enhanced package~\pkg{e1071} to provide an implementation of weighted fuzzy $c$-means, and package~\pkg{flexclust} contains an implementation of weighted $k$-means. We are currently experimenting with interfaces for providing ``direct'' support for reweighting via \code{cl\_boot()}. \subsection{Cluster proximities} \subsubsection{Principles} Computing dissimilarities and similarities (``agreements'') between clusterings of the same objects is a key ingredient in the analysis of cluster ensembles. The ``standard'' data structures available for such proximity data (measures of similarity or dissimilarity) are classes~\class{dist} and \class{dissimilarity} in package~\pkg{cluster} (which basically, but not strictly, extends \class{dist}), and are both not entirely suited to our needs. First, they are confined to \emph{symmetric} dissimilarity data. Second, they do not provide enough reflectance. We also note that the Bioconductor package~\pkg{graph}~\citep{cluster:Gentleman+Whalen:2005} contains an efficient subscript method for objects of class~\class{dist}, but returns a ``raw'' matrix for row/column subscripting. For package~\pkg{clue}, we use the following approach. There are classes for symmetric and (possibly) non-symmetric proximity data (\class{cl\_proximity} and \class{cl\_cross\_proximity}), which, in addition to holding the numeric data, also contain a description ``slot'' (attribute), currently a character string, as a first approximation to providing more reflectance. Internally, symmetric proximity objects store the lower diagonal proximity values in a numeric vector (in row-major order), i.e., the same way as objects of class~\class{dist}; a \code{self} attribute can be used for diagonal values (in case some of these are non-zero). Symmetric proximity objects can be coerced to dense matrices using \code{as.matrix()}. It is possible to use 2-index matrix-style subscripting for symmetric proximity objects; unless this uses identical row and column indices, it results in a non-symmetric proximity object. This approach ``propagates'' to classes for symmetric and (possibly) non-symmetric cluster dissimilarity and agreement data (e.g., \class{cl\_dissimilarity} and \class{cl\_cross\_dissimilarity} for dissimilarity data), which extend the respective proximity classes. Ultrametric objects are implemented as symmetric proximity objects with a dissimilarity interpretation so that self-proximities are zero, and inherit from classes~\class{cl\_dissimilarity} and \class{cl\_proximity}. The reflectance provided in this way is far from optimal. For example, if \code{s} is a similarity object (with cluster agreements), \code{1 - s} is a dissimilarity one, but the description is preserved unchanged. This issue could be addressed by providing high-level functions for transforming proximities. \label{synopsis} Cluster dissimilarities are computed via \code{cl\_dissimilarity()} with synopsis \code{cl\_dissimilarity(x, y = NULL, method = "euclidean")}, where \code{x} and \code{y} are cluster ensemble objects or coercible to such, or \code{NULL} (\code{y} only). If \code{y} is \code{NULL}, the return value is an object of class~\class{cl\_dissimilarity} which contains the dissimilarities between all pairs of clusterings in \code{x}.
Otherwise, it is an object of class~\class{cl\_cross\_dissimilarity} with the dissimilarities between the clusterings in \code{x} and the clusterings in \code{y}. Formal argument \code{method} is either a character string specifying one of the built-in methods for computing dissimilarity, or a function to be taken as a user-defined method, making it reasonably straightforward to add methods. Function \code{cl\_agreement()} has the same interface as \code{cl\_dissimilarity()}, returning cluster similarity objects with respective classes~\class{cl\_agreement} and \class{cl\_cross\_agreement}. Built-in methods for computing dissimilarities may coincide (in which case they are transforms of each other), but do not necessarily do so, as there typically are no canonical transformations. E.g., according to needs and scientific community, agreements might be transformed to dissimilarities via $d = - \log(s)$ or the square root thereof \citep[e.g.,][]{cluster:Strehl+Ghosh:2003b}, or via $d = 1 - s$. \subsubsection{Partition proximities} When assessing agreement or dissimilarity of partitions, one needs to consider that the class ids may be permuted arbitrarily without changing the underlying partitions. For membership matrices~$M$, permuting class ids amounts to replacing $M$ by $M \Pi$, where $\Pi$ is a suitable permutation matrix. We note that the co-membership matrix $C(M) = MM'$ is unchanged by these transformations; hence, proximity measures based on co-occurrences, such as the Katz-Powell \citep{cluster:Katz+Powell:1953} or Rand \citep{cluster:Rand:1971} indices, do not explicitly need to adjust for possible re-labeling. The same is true for measures based on the ``confusion matrix'' $M' \tilde{M}$ of two membership matrices $M$ and $\tilde{M}$ which are invariant under permutations of rows and columns, such as the Normalized Mutual Information (NMI) measure introduced in \cite{cluster:Strehl+Ghosh:2003a}. Other proximity measures need to find permutations so that the classes are optimally matched, which of course in general requires exhaustive search through all $k!$ possible permutations, where $k$ is the (common) number of classes in the partitions, and thus will typically be prohibitively expensive. Fortunately, in some important cases, optimal matchings can be determined very efficiently. We explain this in detail for ``Euclidean'' partition dissimilarity and agreement (which in fact is the default measure used by \code{cl\_dissimilarity()} and \code{cl\_agreement()}). Euclidean partition dissimilarity \citep{cluster:Dimitriadou+Weingessel+Hornik:2002} is defined as \begin{displaymath} d(M, \tilde{M}) = \min\nolimits_\Pi \| M - \tilde{M} \Pi \| \end{displaymath} where the minimum is taken over all permutation matrices~$\Pi$, $\|\cdot\|$ is the Frobenius norm (so that $\|Y\|^2 = \trace(Y'Y)$), and $n$ is the (common) number of objects in the partitions. As $\| M - \tilde{M} \Pi \|^2 = \trace(M'M) - 2 \trace(M'\tilde{M}\Pi) + \trace(\Pi'\tilde{M}'\tilde{M}\Pi) = \trace(M'M) - 2 \trace(M'\tilde{M}\Pi) + \trace(\tilde{M}'\tilde{M})$, we see that minimizing $\| M - \tilde{M} \Pi \|^2$ is equivalent to maximizing $\trace(M'\tilde{M}\Pi) = \sum_{i,k}{\mu_{ik}\tilde{\mu}}_{i,\pi(k)}$, which for hard partitions is the number of objects with the same label in the partitions given by $M$ and $\tilde{M}\Pi$. Finding the optimal $\Pi$ is thus recognized as an instance of the \emph{linear sum assignment problem} (LSAP, also known as the weighted bipartite graph matching problem). 
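To make this reduction concrete, the following is a small sketch (not run here) for two hard partitions given by hypothetical class id vectors; function \code{solve\_LSAP()} in \pkg{clue} solves such assignment problems.
<<eval=FALSE>>=
## Hypothetical sketch: optimal class matching of two hard partitions.
M <- as.cl_membership(c(1, 1, 2, 2, 3))
N <- as.cl_membership(c(2, 2, 3, 3, 1))
## Maximize trace(M' N Pi) over all permutation matrices Pi ...
p <- solve_LSAP(crossprod(M, N), maximum = TRUE)
## ... and evaluate the minimal Frobenius norm distance.
sqrt(sum((M - N[, p]) ^ 2))
@ %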
The LSAP can be solved by linear programming, e.g., using Simplex-style primal algorithms as done by function~\code{lp.assign()} in package~\pkg{lpSolve}~\citep{cluster:Buttrey:2005}, but primal-dual algorithms such as the so-called Hungarian method can be shown to find the optimum in time $O(k^3)$ \citep[e.g.,][]{cluster:Papadimitriou+Steiglitz:1982}. Available published implementations include TOMS 548 \citep{cluster:Carpaneto+Toth:1980}, which however is restricted to integer weights and $k < 131$. One can also transform the LSAP into a network flow problem, and use e.g.~RELAX-IV \citep{cluster:Bertsekas+Tseng:1994} for solving this, as is done in package~\pkg{optmatch}~\citep{cluster:Hansen:2005}. In package~\pkg{clue}, we use an efficient C implementation of the Hungarian algorithm kindly provided to us by Walter B\"ohm, which has been found to perform very well across a wide range of problem sizes. \cite{cluster:Gordon+Vichi:2001} use a variant of Euclidean dissimilarity (``GV1 dissimilarity'') which is based on the sum of the squared differences of the memberships of matched (non-empty) classes only, discarding the unmatched ones (see their Example~2). This results in a measure which is discontinuous over the space of soft partitions with arbitrary numbers of classes. The partition agreement measures ``angle'' and ``diag'' (maximal cosine of angle between the memberships, and maximal co-classification rate, where both maxima are taken over all column permutations of the membership matrices) are based on solving the same LSAP as for Euclidean dissimilarity. Finally, Manhattan partition dissimilarity is defined as the minimal sum of the absolute differences of $M$ and all column permutations of $\tilde{M}$, and can again be computed efficiently by solving an LSAP. For hard partitions, both Manhattan and squared Euclidean dissimilarity give twice the \emph{transfer distance} \citep{cluster:Charon+Denoeud+Guenoche:2006}, which is the minimum number of objects that must be removed so that the implied partitions (restrictions to the remaining objects) are identical. This is also known as the \emph{$R$-metric} in \cite{cluster:Day:1981}, i.e., the number of augmentations and removals of single objects needed to transform one partition into the other, and the \emph{partition-distance} in \cite{cluster:Gusfield:2002}. Note when assessing proximity that agreements for soft partitions are always (and quite often considerably) lower than the agreements for the corresponding nearest hard partitions, unless the agreement measures are based on the latter anyway (as currently done for Rand, Katz-Powell, and NMI). Package~\pkg{clue} provides additional agreement measures, such as the Jaccard and Fowlkes-Mallows \citep[quite often incorrectly attributed to \cite{cluster:Wallace:1983}]{cluster:Fowlkes+Mallows:1983a} indices, and dissimilarity measures such as the ``symdiff'' and Rand distances (the latter is proportional to the metric of \cite{cluster:Mirkin:1996}) and the metrics discussed in \cite{cluster:Boorman+Arabie:1972}. One could easily add more proximity measures, such as the ``Variation of Information'' \citep{cluster:Meila:2003}. However, all these measures are rigorously defined for hard partitions only. To see why extensions to soft partitions are far from straightforward, consider e.g.\ measures based on the confusion matrix. Its entries count the cardinality of certain intersections of sets.
\label{fuzzy} In a fuzzy context for soft partitions, a natural generalization would be using fuzzy cardinalities (i.e., sums of memberships values) of fuzzy intersections instead. There are many possible choices for the latter, with the product of the membership values (corresponding to employing the confusion matrix also in the fuzzy case) one of them, but the minimum instead of the product being the ``usual'' choice. A similar point can be made for co-occurrences of soft memberships. We are not aware of systematic investigations of these extension issues. \subsubsection{Hierarchy proximities} Available built-in dissimilarity measures for hierarchies include \emph{Euclidean} (again, the default measure used by \code{cl\_dissimilarity()}) and Manhattan dissimilarity, which are simply the Euclidean (square root of the sum of squared differences) and Manhattan (sum of the absolute differences) dissimilarities between the associated ultrametrics. Cophenetic dissimilarity is defined as $1 - c^2$, where $c$ is the cophenetic correlation coefficient \citep{cluster:Sokal+Rohlf:1962}, i.e., the Pearson product-moment correlation between the ultrametrics. Gamma dissimilarity is the rate of inversions between the associated ultrametrics $u$ and $v$ (i.e., the rate of pairs $(i,j)$ and $(k,l)$ for which $u_{ij} < u_{kl}$ and $v_{ij} > v_{kl}$). This measure is a linear transformation of Kruskal's~$\gamma$. Finally, symdiff dissimilarity is the cardinality of the symmetric set difference of the sets of classes (hierarchies in the strict sense) induced by the dendrograms. Associated agreement measures are obtained by suitable transformations of the dissimilarities~$d$; for Euclidean proximities, we prefer to use $1 / (1 + d)$ rather than e.g.\ $\exp(-d)$. One should note that whereas cophenetic and gamma dissimilarities are invariant to linear transformations, Euclidean and Manhattan ones are not. Hence, if only the relative ``structure'' of the dendrograms is of interest, these dissimilarities should only be used after transforming the ultrametrics to a common range of values (e.g., to $[0,1]$). \subsection{Consensus clusterings} Consensus clusterings ``synthesize'' the information in the elements of a cluster ensemble into a single clustering. There are three main approaches to obtaining consensus clusterings \citep{cluster:Hornik:2005a,cluster:Gordon+Vichi:2001}: in the \emph{constructive} approach, one specifies a way to construct a consensus clustering. In the \emph{axiomatic} approach, emphasis is on the investigation of existence and uniqueness of consensus clusterings characterized axiomatically. The \emph{optimization} approach formalizes the natural idea of describing consensus clusterings as the ones which ``optimally represent the ensemble'' by providing a criterion to be optimized over a suitable set $\mathcal{C}$ of possible consensus clusterings. If $d$ is a dissimilarity measure and $C_1, \ldots, C_B$ are the elements of the ensemble, one can e.g.\ look for solutions of the problem \begin{displaymath} \sum\nolimits_{b=1}^B w_b d(C, C_b) ^ p \Rightarrow \min\nolimits_{C \in \mathcal{C}}, \end{displaymath} for some $p \ge 0$, i.e., as clusterings~$C^*$ minimizing weighted average dissimilarity powers of order~$p$. Analogously, if a similarity measure is given, one can look for clusterings maximizing weighted average similarity powers. 
Following \cite{cluster:Gordon+Vichi:1998}, an above $C^*$ is referred to as (weighted) \emph{median} or \emph{medoid} clustering if $p = 1$ and the optimum is sought over the set of all possible base clusterings, or the set $\{ C_1, \ldots, C_B \}$ of the base clusterings, respectively. For $p = 2$, we have \emph{least squares} consensus clusterings (generalized means). For computing consensus clusterings, package~\pkg{clue} provides function \code{cl\_consensus()} with synopsis \code{cl\_consensus(x, method = NULL, weights = 1, control = list())}. This allows (similar to the functions for computing cluster proximities, see Section~\ref{synopsis} on Page~\pageref{synopsis}) argument \code{method} to be a character string specifying one of the built-in methods discussed below, or a function to be taken as a user-defined method (taking an ensemble, the case weights, and a list of control parameters as its arguments), again making it reasonably straightforward to add methods. In addition, function~\code{cl\_medoid()} can be used for obtaining medoid partitions (using, in principle, arbitrary dissimilarities). Modulo possible differences in the case of ties, this gives the same results as (the medoid obtained by) \code{pam()} in package~\pkg{cluster}. If all elements of the ensemble are partitions, package~\pkg{clue} provides algorithms for computing soft least squares consensus partitions for weighted Euclidean, GV1 and co-membership dissimilarities. Let $M_1, \ldots, M_B$ and $M$ denote the membership matrices of the elements of the ensemble and their sought least squares consensus partition, respectively. For Euclidean dissimilarity, we need to find \begin{displaymath} \sum_b w_b \min\nolimits_{\Pi_b} \| M - M_b \Pi_b \|^2 \Rightarrow \min\nolimits_M \end{displaymath} over all membership matrices (i.e., stochastic matrices) $M$, or equivalently, \begin{displaymath} \sum_b w_b \| M - M_b \Pi_b \|^2 \Rightarrow \min\nolimits_{M, \Pi_1, \ldots, \Pi_B} \end{displaymath} over all $M$ and permutation matrices $\Pi_1, \ldots, \Pi_B$. Now fix the $\Pi_b$ and let $\bar{M} = s^{-1} \sum_b w_b M_b \Pi_b$ be the weighted average of the $M_b \Pi_b$, where $s = \sum_b w_b$. Then \begin{eqnarray*} \lefteqn{\sum_b w_b \| M - M_b \Pi_b \|^2} \\ &=& \sum_b w_b (\|M\|^2 - 2 \trace(M' M_b \Pi_b) + \|M_b\Pi_b\|^2) \\ &=& s \|M\|^2 - 2 s \trace(M' \bar{M}) + \sum_b w_b \|M_b\|^2 \\ &=& s (\|M - \bar{M}\|^2) + \sum_b w_b \|M_b\|^2 - s \|\bar{M}\|^2 \end{eqnarray*} Thus, as already observed in \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} and \cite{cluster:Gordon+Vichi:2001}, for fixed permutations $\Pi_b$ the optimal soft $M$ is given by $\bar{M}$. The optimal permutations can be found by minimizing $- s \|\bar{M}\|^2$, or equivalently, by maximizing \begin{displaymath} s^2 \|\bar{M}\|^2 = \sum_{\beta, b} w_\beta w_b \trace(\Pi_\beta'M_\beta'M_b\Pi_b). \end{displaymath} With $U_{\beta,b} = w_\beta w_b M_\beta' M_b$ we can rewrite the above as \begin{displaymath} \sum_{\beta, b} w_\beta w_b \trace(\Pi_\beta'M_\beta'M_b\Pi_b) = \sum_{\beta,b} \sum_{j=1}^k [U_{\beta,b}]_{\pi_\beta(j), \pi_b(j)} =: \sum_{j=1}^k c_{\pi_1(j), \ldots, \pi_B(j)} \end{displaymath} This is an instance of the \emph{multi-dimensional assignment problem} (MAP), which, contrary to the LSAP, is known to be NP-hard \citep[e.g., via reduction to 3-DIMENSIONAL MATCHING,][]{cluster:Garey+Johnson:1979}, and can e.g.\ be approached using randomized parallel algorithms \citep{cluster:Oliveira+Pardalos:2004}. 
Branch-and-bound approaches suggested in the literature \citep[e.g.,][]{cluster:Grundel+Oliveira+Pardalos:2005} are unfortunately computationally infeasible for ``typical'' sizes of cluster ensembles ($B \ge 20$, maybe even in the hundreds). Package~\pkg{clue} provides two heuristics for (approximately) finding the soft least squares consensus partition for Euclidean dissimilarity. Method \code{"DWH"} of function \code{cl\_consensus()} is an extension of the greedy algorithm in \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} which is based on a single forward pass through the ensemble which in each step chooses the ``locally'' optimal $\Pi$. Starting with $\tilde{M}_1 = M_1$, $\tilde{M}_b$ is obtained from $\tilde{M}_{b-1}$ by optimally matching $M_b \Pi_b$ to this, and taking a weighted average of $\tilde{M}_{b-1}$ and $M_b \Pi_b$ in a way that $\tilde{M}_b$ is the weighted average of the first~$b$ $M_\beta \Pi_\beta$. This simple approach could be further enhanced via back-fitting or several passes, in essence resulting in an ``on-line'' version of method \code{"SE"}. This, in turn, is a fixed-point algorithm, which iterates between updating $M$ as the weighted average of the current $M_b \Pi_b$, and determining the $\Pi_b$ by optimally matching the current $M$ to the individual $M_b$. Finally, method \code{"GV1"} implements the fixed-point algorithm for the ``first model'' in \cite{cluster:Gordon+Vichi:2001}, which gives least squares consensus partitions for GV1 dissimilarity. In the above, we implicitly assumed that all partitions in the ensemble as well as the sought consensus partition have the same number of classes. The more general case can be dealt with through suitable ``projection'' devices. When using co-membership dissimilarity, the least squares consensus partition is determined by minimizing \begin{eqnarray*} \lefteqn{\sum_b w_b \|MM' - M_bM_b'\|^2} \\ &=& s \|MM' - \bar{C}\|^2 + \sum_b w_b \|M_bM_b'\|^2 - s \|\bar{C}\|^2 \end{eqnarray*} over all membership matrices~$M$, where now $\bar{C} = s^{-1} \sum_b C(M_b) = s^{-1} \sum_b M_bM_b'$ is the weighted average co-membership matrix of the ensemble. This corresponds to the ``third model'' in \cite{cluster:Gordon+Vichi:2001}. Method \code{"GV3"} of function \code{cl\_consensus()} provides a SUMT approach (see Section~\ref{SUMT} on Page~\pageref{SUMT}) for finding the minimum. We note that this strategy could more generally be applied to consensus problems of the form \begin{displaymath} \sum_b w_b \|\Phi(M) - \Phi(M_b)\|^2 \Rightarrow \min\nolimits_M, \end{displaymath} which are equivalent to minimizing $\|\Phi(B) - \bar{\Phi}\|^2$, with $\bar{\Phi}$ the weighted average of the $\Phi(M_b)$. This includes e.g.\ the case where generalized co-memberships are defined by taking the ``standard'' fuzzy intersection of co-incidences, as discussed in Section~\ref{fuzzy} on Page~\pageref{fuzzy}. Package~\pkg{clue} currently does not provide algorithms for obtaining \emph{hard} consensus partitions, as e.g.\ done in \cite{cluster:Krieger+Green:1999} using Rand proximity. It seems ``natural'' to extend the methods discussed above to include a constraint on softness, e.g., on the partition coefficient PC (see Section~\ref{PC} on Page~\pageref{PC}). 
For Euclidean dissimilarity, straightforward Lagrangian computations show that the constrained minima are of the form $\bar{M}(\alpha) = \alpha \bar{M} + (1 - \alpha) E$, where $E$ is the ``maximally soft'' membership with all entries equal to $1/k$, $\bar{M}$ is again the weighted average of the $M_b\Pi_b$ with the $\Pi_b$ solving the underlying MAP, and $\alpha$ is chosen such that $PC(\bar{M}(\alpha))$ equals a prescribed value. As $\alpha$ increases (even beyond one), softness of the $\bar{M}(\alpha)$ decreases. However, for $\alpha^* > 1 / (1 - k\mu^*)$, where $\mu^*$ is the minimum of the entries of $\bar{M}$, the $\bar{M}(\alpha)$ have negative entries, and are no longer feasible membership matrices. Obviously, the non-negativity constraints for the $\bar{M}(\alpha)$ eventually put restrictions on the admissible $\Pi_b$ in the underlying MAP. Thus, such a simple relaxation approach to obtaining optimal hard partitions is not feasible. For ensembles of hierarchies, \code{cl\_consensus()} provides a built-in method (\code{"cophenetic"}) for approximately minimizing average weighted squared Euclidean dissimilarity \begin{displaymath} \sum_b w_b \| U - U_b \|^2 \Rightarrow \min\nolimits_U \end{displaymath} over all ultrametrics~$U$, where $U_1, \ldots, U_B$ are the ultrametrics corresponding to the elements of the ensemble. This is of course equivalent to minimizing $\| U - \bar{U} \|^2$, where $\bar{U} = s^{-1} \sum_b w_b U_b$ is the weighted average of the $U_b$. The SUMT approach provided by function \code{ls\_fit\_ultrametric()} (see Section~\ref{SUMT} on Page~\pageref{SUMT}) is employed for finding the sought weighted least squares consensus hierarchy. In addition, method \code{"majority"} obtains a consensus hierarchy from an extension of the majority consensus tree of \cite{cluster:Margush+McMorris:1981}, which minimizes $L(U) = \sum_b w_b d(U_b, U)$ over all ultrametrics~$U$, where $d$ is the symmetric difference dissimilarity. Clearly, the available methods use heuristics for solving hard optimization problems, and cannot be guaranteed to find a global optimum. Standard practice would recommend to use the best solution found in ``sufficiently many'' replications of the methods. Alternative recent approaches to obtaining consensus partitions include ``Bagged Clustering'' \citep[provided by \code{bclust()} in package~\pkg{e1071}]{cluster:Leisch:1999}, the ``evidence accumulation'' framework of \cite{cluster:Fred+Jain:2002}, the NMI optimization and graph-partitioning methods in \cite{cluster:Strehl+Ghosh:2003a}, ``Bagged Clustering'' as in \cite{cluster:Dudoit+Fridlyand:2003}, and the hybrid bipartite graph formulation of \cite{cluster:Fern+Brodley:2004}. Typically, these approaches are constructive, and can easily be implemented based on the infrastructure provided by package~\pkg{clue}. Evidence accumulation amounts to standard hierarchical clustering of the average co-membership matrix. Procedure~BagClust1 of \cite{cluster:Dudoit+Fridlyand:2003} amounts to computing $B^{-1} \sum_b M_b\Pi_b$, where each $\Pi_b$ is determined by optimal Euclidean matching of $M_b$ to a fixed reference membership $M_0$. In the corresponding ``Bagged Clustering'' framework, $M_0$ and the $M_b$ are obtained by applying the base clusterer to the original data set and bootstrap samples from it, respectively. This is implemented as method \code{"DFBC1"} of \code{cl\_bag()} in package~\pkg{clue}. 
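By way of illustration, a hedged sketch of such a call (not run here; the data matrix \code{x} is hypothetical, and the argument names \code{B} and \code{k} for the numbers of bootstrap replicates and classes are assumptions):
<<eval=FALSE>>=
## Hypothetical sketch (argument names B and k are assumptions):
## BagClust1-style aggregation of 50 bootstrap k-means runs.
cl_bag(x, B = 50, k = 3, method = "DFBC1")
@ %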
Finally, the approach of \cite{cluster:Fern+Brodley:2004} solves an LSAP for an asymmetric cost matrix based on object-by-all-classes incidences. \subsection{Cluster partitions} To investigate the ``structure'' in a cluster ensemble, an obvious idea is to start clustering the clusterings in the ensemble, resulting in ``secondary'' clusterings \citep{cluster:Gordon+Vichi:1998, cluster:Gordon:1999}. This can e.g.\ be performed by using \code{cl\_dissimilarity()} (or \code{cl\_agreement()}) to compute a dissimilarity matrix for the ensemble, and feed this into a dissimilarity-based clustering algorithm (such as \code{pam()} in package~\pkg{cluster} or \code{hclust()} in package~\pkg{stats}). (One can even use \code{cutree()} to obtain hard partitions from hierarchies thus obtained.) If prototypes (``typical clusterings'') are desired for partitions of clusterings, they can be determined post-hoc by finding suitable consensus clusterings in the classes of the partition, e.g., using \code{cl\_consensus()} or \code{cl\_medoid()}. Package~\pkg{clue} additionally provides \code{cl\_pclust()} for direct prototype-based partitioning based on minimizing criterion functions of the form $\sum w_b u_{bj}^m d(x_b, p_j)^e$, the sum of the case-weighted membership-weighted $e$-th powers of the dissimilarities between the elements~$x_b$ of the ensemble and the prototypes~$p_j$, for suitable dissimilarities~$d$ and exponents~$e$. (The underlying feature spaces are that of membership matrices and ultrametrics, respectively, for partitions and hierarchies.) Parameter~$m$ must not be less than one and controls the softness of the obtained partitions, corresponding to the \dQuote{fuzzification parameter} of the fuzzy $c$-means algorithm. For $m = 1$, a generalization of the Lloyd-Forgy variant \citep{cluster:Lloyd:1957, cluster:Forgy:1965, cluster:Lloyd:1982} of the $k$-means algorithm is used, which iterates between reclassifying objects to their closest prototypes, and computing new prototypes as consensus clusterings for the classes. \citet{cluster:Gaul+Schader:1988} introduced this procedure for \dQuote{Clusterwise Aggregation of Relations} (with the same domains), containing equivalence relations, i.e., hard partitions, as a special case. For $m > 1$, a generalization of the fuzzy $c$-means recipe \citep[e.g.,][]{cluster:Bezdek:1981} is used, which alternates between computing optimal memberships for fixed prototypes, and computing new prototypes as the suitably weighted consensus clusterings for the classes. This procedure is repeated until convergence occurs, or the maximal number of iterations is reached. Consensus clusterings are computed using (one of the methods provided by) \code{cl\_consensus}, with dissimilarities~$d$ and exponent~$e$ implied by method employed, and obtained via a registration mechanism. The default methods compute Least Squares Euclidean consensus clusterings, i.e., use Euclidean dissimilarity~$d$ and $e = 2$. \section{Examples} \label{sec:examples} \subsection{Cassini data} \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} and \cite{cluster:Leisch:1999} use Cassini data sets to illustrate how e.g.\ suitable aggregation of base $k$-means results can reveal underlying non-convex structure which cannot be found by the base algorithm. 
Such data sets contain points in 2-dimensional space drawn from the uniform distribution on 3 structures, with the two ``outer'' ones banana-shaped and the ``middle'' one a circle, and can be obtained by function~\code{mlbench.cassini()} in package~\pkg{mlbench}~\citep{cluster:Leisch+Dimitriadou:2005}. Package~\pkg{clue} contains the data sets \code{Cassini} and \code{CKME}, which are an instance of a 1000-point Cassini data set, and a cluster ensemble of 50 $k$-means partitions of the data set into three classes, respectively. The data set is shown in Figure~\ref{fig:Cassini}. <>= data("Cassini") plot(Cassini$x, col = as.integer(Cassini$classes), xlab = "", ylab = "") @ % $ \begin{figure} \centering <>= <> @ % \caption{The Cassini data set.} \label{fig:Cassini} \end{figure} Figure~\ref{fig:CKME} gives a dendrogram of the Euclidean dissimilarities of the elements of the $k$-means ensemble. <>= data("CKME") plot(hclust(cl_dissimilarity(CKME)), labels = FALSE) @ % \begin{figure} \centering <>= <> @ % \caption{A dendrogram of the Euclidean dissimilarities of 50 $k$-means partitions of the Cassini data into 3 classes.} \label{fig:CKME} \end{figure} We can see that there are large groups of essentially identical $k$-means solutions. We can gain more insight by inspecting representatives of these three groups, or by computing the medoid of the ensemble <<>>= m1 <- cl_medoid(CKME) table(Medoid = cl_class_ids(m1), "True Classes" = Cassini$classes) @ % $ and inspecting it (Figure~\ref{fig:Cassini-medoid}): <>= plot(Cassini$x, col = cl_class_ids(m1), xlab = "", ylab = "") @ % $ \begin{figure} \centering <>= <> @ % \caption{Medoid of the Cassini $k$-means ensemble.} \label{fig:Cassini-medoid} \end{figure} Flipping this solution top-down gives a second ``typical'' partition. We see that the $k$-means base clusterers cannot resolve the underlying non-convex structure. For the least squares consensus of the ensemble, we obtain <<>>= set.seed(1234) m2 <- cl_consensus(CKME) @ % where here and below we set the random seed for reproducibility, noting that one should really use several replicates of the consensus heuristic. This consensus partition has confusion matrix <<>>= table(Consensus = cl_class_ids(m2), "True Classes" = Cassini$classes) @ % $ and class details as displayed in Figure~\ref{fig:Cassini-mean}: <>= plot(Cassini$x, col = cl_class_ids(m2), xlab = "", ylab = "") @ % $ \begin{figure} \centering <>= <> @ % \caption{Least Squares Consensus of the Cassini $k$-means ensemble.} \label{fig:Cassini-mean} \end{figure} This has drastically improved performance, and almost perfect recovery of the two outer shapes. In fact, \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} show that almost perfect classification can be obtained by suitable combinations of different base clusterers ($k$-means, fuzzy $c$-means, and unsupervised fuzzy competitive learning). \subsection{Gordon-Vichi macroeconomic data} \citet[Table~1]{cluster:Gordon+Vichi:2001} provide soft partitions of 21 countries based on macroeconomic data for the years 1975, 1980, 1985, 1990, and 1995. These partitions were obtained using fuzzy $c$-means on measurements of the following variables: the annual per capita gross domestic product (GDP) in USD (converted to 1987 prices); the percentage of GDP provided by agriculture; the percentage of employees who worked in agriculture; and gross domestic investment, expressed as a percentage of the GDP. 
Table~5 in \cite{cluster:Gordon+Vichi:2001} gives 3-class consensus partitions obtained by applying their models 1, 2, and 3 and the approach in \cite{cluster:Sato+Sato:1994}. The partitions and consensus partitions are available in data sets \code{GVME} and \code{GVME\_Consensus}, respectively. We compare the results of \cite{cluster:Gordon+Vichi:2001} using GV1 dissimilarities (model 1) to ours as obtained by \code{cl\_consensus()} with method \code{"GV1"}.
<<>>=
data("GVME")
GVME
set.seed(1)
m1 <- cl_consensus(GVME, method = "GV1", control = list(k = 3, verbose = TRUE))
@ %
This results in a soft partition with average squared GV1 dissimilarity (the criterion function to be optimized by the consensus partition) of
<<>>=
mean(cl_dissimilarity(GVME, m1, "GV1") ^ 2)
@ %
We compare this to the consensus solution given in \cite{cluster:Gordon+Vichi:2001}:
<<>>=
data("GVME_Consensus")
m2 <- GVME_Consensus[["MF1/3"]]
mean(cl_dissimilarity(GVME, m2, "GV1") ^ 2)
table(CLUE = cl_class_ids(m1), GV2001 = cl_class_ids(m2))
@ %
Interestingly, we are able to obtain a ``better'' solution, which however agrees with the one reported in the literature with respect to their nearest hard partitions. For the 2-class consensus partition, we obtain
<<>>=
set.seed(1)
m1 <- cl_consensus(GVME, method = "GV1", control = list(k = 2, verbose = TRUE))
@
which is slightly better than the solution reported in \cite{cluster:Gordon+Vichi:2001}
<<>>=
mean(cl_dissimilarity(GVME, m1, "GV1") ^ 2)
m2 <- GVME_Consensus[["MF1/2"]]
mean(cl_dissimilarity(GVME, m2, "GV1") ^ 2)
@
but in fact agrees with it apart from rounding errors:
<<>>=
max(abs(cl_membership(m1) - cl_membership(m2)))
@
It is interesting to compare these solutions to the Euclidean 2-class consensus partition for the GVME ensemble, obtained with the default (least squares Euclidean) consensus method:
<<>>=
m3 <- cl_consensus(GVME, control = list(k = 2, verbose = TRUE))
@
This is markedly different from the GV1 consensus partition
<<>>=
table(GV1 = cl_class_ids(m1), Euclidean = cl_class_ids(m3))
@
with countries
<<>>=
rownames(m1)[cl_class_ids(m1) != cl_class_ids(m3)]
@ %
classified differently, being in the ``richer'' class for the GV1 and the ``poorer'' for the Euclidean consensus partition. (In fact, all these countries end up in the ``middle'' class for the 3-class GV1 consensus partition.) \subsection{Rosenberg-Kim kinship terms data} \cite{cluster:Rosenberg+Kim:1975} describe an experiment where perceived similarities of the kinship terms were obtained from six different ``sorting'' experiments. In one of these, 85 female undergraduates at Rutgers University were asked to sort 15 English terms into classes ``on the basis of some aspect of meaning''. These partitions were printed in \citet[Table~7.1]{cluster:Rosenberg:1982}. Comparison with the original data indicates that the partition data have the ``nephew'' and ``niece'' columns interchanged, which is corrected in data set \code{Kinship82}. \citet[Table~6]{cluster:Gordon+Vichi:2001} provide consensus partitions for these data based on their models 1--3 (available in data set \code{Kinship82\_Consensus}). We compare their results using co-membership dissimilarities (model 3) to ours as obtained by \code{cl\_consensus()} with method \code{"GV3"}.
<<>>= data("Kinship82") Kinship82 set.seed(1) m1 <- cl_consensus(Kinship82, method = "GV3", control = list(k = 3, verbose = TRUE)) @ % This results in a soft partition with average co-membership dissimilarity (the criterion function to be optimized by the consensus partition) of <<>>= mean(cl_dissimilarity(Kinship82, m1, "comem") ^ 2) @ % Again, we compare this to the corresponding consensus solution given in \cite{cluster:Gordon+Vichi:2001}: <<>>= data("Kinship82_Consensus") m2 <- Kinship82_Consensus[["JMF"]] mean(cl_dissimilarity(Kinship82, m2, "comem") ^ 2) @ % Interestingly, again we obtain a (this time only ``slightly'') better solution, with <<>>= cl_dissimilarity(m1, m2, "comem") table(CLUE = cl_class_ids(m1), GV2001 = cl_class_ids(m2)) @ % indicating that the two solutions are reasonably close, even though <<>>= cl_fuzziness(cl_ensemble(m1, m2)) @ % shows that the solution found by \pkg{clue} is ``softer''. \subsection{Miller-Nicely consonant phoneme confusion data} \cite{cluster:Miller+Nicely:1955} obtained the data on the auditory confusions of 16 English consonant phonemes by exposing female subjects to a series of syllables consisting of one of the consonants followed by the vowel `a' under 17 different experimental conditions. Data set \code{Phonemes} provides consonant misclassification probabilities (i.e., similarities) obtained from aggregating the six so-called flat-noise conditions in which only the speech-to-noise ratio was varied into a single matrix of misclassification frequencies. These data are used in \cite{cluster:DeSoete:1986} as an illustration of the SUMT approach for finding least squares optimal fits to dissimilarities by ultrametrics. We can reproduce this analysis as follows. <<>>= data("Phonemes") d <- as.dist(1 - Phonemes) @ % (Note that the data set has the consonant misclassification probabilities, i.e., the similarities between the phonemes.) <<>>= u <- ls_fit_ultrametric(d, control = list(verbose = TRUE)) @ % This gives an ultrametric~$u$ for which Figure~\ref{fig:Phonemes} plots the corresponding dendrogram, ``basically'' reproducing Figure~1 in \cite{cluster:DeSoete:1986}. <>= plot(u) @ % \begin{figure} \centering <>= <> @ % \caption{Dendrogram for least squares fit to the Miller-Nicely consonant phoneme confusion data.} \label{fig:Phonemes} \end{figure} We can also compare the least squares fit obtained to that of other hierarchical clusterings of $d$, e.g.\ those obtained by \code{hclust()}. The ``optimal''~$u$ has Euclidean dissimilarity <<>>= round(cl_dissimilarity(d, u), 4) @ % to $d$. For the \code{hclust()} results, we get <<>>= hclust_methods <- c("ward", "single", "complete", "average", "mcquitty") hens <- cl_ensemble(list = lapply(hclust_methods, function(m) hclust(d, m))) names(hens) <- hclust_methods round(sapply(hens, cl_dissimilarity, d), 4) @ % which all exhibit greater Euclidean dissimilarity to $d$ than $u$. (We exclude methods \code{"median"} and \code{"centroid"} as these do not yield valid hierarchies.) We can also compare the ``structure'' of the different hierarchies, e.g.\ by looking at the rate of inversions between them: <<>>= ahens <- c(L2opt = cl_ensemble(u), hens) round(cl_dissimilarity(ahens, method = "gamma"), 2) @ % \section{Outlook} \label{sec:outlook} Package~\pkg{clue} was designed as an \emph{extensible} environment for computing on cluster ensembles. 
It currently provides basic data structures for representing partitions and hierarchies, and facilities for computing on these, including methods for measuring proximity and obtaining consensus and ``secondary'' clusterings. Many extensions to the available functionality are possible and in fact planned (some of these enhancements were already discussed in more detail in the course of this paper). \begin{itemize} \item Provide mechanisms to generate cluster ensembles based on reweighting (assuming base clusterers allowing for case weights) the data set. \item Explore recent advances (e.g., parallelized random search) in heuristics for solving the multi-dimensional assignment problem. \item Add support for \emph{additive trees} \citep[e.g.,][]{cluster:Barthelemy+Guenoche:1991}. \item Add heuristics for finding least squares fits based on iterative projection on convex sets of constraints, see e.g.\ \cite{cluster:Hubert+Arabie+Meulman:2006} and the accompanying MATLAB code available at \url{http://cda.psych.uiuc.edu/srpm_mfiles} for using these methods (instead of SUMT approaches) to fit ultrametrics and additive trees to proximity data. \item Add an ``$L_1$ View''. Emphasis in \pkg{clue}, in particular for obtaining consensus clusterings, is on using Euclidean dissimilarities (based on suitable least squares distances); arguably, more ``robust'' consensus solutions should result from using Manhattan dissimilarities (based on absolute distances). Adding such functionality necessitates developing the corresponding structure theory for soft Manhattan median partitions. Minimizing average Manhattan dissimilarity between co-memberships and ultrametrics results in constrained $L_1$ approximation problems for the weighted medians of the co-memberships and ultrametrics, respectively, and could be approached by employing SUMTs analogous to the ones used for the $L_2$ approximations. \item Provide heuristics for obtaining \emph{hard} consensus partitions. \item Add facilities for tuning hyper-parameters (most prominently, the number of classes employed) and ``cluster validation'' of partitioning algorithms, as recently proposed by \cite{cluster:Roth+Lange+Braun:2002}, \cite{cluster:Lange+Roth+Braun:2004}, \cite{cluster:Dudoit+Fridlyand:2002}, and \cite{cluster:Tibshirani+Walther:2005}. \end{itemize} We are hoping to be able to provide many of these extensions in the near future. \subsubsection*{Acknowledgments} We are grateful to Walter B\"ohm for providing efficient C code for solving assignment problems. {\small \bibliographystyle{abbrvnat} \bibliography{cluster} } \end{document} clue/vignettes/cluster.bib0000644000175000017500000012345614130772332015520 0ustar nileshnilesh@Book{cluster:Arabie+Carroll+Desarbo:1987, author = {Arabie, Phipps and Carroll, J. Douglas and DeSarbo, Wayne}, title = {Three-way Scaling and Clustering}, year = 1987, pages = 92, publisher = {Sage Publications Inc}, } @Book{cluster:Arabie+Hubert+DeSoete:1996, author = {Phipps Arabie and Lawrence J. 
Hubert and Geert de Soete}, title = {Clustering and Classification}, year = 1996, pages = 490, publisher = {World Scientific Publications}, } @Book{cluster:Barthelemy+Guenoche:1991, author = {Jean-Pierry Barth\'el\'emy and Alain Gu\'enoche}, title = {Trees and Proximity Representations}, publisher = {John Wiley \& Sons}, year = 1991, series = {Wiley-Interscience Series in Discrete Mathematics and Optimization}, address = {Chichester}, note = {{ISBN 0-471-92263-3}}, } @Article{cluster:Barthelemy+Leclerc+Monjardet:1986, author = {Jean-Pierre Barth\'el\'emy and Bruno Leclerc and Bernard Monjardet}, title = {On the Use of Ordered Sets in Problems of Comparison and Consensus of Classifications}, journal = {Journal of Classification}, year = 1986, volume = 3, number = 2, pages = {187--224}, doi = {10.1007/BF01894188}, } @Article{cluster:Barthelemy+Mcmorris:1986, author = {Jean-Pierre Barth\'el\'emy and F. R. McMorris}, title = {The Median Procedure for $n$-trees}, year = 1986, journal = {Journal of Classification}, volume = 3, pages = {329--334}, doi = {10.1007/BF01894194}, } @Article{cluster:Barthelemy+Monjardet:1981, author = {Jean-Pierre Barth\'el\'emy and Bernard Monjardet}, title = {The Median Procedure in Cluster Analysis and Social Choice Theory}, journal = {Mathematical Social Sciences}, year = 1981, volume = 1, pages = {235--267}, doi = {10.1016/0165-4896(81)90041-X}, } @TechReport{cluster:Bertsekas+Tseng:1994, author = {Dimitri P. Bertsekas and P. Tseng}, title = {{RELAX-IV}: A Faster Version of the {RELAX} Code for Solving Minimum Cost Flow Problems}, institution = {Massachusetts Institute of Technology}, year = 1994, number = {P-2276}, url = {http://www.mit.edu/dimitrib/www/noc.htm}, } @Book{cluster:Bezdek:1981, author = {James C. Bezdek}, title = {Pattern Recognition with Fuzzy Objective Function Algorithms}, publisher = {Plenum}, address = {New York}, year = 1981, } @InCollection{cluster:Boorman+Arabie:1972, author = {Scott A. Boorman and Phipps Arabie}, title = {Structural Measures and the Method of Sorting}, booktitle = {Multidimensional Scaling: Theory and Applications in the Behavioral Sciences, 1: Theory}, pages = {225--249}, publisher = {Seminar Press}, year = 1972, editor = {Roger N. Shepard and A. Kimball Romney and Sara Beth Nerlove}, address = {New York}, } @Article{cluster:Boorman+Olivier:1973, author = {Scott A. Boorman and Donald C. Olivier}, title = {Metrics on Spaces of Finite Trees}, journal = {Journal of Mathematical Psychology}, year = 1973, volume = 10, number = 1, pages = {26--59}, doi = {10.1016/0022-2496(73)90003-5}, } @Article{cluster:Breiman:1996, author = {Leo Breiman}, title = {Bagging Predictors}, journal = {Machine Learning}, year = 1996, volume = 24, number = 2, pages = {123--140}, doi = {10.1023/A:1018054314350}, } @Manual{cluster:Buchta+Hahsler:2005, title = {cba: Clustering for Business Analytics}, author = {Christian Buchta and Michael Hahsler}, year = 2005, note = {R package version 0.1-6}, url = {https://CRAN.R-project.org/package=cba}, } @Article{cluster:Buttrey:2005, author = {Samuel E. 
Buttrey}, title = {Calling the \texttt{lp\_solve} Linear Program Software from {R}, {S-PLUS} and {Excel}}, journal = {Journal of Statistical Software}, year = 2005, volume = 14, number = 4, doi = {10.18637/jss.v014.i04}, } @article{cluster:Carpaneto+Toth:1980, author = {Giorgio Carpaneto and Paolo Toth}, title = {Algorithm 548: Solution of the Assignment Problem}, journal = {ACM Transactions on Mathematical Software}, volume = 6, number = 1, year = 1980, issn = {0098-3500}, pages = {104--111}, doi = {10.1145/355873.355883}, publisher = {ACM Press}, } @Article{cluster:Carroll+Clark+Desarbo:1984, author = {Carroll, J. Douglas and Clark, Linda A. and DeSarbo, Wayne S.}, title = {The Representation of Three-way Proximity Data by Single and Multiple Tree Structure Models}, year = 1984, journal = {Journal of Classification}, volume = 1, pages = {25--74}, keywords = {Clustering analysis; Alternating least squares; Discrete optimization}, doi = {10.1007/BF01890116}, } @InCollection{cluster:Carroll+Pruzansky:1980, author = {J. D. Carroll and S. Pruzansky}, title = {Discrete and Hybrid Scaling Models}, booktitle = {Similarity and Choice}, address = {Bern, Switzerland}, publisher = {Huber}, year = 1980, editor = {E. D. Lantermann and H. Feger}, } @Article{cluster:Carroll:1976, author = {Carroll, J. Douglas}, title = {Spatial, Non-spatial and Hybrid Models for Scaling}, year = 1976, journal = {Psychometrika}, volume = 41, pages = {439--464}, keywords = {Multidimensional scaling; Hierarchical tree structure; Clustering; Geometric model; Multivariate data}, doi = {10.1007/BF02296969}, } @TechReport{cluster:Charon+Denoeud+Guenoche:2005, author = {Ir{\`e}ne Charon and Lucile Denoeud and Alain Gu{\'e}noche and Olivier Hudry}, title = {Maximum Transfer Distance Between Partitions}, institution = {Ecole Nationale Sup{\'e}rieure des T{\'e}l{\'e}communications --- Paris}, year = 2005, number = {2005D003}, month = {May}, note = {ISSN 0751-1345 ENST D}, } @Article{cluster:Charon+Denoeud+Guenoche:2006, author = {Ir{\`e}ne Charon and Lucile Denoeud and Alain Gu{\'e}noche and Olivier Hudry}, title = {Maximum Transfer Distance Between Partitions}, journal = {Journal of Classification}, year = 2006, volume = 23, number = 1, pages = {103-121}, month = {June}, doi = {10.1007/s00357-006-0006-2}, } @Article{cluster:Day:1981, author = {William H. E. Day}, title = {The Complexity of Computing Metric Distances Between Partitions}, journal = {Mathematical Social Sciences}, year = 1981, volume = 1, pages = {269--287}, doi = {10.1016/0165-4896(81)90042-1}, } @Article{cluster:Day:1986, author = {William H. E. Day}, title = {Foreword: Comparison and Consensus of Classifications}, journal = {Journal of Classification}, year = 1986, volume = 3, pages = {183--185}, doi = {10.1007/BF01894187}, } @Article{cluster:Day:1987, author = {Day, William H. E.}, title = {Computational Complexity of Inferring Phylogenies from Dissimilarity Matrices}, year = 1987, journal = {Bulletin of Mathematical Biology}, volume = 49, pages = {461--467}, doi = {10.1007/BF02458863}, } @Article{cluster:DeSoete+Carroll+Desarbo:1987, author = {De Soete, Geert and Carroll, J. 
Douglas and DeSarbo, Wayne S.}, title = {Least Squares Algorithms for Constructing Constrained Ultrametric and Additive Tree Representations of Symmetric Proximity Data}, year = 1987, journal = {Journal of Classification}, volume = 4, pages = {155--173}, keywords = {Hierarchical clustering; Classification}, doi = {10.1007/BF01896984}, } @Article{cluster:DeSoete+Desarbo+Furnas:1984, author = {De Soete, Geert and DeSarbo, Wayne S. and Furnas, George W. and Carroll, J. Douglas}, title = {The Estimation of Ultrametric and Path Length Trees from Rectangular Proximity Data}, year = 1984, journal = {Psychometrika}, volume = 49, pages = {289--310}, keywords = {Cluster analysis}, doi = {10.1007/BF02306021}, } @Article{cluster:DeSoete:1983, author = {De Soete, Geert}, title = {A Least Squares Algorithm for Fitting Additive Trees to Proximity Data}, year = 1983, journal = {Psychometrika}, volume = 48, pages = {621--626}, keywords = {Clustering}, doi = {10.1007/BF02293884}, } @Article{cluster:DeSoete:1984, author = {Geert de Soete}, title = {Ultrametric Tree Representations of Incomplete Dissimilarity Data}, journal = {Journal of Classification}, year = 1984, volume = 1, pages = {235--242}, doi = {10.1007/BF01890124}, } @Article{cluster:DeSoete:1986, author = {Geert de Soete}, title = {A Least Squares Algorithm for Fitting an Ultrametric Tree to a Dissimilarity Matrix}, journal = {Pattern Recognition Letters}, year = 1986, volume = 2, pages = {133--137}, doi = {10.1016/0167-8655(84)90036-9}, } @Manual{cluster:Dimitriadou+Hornik+Leisch:2005, title = {e1071: Misc Functions of the Department of Statistics (e1071), TU Wien}, author = {Evgenia Dimitriadou and Kurt Hornik and Friedrich Leisch and David Meyer and Andreas Weingessel}, year = 2005, note = {R package version 1.5-7}, url = {https://CRAN.R-project.org/package=e1071}, } @Article{cluster:Dimitriadou+Weingessel+Hornik:2002, author = {Evgenia Dimitriadou and Andreas Weingessel and Kurt Hornik}, title = {A Combination Scheme for Fuzzy Clustering}, journal = {International Journal of Pattern Recognition and Artificial Intelligence}, year = 2002, volume = 16, number = 7, pages = {901--912}, doi = {10.1142/S0218001402002052}, } @Manual{cluster:Dimitriadou:2005, title = {cclust: Convex Clustering Methods and Clustering Indexes}, author = {Evgenia Dimitriadou}, year = 2005, note = {R package version 0.6-12}, url = {https://CRAN.R-project.org/package=cclust}, } @Article{cluster:Dudoit+Fridlyand:2002, author = {Sandrine Dudoit and Jane Fridlyand}, title = {A Prediction-based Resampling Method for Estimating the Number of Clusters in a Dataset}, journal = {Genome Biology}, year = 2002, volume = 3, number = 7, pages = {1--21}, url = {http://genomebiology.com/2002/3/7/resarch0036.1}, doi = {10.1186/gb-2002-3-7-research0036}, } @Article{cluster:Dudoit+Fridlyand:2003, author = {Sandrine Dudoit and Jane Fridlyand}, title = {Bagging to Improve the Accuracy of a Clustering Procedure}, journal = {Bioinformatics}, year = 2003, volume = 19, number = 9, pages = {1090--1099}, doi = {10.1093/bioinformatics/btg038}, } @InProceedings{cluster:Fern+Brodley:2004, author = {Xiaoli Zhang Fern and Carla E. 
Brodley}, title = {Solving Cluster Ensemble Problems by Bipartite Graph Partitioning}, booktitle = {ICML '04: Twenty-first International Conference on Machine Learning}, year = 2004, isbn = {1-58113-828-5}, location = {Banff, Alberta, Canada}, doi = {10.1145/1015330.1015414}, publisher = {ACM Press}, } @comment address = {New York, NY, USA}, @Book{cluster:Fiacco+McCormick:1968, author = {Anthony V. Fiacco and Garth P. McCormick}, title = {Nonlinear Programming: Sequential Unconstrained Minimization Techniques}, publisher = {John Willey \& Sons}, year = 1968, address = {New York}, } @Article{cluster:Forgy:1965, author = {Forgy, E. W.}, title = {Cluster Analysis of Multivariate Data: Efficiency vs Interpretability of Classifications}, journal = {Biometrics}, year = 1965, volume = 21, pages = {768--769}, } @Article{cluster:Fowlkes+Mallows:1983a, author = {Fowlkes, E. B. and Mallows, C. L.}, title = {A Method for Comparing Two Hierarchical Clusterings}, year = 1983, journal = {Journal of the American Statistical Association}, volume = 78, pages = {553--569}, keywords = {Similarity; Graphics}, doi = {10.1080/01621459.1983.10478008}, } @Article{cluster:Fowlkes+Mallows:1983b, author = {Fowlkes, E. B. and Mallows, C. L.}, title = {Reply to Comments on ``{A} Method for Comparing Two Hierarchical Clusterings''}, year = 1983, journal = {Journal of the American Statistical Association}, volume = 78, pages = {584--584}, } @Manual{cluster:Fraley+Raftery+Wehrens:2005, title = {mclust: Model-based Cluster Analysis}, author = {Chris Fraley and Adrian E. Raftery and Ron Wehrens}, year = 2005, note = {R package version 2.1-11}, url = {http://www.stat.washington.edu/mclust}, } @TechReport{cluster:Fraley+Raftery:2002, author = {Chris Fraley and Adrian E. Raftery}, title = {{MCLUST}: Software for Model-based Clustering, Discriminant Analysis, and Density Estimation}, institution = {Department of Statistics, University of Washington}, year = 2002, number = 415, month = {October}, url = {ftp://ftp.u.washington.edu/public/mclust/tr415.pdf}, } @Article{cluster:Fraley+Raftery:2003, author = {Chris Fraley and Adrian E. Raftery}, title = {Enhanced Model-based Clustering, Density Estimation, and Discriminant Analysis Software: {MCLUST}}, year = 2003, journal = {Journal of Classification}, volume = 20, number = 2, pages = {263--286}, keywords = {clustering software; Mixture models; Cluster analysis; supervised classification; unsupervised classification; software abstract}, doi = {10.1007/s00357-003-0015-3}, } @InProceedings{cluster:Fred+Jain:2002, author = {Ana L. N. Fred and Anil K. Jain}, title = {Data Clustering Using Evidence Accumulation}, booktitle = {Proceedings of the 16th International Conference on Pattern Recognition (ICPR 2002)}, pages = {276--280}, year = 2002, url = {http://citeseer.ist.psu.edu/fred02data.html}, } @Article{cluster:Friedman+Hastie+Tibshirani:2000, author = {Jerome Friedman and Travor Hastie and Robert Tibshirani}, title = {Additive Logistic Regression: A Statistical View of Boosting}, journal = {The Annals of Statistics}, year = 2000, volume = 28, number = 2, pages = {337--407}, doi = {10.1214/aos/1016218223}, } @Book{cluster:Garey+Johnson:1979, author = {M. R. Garey and D. S. Johnson}, title = {Computers and Intractability: A Guide to the Theory of {NP}-Completeness}, address = {San Francisco}, publisher = {W. H. 
Freeman}, year = 1979, } @Article{cluster:Gaul+Schader:1988, author = {Wolfgang Gaul and Manfred Schader}, title = {Clusterwise Aggregation of Relations}, journal = {Applied Stochastic Models and Data Analysis}, year = 1988, volume = 4, pages = {273--282}, doi = {10.1002/asm.3150040406}, } @Manual{cluster:Gentleman+Whalen:2005, author = {Robert Gentleman and Elizabeth Whalen}, title = {graph: A Package to Handle Graph Data Structures}, year = 2005, note = {R package version 1.5.9}, url = {http://www.bioconductor.org/}, } @Article{cluster:Gordon+Vichi:1998, author = {Gordon, A. D. and Vichi, M.}, title = {Partitions of Partitions}, year = 1998, journal = {Journal of Classification}, volume = 15, pages = {265--285}, keywords = {Classification}, doi = {10.1007/s003579900034}, } @Article{cluster:Gordon+Vichi:2001, author = {Gordon, A. D. and Vichi, M.}, title = {Fuzzy Partition Models for Fitting a Set of Partitions}, year = 2001, journal = {Psychometrika}, volume = 66, number = 2, pages = {229--248}, keywords = {Classification; Cluster analysis; consensus fuzzy partition; membership function; three-way data}, doi = {10.1007/BF02294837}, } @Article{cluster:Gordon:1996, author = {Gordon, A. D.}, title = {A Survey of Constrained Classification}, year = 1996, journal = {Computational Statistics \& Data Analysis}, volume = 21, pages = {17--29}, keywords = {Model selection}, doi = {10.1016/0167-9473(95)00005-4}, } @Book{cluster:Gordon:1999, author = {A. D. Gordon}, title = {Classification}, address = {Boca Raton, Florida}, publisher = {Chapman \& Hall/CRC}, year = 1999, pages = 256, edition = {2nd}, } @Article{cluster:Grundel+Oliveira+Pardalos:2005, author = {Don Grundel and Carlos A.S. Oliveira and Panos M. Pardalos and Eduardo Pasiliao}, title = {Asymptotic Results for Random Multidimensional Assignment Problems}, journal = {Computational Optimization and Applications}, year = 2005, volume = 31, number = 3, pages = {275--293}, pdf = {http://www.okstate.edu/ceat/iem/iepeople/oliveira/papers/asympt.pdf}, doi = {10.1007/s10589-005-3227-0}, } @Article{cluster:Guha+Rastogi+Shim:2000, author = {Sudipto Guha and Rajeev Rastogi and Kyuseok Shim}, title = {{ROCK}: A Robust Clustering Algorithm for Categorical Attributes}, journal = {Information Systems}, year = 2000, volume = 25, number = 5, pages = {345--366}, doi = {10.1016/S0306-4379(00)00022-3}, } @Article{cluster:Gusfield:2002, author = {Dan Gusfield}, title = {Partition-Distance: A Problem and Class of Perfect Graphs Arising in Clustering}, journal = {Information Processing Letters}, year = 2002, volume = 82, pages = {159--164}, doi = {10.1016/S0020-0190(01)00263-0}, } @Manual{cluster:Hansen:2005, title = {optmatch: Functions for Optimal Matching}, author = {Ben B. Hansen}, year = 2005, note = {R package version 0.1-3}, url = {http://www.stat.lsa.umich.edu/~bbh/optmatch.html}, } @Article{cluster:Hartigan+Wong:1979, author = {Hartigan, J. A. and Wong, M. 
A.}, title = {A $K$-Means Clustering Algorithm}, journal = {Applied Statistics}, year = 1979, volume = 28, pages = {100--108}, doi = {10.2307/2346830}, } @Article{cluster:Hoeting+Madigan+Raftery:1999, author = {Jennifer Hoeting and David Madigan and Adrian Raftery and Chris Volinsky}, title = {Bayesian Model Averaging: A Tutorial}, journal = {Statistical Science}, year = 1999, volume = 14, pages = {382--401}, doi = {10.1214/ss/1009212519}, } @Manual{cluster:Hornik+Hothorn+Karatzoglou:2006, title = {RWeka: {R/Weka} Interface}, author = {Kurt Hornik and Torsten Hothorn and Alexandros Karatzoglou}, year = 2006, note = {R package version 0.2-0}, url = {https://CRAN.R-project.org/package=RWeka}, } @InProceedings{cluster:Hornik:2005a, author = {Kurt Hornik}, title = {Cluster Ensembles}, booktitle = {Classification -- The Ubiquitous Challenge}, pages = {65--72}, year = 2005, editor = {Claus Weihs and Wolfgang Gaul}, publisher = {Springer-Verlag}, note = {Proceedings of the 28th Annual Conference of the Gesellschaft f{\"u}r Klassifikation e.V., University of Dortmund, March 9--11, 2004}, } @comment address = {Heidelberg}, @Article{cluster:Hornik:2005b, author = {Kurt Hornik}, title = {A {CLUE} for {CLUster Ensembles}}, year = 2005, journal = {Journal of Statistical Software}, volume = 14, number = 12, month = {September}, doi = {10.18637/jss.v014.i12}, } @Misc{cluster:Hubert+Arabie+Meulman:2004, author = {Lawrence Hubert and Phipps Arabie and Jacqueline Meulman}, title = {The Structural Representation of Proximity Matrices With {MATLAB}}, year = 2004, url = {http://cda.psych.uiuc.edu/srpm_mfiles}, } @Book{cluster:Hubert+Arabie+Meulman:2006, author = {Lawrence Hubert and Phipps Arabie and Jacqueline Meulman}, title = {The Structural Representation of Proximity Matrices With {MATLAB}}, publisher = {SIAM}, address = {Philadelphia}, year = 2006, } @Article{cluster:Hubert+Arabie:1985, author = {Hubert, Lawrence and Arabie, Phipps}, title = {Comparing Partitions}, year = 1985, journal = {Journal of Classification}, volume = 2, pages = {193--218}, keywords = {Agreement; Association measure; Consensus index}, doi = {10.1007/bf01908075}, } @Article{cluster:Hubert+Arabie:1994, author = {Hubert, Lawrence and Arabie, Phipps}, title = {The Analysis of Proximity Matrices through Sums of Matrices Having (anti-) {R}obinson Forms}, year = 1994, journal = {British Journal of Mathematical and Statistical Psychology}, volume = 47, pages = {1--40}, doi = {10.1111/j.2044-8317.1994.tb01023.x}, } @Article{cluster:Hubert+Arabie:1995, author = {Hubert, Lawrence and Arabie, Phipps}, title = {Iterative Projection Strategies for the Least Squares Fitting of Tree Structures to Proximity Data}, year = 1995, journal = {British Journal of Mathematical and Statistical Psychology}, volume = 48, pages = {281--317}, keywords = {Graph theory}, doi = {10.1111/j.2044-8317.1995.tb01065.x}, } @Article{cluster:Hubert+Baker:1978, author = {Hubert, Lawrence J. and Baker, Frank B.}, title = {Evaluating the Conformity of Sociometric Measurements}, year = 1978, journal = {Psychometrika}, volume = 43, pages = {31--42}, keywords = {Permutation test; Nonparametric test}, doi = {10.1007/BF02294087}, } @Article{cluster:Hutchinson:1989, author = {Hutchinson, J. 
Wesley}, title = {{NETSCAL}: {A} Network Scaling Algorithm for Nonsymmetric Proximity Data}, year = 1989, journal = {Psychometrika}, volume = 54, pages = {25--51}, keywords = {Similarity; Graph theory}, doi = {10.1007/BF02294447}, } @Article{cluster:Karatzoglou+Smola+Hornik:2004, title = {kernlab -- An {S4} Package for Kernel Methods in {R}}, author = {Alexandros Karatzoglou and Alex Smola and Kurt Hornik and Achim Zeileis}, journal = {Journal of Statistical Software}, year = 2004, volume = 11, number = 9, pages = {1--20}, doi = {10.18637/jss.v011.i09}, } @Article{cluster:Katz+Powell:1953, author = {L. Katz and J. H. Powell}, title = {A Proposed Index of the Conformity of one Sociometric Measurement to Another}, journal = {Psychometrika}, year = 1953, volume = 18, pages = {249--256}, doi = {10.1007/BF02289063}, } @Book{cluster:Kaufman+Rousseeuw:1990, author = {Kaufman, Leonard and Rousseeuw, Peter J.}, title = {Finding Groups in Data: An Introduction to Cluster Analysis}, year = 1990, pages = 342, publisher = {John Wiley \& Sons}, } @Article{cluster:Klauer+Carroll:1989, author = {Klauer, K. C. and Carroll, J. D.}, title = {A Mathematical Programming Approach to Fitting General Graphs}, year = 1989, journal = {Journal of Classification}, volume = 6, pages = {247--270}, keywords = {Multivariate analysis; Proximity data}, doi = {10.1007/BF01908602}, } @Article{cluster:Klauer+Carroll:1991, author = {Klauer, K. C. and Carroll, J. O.}, title = {A Comparison of Two Approaches to Fitting Directed Graphs to Nonsymmetric Proximity Measures}, year = 1991, journal = {Journal of Classification}, volume = 8, pages = {251--268}, keywords = {Clustering}, doi = {10.1007/BF02616242}, } @Article{cluster:Krieger+Green:1999, author = {Abba M. Krieger and Paul E. Green}, title = {A Generalized {Rand}-index Method for Consensus Clustering of Separate Partitions of the Same Data Base}, journal = {Journal of Classification}, year = 1999, volume = 16, pages = {63--89}, doi = {10.1007/s003579900043}, } @Article{cluster:Krivanek+Moravek:1986, author = {M. Krivanek and J. Moravek}, title = {{NP}-hard Problems in Hierarchical Tree Clustering}, journal = {Acta Informatica}, year = 1986, volume = 23, pages = {311--323}, doi = {10.1007/BF00289116}, } @InProceedings{cluster:Krivanek:1986, author = {Krivanek, Mirko}, title = {On the Computational Complexity of Clustering}, year = 1986, booktitle = {Data Analysis and Informatics 4}, editor = {Diday, E. and Escoufier, Y. and Lebart, L. and Pages, J. and Schektman, Y. and Tomassone, R.}, publisher = {Elsevier/North-Holland}, pages = {89--96}, } @comment address = {Amsterdam}, @Article{cluster:Lange+Roth+Braun:2004, author = {Tilman Lange and Volker Roth and Mikio L. Braun and Joachim M. 
Buhmann}, title = {Stability-Based Validation of Clustering Solutions}, journal = {Neural Computation}, year = 2004, volume = 16, number = 6, pages = {1299--1323}, doi = {10.1162/089976604773717621}, } @Manual{cluster:Leisch+Dimitriadou:2005, title = {mlbench: Machine Learning Benchmark Problems}, author = {Friedrich Leisch and Evgenia Dimitriadou}, year = 2005, note = {R package version 1.0-1}, url = {https://CRAN.R-project.org/package=mlbench}, } @TechReport{cluster:Leisch:1999, author = {Friedrich Leisch}, title = {Bagged Clustering}, institution = {SFB ``Adaptive Information Systems and Modeling in Economics and Management Science''}, year = 1999, type = {Working Paper}, number = 51, month = {August}, url = {http://www.ci.tuwien.ac.at/~leisch/papers/wp51.ps}, } @Article{cluster:Leisch:2004, title = {{FlexMix}: A General Framework for Finite Mixture Models and Latent Class Regression in {R}}, author = {Friedrich Leisch}, journal = {Journal of Statistical Software}, year = 2004, volume = 11, number = 8, doi = {10.18637/jss.v011.i08}, } @Manual{cluster:Leisch:2005, author = {Friedrich Leisch}, title = {flexclust: Flexible Cluster Algorithms}, note = {R package 0.7-0}, year = 2005, url = {https://CRAN.R-project.org/package=flexclust}, } @Article{cluster:Leisch:2006a, author = {Friedrich Leisch}, title = {A Toolbox for $K$-Centroids Cluster Analysis}, journal = {Computational Statistics and Data Analysis}, year = 2006, volume = 51, number = 2, pages = {526--544}, doi = {10.1016/j.csda.2005.10.006}, } @Unpublished{cluster:Lloyd:1957, author = {Lloyd, S. P.}, title = {Least Squares Quantization in {PCM}}, note = {Technical Note, Bell Laboratories}, year = 1957, } @Article{cluster:Lloyd:1982, author = {Lloyd, S. P.}, title = {Least Squares Quantization in {PCM}}, journal = {IEEE Transactions on Information Theory}, year = 1982, volume = 28, pages = {128--137}, doi = {10.1109/TIT.1982.1056489}, } @Article{cluster:Margush+Mcmorris:1981, author = {T. Margush and F. R. McMorris}, title = {Consensus $n$-Trees}, journal = {Bulletin of Mathematical Biology}, year = 1981, volume = 43, number = 2, pages = {239--244}, doi = {10.1007/BF02459446}, } @InProceedings{cluster:Meila:2003, author = {Marina Meila}, title = {Comparing Clusterings by the Variation of Information}, booktitle = {Learning Theory and Kernel Machines}, editor = {Bernhard Sch{\"o}lkopf and Manfred K. Warmuth}, series = {Lecture Notes in Computer Science}, publisher = {Springer-Verlag}, volume = 2777, year = 2003, pages = {173--187}, ee = {http://springerlink.metapress.com/openurl.asp?genre=article&issn=0302-9743&volume=2777&spage=173}, bibsource = {DBLP, http://dblp.uni-trier.de}, } @comment address = {Heidelberg}, @Article{cluster:Messatfa:1992, author = {Messatfa, H.}, title = {An Algorithm to Maximize the Agreement Between Partitions}, year = 1992, journal = {Journal of Classification}, volume = 9, pages = {5--15}, keywords = {Association; Contingency table}, doi = {10.1007/BF02618465}, } @Article{cluster:Miller+Nicely:1955, author = {G. A. Miller and P. E. Nicely}, title = {An Analysis of Perceptual Confusions Among some {English} Consonants}, journal = {Journal of the Acoustical Society of America}, year = 1955, volume = 27, pages = {338--352}, doi = {10.1121/1.1907526}, } @Book{cluster:Mirkin:1996, author = {Boris G. 
Mirkin}, title = {Mathematical Classification and Clustering}, year = 1996, pages = 428, publisher = {Kluwer Academic Publishers Group}, } @Article{cluster:Monti+Tamayo+Mesirov:2003, author = {Stefano Monti and Pablo Tamayo and Jill Mesirov and Todd Golub}, title = {Consensus Clustering: A Resampling-based Method for Class Discovery and Visualization of Gene Expression Microarray Data}, journal = {Machine Learning}, volume = 52, number = {1--2}, year = 2003, issn = {0885-6125}, pages = {91--118}, publisher = {Kluwer Academic Publishers}, address = {Hingham, MA, USA}, doi = {10.1023/A:1023949509487}, } @Article{cluster:Oliveira+Pardalos:2004, author = {Carlos A. S. Oliveira and Panos M. Pardalos}, title = {Randomized Parallel Algorithms for the Multidimensional Assignment Problem}, journal = {Applied Numerical Mathematics}, year = 2004, volume = 49, number = 1, pages = {117--133}, month = {April}, doi = {10.1016/j.apnum.2003.11.014}, } @Book{cluster:Papadimitriou+Steiglitz:1982, author = {Christos Papadimitriou and Kenneth Steiglitz}, title = {Combinatorial Optimization: Algorithms and Complexity}, publisher = {Prentice Hall}, year = 1982, address = {Englewood Cliffs}, } @Manual{cluster:R:2005, title = {R: A Language and Environment for Statistical Computing}, author = {{R Development Core Team}}, organization = {R Foundation for Statistical Computing}, address = {Vienna, Austria}, year = 2005, note = {{ISBN} 3-900051-07-0}, url = {http://www.R-project.org}, } @article{cluster:Rajski:1961, author = {C. Rajski}, title = {A Metric Space of Discrete Probability Distributions}, journal = {Information and Control}, year = 1961, volume = 4, number = 4, pages = {371--377}, doi = {10.1016/S0019-9958(61)80055-7}, } @Article{cluster:Rand:1971, author = {William M. Rand}, title = {Objective Criteria for the Evaluation of Clustering Methods}, journal = {Journal of the American Statistical Association}, year = 1971, volume = 66, number = 336, pages = {846--850}, keywords = {Pattern recognition}, doi = {10.2307/2284239}, } @Article{cluster:Rosenberg+Kim:1975, author = {S. Rosenberg and M. P. Kim}, title = {The Method of Sorting as a Data-Gathering Procedure in Multivariate Research}, journal = {Multivariate Behavioral Research}, year = 1975, volume = 10, pages = {489--502}, doi = {10.1207/s15327906mbr1004_7}, } @InCollection{cluster:Rosenberg:1982, author = {S. Rosenberg}, title = {The Method of Sorting in Multivariate Research with Applications Selected from Cognitive Psychology and Person Perception}, booktitle = {Multivariate Applications in the Social Sciences}, pages = {117--142}, address = {Hillsdale, New Jersey}, publisher = {Erlbaum}, year = 1982, editor = {N. Hirschberg and L. G. Humphreys}, } @InProceedings{cluster:Roth+Lange+Braun:2002, author = {Volker Roth and Tilman Lange and Mikio Braun and Joachim M. Buhmann}, title = {A Resampling Approach to Cluster Validation}, booktitle = {{COMPSTAT} 2002 -- Proceedings in Computational Statistics}, pages = {123--128}, year = 2002, editor = {Wolfgang H{\"a}rdle and Bernd R{\"o}nz}, publisher = {Physika Verlag}, note = {ISBN 3-7908-1517-9}, } @comment address = {Heidelberg, Germany}, @Manual{cluster:Rousseeuw+Struyf+Hubert:2005, title = {cluster: Functions for Clustering (by Rousseeuw et al.)}, author = {Peter Rousseeuw and Anja Struyf and Mia Hubert and Martin Maechler}, year = 2005, note = {R package version 1.9.8}, url = {https://CRAN.R-project.org/package=cluster}, } @InCollection{cluster:Roux:1988, author = {M. 
Roux}, title = {Techniques of Approximation for Building Two Tree Structures}, booktitle = {Recent Developments in Clustering and Data Analysis}, pages = {151--170}, publisher = {Academic Press}, year = 1988, editor = {C. Hayashi and E. Diday and M. Jambu and N. Ohsumi}, address = {New York}, } @article{cluster:Rubin:1967, author = {Jerrold Rubin}, title = {Optimal Classification into Groups: An Approach for Solving the Taxonomy Problem}, journal = {Journal of Theoretical Biology}, year = 1967, volume = 15, number = 1, pages = {103--144}, doi = {10.1016/0022-5193(67)90046-X}, } @Article{cluster:Sato+Sato:1994, author = {M. Sato and Y. Sato}, title = {On a Multicriteria Fuzzy Clustering Method for 3-way Data}, journal = {International Journal of Uncertainty, Fuzziness and Knowledge-based Systems}, year = 1994, volume = 2, pages = {127--142}, doi = {10.1142/S0218488594000122}, } @Article{cluster:Smith:2000, author = {Smith, Thomas J.}, title = {${L}_1$ Optimization under Linear Inequality Constraints}, year = 2000, journal = {Journal of Classification}, volume = 17, number = 2, pages = {225--242}, keywords = {$L_1$-norm; Ultrametric; stuctural representation}, doi = {10.1007/s003570000020}, } @Article{cluster:Smith:2001, author = {Smith, Thomas J.}, title = {Constructing Ultrametric and Additive Trees Based on the ${L}_1$ Norm}, year = 2001, journal = {Journal of Classification}, volume = 18, number = 2, pages = {185--207}, keywords = {iteratively re-weighted iterative projection (IRIP); Combinatorial probability; explicit machine computation; Combinatorics; Trees; Graph theory; Linear regression; probabilistic Monte Carlo methods}, doi = {10.1007/s00357-001-0015-0}, } @Article{cluster:Sokal+Rohlf:1962, author = {R. R. Sokal and F. J. Rohlf}, title = {The Comparisons of Dendrograms by Objective Methods}, journal = {Taxon}, year = 1962, volume = 11, pages = {33--40}, doi = {10.2307/1217208}, } @Article{cluster:Strehl+Ghosh:2003a, author = {Alexander Strehl and Joydeep Ghosh}, title = {Cluster Ensembles -- {A} Knowledge Reuse Framework for Combining Multiple Partitions}, journal = {Journal of Machine Learning Research}, volume = 3, year = 2003, issn = {1533-7928}, pages = {583--617}, publisher = {MIT Press}, url = {http://www.jmlr.org/papers/volume3/strehl02a/strehl02a.pdf}, } @Article{cluster:Strehl+Ghosh:2003b, author = {Alexander Strehl and Joydeep Ghosh}, title = {Relationship-based Clustering and Visualization for High-Dimensional Data Mining}, journal = {{INFORMS} Journal on Computing}, year = 2003, volume = 15, issue = 2, pages = {208--230}, ISSN = {1526-5528}, doi = {10.1287/ijoc.15.2.208.14448}, } @Article{cluster:Struyf+Hubert+Rousseeuw:1996, author = {Anja Struyf and Mia Hubert and Peter Rousseeuw}, title = {Clustering in an Object-Oriented Environment}, journal = {Journal of Statistical Software}, year = 1996, volume = 1, number = 4, doi = {10.18637/jss.v001.i04}, } @Article{cluster:Tibshirani+Walther+Hastie:2001, author = {Tibshirani, Robert and Walther, Guenther and Hastie, Trevor}, title = {Estimating the Number of Clusters in a Data Set Via the Gap Statistic}, year = 2001, journal = {Journal of the Royal Statistical Society, Series B: Statistical Methodology}, volume = 63, number = 2, pages = {411--423}, keywords = {Clustering; groups; Hierarchy; $k$-means; Uniform distribution}, doi = {10.1111/1467-9868.00293}, } @Article{cluster:Tibshirani+Walther:2005, author = {Tibshirani, Robert and Walther, Guenther}, title = {Cluster Validation by Prediction Strength}, year = 2005, 
journal = {Journal of Computational and Graphical Statistics}, volume = 14, number = 3, pages = {511--528}, keywords = {number of clusters; prediction; Unsupervised learning}, doi = {10.1198/106186005X59243}, } @InProceedings{cluster:Topchy+Jain+Punch:2003, author = {A. Topchy and A. Jain and W. Punch}, title = {Combining Multiple Weak Clusterings}, booktitle = {Proceedings of the Third IEEE International Conference on Data Mining (ICDM'03)}, year = 2003, url = {http://citeseer.ist.psu.edu/topchy03combining.html}, } @Article{cluster:Vichi:1999, author = {Vichi, Maurizio}, title = {One-mode Classification of a Three-way Data Matrix}, year = 1999, journal = {Journal of Classification}, volume = 16, pages = {27--44}, keywords = {Cluster analysis}, doi = {10.1007/s003579900041}, } @Article{cluster:Wallace:1983, author = {Wallace, David L.}, title = {Comments on ``{A} Method for Comparing Two Hierarchical Clusterings''}, year = 1983, journal = {Journal of the American Statistical Association}, volume = 78, pages = {569--576}, doi = {10.2307/2288118}, } @Inproceedings{cluster:Zhou+Li+Zha:2005, author = {Ding Zhou and Jia Li and Hongyuan Zha}, title = {A New {Mallows} Distance Based Metric for Comparing Clusterings}, booktitle = {ICML '05: Proceedings of the 22nd International Conference on Machine Learning}, year = 2005, isbn = {1-59593-180-5}, pages = {1028--1035}, location = {Bonn, Germany}, doi = {10.1145/1102351.1102481}, publisher = {ACM Press}, address = {New York, NY, USA}, } %%% Local Variables: *** %%% bibtex-maintain-sorted-entries: t *** %%% End: *** clue/build/0000755000175000017500000000000014130772671012443 5ustar nileshnileshclue/build/vignette.rds0000644000175000017500000000031114130772671014775 0ustar nileshnileshclue/build/partial.rdb0000644000175000017500000031217414130772663014575 0ustar nileshnilesh
clue/R/0000755000175000017500000000000013435044702011537 5ustar nileshnileshclue/R/tabulate.R0000644000175000017500000000061411304023136013453 0ustar nileshnileshcl_tabulate <-
function(x)
{
    values <- unique(x)
    counts <- tabulate(match(x, values))
    ## Still a bit tricky to create a data frame with a list "column"
    ## which is not protected by I(); otherwise, we could simply do
    ##   data.frame(values = I(values), counts = counts)
    out <- data.frame(values = double(length(values)),
                      counts = counts)
    out$values <- values
    out
}
clue/R/margin.R0000644000175000017500000000060511304023136013127 0ustar nileshnileshcl_margin <-
function(x)
{
    if(is.cl_hard_partition(x))
        out <- rep.int(1, n_of_objects(x))
    else if(is.cl_partition(x)) {
        x <- cl_membership(x)
        i <- seq_len(nrow(x))
        j <- cbind(i, max.col(x))
        out <- x[j]
        x[j] <- 0
        out <- out - x[cbind(i, max.col(x))]
    }
    else
        stop("Argument 'x' must be a partition.")
    out
}
clue/R/proximity.R0000644000175000017500000001244311304023136013721 0ustar nileshnilesh### * cl_proximity

cl_proximity <-
function(x, description, class = NULL, labels = NULL,
         self = NULL, size = NULL)
{
    ## Similar to as.dist(), in a way.
    ## Currently, as.dist() is not generic, so we cannot provide a
    ## cl_proximity method for it.  Hence, we have our dissimilarities
    ## and ultrametrics extend dist, and we use capitalized names for
    ## the attributes provided for compatibility with dist (Size and
    ## Labels).
    if(inherits(x, "dist")) {
        ## Explicitly deal with dist objects.
        ## Useful in particular because cophenetic() returns them.
        out <- x
        if(is.null(size))
            size <- attr(x, "Size")
        if(is.null(labels))
            labels <- attr(x, "Labels")
    }
    else if(inherits(x, "cl_proximity")
            || !(is.matrix(x) && (nrow(x) == ncol(x))))
        out <- x
    else {
        ## Actually, x should really be a square symmetric matrix.
        ## The "self-proximities" in the main diagonal must be stored
        ## provided there is one non-zero entry.
        self <- diag(x)
        if(all(self == 0)) self <- NULL
        out <- x[row(x) > col(x)]
        if(is.null(labels)) {
            if(!is.null(rownames(x)))
                labels <- rownames(x)
            else if(!is.null(colnames(x)))
                labels <- colnames(x)
        }
    }
    if(is.null(size))
        size <- as.integer((sqrt(1 + 8 * length(out)) + 1) / 2)
    attributes(out) <- list(Size = size, Labels = labels,
                            description = description, self = self)
    class(out) <- unique(c(class, "cl_proximity"))
    out
}

### * names.cl_proximity

names.cl_proximity <- function(x) NULL

### * print.cl_proximity

print.cl_proximity <-
function(x, ...)
{
    description <- attr(x, "description")
    if(length(description) > 0L) {
        ## Could make this generic ...
        kind <- if(inherits(x, "cl_dissimilarity"))
            "Dissimilarities"
        else if(inherits(x, "cl_agreement"))
            "Agreements"
        else
            "Proximities"
        cat(sprintf("%s using %s", kind, description), ":\n", sep = "")
    }
    m <- format(as.matrix(x))
    if(is.null(self <- attr(x, "self")))
        m[row(m) <= col(m)] <- ""
    else
        m[row(m) < col(m)] <- ""
    print(if(is.null(self)) m[-1, -attr(x, "Size")] else m,
          quote = FALSE, right = TRUE, ...)
    invisible(x)
}

### * as.matrix.cl_proximity

as.matrix.cl_proximity <-
function(x, ...)
{
    size <- attr(x, "Size")
    m <- matrix(0, size, size)
    m[row(m) > col(m)] <- x
    m <- m + t(m)
    if(!is.null(self <- attr(x, "self"))) {
        diag(m) <- self
    }
    ##
    ## stats:::as.matrix.dist() provides default dimnames
    ## (seq_len(size)) if no labels are available.
    ## We used to do this too, but ...
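    ## now dimnames are set only if labels are actually available: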
if(!is.null(labels <- attr(x, "Labels"))) dimnames(m) <- list(labels, labels) ## m } ### * [.cl_proximity "[.cl_proximity" <- function(x, i, j) { ## Subscripting proximity objects. ## Basically matrix-like, but proximity objects are always ## "matrices", hence no 'drop' argument. ## For double-index subscripting, if i and j are identical, ## structure and class are preserved. Otherwise, a cross-proximity ## object is returned (and methods for classes inheriting from ## proximity need to readjust the class info as needed). ## For single-index subscripting, no attempty is currently made at ## preserving structure and class where possible. (We might also ## change this to select objects, i.e., the same rows and columns.) size <- attr(x, "Size") if(missing(j)) { if(missing(i)) return(x) else j <- seq_len(size) } if(missing(i)) i <- seq_len(size) description <- attr(x, "description") ## RG's graph:::[.dist avoids as.matrix() in noting that for dist ## objects, entry (i,j) is at n(i-1) - i(i-1)/2 + j - i (in the ## veclh dist representation). We could do something similar, but ## note that not all proximities have zero diagonals (i.e., NULL ## "self" attributes). y <- as.matrix(x)[i, j, drop = FALSE] if(identical(i, j)) { ## Testing using identical() is rather defensive ... return(cl_proximity(y, description = description, class = class(x))) } cl_cross_proximity(y, description = description) } ### * cl_cross_proximity cl_cross_proximity <- function(x, description = NULL, class = NULL) { attr(x, "description") <- description class(x) <- c(class, "cl_cross_proximity") x } ### * print.cl_cross_proximity print.cl_cross_proximity <- function(x, ...) { description <- attr(x, "description") if(length(description) > 0L) { ## Could make this generic ... kind <- if(inherits(x, "cl_cross_dissimilarity")) "Cross-dissimilarities" else if(inherits(x, "cl_cross_agreement")) "Cross-agreements" else "Cross-proximities" cat(sprintf("%s using %s", kind, description), ":\n", sep = "") } print(matrix(as.vector(x), nrow = nrow(x), dimnames = dimnames(x)), ...) invisible(x) } ### ** print_description_prefix ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/classes.R0000644000175000017500000000460613434542602013326 0ustar nileshnileshcl_classes <- function(x) UseMethod("cl_classes") cl_classes.default <- function(x) { ## Be nice to users ... if(is.cl_partition(x)) cl_classes(as.cl_partition(x)) else if(is.cl_dendrogram(x)) cl_classes(as.cl_dendrogram(x)) else stop("Can only determine classes of partitions or hierarchies.") } cl_classes.cl_partition <- function(x) { n <- n_of_objects(x) out <- split(seq_len(n), cl_class_ids(x)) class(out) <- c("cl_classes_of_partition_of_objects", "cl_classes_of_objects") attr(out, "n_of_objects") <- n attr(out, "labels") <- cl_object_labels(x) out } cl_classes.cl_hierarchy <- function(x) { ## Assume a valid hierarchy/dendrogram. x <- as.hclust(x) n <- n_of_objects(x) labels <- seq_len(n) ## Only use the "maximal" partitions for each height (relevant in ## case of non-binary trees). groups <- cutree(x, h = unique(c(0, x$height))) ## Give a list with the (unique) sets of numbers of the objects. ## Note that objects may already be merged at height zero. out <- unique(unlist(c(as.list(labels), lapply(split(groups, col(groups)), function(k) split(labels, k))), recursive = FALSE, use.names = FALSE)) ## Preserve labels if possible, and re-order according to ## cardinality. 
out <- out[order(lengths(out))] class(out) <- c("cl_classes_of_hierarchy_of_objects", "cl_classes_of_objects") attr(out, "n_of_objects") <- n attr(out, "labels") <- cl_object_labels(x) out } ## Be nice to users of ultrametric fitters ... which should really fit ## dendrograms (which inherit from hierarchies). cl_classes.cl_ultrametric <- cl_classes.cl_hierarchy print.cl_classes_of_partition_of_objects <- function(x, ...) { labels <- attr(x, "labels") y <- lapply(x, function(i) paste(labels[i], collapse = ", ")) writeLines(formatDL(names(x), sprintf("{%s}", unlist(y)), style = "list", ...)) invisible(x) } print.cl_classes_of_hierarchy_of_objects <- function(x, ...) { labels <- attr(x, "labels") y <- lapply(x, function(i) paste(labels[i], collapse = ", ")) y <- strwrap(sprintf("{%s},", unlist(y)), exdent = 2) y[length(y)] <- sub(",$", "", y[length(y)]) writeLines(y) invisible(x) } clue/R/objects.R0000644000175000017500000001310113036461743013314 0ustar nileshnilesh### * n_of_objects ## Get the number of objects in a clustering. n_of_objects <- function(x) UseMethod("n_of_objects") ### ** Default method. n_of_objects.default <- function(x) length(cl_class_ids(x)) ## (Note that prior to R 2.1.0, kmeans() returned unclassed results, ## hence the best we can do for the *default* method is to look at a ## possibly existing "cluster" component. Using the class ids incurs ## another round of method dispatch, but avoids code duplication.) ### ** Partitioning methods. ## Package stats: kmeans() (R 2.1.0 or better). n_of_objects.kmeans <- n_of_objects.default ## Package cluster: clara(), fanny(), and pam() give objects of the ## respective class inheriting from class "partition". n_of_objects.partition <- n_of_objects.default ## Package cclust: cclust(). n_of_objects.cclust <- n_of_objects.default ## Package e1071: cmeans() gives objects of class "fclust". n_of_objects.fclust <- function(x) nrow(x$membership) ## Package e1071: cshell(). n_of_objects.cshell <- n_of_objects.fclust ## Package e1071: bclust(). n_of_objects.bclust <- n_of_objects.default ## Package mclust: Mclust(). n_of_objects.Mclust <- n_of_objects.default ### ** Hierarchical methods. ## Package stats: hclust(). n_of_objects.hclust <- function(x) length(x$order) ## Package cluster: agnes() and diana() give objects inheriting from ## class "twins". n_of_objects.twins <- n_of_objects.hclust ## Package cluster: mona(). n_of_objects.mona <- n_of_objects.hclust ## Package ape: class "phylo". n_of_objects.phylo <- function(x) length(x$tip.label) ### ** Others. ## Package stats: class "dist". n_of_objects.dist <- function(x) attr(x, "Size") ## Package clue: Ensembles. n_of_objects.cl_ensemble <- function(x) attr(x, "n_of_objects") ## Package clue: Memberships. n_of_objects.cl_membership <- nrow ## Package clue: pclust(). n_of_objects.pclust <- n_of_objects.default ## Package clue: Ultrametrics. n_of_objects.cl_ultrametric <- n_of_objects.dist ## Package clue: (virtual) class "cl_partition". n_of_objects.cl_partition <- function(x) .get_property_from_object_or_representation(x, "n_of_objects") ## Package clue: (virtual) class "cl_hierarchy". n_of_objects.cl_hierarchy <- function(x) .get_property_from_object_or_representation(x, "n_of_objects") ### * cl_object_names ## Determine the names of the objects in a clustering if available; give ## NULL otherwise. This is in sync with e.g. names() or dimnames(); au ## contraire, cl_object_labels() always gives labels even if no names ## are available. 
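## A rough illustration of the intended behavior (developer note, never
## run at load time; kmeans() and the 'USArrests' data set are merely
## convenient stand-ins and not otherwise used by this package):
if(FALSE) {
    p1 <- kmeans(USArrests, 3L)
    cl_object_names(p1)          # state names, taken from the class ids
    p2 <- kmeans(unname(as.matrix(USArrests)), 3L)
    cl_object_names(p2)          # NULL, as no names are available
    cl_object_labels(p2)         # "1", "2", ... as a fallback
}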
cl_object_names <- function(x) UseMethod("cl_object_names") ## ** Default method. cl_object_names.default <- function(x) names(cl_class_ids(x)) ## ** Partitions. ## There is really nothing special we can currently do. ## Most partitioning functions return no information on object names. ## This includes classes ## stats: kmeans ## cba: ccfkms, rock ## cclust: cclust ## e1071: bclust ## flexclust: kcca ## kernlab: specc ## mclust: Mclust ## The algorithms for which things "work" all give named class ids. ## RWeka: Weka_clusterer ## cluster: clara fanny pam ## e1071: cclust cshell ## ** Hierarchies. ## Package stats: hclust(). cl_object_names.hclust <- function(x) x$labels ## Package cluster: agnes(), diana() and mona() all return an object ## which has an 'order.lab' component iff "the original observations ## were labelled". We can use this together the the 'order' component ## to recreate the labels in their original order. Note that we cannot ## rely on dissimilarity or data components being available. cl_object_names.twins <- function(x) { if(!is.null(x$order.lab)) { out <- character(length = n_of_objects(x)) out[x$order] <- x$order.lab out } else NULL } cl_object_names.mona <- cl_object_names.twins ## Package ape: class "phylo". cl_object_names.phylo <- function(x) x$tip.label ## ** Others. ## Package stats: class "dist". ## (Raw object dissimilarities.) cl_object_names.dist <- function(x) attr(x, "Labels") ## Package clue: memberships. cl_object_names.cl_membership <- function(x) rownames(x) ## Package clue: ultrametrics. cl_object_names.cl_ultrametric <- function(x) attr(x, "Labels") ## Package clue: (virtual) class "cl_partition". cl_object_names.cl_partition <- function(x) cl_object_names(.get_representation(x)) ## Package clue: (virtual) class "cl_hierarchy". cl_object_names.cl_hierarchy <- function(x) cl_object_names(.get_representation(x)) ## Package clue: ensembles. cl_object_names.cl_ensemble <- function(x) { nms <- lapply(x, cl_object_names) ind <- which(lengths(nms) > 0L) if(any(ind)) nms[[ind[1L]]] else NULL } ### * cl_object_labels cl_object_labels <- function(x) { if(is.null(out <- cl_object_names(x))) out <- as.character(seq_len(n_of_objects(x))) out } ### * cl_object_dissimilarities ## Extract object dissimilarities from R objects containing such: this ## includes objects directly inheriting from "dist" as well as ## dendrograms or additive trees. cl_object_dissimilarities <- function(x) { ## Keep this in sync with .has_object_dissimilarities(). if(is.cl_dendrogram(x)) cl_ultrametric(x) else if(inherits(x, "dist")) x else stop("Cannot extract object dissimilarities") } .has_object_dissimilarities <- function(x) { ## Keep this in sync with cl_object_dissimilarities(). is.cl_dendrogram(x) || inherits(x, "dist") } ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/fuzziness.R0000644000175000017500000000435013435044610013722 0ustar nileshnileshcl_fuzziness <- function(x, method = NULL, normalize = TRUE) { x <- as.cl_ensemble(x) out <- double(length(x)) ## ## The docs say that we should only have partitions ... attr(out, "description") <- "Fuzziness" class(out) <- "cl_fuzziness" parties <- vapply(x, is.cl_partition, NA) if(!(length(x) || any(parties))) { ## Currently, no fuzzy hierarchies ... 
return(out) } ## if(!is.function(method)) { builtin_methods <- c("PC", "PE") builtin_method_names <- c("partition coefficient", "partition entropy") if(is.null(method)) ind <- 1 else if(is.na(ind <- pmatch(tolower(method), tolower(builtin_methods)))) stop(gettextf("Value '%s' is not a valid abbreviation for a fuzziness method.", method), domain = NA) method <- paste0(".cl_fuzziness_partition_", builtin_methods[ind]) method_name <- builtin_method_names[ind] if(normalize) method_name <- paste("normalized", method_name) } else method_name <- "user-defined method" out[parties] <- as.numeric(sapply(x[parties], method, normalize)) attr(out, "description") <- paste("Fuzziness using", method_name) out } .cl_fuzziness_partition_PC <- function(x, normalize = TRUE) { ## Dunn's Partition Coefficient, see also ?fanny. ## Note that we normalize differently ... if(!.maybe_is_proper_soft_partition(x) && is.cl_hard_partition(x)) return(1 - normalize) pc <- sum(cl_membership(x) ^ 2) / n_of_objects(x) if(normalize) pc <- (1 - pc) / (1 - 1 / n_of_classes(x)) pc } .cl_fuzziness_partition_PE <- function(x, normalize = TRUE) { ## Bezdek's Partition Entropy. ## Note that we normalize differently ... if(!.maybe_is_proper_soft_partition(x) && is.cl_hard_partition(x)) return(0) M <- cl_membership(x) pe <- - sum(ifelse(M > 0, M * log(M), 0)) / n_of_objects(x) if(normalize) pe <- pe / log(n_of_classes(x)) pe } print.cl_fuzziness <- function(x, ...) { cat(attr(x, "description"), ":\n", sep = "") print(as.vector(x), ...) invisible(x) } clue/R/ultrametric.R0000644000175000017500000006365313435044702014232 0ustar nileshnilesh### * cl_ultrametric cl_ultrametric <- function(x, size = NULL, labels = NULL) { if(inherits(x, "cl_hierarchy")) { ## ## Strictly, not every hierarchy corresponds to an ultrametric. ## return(cl_ultrametric(.get_representation(x), size = size, labels = labels)) } else if(!inherits(x, "cl_ultrametric")) { ## Try using cophenetic(). ## This starts by coercing to hclust, which has methods for all ## currently supported hierarchical classification methods. ## To support others, either provide as.hclust methods for ## these, or make cl_ultrametric() generic and add methods. ## Or use the fact that in R >= 2.1.0, stats::cophenetic() is ## generic. out <- cophenetic(x) } else { out <- x if(is.null(labels)) labels <- attr(x, "Labels") } .cl_ultrametric_from_veclh(out, labels = labels, size = size) } .cl_ultrametric_from_veclh <- function(x, size = NULL, labels = NULL, meta = NULL) { if(.non_ultrametricity(x) > 0) stop("Not a valid ultrametric.") u <- cl_proximity(x, "Ultrametric distances", labels = labels, size = size, class = c("cl_ultrametric", "cl_dissimilarity", "cl_proximity", "dist")) if(!is.null(meta)) attr(u, "meta") <- meta u } ### * as.cl_ultrametric as.cl_ultrametric <- function(x) UseMethod("as.cl_ultrametric") as.cl_ultrametric.default <- function(x) { if(inherits(x, "cl_ultrametric")) x else if(is.atomic(x)) .cl_ultrametric_from_veclh(x) else cl_ultrametric(x) } as.cl_ultrametric.matrix <- function(x) .cl_ultrametric_from_veclh(x[row(x) > col(x)], labels = rownames(x)) ### * as.dendrogram.cl_ultrametric as.dendrogram.cl_ultrametric <- function(object, ...) as.dendrogram(as.hclust(object), ...) ### * as.hclust.cl_ultrametric as.hclust.cl_ultrametric <- function(x, ...) { ## Hierarchical clustering with single linkage gives the minimal ## ultrametric dominated by a dissimilarity, see e.g. Bock (1974, ## Theorem 39.2). 
Hence, hclust(method = "single") on an ## ultrametric gives the hclust representation of the associated ## dendrogram. hclust(x, "single") } ### * cophenetic.cl_ultrametric cophenetic.cl_ultrametric <- function(x) as.dist(x) ### * plot.cl_ultrametric plot.cl_ultrametric <- function(x, ...) plot(as.dendrogram(x), ...) ### * ls_fit_ultrametric ls_fit_ultrametric <- function(x, method = c("SUMT", "IP", "IR"), weights = 1, control = list()) { if(inherits(x, "cl_ultrametric")) { return(.cl_ultrametric_with_meta_added(x, list(objval = 0))) } else if(is.cl_ensemble(x) || is.list(x)) { ## Might be given a list/ensemble of object dissimilarities. ## In this case, compute the suitably weighted average and ## proceed. if(length(x) == 0L) stop("Given ensemble contains no dissimilarities.") ## Let's be nice as usual ... ind <- !vapply(x, .has_object_dissimilarities, NA) if(any(ind)) x[ind] <- lapply(x[ind], as.dist) x <- .weighted_mean_of_object_dissimilarities(x, control$weights) } else if(!inherits(x, "dist")) x <- as.dist(x) ## Catch some special cases right away. if(attr(x, "Size") <= 2L) return(.cl_ultrametric_with_meta_added(as.cl_ultrametric(x), list(objval = 0))) if(.non_ultrametricity(x, max = TRUE) == 0) return(.cl_ultrametric_with_meta_added(as.cl_ultrametric(x), list(objval = 0))) ## Handle weights. ## This is somewhat tricky ... if(is.matrix(weights)) { weights <- as.dist(weights) if(length(weights) != length(x)) stop("Argument 'weights' must be compatible with 'x'.") } else weights <- rep_len(weights, length(x)) if(any(weights < 0)) stop("Argument 'weights' has negative elements.") if(!any(weights > 0)) stop("Argument 'weights' has no positive elements.") method <- match.arg(method) switch(method, SUMT = .ls_fit_ultrametric_by_SUMT(x, weights, control), IP = { .ls_fit_ultrametric_by_iterative_projection(x, weights, control) }, IR = { .ls_fit_ultrametric_by_iterative_reduction(x, weights, control) }) } ### ** .ls_fit_ultrametric_by_SUMT .ls_fit_ultrametric_by_SUMT <- function(x, weights = 1, control = list()) { ## Fit an ultrametric to a dissimilarity by minimizing euclidean ## dissimilarity subject to the ultrametric constraint, using the ## sequential algorithm of de Soete (1984) with a slight change: we ## try to ensure that what we obtain satisfies the constraints ## "exactly" rather than approximately. We (currently?) do that via ## rounding ... ## ## This fits and hence returns an ultrametric, *not* the hierarchy ## corresponding to the ultrametric. ## w <- weights / sum(weights) ## Control parameters: ## nruns, nruns <- control$nruns ## start. start <- control$start ## Handle start values and number of runs. if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. start <- list(start) } } else if(is.null(nruns)) { ## Use nruns only if start is not given. nruns <- 1L } ## If x is an ultrametric, or satisfies the ultrametricity ## constraints, return it. if(inherits(x, "cl_ultrametric") || (.non_ultrametricity(x, max = TRUE) == 0)) return(.cl_ultrametric_with_meta_added(as.cl_ultrametric(x), list(objval = 0))) ## For the time being, use a simple minimizer. n <- attr(x, "Size") labels <- attr(x, "Labels") ## Handle missing values in x along the lines of de Soete (1984): ## set the corresponding weights to 0, and impute by the weighted ## mean. 
ind <- which(is.na(x)) if(any(ind)) { w[ind] <- 0 x[ind] <- weighted.mean(x, w, na.rm = TRUE) } ## We follow de Soete's notation, and use the veclh's (vector of ## lower half, in S the same as x[lower.tri(x)]) of the respective ## proximity objects. L <- function(d) sum(w * (x - d) ^ 2) P <- .make_penalty_function_ultrametric(n) grad_L <- function(d) 2 * w * (d - x) grad_P <- .make_penalty_gradient_ultrametric(n) if(is.null(start)) { ## Initialize by "random shaking". Use sd() for simplicity. start <- replicate(nruns, x + rnorm(length(x), sd = sd(x) / sqrt(3)), simplify = FALSE) } ## And now ... out <- sumt(start, L, P, grad_L, grad_P, method = control$method, eps = control$eps, q = control$q, verbose = control$verbose, control = as.list(control$control)) d <- .ultrametrify(out$x) meta <- list(objval = L(d)) .cl_ultrametric_from_veclh(d, n, labels, meta) } .make_penalty_function_ultrametric <- function(n) function(d) { ## Smooth penalty function measuring the extent of violation of ## the ultrametricity constraint. Also ensure nonnegativity ... (.non_ultrametricity(.symmetric_matrix_from_veclh(d, n)) + sum(pmin(d, 0) ^ 2)) } .make_penalty_gradient_ultrametric <- function(n) function(d) { gr <- matrix(.C(C_deviation_from_ultrametricity_gradient, as.double(.symmetric_matrix_from_veclh(d, n)), as.integer(n), gr = double(n * n))$gr, n, n) gr[row(gr) > col(gr)] + 2 * sum(pmin(d, 0)) } ### ** .ls_fit_ultrametric_by_iterative_projection ## ## Functions ## .ls_fit_ultrametric_by_iterative_projection() ## .ls_fit_ultrametric_by_iterative_reduction() ## are really identical apart from the name of the C routine they call. ## (But will this necessarily always be the case in the future?) ## Merge maybe ... ## .ls_fit_ultrametric_by_iterative_projection <- function(x, weights = 1, control = list()) { if(any(diff(weights) != 0)) warning("Non-identical weights currently not supported.") labels <- attr(x, "Labels") n <- attr(x, "Size") x <- as.matrix(x) ## Control parameters: ## maxiter, maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 10000L ## nruns, nruns <- control$nruns ## order, order <- control$order ## tol, tol <- control$tol if(is.null(tol)) tol <- 1e-8 ## verbose. verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") ## Handle order and nruns. if(!is.null(order)) { if(!is.list(order)) order <- as.list(order) if(!all(vapply(order, function(o) all(sort(o) == seq_len(n)), NA))) stop("All given orders must be valid permutations.") } else { if(is.null(nruns)) nruns <- 1L order <- replicate(nruns, sample(n), simplify = FALSE) } ## ## Adjust in case support for non-identical weights is added. 
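    ## Unweighted least-squares criterion: non-identical weights are not
    ## supported (see the warning above), so the weights are ignored here.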
L <- function(d) sum((x - d) ^ 2) ## d_opt <- NULL v_opt <- Inf for(run in seq_along(order)) { if(verbose) message(gettextf("Iterative projection run: %d", run)) d <- .C(C_ls_fit_ultrametric_by_iterative_projection, as.double(x), as.integer(n), as.integer(order[[run]] - 1L), as.integer(maxiter), iter = integer(1L), as.double(tol), as.logical(verbose))[[1L]] v <- L(d) if(v < v_opt) { v_opt <- v d_opt <- d } } d <- .ultrametrify(as.dist(matrix(d_opt, n))) meta <- list(objval = L(d)) .cl_ultrametric_from_veclh(d, n, labels, meta) } ### ** .ls_fit_ultrametric_by_iterative_reduction .ls_fit_ultrametric_by_iterative_reduction <- function(x, weights = 1, control = list()) { if(any(diff(weights) != 0)) warning("Non-identical weights currently not supported.") labels <- attr(x, "Labels") n <- attr(x, "Size") x <- as.matrix(x) ## Control parameters: ## maxiter, maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 10000L ## nruns, nruns <- control$nruns ## order, order <- control$order ## tol, tol <- control$tol if(is.null(tol)) tol <- 1e-8 ## verbose. verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") ## Handle order and nruns. if(!is.null(order)) { if(!is.list(order)) order <- as.list(order) if(!all(vapply(order, function(o) all(sort(o) == seq_len(n)), NA))) stop("All given orders must be valid permutations.") } else { if(is.null(nruns)) nruns <- 1L order <- replicate(nruns, sample(n), simplify = FALSE) } ## ## Adjust in case support for non-identical weights is added. L <- function(d) sum((x - d) ^ 2) ## d_opt <- NULL v_opt <- Inf for(run in seq_along(order)) { if(verbose) message(gettextf("Iterative reduction run: %d", run)) d <- .C(C_ls_fit_ultrametric_by_iterative_reduction, as.double(x), as.integer(n), as.integer(order[[run]] - 1L), as.integer(maxiter), iter = integer(1L), as.double(tol), as.logical(verbose))[[1L]] v <- L(d) if(v < v_opt) { v_opt <- v d_opt <- d } } d <- .ultrametrify(as.dist(matrix(d_opt, n))) meta <- list(objval = L(d)) .cl_ultrametric_from_veclh(d, n, labels, meta) } ### * Ultrametric Target Fitters. ### ** ls_fit_ultrametric_target ls_fit_ultrametric_target <- function(x, y, weights = 1) { fitter <- if(identical(weights, 1)) # Default. function(x, w) mean(x) else function(x, w) weighted.mean(x, w) distfun <- function(x, u, w) sqrt(sum(w * (x - u) ^ 2)) .fit_ultrametric_target(x, y, weights, fitter, distfun) } ### ** l1_fit_ultrametric_target l1_fit_ultrametric_target <- function(x, y, weights = 1) { fitter <- if(identical(weights, 1)) # Default. function(x, w) median(x) else function(x, w) weighted_median(x, w) distfun <- function(x, u, w) sum(w * abs(x - u)) .fit_ultrametric_target(x, y, weights, fitter, distfun) } ### ** .fit_ultrametric_target .fit_ultrametric_target <- function(x, y, w, fitter, distfun = NULL) { w <- .handle_weights_for_ultrametric_target_fitters(w, x) ## The documentation says that x should inherit from dist, so coerce ## to this if needed but if not a matrix (as we will coerce back to ## a matrix right away). 
if(!inherits(x, "dist") && !is.matrix(x)) x <- as.dist(x) x <- as.matrix(x) y <- as.hclust(y) n <- length(y$order) ilist <- vector("list", n) out <- matrix(0, n, n) mat <- xlist <- wlist <- vector("list", n - 1L) for(i in seq_len(n - 1L)) { inds <- y$merge[i, ] ids1 <- if(inds[1L] < 0) -inds[1L] else ilist[[inds[1L]]] ids2 <- if(inds[2L] < 0) -inds[2L] else ilist[[inds[2L]]] ilist[[i]] <- c(ids1, ids2) mat[[i]] <- cbind(rep.int(ids1, rep.int(length(ids2), length(ids1))), rep.int(ids2, length(ids1))) xlist[[i]] <- x[mat[[i]]] wlist[[i]] <- w[mat[[i]]] } values <- pava(xlist, wlist, fitter) for(i in seq_len(n - 1L)) out[mat[[i]]] <- values[i] rownames(out) <- y$labels u <- as.cl_ultrametric(out + t(out)) if(!is.null(distfun)) attr(u, "meta") <- list(objval = distfun(as.dist(x), u, as.dist(w))) u } ### ** .handle_weights_for_ultrametric_target_fitters .handle_weights_for_ultrametric_target_fitters <- function(weights, x) { ## Handle weights for the ultrametric target fitters. ## This is somewhat tricky ... if(is.matrix(weights)) { if(any(dim(weights) != attr(x, "Size"))) stop("Argument 'weights' must be compatible with 'x'.") } else weights <- as.matrix(.dist_from_vector(rep_len(weights, length(x)))) if(any(weights < 0)) stop("Argument 'weights' has negative elements.") if(!any(weights > 0)) stop("Argument 'weights' has no positive elements.") weights } ### l1_fit_ultrametric l1_fit_ultrametric <- function(x, method = c("SUMT", "IRIP"), weights = 1, control = list()) { if(inherits(x, "cl_ultrametric")) return(.cl_ultrametric_with_meta_added(x, list(objval = 0))) if(!inherits(x, "dist")) x <- as.dist(x) ## Catch some special cases right away. if(attr(x, "Size") <= 2L) return(.cl_ultrametric_with_meta_added(as.cl_ultrametric(x), list(objval = 0))) if(.non_ultrametricity(x, max = TRUE) == 0) return(.cl_ultrametric_with_meta_added(as.cl_ultrametric(x), list(objval = 0))) ## Handle weights. ## This is somewhat tricky ... if(is.matrix(weights)) { weights <- as.dist(weights) if(length(weights) != length(x)) stop("Argument 'weights' must be compatible with 'x'.") } else weights <- rep_len(weights, length(x)) if(any(weights < 0)) stop("Argument 'weights' has negative elements.") if(!any(weights > 0)) stop("Argument 'weights' has no positive elements.") method <- match.arg(method) switch(method, SUMT = .l1_fit_ultrametric_by_SUMT(x, weights, control), IRIP = .l1_fit_ultrametric_by_IRIP(x, weights, control)) } ### ** .l1_fit_ultrametric_by_SUMT .l1_fit_ultrametric_by_SUMT <- function(x, weights = 1, control = list()) { ## Try a SUMT with "pseudo-gradients". w <- weights / sum(weights) ## Control parameters: ## gradient, gradient <- control$gradient if(is.null(gradient)) gradient <- TRUE ## nruns, nruns <- control$nruns ## start. start <- control$start ## Handle start values and number of runs. if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. start <- list(start) } } else if(is.null(nruns)) { ## Use nruns only if start is not given. nruns <- 1L } ## For the time being, use a simple minimizer. n <- attr(x, "Size") labels <- attr(x, "Labels") L <- function(d) sum(w * abs(d - x)) P <- .make_penalty_function_ultrametric(n) if(gradient) { grad_L <- function(d) w * sign(d - x) grad_P <- .make_penalty_gradient_ultrametric(n) } else grad_L <- grad_P <- NULL if(is.null(start)) { ## Initialize by "random shaking". Use sd() for simplicity. start <- replicate(nruns, x + rnorm(length(x), sd = sd(x) / sqrt(3)), simplify = FALSE) } ## And now ... 
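    ## Run the SUMT solver on the weighted L1 criterion with the
    ## ultrametricity penalty (using the "pseudo-gradients" from above
    ## when gradients are employed).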
out <- sumt(start, L, P, grad_L, grad_P, method = control$method, eps = control$eps, q = control$q, verbose = control$verbose, control = as.list(control$control)) d <- .ultrametrify(out$x) meta <- list(objval = L(d)) .cl_ultrametric_from_veclh(d, n, labels, meta) } ### ** .l1_fit_ultrametric_by_IRIP .l1_fit_ultrametric_by_IRIP <- function(x, weights = 1, control = list()) { ## An attempt of implementing "Iteratively Reweighted Iterative ## Projection" as described in Smith (2000, 2001), Journal of ## Classification. Note that this suggests using the Iterative ## Projection of Hubert and Arabie (1995), which we cannot as we ## have not (yet?) implemented this for the weighted case. Hence, ## we use our SUMT least squares ultrametric fitter instead. ## ## However, we never got this to converge properly ... w <- weights / sum(weights) ## Control parameters: ## MIN, MIN <- control$MIN if(is.null(MIN)) MIN <- 1e-3 ## (A rather small cut-off which worked best in the cases we tried.) ## eps, eps <- control$eps if(is.null(eps)) eps <- 1e-6 ## maxiter, maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 100L ## reltol, reltol <- control$reltol if(is.null(reltol)) reltol <- 1e-6 ## start, start <- control$start ## verbose. verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") n <- attr(x, "Size") labels <- attr(x, "Labels") L <- function(d) sum(w * abs(x - d)) ## Initialize by "random shaking" as for the L2 SUMT, but perhaps we ## should not do this? [Or do it differently?] u <- if(is.null(start)) x + rnorm(length(x), sd = sd(x) / 3) else start ## (No multiple runs for the time being.) L_new <- L(u) iter <- 1L while(iter <= maxiter) { if(verbose) message(gettextf("Outer iteration: %d", iter)) L_old <- L_new u_old <- u weights <- w / pmax(abs(u - x), MIN) u <- .ls_fit_ultrametric_by_SUMT(x, weights = weights, control = as.list(control$control)) ## Use some control arguments lateron ... L_new <- L(u) delta_L <- L_old - L_new delta_u <- max(abs(u_old - u)) if(verbose) message(gettextf("Change: u: %g L: %g", delta_u, delta_L)) if((delta_u < eps) || ((delta_L >= 0) && (delta_L <= reltol * (abs(L_old) + reltol)))) break iter <- iter + 1L } d <- .ultrametrify(u) meta <- list(objval = L(d), status = as.integer(iter == maxiter)) .cl_ultrametric_from_veclh(d, n, labels, meta) } ## * ls_fit_sum_of_ultrametrics ls_fit_sum_of_ultrametrics <- function(x, nterms = 1, weights = 1, control = list()) { if(!inherits(x, "dist")) x <- as.dist(x) ## We could catch some special cases right away: if x already is an ## ultrametric then the fit would be a list with x and nterms - 1 ## zero ultrametrics ... ## Control parameters: ## eps, eps <- control$eps if(is.null(eps)) eps <- 1e-6 ## maxiter, maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 100L ## method, method <- control$method if(is.null(method)) method <- "SUMT" ## reltol, reltol <- control$reltol if(is.null(reltol)) reltol <- 1e-6 ## verbose. verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") ## Do this at last. control <- as.list(control$control) ## And be nice ... if(identical(method, "SUMT") && is.null(control$nruns)) control$nruns <- 10L L <- function(u) sum((x - rowSums(matrix(unlist(u), ncol = nterms))) ^ 2) ## Init. u <- rep.int(list(as.cl_ultrametric(0 * x)), nterms) L_new <- L(u) ## Loop. 
iter <- 1L while(iter <= maxiter) { if(verbose) message(gettextf("Iteration: %d", iter)) L_old <- L_new delta_u <- 0 for(i in seq_len(nterms)) { if(verbose) message(gettextf("Term: %d", i)) u_old <- u[[i]] ## Compute residual r = x - \sum_{j: j \ne i} u(j) r <- x - rowSums(matrix(unlist(u[-i]), ncol = nterms - 1L)) ## Fit residual. u[[i]] <- ls_fit_ultrametric(r, method, weights, control) ## Accumulate change. change <- max(abs(u[[i]] - u_old)) if(verbose) message(gettextf("Change: %g", change)) delta_u <- max(delta_u, change) } L_new <- L(u) delta_L <- L_old - L_new if(verbose) message(gettextf("Overall change: u: %g L: %g\n", delta_u, delta_L)) if((delta_u < eps) || ((delta_L >= 0) && (delta_L <= reltol * (abs(L_old) + reltol)))) break iter <- iter + 1L } .structure(u, objval = L_new, status = as.integer(iter == maxiter)) } ### * as.dist.hclust ## Using hclust() with methods 'median' or 'centroid' typically gives ## reversals and hence not valid hierarchies, i.e., distances which do ## not satisfy the ultrametricity conditions. The distances can be ## obtained via cophenetic(), but ls_fit_ultrametric() prefers using ## as.dist() [as arguably more appropriate] which in turn can be made to ## "work" by providing as.matrix() methods [bypassing the need to handle ## the extra arguments 'diag' and 'upper' for as.dist()]. as.matrix.hclust <- function(x, ...) as.matrix(cophenetic(x)) ### * .non_ultrametricity .non_ultrametricity <- function(x, max = FALSE) { if(!is.matrix(x)) x <- .symmetric_matrix_from_veclh(x) .C(C_deviation_from_ultrametricity, as.double(x), as.integer(nrow(x)), fn = double(1L), as.logical(max))$fn } ### * .cl_ultrametric_from_classes .cl_ultrametric_from_classes <- function(x) { ## Compute an ultrametric from a hierarchy of classes (i.e., an ## n-tree). labels <- attr(x, "labels") ## Ensure we have no duplicates. x <- x[!duplicated(x)] ## .get_classes_in_hierarchy() orders according to cardinality, but ## a consensus method may forget to ... x[] <- x[order(lengths(x))] ## Get the objects (unique codes in the classes). objects <- sort(unique(unlist(x))) ## (Could also look at the classes of length 1.) ## Recursively compute the heights of the classes. heights <- double(length = length(x)) for(i in which(lengths(x) > 1L)) { ## Find the relevant classes. j <- sapply(x[seq_len(i - 1L)], function(s) all(s %in% x[[i]])) heights[i] <- max(heights[j]) + 1 } ## Next, create an incidence matrix (objects by classes). incidences <- sapply(x, function(s) objects %in% s) ## Now that we have the heights and incidences, we can compute ## distances, using the idea that ## distance(i, j) = min(height(A): A contains i and j) n <- length(objects) d <- matrix(0, n, n) for(i in objects) d[i, ] <- heights[apply((rep(incidences[i, ], each = n) & incidences), 1L, which.max)] dimnames(d) <- rep.int(list(labels), 2L) as.cl_ultrametric(d) } ### * .cl_ultrametric_with_meta_added .cl_ultrametric_with_meta_added <- function(x, meta = NULL) { ## An alternative to adding a 'meta' argument to cl_ultrametric(). attr(x, "meta") <- meta x } ### .ultrametrify .ultrametrify <- function(x) { ## Ensure ultrametricity. ## In earlier versions, function ## .cl_ultrametric_from_ultrametric_approximation() tried rounding ## to non-ultrametric significance, using ## round(x, floor(abs(log10(.non_ultrametricity(x, max = TRUE))))) ## which is nice but does not guarantee ultrametricity (and may ## result in poorer approximations than what we use now). 
## Hence, let us use single linkage hierarchical clustering which ## gives the best dominated ultrametric approximation. cophenetic(hclust(.dist_from_vector(x), "single")) } ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/membership.R0000644000175000017500000001737311633352676014042 0ustar nileshnilesh### * cl_membership ## Get the class membership matrix from a partition. ## ## We could use sparse matrices for the memberships of hard partitions. ## Not sure if this is really that important, though, as we typically ## use memberships in a context where dense matrices (memberships of ## soft partitions) occur. ## ## ## Currently, the number of classes to be used for the memberships must ## not be less than the number of classes in the partition. We might ## eventually change this so that "optimal" collapsing of classes is ## performed (but note that optimality needs to be relative to some ## dissimilarity measure) ... ## However, from the discussion of the second method in Gordon and Vichi ## (2001) we note that whereas optimal assignment is "simple", optimal ## collapsing (equivalent to partitioning into an arbitrary number of ## partitions) is of course very hard. ## cl_membership <- function(x, k = n_of_classes(x)) { if(k < n_of_classes(x)) stop("k cannot be less than the number of classes in x.") UseMethod("cl_membership") } ## Default method. cl_membership.default <- function(x, k = n_of_classes(x)) .cl_membership_from_class_ids(cl_class_ids(x), k) ## Package stats: kmeans() (R 2.1.0 or better). cl_membership.kmeans <- cl_membership.default ## Package cluster: clara(), fanny(), and pam() give objects of the ## respective class inheriting from class "partition". cl_membership.fanny <- function(x, k = n_of_classes(x)) .cl_membership_from_memberships(x$membership, k) cl_membership.partition <- cl_membership.default ## Package cclust: cclust(). cl_membership.cclust <- cl_membership.default ## Package e1071: cmeans() gives objects of class "fclust". cl_membership.fclust <- cl_membership.fanny ## Package e1071: cshell(). cl_membership.cshell <- cl_membership.fanny ## Package e1071: bclust(). cl_membership.bclust <- cl_membership.default ## Package flexmix: class "flexmix". ## ## We used to be able to call flexmix::posterior(), but this now only ## has S4 methods for modeltools::posterior() S4 generic. Let's call ## this one, and hope that flexmix has been loaded ... ## cl_membership.flexmix <- function(x, k = n_of_classes(x)) .cl_membership_from_memberships(modeltools::posterior(x), k) ## Package mclust: Mclust(). cl_membership.Mclust <- function(x, k = n_of_classes(x)) .cl_membership_from_memberships(x$z, k) ## Package clue: Memberships. cl_membership.cl_membership <- function(x, k = n_of_classes(x)) .cl_membership_from_memberships(x, k) ## (Note: we cannot simply return x in case k equals n_of_classes(x), ## because ncol(x) might be different.) ## Package clue: pclust(). cl_membership.pclust <- function(x, k = n_of_classes(x)) { ## We should really have a suitable "sparse matrix" class for ## representing the memberships of hard partitions. In case we ## decide not to fill the membership "slot" for such: if(is.null(m <- x$membership)) .cl_membership_from_class_ids(x$cluster, k) else .cl_membership_from_memberships(m, k) } ## Package clue: (virtual) class "cl_partition". cl_membership.cl_partition <- function(x, k = n_of_classes(x)) cl_membership(.get_representation(x), k) ## Package movMF: class "movMF". 
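## (Membership values are taken directly from the matrix stored in the
## fitted object's 'P' component.)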
cl_membership.movMF <- function(x, k = n_of_classes(x)) .cl_membership_from_memberships(x$P, k) ### * .make_cl_membership ## A low-level common creator. .make_cl_membership <- function(x, n_of_classes, is_cl_hard_partition, meta = NULL) { attr(x, "n_of_classes") <- n_of_classes attr(x, "is_cl_hard_partition") <- is_cl_hard_partition attr(x, "meta") <- meta class(x) <- "cl_membership" x } ### * .cl_membership_from_class_ids .cl_membership_from_class_ids <- function(x, k = NULL, meta = NULL) { x <- factor(x) n_of_objects <- length(x) n_of_classes <- nlevels(x) if(is.null(k)) k <- n_of_classes else if(k < n_of_classes) stop("k cannot be less than the number of classes in x.") ## ## Should really use a sparse encoding of this ... M <- matrix(0, n_of_objects, k) ## (Could also use .one_entry_per_column(M, as.numeric(x)) <- 1 for ## the time being.) M[cbind(seq_len(n_of_objects), as.numeric(x))] <- 1 ## But note that we also need to handle NAs ... M[is.na(x), ] <- NA ## if(nlevels(x) == k) colnames(M) <- levels(x) if(!is.null(nm <- names(x))) rownames(M) <- nm .make_cl_membership(M, n_of_classes, TRUE, meta) } ### * .cl_membership_from_memberships .cl_membership_from_memberships <- function(x, k = NULL, meta = NULL) { ## ## Dropping and re-filling of ## zero columns in case k is given may ## seem unnecessary, but really canonicalizes by moving zero columns ## last ... ## x <- x[ , colSums(x, na.rm = TRUE) > 0, drop = FALSE] n_of_classes <- ncol(x) if(!is.null(k)) { if(k < n_of_classes) stop("k cannot be less than the number of classes in x.") if(k > n_of_classes) { ## Fill up with zero columns. x <- cbind(x, matrix(0, nrow(x), k - n_of_classes)) ## Handle NAs if necessary. x[apply(is.na(x), 1, any), ] <- NA } } .make_cl_membership(x, n_of_classes, all(rowSums(x == 1, na.rm = TRUE) > 0), meta) } ### * as.cl_membership as.cl_membership <- function(x) UseMethod("as.cl_membership") as.cl_membership.default <- function(x) { if(inherits(x, "cl_membership")) x else if(is.atomic(x)) .cl_membership_from_class_ids(x) else cl_membership(x) } as.cl_membership.matrix <- function(x) .cl_membership_from_memberships(x) ### * .memberships_from_cross_dissimilarities .memberships_from_cross_dissimilarities <- function(d, power = 2) { ## For a given matrix of cross-dissimilarities [d_{bj}], return a ## matrix [u_{bj}] such that \sum_{b,j} u_{bj}^p d_{bj}^q => min! ## under the constraint that u is a stochastic matrix. ## If only one power is given, it is taken as p, with q as 1. ## ## This returns a plain matrix of membership values and not a ## cl_membership object (so that it does not deal with possibly ## dropping or re-introducing unused classes). ## exponent <- if(length(power) == 1L) 1 / (1 - power) else power[2L] / (1 - power[1L]) u <- matrix(0, nrow(d), ncol(d)) zero_incidences <- !(d > 0) n_of_zeroes <- rowSums(zero_incidences) if(any(ind <- (n_of_zeroes > 0))) u[ind, ] <- zero_incidences[ind, , drop = FALSE] / n_of_zeroes[ind] if(any(!ind)) { ## Compute d_{bj}^e / \sum_k d_{bk}^e without overflow from very ## small d_{bj} values. d <- exponent * log(d[!ind, , drop = FALSE]) d <- exp(d - d[cbind(seq_len(nrow(d)), max.col(d))]) u[!ind, ] <- d / rowSums(d) } u } ### * print.cl_membership print.cl_membership <- function(x, ...) { writeLines("Memberships:") print(matrix(as.vector(x), nrow = nrow(x), dimnames = dimnames(x)), ...) 
invisible(x) } ### .has_object_memberships ## Be nice to users when computing proximities: all measures for ## "partitions" we currently consider really only assume that we can ## compute memberships and/or class ids. ## Note that the cl_membership() default method works for cl_class_ids. .has_object_memberships <- function(x) (is.cl_partition(x) || inherits(x, "cl_membership") || inherits(x, "cl_class_ids")) ### * .stochastify .stochastify <- function(x) { ## Try to ensure that a stochastic matrix is returned. x <- pmax(x, 0) x / rowSums(x) } ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/hierarchy.R0000644000175000017500000002341713036464014013646 0ustar nileshnilesh### * is.cl_hierarchy ## Determine whether an object is a hierarchy. ## Note that hierarchies are n-trees, which can naturally be represented ## by their classes (as done via cl_classes()) or internal ultrametric ## obtained by assigning height one to all splits (as done by ## .cl_ultrametric_from_classes()). ## We typically used the latter, but note that this is an *internal* ## reprsentation. ## User-level, cl_dendrogram objects are indexed hierarchies, and ## cl_hierarchy objects are n-trees. The latter can be "converted" into ## the former (using height one splits) via as.cl_dendrogram(). is.cl_hierarchy <- function(x) UseMethod("is.cl_hierarchy") ## Default method. is.cl_hierarchy.default <- .false ## Package stats: hclust(). is.cl_hierarchy.hclust <- function(x) !is.unsorted(x$height) ## Package cluster: agnes() and diana() give objects inheriting from ## class "twins". is.cl_hierarchy.twins <- .true ## Package cluster: mona(). is.cl_hierarchy.mona <- .true ## Package ape: class "phylo". is.cl_hierarchy.phylo <- function(x) ape::is.ultrametric(x) ## Package clue: (virtual) class "cl_hierarchy". ## Note that "raw" cl_ultrametric objects are *not* hierarchies, as ## these are meant for numeric computations. ## ## Is this really a good idea? ## We can as.hclust() a cl_dendrogram and then it is a cl_hierarchy ... ## is.cl_hierarchy.cl_hierarchy <- .true ### * as.cl_hierarchy ## Note that cl_hierarchy conceptually is a virtual class, so there are ## no prototypes and no cl_hierarchy() creator. .cl_hierarchy_classes <- "cl_hierarchy" as.cl_hierarchy <- function(x) { if(is.cl_hierarchy(x)) { if(!inherits(x, "cl_hierarchy")) .make_container(x, .cl_hierarchy_classes) else x } else .make_container(as.cl_ultrametric(x), .cl_hierarchy_classes) } ### * print.cl_hierarchy print.cl_hierarchy <- function(x, ...) .print_container(x, "cl_hierarchy", ...) ### * Complex.cl_hierarchy ## No Complex() for any kind of hierarchy. Complex.cl_hierarchy <- function(z) stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) ### * Math.cl_hierarchy ## No Math() for any kind of hierarchy. Math.cl_hierarchy <- function(x, ...) stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) ### * Ops.cl_hierarchy Ops.cl_hierarchy <- function(e1, e2) { if(nargs() == 1L) stop(gettextf("Unary '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) ## Only comparisons are supprorted. 
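## A rough sketch of the two user-level views (developer note, never run;
## hclust() and the USArrests data set are merely convenient stand-ins):
if(FALSE) {
    hc <- hclust(dist(USArrests), method = "complete")
    is.cl_dendrogram(hc)        # TRUE, as the heights are monotone
    d <- as.cl_dendrogram(hc)   # indexed hierarchy, keeping the heights
    h <- as.cl_hierarchy(hc)    # n-tree view of the same clustering
}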
if(!(as.character(.Generic) %in% c("<", "<=", ">", ">=", "==", "!="))) stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) if(n_of_objects(e1) != n_of_objects(e2)) stop("Hierarchies must have the same number of objects.") c1 <- cl_classes(e1) c2 <- cl_classes(e2) switch(.Generic, "<=" = all(is.finite(match(c1, c2))), "<" = all(is.finite(match(c1, c2))) && any(is.na(match(c2, c1))), ">=" = all(is.finite(match(c2, c1))), ">" = all(is.finite(match(c2, c1))) && any(is.na(match(c1, c2))), "==" = all(is.finite(match(c1, c2))) && all(is.finite(match(c2, c1))), "!=" = any(is.na(match(c1, c2))) || any(is.na(match(c2, c1)))) } ### * Summary.cl_hierarchy ## ## This is really the same as Summary.cl_partition(). ## Summary.cl_hierarchy <- function(..., na.rm = FALSE) { ok <- switch(.Generic, max = , min = , range = TRUE, FALSE) if(!ok) stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) args <- list(...) switch(.Generic, "min" = cl_meet(cl_ensemble(list = args)), "max" = cl_join(cl_ensemble(list = args)), "range" = { cl_ensemble(min = cl_meet(cl_ensemble(list = args)), max = cl_join(cl_ensemble(list = args))) }) } ### * as.hclust.cl_hierarchy as.hclust.cl_hierarchy <- function(x, ...) as.hclust(.get_representation(x), ...) ### * is.cl_dendrogram ## ## Once we have cl_dendrogram testing, we can simplify cl_hierarchy ## testing. E.g., ## is.cl_hierachy.default <- is.cl_dendrogram ## should be ok, and we can add cl_hierarchy predicates for hierarchies ## which are not dendrograms on top of that. ## is.cl_dendrogram <- function(x) UseMethod("is.cl_dendrogram") ## Default method. is.cl_dendrogram.default <- .false ## Package stats: hclust(). is.cl_dendrogram.hclust <- function(x) !is.unsorted(x$height) ## Package cluster: agnes() and diana() give objects inheriting from ## class "twins". is.cl_dendrogram.twins <- .true ## Package cluster: mona(). is.cl_dendrogram.mona <- .true ## Package ape: class "phylo". is.cl_dendrogram.phylo <- function(x) ape::is.ultrametric(x) ## (We could also support ape's class "matching" via coercion to class ## "phylo".) ## Package clue: (virtual) class "cl_dendrogram". is.cl_dendrogram.cl_dendrogram <- .true ### * as.cl_dendrogram .cl_dendrogram_classes <- c("cl_dendrogram", "cl_hierarchy") as.cl_dendrogram <- function(x) { if(is.cl_dendrogram(x)) { if(!inherits(x, "cl_dendrogram")) .make_container(x, .cl_dendrogram_classes) else x } else .make_container(as.cl_ultrametric(x), .cl_dendrogram_classes) } ### * print.cl_dendrogram print.cl_dendrogram <- function(x, ...) .print_container(x, "cl_dendrogram", ...) ### * plot.cl_dendrogram plot.cl_dendrogram <- function(x, ...) plot(cl_ultrametric(.get_representation(x)), ...) ### * Group methods for cl_dendrogram objects. Ops.cl_dendrogram <- function(e1, e2) { if(nargs() == 1L) stop(gettextf("Unary '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) ## Only comparisons are supprorted. 
if(!(as.character(.Generic) %in% c("<", "<=", ">", ">=", "==", "!="))) stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) u1 <- cl_ultrametric(e1) u2 <- cl_ultrametric(e2) if(length(u1) != length(u2)) stop("Dendrograms must have the same number of objects.") switch(.Generic, "<=" = all(u1 <= u2), "<" = all(u1 <= u2) && any(u1 < u2), ">=" = all(u1 >= u2), ">" = all(u1 >= u2) && any(u1 > u2), "==" = all(u1 == u2), "!=" = any(u1 != u2)) } ### * Summary.cl_dendrogram ## ## This is really the same as Summary.cl_hierarchy() ... ## We cannot really call the poset specific internal meet and join ## functions from here as e.g. max(D, H) (D a dendrogram, H an n-tree) ## should use the n-tree poset functions ... ## However, dispatch for cl_dendrogram should not be needed if we also ## dispatch on cl_hierarchy ... ## ## Summary.cl_dendrogram <- ## function(..., na.rm = FALSE) ## { ## ok <- switch(.Generic, max = , min = , range = TRUE, FALSE) ## if(!ok) ## stop(gettextf("Generic '%s' not defined for \"%s\" objects.", ## .Generic, .Class)) ## args <- list(...) ## switch(.Generic, ## "min" = cl_meet(cl_ensemble(list = args)), ## "max" = cl_join(cl_ensemble(list = args)), ## "range" = { ## cl_ensemble(min = cl_meet(cl_ensemble(list = args)), ## max = cl_join(cl_ensemble(list = args))) ## }) ## } ### * as.hclust.cl_dendrogram ## ## This is really the same as as.hclust.cl_hierarchy() ... ## Dispatch for cl_dendrogram should not be needed if we also dispatch ## on cl_hierarchy ... ## ## as.hclust.cl_dendrogram <- ## function(x, ...) ## as.hclust(.get_representation(x), ...) ### ** cut.cl_dendrogram ## Not perfect as this perhaps return something more "classed" in the ## spirit of clue ... cut.cl_dendrogram <- function(x, ...) cutree(as.hclust(x), ...) ### * Utilities ## To turn a mona object into a cl_dendrogram, we need to be able to ## compute its associated ultrametric. Hence, provide a cophenetic() ## method for mona objects ... cophenetic.mona <- function(x) { no <- length(x$order) ns <- max(x$step) + 1 m <- matrix(NA, no, no) FOO <- function(ind, step, s) { if(length(ind) <= 1) return() grp <- c(0, cumsum(step == s)) ind <- split(ind, grp) len <- length(ind) for(a in seq_len(len)) { for(b in seq_len(a - 1L)) { ## Need both as we currently cannot assume that the ## indices are sorted. Alternatively, work with the ## sequence from one to the number of objects, and ## reorder at the end ... m[ind[[a]], ind[[b]]] <<- s m[ind[[b]], ind[[a]]] <<- s } } ind <- ind[lengths(ind) > 1L] pos <- which(step == s) step <- split(step[-pos], grp[-1][-pos]) if(is.null(step)) return() for(a in seq_along(ind)) FOO(ind[[a]], step[[a]], s + 1) } FOO(x$order, x$step, 1) m[is.na(m)] <- ns m <- ns - m rownames(m) <- rownames(x$data) as.dist(m) } ## And while we're at it ... ## (Of course, as.hclust() should really "know" that a cophenetic() ## method is available ...) as.hclust.mona <- function(x, ...) hclust(cophenetic(x), "single") ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/pava.R0000644000175000017500000000507013036461767012626 0ustar nileshnilesh## A Pool Adjacent Violators Algorithm framework for minimizing problems ## like ## ## \sum_i \sum_{J_i} w_{ij} f(y_{ij}, m_i) ## ## under the constraint m_1 <= ... <= m_n with f a convex function in m. ## Note that this formulation allows for repeated data in each block, ## and hence is more general than the usual pava/isoreg ones. 
A solver ## for the unconstrained \sum_k w_k f(y_k, m) => min! is needed. ## Typical cases are f(y, m) = |y - m|^p for p = 2 (solved by weighted ## mean) and p = 1 (solved by weighted median), respectively. ## A general design issue is whether weights should be supported or not, ## because in the latter case the solver could be a function of a single ## (data) argument only. Let's assume the former for the time being. pava <- function(x, w = NULL, solver = weighted.mean, merger = c) { n <- length(x) if(is.null(w)) { w <- if(is.list(x)) lapply(lengths(x), function(u) rep.int(1, u)) else rep.int(1, n) } else if(is.list(x)) w <- as.list(w) inds <- as.list(seq_len(n)) vals <- mapply(solver, x, w) ## Combine blocks i and i + 1. combine <- if(is.list(x)) { ## In the repeated data case, we explicitly merge the data (and ## weight) lists. function(i) { ## Merge the data and indices, solve, and put things back ## into position i, dropping position i + 1. j <- i + 1L x[[i]] <<- merger(x[[i]], x[[j]]) w[[i]] <<- c(w[[i]], w[[j]]) vals[i] <<- solver(x[[i]], w[[i]]) inds[[i]] <<- c(inds[[i]], inds[[j]]) keep <- seq_len(n)[-j] x <<- x[keep] w <<- w[keep] vals <<- vals[keep] inds <<- inds[keep] n <<- n - 1L } } else { function(i) { ## In the "simple" case, merge only indices and values. j <- i + 1L inds[[i]] <<- c(inds[[i]], inds[[j]]) vals[i] <<- solver(x[inds[[i]]], w[inds[[i]]]) keep <- seq_len(n)[-j] vals <<- vals[keep] inds <<- inds[keep] n <<- n - 1L } } i <- 1L repeat { if(i < n) { if((vals[i] > vals[i + 1])) { combine(i) while((i > 1L) && (vals[i - 1L] > vals[i])) { combine(i - 1L) i <- i - 1L } } else i <- i + 1L } else break } rep.int(vals, lengths(inds)) } clue/R/boot.R0000644000175000017500000000255711304023136012625 0ustar nileshnileshcl_boot <- function(x, B, k = NULL, algorithm = if(is.null(k)) "hclust" else "kmeans", parameters = list(), resample = FALSE) { clusterings <- if(!resample) { x <- rep.int(list(x), B) eval(as.call(c(list(as.name("lapply"), x, algorithm), if(!is.null(k)) list(k), parameters))) } else { replicate(B, expr = { algorithm <- match.fun(algorithm) ## ## This is not quite perfect. We have ## cl_predict() to encapsulate the process of ## assigning objects to classes, but for sampling ## from the objects we assume that they correspond ## to the *rows* of 'x'. Argh. ## ind <- sample(NROW(x), replace = TRUE) train <- if(length(dim(x)) == 2) x[ind, ] else x[ind] out <- eval(as.call(c(list(algorithm, train), if(!is.null(k)) list(k), parameters))) as.cl_partition(cl_predict(out, x, "memberships")) }, simplify = FALSE) } cl_ensemble(list = clusterings) } clue/R/consensus.R0000644000175000017500000011507513140644003013704 0ustar nileshnilesh### * cl_consensus cl_consensus <- function(x, method = NULL, weights = 1, control = list()) { ## ## Interfaces are a matter of taste. ## E.g., one might want to have a 'type' argument indication whether ## hard or soft partitions are sought. One could then do ## cl_consensus(x, method = "euclidean", type = "hard") ## to look for an optimal median (or least squares) hard partition ## (for euclidean dissimilarity). ## For us, "method" really indicates a certain algorithm, with its ## bells and whistles accessed via the 'control' argument. 
## clusterings <- as.cl_ensemble(x) if(!length(clusterings)) stop("Cannot compute consensus of empty ensemble.") weights <- rep_len(weights, length(clusterings)) if(any(weights < 0)) stop("Argument 'weights' has negative elements.") if(!any(weights > 0)) stop("Argument 'weights' has no positive elements.") if(!is.function(method)) { if(!inherits(method, "cl_consensus_method")) { ## Get the method definition from the registry. type <- .cl_ensemble_type(clusterings) if(is.null(method)) method <- .cl_consensus_method_default(type) method <- get_cl_consensus_method(method, type) } method <- method$definition } method(clusterings, weights, control) } ### * .cl_consensus_partition_DWH .cl_consensus_partition_DWH <- function(clusterings, weights, control) { ## ## Could make things more efficient by subscripting on positive ## weights. ## (Note that this means control$order has to be subscripted as ## well.) ## max_n_of_classes <- max(sapply(clusterings, n_of_classes)) ## Control parameters. k <- control$k if(is.null(k)) k <- max_n_of_classes order <- control$order if(is.null(order)) order <- sample(seq_along(clusterings)) clusterings <- clusterings[order] weights <- weights[order] k_max <- max(k, max_n_of_classes) s <- weights / cumsum(weights) s[is.na(s)] <- 0 # Division by zero ... M <- cl_membership(clusterings[[1L]], k_max) for(b in seq_along(clusterings)[-1L]) { mem <- cl_membership(clusterings[[b]], k_max) ## Match classes from conforming memberships. ind <- solve_LSAP(crossprod(M, mem), maximum = TRUE) M <- (1 - s[b]) * M + s[b] * mem[, ind] if(k < k_max) M <- .project_to_leading_columns(M, k) } M <- .cl_membership_from_memberships(M[, seq_len(k), drop = FALSE], k) as.cl_partition(M) } ### * .cl_consensus_partition_AOS .cl_consensus_partition_AOS <- function(clusterings, weights, control, type = c("SE", "HE", "SM", "HM")) { ## The start of a general purpose optimizer for determining ## consensus partitions by minimizing ## \sum_b w_b d(M, M_b) ^ e ## = \sum_b \min_{P_b} w_b f(M, M_b P_b) ^ e ## for the special case where the criterion function is based on ## M and M_b P_b (i.e., column permutations of M_b), as opposed to ## the general case where d(M, M_b) = \min_{P_b} f(M, P_b, M_b) ## handled by .cl_consensus_partition_AOG(). ## ## The AO ("alternative optimization") proceeds by alternatively ## matching the M_b to M by minimizing f(M, M_b P_b) over P_b, and ## fitting M by minimizing \sum_b w_b f(M, M_b P_b) ^ e for fixed ## matchings. ## ## Such a procedure requires three ingredients: a function for ## matching M_b to M (in fact simply replacing M_b by the matched ## M_b P_b); a function for fitting M to the \{M_b P_b\}, and a ## function for computing the value of the criterion function ## corresponding to this fit (so that one can stop if the relative ## improvement is small enough). ## ## For the time being, we only use this to determine soft and hard ## Euclidean least squares consensus partitions (soft and hard ## Euclidean means), so the interface does not yet reflect the ## generality of the approach (which would either pass the three ## functions, or even set up family objects encapsulating the three ## functions). ## ## This special case is provided for efficiency and convenience. ## Using the special form of the criterion function, we can simply ## always work memberships with the same maximal number of columns, ## and with the permuted \{ M_b P_b \}. ## For the time being ... 
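## Schematically, the alternating optimization implemented below (with
## match_memberships(), fit_M() and value() as the three ingredients
## described above, and all memberships kept at a common number of
## columns) is only a sketch of the actual loop, which additionally
## enforces 'maxiter' and is restarted once per element of 'start':
##   memberships <- lapply(memberships, match_memberships, M)
##   old_value <- value(M, memberships, w)
##   repeat {
##       M <- fit_M(memberships, w, k)
##       memberships <- lapply(memberships, match_memberships, M)
##       new_value <- value(M, memberships, w)
##       if(abs(old_value - new_value) <
##          reltol * (abs(old_value) + reltol))
##           break
##       old_value <- new_value
##   }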
type <- match.arg(type) w <- weights / sum(weights) n <- n_of_objects(clusterings) k_max <- max(sapply(clusterings, n_of_classes)) ## Control parameters. k <- control$k if(is.null(k)) k <- k_max maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 100 nruns <- control$nruns reltol <- control$reltol if(is.null(reltol)) reltol <- sqrt(.Machine$double.eps) start <- control$start verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") ## Handle start values and number of runs. if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. start <- list(start) } nruns <- length(start) } else { if(is.null(nruns)) { ## Use nruns only if start is not given. nruns <- 1L } start <- replicate(nruns, .random_stochastic_matrix(n, k), simplify = FALSE) } ## The maximal (possible) number of classes in M and the \{ M_b \}. k_all <- max(k, k_max) value <- switch(type, SE = , HE = function(M, memberships, w) { sum(w * sapply(memberships, function(u) sum((u - M) ^ 2))) }, SM = , HM = function(M, memberships, w) { sum(w * sapply(memberships, function(u) sum(abs(u - M)))) }) ## Return the M[, ind] column permutation of M optimally matching N. match_memberships <- switch(type, SE = , HE = function(M, N) { M[, solve_LSAP(crossprod(N, M), maximum = TRUE), drop = FALSE] }, SM = , HM = function(M, N) { M[, solve_LSAP(.cxdist(N, M, "manhattan")), drop = FALSE] }) ## Function for fitting M to (fixed) memberships \{ M_b P_b \}. ## As we use a common number of columns for all membership matrices ## involved, we need to pass the desired 'k' ... fit_M <- switch(type, SE = function(memberships, w, k) { ## Update M as \sum w_b M_b P_b. M <- .weighted_sum_of_matrices(memberships, w, nrow(M)) ## If k < k_all, "project" as indicated in Gordon & ## Vichi (2001), p. 238. if(k < ncol(M)) M <- .project_to_leading_columns(M, k) M }, HE = , HM = function(memberships, w, k) { ## Compute M as \sum w_b M_b P_b. M <- .weighted_sum_of_matrices(memberships, w, nrow(M)) ## And compute a closest hard partition H(M) from ## that, using the first k columns of M. ids <- max.col(M[ , seq_len(k), drop = FALSE]) .cl_membership_from_class_ids(ids, ncol(M)) }, SM = .l1_fit_M) memberships <- lapply(clusterings, cl_membership, k_all) V_opt <- Inf M_opt <- NULL for(run in seq_along(start)) { if(verbose && (nruns > 1L)) message(gettextf("AOS run: %d", run)) M <- start[[run]] if(k < k_all) M <- cbind(M, matrix(0, nrow(M), k_all - k)) memberships <- lapply(memberships, match_memberships, M) old_value <- value(M, memberships, w) if(verbose) message(gettextf("Iteration: 0 *** value: %g", old_value)) iter <- 1L while(iter <= maxiter) { ## Fit M to the M_b P_b. M <- fit_M(memberships, w, k) ## Match the \{ M_b P_b \} to M. memberships <- lapply(memberships, match_memberships, M) ## Update value. 
new_value <- value(M, memberships, w) if(verbose) message(gettextf("Iteration: %d *** value: %g", iter, new_value)) if(abs(old_value - new_value) < reltol * (abs(old_value) + reltol)) break old_value <- new_value iter <- iter + 1L } if(new_value < V_opt) { converged <- (iter <= maxiter) V_opt <- new_value M_opt <- M } if(verbose) message(gettextf("Minimum: %g", V_opt)) } M <- .stochastify(M_opt) rownames(M) <- rownames(memberships[[1L]]) meta <- list(objval = value(M, memberships, w), converged = converged) M <- .cl_membership_from_memberships(M[, seq_len(k), drop = FALSE], k, meta) as.cl_partition(M) } .random_stochastic_matrix <- function(n, k) { M <- matrix(runif(n * k), n, k) M / rowSums(M) } .l1_fit_M <- function(memberships, w, k) { ## Determine stochastic matrix M with at most k leading nonzero ## columns such that ## ## \sum_b w_b \sum_{i,j} | m_{ij}(b) - m_{ij} | => min ## ## where the sum over j goes from 1 to k. ## ## Clearly, this can be done separately for each row, where we need ## to minimize ## ## \sum_b w_b \sum_j | y_j(b) - x_j | => min ## ## over all probability vectors x. Such problems can e.g. be solved ## via the following linear program: ## ## \sum_b \sum_j w_b e'(u(b) + v(b)) => min ## ## subject to ## ## u(1), v(1), ..., u(B), v(B), x >= 0 ## x + u(b) - v(b) = y(b), b = 1, ..., B ## e'x = 1 ## ## (where e = [1, ..., 1]). ## ## So we have one long vector z of "variables": ## ## z = [u(1)', v(1)', ..., u(B)', v(B)', x']' ## ## of length (2B + 1) k, with x the object of interest. ## Rather than providing a separate function for weighted L1 fitting ## of probability vectors we prefer doing "everything" at once, in ## order to avoid recomputing the coefficients and constraints of ## the associated linear program. B <- length(memberships) L <- (2 * B + 1) * k ## Set up associated linear program. ## Coefficients in the objective function. objective_in <- c(rep(w, each = 2 * k), rep.int(0, k)) ## Constraints. constr_mat <- rbind(diag(1, L), cbind(kronecker(diag(1, B), cbind(diag(1, k), diag(-1, k))), kronecker(rep.int(1, B), diag(1, k))), c(rep.int(0, 2 * B * k), rep.int(1, k))) constr_dir <- c(rep.int(">=", L), rep.int("==", B * k + 1L)) ind <- seq.int(from = 2 * B * k + 1L, length.out = k) nr <- NROW(memberships[[1L]]) nc <- NCOL(memberships[[1L]]) M <- matrix(0, nrow = nr, ncol = k) ## Put the memberships into one big array so that we can get their ## rows more conveniently (and efficiently): memberships <- array(unlist(memberships), c(nr, nc, B)) for(i in seq_len(nr)) { out <- lpSolve::lp("min", objective_in, constr_mat, constr_dir, c(rep.int(0, L), memberships[i, seq_len(k), ], 1)) M[i, ] <- out$solution[ind] } ## Add zero columns if necessary. 
if(k < nc) M <- cbind(M, matrix(0, nr, nc - k)) M } ### ** .cl_consensus_partition_soft_euclidean .cl_consensus_partition_soft_euclidean <- function(clusterings, weights, control) .cl_consensus_partition_AOS(clusterings, weights, control, "SE") ### ** .cl_consensus_partition_hard_euclidean .cl_consensus_partition_hard_euclidean <- function(clusterings, weights, control) .cl_consensus_partition_AOS(clusterings, weights, control, "HE") ### ** .cl_consensus_partition_soft_manhattan .cl_consensus_partition_soft_manhattan <- function(clusterings, weights, control) .cl_consensus_partition_AOS(clusterings, weights, control, "SM") ### ** .cl_consensus_partition_hard_manhattan .cl_consensus_partition_hard_manhattan <- function(clusterings, weights, control) .cl_consensus_partition_AOS(clusterings, weights, control, "HM") ### * .cl_consensus_partition_AOG .cl_consensus_partition_AOG <- function(clusterings, weights, control, type = c("GV1")) { ## The start of a general purpose optimizer for determining ## consensus partitions by minimizing ## \sum_b w_b d(M, M_b) ^ p ## = \sum_b \min_{P_b} w_b f(M, M_b, P_b) ^ e ## for general dissimilarity matrices which involve class matching ## via permutation matrices P_b. ## ## The AO ("Alternative Optimization") proceeds by alternating ## between determining the optimal permutations P_b by minimizing ## f(M, M_b, P_b) ## for fixed M, and fitting M by minimizing ## \sum_b w_b f(M, M_b, P_b) ^ e ## for fixed \{ P_b \}. ## ## We encapsulate this into functions fit_P() and fit_M() (and a ## value() function for the criterion function to be minimized with ## respect to both M and \{ P_b \}, even though the current ## interface does not yet reflect the generality of the approach. ## ## Note that rather than passing on information about the numbers of ## classes (e.g., needed for GV1) and representing all involved ## membership matrices with the same maximal number of columns, we ## use "minimal" representations with no dummy classes (strictly ## speaking, with the possible exception of M, for which the given k ## is used). ## For the time being ... type <- match.arg(type) w <- weights / sum(weights) n <- n_of_objects(clusterings) k_max <- max(sapply(clusterings, n_of_classes)) ## Control parameters. k <- control$k if(is.null(k)) k <- k_max maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 100L nruns <- control$nruns reltol <- control$reltol if(is.null(reltol)) reltol <- sqrt(.Machine$double.eps) start <- control$start verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") ## Handle start values and number of runs. if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. start <- list(start) } nruns <- length(start) } else { if(is.null(nruns)) { ## Use nruns only if start is not given. nruns <- 1L } start <- replicate(nruns, .random_stochastic_matrix(n, k), simplify = FALSE) } ## ## For the given memberships, we can simply use ncol() in the ## computations (rather than n_of_classes(), because we used ## cl_membership() to create them. For M, the number of classes ## could be smaller than the given k "target". ## value <- function(M, permutations, memberships, w) { k <- .n_of_nonzero_columns(M) d <- function(u, p) { ## Compute the squared GV1 dissimilarity between M and u ## based on the M->u class matching p. nc_u <- ncol(u) if(nc_u == k) { ## Simple case: all classes are matched. sum((u[, p] - M) ^ 2) } else { ## Only include the matched non-dummy classes of M .. ind <- seq_len(k) ## ... 
which are matched to non-dummy classes of u. ind <- ind[p[ind] <= nc_u] sum((u[, p[ind]] - M[, ind]) ^ 2) } } sum(w * mapply(d, memberships, permutations)) } fit_P <- function(u, M) { ## Return a permutation representing a GV1 optimal matching of ## the columns of M to the columns of u (note the order of the ## arguments), using a minimal number of dummy classes (i.e., p ## has max(.n_of_nonzero_columns(M), n_of_classes(u)) entries). ## See also .cl_dissimilarity_partition_GV1(). C <- outer(colSums(M ^ 2), colSums(u ^ 2), "+") - 2 * crossprod(M, u) nc_M <- .n_of_nonzero_columns(M) nc_u <- ncol(u) ## (See above for ncol() vs n_of_classes().) if(nc_M < nc_u) C <- rbind(C, matrix(0, nrow = nc_u - nc_M, ncol = nc_u)) else if(nc_M > nc_u) C <- cbind(C, matrix(0, nrow = nc_M, ncol = nc_M - nc_u)) solve_LSAP(C) } fit_M <- function(permutations, memberships, w) { ## Here comes the trickiest part ... ## ## In general, M = [m_{iq}] is determined as follows. ## Write value(M, permutations, memberships, w) as ## \sum_b \sum_i \sum_{p=1}^{k_b} \sum_{q=1}^k ## w_b (u_{ip}(b) - m_{iq})^2 x_{pq}(b) ## where U(b) and X(b) are the b-th membership matrix and the ## permutation matrix representing the M->U(b) non-dummy class ## matching (as always, note the order of the arguments). ## ## Let ## \beta_{iq} = \sum_b \sum_{p=1}^{k_b} w_b u_{ip}(b) x_{pq}(b) ## \alpha_q = \sum_b \sum_{p=1}^{k_b} w_b x_{pq}(b) ## and ## \bar{m}_{iq} = ## \cases{\beta_{iq}/\alpha_q, & $\alpha_q > 0$ \cr ## 0 & otherwise}. ## Then, as the cross-product terms cancel out, the value ## function rewrites as ## \sum_b \sum_i \sum_{p=1}^{k_b} \sum_{q=1}^k ## w_b (u_{ip}(b) - \bar{m}_{iq})^2 x_{pq}(b) ## + \sum_i \sum_q \alpha_q (\bar{m}_{iq} - m_{iq}) ^ 2, ## where the first term is a constant, and the minimum is found ## by solving ## \sum_q \alpha_q (\bar{m}_{iq} - m_{iq}) ^ 2 => min! ## s.t. ## m_{i1}, ..., m_{ik} >= 0, \sum_{iq} m_{iq} = 1. ## ## We can distinguish three cases. ## A. If S_i = \sum_q \bar{m}_{iq} = 1, things are trivial. ## B. If S_i = \sum_q \bar{m}_{iq} < 1. ## B1. If some \alpha_q are zero, then we can choose ## m_{iq} = \bar{m}_{iq} for those q with \alpha_q = 0; ## m_{iq} = 1 / number of zero \alpha's, otherwise. ## B2. If all \alpha_q are positive, we can simply ## equidistribute 1 - S_i over all classes as written ## in G&V. ## C. If S_i > 1, things are not so clear (as equidistributing ## will typically result in violations of the non-negativity ## constraint). We currently revert to using solve.QP() from ## package quadprog, as constrOptim() already failed in very ## simple test cases. ## ## Now consider \sum_{p=1}^{k_b} x_{pq}(b). If k <= k_b for all ## b, all M classes from 1 to k are matched to one of the k_b ## classes in U(b), hence the sum and also \alpha_q are one. ## But then ## \sum_q \bar{m}_{iq} ## = \sum_b \sum_{p=1}^{k_b} w_b u_{ip}(b) x_{pq}(b) ## <= \sum_b \sum_{p=1}^{k_b} w_b u_{ip}(b) ## = 1 ## with equality if k = k_b for all b. I.e., ## * If k = \min_b k_b = \max k_b, we are in case A. ## * If k <= \min_b k_b, we are in case B2. ## And it makes sense to handle these cases explicitly for ## efficiency reasons. ## And now for something completely different ... the code. k <- .n_of_nonzero_columns(M) nr_M <- nrow(M) nc_M <- ncol(M) nc_memberships <- sapply(memberships, ncol) if(k <= min(nc_memberships)) { ## Compute the weighted means \bar{M}. 
M <- .weighted_sum_of_matrices(mapply(function(u, p) u[ , p[seq_len(k)]], memberships, permutations, SIMPLIFY = FALSE), w, nr_M) ## And add dummy classes if necessary. if(k < nc_M) M <- cbind(M, matrix(0, nr_M, nc_M - k)) ## If we always got the same number of classes, we are ## done. Otherwise, equidistribute ... if(k < max(nc_memberships)) M <- pmax(M + (1 - rowSums(M)) / nc_M, 0) return(M) } ## Here comes the general case. ## First, compute the \alpha and \beta. alpha <- rowSums(rep(w, each = k) * mapply(function(p, n) p[seq_len(k)] <= n, permutations, nc_memberships)) ## Alternatively (more literally): ## X <- lapply(permutations, .make_X_from_p) ## alpha1 <- double(length = k) ## for(b in seq_along(permutations)) { ## alpha1 <- alpha1 + ## w[b] * colSums(X[[b]][seq_len(nc_memberships[b]), ]) ## } ## A helper function giving suitably permuted memberships. pmem <- function(u, p) { ## Only matched classes, similar to the one used in value(), ## maybe merge eventually ... v <- matrix(0, nr_M, k) ind <- seq_len(k) ind <- ind[p[ind] <= ncol(u)] if(any(ind)) v[ , ind] <- u[ , p[ind]] v } beta <- .weighted_sum_of_matrices(mapply(pmem, memberships, permutations, SIMPLIFY = FALSE), w, nr_M) ## Alternatively (more literally): ## beta1 <- matrix(0, nr_M, nc_M) ## for(b in seq_along(permutations)) { ## ind <- seq_len(nc_memberships[b]) ## beta1 <- beta1 + ## w[b] * memberships[[b]][, ind] %*% X[[b]][ind, ] ## } ## Compute the weighted means \bar{M}. M <- .cscale(beta, ifelse(alpha > 0, 1 / alpha, 0)) ## Alternatively (see comments for .cscale()): ## M1 <- beta %*% diag(ifelse(alpha > 0, 1 / alpha, 0)) ## And add dummy classes if necessary. if(k < nc_M) M <- cbind(M, matrix(0, nr_M, nc_M - k)) S <- rowSums(M) ## Take care of those rows with row sums < 1. ind <- (S < 1) if(any(ind)) { i_0 <- alpha == 0 if(any(i_0)) M[ind, i_0] <- 1 / sum(i_0) else M[ind, ] <- pmax(M[ind, ] + (1 - S[ind]) / nc_M, 0) } ## Take care of those rows with row sums > 1. ind <- (S > 1) if(any(ind)) { ## Argh. Call solve.QP() for each such i. Alternatively, ## could set up on very large QP, but is this any better? Dmat <- diag(alpha, nc_M) Amat <- t(rbind(rep.int(-1, nc_M), diag(1, nc_M))) bvec <- c(-1, rep.int(0, nc_M)) for(i in which(ind)) M[i, ] <- quadprog::solve.QP(Dmat, alpha * M[i, ], Amat, bvec)$solution } M } memberships <- lapply(clusterings, cl_membership) V_opt <- Inf M_opt <- NULL for(run in seq_along(start)) { if(verbose && (nruns > 1L)) message(gettextf("AOG run: %d", run)) M <- start[[run]] permutations <- lapply(memberships, fit_P, M) old_value <- value(M, permutations, memberships, w) message(gettextf("Iteration: 0 *** value: %g", old_value)) iter <- 1L while(iter <= maxiter) { ## Fit M. M <- fit_M(permutations, memberships, w) ## Fit \{ P_b \}. permutations <- lapply(memberships, fit_P, M) ## Update value. new_value <- value(M, permutations, memberships, w) if(verbose) message(gettextf("Iteration: %d *** value: %g", iter, new_value)) if(abs(old_value - new_value) < reltol * (abs(old_value) + reltol)) break old_value <- new_value iter <- iter + 1L } if(new_value < V_opt) { converged <- (iter <= maxiter) V_opt <- new_value M_opt <- M } if(verbose) message(gettextf("Minimum: %g", V_opt)) } M <- .stochastify(M_opt) ## Seems that M is always kept a k columns ... if not, use ## M <- .stochastify(M_opt[, seq_len(k), drop = FALSE]) rownames(M) <- rownames(memberships[[1L]]) ## Recompute the value, just making sure ... 
permutations <- lapply(memberships, fit_P, M) meta <- list(objval = value(M, permutations, memberships, w), converged = converged) M <- .cl_membership_from_memberships(M, k, meta) as.cl_partition(M) } ### ** .cl_consensus_partition_GV1 .cl_consensus_partition_GV1 <- function(clusterings, weights, control) .cl_consensus_partition_AOG(clusterings, weights, control, "GV1") ### * .cl_consensus_partition_GV3 .cl_consensus_partition_GV3 <- function(clusterings, weights, control) { ## Use a SUMT to solve ## \| Y - M M' \|_F^2 => min ## where M is a membership matrix and Y = \sum_b w_b M_b M_b'. n <- n_of_objects(clusterings) max_n_of_classes <- max(sapply(clusterings, n_of_classes)) ## Control parameters: ## k, k <- control$k if(is.null(k)) k <- max_n_of_classes ## nruns, nruns <- control$nruns ## start. start <- control$start w <- weights / sum(weights) comemberships <- lapply(clusterings, function(x) { ## No need to force a common k here. tcrossprod(cl_membership(x)) }) Y <- .weighted_sum_of_matrices(comemberships, w, n) ## Handle start values and number of runs. if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. start <- list(start) } } else { if(is.null(nruns)) { ## Use nruns only if start is not given. nruns <- 1L } e <- eigen(Y, symmetric = TRUE) ## Use M <- U_k \lambda_k^{1/2}, or random perturbations ## thereof. M <- e$vectors[, seq_len(k), drop = FALSE] * rep(sqrt(e$values[seq_len(k)]), each = n) m <- c(M) start <- c(list(m), replicate(nruns - 1L, m + rnorm(length(m), sd = sd(m) / sqrt(3)), simplify = FALSE)) } y <- c(Y) L <- function(m) sum((y - tcrossprod(matrix(m, n))) ^ 2) P <- .make_penalty_function_membership(n, k) grad_L <- function(m) { M <- matrix(m, n) 4 * c((tcrossprod(M) - Y) %*% M) } grad_P <- .make_penalty_gradient_membership(n, k) out <- sumt(start, L, P, grad_L, grad_P, method = control$method, eps = control$eps, q = control$q, verbose = control$verbose, control = as.list(control$control)) M <- .stochastify(matrix(out$x, n)) rownames(M) <- rownames(cl_membership(clusterings[[1L]])) meta <- list(objval = L(c(M))) M <- .cl_membership_from_memberships(M, k, meta) as.cl_partition(M) } ### * .cl_consensus_partition_soft_symdiff .cl_consensus_partition_soft_symdiff <- function(clusterings, weights, control) { ## Use a SUMT to solve ## \sum_b w_b \sum_{ij} | c_{ij}(b) - c_{ij} | => min ## where C(b) = comembership(M(b)) and C = comembership(M) and M is ## a membership matrix. ## Control parameters: ## gradient, gradient <- control$gradient if(is.null(gradient)) gradient <- TRUE ## k, k <- control$k ## nruns, nruns <- control$nruns ## start. start <- control$start ## Handle start values and number of runs. if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. start <- list(start) } } else if(is.null(nruns)) { ## Use nruns only if start is not given. nruns <- 1L } max_n_of_classes <- max(sapply(clusterings, n_of_classes)) if(is.null(k)) k <- max_n_of_classes B <- length(clusterings) n <- n_of_objects(clusterings) w <- weights / sum(weights) comemberships <- lapply(clusterings, function(x) { ## No need to force a common k here. tcrossprod(cl_membership(x)) }) ## Handle start values and number of runs. if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. start <- list(start) } } else { if(is.null(nruns)) { ## Use nruns only if start is not given. nruns <- 1L } ## Try using a rank k "root" of the weighted median of the ## comemberships as starting value. 
Y <- apply(array(unlist(comemberships), c(n, n, B)), c(1, 2), weighted_median, w) e <- eigen(Y, symmetric = TRUE) ## Use M <- U_k \lambda_k^{1/2}, or random perturbations ## thereof. M <- e$vectors[, seq_len(k), drop = FALSE] * rep(sqrt(e$values[seq_len(k)]), each = n) m <- c(M) start <- c(list(m), replicate(nruns - 1L, m + rnorm(length(m), sd = sd(m) / sqrt(3)), simplify = FALSE)) } L <- function(m) { M <- matrix(m, n) C_M <- tcrossprod(M) ## Note that here (as opposed to hard/symdiff) we take soft ## partitions as is without replacing them by their closest hard ## partitions. sum(w * sapply(comemberships, function(C) sum(abs(C_M - C)))) } P <- .make_penalty_function_membership(n, k) if(gradient) { grad_L <- function(m) { M <- matrix(m, n) C_M <- tcrossprod(M) .weighted_sum_of_matrices(lapply(comemberships, function(C) 2 * sign(C_M - C) %*% M), w, n) } grad_P <- .make_penalty_gradient_membership(n, k) } else grad_L <- grad_P <- NULL out <- sumt(start, L, P, grad_L, grad_P, method = control$method, eps = control$eps, q = control$q, verbose = control$verbose, control = as.list(control$control)) M <- .stochastify(matrix(out$x, n)) rownames(M) <- rownames(cl_membership(clusterings[[1L]])) meta <- list(objval = L(c(M))) M <- .cl_membership_from_memberships(M, k, meta) as.cl_partition(M) } ### * .cl_consensus_partition_hard_symdiff .cl_consensus_partition_hard_symdiff <- function(clusterings, weights, control) { ## ## This is mostly duplicated from relations. ## Once this is on CRAN, we could consider having clue suggest ## relations ... ## comemberships <- lapply(clusterings, function(x) { ## Here, we always turn possibly soft partitions to ## their closest hard partitions. ids <- cl_class_ids(x) outer(ids, ids, "==") ## (Simpler than using tcrossprod() on ## cl_membership().) }) ## Could also create a relation ensemble from the comemberships and ## call relation_consensus(). B <- relations:::.make_fit_relation_symdiff_B(comemberships, weights) k <- control$k control <- control$control ## Note that currently we provide no support for finding *all* ## consensus partitions (but allow for specifying the solver). control$all <- FALSE I <- if(!is.null(k)) { ## ## We could actually get the memberships directly in this case. relations:::fit_relation_LP_E_k(B, k, control) ## } else relations:::fit_relation_LP(B, "E", control) ids <- relations:::get_class_ids_from_incidence(I) names(ids) <- cl_object_names(clusterings) as.cl_hard_partition(ids) } ### * .cl_consensus_hierarchy_cophenetic .cl_consensus_hierarchy_cophenetic <- function(clusterings, weights, control) { ## d <- .weighted_mean_of_object_dissimilarities(clusterings, weights) ## Alternatively: ## as.cl_dendrogram(ls_fit_ultrametric(d, control = control)) control <- c(list(weights = weights), control) as.cl_dendrogram(ls_fit_ultrametric(clusterings, control = control)) } ### * .cl_consensus_hierarchy_manhattan .cl_consensus_hierarchy_manhattan <- function(clusterings, weights, control) { ## Control parameters: ## gradient, gradient <- control$gradient if(is.null(gradient)) gradient <- TRUE ## nruns, nruns <- control$nruns ## start. start <- control$start ## Handle start values and number of runs. if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. start <- list(start) } } else if(is.null(nruns)) { ## Use nruns only if start is not given. 
nruns <- 1L } w <- weights / sum(weights) B <- length(clusterings) ultrametrics <- lapply(clusterings, cl_ultrametric) if(B == 1L) return(as.cl_dendrogram(ultrametrics[[1L]])) n <- n_of_objects(ultrametrics[[1L]]) labels <- cl_object_names(ultrametrics[[1L]]) ## We need to do ## ## \sum_b w_b \sum_{i,j} | u_{ij}(b) - u_{ij} | => min ## ## over all ultrametrics u. Let's use a SUMT (for which "gradients" ## can optionally be switched off) ... L <- function(d) { sum(w * sapply(ultrametrics, function(u) sum(abs(u - d)))) ## Could also do something like ## sum(w * sapply(ultrametrics, cl_dissimilarity, d, ## "manhattan")) } P <- .make_penalty_function_ultrametric(n) if(gradient) { grad_L <- function(d) { ## "Gradient" is \sum_b w_b sign(d - u(b)). .weighted_sum_of_vectors(lapply(ultrametrics, function(u) sign(d - u)), w) } grad_P <- .make_penalty_gradient_ultrametric(n) } else grad_L <- grad_P <- NULL if(is.null(start)) { ## Initialize by "random shaking" of the weighted median of the ## ultrametrics. Any better ideas? ## ## Using var(x) / 3 is really L2 ... ## x <- apply(matrix(unlist(ultrametrics), ncol = B), 1, weighted_median, w) start <- replicate(nruns, x + rnorm(length(x), sd = sd(x) / sqrt(3)), simplify = FALSE) } out <- sumt(start, L, P, grad_L, grad_P, method = control$method, eps = control$eps, q = control$q, verbose = control$verbose, control = as.list(control$control)) d <- .ultrametrify(out$x) meta <- list(objval = L(d)) d <- .cl_ultrametric_from_veclh(d, n, labels, meta) as.cl_dendrogram(d) } ### * .cl_consensus_hierarchy_majority .cl_consensus_hierarchy_majority <- function(clusterings, weights, control) { w <- weights / sum(weights) p <- control$p if(is.null(p)) p <- 1 / 2 else if(!is.numeric(p) || (length(p) != 1) || (p < 1 / 2) || (p > 1)) stop("Parameter 'p' must be in [1/2, 1].") classes <- lapply(clusterings, cl_classes) all_classes <- unique(unlist(classes, recursive = FALSE)) gamma <- double(length = length(all_classes)) for(i in seq_along(classes)) gamma <- gamma + w[i] * !is.na(match(all_classes, classes[[i]])) ## Rescale to [0, 1]. gamma <- gamma / max(gamma) maj_classes <- if(p == 1) { ## Strict consensus tree. all_classes[gamma == 1] } else all_classes[gamma > p] attr(maj_classes, "labels") <- attr(classes[[1L]], "labels") ## ## Stop auto-coercing that to dendrograms once we have suitable ways ## of representing n-trees. as.cl_hierarchy(.cl_ultrametric_from_classes(maj_classes)) ## } ### * Utilities ### ** .cl_consensus_method_default .cl_consensus_method_default <- function(type) { switch(type, partition = "SE", hierarchy = "euclidean", NULL) } ### ** .project_to_leading_columns .project_to_leading_columns <- function(x, k) { ## For a given matrix stochastic matrix x, return the stochastic ## matrix y which has columns from k+1 on all zero which is closest ## to x in the Frobenius distance. y <- x[, seq_len(k), drop = FALSE] y <- cbind(pmax(y + (1 - rowSums(y)) / k, 0), matrix(0, nrow(y), ncol(x) - k)) ## (Use the pmax to ensure that entries remain nonnegative.) } ### ** .make_X_from_p .make_X_from_p <- function(p) { ## X matrix corresponding to permutation p as needed for the AO ## algorithms. I.e., x_{ij} = 1 iff j->p(j)=i. X <- matrix(0, length(p), length(p)) i <- seq_along(p) X[cbind(p[i], i)] <- 1 X } ### ** .n_of_nonzero_columns ## ## Could turn this into n_of_classes.matrix(). .n_of_nonzero_columns <- function(x) sum(colSums(x) > 0) ## ### ** .cscale ## ## Move to utilities eventually ... 
.cscale <- function(A, x) { ## Scale the columns of matrix A by the elements of vector x. ## Formally, A %*% diag(x), but faster. ## Could also use sweep(A, 2, x, "*") rep(x, each = nrow(A)) * A } ## ## .make_penalty_function_membership .make_penalty_function_membership <- function(nr, nc) function(m) { sum(pmin(m, 0) ^ 2) + sum((rowSums(matrix(m, nr)) - 1) ^ 2) } ## .make_penalty_gradient_membership .make_penalty_gradient_membership <- function(nr, nc) function(m) { 2 * (pmin(m, 0) + rep.int(rowSums(matrix(m, nr)) - 1, nc)) } ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/validity.R0000644000175000017500000001003011304023136013470 0ustar nileshnilesh## A slightly polymorphic function, similar to cluster::silhouette() and ## its methods. cl_validity <- function(x, ...) UseMethod("cl_validity") cl_validity.default <- function(x, d, ...) { ## Note that providing methods for classes "cl_partition" and ## "cl_hierarchy" is not good enough ... out <- list() if(.has_object_memberships(x)) { v <- .cl_validity_partition_d_a_f(cl_membership(x), as.matrix(d)) out <- list("Dissimilarity accounted for" = v) } else if(.has_object_dissimilarities(x)) { x <- cl_object_dissimilarities(x) d <- as.dist(d) out <- list("Variance accounted for" = .cl_validity_hierarchy_variance_a_f(x, d), "Deviance accounted for" = .cl_validity_hierarchy_deviance_a_f(x, d)) ## Consider adding e.g. the Agglomerative Coefficient or ## Divisive Coeffcient for more than cluster::agnes() and ## cluster::diana(), respectively. } class(out) <- "cl_validity" out } ## Package cluster: agnes(). cl_validity.agnes <- function(x, ...) { out <- list("Agglomerative coefficient" = x$ac) ## According to the docs, agnes objects always have a diss ## component, but let's be defensive ... if(!is.null(d <- x$diss)) out <- c(out, cl_validity.default(x, d)) class(out) <- "cl_validity" out } ## Package cluster: diana(). cl_validity.diana <- function(x, ...) { out <- list("Divisive coefficient" = x$dc) ## According to the docs, diana objects always have a diss ## component, but let's be defensive ... if(!is.null(d <- x$diss)) out <- c(out, cl_validity.default(x, d)) class(out) <- "cl_validity" out } ## Package clue: (virtual) class "cl_partition". cl_validity.cl_partition <- function(x, ...) cl_validity(.get_representation(x), ...) ## Package clue: class pclust. ## So that this works for all classes extending pclust ... cl_validity.pclust <- function(x, ...) x$validity print.cl_validity <- function(x, ...) { for(nm in names(x)) cat(nm, ": ", x[[nm]], "\n", sep = "") invisible(x) } .cl_validity_partition_d_a_f <- function(m, d) { ## "Dissimilarity accounted for". ## Internal function for computing 1 - a / mean(d), where the ## "average within dissimilarity" a is given by ## \frac{\sum_{i,j} \sum_k m_{ik}m_{jk} d(i,j)} ## {\sum_{i,j} \sum_k m_{ik}m_{jk}} ## where m is the membership matrix and d a *symmetric* matrix of ## dissimilarities. within_sums <- rowSums(sapply(seq_len(ncol(m)), function(k) { z <- m[, k] w <- outer(z, z, "*") c(sum(w * d), sum(w)) })) average_within_d <- within_sums[1L] / within_sums[2L] 1 - average_within_d / mean(d) } .cl_validity_hierarchy_variance_a_f <- function(u, d) { ## *Variance accounted for*. ## See e.g. 
Hubert, Arabie, & Meulman (2006), The structural ## representation of proximity matrices with MATLAB: ## variance_accounted_for = ## 1 - \frac{\sum_{i < j} (d_{ij} - u_{ij}) ^ 2} ## {\sum_{i < j} (d_{ij} - mean(d)) ^ 2} ## As this can be arbitrarily negative, we cut at 0. max(1 - sum((d - u) ^ 2) / sum((d - mean(d)) ^ 2), 0) } .cl_validity_hierarchy_deviance_a_f <- function(u, d) { ## *Deviance accounted for* (i.e., absolute deviation). ## See e.g. Smith (2001), Constructing ultrametric and additive ## trees based on the ${L}_1$ norm, Journal of Classification. ## deviance_accounted_for = ## 1 - \frac{\sum_{i < j} |d_{ij} - u_{ij}|} ## {\sum_{i < j} |d_{ij} - median(d)|} ## As this can be arbitrarily negative, we cut at 0. max(1 - sum(abs(d - u)) / sum(abs(d - median(d))), 0) } ## Silhouette methods silhouette.cl_partition <- function(x, ...) silhouette(.get_representation(x), ...) silhouette.cl_pclust <- function(x, ...) x$silhouette clue/R/prototypes.R0000644000175000017500000000346711304023136014113 0ustar nileshnileshcl_prototypes <- function(x) UseMethod("cl_prototypes") ## No default method. ## Package stats: kmeans() (R 2.1.0 or better). cl_prototypes.kmeans <- function(x) x$centers ## Package cluster: clara() always gives prototypes. cl_prototypes.clara <- function(x) x$medoids ## Package cluster: fanny() never gives prototypes. ## Package cluster: pam() does not give prototypes if given a ## dissimilarity matrix. cl_prototypes.pam <- function(x) { p <- x$medoids if(!is.matrix(p)) stop("Cannot determine prototypes.") p } ## Package cba: ccfkms(). cl_prototypes.ccfkms <- cl_prototypes.kmeans ## Package cclust: cclust(). cl_prototypes.cclust <- cl_prototypes.kmeans ## Package e1071: cmeans() gives objects of class "fclust". cl_prototypes.fclust <- cl_prototypes.kmeans ## Package e1071: cshell(). cl_prototypes.cshell <- cl_prototypes.kmeans ## Package e1071: bclust(). cl_prototypes.bclust <- cl_prototypes.kmeans ## Package flexclust: kcca() returns objects of S4 class "kcca" which ## extends S4 class "flexclust". cl_prototypes.kcca <- function(x) methods::slot(x, "centers") ## Package kernlab: specc() and kkmeans() return objects of S4 class ## "specc". cl_prototypes.specc <- function(x) kernlab::centers(x) ## Package mclust: Mclust(). cl_prototypes.Mclust <- function(x) { p <- x$mu ## For multidimensional models, we get a matrix whose columns are ## the means of each group in the best model, and hence needs to be ## transposed. if(is.matrix(p)) p <- t(p) p } ## Package clue: cl_pam(). cl_prototypes.cl_pam <- function(x) x$prototypes ## Package clue: (virtual) class "cl_partition". cl_prototypes.cl_partition <- function(x) cl_prototypes(.get_representation(x)) ## Package clue: pclust(). cl_prototypes.pclust <- function(x) x$prototypes clue/R/bag.R0000644000175000017500000000276711304023136012416 0ustar nileshnileshcl_bag <- function(x, B, k = NULL, algorithm = "kmeans", parameters = NULL, method = "DFBC1", control = NULL) { ## Currently, method 'DFBC1' (Dudoit-Fridlyand BagClust1) is the ## only one available, and argument 'control' is ignored. ## Construct reference partition. algorithm <- match.fun(algorithm) reference <- eval(as.call(c(list(algorithm, x), if(!is.null(k)) list(k), parameters))) ## Construct bootstrap ensemble. clusterings <- cl_boot(x, B, k, algorithm, parameters, resample = TRUE) ## Construct Dudoit-Fridlyand BagClust1 consensus partitions, ## suitably generalized ... 
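    ## In formulas, with M_ref the membership matrix of the reference
    ## partition and M_b that of the b-th bootstrap partition, the code
    ## below computes
    ##   M = (1 / B) * \sum_b M_b P_b
    ## where each P_b is the column permutation (obtained via
    ## solve_LSAP()) optimally matching the classes of M_b to those of
    ## M_ref.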
## ## In principle, this could be turned into a "constructive" method ## for cl_consensus(), also allowing for weights (straightforward). ## E.g., ## .cl_consensus_partition_DFBC1(clusterings, weights, control) ## where either 'control specifies a reference partition, or the ## first element of 'clusterings' is taken as such. ## k <- max(sapply(c(clusterings, reference), n_of_classes)) M_ref <- cl_membership(reference, k) M <- matrix(0, NROW(M_ref), k) for(b in seq_len(B)) { mem <- cl_membership(clusterings[[b]], k) ## Match classes to reference partition. ind <- solve_LSAP(crossprod(M_ref, mem), maximum = TRUE) M <- M + mem[, ind] } as.cl_partition(cl_membership(as.cl_membership(M / B), k)) } clue/R/lattice.R0000644000175000017500000001356713036514161013321 0ustar nileshnileshcl_meet <- function(x, y) { ## General case. ## x either an ensemble, or x and y two clusterings with the same ## number of objects. if(!inherits(x, "cl_ensemble")) { ## Be nice about error messages. if(n_of_objects(x) != n_of_objects(y)) stop("Arguments 'x' and 'y' must have the same number of objects.") x <- cl_ensemble(x, y) } if(inherits(x, "cl_partition_ensemble")) .cl_meet_partition(x) else if(inherits(x, "cl_dendrogram_ensemble")) .cl_meet_dendrogram(x) else if(inherits(x, "cl_hierarchy_ensemble")) .cl_meet_hierarchy(x) else stop("Cannot compute meet of given clusterings.") } .cl_meet_partition <- function(x) { x <- unique(x) if(length(x) == 1L) return(cl_partition_by_class_ids(cl_class_ids(x[[1L]]))) ids <- seq_len(n_of_objects(x[[1L]])) ## Cross-classify the objects. z <- split(ids, lapply(x, cl_class_ids)) ## Subscript on the non-empty cells to get adjacent class ids. lens <- lengths(z) pos <- which(lens > 0) ids[unlist(z, use.names = FALSE)] <- rep.int(seq_along(z[pos]), lens[pos]) cl_partition_by_class_ids(ids) } .cl_meet_dendrogram <- function(x) { ## Meet of an ensemble of dendrograms. ## We need the maximal ultrametric dominated by the given ones, ## which can be obtained by hierarchical clustering with single ## linkage on the pointwise minima of the ultrametrics. as.cl_dendrogram(hclust(as.dist(do.call(pmin, lapply(x, cl_ultrametric))), "single")) } .cl_meet_hierarchy <- function(x) { ## Meet of an ensemble of n-trees. ## Need to find the classes in *all* n-trees. ## Equivalent to computing a strict majority tree. .cl_consensus_hierarchy_majority(x, rep.int(1, length(x)), list(p = 1)) } cl_join <- function(x, y) { ## General case. ## x either an ensemble, or x and y two clusterings with the same ## number of objects. if(!inherits(x, "cl_ensemble")) { ## Be nice about error messages. if(n_of_objects(x) != n_of_objects(y)) stop("Arguments 'x' and 'y' must have the same number of objects.") x <- cl_ensemble(x, y) } if(inherits(x, "cl_partition_ensemble")) .cl_join_partition(x) else if(inherits(x, "cl_dendrogram_ensemble")) .cl_join_dendrogram(x) else if(inherits(x, "cl_hierarchy_ensemble")) .cl_join_hierarchy(x) else stop("Cannot compute join of given clusterings.") } .cl_join_partition <- function(x) { x <- unique(x) if(length(x) == 1) return(cl_partition_by_class_ids(cl_class_ids(x[[1L]]))) ## Canonicalize: ensure that class ids are always the integers from ## one to the number of classes. n <- sapply(x, n_of_classes) ids <- mapply(function(p, ncp) match(cl_class_ids(p), seq_len(ncp)), x, n, SIMPLIFY = FALSE) ## Order according to the number of classes. ids <- ids[order(n)] ## And now incrementally build the join. jcids <- ids[[1L]] # Class ids of the current join. 
jnc <- length(unique(jcids)) # Number of classes of this. for(b in seq.int(from = 2, to = length(x))) { z <- table(jcids, ids[[b]]) ## It is faster to work on the smaller partition, but this ## should be ensured by the reordering ... ## We need to "join all elements in the same class in at least ## one of the partitions". In the matrix ## C <- (tcrossprod(z) > 0) ## entry i,j is true/one iff z_{ik} z_{jk} > 0 for classes ## i and j in the current join (ids jcids) and some class k in ## the partition with ids[[b]], so that i and j must be joined. ## I.e., C indicates which classes need to be joined directly. ## We need to determine the transitive closure of this relation, ## which can be performed by repeating ## C_{t+1} <- ((C_t %*% C) > 0) ## with C_1 = C until C_t does not change. C_new <- C_old <- C <- (tcrossprod(z) > 0) repeat { C_new <- (C_old %*% C) > 0 if(all(C_new == C_old)) break C_old <- C_new } C <- C_new ## This should now have the connected components. ## Next, compute the map of the join class ids to the ids of ## these components. cnt <- 0 map <- remaining_ids <- seq_len(jnc) while(length(remaining_ids)) { cnt <- cnt + 1 pos <- which(C[remaining_ids[1L], remaining_ids] > 0) map[remaining_ids[pos]] <- cnt remaining_ids <- remaining_ids[-pos] } ## And update the join: jcids <- map[jcids] jnc <- cnt } cl_partition_by_class_ids(jcids) } .cl_join_dendrogram <- function(x) { ## Join of an ensemble of dendrograms. as.cl_dendrogram(do.call(pmax, lapply(x, cl_ultrametric))) } .cl_join_hierarchy <- function(x) { ## Join of an ensemble of n-trees. ## Only exists if the union of all classes of the n-trees is itself ## an n-tree (see Barthelemy et al). classes <- unique(unlist(lapply(x, cl_classes), recursive = FALSE)) ## Now check if this is an n-tree. ## We must verify that for all classes A and B, their intersection ## is A, B, or empty. check <- function(A, B) { m_AB <- match(A, B) m_BA <- match(B, A) ((all(is.na(m_AB)) && all(is.na(m_BA))) || all(is.finite(m_AB)) || all(is.finite(m_BA))) } for(i in seq_along(classes)) { A <- classes[[i]] for(j in seq_along(classes)) if(!check(A, classes[[j]])) stop("Join of given n-trees does not exist.") } as.cl_hierarchy(.cl_ultrametric_from_classes(classes)) } clue/R/utilities.R0000644000175000017500000001015013036513662013675 0ustar nileshnilesh### * Matrix/vector utilities ### * .dist_from_vector .dist_from_vector <- function(x, n = NULL, labels = NULL) { ## This might be useful as as.dist.vector, perhaps without the extra ## argument n then which we only have for minimal performance gains. if(is.null(n)) n <- as.integer((sqrt(1 + 8 * length(x)) + 1) / 2) attr(x, "Size") <- n if(!is.null(labels)) attr(x, "Labels") <- labels class(x) <- "dist" x } ### ** .one_entry_per_column .one_entry_per_column <- function(x, j) { ## For a matrix x and a vector of column indices j_1, ..., j_n where ## n is the number of rows of x, get x[1,j_1], ..., x[n,j_n]. ## ## This used to have ## if(!is.matrix(x)) ## stop("Argument 'x' must be a matrix.") ## but that will fail for sparse matrix classes. ## So let us hope for the best ... ## x[cbind(seq_len(nrow(x)), j)] } ".one_entry_per_column<-" <- function(x, j, value) { ## ## This used to have ## if(!is.matrix(x)) ## stop("Argument 'x' must be a matrix.") ## but that will fail for sparse matrix classes. ## So let us hope for the best ... 
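    ## A commented illustration of the intended semantics (toy values):
    ##   x <- matrix(1:6, 2)                        # 2 x 3
    ##   .one_entry_per_column(x, c(2, 3))          # gives c(3, 6)
    ##   .one_entry_per_column(x, c(2, 3)) <- c(0, 0)
    ## i.e., one entry is read or replaced per *row*, in the column
    ## given by the corresponding element of j.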
## x[cbind(seq_len(nrow(x)), j)] <- value x } ### * .symmetric_matrix_from_veclh .symmetric_matrix_from_veclh <- function(x, n = NULL) { ## In essence the same as as.matrix.dist, but without handling the ## additional attributes that dist objects might have. if(is.null(n)) n <- as.integer((sqrt(1 + 8 * length(x)) + 1) / 2) M <- matrix(0, n, n) M[row(M) > col(M)] <- x M + t(M) } ### * .weighted_mean_of_object_dissimilarities .weighted_mean_of_object_dissimilarities <- function(x, w = NULL) { w <- if(is.null(w)) { rep.int(1, length(x)) } else { rep_len(w, length(x)) } ## (Need the latter because we want w / sum(w) ...) dissimilarities <- lapply(x, cl_object_dissimilarities) m <- rowSums(mapply("*", dissimilarities, w / sum(w))) labels <- attr(dissimilarities[[1L]], "Labels") .dist_from_vector(m, labels = labels) } ### ** .weighted_sum_of_matrices .weighted_sum_of_matrices <- function(x, w = NULL, nr = NULL) { ## Quite often we need to compute weighted sums \sum_b w_b X_b of ## conforming matrices \{ X_b \}. If x is a list containing the ## matrices and w the vector of weights, it seems that one ## reasonably efficient way of doing this is the following. if(is.null(w)) w <- rep.int(1, length(x)) if(is.null(nr)) nr <- NROW(x[[1L]]) matrix(rowSums(mapply("*", x, w)), nr) } ### ** .weighted_sum_of_vectors .weighted_sum_of_vectors <- function(x, w = NULL) { ## See above. if(is.null(w)) w <- rep.int(1, length(x)) rowSums(mapply("*", x, w)) } ### * Containers ## Creator. .make_container <- function(x, classes, properties = NULL) { out <- list(.Data = x, .Meta = properties) class(out) <- unique(classes) out } ## Getters. .get_representation <- function(x) x$.Data .get_properties <- function(x) x$.Meta .get_property <- function(x, which) x$.Meta[[which]] .has_property <- function(x, which) which %in% names(x$.Meta) .get_property_from_object_or_representation <- function(x, which, getter) { if(.has_property(x, which)) .get_property(x, which) else { if(missing(getter)) getter <- get(which) getter(.get_representation(x)) } } ## Methods (sort of). .print_container <- function(x, cls, ...) { writeLines(gettextf("An object of virtual class '%s', with representation:\n", cls)) print(.get_representation(x), ...) invisible(x) } ### * Others weighted_median <- function(x, w = 1, na.rm = FALSE) { w <- rep_len(w, length(x)) if(na.rm && any(ind <- is.na(x))) { x <- x[!ind] w <- w[!ind] } if(any(is.na(x)) || !length(x)) return(NA) w <- w / sum(w) ind <- order(x) x <- x[ind] w <- w[ind] x[which.min(x * (cumsum(w) - 0.5) - cumsum(w * x))] } ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/registration.R0000644000175000017500000003271611754400707014410 0ustar nileshnilesh### ### At least currently, all registries are meant and used for all types ### of clusterings (for the time being, partitions and hierarchies) ### simultaneously. ### ### * Internal stuff. .make_db_key <- function(name, type) paste(type, name, sep = "_") ### * General-purpose stuff. ### ### This currently insists on a given type: maybe it should simply list ### everything split according to type. But hey, it's internal stuff ### anyway (at least for the time being ...) ### get_methods_from_db <- function(db, type) { type <- match.arg(type, c("partition", "hierarchy")) pattern <- sprintf("^%s_", type) sub(pattern, "", grep(pattern, objects(db), value = TRUE)) } get_method_from_db <- function(db, type, name, msg) { ## ## Keep 'msg' here so that gettext()ing could work ... 
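    ## A commented example of the lookup performed below (assuming the
    ## methods registered later in this file): both
    ##   get_cl_consensus_method("SE", "partition")
    ##   get_cl_consensus_method("se", "partition")
    ## retrieve the same entry, as names are matched case-insensitively
    ## (and partially, via pmatch()) against the db keys.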
## type <- match.arg(type, c("partition", "hierarchy")) db_keys <- objects(db) ind <- pmatch(.make_db_key(tolower(name), type), tolower(db_keys)) if(is.na(ind)) stop(msg, call. = FALSE, domain = NA) db[[db_keys[ind]]] } put_method_into_db <- function(db, type, name, value) { type <- match.arg(type, c("partition", "hierarchy")) db[[.make_db_key(name, type)]] <- value } ### * Consensus Method Registration. cl_consensus_methods_db <- new.env() get_cl_consensus_methods <- function(type) get_methods_from_db(cl_consensus_methods_db, type) get_cl_consensus_method <- function(name, type) { get_method_from_db(cl_consensus_methods_db, type, name, gettextf("Invalid consensus method '%s'.", name)) } set_cl_consensus_method <- function(name, type, definition, ...) { ## Register a @code{type} consensus method called @code{name} with ## definition @code{definition}. Provide more information where ## appropriate, e.g., @code{dissimilarity} d and @code{exponent} e ## for methods minimizing \sum_b d(x_b, x) ^ e. put_method_into_db(cl_consensus_methods_db, type, name, .structure(c(list(definition = definition), list(...)), class = "cl_consensus_method")) } set_cl_consensus_method("DWH", "partition", .cl_consensus_partition_DWH, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("soft/euclidean", "partition", .cl_consensus_partition_soft_euclidean, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("SE", "partition", .cl_consensus_partition_soft_euclidean, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("hard/euclidean", "partition", .cl_consensus_partition_hard_euclidean, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("HE", "partition", .cl_consensus_partition_hard_euclidean, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("soft/manhattan", "partition", .cl_consensus_partition_soft_manhattan, dissimilarity = "manhattan", exponent = 1) set_cl_consensus_method("SM", "partition", .cl_consensus_partition_soft_manhattan, dissimilarity = "manhattan", exponent = 1) set_cl_consensus_method("hard/manhattan", "partition", .cl_consensus_partition_hard_manhattan, dissimilarity = "manhattan", exponent = 1) set_cl_consensus_method("HM", "partition", .cl_consensus_partition_hard_manhattan, dissimilarity = "manhattan", exponent = 1) set_cl_consensus_method("GV1", "partition", .cl_consensus_partition_GV1, dissimilarity = "GV1", exponent = 2) set_cl_consensus_method("GV3", "partition", .cl_consensus_partition_GV3, dissimilarity = "comemberships", exponent = 2) set_cl_consensus_method("soft/symdiff", "partition", .cl_consensus_partition_soft_symdiff, dissimilarity = "symdiff", exponent = 1) set_cl_consensus_method("hard/symdiff", "partition", .cl_consensus_partition_hard_symdiff, dissimilarity = "symdiff", exponent = 1) set_cl_consensus_method("cophenetic", "hierarchy", .cl_consensus_hierarchy_cophenetic, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("euclidean", "hierarchy", .cl_consensus_hierarchy_cophenetic, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("manhattan", "hierarchy", .cl_consensus_hierarchy_manhattan, dissimilarity = "manhattan", exponent = 1) set_cl_consensus_method("majority", "hierarchy", .cl_consensus_hierarchy_majority, dissimilarity = "symdiff", exponent = 1) ### * Dissimilarity Method Registration. 
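## A commented sketch of how a further method could be entered into
## this registry ("maxdiff" and its definition are made up purely for
## illustration and are not part of the package):
##   set_cl_dissimilarity_method("maxdiff", "partition",
##                               function(x, y) {
##                                   k <- max(n_of_classes(x),
##                                            n_of_classes(y))
##                                   max(abs(cl_membership(x, k) -
##                                           cl_membership(y, k)))
##                               },
##                               "maximal absolute membership difference")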
cl_dissimilarity_methods_db <- new.env() get_cl_dissimilarity_methods <- function(type) get_methods_from_db(cl_dissimilarity_methods_db, type) get_cl_dissimilarity_method <- function(name, type) get_method_from_db(cl_dissimilarity_methods_db, type, name, gettextf("Invalid dissimilarity method '%s'.", name)) set_cl_dissimilarity_method <- function(name, type, definition, description, ...) put_method_into_db(cl_dissimilarity_methods_db, type, name, .structure(c(list(definition = definition, description = description), list(...)), class = "cl_dissimilarity_method")) set_cl_dissimilarity_method("euclidean", "partition", .cl_dissimilarity_partition_euclidean, "minimal Euclidean membership distance") set_cl_dissimilarity_method("manhattan", "partition", .cl_dissimilarity_partition_manhattan, "minimal Manhattan membership distance") set_cl_dissimilarity_method("comemberships", "partition", .cl_dissimilarity_partition_comemberships, "Euclidean comembership distance") set_cl_dissimilarity_method("symdiff", "partition", .cl_dissimilarity_partition_symdiff, "symmetric difference distance") set_cl_dissimilarity_method("Rand", "partition", .cl_dissimilarity_partition_Rand, "Rand distance") set_cl_dissimilarity_method("GV1", "partition", .cl_dissimilarity_partition_GV1, "Gordon-Vichi Delta_1 dissimilarity") set_cl_dissimilarity_method("BA/A", "partition", .cl_dissimilarity_partition_BA_A, "Boorman/Arabie minimum element moves distance") set_cl_dissimilarity_method("BA/C", "partition", .cl_dissimilarity_partition_BA_C, "Boorman/Arabie minimum lattice moves distance") set_cl_dissimilarity_method("BA/D", "partition", .cl_dissimilarity_partition_BA_D, "Boorman/Arabie pair-bonds distance") set_cl_dissimilarity_method("BA/E", "partition", .cl_dissimilarity_partition_BA_E, "Boorman/Arabie normalized information distance") set_cl_dissimilarity_method("VI", "partition", .cl_dissimilarity_partition_VI, "Variation of information") set_cl_dissimilarity_method("Mallows", "partition", .cl_dissimilarity_partition_Mallows, "Mallows dissimilarity") set_cl_dissimilarity_method("CSSD", "partition", .cl_dissimilarity_partition_CSSD, "Cluster Similarity Sensitive Distance") set_cl_dissimilarity_method("euclidean", "hierarchy", .cl_dissimilarity_hierarchy_euclidean, "Euclidean ultrametric distance") set_cl_dissimilarity_method("manhattan", "hierarchy", .cl_dissimilarity_hierarchy_manhattan, "Manhattan ultrametric distance") set_cl_dissimilarity_method("cophenetic", "hierarchy", .cl_dissimilarity_hierarchy_cophenetic, "cophenetic correlations") set_cl_dissimilarity_method("gamma", "hierarchy", .cl_dissimilarity_hierarchy_gamma, "rate of inversions") set_cl_dissimilarity_method("symdiff", "hierarchy", .cl_dissimilarity_hierarchy_symdiff, "symmetric difference distance") set_cl_dissimilarity_method("Chebyshev", "hierarchy", .cl_dissimilarity_hierarchy_Chebyshev, "Chebyshev distance") set_cl_dissimilarity_method("Lyapunov", "hierarchy", .cl_dissimilarity_hierarchy_Lyapunov, "Lyapunov distance") set_cl_dissimilarity_method("BO", "hierarchy", .cl_dissimilarity_hierarchy_BO, "Boorman/Olivier m_delta tree distance") set_cl_dissimilarity_method("spectral", "hierarchy", .cl_dissimilarity_hierarchy_spectral, "spectral ultrametric distance") ### * Agreement Method Registration. 
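## Once the registrations below have been carried out, the available
## methods can be listed from the registry, e.g. (a commented sketch):
##   get_cl_agreement_methods("partition")
##   get_cl_agreement_methods("hierarchy")
## which should return the names used in the set_cl_agreement_method()
## calls below ("euclidean", "manhattan", "Rand", "cRand", ...).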
cl_agreement_methods_db <- new.env() get_cl_agreement_methods <- function(type) get_methods_from_db(cl_agreement_methods_db, type) get_cl_agreement_method <- function(name, type) get_method_from_db(cl_agreement_methods_db, type, name, gettextf("Invalid agreement method '%s'.", name)) set_cl_agreement_method <- function(name, type, definition, description, ...) put_method_into_db(cl_agreement_methods_db, type, name, .structure(c(list(definition = definition, description = description), list(...)), class = "cl_agreement_method")) set_cl_agreement_method("euclidean", "partition", .cl_agreement_partition_euclidean, "minimal euclidean membership distance") set_cl_agreement_method("manhattan", "partition", .cl_agreement_partition_manhattan, "minimal manhattan membership distance") set_cl_agreement_method("Rand", "partition", .cl_agreement_partition_Rand, "Rand index") set_cl_agreement_method("cRand", "partition", .cl_agreement_partition_cRand, "corrected Rand index") set_cl_agreement_method("NMI", "partition", .cl_agreement_partition_NMI, "normalized mutual information") set_cl_agreement_method("KP", "partition", .cl_agreement_partition_KP, "Katz-Powell index") set_cl_agreement_method("angle", "partition", .cl_agreement_partition_angle, "maximal angle between memberships") set_cl_agreement_method("diag", "partition", .cl_agreement_partition_diag, "maximal co-classification rate") set_cl_agreement_method("FM", "partition", .cl_agreement_partition_FM, "Fowlkes-Mallows index") set_cl_agreement_method("Jaccard", "partition", .cl_agreement_partition_Jaccard, "Jaccard index") set_cl_agreement_method("purity", "partition", .cl_agreement_partition_purity, "purity") set_cl_agreement_method("PS", "partition", .cl_agreement_partition_PS, "Prediction Strength") set_cl_agreement_method("euclidean", "hierarchy", .cl_agreement_hierarchy_euclidean, "euclidean ultrametric distance") set_cl_agreement_method("manhattan", "hierarchy", .cl_agreement_hierarchy_manhattan, "manhattan ultrametric distance") set_cl_agreement_method("cophenetic", "hierarchy", .cl_agreement_hierarchy_cophenetic, "cophenetic correlations") set_cl_agreement_method("angle", "hierarchy", .cl_agreement_hierarchy_angle, "angle between ultrametrics") set_cl_agreement_method("gamma", "hierarchy", .cl_agreement_hierarchy_gamma, "rate of inversions") clue/R/agreement.R0000644000175000017500000002713313435044376013646 0ustar nileshnilesh### * cl_agreement cl_agreement <- function(x, y = NULL, method = "euclidean", ...) { ## ## This code is repeated from cl_dissimilarity(), mutatis mutandis. ## Not really a big surprise ... ## x <- as.cl_ensemble(x) is_partition_ensemble <- (inherits(x, "cl_partition_ensemble") || all(vapply(x, .has_object_memberships, NA))) ## Be nice. if(is.character(y) || is.function(y)) { method <- y y <- NULL } if(is.function(method)) method_name <- "user-defined method" else { if(!inherits(method, "cl_agreement_method")) { ## Get the method definition and description from the ## registry. 
type <- ifelse(is_partition_ensemble, "partition", "hierarchy") method <- get_cl_agreement_method(method, type) } method_name <- method$description method <- method$definition } if(!is.null(y)) { y <- as.cl_ensemble(y) is_partition_ensemble_y <- (inherits(y, "cl_partition_ensemble") || all(vapply(x, .has_object_memberships, NA))) if(!identical(is_partition_ensemble, is_partition_ensemble_y)) stop("Cannot mix partitions and hierarchies.") if(n_of_objects(x) != n_of_objects(y)) stop("All clusterings must have the same number of objects.") ## Build a cross-proximity object of cross-agreements. d <- matrix(0, length(x), length(y)) for(j in seq_along(y)) d[, j] <- sapply(x, method, y[[j]], ...) dimnames(d) <- list(names(x), names(y)) return(cl_cross_proximity(d, method_name, class = "cl_cross_agreement")) } ## Otherwise, build a proximity object of dissimilarities. n <- length(x) d <- vector("list", length = n - 1L) ind <- seq_len(n) while(length(ind) > 1L) { j <- ind[1L] ind <- ind[-1L] d[[j]] <- sapply(x[ind], method, x[[j]], ...) } ## ## We assume that self-agreements are always one ... ## cl_proximity(unlist(d), method_name, labels = names(x), self = rep.int(1, length(x)), size = n, class = "cl_agreement") } ### ** .cl_agreement_partition_euclidean .cl_agreement_partition_euclidean <- function(x, y) { ## ## Upper bound for maximal dissimilarity, maybe improve eventually. d_max <- sqrt(2 * n_of_objects(x)) ## 1 - .cl_dissimilarity_partition_euclidean(x, y) / d_max } ### ** .cl_agreement_partition_manhattan .cl_agreement_partition_manhattan <- function(x, y) { ## ## Upper bound for maximal dissimilarity, maybe improve eventually. d_max <- 2 * n_of_objects(x) ## 1 - .cl_dissimilarity_partition_manhattan(x, y) / d_max } ### ** .cl_agreement_partition_Rand .cl_agreement_partition_Rand <- function(x, y) { n <- n_of_objects(x) ## Handle soft partitions using the corresponding hard ones. ## (At least, for the time being.) x <- table(cl_class_ids(x), cl_class_ids(y)) ## ## The number A of concordant pairs is given by ## A = choose(n,2) + \sum_{i,j} x_{ij}^2 ## - (1/2) * (\sum_i x_{i.}^2 + \sum_j x_{.j}^2) ## = choose(n,2) + 2 \sum_{i,j} choose(x_{ij},2) ## - (\sum_i choose(x_{i.},2) + \sum_j choose(x_{.j},2) ## with the first version certainly much faster to compute. ## 1 + (sum(x^2) - (sum(rowSums(x)^2) + sum(colSums(x)^2)) / 2) / choose(n, 2) } ### ** .cl_agreement_partition_cRand .cl_agreement_partition_cRand <- function(x, y) { if(!is.cl_hard_partition(x) || !is.cl_hard_partition(y)) stop("Can only handle hard partitions.") n <- n_of_objects(x) x <- table(cl_class_ids(x), cl_class_ids(y)) ## ## The basic formula is ## (Sxy - E) / ((Sx. + S.y) / 2 - E) ## where ## Sxy = \sum_{i,j} choose(x_{ij}, 2) ## Sx. = \sum_i choose(x_{i.}, 2) ## S.y = \sum_j choose(x_{.j}, 2) ## and ## E = Sx. * S.y / choose(n, 2) ## We replace the bincoefs by the corresponding sums of squares, ## getting ## (Txy - F) / ((Tx. + T.y) / 2 - F) ## where ## Txy = \sum_{i,j} x_{ij}^2 - n ## Tx. = \sum_i x_{i.}^2 - n ## T.y = \sum_j x_{.j}^2 - n ## and ## F = Tx. * T.y / (n^2 - n) ## Txy <- sum(x ^ 2) - n Tx. <- sum(rowSums(x) ^ 2) - n T.y <- sum(colSums(x) ^ 2) - n F <- Tx. * T.y / (n ^ 2 - n) (Txy - F) / ((Tx. 
+ T.y) / 2 - F) } ### ** .cl_agreement_partition_NMI .cl_agreement_partition_NMI <- function(x, y) { if(!is.cl_hard_partition(x) || !is.cl_hard_partition(y)) stop("Can only handle hard partitions.") x <- table(cl_class_ids(x), cl_class_ids(y)) x <- x / sum(x) m_x <- rowSums(x) m_y <- colSums(x) y <- outer(m_x, m_y) i <- which((x > 0) & (y > 0)) out <- sum(x[i] * log(x[i] / y[i])) e_x <- sum(m_x * log(ifelse(m_x > 0, m_x, 1))) e_y <- sum(m_y * log(ifelse(m_y > 0, m_y, 1))) out / sqrt(e_x * e_y) } ### ** .cl_agreement_partition_KP .cl_agreement_partition_KP <- function(x, y) { ## Agreement measure due to Katz & Powell (1953, Psychometrika), see ## also Messatfa (1992, Journal of Classification). n <- n_of_objects(x) ## Handle soft partitions using the corresponding hard ones. ## (At least, for the time being.) x <- table(cl_class_ids(x), cl_class_ids(y)) A_xy <- sum(x ^ 2) A_x. <- sum(rowSums(x) ^ 2) A_.y <- sum(colSums(x) ^ 2) (n^2 * A_xy - A_x. * A_.y) / sqrt(A_x. * (n^2 - A_x.) * A_.y * (n^2 - A_.y)) } ### ** .cl_agreement_partition_angle .cl_agreement_partition_angle <- function(x, y) { ## Maximal angle between the matched memberships. k <- max(n_of_classes(x), n_of_classes(y)) M_x <- cl_membership(x, k) M_y <- cl_membership(y, k) ## Match classes from conforming memberships. ind <- solve_LSAP(crossprod(M_x, M_y), maximum = TRUE) sum(M_x * M_y[, ind]) / sqrt(sum(M_x ^ 2) * sum(M_y ^ 2)) } ### ** .cl_agreement_partition_diag .cl_agreement_partition_diag <- function(x, y) { ## Maximal co-classification rate. k <- max(n_of_classes(x), n_of_classes(y)) M_x <- cl_membership(x, k) M_y <- cl_membership(y, k) ## Match classes from conforming memberships. ind <- solve_LSAP(crossprod(M_x, M_y), maximum = TRUE) sum(M_x * M_y[, ind]) / n_of_objects(x) } ### ** .cl_agreement_partition_FM .cl_agreement_partition_FM <- function(x, y) { ## Fowlkes-Mallows index. n <- n_of_objects(x) ## Handle soft partitions using the corresponding hard ones. ## (At least, for the time being.) x <- table(cl_class_ids(x), cl_class_ids(y)) (sum(x ^ 2) - n) / sqrt((sum(rowSums(x) ^ 2) - n) * (sum(colSums(x) ^ 2) - n)) } ### ** .cl_agreement_partition_Jaccard .cl_agreement_partition_Jaccard <- function(x, y) { ## Jaccard index. n <- n_of_objects(x) ## Handle soft partitions using the corresponding hard ones. ## (At least, for the time being.) x <- table(cl_class_ids(x), cl_class_ids(y)) Z <- sum(x ^ 2) (Z - n) / (sum(rowSums(x) ^ 2) + sum(colSums(x) ^ 2) - n - Z) } ### ** .cl_agreement_partition_purity .cl_agreement_partition_purity <- function(x, y) { ## Purity of classes of x with respect to those of y: relative ## fraction of "optimally matched and collapsed" joint class ## frequencies, i.e., \sum_i \max_j c_{ij} / n. n <- n_of_objects(x) ## Handle soft partitions using the corresponding hard ones. ## (At least, for the time being.) x <- table(cl_class_ids(x), cl_class_ids(y)) sum(apply(x, 1L, max)) / n } .cl_agreement_partition_PS <- function(x, y) { ## Prediction Strength as used in Tibshirani and Walter (2005), ## "Cluster Validation by Prediction Strength", JCGS. ## See Eqn 2.1 in the reference: this is ## min_l rate of different objects in the same class in partition ## A and in class l in partition B, ## where the min is taken over all classes l of partition B. x <- table(cl_class_ids(x), cl_class_ids(y)) s <- rowSums(x) min((rowSums(x ^ 2) - s) / (s * (s - 1)), na.rm = TRUE) } ## Some computations useful for interpreting some of the above. ## ## Consider two hard partitions A and B and write ## a_{ik} ... 
indicator of object i in class k for partition A ## b_{il} ... indicator of object i in class l for partition B ## (so that the a_{ik} and b_{il} are of course the membership matrices ## of the partitions). ## ## Then obviously ## \sum_i a_{ik} b_{il} = m_{kl} ## is the number of objects in class k for A and in class l for B, and ## \sum_i a_{ik} = m_{k.} = # objects in class k for A ## \sum_i b_{il} = m_{.l} = # objects in class l for B ## ## Number of pairs of objects in the same classes for both A and B: ## \sum_{i, j, k, l} a_{ik} a_{jk} b_{il} b_{jl} ## = \sum_{k, l} \sum_i a_{ik} b_{il} \sum_j a_{jk} b_{jl} ## = \sum_{k, l} m_{kl} ^ 2 ## This includes the n pairs with identical objects, hence: ## Number of distinct pairs of objects in the same classes for both A ## and B: ## (\sum_{k, l} m_{kl} ^ 2 - n) / 2 ## ## Number of pairs of objects in the same class for A: ## \sum_{i, j, k} a_{ik} a_{jk} ## = \sum_k \sum_i a_{ik} \sum_j a_{jk} ## = \sum_k m_{k.} ^ 2 ## Again, this includes the n pairs with identical objects, hence: ## Number of distinct pairs of objects in the same class for A: ## (\sum_k m_{k.} ^ 2 - n) / 2 ## ## Similarly, \sum_l m_{.l} ^ 2 corresponds to the number of pairs of ## objects in the same class for B. ## ## Finally, to get the number of pairs of objects in different classes ## for both A and B, we note that this is the total number of pairs, ## minus the sum of the numbers of those in the same class for A and for ## B, respectively, plus the number of pairs in the same class for both ## A and B. ## ## This makes e.g. the interpretation of some of the Fowlkes-Mallows or ## Rand agreement indices rather straightforward. ### ** .cl_agreement_hierarchy_euclidean .cl_agreement_hierarchy_euclidean <- function(x, y) 1 / (1 + .cl_dissimilarity_hierarchy_euclidean(x, y)) ### ** .cl_agreement_hierarchy_manhattan .cl_agreement_hierarchy_manhattan <- function(x, y) 1 / (1 + .cl_dissimilarity_hierarchy_manhattan(x, y)) ### ** .cl_agreement_hierarchy_cophenetic .cl_agreement_hierarchy_cophenetic <- function(x, y) { ## Cophenetic correlation. if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) cor(cl_object_dissimilarities(x), cl_object_dissimilarities(y)) } ### ** .cl_agreement_hierarchy_angle .cl_agreement_hierarchy_angle <- function(x, y) { ## Angle between ultrametrics. if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) u_x <- cl_object_dissimilarities(x) u_y <- cl_object_dissimilarities(y) sum(u_x * u_y) / sqrt(sum(u_x ^ 2) * sum(u_y ^ 2)) } ### ** .cl_agreement_hierarchy_gamma .cl_agreement_hierarchy_gamma <- function(x, y) 1 - .cl_dissimilarity_hierarchy_gamma(x, y) ### * [.cl_agreement "[.cl_agreement" <- function(x, i, j) { y <- NextMethod("[") if(!inherits(y, "cl_agreement")) { description <- attr(x, "description") return(cl_cross_proximity(y, description = description, class = "cl_cross_agreement")) } y } ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/predict.R0000644000175000017500000002456013267260544013332 0ustar nileshnilesh## ## Maybe add support for "auto" type (class_ids when predicting from a ## hard, memberships when predicting from a soft partition) eventually. ## cl_predict <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) UseMethod("cl_predict") ## Default method. ## Should also work for kcca() from package flexclust. 
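## An illustrative usage sketch (assuming a numeric data matrix 'x';
## hypothetical data, not part of the package):
##   km <- kmeans(x, 3)
##   cl_predict(km)                                     # ids for the training data
##   cl_predict(km, newdata = x, type = "memberships")  # membership matrix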
cl_predict.default <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) .as_cl_class_ids_or_membership(predict(object, newdata, ...), type) ## Package stats: kmeans() (R 2.1.0 or better). cl_predict.kmeans <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) { if(is.null(newdata)) return(.cl_class_ids_or_membership(object, type)) d <- .rxdist(newdata, object$centers) .as_cl_class_ids_or_membership(max.col(-d), type) } ## Package cluster: ## * fanny() cannot make "new" predictions. ## * clara() gives medoids, and takes metric data using Euclidean or ## Manhattan dissimilarities (and we can figure out which by looking ## at the call and the default values). ## * pam() gives medoids, but might have been called with dissimilarity ## data, so is tricky. We can always find out which by looking at the ## medoids: as in the dissimilarity input case this is a vector of ## class labels, and a matrix with in each row the coordinates of one ## medoid otherwise. We then still need to figure out whether ## Euclidean or Manhattan distances were used by looking at the call ## and the default values. ## Both pam() and clara() show that the interfaces could be improved to ## accomodate modern needs, e.g., for bagging. cl_predict.fanny <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) { if(is.null(newdata)) return(.cl_class_ids_or_membership(object, type)) stop("Cannot make new predictions.") } cl_predict.clara <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) { if(is.null(newdata)) return(.cl_class_ids_or_membership(object, type)) ## ## Add support eventually ... if(identical(object$call$stand, TRUE)) warning("Standardization is currently not supported.") ## method <- object$call$metric if(is.null(method)) { ## Not given in the call, hence use default value. method <- eval(formals(cluster::clara)$metric)[1L] ## (Or hard-wire the default value: "euclidean".) } d <- .rxdist(newdata, object$medoids, method) .as_cl_class_ids_or_membership(max.col(-d), type) } cl_predict.pam <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) { if(is.null(newdata)) return(.cl_class_ids_or_membership(object, type)) prototypes <- object$medoids if(!is.matrix(prototypes)) stop("Cannot make new predictions.") ## ## Add support eventually ... if(identical(object$call$stand, TRUE)) warning("Standardization is currently not supported.") ## method <- object$call$metric if(is.null(method)) { ## Not given in the call, hence use default value. method <- eval(formals(cluster::pam)$metric)[1L] ## (Or hard-wire the default value: "euclidean".) } d <- .rxdist(newdata, object$medoids, method) .as_cl_class_ids_or_membership(max.col(-d), type) } ## Package RWeka: clusterers return objects inheriting from ## "Weka_clusterer". cl_predict.Weka_clusterer <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) { if(is.null(newdata)) return(.cl_class_ids_or_membership(object, type)) .as_cl_class_ids_or_membership(predict(object, newdata = newdata, type = type, ...), type) } ## Package cba: ccfkms(). cl_predict.ccfkms <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) { if(is.null(newdata)) return(.cl_class_ids_or_membership(object, type)) .as_cl_class_ids_or_membership(as.vector(predict(object, newdata)$cl), type) } ## Package cba: rockCluster() returns objects of class "rock". 
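## The kmeans/clara/pam methods above share one pattern: nearest-prototype
## classification.  A stand-alone illustrative sketch (hypothetical helper
## name, not part of the package API; uses the internal .rxdist()):
##   nearest_prototype <- function(newdata, centers) {
##       d <- .rxdist(newdata, centers, "euclidean")  # row cross-distances
##       max.col(-d)                                  # index of the closest center
##   }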
## If x is a Rock object, fitted(x) and predict(x, newdata) can result ## in missing classifications, as ## In the case a 'drop' value greater than zero is specified, all ## clusters with size equal or less than this value are removed from ## the classifier. Especially, 'fitted' uses a threshold of one ## because for singleton clusters the neighborhood is empty. cl_predict.rock <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) { if(is.null(newdata)) newdata <- object$x ids <- as.vector(predict(object, newdata, ...)$cl) .as_cl_class_ids_or_membership(ids, type) } ## Package cclust: cclust(). cl_predict.cclust <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) { ## Package cclust provides predict.cclust() which returns (again) an ## object of class "cclust", but does not give the labels of the ## original data in case no new data are given. if(is.null(newdata)) return(.cl_class_ids_or_membership(object, type)) .as_cl_class_ids_or_membership(predict(object, newdata), type) } ## Package e1071: cmeans() gives objects of class "fclust". cl_predict.fclust <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) { if(is.null(newdata)) return(.cl_class_ids_or_membership(object, type)) ## Note that the 'fclust' objects returned by cmeans() do not always ## directly contain the information on the fuzzification parameter m ## and the distance (Euclidean/Manhattan) employed, so we have to ## engineer this from the matched call and the default arguments. nms <- names(object$call) ## Note that we cannot directly use object$call$m, as this could ## give the 'method' argument if 'm' was not given. m <- if("m" %in% nms) object$call$m else { ## Not given in the call, hence use default value. formals(e1071::cmeans)$m ## (Or hard-wire the default value: 2.) } method <- if("dist" %in% nms) object$call$dist else { ## Not given in the call, hence use default value. formals(e1071::cmeans)$dist ## (Or hard-wire the default value: "euclidean".) } d <- .rxdist(newdata, object$centers, method) power <- c(m, if(method == "euclidean") 2 else 1) M <- .memberships_from_cross_dissimilarities(d, power) .as_cl_class_ids_or_membership(M, type) } ## Package e1071: cshell(). cl_predict.cshell <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) { if(is.null(newdata)) return(.cl_class_ids_or_membership(object, type)) ## Not surprisingly, this is rather similar to what we do for fclust ## objects. Only dissimiliraties (and exponents) need to be ## computed differently ... nms <- names(object$call) m <- if("m" %in% nms) object$call$m else { ## Not given in the call, hence use default value. formals(e1071::cshell)$m ## (Or hard-wire the default value: 2.) } method <- if("dist" %in% nms) object$call$dist else { ## Not given in the call, hence use default value. formals(e1071::cshell)$dist ## (Or hard-wire the default value: "euclidean".) } d <- .rxdist(newdata, object$centers, method) d <- sweep(d, 2, object$radius) ^ 2 M <- .memberships_from_cross_dissimilarities(d, m) .as_cl_class_ids_or_membership(M, type) } ## Package e1071: bclust(). ## ## One might argue that it would be better to use the 'dist.method' ## employed for the hierarchical clustering, but it seems that class ## labels ("clusters") are always assigned using Euclidean distances. cl_predict.bclust <- cl_predict.kmeans ## ## Package flexclust: kcca() returns objects of S4 class "kcca" which ## extends S4 class "flexclust". 
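## The soft methods above all delegate to
## .memberships_from_cross_dissimilarities(d, power).  As a rough
## illustrative sketch (the exact handling of exponents and zero
## distances lives in that helper), the weights are of the usual fuzzy
## c-means form, u_{bj} proportional to d_{bj}^{-e/(m-1)}; e.g. for one
## object with distances d = c(1, 4), m = 2 and e = 1:
##   w <- c(1, 4)^(-1); w / sum(w)   # 0.8 0.2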
cl_predict.kcca <- cl_predict.default ## Package flexmix: class "flexmix". cl_predict.flexmix <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) { if(is.null(newdata)) return(.cl_class_ids_or_membership(object, type)) .as_cl_class_ids_or_membership(modeltools::posterior(object, newdata, ...), type) } ## Package mclust: Mclust(). cl_predict.Mclust <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) { if(is.null(newdata)) return(.cl_class_ids_or_membership(object, type)) pred <- predict(object, newdata, ...) type <- match.arg(type) if(type == "class_ids") as.cl_class_ids(pred$classification) else as.cl_membership(pred$z) } ## Package movMF: movMF(). cl_predict.movMF <- cl_predict.Weka_clusterer ## Package clue: pclust(). cl_predict.pclust <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) { if(is.null(newdata)) return(.cl_class_ids_or_membership(object, type)) d <- object$family$D(newdata, object$prototypes) power <- c(object$m, object$family$e) M <- .memberships_from_cross_dissimilarities(d, power) .as_cl_class_ids_or_membership(M, type) } ## Package clue: (virtual) class "cl_partition". cl_predict.cl_partition <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) cl_predict(.get_representation(object), newdata = newdata, type, ...) ## Internal helpers: this looks a bit silly, but makes the rest of the ## code look nicer ... .cl_class_ids_or_membership <- function(x, type = c("class_ids", "memberships")) { type <- match.arg(type) if(type == "class_ids") cl_class_ids(x) else cl_membership(x) } .as_cl_class_ids_or_membership <- function(x, type = c("class_ids", "memberships")) { type <- match.arg(type) if(type == "class_ids") { if(is.matrix(x)) { ## Same as for cl_class_ids.cl_membership(). as.cl_class_ids(.structure(max.col(x), names = rownames(x))) } else as.cl_class_ids(x) } else as.cl_membership(x) } clue/R/lsap.R0000644000175000017500000000147012036747337012635 0ustar nileshnileshsolve_LSAP <- function(x, maximum = FALSE) { if(!is.matrix(x) || any(x < 0)) stop("x must be a matrix with nonnegative entries.") nr <- nrow(x) nc <- ncol(x) if(nr > nc) stop("x must not have more rows than columns.") if(nc > nr) x <- rbind(x, matrix(2 * sum(x), nc - nr, nc)) if(maximum) x <- max(x) - x storage.mode(x) <- "double" out <- .C(C_solve_LSAP, x, as.integer(nc), p = integer(nc))$p + 1 out <- out[seq_len(nr)] class(out) <- "solve_LSAP" out } print.solve_LSAP <- function(x, ...) { writeLines(c("Optimal assignment:", gsub("x", " ", strwrap(paste(seq_along(x), x, sep = "x=>x", collapse = ", "))))) invisible(x) } clue/R/dissimilarity.R0000644000175000017500000004447113435044476014572 0ustar nileshnilesh### * cl_dissimilarity cl_dissimilarity <- function(x, y = NULL, method = "euclidean", ...) { x <- as.cl_ensemble(x) is_partition_ensemble <- (inherits(x, "cl_partition_ensemble") || all(vapply(x, .has_object_memberships, NA))) ## Be nice. if(is.character(y) || is.function(y)) { method <- y y <- NULL } if(is.function(method)) method_name <- "user-defined method" else { if(!inherits(method, "cl_dissimilarity_method")) { ## Get the method definition and description from the ## registry. 
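## An illustrative usage sketch (assuming an ensemble 'ens' of partitions
## of the same data; hypothetical object, not part of the package):
##   cl_dissimilarity(ens)                     # pairwise, default "euclidean"
##   cl_dissimilarity(ens, cl_consensus(ens))  # cross-dissimilarities to a consensus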
type <- ifelse(is_partition_ensemble, "partition", "hierarchy") method <- get_cl_dissimilarity_method(method, type) } method_name <- method$description method <- method$definition } if(!is.null(y)) { y <- as.cl_ensemble(y) is_partition_ensemble_y <- (inherits(y, "cl_partition_ensemble") || all(vapply(x, .has_object_memberships, NA))) if(!identical(is_partition_ensemble, is_partition_ensemble_y)) stop("Cannot mix partitions and hierarchies.") if(n_of_objects(x) != n_of_objects(y)) stop("All clusterings must have the same number of objects.") ## Build a cross-proximity object of cross-dissimilarities. d <- matrix(0, length(x), length(y)) for(j in seq_along(y)) d[, j] <- sapply(x, method, y[[j]], ...) dimnames(d) <- list(names(x), names(y)) return(cl_cross_proximity(d, method_name, class = "cl_cross_dissimilarity")) } ## Otherwise, build a proximity object of dissimilarities. n <- length(x) d <- vector("list", length = n - 1L) ind <- seq_len(n) while(length(ind) > 1L) { j <- ind[1L] ind <- ind[-1L] d[[j]] <- sapply(x[ind], method, x[[j]], ...) } cl_proximity(unlist(d), method_name, labels = names(x), size = n, class = c("cl_dissimilarity", "cl_proximity", "dist")) } ### ** .cl_dissimilarity_partition_euclidean .cl_dissimilarity_partition_euclidean <- function(x, y) { k <- max(n_of_classes(x), n_of_classes(y)) M_x <- cl_membership(x, k) M_y <- cl_membership(y, k) ## Match classes from conforming memberships. ind <- solve_LSAP(crossprod(M_x, M_y), maximum = TRUE) sqrt(sum((M_x - M_y[, ind]) ^ 2)) } ### ### ** .cl_dissimilarity_partition_manhattan .cl_dissimilarity_partition_manhattan <- function(x, y) { k <- max(n_of_classes(x), n_of_classes(y)) M_x <- cl_membership(x, k) M_y <- cl_membership(y, k) C <- .cxdist(M_x, M_y, "manhattan") ind <- solve_LSAP(C) sum(C[cbind(seq_along(ind), ind)]) } ### ** .cl_dissimilarity_partition_comemberships .cl_dissimilarity_partition_comemberships <- function(x, y) { ## We used to have the straightforward ## C_x <- tcrossprod(cl_membership(x)) # M_x M_x' ## C_y <- tcrossprod(cl_membership(y)) # M_y M_y' ## sum((C_x - C_y) ^ 2) / n_of_objects(x) ^ 2 ## But note that ## \| AA' - BB' \|^2 ## = tr((AA' - BB')'(AA' - BB') ## = tr(A'A A'A) - 2 tr(A'B B'A) + tr(B'B B'B) ## = \| A'A \|^2 - 2 \| A'B \|^2 + \| B'B \|^2 ## which can be computed much more efficiently as all involved cross ## product matrices are "small" ... k <- max(n_of_classes(x), n_of_classes(y)) M_x <- cl_membership(x, k) M_y <- cl_membership(y, k) sqrt(sum(crossprod(M_x) ^ 2) - 2 * sum(crossprod(M_x, M_y) ^ 2) + sum(crossprod(M_y) ^ 2)) } ### ** .cl_dissimilarity_partition_symdiff .cl_dissimilarity_partition_symdiff <- function(x, y) { ## Cardinality of the symmetric difference of the partitions ## regarded as binary equivalence relations, i.e., the number of ## discordant pairs. ## Handle soft partitions using the corresponding hard ones. ## (At least, for the time being.) n <- n_of_objects(x) .cl_dissimilarity_partition_Rand(x, y) * choose(n, 2) } ### ** .cl_dissimilarity_partition_Rand .cl_dissimilarity_partition_Rand <- function(x, y) { ## Handle soft partitions using the corresponding hard ones. ## (At least, for the time being.) 
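## A quick numeric check of the cross-product identity used for the
## "comemberships" dissimilarity above (illustrative sketch, with
## made-up hard memberships):
##   A <- diag(3)[c(1, 1, 2, 3), ]; B <- diag(3)[c(1, 2, 2, 3), ]
##   sum((tcrossprod(A) - tcrossprod(B))^2)                                  # 4
##   sum(crossprod(A)^2) - 2 * sum(crossprod(A, B)^2) + sum(crossprod(B)^2)  # 4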
1 - .cl_agreement_partition_Rand(x, y) } ### ** .cl_dissimilarity_partition_GV1 .cl_dissimilarity_partition_GV1 <- function(x, y) { k_x <- n_of_classes(x) k_y <- n_of_classes(y) M_x <- cl_membership(x, k_x) M_y <- cl_membership(y, k_y) C <- outer(colSums(M_x ^ 2), colSums(M_y ^ 2), "+") - 2 * crossprod(M_x, M_y) if(k_x < k_y) C <- rbind(C, matrix(0, nrow = k_y - k_x, ncol = k_y)) else if(k_x > k_y) C <- cbind(C, matrix(0, nrow = k_x, ncol = k_x - k_y)) ind <- solve_LSAP(C) sqrt(sum(C[cbind(seq_along(ind), ind)])) ## (Note that this sum really only includes matched non-dummy ## classes.) } ### ** .cl_dissimilarity_partition_BA_A .cl_dissimilarity_partition_BA_A <- function(x, y) { .cl_dissimilarity_partition_manhattan(as.cl_hard_partition(x), as.cl_hard_partition(y)) / 2 ## Could to this more efficiently, of course ... } ### ** .cl_dissimilarity_partition_BA_C .cl_dissimilarity_partition_BA_C <- function(x, y) { n_of_classes(x) + n_of_classes(y) - 2 * n_of_classes(cl_join(x, y)) } ### ** .cl_dissimilarity_partition_BA_D .cl_dissimilarity_partition_BA_D <- .cl_dissimilarity_partition_Rand ### ** .cl_dissimilarity_partition_BA_E .cl_dissimilarity_partition_BA_E <- function(x, y) { z <- table(cl_class_ids(x), cl_class_ids(y)) z <- z / sum(z) ## Average mutual information between the partitions. y <- outer(rowSums(z), colSums(z)) i <- which((z > 0) & (y > 0)) I <- sum(z[i] * log(z[i] / y[i])) ## Entropy of meet(x, y). i <- which(z > 0) H <- - sum(z[i] * log(z[i])) 1 - I / H } ### ** .cl_dissimilarity_partition_VI .cl_dissimilarity_partition_VI <- function(x, y, weights = 1) { ## Variation of information for general "soft clusterings", cf ## Section 5.2. in Meila (2002). weights <- rep_len(weights, n_of_objects(x)) weights <- weights / sum(weights) M_x <- cl_membership(x) ## Weighted marginal distribution of x: m_x <- colSums(weights * M_x) M_y <- cl_membership(y) ## Weighted marginal distribution of y: m_y <- colSums(weights * M_y) gamma <- crossprod(weights * M_x, M_y) delta <- outer(m_x, m_y) ## Entropy of x: H_x <- - sum(m_x * log(ifelse(m_x > 0, m_x, 1))) ## Entropy of y: H_y <- - sum(m_y * log(ifelse(m_y > 0, m_y, 1))) ## VI is H_x + H_y minus twice the (weighted) joint information. i <- which((gamma > 0) & (delta > 0)) H_x + H_y - 2 * sum(gamma[i] * log(gamma[i] / delta[i])) } ### ** .cl_dissimilarity_partition_Mallows .cl_dissimilarity_partition_Mallows <- function(x, y, p = 1, alpha = NULL, beta = NULL) { ## Currently, no "real" primal-dual solver for minimum cost flow ## problems, and lpSolve::lp.transport() seems to work only for ## integer bounds. Hence, rather than using ## ## C <- .cxdist(cl_membership(x), cl_membership(y), ## "minkowski", p) ^ p ## n_x <- nrow(C) ## n_y <- ncol(C) ## if(is.null(alpha)) ## alpha <- rep.int(1 / n_x, n_x) ## else { ## alpha <- rep_len(alpha, n_x) ## alpha <- alpha / sum(alpha) ## } ## ## etc right away, ensure a square cost matrix so that we can have ## integer bounds for at least the default case. k <- max(n_of_classes(x), n_of_classes(y)) M_x <- cl_membership(x, k) M_y <- cl_membership(y, k) C <- .cxdist(M_x, M_y, "minkowski", p) ^ p if(is.null(alpha)) alpha <- rep.int(1, k) if(is.null(beta)) beta <- rep.int(1, k) lpSolve::lp.transport(C, "min", rep.int("==", k), alpha, rep.int("==", k), beta, integers = NULL)$objval ^ (1 / p) } ### ** .cl_dissimilarity_partition_CSSD .cl_dissimilarity_partition_CSSD <- function(x, y, L = NULL, alpha = NULL, beta = NULL, ...) { ## Cluster Similarity Sensitive Distance. ## Reference: D. Zhou, J. Li and H. 
Zha (2005), ## A new Mallows distance based metric for comparing clusterings. ## See .cl_dissimilarity_partition_Mallows() re solving cost flow ## problems. ## Dissimilarity is defined by minimizing ## \sum_{k,l} (1 - 2 w_{kl} / (alpha_k + beta_l)) L_{kl} ## where ## L_{kl} = \sum_i m_{x;ik} m_{y;il} distance(p_{x;k}, p_{y;l}) ## with m and p the memberships and prototypes, respectively. ## If we get matrices of prototypes, use .rxdist; otherwise, the ## user needs to specify an L function or matrix. k_x <- n_of_classes(x) k_y <- n_of_classes(y) M_x <- cl_membership(x, k_x) M_y <- cl_membership(y, k_y) if(!is.matrix(L)) { p_x <- cl_prototypes(x) p_y <- cl_prototypes(y) if(is.matrix(p_x) && is.matrix(p_y) && is.null(L)) L <- .rxdist(p_x, p_y, ...) else if(is.function(L)) L <- L(p_x, p_y) else stop("Cannot compute prototype distances.") } C <- crossprod(M_x, M_y) * L if(is.null(alpha)) alpha <- rep.int(1, k_x) if(is.null(beta)) beta <- rep.int(1, k_y) sum(C) - 2 * lpSolve::lp.transport(C / outer(alpha, beta, "+"), "max", rep.int("==", k_x), alpha, rep.int("==", k_y), beta, integers = NULL)$objval } ### ** .cl_dissimilarity_hierarchy_euclidean .cl_dissimilarity_hierarchy_euclidean <- function(x, y, weights = 1) { if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) u <- cl_object_dissimilarities(x) v <- cl_object_dissimilarities(y) sqrt(sum(weights * (u - v) ^ 2)) } ### ** .cl_dissimilarity_hierarchy_manhattan .cl_dissimilarity_hierarchy_manhattan <- function(x, y, weights = 1) { if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) u <- cl_object_dissimilarities(x) v <- cl_object_dissimilarities(y) sum(weights * abs(u - v)) } ### ** .cl_dissimilarity_hierarchy_cophenetic .cl_dissimilarity_hierarchy_cophenetic <- function(x, y) { if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) u <- cl_object_dissimilarities(x) v <- cl_object_dissimilarities(y) 1 - cor(u, v) ^ 2 } ### ** .cl_dissimilarity_hierarchy_gamma .cl_dissimilarity_hierarchy_gamma <- function(x, y) { ## ## This is a dissimilarity measure that works for arbitrary ## dissimilarities, see e.g. Bock. ## (And the current implementation finally respects this ...) ## if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) u <- cl_object_dissimilarities(x) v <- cl_object_dissimilarities(y) n <- length(u) .C(C_clue_dissimilarity_count_inversions, as.double(u), as.double(v), as.integer(n), count = double(1L)) $ count / choose(n, 2) } ### ** .cl_dissimilarity_hierarchy_symdiff .cl_dissimilarity_hierarchy_symdiff <- function(x, y) { ## Cardinality of the symmetric difference of the n-trees when ## regarded as sets of subsets (classes) of the set of objects. x <- cl_classes(x) y <- cl_classes(y) sum(is.na(match(x, y))) + sum(is.na(match(y, x))) } ### ** .cl_dissimilarity_hierarchy_Chebyshev .cl_dissimilarity_hierarchy_Chebyshev <- function(x, y) { if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) u <- cl_object_dissimilarities(x) v <- cl_object_dissimilarities(y) max(abs(u - v)) } ### ** .cl_dissimilarity_hierarchy_Lyapunov .cl_dissimilarity_hierarchy_Lyapunov <- function(x, y) { if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) q <- cl_object_dissimilarities(x) / cl_object_dissimilarities(y) if(is.matrix(q)) q <- q[lower.tri(q)] log(max(q) / min(q)) } ### ** .cl_dissimilarity_hierarchy_BO .cl_dissimilarity_hierarchy_BO <- function(x, y, delta, ...) 
{ ## Compute Boorman-Olivier (1973) dendrogram ("valued tree") ## dissimilarities of the form ## ## m_\delta(T_1, T_2) ## = \int_0^\infty \delta(P_1(\alpha), P_2(\alpha)) d\alpha ## ## where the trees (dendrograms) are defined as right-continuous ## maps from [0, \Infty) to the partition lattice. ## We can compute this as follows. Take the ultrametrics and use ## as.hclust() to detemine the heights \alpha_1(k) and \alpha_2(l) ## of the splits. Let \alpha_i be the sequence obtained by ## combining these two. Then ## ## m_\delta ## = \sum_{i=0}^{L-1} (\alpha_{i+1} - \alpha_i) ## \delta(P_1(\alpha_i), P_2(\alpha_i)) ## ## We use cutree() for computing the latter partitions. As we ## already have the hclust representations, we should be able to do ## things more efficiently ... if(inherits(x, "hclust")) t_x <- x else if(inherits(x, "cl_ultrametric")) t_x <- as.hclust(x) else if(is.cl_dendrogram(x)) t_x <- as.hclust(cl_ultrametric(x)) else return(NA) if(inherits(y, "hclust")) t_y <- y else if(inherits(y, "cl_ultrametric")) t_y <- as.hclust(y) else if(is.cl_dendrogram(y)) t_y <- as.hclust(cl_ultrametric(y)) else return(NA) if(is.unsorted(t_x$height) || is.unsorted(t_y$height)) return(NA) alpha <- sort(unique(c(t_x$height, t_y$height))) cuts_x <- cutree(t_x, h = alpha) cuts_y <- cutree(t_y, h = alpha) deltas <- mapply(cl_dissimilarity, lapply(split(cuts_x, col(cuts_x)), as.cl_partition), lapply(split(cuts_y, col(cuts_y)), as.cl_partition), MoreArgs = list(delta, ...)) sum(diff(alpha) * deltas[-length(deltas)]) } ### ** .cl_dissimilarity_hierarchy_spectral .cl_dissimilarity_hierarchy_spectral <- function(x, y) { if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) u <- cl_object_dissimilarities(x) v <- cl_object_dissimilarities(y) svd(as.matrix(u - v))$d[1L] } ### * as.dist.cl_dissimilarity as.dist.cl_dissimilarity <- function(m, diag = FALSE, upper = FALSE) { y <- c(m) ## Fill non-inherited attributes with default values. attributes(y) <- c(attributes(m)[c("Size", "Labels")], Diag = diag, Upper = upper, call = match.call()) ## (Note that as.dist.default() does not automatically add ## 'method'.) class(y) <- "dist" y } ### * [.cl_dissimilarity "[.cl_dissimilarity" <- function(x, i, j) { y <- NextMethod("[") if(!inherits(y, "cl_dissimilarity")) { description <- attr(x, "description") return(cl_cross_proximity(y, description = description, class = "cl_cross_dissimilarity")) } y } ### .cxdist .cxdist <- function(A, B, method = c("euclidean", "manhattan", "minkowski"), ...) { ## Return the column cross distance matrix of A and B. ## I.e., the matrix C = [c_{j,k}] with ## c_{j,k} = distance(A[, j], B[, k]) ## Currently, only Manhattan (L1) distances are provided. ## Extensions to Minkowski or even more distances (a la dist()) ## could be added eventually. ## ## Possible implementations include ## ## foo_a <- function(A, B) ## apply(B, 2, function(u) colSums(abs(A - u))) ## foo_d <- function(A, B) { ## out <- as.matrix(dist(rbind(t(A), t(B)), "manhattan")) ## dimnames(out) <- NULL ## nc_B <- NCOL(B) ## out[seq(from = NCOL(A) + 1, length.out = nc_B), seq_len(nc_B)] ## } ## foo_f <- function(A, B) { ## out <- matrix(0, NCOL(A), NCOL(B)) ## for(j in seq_len(NCOL(A))) ## for(k in seq_len(NCOL(B))) ## out[j, k] = sum(abs(A[, j] - B[, k])) ## out ## } ## ## The one actually used seems to be the best performer, with the ## "for" version a close second (note that "typically", A and B have ## much fewer columns than rows). ## only few columns method <- match.arg(method) ## Workhorse. 
FOO <- switch(method, "euclidean" = function(M) sqrt(colSums(M ^ 2)), "manhattan" = function(M) colSums(abs(M)), "minkowski" = { ## Power needs to be given. p <- list(...)[[1L]] function(M) (colSums(abs(M) ^ p)) ^ (1 / p) }) out <- matrix(0, NCOL(A), NCOL(B)) for(k in seq_len(NCOL(B))) out[, k] <- FOO(A - B[, k]) out } ### .rxdist .rxdist <- function(A, B, method = c("euclidean", "manhattan", "minkowski"), ...) { ## Return the row cross distance matrix of A and B. ## I.e., the matrix C = [c_{j,k}] with ## c_{j,k} = distance(A[j, ], B[k, ]) ## ## Could also do something like ## ind <- seq_len(NROW(B)) ## as.matrix(dist(rbind(B, A)))[-ind, ind] ## but that is *very* inefficient for the "usual" data by prototype ## case (where NROW(B) << NROW(A)). ## ## No fancy pmatching for methods for the time being. method <- match.arg(method) ## Workhorse: Full A, single row of b. FOO <- switch(method, "euclidean" = function(A, b) sqrt(rowSums(sweep(A, 2, b) ^ 2)), "manhattan" = function(A, b) rowSums(abs(sweep(A, 2, b))), "minkowski" = { ## Power needs to be given. p <- list(...)[[1L]] function(A, b) (rowSums(abs(sweep(A, 2, b)) ^ p)) ^ (1 / p) }) out <- matrix(0, NROW(A), NROW(B)) for(k in seq_len(NROW(B))) out[, k] <- FOO(A, B[k, ]) out } ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/addtree.R0000644000175000017500000003111013435044354013271 0ustar nileshnilesh### * ls_fit_addtree ls_fit_addtree <- function(x, method = c("SUMT", "IP", "IR"), weights = 1, control = list()) { if(!inherits(x, "dist")) x <- as.dist(x) ## Catch some special cases right away. if(attr(x, "Size") <= 3L) return(as.cl_addtree(x)) if(.non_additivity(x, max = TRUE) == 0) return(as.cl_addtree(x)) ## Handle argument 'weights'. ## This is somewhat tricky ... if(is.matrix(weights)) { weights <- as.dist(weights) if(length(weights) != length(x)) stop("Argument 'weights' must be compatible with 'x'.") } else weights <- rep_len(weights, length(x)) if(any(weights < 0)) stop("Argument 'weights' has negative elements.") if(!any(weights > 0)) stop("Argument 'weights' has no positive elements.") method <- match.arg(method) switch(method, SUMT = .ls_fit_addtree_by_SUMT(x, weights, control), IP = { .ls_fit_addtree_by_iterative_projection(x, weights, control) }, IR = { .ls_fit_addtree_by_iterative_reduction(x, weights, control) }) } ### ** .ls_fit_addtree_by_SUMT .ls_fit_addtree_by_SUMT <- function(x, weights = 1, control = list()) { ## Control parameters: ## gradient, gradient <- control$gradient if(is.null(gradient)) gradient <- TRUE ## nruns, nruns <- control$nruns ## start, start <- control$start ## Handle start values and number of runs. if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. start <- list(start) } } else if(is.null(nruns)) { ## Use nruns only if start is not given. nruns <- 1L } w <- weights / sum(weights) n <- attr(x, "Size") labels <- attr(x, "Labels") ## Handle missing values in x along the lines of de Soete (1984): ## set the corresponding weights to 0, and impute by the weighted ## mean. ind <- which(is.na(x)) if(any(ind)) { w[ind] <- 0 x[ind] <- weighted.mean(x, w, na.rm = TRUE) } L <- function(d) sum(w * (d - x) ^ 2) P <- .make_penalty_function_addtree(n) if(gradient) { grad_L <- function(d) 2 * w * (d - x) grad_P <- .make_penalty_gradient_addtree(n) } else { grad_L <- grad_P <- NULL } if(is.null(start)) { ## Initialize by "random shaking". Use sd() for simplicity. 
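## An illustrative top-level usage sketch for the SUMT fit (hypothetical
## data; 'nruns' controls the number of random restarts handled here):
##   d <- dist(USArrests[1:10, ])
##   a <- ls_fit_addtree(d, method = "SUMT", control = list(nruns = 5))
##   plot(a)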
start <- replicate(nruns, x + rnorm(length(x), sd = sd(x) / sqrt(3)), simplify = FALSE) } ## And now ... d <- sumt(start, L, P, grad_L, grad_P, method = control$method, eps = control$eps, q = control$q, verbose = control$verbose, control = as.list(control$control))$x ## Round to enforce additivity, and hope for the best ... .cl_addtree_from_addtree_approximation(d, n, labels) } .make_penalty_function_addtree <- function(n) function(d) { (.non_additivity(.symmetric_matrix_from_veclh(d, n)) + sum(pmin(d, 0) ^ 2)) } .make_penalty_gradient_addtree <- function(n) function(d) { gr <- matrix(.C(C_deviation_from_additivity_gradient, as.double(.symmetric_matrix_from_veclh(d, n)), as.integer(n), gr = double(n * n))$gr, n, n) gr[row(gr) > col(gr)] + 2 * sum(pmin(d, 0)) } ### ** .ls_fit_addtree_by_iterative_projection ## ## Functions ## .ls_fit_addtree_by_iterative_projection() ## .ls_fit_addtree_by_iterative_reduction() ## are really identical apart from the name of the C routine they call. ## (But will this necessarily always be the case in the future?) ## Merge maybe ... ## .ls_fit_addtree_by_iterative_projection <- function(x, weights = 1, control = list()) { if(any(diff(weights))) warning("Non-identical weights currently not supported.") labels <- attr(x, "Labels") x <- as.matrix(x) n <- nrow(x) ## Control parameters: ## maxiter, maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 10000L ## nruns, nruns <- control$nruns ## order, order <- control$order ## tol, tol <- control$tol if(is.null(tol)) tol <- 1e-8 ## verbose. verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") ## Handle order and nruns. if(!is.null(order)) { if(!is.list(order)) order <- as.list(order) if(!all(vapply(order, function(o) all(sort(o) == seq_len(n)), NA))) stop("All given orders must be valid permutations.") } else { if(is.null(nruns)) nruns <- 1L order <- replicate(nruns, sample(n), simplify = FALSE) } ind <- lower.tri(x) L <- function(d) sum(weights * (x - d)[ind] ^ 2) d_opt <- NULL v_opt <- Inf for(run in seq_along(order)) { if(verbose) message(gettextf("Iterative projection run: %d", run)) d <- .C(C_ls_fit_addtree_by_iterative_projection, as.double(x), as.integer(n), as.integer(order[[run]] - 1L), as.integer(maxiter), iter = integer(1L), as.double(tol), as.logical(verbose))[[1L]] v <- L(d) if(v < v_opt) { v_opt <- v d_opt <- d } } d <- matrix(d_opt, n) dimnames(d) <- list(labels, labels) .cl_addtree_from_addtree_approximation(as.dist(d)) } ### ** .ls_fit_addtree_by_iterative_reduction .ls_fit_addtree_by_iterative_reduction <- function(x, weights = 1, control = list()) { if(any(diff(weights))) warning("Non-identical weights currently not supported.") labels <- attr(x, "Labels") x <- as.matrix(x) n <- nrow(x) ## Control parameters: ## maxiter, maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 10000L ## nruns, nruns <- control$nruns ## order, order <- control$order ## tol, tol <- control$tol if(is.null(tol)) tol <- 1e-8 ## verbose. verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") ## Handle order and nruns. 
if(!is.null(order)) { if(!is.list(order)) order <- as.list(order) if(!all(vapply(order, function(o) all(sort(o) == seq_len(n)), NA))) stop("All given orders must be valid permutations.") } else { if(is.null(nruns)) nruns <- 1L order <- replicate(nruns, sample(n), simplify = FALSE) } ind <- lower.tri(x) L <- function(d) sum(weights * (x - d)[ind] ^ 2) d_opt <- NULL v_opt <- Inf for(run in seq_along(order)) { if(verbose) message(gettextf("Iterative reduction run: %d", run)) d <- .C(C_ls_fit_addtree_by_iterative_reduction, as.double(x), as.integer(n), as.integer(order[[run]] - 1L), as.integer(maxiter), iter = integer(1L), as.double(tol), as.logical(verbose))[[1L]] v <- L(d) if(v < v_opt) { v_opt <- v d_opt <- d } } d <- matrix(d_opt, n) dimnames(d) <- list(labels, labels) .cl_addtree_from_addtree_approximation(as.dist(d)) } ### * .non_additivity .non_additivity <- function(x, max = FALSE) { if(!is.matrix(x)) x <- .symmetric_matrix_from_veclh(x) .C(C_deviation_from_additivity, as.double(x), as.integer(nrow(x)), fn = double(1L), as.logical(max))$fn } ### * ls_fit_centroid ls_fit_centroid <- function(x) { ## Fit a centroid additive tree distance along the lines of Carroll ## & Pruzansky (1980). In fact, solving ## ## \sum_{i,j: i \ne j} (\delta_{ij} - (g_i + g_j)) ^ 2 => min_g ## ## gives \sum_{j: j \ne i} (g_i + g_j - \delta_{ij}) = 0, or (also ## in Barthemely & Guenoche) ## ## (n - 2) g_i + \sum_j g_j = \sum_{j: j \ne i} \delta_{ij} ## ## which after summing over all i and some manipulations eventually ## gives ## ## g_i = \frac{1}{n-2} (v_i - m), ## ## v_i = \sum_{j: j \ne i} \delta_{ij} ## s = \frac{1}{2(n-1)} \sum_{i,j: j \ne i} \delta_{ij} n <- attr(x, "Size") if(n <= 2L) return(as.cl_addtree(0 * x)) x <- as.matrix(x) g <- rowSums(x) / (n - 2) - sum(x) / (2 * (n - 1) * (n - 2)) as.cl_addtree(as.dist(.make_centroid_matrix(g))) } .make_centroid_matrix <- function(g) { y <- outer(g, g, "+") diag(y) <- 0 y } ### * as.cl_addtree as.cl_addtree <- function(x) UseMethod("as.cl_addtree") as.cl_addtree.default <- function(x) { if(inherits(x, "cl_addtree")) x else if(is.atomic(x) || inherits(x, "cl_ultrametric")) .cl_addtree_from_veclh(x) else if(is.matrix(x)) { ## Should actually check whether the matrix is symmetric, >= 0 ## and satisfies the 4-point conditions ... .cl_addtree_from_veclh(as.dist(x)) } else if(is.cl_dendrogram(x)) .cl_addtree_from_veclh(cl_ultrametric(x)) else stop("Cannot coerce to 'cl_addtree'.") } as.cl_addtree.phylo <- function(x) .cl_addtree_from_veclh(as.dist(cophenetic(x))) ## Phylogenetic trees with edge/branch lengths yield additive tree ## dissimilarities. ### * .cl_addtree_from_veclh .cl_addtree_from_veclh <- function(x, size = NULL, labels = NULL) { cl_proximity(x, "Additive tree distances", labels = labels, size = size, class = c("cl_addtree", "cl_dissimilarity", "cl_proximity", "dist")) } ### * .cl_addtree_from_addtree_approximation .cl_addtree_from_addtree_approximation <- function(x, size = NULL, labels = NULL) { ## Turn x into an addtree after possibly rounding to non-additivity ## significance (note that this is not guaranteed to work ...). mnum <- .non_additivity(x, max = TRUE) x <- round(x, floor(abs(log10(mnum)))) .cl_addtree_from_veclh(x, size = size, labels = labels) } ### * .decompose_addtree .decompose_addtree <- function(x, const = NULL) { ## Decompose an addtree into an ultrametric and a centroid ## distance. 
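## A quick check of the closed form used in ls_fit_centroid() above
## (illustrative only): for distances that are exactly of centroid type,
## d_{ij} = g_i + g_j, the formula recovers the g_i.
##   g0 <- c(1, 2, 3, 4); x <- .make_centroid_matrix(g0); n <- 4
##   rowSums(x) / (n - 2) - sum(x) / (2 * (n - 1) * (n - 2))   # 1 2 3 4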
## If 'const' is not given, we take the root as half way between the ## diameter of the addtree, and choose a minimal constant to ensure ## non-negativity (but not positivity) of the ultrametric. ## As this is all slightly dubious and it is not quite clear how ## much positivity we want in the ultrametric of the decomposition, ## we keep this hidden. For plotting addtrees, the choice of the ## constant does not seem to matter. x <- as.matrix(x) n <- nrow(x) ## Determine diameter. ind <- which.max(x) - 1 u <- ind %% n + 1 v <- ind %/% n + 1 if(!is.null(const)) g <- pmax(x[u, ], x[v, ]) - const else { g <- pmax(x[u, ], x[v, ]) - x[u, v] / 2 u <- x - .make_centroid_matrix(g) k <- - min(u) g <- g - k / 2 } u <- x - .make_centroid_matrix(g) names(g) <- rownames(x) ## Ensure a valid ultrametric. d <- .ultrametrify(as.dist(u)) u <- .cl_ultrametric_from_veclh(d, nrow(x), rownames(x)) ## Note that we return the centroid distances to the root, and not ## between the objects (as.dist(.make_centroid_matrix(g))) ... list(Ultrametric = as.cl_ultrametric(u), Centroid = g) } ### * plot.cl_addtree plot.cl_addtree <- function(x, ...) { ## Construct a dendrogram-style representation of the addtree with ## the root half way between the diameter, and plot. y <- .decompose_addtree(x, max(x)) u <- y$Ultrametric g <- y$Centroid ## We halve the scale of the ultrametric, and add the maximal g from ## the centroid. h <- hclust(as.dist(u / 2), "single") h$height <- h$height + max(g) d <- as.dendrogram(h) ## Now modify the heights of the leaves so that the objects giving ## the diameter of the addtree end up with height zero. g <- max(g) - g names(g) <- labels(g) d <- dendrapply(d, function(n) { if(!is.leaf(n)) return(n) attr(n, "height") <- g[attr(n, "label")] n }) ## And finally plot plot(d, ...) } ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/medoid.R0000644000175000017500000001562611450366117013137 0ustar nileshnilesh### * cl_medoid cl_medoid <- function(x, method = "euclidean") { ## ## In principle we can get the same using pam(k = 1)$medoids. ## clusterings <- as.cl_ensemble(x) if(!length(clusterings)) stop("Cannot compute medoid of empty ensemble.") dissimilarities <- as.matrix(cl_dissimilarity(clusterings, method = method)) clusterings[[which.min(rowSums(dissimilarities))]] } ### * cl_pam cl_pam <- function(x, k, method = "euclidean", solver = c("pam", "kmedoids")) { clusterings <- as.cl_ensemble(x) if(!length(clusterings)) stop("Cannot compute medoid partition of empty ensemble.") ## Actually, we should have at least k distinct elements in the ## ensemble ... make_cl_pam <- function(class_ids, medoid_ids, medoids, criterion, description) .structure(list(cluster = class_ids, medoid_ids = medoid_ids, prototypes = medoids, criterion = criterion, description = description), class = "cl_pam") if(k == 1L) { ## Simplify matters if a global medoid is sought. dissimilarities <- cl_dissimilarity(clusterings, method = method) description <- attr(dissimilarities, "description") dissimilarities <- as.matrix(dissimilarities) row_sums <- rowSums(dissimilarities) medoid_id <- which.min(row_sums) criterion <- row_sums[medoid_id] return(make_cl_pam(as.cl_class_ids(seq_along(clusterings)), medoid_id, clusterings[medoid_id], criterion, description)) } solver <- match.arg(solver) ## Argh. We really want to run k-medoids for the unique elements of ## the ensemble, but pam() only works for symmetric dissimilarties. 
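## An illustrative usage sketch for cl_pam() (assuming a data matrix 'x';
## hypothetical data, not part of the package):
##   ens <- cl_ensemble(list = replicate(20, kmeans(x, 4), simplify = FALSE))
##   p <- cl_pam(ens, 3, method = "euclidean", solver = "pam")
##   p$prototypes   # the three medoid clusterings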
## As computing cluster dissimilarities is typically expensive, use ## the unique elements for doing so in any case. values <- unique(clusterings) ## Positions of ensemble members in the unique values. positions <- match(clusterings, values) ## Dissimilarities between unique values. dissimilarities <- cl_dissimilarity(values, method = method) description <- attr(dissimilarities, "description") dissimilarities <- as.matrix(dissimilarities) ## For pam(), we need the dissimilarities for all objects. if(solver == "pam") { dissimilarities <- dissimilarities[positions, positions] party <- cluster::pam(as.dist(dissimilarities), k) class_ids <- cl_class_ids(party) medoid_ids <- cl_medoid_ids(party) medoids <- clusterings[medoid_ids] criterion <- sum(dissimilarities[cbind(seq_along(class_ids), medoid_ids[class_ids])]) } else { ## Counts of unique values. counts <- tabulate(positions) ## Weigh according to the counts. Should be straightforward to ## add "case weights" as well ... dissimilarities <- counts * dissimilarities ## Now partition. party <- kmedoids(dissimilarities, k) ## And build the solution from this ... criterion <- party$criterion ## First, things for the unique values. medoid_ids <- cl_medoid_ids(party) medoids <- values[medoid_ids] class_ids <- cl_class_ids(party) ## Second, things for all objects. class_ids <- class_ids[positions] medoid_ids <- match(medoid_ids, positions) } make_cl_pam(class_ids, medoid_ids, medoids, criterion, description) } print.cl_pam <- function(x, ...) { class_ids <- cl_class_ids(x) fmt <- "A k-medoid partition of a cluster ensemble with %d elements into %d classes (dissimilarity measure: %s)." writeLines(c(strwrap(gettextf(fmt, n_of_objects(x), n_of_classes(x), x$description)))) writeLines(gettext("Class ids:")) print(class_ids, ...) writeLines(gettext("Criterion:")) print(x$criterion, ...) invisible(x) } ### * cl_medoid_ids ## Little helper, internal for the time being ... cl_medoid_ids <- function(x) UseMethod("cl_medoid_ids") cl_medoid_ids.cl_pam <- function(x) x$medoid_ids cl_medoid_ids.kmedoids <- function(x) x$medoid_ids cl_medoid_ids.clara <- function(x) x$i.med cl_medoid_ids.pam <- function(x) x$id.med ### * kmedoids kmedoids <- function(x, k) { ## ## For the time being, 'x' is assumed a dissimilarity object or a ## matrix of dissimilarities. ## Let's worry about the interface later. ## x <- as.matrix(x) n <- nrow(x) ## Use the formulation in Gordon & Vichi (1998), Journal of ## Classification, [P4'], page 279, with variables c(vec(X), z), but ## with rows and cols interchanged (such that x_{ij} is one iff o_i ## has medoid o_j, and z_j is one iff o_j is a medoid). make_constraint_mat <- function(n) { nsq <- n * n rbind(cbind(kronecker(rbind(rep.int(1, n)), diag(1, n)), matrix(0, n, n)), cbind(diag(1, nsq), kronecker(diag(1, n), rep.int(-1, n))), c(double(nsq), rep.int(1, n)), cbind(matrix(0, n, nsq), diag(1, n))) } make_constraint_dir <- function(n) rep.int(c("=", "<=", "=", "<="), c(n, n * n, 1, n)) make_constraint_rhs <- function(n, k) rep.int(c(1, 0, k, 1), c(n, n * n, 1, n)) ## ## We could try a relaxation without integrality constraints first, ## which seems to "typically work" (and should be faster). To test ## for integrality, use something like ## if(identical(all.equal(y$solution, round(y$solution)), TRUE)) ## y <- lpSolve::lp("min", c(c(x), double(n)), make_constraint_mat(n), make_constraint_dir(n), make_constraint_rhs(n, k), int.vec = seq_len(n * (n + 1))) ## Now get the class ids and medoids. 
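## An illustrative usage sketch for kmedoids() itself (hypothetical data;
## requires lpSolve):
##   d <- as.matrix(dist(USArrests[1:8, ]))
##   km <- kmedoids(d, 2)
##   km$medoid_ids; km$cluster; km$criterion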
ind <- which(matrix(y$solution[seq_len(n * n)], n) > 0, arr.ind = TRUE) medoid_ids <- unique(ind[, 2L]) class_ids <- seq_len(n) class_ids[ind[, 1L]] <- match(ind[, 2L], medoid_ids) .structure(list(cluster = class_ids, medoid_ids = medoid_ids, criterion = y$objval), class = "kmedoids") } print.kmedoids <- function(x, ...) { fmt <- "A k-medoids clustering of %d objects into %d clusters." writeLines(gettextf(fmt, n_of_objects(x), n_of_classes(x))) writeLines(gettext("Medoid ids:")) print(cl_medoid_ids(x), ...) writeLines(gettext("Class ids:")) print(unclass(cl_class_ids(x)), ...) writeLines(gettext("Criterion:")) print(x$criterion, ...) invisible(x) } clue/R/pclust.R0000644000175000017500000004307013036513600013173 0ustar nileshnilesh### * cl_pclust cl_pclust <- function(x, k, method = NULL, m = 1, weights = 1, control = list()) { ## Partition a cluster ensemble x into (at most) k classes by ## minimizing ## \sum_b \sum_j w_b u_{bj}^m d(x_b, p_j) ^ e ## for "suitable" prototypes p_1, ..., p_k, where 1 <= m < \infty, ## with 1 corresponding to hard (secondary) partitions, and d a ## dissimilarity measure (such as Euclidean dissimilarity of ## partitions or hierarchies). ## ## The algorithm works whenever there is a consensus method for ## solving ## \sum_b u_{bj}^m d(x_b, p) ^ e => \min_p ## ## As we refer to consensus methods by their *name* (e.g., 'HBH'), ## we rely on the registration mechanism (set_cl_consensus_method()) ## to provide the required information about d and e. clusterings <- as.cl_ensemble(x) type <- .cl_ensemble_type(clusterings) if(type == "partition") { ## Canonicalize by turning into an ensemble of partitions ## represented by membership matrices with the same (minimal) ## number of columns. memberships <- lapply(clusterings, cl_membership, max(sapply(clusterings, n_of_classes))) clusterings <- cl_ensemble(list = lapply(memberships, as.cl_partition)) } if(!inherits(method, "cl_consensus_method")) { ## Get required information on d and e from the registry. if(is.null(method)) method <- .cl_consensus_method_default(type) method <- get_cl_consensus_method(method, type) ## Note that this avoids registry lookup in subsequent calls to ## cl_consensus(). if(is.null(method$exponent)) stop("No information on exponent in consensus method used.") e <- method$exponent if(is.null(method$dissimilarity)) stop("No information on dissimilarity in consensus method used.") d <- function(x, y = NULL) cl_dissimilarity(x, y, method = method$dissimilarity) } family <- pclust_family(D = d, C = method$definition, e = e) out <- pclust(x, k, family, m, weights, control) ## Massage the results a bit. dissimilarities <- as.matrix(d(clusterings) ^ e) out$call <- match.call() out <- .structure(c(out, list(silhouette = silhouette(out$cluster, dmatrix = dissimilarities), validity = cl_validity(cl_membership(out), dissimilarities), ## ## Information about d and e is also in the ## family returned, of course. Trying to be ## nice to users by "directly" providing d ## and e is currently of limited usefulness ## as the pclust representation is not ## directly available to users. d = d, e = e ## )), class = unique(c("cl_pclust", class(out)))) as.cl_partition(out) } print.cl_pclust <- function(x, ...) 
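## An illustrative usage sketch for cl_pclust() above (assuming a data
## matrix 'x'; hypothetical data, not part of the package):
##   ens <- cl_ensemble(list = replicate(20, kmeans(x, 3), simplify = FALSE))
##   cl_pclust(ens, k = 2, m = 2)   # soft partition of the clusterings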
{ txt <- if(x$m == 1) gettextf("A hard partition of a cluster ensemble with %d elements into %d classes.", n_of_objects(x), n_of_classes(x)) else gettextf("A soft partition (degree m = %f) of a cluster ensemble with %d elements into %d classes.", x$m, n_of_objects(x), n_of_classes(x)) writeLines(strwrap(txt)) NextMethod("print", x, header = FALSE) print(x$validity, ...) invisible(x) } ### * pclust pclust <- function(x, k, family, m = 1, weights = 1, control = list()) { ## A general purpose alternating optimization algorithm for ## prototype-based partitioning. ## For now, assume family specifies three functions: ## * A dissimilarity function D() for data and prototypes. ## * A consensus function C() for data, weights and control. ## * An init function init() of data and k giving an initial object ## of k prototypes. ## ## We use k as the second argument as this seems to be common ## practice for partitioning algorithms. ## ## We assume that consensus functions can all handle WEIGHTS ## (formals: x, weights, control; only used positionally). ## ## ## We now allow for arbitrary representations/objects of prototypes. ## What is needed are functions to modify a *single* prototype and ## subset the prototypes. By default, list and matrix (with the ## usual convention that rows are "objects") representations are ## supported. Otherwise, the family needs to provide suitable ## .modify() and .subset() functions. ## The approach relies on having the initializer of the family ## (init()) return an appropriate object of prototypes. ## It would be possible to have default initializers as well to ## randomly subset the data (i.e., select elements of lists or rows ## of matrices, respectively). ## ## ## The 'prototypes' are not necessarily objects of the same kind as ## the data objects. Therefore, D() is really a 2-argument ## cross-dissimilarity function. ## It would also be useful to have a way of computing the pairwise ## dissimilarities between objects: but this is something different ## from D() is objects and prototypes are not of the same kind. ## A "clean" solution could consist in specifying the family either ## via a (non-symmetric) cross-dissimilarity function X(), or a ## symmetric D() which when called with a single argument gives the ## pairwise object dissimilarities. ## I.e., ## pclust_family(D = NULL, C, init = NULL, X = NULL, ......) ## using ## * If both D and X are not given => TROUBLE. ## * If only D is given: use for X as well. ## * If only X is given: only use as such. ## Something for the future ... ## ## ## If people have code for computing cross-dissimilarities for the ## data and a *single* prototype (say, xd()), they can easily wrap ## into what is needed using ## t(sapply(prototypes, function(p) xd(x, p))) ## Assuming symmetry of the dissimilarity, they could also do ## t(sapply(prototypes, xd, x)) ## ## Perhaps check whether 'family' is a feasible/suitable pclust ## family (object). D <- family$D C <- family$C e <- family$e .modify <- family$.modify .subset <- family$.subset maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 100L nruns <- control$nruns reltol <- control$reltol if(is.null(reltol)) reltol <- sqrt(.Machine$double.eps) start <- control$start verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") ## Do this at last ... control <- as.list(control$control) ## Handle start values and number of runs. if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. 
start <- list(start) } nruns <- length(start) } else { if(is.null(nruns)) { ## Use nruns only if start is not given. nruns <- 1L } start <- replicate(nruns, family$init(x, k), simplify = FALSE) } ## Initialize. ## We need to do this here because it is (currently) the only way to ## figure out the number B of objects to be partitioned (which is ## needed for getting the object weights to the right length). prototypes <- start[[1L]] dissimilarities <- D(x, prototypes) ^ e B <- NROW(dissimilarities) ## Also try to figure out (if necessary) how to modify a single ## prototype and to subset the prototypes. Note that we can only ## check this *after* prototypes were obtained (and not when the ## family object is created). if(is.null(.modify)) { if(is.list(prototypes)) .modify <- function(x, i, value) { x[[i]] <- value x } else if(is.matrix(prototypes)) .modify <- function(x, i, value) { x[i, ] <- value x } else stop("Cannot determine how to modify prototypes.") } else if(!is.function(.modify) || !identical(formals(args(.modify)), c("x", "i", "value"))) stop("Invalid function to modify prototypes.") if(is.null(.subset)) { if(is.list(prototypes)) .subset <- `[` else if(is.matrix(prototypes)) .subset <- function(x, i) x[i, , drop = FALSE] else stop("Cannot determine how to subset prototypes.") } else if(!is.function(.subset) || !identical(formals(args(.subset)), c("x", "i"))) stop("Invalid function to subset prototypes.") weights <- rep_len(weights, B) if(any(weights < 0)) stop("Argument 'weights' has negative elements.") if(!any(weights > 0)) stop("Argument 'weights' has no positive elements.") ## A little helper. .make_unit_weights <- function(B, i) { out <- double(B) out[i] <- 1 out } if(m == 1) { ## Hard partitions. value <- if(all(weights == 1)) function(dissimilarities, ids) sum(.one_entry_per_column(dissimilarities, ids)) else function(dissimilarities, ids) sum(weights * .one_entry_per_column(dissimilarities, ids)) opt_value <- Inf run <- 1L if(verbose && (nruns > 1L)) message(gettextf("Pclust run: %d", run)) repeat { class_ids <- max.col( - dissimilarities ) old_value <- value(dissimilarities, class_ids) if(verbose) message(gettextf("Iteration: 0 *** value: %g", old_value)) iter <- 1L while(iter <= maxiter) { class_ids_used <- unique(class_ids) for(j in class_ids_used) prototypes <- .modify(prototypes, j, C(x, weights * (class_ids %in% j), control)) dissimilarities <- D(x, prototypes) ^ e class_ids <- max.col( - dissimilarities ) ## Try avoiding degenerate solutions. if(length(class_ids_used) < k) { ## Find the k - l largest ## object-to-assigned-prototype dissimilarities. o <- order(.one_entry_per_column(dissimilarities, class_ids), decreasing = TRUE) ## Find and recompute unused prototypes. unused <- setdiff(seq_len(k), class_ids_used) for(j in seq_along(unused)) prototypes <- .modify(prototypes, unused[j], C(x, .make_unit_weights(B, o[j]), control)) dissimilarities[, unused] <- D(x, .subset(prototypes, unused)) ^ e class_ids <- max.col( - dissimilarities ) ## For the time being, do not retry in case the ## solution is still degenerate. 
} new_value <- value(dissimilarities, class_ids) if(verbose) message(gettextf("Iteration: %d *** value: %g", iter, new_value)) if(abs(old_value - new_value) < reltol * (abs(old_value) + reltol)) break old_value <- new_value iter <- iter + 1L } if(new_value < opt_value) { converged <- (iter <= maxiter) opt_value <- new_value opt_class_ids <- class_ids opt_prototypes <- prototypes } if(run >= nruns) break run <- run + 1L if(verbose) message(gettextf("Pclust run: %d", run)) prototypes <- start[[run]] dissimilarities <- D(x, prototypes) ^ e } ## We should really have a suitable "sparse matrix" class for ## representing the memberships of hard partitions. For now: opt_u <- NULL ## opt_u <- matrix(0, B, k) ## opt_u[cbind(seq_len(B), opt_class_ids)] <- 1 } else { ## Soft partitions. value <- if(all(weights == 1)) function(dissimilarities, u) sum(u ^ m * dissimilarities) else function(dissimilarities, u) sum(weights * u ^ m * dissimilarities) opt_value <- Inf run <- 1L if(verbose && (nruns > 1L)) message(gettextf("Pclust run: %d", run)) repeat { u <- .memberships_from_cross_dissimilarities(dissimilarities, m) old_value <- value(dissimilarities, u) if(verbose) message(gettextf("Iteration: 0 *** value: %g", old_value)) iter <- 1L while(iter <= maxiter) { ## Update the prototypes. ## This amounts to solving, for each j: ## \sum_b w_b u_{bj}^m D(x_b, p) ^ e => \min_p ## I.e., p_j is the *weighted* consensus of the x_b with ## corresponding weights u_{bj}^m. for(j in seq_len(k)) { prototypes <- .modify(prototypes, j, C(x, weights * u[, j] ^ m, control)) } ## Update u. dissimilarities <- D(x, prototypes) ^ e u <- .memberships_from_cross_dissimilarities(dissimilarities, m) new_value <- value(dissimilarities, u) if(verbose) message(gettextf("Iteration: %d *** value: %g", iter, new_value)) if(abs(old_value - new_value) < reltol * (abs(old_value) + reltol)) break old_value <- new_value iter <- iter + 1L } if(new_value < opt_value) { converged <- (iter <= maxiter) opt_value <- new_value opt_prototypes <- prototypes opt_u <- u } if(run >= nruns) break run <- run + 1L if(verbose) message(gettextf("Pclust run: %d", run)) prototypes <- start[[run]] dissimilarities <- D(x, prototypes) ^ e } opt_class_ids <- max.col(opt_u) ## Ensure that opt_u is a stochastic matrix. opt_u <- pmax(opt_u, 0) opt_u <- opt_u / rowSums(opt_u) rownames(opt_u) <- rownames(dissimilarities) opt_u <- cl_membership(as.cl_membership(opt_u), k) } names(opt_class_ids) <- rownames(dissimilarities) pclust_object(prototypes = opt_prototypes, membership = opt_u, cluster = opt_class_ids, family = family, m = m, value = opt_value, call = match.call(), attributes = list("converged" = converged)) } print.pclust <- function(x, header = TRUE, ...) { is_hard <- (x$m == 1) class_ids <- cl_class_ids(x) if(header) { txt <- if(is_hard) gettextf("A hard partition of %d objects into %d classes.", length(class_ids), n_of_classes(x)) else gettextf("A soft partition (degree m = %f) of %d objects into %d classes.", x$m, length(class_ids), n_of_classes(x)) writeLines(strwrap(txt)) } if(is_hard) { print(class_ids, ...) } else { writeLines("Class memberships:") print(cl_membership(x), ...) writeLines("Class ids of closest hard partition:") print(unclass(class_ids), ...) } invisible(x) } ### * pclust_family pclust_family <- function(D, C, init = NULL, description = NULL, e = 1, .modify = NULL, .subset = NULL) { ## Add checking formals (lengths) eventually ... if(is.null(init)) { ## Works for list representations ... 
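        ## As an illustrative sketch (not used automatically), a matrix
        ## representation with objects in rows could use something like
        ##   function(x, k) x[sample.int(nrow(x), k), , drop = FALSE]
        ## to pick k random rows as initial prototypes.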
init <- function(x, k) sample(x, k) } .structure(list(description = description, D = D, C = C, init = init, e = e, .modify = .modify, .subset = .subset), class = "pclust_family") } ### * pclust_object pclust_object <- function(prototypes, membership, cluster, family, m = 1, value, ..., classes = NULL, attributes = NULL) { out <- c(list(prototypes = prototypes, membership = membership, cluster = cluster, family = family, m = m, value = value), list(...)) attributes(out) <- c(attributes(out), attributes) classes <- unique(as.character(classes)) class(out) <- c(classes[classes != "pclust"], "pclust") out } ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/AAA.R0000644000175000017500000000037411304023136012237 0ustar nileshnilesh## Things which must come first in the package code. ### * Internal utilities. .false <- function(x) FALSE .true <- function(x) TRUE ## A fast version of structure(). .structure <- function(x, ...) `attributes<-`(x, c(attributes(x), list(...))) clue/R/ensemble.R0000644000175000017500000001567013435044575013475 0ustar nileshnileshcl_ensemble <- function(..., list = NULL) { clusterings <- c(list(...), list) if(!length(clusterings)) { ## Return an empty cl_ensemble. ## In this case, we cannot additionally know whether it contains ## partitions or hierarchies ... attr(clusterings, "n_of_objects") <- as.integer(NA) class(clusterings) <- "cl_ensemble" return(clusterings) } ## Previously, we used to require that the elements of the ensemble ## either all be partitions, or all be hierarchies. We no longer do ## this, as it makes sense to also allow e.g. object dissimilarities ## (raw "dist" objects or additive distances) as elements (e.g., ## when computing proximities), and it is rather cumbersome to ## decide in advance which combinations of elements might be useful ## and hence should be allowed. All we enforce is that all elements ## correspond to the same number of objects (as we typically cannot ## verify that they relate to the *same* objects). For "pure" ## ensembles of partitions or hierarchies we add additional class ## information. if(all(vapply(clusterings, is.cl_partition, NA))) class(clusterings) <- c("cl_partition_ensemble", "cl_ensemble") else if(all(vapply(clusterings, is.cl_dendrogram, NA))) class(clusterings) <- c("cl_dendrogram_ensemble", "cl_hierarchy_ensemble", "cl_ensemble") else if(all(vapply(clusterings, is.cl_hierarchy, NA))) class(clusterings) <- c("cl_hierarchy_ensemble", "cl_ensemble") else class(clusterings) <- "cl_ensemble" n <- sapply(clusterings, n_of_objects) if(any(diff(n))) stop("All elements must have the same number of objects.") attr(clusterings, "n_of_objects") <- as.integer(n[1L]) clusterings } is.cl_ensemble <- function(x) inherits(x, "cl_ensemble") ## ## In the old days, kmeans() results were unclassed lists, hence such ## objects were taken as representing a single clustering. Nowadays, we ## take these as lists of clusterings. as.cl_ensemble <- function(x) { if(is.cl_ensemble(x)) x else if(is.list(x) && !is.object(x)) cl_ensemble(list = x) else cl_ensemble(x) } ## c.cl_ensemble <- function(..., recursive = FALSE) { clusterings <- unlist(lapply(list(...), as.cl_ensemble), recursive = FALSE) cl_ensemble(list = clusterings) } "[.cl_ensemble" <- function(x, i) { ## Make subscripting empty ensembles a noop. if(length(x) == 0L) return(x) cl_ensemble(list = NextMethod("[")) } rep.cl_ensemble <- function(x, times, ...) 
cl_ensemble(list = NextMethod("rep")) print.cl_partition_ensemble <- function(x, ...) { msg <- sprintf(ngettext(length(x), "An ensemble of %d partition of %d objects.", "An ensemble of %d partitions of %d objects."), length(x), n_of_objects(x)) writeLines(strwrap(msg)) invisible(x) } Summary.cl_partition_ensemble <- function(..., na.rm = FALSE) { ok <- switch(.Generic, max = , min = , range = TRUE, FALSE) if(!ok) stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) args <- list(...) ## Combine the given partition ensembles. x <- do.call(c, args) switch(.Generic, "min" = cl_meet(x), "max" = cl_join(x), "range" = cl_ensemble(min = cl_meet(x), max = cl_join(x))) } print.cl_dendrogram_ensemble <- function(x, ...) { msg <- sprintf(ngettext(length(x), "An ensemble of %d dendrogram of %d objects.", "An ensemble of %d dendrograms of %d objects."), length(x), n_of_objects(x)) writeLines(strwrap(msg)) invisible(x) } print.cl_hierarchy_ensemble <- function(x, ...) { msg <- sprintf(ngettext(length(x), "An ensemble of %d hierarchy of %d objects.", "An ensemble of %d hierarchies of %d objects."), length(x), n_of_objects(x)) writeLines(strwrap(msg)) invisible(x) } print.cl_ensemble <- function(x, ...) { writeLines(sprintf(ngettext(length(x), "An ensemble with %d element.", "An ensemble with %d elements."), length(x))) invisible(x) } plot.cl_ensemble <- function(x, ..., main = NULL, layout = NULL) { if(!is.cl_ensemble(x)) stop("Wrong class.") ## What we can definitely plot is are cl_addtree, cl_dendrogram and ## cl_ultrametric objects. (We could also add simple methods for ## plotting raw dissimilarities, but of course seriation::dissplot() ## would be the thing to use.) What we cannot reasonably plot is ## partitions (in particular, as these do not know about the ## underlying dissimilarities. But then we could perhaps provide ## silhoutte plots etc for ensembles of partitions ... ## ## Think about this. ## ## So let us check for the things we can plot. ## (Note that currently there is neither is.cl_ultrametric() nor ## is.cl_addtree().) ok <- vapply(x, function(e) (is.cl_dendrogram(e) || inherits(e, c("cl_addtree", "cl_ultrametric"))), NA) if(!all(ok)) stop(gettextf("Plotting not available for elements %s of the ensemble.", paste(which(!ok), collapse = " ")), domain = NA) ## Prefer dendrogram plot methods to those for hclust objects. ind <- vapply(x, is.cl_dendrogram, NA) if(any(ind)) x[ind] <- lapply(x, as.cl_dendrogram) ## Now the usual layouting ... same as for plotting relation ## ensembles. ## Number of elements. n <- length(x) ## Layout. byrow <- TRUE if(is.null(layout)) { nc <- ceiling(sqrt(n)) nr <- ceiling(n / nc) } else { layout <- c(as.list(layout), byrow)[seq_len(3)] if(is.null(names(layout))) names(layout) <- c("nr", "nc", "byrow") nr <- layout[["nr"]] nc <- layout[["nc"]] byrow <- layout[["byrow"]] } op <- if(byrow) par(mfrow = c(nr, nc)) else par(mfcol = c(nr, nc)) on.exit(par(op)) ## Try recycling main (might want the same for others as well). if(!is.list(main)) { main <- if(is.null(main)) vector("list", length = n) else rep.int(as.list(main), n) } for(i in seq_along(x)) plot(x[[i]], main = main[[i]], ...) } unique.cl_ensemble <- function(x, incomparables = FALSE, ...) 
cl_ensemble(list = NextMethod("unique")) .cl_ensemble_type <- function(x) { if(inherits(x, "cl_partition_ensemble")) "partition" else if(inherits(x, "cl_hierarchy_ensemble")) "hierarchy" else NULL } clue/R/sumt.R0000644000175000017500000000722111304023136012643 0ustar nileshnileshsumt <- function(x0, L, P, grad_L = NULL, grad_P = NULL, method = NULL, eps = NULL, q = NULL, verbose = NULL, control = list()) { ## Default values: make it nice for others to call us. if(is.null(eps)) eps <- sqrt(.Machine$double.eps) if(is.null(method)) method <- "CG" if(is.null(q)) q <- 10 if(is.null(verbose)) verbose <- getOption("verbose") Phi <- function(rho, x) L(x) + rho * P(x) if(is.null(grad_L) || is.null(grad_P)) { make_Phi <- function(rho) { function(x) Phi(rho, x) } make_grad_Phi <- function(rho) NULL } else { grad_Phi <- function(rho, x) grad_L(x) + rho * grad_P(x) make_Phi <- if(method == "nlm") { function(rho) { function(x) .structure(Phi(rho, x), gradient = grad_Phi(rho, x)) } } else function(rho) { function(x) Phi(rho, x) } make_grad_Phi <- function(rho) { function(x) grad_Phi(rho, x) } } ## ## For the penalized minimization, the Newton-type nlm() may be ## computationally infeasible (although it works much faster for ## fitting ultrametrics to the Phonemes data). ## De Soete recommends using Conjugate Gradients. ## We provide a simple choice: by default, optim(method = "CG") is ## used. If method is non-null and not "nlm", we use optim() with ## this method. In both cases, control gives the control parameters ## for optim(). ## If method is "nlm", nlm() is used, in which case control is ## ignored. Note that we call nlm() with checking analyticals ## turned off, as in some cases (e.g. when fitting ultrametrics) the ## penalty function is not even continuous ... optimize_with_penalty <- if(method == "nlm") function(rho, x) nlm(make_Phi(rho), x, check.analyticals = FALSE) $ estimate else { function(rho, x) optim(x, make_Phi(rho), gr = make_grad_Phi(rho), method = method, control = control) $ par } ## Note also that currently we do not check whether optimization was ## "successful" ... ## ## We currently require that x0 be a *list* of start values, the ## length of which gives the number of SUMT runs. But as always, ## let's be nice to users and developers, just in case ... if(!is.list(x0)) x0 <- list(x0) v_opt <- Inf x_opt <- NULL rho_opt <- NULL for(run in seq_along(x0)) { if(verbose) message(gettextf("SUMT run: %d", run)) x <- x0[[run]] ## ## Better upper/lower bounds for rho? rho <- max(L(x), 0.00001) / max(P(x), 0.00001) ## if(verbose) message(gettextf("Iteration: 0 Rho: %g P: %g", rho, P(x))) iter <- 1L repeat { ## ## Shouldnt't we also have maxiter, just in case ...? ## if(verbose) message(gettextf("Iteration: %d Rho: %g P: %g", iter, rho, P(x))) x_old <- x x <- optimize_with_penalty(rho, x) if(max(abs(x_old - x)) < eps) break iter <- iter + 1L rho <- q * rho } v <- Phi(rho, x) if(v < v_opt) { v_opt <- v x_opt <- x rho_opt <- rho } if(verbose) message(gettextf("Minimum: %g", v_opt)) } .structure(list(x = x_opt, L = L(x_opt), P = P(x_opt), rho = rho_opt, call = match.call()), class = "sumt") } clue/R/partition.R0000644000175000017500000004352212212427231013673 0ustar nileshnilesh### * n_of_classes ## Get the number of classes in a (hard or soft) partition. ## ## We generally allow for classes to be empty, unlike the current ## version of kmeans(). Package cclust has a version of k-means which ## does not stop in case of empty classes. ## However, we only count NON-EMPTY classes here. 
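## (For example, a membership matrix with three columns one of which is
## identically zero represents a partition with only two classes in this
## sense, even though three classes are "available".)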
## n_of_classes <- function(x) UseMethod("n_of_classes") ## Default method. n_of_classes.default <- function(x) length(unique(cl_class_ids(x))) ## Package stats: kmeans() (R 2.1.0 or better). n_of_classes.kmeans <- n_of_classes.default ## Package cluster: clara(), fanny(), and pam() give objects of the ## respective class inheriting from class "partition". n_of_classes.fanny <- function(x) sum(colSums(x$membership, na.rm = TRUE) > 0) n_of_classes.partition <- n_of_classes.default ## Package cclust: cclust(). n_of_classes.cclust <- n_of_classes.default ## Package e1071: cmeans() gives objects of class "fclust". n_of_classes.fclust <- n_of_classes.fanny ## Package e1071: cshell(). n_of_classes.cshell <- n_of_classes.fanny ## Package e1071: bclust(). n_of_classes.bclust <- n_of_classes.default ## Package mclust: Mclust(). n_of_classes.Mclust <- n_of_classes.default ## Package clue: Memberships. n_of_classes.cl_membership <- function(x) attr(x, "n_of_classes") ## Package clue: pclust(). n_of_classes.pclust <- function(x) { if(is.null(m <- x$membership)) length(unique(cl_class_ids(x))) else sum(colSums(m, na.rm = TRUE) > 0) } ## Package clue: (virtual) class "cl_partition". n_of_classes.cl_partition <- function(x) n_of_classes(.get_representation(x)) ### * cl_class_ids ## Get ids of classes in a partition. ## ## Currently, all supported soft partitioning methods provide a softmax ## hard partitioning as well. ## cl_class_ids <- function(x) UseMethod("cl_class_ids") ## Default method. cl_class_ids.default <- function(x) { stop("Cannot infer class ids from given object.") } ## Package stats: kmeans() (R 2.1.0 or better). cl_class_ids.kmeans <- function(x) as.cl_class_ids(x$cluster) ## Package cluster: clara(), fanny(), and pam() give objects of the ## respective class inheriting from class "partition". cl_class_ids.partition <- function(x) as.cl_class_ids(x$clustering) ## Package RWeka: clusterers return objects inheriting from ## "Weka_clusterer". cl_class_ids.Weka_clusterer <- function(x) as.cl_class_ids(x$class_ids) ## Package cba: ccfkms(). cl_class_ids.ccfkms <- function(x) as.cl_class_ids(as.vector(x$cl)) ## Package cba: rockCluster() returns objects of class "rock". cl_class_ids.rock <- function(x) as.cl_class_ids(as.vector(x$cl)) ## Package cclust: cclust(). cl_class_ids.cclust <- cl_class_ids.kmeans ## Package e1071: cmeans() gives objects of class "fclust". cl_class_ids.fclust <- cl_class_ids.kmeans ## Package e1071: cshell(). cl_class_ids.cshell <- cl_class_ids.kmeans ## Package e1071: bclust(). cl_class_ids.bclust <- cl_class_ids.kmeans ## Package flexclust: kcca() returns objects of S4 class "kcca" which ## extends S4 class "flexclust". ## ## We used to be able to call flexclust::cluster(), but this now only ## has S4 methods for modeltools::clusters() S4 generic. Let's call this ## one, and hope that flexclust has been loaded ... ## cl_class_ids.kcca <- function(x) as.cl_class_ids(modeltools::clusters(x)) ## Package flexmix: class "flexmix". ## ## We used to be able to call flexmix::cluster(), but this now only has ## S4 methods for modeltools::clusters() S4 generic. Let's call this ## one, and hope that flexmix has been loaded ... ## cl_class_ids.flexmix <- function(x) as.cl_class_ids(modeltools::clusters(x)) ## Package kernlab: specc() and kkmeans() return objects of S4 class ## "specc". cl_class_ids.specc <- function(x) { tmp <- unclass(x) as.cl_class_ids(.structure(as.vector(tmp), names = names(tmp))) } ## Package mclust: Mclust(). 
cl_class_ids.Mclust <- function(x) as.cl_class_ids(x$classification) ## Package relations: equivalence and preference relations. cl_class_ids.relation <- function(x) as.cl_class_ids(relations::relation_class_ids(x)) ## Package clue: Class ids. cl_class_ids.cl_class_ids <- identity ## Package clue: Memberships. cl_class_ids.cl_membership <- function(x) as.cl_class_ids(.structure(max.col(x), names = rownames(x))) ## (Cannot do cl_class_ids.cl_membership <- max.col for generic/method ## consistency.) ## Package clue: cl_pam(). cl_class_ids.cl_pam <- cl_class_ids.kmeans ## Package clue: cl_partition_by_class_ids(). cl_class_ids.cl_partition_by_class_ids <- function(x) .get_representation(x) ## Package clue: kmedoids(). cl_class_ids.kmedoids <- cl_class_ids.kmeans ## Package clue: pclust(). cl_class_ids.pclust <- cl_class_ids.kmeans ## Package clue: (virtual) class "cl_partition". cl_class_ids.cl_partition <- function(x) cl_class_ids(.get_representation(x)) ## Package movMF: class "movMF". cl_class_ids.movMF <- function(x) as.cl_class_ids(max.col(x$P)) ### * as.cl_class_ids as.cl_class_ids <- function(x) { ## For the time being, handle only "raw" class ids. ## Maybe add methods handling factors lateron (if necessary). ## ## This could also be used to canonicalize returned class ids ## according to the docs (vector of integers with the class ids), ## using someting like ## match(ids, unique(ids)) ## .structure(unclass(x), class = "cl_class_ids") } ### * print.cl_class_ids print.cl_class_ids <- function(x, ...) { writeLines("Class ids:") print(unclass(x), ...) invisible(x) } ### * cl_class_labels cl_class_labels <- function(x) UseMethod("cl_class_labels") ### * is.cl_partition ## Determine whether an object is a (generalized) partition. ## Note that this includes both hard and soft partitions, and allows ## sums of memberships of objects to be less than one. is.cl_partition <- function(x) UseMethod("is.cl_partition") ## Default method. is.cl_partition.default <- .false ## Package stats: kmeans() (R 2.1.0 or better). is.cl_partition.kmeans <- .true ## Package cluster: clara(), fanny(), and pam() give objects of the ## respective class inheriting from class "partition". is.cl_partition.partition <- .true ## Package RWeka: clusterers return objects inheriting from ## "Weka_clusterer". ## (Note that Cobweb internally uses a classification tree, but ## definitely does not expose this structure.) is.cl_partition.Weka_clusterer <- .true ## Package cba: ccfkms(). is.cl_partition.ccfkms <- .true ## Package cba: rockCluster() returns objects of class "rock". is.cl_partition.rock <- .true ## Package cclust: cclust(). is.cl_partition.cclust <- .true ## Package e1071: cmeans() gives objects of class "fclust". is.cl_partition.fclust <- .true ## Package e1071: cshell(). is.cl_partition.cshell <- .true ## Package e1071: bclust(). is.cl_partition.bclust <- .true ## Package flexclust: kcca() returns objects of S4 class "kcca" which ## extends S4 class "flexclust". is.cl_partition.kcca <- .true ## Package flexmix: class "flexmix". is.cl_partition.flexmix <- .true ## Package kernlab: specc() and kkmeans() return objects of S4 class ## "specc". is.cl_partition.specc <- .true ## Package mclust: Mclust(). is.cl_partition.Mclust <- .true ## Package clue: (virtual) class "cl_partition". ## Note that "raw" cl_membership objects are *not* partitions, as they ## are meant for numeric computations. is.cl_partition.cl_partition <- .true ## Package clue: kmedoids(). is.cl_partition.kmedoids <- .true ## Package clue: pclust(). 
is.cl_partition.pclust <- .true ## Package movMF: class "movMF". is.cl_partition.movMF <- .true ### * as.cl_partition ## Note that cl_partition conceptually is a virtual class, so there are ## no prototypes and no cl_partition() creator. .cl_partition_classes <- "cl_partition" as.cl_partition <- function(x) { if(is.cl_partition(x)) { if(!inherits(x, "cl_partition")) .make_container(x, .cl_partition_classes) else x } else cl_partition_by_memberships(as.cl_membership(x)) } ### * print.cl_partition print.cl_partition <- function(x, ...) .print_container(x, "cl_partition", ...) ### * print.cl_partition_by_class_ids print.cl_partition_by_class_ids <- function(x, ...) { writeLines(gettextf("A hard partition of %d objects.", n_of_objects(x))) print(cl_class_ids(x), ...) invisible(x) } ### * print.cl_partition_by_memberships print.cl_partition_by_memberships <- function(x, ...) { writeLines(gettextf("A partition of %d objects.", n_of_objects(x))) print(cl_membership(x), ...) invisible(x) } ### * Complex.cl_partition Complex.cl_partition <- function(z) stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) ### * Math.cl_partition Math.cl_partition <- function(x, ...) stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) ### * Ops.cl_partition Ops.cl_partition <- function(e1, e2) { if(nargs() == 1L) stop(gettextf("Unary '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) ## Only comparisons are supprorted. if(!(as.character(.Generic) %in% c("<", "<=", ">", ">=", "==", "!="))) stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) ci1 <- cl_class_ids(e1) ci2 <- cl_class_ids(e2) if(length(ci1) != length(ci2)) stop("Partitions must have the same number of objects.") z <- table(ci1, ci2) > 0 switch(.Generic, "<=" = all(rowSums(z) == 1), "<" = all(rowSums(z) == 1) && any(colSums(z) > 1), ">=" = all(colSums(z) == 1), ">" = all(colSums(z) == 1) && any(rowSums(z) > 1), "==" = all(rowSums(z) == 1) && all(colSums(z) == 1), "!=" = any(rowSums(z) > 1) || any(colSums(z) > 1)) } ### * Summary.cl_partition Summary.cl_partition <- function(..., na.rm = FALSE) { ok <- switch(.Generic, max = , min = , range = TRUE, FALSE) if(!ok) stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) args <- list(...) switch(.Generic, "min" = cl_meet(cl_ensemble(list = args)), "max" = cl_join(cl_ensemble(list = args)), "range" = { cl_ensemble(min = cl_meet(cl_ensemble(list = args)), max = cl_join(cl_ensemble(list = args))) }) } ### * cl_partition_by_class_ids cl_partition_by_class_ids <- function(x, labels = NULL) { if(!is.atomic(x)) stop("Class ids must be atomic.") if(is.null(names(x))) names(x) <- labels ## ## Perhaps give the raw class ids more structure? ## E.g, class "cl_class_ids"? ## Problem is that we used to say about extensibility that all there ## is to do for a hard partitioner is to add a cl_class_ids() method ## and two predicates, but *not* to have the former give a suitably ## classed object. On the other hand, the recipe would need to be ## extended for soft partitioners, for which it would be necessary ## to provide a cl_membership() method which really returns an ## object of class cl_membership. Note that we can do this using ## as.cl_membership(m), where m is the raw membership matrix. So ## maybe we should ask for using as.cl_class_ids() to coerce raw ## class ids ... 
.make_container(as.cl_class_ids(x), c("cl_partition_by_class_ids", .cl_hard_partition_classes), list(n_of_objects = length(x), n_of_classes = length(unique(x)))) ## } ### * cl_partition_by_memberships cl_partition_by_memberships <- function(x, labels = NULL) { if(!is.matrix(x) || any(x < 0, na.rm = TRUE) || any(x > 1, na.rm = TRUE)) stop("Not a valid membership matrix.") ## Be nice. x <- x / rowSums(x, na.rm = TRUE) ## (Note that this does not imply all(rowSums(x) == 1). If we ## wanted to test for this, something like ## .is_stochastic_matrix <- function(x) ## identical(all.equal(rowSums(x), rep(1, nrow(x))), TRUE)) ## should do.) if(is.null(rownames(x))) rownames(x) <- labels .make_container(as.cl_membership(x), c("cl_partition_by_memberships", .cl_partition_classes), list(n_of_objects = nrow(x))) } ### * is.cl_hard_partition ## Determine whether an object is a hard partition. is.cl_hard_partition <- function(x) UseMethod("is.cl_hard_partition") ## Default method. is.cl_hard_partition.default <- .false ## Package stats: kmeans() (R 2.1.0 or better). is.cl_hard_partition.kmeans <- .true ## Package cluster: clara(), fanny(), and pam() give objects of the ## respective class inheriting from class "partition". ## ## Of course, fuzzy clustering can also give a hard partition ... is.cl_hard_partition.fanny <- function(x) { all(rowSums(cl_membership(x) == 1, na.rm = TRUE) > 0) } ## is.cl_hard_partition.partition <- .true ## Package RWeka: clusterers return objects inheriting from ## "Weka_clusterer". is.cl_hard_partition.Weka_clusterer <- .true ## Package cba: ccfkms(). is.cl_hard_partition.ccfkms <- .true ## Package cba: rockCluster() returns objects of class "rock". is.cl_hard_partition.rock <- .true ## Package cclust: cclust(). is.cl_hard_partition.cclust <- .true ## Package e1071: cmeans() gives objects of class "fclust". is.cl_hard_partition.fclust <- is.cl_hard_partition.fanny ## Package e1071: cshell(). is.cl_hard_partition.cshell <- is.cl_hard_partition.fanny ## Package e1071: bclust(). is.cl_hard_partition.bclust <- .true ## Package flexclust: kcca() returns objects of S4 class "kcca" which ## extends S4 class "flexclust". is.cl_hard_partition.kcca <- .true ## Package flexmix: class "flexmix". is.cl_hard_partition.flexmix <- is.cl_hard_partition.fanny ## Package kernlab: specc() and kkmeans() return objects of S4 class ## "specc". is.cl_hard_partition.specc <- .true ## Package mclust: Mclust(). is.cl_hard_partition.Mclust <- is.cl_hard_partition.fanny ## Package clue: (virtual) class "cl_hard_partition". is.cl_hard_partition.cl_hard_partition <- .true ## Package clue: (virtual) class "cl_partition". ## Note that "raw" cl_membership objects are *not* partitions, as they ## are meant for numeric computations. ## Rather than providing is.cl_hard_partition.cl_membership() we thus ## prefer explicit handling of cl_partition objects with a cl_membership ## representation. is.cl_hard_partition.cl_partition <- function(x) { ## If the object has a cl_membership representation ... y <- .get_representation(x) if(inherits(y, "cl_membership")) attr(y, "is_cl_hard_partition") ## Other representations, e.g. for "definitely" hard partitions via ## vectors of class ids or class labels, or a list of classes, may ## be added in future versions. ## In any case, this must be kept in sync with what is handled by ## as.cl_partition() [which currently runs as.cl_membership() in ## case is.cl_partition() gives false]. else is.cl_hard_partition(y) } ## Package clue: kmedoids(). 
is.cl_hard_partition.kmedoids <- .true ## Package clue: pclust(). is.cl_hard_partition.pclust <- is.cl_hard_partition.fanny ## Package movMF: class "movMF". is.cl_hard_partition.movMF <- is.cl_hard_partition.fanny ### * as.cl_hard_partition .cl_hard_partition_classes <- c("cl_hard_partition", "cl_partition") as.cl_hard_partition <- function(x) { if(is.cl_hard_partition(x)) { if(!inherits(x, "cl_partition")) .make_container(x, .cl_hard_partition_classes) else x } else if(is.cl_partition(x)) { ## A soft cl_partition ... ids <- cl_class_ids(x) cl_partition_by_class_ids(ids, names(ids)) } else if(is.matrix(x)) { ## A matrix of raw memberships, hopefully ... cl_partition_by_class_ids(max.col(x), rownames(x)) } else if(is.atomic(x)) { ## A vector of raw class ids, hopefully ... cl_partition_by_class_ids(x, names(x)) } else stop("Cannot coerce to 'cl_hard_partition'.") } ### * is.cl_soft_partition ## Determine whether an object is a soft partition. is.cl_soft_partition <- function(x) is.cl_partition(x) && ! is.cl_hard_partition(x) ### * .maybe_is_proper_soft_partition ## Determine whether an object might be a proper soft partition (in the ## sense that it is a cl_partition but not a cl_hard_partition). ## This is mostly useful when computing fuzziness measures. .maybe_is_proper_soft_partition <- function(x) UseMethod(".maybe_is_proper_soft_partition") .maybe_is_proper_soft_partition.default <- .false .maybe_is_proper_soft_partition.fanny <- .true .maybe_is_proper_soft_partition.fclust <- .true .maybe_is_proper_soft_partition.cshell <- .true .maybe_is_proper_soft_partition.flexmix <- .true .maybe_is_proper_soft_partition.Mclust <- .true ## See above for why we prefer not to have ## .maybe_is_proper_soft_partition.cl_membership(). ## (Although this is an internal generic really only used for making ## cl_fuzziness() computations more efficient, so we could be more ## generous here [perhaps using a slightly different name such as ## .maybe_represents_a_proper_soft_partition()]. .maybe_is_proper_soft_partition.cl_partition <- function(x) { y <- .get_representation(x) if(inherits(y, "cl_membership")) !attr(y, "is_cl_hard_partition") else .maybe_is_proper_soft_partition(y) } .maybe_is_proper_soft_partition.pclust <- function(x) x$m > 1 ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/inst/0000755000175000017500000000000014130772671012321 5ustar nileshnileshclue/inst/CITATION0000644000175000017500000000165012612741643013456 0ustar nileshnileshcitHeader("To cite in publications use:") ## R >= 2.8.0 passes package metadata to citation(). 
if(!exists("meta") || is.null(meta)) meta <- packageDescription("clue") year <- sub("-.*", "", meta$Date) note <- sprintf("R package version %s", meta$Version) bibentry("Manual", title = "clue: Cluster ensembles", author = person("Kurt", "Hornik", email = "Kurt.Hornik@R-project.org"), year = year, note = note, url = "https://CRAN.R-project.org/package=clue" ) bibentry("Article", title = "A {CLUE} for {CLUster Ensembles}", author = person("Kurt", "Hornik", email = "Kurt.Hornik@R-project.org"), year = 2005, journal = "Journal of Statistical Software", volume = 14, number = 12, month = "September", doi = "10.18637/jss.v014.i12" ) clue/inst/po/0000755000175000017500000000000012213262407012726 5ustar nileshnileshclue/inst/po/en@quot/0000755000175000017500000000000012213262407014341 5ustar nileshnileshclue/inst/po/en@quot/LC_MESSAGES/0000755000175000017500000000000012213262407016126 5ustar nileshnileshclue/inst/po/en@quot/LC_MESSAGES/R-clue.mo0000644000175000017500000002064013143661614017622 0ustar nileshnileshSqL/AHa?X ^ j5v2,X We V : 5O ) , / ! ;. 8j  % + )5 (_ 2 ) # * *4 _ %| )  & / I T1_*+19"X&{& *Eb%~  94.+Zy"07% ]+j(> 1,F's6/H"k?X  +572m,XW&V~:9-J0x7%C8K ")+)(H2q)#**H%e)&  2 =1H.z+1""*&M&t& #>[%w  94.$Sr&0 7" Z +g  , B !1!,K!'x!4I%R52'M+ D0.N O3,L"AS1)$=E*B#! J;-? > KQ8 PC<G(9:F&7@/H6A hard partition of %d objects into %d classes.A hard partition of %d objects.A hard partition of a cluster ensemble with %d elements into %d classes.A partition of %d objects.A soft partition (degree m = %f) of %d objects into %d classes.A soft partition (degree m = %f) of a cluster ensemble with %d elements into %d classes.AOG run: %dAOS run: %dAll clusterings must have the same number of objects.All elements must have the same number of objects.All given orders must be valid permutations.An ensemble of %d dendrogram of %d objects.An ensemble of %d dendrograms of %d objects.An ensemble of %d hierarchy of %d objects.An ensemble of %d hierarchies of %d objects.An ensemble of %d partition of %d objects.An ensemble of %d partitions of %d objects.An ensemble with %d element.An ensemble with %d elements.An object of virtual class '%s', with representation:Argument 'weights' has negative elements.Argument 'weights' has no positive elements.Argument 'weights' must be compatible with 'x'.Argument 'x' must be a partition.Arguments 'x' and 'y' must have the same number of objects.Can only determine classes of partitions or hierarchies.Can only handle hard partitions.Cannot coerce to 'cl_addtree'.Cannot coerce to 'cl_hard_partition'.Cannot compute consensus of empty ensemble.Cannot compute join of given clusterings.Cannot compute medoid of empty ensemble.Cannot compute medoid partition of empty ensemble.Cannot compute meet of given clusterings.Cannot compute prototype distances.Cannot determine how to modify prototypes.Cannot determine how to subset prototypes.Cannot determine prototypes.Cannot extract object dissimilaritiesCannot infer class ids from given object.Cannot make new predictions.Cannot mix partitions and hierarchies.Change: %gChange: u: %g L: %gClass ids must be atomic.Class ids:Criterion:Dendrograms must have the same number of objects.Generic '%s' not defined for "%s" objects.Given ensemble contains no dissimilarities.Hierarchies must have the same number of objects.Invalid agreement method '%s'.Invalid consensus method '%s'.Invalid dissimilarity method '%s'.Invalid function to modify prototypes.Invalid function to subset prototypes.Iteration: %dIteration: %d *** value: %gIteration: %d Rho: %g P: 
%gIteration: 0 *** value: %gIteration: 0 Rho: %g P: %gIterative projection run: %dIterative reduction run: %dJoin of given n-trees does not exist.Medoid ids:Minimum: %gNo information on dissimilarity in consensus method used.No information on exponent in consensus method used.Non-identical weights currently not supported.Not a valid membership matrix.Not a valid ultrametric.Outer iteration: %dOverall change: u: %g L: %gParameter 'p' must be in [1/2, 1].Partitions must have the same number of objects.Pclust run: %dPlotting not available for elements %s of the ensemble.SUMT run: %dStandardization is currently not supported.Term: %dUnary '%s' not defined for "%s" objects.Value '%s' is not a valid abbreviation for a fuzziness method.Wrong class.k cannot be less than the number of classes in x.x must be a matrix with nonnegative entries.x must not have more rows than columns.Project-Id-Version: clue 0.3-54 POT-Creation-Date: 2017-08-07 11:31 PO-Revision-Date: 2017-08-07 11:31 Last-Translator: Automatically generated Language-Team: none MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Language: en Plural-Forms: nplurals=2; plural=(n != 1); A hard partition of %d objects into %d classes.A hard partition of %d objects.A hard partition of a cluster ensemble with %d elements into %d classes.A partition of %d objects.A soft partition (degree m = %f) of %d objects into %d classes.A soft partition (degree m = %f) of a cluster ensemble with %d elements into %d classes.AOG run: %dAOS run: %dAll clusterings must have the same number of objects.All elements must have the same number of objects.All given orders must be valid permutations.An ensemble of %d dendrogram of %d objects.An ensemble of %d dendrograms of %d objects.An ensemble of %d hierarchy of %d objects.An ensemble of %d hierarchies of %d objects.An ensemble of %d partition of %d objects.An ensemble of %d partitions of %d objects.An ensemble with %d element.An ensemble with %d elements.An object of virtual class ‘%s’, with representation:Argument ‘weights’ has negative elements.Argument ‘weights’ has no positive elements.Argument ‘weights’ must be compatible with ‘x’.Argument ‘x’ must be a partition.Arguments ‘x’ and ‘y’ must have the same number of objects.Can only determine classes of partitions or hierarchies.Can only handle hard partitions.Cannot coerce to ‘cl_addtree’.Cannot coerce to ‘cl_hard_partition’.Cannot compute consensus of empty ensemble.Cannot compute join of given clusterings.Cannot compute medoid of empty ensemble.Cannot compute medoid partition of empty ensemble.Cannot compute meet of given clusterings.Cannot compute prototype distances.Cannot determine how to modify prototypes.Cannot determine how to subset prototypes.Cannot determine prototypes.Cannot extract object dissimilaritiesCannot infer class ids from given object.Cannot make new predictions.Cannot mix partitions and hierarchies.Change: %gChange: u: %g L: %gClass ids must be atomic.Class ids:Criterion:Dendrograms must have the same number of objects.Generic ‘%s’ not defined for "%s" objects.Given ensemble contains no dissimilarities.Hierarchies must have the same number of objects.Invalid agreement method ‘%s’.Invalid consensus method ‘%s’.Invalid dissimilarity method ‘%s’.Invalid function to modify prototypes.Invalid function to subset prototypes.Iteration: %dIteration: %d *** value: %gIteration: %d Rho: %g P: %gIteration: 0 *** value: %gIteration: 0 Rho: %g P: %gIterative projection run: %dIterative reduction run: %dJoin of 
given n-trees does not exist.Medoid ids:Minimum: %gNo information on dissimilarity in consensus method used.No information on exponent in consensus method used.Non-identical weights currently not supported.Not a valid membership matrix.Not a valid ultrametric.Outer iteration: %dOverall change: u: %g L: %gParameter ‘p’ must be in [1/2, 1].Partitions must have the same number of objects.Pclust run: %dPlotting not available for elements %s of the ensemble.SUMT run: %dStandardization is currently not supported.Term: %dUnary ‘%s’ not defined for "%s" objects.Value ‘%s’ is not a valid abbreviation for a fuzziness method.Wrong class.k cannot be less than the number of classes in x.x must be a matrix with nonnegative entries.x must not have more rows than columns.clue/inst/doc/0000755000175000017500000000000014130772671013066 5ustar nileshnileshclue/inst/doc/clue.Rnw0000644000175000017500000016521512734170652014517 0ustar nileshnilesh\documentclass[fleqn]{article} \usepackage[round,longnamesfirst]{natbib} \usepackage{graphicx,keyval,hyperref,doi} \newcommand\argmin{\mathop{\mathrm{arg min}}} \newcommand\trace{\mathop{\mathrm{tr}}} \newcommand\R{{\mathbb{R}}} \newcommand{\pkg}[1]{{\normalfont\fontseries{b}\selectfont #1}} \newcommand{\sQuote}[1]{`{#1}'} \newcommand{\dQuote}[1]{``{#1}''} \let\code=\texttt \newcommand{\file}[1]{\sQuote{\textsf{#1}}} \newcommand{\class}[1]{\code{"#1"}} \SweaveOpts{strip.white=true} \AtBeginDocument{\setkeys{Gin}{width=0.6\textwidth}} \date{2007-06-28} \title{A CLUE for CLUster Ensembles} \author{Kurt Hornik} %% \VignetteIndexEntry{CLUster Ensembles} \sloppy{} \begin{document} \maketitle \begin{abstract} Cluster ensembles are collections of individual solutions to a given clustering problem which are useful or necessary to consider in a wide range of applications. The R package~\pkg{clue} provides an extensible computational environment for creating and analyzing cluster ensembles, with basic data structures for representing partitions and hierarchies, and facilities for computing on these, including methods for measuring proximity and obtaining consensus and ``secondary'' clusterings. \end{abstract} <>= options(width = 60) library("clue") @ % \section{Introduction} \label{sec:introduction} \emph{Cluster ensembles} are collections of clusterings, which are all of the same ``kind'' (e.g., collections of partitions, or collections of hierarchies), of a set of objects. Such ensembles can be obtained, for example, by varying the (hyper)parameters of a ``base'' clustering algorithm, by resampling or reweighting the set of objects, or by employing several different base clusterers. Questions of ``agreement'' in cluster ensembles, and obtaining ``consensus'' clusterings from it, have been studied in several scientific communities for quite some time now. A special issue of the Journal of Classification was devoted to ``Comparison and Consensus of Classifications'' \citep{cluster:Day:1986} almost two decades ago. The recent popularization of ensemble methods such as Bayesian model averaging \citep{cluster:Hoeting+Madigan+Raftery:1999}, bagging \citep{cluster:Breiman:1996} and boosting \citep{cluster:Friedman+Hastie+Tibshirani:2000}, typically in a supervised leaning context, has also furthered the research interest in using ensemble methods to improve the quality and robustness of cluster solutions. 
Cluster ensembles can also be utilized to aggregate base results over conditioning or grouping variables in multi-way data, to reuse existing knowledge, and to accommodate the needs of distributed computing, see e.g.\ \cite{cluster:Hornik:2005a} and \cite{cluster:Strehl+Ghosh:2003a} for more information. Package~\pkg{clue} is an extension package for R~\citep{cluster:R:2005} providing a computational environment for creating and analyzing cluster ensembles. In Section~\ref{sec:structures+algorithms}, we describe the underlying data structures, and the functionality for measuring proximity, obtaining consensus clusterings, and ``secondary'' clusterings. Four examples are discussed in Section~\ref{sec:examples}. Section~\ref{sec:outlook} concludes the paper. A previous version of this manuscript was published in the \emph{Journal of Statistical Software} \citep{cluster:Hornik:2005b}. \section{Data structures and algorithms} \label{sec:structures+algorithms} \subsection{Partitions and hierarchies} Representations of clusterings of objects greatly vary across the multitude of methods available in R packages. For example, the class ids (``cluster labels'') for the results of \code{kmeans()} in base package~\pkg{stats}, \code{pam()} in recommended package~\pkg{cluster}~\citep{cluster:Rousseeuw+Struyf+Hubert:2005, cluster:Struyf+Hubert+Rousseeuw:1996}, and \code{Mclust()} in package~\pkg{mclust}~\citep{cluster:Fraley+Raftery+Wehrens:2005, cluster:Fraley+Raftery:2003}, are available as components named \code{cluster}, \code{clustering}, and \code{classification}, respectively, of the R objects returned by these functions. In many cases, the representations inherit from suitable classes. (We note that for versions of R prior to 2.1.0, \code{kmeans()} only returned a ``raw'' (unclassed) result, which was changed alongside the development of \pkg{clue}.) We deal with this heterogeneity of representations by providing getters for the key underlying data, such as the number of objects from which a clustering was obtained, and predicates, e.g.\ for determining whether an R object represents a partition of objects or not. These getters, such as \code{n\_of\_objects()}, and predicates are implemented as S3 generics, so that there is a \emph{conceptual}, but no formal class system underlying the predicates. Support for classed representations can easily be added by providing S3 methods. \subsubsection{Partitions} The partitions considered in \pkg{clue} are possibly soft (``fuzzy'') partitions, where for each object~$i$ and class~$j$ there is a non-negative number~$\mu_{ij}$ quantifying the ``belongingness'' or \emph{membership} of object~$i$ to class~$j$, with $\sum_j \mu_{ij} = 1$. For hard (``crisp'') partitions, all $\mu_{ij}$ are in $\{0, 1\}$. We can gather the $\mu_{ij}$ into the \emph{membership matrix} $M = [\mu_{ij}]$, where rows correspond to objects and columns to classes. The \emph{number of classes} of a partition, computed by function \code{n\_of\_classes()}, is the number of $j$ for which $\mu_{ij} > 0$ for at least one object~$i$. This may be less than the number of ``available'' classes, corresponding to the number of columns in a membership matrix representing the partition. The predicate functions \code{is.cl\_partition()}, \code{is.cl\_hard\_partition()}, and \code{is.cl\_soft\_partition()} are used to indicate whether R objects represent partitions of objects of the respective kind, with hard partitions as characterized above (all memberships in $\{0, 1\}$). 
(Hence, ``fuzzy clustering'' algorithms can in principle also give a hard partition.) \code{is.cl\_partition()} and \code{is.cl\_hard\_partition()} are generic functions; \code{is.cl\_soft\_partition()} gives true iff \code{is.cl\_partition()} is true and \code{is.cl\_hard\_partition()} is false. For R objects representing partitions, function \code{cl\_membership()} computes an R object with the membership values, currently always as a dense membership matrix with additional attributes. This is obviously rather inefficient for computations on hard partitions; we are planning to add ``canned'' sparse representations (using the vector of class ids) in future versions. Function \code{as.cl\_membership()} can be used for coercing \dQuote{raw} class ids (given as atomic vectors) or membership values (given as numeric matrices) to membership objects. Function \code{cl\_class\_ids()} determines the class ids of a partition. For soft partitions, the class ids returned are those of the \dQuote{nearest} hard partition obtained by taking the class ids of the (first) maximal membership values. Note that the cardinality of the set of the class ids may be less than the number of classes in the (soft) partition. Many partitioning methods are based on \emph{prototypes} (``centers''). In typical cases, these are points~$p_j$ in the same feature space the measurements~$x_i$ on the objects~$i$ to be partitioned are in, so that one can measure distance between objects and prototypes, and e.g.\ classify objects to their closest prototype. Such partitioning methods can also induce partitions of the entire feature space (rather than ``just'' the set of objects to be partitioned). Currently, package \pkg{clue} has only minimal support for this ``additional'' structure, providing a \code{cl\_prototypes()} generic for extracting the prototypes, and is mostly focused on computations on partitions which are based on their memberships. Many algorithms resulting in partitions of a given set of objects can be taken to induce a partition of the underlying feature space for the measurements on the objects, so that class memberships for ``new'' objects can be obtained from the induced partition. Examples include partitions based on assigning objects to their ``closest'' prototypes, or providing mixture models for the distribution of objects in feature space. Package~\pkg{clue} provides a \code{cl\_predict()} generic for predicting the class memberships of new objects (if possible). Function \code{cl\_fuzziness()} computes softness (fuzziness) measures for (ensembles) of partitions. Built-in measures are the partition coefficient \label{PC} and partition entropy \citep[e.g.,][]{cluster:Bezdek:1981}, with an option to normalize in a way that hard partitions and the ``fuzziest'' possible partition (where all memberships are the same) get fuzziness values of zero and one, respectively. Note that this normalization differs from ``standard'' ones in the literature. In the sequel, we shall also use the concept of the \emph{co-membership matrix} $C(M) = M M'$, where $'$ denotes matrix transposition, of a partition. For hard partitions, an entry $c_{ij}$ of $C(M)$ is 1 iff the corresponding objects $i$ and $j$ are in the same class, and 0 otherwise. 
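For illustration, the following small example on simulated data shows how
these computations fit together: we create a hard partition via
\code{kmeans()}, and obtain its number of classes, its membership matrix, and
(a corner of) the induced co-membership matrix.
<<>>=
set.seed(1234)
x <- matrix(rnorm(100), ncol = 2)
km <- kmeans(x, 3)
n_of_classes(km)
M <- cl_membership(km)
C <- unclass(M) %*% t(unclass(M))
C[1:4, 1:4]
@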
\subsubsection{Hierarchies} The hierarchies considered in \pkg{clue} are \emph{total indexed hierarchies}, also known as \emph{$n$-valued trees}, and hence correspond in a one-to-one manner to \emph{ultrametrics} (distances $u_{ij}$ between pairs of objects $i$ and $j$ which satisfy the ultrametric constraint $u_{ij} = \max(u_{ik}, u_{jk})$ for all triples $i$, $j$, and $k$). See e.g.~\citet[Page~69--71]{cluster:Gordon:1999}. Function \code{cl\_ultrametric(x)} computes the associated ultrametric from an R object \code{x} representing a hierarchy of objects. If \code{x} is not an ultrametric, function \code{cophenetic()} in base package~\pkg{stats} is used to obtain the ultrametric (also known as cophenetic) distances from the hierarchy, which in turn by default calls the S3 generic \code{as.hclust()} (also in \pkg{stats}) on the hierarchy. Support for classes which represent hierarchies can thus be added by providing \code{as.hclust()} methods for this class. In R~2.1.0 or better (again as part of the work on \pkg{clue}), \code{cophenetic} is an S3 generic as well, and one can also more directly provide methods for this if necessary. In addition, there is a generic function \code{as.cl\_ultrametric()} which can be used for coercing \emph{raw} (non-classed) ultrametrics, represented as numeric vectors (of the lower-half entries) or numeric matrices, to ultrametric objects. Finally, the generic predicate function \code{is.cl\_hierarchy()} is used to determine whether an R object represents a hierarchy or not. Ultrametric objects can also be coerced to classes~\class{dendrogram} and \class{hclust} (from base package~\pkg{stats}), and hence in particular use the \code{plot()} methods for these classes. By default, plotting an ultrametric object uses the plot method for dendrograms. Obtaining a hierarchy on a given set of objects can be thought of as transforming the pairwise dissimilarities between the objects (which typically do not yet satisfy the ultrametric constraints) into an ultrametric. Ideally, this ultrametric should be as close as possible to the dissimilarities. In some important cases, explicit solutions are possible (e.g., ``standard'' hierarchical clustering with single or complete linkage gives the optimal ultrametric dominated by or dominating the dissimilarities, respectively). On the other hand, the problem of finding the closest ultrametric in the least squares sense is known to be NP-hard \citep{cluster:Krivanek+Moravek:1986,cluster:Krivanek:1986}. One important class of heuristics for finding least squares fits is based on iterative projection on convex sets of constraints \citep{cluster:Hubert+Arabie:1995}. \label{SUMT} Function \code{ls\_fit\_ultrametric()} follows \cite{cluster:DeSoete:1986} to use an SUMT \citep[Sequential Unconstrained Minimization Technique;][]{cluster:Fiacco+McCormick:1968} approach in turn simplifying the suggestions in \cite{cluster:Carroll+Pruzansky:1980}. Let $L(u)$ be the function to be minimized over all $u$ in some constrained set $\mathcal{U}$---in our case, $L(u) = \sum (d_{ij}-u_{ij})^2$ is the least squares criterion, and $\mathcal{U}$ is the set of all ultrametrics $u$. One iteratively minimizes $L(u) + \rho_k P(u)$, where $P(u)$ is a non-negative function penalizing violations of the constraints such that $P(u)$ is zero iff $u \in \mathcal{U}$. 
The $\rho$ values are increased according to the rule $\rho_{k+1} = q \rho_k$ for some constant $q > 1$, until convergence is obtained in the sense that e.g.\ the Euclidean distance between successive solutions $u_k$ and $u_{k+1}$ is small enough. Optionally, the final $u_k$ is then suitably projected onto $\mathcal{U}$.

For \code{ls\_fit\_ultrametric()}, we obtain the starting value $u_0$ by \dQuote{random shaking} of the given dissimilarity object, and use the penalty function $P(u) = \sum_{\Omega} (u_{ij} - u_{jk}) ^ 2$, where $\Omega$ contains all triples $i, j, k$ for which $u_{ij} \le \min(u_{ik}, u_{jk})$ and $u_{ik} \ne u_{jk}$, i.e., for which $u$ violates the ultrametric constraints. The unconstrained minimizations are carried out using either \code{optim()} or \code{nlm()} in base package~\pkg{stats}, with analytic gradients given in \cite{cluster:Carroll+Pruzansky:1980}. This ``works'', even though we note that $P$ is not even a continuous function, which seems to have gone unnoticed in the literature! (Consider an ultrametric $u$ for which $u_{ij} = u_{ik} < u_{jk}$ for some $i, j, k$ and define $u(\delta)$ by changing the $u_{ij}$ to $u_{ij} + \delta$. For $u$, both $(i,j,k)$ and $(j,i,k)$ are in the violation set $\Omega$, whereas for all $\delta$ sufficiently small, only $(j,i,k)$ is in the violation set for $u(\delta)$. Hence, $\lim_{\delta\to 0} P(u(\delta)) = P(u) + (u_{ij} - u_{ik})^2$. This shows that $P$ is discontinuous at all non-constant $u$ with duplicated entries. On the other hand, it is continuously differentiable at all $u$ with unique entries.) Hence, we need to turn off checking analytical gradients when using \code{nlm()} for minimization.

The default optimization using conjugate gradients should work reasonably well for medium to large size problems. For \dQuote{small} ones, using \code{nlm()} is usually faster. Note that the number of ultrametric constraints is of the order $n^3$, suggesting to use the SUMT approach rather than \code{constrOptim()} in \pkg{stats}. It should be noted that the SUMT approach is a heuristic which cannot be guaranteed to find the global minimum. Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the base algorithm.

\subsubsection{Extensibility}

The methods provided in package~\pkg{clue} handle the partitions and hierarchies obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}~\citep{cluster:Hornik+Hothorn+Karatzoglou:2006}, \pkg{cba}~\citep{cluster:Buchta+Hahsler:2005}, \pkg{cclust}~\citep{cluster:Dimitriadou:2005}, \pkg{cluster}, \pkg{e1071}~\citep{cluster:Dimitriadou+Hornik+Leisch:2005}, \pkg{flexclust}~\citep{cluster:Leisch:2006a}, \pkg{flexmix}~\citep{cluster:Leisch:2004}, \pkg{kernlab}~\citep{cluster:Karatzoglou+Smola+Hornik:2004}, and \pkg{mclust} (and of course, \pkg{clue} itself). Extending support to other packages is straightforward, provided that clusterings are instances of classes.

Suppose e.g.\ that a package has a function \code{glvq()} for ``generalized'' (i.e., non-Euclidean) Learning Vector Quantization which returns an object of class~\class{glvq}, in turn being a list with component \code{class\_ids} containing the class ids. To integrate this into the \pkg{clue} framework, all that is necessary is to provide the following methods.
<<>>= cl_class_ids.glvq <- function(x) as.cl_class_ids(x$class_ids) is.cl_partition.glvq <- function(x) TRUE is.cl_hard_partition.glvq <- function(x) TRUE @ % $ \subsection{Cluster ensembles} Cluster ensembles are realized as lists of clusterings with additional class information. All clusterings in an ensemble must be of the same ``kind'' (i.e., either all partitions as known to \code{is.cl\_partition()}, or all hierarchies as known to \code{is.cl\_hierarchy()}, respectively), and have the same number of objects. If all clusterings are partitions, the list realizing the ensemble has class~\class{cl\_partition\_ensemble} and inherits from \class{cl\_ensemble}; if all clusterings are hierarchies, it has class~\class{cl\_hierarchy\_ensemble} and inherits from \class{cl\_ensemble}. Empty ensembles cannot be categorized according to the kind of clusterings they contain, and hence only have class~\class{cl\_ensemble}. Function \code{cl\_ensemble()} creates a cluster ensemble object from clusterings given either one-by-one, or as a list passed to the \code{list} argument. As unclassed lists could be used to represent single clusterings (in particular for results from \code{kmeans()} in versions of R prior to 2.1.0), we prefer not to assume that an unnamed given list is a list of clusterings. \code{cl\_ensemble()} verifies that all given clusterings are of the same kind, and all have the same number of objects. (By the notion of cluster ensembles, we should in principle verify that the clusterings come from the \emph{same} objects, which of course is not always possible.) The list representation makes it possible to use \code{lapply()} for computations on the individual clusterings in (i.e., the components of) a cluster ensemble. Available methods for cluster ensembles include those for subscripting, \code{c()}, \code{rep()}, \code{print()}, and \code{unique()}, where the last is based on a \code{unique()} method for lists added in R~2.1.1, and makes it possible to find unique and duplicated elements in cluster ensembles. The elements of the ensemble can be tabulated using \code{cl\_tabulate()}. Function \code{cl\_boot()} generates cluster ensembles with bootstrap replicates of the results of applying a \dQuote{base} clustering algorithm to a given data set. Currently, this is a rather simple-minded function with limited applicability, and mostly useful for studying the effect of (uncontrolled) random initializations of fixed-point partitioning algorithms such as \code{kmeans()} or \code{cmeans()} in package~\pkg{e1071}. To study the effect of varying control parameters or explicitly providing random starting values, the respective cluster ensemble has to be generated explicitly (most conveniently by using \code{replicate()} to create a list \code{lst} of suitable instances of clusterings obtained by the base algorithm, and using \code{cl\_ensemble(list = lst)} to create the ensemble). Resampling the training data is possible for base algorithms which can predict the class memberships of new data using \code{cl\_predict} (e.g., by classifying the out-of-bag data to their closest prototype). In fact, we believe that for unsupervised learning methods such as clustering, \emph{reweighting} is conceptually superior to resampling, and have therefore recently enhanced package~\pkg{e1071} to provide an implementation of weighted fuzzy $c$-means, and package~\pkg{flexclust} contains an implementation of weighted $k$-means. 
We are currently experimenting with interfaces for providing ``direct'' support for reweighting via \code{cl\_boot()}.

\subsection{Cluster proximities}

\subsubsection{Principles}

Computing dissimilarities and similarities (``agreements'') between clusterings of the same objects is a key ingredient in the analysis of cluster ensembles. The ``standard'' data structures available for such proximity data (measures of similarity or dissimilarity) are classes~\class{dist} and \class{dissimilarity} in package~\pkg{cluster} (which basically, but not strictly, extends \class{dist}), and are both not entirely suited to our needs. First, they are confined to \emph{symmetric} dissimilarity data. Second, they do not provide enough reflectance. We also note that the Bioconductor package~\pkg{graph}~\citep{cluster:Gentleman+Whalen:2005} contains an efficient subscript method for objects of class~\class{dist}, but returns a ``raw'' matrix for row/column subscripting.

For package~\pkg{clue}, we use the following approach. There are classes for symmetric and (possibly) non-symmetric proximity data (\class{cl\_proximity} and \class{cl\_cross\_proximity}), which, in addition to holding the numeric data, also contain a description ``slot'' (attribute), currently a character string, as a first approximation to providing more reflectance. Internally, symmetric proximity objects store the lower diagonal proximity values in a numeric vector (in row-major order), i.e., the same way as objects of class~\class{dist}; a \code{self} attribute can be used for diagonal values (in case some of these are non-zero). Symmetric proximity objects can be coerced to dense matrices using \code{as.matrix()}. It is possible to use 2-index matrix-style subscripting for symmetric proximity objects; unless this uses identical row and column indices, it results in a non-symmetric proximity object. This approach ``propagates'' to classes for symmetric and (possibly) non-symmetric cluster dissimilarity and agreement data (e.g., \class{cl\_dissimilarity} and \class{cl\_cross\_dissimilarity} for dissimilarity data), which extend the respective proximity classes. Ultrametric objects are implemented as symmetric proximity objects with a dissimilarity interpretation so that self-proximities are zero, and inherit from classes~\class{cl\_dissimilarity} and \class{cl\_proximity}.

Providing reflectance is far from optimal. For example, if \code{s} is a similarity object (with cluster agreements), \code{1 - s} is a dissimilarity one, but the description is preserved unchanged. This issue could be addressed by providing high-level functions for transforming proximities.

\label{synopsis} Cluster dissimilarities are computed via \code{cl\_dissimilarity()} with synopsis \code{cl\_dissimilarity(x, y = NULL, method = "euclidean")}, where \code{x} and \code{y} are cluster ensemble objects or coercible to such, or \code{NULL} (\code{y} only). If \code{y} is \code{NULL}, the return value is an object of class~\class{cl\_dissimilarity} which contains the dissimilarities between all pairs of clusterings in \code{x}. Otherwise, it is an object of class~\class{cl\_cross\_dissimilarity} with the dissimilarities between the clusterings in \code{x} and the clusterings in \code{y}. Formal argument \code{method} is either a character string specifying one of the built-in methods for computing dissimilarity, or a function to be taken as a user-defined method, making it reasonably straightforward to add methods.
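For concreteness, a minimal usage sketch of this synopsis (not run; \code{pens} is assumed to be a cluster ensemble of partitions and \code{p} a single partition, neither of which is defined in this text):
<<eval = FALSE>>=
## Pairwise dissimilarities within an ensemble (class 'cl_dissimilarity'):
cl_dissimilarity(pens)
## Cross-dissimilarities between the ensemble and a single partition
## (class 'cl_cross_dissimilarity'):
cl_dissimilarity(pens, p)
## A different built-in method:
cl_dissimilarity(pens, method = "manhattan")
@ %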
Function \code{cl\_agreement()} has the same interface as \code{cl\_dissimilarity()}, returning cluster similarity objects with respective classes~\class{cl\_agreement} and \class{cl\_cross\_agreement}. Built-in methods for computing dissimilarities may coincide (in which case they are transforms of each other), but do not necessarily do so, as there typically are no canonical transformations. E.g., according to needs and scientific community, agreements might be transformed to dissimilarities via $d = - \log(s)$ or the square root thereof \citep[e.g.,][]{cluster:Strehl+Ghosh:2003b}, or via $d = 1 - s$. \subsubsection{Partition proximities} When assessing agreement or dissimilarity of partitions, one needs to consider that the class ids may be permuted arbitrarily without changing the underlying partitions. For membership matrices~$M$, permuting class ids amounts to replacing $M$ by $M \Pi$, where $\Pi$ is a suitable permutation matrix. We note that the co-membership matrix $C(M) = MM'$ is unchanged by these transformations; hence, proximity measures based on co-occurrences, such as the Katz-Powell \citep{cluster:Katz+Powell:1953} or Rand \citep{cluster:Rand:1971} indices, do not explicitly need to adjust for possible re-labeling. The same is true for measures based on the ``confusion matrix'' $M' \tilde{M}$ of two membership matrices $M$ and $\tilde{M}$ which are invariant under permutations of rows and columns, such as the Normalized Mutual Information (NMI) measure introduced in \cite{cluster:Strehl+Ghosh:2003a}. Other proximity measures need to find permutations so that the classes are optimally matched, which of course in general requires exhaustive search through all $k!$ possible permutations, where $k$ is the (common) number of classes in the partitions, and thus will typically be prohibitively expensive. Fortunately, in some important cases, optimal matchings can be determined very efficiently. We explain this in detail for ``Euclidean'' partition dissimilarity and agreement (which in fact is the default measure used by \code{cl\_dissimilarity()} and \code{cl\_agreement()}). Euclidean partition dissimilarity \citep{cluster:Dimitriadou+Weingessel+Hornik:2002} is defined as \begin{displaymath} d(M, \tilde{M}) = \min\nolimits_\Pi \| M - \tilde{M} \Pi \| \end{displaymath} where the minimum is taken over all permutation matrices~$\Pi$, $\|\cdot\|$ is the Frobenius norm (so that $\|Y\|^2 = \trace(Y'Y)$), and $n$ is the (common) number of objects in the partitions. As $\| M - \tilde{M} \Pi \|^2 = \trace(M'M) - 2 \trace(M'\tilde{M}\Pi) + \trace(\Pi'\tilde{M}'\tilde{M}\Pi) = \trace(M'M) - 2 \trace(M'\tilde{M}\Pi) + \trace(\tilde{M}'\tilde{M})$, we see that minimizing $\| M - \tilde{M} \Pi \|^2$ is equivalent to maximizing $\trace(M'\tilde{M}\Pi) = \sum_{i,k}{\mu_{ik}\tilde{\mu}}_{i,\pi(k)}$, which for hard partitions is the number of objects with the same label in the partitions given by $M$ and $\tilde{M}\Pi$. Finding the optimal $\Pi$ is thus recognized as an instance of the \emph{linear sum assignment problem} (LSAP, also known as the weighted bipartite graph matching problem). The LSAP can be solved by linear programming, e.g., using Simplex-style primal algorithms as done by function~\code{lp.assign()} in package~\pkg{lpSolve}~\citep{cluster:Buttrey:2005}, but primal-dual algorithms such as the so-called Hungarian method can be shown to find the optimum in time $O(k^3)$ \citep[e.g.,][]{cluster:Papadimitriou+Steiglitz:1982}. 
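As a small illustration of this matching step (a sketch only, not taken from the package documentation; \code{p} and \code{q} are assumed to be hard partitions of the same objects into the same number of classes), one can recover an optimal relabeling by maximizing the trace of the permuted confusion matrix with \code{solve\_LSAP()}, \pkg{clue}'s assignment solver discussed below.
<<eval = FALSE>>=
## Hypothetical sketch: optimally match the class ids of two hard partitions.
cm <- table(cl_class_ids(p), cl_class_ids(q))  # confusion matrix M' M-tilde
perm <- solve_LSAP(cm, maximum = TRUE)         # optimal column permutation
## Number of objects carrying the same label after relabeling q:
sum(cm[cbind(seq_along(perm), perm)])
@ %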
Available published implementations include TOMS 548 \citep{cluster:Carpaneto+Toth:1980}, which however is restricted to integer weights and $k < 131$. One can also transform the LSAP into a network flow problem, and use e.g.~RELAX-IV \citep{cluster:Bertsekas+Tseng:1994} for solving this, as is done in package~\pkg{optmatch}~\citep{cluster:Hansen:2005}. In package~\pkg{clue}, we use an efficient C implementation of the Hungarian algorithm kindly provided to us by Walter B\"ohm, which has been found to perform very well across a wide range of problem sizes.

\cite{cluster:Gordon+Vichi:2001} use a variant of Euclidean dissimilarity (``GV1 dissimilarity'') which is based on the sum of the squared difference of the memberships of matched (non-empty) classes only, discarding the unmatched ones (see their Example~2). This results in a measure which is discontinuous over the space of soft partitions with arbitrary numbers of classes.

The partition agreement measures ``angle'' and ``diag'' (maximal cosine of angle between the memberships, and maximal co-classification rate, where both maxima are taken over all column permutations of the membership matrices) are based on solving the same LSAP as for Euclidean dissimilarity. Finally, Manhattan partition dissimilarity is defined as the minimal sum of the absolute differences of $M$ and all column permutations of $\tilde{M}$, and can again be computed efficiently by solving an LSAP. For hard partitions, both Manhattan and squared Euclidean dissimilarity give twice the \emph{transfer distance} \citep{cluster:Charon+Denoeud+Guenoche:2006}, which is the minimum number of objects that must be removed so that the implied partitions (restrictions to the remaining objects) are identical. This is also known as the \emph{$R$-metric} in \cite{cluster:Day:1981}, i.e., the number of augmentations and removals of single objects needed to transform one partition into the other, and the \emph{partition-distance} in \cite{cluster:Gusfield:2002}.

Note that when assessing proximity, agreements for soft partitions are always (and quite often considerably) lower than the agreements for the corresponding nearest hard partitions, unless the agreement measures are based on the latter anyways (as currently done for Rand, Katz-Powell, and NMI). Package~\pkg{clue} provides additional agreement measures, such as the Jaccard and Fowlkes-Mallows \citep[quite often incorrectly attributed to \cite{cluster:Wallace:1983}]{cluster:Fowlkes+Mallows:1983a} indices, and dissimilarity measures such as the ``symdiff'' and Rand distances (the latter is proportional to the metric of \cite{cluster:Mirkin:1996}) and the metrics discussed in \cite{cluster:Boorman+Arabie:1972}. One could easily add more proximity measures, such as the ``Variation of Information'' \citep{cluster:Meila:2003}. However, all these measures are rigorously defined for hard partitions only. To see why extensions to soft partitions are far from straightforward, consider e.g.\ measures based on the confusion matrix. Its entries count the cardinality of certain intersections of sets. \label{fuzzy} In a fuzzy context for soft partitions, a natural generalization would be using fuzzy cardinalities (i.e., sums of membership values) of fuzzy intersections instead. There are many possible choices for the latter, with the product of the membership values (corresponding to employing the confusion matrix also in the fuzzy case) one of them, but the minimum instead of the product being the ``usual'' choice.
A similar point can be made for co-occurrences of soft memberships. We are not aware of systematic investigations of these extension issues. \subsubsection{Hierarchy proximities} Available built-in dissimilarity measures for hierarchies include \emph{Euclidean} (again, the default measure used by \code{cl\_dissimilarity()}) and Manhattan dissimilarity, which are simply the Euclidean (square root of the sum of squared differences) and Manhattan (sum of the absolute differences) dissimilarities between the associated ultrametrics. Cophenetic dissimilarity is defined as $1 - c^2$, where $c$ is the cophenetic correlation coefficient \citep{cluster:Sokal+Rohlf:1962}, i.e., the Pearson product-moment correlation between the ultrametrics. Gamma dissimilarity is the rate of inversions between the associated ultrametrics $u$ and $v$ (i.e., the rate of pairs $(i,j)$ and $(k,l)$ for which $u_{ij} < u_{kl}$ and $v_{ij} > v_{kl}$). This measure is a linear transformation of Kruskal's~$\gamma$. Finally, symdiff dissimilarity is the cardinality of the symmetric set difference of the sets of classes (hierarchies in the strict sense) induced by the dendrograms. Associated agreement measures are obtained by suitable transformations of the dissimilarities~$d$; for Euclidean proximities, we prefer to use $1 / (1 + d)$ rather than e.g.\ $\exp(-d)$. One should note that whereas cophenetic and gamma dissimilarities are invariant to linear transformations, Euclidean and Manhattan ones are not. Hence, if only the relative ``structure'' of the dendrograms is of interest, these dissimilarities should only be used after transforming the ultrametrics to a common range of values (e.g., to $[0,1]$). \subsection{Consensus clusterings} Consensus clusterings ``synthesize'' the information in the elements of a cluster ensemble into a single clustering. There are three main approaches to obtaining consensus clusterings \citep{cluster:Hornik:2005a,cluster:Gordon+Vichi:2001}: in the \emph{constructive} approach, one specifies a way to construct a consensus clustering. In the \emph{axiomatic} approach, emphasis is on the investigation of existence and uniqueness of consensus clusterings characterized axiomatically. The \emph{optimization} approach formalizes the natural idea of describing consensus clusterings as the ones which ``optimally represent the ensemble'' by providing a criterion to be optimized over a suitable set $\mathcal{C}$ of possible consensus clusterings. If $d$ is a dissimilarity measure and $C_1, \ldots, C_B$ are the elements of the ensemble, one can e.g.\ look for solutions of the problem \begin{displaymath} \sum\nolimits_{b=1}^B w_b d(C, C_b) ^ p \Rightarrow \min\nolimits_{C \in \mathcal{C}}, \end{displaymath} for some $p \ge 0$, i.e., as clusterings~$C^*$ minimizing weighted average dissimilarity powers of order~$p$. Analogously, if a similarity measure is given, one can look for clusterings maximizing weighted average similarity powers. Following \cite{cluster:Gordon+Vichi:1998}, an above $C^*$ is referred to as (weighted) \emph{median} or \emph{medoid} clustering if $p = 1$ and the optimum is sought over the set of all possible base clusterings, or the set $\{ C_1, \ldots, C_B \}$ of the base clusterings, respectively. For $p = 2$, we have \emph{least squares} consensus clusterings (generalized means). For computing consensus clusterings, package~\pkg{clue} provides function \code{cl\_consensus()} with synopsis \code{cl\_consensus(x, method = NULL, weights = 1, control = list())}. 
This allows (similar to the functions for computing cluster proximities, see Section~\ref{synopsis} on Page~\pageref{synopsis}) argument \code{method} to be a character string specifying one of the built-in methods discussed below, or a function to be taken as a user-defined method (taking an ensemble, the case weights, and a list of control parameters as its arguments), again making it reasonably straightforward to add methods. In addition, function~\code{cl\_medoid()} can be used for obtaining medoid partitions (using, in principle, arbitrary dissimilarities). Modulo possible differences in the case of ties, this gives the same results as (the medoid obtained by) \code{pam()} in package~\pkg{cluster}. If all elements of the ensemble are partitions, package~\pkg{clue} provides algorithms for computing soft least squares consensus partitions for weighted Euclidean, GV1 and co-membership dissimilarities. Let $M_1, \ldots, M_B$ and $M$ denote the membership matrices of the elements of the ensemble and their sought least squares consensus partition, respectively. For Euclidean dissimilarity, we need to find \begin{displaymath} \sum_b w_b \min\nolimits_{\Pi_b} \| M - M_b \Pi_b \|^2 \Rightarrow \min\nolimits_M \end{displaymath} over all membership matrices (i.e., stochastic matrices) $M$, or equivalently, \begin{displaymath} \sum_b w_b \| M - M_b \Pi_b \|^2 \Rightarrow \min\nolimits_{M, \Pi_1, \ldots, \Pi_B} \end{displaymath} over all $M$ and permutation matrices $\Pi_1, \ldots, \Pi_B$. Now fix the $\Pi_b$ and let $\bar{M} = s^{-1} \sum_b w_b M_b \Pi_b$ be the weighted average of the $M_b \Pi_b$, where $s = \sum_b w_b$. Then \begin{eqnarray*} \lefteqn{\sum_b w_b \| M - M_b \Pi_b \|^2} \\ &=& \sum_b w_b (\|M\|^2 - 2 \trace(M' M_b \Pi_b) + \|M_b\Pi_b\|^2) \\ &=& s \|M\|^2 - 2 s \trace(M' \bar{M}) + \sum_b w_b \|M_b\|^2 \\ &=& s (\|M - \bar{M}\|^2) + \sum_b w_b \|M_b\|^2 - s \|\bar{M}\|^2 \end{eqnarray*} Thus, as already observed in \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} and \cite{cluster:Gordon+Vichi:2001}, for fixed permutations $\Pi_b$ the optimal soft $M$ is given by $\bar{M}$. The optimal permutations can be found by minimizing $- s \|\bar{M}\|^2$, or equivalently, by maximizing \begin{displaymath} s^2 \|\bar{M}\|^2 = \sum_{\beta, b} w_\beta w_b \trace(\Pi_\beta'M_\beta'M_b\Pi_b). \end{displaymath} With $U_{\beta,b} = w_\beta w_b M_\beta' M_b$ we can rewrite the above as \begin{displaymath} \sum_{\beta, b} w_\beta w_b \trace(\Pi_\beta'M_\beta'M_b\Pi_b) = \sum_{\beta,b} \sum_{j=1}^k [U_{\beta,b}]_{\pi_\beta(j), \pi_b(j)} =: \sum_{j=1}^k c_{\pi_1(j), \ldots, \pi_B(j)} \end{displaymath} This is an instance of the \emph{multi-dimensional assignment problem} (MAP), which, contrary to the LSAP, is known to be NP-hard \citep[e.g., via reduction to 3-DIMENSIONAL MATCHING,][]{cluster:Garey+Johnson:1979}, and can e.g.\ be approached using randomized parallel algorithms \citep{cluster:Oliveira+Pardalos:2004}. Branch-and-bound approaches suggested in the literature \citep[e.g.,][]{cluster:Grundel+Oliveira+Pardalos:2005} are unfortunately computationally infeasible for ``typical'' sizes of cluster ensembles ($B \ge 20$, maybe even in the hundreds). Package~\pkg{clue} provides two heuristics for (approximately) finding the soft least squares consensus partition for Euclidean dissimilarity. 
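Before turning to these heuristics, the key fact that for fixed matchings the optimum is simply the weighted mean of the matched memberships can be checked numerically; the following toy sketch (purely illustrative, with hard-coded matrices that do not appear elsewhere in this text) does so for two already aligned soft memberships.
<<eval = FALSE>>=
## Toy check: the weighted mean minimizes the weighted sum of squared
## Frobenius distances to the given membership matrices.
M1 <- cbind(c(1, 0, 1), c(0, 1, 0))
M2 <- cbind(c(0.8, 0.3, 0.9), c(0.2, 0.7, 0.1))
w <- c(2, 1)
Mbar <- (w[1] * M1 + w[2] * M2) / sum(w)
crit <- function(M) w[1] * sum((M - M1)^2) + w[2] * sum((M - M2)^2)
crit(Mbar)             # minimal value
crit((M1 + M2) / 2)    # any other membership matrix, e.g. the plain mean, is worse
@ %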
Method \code{"DWH"} of function \code{cl\_consensus()} is an extension of the greedy algorithm in \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} which is based on a single forward pass through the ensemble which in each step chooses the ``locally'' optimal $\Pi$. Starting with $\tilde{M}_1 = M_1$, $\tilde{M}_b$ is obtained from $\tilde{M}_{b-1}$ by optimally matching $M_b \Pi_b$ to this, and taking a weighted average of $\tilde{M}_{b-1}$ and $M_b \Pi_b$ in a way that $\tilde{M}_b$ is the weighted average of the first~$b$ $M_\beta \Pi_\beta$. This simple approach could be further enhanced via back-fitting or several passes, in essence resulting in an ``on-line'' version of method \code{"SE"}. This, in turn, is a fixed-point algorithm, which iterates between updating $M$ as the weighted average of the current $M_b \Pi_b$, and determining the $\Pi_b$ by optimally matching the current $M$ to the individual $M_b$. Finally, method \code{"GV1"} implements the fixed-point algorithm for the ``first model'' in \cite{cluster:Gordon+Vichi:2001}, which gives least squares consensus partitions for GV1 dissimilarity.

In the above, we implicitly assumed that all partitions in the ensemble as well as the sought consensus partition have the same number of classes. The more general case can be dealt with through suitable ``projection'' devices.

When using co-membership dissimilarity, the least squares consensus partition is determined by minimizing \begin{eqnarray*} \lefteqn{\sum_b w_b \|MM' - M_bM_b'\|^2} \\ &=& s \|MM' - \bar{C}\|^2 + \sum_b w_b \|M_bM_b'\|^2 - s \|\bar{C}\|^2 \end{eqnarray*} over all membership matrices~$M$, where now $\bar{C} = s^{-1} \sum_b C(M_b) = s^{-1} \sum_b M_bM_b'$ is the weighted average co-membership matrix of the ensemble. This corresponds to the ``third model'' in \cite{cluster:Gordon+Vichi:2001}. Method \code{"GV3"} of function \code{cl\_consensus()} provides a SUMT approach (see Section~\ref{SUMT} on Page~\pageref{SUMT}) for finding the minimum. We note that this strategy could more generally be applied to consensus problems of the form \begin{displaymath} \sum_b w_b \|\Phi(M) - \Phi(M_b)\|^2 \Rightarrow \min\nolimits_M, \end{displaymath} which are equivalent to minimizing $\|\Phi(M) - \bar{\Phi}\|^2$, with $\bar{\Phi}$ the weighted average of the $\Phi(M_b)$. This includes e.g.\ the case where generalized co-memberships are defined by taking the ``standard'' fuzzy intersection of co-incidences, as discussed in Section~\ref{fuzzy} on Page~\pageref{fuzzy}.

Package~\pkg{clue} currently does not provide algorithms for obtaining \emph{hard} consensus partitions, as e.g.\ done in \cite{cluster:Krieger+Green:1999} using Rand proximity. It seems ``natural'' to extend the methods discussed above to include a constraint on softness, e.g., on the partition coefficient PC (see Section~\ref{PC} on Page~\pageref{PC}). For Euclidean dissimilarity, straightforward Lagrangian computations show that the constrained minima are of the form $\bar{M}(\alpha) = \alpha \bar{M} + (1 - \alpha) E$, where $E$ is the ``maximally soft'' membership with all entries equal to $1/k$, $\bar{M}$ is again the weighted average of the $M_b\Pi_b$ with the $\Pi_b$ solving the underlying MAP, and $\alpha$ is chosen such that $PC(\bar{M}(\alpha))$ equals a prescribed value. As $\alpha$ increases (even beyond one), softness of the $\bar{M}(\alpha)$ decreases.
However, for $\alpha > 1 / (1 - k\mu^*)$, where $\mu^*$ is the minimum of the entries of $\bar{M}$, the $\bar{M}(\alpha)$ have negative entries, and are no longer feasible membership matrices. Obviously, the non-negativity constraints for the $\bar{M}(\alpha)$ eventually put restrictions on the admissible $\Pi_b$ in the underlying MAP. Thus, such a simple relaxation approach to obtaining optimal hard partitions is not feasible.

For ensembles of hierarchies, \code{cl\_consensus()} provides a built-in method (\code{"cophenetic"}) for approximately minimizing average weighted squared Euclidean dissimilarity \begin{displaymath} \sum_b w_b \| U - U_b \|^2 \Rightarrow \min\nolimits_U \end{displaymath} over all ultrametrics~$U$, where $U_1, \ldots, U_B$ are the ultrametrics corresponding to the elements of the ensemble. This is of course equivalent to minimizing $\| U - \bar{U} \|^2$, where $\bar{U} = s^{-1} \sum_b w_b U_b$ is the weighted average of the $U_b$. The SUMT approach provided by function \code{ls\_fit\_ultrametric()} (see Section~\ref{SUMT} on Page~\pageref{SUMT}) is employed for finding the sought weighted least squares consensus hierarchy. In addition, method \code{"majority"} obtains a consensus hierarchy from an extension of the majority consensus tree of \cite{cluster:Margush+McMorris:1981}, which minimizes $L(U) = \sum_b w_b d(U_b, U)$ over all ultrametrics~$U$, where $d$ is the symmetric difference dissimilarity.

Clearly, the available methods use heuristics for solving hard optimization problems, and cannot be guaranteed to find a global optimum. Standard practice would recommend using the best solution found in ``sufficiently many'' replications of the methods.

Alternative recent approaches to obtaining consensus partitions include ``Bagged Clustering'' \citep[provided by \code{bclust()} in package~\pkg{e1071}]{cluster:Leisch:1999}, the ``evidence accumulation'' framework of \cite{cluster:Fred+Jain:2002}, the NMI optimization and graph-partitioning methods in \cite{cluster:Strehl+Ghosh:2003a}, ``Bagged Clustering'' as in \cite{cluster:Dudoit+Fridlyand:2003}, and the hybrid bipartite graph formulation of \cite{cluster:Fern+Brodley:2004}. Typically, these approaches are constructive, and can easily be implemented based on the infrastructure provided by package~\pkg{clue}. Evidence accumulation amounts to standard hierarchical clustering of the average co-membership matrix. Procedure~BagClust1 of \cite{cluster:Dudoit+Fridlyand:2003} amounts to computing $B^{-1} \sum_b M_b\Pi_b$, where each $\Pi_b$ is determined by optimal Euclidean matching of $M_b$ to a fixed reference membership $M_0$. In the corresponding ``Bagged Clustering'' framework, $M_0$ and the $M_b$ are obtained by applying the base clusterer to the original data set and bootstrap samples from it, respectively. This is implemented as method \code{"DFBC1"} of \code{cl\_bag()} in package~\pkg{clue}. Finally, the approach of \cite{cluster:Fern+Brodley:2004} solves an LSAP for an asymmetric cost matrix based on object-by-all-classes incidences.

\subsection{Cluster partitions}

To investigate the ``structure'' in a cluster ensemble, an obvious idea is to start clustering the clusterings in the ensemble, resulting in ``secondary'' clusterings \citep{cluster:Gordon+Vichi:1998, cluster:Gordon:1999}.
This can e.g.\ be performed by using \code{cl\_dissimilarity()} (or \code{cl\_agreement()}) to compute a dissimilarity matrix for the ensemble, and feed this into a dissimilarity-based clustering algorithm (such as \code{pam()} in package~\pkg{cluster} or \code{hclust()} in package~\pkg{stats}). (One can even use \code{cutree()} to obtain hard partitions from hierarchies thus obtained.) If prototypes (``typical clusterings'') are desired for partitions of clusterings, they can be determined post-hoc by finding suitable consensus clusterings in the classes of the partition, e.g., using \code{cl\_consensus()} or \code{cl\_medoid()}. Package~\pkg{clue} additionally provides \code{cl\_pclust()} for direct prototype-based partitioning based on minimizing criterion functions of the form $\sum w_b u_{bj}^m d(x_b, p_j)^e$, the sum of the case-weighted membership-weighted $e$-th powers of the dissimilarities between the elements~$x_b$ of the ensemble and the prototypes~$p_j$, for suitable dissimilarities~$d$ and exponents~$e$. (The underlying feature spaces are that of membership matrices and ultrametrics, respectively, for partitions and hierarchies.) Parameter~$m$ must not be less than one and controls the softness of the obtained partitions, corresponding to the \dQuote{fuzzification parameter} of the fuzzy $c$-means algorithm. For $m = 1$, a generalization of the Lloyd-Forgy variant \citep{cluster:Lloyd:1957, cluster:Forgy:1965, cluster:Lloyd:1982} of the $k$-means algorithm is used, which iterates between reclassifying objects to their closest prototypes, and computing new prototypes as consensus clusterings for the classes. \citet{cluster:Gaul+Schader:1988} introduced this procedure for \dQuote{Clusterwise Aggregation of Relations} (with the same domains), containing equivalence relations, i.e., hard partitions, as a special case. For $m > 1$, a generalization of the fuzzy $c$-means recipe \citep[e.g.,][]{cluster:Bezdek:1981} is used, which alternates between computing optimal memberships for fixed prototypes, and computing new prototypes as the suitably weighted consensus clusterings for the classes. This procedure is repeated until convergence occurs, or the maximal number of iterations is reached. Consensus clusterings are computed using (one of the methods provided by) \code{cl\_consensus}, with dissimilarities~$d$ and exponent~$e$ implied by method employed, and obtained via a registration mechanism. The default methods compute Least Squares Euclidean consensus clusterings, i.e., use Euclidean dissimilarity~$d$ and $e = 2$. \section{Examples} \label{sec:examples} \subsection{Cassini data} \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} and \cite{cluster:Leisch:1999} use Cassini data sets to illustrate how e.g.\ suitable aggregation of base $k$-means results can reveal underlying non-convex structure which cannot be found by the base algorithm. Such data sets contain points in 2-dimensional space drawn from the uniform distribution on 3 structures, with the two ``outer'' ones banana-shaped and the ``middle'' one a circle, and can be obtained by function~\code{mlbench.cassini()} in package~\pkg{mlbench}~\citep{cluster:Leisch+Dimitriadou:2005}. Package~\pkg{clue} contains the data sets \code{Cassini} and \code{CKME}, which are an instance of a 1000-point Cassini data set, and a cluster ensemble of 50 $k$-means partitions of the data set into three classes, respectively. The data set is shown in Figure~\ref{fig:Cassini}. 
<<Cassini-data, eval = FALSE>>=
data("Cassini")
plot(Cassini$x, col = as.integer(Cassini$classes),
     xlab = "", ylab = "")
@ % $
\begin{figure}
\centering
<<fig = TRUE>>=
<<Cassini-data>>
@ %
\caption{The Cassini data set.}
\label{fig:Cassini}
\end{figure}
Figure~\ref{fig:CKME} gives a dendrogram of the Euclidean dissimilarities of the elements of the $k$-means ensemble.
<<CKME, eval = FALSE>>=
data("CKME")
plot(hclust(cl_dissimilarity(CKME)), labels = FALSE)
@ %
\begin{figure}
\centering
<<fig = TRUE>>=
<<CKME>>
@ %
\caption{A dendrogram of the Euclidean dissimilarities of 50 $k$-means partitions of the Cassini data into 3 classes.}
\label{fig:CKME}
\end{figure}
We can see that there are large groups of essentially identical $k$-means solutions. We can gain more insight by inspecting representatives of these three groups, or by computing the medoid of the ensemble
<<>>=
m1 <- cl_medoid(CKME)
table(Medoid = cl_class_ids(m1), "True Classes" = Cassini$classes)
@ % $
and inspecting it (Figure~\ref{fig:Cassini-medoid}):
<<Cassini-medoid, eval = FALSE>>=
plot(Cassini$x, col = cl_class_ids(m1), xlab = "", ylab = "")
@ % $
\begin{figure}
\centering
<<fig = TRUE>>=
<<Cassini-medoid>>
@ %
\caption{Medoid of the Cassini $k$-means ensemble.}
\label{fig:Cassini-medoid}
\end{figure}
Flipping this solution top-down gives a second ``typical'' partition. We see that the $k$-means base clusterers cannot resolve the underlying non-convex structure. For the least squares consensus of the ensemble, we obtain
<<>>=
set.seed(1234)
m2 <- cl_consensus(CKME)
@ %
where here and below we set the random seed for reproducibility, noting that one should really use several replicates of the consensus heuristic. This consensus partition has confusion matrix
<<>>=
table(Consensus = cl_class_ids(m2), "True Classes" = Cassini$classes)
@ % $
and class details as displayed in Figure~\ref{fig:Cassini-mean}:
<<Cassini-mean, eval = FALSE>>=
plot(Cassini$x, col = cl_class_ids(m2), xlab = "", ylab = "")
@ % $
\begin{figure}
\centering
<<fig = TRUE>>=
<<Cassini-mean>>
@ %
\caption{Least Squares Consensus of the Cassini $k$-means ensemble.}
\label{fig:Cassini-mean}
\end{figure}
This has drastically improved performance, and almost perfect recovery of the two outer shapes. In fact, \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} show that almost perfect classification can be obtained by suitable combinations of different base clusterers ($k$-means, fuzzy $c$-means, and unsupervised fuzzy competitive learning).

\subsection{Gordon-Vichi macroeconomic data}

\citet[Table~1]{cluster:Gordon+Vichi:2001} provide soft partitions of 21 countries based on macroeconomic data for the years 1975, 1980, 1985, 1990, and 1995. These partitions were obtained using fuzzy $c$-means on measurements of the following variables: the annual per capita gross domestic product (GDP) in USD (converted to 1987 prices); the percentage of GDP provided by agriculture; the percentage of employees who worked in agriculture; and gross domestic investment, expressed as a percentage of the GDP. Table~5 in \cite{cluster:Gordon+Vichi:2001} gives 3-class consensus partitions obtained by applying their models 1, 2, and 3 and the approach in \cite{cluster:Sato+Sato:1994}. The partitions and consensus partitions are available in data sets \code{GVME} and \code{GVME\_Consensus}, respectively. We compare the results of \cite{cluster:Gordon+Vichi:2001} using GV1 dissimilarities (model 1) to ours as obtained by \code{cl\_consensus()} with method \code{"GV1"}.
<<>>=
data("GVME")
GVME
set.seed(1)
m1 <- cl_consensus(GVME, method = "GV1",
                   control = list(k = 3, verbose = TRUE))
@ %
This results in a soft partition with average squared GV1 dissimilarity (the criterion function to be optimized by the consensus partition) of
<<>>=
mean(cl_dissimilarity(GVME, m1, "GV1") ^ 2)
@ %
We compare this to the consensus solution given in \cite{cluster:Gordon+Vichi:2001}:
<<>>=
data("GVME_Consensus")
m2 <- GVME_Consensus[["MF1/3"]]
mean(cl_dissimilarity(GVME, m2, "GV1") ^ 2)
table(CLUE = cl_class_ids(m1), GV2001 = cl_class_ids(m2))
@ %
Interestingly, we are able to obtain a ``better'' solution, which however agrees with the one reported in the literature with respect to their nearest hard partitions. For the 2-class consensus partition, we obtain
<<>>=
set.seed(1)
m1 <- cl_consensus(GVME, method = "GV1",
                   control = list(k = 2, verbose = TRUE))
@
which is slightly better than the solution reported in \cite{cluster:Gordon+Vichi:2001}
<<>>=
mean(cl_dissimilarity(GVME, m1, "GV1") ^ 2)
m2 <- GVME_Consensus[["MF1/2"]]
mean(cl_dissimilarity(GVME, m2, "GV1") ^ 2)
@
but in fact agrees with it apart from rounding errors:
<<>>=
max(abs(cl_membership(m1) - cl_membership(m2)))
@
It is interesting to compare these solutions to the Euclidean 2-class consensus partition for the GVME ensemble:
<<>>=
m3 <- cl_consensus(GVME, method = "euclidean",
                   control = list(k = 2, verbose = TRUE))
@
This is markedly different from the GV1 consensus partition
<<>>=
table(GV1 = cl_class_ids(m1), Euclidean = cl_class_ids(m3))
@
with countries
<<>>=
rownames(m1)[cl_class_ids(m1) != cl_class_ids(m3)]
@ %
classified differently, being in the ``richer'' class for the GV1 and the ``poorer'' for the Euclidean consensus partition. (In fact, all these countries end up in the ``middle'' class for the 3-class GV1 consensus partition.)

\subsection{Rosenberg-Kim kinship terms data}

\cite{cluster:Rosenberg+Kim:1975} describe an experiment where perceived similarities of the kinship terms were obtained from six different ``sorting'' experiments. In one of these, 85 female undergraduates at Rutgers University were asked to sort 15 English terms into classes ``on the basis of some aspect of meaning''. These partitions were printed in \citet[Table~7.1]{cluster:Rosenberg:1982}. Comparison with the original data indicates that the partition data have the ``nephew'' and ``niece'' columns interchanged, which is corrected in data set \code{Kinship82}. \citet[Table~6]{cluster:Gordon+Vichi:2001} provide consensus partitions for these data based on their models 1--3 (available in data set \code{Kinship82\_Consensus}). We compare their results using co-membership dissimilarities (model 3) to ours as obtained by \code{cl\_consensus()} with method \code{"GV3"}.
<<>>=
data("Kinship82")
Kinship82
set.seed(1)
m1 <- cl_consensus(Kinship82, method = "GV3",
                   control = list(k = 3, verbose = TRUE))
@ %
This results in a soft partition with average co-membership dissimilarity (the criterion function to be optimized by the consensus partition) of
<<>>=
mean(cl_dissimilarity(Kinship82, m1, "comem") ^ 2)
@ %
Again, we compare this to the corresponding consensus solution given in \cite{cluster:Gordon+Vichi:2001}:
<<>>=
data("Kinship82_Consensus")
m2 <- Kinship82_Consensus[["JMF"]]
mean(cl_dissimilarity(Kinship82, m2, "comem") ^ 2)
@ %
Interestingly, again we obtain a (this time only ``slightly'') better solution, with
<<>>=
cl_dissimilarity(m1, m2, "comem")
table(CLUE = cl_class_ids(m1), GV2001 = cl_class_ids(m2))
@ %
indicating that the two solutions are reasonably close, even though
<<>>=
cl_fuzziness(cl_ensemble(m1, m2))
@ %
shows that the solution found by \pkg{clue} is ``softer''.

\subsection{Miller-Nicely consonant phoneme confusion data}

\cite{cluster:Miller+Nicely:1955} obtained the data on the auditory confusions of 16 English consonant phonemes by exposing female subjects to a series of syllables consisting of one of the consonants followed by the vowel `a' under 17 different experimental conditions. Data set \code{Phonemes} provides consonant misclassification probabilities (i.e., similarities) obtained from aggregating the six so-called flat-noise conditions, in which only the speech-to-noise ratio was varied, into a single matrix of misclassification frequencies. These data are used in \cite{cluster:DeSoete:1986} as an illustration of the SUMT approach for finding least squares optimal fits to dissimilarities by ultrametrics. We can reproduce this analysis as follows.
<<>>=
data("Phonemes")
d <- as.dist(1 - Phonemes)
@ %
(Note that the data set has the consonant misclassification probabilities, i.e., the similarities between the phonemes.)
<<>>=
u <- ls_fit_ultrametric(d, control = list(verbose = TRUE))
@ %
This gives an ultrametric~$u$ for which Figure~\ref{fig:Phonemes} plots the corresponding dendrogram, ``basically'' reproducing Figure~1 in \cite{cluster:DeSoete:1986}.
<<Phonemes, eval = FALSE>>=
plot(u)
@ %
\begin{figure}
\centering
<<fig = TRUE>>=
<<Phonemes>>
@ %
\caption{Dendrogram for least squares fit to the Miller-Nicely consonant phoneme confusion data.}
\label{fig:Phonemes}
\end{figure}
We can also compare the least squares fit obtained to that of other hierarchical clusterings of $d$, e.g.\ those obtained by \code{hclust()}. The ``optimal''~$u$ has Euclidean dissimilarity
<<>>=
round(cl_dissimilarity(d, u), 4)
@ %
to $d$. For the \code{hclust()} results, we get
<<>>=
hclust_methods <- c("ward", "single", "complete", "average", "mcquitty")
hens <- cl_ensemble(list = lapply(hclust_methods, function(m) hclust(d, m)))
names(hens) <- hclust_methods
round(sapply(hens, cl_dissimilarity, d), 4)
@ %
which all exhibit greater Euclidean dissimilarity to $d$ than $u$. (We exclude methods \code{"median"} and \code{"centroid"} as these do not yield valid hierarchies.) We can also compare the ``structure'' of the different hierarchies, e.g.\ by looking at the rate of inversions between them:
<<>>=
ahens <- c(L2opt = cl_ensemble(u), hens)
round(cl_dissimilarity(ahens, method = "gamma"), 2)
@ %

\section{Outlook}
\label{sec:outlook}

Package~\pkg{clue} was designed as an \emph{extensible} environment for computing on cluster ensembles.
It currently provides basic data structures for representing partitions and hierarchies, and facilities for computing on these, including methods for measuring proximity and obtaining consensus and ``secondary'' clusterings. Many extensions to the available functionality are possible and in fact planned (some of these enhancements were already discussed in more detail in the course of this paper).
\begin{itemize}
\item Provide mechanisms to generate cluster ensembles based on reweighting the data set (assuming base clusterers which allow for case weights).
\item Explore recent advances (e.g., parallelized random search) in heuristics for solving the multi-dimensional assignment problem.
\item Add support for \emph{additive trees} \citep[e.g.,][]{cluster:Barthelemy+Guenoche:1991}.
\item Add heuristics for finding least squares fits based on iterative projection on convex sets of constraints, see e.g.\ \cite{cluster:Hubert+Arabie+Meulman:2006} and the accompanying MATLAB code available at \url{http://cda.psych.uiuc.edu/srpm_mfiles} for using these methods (instead of SUMT approaches) to fit ultrametrics and additive trees to proximity data.
\item Add an ``$L_1$ View''. Emphasis in \pkg{clue}, in particular for obtaining consensus clusterings, is on using Euclidean dissimilarities (based on suitable least squares distances); arguably, more ``robust'' consensus solutions should result from using Manhattan dissimilarities (based on absolute distances). Adding such functionality necessitates developing the corresponding structure theory for soft Manhattan median partitions. Minimizing average Manhattan dissimilarity between co-memberships and ultrametrics results in constrained $L_1$ approximation problems for the weighted medians of the co-memberships and ultrametrics, respectively, and could be approached by employing SUMTs analogous to the ones used for the $L_2$ approximations.
\item Provide heuristics for obtaining \emph{hard} consensus partitions.
\item Add facilities for tuning hyper-parameters (most prominently, the number of classes employed) and ``cluster validation'' of partitioning algorithms, as recently proposed by \cite{cluster:Roth+Lange+Braun:2002}, \cite{cluster:Lange+Roth+Braun:2004}, \cite{cluster:Dudoit+Fridlyand:2002}, and \cite{cluster:Tibshirani+Walther:2005}.
\end{itemize}
We are hoping to be able to provide many of these extensions in the near future.

\subsubsection*{Acknowledgments}

We are grateful to Walter B\"ohm for providing efficient C code for solving assignment problems.
{\small \bibliographystyle{abbrvnat} \bibliography{cluster} } \end{document} clue/inst/doc/clue.R0000644000175000017500000001736414130772670014153 0ustar nileshnilesh### R code from vignette source 'clue.Rnw' ################################################### ### code chunk number 1: clue.Rnw:40-42 ################################################### options(width = 60) library("clue") ################################################### ### code chunk number 2: clue.Rnw:310-319 ################################################### cl_class_ids.glvq <- function(x) as.cl_class_ids(x$class_ids) is.cl_partition.glvq <- function(x) TRUE is.cl_hard_partition.glvq <- function(x) TRUE ################################################### ### code chunk number 3: Cassini-data (eval = FALSE) ################################################### ## data("Cassini") ## plot(Cassini$x, col = as.integer(Cassini$classes), ## xlab = "", ylab = "") ################################################### ### code chunk number 4: clue.Rnw:889-890 ################################################### data("Cassini") plot(Cassini$x, col = as.integer(Cassini$classes), xlab = "", ylab = "") ################################################### ### code chunk number 5: CKME (eval = FALSE) ################################################### ## data("CKME") ## plot(hclust(cl_dissimilarity(CKME)), labels = FALSE) ################################################### ### code chunk number 6: clue.Rnw:903-904 ################################################### data("CKME") plot(hclust(cl_dissimilarity(CKME)), labels = FALSE) ################################################### ### code chunk number 7: clue.Rnw:914-916 ################################################### m1 <- cl_medoid(CKME) table(Medoid = cl_class_ids(m1), "True Classes" = Cassini$classes) ################################################### ### code chunk number 8: Cassini-medoid (eval = FALSE) ################################################### ## plot(Cassini$x, col = cl_class_ids(m1), xlab = "", ylab = "") ################################################### ### code chunk number 9: clue.Rnw:924-925 ################################################### plot(Cassini$x, col = cl_class_ids(m1), xlab = "", ylab = "") ################################################### ### code chunk number 10: clue.Rnw:934-936 ################################################### set.seed(1234) m2 <- cl_consensus(CKME) ################################################### ### code chunk number 11: clue.Rnw:941-942 ################################################### table(Consensus = cl_class_ids(m2), "True Classes" = Cassini$classes) ################################################### ### code chunk number 12: Cassini-mean (eval = FALSE) ################################################### ## plot(Cassini$x, col = cl_class_ids(m2), xlab = "", ylab = "") ################################################### ### code chunk number 13: clue.Rnw:950-951 ################################################### plot(Cassini$x, col = cl_class_ids(m2), xlab = "", ylab = "") ################################################### ### code chunk number 14: clue.Rnw:984-989 ################################################### data("GVME") GVME set.seed(1) m1 <- cl_consensus(GVME, method = "GV1", control = list(k = 3, verbose = TRUE)) ################################################### ### code chunk number 15: clue.Rnw:993-994 ################################################### mean(cl_dissimilarity(GVME, m1, "GV1") ^ 
2) ################################################### ### code chunk number 16: clue.Rnw:998-1002 ################################################### data("GVME_Consensus") m2 <- GVME_Consensus[["MF1/3"]] mean(cl_dissimilarity(GVME, m2, "GV1") ^ 2) table(CLUE = cl_class_ids(m1), GV2001 = cl_class_ids(m2)) ################################################### ### code chunk number 17: clue.Rnw:1009-1012 ################################################### set.seed(1) m1 <- cl_consensus(GVME, method = "GV1", control = list(k = 2, verbose = TRUE)) ################################################### ### code chunk number 18: clue.Rnw:1016-1019 ################################################### mean(cl_dissimilarity(GVME, m1, "GV1") ^ 2) m2 <- GVME_Consensus[["MF1/2"]] mean(cl_dissimilarity(GVME, m2, "GV1") ^ 2) ################################################### ### code chunk number 19: clue.Rnw:1022-1023 ################################################### max(abs(cl_membership(m1) - cl_membership(m2))) ################################################### ### code chunk number 20: clue.Rnw:1027-1029 ################################################### m3 <- cl_consensus(GVME, method = "GV1", control = list(k = 2, verbose = TRUE)) ################################################### ### code chunk number 21: clue.Rnw:1032-1033 ################################################### table(GV1 = cl_class_ids(m1), Euclidean = cl_class_ids(m3)) ################################################### ### code chunk number 22: clue.Rnw:1036-1037 ################################################### rownames(m1)[cl_class_ids(m1) != cl_class_ids(m3)] ################################################### ### code chunk number 23: clue.Rnw:1061-1066 ################################################### data("Kinship82") Kinship82 set.seed(1) m1 <- cl_consensus(Kinship82, method = "GV3", control = list(k = 3, verbose = TRUE)) ################################################### ### code chunk number 24: clue.Rnw:1071-1072 ################################################### mean(cl_dissimilarity(Kinship82, m1, "comem") ^ 2) ################################################### ### code chunk number 25: clue.Rnw:1076-1079 ################################################### data("Kinship82_Consensus") m2 <- Kinship82_Consensus[["JMF"]] mean(cl_dissimilarity(Kinship82, m2, "comem") ^ 2) ################################################### ### code chunk number 26: clue.Rnw:1083-1085 ################################################### cl_dissimilarity(m1, m2, "comem") table(CLUE = cl_class_ids(m1), GV2001 = cl_class_ids(m2)) ################################################### ### code chunk number 27: clue.Rnw:1088-1089 ################################################### cl_fuzziness(cl_ensemble(m1, m2)) ################################################### ### code chunk number 28: clue.Rnw:1109-1111 ################################################### data("Phonemes") d <- as.dist(1 - Phonemes) ################################################### ### code chunk number 29: clue.Rnw:1115-1116 ################################################### u <- ls_fit_ultrametric(d, control = list(verbose = TRUE)) ################################################### ### code chunk number 30: Phonemes (eval = FALSE) ################################################### ## plot(u) ################################################### ### code chunk number 31: clue.Rnw:1126-1127 ################################################### plot(u) 
################################################### ### code chunk number 32: clue.Rnw:1137-1138 ################################################### round(cl_dissimilarity(d, u), 4) ################################################### ### code chunk number 33: clue.Rnw:1141-1146 ################################################### hclust_methods <- c("ward", "single", "complete", "average", "mcquitty") hens <- cl_ensemble(list = lapply(hclust_methods, function(m) hclust(d, m))) names(hens) <- hclust_methods round(sapply(hens, cl_dissimilarity, d), 4) ################################################### ### code chunk number 34: clue.Rnw:1153-1155 ################################################### ahens <- c(L2opt = cl_ensemble(u), hens) round(cl_dissimilarity(ahens, method = "gamma"), 2) clue/inst/doc/clue.pdf0000644000175000017500000210403014130772670014510 0ustar nileshnilesh
/QYX.mlH 00Hz$a!@sOr4=&WѴC}b2Hw2^Wc}m9{L(=&wڈ!U=m#ӦtqnzqWU[ /oG ߉V\趟%g}  +dKl$~ƔdxEv 8G,,@f~ b^ _o/q4Z.2˘#!TɵqH++#V/|0)m!PbᶂI I~6, Ri em fm :CRIF%edeL(g#AKHT>Bo=Qqz`_w H}Dt8q;Nnt?A:Y+"j[y8mN[ST|L/˻4uk$N@NFpj`B(Q%V1w}xptq]/,~WFAHiL.w\z' =\ygyf^B>Èp]q(P;ml{m) u`]WQC} Am}R/a-yBNuӮ]kF#2.c`ici_e0uAc]0G mSjcqKr'/4*;9L]H-8J**&yzR)lN ]a*S2RdɌ eZ(UuZ(ݬ3@ҍF,L[rzt%Ѭ(H5zTmx Otj'}9Smj}$9et_)e _VDHV#iH\ QPrZ4u>H/RDhLz]H-MTE[,6ic9h&YSoO2"Vے#dOT.)kXh|iYrpS9f_LLwYN/ڀ84`%m#VFo^~Qs zk*>J·%|M2Zq529tNI_]Kf}fOΊ-jZM}߉GeX^mi&S`ufN[AnޥJM;9A ?C;hn_flo8pFݵ=/50(k>rp"iynHd91!ΜttM]кOT&VxRȬ9ƒ^ R>U(OO辧qlH&\b,$lƉ=AϛRR S?K=sWXo50ڭ&&1R~b$y:/S,}*.TyfnW$}7{o ܞC9͎ۘgT8Sv)պ24/Ac'3ʓ\=E=cS>niDdy*j~O͍4{$>!hgo-_WiL;uZ*Hq.(f?}xJI/bU\2U=!q̐7HU ==N&sM&?a4hAB,>k :/ʹկ4uf xG4M E٩˔Oc3΂yv@xPrP?gƗ#|{2F*dJwI/ K]QPs7+<*>uQ!OΏmmܤنQ A~qPl+Pp=u:w4oX ✶A559ooV&e.v+eZUT֧O$ogQ0PFJHJ~t}^6LY~d~+|^ *U2RYzYetzbZY|Ȕve2ɯNP+>OM/@O'Wǀ4NKhP5B}x*2(x{C8PE'Jɕ.2[ѧ_D\2l$iR&^|'RwJu`E:s'Q\gAsj%| &ԯ21CUu6Hg!U5xrTߊ )F endstream endobj 177 0 obj << /Type /ObjStm /N 100 /First 914 /Length 2811 /Filter /FlateDecode >> stream x[]o:}. ?mn5ѭVa;m=#QvlP5U7P!99C(8e)bK(bˬubRѣ>f-%,.*rO("/li,̞ᆔy?Ϸ;#9}hD=~fgjyӨىb[f[=fUG,wo<>({t2!FV2ZT)WWfmD SsQ!(Pwy&nwDmbʍLvL]t_ fs5{SUXOf^m70y`Ms>7l^ij F2Jڵӷ[ mL)\ʮWbػ{_>MEn.rs'8ꄡ:t'ݚ{tP;6}mz٬Wf;պ~k>.G֘03R:Dvi?p^:™ F|/f[k3H3A!4#xhp wփb;Y|Zl׋ꢛ+k5Y94]OImZbs>Eǘf S6/g -L21Imɋ@B5",t=r4crӹBEEXj蔄c(rͻN>5˪,q:p&BS8qav[nۺ<1EB!lF_'˲m|mM%gض)m.̶Ce>"65^ %#ư캌m1kbT5ySWz5o1a.N2ohQ#x؁廴@}o/GULȎ^Wv@e?+aޕ(U9@a|4*9gf. pgu=_N_̛MbØig\G.IK#|slyپecWߦ͗z|D\.{h9l7Z6zh nd0"ŧ&g,$!׈x強41j,b@ &c-#1|p~f@Ⱟockt܎?<: `Xp4v#Np?qa?$w woďEBE/Eȩ^WQÖ;[FF:җE+r8Q^WE/r}<_"y E^(J9/'0$\C -ױ|F| &Nta"ج ,!'d`1:>W-m\ləۈ&0,&ÓAHNrLb] ;:tvZfG>Yel'83.N$pOvS_VMtymL8ئctŲu|,+ޗJԦMCXjDP0â"[ijiNBAvhV- FcDcXG-AՃD}[;`% 0j d'./pq>G>E7NH_|sHXP`R{I,@bjZƌBpeCBemkC-uuR#VJ.Y qY0cڊHcBC#A'ѮM)n@4DŲj'b2v$C01X?u{-6tk[]}6J-dڒ9jK-c9(DhDwd,z#s\VfZzB8h v-| $ZbXD{8u lKo!jLh\{Xjf?,M ,ќDDM9@SVҸ-lPcFȃ sG w=,"FͽSlJ=6%^>AbdK2||ҕ> stream x[Ys7~ϯ`剪 '83ù**JUqjkDђb b`t*+L=L5i vjҍWܞLdt_u3]+ DU" U,#ۿ\O|v Y?WN=W)4l,.+B]qqRVG Zդ e Pp4Qs}9E \硡zjSS2w0ʢiPA UaOh`wD27M ':gya$0'.->_25Z7M6Y6^[jROaʔй[:H֒3K e`=ru!G9;{>gsڃ2)T),uhJ<蝊=ztH4~G Ӫ55<ݠϮ$N&Q#Fia, r^'E!LV~݂`\< |N,inX 91|f$DY83^ؾٺ] 3D>bbUSLP)VWD=q0 qnbh7IXC=}_U/.ag<Vr@vL=if-(F6e5c;|,{{|'{w])f剷o~_g]z[*hj)nT&e(oLIyYyUzxj(=۠XKnXN@)Aϐe݌rYt95~i-egAWGJNq2Lsl%IfҰf DLWfwvHyy96AS=X~LtCM3|ڮ{T1/ +̅ڱފOK~7Tżv) Fphh GuVϴ8 6nªiL7]{;srP ۍk[npq&2L4E]49DT8Gdvt.Ȥ! Q o8j h]aWu2xZ^V 3;<\ Q>F)- +H!&B剫x9B~Yf"W4qB1  F?WQIT8+ycц=G&`B2|V/H5faΣ`߄#|:nJ;MbVGQo$>?!/N1V::x&a<, Y`F1Y0+2,?93RE )\WBH{LLrst߃2{!pHZLdսoH@,^MAy;o@׶N#֎3+yʷF5,WTr)GJedQ"/s`&r ,Ef}ݛX *b@O8 [o_&A6}9MR^ I SW_θ]ec [ ˓d&a`FA6>  "CS%Ύcu j؛VB}%wU7D T:F%@I*0FqFz݅(y^K&g(*=T^c1*px 7 ޒ >% b.&dBza$@^11_tyA`w$%Rn8O ϱه3 bꉽ^RÜwjULYLAz#zOֲ`ւؓC*Ϊlj1To8ފ̗Iw-_2u!'Dgwۂ|{%.EFd1Ğf2`}=Cf.B6|2I1P|E0E'Vo#\&5NPpX֬yļWlkVFÇ=4l$B] EDb!֣US߻"u& RShv.qp\;yUy$!+wa QKH/0TMlQ_&5_=ALGĂWT#QrH٩>|0l^Xc¶$@/ۑ h:Fvq5{Կi7AB"pƺuCߺ`1_{4Į 6028ŎG P$.RD63U۫YD2^kJ+|BAC| nHL Fv!e:]|xVgsoB~,ANJ6Xv[ /EnkeNHق-{զif [6%2Bf gd*\2!9N*|cEo^%y1hX҃V.!]^֎Ns!.c @ʝq!~aUVa_UָaiyRoR8yz ^ffrC2n0*t U 7)޸zPTyIEly25b "_5w|]T$Bƣ"L2U6*؟g& o>I9d+8殹i;?^|)<];p^bfEe^ԕ'>/f{Vnbu ?Ƚ}|3ds_|9 [r_.᝛+6uv9fI@Wi5?! 
5}/ҷ9FԿi,/{=0A|;`|ݽ77^' ~d&~P_^!ȃw X$j JW s:V%MTʟ$isi='P vCND)J>ÔH:v"@SR!#2Cd[" ϻ 51X i,HCb k]R41L~ ?|]V?;\+~Xnx~L!SҊfKtJUP H]STxVʏ(Cu]@`0\({țԜ002(-q; 5Xw{HT[i2(#%ʈ]e@(YtJA\rٿ k.g"Y@tb$[I\znXtw.ZyhE\ѻ.}q0(tȅ2A@BO3| q.0 aCi<9-an endstream endobj 359 0 obj << /Length 4270 /Filter /FlateDecode >> stream xڵ[ܶO{W@mҸ53P)ݮ>.8_yޞS}8yf8g/D{VPj2Wˋ\-.e3+fݡg9z_.uxζk7cx^HJټclںj ?֢6g%SVm86Ɉ„l%i$jw!+=Hx1!{1D]PSL SraBUVX&EhXŅ7nූkp>hx=:_55T}/H8I~^Q]鮭OO Q46*먫}Ae]"3ѿ~@v$=OPt;F 9 y˞GHV| p?a״ y q#W*+1wfBUi3pl笼Jp^^ypǏRӢHC{ ڢ k 7@ =vX/az %1mR܀ P3q{͂5wlNϪ"{)!̒ -ۼPLw(ct#Ox` JՇč-3Vf[ЏL:tKv<6̝\ -!$: ~A&Ax'2f 9]xsiO</۠5<S8);őhfߢ)EoxhJYoݦV5){D(`nE|E{4w92p=? o8kK᱅KQ%I-mM #Gx=TNCh6M50 rQ`]5F$p\*ט#[iRe=q[v0/HSUjD@]!U:_I L ,jeL t9cPYB Cͳ_<;&qbWPcEiJwW )?0f|gE_Ah>9b1n CD.l8AgwOddik"XGQƢ^'P֙걐<Ed K4Q1^H2ړVd-[?vUֈ`%D\ + T˧aT[{8o*HWcry@gSyDh Y5q0waaSt67R-QbʰOO11#)-L%=8J!?S(ܽrt󾼜 ,iŸ/aIU09y8~yt. SX7d9odY0!6nlNOaNm]iɇ]mtĕx3tSdЌN 'S:wu36 z *;Ÿ?p3>\6Iɺtt)zܹgvhr܅jf?rRChC!GP1˱E@b Zn?s֟C&. . :S$QC}0%?<rBLu<VgE!x/&ӆM$}<&Fr#5.f['n EZQ&KJt~H9qrhlt3$gxaeYB]gN|q4F.V^yZI8[s=ACNR2lKc4A뤴E m"c ܝ pf+U_ <} 8gQ7H]번k=B/#+hτ0Oj#'O1n1 r/02l+S@̐/_ 8? ?Θա $S_F횹^'gÈ?ņ~0.]uڅ_3(N%;KZiN씭3&q"cE+dnh?c9b7ς\ëbx=p0BP,/K~6m'IJeS0LNAe9A8>!OZm؎#!;rcZJhASYCl(랛/BM߁'$.[|/t?cX55Kk)DK\JR/^}?o endstream endobj 384 0 obj << /Length 4015 /Filter /FlateDecode >> stream xڭ[Yo~_!i\X$Y 65J\W}cMkAY]U_].:զ֪n/QVW*/M[<_.li~o/o**ڋ..uq?:]Qr\8k~[x~ɶ)qAzw_;%8#;\?Rr+s ߰ĵe D bQ7BkUrj~"ʶد[:2y~˅wu]0 <->t H~K #tm'!wPRxٺS‰40^E؜]U6su膄F\ےx/qb <mIO(K iKj5ATgk*TUGyKG`{){^OvO Ču`K@O1QDNUN4Aџ8Xfe]J#Y %ῠ!8{Bq⓶ms3x`owXlx<7oxԚ|#iv_z|B{ѲM,lhb]@:)4^_{[IOi:5Mߋ<#+Ty|bhP?Ɨⰼ˹<ʈ)7.|^y#-%?,rVă-~iFDKWldWz~da<6|J`QΝ)>z%c\}ѳm%bVv"]Z@^7 {C} OĤ =M&Q| kY &x_ Nu'˷9{: ]`<[O6] Ab[Nnl lٿF -GΟUUu$ $Z~]/?~5 =ptQO[o0Z-y5l|7GAi,+~ R;qAe<ȮDZj¤zY4/eb.:7LB0&'a0 $LG稺 +Yr?7 Oe nM֎ c1%<(~_Ad8CUް iS\/*X.#{%w_4YHm<ݕ71Av5T<ƪ]`VA;ZEd}(:מV.8D2m|.&+| A| ωG ! 
'*b=蠋Y41n%$Ȓ.L8;6^,nAǕk+!nȽUfRն_,9wC+>2Y{RYѶꖈg :.®V?T%xșHah VKs@ vq b5ݔT`ct&1U^S吜jpH!AQo6& K>CnyMVb೓^(3/I_YA QZgI$g [LtI[$}We)"՚; 䶚Nh8ҁZ;qN\qt|mЀ!U 4/C0 ½bkJ2;[+نw_^Fy 3v3SRݯAӪqT㔍'ٯE^ D)BF̦.a#6H婝r6|SN$>䭞A*pm|ja%M9:>;AD#7 \ vOQ/jgy'B1]7!=͢鄿f3غk fL&'G8d'3 J Te-)iⲬ/(=PPspT&nd8e]ᙴ!YMH%sTdC440k^ hXߞקR.0p_(+O՚E֓*|l3sjAU1{Fqez@NU1u |cǥ Ue.ZJZᲯ+nc8#v5*csַJ-NORF}'WE4$"QἍ~i {6YF/()ݓ@E҈Ps9Whi&>1jGXfR9r^~jw!3k;ł,.{l )d&6X\ʗ q] x⋂F՘{&Gl)dz f &֮oG}зaܨ Gf2F J>ܦMmbT%x'aK1xWm*__Eo?PF6@ز s't|]e4 WNךO헭a$7gd D"Zĝ˰ )gʸ ķt*9nPA-x#-j!]M|YI݁ 4>Hq+u"HOx\~HO Hd73dƹH#I_)'B4Z<"h`!>J[TCLIgof_ff5:Xy$R"ÞV1.)w{9IE3,>M#!Fs3Up.lS_`np6ޠq&H2hHm{u"BBvԛfCמfxWp`1~{o#hZ 6 ھw6] endstream endobj 402 0 obj << /Length 1611 /Filter /FlateDecode >> stream xڝXYoF~ >PكDz@Pڤ@%ѲtD)}Z6E sٝo43Ve&JsGJ•D0Җ6Yxkc7ş/l%"2[SF%,<+TVF+΢mWve=v#Ws.6?4uv ê%X::Hs(1)om8_n{Ub q7]3up}]#0FX7?5\̰P[s!5帣FӂIS\Cm5kT],r#DY*E-?&~ZY?ނ1GoS*ƉHf*.T-YaAP\Y PkbBؤʥEd@8JF0hMV?K GR3lD59P:U Gxz#6m#* k0}uaf04Bny&W)r!X!*!c@y3Tϛ'=$|Wp躌v ')r"fj#HClה%XeO?ӂ{2 I#Jǀp iD#B6Qy fޱ4I$O噲d @*0GKSRɘ4b!u5;B_4[le4wݘ aY8?$ISL- E&z6Y"~"\CVeW\{@YHBϭH#bof4yV$T1-A-|ѯ!Yu04LS0WT#ԤG<K^SC.m UGW -xN088+&mH$?K~E痎2CQ?]D8xMyҭC #UG_,璎 sPXZj lh%J@!RK=߄P%Nc")Y:7_0.˖Km>w,2P 9A}ȇd 䚊(Q@ȵX c@z^BCU8yZ\OoGwwebZR~_.(x6N="lC#j nbCB27=DeCnǨݸNsqeLL);yAs\LV3^L=n?6q&vg ' O5\֬*ɬ$t̩N꽜ݧuG$?Y:.'|*0QF<}͟&ͻP4|%.>~辁=IyW ;=c" (=uQ5Ɣۢj" Cfv hܢdv k^2B&e!ʸz_y *<} 1]^g,@ endstream endobj 398 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/Rtmpr1a6dg/Rbuild1225e624c0c06d/clue/vignettes/clue-004.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 405 0 R /BBox [0 0 432 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 406 0 R>> /ExtGState << >>/ColorSpace << /sRGB 407 0 R >>>> /Length 59126 /Filter /FlateDecode >> stream x\.XO{ O@p4D4 ;9.E^zk&u?}MS)Z{???7㿼ܧ???ۿɩOno~oN;Kտs7uS?z/WS߱[vtremϿ^_;,y5o {KFN ~p;OŠn^Mk,}mG7)^ͱ-]vo M[Ӷ=ߧߒVT lڶ%Ɩ\v {ۭe=l.欿g[֗ze nk/(⌿nRc[.|ۖ6SN*4V7˄ݼZ?}ztqWvKo}g7Vw{Վ"Qx?v)t<\ֿ W{lmZz[y_ӂg-ﰣ{u|aEA7c 0b nPO[r%m?4_~0rJx=/Ca7z#oxEռO%WRHm7?Wj@N }'V^Jk \ێVTcR*kmxWޜ}'нX0+ƇΜtrEn-3q^ss~^La ӑjTIמt mN1)xlm5溗@joƞA{+nƻ(ymf@Ub2̟Da $x)Ƶ6 wge=[,h,{ޟ9s~$bD%O 87{tĶǶ\v {׼cןHޜx?4&^"? 
\t{b7S78{le}Cˌ/zŝ{͎e)޹tx$[fb}F% R$eގ\=FyٓĻ(wդ=AI&/aMA76_nPԭ%]v-y3.+s["NxݩHXMz4ZĒ57go.j`&ŷcK.حe͙K= uϿυ5bd~kK7#]v{0Lo9<+[Yj~+ 4+ Wkle]31h1>ř^O^"5UP(|p'*bݤؖ.b?vwy7>cPD[nl{'SSDazup`^nPܘhle_g ikOpo[ߟNg_qk y8z=䲋؏] gJU<5X6{W᫱|28men.zledNq{s}O1J)_W3.BMGT\v {waYϘ9M{omxƽ@!sbdXQͫy `a&Yj$]v[9p?w1s<5F=B35!_5Ĭg[tW{"c7w/Nۇޯ;:=y I‰V\*F@Z+\1)5ݶH>洽>Ͻ~:VqR^2\c5UckfN,el?ƺSl<䲋؏[uMoۣFN9.t0>QD'WpMPMb_%~ };CHs~?x*h;S,\t + y8Z7(H9r%m|N7m u}U:j"lKsŠn^-HMeݠxƖ\v {O|?B"s*~wG=lﻵoaU/e~{kGsi?v 'ؖWQƑދkN1\K(6bӯ9ZKwcK.߲c*=c1lf?gMaG}0VT3ug?c1Kn?c 3_o?{uI>ۖ8vL,oX+8)XK'u'ǦvKLOb+8 Ύ'*!/mWp [bۮ[r%mK[{sNsI0sُm[{spI&/ќG.n/V6- /7UX/)vBՃ5g– %\[r+u QtLgFS9^\߁}jM`,KOy|@gFZO<&>6c_{17錄iwJ se> :ቛ~EpǏ1%5ݶz6<3C6ԙ%6VWk_ĉ3X/7g@Q]sþKWl5d[nP>rh9䶋؏ݞL/=6e=dRwy+zp֛v:Wcv[+wy(x j S׺|iSغΡzcK.c¡D){YS wg3Bᰯvm6#OoT$g; wAW$ liBCL<3͙/7Gŋ4IUS򆭠5q8f.0-"c7Y|oud{szĀFaWgyg@c)X|Cwr0E-[K/.ꃳzeջxpL|>QaMM/Ӄť"ݤpƖvێ~ 9=…(%;e hsjǢj츘ZcK.2%ߨGK1s*޷y?xriI`_o.?sa_1ݤx5X.'RV`Tpn)s yBXŚQ ߮[r%m,_dq(β<ִ܁u/p b,'}/[;mVycC޹#SҞI[Ω *jD4L̨2*%RYCm|Ü GYkzL;y@E8qfj x#uāƔ|an[4ֈ3d<{P4?1]#cK._9[[!aGBNDm1~9֘n[VtWX@0w%IQA?7 0( 8Yrb K "6WSܟ3ߛ#CwxL4ˑ/Ɗ:'X ,Hxle/⛃eO}oP}'G輬l>0 `EE|spXBw7ؔ8؏h}/pb\Ơn!|a%d%881MWsle^*egj˻Y |o5W>Io^bM=35kDawPMKE-"c',8;*x~<)C1|-(a2{in1Kno;c ߛyG;V_7rpoPN`MA7m>YMQƖ\v {mWD~)R\#_nj\Y癖|?rsT*vZݤx5Ƕ\v {ۭy}8rEZy~: M+ X)kx8NXY7)\-]~>ai: L9 mg~!wc 3Y=<ty Ɩe1)|_()fƊ:ipv ºؒ.b?v㪶ݜ7gU6bqRAX^ k9:$8Jc[rEn/^;gNbO=\)B>`MA7qؼw;4 Ɩ m؎U4ub' ܁&v?cXQ.X!xjBwK-[sCsgr|%U:3Fv^Gޜ'硒Fٮ؂ռu[r% ԚS~o;,nĶ)L7>5ˡb3t'rvnNcKnحߥ,}@CO,[*I5K>`MS#eؒ8~a9߿^8excd'ׄ5m]p=ϥcK.t ^w1웆 Z[4)ܻ_TW${IA4IXR$ ms&,)2\saM=qmN[EX}<[ǯғa0Nk p*9*YSNp瞁M9+*E*8MWsl5 {x >ZTȜ|}{g9DP9 l5|cK.>{W)'+L)kw.S^\ȇ`E tdc7:ebиSя ƫ8`C2W]K^}3.1{AmljtP7)`sKnZOá೽]wzY <r+*2U{`T%O>ۿ8X<cwͷT~'3΄Խ ,j;61n[OûNZ샫̦nfSOG XQ5;olS,v =^K>$䈃=ly'Q/0<T98 lދН;)]~>ް,Hb.t+O DpS'ShfYk^cJZj eɔ|E>8Kނ/ϑ6Vujqϰ/$#\[r[8(^}V7<8H4v|VfV ;E|pe[r%mwU@dppS\eJ?+5㡲$FšvLilAy.vLؒ.aoWߛ[Nf:4ziɦowv Ns;&5_'SqХ2%n )ΖcW(Gy_XSgaػ[c+6L]8rlm.=RaҒ8TLq:X,Tx+Hή2 ]~TDR li& dM4{"H{@ER86Z$2L6D.ˌMhbyL0l p[Durj9΅qӈ_ezjxbWIc ʤ\KɵI# i+> @O^!]~Z ZqӌÔf pƤTwz/'K7)Vr=ȵ0}z('K0I;clËm Y?Y gGNQoOYLa4w (OApvݤ,hs"c2M0ߋ`zϻ{ 16gⱳ  W'$SRYV0$$ԙSYV}Qَ~,'%L;=&jq4[7)^ͱ-]vWHQ/8UB>]&ONqŠ|Q;Eq>mwˊ' NݻO n _^)lƊʎ' NϹ TR%m|ߛ@ee::WOy*uyǧXQ͑,삲1Kn{$ŝ0, |)V(6XSQ18fXfXwǦvmT1`v `(v(XQӛ1ӛcle]%8pLم*(:6uXιJkjjͨ.fQ1Kn{xkN^53plrHe>MpKݤ|Ƕv{mҖVm@9+\XQFe4naTremmhG`Y8RO t+iNXQescYMʿyG.v.vDKp;>/SMݴƊaKKc{MG+Cn"\MFnRZc[N,lXwUZRVVfΨ()fZy)ȫn"QZIRi);-aߛSHUNW9G$-gycO:Բ1%ଛԏ|J\v {Csu@p١҅Yi'c# 7ʯVTQgp؄hNN)]~n*CRAԽ:p$cX=XS#BRAPl`qkzle nQOUOšUzVxYj  54ؒ.b?v;13f7Yo]D侔ܺJ\5 ?nfDq(iQWcakzJV vYGuVXJe7~e1򸰢NsT+EBlkcK.>#AXÒ(U hbe0^f=˙b8ﯷR` &ū9K؏>yl(=9#fȞ!C(Cq4r- n.RC^KH~Ԟt;( }] cK.t_"nۗV.~̟BD] gHӘn[ @"OIwi*>w|}o,Cw$ؔ.b?voy5@M Tx7kjppkS.cSn}knt?yBf:2Hal nšjgw!NblfA0QcK.^rx,>r>S90hr632VT/f` #W.Ɩ\v { 9<^<8_͑,}PO8Z` xޅ.\|~i$IcJJkm{1hk:`)U]aNwzX ,bBw9rEnoq5oу3X¦ҵ xUt 9&ūmK>Y}w.a,E2 aYr5Vɰ0ey1ؒGi9a"i; v0<T|bddC3GV1%5ݶ3sSX Z,ܼ8ޫ!w%顙xX O$uX+J +N(}cjH޹yVTdbӱO5#Jغ{dbzle^氒ṟO̷ Rs3䃅Ʌ5raCc[YAw$-օ>VKL^^{^ϖ8%He2M Ɗ>AJ}"FImۮQ>8NM̦ptSt[^rk*Slk#*{lm R@pfll0x9jKkytW9e-fqDT[֞AFxSEtū΂#cKnK}yuib,;$f?m-RG^TKm~=䶋؏]X.z8Ex72{cSV,UO-s}w zmile)a"eFv8~Ղ:?/iQfq’n^-΃#aw:MWslev3Pb0s1S']e) m#Ni0&7J$솢sYƸV=ݮdiNwdXS0iWqȧ=E9SQҺsylɋ=J8nnN~ݟ 8roy& =\K^%h)|8-blm*NX.RP!R`nU%nP-I vj솖aK<;`pFCYEDq]-*~*"¢5vNg%U?vp(nj'9)*W9 #qʶĩ(;^59&Cd p[Stv6N,)[m^C?93zRKw>?p<]?ctsE;<+ 9gEq?3$XQqV9pbݤ|t#mc0}O$qbw{/3t&4|L'C*t6 bNؖ.co^=Qel\ԭKKmY)!erp$׍f4y ZgB))d7]VB5Pbi<Pe'ؘ3T](0ݤxur ,[0t~S98)ez;.@dẰ͵qg9 zlmnO^Q7 bGhx_M,:?k#sP XnR?YSkܺts<2z {mF&6l1<׽I[XM]O؁ƶ<>S۲ǿ7wv9ɢC=_ +<<&iϒ.ao_x"wќЦ}$Vv0e)<:Vsr#`nRK)j< X| {nʞ"R٪GXS=‚bÊػ.)>6'"0h}Zǔ_j[h)=wHR^Gqr "($g.aMEKK`9[=6嶋؏~p˛2xy;gOpGdžC 
5F))>GFط;XX(-;\yk-gPWOSʨRǫ%]~ϑg#$8Lǔ,؃JvN#fyllP95 ؎I&e]LNJAZ?{uzEr/e ?.vFvqn-,.%J{leg0Xu l7(f@BxNJ4 !e1A\Y0 IQcTYZ"R.[/}aY9> {e #>| "tx9tWؔ.b?v{Ym1"n{`W)N k]DEݸy5v79IQ7Ƕ\ {O}wwGIסoݦZq/Lv+ X%dH7).4˱Tn|GK6'*vbO3ŰwaVTu8jc*ژJ7)^ͱ-]v_ 7.lNC`La*ޗ~Վ5U9(flkݤ}!]~쾊D0y:"TE]-?G>VԈ DTt* Jw;<1ZJc18Qq do Q_Zsט>g^4{SGNa[@6,XQ5dH16y ތb꺰ݧ<>u˻J:_èXR A> WSq6mlEҖuremC'MRRD;{Yړk<kjzƠt s䲋؏ݧt-戈 ݸjձj)2]<:+*""&-nEEDǶ\v {}jMi5lN#i,0frF0*ęt2[w{>Ae^ ?ۿ1gE/;x/>EqKm)J'"f f7{ҧ.mXVjZ3I *nN==$NI`Ia2..ŠѺɜ._ Nn}"oa7mͣU'i@̖/p;XS5c~>tpylɻJ0:مJFe'MܠU?95g6_==7뵊Vnm=P :3i\Qe|R hY>bxo psXҝÏ%]vP^p*Cy`0 UU~UstG.b.C..N:{w}{sg;SgU]/4iMݤ~Pv ${s~D:djpH̒MVԈȜic5%=䲫y=.ΞSlvYc)yv=H-%^ܣDlaM5킃NW-ld.o-"cl;ztæ;f103':p3yaE,|$tcK.v՞?Y ΟĬS ay W˾Nxj]O5t-"cq O0NXGKs)<;+9rFKy8hbleS*鮧iƶ\ {Ot|gΚEM+K{\X|Q{jdΚÒ7 ؔ.b?vD gp Ss>j+c&UsWݑ` gpP))5`Q3ƦvECRjJgŠ:=$/ݧVeJy.ng}֙7z˔S*TŠDMQgNؖ.ao}zsEp-3%Qď|9YѷN8K@>p+14VOִGCYx! )NNzӄEݞ'ɢI_MG/Ttfˆs<@}*.99KEHaۈnr)^ͱ-v;-,="(ba54"*Y IƊJѻ\<+c3jZwZremXB=;rü﯉H#X;MƊzbn`Rb_&;e-3#Ҷajm{b9$v#NBXAnRr%m۞]**7 /J㩉eUzC {;˩-]v;}[ӔQlNbwq#{hv*)-,EbFtDIrbP0 ЅҼ*6w캃-kĨ+5} [ihk**C뎞=1屲>V׳Wc/".T'cҥ,8 & WcJJkl=S$mBmGБZcEE#E`CA籛CEDžI#䩸kpw &emGhls= w1'Ev#T #F1λLjXMdS:Y, t{櫧Sv(V-:QoW46d߈JCE5uKZYQXQ- h0;GJǖ<3a)laumssm ?Quw~yUͩzQ;16嶋؏>sWamh:Jf;hQ󡞈1m--Lݤx5ǖvqm?\Y,T amC^zvVTE9LqG&K7)o؏ga⊎Qc $XW^XS=:].煽V5tFWf`e?Y eΦ1'"jR.!>/64[uȵؒˮ o#ava/9<̓CqeW`>a=gNCtXw9%h4Ks@>(VĿ7 pwۋIPƑO{f=ԈU9n a+]폱%9a^QG/rN&[6ŝ,F(i'>m!--.\y>}Ny93{23[5:wS2|BA? MۤxEK7k)m2nf8̟ĹOG,7$nσE|řz<{DǶ\ {} d.B*'m࣯P"lZƊJ.d޾H7)^ͱ-^{ڒWߝܕɘbQ+Z{t![Wdk`KaqɎnTLp8+AĻ$Dk̐=:ZݙS13[޴qw[F>mn*keG9%:$F gL?HfŠnXծ fnR|U8岔؏ݧc;ÜɉvGDeJWDr9~/*2CJ%]~>n yԹ)p^Wϥ_xuktkרڦ  Ș͘9Ր>Xֳ1Ays&ӹAv)$Xu,_&+t{c 6,֭bcK. ̸\N)+qp(X40E)Y9ΆŠ*Qʜċ~th3%]~>Gj/9͢bqp82*Š*e4Gca(Iw/ǖ\vehnLXnnNaܬuG; -g^K6Y7)Nplm)uP:1+NC)Vz&tO-Aɏ(68-N~4 ?wj)rDuX0#l11F|)9&`N ,|;ble6 >8#W_5; kj lόeu豫?_DF!dPp25޻W-k*r2M_DF$]v6lbdv@v{DM*Dfp$R$1qV#܄UZIRi)1@ ^88ݮ3_loșfppE߲]%]v3%)s$SfU=kczpTD*7Cgr++kXIePwvrz; =u"֯L(D!cGOV[l%қ=Kn{ {LwggDxn7D +{Q|:P3Ɩ\v {}5jHXTl&wu GHe<ґmkr:IpF س\i-]vۡPpT'!)ZvO)V84]OƖ\v {}}yVN6IcK>lc[k2iQU*`4 #g>VpT5.O+Nwco3>'8l*p*[!\G*+x죨`5 +ꉣ(sx"/ilɳK>vbfcPe=U+LjEŠs',2zBw^[rսo̗n6?>ԫ[A(wEʌMRf +f ؎*M+3-"cwWlwXS횃Mݎ6Uf8*ÌbaEEus *[ܺKXؖ.cotߛSU=9cch#Gq`E'hO.,3 ؖ.coOz}FvF[T&=gFn5բ9|Ԍc*9Zxj`?vDEtiTfтkU3ldmU]-8(ؼW1;;U-Ɩ\v {}ʜ/f'G'󆂚)ܻ Be'(l(cK.kK8}OK!h|=s?<-VTq0^pfMؼ"3ݪ!\*T08X%}݊)<)LVԩ$NG5"c;b䭻G'my ͈<|gR~XG5+^c#|p]m#+FE)( GI!{t=LfYL?krE 94%XS#l#:ؒ.b?v&&a>j  &GQސ 5Cs-"74qvtⳤ#븯K7w\8\xO3K uA‰.lFZ]cR*km'EIQ3XCfN;F9rJ+jZ] ![r%mIkt+jzDhݤx5Ƕ\v {ۭHڙČk9=<^☺T~nh1)5ݶEc95oVN)́ y889ad;E/F-yqZaLuyzmT[%Ô9×Μ}B-jXw;-yu\`o]׭ Fq?q<Ƽ R61BƜ aMgN&L؂R]ǖ\v {}bT cozŻFbʜ3Qișiot\2wODM\H7);oؖW˫3t>5̯HOۿ쬦W @ƊQ$f$;U!b ͉:Tݸ9kkcK.Y/?dƲNw)B2ZΒƊpX=Kx@B9r%m?j\UA5Df5{WKfk*GMss^*~nR~䲋؏,YdNϬ[[Cɟ]>~Nj5SgAK ,_f&:n-%c˧!lwVoW sO,Ea4({-I8<6!غS3{le/:Ҿa7'3Uwí4H@ŠJq:,NIzt(@c䩣b,n# 0&G(~Ϫ$<* \益'X*_iim{قm7SK-`E 9(6X.qD{le c4eStuYn-ESj%;9aM881ff9n[O)>xP>o/P[r0yyUucsX7)^ͱ%]~_ǽPٍ`Ek`(,ڍ-=ñ$BzX^tLaM XS4!"EM^TQ┪}]-EʾO))Ζǁ3XQQDI^O J7)vKn!aaKl_Cmh`}񎰠Vg" vbZJ >azof0?*8*RB$Yp"JWGcJ:sn[OugN Q#IfCʧt]\}ytr!{\exߒ~|1' ]Ƕ\v {}j"7\Km̵tޒۑhQ>ޒJ uMWG(e^y=0 gH3cuWl&!q$P`lp1%5]^ gH [TRQ}E}+s )HDu437E5JyLIip( 4Y7<(ti)B#1▊PW8c[kdk{LIipթ;'QHgzotd6gOF kꉝ9{yAꥻcK.mJb-uy),΂'a4z7E/GթK3O`[r%mYޝc}pOe*_ׇ+|NJoP&jG.}E3SsZ^5^arW-k"~ևⰲC鮱>rao,&'pb9)7oOT|>^MN̗n~X1qcU>2xx6[prĜ`aS;(Ɩ\v {wH*]y%D}Xc+|a1VTsE ]ǖ\v {ۭFw3[ lpBRx';PZnTԭJrLIe q>ѩ؊j#wqa StȭN9M,3"xjlm/vwxkKNk6(b^=6$elm6 -)tMǛȇ9$)2GɌOFjFjd^3XQ)GšpCݤx5ǖ\v ۫_)qLJ,Τ|S1GXQS₃V&MGh]~S;avŻVeЎų哮gaeNg/a6t[r%mTz.;նaZTo\lHQ/XS9ǛFQphݤ~7r%mX%t7guCs27!83nYasOo,魻IJcK.&+)ۥlGi?G7ֳ yw WîR [stؖ'קx?>f:Y+x#Nfx+)~io8 wMbnRremgui^5ӟ-+d.*7tSPcK.>_{[ DDzHmH:Ti43J܀ 54/>he@S!*Ct}J¢O3SEqfR''1fj&U 
&lΩ@p0m\~4Ly!ꃠȧtx16嶫^]+pR/,UhI0W>։+leqzÈӘ&,˓[wxdcF~̫v )AAL۸] o\;(͡:ũ(a,˓[w=5ʗ.bvaixY<ǔ_T֕|9YDqޔ ،֝Ommy$ /LSžذJJ"Y | \P6g@ F+"]- 4 Ng ΅2Ḳ1əiv),,lfPt?[r%mF1ywx5\ o#gcE82'2/Udq%]v@.u ,?=<8FanHtqO"\2{Bw[r%mK4&a{sP}שJU7)O)|oFpP ,TZw%]v6g/dp5-EU:kwE,7XQ_ ,ͤ=KnUP`vo@q=wO{ Z9KEC8q&z|b8pHncakzbbFn5O5  jn%qKaPawMWk{/J.fR..k0Ɲ2鮁a?I5EGv]>+\aM"*EН)]~>5T:88T{aЇü'=ݓۑI$}=KnW׵I(TxPEB>fGQOGȱ,):*MWsleW2oψ9Cu4i9#zh2ƚ:gDXMߕ8#z| ]:_GԎ͜OziЇԫ#gʲ{pt Q;rY*msߛGB5֧)Wq?l#fFpp<X$ bF-rTLoK4:nfݝѮ\;-@LcI]<݄/Ls,p#VvNv_4m[-֗WfsګϽmm)n8+g`v?+t#,>?g ݛ  ctܳ#ǖ=F 8= ծ[r%m81;b/+*~5BAɱ sL؆uxlm]ME h {Cwτu(ܻmBQXS=i]{tH@ؒ.ao}.؜"rZ#Bo9"12yu~>REE$Ip)ݘu?I%]~N){nS\sJ]z0?l14LygQf+W"lfnRc[.KyЖ*RA!d ~_+)nh8"F7ƪT-"c]+*9lӺ;y(.["yG3cM8NVyhli^[v>9,7nѫԇ)- LrJ֌wtrm"c0{: )˲ur5rWŠgCNuWԝYpli>^,Hz71 H\M%)z~qvrD VTmJ =RTdO%R-MpHYѭ"`O+*Z'Il7ɺKؖ.aoO56998{7Jn ;~!. kj&;뮱sؒ.b?v{ 5]Dc򧳦}i(XiPƒnx϶dlitj!~><{V#N2aW̕vO娥XRT>1mGbT>䄥~v]z7Jq2[NXo8Eis䝑’:Ka (-v{m9*tsx]}ob]U-I5faFc+jXw=5 5䲋؏is#4w9 S- Ě*A7ԁeu{le`Ge8hη1o@} I.yCcM=4m2hgݤ~tW{cߛJ e3T-UԏN䌟7ԉ5}*zw5+nSt[o f8N@1p,Sk~J>y) fea%Q0cK.>j{GE0=PD*;iXQQ&85VAu-ĸҧFH9l)΀!Z@uQrNƊkN_^thHT%]vR_˫.q+. !uV)ru3T4 ;;O{F-Σn2,]S58ۑ6}7iKXzwqiw+x%0vx@f[3*HtUDPXK5=`ctY[~Z{c_`oaqOmR: Vjgw,sF ¥؜vM>kLM̠Il(%k޶/co0겢?f(NyH싐$](z^ V\;G3:-?9:Uc_QNw")idvb-=9f$=˷veB &5f%W%?$jڕ8+iH`yF=5TKԓPPQ$y$=Fd[㄂^=}]~>KMJмTT~:'UJi֡ Z:f4JQ˵7kRQuNndZEvsէJݕ{g>$β:vk)nF|Y`c X]km_~;Rb8VyheA7&\HY ڈ(N A:U ` UUV}SV{#+GN4U{7Ҏ=NtڍшQ?;>ア/coϴPe55 wFKHQQ*yc2ȅ&6>=vQso̮km-?g[zQ4J/60;@xH3x%V䱬ݲm_z}T?Rhx-KyD\ı.F^jb.2tve7[۟?&8`<{Tykٮ+Ru:5jv>GmI?{~{}b g2=qUD.1h}%Xy!kE_^}2zi(*˶Et$^# i8&} -A\' ߅*9,? 2֏h6v\|㘐;5ﯱf&$آl.d^ro5s_? 6֒V|ޚQΚ9ΤnOXUNˇ58yT!҆_;}BXK|,*xcY{򡽱/co9).n(T̈tJˍ}=ީ#% -I?PoB" DZTh1PCJFZ鯈.%A{MDOEmOͧnꢌYpuI~t|F!c-dD0l1J?Q%駵%ߔE tg)~w-(Ӯ=XKMn>ǭ GA;awd$eB{BBH%鯭A] vu)kK".r(O[Foe,?_v(Uyn@qIZ[?]`oaﳶiݞ )/jwyOyqU KSyqWyN'}IL$SlM IS#k,Ffo헱@eҌZ<8uik)֤fW9yoK9f+xwnEu]v򢋦Cآȗ.ɧ޶/co&blqjj E+.Ko细c-hDv RkKOko앞~T$}CgMUSj`ICE f,=-I?ScoOV2yffXyLf&1B岍43flLk䱷oBbZDЕ7Ϝ p7څMO[#iI{n&3HaMcNUc ){iwXK5ּb7}(d 駵7vH5$CTZmwt.YR|*6r&e9zoO[gl=<|BQ A6"kIk#--)?߱Saլn,8B% =r͵ 6~ V91OO6vuE m v*;%ͫ;b`k[OW^ 7_lZ !5J} m]Ɨl$++*7}4oPÃ}ڒvc_`ou#Dlygf%5)3x]_JY?]<5׾V7vXWdGaXxE҃ubt2yh"i +Syoo26lHxz]fScʾ};)['R#&kgD-_?|^q|9˚]A_5Q-q|w}{ּW >֖DմKF\kC/R)|Ud$9X7YhbQ-k[Kϫ~[.5<gؗvZ/ھDc_4q؈lٶ{n>aD[ zZn0Rt#kie-hF{:kK"㗰?~lqh4+jC_QKCRj\XKgΨ5Msm#ۙ3꽱/'ʠČ]H5s}"񛎆c^b-B8fmIi~{fch*UG~h*R}K*ߢvk+0d'E2AG7J߰-U &bfjKdbJ;|}]X"娱4;[iX}^iX`ܟphz>UVmYՑ9PeO5ZyG -^#-Odn>tS`rCZ@[?*UpQk$Y4vpDm]~z|đ6dٕ~ߵG3XBib8k޶/coOPfC4t4Zcz6slsczb-fC4%X5i6do헱_"y)n% #Wm )D}=k)Y`Sت1^;Y`so헱ߧlMhfMΧ7lVK*p`SUrkve7vf5ot6 h ۷#w{k(5"Sڝ҇ve7ߗ#ͲGO[7 $=XK'5Eθ8%ߧj4p:R׭KRJ4[_f>RƦElU[g`o9<ј!yUkt-IƇĩEEzH+y 2ԧSs֖3nrsMYjTdxz/I8<5HMl8rL޶'`omOϯCO"&`/I뉰kCi,d5 6RX)ǰC# @{gu! vIGe?G`T (fCYk7n>,ʏU+֧wTO,H+ѡ'+V~CӃD n}vm2 Uf(#!5տ(ղi Ԩ.~fevOIOiYj\y8N*ݵrM zؾ(pmIeE~jcˀiL$n&P; ͣY{^$h\iK8y?3`>βve72h"B*޽Q|Em_k|dbkdo헱߄^ ^vEAKx |?Q&d4vX< ơ%@KyՋ]9M$A'|<,:ؕkiS1tM}o^ d鋬Uߟ[T7RiIsk-invn.XH]%G5g4 Ak 87vPe2U7S?mch klco헱߼B1ϭoe?Ф`JUś.5Bs13o{ێ_}0Sɴq0t7Q=hIiHJ^z|S!"e#h8KWGC ׌NjVO[ƼgmI$ؓ3[4E[3.)E|J<;q߹:i)SS6HraP񛔋^{̦XGq|>Sk֒nWzc$D莖oQf\kr2݊F!O}D 4Ġ%6F" 55hj!kn޶/co99z*SSӠ[үHޏ=~MTamCS3vڒxo®\5ٔC)Q|vEX/YêMeO'쪽n>iW+M|#R ]]/i ծ4T4E˵IS4ŧ[w}ػx/7@>{ejsI#E-E#+ĵH5Gbє)cH4\2m;~ tT[VG@=v~OUi>C(JyJ&#֖T~:5q"g$XK5骼7V=֖sړrD!EGC0aEvUvEzr^.qؘqk޶/av8\Ti= QD'jOm8 YJ"B%ĪYG͞z6d%#n:UL3UH:6JvgELA3(olpk7o?Ҵ5֮/DӐx8+5Gs*2ڮ+湷1 'Ju2F,!bn>N/3}G}\1XGaoWei?\ 7KQYL5#qOM'kE*ŷk 5+~xh1|lA*o1Ф\0)š8&6Qj"גU$)kB׮B9 hb _:"c6', Ɣ?U1 '?m\rȽm_~ivM$I#QD4c<륩& Ơk1_ˣv]6e/a Y|*5*ܚ .rQ|䈜ϣ'6N`Kbo헱t5] R4(.I#ZR@(" -5Wi}-;~ 7W|ggr&x"ѢÃDbc6ZJO[b[/h헱ߐ?D!Q5A43ZϏ6*M{||"h3V5l]ϔpm;~ 7De:?Eh];]CJM+5|b-Wk cE.~W$AE)짒J`S[(Dža`j}[%駵%߇^GR3۬! 
}rx:ʵ}hb9ikg$@څ0R-;~ 7nhѱL݆N"C)}(k9rHBje}E~CroKIWJ^R.k5FAEP$үHYsi7{o֑W|4QjO+y,z bۗ 4&o\m_~sQ\J?&h=aEdAһ9I +)MhbR!`[(YK UWbo-5ef?mS[E:mMW4D%)Tm_ͻrNS(HM|b.Hڿ}=TN0P]J=Rڒ8v 7)93 jSuUeß\>kiiLy]CI⧵7ve7w1eւ,9cVEʌwj"kXl=hxo헰?~}q-Q?z1?XIӞc!XK#Ii0r``dm{gV UG*G4KXo/5ٽ#Z@9lVI3 j.k!-;~ 7GMڃDj~Eiok|jVQ5|jWb#kco߼_rL=]unw=¾Ch4ol]sve[85x,wdhIǤުE47[-Ikko 7 Msu*"4Ѩ4^iYXg.!E*K߮$fx7?BBj98v;k=ڱb)z+j.co 7sϚ]KɚUt=9D}@s/5or %׶nO滯wHFw}/:N*A.yi%Y[~Z{ێ_ w+htHěg Z5&w 5]+`- -ќlPv˰{n> %3潌ckÎAӄYֱ{89xӋ.lUQ׮4޶WɔD],5-޶E݈75_$jlJukd5#H=ֳ q{헱?~QY<\nF^)N\^HOc~`-_zk֮`o헱4~hS[lC1K9^{cWXyZ!}%kƙ"~{͙?[iB0`7M MSM춻IliMchRSm֮4޲㗰?~sk7DBf7d7 De_Ɂ}9'É&WU:9'go~kbTf0uPUW' vt"XKJT2jQ$s2m)&ObX|HeJ?:ki n&}8kK" ڞcEM2x\(ۼ*3[))vq!_Uh\ b>Z{JaoqU8IC4+I9>$Evu/YkɀJ]eݓ;m~ /һjZ;в >cMXdl65blp޶㩰?~S<]⢰cUN 8B侫`Ws*Q q'˵kΩ`o헱RTBtzSAH(> w7=N䉵Oi5+J.9ml'5=%53NTBIʋ#XKi 5Ҟ^$㗱\Xi\Ur {ǎUϿ%8$ǰ+h#M nOW}nnĐM=U?ohUSGN {GQ=4%<;:U*^.;Sb Nuk)KSWɵ[~gR]}3? gyspTyR'J>SU%]kΰyaUѵ2"+AVJ"7Z&QXܱido7L/a&j^siڮb,VmKuZh" v"ڒCE'%EJܚ8t @`K ѼdāfRjݡk[&~{lu+P_v%r"< i֩tp/Cʍ[U%ďV*&p?#&1|.*y~t}5)|[{֖̄~#~h8tUt',8yh]\^ ANHߋsH MNy{n1D&a5KNO_E$U &'5S#kmILhogFZhbGVTnCrKSylWV4*K=#m{8D\zW)[;^TR4{.YV^"G]b-̕YCףO?ݨ uao䉸D՟[;Qaj3ֱu'Dd)=1vi -;~ 7,iGAG+ҏ{ R+c؇)MLD 7B-I?W4иۀ 41>$x.ڧQI5JQ7)R=hvv޶ۯɀZ4t?uq ̻د?E7XKMx|ږ헱ߧG}RTwX`W߆k7$j ﰫRh =̎{Ҿ'8&6#s#mPۍᆭhfUM;> ü)SQxa-imtiL$}x*߇K\SKLT4}[YHzh.4KؔZ^TO-qd4m]4nGѫYx69y̔v|z32A̍.ԛx*ߧJl(Vskb>Jy}S%WH|g?vqTݚt`G=$.~-sCUYEZS6qJ"W4qLhZYиKSտI4U.u!LFaI%}mo:,TGҚ]JymIi<żo7wrHhefYHŮF=`N idFbo헱jjֱ0尿sVO[#ckUiB/hfO45&ڒ~|E>>G-D݆BRA{9罽G:lhX Vl}N[vo]ʟNMm7 W&(efWȑS1:c';Ѩ=ݬ=盽/co9zMYbێYDLۼba[6XK,~jFa˽e/a>dmKŦn`d;=%c\ZxEJԈl0؞{n8Y졏 fs5D>gc 7R͇693ڒ{oTJ2Pއҳ(po9l*>!ekj̏gmIĘg=BSbMMX[~Z{cw+ê+d 63tע,)N}rG㥦cbkfV|z-;~ 7!GUqksk4i,DpfI"_ ̒fwnv$7MxOٽҚõ3U92ؕuVZhvɾ2nO>7g^Y7#M~7,kw!'km5x~Y!jM+"&QkjBT4H_.B%~i>7a Q-I?Sco9è手SU1J]`-AB=bcd̽e/a&_?fueg|X- 9ԳϱKYE5*>kdeoKVW^I4"GPj4eeF-z%`pXc-na4%BmAڒ2 pK5㻟K ~#P9?l˰T3}%i:35 ik^*8#/mobn8LϺ է$E#9Ln}45LI{<9DOE9KAF׉gj7K.^[4vzON=9Шݻ #qe!)9EօtHЈ0ltwڧxW`qߜQ}idPVީQXy!t"{X%Ye6{ˎ_ͻebhf|G!!Ǜ~|0J:T>Ք`J|\LJjJ;9#~{FiQKu.OICi(sxRHgs^EW:&3{JVrB43fHJccj)yxS츹v޲㗰?~T?)w\J쁊bvn"Y+[bڒ嗱?~L7g +ܯr<#ggiii֧)®uIa]~7{#s0S.59ǖ>H`݄`?X8ҀM%5=Zo8v#{{n5vd'-G<>`-BC\f{s4uOpk_f/ ,5nMU"O.!{}^S7JDhuܪFb5$^kI$>Kl4h{ne:%nt$ s ~;gՎ}@f?=Ile:9*?e/aI^vhVT0E͂ۇ)>؇4s4hKl=k:޲㗰?~Pk> M m2.;}6rdϽm_~ٿj0[8B+Bg:RvԼQVaf;kKc0b1XKBj'knÏፖa$4WgGTk^I:3=Zj|DDB`w_Y[g=Oi_eR%gbK'Юf1t}3XK3]D[b#k 鲷%'(,/iw]⃤L{$ӾY%&_TUPg`K\ے~ZðۯB.E ߓ,HjFG:h#]5^=~{Mh_]<򗬤52z`G"sY k=YIkem3I?/coy8ϭyiFsE[Ѭ7Iz,ikiP&II޶/a&I0A0&HXx<8Lzadڒ헱ߧr7BԈ)-P)-wvm +Nx7!8u?QU!nk!wUԓc``r1֡@|!YSc.ĆkܬͬI3'[Ungh;!zZ*9"$_?vu%׵nǏ|Qϭbv/e֋w@Lk`ZKu+-<}hT۽oJ;0$`O~4X}hT F8kkeoW˹<޽,uW=(c Lv&*#w'9 TV=m_~E*jnP+sջw*,<0|R#GS ˜%X헱|r&VkALe<7te736ll /nI?~cO_;tvbZۣHؔJN W-Id7:KtX'R|3O&Q5qCFe=5Y[~Z{c_~Tm,RNhF!H޶/a>=2WO#iQgVIGNK^N'~3=υXS(SCF5)1 ikT] 3E5#{w~`]>E5t^jrg,ߴ{H3FhF;tӡro헰?~HRxܿ򵩝Y璂JvRv~*~ښĄW=vˑm2PQy4͕C5/Uxnx"k$5ۗFkqw.~~aM|3h\A1)WRf0q͵/~{MܲqŸcIaE׈s$b=E#,D]7c>ͤ7p?u2l;35#+Zbcz=RFVД.[KV&7@y&q N.k^U29;&SOya%imɡyyMuuoԳ${oTO rwuvC1sؗ_Jz·ƾ:;z-7v{jɋ-2Mv˧j\(5jP"rVH4iwYKta@uaoueFϭѸ=S|$sƎF`1c/\͕30DzoKU]1 kzfZXjڼ`r1Z}l7'?U]*rF߀񈲩!)QTq(t/kIk맭yUR/쫹4^[bсaa\ih^(bNIw:gPEZFƔ]=m_>D]j(!b`L$3moWo,R?Ƭ±Fax]e/a&`cĞ5KK5o/ H2d͈e1)W޶/avhWsrohS0|‡I-͓U%YHd4NjWI=mn+W|:To H#µ)D3>[=)-k5j3 kKg9ve78D54:-gJKPmj1sGx샱C!=N{nrdĸ֕e|rr#ʌdi,ۑ~UoLJr|GjI &`'-I?hgo^`^֨RfsC)Cs^Z'B-ڕKAm2R-tVnS3J(̖z/ a1i4%ZY[^n}f߇/#Y3V. uo!NzZzg̚G7]S!-G„~{}FSj)~z~6=m)HxOlk) F +ƶa Zdn<fF/z{hޯI^(i51=Qkg{@m2˧G|4#s^VE7W$ۇ3Z4rm`cv e t,wkm5+==IF}+K%I/(l ,ғth"Q ε{ҡoB.>Ԉ7c'uӸ8[h_oﻻM1υ]Rh6{J+B;kKOko 7W/z5zϢ^ {$ˇaYBvc-@X*X[հ/}+턹ol%墨" tUGX{qv+W{j+B`\sk{? 
T]HvO|"ϣ FĶ}$̵ve7-iT<:*9߼87HOV2 kg0%oaox- EȜnWkzHj(k.ݓ( c-<̠!`'2-I?/coObufu+x#K4=qD".mUb-扳u֖Db~{}1܌(诲&>2D)G՜WŶس]"AcΚݖk[ڱwỹցY )gju?ݭt ôkXK3;@ Uee"v{n>} ˻=u"DȮ?o܎`%ym5Q`.鹶%}_4=obj!_Cy4Dy ?$.=^4=6.vcoK9EjŠeh$TbZk%E'r+2v\ڒ2䑆ʗ|`NM4{>=SNV$H7"Ԡ)2,VMkK"nߢROr"1){Ӿ4&()i\_m_g,C ߈dx2k` I:;skiFxvg${ˎ_w+&q2[Kcz@ue(i+E*[Wk8ڒȵ~9q3~U30:$̒/̱`&ؾ8%/c]zE5ظ UEZztjbcz}%W[+5_|4EŸ|w1jؾ,!cT6E׶gzdy/.,g&]xMyUcV^K0_Nq#PH5E*.v?\ے7vsq8>a͜;"i?ٲ=Vk'-%'>OO=5QIKzȼD|iUaZT:D[tw\oX` ^kb؏ѐ|'Ljez-ƅR*-{_<~ 7AUhi`ݢhq*g-]sgZdBt*16Z rUmjxƵ0oTknRy_`mP>bS>f, Gƿu%|_ʉ'&N3 q(vsi:upf`g̊8n>8 u8R3ᷣupo~H" 7ўH:8mZzJplc#gy$n5s̳8qMW2ACQ-ۃ%wy*.lIזD㩰?~~YNr|O~~ k]ؔ+>5YW\+}}c#Cn o3겝_7"5~K-se7)?FSA'l8 =]Vr:Q?Z[ֽ7velz:GkSWN}5-cxv}4y"k~veU<( /.T6᲋',s ݘzUv]^Jm2_"6S(KMF?@ػbF"&kK(yq:`隯?i3kl?v}8@S?kӿάۯvޟ%R3TǟJ\>!zW}딒[voޕMr-. e(AT!Kc_*0vAGE%Q޶/cohUOUg!V .(jFU5w&6UE䝰BxmIiݞ.Z ,skzӗURt?اwZ: )hxo}t%ሪW3]kLz&VU5G(g>c7Is4gAk約veLOT\]u+$/PvE +Ikg~fؿ-zݞ{ٲ:2fJDˇm\\XKIM!xڇIm_g&754IKXUWe/ņ}pSѝU=O})bc])6]Fš/JqGdۉF87]= k}݉$zH;~=|ֵ1k^ŸtVQQj$>5w0ΧƚI2S9kK"Ƿn>CS'ߏDkoS)6U;HR+XҮ؏SO[3 vkm_~}Qej93Mez"ҾSn|cGx7v%'nY\"dܲ(:i'D[Po2 w챍c`M|% Yda?{n񛿋V)ϭ }z|[ۈL{>cׯJ4NҸkKOko:B2oMμMiw~+DRfcWeXIZ[?m ƖG%Z[~y*.!F':6¹S2d]mW XKL%FJKvs2{nfec5T:ը&7Ig{;.$ͧ&4h4֧-I?mӗXB2uׂ M1_vk%%w7v뻰fh.RU3'=oSy<*@#DAiQ)F}%|]m޶OJ.*Y?urO߅}QvOTFbM40\غ ~Z{c6oIJE35o%kxudLW{}XIZ7yh^h~}gmIiݞ{}b}KB#.ar*F_lZB銥[5x)ve7ʨNhcutMDQK*Z=}? 량)c_~OvaR${PijC0I`fɽe/'o?x6yIEڡTe k4/`-im/YQAi> %e^wOh\afYfD ۶!Kc׍XsMj&0+(ڒt޶o!IUԂCR/Jitx\4NqF`U:-Hwcw+Euq_6T=KU8Jv+޶'֒&/]T|zmXݞ.~{u ěkG,'t/Hߚv)k*\uQ3J?E,s]Mɬ3LlR_$}?ݟu*rIj M>E'hyLcQ3i,:L8 YZm;~ 7]t5-zK}CAveS5[%駵7ve7:5f6\ڐnn˶ qVF3 k1}=%Eu_T]. )/ا_JyQpV%^$/=v{jUwR6~HO`-)h5X[4KN!door89a2T$;}/Dsm51!80yo헱ˣڽ`u/5j˻}Ru8ޔ6ʢjݣz-WҎߥ7055:&v7nV|^gdTZ*YoMUڒnЉ#߼ֆdtev$]6EIdSm1kM֮'U+זQU~sN~CV9FW Uaꯨ?-=MIkH5h0(KvoΖKjbLs{BȾG,R*cՂ%v%߇Wa?Fܚ/;3igIaפOcVRXqF1#%|Ɩr23g$0W7js%qj#?5`f|ĸvu|䩸K,}CٝvkT 6T[S>gV% so/Mz?:i/β̿1'&:S=ym{~{>|GV}y:*^;X XI)YY9"\)yo>DdB8>Q3c]|/]vlw@}(e,/v=V>$>O9vZ]ˊmInW4Ǵ`}x1MZkI:IcU Jgz, ۔(a?oEi姪Ǟ"mcAО&=!(n[JLk49LO>M B26U ^=:- T"ߑZhKI7.wT7^= 4[ݖŸf(r%Q7};.w`Tdyo58!ZPѝ ꃻ5-4Τ cL.l¿ڒSf#QO{YvN]RO evy?Z[rEZd0>&Ny6#AW;(Y?E۲lwڔ/fcWvzzN]JK9\L]Q3*]\hY_6 /~H6틃l@j3^#f(~K̀kmASԖǔ:-\/劂wՑ:]r/ }t\ ۠t]Ro 7Kݧn*lR$s_~֍fX9Enn~9+sIԎS 6(x~Q7lSҗZԊ`nTVaݐ@N4uЅū%_~G|Yl,hag{蕳Ъ8I{u☜w>6яµ%/20ȕ {X?3?AGnDƾZSVtr{q#.}ڮ^ڔ/d ώ["[uP^$Q[/ @UE}0:PnȻrz {C򦨱׳5\rcѝ&Ty H0.&w_tҵ&b[~n;-c~r8KH ;jD3Iy0b+ 1ݶG#P5[n4d98bën̙tI-rt}wVM9n;pXK>G5WâBfɷW -Ư,}%tvQrk~0S gcKM>| tIE8aPw"A3̹6 /~I+ BGhLQ t fTۃ[PwTVkSNd[=l𒹊? <HwvPWT 1(^7j ғm/ IYToBE%J^_ ⶕE+@Meٽ$=> צ qo~rUu(;q+U9y\g&$ZAu? {vNl {Yxjfs]Q-{}ց|9'RU;[]Jß[B>WߑW j)/nG޲g%TLMIivvJo}K6XVXk$Bݢݚ!?6. BwA -9n;f@!?>KmPzGrUTؒmDž 8]'2. -B`wP{K*,)z݈԰ O< /~+g1ʿbgkP"GriEE7uID"q\C]5&qmuK=T%1etZPqP'EТkcuW64v.&-9n;+ٷ1!eё*|;D}BUꒊ)[ G}M9n;z&2L[o(V@3S ݢoYqt$-r\rNn±Rˇ'[voRTÁv-9n;2?3TNGoy"qZBWT&fC9Z ZgߑKj%9·hҎ=fȉ(A]Rϖ⠉'PZ\aq,hɶ8uJig@g"uJ:@DE85.h;)qm/g.o'V FeqSp l3 @uU'rB'dkC.ⷞ⠨03麆JvT钃<un J7CN~5_0&x["Ĕ!K*œaLv'wMcJ[.ӈ  ӃiPD+rSWT.s!l89aX[rN5명#W N0Z vC\G%uD.]+lR˯%lpT86P6Be%Jj`nu"O9l8!oZ:'WU,j,(\^}n[(~FA}lw6塀@>>q.x -dAaAUSIW̓Yi]v\[?7jeK?3gGe9 xw ]Q] D.ݮZDaڔ/V*BF//{װG/L=._vr,hvwێkC.wd\uj<P%c(HOM4u3A" kS.8-1XpxYq mޒ_`T|Clo/̦ ۠78sn[3Aęcnҙ퐳 DA?tQL.=N\Gs5gw`uZnk3tcRE;KRN%SH+@xvL!ڔ/frwaV,.) &,r`qXtymPxS`m)t_3? gT/ȍrÊG]U(8ZҪznU Kjhw ƥ&ٮ1߻ꭹwEw17qQ ·[u5@o[z 9ZK]%v Ζm3pGYew-vޝ]>KtIM;qE(kM9n; )N7p؝tQzzsNM]Q-0!ɩu.e; [8; g@H T*J{@!w1Km=G+/Əc;p@6(RmaCq) 5x:5 rt!)XrJTBAyt"q ڔ/frYZ;sa,Ĺ!钊jnQEzt_N}|aigIN~Th`rK*:iĘM.j{I7^oByX>! 
5vP+uE-rDN|$ڞ'WrE~l ٵ`L΍m3f4˜Ƙ#vHR8ʐ.*HZcgu(ʽ:*YcwѦRY3[qM*N]7>*lmP&~GϻǶ% 8x1PJRf9U 8sG݂3.QƵ%/,ߑ(cE+BJD1kv4_eɎ8w|S(R^lU/{%1&8@WGDi4 ʑe Z#Z+Ceo  mPX[rEw-GDÃff| m0$V6(?/]95y !ߚnkP1Y Zdm¿n`1rw&(NXz﹧یcq|c( ?,@M9zϮu4vQ@r|=k3ыWuZSL2:.'Ϻdɻ]3 D۷UxE>~}gY/kz۷?Qj&ٷU~П endstream endobj 409 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 417 0 obj << /Length 1214 /Filter /FlateDecode >> stream xڵWY6~}J+:@z䡨>4E ۊcԒKnsѢr( pqf8Oy:a8"ͽU=8 ,318SZRM? ?d`~?+SoSƄ,Ʀ[?lxW@lj_̓8ƕ^ki7.k\K=@0 <a 8̀Įw/+B oOHvTҩ UHeu& A-tp2@a=G^ 'Dz=o*qVvTDJ_k:wo3<?Z!N{Fܕ*z˝]rt̝ xx:}&h.bɊ[I 6ԱtII9,y!k6Ze[e9O{Rk`w:>?~83Cؕ#?ל(eViHYvِ~hI\ʱt(˱-N YLSꖃUKmnd.BQ|D<(]ϱ}˃F8,(y#'1 ]>F#"Ebҁb endstream endobj 412 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/Rtmpr1a6dg/Rbuild1225e624c0c06d/clue/vignettes/clue-006.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 419 0 R /BBox [0 0 432 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 420 0 R/F3 421 0 R>> /ExtGState << >>/ColorSpace << /sRGB 422 0 R >>>> /Length 1152 /Filter /FlateDecode >> stream xMo7+a$q.R nqP& ݙM BxD/gYJŽkx?οZ\o%R^ /J Zqx;HD2$1ͨuz'(c|ei{ ^`t 2F 2"Һ``GY "Rhd9(Fg00VzGIg0A*$0i@IAF&L@9LbPvH cb!gkogm4 oH_hnip;( h:tX$QZ 2 }&'b|A6߭ʹe0QZV)Y)a42Zf e= bIr]ιJLbe +9|h\0.AO ;l6}q.K A?Y3Uq_5ӌ~0JӀjy@Ewo[e#-!iOTݘ{7hPyRZfgN& $Mι9-ώ^_fШp8Z^C mW2eBE6߬ާ-0$0 k TNW`,uiF@D& l"Q oiI``oiI>o*2D4 4oiIg0>Y #2}"Ud&@]E`D`=UD2]*2:]E5 ȯvIF0lc4bUӪVf]u}Z2 "=o$ɥpjˋ#Fk}L3Qc{³M=<<>HkӸlv Ҹۿq2^|8zW??GK#oaɟ8]:6F]fM @G\~iA|iE 67emsVEͬ|mj[Ԗ`fӲVfEX (,~s}xx?=\ XwݴaP8<=zwߋӟs9*4L sx>#R{9>{~.NunFyL?f8 endstream endobj 424 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 434 0 obj << /Length 1511 /Filter /FlateDecode >> stream xڵWYoF~ >P.Wvm@UERPTE*>~}Ze Pkgj/LhT=*O2ܜ}lF9mL\Du{+R , g7:r07_z:*4g\7?Ww_ς(,6?T-nx]hkFjcQa$/r\blPn2kkL/XTeq-sybN^{įcp:d|6ѺGZ h3]]pr# ]U"f/PG-޹ ѦmjrBZ@wdGxoE=J4&֓s[! 
B뫆ǓUþ{#aqZIpcprz@9ʰl8#4a[b.]?Z" Mnг;b aU HlT!Q2EF7-W9t]նPNtXN?U<1b[=o\͊-Nq^_@KԿF'm;EEb/֊ l<)㣫uٟ*oWJЧPҕW XBD'ԨzK)u6[ \I,@l\Mw"d#:n:FCON nspiE~m̕57t SKTR%Zh8^nU#b?9١'O-ú.Kj*;+zWٹJC!4*^"5K_Q|t 4CصrSOCY)TiprŃ@||!&tq ,jXKՁ=ZH eS.%q+bY0 Lh )Բ}A9 W|"8-؉9^b\́1wdgPHvvlq.F՚kNݽoA}VR#N'+ƚ+ɩX%e'G }ЗUM=N1/Gm]m ?XT!9> /ExtGState << >>/ColorSpace << /sRGB 438 0 R >>>> /Length 59582 /Filter /FlateDecode >> stream x6?OOM5 $@|u21p<[]\9J_~-"uӯ?~__w=Ư>jj_^-/_ϿߥW-w~QszZ/u.?z?1^bEA7&?ؾ~ؖ.cO+MW1yoR'm^ W; (V/[7)^ͱ-]ƞv_}»%9#?m [~\}nu{`Eu-ɩaObKؚ[r%i7J [99W] z_w=W3?Wb&ū9Kn-+E+KL|-S)_&(ם/[7)^w5䶋f]x;QŻzӖ(k y89c W{lmv Psrޗa<;k1Ey?Wsab0cbnPO[r%ioIxͨxTG7i7z-EռkK*&K1` zocb|_zxn-?mf'Zy)+qZARI!Տû3sr4W{|&(m|,xhNnAjM"n-3~ܮܜ;l`y87S.ֲ寚5sR0絧ll޺KN rEݚkb͈ywf3^Laެ74[~`AQ5/&ƬJTLb\Kip~d+=XbQ~c9h)&nݤ<ؖ.aO_X#^wB=&əae[;۶n.om- hRbEA-x^Ŋ:u[r%i1 ypr֋چ_E*]aM-o39s}&3Ƕ\vaՁߠ)M70ލ5LacNK/I~sŪ;#Vݩ7G[r_)kJZSŻVɂ>_o)pnAj-"aܒ^mxosk5q<kj۝b{[rE_ fG<|.r>^3|pe(éݼZǏwTnRN*}e.X/{hNspޱ9[ *R|Vv?+ y59?`6|G[7?ݠFreiW3|ޚyo"W=05)/ƚs65gnwboݤx0\va7~N/ ŧI[D@+.ŲոVkK5&VXhaΈKscp_XnHq*pH^Tϭ93#.ؒ.b?4+|CX\\ic|ȩN"MQ1枀!驚D\%~L_XlO-eYϿru.UeKpX~q+(p2cmݠ"{e>;ncF |)u,p,)Vtjq KlW$u[r%iCeNdzQ{`sr˗=.?Ə^[ؘ nRc[9ۛbzfS9Wxpq^/|5h[NzN3}V5j-[K.%Cxxx[~1V𓗜uW?y9Kn-᜺AiΈ_/XIt@pro*򁽅Ҝ5t@ZvPzlmvۉT%&碳#JGur arwҳd[r%iˇs/+8|>-h+j+=\ľغI֖ˮ|0˴gpXs>fSm+R ElS$oZXSW5͑Fҭ%]~ 3&͸b Y89usT8t8Gf02k>`>l<3~F3"+ *) 'Z}R 0*%^)!ճ?'7b+8;Pj/̬gMQ/Qݠ}.aOE\8usr&8qS>NdyIXSWZsPc> )vagK/p7<90߽w#L&{7-o8t{NT؋MʫRnjV;<.LKIjgv6׍5uyX}Mv-jטGrT%5;Qlr$_+Q%Nl4Ci~i`KR?LcneN΢~ cwU(=ykbM唞`8t笞cSn[רּ O"n=|x){4*ԍ5ݼZ_ĶbnP1䲋Vs9]3;c-V9|}{c tjs!U[7)ؖ.aOnޛw*>2?'GG3ltSblSXSÈ&a+MS#ǖvaxF>Ɛ#P,!-p%樔x UE.Kɱ%]žv+G5n!_@wpW) zo7RwIxmݤpƖva;3;|޸Y}Kҷގp9Ivz.5Kn iNZX,ȬcK5( ߂j()x}۲uTcX+-93|L򲣖Wz55?ZRؒ.aOnJ{ eA97H 8{r$V^Be\"Y^Bylɫ _PٰcƁN{@ڈ(s. |O߹pvNİnjoq ximИN[\: ?wo%= 8P(}ND3piyȬ1%>; igjZL/>|wT kmؒ.aOi): i~)SS:!l[~9 XQD'ޑԝ96%ga^ Rf+b[qɻQ.S[k)cE-.8nRc[.=RMl¡һ{w&6;=>ĚlמF0MKP-"nOn8Lso>QE9]˱%]žvۋwaAW+99Pz}gqSHy; w |m)VUcbuSԭ%]žvuR6\k`y$qU$v ocEAe:]+Ll޶nRc[.=:̒+Ws8;ߏϻ}qtF:=jƚjvC$bc;rM Wklev-,Ik"fYaܼG@c)7ccMFssC7(alw~U;Io\ Dą.tz%X|+jM؉;wGcKnzBy#?'gƚ08޻h!X>;aM $cck,RRw#ǖ\va 4RsŔc/xYzºہO~XSwK&Ǝ5uWؒ{=xB\1sqCx:,5OS%xRwT-[MosW%?/Bk]i'G֖^kS'G&a`ݤx5ǖ\v a9cjD_ذ$Y3#@gq(Jݼz9]3+}ncKnkpV}́O<:H>FH>,ƚ7ŗ(k ؒό a!zX%_atTL}27C 5ut'zCw#ǖ\vaT^#y;vf -僇Š*4?y4IG{3.3ŝA0sHޙs ; ̹2V3toǖ+z97 AcwߜI.ᷖs7VTzq7祛ؖ# it>͜\e69G ;*^>)](- R< 8z{1%J+uWQē9\vc"|x\@3XQͫŹ}TJ4ӥ%tiU&{ AolTU =u,h- vBәs@ꮞ#rlez޵}U ][:h!ps^vGV0 k겋79_bc>Yw7ǖ\v {ڭf)p.[L1-Z8XS!)19[7)\-Wjʛbq:Fᦗco>Jr~95wI۱~M1ǖvayv5m6/Wϒ7˵{xD%i+M/6m5;MVbTЌ+>7-'p4wicN(ZFp0\I h.v6-a5Õ8ĜH|Dzs`(cWbc W %!,%nIIʆO޽rS)qhʳˢ*Oqfy)~#q%ĝ{4^"7o?|JKMf$Y8L jLJe q;XOV].O0Nnghm *mɭ_cQ!ug[|{ڽCbʌ;C*LEbn)Z{Xr kcЄhЭ~o[McdAxnF>70"F~ gL>05J+TcJ*khq"TQܤ9у+0QgRհ*Rxb:Vc~&ū9K'KaaF ʋԝp$$),e[X)DKؖˮ̭knN srC}_hLYul5L3]`[7)O$rEc{*0bq$4O u˧+@+dqcl|HnRc[N=3?'YeU8ƃ(QM)[(@XQŋ&q$ MIRVϓ@C_wdeh5_I  J9S0UI&P&k12J ޚ"]gUNjwֵz*833r9Kn#;Ee2$v3Ȱ"!g78Qk*&\|'ؔ.b?2ō-iqoT^(XQĎII9Kn.NOjDU8XG5Ov_3$YE[g9Kn-T+_i{_YXQeɩ݄7MG1nKvýjP%U%AXQ@gNVYw:m92ۿKBypR 뉟!);`,Wz$ ƩMʿ[.=vك-;ޗ 5؄xkw߼3XQ+3̙d5v)ݤhRjmv]>vɹ>ܲ~(Ɠ+I]ΙTE3cJ\vu|'!򵛢Oc5fJXQ;'Ŝ8нsR<6嶋]$~s 88îW&\?|x0MͬMhN=UYcSn*~H7oZgquRw=g9 2O̹#Vtrja}[*i Xu!Coˌr-e[\1qƙ8^2׀ZcJJk;lc5,cҥ#~%DarnYgY!BqcM,,Nama 饛ؖ.a?v|?6ZwҝnVg8~d@3R|\A :+$x)ƴ w=Xt(!P(\1ZUNu9d, &Z7?K*yr%im8jwIrU0ƠE%qzTTVTApr ؂ .N1ɱ)]~ݣ0\!栞q*`r,p9RhơPfW ]^V8UĢGkuv>o`L!70kjcMrE]\Zť"ť1dը+*Wə,Mux4u-]žv7y@^]\};nRRn;3}T+1Zk3cak՝T(>뷾0;&zlr_m=學d[65XFOsͱ%]-ZQ/nwP _nê~9"ita-+ 1h+2iTcR*k;muekN)@H-x\§XQM&Eكԝerlmvoy'F4ATM탕muqxDGNkr &{\9Ǧvavh#s1φM[*S H _V1"#wC4LVQ=Kn0w ?35N!239=ĉE~f[r%ik=OEgKӝ5Mp"Xp0PBZWlӘN[|-Ven\hԋ 
/-Srmn{b[vؔ.b?*aGCr&K;umybL+u,TOޒ`GcK7Aw_cK.=VU0ϭO+*1{o.Ow\K-,ܬ;)ǖ\v]y߳:+0ӧ9j*QKerX~fyН)9vsӺvQ?;n,gs<nlyod쬛$9׺{vؖ.aOZ,:+ӑ*l e;^/aMe`2{f -"n/J)#9+p ۘM7/nu7'9W|w;[|{pc?v!mN-$bD+~pTg /XQyHN! @.ƖH]N-fS+ b[cawVoi͹`Lq9i#^tmi=VT)[3 %6C77'˟ 5߉J:'e K-elO|gHԝIrlmv6'<.ԜҹS7p7JצÈSClOjv֙ Qaq՝݃bbTBEO/VT;}TVٺhlɫv3Z l`z;9ï<:;t9/̍#: ĩi=8&d prE?8o# }UJ'xOb~9zli[PmMU3}=]2gP հ2]8ӫT3KccJJk;l.dkLH(Tu'n4F3VԝE̙ .YnR8 eG{\Ts|r.fMVKJiڣqρ,0؊tNYSXa<qVXbeIه ܒVT#9xxe&.aO3?՜`qn"jxu?5+Ae&6&?@j {{Ϫ$fI"82eor)l%+C=hfߗ[rE+¬}ge;SX>x:u{)Q>Ebݤx5ǖ\v aZ;9\eԢZq)ἁf+#aMy"hbKԭuDKaG,ΤLk$3AƿJrY36w 9UnvnRzD`{DUsrPOFG9S/Rݚ[]>k0YM"]d?=hQQf./ZNo'Y‰Rf |8wl!1%>mu 22l{t#(0B6 DHvšʶA`)]~Nw޿懌?^ ߃jSq/qJ`) #qFF 6Ug ))Yfvs~:hs63hLXS )N,lgܯtؒ.a?춏i(9L]B8+8,Ls6(sk셤[&e]L*5ڭęC:p #4*zgk[[?]ŞLbw[r%i^- =WK7X,WRYJ7>1(.h9XWؤzΛT 3`Ry3ǖ|ڇۄ.^]XEGjc\ 5D(!ęf O~ZƔTaBgT_Aņr_ٝXNoz&WS&flF˹v:i|G‡/Ck:^ -ᝯ&؎4@&ū9~2yۖZa0OQ{:e?)l Vr$M*}p.b?e1g|krW ^W僦g*`Ee|kr Lw{Ìoؖ.aO+ǫws.:tN0݃97=,gf`3EEg u#l,Z&g|9>%KQzG8yadpJoXk@ksA_kgɹ^܋-#tD+g{9sӟz;LcK.=vR0g,[ψsx_VIuDy\8)>6TOߡ9 {cH%݇[qbv/ a3݉{p:"KFs&n8zRgAǴO.;9pA7 Ԝ 3+?7:eb`k7nрӜ+pq35[r%ij;VnLdz~^ 挬TG3O.ﭛwwrٕqެbR^aӹJ^a cΣ CUQMh&Gku>ǶK̈́^Ya1ӺwSw~X.VIΜA e=g=kؿ0_~Cu+jL\;Ee~{#I/ahrй/Xئ7 ǖ\va3l}> 5gMg4|Hفʟqʧ79X'TΧ7ǖ\v {Xn <!œ-f ݏ}8+uѳx~\ }wKؒ.b?<^gL{(~nՙƁ=jOm,kMDz܅ù栺KS>:w6\|w氀Sw>96嶋Gh0sbM2EWu*ڑa9K k*&U'֝xslmv?t_}yaoӫx)ywO޺/iWkl}T MvrJST/n|F>NQAՍ+VTo&';d]r-]žvO@md ,N?Xzp$EJ+ 6`LKiq?;&3d)R bV + 3ea}TT~حGpX@sqbL 7'ƊʪA2Kgc:{dcAY6:'e7z 21fSv3U 6Fa#vW'9VqèweWldb9܊}H3Rw˱%]žvMT_sЮ1't;tS\k˿ +jI9dMle=鮙%/}ؽ ~]dEt+?wVq6ԕޯOzCw.slmvWn0Λ3tX2>lz2 mNݤx5ǖvEi\t99}YfJ$){(9rmܷ27$TE{C˵4v33C,LƏH~.H 8?Kws-y^[]*"Z/(,$dƢt28YE >ڳƔTw}6 M~NcF'p>+ -I<0wE]sؒ.EvC^?&SXG)^"ٵtzLJe p{x2Di٢Q)R4R2gX"W3cJ*;mu?xjz@~n|'V#}|ODݾc2n=C+ H)Շ.N&B.᥮Ք=l9Kk*؂u^{lmvoyBSnNWxF[~Jy6Uso^ݲ3&faɪ5ۖF1M2D(/]M (eܺMlz, ּ-9ZN5z<7ޡlRt9%Ewv-Vi'>tgh>>vzׁ53̜A ye[r5\ y}] p3&+LTCǾ4g X:P[r%iE*99,9tϧXS-CQ|M{|׳*NngNK,gNe3rV22$FK5u9,i)lgt-]~ح: N/BWM)<™^b~6id֚Yf [܇4r3/x< {RM`ZK7\Cg֚km V.uKoei>8w<B"'U~;zTS|8t4}1 +!,VBPfNdE+0{S=ynv$ؒgSakvjb` Zpl=4#-_VTwTr&X<^x'cK.=vdD0=dhj5]{3Eߘʌ%EݼZ}΋2Mʞ5\ {ڽ:We'".n&-4գxGl6XֽnylmvVgL)"(~NfKп&Fc)?g4|Ɩ2^Ü2ºwǶW8|y/!}?VvQkcE)_p ,L'f8x k&h$*9a{Rj p$ #SpאVI/2?mu$뱷gN2d7^ꬢ0bx.X%՞:)Gb(iݤx5Ƕ\ {rU@Ȩ, 7j5&fQK#,ÃvMC1nd?RZJ; c6o9 I8Xqg{<~am(!27 {f@Ƕ\ {ڭePavf9D,jMenrxP!,r{zlmvU ҵU5; qe= ^tSz5zR(X 8kj%a)UO[JsCabcy]8HCI7;m9zE4c6 =rC%]žvOe Xm|88L)l̷Y.z[XĊjYΜD´k.xcKn{ mjMAvW?#_\sHtzleWuSŒ:hspWةǧژ`9!EviLlņκIcKnK[.03L3@IzK"O@H1y1%N7 i#gW3/9 Q{w Q}ȆYTrLQ}LX}YwFؒ.aO<B4i>Ͼq1yҪ>[ΊrƊ* 1"t_ƶv{߅V]'9ݽ2AC3421D(7R7[t&.aO=u5,ccY3SJQr+jSc }q\v {mmu*bᷨt3)|n6ĚMvrpvz>c[.=[Fә/PxPj2-;K4x e`i‹*±ⲴW"ZJJk;lGHrGf`) HPW7TM8۱nR>,.a?ivC{%`YSbSJ -gcE\ FJȕiMˆ9)Q)z_#Stx[-E|q$]Th%K1ƸVW'?' 
1D$kW4^ٵkIyL2#TەТ>Fc[.ۯSs[ V9{1}j7G>V]<^VT'׷FKHcK ָ^\]{_ոV%ɇst{:/frm8=k\+нe[rճ_'Nw.Hw/v7;$t-|-%ƚ>cg=z=i/[SmKZ< URcMDNScui+ R)CN(uxЬ]`SQ ]3DDt2ά[K+ ^1-5)|NM*.էZn7O9uyWc1uw/sle>uÏ\:ʣwo9?q; QƜʠ}8-iI#I[T^#X(K!R{ؾ؆yn\-]žv?obʕײ' dk{jD+v!9'^Sww(m-h*8*Y8NZlƳս-v-FA֝D,_ז3oXQٍ09y9Fc s/<,9l߮~E~b㲾 )͍5U}b6FM Wklev{K\q#T39>Xv`0?GjfɘOacݤhsޞ-]žvD&Wy<šJӈݽxڶ֖ӈƔw2ʍrStfkX)I9"Vb1(Z:u\&ҷE.p  ]_ވ+eZs"*s cw`y6oޝErpgw|#D[5 ۣ/ne9e%-+۪J".՘N6^܇$Ru~Nh$kF˔=o ¦am?nᬏb6VnRc[.=v( ڽCQnGf@WvmŔKlޡ(޼ %&P-"}ȩ"mryhy'zO}rMŠ=#cjlɫwvm\Yo\#t]b\ݶ-3MXQ\ ̱ݳǖ\v]C7ov- aCW_g0)׫8B9H4B*&!ǖvawlMK撛$RX."JpmjGNK~3]XQ%miclmmݤxuv{iL GH˖#=vqٰMʑ1nT3 a)sr#_:3QN-Q۵뮍55e9|+5[vyZuc?^#ȾuW L[ШVYjf9#oüpaEu!L %(uWDؒ.aO֊wuYq`31PZC1O+#)5w"НwsleWwo-oA7\3㵺hʃ8~ʖ36ujqRdl3ū9岫}VdrߗӒ"Ɏ 8m\4|v4q.2Bօu_Y4c[^K2ԛwJ7@_p߬\DuaVݸK-T3-FG4m5>\RYCisnT*3ͳTÖO6Xr55ޜ ΄UЅtA[rEk0]Zĉ~:?b.ƒG>97sQsnLf[7)/[rYzscM}3̸(2%;8lq7PZ+{LJe qnDD]XzjT=jtm9 5w;qX$ֽg-{kbAj_LFFtCdQ捝 ϜVyAQ&ū9Ko\W'31nRvLؖwA=0~\|˜U^O#ٶIiƊYXE]α%]žv #DT E}mf+*ӴN3&:婕\ [܇_ą#:8kǀ3}0$G\Ir k* %Ή"O8ugrE^k)WGW.W4ie[ܯgS=T /z/~-4RY6g/N 7%*k[2Ɗ}KrXN:x,E5.r%i ee%RnJ n wӧ"y"kfs`-lLk[7)岋Np\5 (5mꏼ78pu}aQVI#!~y|7v NN^+nn )L~$gco{cNጰ9]2c[.=C_sr*Cy$Fnnۖ)oPJrUVTS}qYÌE[7)^'.a?>/e\*-C1$*`bH4τ4{ΣK“-Aĝ:62y.w m嗗ڎ sPv6\[ؒ.aO]&^K&%~-(K&Hl)ܥ k*=iOt'>ǮYtgc?zz鯜fxsbb (TaR%b^byg5rؒ.a?d%%F]Eܫ:U*ӳh/9Q5%F ݲĨǶ|:(vϽoy\:adu:uAFyZB-se&E/(y؞Zձf~x,|rfK+:,9/*ݤ|~-]žvP xpxe >8An|8QzQ6'i]YJƑ6n1DZwډƔwgs3i{`1l*yTbRg8Y' "ҚE=FpEƘ桑Mq-Ehq$f~1j ֚5sa.sӏXz R=COuer0%Xn%]žv{Oªx^s={s=% )g3XS'#Mgl"FSۥ7b&'-W2Kr-u8hmlzV{g=vzn'>Lv8Ӊ*/*0-Š:Y˱%]žv;PR#9e+*N{ަ|خ|+LZƊʜ(Oc!u%]žv=3t.]ańQ8QmJz,v^q~jťbLT!."S+ \1%5ĝ:[gOVVN4Exc-;Ċ*;؜sÎ5ty . NHΤc{*JD+3ļF!魛;-nC v 99Yy;$(Lr+db8<*1ܣQ7)^ͱ%]~RoP~qUԝQInŠV 4nR4eN>O?@%4쑎 iQ5Wٙ4ǦvuC,NuOÏEc)[˲s (55w-q@eNvJ=mRtsPZ'IjJJNJ@I>tA<Ʋ5u"Acu"K|uzr&wqݦJ&S}?[$!ut@c[.=pAi``i :Iف '%k1qwy&zR s'ϐ:0k1v`8k]\lM)>rϔ,˦]yleOBm_!]krq%+arjNĢH.Jؒ.aO`PɺI6P?i t}[:;2L\S!Zg VGx=b g5_ nN?i&O$YLmy_-F [{nRZ eWqv^RNE\0:+# (F.=AĢRꮹؔ.b?vr$BWX8qϛ=fRXNycWs"'ug|P-"8TyeZ!RGup"䜰ճƙ9¢JΦ9Kn>y}3޵uG z2"9b̓op{E7;'hŠʠFˠ Ar%i[ۓ WtCPCm(ю#Xu$ujqPi؆%&ū9T~oOщMuh>m8kƚڧmX-ߕ\e:ӜtW C ξDn9KKU{Ic&Tnb"K ?'vcA #;Z8_M0K~q3ADbȒ}39K:~mQ44 #G#˿NK>QXRG97~7aFm,[ vr-]P-q,S䶸斳j0҅]{c[.=veT"zQKnr$^[8$5Y4h GƒnoL!l0g>8G.RMq<8S]齢2&'9L^ͺؒ.b?vEH$ܦ>7a:يmj=fQ./WVnRc[.K=yҖoA50dUN~_ Ss)n~ 8?ͫElEMg $&ة[-"LVYwm*sX_j\F@uQ] #fƚڵYw]=%kצؖ.cO#/D oCo5MymZM5rlfFxMkrE^ (w52Pջ t7WŠ{CNuŢt]^pliyXHN`wJR3.刲JnUr˜Utrl%2.b?vvtag6JΠ2.)n&1a"NBrLlnJ) >&n٨c[.=+msrrЯ9J%#w'55sfeʦuܹylev{ g8#bߍ6Lq9%%^JYbI]vNu]Hcic998]Fy<* ]bD-j %GnbE#9blnRc[.=pAT6^8Nj87.Ou8Q’!\a -žv{m9*t%sx(ߑs|ܛ{c/tzkjf%sث؎JwQ-"n sr .b+XR)Nq6PlS-2,T ST;.]RYCb+RUZJfnfqbRYCiv,79W[bb|YzLٱ XQ;.7EJ0[r%i7I(Y[]۬Lp:Gl&b'gpT<ʒE]JrLIe q2~D83KH…IOuWe^AzK#_  P GBtsgXFiXwǖ\va(b^{J|MI-`gok-9hclGsS&Gw-]žv{ş]tƲ!*E_ ͂b/z#54Hlg1YcK.=qpR$*WlkMqSo5U3bYXHWNreiλ}̻1 "b|p)2)GlԜ6ww,rE0sNk4ϬRbQ3XS;b#غXؖˮ com,~\lŜq:Z(1.N؊9藞XVv,6M/9 E6PqbvN9'LcE /6/ot:Cwqؒ.aao-Ɏ΃z+վ 9?KWHdf;SD s`KH EI&l*Hn{ +ɣU®^ni_CY;,)2ؚ;7Y4 I_b)e=lb_: ޲/a_Z |tϞXf%KvL\RQҚFel(NCi_ľ~ˆ *^MC 48!HJD K$Ma}>޴/b_ﰓ[Fv8: W,IH˄6T_i0U؊g]]ԥi_ľ~;˜ x.9߃ö#q",~FDBLۈ{NO}V+O.iF&%X",% ¢}KkMwi-y5EH,geG]XJE-֠l^>{N}1sX8-1LOyQ"> yނUXIy!j<_^}hoY|BUm~iTIx-j ?mN&mXqjN[Wɦ3Y5S8>so~5k4 6!͵2/ޛR3nij+FzЌG=`:zR"Jj&B]Q!!P ؏ߗݱ૭i0wu՞k.7zK)@( FIֆ^N}V}n=k"C^1"B^)&)~aNYT.M,`khmH4_~ߺp_ɗzp1$׫+RWH;#&K586$|{N}Ɵ-$q"EΦ d%dz($'H N**W; X8i1E~E9@q|>o ^&F|Q5}\bfRhľ~Q2o58yTBELOdM b)1UC;wn~c~jդNtϴM G8>''v]QNjpmH4KCi_¾~%灠H4Y#EPgԘI ݆od`)%GZ $V~ڐ|}VDEeUoFGwB,EwDR|QXJ̡ BڐiMʿIGiάKRݕ`;R <$Y%@ݵe޲/b_1q# p ! 
|VR2I:5=hAD!)y~[,Qq<|jVN 8(eHxY;꒐>Ow4n1251F\i_ľ~߬dh<ēC .TW~+;RJZ]kNo):ouLS3?buR"sqcvJxspZqi',Y\ŏ=qou4N(-(MCW0L]c#ےkC(:REWb"_\;+J{N}g'e1θG/aE4(>%;!RR|^o kC§Y=/}VVGM ZԀ2b΃)L<6$|{NO}Y Ib*ʃMäL^ rRI "lL֮io (JoLS‰lokբOSrpaYMi_ľ~+3HΨ僚"QC& ك6!:b)e_%Y'v-4I4~ H4$kdZmUZR|&*6dox7൦B OsMʼk{޴߶D,lIxy)\^XJX[:V=wmHFk~yD#(C#LK5Ba'%'1D!`O]>e_ľ~+WUw%*0Rwcic)Uw& sv`M;"[gwp I9̒mK-٨5]gfFJ;W)lIpL*Ўxb%-糤5Fצ޲/`?~Wɫ;k0A|Ōg8vw-w\ti!RuW836"^mKޛvE #'^͎~zD]$so0a)MbH-®hOڐFv-N A4>0EP W=j-5Mڃt'RS0A؂ɴ\ɵ[v%Ͱjvr541V T=s͒7 Ec)MaIQq![vE0]8 ܕo;lowąЕ.׽6Cpz9/c^:}-֠*dt̿LP ESiq{>yq"VGYS@@lVXvo}b/MFeաɑU}ƀXJW|i"?!l׈ֆodN}c&M%!]}#FI,fNعmP$޲/a_UG7kHvHi>eOҾ,)qCS78U4t4Zު9lxrwt K6J\rƢPkfCM;"[HpnϫQЂ N HJWuAh,%sZsa7[6Vh?&glM:J;dq_$k箰Hjо~vzM 4=mnTIw{5 ƷFkW>xoqM-ij<"FpVǃ Iza)=\$4"㚢Ʌi_~%0;Sl[F3`HHu5sNQ}H 36GY;"~*_ VdNULO*b+A"&,nZ^7oO\y8 q;񬾟xT^A]"fq;oK{CcO6L4 '*5:rM zۤu6P>IE,}kbS%Si-;"[Q?&Kԁ1ir )6n42g,޴/b_gk:X31(.lqGQ5͋tҠJzް/`?~늏l7Ǜ5PpU$ z`309ެ 6cCvE~^MPe LRVuJvmpi#5lZ;ܛvoYbX^MUTB'(v*4Ԕ=H䍥t_;ĂUk?[v%~Hiiv FRY^+8=XJ7MVdUZ>i_~82dMn0480aTI8g+"MˮOS<#bN #yoᆯWq8 pg7I _;I! %Z곶$xoȕ,\5"HJ[$5&\KiONe`'؇i$ǽeϚbG>pµwSEk6%i] h+wS,G1jlM b *`Z.Y]E5];􉥴'MM * ;"fnK&йCI -}XJW|ix+~ r+*~^MЬ{Bg $ҐwI 2XK]41`%Z\o*y.h>Kei.SA/{nA5 Mڐ޴/b?~]qI*&~c1@Ӌ{YY6.VR@hN)u3ڐtoᗰJ 1cI\P(#>$'юpvMM?៧e[vEb,~bLuAwFzη{#ʍkx;]L [qŸ7o_ *|Kgyh>Fm:$@ $֠ijg%ܵ![vzJy"& ՅsNMbSU} إ,PB,S&Ƚe,ԅ:ۥIoJfP“i(KWo?iUŝڛKu̿t ]Ǥr ]ǭ]{Eyxbֆgd/`?~sT\+{)7Y4AQ*"VO\B|{J{}{vܵ&EѤHƔ%ڵo /%RW5 >bK>vqV{N}oMG&#)+iMn3=3sKz }׀YO-9ʢǞkSdľ~+IC%*]$L9,T5Q_ڛvEo',0 ?ԅ\@gia)[KM D&poG* !Fg$OZ:r$'Ra$j(lFki_ľ~37]?WyVEDP R";o %aa|26ckriΛ:V SD8IY;]c]s~F*{Ei({ifv{.p֌]阍2 F58+9|d F !]~ťi#? L.kEcZ$'pQժU_=C!oF "*kK{=(YtZ7H~)JRv]Mi΍V:ZLfPr| ekTe߯y^BuLjMm {ڐPi_~Vci6bedY:vF_b%-Jbυ I1 ;"뷎^KM(MIX׎_b%X58% @֘Y`aoY*UՐ\i0vG`.ƘDر\j2ZN<޲sߪD`3D4/D82ڧ|.cfԋ䌷56D٭E:ܛvo^XN p0 `U4iOC.I1E؂Ȯ`dM{,[T[wQ9J3 T]$T -{UtJda1Wk!vo 4i7e$%uSorϷf&K\b%o4elv6o}|c!xsizuhRnv.i_ľ~갉,0!7ĈS5Y%I8&E%XIY Ҝ2tKݵ!am-;[.F0i2FSNqﱎ\V!n4Ռ=6$eo.8~ I~cTAa|֭\;*|4 ;bQ}[v%,|osStJ Qt KQ,(E9Y y'M mľ~뷏wHW> $X}~/$Q ;h4Eo͈]bg ڐiM㷨xZx*DǠSc)FӼưbEXJMk0'[X\vq@{N}  ^Fa 1ȠIBsL9X*XT.6kg5{oڳɤߊ|5%޶TiK*jhoHuK)ny]~C>vEoqT&0%ϫk3ISЎd=c1Zkg/7o5~hCϫ Ʌ^?O=eGрm"EPޛvEnYWӔ4[FE"&v٤N[Q)MФ64a_~])N$ilOmy9YH\99Kdq"IVԭv޴XOU5MC];~uE &8!v3nRt&cIiHR0k_ľ~߱- P]Hi3= 1B҇6$A˸MbyQwxQf*)fdm SR|\5{BAoqF8bPi{4,|?ۖnG)bRIT)lykWj6`_/ջJ ZR(.CWReMqlh!6wq* ޴S`?~x:EĪ젳K5VI:NKzN4q06d^;{N~BB|k"7l$uh-"‚RDk'O޴gl:Nbz@5e'\QIʒHK"5o{Tkء)MʙJ*®kmH4=kJS`_/+ T#M)719KBRԃn7kbܽуi_]bHKG)Iq;1t;l1ǛצZN}k=*쓲f 8݀Ɏha_4CJ>qמhoMpWQ8[uv96%)b%U3HZc9k'3hoڇo1- GOAtb=i;hegg<6>]Ŵߦ 9S }M<<$sT ʐ:c`+K]K Mk8"MFOLi_>zWrֿ4-ݜݯѫ$wjd -amxZVG8Ie\O}NhbWQ \IүkGܩiA l  ޲/a_/9E*BYAӚSmd"8+ҸJHcAӪ/~;e#)MF b]gH((ᾄ>%b%%HRs~؆׆ m>5cϿ])04S߼ݨyr$ŞF3b%04͇ȩoI{N}CUcIe^; eş|M.Yҵ/b_U"3<  H:O9afUlb%y@4A$!=iH{Nˎ|OkNr*H3Ҽ9uK/`kGG<4XgICv[D}bā'8K߳DYwV=BHzm{zoߺ~z"c~^MROcAA Uk*;e 5)fHZAݯ~+{ /c8d9ƪkikwDaq+[v%*E2jCbkՂc4VO($,,쐘4-ln ޲gU_ߗ}`rCTMl,mҵΤwxm|smHDz[H7Ћ4='7ק$BU5 st탥z&}ް/`?~i lm?&߇`U̒;}&kh1&v!d`%Z>]Fa+8?#lvoчo3[#6cgJ2c5<6ٽ۽7o':{& fI{Sא9{^7XJmip lkC§Y~ %Z9LtMtF{q氠R.RJ-SOx׆sמT5;_~dU/ȟ$Iٶ=RʪH㔄AmKo]YÔWS[dJV2PvT K)sjbfֆO3D;sSľ~,'P'H Lk=]~[!䁪|j5(]=,@w;KtgIPs; o#-Ё`?~+IX]IMᚙyBBEّuK`!i2-;=%V \ᄇH3"'3 ҝ%^0Ɖ!qm_gMEw=d\ʢ W}|Fv(5%TDG Q ɾԬ*am|zi(܀X>eľ~ j#͈(A2H5=HQ`,,fiP',!7 ؏xjZ\'~/;','P pkͳijP| ,#Z;V{.}*%ꕔ(\YdR@K,-,MPbKP imH4~~SV {|OP30,&RvȚ<s5X;;}eľ~+6||v OH/Nx7AsUOSAJlC!nho pMƳfB)$ _fb%2J Hf<{7 ؏:'16I`<.fAR;5aM\D;"[g!ġ<Ki 4Ř]`9}5VعAdkoٛRGoan}ir<?7NR oZXDTiwC`_7JC׊s' %61vXI6RA&jnֆ,~oDݰfpN8@ҿK׾Dҕ]`Q]i_ľ~3&a RĽt!-GԬ7𤆉EZLiҭ¥Ug4pOZG֯z &I(M㮢:.T}kehRV ՚8GU^>/b_u`oϫd脋D r84UIiʶsԐpXqmH 2vEo3˙ojjTd"[>"1?Av@/okr?فNj*ͽeľ~FBF74c5tpw*l7>p #R4#|צÕ~G8f*i}LbÒ#voU#aˉR+{Yp簺%`({NK wHkvKQWP߷EJ 
F1>.h׾4+5X\Ʋk~wѧ7o=NJ9џV'9.9jChxnHDEdHkȑ~HTn1'M<0+LSRVrkVҍKԓQ5o\{W$pE' ):4B} jmޒԶ˜xAi_ľ~? ٿ%/  ʚg__RnfEY I,Ϳs 5V? w`ϥkS§1 [vKڸ9aOГfF;25 `#]L5^HVHs zڛvEB9JRY̪N VId僥nV";X%[vE*xF(y5KhhY< `}zHZ6>MM9-ʤ) 7 ؏ߊT%D0"M 2'x!Iqܯ}+e d!)׵/b_on/ր{$-)uŧJ;.,%[paa_~f9N矑}RQ!~nsJ3NǶWU#cCQBk& KHݑLW59&Iښ5ɂ'Q$,$OS b.6$=#vzJXkL{UˎKgԜRr50blxpM{֜[ɖHQh62|nIf^ ,%OӘ7c"\{"]NH곶~5ÿ u¯>8v@@ү~kRQJV(,HvR{.S%Mܾ#iFM Nt/0ݾkE42}EZioߢ};P<˜d^)]#ijb@@ڐܛvy ۣ˚FI~GXY`'q4Rvݞ4'צ9~}k;.kƽ#Yt:- %KBT1 s`Sk5m޴/`?~ZjVZjƜ&":Qs6LY}n> ;XIZ"}Ol٘![~+_E*>=k.r\}zTvT K}zҔD.޲WuZ߷2Qэ:t! ԅ)Rl TLVX&|. ޴7{<10fF붃vCXIS1koڐ[ޕg,/ob"\@F1)fW!y*OQaJAMW`gnw*]~V&LIռ 4 A(}E;.V Pǵoq gϫ!hy&C k*gv\]τv|zyJ{x4{m3xoګM{'vG3bi ’V9d ol7BꎦYr[b{ĸvw4M{NO}Vf9" UOȧG`|JOԬK)BMsR6$|{}gˁS (_̉#1H 翅װ#a),%eBY õ!amV NO}:'K8oLk+7@+#Q⵫ڛv>~z*FE]UJpx7WK)=h-K-K§}{.O}kH8WgKj6·Lcmaݳ!>"ҺgKip>$vȵ=[ro)7/66.;4lUBQG1XI#]ɚyY;yN>,`]4y57r8WzNHt8FK2DJ3l3b޴/`?~b|Y1>< kraZlylcocXI% Fe 1ZZ޲_~+vqÂC#:J떄hDBmQ{XJXf؅4\bSNO38&qIDYp|/IgރTXIldҐ2ؠldڛvEJKQ~P-4~X?" H]'VRƨ1c0k'io^Pi55+^ VpX}']Wi_~fhEٳ`0)^@D0{b (Q'Y`$#q98^j6 ^_u_G՛5iDP( ,rԠy/ZӟZ&"JQ,Ӣy&EOXJ{G MQñ6v#M;Ȉ+Kxr4ѕXU%PO֗+Iq a7 ޲7Nz=7zՐ %[(y^/}b Zbzκxo*nIXs4NGTBlb%MjfhDF ykC˔muw|IXeuHw~Qt"ۇ5Ki57! I/ ;"I dI|z~=Ipu`wlKcA c.e_ľ~ף4=zPcyt@RJ4kBXI&PFv{~e #>᜗%+ ֎5$`#ElXvIt,k_zҳ6tҠyFc4ܲ ྃv\4L.M$J 2h]Mi_ľ~+Tq)o'юxcngHZ7M( lSɗth@aFC s.{M'QŴY XD!=)}Ob%M3I^қҗ~}:3%'oN8GsF*q˚5',zZ޲߳\޿K?&Yߤ&:7uƐK]Riܵ![v%뷮;x<{Y/3eKK{C!;CX*v 7,հK}*vNœ72[qHF.pj|y'D*QI+ W[ qՌ["Z$v}XIyir^i_ľ~*Z.ީE۵/ 󕆫Ĥi" k;Ki/Zjʄjc45`LOA lD,c41XؾF![vEQ?Cv┖vbޙod!JW%q224#Ğ]goMbڭ*j,3#J9؈bj;] +)GrM k޶ӯw3&y.RNk&uTĞVH8&,i2]u޴/b_o_Cbu_9N"9u-K6^QXH\& =%1kS¯d~Ưh~~_Mo+y׵S$3)v( $] iVRNjZk>]~ωT&N#uZpQRh)JYDĞ]>e_ľ~ߜSCD#62KRxҾ]XI0i6(ذ6$޲/b_gA+܉E-@Ve${l?!(MW)r]i_~E!:ϓ,E/pDSjagD["n \v入tKYcc׾7 ؏>GNCQ 5;pL{O G3>5xYE;6%,ӯs|>F9b#4+Ϩ3:%qv4|fq}׆Gv"~^MW7 GJHWJ<k ލ&Tqt+7Z)?k_#DyѱB-5{w$yqأXHק5z;M<::gmQ4?w`% qVG烋.a29iLmWj慕F-v/7Εljkt сsOY^LwGQ),ő&}8ڻQ~UM~91[^y la+}ށ9$搕#vܡ!nb x6$|{N}w~%4u1| )N}ds3j},a%9afM66b^ 3M;?M)_"XRLd14"JJ|r<צK-;A[X ' /I؏h:]c)U3JS!,ZF7 ؏<VZE,DYy_ekÅ2T/MVL`n\;3 N%{VDE4^:V$)´ӵw6^Ex  ^ IwXw~稦&)I mSS'<$ᝊycVx ݓi_EsԊ C_413".@]p~QfIҭhao`$V/Ncџu޵!nmo ~.|hߧxiD`$vC$ZQ&cZ5]Lj[i]{Ӂ1+UŤ ቸbvwX#yY9y_n]a%MHž6$e_ľ~6>}v佅iXͪJra5 [8E 톌PW5+H% ă><KiΚ12˵.K~u(/OX)(LHSQp5C\y޶$"G{ K k˻,4[@wȵ!)؏~Mܫr`N_|ϻdI1 O噱o ,1m.~M;x_qϫix o '׷Zb%?`\ Iqqý Ϛ'ףՠϷ#1tZ{lBpPЎX7gAMdqm4 oAװY$)}6})1HIfOso:`Y]7g <3ڻcx4]ّh}XHX[g~5Y ޲ө?y{ok&[ϫG1uVI#J,ˍ#M1+&6$evĖ\X U/aMLkΨrP?K$=^vkCK. 6hAgxot714'G=[?-N>IH ]cvFwkC/nkľ~ )͖,.>"ZW4{7&0=XJoHC'`ڗoH{NY[Q f tHP1 ,[#}U3ZM{`?~+ZήSh 5;C»?᫔\ y/̞= +ih 5_c`O]Λ.jY[snQR)w\OeҼj6r Iճ>Kp[AaFҁ:"xf;.Ln>MMà EF\ݦYM;"[":y3Q07OMo)XI6ɠ8 ؏7~Xt=Mq4o0R7馐-kiR4vCk'7ho.jWw 7xuw*k_o$<\>e_IY_z jroN}.& 9;B45K9q6$|A)DKMg4\F G}}Ua%E<#J;*TL jpc}޴/b_Ccn朁/whG$$HL.`!am}BYbyڐiM<]gv#Z,M4ܝljFGI'Cҿ!RKVr])iƾ~w)k'MG?BnjD+%Ul{ Xa%y56EUk'7]%QRDi8FJv.bG URmӴST;I§. C#m; 5++)Z[JL\dY@,$3Fh|6$|{NO}Y)4`Ha*!G SΏ9hqfYSS"6cT|dz7od UC|kΰhYy%nx%b)[kG qĮ%/b_;!/5Hτ7oɵ%(c8̃d "̓a_,xw|gf,WO/VdZb,_,%YhfT{)wmH7)љz,go$[Ҳ6E)&k*+uڐp7޴/v~_J.)'jP^2QzbN,%Mǫk?خĩOS\aQjI~r3*H~ǐ4aDY84Ǚj,%|fe6#6%3vz:QDb򍍚 /0N9ݓ?|cdu],%*-Ϩ)wmHGC[(AjP8j4Px$Z&Mcփk,$=6$7ƃv%?{d fGS\6%w O@1&ЅMAͤ!am | ! 
π$I7[XJ=ҠQت'MtsYvuڞzq==w-[$a\{EMx`- WHdV=򒄧VƘׯ¨Q2Qi`%Z8DC]FŭY"cWE.P59su{ع7 ؏`,ȫϫ)0((<$# .hOSHaspghmH4~z?\" z+:!Hiz4Yz`KSHѯ_~w\8mC&\oJSoa;Bھ!xXI0 I"NӠEy5h- k> ,;!Tf"`Sk [vEnr(U8PÜFُ)KR#>p!^&waMZ;_4M;"[e> j$wDž[k^{Wk[2 1ԁ Il~ֿKAbkpT$@}aΒ0.0ӵc K)Äxb1SkC§IA{Y_H֙PͿVUF_|wIq?qʠf .gpmHX{*<:.( @ ݜ؎A>JʾH \[i_~w Z&JYtߓzΡ:F3b%+!6Z;Q{N`O5kJE|&= DJdW,G}5<, b`M9:KNw J f#^{GA<j̘)Wuq M"~?G8/YeSCN#[t\#uEYe")is:R闷a3Ds ;.((q6Ei+ +Cmc7C${^mR^!(3&Yk9,Ht7)uSYɘKw8(ݑ.NQݫv~?0d=كh8G[!;ꊊ qD3ډe;z$ R8 (hw}S>/]P|.8e5֖RyDgB.)?qz@陱=r\PT8h.m|&֖~Q+ wK7F1FE,Ιi'?3[6tEm II&.dz֦~Q;ʎuez:kW8Digt!IS }mt_Vp"[=!2 }ƚ|1=Z=J]Q>9hnR ;ڐ/ª~Gč]qNLx7%9;{&9^|tK"۠%5㼴.[ J2>&_ԽVJQRZ`yvecFCN=uEFõ%_ҽ~%/($EQ%_V]Rt6Gg+I`t{|BqsSb=*ȪȖ$/AE8S?ڔ/˶Go-Fqd"K2Dy%U|'')*.ϵ{|i-e qL ĤbvB^d.[r RJ U6w}|V/~sjg:l1%2wCyU_ZuۊY0owfg%5{kC.wk9`̩/)8;[vtۤ=s/׽V߬>wM@eG¹g;9b9TL@w۞:n"*JxN"~toíDGkA/gI.Q"JQC5tIE8P#hC=z'6 /~h&\ۈo ) BTfx\][-3)<ݶ[IAM3}htodp@l#go ?Uw#|(hjP-A͂tr8h!_ԽV6͊Vqs0y픭};(YDn.Sd)xM9/94~s {QUe, M*H#UV6@X2%ӷ}/8. J =7?$ū 7 ΁`ujZR,@͕&SQGGl{lg߃6 /~2P31ʈ*gP8U(GʹtIuIUk_P}mțWw-`iM{7'lQ٢rԲKTs#@ؠDݻf\$]RW9`@]Se;'vtoX"bB&xN+ /\8%FB uI&'.xN J􋺷M9qs xdj yClIV^H\Wo@P 54 Ź =E $tұV)(nA EAﲆqq ` 2͸ɒ6+A.()tR4==agm)uo,xuxu*9U ?ͮS M]Qr3p>L%6({[sJl-mǘymI8UW̓ ςƷ@9lr tq 'b49lTrHVM!98 v<~J)J xoPTLgNԸcז~QՃGN$TJkO9!DgLSpJB5l/{\[rEaԢ fќ,D[BoꒊJ\rEoO-2:`N终1[E4W690+;]mN ߏdzw"8U%|tvwz+!.@ۇ 5􋺷zR0~:Lш~y,"]BWTˈ\&vh&/~?OژLNSHfo%+!YrbPT8hB A)+gh5eۮ!ͪ3`h#cUg)Ui7T3pT9]VUuƵ]>H~?/ Jap [Dy&wˏM=._t95'0~!_}?!w:8h/st`f6.]R3A ql!_Խ_0b^hOp_/cKꊶÈ9uM+^5ݐҽ@αo/,tSZD!N 9C0&Ld:V ۠ڒ/wDbU#9Fq,,,T|ힹ ?!.]RU|?6(r,Iw FB7].]@("04 5.]R9MzrsSAsj+Eҹ}\%3!T[KC.sk,Gwwוּ7ڍvS֦~QrwSp l C ^L*ҥK*{g92ea'm>Fȫj*_%?nD7T9.&w ]Q !ݪDaڔ/l @<KPG$娛XL۠6Ao=b8=iaꗽA)&]RqJA)>w)uo  [AF@WUn9ev5<-:*ٮqlX 6~ܜ+1Snv,J1B>*.-^8 Gcʶ֦~Q$yT)TG>k` WHM@ {]RqS#6(6/~wayz8>Q֚8MK^+uXOhuZr{J-[iD¥7RO6(TqqnmۑFY豾kzs|=q@jr,jINWSw* ]Q, ]BexEk};JqtR30udH#Ju?|h1 ~gt"/urmPX[rzJopf`u8Uԛ_hɭuIyJqcn5nK!_ҽ~~ZxıcioI̊6l̘.]RM'砵AhHŴ6􋺷Q2NQq!^T5&Cn*r8T\rűշϘt/=@bf*fv@b_|,N2u#-7Q\rEoH\[Vje`\uYcUQzfjqiJ ߒ^ ] v`jmɧpߏ;sQm@AКW}B,J{K!wTDK m= GAFϐ306t).Rt9LlC]SBuL.]Rqs}ECl]kSN{|Ȼ7i$?sMF2MkpK*~REhZ{.._}qnR7+@\&譬K/&G IR~OvrZ'ҽqP-83g?& P=uEYvI]uu_Ɔ|#8Od48F*XAtqcڐ/Y|;yQ0 Q3N \b|de ۠+E-{u,'焨Qqj(dJM;s K9@"nB$mڒ'\@[8[H3$%oʏ.T!x>r._}y~m[Uzy~m(d#gD]Q)/*1,@ޘNa|dNbJ}9f4}w위Zfv8'ۚ~Lޕ58,ҽg^uẝNLtTQM`|` .uI-$ΰ7u6(k-9-OBg !B5UhnmQ 3 /~G3ZGF5p6D)[ȧ&TL"Myuۤ𯋦*\;ı߹7L[غ-Q ' F+jx86gu]*ہm)_}~wpZRa{Ψ!WI]Q>9Yuَֆ\~u/l?U`SmZ㭢o?IK1|x ղ7.ܿ(oxGÌ_>߾YF}@Ksw[\%],~!hkqh_8bo4ܘS*!O1ɐ[Bn%6 CNw$14ۆ ~oQ9Gwk[KNn:(/_7~d?ZUdV$ ?_UԅR endstream endobj 309 0 obj << /Type /ObjStm /N 100 /First 923 /Length 2986 /Filter /FlateDecode >> stream x[ko7_ q*MvMAlDس5H*{ G8;j%,p2/ΝsH*%vG- XMQXG-BL8Z+>*ɺNh;V{Nw0oX1I"*aAp(c0I3|_9X3!vxز AN$Ai+q6 L,)A[ SOpa g=8\w = R.ޖp)$6ibhs ÖN§`@0n&,gר~ f?v"^ZeD?u߃r\nWCf>unEswo.@w1_5vVzZQZ Adgew ]_k p&m>t_|^ǘ+8\d < IL]J&6Y#y?X:t= I&Lpq ^ytF X՟rfŽLа;<>rJ3%a` ,bQp,M.A J""2a:6U% VF0ՉNmNzLyZwM2X'%SZ2xӑGlD^bvt`X'$u: |!@-xPl 5@I`0:iؠ @6cscGFY 7tQhkuFjiG$ B@%Ȝ-1@LM#ſNwCߥOi۔})KP|FQ|7(dn@3uL)Swԝ2u, (ۥl=(>穉9eF4v3#JZzo7IGEg0H >L@7U hfalH^xthsں:Y[U,D>s2 m #ȼBu2&&KǦ} / 4mt ;=`lc9:B'&1gAIp'괭zo75G ܳ0 @i̼u[],gꂱr8k%SgT:/ %1XgG4fi[Ϯ1]/ >$ID/+t+ݲN{Z飶/{4$H,XiOXeJۑ}#lvuuxb}\\4s.믘:e}+{ics! 
NvB ^b>r4ݞ RI=6ϦCP%;Ҙ?QȦ?x{[݂BwNT`( ]c(x+7mu8yTg^,@uL~ح2  R҇emWAcՎH}܎v]r}r)sS>Q^)eIY,R?)˟OJ2q[߼yn\qY/7+|YWDżAW8oߪmU1͢X׋yU\Y eJ9QyOU!ѦM Xۛyn.f]4Ͳz[JzWΪWV3cn6SwiJGǵ_J +iPxX=4N mxH:  2(k,<,( G)^=rnA*E5ָ.mZe?$K=oIPw昄<3>ߴ>P'i'-8]bOwe ;eMx?ApU,t| L>H=o心IQy,{ONmAnI ]ky׿TJ7SJǨS6ΐ2_ݾr߾n?X!{uc7Ґw O.NW endstream endobj 440 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 453 0 obj << /Length 1423 /Filter /FlateDecode >> stream xڝW6 ¸OV,Y~lmۚupbcǩ׏/rCH&)GdESt(ϤQXiɲ gJ,%˄p1ދv ,+B^zKO%IX3qFwR7\;AǾr?@+R^>B=>y둲^k?/{ّ/v0j+w"70An wԙeBTq2$XV(@f:Fw]^>8}U@0z4؃WLFa~H @yU*H x~v,iX1PkTs xS2$"a0"/P !Sv١IcGY/GfzoXR4vIQԎ{br'Hm^lDuwwg0ѯ)g)*%J>7Xu;qT&,[!*3* rbcDLK0u)JZ\ǫq /wd5d'¤]s􃉓);IM0֜Z4/Q<s΀1ԃ=.|-SR1,?b)$:'X}bj0fO* #  8eynd[8bӞFvٍK$pr Vn/ԍy8ЪчZb?f5Rh?t6!3Y[Ŭoxxvx@E&+&Mѻ BD( Nv|v+Q';0w= `e)e& F{y`S^|/hg/H_USmye FP[O$ mܞ\wb^J.FBk9b^Kpwwht'9,ԅOH7>*ZnL:*'/{뇌[X4:WP rҘm3cމPIId13%PaǬS(xZ # endstream endobj 427 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/Rtmpr1a6dg/Rbuild1225e624c0c06d/clue/vignettes/clue-013.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 456 0 R /BBox [0 0 432 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 457 0 R>> /ExtGState << >>/ColorSpace << /sRGB 458 0 R >>>> /Length 59311 /Filter /FlateDecode >> stream x\.XO{ O@p4D4 ;9.E^zk&u?}MS)Z{???'y9?߿'_9M?rjk[ߛSRoi/M{= (wݤx5Ƕv{חK^M[޸Roi/S\=zW3K`_[MWsli[{CmO})ƶ~p#oU'o{;o%]vwi 9zYwއ}½G> + y8-#ؖ.ao/߶ -Կ/'12MaU 2aEA7'O`߿ݤxu-]~ߒ[9sU|^#Huk y8trEn-V{s״`Y;hs2E޹f6_XQͫƖ\v {em;͗wKny&cAQ5/&Ezm $x)ƕT6w<1k'>"0&H %p"=W.՘n[8-պ?=7}#s?/5?xć}nXQ5>td<ƾإؔ.b?voϞ)[xמW#gb _UsjL Mh[wIcSnح7׽zWC}3 ;\q3eFo+0C-`$} k(&K1Q`?[\.gFcٳWq=X6Q`g)&ݤ<-]v5Xg?7g}/WpHO{d5ݞM67MG3rEn'e52c^qG'gc@w.;1(c|gzIx{ wc&9}Q$. >]u5)<2pOPKXSЍd%ٛؽ8I-ؒ.a?vo3ef`s&F{Rnς&=h݃osaEX:- *=e^7L;+ߛ3=ev0~|~V9߄-x7t[r%m~Lv7ZgᦽOqW9 HMza1? kjz1w>G7)~G9岋؏]MnXp8=Qpۇ0)lމk?T7Qؠ?5{ض7&[rn.u)B'>'}&ř?c}n^-ƾ^Aj-"c~gfrB_RO,+MUj,_55 N_bk[rEn9SޜCߓp%xJG.( ><ľ|tѺ##]voXE/e3fNnqSޛ}޷q/PHX)Y3VtjrXu9IhiVݣy̜O&qE{Ly8j`M=13'ݤ~G.Ϳ坦m{1!+{Nb~B҅p"Q2и jLJe q){=O9ms/ar aMؚ1K[3-"c7w)wS"(S 6LpO99IA)*<\c>$5Tk`Ie#z%2Ҝ_ ^eb<;Šn^-'!c+ /x\v {-}ii[CsoիZ.[휰W7R`~B7(^%]v_|ȜQon[~XGK^Z>Gv\zڏݿ &ū9~T`oq1ӷG g!{ͨ+kN'Dҝcؒ.aolXJXp:f28OuSo;?x`hlƄkX-؂8i^3~+JkƤTw*0g'fsq|p.ZpN3PANDwpiq`1%m>_ظV z}NVeG XR*~g? 
[binary archive data omitted: DEFLATE-compressed streams from the packaged binary files, apparently including the pre-built vignette PDF (inst/doc/clue.pdf) with its embedded fonts and figure XObjects such as vignettes/clue-031.pdf — not recoverable as text]
TEUB{CF2: ։PI:ul.au")Y}CChAv+뽅6 "yx *nT(W4YZB<GRLi8PWg$68 qwr$iw]CS\HNFni7EU|XQZn!1 wݧ^Hwc;C2*Lv֦VkNMNuOp4pi[eVhK~dYnVy`q =wt:yO7ЎiҀn_]ox_^EHB>Fz‚Нʰ+SZ8t5d(0f,e/uБR.F7(Qǀm PUϙCe۶tP:$V~}I|H$GH$۫Z @03b89_}8C%'c4Gk7FD. Ѫ?/j~zF{QUKBm"jA(HPЫVn{UDϞfE"b9p&Kx=J/_uHy)\uW ., kS.k<;rytѭisu'3U%l?L?0kz.Gq})m@$ϯ$\z^=wEl u+z:[88=?QqT\_|&S9# i/#t:ii6W E(3zDnxEmbHP b0"NhmDxuDyOc6~>i;)H'e&IidGM#cKӸG#,HyE3V-|lGBxގ:.!zV˟BS!-Oz!$-[ϑdy~.] Sˁ Xdmte8p!Db7|\ӣ,ž5Nzj]` 䩭5[aFg 4]!Q*)wgV&Ϫ GV*`DQ!Y[~CO1R>y1"4"$}ǤOD |F˳9kQ(VOx5(S@YM'n?,SaU66t4QoaY8& q ԃ9݉!v!7{k jGcY(؅Ԯ>D8D( P$QM$T^oN%v@}jckAESЧlŻ0s(f1vŖ:d'TlmAFO 5Ωvo#֧@il[1E#b>[h (jrJIξʆ_N/Qm{y4QKE"6L WqYlM>,S.WɭxZs&½R-D$+x9}2uG9Z\B"d[8 nTxsH}(cma+H )49.Q xo:ysƎN*XJtm#9LZxQ0 o!ETgb(O "(rN3.TNi3،~!{l(7)|Tr|{)\V趻t+5NvB}?6D p,`3"n\NA޿hb,Aj&șJ=6-FךKBCvN_[oj9O*ZY1*MlthIo0=c.>,;Ak.s,Ҍ/>$z-Yt~~,h".3 jG3+n3:=lس~>0} =#L>|ț.]yY  i۽JDK-kF+ǾnseciIiu^03FgNg{@Iަ‚BKO~{E 6)ѹ0T4MT:\2/8 pl3/5!T}?w` 4TSތxwfM2OŅ| ~Y&Ӓ)7b ro2ZSlrsND,{iZ1e*̒(m6q}4N|>psǻhF$5uTkLyŒf'vq`P0R|K.73I3B86 xdUV,U|눜?AB25E4!+wg VўK$) _t?j1YAY꫖n/XKd~:l{V$0 k/t~k a?l%'id|}X?,!d "n$$hx0O+!1X*E q KP'R1˝|G8U1|D=U8192+hU=R즋NsJA@B=g4""O7 IO!ŪQI%uXRyo U65Y/(ioFKTERXl6STn.cd)83Z{'F_':si-\^ ҸJ8OE15VK.iq=̇RyQ@XM~-ܜ:bsqvf)H쑔G9ө@9NB \}),Yl4//QTEV7NzdƁiآM?-id= k.Mu 厡Gx `N Q讎n ŵ_-?pvW1?T)ʵ4ݽ@ ZlK[WWZ9G4&3w,Q?."@cNMSY^\#>"ƀTzfF$;нNY}mg UGQL]Zjk{!Zx*ΟYkX\59ZA" 1kG@aSGY]~,,eB|qpA25`1;tۺ Md/3o =@))e n;Sly?ZU]?;H|v=)O4ITR*oD@SB1HRPKt 9]b79XLtx8ŖB67sJRn]:b,kSUºB[g?)Z`70&JfOwGe[Gӝy'MM\z$A8ң;n\i$7x2oFwLX || t#;"`A%,& dk3$KmɁxmF{u,. daYT +ܤ9PK%wsg3Gc\b.M[qP :7jSmBGqR ؿw4?~;!M3:FP)_w9u2v;gXH99k{0CZEp:7}mq4v./n@5c( LaVT8g; m  >;8Gra<V!;Gh +Tx̲܈QHZZ2{+ilȾDsmdٗb٧Ml{O с([Âwl>WθS֘[L*]1M 8#e`~䟗d$ np[U 9{urldɐ0e2\U{0G|knAw%7 #Kf"(0yW@wZ,#VHL$V] ~n4)ìmJΙAqN>MZeU}V-xe:Mb'p7X.A%4u﬇m,֟WYmL7CLW:2q,+Zkq;x.vhʭ׿m(s*avmR@&S:\l. Ss+}2:/VRcl%!V4b^} p֢}?,Yy2?`(õl.}٨ `~)6PQ[.5=Y`M)Utϔ!,r+\N9 ڦ2JeV&S-lZ|5;h/agTl ΅UN4s85a=3M`c'I*(($~Q -xa)zEd "n/8˓XUAC y%C*3Y*I~,J/! FT)O &X$0$4Ws Z`$lӃ(іBfGv &=sJT X60 @3K! T64+XPϓcIWuNoϵ $]NŲ K&Ճthvdn=On:AϤ/F+d_}H,rV- qH|w_WۑԠK l=|vlT͹YAlrr#YR^q:Tj-D4 E`Em3\C@{v1-).Y*B8ΦbY@{nfТYܐ6r[U>: p|> B0J+)uFk!u+C"Ra?Y؉,cDu(-5ՑK&L$etš")_(dDP\?q&&"z Ll|s) Y6Hkw^zķNN\D%`FLXӨG endstream endobj 677 0 obj << /Length1 2405 /Length2 15114 /Length3 0 /Length 16526 /Filter /FlateDecode >> stream xڍvctoi4ӸmFÉml촱m7jl4m>缻~+kM!L+hlkqec*˨02T̝8dj@Gs[; @4' @ `dbdb`010p@ K:“ ڹ;9VtatmxɂAat@u?h@OX@հrGp'OAwuZCDGAcT G+G t5( 'W(l8-=ʎF,.R;O*ǟA<ΉN[?_94_5 jwgeS{w&bߦñ,gɹHQ1Ԇ/j)A!wBW#DžxK9yЋP;M2>1t[L_{yhOeyp_8%|g:E,BS)O1*:X}gCj/ 5Kع:Ǻ::Dؿ)_!!u`Wj8$͟1JFڹMRq#ZIxdHPG2'م.Ƀ#I*1az(,t{n+UNe.qi~OEJlHR^{F_@;ԋz2}@zs6G;c[6bQߴEw^{H݄#u\T' f ܷ6RxTA}OO3R_]Q1zUOg$[KG)ۡ*R)ȍ?jpO0h,1̡E-گӑ`yQĝ.UAL`"!7|ٛ@r?R\]򫞨#srW.*'j72#<[,e+2e ާ/tȺuJ_g-*ug1]{~6ж:宾S;`%\<$kq:\j qCz'F@z)^>}71>SO/U@BAaQutEi{E%r-oݬaQ$9:&֣fc3ƫm^PF0kU8iN xSraq 4hM l.-7a;fXNq _<lDA@+`^qHΖ<8:THXe3LL{_=G\Oqr.r>ݨ$7f|E}3qZ{ M#ֺ†/I^.增k#l.|{nީzĖvwyGWىƸsۊ˶ A)Su lmC|n FJ&?Prً̠}OT} 2c+&/O'_Z$JU~5xijs[a"ȒJRB~D8}3Iu okغsL/2V9PKc?&jmUOǞ°,+E,f74)HK%;e i%JLYeYp,Dً9sh!8e 6(y[O FN)? Lؙՙ RLwNgxE҄Xg`f`Iײ%?Dq ӱi}Q]OnBʓ?R #Lhȉ%6 XzW) A<BNa($n(fQ0+*X#jᘇQC+Ui)޳"meB"?&mq{; ЯxE;J Pi-zQ_n0Q {PwDB}I?mAMqr6]f{Zj(ް: &2<7fZ_Sa)H"Bх0{eҰl?ԍ E-@zV:HTr|^NKHFRJ̘$s paoPDoIOǕiGFzs[vzKvEHH+CPƾtMbowީ9A.#lz0 #ML̉5:8/MT5}dgX(cv_59{ 872#=C.C~~AzgezGl۸pɁpONK)FZ_e,j.+p-6"<Fh u*$MaA̐ƮRLrx-oeyG[l.Hr`{? 
vٓ {"j#rOMu\*W7c/x<*Cu}\\]u)ٴ{@~m[M:nNMR 4q{rD9H0SN;s͸4c3ؾd|IW"+AzWZTmPZ7g}΀'K  Un ,*m.)֧s yw8qDM>0R*A[m-AUU_֔99BWƑ:7CC&`eHk,cLPj$[VLReF{u:=.€6Wkc523$EeSgVzHϢk7ca8¢lO"a0Zw1wl-ʖNS)ͥ Gx6Sb@䄵x* +9͙M`5@-sXp |6{ w;3 :gėå0a@%=d'BI48bSM4T wq_>1VcF a+uG,G/ϙ٣>sc7|BTE*& \b?3kkwv'-Urzr~_]e'Cd `:KF^u4,Pqo7Bov 'IǍ+ +r(1'b2BbW A*ɚ%OS0b)B&pIb2 DOWHdWp̀xEcUhޭ5mUHsEanhh,9dlLdZmc̬1 S7vBNh1 ۝vaoc8VsJF %6R Z:scd[Qfld^m>6ИJXaAǐSf"xaBD?+*sGo >.q۸nh0&@P+1dn3pX@Z"e7XyxL$l=b8Қ?]E*~;CRڿ Mm;ǝXJ5l'=Aꒊ!oFm:ozI4~i[[[nIZۿ>pkB ,t'\5[+S/%niJ 7eM,q̍}F*BjMӊS5Y .דQO?AOhpVJY-ȸ>hŔ%Tݍw9cGf3'ˇ,"1؏ݘt[9Y1 ![?PՊ9 E_\ѧ 6E rD!`.G+,3Gh> Nֶȍ {zFWK2T2 CyhF3'PZ:m 9;ho 8dEhMW4iMaɀu&2ݜr I>Vچ S2r6m9Ƕ5fM9Znjg/{򨑯xTL`S7/؜$0mz#I RsA-on)"tV5od$+XpqdD 4[9}O!ΔRW9.ҍzk5QтQ[`pfO"}1(/yLR?L8$m~& $9 / wYln{}ǡH/kɳ6ǐkqo˷ei=Vp3$8C ||;5D 1H &Iؗ1E͜v؄NVrAOQד т*Iv$荒 Cq]}hqҬ1SKqpJܝΝ9kwI1Ԩ٘/ X94i-\FA9M?EréTɧqfn%z1+Ґfcl74>I!2N1,C*޻MD"nZOytZtx]2g&gT-h_RlAAF8(ý7 ãǤ1><,}^+mu,Gظէ2QW1Ҿ9!ahr%ppNd:]q^ 8{B*~#QB/zFe.3N2 =O;!deY\U9聆˪PPߦX?s)P_'Rؚ̙[|#hc ђ>|z;=ҟ~=jBs!cdd~[NH_oONs2k>͘4$_ M/xG{*a{N=yDj goq` itOGU\ֵyEj!X&T M_9D[mᒲQh"p1aG2yvm='c@ޅ3Aꣿ4ʛD2u)eq5:)g9s_6B DRv9d@2fzWO.8>^//lO~6̣1Z{_}=]^ҍpQv׬bz+u97Ūj\E'9 }KE(spݒNΔΧ凞ީ  ~oا,+QKk #a+m&cb:_isviߚYf^$j+K^_~ <[\GhA9~_X78=a&tt+ X?حWҺgG[e4ë́RL9d{V0Y&(b߷ ԭ%^ā\6=mO]|s7w#Q]7;}тOcA]yH<ﶒpCSˆZg\Z3 Ƅy)/ΣΩlƀൃOtC=JEHt_ 'Gu /k'g P40(,  D8r/Ký,.ZWnq6} ֿ!nǥ\)cM7.B} 6Bڧ'cEqAAE J&mwu}7+xa[b "+Lla΍zԊݐM.~ute]t6ԀXp^/6֯ 8^<[1 zI-֤ra#KD_pdl5tX guIݢj$1RpJ}O@ǬaFVZ]f]^JQĬULQ/*4$Loa^ mdޯ5G (39ox( X 3Գ Pvei~@ 0ѐ88-GѺ;nWG$0 ruvUF<>qpz$! dS<@Ae{~5 #ǾO8T^]XIU_ik.WT-iE>h[P%ffqZݐT]UY2P;&|My 3ƗM8C읧G(y `?@Zad|$ "j|bWR0k NbrVYЯM&+s2߱QOivFYݥzƓxŕcσ#m槀3G2_@h)_3D1) Ewt qEz:VY%"̹i$V ޗ=_µ\96. Aφ@N\džV2f 6gcWo5À :CaǏ4H~"`T︢'zrvdE S4uFj=*)XOXLԿ]\|> וKwn.@VRf]k-6E}%Vkb:xݜɧ3e=^ bHp% -k<* }՗6:ٸA'vM &^J9dtH5b:LE[-ο =f+yjUvyUf-ټX.xV|[MċҜXMd\eBX.rO.#`4Ɗq-ZjY$/:Or FU+R9\py*@ ټ~>en>hٍdW%>Fy[Nk!!sWB XRv|ye㍏+x4WiStif:P,mtVy ꧶_NT0&hc2>HMUM8ݔg!,T"y``%nPddbuMK+jھ aHueD _Y7|b;c3ݷ)9D.4M-}/6 zloFR"J2WZH_|Zf#ٜӸ]FD#h-+j8J +ꉪꞝRK? ͙QNZYt*v_CaU27+e:n_0T!"r_gWlE$׺#D:Kn{6FYӳi3Su_'c8JW^;moڀww;3ZwkLp6;|U݆ shڴj=ˍlCbΞkr{])V 2[QHBnX;m)U CEF C쯍1|Yx Fe$9[34h"&|+(Hfz.e&Ô-ـʲ#7G?s=I0JthoJ)*bϣDn <ፋ j1VJKG0}/ac(2~`:x֏ԅ8޳r[9\;̓;Ca j}m/w7\-A;&:5Y*$>7y3I$Y("z%}ptu#4&coEvS >#KxZwJ\0N\)3_DTdz 4(Y27KKK3{# a!' !/%c2[Ǭƛ`. 
9B[` W^Jc)y012wlūW\tA U^bMZ#3ֱXXv{4wӎ+"/(G$2gtݯ|E?&IA1syKX,Mv L0 UAAT蘤WP'x1" +/Ianw'k3l#fZ]t/s\~JA+e+k٤o})x\,Uv\/}B >~rc{ĐGqcfe^!*30Ch;6mA1hGfj,isZ򵥙nV3YBp!$:  5Ok={h\4KE;֝~\@c2ڸ'ВC/9fۖ'p/ZGS!X sTdV"o zTht/]グ'izLCҥ]te,u ;鯶3V[Y߉W5ZlR/Tۭ4K:d?T~v+WqFTQ"s.΂2J2OBh@%yK\blnkXe*tR]1 ,{A>jaU3,DS_Oߎj|/ mCy;=̓DtXX_M ɭV6eS%Wm_kߋ') ]$n) N2|{!b{ {:" R!X]%9@f>YRY5 Zef)w%!uGo:ҹ)%And]ز7ͣ,.+ޟ9K^tIc2̐#*=BNص b|Cc4?wZ*Ns3~Pũҏ9paqJx=Pqr55~mfb8r<>|).RBYoe` +#eni%,Ixėga؛PV@^gyԦC@!Iԫw'5لC JnokJLXhBI7|;ؑ 6\y#S]h <²SIv\*HQQOKID/ٶǛZɟJ2ʪ1<psk sAт ~5G5Ts"XBp֓$HI/{{xI;l.y؛n vcWCf2Q-*fꭽҋ$r?FI52lD/|cf!aV?ԉr=(s5I@ zP' s"/7 nLfBi= +,~}Y6z/x&HF|FREg5zb}CLgLtS+9orX(}1X,"2RVJB0k[:i;P[Y4:cu+nʉ.+VA cg UC#auN-Kޯ*WDv &gR i#,,ntZ:[y|FYdրZ~M4˭CoZx== _% JفF:0Oge_qU /ڭ[׳l@b\[29 .ꅰP+D,oXAM`S z&9Іqzj7 :MKOߡweY|F>ML@B&^_ӂ˗cS|~ ۵+iNn3<L*y7UDo^F &G2aVNLu̼ RM:SE\Mg}=ixx ~5ɶ?k4hL;dN-tzf읠xF?w F֠kMމ"'&Ǿť&"]+zȮ#n;b@jJ<}B 5ayka[2I-2k  6Sm 0 L#ݬr*qCލ{{tJ3b,9Ľ9<&~w%XŠS@w*}|ߞFi.9nw5  (t<\P돪f麳+v΂L+ZcM__޷wgL*]'0 , Q~cM`}1k*͇zޮ :()PƠE`@ m!G'Ym\"|S<%dJU\VRT- li`17]h&}_A2$+74бZQF/?BYGZ90A@t"%r;Y֍VS*x l_YOXj2"nP^ ]x W}w- 1mF_A mHJ,Mo/A asaS"BEvo oKgxfy'Wq*#$t]"m_> & g&Q{ȉA⣅(EWwv"<Jtk!|Kgtsѡ)!LsOϜ ܬv9}ԐϹNcHv>vγ>ۥe+#հ^FKi/b[pIW11HY0+- ƕGk{o Rp..͉*"8ء.3XR:_˂hVx< ũ:W[N[[۞CHthU qSdg4K gd(%Q̣ZC&n ys&8lP]\ ZWY,ǚyu ]2.DYcN+>0ZxYĄ(VqI%r:]53wF6# չ҄XRQO)zdv8Irg"l>ar?uk endstream endobj 679 0 obj << /Length1 1619 /Length2 7678 /Length3 0 /Length 8756 /Filter /FlateDecode >> stream xڍT[6NwJIH#  3Cwwwt#!! - %(7z}}k֚g?{yY̠%k)p.>n^q)/W q@`T [8S#0(@ yy& ^`{&7@ y0|Np:?؀>11ǿ w0 д;\+m!} +'8MۛՃ( ;@ w/=W-[WПָqN`0; ` ڃm7/_ǀ?O?ѿm@- u8! 7` Ex^`t[.<`7G_i۬p_)A@ľ9\(;jӍ ~ RUA@8` 8@WLXXz x~-` #zwm w @`(?0/q`9/B~|_bYUc?-)'sĄy|||!@؂_P@rw^4g@ΥC(`GB@W"%O䷟/o a GL& 1 ]M=p[4B {(}@:`8/7 ҁy0(^! 肸E<!f*B0_/$ uwA5![n(  z 8q~G 2¼ >^D;_?s k#tuAO=m͏ Co@A/l?Uxկ=I;[m}@> ;P"eDu,7FolHs' + P7/Z'n7OY"4CXb>m FrrhنH /90go#nvz9D󑤳GmxZ$z#D[,8`)BfFzyT;qiS;-kܺ*J?<%>k_'e@ =^bYQnG KV^'E1_=R@h%le +a0@9 "MXa< ֫lYM\^=>Ġrozcܾ7_s/M7V`G{.VKY:OS1۔hf`6{/9Ank^,s @W `AqdaD;Ȯ굎JusrI긊 J &.n,X/1Ft!%LA\a )uw:"}'pRZD7 Lzr5Ms=O%-|)V#߲u˨,W~YThW,Kҟȶau=N"@mL^d}$yZ-8e (g{8uBdzܶ71dL7*E}Wȭ-LGFt-VL ʥv팎8iZ cZ6R'9T\Pދ!!ݵ Ve21qv$v>fHAH@_Og_СV#ױO+8OP,w!wȯ Ã:SGr+`Gb 30#KNnSלv|^~R͇(h蚡 ^pAag=OfM{1@\|< E" B’-嫥Qfc8^:Нri݄MMDGԽu?&pYqy6=xЄat+>)vߒ&ʺ^&jۭr$n# Eȗ$<f~g#K{>7qkJ~Oc_%V1E,XbSlO"TLu6$R\\)3,(۟,}buWi*7ؔ&ncgc8VOMfy9>#3Ţ] kL i9Oy#y/DߺP,?.ķ/.=Ak @_ȪZFSh^|i m KM^dJKe n>ey5d}z4nj A]Hۓ _VC'{bn ֽ(mk+lHTN;[< HI/+Hj]G"AiRؙI1.r|rm]#BcU7;-KCzS+M {.Sns3m:Q .\\\,N?ьd&o]fCt~΍Eۨ$q;Gn=NcWiN~Q8z@gz?ēžBP׳-sț򖃂_Y9 CO\x]) 'g.:276K}m50&`֥+eXpؕ]t?Q\?1}pVI}$)է2@v"Kj,EE$f@!ܽB y9n=|wEJTD}1HZx2զEV;Z%}5}q {"[ǏV\MN^,m W;ZHmf:ݝ3:\ꪊ 㳛^0vOGEqtϛDHyk2/y-.bLUR 6ߎ%lv?b?0UKoRoӳ2ˣ$xIm\vdWب\4eSsVy;~L<֩ڼ.PdAķ";5L6za-*C7]Jlaj"ς[μ6.W 4''ViY~7 %O$A7ٔ@OÎ0!IP.b0(p ?D4ܮO3ǽKj qфKJ6DqlɣYGeݛ!m#噷WKE4(:,"p8!ʥD:W{?V3z&Oo5Kw)1 r7Jb\p.DR2N,I~[; Ynfx}+;00~0,D|Zb9YYK\V$#,]ըp:AQЭ!0־8̏K ^;luיף|i]7#Rb\ ^$՝bR8?u;WGD/HOxi M| $}Eb+HV5tjD&6j{<`֙BTAz,A9Kݰݻ];*/9 gb4޸Py D¤CGjx+.=XJ7Ԟ8˻} u1RVRՃ2i򘶜laZ+0{DRxfYM28EN1rDG,\;DS~%*zDVfOEoauyO s#eUy&$ {83C(yVǘ?.,azdY8BBJ7Y*]{@s' պ("km н[u@.12]fPH|c+2ѡ6e\i^~>멬QN4C8`uLAS Z}L)fj>u #QR'ڎ^B:r0}xmIA2CWW۫Z;Ϛm!LB:Cֳq kV;sދ69(I1JO8s({-Onakz``--ǿ}RUB>KNǻ+)f[z9yvroO崑Jk,AۊRSQm6h#x^7 ̏0,O@(qنTΉ5qp/EqO{TrO3_kҗ/8w, >X϶M^)icRD;9x[:N$)}ߣ"oiGcоcw#ݣ|d|?ߙE&4_ˢP H$Ti̶ԏԆ]_#?; 7~`KaHEb uWN y.S$˾ms{ip 3cNDԊ oK"P 싹\;'ҫl!y>Fw[q6uN0}5$O)GwB=~{&+ AKsD,\03m7jƇ894kF 3V*,Ҧo9vbt_R]6C9\R(0(B6I,H"vo1o%5)`$\j1OTsR|1_3w>׾6*ȡw l?wfԘܮ.-͐$K<0}ryttBu]).@M\wϽb bN%RY'Dk'Ok/wǽSZJH*zVE q~Vqyo7IV2OK븎ďqQY#U\ఌO)k"5EczRfڥt#;@&ƚ܀S':5]/,9V*gSo_ |iPcǔ sxi0x[\q5]'8LJɐiUUqgg hYzQ0c;Yj$/7_K£@CmIZBNvj(]i1 
Ɛ'zj%ꏲBB{[[4j-Z]`'MfܨV m:Qc"IV⤣4!Xq"$-}Oy$K}v҄HZ+#s)Zmnd! ʴ3k eb=־3bo?j Gl$-FVr$m\KNM@VUJUBLIƳO`!Kt-*3孌ΗӦF'.w$ٱn؄L?/N&R4e inŎк1*h '%g1^\ʫ7CO},>-x"2yvqi OPLsT]|R/߫[rp>K){s^'t`L.1ף`xUoJ mE149pt' ՌSq[5^.̖W־2yS.\g/C:%8>Q6XPyC4ڒ/61,_qa_5:3׺Si}{L4nUYZOrEY2n:^½e(= o8#弪DO3$RB5XMaN+֨IDaТE )ǘ͇FtGKQDW\A|T"S O*TW ^ ~}B዗ѷʏHq/qFZ1̓g›xVQ)T HS|%(DIi'z~Be2FGwh )@R*ֿX;.2hb䬣5S^ò!E2.S+F/W𖇧,pٶ.߫RK܂`VxO_uR!:6Q px,]ѳ~PS$tKF["sDܢT~]YlZ'8r\O#ǘ ٸ7׫\f9)2Jh_&DdWK}^4lCy -` b4K .3m^G%/:KuyNĽM;dxHsҒqpbd'Iu$x%E_@׹S@ֆ1ZG3ņt;Ǧ ]wNJD%y M(yr2r-ȳ p 62wu.m- q޴ڹ{uA!w\Κh1ѫؽZ7x GL5$>6(SUwWk o(iTBHIF@9׆o Ia}Mk|a-}Nh:*'$Z+ i%?e}MHdob3ob*n9zC|"*][ְ2"I FJu>)#I ES=;ԯ`Zp•™P'8TC(eV؊; Y{V_? 6tT |α^cvu "D1BZ! JR[]i( G{#P=or&>D9Kյt-KE4yUzOca&7){Tq&ނ ֱȝ3*L񂺏C u QOόz5-)>J!$poQ<?G endstream endobj 681 0 obj << /Length1 1470 /Length2 6650 /Length3 0 /Length 7646 /Filter /FlateDecode >> stream xڍtTk.(1( ! ݈0 0 Cw7Jt)%t ҈?wZYxEGGnQÐ<|yM}cQ / &`3 \prG@@HB40#_/"/*{G8BrZ4yjpĕM셀!Q?z$w8@ A0&iqB9`(vH8 rr#l9`ֿApT<uY~W(@j:#]y]ZJ"Z!] ~էE@{lu¬m~5a |ATrAA`$@x퀿x9C~|T? G r76D+-FOv sF-^p? 76T699'_H/z F bUa6p?բ"_;Z Qo1W %7Gf r:z"%M8Jv} #ZM5鿭HJ0[yxPW%'Z/9Ba+׷_6+M|}" 3ay$ , G  / DP=l_kAO *Q+j0PWԪQE!_BCANPJL@g5_ȿ!Cu/ N-߫e=x6qVڢ{"3y>v.rU4:ǧ}x} ZLQE)<` aَF&2޵42}0Nכm[&No$,)8ٜ=c䀱I<4@5F 4B%k4>}`a?'UzprZ o5>Z,FW4v9oe h1jXi% H$]!~I^8ylRwZ P(U2ضv\4>2f͎;!hrrGK*zyCzstkaq9yZ"_]Hl'M3ydd46g2Ѓ</qfbyeiqy9%R &_sY2jP#m>4Z#U_bvcr:vDOJ# *gզQ㳸Q^KO[+l`>Ry\~Nt2]ssw8C*U䢆*7+7Vis|B;WU,bQ餝8|Hr9U-H슣׃FRCH40^c7q N}+KUa^lw^]6y| " 6]k,$eGtMFXTZt x M9hA}6f O-py'E |Q9~kRxOV:e6L_-e='>|/gon(ne~{r~Ǻx?6sP"D*xz^G&|Jt;VNPJZQUoh&l9ÄD&Z}b5A v9"iy9wr{os~>8;IBtлA2$~ 't~)Ԕ5FI{~H5P{\wuYyd8΋:a9mnr@Ol3TP˘~ݱ2CP\\}-đKܷvB';x.8 #RDp1AqtII`hwz1sux-5Vݞ\ )~{ QS6{G.ܸG"_z.qE5W3&}נ26;l/Y'׿/?8R(bNZ Va;֕QP7:`׳F6<aJtc[+娾@ȍ"V*Pa"\(X@FYeuȑQJW9k̺J%Kt~~.Tw+ AOYWޮmzo8uo8 }F ] 3!y Y5 l?M\bNR -L"7McNLѡ ZKvP(R#,«nCځeщCwr3@s-Zx g߭2Dð H?--ro~J]!S$9g)'%!{Q>¾;&%,YF,wj:䧦^&C2K#eo{n]8\qVLuFد=d]$ f{HFu\/v?4Ux9$lY`F1rXY_3oqmE('Vz8GEXן8}Ŷ7]Y8FWlfS.l,MiF{؝c2Xd) P{_a/sC|d*z}WEKoESJ Ew辑Z9L1L! HIF*/f\۴|8A@ǎp~O}9YyZ 'aץIzhw."(+ `K6ͨVh9-?] ~eBл(f1UOB,r!m |xs<-gwrj>&K8(RfpzN8&[)[؋>vKE=++C7b*iu}0Vbuϯ22ȏ3|8QwѹHou#duv0}SRWl}!ZcU`̆Q%q5Bdb&2aJ묯e{dhME`waE6. 2= 0 gҸ|&SL_+<9Dd[}n[*8qÔ|S-C!Ѷ-òְ殙@]K6-- d_̚o1j73CGKՎaͬ"ibJ2b߮|>{M%2-lFB?ؿd3m%j3uT$?Ю?-ڶI2}V|iNv!ze i|iab<t+Œ߼$:E$)yski ͘>]JZAmR%=مG6`R Q7}\S`K5IxmsFpG>8\nvMX)?IܯrHchup&0ƨ{|p8]SW]T^VcKؿtsp3Mr]tt6D{H\Y+VK%#:! F6lӫhFķC@ړm̯l !> !hB+?l ?x42\2\djXjK _R?*'>П9y3lXm4dN0;=q`ual>?v|rosCW4=nGrM=Y.Ǎ,nQG _9滦ݱA^fa֞Z"DeMDwAE"q )7Mեs䚠;̽Ys}Ӣ's]|kztML?/z8㸛}Rx^%n9QxP˝YoRSr B[͉)*C7\/](FjLwG#g"I[Ѐ^݉ jՒƥ0pBhw硪gIQmڇKFjB[ƙyB} [ߌGӭ) {SoQ:jz#@_<*y'X6gA\!q x鯥yI/5aȩШXHht\[OދLhTa4?>H։J BT-(z:'?ljjo݀2(Nm֭'I2ݐ}]`~ Ie{SOFTG,eC:v Hpa,Ӕ7ZMHݔ9̇շZjPrßo'o7ϗmNfHJ_o&?ȥ]nLG~OLz{N5{$ZJu)pl9h̋,!GDLTc)ʼnW )^<h5D,\$ۧ9xgɵ uzLj~{`~cVY.g)Y, G"Uca_{vv^X_tj:>%i`. = 1Go 9ηo"hk!]a#1%0xBAjSuܷg$L\q=E/I^wI}jޣ vL0ל8-`B'˹:+Gs^GuJ7n~$ gD&-IZlֽ"d@63\i-}5 ?+1- OwkVy+/ kֲ[oJtD9V='^at,4"n>#N;Q2zoYuzlPg jjћ )x<&H@KD0#7:Ե~G/t:)SH~Ǘ_F]\ŇrZ8`8 ń1+{ DHJKJ:~˚#ohb~CkNըMb9a|G'(Iΰ2ãe$;W/te_)2r&^;d7vÀ-߀vuu:m>t! gvg΂jlGNQ>ôbq 9J〦[bӘKEc,7Ȏa(s/dm̈́ jOˮɽstuG>r! XqH&kU*f~E$xyrmi$UO&YEaZ2 CQ%83ُSyYLn'P\C` !g(U m>OFh^ȿ?nA!ww:1{O6Zp.|Ak^Nɖ$r,4ggprcmoǓ*[ 4J5=x-#Gpp`x0I}}=/·mJAg$G2"Vl&=~kyLz]2ƒe5܈s65]YLѣ.zZНR=N~#xJ*{?zjl\,Ւ6Wssu'5w,D},j4M9#I:.[ xl_WNM^ d;ڼA[1ue :iwӐ\B<;˷? 
: Ӷ˥5z1hdo(/ƹ ߼CSᤥ0 HH־ӄ{edidԾ|Dx`qlpEwp;e;<}zkJRم2LNM[Q˰Vb͝=Y׻H{;v^Q5fI ΆvV( .O' ;Tcuޜ=#I; ߄S6*{V_]blg3zSԇpFލKlU*ڌ endstream endobj 683 0 obj << /Length1 1794 /Length2 12602 /Length3 0 /Length 13729 /Filter /FlateDecode >> stream xڍPMCn 88݂ $syew﫺LEQ l9325dYY,,L,,lTT gTZ@G'؎@c7" b`errXXxvHL9 Jlt~_ZS:+//7ßQ[#hl }6=+=3Br@3%mƄHа96wv3v 6 S[;@]VlY/߇`ebwH3lkolle)&gwg6NxcWcɛßҍR dF?Ҽ8h> #=\k;׿dgfGf.̚v >o&,N6t7dc {j́o]gG?YYf Sg dof_v w[X[l<3ˊˊ(]/FvN#' yTAG9ܷsd׿{w.%[iO,,oXϐ.#W͟9֍mA6{u(f]" bo jgьL,ANR w ˮǼـ*`'/[ +֚.f31ll\cGGcķ~#NTlf3-Vrq0Ef7Y`V7x&,?b6@6?m#y[MxoJloRloطdߤߤ8ߤ8uro|S|Sd{S|S|S'ś8:N[W|r@w)ؔ?Ȫ&JЍE$j%{~h-cF8,i|8um& SN .l?;;u,pΚԆܮDfp~<nnyO j":*]4BZtvCBd$X>sg`Fxtb/y$zE o4{ j Lk4.L%7u!s-f$3ݟ-G~N ͎N$BOvNriLAwH}K臘0}EVN0pJʚM+dlPQZd6}7bk'S ȎZ1\t,hEA"CHrLU@5y:& N{dnanH`>>5{/^iFJ- gW#*C?$3Y78_nxS'N^g"z9OgwFC_j<^3mεS^5#}&s LE%yՎrm Gȩw֔պ^S~ՒSQBK'/| ~qQ̥dt7 bn~2ujqExs&١]ViY_{sI 1Ve>)~88WY='AϢKK9q8ѕvS ˲];ˏ/LƤg8|.6\Q%>EELȗc:Ή!ݺ1c*϶6z3X$1j3nR6Egzf,_]vUs~ʽ>l;5ai6{iu_Ҋz/Fχ[EkwQSȧ-O[^c1*,)r(5&'#ra!I,ZԞV}M}z. ?IV~bs_p奀ZOSCB1+֯h():P15jigAj^o הּD'_?#[5d;KWo!5QK}/ZȚqy|&wė8VKr/wK,b:&L飯sJ6Z|dh H(1Wqfлtr={ؿK>y$ U>8C8I9Wpp=9k6l ҼIZ B?CڑٟZ;ldh "'w"Ԭ*O$* (Z GӝHy Z%/'R|yOZ>.ۘ_3>HMx/zuHx!IUl/<uG)fZbzrM2_Xb]WQ:F4tX#e|,Q|qn'KjV;ZBf[xUl'S)Ey}UoRmY4/-Z{Sa›\\Ggf{aK;k0*ˠ|[k͂3"6/Ÿ#<iGMd%/hf`[|}݈0o=A.ipe\%Χwg.F)٘O P.rRѩQ,[r'ڞag; J2;%7νJ|~Q[;0D1obj DּYv$zv6>uﳀ5"k,bLͥ^mتt‹CԂ*tMmiEZ K .0WguOմr٪N|]?r^LȢב1-m>p>I$.繝5g. xq&,cV:1qH[ hF# A\ u:DIP\`$V XXi3ۉ,>=ՔcoBOK;vDKc+bлLD?-o'mGex3:xc/pBwG%%'kWR%q&ّ~@f@|og:toNےc?gbG؅EѰC|*9Plw#lѵ90p Eop}GyB5hђ~901c*hY}@N&#MJjAT,[gLm1Lx4Ľ"T]Nx`;2X1ZЯ 5eW=K.o8@tҔIܤFL %>Ƣ^OLvB\ }L#쟣&wwdvmZo&7X8<x* Qپ"25c.>2|xsuNSpR4Nvda$u.ͤ EgT(z;˭~]Z`M}H]x̓Xnp Efiql 8~>ik՞|TTSlt5Vt|*b$8e$Rx[֫4#f9~$-rUbio1s)o?yz 6rsstՀ n#XrD,E0sD[>7ژy_kJ 3 #'}A9eçb>`/vXg!=uXԡӘ ;Ժ;/f?:^SNC0P@<2x{FNuF,b%K*i &w||2Vsd-u|zE2jDqn~U3`) w8D}$,=0#L(U]D{HN/׷D=ɕ/m$Ѻ\ɂhVn&P_$;4Y㌁RtjtUw*sc9h~Tad9mFtfԫ wZ=Գ$>*-gϱ(u}UPxxpdʗyx-k'AW|H(aOT^3%BԞDؙU@gZ C> mOY@Aso -2@覱pM5c]ީ5W3w?lK}_GG>!Q;? &ꏯ1sÅ빫1 "nH/s_O !.p?2{?ԋ~d"sMEh=4L( >]nc_,$\~p=x C_~{S&q4epI g]"${xiYh0L-aL[~ ~v,GYIM׆ݰjg4CFPk\dd&^aW ``/үň a\NxP|K3 ZsG = To{\j*ˑβ0xFŶ3zj_θS1_RF}aGc6Dhg7چ%@wh?fMX{ց; K}sg~2 t$MpgL]d p/^~GW&ޑSxߑ[w1e^{kw9~4E(l5+Ï}с'\Pii])4e!ʹDO0Ӵؘ2\W c#)r9CpOqx!X E׮3ꔧ/1L=c ӕsk6kz6ԙXjnbdW vNU]% ;URjҶƙwk-O&S暸 q誷"!k\zYځ$s2ms|F[Q6fgyiUErT鈺T (>Y҈S&yJZ#j7 8+Ni@rVh)uVk`|B5+d"P#-A9q,Wr~[pS27+2Cb&sZN0VLVڱMH]Qi ~BZ` @h'zdeyZ \ @ÜtT28 F-!s)+o!z'O6&IMzu\4XS1_pȖM(ϟR _: @v{$΋DFARN㯼]yoˎґ̶DGgYs)MވdV_N'T 7ZpNp0wT57 ʵ@ Գc'am&P;88brQj|VJH8xPk%¼ ZY$]٧tx AǫG.țraZs)wkL'ɴdK&DcpVT'M:'=g :"9L~n$FSuh?6dJM1ӒTt2ݗme%Z&}[g0 .^fHôoEI[|c Dz8}ԐW5Y#勼%a䭴친CV=b\fgCNO)9YBx/en&BFODcRNҷ +sY^{S98m|~`Svҵțzl+%k#{W?pa^$sޛDeZa ٿw&>8{29P ThPsF ɺ!K1dU 6ůߔ$˪?m8仍UW1.i"G.G`7]-ͻx.@YF)olʶT՞ǒ<)!9g_Xs0Sя쵛ѣ'Z=#lg5'(nQBOv{L1Hxsӹ∦HAץAnKC}y^/LvawF>0 ]:KGt4}-+#{l+i.6.+X H&3JwRP*^64L`ёmEYna:}IFa&Ǽ\6cR|?BZgfBI%H4<AVV.VEBQ,H[iI3v]&4&^tRPho`}6DiBᐳMH߃JU8(@#=4 %qv)O9yIz277N [=KE!Ѵ&7$a G[pTT%Um1a=-QaK9WAB@xH!eBT|TMΕ=5fz&ҵB <{di}b'8y_nVx޵۠Lf0yn9~kVFUW0X/W"EٯX`kfiyK]sykդaYa_Ɠ1kD|Nևjni%an]sۻF"(H] pz*CcB)>w"%="t>þ\4yM}%{!Ո\xʥ3:='KXޭZ,;ӯ/+{L߲ߏ?az\Pk]ˏr ڲenVN)׊إs#媊k?m2a?RvYGen+#qV|^d:mue␂ڮ^ǗDX'*Ah,9uQ[v-X)Fjſ+;sW,R %BVѸM;}G†Ga32!&1)z[D­y.RJ\6A>֐d\,>$5{gvX[o5\hV.% 77J&#FSՊXjg)[2RAC[Tw Ή3,*ڹ#6L5b!/GhFˎqo3rVCo(E)Y`1-3сgTa_X%K~}PC #QDUP"N  bxL.)z.9N}zfb)kˀsU&N.ŋ>|FcF(iʻn'-h(W8_얌`A+`kZףЫ24$-KOj3' #+g);eZNaZb&U:[]fŃ.8Gw|4%5 QEn~+ W@\ս2=JE&TDg,Iz ,o C6ōu=p%u-)Xz}VXل_qH,-d9m;F Z`c&n"J/К@Zʂ߻Lâfq8 *[zg|?m؉IMǸq3ͣS.@<SgYo -P^ˣQ%엯BkNBVt!0f\*#e-\C,k  ? 
#O#œbjB?p>U}{mޯ VU<'2'i+~#@e2>{?:I̓s 휓d,*JW'-MLXgbp!^Q ՁpM”ЛKj_=X"HP55+Χ4 N4$ܝ?'^S7ͲmZ~̃ת!c\b{x`Y gK@eFKSv1%L-5{le4M8JI;D XÆ7lVY "dz07}cga|U"ꍼ^euI[ 1S^p}-bFL|E;,Y :+;PKQkrj ƻgP^*oi ec~{򶥅vl,잧+ p#W~,{ޕBs-؏zbm'CLIV@}jIU=˛ݓzjN; z]%x* $;b|ѓw_t4O$p AͶmaѨy03[# ɟ_ Ј'H ѶmeҨΐZ)6g6g=55-")r{  <>> Jcvo,vB ۘO!jJ P1E"}pC&<|n;ϽJ6 [We.ۈE[=tbe}a)7MPKlf6b~,B&_n@?J1dR }yNM,,Z/u5TSmOE̙[Ogsz-Hu1ʾvGmzKp+OC(*OL~iR(yb`?,CRNg 8(&uKD6dA/ #$I\>$'[Dd|rY=6 .׳&E {Liߋi W:Фw$$2Q5aqL*C,|~ >t$(πVgqL!Z,6H9곏dy(3^/97=NZdMCSKk>J.y@rYK21r!t循sв0dtITd1{vv{`E|p Y0=@Ï=li9s{,ٖH~w=v_&~pnfEU!,d tm"FP/#D[j4BjNSe1橞zjs $ W99PfJkw;*ƙ[[gY?hz,}tK!sh) _]tKT홿`Pc@6n >ĺYmyth$>NxyQ# y̨Ԫ--rݺR26mrQ34ZM&0dos 5H|"lw?СD}{E48# 3s`񈻄5V{+Ӈ(saFO<$>^rSaV۪^mk8yt,+e+ՐK&k!#|#VenF1ⳌXFP8 1[@nssGfo~Q÷ixe «7/\{{8,f]dRq~n.jһ12r|mo+v6Ȫ^1{ŲRF豞\up03dl MUf<'|2JjGVͭBCVgIe2zޞ ö E"HQvS=tQܻN/|zR/U#W<wmuR"u bkPyPʪ Tj_G,S'e*~7=B7P%ݔ>TP T6]>͐%`*ZTL Vm' |I:m٢_ &}1:|K ]SDj4~'@L4AG[s#.+=N'$s@f|Ώolr Gf4n|KTSӮ\qMQj43/xEwe09#4|~RTAؤ *neĂRUk6$ }fwޅ@5uYdv;17QB<[3VɭW3аo~7EE}7ɮ4$EeEKnovI̾F\bؼS-l;@<*eܟC D#2lD?yd$(_e eeVqH<* EW"I9[z>@{!^F}ŴSH0k9&w~Jat7:w\H Q"4jOĸmhZ"aTv|\&PhP{(noDI ?!/3y/J'@Eg{Ie`7VpuH(y OVW ?umB 9dC{\%i ^{Og:HQݱeWWP>FF[?T; Q7U O$`̟J5CaW(|u{x {y(Y 8?&"/ yELA2c>NJ:6Tt56`yߡCg4-ZÇXU`k|#J7BzFZS?ju%#uQ2rd | f 2[k^ָp59:ߔ,|Hsjqϫ<ƣsYf.;F\uxWP:Nyk$Ҕ@w$I> stream xڌP۶  ww܂;=xpw^}ޫR79+d*ꌢf&@){;FV&>,/ Rb#doq' X&aSȹX\||,,6QwHL9{;3"~hLi 2@vEcK-8 @t/4..|LƶLNB w%@ tr*dl 2&DJ%o؀Lv` W;3.Pv߽2ݿr'`\<\vf)8ۃ݌A6&`en U wyΦN g&g_%2eI;3q{[[3_I{2}vvs_E:0kځ]V,.N6n0d˽_z;;E}A@?n@+M3 hC,wytY `~YSY[TYs&&ffd0qXY9xF4Xڙxܦ@/%{4\oͿiu96x[<.PV@3>u1/xY9X8@@3#\UUA-`+u/Sk q%L36N.'" f/_ `fw5V.N_ ,,x̒!nb0K!6b0!"(!p.8GCO? &?nMC3S{u/߳?TeCL?1B{W]X)q.h .ndOl.cp 叔ҿI:Ifv9ڃW b;˟eevq1n??.lW7O*`O^@C`˿o4k zMMCBjD w'8P"ov$U E70JߚqX ~dD,5Øl3La>;+wuۙY ߈28^gJ P|J?"7RJ=ݴҘ8 ϑݙډU3CjuFlgOM>!j'(\CDY F-&3":18u*.+I\e{{;fzӔAK2z.pJZU9Z0ǡo `Җܭuqez"$h>^^",7X͙ef80Je"tٷBƴBo2s>[ 'j9r5//3 _{ܔDpُ}[9mZ6Ij0aKzS3fܙ^D^esx3 |Jet6cgg&\'+f].BG4!߲ON886[ YW̱p&*T\Zc[(nkCc9;^ xy}XXRI#?s\ܱF`'i \$BGk^4NuqX^^㈲,3rCbp FnG#FwU 3\#W;,&SRɗ"aVa8-es!Cq; 7*M ߶[ 0ԁ̥k}|FaQaRB9JSM1Sdgk^>bB kcVR԰Bş6 x.h*&rEjK9`+R,2v |57g31 *QR۱zSlУ= }e?$@ nEd^ͦ'Q:}Ʌzk qx(`Q wM)V"a?Etcz'gdΰc5 aLjOJZ'n7,ne44UIz/܁*e 0P/a*|p(fMjVF{gEg (\ezh(E[a#[(ox l.r2ξ51ĉSۚ%Fj]Y),et#36g^R)eM. H e8%ץ\TM6tany4M4.3,6gq68)?Y&\&68GNu~D9k8Q_Y o{cr`yYVn K8 p`68% ڧ< 䩭* I1p׳:Oݰnh+Ae=-pzb(r\3FֈѡUoT4s@Sk%2PTQ*i+g:zcp?rC~1'HaVJ԰^.PӰ,^?ehPA֥%lwp:/H|Qk Iw>,ۆ CeVAsd:GNr TVBϰM#\*']JTBۊHc 2| g;̿~Rd{ hӝTݾA^^]{h={5BäJKIrLo$Vvy(Dv&dkoEdֹuNѩ.QoCMhWuLg>/ưK/7 &:*ȇV> f8ֿL|j0xӰL5"25kЮdr(6&2M ]Z3`?2 `;9w{*f's? t(7RhZKp?~-aظ ._}94Ը3 [E &`Tx"7Y_$jMQV& -E?Om񥘶 6b2a<ͳ3䂑p!"ؠpt}P`'} A^ +_?nqQ+g# e"coƼ?Gc@^ЕR҇f4%sFJH_޼k)xsfiKx@Olq&;?Sʉ1#oI)$P3-^S/N<^̾F;5[ nMr#{Z,^o 15c?/J.)G\Z𩤃 'ni\K)j{3AGhRӐXdB<9@͈A4mTWRkz Sd(scLyD`|<Xt/6C\HKx ῲ4[߬b!CUy\v*3irI V& w9U1>gqy0#+axGnombGzQԈVr8hGDٷfqjl{uvD;MN?tLݪ,]Ip/8Z4EڝLWw {W~j_h>Ϙ=Yl/=M|T[ &Q M";(PdְDݴbFMO0{#L5#5n9fh]6MT_T+tI89FϐDUpMKj_Nvo-Wf73BW+-BW@ݪT=2jҳMtG$brq"Sl݀ /zbWyeVF#ްوgFӊF/9̫ho}̮]0w rxP^N])6ϊmb87X\ඓo)Ӡi+*X9p*7Iߕd^ }]j@ [y1„66ڊ/r%5 Օ-Q 5C*!2hST5iK3ҝύK^nm#ZƅP<"-S%l K6Ֆ7Y rG%Izg"R,JV, 8cpž`(-u(*M2l_Y4 /hI22\ex8mk J午Q TKRֽt"6D5Հm*5ie&cvKMfy֠#+Ptt֪md]dQ?>Yܥk$ǰum%cߨ\Zo%G2mS4vr.uvpK~{xi{)$oKQ6ZY>!sҕ4%? 7/F;s(>ɳX^^3&l sRcrT},ϴX@XnTٓ]J UqQGb, hM`^:N(ȸCꔸ9\5_U͢w @yXO꿜/}o e: P%qA#l !:91ða\昺ĕ7v՝s)a]sw :bcj6 1}lxq;ҁͰClRr[Q-4DZ7|9JxF1ìTXο+I>XՉ&'-enr}Ŋb>DD#3w $[. _l޵1t1xfO'|. 
TiU-0V$m<l|QW.@U`2f2݉o"+zNt8>6͔ kۓ*yOeFE/'*5wIOs;F?g o`p6+^`cVH)6f1ToܝCtX39[ɓS&yq1 1IԺo ΢527hOeh`Igԃ@ r'NEV 7 1}=qp-8¿4"oPVvVVF!jVŠOux@*65 wzeQ>F#N9BuP3_H-ʰ#xө䒵 {& N`<.|6rl1!O'["qlK`?l- /(@`$mcG ݜޠ)ԂԖۭL@XYq;E9xpj#hII*"_Q?ޯyU ^wZT*WN9r|*pg5w5hRWc)p"[hB*WK< QA$Gy$Gg})T ȅtH<1 +˫xIea]d47EYqCx.jf@PނBʖg[_*H~Wᦕvps poG] vt1NEt?PK/7fG{]YYd8{Bl|VH `+>, ysЋ*ʚ陕oAu(ZO޳ba&_! _20vlj0\d_r &Bjg0&TԨSO׾rSc~,ׅ`_(X5(nLn:nωϧ|..<-s=6Ts'8B8!yE@`_!="\j Uc կt YOSZ2u[ٳTSyA<c)硆;>8"Mײ)ORvc-f-|g*kUwxC﫮$=XH=>K&3[x7tS Χ3}|(.o#:Hfv"n^T07;23:L <`kO[:P2LJ!U&ʻicv/αeYD=愚ny^fH6v hJڂ&v0v xa@4Sy t]d24l,]wSҗ0j3ԝTV@΃z;VR9Zq+n݀;xTR~Rwz_E"m! 85\8櫡ZJ4P_(7ְhMyU阧/ "}'NNF<ɀ"޶]TڸsqB 1訥1;*,gB8,~X>ѻk^~A|Q³01ymёlq0{ uA1 Ĵ,!`C*6!1v?+yɭ:"r%_7@r]mL1jb̈dA=y\uퟍ.L1i8g_,}DtU2\͟=k6m[0 #ܟE1H+T= hwQ萺J iR}Po Z UC|eZ$[PM 68+sv[F?@7UYHsb,9ڣF#?Sc)\F>,5IT-lTYo?A}@ڝ˯{X á IK?%%8#-t9-b#oc7 *kXAR͂4ԏr8>,+:N_F*"Kq坩S+~?; ϰ7P*QߴQ 4NXgqfW.Tr U9BiX^/

PAx&A0ʂײp.<7dO =g eRFVePD6M=P1~rAUPLmv88pY&ZTDwAVa1]Y/KH0>Q:5lh&DRK׊hA]HhDwVRՐ-u"ޏp lR[ K"lzux>3#5;jtCѺA_V{`I ư083Z$׋>%vҖnfnVr +B R"9w/'H?2B\=,7oC+m_1Fhm^86w@FŇz5U|5Zпu l _66?pe5G-CP)-دhijj/:Ǽ ٪Dл"@OЯuict*4H^U#j֜(I ql;z5 JI`ӬeOj4* (R9V7xe{r}?0Ғا5w Us >5~3KX7lASwrZo9Bݘ1P߿}[<~vԵR֛v0vq"j%oOfb=z^SvFeW}Y۰[BD+8jڕH*IIv hyщa:@fܔ;$e3@M֘_Dv:ͮFR5ծ${?uiCgOsJID]ʅJ$?E47v6a%4BCϳ_/VS_Y@RW\u8e~Eؒ5(;iR0s v+!B C.YJ\ J}j(01tC"\JĝGbscaj`art ?$,|#jxA~5R\ff<Ֆwaq36u&`;&!e.vS4vh\N\:CuK3J79=8;ty]{:Z{YD$Rw췦ځW[P1f-]Z1|O AI8F_2' 93_kOOY 12#`uEspG+kOwmωjtx'ϊddtPqI)I߈\9IK Wץ=r?6$-CYu;ִ}Htx4 i-P}uً4=?Ij})]w/q 4!S.U2lFݰ{=v$~Q0?L>9v]ŠT 3{B'KO *û[N(2;'B!ވ+w|9~[5jBWmfjGDRqY"/duxI𵕑QS+(S 2M-P/l6s]u`h]1Gj#}QaE/%c(!aFlu|$ʹtpX)Y3ye*xwHBڙSJ_N(_\}ʺpK?bh'?y'A'XpBYq1]z$20CRr3XegZQK+~2՚0@7ևZ##r CD&v~x`jhwg 3R+Ͳѡ8(Q (Ѭm6<4w7쨍 ۰UcAPuqW"Z?{b,mL^55~g~ IJGAthp]Z8M4cItdB{'}t:As7` Dh"7Ls#nB6'ؔAlLƥ⟴Dx Wu`Ͻ؈.fȕWmkV)$9SgX}/y*^@ԇ2+#1渎h*{|$+˵C֪AbpB&ыX!RJ=ǴE?#ó gaKR'89Q 0 8έhn Mj-(cz //+ך kI|3˻QcnUnvkGt G"ї+E6) KPAW1E0'0A=,0NgOoi{;jgRn :o[I÷CVG|:9uMh7%z:ȨJ}?@+1W+ԃmWFt9R}_FvddGZԼ(f~ugAcm;6fs'oQ8w}"U'JܙR]ݔ3pMY镡!'2qm ~fN{\*4 7葻E( gxk5C\.:=v&aO NHwρn 1 H!TP:bՎ_8+#9Nʑeg\w]VJg&'N6> ;E1cd&<_⮶\2>GEL. aWV LV^`qzSKx{:/#K]5U7"/(-昱y32$۬jKfi0h]D* 1ڨm+ ǭpnD'l J2g2r>}Ql < >_hT \V-:Xغ&ۙɏ,m["qH9M "? =`"|cZqY10 \= /}Ac/)¾YВ(}dLةfR\ʒV(m?rE7e2Zw.&dd{|ǭoOQז#LkBF%w.\/p{>a}lɼ$4J IQͥ8YӪ32aE$2*ڟ,L>|G6K%x6t>'Or:ߎ/1wYh-R3\fov[u %N)݌{z.$ ,֌(SSX%0^4{Xg6hb:S 9fa38&' ^T@'\7H3I5x_k3Y֜&xZo/Xowp-ϻjJEitʂ> NDNɯe,R2C+b=l2-&Qmi}»6n)N}D!F\_ё{|2 }w&D>*ΌeD' Vt-3)t1z)=->4xf[nQCRY2p=” 2-da"GL*Y %b~ t*tDka9&wf$8Owl|5BGlWu_ᄏ1*uvvNz%tXmu`-JUw# )Δ c 9D(J$kŠ93zǼFIG{r]\z]cp9$I+gc+**9g]Vqjz\e0<L-+_`,\uU(V UV{:2- \/':^@J`r'T쉢),Ց/n{F.)ii e֜pslRoWzKF_{p^|vΕpAN0CRGMvhB-Bz\"HTQ5rvgXhH/"{|x֧gPs`#8 #F#I Iss7PB?/o | DYs.;սmTGuvBcѺCgyZqn:؉Oi,9,/4@&E!JtYo]tܽ[ ^8KgQ1'd(˺&`Zq3Kr }'Qw<tj Lg{]rnЇ]Й9 +!mp%.tԝRQS kE_5zyQ(|?TwZ-H|{,[A{w~93c|Uk'GXGݜra{VGH/_ D֥^SgoAR0&?z:>JLWHM{dXD×.kpDeo$ϒ4+lQ5-}KGzյ7r*Iopm hľe7uQU>Xwj 4D5p^OE*bԡ$*~k@NL_i[)HdDYy㚫b tS@I}s.r:tl*C:؊Os ҩ`~)`?Vc5VGfL0R$&HۈRIZV n,6IfڨQ!*W !!k b'i8nsLN!ǻ=H_ sߌ:1E{@S8'Lm~0+-L[8]ڐQݰ^A5M)sS F>^k#8 (<[=Vȹ0gƶz. %LsHgٓ RW4tzH{ɏX\r}Bp+^rz3极Z쟨xBKAHϾ+EBףpN&$g1tȽ/pd5j_^y26(<${UDz\+h9M }@wk1!ͬAChz8@6f(DlZ>ϤװNsYճcbyYF~f|+3-Yt$>3tb= q Mi) vⳤ %9 U:,倪@69bp4 E^A`b}S(zN'❡kA̪Mhzٲc͑Z7։0iApVG|YOnTɵȖ=7FCg56Qc*]ٍ5 ,j%yJ, j&e8a*I\lʓ!jʛZg+te3};7 ^{?Kl%K9N~Wvʏ#'1*٫%ABs.‘qJ1gs+2xUZ%!;0bIcBצ/f ѧ4 Vǣd Lv1HP9K4Dſr)!ԕW"0a 0D*id nL~-RV[ ] ڞI65WJRq['09wLsKv w"ə(EA~٨%j`:%sƳzfӀhN~S~6|6IHjRbr:))8_hqy|?/_rcہ6[R:aV|M0:H7ծøOcͮHkY4+Li$4S.V0!2,E4H背nucEAV^˗j>^v!xWĘ# u:D9c^@[~q0J0KG=ȩCt$^|jKmH1f)Go/ # a@I`Lh_KþpO&ъ < 0*Ql;'*3kvK*G@[˪f5bw ,7*M8\ fFivn#:g,1WJw;D1;%sѽع=:] ~g$68\-zؤ"alՌh\31#M ;W^f__VQ$?耆cT }x>*V!r!ѡZ<>Voەܛvu 6chnf:vm0ّ #J#]jQ|u) :OVz%_=ʠY~ႩvKI/\3 ~~(.KNU1I{ qƸD-VK]iD,z((Fh|:'2ىw\zJ"* WzVi4N<10Ŏ>{S6()-Ty je Ժѳi㒡 M+z5@ 8։Ά.Ym:G+C!` _7W%رb@*^}W2矨ymkQ0~̠^ y{0Me G,Thyj@/NQyF rhLUNu9&݄|M7:kڀϣ D WD$h [j\ #.|ڀAHzDّ"SjTz\h \A2AXaTgGVOpHՖFc}YsHAMu|'i\4HgB304=sDSTu3#D>XF0Í${:Tuc5^w fqAlk w%7ir"(-n~`@ BJF#,TUxǖ;2mbHW %U#p[H\ZpNL=[=- U Ơ[(ѱɛfƒxpu{MA3vӹ7:V귕ar*xs;#?6#Of˂$sr#f"*gOH!7b+& k.a)غ?{?,2.)6vk#$=l޳qcD2hHEԮe'aI+x|o*0C+cf龚K]RW|Q.A'#Oi|~7Pe@hǘ4B$Ya !8fQ75X NʩAjyɂXY>ZןV{b~ͺy)ff>I7W+7ZJ9>h뽦894I+7:z-wP9 ñdd533|s4@B}{7VA7S_Y &Y콍|fY$|1L0-|EY,', 漖hszF +hDYB>tRH8Je5K͢}pH[^U&[RC~]y2u4wML#QԒf[QfB^+\QFmex0z^%MWCBY:AmIJ\wrF]X:nBXn.>8Zy<:|Rl#gtKP;o&?Xgm=H D7eШTbhl‹?] 
n[il@;l)~f8[WCg>l0g* ),*)͋տ:1_we)T*L4vW0jΟVXp\c.:}Keșѫ8?Jso> stream xڌP c3%www\C oιszYmw9 1P΅ "`bbe`bb''WtGOtr$5r]m̬ffN&&  xFn9\*j377'![@h b41؛X]<'#;35 t:M 74xr*f.FN@H`cisڙ v,@h/c3@v;:yZڙ,mqY:_F6 #7#K#cߩąF ]3_52f1;S{[[3_Z:M@}dZٻyYڙU+PJ6 9:&z:V2%`04{;.N@_*33L-M\@sK;?Abٿ0,=:Lc0Oz 3c3J)+*K*=lzVV;+'Q,?<Jԥ$ zP7=hn?cdyv7Ezl-m". `TOA >"n2@"c'#k 0s#g__(]"vP0{G_hJ? ]Ai?.,/?@&,꜅ ? (+@PAU3;PKl@ '2;4Ѓ ԠbA@/{6l@W? z YAptwfcO!?2wlclL],81PY.pp3d οkg{x@P@PP@? ^rE:+wL\@ui]J@~}ބ7Ī1;,F5S3L*u}NЎӃP ց Wfd>/ G]_FJ΅FU?:ZCvI8r!)?Kx4TmN)s||ZU ,_&/4]!v'A@^XB˟{'Ne-e]܏KCy6@-|&]QÿD3=B3Grɣ#+d-1M9sb@pCk..Q^6<Dk*#}uu~/s~85c&-!Mߧ*{jaCgZ $5$|"M NwZgwC\mpZ;K淗;AM\1ZI6f Rf]2g~DLӧқu/gO ~2m|>o;8Z 7ooK Ljdb8n-9-) I)ՙ(8vsc`m!SC%21yW/>(h)S٤9_S_3M0_ƴҀWI@ Qĥ`h-G+]|{wW ^gc 6$[cϞF'ږhW;cgx*W6Ef6ABc+;z{̛V|o09c%s5A\O +3*ЖoE(޸IZ0͆<^Ƥs ߬ciRlc18Zd }xϿmdTU>._0SnX[s?KC|؝d?ay8pRu1V8hGÑDrн+ &hpBzAU5^N2hsZ3t8T`z rA⇪b8~,P[`Px OnHqAB#3 ra9#]!9B/aNq_5L3t߆xxۑ.MkdN^Q{X QXê\ DߗM!!`/h{:{'OS-W2`TiBxrT)_\40 ōr}nr/_Hԡ~[Rdk^X sVzC5|n(/ @_5Z86T,@U,bH>k~[WaxL_kDS7cz1 E1q- *!^$DsL nux<~y8[U'݉ G׷_`vR52EeGz㼷. 1MS}B%߽rN%){B"2͖pŖŏjLGāqx:|CzT&GG;b T/yeh~w=-V,Qƽ!kh;Tc?-^%'njxF_u\LX]wh=]?vL7  wjnE$q#eH>O[j&gZa( 03gcN4VTo!TfȻBro qF{J S- V"GzM#+ Ԙhx n"٨vo,p"@: >z}]\ }=[UL^MAoƇIYIaj\s+^'ze!_xdy 5ɧ5SBȾ`Op;4f J+SsЫ\.xjRʵRA~CV4jJjFC %|L,HR;W"p6 p+hAE1(lbLr&7 üCA;Por䕬 pR){D6[tj}K"-?6VP{yv.X+}g*lz*?Z% AT nV"[+% `,8+ˀh{sW{̹`K1LkYeAES.-ׯkgڱt0|QΩ[ܰY߾ qT[QW:P Xԋp ٔb4η!!!VڇIi PavhhS 9|>i0T|<fx[UMFDC*7 M}mߊ[^JC({62nrԯCY"ezs;J.̉:l<0J^·Pf,@AXΤ?%#Q AR/YiJ)V٘+̏M樮?K r$'63MV%&b椮U `vTecΘUE~rtIE\ C)NЯޥGkeibEkKG UsY$,Uo]&Տ3DGRp/doS47M1>0 Dsf[K^˴*} ĭ 39,jLf¯h \EB^{Zl0_U$'3Sc*ݎb+tղ iIq\BhBPF$D'}i ȞhCww?'oh`~&^oFJC Np[.)p{ i9CO_IAj~es|$쉳`B'jYqpi Qa"쾓ϳ[NAӌ05 ‘ Jtz3q 71/ԭH"8NKŬ,ݡO[ ᔻl|j-/nz<$"2ݼnOGolcJnjNjuz#L"A,B.h#Eu)LNYٳ*"uu/З1}aM&*E/"un)M"S*2S`nJAMƁz dFWCyFCr,,.SpT:ß|xEv8V4C4 E"." +N w,ʗos!N0͍ =֌]dydK-ط㻯ޱ& $*: TS>W" syN`>chX[0Yx>vxjZq6m|x{ NK iqKkdB F@d?bpR hvBޙJ`9SxΠN#Gz_ Qulv)xo0푦C:\'D* ؏xvhEܖ;ƨDGthV4ܚptޤ ǚ;x7'/ARpirC9<}p1"|hkALPMtTjQ :$l|X$!xs4( t4 v nr VIh'񵃙x~/AW::6҅D}k9 ֲg+hNaǐ2H&X~5L’Q;5uIV0&Etl [Htp [gx^.촦%PxƧsz)Ǡ&"w x{tJ~_a~m ܗzS2}V5< ndhmUIx,{w,%']-]DjrH}c2F2I%6ǦCHc#At"ڙL="Lfn"2tF}zIsjyN#M6 5"m'؃u=_HqoݛwQTb$RBU_na}\yj:Iwj nIwf,ѕXG/?!1'{GcV9XpԼ3?&O"ǔI^ϔKfְDvJ`=蜦KI#Wrp}Յ?!"V% 2pxD2=$\T¤u'= /ژ b$ S;jaYk 4LF<:$,peK^o"s;]H֝/z"QIEH 7FVl*؎/SN$A^')GxmX\c+Z#' 2JJa!̧[ErEjJvҠ,Hy1E`p"Y~}+zɩIeֲXk^xFyT{%9(l Ro2F jB3c^8qƌvZ@ g5yJxS!F0*W͑}~k*UYYdpץٛ60XJ)cW$݇@H]>Ģfڱ\w W((wڵcF򆕾zz]eUDQqيL$Ld{?fRڬ|L@łMmR+&A61אb+_Y7= Vd)T7Өdlӓe`!C)meaUs1pϐ@I4a2![RiMܨ!itG\c4W*WS9Yt}nNCx}0 'fQPs +QyU) Uh); dxq6Gc,d"S~ ø=R9 DFn\p)[U\+݃Mڈw$qޞ1Q}k((}y?Sa,.ö)sM'⤗@Zv-1p(Hp^q߮pUh<ܡeS;r!x]?=" qjn)r5XIMMH5ZIb,B` l$̓~% ˆU6(9ku#{rl uioc4-\ίYV\G("ɞ.4Әa=${HD<)b¸t8'Ȋe\wމGW4$)$u7HE瀼mg`ͼ$x__~qW*F,O#BR>A ceqYK~ jC$+7凒 {u_,qЏFԛBS杽y7prd bSsS/2JO5؎Ǜma JUcE&7}{KlJyV`^-H~'H}7{l $F㧨77`?쨀.6 ui&wi0) MQ }6p_# cPoMK÷ Kr~(g'{%Ŧl>+K嶥9>ݬWD<&c)e)w{0-U ^j49؟weR#/o'F|rH+cu W^׃ldӘם$ ]/I 6vb$9\~w^A JS~ iFSyz!qiW}w +eF)u\agBgk+acᥗްaI8 c̜ա6Ԇ߽P ! 
.Q6:%>3<vܐ֕ yHN ͜s(X]\30v`OdGq޹i_=#Tt0p^܋0r\RK )&ȅ஄ UWicZfcV':\Wڎ=( '$crIj!5n g}2ˌ%k!In%VIEݚ+wa8^_ܦJ©~DŒTG\Apn){)V bÿjލ*t&z"\ڑ47ja^a ]lQ.#?˸ڍEgD6qחbm9[uG^^c{¡ MaJG]!Iu[IAo@ t^WTS-*AzL扛[Q!M;Ip_o?1$p:.I5s喇Rybk%]4lNmr2FIڎ`o~By"[#}9n-lͥsUP&LѲnjwwG NX[j.c5v2YM.p%: +J_D"]S02#Pf\9wbkq]~DRv7Qz&<:BԀvs.j0g0}InLz2w1b4G)KB8@YaF9<ݺ.b/oҁ$wXs2ҝTT1[T.VEL/<#Y,cOߌ3 ^dHτ1qldO{#ZQr9BlYyfTlMN(n}>!j IRknJOo^Bд'x1Gr6Fux,tf$*!7Bkz7p RV͔]B0GyګT®4JwpدxPr{LU=m{JSn1*% hN-:'Sf@o #LqZrØE+~㬊7Ԅ.IoJjv;35{eҭ^3]5~_{WP '+ QBN\ՔsI%DJ -+d;9E܉0w0kw&)mI[A&Z␖' ot;.8y{6F/X/Q?Tpą|b&^Q_҆UqGl;gbo=m`Dxa>cˍz0Iɢ G |)Q`did 1xڶ pf|aWD2n:RlQL>;&>K~zGqBgvŧ4F4_"''߂{m|FoxT2C5 t \m_קNޝ*oVE(NFGqC{(ˇl$ܦ䴭~;Y](!R)z͓\ȬhS*.eQP5U 62ᐔK3Sb.Y1~*',p4A? NLnWrm۫}iwD=ܽ@Z6Ҭ]5 9zeDETDo k@#r39ZONtveޱ/7_+|xk@(<r/?: i+VR ܄¸ye oizh.2|ׁ|̸7.ʨ8d z5r|Kd;{H)BFjy WgGEfjIڻ 6N1nAӺP lY2<h0hEt,6oi, 0xD5oyLP$_#Dm_򃖎EQn^Iʚ:}uFƏBP}uTB&_E$)kfp H7~6E/]/҇9O7iہ?s?{חqM*jK{C$X4PY  { d?vADu GH 8Ô~{YPBPB0&|/᲌5y*Nf-|AnI -@#l$bg{tCᝫ6%o>^)rz*ٱwm>ϊL,6kă2$?1/; ~teeo;i}*O#Z5/1@!Uu^vőNBjMi=O)hτ;!5ʼQ9żrĜoDT]nA; =Um|K*re̿,A3e,嵋OapVW[B@;Ov#0 s8h Nָ*(#c k(/aDULr~b]9 0 --_pXނ ޿K7J"Evmu$ :1iF}zTJUS h}, bXUTd]eF1h{Z~ӵd,֋'4*(&uXihX:mq : qܗ3A177붠D yͨbזnn"Clɮɀ M -VYyY+̯hO"/SNj.PhVql V}.κ uO gΨƦA|_9zr6{]w/6丅+b࣏̊>PI1lw*SWcEM,7 %E!n6Bz/3O'΢s mL9` sȌ ifʣIW6wMTOte]pFB0^\^Sm>!+}?YQRg0$I47ɠr.Q*lhaWMDN DHXa .j-xiO'RZ#aCKpȱvWcv8@vHXQ:'* v+fffE-?Ģ4SRt[H Ȯ L0Z;D&:woC՗b ^\K"gRrNil Om3 sczHyEhҺ9  W'r[n[>td1?VVc5rH .4y/<^A/FѠ?/]~Y_KwQ3r儇Ԣ.\,I@6(, --O$: L7|D~PEtI2Rb7H-ey, ؕ%!ݜ|tW˞uMݷe~ ނޯ0 :tV׮2^NKЩU0(lƷII0YM1piv.0?qBݤk=i1 Wr UNbn ˠ:Km)JH~7-Lt\Ijz2:etӳA`$x{˽?5\HmdM rMTEe8 >ZKaoGF[*9GJc3W^`FpfţFYק=\bpPzǃKV7ii` Hџ n 7M>)KWXR~jgYORLELaHѽin,6 ij.\_&FH^$+xԎaKO'R`t*rѷk}>mGik4:l@ v֖Oӌx ϚT_&5zW=W /]F /ؙ O}ȣE|$z Ԥ6#ZEQDZ.CxԈQ0a\\h{m [v%BZQ6P;|pmo. jEgTpbb"\(h"-{(x0-{fV%,gh~wJ`Y=t Ξ Z233tK ^VB:F@j_%3zGa[%RtC)>0P'i&Sɀᤇ'B2/riVRjPϱEǑ&F'[{E 'TϮhk'LY9@Bީldwb21WIrd O 'Q&YeSS1&:λgLl4?ֵNT6K+,vsl1Ԇ̓HTxC8‹`ό?$d'aaq."z^cS :&ja ;u4gCc_4L#:,U SBj>lY mg,>vcya_*J3Զ?D)Gl8 _$ @ \nke.Kp? ZJՒK5vehktll3+E?2! CY7ލ؍SI\zV .vlKǓaWɽ wݯ^ME&vvZ <`9\@Xor[0ә}kj4s˽5׭W1p1/?}QaRʳ2b*w"@鍤k?CgMŤ2Wz(Mp,NhOt;d.*qGBJb>W׆-L (DE endstream endobj 689 0 obj << /Length1 2015 /Length2 11818 /Length3 0 /Length 13061 /Filter /FlateDecode >> stream xڍPk.+ޢS(RAw"Sݭ["v~?sd&y׺׺ *1P adeb˫XXؙXXP@kĨT@G-q*0@mY'k+;?| #g)@ k :Rڹ9- 41r3:Ly#jkB+bddd`.DpA,*@G3a fLT5 ?rU[3XL`G&1`7V&@ F&&6vF`7` /C#kG[jwF)Qe98 L (2eI  qD> v7N l0M"adǬ;e$c!NvVt5`+ob(/;[; dz89'ǟFS ` 4QGf`;\,cI:^`k/6?Չٺ<lN6/뿃(SoO-ZMzs4Y ZR,@{?p@Xo+mwARNiF6 k@G y[&🕕lVb]Qm9J\J ߃XJ#+ A zo8Bo6Qlbk~qrPYc `.) 03m!P0%qE<ЁU~#濈El\Ph63x&"Nچ@ld0wohfC*1r->XY,*,PV@(? Y\ ]f߹зj(jh^; Vh@hPᄚ;B/CBkw6r#4 @wQd49~k8:I? M*;5&N[k~@s&Ձ.[cT[Iڜ1i+WE7$iDɞ<jB[< bT&ZP&DkHP0l{>{jY7vRe;`(޺J/m)oWp}@{,bTW8Cm>K@HLs9su=9L&Cuɞu7TͱP gdCl7Aޣ bUp> fܕs*f͜O(=O}^`< ]Pe wNepʮ0:Te{Eʟ5\uNh@RѾ?9cŪkcVr&mu2CY̙!l~ 0nWA`ʿ4=YWDx;DǵcI|:&?k*&Ж2L6@$]٣4j!$SiIz<艪^F-yx%{dS<B iDH[mqyV)aT&g&._:EMk[Q+%-ւVÖb]N3݋#ɼ@*BQcHJCMB{ˣ֠zq|bW[2)gbrj}CrH ORUC0s2k#+/eYٵZs|g$^{ٽejzZec Bg,2\It+9O~NXQ-u;y BgXD̈/"Fw}ڞ}SIqI 2)cv?9C9&V,M-Q7c*3ndn-Q+[/Q{:E*KN;[W+e4~D JY95cȥ5kX)vL!(8,?K/}TGq*Fs! )K}nK )%hn!0iqÓŢw:&qLȈ&=>ᓙ;ۧ :N0:=ꒃ.eOo`DZ|^l꿃F2Ih*X#UJ Z.`Ij'ep~H Q26ɫEBT"JOXae/z%vޅPj\H^rꎸ{JpNN2U_c&4]PӮ ~z. 
<BnUoHُKN{IdJD#X_aQ݄G~E^ɠS~~/6$BD/fIdV6mPE*bW,3+7{|'el{u(6+5-2|C=;N*`8]N6s Uݮ =e9\p:Hbw;K8KOPpnH E'% Y`6=܈K`[~brpw0@SCܾE>W_12C׶&Ӟ'D+hkHWѕR@_ zZGbDI$JHKCvxnbA} =&GD(¯ט Yv9"O'E9p#dRrR'K^W *mOuy9v{ߘ`:0`e:m^d^H~h]-MD XMU*mP<ٖ^SQ/5هAJ&ΰBM*}^/D=~21Q1M4i޹d0J͵u-zFY5cEIWQ=&=kqկIj\0P@(pwyT3XطSV#+sJ0hg(zf^> d:X>`ş=y<{d~_ŷU^L_Z:INR>|ǣF1BثИ7c.6&]-f&L?96SsV1r?FPLC0҆MNq3FjmHC@V,a +ԯ"ڮG`)npo[{ eˬF/Tľ#EwFv :1qSn<+EW> o44 `kBtBbyjZm .gaCc3)K1L eaΜmb5X6HreRȳ+'YTg@ժ]Bs^'zA Q>`z#|G6"4pХGCt%.9 5|sAKؐMVp[]<*A5Mus6Ú^c4bK\뻲;;*G=}>+6@q"T_щV);+O"~1t0Γ^36k )WE5a{f0҇1q_> p>Re/6mJ7.~WyS{~)L ְ̔qDo+ \Bl4%DۍP}CDO2ͩ.XY|^A 3GtQ*o IIrTML% mW=ʞS_IF}ʤN,zHTa1WV%} aa NbFr4odZ-et%,'q#~s# V^=]k&](sϳ.kj%_ tFh%ޗC/Grѽ9oӈʳjǿ!>wu#Znq`Ύp5'U.Ӝdg BJ.8@' ߞS7fJؙcJxS$LՆăbcDwy" V@N'UׇcoWeyskRC2?_#*ZJ h?P6Cݻ `m 9 9#YNL1?}tEɌ偯EsRG:ux:n 3"2C껔t]WKPTq: qѻϠ\֖[bgq>Z/U7YPs;] H_j"t?\JlñTIS |x"8Xy8g} :^32Ɩ+vCYK^vˀ_Pf_fu-lB9ÐpD,%ݏk0͛ZFD2OmozT'޻rFN:L7^iuXוo|]&ul}kc};M oZa$Su"^23E#J<++1U/ y7.B@R,zڠ\G$UnqPJ2P/ms5BFn:0E{ׁFEj#I [~UbǞvMcZ\Vϋ/Or,ƁSJ~l6+bxGq p  Vl[LτхL1"$ d٫$_t1(duYGoIV]ZT/N/b`VJ4~-W. !G۾\]a2)NsMض"SR[śܑAAx17C8`'AٛlJ@ݏETZ v?).l"WQC/|+Γҽ ) L)b>i@&o]2qQ&#~F| 蕀ޛf:?q&+_/|mZ}Jh&=~1D.h6=<~nay{"v)}^4<$]u{Ꭻ[G/skSmP-z:r<`o2o- -c^PpѧlSx+L+ā+u l2|zΏ`Iy/5m4=5<2{Nkbmod(uQT8EGDs lmҘJq{?|`]Ѯ:4?+/rEHP)1< #ew|+ @爤D=.UXI̓~O<67:Zb'Mlmq^syU 68zbqaa2t%)*6(etdN ˜E9vteJ^, { )|*ClG| =gfQh,c-igʋ i '&K*.E<5n"uŦ 7cB[== x3+$* *D9_''"mH[Μd*S>ύU$<齽c25ET>= û)'ze?E:#|߉N[~> Rk/C5>d%oNu}::;%ӿi RVmZ@^wfx>CRzɮ":҅l=ϰdԊ`wͱd-T :A?V7_--yG7\4kf.p#<3E|"h$%^Xg`Ҝg+{9Ȥu-q vJ䣸IIg =b##Ӝטh=٫tyv&Bͧ…~Rt~=PRHj!j MY)+_;I܉ӏoE"(?|tz Aq~P~-mBsmzpK 6AGl2.rA\{ ȼFMoc؃hN30]rzKք^;z>E[<]G"fq­e*[uZd%w)k5 e~t}t(mNs_Pu"}zR ' 0|Kc@KgR),16 <.Hvn:kɌJ|A E/W5\S@ǙmoԏvW~C͔-,lYu sYMt *%Z d'yi:1)DD5/e"KyMf\ xȚkkXy:ǖwec+.-[c>|nmu(N|4c> ΀"6- HNСw r|򾽉5:3灰@G}wĶkW4z];hP;i?+;y*Pnz;ao!YLEO"xL\Q$sЧ}AB_phȚn*%MkUmdCk.Dex+?cCϦa{_q?pRg4 z.qM׍lσSf$&u:g\*ޅZzay=%OKzP3w\9)!5>q8a* [JDxxp6Z~7&33!p>p)ܬjǎ;4JZU"9&,&Y]%rQ{2 Oړ04Kp42ҾxMI/ؐOӋ4q1paǰzE 2K==+q"m5w>&B[kV1+UT 0ޏl9 3GBύC<;%IGE^ eCbwJ15_ `lsɣ>ZfZa)7 R-ǻUB ̓\yS Cl!Ro֘LZt_^֠P8`.ԟ9F܅ZMӲ Xr&d\i?DjM: 6 .tqb |/ʩɯ51I6f'^63ҚXMEaf4x[C1k/9=?J^2U59CZC/Lo^Fwk()ſS_|[[[1'"Aiv_vy_үqX^p;Nyع4'w@ܞ EILیؾ.xfRakV(NWk>rE%wE5_kDTͣaR7nKՇ}͉gz ^*~SDumܛdQy&my!asNةiH9%YٿMskIyNQyze{8 ˷^`E-5SAj7|Z3=fxx"|_U3+8CI99/sȅtj0\s̳j܌}ZjFY*o)V3C1dáŲ] e붚h㷋.Ť!s#f\ŽG{[$Zhe?=:j{.eY.0wYWtu1KLΨ`冒ӟ1]) d!d,JWiVNe >HOks׬'ң[-:/Z=w3Ʌ$Cb~޹ '$G F8k_U}4؈N"P+3a޾i.S-(9]~"#!LL  ;[1(TY2TaJ䑼@ 72X~!"#3iΉ \h+h┳뤀VZWr2Zop(S65N1 H0. @9}%UXo*gmĻ#7|Cw&Jc?zC&%'pjK[sMA3^.UL!D`i@G;b \cKQ8hjõeDVj1 6-θҢ/>ƏcJ'N*;DU_j9[CqYq@P;('v9j֜9uz$OWnsil*;{+;Dm)ٕ,co6j.IXz@a&^?^yWZn b},]J'tȗ^"`x:'>䵬wF $S(>ՓnII}bGA3VuD,}6Wqyd r1؏G6G}GK4@?lL%Kꛓ{)f7bצ@nU}x8$!qA:ږE).>OZ]%1C -JzS%abǽ>gSd^&tXpXV&;;WEReK˜JEtZ9O?GͰ+`x&%wW)$!B97BBf2 _itT|'9?b?>M?ݻ}l8y+E΍QtjSp&J xO%UJ09$i2D%˚A endstream endobj 691 0 obj << /Length1 721 /Length2 4672 /Length3 0 /Length 5264 /Filter /FlateDecode >> stream xmrg4ju :ѣ D%.E13 3ѣN"D'щ5DF^7]Zz>쳟˥A!0HDT`n `P<V2`pb 2^ `@D!c ȹ*➋`+\7"=`tBTʹ @F`N6NH@ CqA- p'0h8oM8?Ю,Z-A t4x5â>_//u'!p$ A!dM m<?wt-w p f?wrCQ t1p 0YP_z9 $N醀#VB- ]O?ڏcN;z?<50 ⯽bP? \""X7Oa#i|žc4׻9$ #d |r o Y {igKX /(lok} (V{"B-XOΞuZjuӘ'OM{$ަ,}'OίmE3;1|KyzI!TB3`eda0$3;6/3?=KqrytnEGu2rHtn%MbԈpsڧ BJ ;`e`FX(8WD"Q/]*\ұaRƨoV@~CM…bԙe3'3'>]}TJT!{QyŦr؞{ } 2%.Evpz#J, Jc9u}-*;\pf4ѫ&wϯ,3o;!@ LGl** 7$WWpYQ5Ϛ5# o9-ͰEq?sHf =R=]q'b."_{88  8ixxs=e26R>-MԜy$l$Hr*ReK\w:(_``M:ǦBԲmhR@NP >ѝU%' 13atLjgt4O ")<u@VoYA38IG 4_?)o~[u.ᅬpLw$,ttQ[ \6Qb})Ŏ72K@w>T8~5,N乁c-Tlv#$I2<-fJLZ摳lru^Pd<=.m1MMf+km(=[3/71,(m}!\.·ڔe=D{ωM^ E2 !w/3+H6= M4A'Z,Dƞi*s\F. 
ONޜՍ 6 ۹,W!#%Xfo߷90 )!Us*@>i}ޟ|Gv-z C-d9Du1N,tA po%ǞMݩvIeʾ&Ĵ6flVk;;v^-YlM.#&l^D3 KYOhlu9ZM:IQtf\jwwŶLaG|-;+qm@٧ N4 8$ZTcg3-KVn*?CmY;S^cyס8'"R\R.E(/^,j&Ny[뙧}x0Q;>vdJKo7f>!ʏs5hr\TesnX͈S)lY,W%!%?b:I9;D>b60*/꘤p&8y\/+5D 8ǒܚsϩRXKIHdݢxN m& V}ih6{͎Q z|yń'<3reh;Xy3E ="A`.jbZ_+2f%vI^ف7Ҥz3q|Po_-g畈 eWGߚ&PJ/$/32pDqDwu&:`O#4) =lp7X\~\m+r-]hQ"eG>xTh "#Ud5i\*!' xAE@}oU4gnş5Y,tl:/IZo8io'"v){gdXߟ;ٺE+u7{</&Uiѝ*v|0l (kN1S#k>w?{Y9Ay|'?8*Yf dW(jP ]~:e!=0iټ౱]PEf-|ѝ6%~R)'ryhz`v,z5bphѵ1[$1ʪ{Jb~Կ s;_<9|9t*ʝX|Jy~>M۩^L(ݡ ֣KHڪzԴDjt³ޘy&m=t9+r[lS3΄QDgy+3f^x_hiޠdd357hm Oڻ;=F!}7;\+9n"jqK5T灁?"(l ,A]Dn,,fhaP)Feɻ3o52i@{;H8dg%lo VUÜ{#gZ#K 2f}{UZIݴzEW1M;7I^_w󱛍^1cŐ=!m endstream endobj 609 0 obj << /Type /ObjStm /N 100 /First 929 /Length 5445 /Filter /FlateDecode >> stream x\[sI~ؠ0/0\<6!X KIϗYRdrه P*Yݲv2RAVvBWR)U)#q*cp-߲ꤪv|Þc4F:NWIhd4\ 2{!щbOk])100,PyZ)a4Q` LGDŽ8NtUiÞFUZ*GIt<6b ;DGG^Yr]v< 0:& 1X6+*nytE6t!;H(@@krDU ._Eai$TRA ; Kp9+[Cr .UjA8%Z-V8JRϐ *[R;aơJx N+X JKA2• ưQVH@^z״{( {@"#kx3 P(9 g=)0#HRXZ~cD$x;n A7P Z%Ȁ,~ýqo:IM7?N԰ZSKXgx4>C$^v6]DںN21$_^s4g9z|X5og՛?k?MaF x9OדM^G{rL>@ӛ`-Gy޳OOK2:'GK>oK0޽xrޟi9QuvֲE]h5\db0?|`FZ|YCUݶrk%n{7jԽe 5}LPmx08;no߾&/O۾֌00Nf-܏n'F*F`;0/kv&}mڇ:QaSip'R?{K@&ct<8us; g?]+Upu(ΒkQZgmV)Q elm)'Kr'՜ &qgZ]s(mMYVI*W#ݤCa:[ ߈"wG_.,d1^_u3MCz^}uiwED 4R)\&go^)83H( 傈Hzvf7/AkJ=\K"BH=xdj%/y]odЋ|^l D&<&&[[l.B-|fQ T$l6dl2Ӛ*&qm((arCXSŲ[z}kF/csFN'46F%Fz͕:BL; *3/i^=f  |4 :1<ǀo5NFe;m  tpC)WnmMuorƖ"UtQOm<>`$2wEGa*QQC n46Xn0V{mj4%:&ptuFbCXs}ad vtF7i3̢%L xiT:DoQ *%Ě:5RMС2e&, 8:SK$n ,YPF|b\ƓPJdL1(kM.`FRB vFMMp`4D,t Wd'x:UA)ܧ vtpQ4t˚MGzN2:urݗ4zIGsН Gy9`(ͤwq-Tlh0N*f'IɊA7Rľ*ƂbkFc KdQѺ'ec&ߗ4T.ysZ %2_9z&CmSՊˆfŚ6C;SFI% xmuCLJaƓRܦbI *xv6ͣ-H1yq8JSy&6 [堐4Iʶ-6y:%OI GHcaOܺdL>J"Dm.6rhrg(;dJE)$Tn̉\,3ư#Yn[kfQjU:P.)WiJiP]״tVvY((܇.IC{SD#Z94skTL[: x.sg2z Àq*zD^k4soJoJ.إ-as}#Zْ(oCk8..Ld i$'\=[Y ӫ-%:Hsef6bxc}ԂdL&9jϐ8Ӥ9&϶_&6[$Ȇ(t:&eO6bceaOq om G[ͅ+[Y^rc :gfr# ň㜳Db%Mi8U+ШJ&-Y4/ OhHj8#Iܪ@E&ú#KJӈG!tP$86p4%!-LXj3YBۥ-ڄp4H)sHD:|(Eh&&sD(Gyju rjOlyܢp 9^aaܸqqm-f^tpE5H<И+WdA zXVZg|)_/Ѽ4-c}D#,ɕINڜ y3rρ»i6]"[q["ȄED67ż#/zK/9-Ai?ILBOSt&{ߑӋ>YT)ybexie͙=Sa,z]\7/Wh˥]Sy^2Jtr9}ܣ4.Z5.]:c*$@b.Ǥ:+~8L2 W~ચ^82Nn#sPeÂTfˑ+<圼6sFalז0y#1 qz6\Ɠ8Uw~{s?^>ie҄GTE6Om^sлz|ѹ^CXI7z_ɬ 5"X8`\&o~sߛ'P/,t fr1&t93썌>MxO|}l>g.EqN^x)7I/ oՄ}|+˅ڝ܆_?>}{l2ͨKvɳNA:6F줜sZZYs$xX"Gח=Š94?spxSx ;I&ٰi'pRVNON$RֈK!~'zד {v 7|uEeV?4ׄ-=[ ђg?-|{~V,vgv{ }^T7xa>8o-1G?XLs)j2U`{vl~*fqŸO:67f'WTgSR:.'oo_}?bOC\{ e"v n V[ĩ[ҏ"R_oHkID.ee)EۦnL~s 2͢P* zSCrYrzbYr1KsT.%QGV|"!go8$[o_<'/551 0h%阘~]wJ؊k0tg|<_eViAQo.?qp&p0h*B֯ŔӍ !2zJ9؍eax!^N;]cA#_:,^FnE.jNq2UVw||uَ}A23nؗȖs]~w(st1=mZmn+ܜ,v?p3푯C,o`EtƮﰁz }Xѹdzɿ-ՠm]ƶ\.#7zxsT|-cwx }~ endstream endobj 725 0 obj << /Producer (pdfTeX-1.40.22) /Author()/Title()/Subject()/Creator(LaTeX with hyperref)/Keywords() /CreationDate (D:20211011102840+02'00') /ModDate (D:20211011102840+02'00') /Trapped /False /PTEX.Fullbanner (This is pdfTeX, Version 3.141592653-2.6-1.40.22 (TeX Live 2022/dev/Debian) kpathsea version 6.3.4/dev) >> endobj 693 0 obj << /Type /ObjStm /N 58 /First 489 /Length 2559 /Filter /FlateDecode >> stream xڭZmo8_v-SmrM\(Vv_3"H6EiS i4J"YD9I#$$K)Q<)QDLJ*<&% a^yr\HtAN(AN2` 1SRD4 r0q R"Qd  J)S 4bL6*@XAYܐdъj]fuQB( 泲Ov_,/B(],?ezWV߿^u{9Ȼ|{_qnՊ@fCu ߯MŒ8tiZ,O)-~j6U_ #t#TnLha珸l0F)ܘvd{ΨaL8L;Xfڑx8qs|qh08­Ϝ1CxB;2nԏsizMQ-`:hMT"8OWFJO}/\uWMF^i:cMvRhz@_u>0#z3tL20DC:3B8M~)zyzԌ[':\Jl1z=\N*2:(\k>tfO/Ӛl5pzqgM3Gć}.-=tk2\Ӟk: Pi5Ӛd5 pMz qv#zɐ^:KS٘mê!FCz&v[FSk}>{ bumے:/뼩j[lnJҔ^mYWTMe["HPށsC(>~] =+ tPG o<ˋ>b󐢱h&>7*p+ 2ui q,sbuH$,Zy_74q5PNt>>iD/lVw yF듅#*{ȴf!Y-Mho_>QsaGr-.k9 `f"^O`sfkRz[[ HOLVy$ . 
[binary content omitted: the remaining FlateDecode-compressed object and font streams of clue/inst/doc/clue.pdf, its document information dictionary (Producer pdfTeX-1.40.22, Creator "LaTeX with hyperref", CreationDate 2021-10-11), and the closing cross-reference stream, startxref, and %%EOF trailer]

clue/data/                          directory
clue/data/GVME_Consensus.rda        binary R data file (compressed contents omitted)
clue/data/CKME.rda                  binary R data file (compressed contents omitted)
clue/data/Kinship82_Consensus.rda   binary R data file (compressed contents omitted)
clue/data/Cassini.rda               binary R data file (compressed contents omitted)
clue/data/GVME.rda                  binary R data file (compressed contents omitted)
clue/data/Phonemes.rda              binary R data file (compressed contents omitted)