clue/0000755000175100001440000000000014715050616011225 5ustar hornikusersclue/MD50000644000175100001440000001136314715050616011541 0ustar hornikusers11f9b6ab4bc92dc5d693c44d8744bdf9 *DESCRIPTION 8d6739bf3e0ef6e7f5e5e1f6f6f87989 *NAMESPACE 281c7577f564a5acbecf046c5d1b8e64 *R/AAA.R 3cd212ef14c9566294542d691268fd5a *R/addtree.R 5ca7bd63f1ed85f171358c12fcf08e53 *R/agreement.R 61f26eec5666c409d3a7369f9cc0c99a *R/bag.R cd56914218fa9922aba0f76ff8b94909 *R/boot.R 74c617065ccf4f72df1353534f85da75 *R/classes.R e4c06eac28291bc97fb2e40603f3e23d *R/consensus.R f03ed842ca3417fb555be3254025d412 *R/dissimilarity.R c8a21520e911951d95d7ebd74e113265 *R/ensemble.R f4bbabdccc0b0dc31dbf14373ded5d11 *R/fuzziness.R 5999d867614d17cd53a043cbd99703c9 *R/hierarchy.R d67f188882f5aae752df482d3473fbd0 *R/lattice.R 285f76623c207f44342d7d1ca33d07e8 *R/lsap.R d10944e1875825af11bcea65571432fc *R/margin.R 4fd82d58960235a35f20d2b964149caf *R/medoid.R a3dccf831a311ed068578f817f38161e *R/membership.R 27369e3ebfc5ade758ebb2e49bb213fc *R/objects.R 4b8e8ee574a015903622e264e7560aa8 *R/partition.R 00c4dfcc2d401d810d70f246b7628f6b *R/pava.R 6131a8ffa97003764405701373a3bd48 *R/pclust.R fc4d256afc4ea0c926fe6e288f20ec65 *R/predict.R 44b2e289ec1ed4ea042eccd8816080c5 *R/prototypes.R 5eabc5a234802b0f5a9f4c244ebe9aa9 *R/proximity.R f1a133ffc362372bc24ff24581964b1e *R/registration.R 69049e632bf64e2a11ed5b4f0c276570 *R/sumt.R 8cfa16132f28d693fcd03d396a678deb *R/tabulate.R f415cbecc8d1694bca125998db11a2ae *R/ultrametric.R 6a62f57555cae254f1f7b3ed5d9f152c *R/utilities.R e869958a1b430996c06e7634afa0a1b6 *R/validity.R 76161b65639451c966be75488458b3c3 *build/partial.rdb 1ecde39614b8d2d471bab860edea239e *build/vignette.rds f60ef93181169834635757af0cd80712 *data/CKME.rda 3c33bc3f6d97ec366e34e5bbec1405a1 *data/Cassini.rda 27f7d1f79b9a08c18ab02a40f6158284 *data/GVME.rda f32367364ed546a302efe0c7b5559822 *data/GVME_Consensus.rda c876f11cdbdd725dc45e5dd55522861f *data/Kinship82.rda 12219bb9ab488982613087f1b846fa31 
*data/Kinship82_Consensus.rda a74efad64ca1000d958a97b711dfebe3 *data/Phonemes.rda 2a6241c5a81a77397582d55686ebd255 *inst/CITATION 4ce2ff29ebfc819444d6c7eb2f09ff6b *inst/doc/clue.R ec5243c6beee816b6e93e5cbda9f722a *inst/doc/clue.Rnw 88a110097c34fd7183aa00493c5fffff *inst/doc/clue.pdf fc5c32ebcb85203fa533853fb18d18d3 *inst/po/en@quot/LC_MESSAGES/R-clue.mo 6b382525256a1a059e58ce576eff7106 *man/CKME.Rd 0d61696816866774419c7fda98f05d5f *man/Cassini.Rd b18fd96f640a1001be51feae0fe8e66d *man/GVME.Rd 1b6144d910daf97b691b547c3bcf2d51 *man/GVME_Consensus.Rd 1804157e1bd38aff59e84c3ab8efc9ef *man/Kinship82.Rd 0b423e42f1f2cfba9b9d52e163c0abf8 *man/Kinship82_Consensus.Rd eef7c118f9ea4c1434bb1f4e00b77e1e *man/Phonemes.Rd 50375af82b3d133984605c006831a07d *man/addtree.Rd f55d433cb1f20ffa39a7f0dbc9e75c02 *man/cl_agreement.Rd 3bd2dfd6f155c962d48d3b72d788314e *man/cl_bag.Rd 5dca26838651ac5caca862e459b4920f *man/cl_boot.Rd d4081e72f3447131afc6a61d0af7f3d2 *man/cl_classes.Rd 5a5d699003782b013559e0081598ac1d *man/cl_consensus.Rd 6e672adfe90c3da3a6ed084d610e1aeb *man/cl_dissimilarity.Rd 872ecad639c4ade222bba29873cb5465 *man/cl_ensemble.Rd 4cabe55c90e4c148ee3bc274d970ebf0 *man/cl_fuzziness.Rd af83eebbfd3d600999346facaa4308d5 *man/cl_margin.Rd d4a61b7498b939cd372cf4b181378e11 *man/cl_medoid.Rd e26070e22290e167ec900cdeea0567ac *man/cl_membership.Rd 2ddf43cfa7b4809e1b211e2f89080d5c *man/cl_object_names.Rd 818d072c048b86741b39db9967dae2b2 *man/cl_pam.Rd d9486d40bc389102f8d0b5dbf4830b0c *man/cl_pclust.Rd 1eb04a9edb42f0c3ad50321b36475d6a *man/cl_predict.Rd 9e88e1119f27cc732e7b865471521f1f *man/cl_prototypes.Rd 931b58a667da8aab28dc441fd0c630f7 *man/cl_tabulate.Rd a79724c42916ad2db16343e6539e53b4 *man/cl_ultrametric.Rd 09f68c908abf4bd3488ab25002bf78c2 *man/cl_validity.Rd ffe8dcd2639eb402c485d2ae30ff7b55 *man/fit_ultrametric_target.Rd 3cbae2b63263993d541d67892e307696 *man/hierarchy.Rd 7175d60e57286b9735d26ff996592517 *man/kmedoids.Rd d1b212bcbf61720cc380d2aeb01c95e3 *man/l1_fit_ultrametric.Rd 
9e257bcb7df23ccbca37778f11f52901 *man/lattice.Rd c393ed09b16d0d60bab37dedd95731a2 *man/ls_fit_addtree.Rd 9b0469edf996e2e47e8d3bb00dbb5ea4 *man/ls_fit_sum_of_ultrametrics.Rd 4de00e99c87ae942624b33a73fc10bbd *man/ls_fit_ultrametric.Rd 115623ffe35fcef24928738206942374 *man/n_of_classes.Rd e4822d78d50337d163d881298c234bb1 *man/n_of_objects.Rd da27a64e2cd173b00f81361e10bcab81 *man/partition.Rd 1bc099d43549aa2805afe8e5c5912696 *man/pclust.Rd f04df765bd23d594bd0ce49e892a2c3e *man/solve_LSAP.Rd f9e7119e8be0492354b6605b81fb5ff1 *man/sumt.Rd 6985140eb3d61464356b3a4ad86ec71c *po/R-clue.pot c1cb790e0fd0e4d3f38106f10318b585 *src/assignment.c 914912fa18b8403e505ac5d8e1f4ee29 *src/assignment.h e2f17003f4c681661ea31175a99503cf *src/clue.c 8cab4a18877c997d1af59596d2b40f4b *src/clue.h 1d83eaf5af08f3fc312d6dd0363e5c49 *src/init.c 76301856024f2491f73fee44641b5c86 *src/lsap.c 1db06fea8e5ba8856f5def041c22bf54 *src/trees.c ec5243c6beee816b6e93e5cbda9f722a *vignettes/clue.Rnw 6427acd6ca3c47f638f9779f323ce40c *vignettes/cluster.bib clue/po/0000755000175100001440000000000012213262407011635 5ustar hornikusersclue/po/R-clue.pot0000644000175100001440000001223713142031604013510 0ustar hornikusersmsgid "" msgstr "" "Project-Id-Version: clue 0.3-54\n" "POT-Creation-Date: 2017-08-07 11:31\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=CHARSET\n" "Content-Transfer-Encoding: 8bit\n" msgid "Argument 'weights' must be compatible with 'x'." msgstr "" msgid "Argument 'weights' has negative elements." msgstr "" msgid "Argument 'weights' has no positive elements." msgstr "" msgid "Non-identical weights currently not supported." msgstr "" msgid "All given orders must be valid permutations." msgstr "" msgid "Iterative projection run: %d" msgstr "" msgid "Iterative reduction run: %d" msgstr "" msgid "Cannot coerce to 'cl_addtree'." msgstr "" msgid "Cannot mix partitions and hierarchies." 
msgstr "" msgid "All clusterings must have the same number of objects." msgstr "" msgid "Can only handle hard partitions." msgstr "" msgid "Can only determine classes of partitions or hierarchies." msgstr "" msgid "Cannot compute consensus of empty ensemble." msgstr "" msgid "AOS run: %d" msgstr "" msgid "Iteration: 0 *** value: %g" msgstr "" msgid "Iteration: %d *** value: %g" msgstr "" msgid "Minimum: %g" msgstr "" msgid "AOG run: %d" msgstr "" msgid "Parameter 'p' must be in [1/2, 1]." msgstr "" msgid "Cannot compute prototype distances." msgstr "" msgid "All elements must have the same number of objects." msgstr "" msgid "Generic '%s' not defined for \"%s\" objects." msgstr "" msgid "Wrong class." msgstr "" msgid "Plotting not available for elements %s of the ensemble." msgstr "" msgid "Value '%s' is not a valid abbreviation for a fuzziness method." msgstr "" msgid "Unary '%s' not defined for \"%s\" objects." msgstr "" msgid "Hierarchies must have the same number of objects." msgstr "" msgid "Dendrograms must have the same number of objects." msgstr "" msgid "Arguments 'x' and 'y' must have the same number of objects." msgstr "" msgid "Cannot compute meet of given clusterings." msgstr "" msgid "Cannot compute join of given clusterings." msgstr "" msgid "Join of given n-trees does not exist." msgstr "" msgid "x must be a matrix with nonnegative entries." msgstr "" msgid "x must not have more rows than columns." msgstr "" msgid "Argument 'x' must be a partition." msgstr "" msgid "Cannot compute medoid of empty ensemble." msgstr "" msgid "Cannot compute medoid partition of empty ensemble." msgstr "" msgid "Class ids:" msgstr "" msgid "Criterion:" msgstr "" msgid "Medoid ids:" msgstr "" msgid "k cannot be less than the number of classes in x." msgstr "" msgid "Cannot extract object dissimilarities" msgstr "" msgid "Cannot infer class ids from given object." msgstr "" msgid "A hard partition of %d objects." msgstr "" msgid "A partition of %d objects." 
msgstr "" msgid "Partitions must have the same number of objects." msgstr "" msgid "Class ids must be atomic." msgstr "" msgid "Not a valid membership matrix." msgstr "" msgid "Cannot coerce to 'cl_hard_partition'." msgstr "" msgid "No information on exponent in consensus method used." msgstr "" msgid "No information on dissimilarity in consensus method used." msgstr "" msgid "A hard partition of a cluster ensemble with %d elements into %d classes." msgstr "" msgid "A soft partition (degree m = %f) of a cluster ensemble with %d elements into %d classes." msgstr "" msgid "Cannot determine how to modify prototypes." msgstr "" msgid "Invalid function to modify prototypes." msgstr "" msgid "Cannot determine how to subset prototypes." msgstr "" msgid "Invalid function to subset prototypes." msgstr "" msgid "Pclust run: %d" msgstr "" msgid "A hard partition of %d objects into %d classes." msgstr "" msgid "A soft partition (degree m = %f) of %d objects into %d classes." msgstr "" msgid "Cannot make new predictions." msgstr "" msgid "Standardization is currently not supported." msgstr "" msgid "Cannot determine prototypes." msgstr "" msgid "Invalid consensus method '%s'." msgstr "" msgid "Invalid dissimilarity method '%s'." msgstr "" msgid "Invalid agreement method '%s'." msgstr "" msgid "SUMT run: %d" msgstr "" msgid "Iteration: 0 Rho: %g P: %g" msgstr "" msgid "Iteration: %d Rho: %g P: %g" msgstr "" msgid "Not a valid ultrametric." msgstr "" msgid "Given ensemble contains no dissimilarities." msgstr "" msgid "Outer iteration: %d" msgstr "" msgid "Change: u: %g L: %g" msgstr "" msgid "Iteration: %d" msgstr "" msgid "Term: %d" msgstr "" msgid "Change: %g" msgstr "" msgid "Overall change: u: %g L: %g" msgstr "" msgid "An object of virtual class '%s', with representation:" msgstr "" msgid "An ensemble of %d partition of %d objects." msgid_plural "An ensemble of %d partitions of %d objects." msgstr[0] "" msgstr[1] "" msgid "An ensemble of %d dendrogram of %d objects." 
msgid_plural "An ensemble of %d dendrograms of %d objects." msgstr[0] "" msgstr[1] "" msgid "An ensemble of %d hierarchy of %d objects." msgid_plural "An ensemble of %d hierarchies of %d objects." msgstr[0] "" msgstr[1] "" msgid "An ensemble with %d element." msgid_plural "An ensemble with %d elements." msgstr[0] "" msgstr[1] "" clue/.aspell/0000755000175100001440000000000012462665664012577 5ustar hornikusersclue/.aspell/clue.rds0000644000175100001440000000156312462665506014241 0ustar hornikuserseUz6 S";yu֬nwK)A}sF'$~?'^w_oK`eъF dvzť]q|~|9nBkSb:4QkID匦ѓ-n/QQh哥(!/)1hE&,R6[0,@t* -:,1j~W||RvHC gG=(BZAl yɈтw$ p2f,CZp$noܙb۹fNExwg[:К}d.gip2GpuBQ':׆/&J<RkA[kEG^ ؜.""Np՝ Lɬ (wf1{]7Π;x\Z#0tH`p-(+c0^}62}D*JYXk;iwIΝYYah9PprZ /ʩ8'^ ն ^\clue/.aspell/defaults.R0000644000175100001440000000023113142056061014502 0ustar hornikusersRd_files <- vignettes <- R_files <- description <- list(encoding = "UTF-8", language = "en", dictionaries = c("en_stats", "clue")) clue/R/0000755000175100001440000000000014503541710011421 5ustar hornikusersclue/R/objects.R0000644000175100001440000001310113036461743013200 0ustar hornikusers### * n_of_objects ## Get the number of objects in a clustering. n_of_objects <- function(x) UseMethod("n_of_objects") ### ** Default method. n_of_objects.default <- function(x) length(cl_class_ids(x)) ## (Note that prior to R 2.1.0, kmeans() returned unclassed results, ## hence the best we can do for the *default* method is to look at a ## possibly existing "cluster" component. Using the class ids incurs ## another round of method dispatch, but avoids code duplication.) ### ** Partitioning methods. ## Package stats: kmeans() (R 2.1.0 or better). n_of_objects.kmeans <- n_of_objects.default ## Package cluster: clara(), fanny(), and pam() give objects of the ## respective class inheriting from class "partition". n_of_objects.partition <- n_of_objects.default ## Package cclust: cclust(). 
n_of_objects.cclust <- n_of_objects.default ## Package e1071: cmeans() gives objects of class "fclust". n_of_objects.fclust <- function(x) nrow(x$membership) ## Package e1071: cshell(). n_of_objects.cshell <- n_of_objects.fclust ## Package e1071: bclust(). n_of_objects.bclust <- n_of_objects.default ## Package mclust: Mclust(). n_of_objects.Mclust <- n_of_objects.default ### ** Hierarchical methods. ## Package stats: hclust(). n_of_objects.hclust <- function(x) length(x$order) ## Package cluster: agnes() and diana() give objects inheriting from ## class "twins". n_of_objects.twins <- n_of_objects.hclust ## Package cluster: mona(). n_of_objects.mona <- n_of_objects.hclust ## Package ape: class "phylo". n_of_objects.phylo <- function(x) length(x$tip.label) ### ** Others. ## Package stats: class "dist". n_of_objects.dist <- function(x) attr(x, "Size") ## Package clue: Ensembles. n_of_objects.cl_ensemble <- function(x) attr(x, "n_of_objects") ## Package clue: Memberships. n_of_objects.cl_membership <- nrow ## Package clue: pclust(). n_of_objects.pclust <- n_of_objects.default ## Package clue: Ultrametrics. n_of_objects.cl_ultrametric <- n_of_objects.dist ## Package clue: (virtual) class "cl_partition". n_of_objects.cl_partition <- function(x) .get_property_from_object_or_representation(x, "n_of_objects") ## Package clue: (virtual) class "cl_hierarchy". n_of_objects.cl_hierarchy <- function(x) .get_property_from_object_or_representation(x, "n_of_objects") ### * cl_object_names ## Determine the names of the objects in a clustering if available; give ## NULL otherwise. This is in sync with e.g. names() or dimnames(); au ## contraire, cl_object_labels() always gives labels even if no names ## are available. cl_object_names <- function(x) UseMethod("cl_object_names") ## ** Default method. cl_object_names.default <- function(x) names(cl_class_ids(x)) ## ** Partitions. ## There is really nothing special we can currently do. 
## Most partitioning functions return no information on object names. ## This includes classes ## stats: kmeans ## cba: ccfkms, rock ## cclust: cclust ## e1071: bclust ## flexclust: kcca ## kernlab: specc ## mclust: Mclust ## The algorithms for which things "work" all give named class ids. ## RWeka: Weka_clusterer ## cluster: clara fanny pam ## e1071: cclust cshell ## ** Hierarchies. ## Package stats: hclust(). cl_object_names.hclust <- function(x) x$labels ## Package cluster: agnes(), diana() and mona() all return an object ## which has an 'order.lab' component iff "the original observations ## were labelled". We can use this together the the 'order' component ## to recreate the labels in their original order. Note that we cannot ## rely on dissimilarity or data components being available. cl_object_names.twins <- function(x) { if(!is.null(x$order.lab)) { out <- character(length = n_of_objects(x)) out[x$order] <- x$order.lab out } else NULL } cl_object_names.mona <- cl_object_names.twins ## Package ape: class "phylo". cl_object_names.phylo <- function(x) x$tip.label ## ** Others. ## Package stats: class "dist". ## (Raw object dissimilarities.) cl_object_names.dist <- function(x) attr(x, "Labels") ## Package clue: memberships. cl_object_names.cl_membership <- function(x) rownames(x) ## Package clue: ultrametrics. cl_object_names.cl_ultrametric <- function(x) attr(x, "Labels") ## Package clue: (virtual) class "cl_partition". cl_object_names.cl_partition <- function(x) cl_object_names(.get_representation(x)) ## Package clue: (virtual) class "cl_hierarchy". cl_object_names.cl_hierarchy <- function(x) cl_object_names(.get_representation(x)) ## Package clue: ensembles. 
cl_object_names.cl_ensemble <- function(x) { nms <- lapply(x, cl_object_names) ind <- which(lengths(nms) > 0L) if(any(ind)) nms[[ind[1L]]] else NULL } ### * cl_object_labels cl_object_labels <- function(x) { if(is.null(out <- cl_object_names(x))) out <- as.character(seq_len(n_of_objects(x))) out } ### * cl_object_dissimilarities ## Extract object dissimilarities from R objects containing such: this ## includes objects directly inheriting from "dist" as well as ## dendrograms or additive trees. cl_object_dissimilarities <- function(x) { ## Keep this in sync with .has_object_dissimilarities(). if(is.cl_dendrogram(x)) cl_ultrametric(x) else if(inherits(x, "dist")) x else stop("Cannot extract object dissimilarities") } .has_object_dissimilarities <- function(x) { ## Keep this in sync with cl_object_dissimilarities(). is.cl_dendrogram(x) || inherits(x, "dist") } ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/registration.R0000644000175100001440000003271611754400707014274 0ustar hornikusers### ### At least currently, all registries are meant and used for all types ### of clusterings (for the time being, partitions and hierarchies) ### simultaneously. ### ### * Internal stuff. .make_db_key <- function(name, type) paste(type, name, sep = "_") ### * General-purpose stuff. ### ### This currently insists on a given type: maybe it should simply list ### everything split according to type. But hey, it's internal stuff ### anyway (at least for the time being ...) ### get_methods_from_db <- function(db, type) { type <- match.arg(type, c("partition", "hierarchy")) pattern <- sprintf("^%s_", type) sub(pattern, "", grep(pattern, objects(db), value = TRUE)) } get_method_from_db <- function(db, type, name, msg) { ## ## Keep 'msg' here so that gettext()ing could work ... 
## type <- match.arg(type, c("partition", "hierarchy")) db_keys <- objects(db) ind <- pmatch(.make_db_key(tolower(name), type), tolower(db_keys)) if(is.na(ind)) stop(msg, call. = FALSE, domain = NA) db[[db_keys[ind]]] } put_method_into_db <- function(db, type, name, value) { type <- match.arg(type, c("partition", "hierarchy")) db[[.make_db_key(name, type)]] <- value } ### * Consensus Method Registration. cl_consensus_methods_db <- new.env() get_cl_consensus_methods <- function(type) get_methods_from_db(cl_consensus_methods_db, type) get_cl_consensus_method <- function(name, type) { get_method_from_db(cl_consensus_methods_db, type, name, gettextf("Invalid consensus method '%s'.", name)) } set_cl_consensus_method <- function(name, type, definition, ...) { ## Register a @code{type} consensus method called @code{name} with ## definition @code{definition}. Provide more information where ## appropriate, e.g., @code{dissimilarity} d and @code{exponent} e ## for methods minimizing \sum_b d(x_b, x) ^ e. 
put_method_into_db(cl_consensus_methods_db, type, name, .structure(c(list(definition = definition), list(...)), class = "cl_consensus_method")) } set_cl_consensus_method("DWH", "partition", .cl_consensus_partition_DWH, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("soft/euclidean", "partition", .cl_consensus_partition_soft_euclidean, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("SE", "partition", .cl_consensus_partition_soft_euclidean, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("hard/euclidean", "partition", .cl_consensus_partition_hard_euclidean, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("HE", "partition", .cl_consensus_partition_hard_euclidean, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("soft/manhattan", "partition", .cl_consensus_partition_soft_manhattan, dissimilarity = "manhattan", exponent = 1) set_cl_consensus_method("SM", "partition", .cl_consensus_partition_soft_manhattan, dissimilarity = "manhattan", exponent = 1) set_cl_consensus_method("hard/manhattan", "partition", .cl_consensus_partition_hard_manhattan, dissimilarity = "manhattan", exponent = 1) set_cl_consensus_method("HM", "partition", .cl_consensus_partition_hard_manhattan, dissimilarity = "manhattan", exponent = 1) set_cl_consensus_method("GV1", "partition", .cl_consensus_partition_GV1, dissimilarity = "GV1", exponent = 2) set_cl_consensus_method("GV3", "partition", .cl_consensus_partition_GV3, dissimilarity = "comemberships", exponent = 2) set_cl_consensus_method("soft/symdiff", "partition", .cl_consensus_partition_soft_symdiff, dissimilarity = "symdiff", exponent = 1) set_cl_consensus_method("hard/symdiff", "partition", .cl_consensus_partition_hard_symdiff, dissimilarity = "symdiff", exponent = 1) set_cl_consensus_method("cophenetic", "hierarchy", .cl_consensus_hierarchy_cophenetic, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("euclidean", "hierarchy", 
.cl_consensus_hierarchy_cophenetic, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("manhattan", "hierarchy", .cl_consensus_hierarchy_manhattan, dissimilarity = "manhattan", exponent = 1) set_cl_consensus_method("majority", "hierarchy", .cl_consensus_hierarchy_majority, dissimilarity = "symdiff", exponent = 1) ### * Dissimilarity Method Registration. cl_dissimilarity_methods_db <- new.env() get_cl_dissimilarity_methods <- function(type) get_methods_from_db(cl_dissimilarity_methods_db, type) get_cl_dissimilarity_method <- function(name, type) get_method_from_db(cl_dissimilarity_methods_db, type, name, gettextf("Invalid dissimilarity method '%s'.", name)) set_cl_dissimilarity_method <- function(name, type, definition, description, ...) put_method_into_db(cl_dissimilarity_methods_db, type, name, .structure(c(list(definition = definition, description = description), list(...)), class = "cl_dissimilarity_method")) set_cl_dissimilarity_method("euclidean", "partition", .cl_dissimilarity_partition_euclidean, "minimal Euclidean membership distance") set_cl_dissimilarity_method("manhattan", "partition", .cl_dissimilarity_partition_manhattan, "minimal Manhattan membership distance") set_cl_dissimilarity_method("comemberships", "partition", .cl_dissimilarity_partition_comemberships, "Euclidean comembership distance") set_cl_dissimilarity_method("symdiff", "partition", .cl_dissimilarity_partition_symdiff, "symmetric difference distance") set_cl_dissimilarity_method("Rand", "partition", .cl_dissimilarity_partition_Rand, "Rand distance") set_cl_dissimilarity_method("GV1", "partition", .cl_dissimilarity_partition_GV1, "Gordon-Vichi Delta_1 dissimilarity") set_cl_dissimilarity_method("BA/A", "partition", .cl_dissimilarity_partition_BA_A, "Boorman/Arabie minimum element moves distance") set_cl_dissimilarity_method("BA/C", "partition", .cl_dissimilarity_partition_BA_C, "Boorman/Arabie minimum lattice moves distance") set_cl_dissimilarity_method("BA/D", "partition", 
.cl_dissimilarity_partition_BA_D, "Boorman/Arabie pair-bonds distance") set_cl_dissimilarity_method("BA/E", "partition", .cl_dissimilarity_partition_BA_E, "Boorman/Arabie normalized information distance") set_cl_dissimilarity_method("VI", "partition", .cl_dissimilarity_partition_VI, "Variation of information") set_cl_dissimilarity_method("Mallows", "partition", .cl_dissimilarity_partition_Mallows, "Mallows dissimilarity") set_cl_dissimilarity_method("CSSD", "partition", .cl_dissimilarity_partition_CSSD, "Cluster Similarity Sensitive Distance") set_cl_dissimilarity_method("euclidean", "hierarchy", .cl_dissimilarity_hierarchy_euclidean, "Euclidean ultrametric distance") set_cl_dissimilarity_method("manhattan", "hierarchy", .cl_dissimilarity_hierarchy_manhattan, "Manhattan ultrametric distance") set_cl_dissimilarity_method("cophenetic", "hierarchy", .cl_dissimilarity_hierarchy_cophenetic, "cophenetic correlations") set_cl_dissimilarity_method("gamma", "hierarchy", .cl_dissimilarity_hierarchy_gamma, "rate of inversions") set_cl_dissimilarity_method("symdiff", "hierarchy", .cl_dissimilarity_hierarchy_symdiff, "symmetric difference distance") set_cl_dissimilarity_method("Chebyshev", "hierarchy", .cl_dissimilarity_hierarchy_Chebyshev, "Chebyshev distance") set_cl_dissimilarity_method("Lyapunov", "hierarchy", .cl_dissimilarity_hierarchy_Lyapunov, "Lyapunov distance") set_cl_dissimilarity_method("BO", "hierarchy", .cl_dissimilarity_hierarchy_BO, "Boorman/Olivier m_delta tree distance") set_cl_dissimilarity_method("spectral", "hierarchy", .cl_dissimilarity_hierarchy_spectral, "spectral ultrametric distance") ### * Agreement Method Registration. 
cl_agreement_methods_db <- new.env() get_cl_agreement_methods <- function(type) get_methods_from_db(cl_agreement_methods_db, type) get_cl_agreement_method <- function(name, type) get_method_from_db(cl_agreement_methods_db, type, name, gettextf("Invalid agreement method '%s'.", name)) set_cl_agreement_method <- function(name, type, definition, description, ...) put_method_into_db(cl_agreement_methods_db, type, name, .structure(c(list(definition = definition, description = description), list(...)), class = "cl_agreement_method")) set_cl_agreement_method("euclidean", "partition", .cl_agreement_partition_euclidean, "minimal euclidean membership distance") set_cl_agreement_method("manhattan", "partition", .cl_agreement_partition_manhattan, "minimal manhattan membership distance") set_cl_agreement_method("Rand", "partition", .cl_agreement_partition_Rand, "Rand index") set_cl_agreement_method("cRand", "partition", .cl_agreement_partition_cRand, "corrected Rand index") set_cl_agreement_method("NMI", "partition", .cl_agreement_partition_NMI, "normalized mutual information") set_cl_agreement_method("KP", "partition", .cl_agreement_partition_KP, "Katz-Powell index") set_cl_agreement_method("angle", "partition", .cl_agreement_partition_angle, "maximal angle between memberships") set_cl_agreement_method("diag", "partition", .cl_agreement_partition_diag, "maximal co-classification rate") set_cl_agreement_method("FM", "partition", .cl_agreement_partition_FM, "Fowlkes-Mallows index") set_cl_agreement_method("Jaccard", "partition", .cl_agreement_partition_Jaccard, "Jaccard index") set_cl_agreement_method("purity", "partition", .cl_agreement_partition_purity, "purity") set_cl_agreement_method("PS", "partition", .cl_agreement_partition_PS, "Prediction Strength") set_cl_agreement_method("euclidean", "hierarchy", .cl_agreement_hierarchy_euclidean, "euclidean ultrametric distance") set_cl_agreement_method("manhattan", "hierarchy", .cl_agreement_hierarchy_manhattan, "manhattan 
ultrametric distance") set_cl_agreement_method("cophenetic", "hierarchy", .cl_agreement_hierarchy_cophenetic, "cophenetic correlations") set_cl_agreement_method("angle", "hierarchy", .cl_agreement_hierarchy_angle, "angle between ultrametrics") set_cl_agreement_method("gamma", "hierarchy", .cl_agreement_hierarchy_gamma, "rate of inversions") clue/R/addtree.R0000644000175100001440000003111014144530664013157 0ustar hornikusers### * ls_fit_addtree ls_fit_addtree <- function(x, method = c("SUMT", "IP", "IR"), weights = 1, control = list()) { if(!inherits(x, "dist")) x <- as.dist(x) ## Catch some special cases right away. if(attr(x, "Size") <= 3L) return(as.cl_addtree(x)) if(.non_additivity(x, max = TRUE) == 0) return(as.cl_addtree(x)) ## Handle argument 'weights'. ## This is somewhat tricky ... if(is.matrix(weights)) { weights <- as.dist(weights) if(length(weights) != length(x)) stop("Argument 'weights' must be compatible with 'x'.") } else weights <- rep_len(weights, length(x)) if(any(weights < 0)) stop("Argument 'weights' has negative elements.") if(!any(weights > 0)) stop("Argument 'weights' has no positive elements.") method <- match.arg(method) switch(method, SUMT = .ls_fit_addtree_by_SUMT(x, weights, control), IP = { .ls_fit_addtree_by_iterative_projection(x, weights, control) }, IR = { .ls_fit_addtree_by_iterative_reduction(x, weights, control) }) } ### ** .ls_fit_addtree_by_SUMT .ls_fit_addtree_by_SUMT <- function(x, weights = 1, control = list()) { ## Control parameters: ## gradient, gradient <- control$gradient if(is.null(gradient)) gradient <- TRUE ## nruns, nruns <- control$nruns ## start, start <- control$start ## Handle start values and number of runs. if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. start <- list(start) } } else if(is.null(nruns)) { ## Use nruns only if start is not given. 
nruns <- 1L } w <- weights / sum(weights) n <- attr(x, "Size") labels <- attr(x, "Labels") ## Handle missing values in x along the lines of de Soete (1984): ## set the corresponding weights to 0, and impute by the weighted ## mean. ind <- which(is.na(x)) if(any(ind)) { w[ind] <- 0 x[ind] <- weighted.mean(x, w, na.rm = TRUE) } L <- function(d) sum(w * (d - x) ^ 2) P <- .make_penalty_function_addtree(n) if(gradient) { grad_L <- function(d) 2 * w * (d - x) grad_P <- .make_penalty_gradient_addtree(n) } else { grad_L <- grad_P <- NULL } if(is.null(start)) { ## Initialize by "random shaking". Use sd() for simplicity. start <- replicate(nruns, x + rnorm(length(x), sd = sd(x) / sqrt(3)), simplify = FALSE) } ## And now ... d <- sumt(start, L, P, grad_L, grad_P, method = control$method, eps = control$eps, q = control$q, verbose = control$verbose, control = as.list(control$control))$x ## Round to enforce additivity, and hope for the best ... .cl_addtree_from_addtree_approximation(d, n, labels) } .make_penalty_function_addtree <- function(n) function(d) { (.non_additivity(.symmetric_matrix_from_veclh(d, n)) + sum(pmin(d, 0) ^ 2)) } .make_penalty_gradient_addtree <- function(n) function(d) { gr <- matrix(.C(C_deviation_from_additivity_gradient, as.double(.symmetric_matrix_from_veclh(d, n)), as.integer(n), gr = double(n * n))$gr, n, n) gr[row(gr) > col(gr)] + 2 * sum(pmin(d, 0)) } ### ** .ls_fit_addtree_by_iterative_projection ## ## Functions ## .ls_fit_addtree_by_iterative_projection() ## .ls_fit_addtree_by_iterative_reduction() ## are really identical apart from the name of the C routine they call. ## (But will this necessarily always be the case in the future?) ## Merge maybe ... 
## .ls_fit_addtree_by_iterative_projection <- function(x, weights = 1, control = list()) { if(any(diff(weights))) warning("Non-identical weights currently not supported.") labels <- attr(x, "Labels") x <- as.matrix(x) n <- nrow(x) ## Control parameters: ## maxiter, maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 10000L ## nruns, nruns <- control$nruns ## order, order <- control$order ## tol, tol <- control$tol if(is.null(tol)) tol <- 1e-8 ## verbose. verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") ## Handle order and nruns. if(!is.null(order)) { if(!is.list(order)) order <- as.list(order) if(!all(vapply(order, function(o) all(sort(o) == seq_len(n)), NA))) stop("All given orders must be valid permutations.") } else { if(is.null(nruns)) nruns <- 1L order <- replicate(nruns, sample(n), simplify = FALSE) } ind <- lower.tri(x) L <- function(d) sum(weights * (x - d)[ind] ^ 2) d_opt <- NULL v_opt <- Inf for(run in seq_along(order)) { if(verbose) message(gettextf("Iterative projection run: %d", run)) d <- .C(C_ls_fit_addtree_by_iterative_projection, as.double(x), as.integer(n), as.integer(order[[run]] - 1L), as.integer(maxiter), iter = integer(1L), as.double(tol), as.logical(verbose))[[1L]] v <- L(d) if(v < v_opt) { v_opt <- v d_opt <- d } } d <- matrix(d_opt, n) dimnames(d) <- list(labels, labels) .cl_addtree_from_addtree_approximation(as.dist(d)) } ### ** .ls_fit_addtree_by_iterative_reduction .ls_fit_addtree_by_iterative_reduction <- function(x, weights = 1, control = list()) { if(any(diff(weights))) warning("Non-identical weights currently not supported.") labels <- attr(x, "Labels") x <- as.matrix(x) n <- nrow(x) ## Control parameters: ## maxiter, maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 10000L ## nruns, nruns <- control$nruns ## order, order <- control$order ## tol, tol <- control$tol if(is.null(tol)) tol <- 1e-8 ## verbose. 
verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") ## Handle order and nruns. if(!is.null(order)) { if(!is.list(order)) order <- as.list(order) if(!all(vapply(order, function(o) all(sort(o) == seq_len(n)), NA))) stop("All given orders must be valid permutations.") } else { if(is.null(nruns)) nruns <- 1L order <- replicate(nruns, sample(n), simplify = FALSE) } ind <- lower.tri(x) L <- function(d) sum(weights * (x - d)[ind] ^ 2) d_opt <- NULL v_opt <- Inf for(run in seq_along(order)) { if(verbose) message(gettextf("Iterative reduction run: %d", run)) d <- .C(C_ls_fit_addtree_by_iterative_reduction, as.double(x), as.integer(n), as.integer(order[[run]] - 1L), as.integer(maxiter), iter = integer(1L), as.double(tol), as.logical(verbose))[[1L]] v <- L(d) if(v < v_opt) { v_opt <- v d_opt <- d } } d <- matrix(d_opt, n) dimnames(d) <- list(labels, labels) .cl_addtree_from_addtree_approximation(as.dist(d)) } ### * .non_additivity .non_additivity <- function(x, max = FALSE) { if(!is.matrix(x)) x <- .symmetric_matrix_from_veclh(x) .C(C_deviation_from_additivity, as.double(x), as.integer(nrow(x)), fn = double(1L), as.logical(max))$fn } ### * ls_fit_centroid ls_fit_centroid <- function(x) { ## Fit a centroid additive tree distance along the lines of Carroll ## & Pruzansky (1980). 
In fact, solving ## ## \sum_{i,j: i \ne j} (\delta_{ij} - (g_i + g_j)) ^ 2 => min_g ## ## gives \sum_{j: j \ne i} (g_i + g_j - \delta_{ij}) = 0, or (also ## in Barthemely & Guenoche) ## ## (n - 2) g_i + \sum_j g_j = \sum_{j: j \ne i} \delta_{ij} ## ## which after summing over all i and some manipulations eventually ## gives ## ## g_i = \frac{1}{n-2} (v_i - m), ## ## v_i = \sum_{j: j \ne i} \delta_{ij} ## s = \frac{1}{2(n-1)} \sum_{i,j: j \ne i} \delta_{ij} n <- attr(x, "Size") if(n <= 2L) return(as.cl_addtree(0 * x)) x <- as.matrix(x) g <- rowSums(x) / (n - 2) - sum(x) / (2 * (n - 1) * (n - 2)) as.cl_addtree(as.dist(.make_centroid_matrix(g))) } .make_centroid_matrix <- function(g) { y <- outer(g, g, `+`) diag(y) <- 0 y } ### * as.cl_addtree as.cl_addtree <- function(x) UseMethod("as.cl_addtree") as.cl_addtree.default <- function(x) { if(inherits(x, "cl_addtree")) x else if(is.atomic(x) || inherits(x, "cl_ultrametric")) .cl_addtree_from_veclh(x) else if(is.matrix(x)) { ## Should actually check whether the matrix is symmetric, >= 0 ## and satisfies the 4-point conditions ... .cl_addtree_from_veclh(as.dist(x)) } else if(is.cl_dendrogram(x)) .cl_addtree_from_veclh(cl_ultrametric(x)) else stop("Cannot coerce to 'cl_addtree'.") } as.cl_addtree.phylo <- function(x) .cl_addtree_from_veclh(as.dist(cophenetic(x))) ## Phylogenetic trees with edge/branch lengths yield additive tree ## dissimilarities. ### * .cl_addtree_from_veclh .cl_addtree_from_veclh <- function(x, size = NULL, labels = NULL) { cl_proximity(x, "Additive tree distances", labels = labels, size = size, class = c("cl_addtree", "cl_dissimilarity", "cl_proximity", "dist")) } ### * .cl_addtree_from_addtree_approximation .cl_addtree_from_addtree_approximation <- function(x, size = NULL, labels = NULL) { ## Turn x into an addtree after possibly rounding to non-additivity ## significance (note that this is not guaranteed to work ...). 
mnum <- .non_additivity(x, max = TRUE) x <- round(x, floor(abs(log10(mnum)))) .cl_addtree_from_veclh(x, size = size, labels = labels) } ### * .decompose_addtree .decompose_addtree <- function(x, const = NULL) { ## Decompose an addtree into an ultrametric and a centroid ## distance. ## If 'const' is not given, we take the root as half way between the ## diameter of the addtree, and choose a minimal constant to ensure ## non-negativity (but not positivity) of the ultrametric. ## As this is all slightly dubious and it is not quite clear how ## much positivity we want in the ultrametric of the decomposition, ## we keep this hidden. For plotting addtrees, the choice of the ## constant does not seem to matter. x <- as.matrix(x) n <- nrow(x) ## Determine diameter. ind <- which.max(x) - 1 u <- ind %% n + 1 v <- ind %/% n + 1 if(!is.null(const)) g <- pmax(x[u, ], x[v, ]) - const else { g <- pmax(x[u, ], x[v, ]) - x[u, v] / 2 u <- x - .make_centroid_matrix(g) k <- - min(u) g <- g - k / 2 } u <- x - .make_centroid_matrix(g) names(g) <- rownames(x) ## Ensure a valid ultrametric. d <- .ultrametrify(as.dist(u)) u <- .cl_ultrametric_from_veclh(d, nrow(x), rownames(x)) ## Note that we return the centroid distances to the root, and not ## between the objects (as.dist(.make_centroid_matrix(g))) ... list(Ultrametric = as.cl_ultrametric(u), Centroid = g) } ### * plot.cl_addtree plot.cl_addtree <- function(x, ...) { ## Construct a dendrogram-style representation of the addtree with ## the root half way between the diameter, and plot. y <- .decompose_addtree(x, max(x)) u <- y$Ultrametric g <- y$Centroid ## We halve the scale of the ultrametric, and add the maximal g from ## the centroid. h <- hclust(as.dist(u / 2), "single") h$height <- h$height + max(g) d <- as.dendrogram(h) ## Now modify the heights of the leaves so that the objects giving ## the diameter of the addtree end up with height zero. 
g <- max(g) - g names(g) <- labels(g) d <- dendrapply(d, function(n) { if(!is.leaf(n)) return(n) attr(n, "height") <- g[attr(n, "label")] n }) ## And finally plot plot(d, ...) } ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/boot.R0000644000175100001440000000255711304023136012511 0ustar hornikuserscl_boot <- function(x, B, k = NULL, algorithm = if(is.null(k)) "hclust" else "kmeans", parameters = list(), resample = FALSE) { clusterings <- if(!resample) { x <- rep.int(list(x), B) eval(as.call(c(list(as.name("lapply"), x, algorithm), if(!is.null(k)) list(k), parameters))) } else { replicate(B, expr = { algorithm <- match.fun(algorithm) ## ## This is not quite perfect. We have ## cl_predict() to encapsulate the process of ## assigning objects to classes, but for sampling ## from the objects we assume that they correspond ## to the *rows* of 'x'. Argh. ## ind <- sample(NROW(x), replace = TRUE) train <- if(length(dim(x)) == 2) x[ind, ] else x[ind] out <- eval(as.call(c(list(algorithm, train), if(!is.null(k)) list(k), parameters))) as.cl_partition(cl_predict(out, x, "memberships")) }, simplify = FALSE) } cl_ensemble(list = clusterings) } clue/R/bag.R0000644000175100001440000000276711304023136012302 0ustar hornikuserscl_bag <- function(x, B, k = NULL, algorithm = "kmeans", parameters = NULL, method = "DFBC1", control = NULL) { ## Currently, method 'DFBC1' (Dudoit-Fridlyand BagClust1) is the ## only one available, and argument 'control' is ignored. ## Construct reference partition. algorithm <- match.fun(algorithm) reference <- eval(as.call(c(list(algorithm, x), if(!is.null(k)) list(k), parameters))) ## Construct bootstrap ensemble. clusterings <- cl_boot(x, B, k, algorithm, parameters, resample = TRUE) ## Construct Dudoit-Fridlyand BagClust1 consensus partitions, ## suitably generalized ... 
## ## In principle, this could be turned into a "constructive" method ## for cl_consensus(), also allowing for weights (straightforward). ## E.g., ## .cl_consensus_partition_DFBC1(clusterings, weights, control) ## where either 'control specifies a reference partition, or the ## first element of 'clusterings' is taken as such. ## k <- max(sapply(c(clusterings, reference), n_of_classes)) M_ref <- cl_membership(reference, k) M <- matrix(0, NROW(M_ref), k) for(b in seq_len(B)) { mem <- cl_membership(clusterings[[b]], k) ## Match classes to reference partition. ind <- solve_LSAP(crossprod(M_ref, mem), maximum = TRUE) M <- M + mem[, ind] } as.cl_partition(cl_membership(as.cl_membership(M / B), k)) } clue/R/ultrametric.R0000644000175100001440000006365313435044702014116 0ustar hornikusers### * cl_ultrametric cl_ultrametric <- function(x, size = NULL, labels = NULL) { if(inherits(x, "cl_hierarchy")) { ## ## Strictly, not every hierarchy corresponds to an ultrametric. ## return(cl_ultrametric(.get_representation(x), size = size, labels = labels)) } else if(!inherits(x, "cl_ultrametric")) { ## Try using cophenetic(). ## This starts by coercing to hclust, which has methods for all ## currently supported hierarchical classification methods. ## To support others, either provide as.hclust methods for ## these, or make cl_ultrametric() generic and add methods. ## Or use the fact that in R >= 2.1.0, stats::cophenetic() is ## generic. 
out <- cophenetic(x) } else { out <- x if(is.null(labels)) labels <- attr(x, "Labels") } .cl_ultrametric_from_veclh(out, labels = labels, size = size) } .cl_ultrametric_from_veclh <- function(x, size = NULL, labels = NULL, meta = NULL) { if(.non_ultrametricity(x) > 0) stop("Not a valid ultrametric.") u <- cl_proximity(x, "Ultrametric distances", labels = labels, size = size, class = c("cl_ultrametric", "cl_dissimilarity", "cl_proximity", "dist")) if(!is.null(meta)) attr(u, "meta") <- meta u } ### * as.cl_ultrametric as.cl_ultrametric <- function(x) UseMethod("as.cl_ultrametric") as.cl_ultrametric.default <- function(x) { if(inherits(x, "cl_ultrametric")) x else if(is.atomic(x)) .cl_ultrametric_from_veclh(x) else cl_ultrametric(x) } as.cl_ultrametric.matrix <- function(x) .cl_ultrametric_from_veclh(x[row(x) > col(x)], labels = rownames(x)) ### * as.dendrogram.cl_ultrametric as.dendrogram.cl_ultrametric <- function(object, ...) as.dendrogram(as.hclust(object), ...) ### * as.hclust.cl_ultrametric as.hclust.cl_ultrametric <- function(x, ...) { ## Hierarchical clustering with single linkage gives the minimal ## ultrametric dominated by a dissimilarity, see e.g. Bock (1974, ## Theorem 39.2). Hence, hclust(method = "single") on an ## ultrametric gives the hclust representation of the associated ## dendrogram. hclust(x, "single") } ### * cophenetic.cl_ultrametric cophenetic.cl_ultrametric <- function(x) as.dist(x) ### * plot.cl_ultrametric plot.cl_ultrametric <- function(x, ...) plot(as.dendrogram(x), ...) ### * ls_fit_ultrametric ls_fit_ultrametric <- function(x, method = c("SUMT", "IP", "IR"), weights = 1, control = list()) { if(inherits(x, "cl_ultrametric")) { return(.cl_ultrametric_with_meta_added(x, list(objval = 0))) } else if(is.cl_ensemble(x) || is.list(x)) { ## Might be given a list/ensemble of object dissimilarities. ## In this case, compute the suitably weighted average and ## proceed. 
if(length(x) == 0L) stop("Given ensemble contains no dissimilarities.") ## Let's be nice as usual ... ind <- !vapply(x, .has_object_dissimilarities, NA) if(any(ind)) x[ind] <- lapply(x[ind], as.dist) x <- .weighted_mean_of_object_dissimilarities(x, control$weights) } else if(!inherits(x, "dist")) x <- as.dist(x) ## Catch some special cases right away. if(attr(x, "Size") <= 2L) return(.cl_ultrametric_with_meta_added(as.cl_ultrametric(x), list(objval = 0))) if(.non_ultrametricity(x, max = TRUE) == 0) return(.cl_ultrametric_with_meta_added(as.cl_ultrametric(x), list(objval = 0))) ## Handle weights. ## This is somewhat tricky ... if(is.matrix(weights)) { weights <- as.dist(weights) if(length(weights) != length(x)) stop("Argument 'weights' must be compatible with 'x'.") } else weights <- rep_len(weights, length(x)) if(any(weights < 0)) stop("Argument 'weights' has negative elements.") if(!any(weights > 0)) stop("Argument 'weights' has no positive elements.") method <- match.arg(method) switch(method, SUMT = .ls_fit_ultrametric_by_SUMT(x, weights, control), IP = { .ls_fit_ultrametric_by_iterative_projection(x, weights, control) }, IR = { .ls_fit_ultrametric_by_iterative_reduction(x, weights, control) }) } ### ** .ls_fit_ultrametric_by_SUMT .ls_fit_ultrametric_by_SUMT <- function(x, weights = 1, control = list()) { ## Fit an ultrametric to a dissimilarity by minimizing euclidean ## dissimilarity subject to the ultrametric constraint, using the ## sequential algorithm of de Soete (1984) with a slight change: we ## try to ensure that what we obtain satisfies the constraints ## "exactly" rather than approximately. We (currently?) do that via ## rounding ... ## ## This fits and hence returns an ultrametric, *not* the hierarchy ## corresponding to the ultrametric. ## w <- weights / sum(weights) ## Control parameters: ## nruns, nruns <- control$nruns ## start. start <- control$start ## Handle start values and number of runs. 
if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. start <- list(start) } } else if(is.null(nruns)) { ## Use nruns only if start is not given. nruns <- 1L } ## If x is an ultrametric, or satisfies the ultrametricity ## constraints, return it. if(inherits(x, "cl_ultrametric") || (.non_ultrametricity(x, max = TRUE) == 0)) return(.cl_ultrametric_with_meta_added(as.cl_ultrametric(x), list(objval = 0))) ## For the time being, use a simple minimizer. n <- attr(x, "Size") labels <- attr(x, "Labels") ## Handle missing values in x along the lines of de Soete (1984): ## set the corresponding weights to 0, and impute by the weighted ## mean. ind <- which(is.na(x)) if(any(ind)) { w[ind] <- 0 x[ind] <- weighted.mean(x, w, na.rm = TRUE) } ## We follow de Soete's notation, and use the veclh's (vector of ## lower half, in S the same as x[lower.tri(x)]) of the respective ## proximity objects. L <- function(d) sum(w * (x - d) ^ 2) P <- .make_penalty_function_ultrametric(n) grad_L <- function(d) 2 * w * (d - x) grad_P <- .make_penalty_gradient_ultrametric(n) if(is.null(start)) { ## Initialize by "random shaking". Use sd() for simplicity. start <- replicate(nruns, x + rnorm(length(x), sd = sd(x) / sqrt(3)), simplify = FALSE) } ## And now ... out <- sumt(start, L, P, grad_L, grad_P, method = control$method, eps = control$eps, q = control$q, verbose = control$verbose, control = as.list(control$control)) d <- .ultrametrify(out$x) meta <- list(objval = L(d)) .cl_ultrametric_from_veclh(d, n, labels, meta) } .make_penalty_function_ultrametric <- function(n) function(d) { ## Smooth penalty function measuring the extent of violation of ## the ultrametricity constraint. Also ensure nonnegativity ... 
(.non_ultrametricity(.symmetric_matrix_from_veclh(d, n)) + sum(pmin(d, 0) ^ 2)) } .make_penalty_gradient_ultrametric <- function(n) function(d) { gr <- matrix(.C(C_deviation_from_ultrametricity_gradient, as.double(.symmetric_matrix_from_veclh(d, n)), as.integer(n), gr = double(n * n))$gr, n, n) gr[row(gr) > col(gr)] + 2 * sum(pmin(d, 0)) } ### ** .ls_fit_ultrametric_by_iterative_projection ## ## Functions ## .ls_fit_ultrametric_by_iterative_projection() ## .ls_fit_ultrametric_by_iterative_reduction() ## are really identical apart from the name of the C routine they call. ## (But will this necessarily always be the case in the future?) ## Merge maybe ... ## .ls_fit_ultrametric_by_iterative_projection <- function(x, weights = 1, control = list()) { if(any(diff(weights) != 0)) warning("Non-identical weights currently not supported.") labels <- attr(x, "Labels") n <- attr(x, "Size") x <- as.matrix(x) ## Control parameters: ## maxiter, maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 10000L ## nruns, nruns <- control$nruns ## order, order <- control$order ## tol, tol <- control$tol if(is.null(tol)) tol <- 1e-8 ## verbose. verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") ## Handle order and nruns. if(!is.null(order)) { if(!is.list(order)) order <- as.list(order) if(!all(vapply(order, function(o) all(sort(o) == seq_len(n)), NA))) stop("All given orders must be valid permutations.") } else { if(is.null(nruns)) nruns <- 1L order <- replicate(nruns, sample(n), simplify = FALSE) } ## ## Adjust in case support for non-identical weights is added. 
L <- function(d) sum((x - d) ^ 2) ## d_opt <- NULL v_opt <- Inf for(run in seq_along(order)) { if(verbose) message(gettextf("Iterative projection run: %d", run)) d <- .C(C_ls_fit_ultrametric_by_iterative_projection, as.double(x), as.integer(n), as.integer(order[[run]] - 1L), as.integer(maxiter), iter = integer(1L), as.double(tol), as.logical(verbose))[[1L]] v <- L(d) if(v < v_opt) { v_opt <- v d_opt <- d } } d <- .ultrametrify(as.dist(matrix(d_opt, n))) meta <- list(objval = L(d)) .cl_ultrametric_from_veclh(d, n, labels, meta) } ### ** .ls_fit_ultrametric_by_iterative_reduction .ls_fit_ultrametric_by_iterative_reduction <- function(x, weights = 1, control = list()) { if(any(diff(weights) != 0)) warning("Non-identical weights currently not supported.") labels <- attr(x, "Labels") n <- attr(x, "Size") x <- as.matrix(x) ## Control parameters: ## maxiter, maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 10000L ## nruns, nruns <- control$nruns ## order, order <- control$order ## tol, tol <- control$tol if(is.null(tol)) tol <- 1e-8 ## verbose. verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") ## Handle order and nruns. if(!is.null(order)) { if(!is.list(order)) order <- as.list(order) if(!all(vapply(order, function(o) all(sort(o) == seq_len(n)), NA))) stop("All given orders must be valid permutations.") } else { if(is.null(nruns)) nruns <- 1L order <- replicate(nruns, sample(n), simplify = FALSE) } ## ## Adjust in case support for non-identical weights is added. 
L <- function(d) sum((x - d) ^ 2) ## d_opt <- NULL v_opt <- Inf for(run in seq_along(order)) { if(verbose) message(gettextf("Iterative reduction run: %d", run)) d <- .C(C_ls_fit_ultrametric_by_iterative_reduction, as.double(x), as.integer(n), as.integer(order[[run]] - 1L), as.integer(maxiter), iter = integer(1L), as.double(tol), as.logical(verbose))[[1L]] v <- L(d) if(v < v_opt) { v_opt <- v d_opt <- d } } d <- .ultrametrify(as.dist(matrix(d_opt, n))) meta <- list(objval = L(d)) .cl_ultrametric_from_veclh(d, n, labels, meta) } ### * Ultrametric Target Fitters. ### ** ls_fit_ultrametric_target ls_fit_ultrametric_target <- function(x, y, weights = 1) { fitter <- if(identical(weights, 1)) # Default. function(x, w) mean(x) else function(x, w) weighted.mean(x, w) distfun <- function(x, u, w) sqrt(sum(w * (x - u) ^ 2)) .fit_ultrametric_target(x, y, weights, fitter, distfun) } ### ** l1_fit_ultrametric_target l1_fit_ultrametric_target <- function(x, y, weights = 1) { fitter <- if(identical(weights, 1)) # Default. function(x, w) median(x) else function(x, w) weighted_median(x, w) distfun <- function(x, u, w) sum(w * abs(x - u)) .fit_ultrametric_target(x, y, weights, fitter, distfun) } ### ** .fit_ultrametric_target .fit_ultrametric_target <- function(x, y, w, fitter, distfun = NULL) { w <- .handle_weights_for_ultrametric_target_fitters(w, x) ## The documentation says that x should inherit from dist, so coerce ## to this if needed but if not a matrix (as we will coerce back to ## a matrix right away). 
if(!inherits(x, "dist") && !is.matrix(x)) x <- as.dist(x) x <- as.matrix(x) y <- as.hclust(y) n <- length(y$order) ilist <- vector("list", n) out <- matrix(0, n, n) mat <- xlist <- wlist <- vector("list", n - 1L) for(i in seq_len(n - 1L)) { inds <- y$merge[i, ] ids1 <- if(inds[1L] < 0) -inds[1L] else ilist[[inds[1L]]] ids2 <- if(inds[2L] < 0) -inds[2L] else ilist[[inds[2L]]] ilist[[i]] <- c(ids1, ids2) mat[[i]] <- cbind(rep.int(ids1, rep.int(length(ids2), length(ids1))), rep.int(ids2, length(ids1))) xlist[[i]] <- x[mat[[i]]] wlist[[i]] <- w[mat[[i]]] } values <- pava(xlist, wlist, fitter) for(i in seq_len(n - 1L)) out[mat[[i]]] <- values[i] rownames(out) <- y$labels u <- as.cl_ultrametric(out + t(out)) if(!is.null(distfun)) attr(u, "meta") <- list(objval = distfun(as.dist(x), u, as.dist(w))) u } ### ** .handle_weights_for_ultrametric_target_fitters .handle_weights_for_ultrametric_target_fitters <- function(weights, x) { ## Handle weights for the ultrametric target fitters. ## This is somewhat tricky ... if(is.matrix(weights)) { if(any(dim(weights) != attr(x, "Size"))) stop("Argument 'weights' must be compatible with 'x'.") } else weights <- as.matrix(.dist_from_vector(rep_len(weights, length(x)))) if(any(weights < 0)) stop("Argument 'weights' has negative elements.") if(!any(weights > 0)) stop("Argument 'weights' has no positive elements.") weights } ### l1_fit_ultrametric l1_fit_ultrametric <- function(x, method = c("SUMT", "IRIP"), weights = 1, control = list()) { if(inherits(x, "cl_ultrametric")) return(.cl_ultrametric_with_meta_added(x, list(objval = 0))) if(!inherits(x, "dist")) x <- as.dist(x) ## Catch some special cases right away. if(attr(x, "Size") <= 2L) return(.cl_ultrametric_with_meta_added(as.cl_ultrametric(x), list(objval = 0))) if(.non_ultrametricity(x, max = TRUE) == 0) return(.cl_ultrametric_with_meta_added(as.cl_ultrametric(x), list(objval = 0))) ## Handle weights. ## This is somewhat tricky ... 
if(is.matrix(weights)) { weights <- as.dist(weights) if(length(weights) != length(x)) stop("Argument 'weights' must be compatible with 'x'.") } else weights <- rep_len(weights, length(x)) if(any(weights < 0)) stop("Argument 'weights' has negative elements.") if(!any(weights > 0)) stop("Argument 'weights' has no positive elements.") method <- match.arg(method) switch(method, SUMT = .l1_fit_ultrametric_by_SUMT(x, weights, control), IRIP = .l1_fit_ultrametric_by_IRIP(x, weights, control)) } ### ** .l1_fit_ultrametric_by_SUMT .l1_fit_ultrametric_by_SUMT <- function(x, weights = 1, control = list()) { ## Try a SUMT with "pseudo-gradients". w <- weights / sum(weights) ## Control parameters: ## gradient, gradient <- control$gradient if(is.null(gradient)) gradient <- TRUE ## nruns, nruns <- control$nruns ## start. start <- control$start ## Handle start values and number of runs. if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. start <- list(start) } } else if(is.null(nruns)) { ## Use nruns only if start is not given. nruns <- 1L } ## For the time being, use a simple minimizer. n <- attr(x, "Size") labels <- attr(x, "Labels") L <- function(d) sum(w * abs(d - x)) P <- .make_penalty_function_ultrametric(n) if(gradient) { grad_L <- function(d) w * sign(d - x) grad_P <- .make_penalty_gradient_ultrametric(n) } else grad_L <- grad_P <- NULL if(is.null(start)) { ## Initialize by "random shaking". Use sd() for simplicity. start <- replicate(nruns, x + rnorm(length(x), sd = sd(x) / sqrt(3)), simplify = FALSE) } ## And now ... 
out <- sumt(start, L, P, grad_L, grad_P, method = control$method, eps = control$eps, q = control$q, verbose = control$verbose, control = as.list(control$control)) d <- .ultrametrify(out$x) meta <- list(objval = L(d)) .cl_ultrametric_from_veclh(d, n, labels, meta) } ### ** .l1_fit_ultrametric_by_IRIP .l1_fit_ultrametric_by_IRIP <- function(x, weights = 1, control = list()) { ## An attempt of implementing "Iteratively Reweighted Iterative ## Projection" as described in Smith (2000, 2001), Journal of ## Classification. Note that this suggests using the Iterative ## Projection of Hubert and Arabie (1995), which we cannot as we ## have not (yet?) implemented this for the weighted case. Hence, ## we use our SUMT least squares ultrametric fitter instead. ## ## However, we never got this to converge properly ... w <- weights / sum(weights) ## Control parameters: ## MIN, MIN <- control$MIN if(is.null(MIN)) MIN <- 1e-3 ## (A rather small cut-off which worked best in the cases we tried.) ## eps, eps <- control$eps if(is.null(eps)) eps <- 1e-6 ## maxiter, maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 100L ## reltol, reltol <- control$reltol if(is.null(reltol)) reltol <- 1e-6 ## start, start <- control$start ## verbose. verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") n <- attr(x, "Size") labels <- attr(x, "Labels") L <- function(d) sum(w * abs(x - d)) ## Initialize by "random shaking" as for the L2 SUMT, but perhaps we ## should not do this? [Or do it differently?] u <- if(is.null(start)) x + rnorm(length(x), sd = sd(x) / 3) else start ## (No multiple runs for the time being.) L_new <- L(u) iter <- 1L while(iter <= maxiter) { if(verbose) message(gettextf("Outer iteration: %d", iter)) L_old <- L_new u_old <- u weights <- w / pmax(abs(u - x), MIN) u <- .ls_fit_ultrametric_by_SUMT(x, weights = weights, control = as.list(control$control)) ## Use some control arguments lateron ... 
L_new <- L(u) delta_L <- L_old - L_new delta_u <- max(abs(u_old - u)) if(verbose) message(gettextf("Change: u: %g L: %g", delta_u, delta_L)) if((delta_u < eps) || ((delta_L >= 0) && (delta_L <= reltol * (abs(L_old) + reltol)))) break iter <- iter + 1L } d <- .ultrametrify(u) meta <- list(objval = L(d), status = as.integer(iter == maxiter)) .cl_ultrametric_from_veclh(d, n, labels, meta) } ## * ls_fit_sum_of_ultrametrics ls_fit_sum_of_ultrametrics <- function(x, nterms = 1, weights = 1, control = list()) { if(!inherits(x, "dist")) x <- as.dist(x) ## We could catch some special cases right away: if x already is an ## ultrametric then the fit would be a list with x and nterms - 1 ## zero ultrametrics ... ## Control parameters: ## eps, eps <- control$eps if(is.null(eps)) eps <- 1e-6 ## maxiter, maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 100L ## method, method <- control$method if(is.null(method)) method <- "SUMT" ## reltol, reltol <- control$reltol if(is.null(reltol)) reltol <- 1e-6 ## verbose. verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") ## Do this at last. control <- as.list(control$control) ## And be nice ... if(identical(method, "SUMT") && is.null(control$nruns)) control$nruns <- 10L L <- function(u) sum((x - rowSums(matrix(unlist(u), ncol = nterms))) ^ 2) ## Init. u <- rep.int(list(as.cl_ultrametric(0 * x)), nterms) L_new <- L(u) ## Loop. iter <- 1L while(iter <= maxiter) { if(verbose) message(gettextf("Iteration: %d", iter)) L_old <- L_new delta_u <- 0 for(i in seq_len(nterms)) { if(verbose) message(gettextf("Term: %d", i)) u_old <- u[[i]] ## Compute residual r = x - \sum_{j: j \ne i} u(j) r <- x - rowSums(matrix(unlist(u[-i]), ncol = nterms - 1L)) ## Fit residual. u[[i]] <- ls_fit_ultrametric(r, method, weights, control) ## Accumulate change. 
change <- max(abs(u[[i]] - u_old)) if(verbose) message(gettextf("Change: %g", change)) delta_u <- max(delta_u, change) } L_new <- L(u) delta_L <- L_old - L_new if(verbose) message(gettextf("Overall change: u: %g L: %g\n", delta_u, delta_L)) if((delta_u < eps) || ((delta_L >= 0) && (delta_L <= reltol * (abs(L_old) + reltol)))) break iter <- iter + 1L } .structure(u, objval = L_new, status = as.integer(iter == maxiter)) } ### * as.dist.hclust ## Using hclust() with methods 'median' or 'centroid' typically gives ## reversals and hence not valid hierarchies, i.e., distances which do ## not satisfy the ultrametricity conditions. The distances can be ## obtained via cophenetic(), but ls_fit_ultrametric() prefers using ## as.dist() [as arguably more appropriate] which in turn can be made to ## "work" by providing as.matrix() methods [bypassing the need to handle ## the extra arguments 'diag' and 'upper' for as.dist()]. as.matrix.hclust <- function(x, ...) as.matrix(cophenetic(x)) ### * .non_ultrametricity .non_ultrametricity <- function(x, max = FALSE) { if(!is.matrix(x)) x <- .symmetric_matrix_from_veclh(x) .C(C_deviation_from_ultrametricity, as.double(x), as.integer(nrow(x)), fn = double(1L), as.logical(max))$fn } ### * .cl_ultrametric_from_classes .cl_ultrametric_from_classes <- function(x) { ## Compute an ultrametric from a hierarchy of classes (i.e., an ## n-tree). labels <- attr(x, "labels") ## Ensure we have no duplicates. x <- x[!duplicated(x)] ## .get_classes_in_hierarchy() orders according to cardinality, but ## a consensus method may forget to ... x[] <- x[order(lengths(x))] ## Get the objects (unique codes in the classes). objects <- sort(unique(unlist(x))) ## (Could also look at the classes of length 1.) ## Recursively compute the heights of the classes. heights <- double(length = length(x)) for(i in which(lengths(x) > 1L)) { ## Find the relevant classes. 
j <- sapply(x[seq_len(i - 1L)], function(s) all(s %in% x[[i]])) heights[i] <- max(heights[j]) + 1 } ## Next, create an incidence matrix (objects by classes). incidences <- sapply(x, function(s) objects %in% s) ## Now that we have the heights and incidences, we can compute ## distances, using the idea that ## distance(i, j) = min(height(A): A contains i and j) n <- length(objects) d <- matrix(0, n, n) for(i in objects) d[i, ] <- heights[apply((rep(incidences[i, ], each = n) & incidences), 1L, which.max)] dimnames(d) <- rep.int(list(labels), 2L) as.cl_ultrametric(d) } ### * .cl_ultrametric_with_meta_added .cl_ultrametric_with_meta_added <- function(x, meta = NULL) { ## An alternative to adding a 'meta' argument to cl_ultrametric(). attr(x, "meta") <- meta x } ### .ultrametrify .ultrametrify <- function(x) { ## Ensure ultrametricity. ## In earlier versions, function ## .cl_ultrametric_from_ultrametric_approximation() tried rounding ## to non-ultrametric significance, using ## round(x, floor(abs(log10(.non_ultrametricity(x, max = TRUE))))) ## which is nice but does not guarantee ultrametricity (and may ## result in poorer approximations than what we use now). ## Hence, let us use single linkage hierarchical clustering which ## gives the best dominated ultrametric approximation. cophenetic(hclust(.dist_from_vector(x), "single")) } ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/sumt.R0000644000175100001440000000722111304023136012527 0ustar hornikuserssumt <- function(x0, L, P, grad_L = NULL, grad_P = NULL, method = NULL, eps = NULL, q = NULL, verbose = NULL, control = list()) { ## Default values: make it nice for others to call us. 
if(is.null(eps)) eps <- sqrt(.Machine$double.eps) if(is.null(method)) method <- "CG" if(is.null(q)) q <- 10 if(is.null(verbose)) verbose <- getOption("verbose") Phi <- function(rho, x) L(x) + rho * P(x) if(is.null(grad_L) || is.null(grad_P)) { make_Phi <- function(rho) { function(x) Phi(rho, x) } make_grad_Phi <- function(rho) NULL } else { grad_Phi <- function(rho, x) grad_L(x) + rho * grad_P(x) make_Phi <- if(method == "nlm") { function(rho) { function(x) .structure(Phi(rho, x), gradient = grad_Phi(rho, x)) } } else function(rho) { function(x) Phi(rho, x) } make_grad_Phi <- function(rho) { function(x) grad_Phi(rho, x) } } ## ## For the penalized minimization, the Newton-type nlm() may be ## computationally infeasible (although it works much faster for ## fitting ultrametrics to the Phonemes data). ## De Soete recommends using Conjugate Gradients. ## We provide a simple choice: by default, optim(method = "CG") is ## used. If method is non-null and not "nlm", we use optim() with ## this method. In both cases, control gives the control parameters ## for optim(). ## If method is "nlm", nlm() is used, in which case control is ## ignored. Note that we call nlm() with checking analyticals ## turned off, as in some cases (e.g. when fitting ultrametrics) the ## penalty function is not even continuous ... optimize_with_penalty <- if(method == "nlm") function(rho, x) nlm(make_Phi(rho), x, check.analyticals = FALSE) $ estimate else { function(rho, x) optim(x, make_Phi(rho), gr = make_grad_Phi(rho), method = method, control = control) $ par } ## Note also that currently we do not check whether optimization was ## "successful" ... ## ## We currently require that x0 be a *list* of start values, the ## length of which gives the number of SUMT runs. But as always, ## let's be nice to users and developers, just in case ... 
if(!is.list(x0)) x0 <- list(x0) v_opt <- Inf x_opt <- NULL rho_opt <- NULL for(run in seq_along(x0)) { if(verbose) message(gettextf("SUMT run: %d", run)) x <- x0[[run]] ## ## Better upper/lower bounds for rho? rho <- max(L(x), 0.00001) / max(P(x), 0.00001) ## if(verbose) message(gettextf("Iteration: 0 Rho: %g P: %g", rho, P(x))) iter <- 1L repeat { ## ## Shouldnt't we also have maxiter, just in case ...? ## if(verbose) message(gettextf("Iteration: %d Rho: %g P: %g", iter, rho, P(x))) x_old <- x x <- optimize_with_penalty(rho, x) if(max(abs(x_old - x)) < eps) break iter <- iter + 1L rho <- q * rho } v <- Phi(rho, x) if(v < v_opt) { v_opt <- v x_opt <- x rho_opt <- rho } if(verbose) message(gettextf("Minimum: %g", v_opt)) } .structure(list(x = x_opt, L = L(x_opt), P = P(x_opt), rho = rho_opt, call = match.call()), class = "sumt") } clue/R/pclust.R0000644000175100001440000004307013036513600013057 0ustar hornikusers### * cl_pclust cl_pclust <- function(x, k, method = NULL, m = 1, weights = 1, control = list()) { ## Partition a cluster ensemble x into (at most) k classes by ## minimizing ## \sum_b \sum_j w_b u_{bj}^m d(x_b, p_j) ^ e ## for "suitable" prototypes p_1, ..., p_k, where 1 <= m < \infty, ## with 1 corresponding to hard (secondary) partitions, and d a ## dissimilarity measure (such as Euclidean dissimilarity of ## partitions or hierarchies). ## ## The algorithm works whenever there is a consensus method for ## solving ## \sum_b u_{bj}^m d(x_b, p) ^ e => \min_p ## ## As we refer to consensus methods by their *name* (e.g., 'HBH'), ## we rely on the registration mechanism (set_cl_consensus_method()) ## to provide the required information about d and e. clusterings <- as.cl_ensemble(x) type <- .cl_ensemble_type(clusterings) if(type == "partition") { ## Canonicalize by turning into an ensemble of partitions ## represented by membership matrices with the same (minimal) ## number of columns. 
        ## Canonicalize by turning into memberships with a common
        ## (minimal) number of columns, so that all ensemble members
        ## are directly comparable.
        memberships <-
            lapply(clusterings, cl_membership,
                   max(sapply(clusterings, n_of_classes)))
        clusterings <-
            cl_ensemble(list = lapply(memberships, as.cl_partition))
    }
    if(!inherits(method, "cl_consensus_method")) {
        ## Get required information on d and e from the registry.
        if(is.null(method))
            method <- .cl_consensus_method_default(type)
        method <- get_cl_consensus_method(method, type)
        ## Note that this avoids registry lookup in subsequent calls to
        ## cl_consensus().
        if(is.null(method$exponent))
            stop("No information on exponent in consensus method used.")
        e <- method$exponent
        if(is.null(method$dissimilarity))
            stop("No information on dissimilarity in consensus method used.")
        ## Dissimilarity d as registered for the consensus method used.
        d <- function(x, y = NULL)
            cl_dissimilarity(x, y, method = method$dissimilarity)
    }
    family <- pclust_family(D = d, C = method$definition, e = e)
    out <- pclust(x, k, family, m, weights, control)
    ## Massage the results a bit: add silhouette and validity
    ## information computed from the pairwise dissimilarities.
    dissimilarities <- as.matrix(d(clusterings) ^ e)
    out$call <- match.call()
    out <- .structure(c(out,
                        list(silhouette =
                             silhouette(out$cluster,
                                        dmatrix = dissimilarities),
                             validity =
                             cl_validity(cl_membership(out),
                                         dissimilarities),
                             ##
                             ## Information about d and e is also in the
                             ## family returned, of course.  Trying to be
                             ## nice to users by "directly" providing d
                             ## and e is currently of limited usefulness
                             ## as the pclust representation is not
                             ## directly available to users.
                             d = d,
                             e = e
                             ##
                             )),
                      class = unique(c("cl_pclust", class(out))))
    as.cl_partition(out)
}

## Print method for cl_pclust objects: describe the (hard or soft)
## partition of the ensemble and show the validity measures.
print.cl_pclust <-
function(x, ...)
{
    txt <- if(x$m == 1)
        gettextf("A hard partition of a cluster ensemble with %d elements into %d classes.",
                 n_of_objects(x), n_of_classes(x))
    else
        gettextf("A soft partition (degree m = %f) of a cluster ensemble with %d elements into %d classes.",
                 x$m, n_of_objects(x), n_of_classes(x))
    writeLines(strwrap(txt))
    NextMethod("print", x, header = FALSE)
    print(x$validity, ...)
    invisible(x)
}

### * pclust

## A general purpose alternating optimization algorithm for
## prototype-based partitioning of data x into (at most) k classes,
## driven by a pclust 'family' object (see pclust_family()).
pclust <-
function(x, k, family, m = 1, weights = 1, control = list())
{
    ## For now, assume family specifies three functions:
    ## * A dissimilarity function D() for data and prototypes.
    ## * A consensus function C() for data, weights and control.
    ## * An init function init() of data and k giving an initial object
    ##   of k prototypes.
    ##
    ## We use k as the second argument as this seems to be common
    ## practice for partitioning algorithms.
    ##
    ## We assume that consensus functions can all handle WEIGHTS
    ## (formals: x, weights, control; only used positionally).
    ##
    ## We now allow for arbitrary representations/objects of prototypes.
    ## What is needed are functions to modify a *single* prototype and
    ## subset the prototypes.  By default, list and matrix (with the
    ## usual convention that rows are "objects") representations are
    ## supported.  Otherwise, the family needs to provide suitable
    ## .modify() and .subset() functions.
    ## The approach relies on having the initializer of the family
    ## (init()) return an appropriate object of prototypes.
    ## It would be possible to have default initializers as well to
    ## randomly subset the data (i.e., select elements of lists or rows
    ## of matrices, respectively).
    ##
    ## The 'prototypes' are not necessarily objects of the same kind as
    ## the data objects.  Therefore, D() is really a 2-argument
    ## cross-dissimilarity function.
    ## It would also be useful to have a way of computing the pairwise
    ## dissimilarities between objects: but this is something different
    ## from D() if objects and prototypes are not of the same kind.
    ## A "clean" solution could consist in specifying the family either
    ## via a (non-symmetric) cross-dissimilarity function X(), or a
    ## symmetric D() which when called with a single argument gives the
    ## pairwise object dissimilarities.
    ## I.e.,
    ##   pclust_family(D = NULL, C, init = NULL, X = NULL, ......)
    ## using
    ## * If both D and X are not given => TROUBLE.
    ## * If only D is given: use for X as well.
    ## * If only X is given: only use as such.
    ## Something for the future ...
    ##
    ## If people have code for computing cross-dissimilarities for the
    ## data and a *single* prototype (say, xd()), they can easily wrap
    ## into what is needed using
    ##   t(sapply(prototypes, function(p) xd(x, p)))
    ## Assuming symmetry of the dissimilarity, they could also do
    ##   t(sapply(prototypes, xd, x))

    ## Perhaps check whether 'family' is a feasible/suitable pclust
    ## family (object).
    D <- family$D
    C <- family$C
    e <- family$e
    .modify <- family$.modify
    .subset <- family$.subset

    ## Control parameters (with defaults).
    maxiter <- control$maxiter
    if(is.null(maxiter))
        maxiter <- 100L
    nruns <- control$nruns
    reltol <- control$reltol
    if(is.null(reltol))
        reltol <- sqrt(.Machine$double.eps)
    start <- control$start
    verbose <- control$verbose
    if(is.null(verbose))
        verbose <- getOption("verbose")
    ## Do this at last ...
    control <- as.list(control$control)

    ## Handle start values and number of runs.
    if(!is.null(start)) {
        if(!is.list(start)) {
            ## Be nice to users.
            start <- list(start)
        }
        nruns <- length(start)
    }
    else {
        if(is.null(nruns)) {
            ## Use nruns only if start is not given.
            nruns <- 1L
        }
        start <- replicate(nruns, family$init(x, k), simplify = FALSE)
    }

    ## Initialize.
    ## We need to do this here because it is (currently) the only way to
    ## figure out the number B of objects to be partitioned (which is
    ## needed for getting the object weights to the right length).
    prototypes <- start[[1L]]
    dissimilarities <- D(x, prototypes) ^ e
    B <- NROW(dissimilarities)

    ## Also try to figure out (if necessary) how to modify a single
    ## prototype and to subset the prototypes.  Note that we can only
    ## check this *after* prototypes were obtained (and not when the
    ## family object is created).
    if(is.null(.modify)) {
        if(is.list(prototypes))
            .modify <- function(x, i, value) {
                x[[i]] <- value
                x
            }
        else if(is.matrix(prototypes))
            .modify <- function(x, i, value) {
                x[i, ] <- value
                x
            }
        else
            stop("Cannot determine how to modify prototypes.")
    }
    ## NOTE(review): formals() returns a pairlist, which is compared
    ## here against a character vector -- verify this validation really
    ## accepts conforming user-supplied .modify()/.subset() functions.
    else if(!is.function(.modify) ||
            !identical(formals(args(.modify)), c("x", "i", "value")))
        stop("Invalid function to modify prototypes.")
    if(is.null(.subset)) {
        if(is.list(prototypes))
            .subset <- `[`
        else if(is.matrix(prototypes))
            .subset <- function(x, i)
                x[i, , drop = FALSE]
        else
            stop("Cannot determine how to subset prototypes.")
    }
    else if(!is.function(.subset) ||
            !identical(formals(args(.subset)), c("x", "i")))
        stop("Invalid function to subset prototypes.")

    ## Object weights: recycled to length B, must be non-negative with
    ## at least one positive entry.
    weights <- rep_len(weights, B)
    if(any(weights < 0))
        stop("Argument 'weights' has negative elements.")
    if(!any(weights > 0))
        stop("Argument 'weights' has no positive elements.")

    ## A little helper: unit weight vector concentrated on object i.
    .make_unit_weights <- function(B, i) {
        out <- double(B)
        out[i] <- 1
        out
    }

    if(m == 1) {
        ## Hard partitions.
        ## Criterion value: (weighted) sum of the dissimilarities of
        ## each object to its assigned prototype.
        value <- if(all(weights == 1))
            function(dissimilarities, ids)
                sum(.one_entry_per_column(dissimilarities, ids))
        else
            function(dissimilarities, ids)
                sum(weights * .one_entry_per_column(dissimilarities,
                                                    ids))
        opt_value <- Inf
        run <- 1L
        if(verbose && (nruns > 1L))
            message(gettextf("Pclust run: %d", run))
        repeat {
            class_ids <- max.col( - dissimilarities )
            old_value <- value(dissimilarities, class_ids)
            if(verbose)
                message(gettextf("Iteration: 0 *** value: %g",
                                 old_value))
            iter <- 1L
            while(iter <= maxiter) {
                ## Update the prototypes of the classes in use as the
                ## consensus of their members.
                class_ids_used <- unique(class_ids)
                for(j in class_ids_used)
                    prototypes <-
                        .modify(prototypes, j,
                                C(x, weights * (class_ids %in% j),
                                  control))
                dissimilarities <- D(x, prototypes) ^ e
                class_ids <- max.col( - dissimilarities )
                ## Try avoiding degenerate solutions.
                if(length(class_ids_used) < k) {
                    ## Find the k - l largest
                    ## object-to-assigned-prototype dissimilarities.
                    o <- order(.one_entry_per_column(dissimilarities,
                                                     class_ids),
                               decreasing = TRUE)
                    ## Find and recompute unused prototypes.
                    unused <- setdiff(seq_len(k), class_ids_used)
                    for(j in seq_along(unused))
                        prototypes <-
                            .modify(prototypes, unused[j],
                                    C(x, .make_unit_weights(B, o[j]),
                                      control))
                    dissimilarities[, unused] <-
                        D(x, .subset(prototypes, unused)) ^ e
                    class_ids <- max.col( - dissimilarities )
                    ## For the time being, do not retry in case the
                    ## solution is still degenerate.
                }
                new_value <- value(dissimilarities, class_ids)
                if(verbose)
                    message(gettextf("Iteration: %d *** value: %g",
                                     iter, new_value))
                ## Relative convergence criterion.
                if(abs(old_value - new_value)
                   < reltol * (abs(old_value) + reltol))
                    break
                old_value <- new_value
                iter <- iter + 1L
            }
            ## Keep the best solution over all runs.
            if(new_value < opt_value) {
                converged <- (iter <= maxiter)
                opt_value <- new_value
                opt_class_ids <- class_ids
                opt_prototypes <- prototypes
            }
            if(run >= nruns) break
            run <- run + 1L
            if(verbose)
                message(gettextf("Pclust run: %d", run))
            prototypes <- start[[run]]
            dissimilarities <- D(x, prototypes) ^ e
        }
        ## We should really have a suitable "sparse matrix" class for
        ## representing the memberships of hard partitions.  For now:
        opt_u <- NULL
        ## opt_u <- matrix(0, B, k)
        ## opt_u[cbind(seq_len(B), opt_class_ids)] <- 1
    }
    else {
        ## Soft partitions.
        ## Criterion value: (weighted) sum of u^m times the
        ## dissimilarities.
        value <- if(all(weights == 1))
            function(dissimilarities, u)
                sum(u ^ m * dissimilarities)
        else
            function(dissimilarities, u)
                sum(weights * u ^ m * dissimilarities)
        opt_value <- Inf
        run <- 1L
        if(verbose && (nruns > 1L))
            message(gettextf("Pclust run: %d", run))
        repeat {
            u <- .memberships_from_cross_dissimilarities(dissimilarities,
                                                         m)
            old_value <- value(dissimilarities, u)
            if(verbose)
                message(gettextf("Iteration: 0 *** value: %g",
                                 old_value))
            iter <- 1L
            while(iter <= maxiter) {
                ## Update the prototypes.
                ## This amounts to solving, for each j:
                ##   \sum_b w_b u_{bj}^m D(x_b, p) ^ e => \min_p
                ## I.e., p_j is the *weighted* consensus of the x_b with
                ## corresponding weights u_{bj}^m.
                for(j in seq_len(k)) {
                    prototypes <-
                        .modify(prototypes, j,
                                C(x, weights * u[, j] ^ m, control))
                }
                ## Update u.
                dissimilarities <- D(x, prototypes) ^ e
                u <- .memberships_from_cross_dissimilarities(dissimilarities,
                                                             m)
                new_value <- value(dissimilarities, u)
                if(verbose)
                    message(gettextf("Iteration: %d *** value: %g",
                                     iter, new_value))
                ## Relative convergence criterion.
                if(abs(old_value - new_value)
                   < reltol * (abs(old_value) + reltol))
                    break
                old_value <- new_value
                iter <- iter + 1L
            }
            ## Keep the best solution over all runs.
            if(new_value < opt_value) {
                converged <- (iter <= maxiter)
                opt_value <- new_value
                opt_prototypes <- prototypes
                opt_u <- u
            }
            if(run >= nruns) break
            run <- run + 1L
            if(verbose)
                message(gettextf("Pclust run: %d", run))
            prototypes <- start[[run]]
            dissimilarities <- D(x, prototypes) ^ e
        }
        opt_class_ids <- max.col(opt_u)
        ## Ensure that opt_u is a stochastic matrix.
        opt_u <- pmax(opt_u, 0)
        opt_u <- opt_u / rowSums(opt_u)
        rownames(opt_u) <- rownames(dissimilarities)
        opt_u <- cl_membership(as.cl_membership(opt_u), k)
    }

    names(opt_class_ids) <- rownames(dissimilarities)
    pclust_object(prototypes = opt_prototypes,
                  membership = opt_u,
                  cluster = opt_class_ids,
                  family = family,
                  m = m,
                  value = opt_value,
                  call = match.call(),
                  attributes = list("converged" = converged))
}

## Print method for pclust objects: describe the partition and show
## class ids (hard) or memberships plus closest hard partition (soft).
print.pclust <-
function(x, header = TRUE, ...)
{
    is_hard <- (x$m == 1)
    class_ids <- cl_class_ids(x)
    if(header) {
        txt <- if(is_hard)
            gettextf("A hard partition of %d objects into %d classes.",
                     length(class_ids), n_of_classes(x))
        else
            gettextf("A soft partition (degree m = %f) of %d objects into %d classes.",
                     x$m, length(class_ids), n_of_classes(x))
        writeLines(strwrap(txt))
    }
    if(is_hard) {
        print(class_ids, ...)
    } else {
        writeLines("Class memberships:")
        print(cl_membership(x), ...)
        writeLines("Class ids of closest hard partition:")
        print(unclass(class_ids), ...)
    }
    invisible(x)
}

### * pclust_family

## Constructor for pclust family objects bundling the dissimilarity
## D(), consensus C(), initializer init(), exponent e, and optional
## prototype .modify()/.subset() functions used by pclust().
pclust_family <-
function(D, C, init = NULL, description = NULL, e = 1,
         .modify = NULL, .subset = NULL)
{
    ## Add checking formals (lengths) eventually ...
if(is.null(init)) { ## Works for list representations ... init <- function(x, k) sample(x, k) } .structure(list(description = description, D = D, C = C, init = init, e = e, .modify = .modify, .subset = .subset), class = "pclust_family") } ### * pclust_object pclust_object <- function(prototypes, membership, cluster, family, m = 1, value, ..., classes = NULL, attributes = NULL) { out <- c(list(prototypes = prototypes, membership = membership, cluster = cluster, family = family, m = m, value = value), list(...)) attributes(out) <- c(attributes(out), attributes) classes <- unique(as.character(classes)) class(out) <- c(classes[classes != "pclust"], "pclust") out } ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/tabulate.R0000644000175100001440000000061411304023136013337 0ustar hornikuserscl_tabulate <- function(x) { values <- unique(x) counts <- tabulate(match(x, values)) ## Still a bit tricky to create a data frame with a list "column" ## which is not protected by I(); otherwise, we oculd simply do ## data.frame(values = I(values), counts = counts) out <- data.frame(values = double(length(values)), counts = counts) out$values <- values out } clue/R/margin.R0000644000175100001440000000060511304023136013013 0ustar hornikuserscl_margin <- function(x) { if(is.cl_hard_partition(x)) out <- rep.int(1, n_of_objects(x)) else if(is.cl_partition(x)) { x <- cl_membership(x) i <- seq_len(nrow(x)) j <- cbind(i, max.col(x)) out <- x[j] x[j] <- 0 out <- out - x[cbind(i, max.col(x))] } else stop("Argument 'x' must be a partition.") out } clue/R/AAA.R0000644000175100001440000000037411304023136012123 0ustar hornikusers## Things which must come first in the package code. ### * Internal utilities. .false <- function(x) FALSE .true <- function(x) TRUE ## A fast version of structure(). .structure <- function(x, ...) 
`attributes<-`(x, c(attributes(x), list(...))) clue/R/agreement.R0000644000175100001440000002713313435044376013532 0ustar hornikusers### * cl_agreement cl_agreement <- function(x, y = NULL, method = "euclidean", ...) { ## ## This code is repeated from cl_dissimilarity(), mutatis mutandis. ## Not really a big surprise ... ## x <- as.cl_ensemble(x) is_partition_ensemble <- (inherits(x, "cl_partition_ensemble") || all(vapply(x, .has_object_memberships, NA))) ## Be nice. if(is.character(y) || is.function(y)) { method <- y y <- NULL } if(is.function(method)) method_name <- "user-defined method" else { if(!inherits(method, "cl_agreement_method")) { ## Get the method definition and description from the ## registry. type <- ifelse(is_partition_ensemble, "partition", "hierarchy") method <- get_cl_agreement_method(method, type) } method_name <- method$description method <- method$definition } if(!is.null(y)) { y <- as.cl_ensemble(y) is_partition_ensemble_y <- (inherits(y, "cl_partition_ensemble") || all(vapply(x, .has_object_memberships, NA))) if(!identical(is_partition_ensemble, is_partition_ensemble_y)) stop("Cannot mix partitions and hierarchies.") if(n_of_objects(x) != n_of_objects(y)) stop("All clusterings must have the same number of objects.") ## Build a cross-proximity object of cross-agreements. d <- matrix(0, length(x), length(y)) for(j in seq_along(y)) d[, j] <- sapply(x, method, y[[j]], ...) dimnames(d) <- list(names(x), names(y)) return(cl_cross_proximity(d, method_name, class = "cl_cross_agreement")) } ## Otherwise, build a proximity object of dissimilarities. n <- length(x) d <- vector("list", length = n - 1L) ind <- seq_len(n) while(length(ind) > 1L) { j <- ind[1L] ind <- ind[-1L] d[[j]] <- sapply(x[ind], method, x[[j]], ...) } ## ## We assume that self-agreements are always one ... 
## cl_proximity(unlist(d), method_name, labels = names(x), self = rep.int(1, length(x)), size = n, class = "cl_agreement") } ### ** .cl_agreement_partition_euclidean .cl_agreement_partition_euclidean <- function(x, y) { ## ## Upper bound for maximal dissimilarity, maybe improve eventually. d_max <- sqrt(2 * n_of_objects(x)) ## 1 - .cl_dissimilarity_partition_euclidean(x, y) / d_max } ### ** .cl_agreement_partition_manhattan .cl_agreement_partition_manhattan <- function(x, y) { ## ## Upper bound for maximal dissimilarity, maybe improve eventually. d_max <- 2 * n_of_objects(x) ## 1 - .cl_dissimilarity_partition_manhattan(x, y) / d_max } ### ** .cl_agreement_partition_Rand .cl_agreement_partition_Rand <- function(x, y) { n <- n_of_objects(x) ## Handle soft partitions using the corresponding hard ones. ## (At least, for the time being.) x <- table(cl_class_ids(x), cl_class_ids(y)) ## ## The number A of concordant pairs is given by ## A = choose(n,2) + \sum_{i,j} x_{ij}^2 ## - (1/2) * (\sum_i x_{i.}^2 + \sum_j x_{.j}^2) ## = choose(n,2) + 2 \sum_{i,j} choose(x_{ij},2) ## - (\sum_i choose(x_{i.},2) + \sum_j choose(x_{.j},2) ## with the first version certainly much faster to compute. ## 1 + (sum(x^2) - (sum(rowSums(x)^2) + sum(colSums(x)^2)) / 2) / choose(n, 2) } ### ** .cl_agreement_partition_cRand .cl_agreement_partition_cRand <- function(x, y) { if(!is.cl_hard_partition(x) || !is.cl_hard_partition(y)) stop("Can only handle hard partitions.") n <- n_of_objects(x) x <- table(cl_class_ids(x), cl_class_ids(y)) ## ## The basic formula is ## (Sxy - E) / ((Sx. + S.y) / 2 - E) ## where ## Sxy = \sum_{i,j} choose(x_{ij}, 2) ## Sx. = \sum_i choose(x_{i.}, 2) ## S.y = \sum_j choose(x_{.j}, 2) ## and ## E = Sx. * S.y / choose(n, 2) ## We replace the bincoefs by the corresponding sums of squares, ## getting ## (Txy - F) / ((Tx. + T.y) / 2 - F) ## where ## Txy = \sum_{i,j} x_{ij}^2 - n ## Tx. = \sum_i x_{i.}^2 - n ## T.y = \sum_j x_{.j}^2 - n ## and ## F = Tx. 
* T.y / (n^2 - n) ## Txy <- sum(x ^ 2) - n Tx. <- sum(rowSums(x) ^ 2) - n T.y <- sum(colSums(x) ^ 2) - n F <- Tx. * T.y / (n ^ 2 - n) (Txy - F) / ((Tx. + T.y) / 2 - F) } ### ** .cl_agreement_partition_NMI .cl_agreement_partition_NMI <- function(x, y) { if(!is.cl_hard_partition(x) || !is.cl_hard_partition(y)) stop("Can only handle hard partitions.") x <- table(cl_class_ids(x), cl_class_ids(y)) x <- x / sum(x) m_x <- rowSums(x) m_y <- colSums(x) y <- outer(m_x, m_y) i <- which((x > 0) & (y > 0)) out <- sum(x[i] * log(x[i] / y[i])) e_x <- sum(m_x * log(ifelse(m_x > 0, m_x, 1))) e_y <- sum(m_y * log(ifelse(m_y > 0, m_y, 1))) out / sqrt(e_x * e_y) } ### ** .cl_agreement_partition_KP .cl_agreement_partition_KP <- function(x, y) { ## Agreement measure due to Katz & Powell (1953, Psychometrika), see ## also Messatfa (1992, Journal of Classification). n <- n_of_objects(x) ## Handle soft partitions using the corresponding hard ones. ## (At least, for the time being.) x <- table(cl_class_ids(x), cl_class_ids(y)) A_xy <- sum(x ^ 2) A_x. <- sum(rowSums(x) ^ 2) A_.y <- sum(colSums(x) ^ 2) (n^2 * A_xy - A_x. * A_.y) / sqrt(A_x. * (n^2 - A_x.) * A_.y * (n^2 - A_.y)) } ### ** .cl_agreement_partition_angle .cl_agreement_partition_angle <- function(x, y) { ## Maximal angle between the matched memberships. k <- max(n_of_classes(x), n_of_classes(y)) M_x <- cl_membership(x, k) M_y <- cl_membership(y, k) ## Match classes from conforming memberships. ind <- solve_LSAP(crossprod(M_x, M_y), maximum = TRUE) sum(M_x * M_y[, ind]) / sqrt(sum(M_x ^ 2) * sum(M_y ^ 2)) } ### ** .cl_agreement_partition_diag .cl_agreement_partition_diag <- function(x, y) { ## Maximal co-classification rate. k <- max(n_of_classes(x), n_of_classes(y)) M_x <- cl_membership(x, k) M_y <- cl_membership(y, k) ## Match classes from conforming memberships. 
ind <- solve_LSAP(crossprod(M_x, M_y), maximum = TRUE) sum(M_x * M_y[, ind]) / n_of_objects(x) } ### ** .cl_agreement_partition_FM .cl_agreement_partition_FM <- function(x, y) { ## Fowlkes-Mallows index. n <- n_of_objects(x) ## Handle soft partitions using the corresponding hard ones. ## (At least, for the time being.) x <- table(cl_class_ids(x), cl_class_ids(y)) (sum(x ^ 2) - n) / sqrt((sum(rowSums(x) ^ 2) - n) * (sum(colSums(x) ^ 2) - n)) } ### ** .cl_agreement_partition_Jaccard .cl_agreement_partition_Jaccard <- function(x, y) { ## Jaccard index. n <- n_of_objects(x) ## Handle soft partitions using the corresponding hard ones. ## (At least, for the time being.) x <- table(cl_class_ids(x), cl_class_ids(y)) Z <- sum(x ^ 2) (Z - n) / (sum(rowSums(x) ^ 2) + sum(colSums(x) ^ 2) - n - Z) } ### ** .cl_agreement_partition_purity .cl_agreement_partition_purity <- function(x, y) { ## Purity of classes of x with respect to those of y: relative ## fraction of "optimally matched and collapsed" joint class ## frequencies, i.e., \sum_i \max_j c_{ij} / n. n <- n_of_objects(x) ## Handle soft partitions using the corresponding hard ones. ## (At least, for the time being.) x <- table(cl_class_ids(x), cl_class_ids(y)) sum(apply(x, 1L, max)) / n } .cl_agreement_partition_PS <- function(x, y) { ## Prediction Strength as used in Tibshirani and Walter (2005), ## "Cluster Validation by Prediction Strength", JCGS. ## See Eqn 2.1 in the reference: this is ## min_l rate of different objects in the same class in partition ## A and in class l in partition B, ## where the min is taken over all classes l of partition B. x <- table(cl_class_ids(x), cl_class_ids(y)) s <- rowSums(x) min((rowSums(x ^ 2) - s) / (s * (s - 1)), na.rm = TRUE) } ## Some computations useful for interpreting some of the above. ## ## Consider two hard partitions A and B and write ## a_{ik} ... indicator of object i in class k for partition A ## b_{il} ... 
indicator of object i in class l for partition B ## (so that the a_{ik} and b_{il} are of course the membership matrices ## of the partitions). ## ## Then obviously ## \sum_i a_{ik} b_{il} = m_{kl} ## is the number of objects in class k for A and in class l for B, and ## \sum_i a_{ik} = m_{k.} = # objects in class k for A ## \sum_i b_{il} = m_{.l} = # objects in class l for B ## ## Number of pairs of objects in the same classes for both A and B: ## \sum_{i, j, k, l} a_{ik} a_{jk} b_{il} b_{jl} ## = \sum_{k, l} \sum_i a_{ik} b_{il} \sum_j a_{jk} b_{jl} ## = \sum_{k, l} m_{kl} ^ 2 ## This includes the n pairs with identical objects, hence: ## Number of distinct pairs of objects in the same classes for both A ## and B: ## (\sum_{k, l} m_{kl} ^ 2 - n) / 2 ## ## Number of pairs of objects in the same class for A: ## \sum_{i, j, k} a_{ik} a_{jk} ## = \sum_k \sum_i a_{ik} \sum_j a_{jk} ## = \sum_k m_{k.} ^ 2 ## Again, this includes the n pairs with identical objects, hence: ## Number of distinct pairs of objects in the same class for A: ## (\sum_k m_{k.} ^ 2 - n) / 2 ## ## Similarly, \sum_l m_{.l} ^ 2 corresponds to the number of pairs of ## objects in the same class for B. ## ## Finally, to get the number of pairs of objects in different classes ## for both A and B, we note that this is the total number of pairs, ## minus the sum of the numbers of those in the same class for A and for ## B, respectively, plus the number of pairs in the same class for both ## A and B. ## ## This makes e.g. the interpretation of some of the Fowlkes-Mallows or ## Rand agreement indices rather straightforward. 
### ** .cl_agreement_hierarchy_euclidean

## Euclidean ultrametric dissimilarity mapped to (0, 1].
.cl_agreement_hierarchy_euclidean <-
function(x, y)
    1 / (1 + .cl_dissimilarity_hierarchy_euclidean(x, y))

### ** .cl_agreement_hierarchy_manhattan

## Manhattan ultrametric dissimilarity mapped to (0, 1].
.cl_agreement_hierarchy_manhattan <-
function(x, y)
    1 / (1 + .cl_dissimilarity_hierarchy_manhattan(x, y))

### ** .cl_agreement_hierarchy_cophenetic

.cl_agreement_hierarchy_cophenetic <-
function(x, y)
{
    ## Cophenetic correlation.
    ## NA if either hierarchy provides no object dissimilarities.
    if(!.has_object_dissimilarities(x) ||
       !.has_object_dissimilarities(y))
        return(NA)
    cor(cl_object_dissimilarities(x), cl_object_dissimilarities(y))
}

### ** .cl_agreement_hierarchy_angle

.cl_agreement_hierarchy_angle <-
function(x, y)
{
    ## Angle between ultrametrics.
    ## NA if either hierarchy provides no object dissimilarities.
    if(!.has_object_dissimilarities(x) ||
       !.has_object_dissimilarities(y))
        return(NA)
    u_x <- cl_object_dissimilarities(x)
    u_y <- cl_object_dissimilarities(y)
    sum(u_x * u_y) / sqrt(sum(u_x ^ 2) * sum(u_y ^ 2))
}

### ** .cl_agreement_hierarchy_gamma

## One minus the gamma dissimilarity of the hierarchies.
.cl_agreement_hierarchy_gamma <-
function(x, y)
    1 - .cl_dissimilarity_hierarchy_gamma(x, y)

### * [.cl_agreement

## Subscript method: keep the cl_agreement class when possible,
## otherwise return a cross-agreement object.
"[.cl_agreement" <-
function(x, i, j)
{
    y <- NextMethod("[")
    if(!inherits(y, "cl_agreement")) {
        description <- attr(x, "description")
        return(cl_cross_proximity(y, description = description,
                                  class = "cl_cross_agreement"))
    }
    y
}

### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "### [*]+" ***
### End: ***
clue/R/utilities.R0000644000175100001440000001015014144531004013553 0ustar hornikusers### * Matrix/vector utilities

### * .dist_from_vector

## Build a "dist" object of size n from a vector of lower-triangular
## (column-major) dissimilarities.
.dist_from_vector <-
function(x, n = NULL, labels = NULL)
{
    ## This might be useful as as.dist.vector, perhaps without the extra
    ## argument n then which we only have for minimal performance gains.
    if(is.null(n))
        n <- as.integer((sqrt(1 + 8 * length(x)) + 1) / 2)
    attr(x, "Size") <- n
    if(!is.null(labels))
        attr(x, "Labels") <- labels
    class(x) <- "dist"
    x
}

### ** .one_entry_per_column

.one_entry_per_column <-
function(x, j)
{
    ## For a matrix x and a vector of column indices j_1, ..., j_n where
    ## n is the number of rows of x, get x[1,j_1], ..., x[n,j_n].
    ##
    ## This used to have
    ##   if(!is.matrix(x))
    ##       stop("Argument 'x' must be a matrix.")
    ## but that will fail for sparse matrix classes.
    ## So let us hope for the best ...
    ##
    x[cbind(seq_len(nrow(x)), j)]
}

## Replacement counterpart: set x[1,j_1], ..., x[n,j_n] to value.
".one_entry_per_column<-" <-
function(x, j, value)
{
    ##
    ## This used to have
    ##   if(!is.matrix(x))
    ##       stop("Argument 'x' must be a matrix.")
    ## but that will fail for sparse matrix classes.
    ## So let us hope for the best ...
    ##
    x[cbind(seq_len(nrow(x)), j)] <- value
    x
}

### * .symmetric_matrix_from_veclh

.symmetric_matrix_from_veclh <-
function(x, n = NULL)
{
    ## In essence the same as as.matrix.dist, but without handling the
    ## additional attributes that dist objects might have.
    if(is.null(n))
        n <- as.integer((sqrt(1 + 8 * length(x)) + 1) / 2)
    M <- matrix(0, n, n)
    M[row(M) > col(M)] <- x
    M + t(M)
}

### * .weighted_mean_of_object_dissimilarities

## Weighted mean of the object dissimilarities of the clusterings in x,
## returned as a "dist" object.
.weighted_mean_of_object_dissimilarities <-
function(x, w = NULL)
{
    w <- if(is.null(w)) {
        rep.int(1, length(x))
    } else {
        rep_len(w, length(x))
    }
    ## (Need the latter because we want w / sum(w) ...)
    dissimilarities <- lapply(x, cl_object_dissimilarities)
    m <- rowSums(mapply(`*`, dissimilarities, w / sum(w)))
    labels <- attr(dissimilarities[[1L]], "Labels")
    .dist_from_vector(m, labels = labels)
}

### ** .weighted_sum_of_matrices

.weighted_sum_of_matrices <-
function(x, w = NULL, nr = NULL)
{
    ## Quite often we need to compute weighted sums \sum_b w_b X_b of
    ## conforming matrices \{ X_b \}.  If x is a list containing the
    ## matrices and w the vector of weights, it seems that one
    ## reasonably efficient way of doing this is the following.
    if(is.null(w))
        w <- rep.int(1, length(x))
    if(is.null(nr))
        nr <- NROW(x[[1L]])
    matrix(rowSums(mapply(`*`, x, w)), nr)
}

### ** .weighted_sum_of_vectors

## Weighted sum \sum_b w_b x_b of conforming vectors in the list x.
.weighted_sum_of_vectors <-
function(x, w = NULL)
{
    ## See above.
    if(is.null(w))
        w <- rep.int(1, length(x))
    rowSums(mapply(`*`, x, w))
}

### * Containers

## Creator.
.make_container <-
function(x, classes, properties = NULL)
{
    out <- list(.Data = x, .Meta = properties)
    class(out) <- unique(classes)
    out
}

## Getters.
.get_representation <-
function(x)
    x$.Data
.get_properties <-
function(x)
    x$.Meta
.get_property <-
function(x, which)
    x$.Meta[[which]]
.has_property <-
function(x, which)
    which %in% names(x$.Meta)
## Get a property from the container meta data if present, otherwise
## compute it from the representation via 'getter' (defaulting to the
## function named 'which').
.get_property_from_object_or_representation <-
function(x, which, getter)
{
    if(.has_property(x, which))
        .get_property(x, which)
    else {
        if(missing(getter))
            getter <- get(which)
        getter(.get_representation(x))
    }
}

## Methods (sort of).
.print_container <-
function(x, cls, ...)
{
    writeLines(gettextf("An object of virtual class '%s', with representation:\n",
                        cls))
    print(.get_representation(x), ...)
    invisible(x)
}

### * Others

## Weighted median of x with weights w (recycled to length(x)).
weighted_median <-
function(x, w = 1, na.rm = FALSE)
{
    w <- rep_len(w, length(x))
    if(na.rm && any(ind <- is.na(x))) {
        x <- x[!ind]
        w <- w[!ind]
    }
    if(any(is.na(x)) || !length(x))
        return(NA)
    w <- w / sum(w)
    ind <- order(x)
    x <- x[ind]
    w <- w[ind]
    x[which.min(x * (cumsum(w) - 0.5) - cumsum(w * x))]
}

### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "### [*]+" ***
### End: ***
clue/R/consensus.R0000644000175100001440000011507514144530711013575 0ustar hornikusers### * cl_consensus

## Compute a consensus clustering of the ensemble x, using a registered
## consensus method (by name or cl_consensus_method object) or a
## user-supplied function with formals (clusterings, weights, control).
cl_consensus <-
function(x, method = NULL, weights = 1, control = list())
{
    ##
    ## Interfaces are a matter of taste.
    ## E.g., one might want to have a 'type' argument indication whether
    ## hard or soft partitions are sought.  One could then do
    ##   cl_consensus(x, method = "euclidean", type = "hard")
    ## to look for an optimal median (or least squares) hard partition
    ## (for euclidean dissimilarity).
    ## For us, "method" really indicates a certain algorithm, with its
    ## bells and whistles accessed via the 'control' argument.
    ##
    clusterings <- as.cl_ensemble(x)
    if(!length(clusterings))
        stop("Cannot compute consensus of empty ensemble.")
    weights <- rep_len(weights, length(clusterings))
    if(any(weights < 0))
        stop("Argument 'weights' has negative elements.")
    if(!any(weights > 0))
        stop("Argument 'weights' has no positive elements.")
    if(!is.function(method)) {
        if(!inherits(method, "cl_consensus_method")) {
            ## Get the method definition from the registry.
            type <- .cl_ensemble_type(clusterings)
            if(is.null(method))
                method <- .cl_consensus_method_default(type)
            method <- get_cl_consensus_method(method, type)
        }
        method <- method$definition
    }
    method(clusterings, weights, control)
}

### * .cl_consensus_partition_DWH

## DWH consensus: sequential online update of a membership matrix by
## optimally matched, weight-proportionally blended memberships.
.cl_consensus_partition_DWH <-
function(clusterings, weights, control)
{
    ##
    ## Could make things more efficient by subscripting on positive
    ## weights.
    ## (Note that this means control$order has to be subscripted as
    ## well.)
    ##
    max_n_of_classes <- max(sapply(clusterings, n_of_classes))

    ## Control parameters.
    k <- control$k
    if(is.null(k))
        k <- max_n_of_classes
    order <- control$order
    if(is.null(order))
        order <- sample(seq_along(clusterings))

    clusterings <- clusterings[order]
    weights <- weights[order]
    k_max <- max(k, max_n_of_classes)
    ## Online blending weights s_b = w_b / (w_1 + ... + w_b).
    s <- weights / cumsum(weights)
    s[is.na(s)] <- 0                    # Division by zero ...

    M <- cl_membership(clusterings[[1L]], k_max)
    for(b in seq_along(clusterings)[-1L]) {
        mem <- cl_membership(clusterings[[b]], k_max)
        ## Match classes from conforming memberships.
        ind <- solve_LSAP(crossprod(M, mem), maximum = TRUE)
        M <- (1 - s[b]) * M + s[b] * mem[, ind]
        if(k < k_max)
            M <- .project_to_leading_columns(M, k)
    }

    M <- .cl_membership_from_memberships(M[, seq_len(k), drop = FALSE],
                                         k)
    as.cl_partition(M)
}

### * .cl_consensus_partition_AOS

.cl_consensus_partition_AOS <-
function(clusterings, weights, control,
         type = c("SE", "HE", "SM", "HM"))
{
    ## The start of a general purpose optimizer for determining
    ## consensus partitions by minimizing
    ##   \sum_b w_b d(M, M_b) ^ e
    ##     = \sum_b \min_{P_b} w_b f(M, M_b P_b) ^ e
    ## for the special case where the criterion function is based on
    ## M and M_b P_b (i.e., column permutations of M_b), as opposed to
    ## the general case where d(M, M_b) = \min_{P_b} f(M, P_b, M_b)
    ## handled by .cl_consensus_partition_AOG().
    ##
    ## The AO ("alternative optimization") proceeds by alternatively
    ## matching the M_b to M by minimizing f(M, M_b P_b) over P_b, and
    ## fitting M by minimizing \sum_b w_b f(M, M_b P_b) ^ e for fixed
    ## matchings.
    ##
    ## Such a procedure requires three ingredients: a function for
    ## matching M_b to M (in fact simply replacing M_b by the matched
    ## M_b P_b); a function for fitting M to the \{M_b P_b\}, and a
    ## function for computing the value of the criterion function
    ## corresponding to this fit (so that one can stop if the relative
    ## improvement is small enough).
    ##
    ## For the time being, we only use this to determine soft and hard
    ## Euclidean least squares consensus partitions (soft and hard
    ## Euclidean means), so the interface does not yet reflect the
    ## generality of the approach (which would either pass the three
    ## functions, or even set up family objects encapsulating the three
    ## functions).
    ##
    ## This special case is provided for efficiency and convenience.
    ## Using the special form of the criterion function, we can simply
    ## always work memberships with the same maximal number of columns,
    ## and with the permuted \{ M_b P_b \}.
    ## For the time being ...
    type <- match.arg(type)

    w <- weights / sum(weights)

    n <- n_of_objects(clusterings)
    k_max <- max(sapply(clusterings, n_of_classes))

    ## Control parameters (with their defaults).
    k <- control$k
    if(is.null(k))
        k <- k_max
    maxiter <- control$maxiter
    if(is.null(maxiter))
        maxiter <- 100
    nruns <- control$nruns
    reltol <- control$reltol
    if(is.null(reltol))
        reltol <- sqrt(.Machine$double.eps)
    start <- control$start
    verbose <- control$verbose
    if(is.null(verbose))
        verbose <- getOption("verbose")

    ## Handle start values and number of runs.
    if(!is.null(start)) {
        if(!is.list(start)) {
            ## Be nice to users.
            start <- list(start)
        }
        nruns <- length(start)
    }
    else {
        if(is.null(nruns)) {
            ## Use nruns only if start is not given.
            nruns <- 1L
        }
        start <- replicate(nruns,
                           .random_stochastic_matrix(n, k),
                           simplify = FALSE)
    }

    ## The maximal (possible) number of classes in M and the \{ M_b \}.
    k_all <- max(k, k_max)

    ## Criterion value for a fit: weighted sum of squared (SE/HE) or
    ## absolute (SM/HM) elementwise differences.
    value <-
        switch(type,
               SE = ,
               HE = function(M, memberships, w) {
                   sum(w * sapply(memberships,
                                  function(u) sum((u - M) ^ 2)))
               },
               SM = ,
               HM = function(M, memberships, w) {
                   sum(w * sapply(memberships,
                                  function(u) sum(abs(u - M))))
               })
    ## Return the M[, ind] column permutation of M optimally matching N.
    match_memberships <-
        switch(type,
               SE = ,
               HE = function(M, N) {
                   M[, solve_LSAP(crossprod(N, M), maximum = TRUE),
                     drop = FALSE]
               },
               SM = ,
               HM = function(M, N) {
                   M[, solve_LSAP(.cxdist(N, M, "manhattan")),
                     drop = FALSE]
               })
    ## Function for fitting M to (fixed) memberships \{ M_b P_b \}.
    ## As we use a common number of columns for all membership matrices
    ## involved, we need to pass the desired 'k' ...
    fit_M <-
        switch(type,
               SE = function(memberships, w, k) {
                   ## Update M as \sum w_b M_b P_b.
                   M <- .weighted_sum_of_matrices(memberships, w,
                                                  nrow(M))
                   ## If k < k_all, "project" as indicated in Gordon &
                   ## Vichi (2001), p. 238.
                   if(k < ncol(M))
                       M <- .project_to_leading_columns(M, k)
                   M
               },
               HE = ,
               HM = function(memberships, w, k) {
                   ## Compute M as \sum w_b M_b P_b.
                   M <- .weighted_sum_of_matrices(memberships, w,
                                                  nrow(M))
                   ## And compute a closest hard partition H(M) from
                   ## that, using the first k columns of M.
                   ids <- max.col(M[ , seq_len(k), drop = FALSE])
                   .cl_membership_from_class_ids(ids, ncol(M))
               },
               SM = .l1_fit_M)

    memberships <- lapply(clusterings, cl_membership, k_all)

    ## Multi-start loop: keep the best (lowest criterion value) fit.
    V_opt <- Inf
    M_opt <- NULL
    for(run in seq_along(start)) {
        if(verbose && (nruns > 1L))
            message(gettextf("AOS run: %d", run))
        M <- start[[run]]
        if(k < k_all)
            M <- cbind(M, matrix(0, nrow(M), k_all - k))
        memberships <- lapply(memberships, match_memberships, M)
        old_value <- value(M, memberships, w)
        if(verbose)
            message(gettextf("Iteration: 0 *** value: %g", old_value))
        iter <- 1L
        while(iter <= maxiter) {
            ## Fit M to the M_b P_b.
            M <- fit_M(memberships, w, k)
            ## Match the \{ M_b P_b \} to M.
            memberships <- lapply(memberships, match_memberships, M)
            ## Update value.
            new_value <- value(M, memberships, w)
            if(verbose)
                message(gettextf("Iteration: %d *** value: %g",
                                 iter, new_value))
            if(abs(old_value - new_value)
               < reltol * (abs(old_value) + reltol))
                break
            old_value <- new_value
            iter <- iter + 1L
        }
        if(new_value < V_opt) {
            converged <- (iter <= maxiter)
            V_opt <- new_value
            M_opt <- M
        }
        if(verbose)
            message(gettextf("Minimum: %g", V_opt))
    }

    M <- .stochastify(M_opt)
    rownames(M) <- rownames(memberships[[1L]])
    meta <- list(objval = value(M, memberships, w),
                 converged = converged)
    M <- .cl_membership_from_memberships(M[, seq_len(k), drop = FALSE],
                                         k, meta)
    as.cl_partition(M)
}

.random_stochastic_matrix <- function(n, k)
{
    ## Row-stochastic n x k matrix: i.i.d. uniform draws normalized so
    ## each row sums to one.
    M <- matrix(runif(n * k), n, k)
    M / rowSums(M)
}

.l1_fit_M <- function(memberships, w, k)
{
    ## Determine stochastic matrix M with at most k leading nonzero
    ## columns such that
    ##
    ##   \sum_b w_b \sum_{i,j} | m_{ij}(b) - m_{ij} | => min
    ##
    ## where the sum over j goes from 1 to k.
    ##
    ## Clearly, this can be done separately for each row, where we need
    ## to minimize
    ##
    ##   \sum_b w_b \sum_j | y_j(b) - x_j | => min
    ##
    ## over all probability vectors x.
    ## Such problems can e.g. be solved via the following linear
    ## program:
    ##
    ##   \sum_b \sum_j w_b e'(u(b) + v(b)) => min
    ##
    ## subject to
    ##
    ##   u(1), v(1), ..., u(B), v(B), x >= 0
    ##   x + u(b) - v(b) = y(b), b = 1, ..., B
    ##   e'x = 1
    ##
    ## (where e = [1, ..., 1]).
    ##
    ## So we have one long vector z of "variables":
    ##
    ##   z = [u(1)', v(1)', ..., u(B)', v(B)', x']'
    ##
    ## of length (2B + 1) k, with x the object of interest.

    ## Rather than providing a separate function for weighted L1 fitting
    ## of probability vectors we prefer doing "everything" at once, in
    ## order to avoid recomputing the coefficients and constraints of
    ## the associated linear program.

    B <- length(memberships)
    L <- (2 * B + 1) * k
    ## Set up associated linear program.
    ## Coefficients in the objective function: weight w_b for each
    ## u(b), v(b) pair, zero for the x part.
    objective_in <- c(rep(w, each = 2 * k), rep.int(0, k))
    ## Constraints: nonnegativity, the B residual equations, and the
    ## probability-vector normalization e'x = 1.
    constr_mat <- rbind(diag(1, L),
                        cbind(kronecker(diag(1, B),
                                        cbind(diag(1, k),
                                              diag(-1, k))),
                              kronecker(rep.int(1, B), diag(1, k))),
                        c(rep.int(0, 2 * B * k), rep.int(1, k)))
    constr_dir <- c(rep.int(">=", L), rep.int("==", B * k + 1L))
    ## Positions of x inside the solution vector z.
    ind <- seq.int(from = 2 * B * k + 1L, length.out = k)
    nr <- NROW(memberships[[1L]])
    nc <- NCOL(memberships[[1L]])
    M <- matrix(0, nrow = nr, ncol = k)
    ## Put the memberships into one big array so that we can get their
    ## rows more conveniently (and efficiently):
    memberships <- array(unlist(memberships), c(nr, nc, B))
    for(i in seq_len(nr)) {
        out <- lpSolve::lp("min", objective_in, constr_mat, constr_dir,
                           c(rep.int(0, L),
                             memberships[i, seq_len(k), ],
                             1))
        M[i, ] <- out$solution[ind]
    }
    ## Add zero columns if necessary.
    if(k < nc)
        M <- cbind(M, matrix(0, nr, nc - k))
    M
}

### ** .cl_consensus_partition_soft_euclidean

## Soft Euclidean least squares consensus ("SE").
.cl_consensus_partition_soft_euclidean <-
function(clusterings, weights, control)
    .cl_consensus_partition_AOS(clusterings, weights, control, "SE")

### ** .cl_consensus_partition_hard_euclidean

## Hard Euclidean least squares consensus ("HE").
.cl_consensus_partition_hard_euclidean <-
function(clusterings, weights, control)
    .cl_consensus_partition_AOS(clusterings, weights, control, "HE")

### ** .cl_consensus_partition_soft_manhattan

## Soft Manhattan consensus ("SM").
.cl_consensus_partition_soft_manhattan <-
function(clusterings, weights, control)
    .cl_consensus_partition_AOS(clusterings, weights, control, "SM")

### ** .cl_consensus_partition_hard_manhattan

## Hard Manhattan consensus ("HM").
.cl_consensus_partition_hard_manhattan <-
function(clusterings, weights, control)
    .cl_consensus_partition_AOS(clusterings, weights, control, "HM")

### * .cl_consensus_partition_AOG

.cl_consensus_partition_AOG <-
function(clusterings, weights, control, type = c("GV1"))
{
    ## The start of a general purpose optimizer for determining
    ## consensus partitions by minimizing
    ##   \sum_b w_b d(M, M_b) ^ p
    ##     = \sum_b \min_{P_b} w_b f(M, M_b, P_b) ^ e
    ## for general dissimilarity matrices which involve class matching
    ## via permutation matrices P_b.
    ##
    ## The AO ("Alternative Optimization") proceeds by alternating
    ## between determining the optimal permutations P_b by minimizing
    ##   f(M, M_b, P_b)
    ## for fixed M, and fitting M by minimizing
    ##   \sum_b w_b f(M, M_b, P_b) ^ e
    ## for fixed \{ P_b \}.
    ##
    ## We encapsulate this into functions fit_P() and fit_M() (and a
    ## value() function for the criterion function to be minimized with
    ## respect to both M and \{ P_b \}, even though the current
    ## interface does not yet reflect the generality of the approach.
    ##
    ## Note that rather than passing on information about the numbers of
    ## classes (e.g., needed for GV1) and representing all involved
    ## membership matrices with the same maximal number of columns, we
    ## use "minimal" representations with no dummy classes (strictly
    ## speaking, with the possible exception of M, for which the given k
    ## is used).

    ## For the time being ...
    type <- match.arg(type)

    w <- weights / sum(weights)

    n <- n_of_objects(clusterings)
    k_max <- max(sapply(clusterings, n_of_classes))

    ## Control parameters (with their defaults).
    k <- control$k
    if(is.null(k))
        k <- k_max
    maxiter <- control$maxiter
    if(is.null(maxiter))
        maxiter <- 100L
    nruns <- control$nruns
    reltol <- control$reltol
    if(is.null(reltol))
        reltol <- sqrt(.Machine$double.eps)
    start <- control$start
    verbose <- control$verbose
    if(is.null(verbose))
        verbose <- getOption("verbose")

    ## Handle start values and number of runs.
    if(!is.null(start)) {
        if(!is.list(start)) {
            ## Be nice to users.
            start <- list(start)
        }
        nruns <- length(start)
    }
    else {
        if(is.null(nruns)) {
            ## Use nruns only if start is not given.
            nruns <- 1L
        }
        start <- replicate(nruns,
                           .random_stochastic_matrix(n, k),
                           simplify = FALSE)
    }

    ##
    ## For the given memberships, we can simply use ncol() in the
    ## computations (rather than n_of_classes(), because we used
    ## cl_membership() to create them.  For M, the number of classes
    ## could be smaller than the given k "target".
    ##

    value <- function(M, permutations, memberships, w) {
        k <- .n_of_nonzero_columns(M)
        d <- function(u, p) {
            ## Compute the squared GV1 dissimilarity between M and u
            ## based on the M->u class matching p.
            nc_u <- ncol(u)
            if(nc_u == k) {
                ## Simple case: all classes are matched.
                sum((u[, p] - M) ^ 2)
            }
            else {
                ## Only include the matched non-dummy classes of M ..
                ind <- seq_len(k)
                ## ... which are matched to non-dummy classes of u.
                ind <- ind[p[ind] <= nc_u]
                sum((u[, p[ind]] - M[, ind]) ^ 2)
            }
        }
        sum(w * mapply(d, memberships, permutations))
    }

    fit_P <- function(u, M) {
        ## Return a permutation representing a GV1 optimal matching of
        ## the columns of M to the columns of u (note the order of the
        ## arguments), using a minimal number of dummy classes (i.e., p
        ## has max(.n_of_nonzero_columns(M), n_of_classes(u)) entries).
        ## See also .cl_dissimilarity_partition_GV1().
        C <- outer(colSums(M ^ 2), colSums(u ^ 2), `+`) -
            2 * crossprod(M, u)
        nc_M <- .n_of_nonzero_columns(M)
        nc_u <- ncol(u)
        ## (See above for ncol() vs n_of_classes().)
        ## Pad the cost matrix with zero rows/columns (dummy classes)
        ## so that solve_LSAP() gets a square-enough problem.
        if(nc_M < nc_u)
            C <- rbind(C, matrix(0, nrow = nc_u - nc_M, ncol = nc_u))
        else if(nc_M > nc_u)
            C <- cbind(C, matrix(0, nrow = nc_M, ncol = nc_M - nc_u))
        solve_LSAP(C)
    }

    fit_M <- function(permutations, memberships, w) {
        ## Here comes the trickiest part ...
        ##
        ## In general, M = [m_{iq}] is determined as follows.
        ## Write value(M, permutations, memberships, w) as
        ##   \sum_b \sum_i \sum_{p=1}^{k_b} \sum_{q=1}^k
        ##       w_b (u_{ip}(b) - m_{iq})^2 x_{pq}(b)
        ## where U(b) and X(b) are the b-th membership matrix and the
        ## permutation matrix representing the M->U(b) non-dummy class
        ## matching (as always, note the order of the arguments).
        ##
        ## Let
        ##   \beta_{iq} = \sum_b \sum_{p=1}^{k_b} w_b u_{ip}(b) x_{pq}(b)
        ##   \alpha_q   = \sum_b \sum_{p=1}^{k_b} w_b x_{pq}(b)
        ## and
        ##   \bar{m}_{iq} =
        ##     \cases{\beta_{iq}/\alpha_q, & $\alpha_q > 0$ \cr
        ##            0,                   & otherwise}.
        ## Then, as the cross-product terms cancel out, the value
        ## function rewrites as
        ##   \sum_b \sum_i \sum_{p=1}^{k_b} \sum_{q=1}^k
        ##       w_b (u_{ip}(b) - \bar{m}_{iq})^2 x_{pq}(b)
        ##   + \sum_i \sum_q \alpha_q (\bar{m}_{iq} - m_{iq}) ^ 2,
        ## where the first term is a constant, and the minimum is found
        ## by solving
        ##   \sum_q \alpha_q (\bar{m}_{iq} - m_{iq}) ^ 2 => min!
        ## s.t.
        ##   m_{i1}, ..., m_{ik} >= 0, \sum_{iq} m_{iq} = 1.
        ##
        ## We can distinguish three cases.
        ## A. If S_i = \sum_q \bar{m}_{iq} = 1, things are trivial.
        ## B. If S_i = \sum_q \bar{m}_{iq} < 1.
        ##    B1. If some \alpha_q are zero, then we can choose
        ##        m_{iq} = \bar{m}_{iq} for those q with \alpha_q = 0;
        ##        m_{iq} = 1 / number of zero \alpha's, otherwise.
        ##    B2. If all \alpha_q are positive, we can simply
        ##        equidistribute 1 - S_i over all classes as written
        ##        in G&V.
        ## C. If S_i > 1, things are not so clear (as equidistributing
        ##    will typically result in violations of the non-negativity
        ##    constraint).  We currently revert to using solve.QP() from
        ##    package quadprog, as constrOptim() already failed in very
        ##    simple test cases.
        ##
        ## Now consider \sum_{p=1}^{k_b} x_{pq}(b).  If k <= k_b for all
        ## b, all M classes from 1 to k are matched to one of the k_b
        ## classes in U(b), hence the sum and also \alpha_q are one.
        ## But then
        ##   \sum_q \bar{m}_{iq}
        ##      = \sum_b \sum_{p=1}^{k_b} w_b u_{ip}(b) x_{pq}(b)
        ##     <= \sum_b \sum_{p=1}^{k_b} w_b u_{ip}(b)
        ##      = 1
        ## with equality if k = k_b for all b.  I.e.,
        ## * If k = \min_b k_b = \max k_b, we are in case A.
        ## * If k <= \min_b k_b, we are in case B2.
        ## And it makes sense to handle these cases explicitly for
        ## efficiency reasons.

        ## And now for something completely different ... the code.
        ## (M is not a parameter: it is found in the enclosing
        ## environment, holding the current consensus iterate.)
        k <- .n_of_nonzero_columns(M)
        nr_M <- nrow(M)
        nc_M <- ncol(M)
        nc_memberships <- sapply(memberships, ncol)
        if(k <= min(nc_memberships)) {
            ## Compute the weighted means \bar{M}.
            M <- .weighted_sum_of_matrices(mapply(function(u, p)
                                                  u[ , p[seq_len(k)]],
                                                  memberships,
                                                  permutations,
                                                  SIMPLIFY = FALSE),
                                           w, nr_M)
            ## And add dummy classes if necessary.
            if(k < nc_M)
                M <- cbind(M, matrix(0, nr_M, nc_M - k))
            ## If we always got the same number of classes, we are
            ## done.  Otherwise, equidistribute ...
            if(k < max(nc_memberships))
                M <- pmax(M + (1 - rowSums(M)) / nc_M, 0)
            return(M)
        }
        ## Here comes the general case.
        ## First, compute the \alpha and \beta.
        alpha <- rowSums(rep(w, each = k) *
                         mapply(function(p, n) p[seq_len(k)] <= n,
                                permutations, nc_memberships))
        ## Alternatively (more literally):
        ##   X <- lapply(permutations, .make_X_from_p)
        ##   alpha1 <- double(length = k)
        ##   for(b in seq_along(permutations)) {
        ##       alpha1 <- alpha1 +
        ##           w[b] * colSums(X[[b]][seq_len(nc_memberships[b]), ])
        ##   }
        ## A helper function giving suitably permuted memberships.
        pmem <- function(u, p) {
            ## Only matched classes, similar to the one used in value(),
            ## maybe merge eventually ...
            v <- matrix(0, nr_M, k)
            ind <- seq_len(k)
            ind <- ind[p[ind] <= ncol(u)]
            ## NOTE(review): 'ind' is an integer index vector, so
            ## length(ind) would be the clearer emptiness test here;
            ## any() relies on the indices being positive.
            if(any(ind))
                v[ , ind] <- u[ , p[ind]]
            v
        }
        beta <- .weighted_sum_of_matrices(mapply(pmem, memberships,
                                                 permutations,
                                                 SIMPLIFY = FALSE),
                                          w, nr_M)
        ## Alternatively (more literally):
        ##   beta1 <- matrix(0, nr_M, nc_M)
        ##   for(b in seq_along(permutations)) {
        ##       ind <- seq_len(nc_memberships[b])
        ##       beta1 <- beta1 +
        ##           w[b] * memberships[[b]][, ind] %*% X[[b]][ind, ]
        ##   }
        ## Compute the weighted means \bar{M}.
        M <- .cscale(beta, ifelse(alpha > 0, 1 / alpha, 0))
        ## Alternatively (see comments for .cscale()):
        ##   M1 <- beta %*% diag(ifelse(alpha > 0, 1 / alpha, 0))
        ## And add dummy classes if necessary.
        if(k < nc_M)
            M <- cbind(M, matrix(0, nr_M, nc_M - k))
        S <- rowSums(M)
        ## Take care of those rows with row sums < 1.
        ind <- (S < 1)
        if(any(ind)) {
            i_0 <- alpha == 0
            if(any(i_0))
                M[ind, i_0] <- 1 / sum(i_0)
            else
                M[ind, ] <- pmax(M[ind, ] + (1 - S[ind]) / nc_M, 0)
        }
        ## Take care of those rows with row sums > 1.
        ind <- (S > 1)
        if(any(ind)) {
            ## Argh.  Call solve.QP() for each such i.  Alternatively,
            ## could set up on very large QP, but is this any better?
Dmat <- diag(alpha, nc_M) Amat <- t(rbind(rep.int(-1, nc_M), diag(1, nc_M))) bvec <- c(-1, rep.int(0, nc_M)) for(i in which(ind)) M[i, ] <- quadprog::solve.QP(Dmat, alpha * M[i, ], Amat, bvec)$solution } M } memberships <- lapply(clusterings, cl_membership) V_opt <- Inf M_opt <- NULL for(run in seq_along(start)) { if(verbose && (nruns > 1L)) message(gettextf("AOG run: %d", run)) M <- start[[run]] permutations <- lapply(memberships, fit_P, M) old_value <- value(M, permutations, memberships, w) message(gettextf("Iteration: 0 *** value: %g", old_value)) iter <- 1L while(iter <= maxiter) { ## Fit M. M <- fit_M(permutations, memberships, w) ## Fit \{ P_b \}. permutations <- lapply(memberships, fit_P, M) ## Update value. new_value <- value(M, permutations, memberships, w) if(verbose) message(gettextf("Iteration: %d *** value: %g", iter, new_value)) if(abs(old_value - new_value) < reltol * (abs(old_value) + reltol)) break old_value <- new_value iter <- iter + 1L } if(new_value < V_opt) { converged <- (iter <= maxiter) V_opt <- new_value M_opt <- M } if(verbose) message(gettextf("Minimum: %g", V_opt)) } M <- .stochastify(M_opt) ## Seems that M is always kept a k columns ... if not, use ## M <- .stochastify(M_opt[, seq_len(k), drop = FALSE]) rownames(M) <- rownames(memberships[[1L]]) ## Recompute the value, just making sure ... permutations <- lapply(memberships, fit_P, M) meta <- list(objval = value(M, permutations, memberships, w), converged = converged) M <- .cl_membership_from_memberships(M, k, meta) as.cl_partition(M) } ### ** .cl_consensus_partition_GV1 .cl_consensus_partition_GV1 <- function(clusterings, weights, control) .cl_consensus_partition_AOG(clusterings, weights, control, "GV1") ### * .cl_consensus_partition_GV3 .cl_consensus_partition_GV3 <- function(clusterings, weights, control) { ## Use a SUMT to solve ## \| Y - M M' \|_F^2 => min ## where M is a membership matrix and Y = \sum_b w_b M_b M_b'. 
    n <- n_of_objects(clusterings)
    max_n_of_classes <- max(sapply(clusterings, n_of_classes))

    ## Control parameters:
    ## k,
    k <- control$k
    if(is.null(k))
        k <- max_n_of_classes
    ## nruns,
    nruns <- control$nruns
    ## start.
    start <- control$start

    w <- weights / sum(weights)

    ## Weighted sum Y of the comembership matrices M_b M_b'.
    comemberships <-
        lapply(clusterings, function(x) {
            ## No need to force a common k here.
            tcrossprod(cl_membership(x))
        })
    Y <- .weighted_sum_of_matrices(comemberships, w, n)

    ## Handle start values and number of runs.
    if(!is.null(start)) {
        if(!is.list(start)) {
            ## Be nice to users.
            start <- list(start)
        }
    }
    else {
        if(is.null(nruns)) {
            ## Use nruns only if start is not given.
            nruns <- 1L
        }
        e <- eigen(Y, symmetric = TRUE)
        ## Use M <- U_k \lambda_k^{1/2}, or random perturbations
        ## thereof.
        M <- e$vectors[, seq_len(k), drop = FALSE] *
            rep(sqrt(e$values[seq_len(k)]), each = n)
        m <- c(M)
        start <- c(list(m),
                   replicate(nruns - 1L,
                             m + rnorm(length(m),
                                       sd = sd(m) / sqrt(3)),
                             simplify = FALSE))
    }

    y <- c(Y)
    ## Criterion: squared Frobenius distance between Y and M M'.
    L <- function(m) sum((y - tcrossprod(matrix(m, n))) ^ 2)
    P <- .make_penalty_function_membership(n, k)
    grad_L <- function(m) {
        M <- matrix(m, n)
        4 * c((tcrossprod(M) - Y) %*% M)
    }
    grad_P <- .make_penalty_gradient_membership(n, k)

    out <- sumt(start, L, P, grad_L, grad_P,
                method = control$method, eps = control$eps,
                q = control$q, verbose = control$verbose,
                control = as.list(control$control))

    ## Round the SUMT solution to a proper membership matrix.
    M <- .stochastify(matrix(out$x, n))
    rownames(M) <- rownames(cl_membership(clusterings[[1L]]))
    meta <- list(objval = L(c(M)))
    M <- .cl_membership_from_memberships(M, k, meta)
    as.cl_partition(M)
}

### * .cl_consensus_partition_soft_symdiff

.cl_consensus_partition_soft_symdiff <-
function(clusterings, weights, control)
{
    ## Use a SUMT to solve
    ##   \sum_b w_b \sum_{ij} | c_{ij}(b) - c_{ij} | => min
    ## where C(b) = comembership(M(b)) and C = comembership(M) and M is
    ## a membership matrix.

    ## Control parameters:
    ## gradient,
    gradient <- control$gradient
    if(is.null(gradient))
        gradient <- TRUE
    ## k,
    k <- control$k
    ## nruns,
    nruns <- control$nruns
    ## start.
    start <- control$start

    ## Handle start values and number of runs.
    ## NOTE(review): this pass is repeated almost verbatim further
    ## below (where the eigen-based default start values are built);
    ## presumably one of the two blocks is redundant -- confirm before
    ## simplifying.
    if(!is.null(start)) {
        if(!is.list(start)) {
            ## Be nice to users.
            start <- list(start)
        }
    }
    else if(is.null(nruns)) {
        ## Use nruns only if start is not given.
        nruns <- 1L
    }

    max_n_of_classes <- max(sapply(clusterings, n_of_classes))
    if(is.null(k))
        k <- max_n_of_classes

    B <- length(clusterings)
    n <- n_of_objects(clusterings)
    w <- weights / sum(weights)
    comemberships <-
        lapply(clusterings, function(x) {
            ## No need to force a common k here.
            tcrossprod(cl_membership(x))
        })

    ## Handle start values and number of runs.
    if(!is.null(start)) {
        if(!is.list(start)) {
            ## Be nice to users.
            start <- list(start)
        }
    }
    else {
        if(is.null(nruns)) {
            ## Use nruns only if start is not given.
            nruns <- 1L
        }
        ## Try using a rank k "root" of the weighted median of the
        ## comemberships as starting value.
        Y <- apply(array(unlist(comemberships), c(n, n, B)), c(1, 2),
                   weighted_median, w)
        e <- eigen(Y, symmetric = TRUE)
        ## Use M <- U_k \lambda_k^{1/2}, or random perturbations
        ## thereof.
        M <- e$vectors[, seq_len(k), drop = FALSE] *
            rep(sqrt(e$values[seq_len(k)]), each = n)
        m <- c(M)
        start <- c(list(m),
                   replicate(nruns - 1L,
                             m + rnorm(length(m),
                                       sd = sd(m) / sqrt(3)),
                             simplify = FALSE))
    }

    L <- function(m) {
        M <- matrix(m, n)
        C_M <- tcrossprod(M)
        ## Note that here (as opposed to hard/symdiff) we take soft
        ## partitions as is without replacing them by their closest hard
        ## partitions.
        sum(w * sapply(comemberships,
                       function(C) sum(abs(C_M - C))))
    }
    P <- .make_penalty_function_membership(n, k)
    if(gradient) {
        grad_L <- function(m) {
            M <- matrix(m, n)
            C_M <- tcrossprod(M)
            ## Subgradient of the weighted sum of absolute deviations.
            .weighted_sum_of_matrices(lapply(comemberships,
                                             function(C)
                                             2 * sign(C_M - C) %*% M),
                                      w, n)
        }
        grad_P <- .make_penalty_gradient_membership(n, k)
    }
    else
        grad_L <- grad_P <- NULL

    out <- sumt(start, L, P, grad_L, grad_P,
                method = control$method, eps = control$eps,
                q = control$q, verbose = control$verbose,
                control = as.list(control$control))

    ## Round the SUMT solution to a proper membership matrix.
    M <- .stochastify(matrix(out$x, n))
    rownames(M) <- rownames(cl_membership(clusterings[[1L]]))
    meta <- list(objval = L(c(M)))
    M <- .cl_membership_from_memberships(M, k, meta)
    as.cl_partition(M)
}

### * .cl_consensus_partition_hard_symdiff

.cl_consensus_partition_hard_symdiff <-
function(clusterings, weights, control)
{
    ##
    ## This is mostly duplicated from relations.
    ## Once this is on CRAN, we could consider having clue suggest
    ## relations ...
    ##
    comemberships <-
        lapply(clusterings, function(x) {
            ## Here, we always turn possibly soft partitions to
            ## their closest hard partitions.
            ids <- cl_class_ids(x)
            outer(ids, ids, `==`)
            ## (Simpler than using tcrossprod() on
            ## cl_membership().)
        })
    ## Could also create a relation ensemble from the comemberships and
    ## call relation_consensus().
    B <- relations:::.make_fit_relation_symdiff_B(comemberships,
                                                  weights)
    k <- control$k
    control <- control$control
    ## Note that currently we provide no support for finding *all*
    ## consensus partitions (but allow for specifying the solver).
    control$all <- FALSE
    I <- if(!is.null(k)) {
        ##
        ## We could actually get the memberships directly in this case.
        relations:::fit_relation_LP_E_k(B, k, control)
        ##
    }
    else
        relations:::fit_relation_LP(B, "E", control)
    ids <- relations:::get_class_ids_from_incidence(I)
    names(ids) <- cl_object_names(clusterings)
    as.cl_hard_partition(ids)
}

### * .cl_consensus_hierarchy_cophenetic

## Least squares ("cophenetic") consensus hierarchy: fit an
## ultrametric to the ensemble via ls_fit_ultrametric().
.cl_consensus_hierarchy_cophenetic <-
function(clusterings, weights, control)
{
    ## d <- .weighted_mean_of_object_dissimilarities(clusterings,
    ##                                               weights)
    ## Alternatively:
    ##   as.cl_dendrogram(ls_fit_ultrametric(d, control = control))
    control <- c(list(weights = weights), control)
    as.cl_dendrogram(ls_fit_ultrametric(clusterings, control = control))
}

### * .cl_consensus_hierarchy_manhattan

.cl_consensus_hierarchy_manhattan <-
function(clusterings, weights, control)
{
    ## Control parameters:
    ## gradient,
    gradient <- control$gradient
    if(is.null(gradient))
        gradient <- TRUE
    ## nruns,
    nruns <- control$nruns
    ## start.
    start <- control$start

    ## Handle start values and number of runs.
    if(!is.null(start)) {
        if(!is.list(start)) {
            ## Be nice to users.
            start <- list(start)
        }
    }
    else if(is.null(nruns)) {
        ## Use nruns only if start is not given.
        nruns <- 1L
    }

    w <- weights / sum(weights)

    B <- length(clusterings)
    ultrametrics <- lapply(clusterings, cl_ultrametric)
    ## A single hierarchy is its own consensus.
    if(B == 1L)
        return(as.cl_dendrogram(ultrametrics[[1L]]))
    n <- n_of_objects(ultrametrics[[1L]])
    labels <- cl_object_names(ultrametrics[[1L]])
    ## We need to do
    ##
    ##   \sum_b w_b \sum_{i,j} | u_{ij}(b) - u_{ij} | => min
    ##
    ## over all ultrametrics u.  Let's use a SUMT (for which "gradients"
    ## can optionally be switched off) ...
    L <- function(d) {
        sum(w * sapply(ultrametrics, function(u) sum(abs(u - d))))
        ## Could also do something like
        ##   sum(w * sapply(ultrametrics, cl_dissimilarity, d,
        ##                  "manhattan"))
    }
    P <- .make_penalty_function_ultrametric(n)
    if(gradient) {
        grad_L <- function(d) {
            ## "Gradient" is \sum_b w_b sign(d - u(b)).
            .weighted_sum_of_vectors(lapply(ultrametrics,
                                            function(u) sign(d - u)),
                                     w)
        }
        grad_P <- .make_penalty_gradient_ultrametric(n)
    }
    else
        grad_L <- grad_P <- NULL

    if(is.null(start)) {
        ## Initialize by "random shaking" of the weighted median of the
        ## ultrametrics.  Any better ideas?
        ##
        ## Using var(x) / 3 is really L2 ...
        ##
        x <- apply(matrix(unlist(ultrametrics), ncol = B), 1,
                   weighted_median, w)
        start <- replicate(nruns,
                           x + rnorm(length(x), sd = sd(x) / sqrt(3)),
                           simplify = FALSE)
    }

    out <- sumt(start, L, P, grad_L, grad_P,
                method = control$method, eps = control$eps,
                q = control$q, verbose = control$verbose,
                control = as.list(control$control))

    ## Round the SUMT solution to a proper ultrametric.
    d <- .ultrametrify(out$x)
    meta <- list(objval = L(d))
    d <- .cl_ultrametric_from_veclh(d, n, labels, meta)
    as.cl_dendrogram(d)
}

### * .cl_consensus_hierarchy_majority

.cl_consensus_hierarchy_majority <-
function(clusterings, weights, control)
{
    w <- weights / sum(weights)

    ## Majority threshold p: classes with weighted relative frequency
    ## greater than p are retained; p = 1 gives the strict consensus.
    p <- control$p
    if(is.null(p))
        p <- 1 / 2
    else if(!is.numeric(p) || (length(p) != 1) ||
            (p < 1 / 2) || (p > 1))
        stop("Parameter 'p' must be in [1/2, 1].")

    classes <- lapply(clusterings, cl_classes)
    all_classes <- unique(unlist(classes, recursive = FALSE))
    ## gamma: weighted relative frequency of each candidate class.
    gamma <- double(length = length(all_classes))
    for(i in seq_along(classes))
        gamma <- gamma + w[i] * !is.na(match(all_classes, classes[[i]]))
    ## Rescale to [0, 1].
    gamma <- gamma / max(gamma)
    maj_classes <- if(p == 1) {
        ## Strict consensus tree.
        all_classes[gamma == 1]
    }
    else
        all_classes[gamma > p]
    attr(maj_classes, "labels") <- attr(classes[[1L]], "labels")
    ##
    ## Stop auto-coercing that to dendrograms once we have suitable ways
    ## of representing n-trees.
    as.cl_hierarchy(.cl_ultrametric_from_classes(maj_classes))
    ##
}

### * Utilities

### ** .cl_consensus_method_default

## Default consensus method name by ensemble type.
.cl_consensus_method_default <- function(type)
{
    switch(type,
           partition = "SE",
           hierarchy = "euclidean",
           NULL)
}

### ** .project_to_leading_columns

.project_to_leading_columns <- function(x, k)
{
    ## For a given matrix stochastic matrix x, return the stochastic
    ## matrix y which has columns from k+1 on all zero which is closest
    ## to x in the Frobenius distance.
    y <- x[, seq_len(k), drop = FALSE]
    y <- cbind(pmax(y + (1 - rowSums(y)) / k, 0),
               matrix(0, nrow(y), ncol(x) - k))
    ## (Use the pmax to ensure that entries remain nonnegative.)
}

### ** .make_X_from_p

.make_X_from_p <- function(p)
{
    ## X matrix corresponding to permutation p as needed for the AO
    ## algorithms.  I.e., x_{ij} = 1 iff j->p(j)=i.
    X <- matrix(0, length(p), length(p))
    i <- seq_along(p)
    X[cbind(p[i], i)] <- 1
    X
}

### ** .n_of_nonzero_columns

##
## Could turn this into n_of_classes.matrix().
## Number of columns of x with at least one nonzero entry.
.n_of_nonzero_columns <- function(x)
    sum(colSums(x) > 0)
##

### ** .cscale

##
## Move to utilities eventually ...
.cscale <- function(A, x)
{
    ## Scale the columns of matrix A by the elements of vector x.
    ## Formally, A %*% diag(x), but faster.
    ## Could also use sweep(A, 2, x, "*")
    rep(x, each = nrow(A)) * A
}
##

## .make_penalty_function_membership
## Penalty for the SUMT: squared negativity plus squared deviation of
## the row sums from one.
.make_penalty_function_membership <- function(nr, nc)
    function(m) {
        sum(pmin(m, 0) ^ 2) +
            sum((rowSums(matrix(m, nr)) - 1) ^ 2)
    }

## .make_penalty_gradient_membership
.make_penalty_gradient_membership <- function(nr, nc)
    function(m) {
        2 * (pmin(m, 0) +
             rep.int(rowSums(matrix(m, nr)) - 1, nc))
    }

### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "### [*]+" ***
### End: ***
clue/R/validity.R0000644000175100001440000001002314144531021013360 0ustar hornikusers## A slightly polymorphic function, similar to cluster::silhouette() and
## its methods.

cl_validity <- function(x, ...)
UseMethod("cl_validity") cl_validity.default <- function(x, d, ...) { ## Note that providing methods for classes "cl_partition" and ## "cl_hierarchy" is not good enough ... out <- list() if(.has_object_memberships(x)) { v <- .cl_validity_partition_d_a_f(cl_membership(x), as.matrix(d)) out <- list("Dissimilarity accounted for" = v) } else if(.has_object_dissimilarities(x)) { x <- cl_object_dissimilarities(x) d <- as.dist(d) out <- list("Variance accounted for" = .cl_validity_hierarchy_variance_a_f(x, d), "Deviance accounted for" = .cl_validity_hierarchy_deviance_a_f(x, d)) ## Consider adding e.g. the Agglomerative Coefficient or ## Divisive Coeffcient for more than cluster::agnes() and ## cluster::diana(), respectively. } class(out) <- "cl_validity" out } ## Package cluster: agnes(). cl_validity.agnes <- function(x, ...) { out <- list("Agglomerative coefficient" = x$ac) ## According to the docs, agnes objects always have a diss ## component, but let's be defensive ... if(!is.null(d <- x$diss)) out <- c(out, cl_validity.default(x, d)) class(out) <- "cl_validity" out } ## Package cluster: diana(). cl_validity.diana <- function(x, ...) { out <- list("Divisive coefficient" = x$dc) ## According to the docs, diana objects always have a diss ## component, but let's be defensive ... if(!is.null(d <- x$diss)) out <- c(out, cl_validity.default(x, d)) class(out) <- "cl_validity" out } ## Package clue: (virtual) class "cl_partition". cl_validity.cl_partition <- function(x, ...) cl_validity(.get_representation(x), ...) ## Package clue: class pclust. ## So that this works for all classes extending pclust ... cl_validity.pclust <- function(x, ...) x$validity print.cl_validity <- function(x, ...) { for(nm in names(x)) cat(nm, ": ", x[[nm]], "\n", sep = "") invisible(x) } .cl_validity_partition_d_a_f <- function(m, d) { ## "Dissimilarity accounted for". 
## Internal function for computing 1 - a / mean(d), where the ## "average within dissimilarity" a is given by ## \frac{\sum_{i,j} \sum_k m_{ik}m_{jk} d(i,j)} ## {\sum_{i,j} \sum_k m_{ik}m_{jk}} ## where m is the membership matrix and d a *symmetric* matrix of ## dissimilarities. within_sums <- rowSums(sapply(seq_len(ncol(m)), function(k) { z <- m[, k] w <- outer(z, z) c(sum(w * d), sum(w)) })) average_within_d <- within_sums[1L] / within_sums[2L] 1 - average_within_d / mean(d) } .cl_validity_hierarchy_variance_a_f <- function(u, d) { ## *Variance accounted for*. ## See e.g. Hubert, Arabie, & Meulman (2006), The structural ## representation of proximity matrices with MATLAB: ## variance_accounted_for = ## 1 - \frac{\sum_{i < j} (d_{ij} - u_{ij}) ^ 2} ## {\sum_{i < j} (d_{ij} - mean(d)) ^ 2} ## As this can be arbitrarily negative, we cut at 0. max(1 - sum((d - u) ^ 2) / sum((d - mean(d)) ^ 2), 0) } .cl_validity_hierarchy_deviance_a_f <- function(u, d) { ## *Deviance accounted for* (i.e., absolute deviation). ## See e.g. Smith (2001), Constructing ultrametric and additive ## trees based on the ${L}_1$ norm, Journal of Classification. ## deviance_accounted_for = ## 1 - \frac{\sum_{i < j} |d_{ij} - u_{ij}|} ## {\sum_{i < j} |d_{ij} - median(d)|} ## As this can be arbitrarily negative, we cut at 0. max(1 - sum(abs(d - u)) / sum(abs(d - median(d))), 0) } ## Silhouette methods silhouette.cl_partition <- function(x, ...) silhouette(.get_representation(x), ...) silhouette.cl_pclust <- function(x, ...) x$silhouette clue/R/fuzziness.R0000644000175100001440000000435013435044610013606 0ustar hornikuserscl_fuzziness <- function(x, method = NULL, normalize = TRUE) { x <- as.cl_ensemble(x) out <- double(length(x)) ## ## The docs say that we should only have partitions ... attr(out, "description") <- "Fuzziness" class(out) <- "cl_fuzziness" parties <- vapply(x, is.cl_partition, NA) if(!(length(x) || any(parties))) { ## Currently, no fuzzy hierarchies ... 
        return(out)
    }
    ##
    if(!is.function(method)) {
        builtin_methods <- c("PC", "PE")
        builtin_method_names <- c("partition coefficient",
                                  "partition entropy")
        ## Default to the partition coefficient; otherwise resolve a
        ## (case-insensitive, partial) match against the builtins.
        if(is.null(method))
            ind <- 1
        else if(is.na(ind <- pmatch(tolower(method),
                                    tolower(builtin_methods))))
            stop(gettextf("Value '%s' is not a valid abbreviation for a fuzziness method.",
                          method),
                 domain = NA)
        method <- paste0(".cl_fuzziness_partition_",
                         builtin_methods[ind])
        method_name <- builtin_method_names[ind]
        if(normalize)
            method_name <- paste("normalized", method_name)
    }
    else
        method_name <- "user-defined method"
    out[parties] <- as.numeric(sapply(x[parties], method, normalize))
    attr(out, "description") <- paste("Fuzziness using", method_name)
    out
}

.cl_fuzziness_partition_PC <- function(x, normalize = TRUE)
{
    ## Dunn's Partition Coefficient, see also ?fanny.
    ## Note that we normalize differently ...
    if(!.maybe_is_proper_soft_partition(x) && is.cl_hard_partition(x))
        return(1 - normalize)
    pc <- sum(cl_membership(x) ^ 2) / n_of_objects(x)
    if(normalize)
        pc <- (1 - pc) / (1 - 1 / n_of_classes(x))
    pc
}

.cl_fuzziness_partition_PE <- function(x, normalize = TRUE)
{
    ## Bezdek's Partition Entropy.
    ## Note that we normalize differently ...
    if(!.maybe_is_proper_soft_partition(x) && is.cl_hard_partition(x))
        return(0)
    M <- cl_membership(x)
    ## Guard against 0 * log(0) producing NaN.
    pe <- - sum(ifelse(M > 0, M * log(M), 0)) / n_of_objects(x)
    if(normalize)
        pe <- pe / log(n_of_classes(x))
    pe
}

print.cl_fuzziness <- function(x, ...)
{
    cat(attr(x, "description"), ":\n", sep = "")
    print(as.vector(x), ...)
    invisible(x)
}
clue/R/membership.R0000644000175100001440000001737311633352676013710 0ustar hornikusers### * cl_membership

## Get the class membership matrix from a partition.
##
## We could use sparse matrices for the memberships of hard partitions.
## Not sure if this is really that important, though, as we typically
## use memberships in a context where dense matrices (memberships of
## soft partitions) occur.
##
##
## Currently, the number of classes to be used for the memberships must
## not be less than the number of classes in the partition.  We might
## eventually change this so that "optimal" collapsing of classes is
## performed (but note that optimality needs to be relative to some
## dissimilarity measure) ...
## However, from the discussion of the second method in Gordon and Vichi
## (2001) we note that whereas optimal assignment is "simple", optimal
## collapsing (equivalent to partitioning into an arbitrary number of
## partitions) is of course very hard.
##

## Generic extractor for an n-by-k membership matrix of a partition.
## x: a partition object (from clue or a supported clustering package).
## k: number of columns of the result; must be at least the number of
##    classes in x (extra columns are filled with zeros by the methods).
cl_membership <-
function(x, k = n_of_classes(x))
{
    ## Validate k up front so every method can rely on it.
    if(k < n_of_classes(x))
        stop("k cannot be less than the number of classes in x.")
    UseMethod("cl_membership")
}

## Default method: derive memberships from the (hard) class ids.
cl_membership.default <-
function(x, k = n_of_classes(x))
    .cl_membership_from_class_ids(cl_class_ids(x), k)

## Package stats: kmeans() (R 2.1.0 or better).
cl_membership.kmeans <- cl_membership.default

## Package cluster: clara(), fanny(), and pam() give objects of the
## respective class inheriting from class "partition".
## fanny() objects carry a soft membership matrix in $membership.
cl_membership.fanny <-
function(x, k = n_of_classes(x))
    .cl_membership_from_memberships(x$membership, k)
cl_membership.partition <- cl_membership.default

## Package cclust: cclust().
cl_membership.cclust <- cl_membership.default

## Package e1071: cmeans() gives objects of class "fclust".
cl_membership.fclust <- cl_membership.fanny
## Package e1071: cshell().
cl_membership.cshell <- cl_membership.fanny
## Package e1071: bclust().
cl_membership.bclust <- cl_membership.default

## Package flexmix: class "flexmix".
##
## We used to be able to call flexmix::posterior(), but this now only
## has S4 methods for modeltools::posterior() S4 generic.  Let's call
## this one, and hope that flexmix has been loaded ...
##
cl_membership.flexmix <-
function(x, k = n_of_classes(x))
    .cl_membership_from_memberships(modeltools::posterior(x), k)

## Package mclust: Mclust().
## Mclust() objects carry the posterior class probabilities in $z.
cl_membership.Mclust <-
function(x, k = n_of_classes(x))
    .cl_membership_from_memberships(x$z, k)

## Package clue: Memberships.
cl_membership.cl_membership <-
function(x, k = n_of_classes(x))
    .cl_membership_from_memberships(x, k)
## (Note: we cannot simply return x in case k equals n_of_classes(x),
## because ncol(x) might be different.)

## Package clue: pclust().
cl_membership.pclust <-
function(x, k = n_of_classes(x))
{
    ## We should really have a suitable "sparse matrix" class for
    ## representing the memberships of hard partitions.  In case we
    ## decide not to fill the membership "slot" for such:
    if(is.null(m <- x$membership))
        .cl_membership_from_class_ids(x$cluster, k)
    else
        .cl_membership_from_memberships(m, k)
}

## Package clue: (virtual) class "cl_partition".
## Delegate to the underlying representation stored in the container.
cl_membership.cl_partition <-
function(x, k = n_of_classes(x))
    cl_membership(.get_representation(x), k)

## Package movMF: class "movMF".
cl_membership.movMF <-
function(x, k = n_of_classes(x))
    .cl_membership_from_memberships(x$P, k)

### * .make_cl_membership

## A low-level common creator.
## Attaches the bookkeeping attributes and class tag to a raw
## membership matrix; all cl_membership objects are built through here.
.make_cl_membership <-
function(x, n_of_classes, is_cl_hard_partition, meta = NULL)
{
    attr(x, "n_of_classes") <- n_of_classes
    attr(x, "is_cl_hard_partition") <- is_cl_hard_partition
    attr(x, "meta") <- meta
    class(x) <- "cl_membership"
    x
}

### * .cl_membership_from_class_ids

## Build a (hard) 0/1 membership matrix from a vector of class ids.
## Rows with NA ids get all-NA rows.
.cl_membership_from_class_ids <-
function(x, k = NULL, meta = NULL)
{
    x <- factor(x)
    n_of_objects <- length(x)
    n_of_classes <- nlevels(x)
    if(is.null(k))
        k <- n_of_classes
    else if(k < n_of_classes)
        stop("k cannot be less than the number of classes in x.")
    ##
    ## Should really use a sparse encoding of this ...
    M <- matrix(0, n_of_objects, k)
    ## (Could also use .one_entry_per_column(M, as.numeric(x)) <- 1 for
    ## the time being.)
    M[cbind(seq_len(n_of_objects), as.numeric(x))] <- 1
    ## But note that we also need to handle NAs ...
    M[is.na(x), ] <- NA
    ##
    if(nlevels(x) == k)
        colnames(M) <- levels(x)
    if(!is.null(nm <- names(x)))
        rownames(M) <- nm
    ## Hard partition by construction (one unit entry per row).
    .make_cl_membership(M, n_of_classes, TRUE, meta)
}

### * .cl_membership_from_memberships

## Canonicalize a (possibly soft) membership matrix: drop zero columns,
## then pad with zero columns up to k if requested.
.cl_membership_from_memberships <-
function(x, k = NULL, meta = NULL)
{
    ##
    ## Dropping and re-filling of zero columns in case k is given may
    ## seem unnecessary, but really canonicalizes by moving zero columns
    ## last ...
    ##
    x <- x[ , colSums(x, na.rm = TRUE) > 0, drop = FALSE]
    n_of_classes <- ncol(x)
    if(!is.null(k)) {
        if(k < n_of_classes)
            stop("k cannot be less than the number of classes in x.")
        if(k > n_of_classes) {
            ## Fill up with zero columns.
            x <- cbind(x, matrix(0, nrow(x), k - n_of_classes))
            ## Handle NAs if necessary.
            x[apply(is.na(x), 1, any), ] <- NA
        }
    }
    ## Hardness test: every row must contain an exact 1 entry.
    .make_cl_membership(x, n_of_classes,
                        all(rowSums(x == 1, na.rm = TRUE) > 0),
                        meta)
}

### * as.cl_membership

as.cl_membership <-
function(x)
    UseMethod("as.cl_membership")

## Atomic vectors are taken as class ids; everything else must be a
## partition-like object handled by cl_membership().
as.cl_membership.default <-
function(x)
{
    if(inherits(x, "cl_membership"))
        x
    else if(is.atomic(x))
        .cl_membership_from_class_ids(x)
    else
        cl_membership(x)
}

as.cl_membership.matrix <-
function(x)
    .cl_membership_from_memberships(x)

### * .memberships_from_cross_dissimilarities

.memberships_from_cross_dissimilarities <-
function(d, power = 2)
{
    ## For a given matrix of cross-dissimilarities [d_{bj}], return a
    ## matrix [u_{bj}] such that \sum_{b,j} u_{bj}^p d_{bj}^q => min!
    ## under the constraint that u is a stochastic matrix.
    ## If only one power is given, it is taken as p, with q as 1.
    ##
    ## This returns a plain matrix of membership values and not a
    ## cl_membership object (so that it does not deal with possibly
    ## dropping or re-introducing unused classes).
    ##
    exponent <- if(length(power) == 1L)
        1 / (1 - power)
    else
        power[2L] / (1 - power[1L])
    u <- matrix(0, nrow(d), ncol(d))
    ## Rows with zero dissimilarities get uniform mass on the zeros.
    zero_incidences <- !(d > 0)
    n_of_zeroes <- rowSums(zero_incidences)
    if(any(ind <- (n_of_zeroes > 0)))
        u[ind, ] <-
            zero_incidences[ind, , drop = FALSE] / n_of_zeroes[ind]
    if(any(!ind)) {
        ## Compute d_{bj}^e / \sum_k d_{bk}^e without overflow from very
        ## small d_{bj} values.
        d <- exponent * log(d[!ind, , drop = FALSE])
        d <- exp(d - d[cbind(seq_len(nrow(d)), max.col(d))])
        u[!ind, ] <- d / rowSums(d)
    }
    u
}

### * print.cl_membership

print.cl_membership <-
function(x, ...)
{
    writeLines("Memberships:")
    ## Strip the cl_membership attributes for display purposes.
    print(matrix(as.vector(x), nrow = nrow(x), dimnames = dimnames(x)),
          ...)
    invisible(x)
}

### .has_object_memberships

## Be nice to users when computing proximities: all measures for
## "partitions" we currently consider really only assume that we can
## compute memberships and/or class ids.
## Note that the cl_membership() default method works for cl_class_ids.
.has_object_memberships <-
function(x)
    (is.cl_partition(x)
     || inherits(x, "cl_membership")
     || inherits(x, "cl_class_ids"))

### * .stochastify

.stochastify <-
function(x)
{
    ## Try to ensure that a stochastic matrix is returned.
    ## (Clamp negatives to zero, then rescale rows to sum to one.)
    x <- pmax(x, 0)
    x / rowSums(x)
}

### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "### [*]+" ***
### End: ***
clue/R/classes.R0000644000175100001440000000460613434542602013212 0ustar hornikusers
## Generic: list the classes (object subsets) of a partition or
## hierarchy.
cl_classes <-
function(x)
    UseMethod("cl_classes")

cl_classes.default <-
function(x)
{
    ## Be nice to users ...
    if(is.cl_partition(x))
        cl_classes(as.cl_partition(x))
    else if(is.cl_dendrogram(x))
        cl_classes(as.cl_dendrogram(x))
    else
        stop("Can only determine classes of partitions or hierarchies.")
}

## For a partition: one class per cluster, holding the object indices.
cl_classes.cl_partition <-
function(x)
{
    n <- n_of_objects(x)
    out <- split(seq_len(n), cl_class_ids(x))
    class(out) <- c("cl_classes_of_partition_of_objects",
                    "cl_classes_of_objects")
    attr(out, "n_of_objects") <- n
    attr(out, "labels") <- cl_object_labels(x)
    out
}

## For a hierarchy: all classes (object subsets) induced by cutting the
## tree at each split height, ordered by cardinality.
cl_classes.cl_hierarchy <-
function(x)
{
    ## Assume a valid hierarchy/dendrogram.
    x <- as.hclust(x)
    n <- n_of_objects(x)
    labels <- seq_len(n)
    ## Only use the "maximal" partitions for each height (relevant in
    ## case of non-binary trees).
    groups <- cutree(x, h = unique(c(0, x$height)))
    ## Give a list with the (unique) sets of numbers of the objects.
    ## Note that objects may already be merged at height zero.
    out <- unique(unlist(c(as.list(labels),
                           lapply(split(groups, col(groups)),
                                  function(k) split(labels, k))),
                  recursive = FALSE, use.names = FALSE))
    ## Preserve labels if possible, and re-order according to
    ## cardinality.
    out <- out[order(lengths(out))]
    class(out) <- c("cl_classes_of_hierarchy_of_objects",
                    "cl_classes_of_objects")
    attr(out, "n_of_objects") <- n
    attr(out, "labels") <- cl_object_labels(x)
    out
}

## Be nice to users of ultrametric fitters ... which should really fit
## dendrograms (which inherit from hierarchies).
cl_classes.cl_ultrametric <- cl_classes.cl_hierarchy

print.cl_classes_of_partition_of_objects <-
function(x, ...)
{
    labels <- attr(x, "labels")
    y <- lapply(x, function(i) paste(labels[i], collapse = ", "))
    writeLines(formatDL(names(x), sprintf("{%s}", unlist(y)),
                        style = "list", ...))
    invisible(x)
}

print.cl_classes_of_hierarchy_of_objects <-
function(x, ...)
{
    labels <- attr(x, "labels")
    y <- lapply(x, function(i) paste(labels[i], collapse = ", "))
    ## Wrap the class list; the last entry gets no trailing comma.
    y <- strwrap(sprintf("{%s},", unlist(y)), exdent = 2)
    y[length(y)] <- sub(",$", "", y[length(y)])
    writeLines(y)
    invisible(x)
}
clue/R/lsap.R0000644000175100001440000000147012036747337012515 0ustar hornikusers
## Solve the linear sum assignment problem for a nonnegative cost
## matrix x via the C implementation (Hungarian method).
## Rectangular problems (nr < nc) are handled by padding with dummy
## rows of prohibitively large cost; maximization by cost reflection.
solve_LSAP <-
function(x, maximum = FALSE)
{
    if(!is.matrix(x) || any(x < 0))
        stop("x must be a matrix with nonnegative entries.")
    nr <- nrow(x)
    nc <- ncol(x)
    if(nr > nc)
        stop("x must not have more rows than columns.")
    if(nc > nr)
        ## Pad with dummy rows whose cost (2 * sum(x)) exceeds any
        ## achievable assignment cost, so real rows are matched first.
        x <- rbind(x, matrix(2 * sum(x), nc - nr, nc))
    if(maximum) x <- max(x) - x
    storage.mode(x) <- "double"
    ## C code returns a 0-based permutation; shift to 1-based.
    out <- .C(C_solve_LSAP,
              x,
              as.integer(nc),
              p = integer(nc))$p + 1
    ## Drop the assignments of the dummy rows.
    out <- out[seq_len(nr)]
    class(out) <- "solve_LSAP"
    out
}

print.solve_LSAP <-
function(x, ...)
{
    ## Use "x" as a placeholder to keep "=>" intact across strwrap(),
    ## then turn it back into spaces.
    writeLines(c("Optimal assignment:",
                 gsub("x", " ",
                      strwrap(paste(seq_along(x), x,
                                    sep = "x=>x", collapse = ", ")))))
    invisible(x)
}
clue/R/hierarchy.R0000644000175100001440000002341713036464014013532 0ustar hornikusers
### * is.cl_hierarchy

## Determine whether an object is a hierarchy.
## Note that hierarchies are n-trees, which can naturally be represented
## by their classes (as done via cl_classes()) or internal ultrametric
## obtained by assigning height one to all splits (as done by
## .cl_ultrametric_from_classes()).
## We typically used the latter, but note that this is an *internal*
## representation.
## User-level, cl_dendrogram objects are indexed hierarchies, and
## cl_hierarchy objects are n-trees.  The latter can be "converted" into
## the former (using height one splits) via as.cl_dendrogram().

is.cl_hierarchy <-
function(x)
    UseMethod("is.cl_hierarchy")

## Default method.
is.cl_hierarchy.default <- .false

## Package stats: hclust().
## Heights must be non-decreasing for a valid dendrogram.
is.cl_hierarchy.hclust <- function(x) !is.unsorted(x$height)

## Package cluster: agnes() and diana() give objects inheriting from
## class "twins".
is.cl_hierarchy.twins <- .true

## Package cluster: mona().
is.cl_hierarchy.mona <- .true

## Package ape: class "phylo".
is.cl_hierarchy.phylo <- function(x) ape::is.ultrametric(x)

## Package clue: (virtual) class "cl_hierarchy".
## Note that "raw" cl_ultrametric objects are *not* hierarchies, as
## these are meant for numeric computations.
##
## Is this really a good idea?
## We can as.hclust() a cl_dendrogram and then it is a cl_hierarchy ...
##
is.cl_hierarchy.cl_hierarchy <- .true

### * as.cl_hierarchy

## Note that cl_hierarchy conceptually is a virtual class, so there are
## no prototypes and no cl_hierarchy() creator.

.cl_hierarchy_classes <- "cl_hierarchy"

## Coerce to a cl_hierarchy container; non-hierarchies are first taken
## to their ultrametric representation.
as.cl_hierarchy <-
function(x)
{
    if(is.cl_hierarchy(x)) {
        if(!inherits(x, "cl_hierarchy"))
            .make_container(x, .cl_hierarchy_classes)
        else
            x
    }
    else
        .make_container(as.cl_ultrametric(x), .cl_hierarchy_classes)
}

### * print.cl_hierarchy

print.cl_hierarchy <-
function(x, ...)
    .print_container(x, "cl_hierarchy", ...)

### * Complex.cl_hierarchy

## No Complex() for any kind of hierarchy.
Complex.cl_hierarchy <-
function(z)
    stop(gettextf("Generic '%s' not defined for \"%s\" objects.",
                  .Generic, .Class),
         domain = NA)

### * Math.cl_hierarchy

## No Math() for any kind of hierarchy.
Math.cl_hierarchy <-
function(x, ...)
    stop(gettextf("Generic '%s' not defined for \"%s\" objects.",
                  .Generic, .Class),
         domain = NA)

### * Ops.cl_hierarchy

## Comparison operators for n-trees, interpreted as the refinement
## partial order on their class sets (e1 <= e2 iff every class of e1 is
## also a class of e2).
Ops.cl_hierarchy <-
function(e1, e2)
{
    if(nargs() == 1L)
        stop(gettextf("Unary '%s' not defined for \"%s\" objects.",
                      .Generic, .Class),
             domain = NA)
    ## Only comparisons are supported.
    if(!(as.character(.Generic)
         %in% c("<", "<=", ">", ">=", "==", "!=")))
        stop(gettextf("Generic '%s' not defined for \"%s\" objects.",
                      .Generic, .Class),
             domain = NA)
    if(n_of_objects(e1) != n_of_objects(e2))
        stop("Hierarchies must have the same number of objects.")
    c1 <- cl_classes(e1)
    c2 <- cl_classes(e2)
    ## Set comparisons via match(): finite match <=> class contained in
    ## the other class system.
    switch(.Generic,
           "<=" = all(is.finite(match(c1, c2))),
           "<" = all(is.finite(match(c1, c2))) &&
                 any(is.na(match(c2, c1))),
           ">=" = all(is.finite(match(c2, c1))),
           ">" = all(is.finite(match(c2, c1))) &&
                 any(is.na(match(c1, c2))),
           "==" = all(is.finite(match(c1, c2))) &&
                  all(is.finite(match(c2, c1))),
           "!=" = any(is.na(match(c1, c2))) ||
                  any(is.na(match(c2, c1))))
}

### * Summary.cl_hierarchy

##
## This is really the same as Summary.cl_partition().
##
## min/max/range are the lattice meet/join of the given hierarchies.
Summary.cl_hierarchy <-
function(..., na.rm = FALSE)
{
    ok <- switch(.Generic, max = , min = , range = TRUE, FALSE)
    if(!ok)
        stop(gettextf("Generic '%s' not defined for \"%s\" objects.",
                      .Generic, .Class),
             domain = NA)
    args <- list(...)
    switch(.Generic,
           "min" = cl_meet(cl_ensemble(list = args)),
           "max" = cl_join(cl_ensemble(list = args)),
           "range" = {
               cl_ensemble(min = cl_meet(cl_ensemble(list = args)),
                           max = cl_join(cl_ensemble(list = args)))
           })
}

### * as.hclust.cl_hierarchy

as.hclust.cl_hierarchy <-
function(x, ...)
    as.hclust(.get_representation(x), ...)

### * is.cl_dendrogram

##
## Once we have cl_dendrogram testing, we can simplify cl_hierarchy
## testing.  E.g.,
##   is.cl_hierachy.default <- is.cl_dendrogram
## should be ok, and we can add cl_hierarchy predicates for hierarchies
## which are not dendrograms on top of that.
##

is.cl_dendrogram <-
function(x)
    UseMethod("is.cl_dendrogram")

## Default method.
is.cl_dendrogram.default <- .false

## Package stats: hclust().
is.cl_dendrogram.hclust <- function(x) !is.unsorted(x$height)

## Package cluster: agnes() and diana() give objects inheriting from
## class "twins".
is.cl_dendrogram.twins <- .true

## Package cluster: mona().
is.cl_dendrogram.mona <- .true

## Package ape: class "phylo".
is.cl_dendrogram.phylo <- function(x) ape::is.ultrametric(x)
## (We could also support ape's class "matching" via coercion to class
## "phylo".)

## Package clue: (virtual) class "cl_dendrogram".
is.cl_dendrogram.cl_dendrogram <- .true

### * as.cl_dendrogram

## cl_dendrogram inherits from cl_hierarchy.
.cl_dendrogram_classes <- c("cl_dendrogram", "cl_hierarchy")

## Coerce to a cl_dendrogram container; non-dendrograms are first taken
## to their ultrametric representation.
as.cl_dendrogram <-
function(x)
{
    if(is.cl_dendrogram(x)) {
        if(!inherits(x, "cl_dendrogram"))
            .make_container(x, .cl_dendrogram_classes)
        else
            x
    }
    else
        .make_container(as.cl_ultrametric(x), .cl_dendrogram_classes)
}

### * print.cl_dendrogram

print.cl_dendrogram <-
function(x, ...)
    .print_container(x, "cl_dendrogram", ...)

### * plot.cl_dendrogram

## Plot via the ultrametric representation.
plot.cl_dendrogram <-
function(x, ...)
    plot(cl_ultrametric(.get_representation(x)), ...)

### * Group methods for cl_dendrogram objects.

## Comparison operators for dendrograms, interpreted elementwise on the
## corresponding ultrametrics.
Ops.cl_dendrogram <-
function(e1, e2)
{
    if(nargs() == 1L)
        stop(gettextf("Unary '%s' not defined for \"%s\" objects.",
                      .Generic, .Class),
             domain = NA)
    ## Only comparisons are supported.
    if(!(as.character(.Generic)
         %in% c("<", "<=", ">", ">=", "==", "!=")))
        stop(gettextf("Generic '%s' not defined for \"%s\" objects.",
                      .Generic, .Class),
             domain = NA)
    u1 <- cl_ultrametric(e1)
    u2 <- cl_ultrametric(e2)
    if(length(u1) != length(u2))
        stop("Dendrograms must have the same number of objects.")
    switch(.Generic,
           "<=" = all(u1 <= u2),
           "<" = all(u1 <= u2) && any(u1 < u2),
           ">=" = all(u1 >= u2),
           ">" = all(u1 >= u2) && any(u1 > u2),
           "==" = all(u1 == u2),
           "!=" = any(u1 != u2))
}

### * Summary.cl_dendrogram

##
## This is really the same as Summary.cl_hierarchy() ...
## We cannot really call the poset specific internal meet and join
## functions from here as e.g. max(D, H) (D a dendrogram, H an n-tree)
## should use the n-tree poset functions ...
## However, dispatch for cl_dendrogram should not be needed if we also
## dispatch on cl_hierarchy ...
##
## Summary.cl_dendrogram <-
## function(..., na.rm = FALSE)
## {
##     ok <- switch(.Generic, max = , min = , range = TRUE, FALSE)
##     if(!ok)
##         stop(gettextf("Generic '%s' not defined for \"%s\" objects.",
##                       .Generic, .Class))
##     args <- list(...)
##     switch(.Generic,
##            "min" = cl_meet(cl_ensemble(list = args)),
##            "max" = cl_join(cl_ensemble(list = args)),
##            "range" = {
##                cl_ensemble(min = cl_meet(cl_ensemble(list = args)),
##                            max = cl_join(cl_ensemble(list = args)))
##            })
## }

### * as.hclust.cl_dendrogram

##
## This is really the same as as.hclust.cl_hierarchy() ...
## Dispatch for cl_dendrogram should not be needed if we also dispatch
## on cl_hierarchy ...
##
## as.hclust.cl_dendrogram <-
## function(x, ...)
##     as.hclust(.get_representation(x), ...)

### ** cut.cl_dendrogram

## Not perfect as this perhaps return something more "classed" in the
## spirit of clue ...
cut.cl_dendrogram <-
function(x, ...)
    cutree(as.hclust(x), ...)

### * Utilities

## To turn a mona object into a cl_dendrogram, we need to be able to
## compute its associated ultrametric.  Hence, provide a cophenetic()
## method for mona objects ...
## Walks the mona split structure recursively: objects separated at
## step s of the divisive process get cophenetic value ns - s (so that
## earlier splits correspond to larger heights).
cophenetic.mona <-
function(x)
{
    no <- length(x$order)
    ns <- max(x$step) + 1
    m <- matrix(NA, no, no)
    ## Recursive helper: ind are the object indices of the current
    ## subtree, step the corresponding split steps, s the current level.
    ## Fills m by assignment to the enclosing environment (<<-).
    FOO <- function(ind, step, s) {
        if(length(ind) <= 1) return()
        ## Group objects by the splits occurring at level s.
        grp <- c(0, cumsum(step == s))
        ind <- split(ind, grp)
        len <- length(ind)
        for(a in seq_len(len)) {
            for(b in seq_len(a - 1L)) {
                ## Need both as we currently cannot assume that the
                ## indices are sorted.  Alternatively, work with the
                ## sequence from one to the number of objects, and
                ## reorder at the end ...
                m[ind[[a]], ind[[b]]] <<- s
                m[ind[[b]], ind[[a]]] <<- s
            }
        }
        ## Recurse into the subgroups that can still be split.
        ind <- ind[lengths(ind) > 1L]
        pos <- which(step == s)
        step <- split(step[-pos], grp[-1][-pos])
        if(is.null(step)) return()
        for(a in seq_along(ind))
            FOO(ind[[a]], step[[a]], s + 1)
    }
    FOO(x$order, x$step, 1)
    ## Objects never separated stay at NA: they split "last" (step ns).
    m[is.na(m)] <- ns
    ## Invert so that earlier splits give larger dissimilarities.
    m <- ns - m
    rownames(m) <- rownames(x$data)
    as.dist(m)
}

## And while we're at it ...
## (Of course, as.hclust() should really "know" that a cophenetic() ## method is available ...) as.hclust.mona <- function(x, ...) hclust(cophenetic(x), "single") ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/ensemble.R0000644000175100001440000001567013435044575013361 0ustar hornikuserscl_ensemble <- function(..., list = NULL) { clusterings <- c(list(...), list) if(!length(clusterings)) { ## Return an empty cl_ensemble. ## In this case, we cannot additionally know whether it contains ## partitions or hierarchies ... attr(clusterings, "n_of_objects") <- as.integer(NA) class(clusterings) <- "cl_ensemble" return(clusterings) } ## Previously, we used to require that the elements of the ensemble ## either all be partitions, or all be hierarchies. We no longer do ## this, as it makes sense to also allow e.g. object dissimilarities ## (raw "dist" objects or additive distances) as elements (e.g., ## when computing proximities), and it is rather cumbersome to ## decide in advance which combinations of elements might be useful ## and hence should be allowed. All we enforce is that all elements ## correspond to the same number of objects (as we typically cannot ## verify that they relate to the *same* objects). For "pure" ## ensembles of partitions or hierarchies we add additional class ## information. 
if(all(vapply(clusterings, is.cl_partition, NA))) class(clusterings) <- c("cl_partition_ensemble", "cl_ensemble") else if(all(vapply(clusterings, is.cl_dendrogram, NA))) class(clusterings) <- c("cl_dendrogram_ensemble", "cl_hierarchy_ensemble", "cl_ensemble") else if(all(vapply(clusterings, is.cl_hierarchy, NA))) class(clusterings) <- c("cl_hierarchy_ensemble", "cl_ensemble") else class(clusterings) <- "cl_ensemble" n <- sapply(clusterings, n_of_objects) if(any(diff(n))) stop("All elements must have the same number of objects.") attr(clusterings, "n_of_objects") <- as.integer(n[1L]) clusterings } is.cl_ensemble <- function(x) inherits(x, "cl_ensemble") ## ## In the old days, kmeans() results were unclassed lists, hence such ## objects were taken as representing a single clustering. Nowadays, we ## take these as lists of clusterings. as.cl_ensemble <- function(x) { if(is.cl_ensemble(x)) x else if(is.list(x) && !is.object(x)) cl_ensemble(list = x) else cl_ensemble(x) } ## c.cl_ensemble <- function(..., recursive = FALSE) { clusterings <- unlist(lapply(list(...), as.cl_ensemble), recursive = FALSE) cl_ensemble(list = clusterings) } "[.cl_ensemble" <- function(x, i) { ## Make subscripting empty ensembles a noop. if(length(x) == 0L) return(x) cl_ensemble(list = NextMethod("[")) } rep.cl_ensemble <- function(x, times, ...) cl_ensemble(list = NextMethod("rep")) print.cl_partition_ensemble <- function(x, ...) { msg <- sprintf(ngettext(length(x), "An ensemble of %d partition of %d objects.", "An ensemble of %d partitions of %d objects."), length(x), n_of_objects(x)) writeLines(strwrap(msg)) invisible(x) } Summary.cl_partition_ensemble <- function(..., na.rm = FALSE) { ok <- switch(.Generic, max = , min = , range = TRUE, FALSE) if(!ok) stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) args <- list(...) ## Combine the given partition ensembles. 
x <- do.call(c, args) switch(.Generic, "min" = cl_meet(x), "max" = cl_join(x), "range" = cl_ensemble(min = cl_meet(x), max = cl_join(x))) } print.cl_dendrogram_ensemble <- function(x, ...) { msg <- sprintf(ngettext(length(x), "An ensemble of %d dendrogram of %d objects.", "An ensemble of %d dendrograms of %d objects."), length(x), n_of_objects(x)) writeLines(strwrap(msg)) invisible(x) } print.cl_hierarchy_ensemble <- function(x, ...) { msg <- sprintf(ngettext(length(x), "An ensemble of %d hierarchy of %d objects.", "An ensemble of %d hierarchies of %d objects."), length(x), n_of_objects(x)) writeLines(strwrap(msg)) invisible(x) } print.cl_ensemble <- function(x, ...) { writeLines(sprintf(ngettext(length(x), "An ensemble with %d element.", "An ensemble with %d elements."), length(x))) invisible(x) } plot.cl_ensemble <- function(x, ..., main = NULL, layout = NULL) { if(!is.cl_ensemble(x)) stop("Wrong class.") ## What we can definitely plot is are cl_addtree, cl_dendrogram and ## cl_ultrametric objects. (We could also add simple methods for ## plotting raw dissimilarities, but of course seriation::dissplot() ## would be the thing to use.) What we cannot reasonably plot is ## partitions (in particular, as these do not know about the ## underlying dissimilarities. But then we could perhaps provide ## silhoutte plots etc for ensembles of partitions ... ## ## Think about this. ## ## So let us check for the things we can plot. ## (Note that currently there is neither is.cl_ultrametric() nor ## is.cl_addtree().) ok <- vapply(x, function(e) (is.cl_dendrogram(e) || inherits(e, c("cl_addtree", "cl_ultrametric"))), NA) if(!all(ok)) stop(gettextf("Plotting not available for elements %s of the ensemble.", paste(which(!ok), collapse = " ")), domain = NA) ## Prefer dendrogram plot methods to those for hclust objects. ind <- vapply(x, is.cl_dendrogram, NA) if(any(ind)) x[ind] <- lapply(x, as.cl_dendrogram) ## Now the usual layouting ... same as for plotting relation ## ensembles. 
## Number of elements. n <- length(x) ## Layout. byrow <- TRUE if(is.null(layout)) { nc <- ceiling(sqrt(n)) nr <- ceiling(n / nc) } else { layout <- c(as.list(layout), byrow)[seq_len(3)] if(is.null(names(layout))) names(layout) <- c("nr", "nc", "byrow") nr <- layout[["nr"]] nc <- layout[["nc"]] byrow <- layout[["byrow"]] } op <- if(byrow) par(mfrow = c(nr, nc)) else par(mfcol = c(nr, nc)) on.exit(par(op)) ## Try recycling main (might want the same for others as well). if(!is.list(main)) { main <- if(is.null(main)) vector("list", length = n) else rep.int(as.list(main), n) } for(i in seq_along(x)) plot(x[[i]], main = main[[i]], ...) } unique.cl_ensemble <- function(x, incomparables = FALSE, ...) cl_ensemble(list = NextMethod("unique")) .cl_ensemble_type <- function(x) { if(inherits(x, "cl_partition_ensemble")) "partition" else if(inherits(x, "cl_hierarchy_ensemble")) "hierarchy" else NULL } clue/R/dissimilarity.R0000644000175100001440000004506114335746635014460 0ustar hornikusers### * cl_dissimilarity cl_dissimilarity <- function(x, y = NULL, method = "euclidean", ...) { x <- as.cl_ensemble(x) is_partition_ensemble <- (inherits(x, "cl_partition_ensemble") || all(vapply(x, .has_object_memberships, NA))) ## Be nice. if(is.character(y) || is.function(y)) { method <- y y <- NULL } if(is.function(method)) method_name <- "user-defined method" else { if(!inherits(method, "cl_dissimilarity_method")) { ## Get the method definition and description from the ## registry. 
type <- ifelse(is_partition_ensemble, "partition", "hierarchy") method <- get_cl_dissimilarity_method(method, type) } method_name <- method$description method <- method$definition } if(!is.null(y)) { y <- as.cl_ensemble(y) is_partition_ensemble_y <- (inherits(y, "cl_partition_ensemble") || all(vapply(x, .has_object_memberships, NA))) if(!identical(is_partition_ensemble, is_partition_ensemble_y)) stop("Cannot mix partitions and hierarchies.") if(n_of_objects(x) != n_of_objects(y)) stop("All clusterings must have the same number of objects.") ## Build a cross-proximity object of cross-dissimilarities. d <- matrix(0, length(x), length(y)) for(j in seq_along(y)) d[, j] <- sapply(x, method, y[[j]], ...) dimnames(d) <- list(names(x), names(y)) return(cl_cross_proximity(d, method_name, class = "cl_cross_dissimilarity")) } ## Otherwise, build a proximity object of dissimilarities. n <- length(x) d <- vector("list", length = n - 1L) ind <- seq_len(n) while(length(ind) > 1L) { j <- ind[1L] ind <- ind[-1L] d[[j]] <- sapply(x[ind], method, x[[j]], ...) } cl_proximity(unlist(d), method_name, labels = names(x), size = n, class = c("cl_dissimilarity", "cl_proximity", "dist")) } ### ** .cl_dissimilarity_partition_euclidean .cl_dissimilarity_partition_euclidean <- function(x, y) { k <- max(n_of_classes(x), n_of_classes(y)) M_x <- cl_membership(x, k) M_y <- cl_membership(y, k) ## Match classes from conforming memberships. 
ind <- solve_LSAP(crossprod(M_x, M_y), maximum = TRUE) sqrt(sum((M_x - M_y[, ind]) ^ 2)) } ### ### ** .cl_dissimilarity_partition_manhattan .cl_dissimilarity_partition_manhattan <- function(x, y) { k <- max(n_of_classes(x), n_of_classes(y)) M_x <- cl_membership(x, k) M_y <- cl_membership(y, k) C <- .cxdist(M_x, M_y, "manhattan") ind <- solve_LSAP(C) sum(C[cbind(seq_along(ind), ind)]) } ### ** .cl_dissimilarity_partition_comemberships .cl_dissimilarity_partition_comemberships <- function(x, y) { ## We used to have the straightforward ## C_x <- tcrossprod(cl_membership(x)) # M_x M_x' ## C_y <- tcrossprod(cl_membership(y)) # M_y M_y' ## sum((C_x - C_y) ^ 2) / n_of_objects(x) ^ 2 ## But note that ## \| AA' - BB' \|^2 ## = tr((AA' - BB')'(AA' - BB') ## = tr(A'A A'A) - 2 tr(A'B B'A) + tr(B'B B'B) ## = \| A'A \|^2 - 2 \| A'B \|^2 + \| B'B \|^2 ## which can be computed much more efficiently as all involved cross ## product matrices are "small" ... k <- max(n_of_classes(x), n_of_classes(y)) M_x <- cl_membership(x, k) M_y <- cl_membership(y, k) sqrt(sum(crossprod(M_x) ^ 2) - 2 * sum(crossprod(M_x, M_y) ^ 2) + sum(crossprod(M_y) ^ 2)) } ### ** .cl_dissimilarity_partition_symdiff .cl_dissimilarity_partition_symdiff <- function(x, y) { ## Cardinality of the symmetric difference of the partitions ## regarded as binary equivalence relations, i.e., the number of ## discordant pairs. ## Handle soft partitions using the corresponding hard ones. ## (At least, for the time being.) n <- n_of_objects(x) .cl_dissimilarity_partition_Rand(x, y) * choose(n, 2) } ### ** .cl_dissimilarity_partition_Rand .cl_dissimilarity_partition_Rand <- function(x, y) { ## Handle soft partitions using the corresponding hard ones. ## (At least, for the time being.) 
1 - .cl_agreement_partition_Rand(x, y) } ### ** .cl_dissimilarity_partition_GV1 .cl_dissimilarity_partition_GV1 <- function(x, y) { k_x <- n_of_classes(x) k_y <- n_of_classes(y) M_x <- cl_membership(x, k_x) M_y <- cl_membership(y, k_y) C <- outer(colSums(M_x ^ 2), colSums(M_y ^ 2), `+`) - 2 * crossprod(M_x, M_y) if(k_x < k_y) C <- rbind(C, matrix(0, nrow = k_y - k_x, ncol = k_y)) else if(k_x > k_y) C <- cbind(C, matrix(0, nrow = k_x, ncol = k_x - k_y)) ind <- solve_LSAP(C) sqrt(sum(C[cbind(seq_along(ind), ind)])) ## (Note that this sum really only includes matched non-dummy ## classes.) } ### ** .cl_dissimilarity_partition_BA_A .cl_dissimilarity_partition_BA_A <- function(x, y) { .cl_dissimilarity_partition_manhattan(as.cl_hard_partition(x), as.cl_hard_partition(y)) / 2 ## Could to this more efficiently, of course ... } ### ** .cl_dissimilarity_partition_BA_C .cl_dissimilarity_partition_BA_C <- function(x, y) { n_of_classes(x) + n_of_classes(y) - 2 * n_of_classes(cl_join(x, y)) } ### ** .cl_dissimilarity_partition_BA_D .cl_dissimilarity_partition_BA_D <- .cl_dissimilarity_partition_Rand ### ** .cl_dissimilarity_partition_BA_E .cl_dissimilarity_partition_BA_E <- function(x, y) { z <- table(cl_class_ids(x), cl_class_ids(y)) z <- z / sum(z) ## Average mutual information between the partitions. y <- outer(rowSums(z), colSums(z)) i <- which((z > 0) & (y > 0)) I <- sum(z[i] * log(z[i] / y[i])) ## Entropy of meet(x, y). i <- which(z > 0) H <- - sum(z[i] * log(z[i])) 1 - I / H } ### ** .cl_dissimilarity_partition_VI .cl_dissimilarity_partition_VI <- function(x, y, weights = 1) { ## Variation of information for general "soft clusterings", cf ## Section 5.2. in Meila (2002). 
weights <- rep_len(weights, n_of_objects(x)) weights <- weights / sum(weights) M_x <- cl_membership(x) ## Weighted marginal distribution of x: m_x <- colSums(weights * M_x) M_y <- cl_membership(y) ## Weighted marginal distribution of y: m_y <- colSums(weights * M_y) gamma <- crossprod(weights * M_x, M_y) delta <- outer(m_x, m_y) ## Entropy of x: H_x <- - sum(m_x * log(ifelse(m_x > 0, m_x, 1))) ## Entropy of y: H_y <- - sum(m_y * log(ifelse(m_y > 0, m_y, 1))) ## VI is H_x + H_y minus twice the (weighted) joint information. i <- which((gamma > 0) & (delta > 0)) H_x + H_y - 2 * sum(gamma[i] * log(gamma[i] / delta[i])) } ### ** .cl_dissimilarity_partition_Mallows .cl_dissimilarity_partition_Mallows <- function(x, y, p = 1, alpha = NULL, beta = NULL) { ## Currently, no "real" primal-dual solver for minimum cost flow ## problems, and lpSolve::lp.transport() seems to work only for ## integer bounds. Hence, rather than using ## ## C <- .cxdist(cl_membership(x), cl_membership(y), ## "minkowski", p) ^ p ## n_x <- nrow(C) ## n_y <- ncol(C) ## if(is.null(alpha)) ## alpha <- rep.int(1 / n_x, n_x) ## else { ## alpha <- rep_len(alpha, n_x) ## alpha <- alpha / sum(alpha) ## } ## ## etc right away, ensure a square cost matrix so that we can have ## integer bounds for at least the default case. k <- max(n_of_classes(x), n_of_classes(y)) M_x <- cl_membership(x, k) M_y <- cl_membership(y, k) C <- .cxdist(M_x, M_y, "minkowski", p) ^ p if(is.null(alpha)) alpha <- rep.int(1, k) if(is.null(beta)) beta <- rep.int(1, k) lpSolve::lp.transport(C, "min", rep.int("==", k), alpha, rep.int("==", k), beta, integers = NULL)$objval ^ (1 / p) } ### ** .cl_dissimilarity_partition_CSSD .cl_dissimilarity_partition_CSSD <- function(x, y, L = NULL, alpha = NULL, beta = NULL, ...) { ## Cluster Similarity Sensitive Distance. ## Reference: D. Zhou, J. Li and H. Zha (2005), ## A new Mallows distance based metric for comparing clusterings. 
## See .cl_dissimilarity_partition_Mallows() re solving cost flow ## problems. ## Dissimilarity is defined by minimizing ## \sum_{k,l} (1 - 2 w_{kl} / (alpha_k + beta_l)) L_{kl} ## where ## L_{kl} = \sum_i m_{x;ik} m_{y;il} distance(p_{x;k}, p_{y;l}) ## with m and p the memberships and prototypes, respectively. ## If we get matrices of prototypes, use .rxdist; otherwise, the ## user needs to specify an L function or matrix. k_x <- n_of_classes(x) k_y <- n_of_classes(y) M_x <- cl_membership(x, k_x) M_y <- cl_membership(y, k_y) if(!is.matrix(L)) { p_x <- cl_prototypes(x) p_y <- cl_prototypes(y) if(is.matrix(p_x) && is.matrix(p_y) && is.null(L)) L <- .rxdist(p_x, p_y, ...) else if(is.function(L)) L <- L(p_x, p_y) else stop("Cannot compute prototype distances.") } C <- crossprod(M_x, M_y) * L if(is.null(alpha)) alpha <- rep.int(1, k_x) if(is.null(beta)) beta <- rep.int(1, k_y) sum(C) - 2 * lpSolve::lp.transport(C / outer(alpha, beta, `+`), "max", rep.int("==", k_x), alpha, rep.int("==", k_y), beta, integers = NULL)$objval } ### ** .cl_dissimilarity_hierarchy_euclidean .cl_dissimilarity_hierarchy_euclidean <- function(x, y, weights = 1) { if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) u <- cl_object_dissimilarities(x) v <- cl_object_dissimilarities(y) sqrt(sum(weights * (u - v) ^ 2)) } ### ** .cl_dissimilarity_hierarchy_manhattan .cl_dissimilarity_hierarchy_manhattan <- function(x, y, weights = 1) { if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) u <- cl_object_dissimilarities(x) v <- cl_object_dissimilarities(y) sum(weights * abs(u - v)) } ### ** .cl_dissimilarity_hierarchy_cophenetic .cl_dissimilarity_hierarchy_cophenetic <- function(x, y) { if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) u <- cl_object_dissimilarities(x) v <- cl_object_dissimilarities(y) 1 - cor(u, v) ^ 2 } ### ** .cl_dissimilarity_hierarchy_gamma .cl_dissimilarity_hierarchy_gamma <- function(x, y) 
{ ## ## This is a dissimilarity measure that works for arbitrary ## dissimilarities, see e.g. Bock. ## (And the current implementation finally respects this ...) ## if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) u <- cl_object_dissimilarities(x) v <- cl_object_dissimilarities(y) n <- length(u) .C(C_clue_dissimilarity_count_inversions, as.double(u), as.double(v), as.integer(n), count = double(1L)) $ count / choose(n, 2) } ### ** .cl_dissimilarity_hierarchy_symdiff .cl_dissimilarity_hierarchy_symdiff <- function(x, y) { ## Cardinality of the symmetric difference of the n-trees when ## regarded as sets of subsets (classes) of the set of objects. x <- cl_classes(x) y <- cl_classes(y) sum(is.na(match(x, y))) + sum(is.na(match(y, x))) } ### ** .cl_dissimilarity_hierarchy_Chebyshev .cl_dissimilarity_hierarchy_Chebyshev <- function(x, y) { if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) u <- cl_object_dissimilarities(x) v <- cl_object_dissimilarities(y) max(abs(u - v)) } ### ** .cl_dissimilarity_hierarchy_Lyapunov .cl_dissimilarity_hierarchy_Lyapunov <- function(x, y) { if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) q <- cl_object_dissimilarities(x) / cl_object_dissimilarities(y) if(is.matrix(q)) q <- q[lower.tri(q)] log(max(q) / min(q)) } ### ** .cl_dissimilarity_hierarchy_BO .cl_dissimilarity_hierarchy_BO <- function(x, y, delta, ...) { ## Compute Boorman-Olivier (1973) dendrogram ("valued tree") ## dissimilarities of the form ## ## m_\delta(T_1, T_2) ## = \int_0^\infty \delta(P_1(\alpha), P_2(\alpha)) d\alpha ## ## where the trees (dendrograms) are defined as right-continuous ## maps from [0, \Infty) to the partition lattice. ## We can compute this as follows. Take the ultrametrics and use ## as.hclust() to detemine the heights \alpha_1(k) and \alpha_2(l) ## of the splits. Let \alpha_i be the sequence obtained by ## combining these two. 
Then ## ## m_\delta ## = \sum_{i=0}^{L-1} (\alpha_{i+1} - \alpha_i) ## \delta(P_1(\alpha_i), P_2(\alpha_i)) ## ## We use cutree() for computing the latter partitions. As we ## already have the hclust representations, we should be able to do ## things more efficiently ... if(inherits(x, "hclust")) t_x <- x else if(inherits(x, "cl_ultrametric")) t_x <- as.hclust(x) else if(is.cl_dendrogram(x)) t_x <- as.hclust(cl_ultrametric(x)) else return(NA) if(inherits(y, "hclust")) t_y <- y else if(inherits(y, "cl_ultrametric")) t_y <- as.hclust(y) else if(is.cl_dendrogram(y)) t_y <- as.hclust(cl_ultrametric(y)) else return(NA) if(is.unsorted(t_x$height) || is.unsorted(t_y$height)) return(NA) alpha <- sort(unique(c(t_x$height, t_y$height))) cuts_x <- cutree(t_x, h = alpha) cuts_y <- cutree(t_y, h = alpha) deltas <- mapply(cl_dissimilarity, lapply(split(cuts_x, col(cuts_x)), as.cl_partition), lapply(split(cuts_y, col(cuts_y)), as.cl_partition), MoreArgs = list(delta, ...)) sum(diff(alpha) * deltas[-length(deltas)]) } ### ** .cl_dissimilarity_hierarchy_spectral .cl_dissimilarity_hierarchy_spectral <- function(x, y) { if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) u <- cl_object_dissimilarities(x) v <- cl_object_dissimilarities(y) svd(as.matrix(u - v))$d[1L] } ### * as.dist.cl_dissimilarity as.dist.cl_dissimilarity <- function(m, diag = FALSE, upper = FALSE) { y <- c(m) ## Fill non-inherited attributes with default values. attributes(y) <- c(attributes(m)[c("Size", "Labels")], Diag = diag, Upper = upper, call = match.call()) ## (Note that as.dist.default() does not automatically add ## 'method'.) 
class(y) <- "dist" y } ### * [.cl_dissimilarity "[.cl_dissimilarity" <- function(x, i, j) { y <- NextMethod("[") if(!inherits(y, "cl_dissimilarity")) { description <- attr(x, "description") return(cl_cross_proximity(y, description = description, class = "cl_cross_dissimilarity")) } y } ### .cxdist .cxdist <- function(A, B, method = c("euclidean", "manhattan", "minkowski"), ...) { ## Return the column cross distance matrix of A and B. ## I.e., the matrix C = [c_{j,k}] with ## c_{j,k} = distance(A[, j], B[, k]) ## Currently, only Manhattan (L1) distances are provided. ## Extensions to Minkowski or even more distances (a la dist()) ## could be added eventually. ## ## Possible implementations include ## ## foo_a <- function(A, B) ## apply(B, 2, function(u) colSums(abs(A - u))) ## foo_d <- function(A, B) { ## out <- as.matrix(dist(rbind(t(A), t(B)), "manhattan")) ## dimnames(out) <- NULL ## nc_B <- NCOL(B) ## out[seq(from = NCOL(A) + 1, length.out = nc_B), seq_len(nc_B)] ## } ## foo_f <- function(A, B) { ## out <- matrix(0, NCOL(A), NCOL(B)) ## for(j in seq_len(NCOL(A))) ## for(k in seq_len(NCOL(B))) ## out[j, k] = sum(abs(A[, j] - B[, k])) ## out ## } ## ## The one actually used seems to be the best performer, with the ## "for" version a close second (note that "typically", A and B have ## much fewer columns than rows). ## only few columns method <- match.arg(method) ## Workhorse. FOO <- switch(method, "euclidean" = function(M) sqrt(colSums(M ^ 2)), "manhattan" = function(M) colSums(abs(M)), "minkowski" = { ## Power needs to be given. p <- list(...)[[1L]] function(M) (colSums(abs(M) ^ p)) ^ (1 / p) }) out <- matrix(0, NCOL(A), NCOL(B)) for(k in seq_len(NCOL(B))) out[, k] <- FOO(A - B[, k]) out } ### .rxdist .rxdist <- function(A, B, method = c("euclidean", "manhattan", "minkowski"), ...) { ## Return the row cross distance matrix of A and B. 
## I.e., the matrix C = [c_{j,k}] with ## c_{j,k} = distance(A[j, ], B[k, ]) ## ## Could also do something like ## ind <- seq_len(NROW(B)) ## as.matrix(dist(rbind(B, A)))[-ind, ind] ## but that is *very* inefficient for the "usual" data by prototype ## case (where NROW(B) << NROW(A)). ## ## No fancy pmatching for methods for the time being. method <- match.arg(method) ## Workhorse: Full A, single row of b. FOO <- switch(method, "euclidean" = function(A, b) sqrt(rowSums(sweep(A, 2, b) ^ 2)), "manhattan" = function(A, b) rowSums(abs(sweep(A, 2, b))), "minkowski" = { ## Power needs to be given. p <- list(...)[[1L]] function(A, b) (rowSums(abs(sweep(A, 2, b)) ^ p)) ^ (1 / p) }) out <- matrix(0, NROW(A), NROW(B)) ## Be nice: thanks to Wael Salem ZRAFI for ## suggesting this improvement. if(!is.null(cnA <- colnames(A)) && !is.null(cnB <- colnames(B)) && !identical(cnA, cnB)) A <- A[, cnB, drop = FALSE] for(k in seq_len(NROW(B))) out[, k] <- FOO(A, B[k, ]) out } ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/partition.R0000644000175100001440000004352212212427231013557 0ustar hornikusers### * n_of_classes ## Get the number of classes in a (hard or soft) partition. ## ## We generally allow for classes to be empty, unlike the current ## version of kmeans(). Package cclust has a version of k-means which ## does not stop in case of empty classes. ## However, we only count NON-EMPTY classes here. ## n_of_classes <- function(x) UseMethod("n_of_classes") ## Default method. n_of_classes.default <- function(x) length(unique(cl_class_ids(x))) ## Package stats: kmeans() (R 2.1.0 or better). n_of_classes.kmeans <- n_of_classes.default ## Package cluster: clara(), fanny(), and pam() give objects of the ## respective class inheriting from class "partition". n_of_classes.fanny <- function(x) sum(colSums(x$membership, na.rm = TRUE) > 0) n_of_classes.partition <- n_of_classes.default ## Package cclust: cclust(). 
n_of_classes.cclust <- n_of_classes.default
## Package e1071: cmeans() gives objects of class "fclust".
n_of_classes.fclust <- n_of_classes.fanny
## Package e1071: cshell().
n_of_classes.cshell <- n_of_classes.fanny
## Package e1071: bclust().
n_of_classes.bclust <- n_of_classes.default
## Package mclust: Mclust().
n_of_classes.Mclust <- n_of_classes.default
## Package clue: Memberships.
## The count is cached as an attribute when the cl_membership object is
## created, so simply retrieve it.
n_of_classes.cl_membership <- function(x) attr(x, "n_of_classes")
## Package clue: pclust().
## Soft fits carry a membership matrix: count its non-empty columns.
## Hard fits (NULL membership) fall back to counting distinct class ids.
n_of_classes.pclust <- function(x)
{
    if(is.null(m <- x$membership))
        length(unique(cl_class_ids(x)))
    else
        sum(colSums(m, na.rm = TRUE) > 0)
}
## Package clue: (virtual) class "cl_partition".
## Delegate to the underlying representation stored in the container.
n_of_classes.cl_partition <- function(x)
    n_of_classes(.get_representation(x))

### * cl_class_ids

## Get ids of classes in a partition.
##
## Currently, all supported soft partitioning methods provide a softmax
## hard partitioning as well.
##
cl_class_ids <- function(x) UseMethod("cl_class_ids")
## Default method.
cl_class_ids.default <- function(x)
{
    stop("Cannot infer class ids from given object.")
}
## Package stats: kmeans() (R 2.1.0 or better).
cl_class_ids.kmeans <- function(x) as.cl_class_ids(x$cluster)
## Package cluster: clara(), fanny(), and pam() give objects of the
## respective class inheriting from class "partition".
cl_class_ids.partition <- function(x) as.cl_class_ids(x$clustering)
## Package RWeka: clusterers return objects inheriting from
## "Weka_clusterer".
cl_class_ids.Weka_clusterer <- function(x) as.cl_class_ids(x$class_ids)
## Package cba: ccfkms().
cl_class_ids.ccfkms <- function(x) as.cl_class_ids(as.vector(x$cl))
## Package cba: rockCluster() returns objects of class "rock".
cl_class_ids.rock <- function(x) as.cl_class_ids(as.vector(x$cl))
## Package cclust: cclust().
cl_class_ids.cclust <- cl_class_ids.kmeans
## Package e1071: cmeans() gives objects of class "fclust".
cl_class_ids.fclust <- cl_class_ids.kmeans
## Package e1071: cshell().
cl_class_ids.cshell <- cl_class_ids.kmeans
## Package e1071: bclust().
cl_class_ids.bclust <- cl_class_ids.kmeans
## Package flexclust: kcca() returns objects of S4 class "kcca" which
## extends S4 class "flexclust".
##
## We used to be able to call flexclust::cluster(), but this now only
## has S4 methods for modeltools::clusters() S4 generic.  Let's call this
## one, and hope that flexclust has been loaded ...
##
cl_class_ids.kcca <- function(x)
    as.cl_class_ids(modeltools::clusters(x))
## Package flexmix: class "flexmix".
##
## We used to be able to call flexmix::cluster(), but this now only has
## S4 methods for modeltools::clusters() S4 generic.  Let's call this
## one, and hope that flexmix has been loaded ...
##
cl_class_ids.flexmix <- function(x)
    as.cl_class_ids(modeltools::clusters(x))
## Package kernlab: specc() and kkmeans() return objects of S4 class
## "specc".
## Strip the S4 class, keeping names (if any).
cl_class_ids.specc <- function(x)
{
    tmp <- unclass(x)
    as.cl_class_ids(.structure(as.vector(tmp), names = names(tmp)))
}
## Package mclust: Mclust().
cl_class_ids.Mclust <- function(x)
    as.cl_class_ids(x$classification)
## Package relations: equivalence and preference relations.
cl_class_ids.relation <- function(x)
    as.cl_class_ids(relations::relation_class_ids(x))
## Package clue: Class ids.
cl_class_ids.cl_class_ids <- identity
## Package clue: Memberships.
## Hard assignment: each object goes to the class with maximal
## membership (ties broken by max.col()).
cl_class_ids.cl_membership <- function(x)
    as.cl_class_ids(.structure(max.col(x), names = rownames(x)))
## (Cannot do cl_class_ids.cl_membership <- max.col for generic/method
## consistency.)
## Package clue: cl_pam().
cl_class_ids.cl_pam <- cl_class_ids.kmeans
## Package clue: cl_partition_by_class_ids().
cl_class_ids.cl_partition_by_class_ids <- function(x)
    .get_representation(x)
## Package clue: kmedoids().
cl_class_ids.kmedoids <- cl_class_ids.kmeans
## Package clue: pclust().
cl_class_ids.pclust <- cl_class_ids.kmeans
## Package clue: (virtual) class "cl_partition".
cl_class_ids.cl_partition <- function(x)
    cl_class_ids(.get_representation(x))
## Package movMF: class "movMF".
## Posterior probabilities are in x$P; assign each object to the most
## probable class.
cl_class_ids.movMF <- function(x)
    as.cl_class_ids(max.col(x$P))

### * as.cl_class_ids

## Coerce "raw" class ids to class "cl_class_ids".
as.cl_class_ids <- function(x)
{
    ## For the time being, handle only "raw" class ids.
    ## Maybe add methods handling factors later on (if necessary).
    ##
    ## This could also be used to canonicalize returned class ids
    ## according to the docs (vector of integers with the class ids),
    ## using something like
    ##   match(ids, unique(ids))
    ##
    .structure(unclass(x), class = "cl_class_ids")
}

### * print.cl_class_ids

## Print method: header line followed by the raw ids.
print.cl_class_ids <- function(x, ...)
{
    writeLines("Class ids:")
    print(unclass(x), ...)
    invisible(x)
}

### * cl_class_labels

cl_class_labels <- function(x)
    UseMethod("cl_class_labels")

### * is.cl_partition

## Determine whether an object is a (generalized) partition.
## Note that this includes both hard and soft partitions, and allows
## sums of memberships of objects to be less than one.

is.cl_partition <- function(x)
    UseMethod("is.cl_partition")
## Default method.
is.cl_partition.default <- .false
## Package stats: kmeans() (R 2.1.0 or better).
is.cl_partition.kmeans <- .true
## Package cluster: clara(), fanny(), and pam() give objects of the
## respective class inheriting from class "partition".
is.cl_partition.partition <- .true
## Package RWeka: clusterers return objects inheriting from
## "Weka_clusterer".
## (Note that Cobweb internally uses a classification tree, but
## definitely does not expose this structure.)
is.cl_partition.Weka_clusterer <- .true
## Package cba: ccfkms().
is.cl_partition.ccfkms <- .true
## Package cba: rockCluster() returns objects of class "rock".
is.cl_partition.rock <- .true
## Package cclust: cclust().
is.cl_partition.cclust <- .true
## Package e1071: cmeans() gives objects of class "fclust".
is.cl_partition.fclust <- .true
## Package e1071: cshell().
is.cl_partition.cshell <- .true
## Package e1071: bclust().
is.cl_partition.bclust <- .true
## Package flexclust: kcca() returns objects of S4 class "kcca" which
## extends S4 class "flexclust".
is.cl_partition.kcca <- .true
## Package flexmix: class "flexmix".
is.cl_partition.flexmix <- .true
## Package kernlab: specc() and kkmeans() return objects of S4 class
## "specc".
is.cl_partition.specc <- .true
## Package mclust: Mclust().
is.cl_partition.Mclust <- .true
## Package clue: (virtual) class "cl_partition".
## Note that "raw" cl_membership objects are *not* partitions, as they
## are meant for numeric computations.
is.cl_partition.cl_partition <- .true
## Package clue: kmedoids().
is.cl_partition.kmedoids <- .true
## Package clue: pclust().
is.cl_partition.pclust <- .true
## Package movMF: class "movMF".
is.cl_partition.movMF <- .true

### * as.cl_partition

## Note that cl_partition conceptually is a virtual class, so there are
## no prototypes and no cl_partition() creator.

.cl_partition_classes <- "cl_partition"

## Coerce to a cl_partition container: wrap recognized partitions as-is,
## and run everything else through as.cl_membership().
as.cl_partition <- function(x)
{
    if(is.cl_partition(x)) {
        if(!inherits(x, "cl_partition"))
            .make_container(x, .cl_partition_classes)
        else
            x
    }
    else
        cl_partition_by_memberships(as.cl_membership(x))
}

### * print.cl_partition

print.cl_partition <- function(x, ...)
    .print_container(x, "cl_partition", ...)

### * print.cl_partition_by_class_ids

print.cl_partition_by_class_ids <- function(x, ...)
{
    writeLines(gettextf("A hard partition of %d objects.",
                        n_of_objects(x)))
    print(cl_class_ids(x), ...)
    invisible(x)
}

### * print.cl_partition_by_memberships

print.cl_partition_by_memberships <- function(x, ...)
{
    writeLines(gettextf("A partition of %d objects.",
                        n_of_objects(x)))
    print(cl_membership(x), ...)
    invisible(x)
}

### * Complex.cl_partition

## Complex arithmetic makes no sense for partitions.
Complex.cl_partition <- function(z)
    stop(gettextf("Generic '%s' not defined for \"%s\" objects.",
                  .Generic, .Class),
         domain = NA)

### * Math.cl_partition

## Math group generics make no sense for partitions either.
Math.cl_partition <- function(x, ...)
    stop(gettextf("Generic '%s' not defined for \"%s\" objects.",
                  .Generic, .Class),
         domain = NA)

### * Ops.cl_partition

## Comparisons implement the refinement partial order on partitions,
## determined via the contingency table of the two class id vectors:
## e1 <= e2 iff every class of e1 lies inside a single class of e2
## (each row of z has exactly one TRUE).
Ops.cl_partition <- function(e1, e2)
{
    if(nargs() == 1L)
        stop(gettextf("Unary '%s' not defined for \"%s\" objects.",
                      .Generic, .Class),
             domain = NA)
    ## Only comparisons are supported.
    if(!(as.character(.Generic)
         %in% c("<", "<=", ">", ">=", "==", "!=")))
        stop(gettextf("Generic '%s' not defined for \"%s\" objects.",
                      .Generic, .Class),
             domain = NA)
    ci1 <- cl_class_ids(e1)
    ci2 <- cl_class_ids(e2)
    if(length(ci1) != length(ci2))
        stop("Partitions must have the same number of objects.")
    z <- table(ci1, ci2) > 0
    switch(.Generic,
           "<=" = all(rowSums(z) == 1),
           "<" = all(rowSums(z) == 1) && any(colSums(z) > 1),
           ">=" = all(colSums(z) == 1),
           ">" = all(colSums(z) == 1) && any(rowSums(z) > 1),
           "==" = all(rowSums(z) == 1) && all(colSums(z) == 1),
           "!=" = any(rowSums(z) > 1) || any(colSums(z) > 1))
}

### * Summary.cl_partition

## min/max/range of partitions are the meet/join in the partition
## lattice (see cl_meet() and cl_join() in lattice.R).
Summary.cl_partition <- function(..., na.rm = FALSE)
{
    ok <- switch(.Generic, max = , min = , range = TRUE, FALSE)
    if(!ok)
        stop(gettextf("Generic '%s' not defined for \"%s\" objects.",
                      .Generic, .Class),
             domain = NA)
    args <- list(...)
    switch(.Generic,
           "min" = cl_meet(cl_ensemble(list = args)),
           "max" = cl_join(cl_ensemble(list = args)),
           "range" = {
               cl_ensemble(min = cl_meet(cl_ensemble(list = args)),
                           max = cl_join(cl_ensemble(list = args)))
           })
}

### * cl_partition_by_class_ids

## Build a hard cl_partition from a vector of raw class ids, optionally
## using 'labels' as object names.
cl_partition_by_class_ids <- function(x, labels = NULL)
{
    if(!is.atomic(x))
        stop("Class ids must be atomic.")
    if(is.null(names(x)))
        names(x) <- labels
    ##
    ## Perhaps give the raw class ids more structure?
    ## E.g., class "cl_class_ids"?
    ## Problem is that we used to say about extensibility that all there
    ## is to do for a hard partitioner is to add a cl_class_ids() method
    ## and two predicates, but *not* to have the former give a suitably
    ## classed object.  On the other hand, the recipe would need to be
    ## extended for soft partitioners, for which it would be necessary
    ## to provide a cl_membership() method which really returns an
    ## object of class cl_membership.  Note that we can do this using
    ## as.cl_membership(m), where m is the raw membership matrix.  So
    ## maybe we should ask for using as.cl_class_ids() to coerce raw
    ## class ids ...
    .make_container(as.cl_class_ids(x),
                    c("cl_partition_by_class_ids",
                      .cl_hard_partition_classes),
                    list(n_of_objects = length(x),
                         n_of_classes = length(unique(x))))
    ##
}

### * cl_partition_by_memberships

## Build a (possibly soft) cl_partition from a raw membership matrix
## (rows = objects, columns = classes, entries in [0, 1]).
cl_partition_by_memberships <- function(x, labels = NULL)
{
    if(!is.matrix(x)
       || any(x < 0, na.rm = TRUE) || any(x > 1, na.rm = TRUE))
        stop("Not a valid membership matrix.")
    ## Be nice: renormalize rows to sum to one.
    x <- x / rowSums(x, na.rm = TRUE)
    ## (Note that this does not imply all(rowSums(x) == 1).  If we
    ## wanted to test for this, something like
    ##   .is_stochastic_matrix <- function(x)
    ##       identical(all.equal(rowSums(x), rep(1, nrow(x))), TRUE))
    ## should do.)
    if(is.null(rownames(x)))
        rownames(x) <- labels
    .make_container(as.cl_membership(x),
                    c("cl_partition_by_memberships",
                      .cl_partition_classes),
                    list(n_of_objects = nrow(x)))
}

### * is.cl_hard_partition

## Determine whether an object is a hard partition.

is.cl_hard_partition <- function(x)
    UseMethod("is.cl_hard_partition")
## Default method.
is.cl_hard_partition.default <- .false
## Package stats: kmeans() (R 2.1.0 or better).
is.cl_hard_partition.kmeans <- .true
## Package cluster: clara(), fanny(), and pam() give objects of the
## respective class inheriting from class "partition".
##
## Of course, fuzzy clustering can also give a hard partition ...
## (Hard iff every object has at least one membership equal to one.)
is.cl_hard_partition.fanny <- function(x)
{
    all(rowSums(cl_membership(x) == 1, na.rm = TRUE) > 0)
}
##
is.cl_hard_partition.partition <- .true
## Package RWeka: clusterers return objects inheriting from
## "Weka_clusterer".
is.cl_hard_partition.Weka_clusterer <- .true
## Package cba: ccfkms().
is.cl_hard_partition.ccfkms <- .true
## Package cba: rockCluster() returns objects of class "rock".
is.cl_hard_partition.rock <- .true
## Package cclust: cclust().
is.cl_hard_partition.cclust <- .true
## Package e1071: cmeans() gives objects of class "fclust".
is.cl_hard_partition.fclust <- is.cl_hard_partition.fanny
## Package e1071: cshell().
is.cl_hard_partition.cshell <- is.cl_hard_partition.fanny
## Package e1071: bclust().
is.cl_hard_partition.bclust <- .true
## Package flexclust: kcca() returns objects of S4 class "kcca" which
## extends S4 class "flexclust".
is.cl_hard_partition.kcca <- .true
## Package flexmix: class "flexmix".
is.cl_hard_partition.flexmix <- is.cl_hard_partition.fanny
## Package kernlab: specc() and kkmeans() return objects of S4 class
## "specc".
is.cl_hard_partition.specc <- .true
## Package mclust: Mclust().
is.cl_hard_partition.Mclust <- is.cl_hard_partition.fanny
## Package clue: (virtual) class "cl_hard_partition".
is.cl_hard_partition.cl_hard_partition <- .true
## Package clue: (virtual) class "cl_partition".
## Note that "raw" cl_membership objects are *not* partitions, as they
## are meant for numeric computations.
## Rather than providing is.cl_hard_partition.cl_membership() we thus
## prefer explicit handling of cl_partition objects with a cl_membership
## representation.
is.cl_hard_partition.cl_partition <- function(x)
{
    ## If the object has a cl_membership representation ...
    y <- .get_representation(x)
    if(inherits(y, "cl_membership"))
        attr(y, "is_cl_hard_partition")
    ## Other representations, e.g. for "definitely" hard partitions via
    ## vectors of class ids or class labels, or a list of classes, may
    ## be added in future versions.
    ## In any case, this must be kept in sync with what is handled by
    ## as.cl_partition() [which currently runs as.cl_membership() in
    ## case is.cl_partition() gives false].
    else
        is.cl_hard_partition(y)
}
## Package clue: kmedoids().
is.cl_hard_partition.kmedoids <- .true
## Package clue: pclust().
is.cl_hard_partition.pclust <- is.cl_hard_partition.fanny
## Package movMF: class "movMF".
is.cl_hard_partition.movMF <- is.cl_hard_partition.fanny

### * as.cl_hard_partition

.cl_hard_partition_classes <- c("cl_hard_partition", "cl_partition")

## Coerce to a hard partition, accepting existing (hard or soft)
## partitions, raw membership matrices, and raw class id vectors.
as.cl_hard_partition <- function(x)
{
    if(is.cl_hard_partition(x)) {
        if(!inherits(x, "cl_partition"))
            .make_container(x, .cl_hard_partition_classes)
        else
            x
    }
    else if(is.cl_partition(x)) {
        ## A soft cl_partition ...
        ids <- cl_class_ids(x)
        cl_partition_by_class_ids(ids, names(ids))
    }
    else if(is.matrix(x)) {
        ## A matrix of raw memberships, hopefully ...
        cl_partition_by_class_ids(max.col(x), rownames(x))
    }
    else if(is.atomic(x)) {
        ## A vector of raw class ids, hopefully ...
        cl_partition_by_class_ids(x, names(x))
    }
    else
        stop("Cannot coerce to 'cl_hard_partition'.")
}

### * is.cl_soft_partition

## Determine whether an object is a soft partition.

is.cl_soft_partition <- function(x)
    is.cl_partition(x) && ! is.cl_hard_partition(x)

### * .maybe_is_proper_soft_partition

## Determine whether an object might be a proper soft partition (in the
## sense that it is a cl_partition but not a cl_hard_partition).
## This is mostly useful when computing fuzziness measures.

.maybe_is_proper_soft_partition <- function(x)
    UseMethod(".maybe_is_proper_soft_partition")
.maybe_is_proper_soft_partition.default <- .false
.maybe_is_proper_soft_partition.fanny <- .true
.maybe_is_proper_soft_partition.fclust <- .true
.maybe_is_proper_soft_partition.cshell <- .true
.maybe_is_proper_soft_partition.flexmix <- .true
.maybe_is_proper_soft_partition.Mclust <- .true
## See above for why we prefer not to have
## .maybe_is_proper_soft_partition.cl_membership().
## (Although this is an internal generic really only used for making
## cl_fuzziness() computations more efficient, so we could be more
## generous here [perhaps using a slightly different name such as
## .maybe_represents_a_proper_soft_partition()].
.maybe_is_proper_soft_partition.cl_partition <- function(x) { y <- .get_representation(x) if(inherits(y, "cl_membership")) !attr(y, "is_cl_hard_partition") else .maybe_is_proper_soft_partition(y) } .maybe_is_proper_soft_partition.pclust <- function(x) x$m > 1 ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/lattice.R0000644000175100001440000001356713036514161013205 0ustar hornikuserscl_meet <- function(x, y) { ## General case. ## x either an ensemble, or x and y two clusterings with the same ## number of objects. if(!inherits(x, "cl_ensemble")) { ## Be nice about error messages. if(n_of_objects(x) != n_of_objects(y)) stop("Arguments 'x' and 'y' must have the same number of objects.") x <- cl_ensemble(x, y) } if(inherits(x, "cl_partition_ensemble")) .cl_meet_partition(x) else if(inherits(x, "cl_dendrogram_ensemble")) .cl_meet_dendrogram(x) else if(inherits(x, "cl_hierarchy_ensemble")) .cl_meet_hierarchy(x) else stop("Cannot compute meet of given clusterings.") } .cl_meet_partition <- function(x) { x <- unique(x) if(length(x) == 1L) return(cl_partition_by_class_ids(cl_class_ids(x[[1L]]))) ids <- seq_len(n_of_objects(x[[1L]])) ## Cross-classify the objects. z <- split(ids, lapply(x, cl_class_ids)) ## Subscript on the non-empty cells to get adjacent class ids. lens <- lengths(z) pos <- which(lens > 0) ids[unlist(z, use.names = FALSE)] <- rep.int(seq_along(z[pos]), lens[pos]) cl_partition_by_class_ids(ids) } .cl_meet_dendrogram <- function(x) { ## Meet of an ensemble of dendrograms. ## We need the maximal ultrametric dominated by the given ones, ## which can be obtained by hierarchical clustering with single ## linkage on the pointwise minima of the ultrametrics. as.cl_dendrogram(hclust(as.dist(do.call(pmin, lapply(x, cl_ultrametric))), "single")) } .cl_meet_hierarchy <- function(x) { ## Meet of an ensemble of n-trees. ## Need to find the classes in *all* n-trees. 
## Equivalent to computing a strict majority tree. .cl_consensus_hierarchy_majority(x, rep.int(1, length(x)), list(p = 1)) } cl_join <- function(x, y) { ## General case. ## x either an ensemble, or x and y two clusterings with the same ## number of objects. if(!inherits(x, "cl_ensemble")) { ## Be nice about error messages. if(n_of_objects(x) != n_of_objects(y)) stop("Arguments 'x' and 'y' must have the same number of objects.") x <- cl_ensemble(x, y) } if(inherits(x, "cl_partition_ensemble")) .cl_join_partition(x) else if(inherits(x, "cl_dendrogram_ensemble")) .cl_join_dendrogram(x) else if(inherits(x, "cl_hierarchy_ensemble")) .cl_join_hierarchy(x) else stop("Cannot compute join of given clusterings.") } .cl_join_partition <- function(x) { x <- unique(x) if(length(x) == 1) return(cl_partition_by_class_ids(cl_class_ids(x[[1L]]))) ## Canonicalize: ensure that class ids are always the integers from ## one to the number of classes. n <- sapply(x, n_of_classes) ids <- mapply(function(p, ncp) match(cl_class_ids(p), seq_len(ncp)), x, n, SIMPLIFY = FALSE) ## Order according to the number of classes. ids <- ids[order(n)] ## And now incrementally build the join. jcids <- ids[[1L]] # Class ids of the current join. jnc <- length(unique(jcids)) # Number of classes of this. for(b in seq.int(from = 2, to = length(x))) { z <- table(jcids, ids[[b]]) ## It is faster to work on the smaller partition, but this ## should be ensured by the reordering ... ## We need to "join all elements in the same class in at least ## one of the partitions". In the matrix ## C <- (tcrossprod(z) > 0) ## entry i,j is true/one iff z_{ik} z_{jk} > 0 for classes ## i and j in the current join (ids jcids) and some class k in ## the partition with ids[[b]], so that i and j must be joined. ## I.e., C indicates which classes need to be joined directly. 
## We need to determine the transitive closure of this relation, ## which can be performed by repeating ## C_{t+1} <- ((C_t %*% C) > 0) ## with C_1 = C until C_t does not change. C_new <- C_old <- C <- (tcrossprod(z) > 0) repeat { C_new <- (C_old %*% C) > 0 if(all(C_new == C_old)) break C_old <- C_new } C <- C_new ## This should now have the connected components. ## Next, compute the map of the join class ids to the ids of ## these components. cnt <- 0 map <- remaining_ids <- seq_len(jnc) while(length(remaining_ids)) { cnt <- cnt + 1 pos <- which(C[remaining_ids[1L], remaining_ids] > 0) map[remaining_ids[pos]] <- cnt remaining_ids <- remaining_ids[-pos] } ## And update the join: jcids <- map[jcids] jnc <- cnt } cl_partition_by_class_ids(jcids) } .cl_join_dendrogram <- function(x) { ## Join of an ensemble of dendrograms. as.cl_dendrogram(do.call(pmax, lapply(x, cl_ultrametric))) } .cl_join_hierarchy <- function(x) { ## Join of an ensemble of n-trees. ## Only exists if the union of all classes of the n-trees is itself ## an n-tree (see Barthelemy et al). classes <- unique(unlist(lapply(x, cl_classes), recursive = FALSE)) ## Now check if this is an n-tree. ## We must verify that for all classes A and B, their intersection ## is A, B, or empty. check <- function(A, B) { m_AB <- match(A, B) m_BA <- match(B, A) ((all(is.na(m_AB)) && all(is.na(m_BA))) || all(is.finite(m_AB)) || all(is.finite(m_BA))) } for(i in seq_along(classes)) { A <- classes[[i]] for(j in seq_along(classes)) if(!check(A, classes[[j]])) stop("Join of given n-trees does not exist.") } as.cl_hierarchy(.cl_ultrametric_from_classes(classes)) } clue/R/proximity.R0000644000175100001440000001244311304023136013605 0ustar hornikusers### * cl_proximity cl_proximity <- function(x, description, class = NULL, labels = NULL, self = NULL, size = NULL) { ## Similar to as.dist(), in a way. ## Currently, as.dist() is not generic, so we cannot provide a ## cl_proximity method for it. 
Hence, we have our dissimilarities ## and ultrametrics extend dist, and we use capitalized names for ## the attributes provided for compatibility with dist (Size and ## Labels). if(inherits(x, "dist")) { ## Explicitly deal with dist objects. ## Useful in particular because cophenetic() returns them. out <- x if(is.null(size)) size <- attr(x, "Size") if(is.null(labels)) labels <- attr(x, "Labels") } else if(inherits(x, "cl_proximity") || !(is.matrix(x) && (nrow(x) == ncol(x)))) out <- x else { ## Actually, x should really be a square symmetric matrix. ## The "self-proximities" in the main diagonal must be stored ## provided there is one non-zero entry. self <- diag(x) if(all(self == 0)) self <- NULL out <- x[row(x) > col(x)] if(is.null(labels)) { if(!is.null(rownames(x))) labels <- rownames(x) else if(!is.null(colnames(x))) labels <- colnames(x) } } if(is.null(size)) size <- as.integer((sqrt(1 + 8 * length(out)) + 1) / 2) attributes(out) <- list(Size = size, Labels = labels, description = description, self = self) class(out) <- unique(c(class, "cl_proximity")) out } ### * names.cl_proximity names.cl_proximity <- function(x) NULL ### * print.cl_proximity print.cl_proximity <- function(x, ...) { description <- attr(x, "description") if(length(description) > 0L) { ## Could make this generic ... kind <- if(inherits(x, "cl_dissimilarity")) "Dissimilarities" else if(inherits(x, "cl_agreement")) "Agreements" else "Proximities" cat(sprintf("%s using %s", kind, description), ":\n", sep = "") } m <- format(as.matrix(x)) if(is.null(self <- attr(x, "self"))) m[row(m) <= col(m)] <- "" else m[row(m) < col(m)] <- "" print(if(is.null(self)) m[-1, -attr(x, "Size")] else m, quote = FALSE, right = TRUE, ...) invisible(x) } ### * as.matrix.cl_proximity as.matrix.cl_proximity <- function(x, ...) 
{ size <- attr(x, "Size") m <- matrix(0, size, size) m[row(m) > col(m)] <- x m <- m + t(m) if(!is.null(self <- attr(x, "self"))) { diag(m) <- self } ## ## stats:::as.matrix.dist() provides default dimnames ## (seq_len(size)) if no labels are available. ## We used to do this too, but ... if(!is.null(labels <- attr(x, "Labels"))) dimnames(m) <- list(labels, labels) ## m } ### * [.cl_proximity "[.cl_proximity" <- function(x, i, j) { ## Subscripting proximity objects. ## Basically matrix-like, but proximity objects are always ## "matrices", hence no 'drop' argument. ## For double-index subscripting, if i and j are identical, ## structure and class are preserved. Otherwise, a cross-proximity ## object is returned (and methods for classes inheriting from ## proximity need to readjust the class info as needed). ## For single-index subscripting, no attempty is currently made at ## preserving structure and class where possible. (We might also ## change this to select objects, i.e., the same rows and columns.) size <- attr(x, "Size") if(missing(j)) { if(missing(i)) return(x) else j <- seq_len(size) } if(missing(i)) i <- seq_len(size) description <- attr(x, "description") ## RG's graph:::[.dist avoids as.matrix() in noting that for dist ## objects, entry (i,j) is at n(i-1) - i(i-1)/2 + j - i (in the ## veclh dist representation). We could do something similar, but ## note that not all proximities have zero diagonals (i.e., NULL ## "self" attributes). y <- as.matrix(x)[i, j, drop = FALSE] if(identical(i, j)) { ## Testing using identical() is rather defensive ... return(cl_proximity(y, description = description, class = class(x))) } cl_cross_proximity(y, description = description) } ### * cl_cross_proximity cl_cross_proximity <- function(x, description = NULL, class = NULL) { attr(x, "description") <- description class(x) <- c(class, "cl_cross_proximity") x } ### * print.cl_cross_proximity print.cl_cross_proximity <- function(x, ...) 
{ description <- attr(x, "description") if(length(description) > 0L) { ## Could make this generic ... kind <- if(inherits(x, "cl_cross_dissimilarity")) "Cross-dissimilarities" else if(inherits(x, "cl_cross_agreement")) "Cross-agreements" else "Cross-proximities" cat(sprintf("%s using %s", kind, description), ":\n", sep = "") } print(matrix(as.vector(x), nrow = nrow(x), dimnames = dimnames(x)), ...) invisible(x) } ### ** print_description_prefix ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/pava.R0000644000175100001440000000507013036461767012512 0ustar hornikusers## A Pool Adjacent Violators Algorithm framework for minimizing problems ## like ## ## \sum_i \sum_{J_i} w_{ij} f(y_{ij}, m_i) ## ## under the constraint m_1 <= ... <= m_n with f a convex function in m. ## Note that this formulation allows for repeated data in each block, ## and hence is more general than the usual pava/isoreg ones. A solver ## for the unconstrained \sum_k w_k f(y_k, m) => min! is needed. ## Typical cases are f(y, m) = |y - m|^p for p = 2 (solved by weighted ## mean) and p = 1 (solved by weighted median), respectively. ## A general design issue is whether weights should be supported or not, ## because in the latter case the solver could be a function of a single ## (data) argument only. Let's assume the former for the time being. pava <- function(x, w = NULL, solver = weighted.mean, merger = c) { n <- length(x) if(is.null(w)) { w <- if(is.list(x)) lapply(lengths(x), function(u) rep.int(1, u)) else rep.int(1, n) } else if(is.list(x)) w <- as.list(w) inds <- as.list(seq_len(n)) vals <- mapply(solver, x, w) ## Combine blocks i and i + 1. combine <- if(is.list(x)) { ## In the repeated data case, we explicitly merge the data (and ## weight) lists. function(i) { ## Merge the data and indices, solve, and put things back ## into position i, dropping position i + 1. 
j <- i + 1L x[[i]] <<- merger(x[[i]], x[[j]]) w[[i]] <<- c(w[[i]], w[[j]]) vals[i] <<- solver(x[[i]], w[[i]]) inds[[i]] <<- c(inds[[i]], inds[[j]]) keep <- seq_len(n)[-j] x <<- x[keep] w <<- w[keep] vals <<- vals[keep] inds <<- inds[keep] n <<- n - 1L } } else { function(i) { ## In the "simple" case, merge only indices and values. j <- i + 1L inds[[i]] <<- c(inds[[i]], inds[[j]]) vals[i] <<- solver(x[inds[[i]]], w[inds[[i]]]) keep <- seq_len(n)[-j] vals <<- vals[keep] inds <<- inds[keep] n <<- n - 1L } } i <- 1L repeat { if(i < n) { if((vals[i] > vals[i + 1])) { combine(i) while((i > 1L) && (vals[i - 1L] > vals[i])) { combine(i - 1L) i <- i - 1L } } else i <- i + 1L } else break } rep.int(vals, lengths(inds)) } clue/R/predict.R0000644000175100001440000002453614503541710013210 0ustar hornikusers## ## Maybe add support for "auto" type (class_ids when predicting from a ## hard, memberships when predicting from a soft partition) eventually. ## cl_predict <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) UseMethod("cl_predict") ## Default method. ## Should also work for kcca() from package flexclust. cl_predict.default <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) .as_cl_class_ids_or_membership(predict(object, newdata, ...), type) ## Package stats: kmeans() (R 2.1.0 or better). cl_predict.kmeans <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) { if(is.null(newdata)) return(.cl_class_ids_or_membership(object, type)) d <- .rxdist(newdata, object$centers) .as_cl_class_ids_or_membership(max.col(-d), type) } ## Package cluster: ## * fanny() cannot make "new" predictions. ## * clara() gives medoids, and takes metric data using Euclidean or ## Manhattan dissimilarities (and we can figure out which by looking ## at the call and the default values). ## * pam() gives medoids, but might have been called with dissimilarity ## data, so is tricky. 
## We can always find out which by looking at the medoids: as in the
## dissimilarity input case this is a vector of class labels, and a
## matrix with in each row the coordinates of one medoid otherwise.
## We then still need to figure out whether Euclidean or Manhattan
## distances were used by looking at the call and the default values.
## Both pam() and clara() show that the interfaces could be improved
## to accommodate modern needs, e.g., for bagging.

## cl_predict() method for cluster::fanny() fits.
## fanny() fits store no prototypes, so class memberships can only be
## reported for the original objects (newdata = NULL).
cl_predict.fanny <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    if(is.null(newdata))
        return(.cl_class_ids_or_membership(object, type))
    stop("Cannot make new predictions.")
}

## cl_predict() method for cluster::clara() fits.
## New objects are assigned to the closest medoid, using the metric
## recorded in (or defaulted by) the original clara() call.
## NOTE(review): .rxdist() is a package-internal helper assumed to give
## the cross-distances between the rows of newdata and the medoids.
cl_predict.clara <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    if(is.null(newdata))
        return(.cl_class_ids_or_membership(object, type))
    ##
    ## Add support eventually ...
    if(identical(object$call$stand, TRUE))
        warning("Standardization is currently not supported.")
    ##
    method <- object$call$metric
    if(is.null(method)) {
        ## Not given in the call, hence use default value.
        method <- eval(formals(clara)$metric)[1L]
        ## (Or hard-wire the default value: "euclidean".)
    }
    d <- .rxdist(newdata, object$medoids, method)
    ## Closest medoid: max.col(-d) gives the column of the row minimum.
    .as_cl_class_ids_or_membership(max.col(-d), type)
}

## cl_predict() method for cluster::pam() fits.
## Like the clara() method, but pam() may have been called with
## dissimilarity input, in which case the medoids component is not a
## coordinate matrix and no new predictions are possible.
cl_predict.pam <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    if(is.null(newdata))
        return(.cl_class_ids_or_membership(object, type))
    prototypes <- object$medoids
    if(!is.matrix(prototypes))
        stop("Cannot make new predictions.")
    ##
    ## Add support eventually ...
    if(identical(object$call$stand, TRUE))
        warning("Standardization is currently not supported.")
    ##
    method <- object$call$metric
    if(is.null(method)) {
        ## Not given in the call, hence use default value.
        method <- eval(formals(pam)$metric)[1L]
        ## (Or hard-wire the default value: "euclidean".)
    }
    d <- .rxdist(newdata, object$medoids, method)
    .as_cl_class_ids_or_membership(max.col(-d), type)
}

## Package RWeka: clusterers return objects inheriting from
## "Weka_clusterer"; delegate to their predict() method, which already
## understands the 'type' argument.
cl_predict.Weka_clusterer <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    if(is.null(newdata))
        return(.cl_class_ids_or_membership(object, type))
    .as_cl_class_ids_or_membership(predict(object, newdata = newdata,
                                           type = type, ...),
                                   type)
}

## Package cba: ccfkms().
cl_predict.ccfkms <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    if(is.null(newdata))
        return(.cl_class_ids_or_membership(object, type))
    .as_cl_class_ids_or_membership(as.vector(predict(object, newdata)$cl),
                                   type)
}

## Package cba: rockCluster() returns objects of class "rock".
## If x is a Rock object, fitted(x) and predict(x, newdata) can result
## in missing classifications, as
##   In the case a 'drop' value greater than zero is specified, all
##   clusters with size equal or less than this value are removed from
##   the classifier.  Especially, 'fitted' uses a threshold of one
##   because for singleton clusters the neighborhood is empty.
cl_predict.rock <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    ## No newdata: predict on the original data stored in object$x.
    if(is.null(newdata))
        newdata <- object$x
    ids <- as.vector(predict(object, newdata, ...)$cl)
    .as_cl_class_ids_or_membership(ids, type)
}

## Package cclust: cclust().
cl_predict.cclust <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    ## Package cclust provides predict.cclust() which returns (again) an
    ## object of class "cclust", but does not give the labels of the
    ## original data in case no new data are given.
    if(is.null(newdata))
        return(.cl_class_ids_or_membership(object, type))
    .as_cl_class_ids_or_membership(predict(object, newdata), type)
}

## Package e1071: cmeans() gives objects of class "fclust".
cl_predict.fclust <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    if(is.null(newdata))
        return(.cl_class_ids_or_membership(object, type))
    ## Note that the 'fclust' objects returned by cmeans() do not always
    ## directly contain the information on the fuzzification parameter m
    ## and the distance (Euclidean/Manhattan) employed, so we have to
    ## engineer this from the matched call and the default arguments.
    nms <- names(object$call)
    ## Note that we cannot directly use object$call$m, as this could
    ## give the 'method' argument if 'm' was not given.
    m <- if("m" %in% nms)
        object$call$m
    else {
        ## Not given in the call, hence use default value.
        formals(e1071::cmeans)$m
        ## (Or hard-wire the default value: 2.)
    }
    method <- if("dist" %in% nms)
        object$call$dist
    else {
        ## Not given in the call, hence use default value.
        formals(e1071::cmeans)$dist
        ## (Or hard-wire the default value: "euclidean".)
    }
    d <- .rxdist(newdata, object$centers, method)
    power <- c(m, if(method == "euclidean") 2 else 1)
    M <- .memberships_from_cross_dissimilarities(d, power)
    .as_cl_class_ids_or_membership(M, type)
}

## Package e1071: cshell().
cl_predict.cshell <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    if(is.null(newdata))
        return(.cl_class_ids_or_membership(object, type))
    ## Not surprisingly, this is rather similar to what we do for fclust
    ## objects.  Only dissimilarities (and exponents) need to be
    ## computed differently ...
    nms <- names(object$call)
    m <- if("m" %in% nms)
        object$call$m
    else {
        ## Not given in the call, hence use default value.
        formals(e1071::cshell)$m
        ## (Or hard-wire the default value: 2.)
    }
    method <- if("dist" %in% nms)
        object$call$dist
    else {
        ## Not given in the call, hence use default value.
        formals(e1071::cshell)$dist
        ## (Or hard-wire the default value: "euclidean".)
    }
    d <- .rxdist(newdata, object$centers, method)
    ## Squared deviation of each distance from the fitted shell radius.
    d <- sweep(d, 2, object$radius) ^ 2
    M <- .memberships_from_cross_dissimilarities(d, m)
    .as_cl_class_ids_or_membership(M, type)
}

## Package e1071: bclust().
## ## One might argue that it would be better to use the 'dist.method' ## employed for the hierarchical clustering, but it seems that class ## labels ("clusters") are always assigned using Euclidean distances. cl_predict.bclust <- cl_predict.kmeans ## ## Package flexclust: kcca() returns objects of S4 class "kcca" which ## extends S4 class "flexclust". cl_predict.kcca <- cl_predict.default ## Package flexmix: class "flexmix". cl_predict.flexmix <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) { if(is.null(newdata)) return(.cl_class_ids_or_membership(object, type)) .as_cl_class_ids_or_membership(modeltools::posterior(object, newdata, ...), type) } ## Package mclust: Mclust(). cl_predict.Mclust <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) { if(is.null(newdata)) return(.cl_class_ids_or_membership(object, type)) pred <- predict(object, newdata, ...) type <- match.arg(type) if(type == "class_ids") as.cl_class_ids(pred$classification) else as.cl_membership(pred$z) } ## Package movMF: movMF(). cl_predict.movMF <- cl_predict.Weka_clusterer ## Package clue: pclust(). cl_predict.pclust <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) { if(is.null(newdata)) return(.cl_class_ids_or_membership(object, type)) d <- object$family$D(newdata, object$prototypes) power <- c(object$m, object$family$e) M <- .memberships_from_cross_dissimilarities(d, power) .as_cl_class_ids_or_membership(M, type) } ## Package clue: (virtual) class "cl_partition". cl_predict.cl_partition <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) cl_predict(.get_representation(object), newdata = newdata, type, ...) ## Internal helpers: this looks a bit silly, but makes the rest of the ## code look nicer ... 
## Extract class ids or memberships from an existing clustering 'x',
## depending on 'type'.
.cl_class_ids_or_membership <-
function(x, type = c("class_ids", "memberships"))
{
    type <- match.arg(type)
    if(type == "class_ids")
        cl_class_ids(x)
    else
        cl_membership(x)
}

## Coerce "raw" prediction results (a vector of ids, or a matrix of
## membership values) to class ids or memberships, depending on 'type'.
.as_cl_class_ids_or_membership <-
function(x, type = c("class_ids", "memberships"))
{
    type <- match.arg(type)
    if(type == "class_ids") {
        if(is.matrix(x)) {
            ## Same as for cl_class_ids.cl_membership().
            as.cl_class_ids(.structure(max.col(x), names = rownames(x)))
        }
        else as.cl_class_ids(x)
    }
    else as.cl_membership(x)
}
clue/R/medoid.R0000644000175100001440000001561514503541657013027 0ustar hornikusers
### * cl_medoid

## Compute the medoid of a cluster ensemble: the element of the
## ensemble minimizing the sum of dissimilarities (in the given method)
## to all other elements.
cl_medoid <-
function(x, method = "euclidean")
{
    ##
    ## In principle we can get the same using pam(k = 1)$medoids.
    ##
    clusterings <- as.cl_ensemble(x)
    if(!length(clusterings))
        stop("Cannot compute medoid of empty ensemble.")
    dissimilarities <-
        as.matrix(cl_dissimilarity(clusterings, method = method))
    clusterings[[which.min(rowSums(dissimilarities))]]
}

### * cl_pam

## Compute a k-medoid partition of a cluster ensemble, either via
## cluster::pam() or via the exact kmedoids() solver below.
cl_pam <-
function(x, k, method = "euclidean", solver = c("pam", "kmedoids"))
{
    clusterings <- as.cl_ensemble(x)
    if(!length(clusterings))
        stop("Cannot compute medoid partition of empty ensemble.")
    ## Actually, we should have at least k distinct elements in the
    ## ensemble ...

    ## Constructor for the "cl_pam" result objects.
    make_cl_pam <-
        function(class_ids, medoid_ids, medoids, criterion, description)
            .structure(list(cluster = class_ids,
                            medoid_ids = medoid_ids,
                            prototypes = medoids,
                            criterion = criterion,
                            description = description),
                       class = "cl_pam")

    if(k == 1L) {
        ## Simplify matters if a global medoid is sought.
        dissimilarities <- cl_dissimilarity(clusterings, method = method)
        description <- attr(dissimilarities, "description")
        dissimilarities <- as.matrix(dissimilarities)
        row_sums <- rowSums(dissimilarities)
        medoid_id <- which.min(row_sums)
        criterion <- row_sums[medoid_id]
        return(make_cl_pam(as.cl_class_ids(seq_along(clusterings)),
                           medoid_id,
                           clusterings[medoid_id],
                           criterion,
                           description))
    }
    solver <- match.arg(solver)
    ## Argh.  We really want to run k-medoids for the unique elements of
    ## the ensemble, but pam() only works for symmetric dissimilarities.
    ## As computing cluster dissimilarities is typically expensive, use
    ## the unique elements for doing so in any case.
    values <- unique(clusterings)
    ## Positions of ensemble members in the unique values.
    positions <- match(clusterings, values)
    ## Dissimilarities between unique values.
    dissimilarities <- cl_dissimilarity(values, method = method)
    description <- attr(dissimilarities, "description")
    dissimilarities <- as.matrix(dissimilarities)
    ## For pam(), we need the dissimilarities for all objects.
    if(solver == "pam") {
        dissimilarities <- dissimilarities[positions, positions]
        party <- pam(as.dist(dissimilarities), k)
        class_ids <- cl_class_ids(party)
        medoid_ids <- cl_medoid_ids(party)
        medoids <- clusterings[medoid_ids]
        ## Criterion: total dissimilarity of each object to the medoid
        ## of its class.
        criterion <- sum(dissimilarities[cbind(seq_along(class_ids),
                                               medoid_ids[class_ids])])
    }
    else {
        ## Counts of unique values.
        counts <- tabulate(positions)
        ## Weigh according to the counts.  Should be straightforward to
        ## add "case weights" as well ...
        dissimilarities <- counts * dissimilarities
        ## Now partition.
        party <- kmedoids(dissimilarities, k)
        ## And build the solution from this ...
        criterion <- party$criterion
        ## First, things for the unique values.
        medoid_ids <- cl_medoid_ids(party)
        medoids <- values[medoid_ids]
        class_ids <- cl_class_ids(party)
        ## Second, things for all objects.
        class_ids <- class_ids[positions]
        medoid_ids <- match(medoid_ids, positions)
    }
    make_cl_pam(class_ids, medoid_ids, medoids, criterion, description)
}

## Print method for "cl_pam" objects: summary line, class ids, and the
## criterion value.
print.cl_pam <-
function(x, ...)
{
    class_ids <- cl_class_ids(x)
    fmt <- "A k-medoid partition of a cluster ensemble with %d elements into %d classes (dissimilarity measure: %s)."
    writeLines(c(strwrap(gettextf(fmt,
                                  n_of_objects(x),
                                  n_of_classes(x),
                                  x$description))))
    writeLines(gettext("Class ids:"))
    print(class_ids, ...)
    writeLines(gettext("Criterion:"))
    print(x$criterion, ...)
    invisible(x)
}

### * cl_medoid_ids

## Little helper, internal for the time being ...
## Extract the indices of the medoid objects from a fitted medoid
## partition (clue, cluster, or kmedoids fits).
cl_medoid_ids <- function(x) UseMethod("cl_medoid_ids")
cl_medoid_ids.cl_pam <- function(x) x$medoid_ids
cl_medoid_ids.kmedoids <- function(x) x$medoid_ids
cl_medoid_ids.clara <- function(x) x$i.med
cl_medoid_ids.pam <- function(x) x$id.med

### * kmedoids

## Exact k-medoids clustering of objects with given dissimilarities,
## via the binary linear program of Gordon & Vichi (1998) solved with
## lpSolve::lp().
kmedoids <-
function(x, k)
{
    ##
    ## For the time being, 'x' is assumed a dissimilarity object or a
    ## matrix of dissimilarities.
    ## Let's worry about the interface later.
    ##
    x <- as.matrix(x)
    n <- nrow(x)
    ## Use the formulation in Gordon & Vichi (1998), Journal of
    ## Classification, [P4'], page 279, with variables c(vec(X), z), but
    ## with rows and cols interchanged (such that x_{ij} is one iff o_i
    ## has medoid o_j, and z_j is one iff o_j is a medoid).
    make_constraint_mat <- function(n) {
        nsq <- n * n
        rbind(cbind(kronecker(rbind(rep.int(1, n)), diag(1, n)),
                    matrix(0, n, n)),
              cbind(diag(1, nsq),
                    kronecker(diag(1, n), rep.int(-1, n))),
              c(double(nsq), rep.int(1, n)),
              cbind(matrix(0, n, nsq), diag(1, n)))
    }
    make_constraint_dir <- function(n)
        rep.int(c("=", "<=", "=", "<="), c(n, n * n, 1, n))
    make_constraint_rhs <- function(n, k)
        rep.int(c(1, 0, k, 1), c(n, n * n, 1, n))
    ##
    ## We could try a relaxation without integrality constraints first,
    ## which seems to "typically work" (and should be faster).  To test
    ## for integrality, use something like
    ##   if(identical(all.equal(y$solution, round(y$solution)), TRUE))
    ##
    y <- lpSolve::lp("min",
                     c(c(x), double(n)),
                     make_constraint_mat(n),
                     make_constraint_dir(n),
                     make_constraint_rhs(n, k),
                     int.vec = seq_len(n * (n + 1)))
    ## Now get the class ids and medoids.
    ind <- which(matrix(y$solution[seq_len(n * n)], n) > 0,
                 arr.ind = TRUE)
    medoid_ids <- unique(ind[, 2L])
    class_ids <- seq_len(n)
    class_ids[ind[, 1L]] <- match(ind[, 2L], medoid_ids)
    .structure(list(cluster = class_ids,
                    medoid_ids = medoid_ids,
                    criterion = y$objval),
               class = "kmedoids")
}

## Print method for "kmedoids" objects.
print.kmedoids <-
function(x, ...)
{
    fmt <- "A k-medoids clustering of %d objects into %d clusters."
    writeLines(gettextf(fmt, n_of_objects(x), n_of_classes(x)))
    writeLines(gettext("Medoid ids:"))
    print(cl_medoid_ids(x), ...)
    writeLines(gettext("Class ids:"))
    print(unclass(cl_class_ids(x)), ...)
    writeLines(gettext("Criterion:"))
    print(x$criterion, ...)
    invisible(x)
}
clue/R/prototypes.R0000644000175100001440000000346711304023136013777 0ustar hornikusers
## Generic for extracting the prototypes ("centers") of a fitted
## partition, where available.
cl_prototypes <- function(x) UseMethod("cl_prototypes")

## No default method.

## Package stats: kmeans() (R 2.1.0 or better).
cl_prototypes.kmeans <- function(x) x$centers

## Package cluster: clara() always gives prototypes.
cl_prototypes.clara <- function(x) x$medoids

## Package cluster: fanny() never gives prototypes.

## Package cluster: pam() does not give prototypes if given a
## dissimilarity matrix.
cl_prototypes.pam <-
function(x)
{
    p <- x$medoids
    if(!is.matrix(p))
        stop("Cannot determine prototypes.")
    p
}

## Package cba: ccfkms().
cl_prototypes.ccfkms <- cl_prototypes.kmeans

## Package cclust: cclust().
cl_prototypes.cclust <- cl_prototypes.kmeans

## Package e1071: cmeans() gives objects of class "fclust".
cl_prototypes.fclust <- cl_prototypes.kmeans

## Package e1071: cshell().
cl_prototypes.cshell <- cl_prototypes.kmeans

## Package e1071: bclust().
cl_prototypes.bclust <- cl_prototypes.kmeans

## Package flexclust: kcca() returns objects of S4 class "kcca" which
## extends S4 class "flexclust".
cl_prototypes.kcca <- function(x) methods::slot(x, "centers")

## Package kernlab: specc() and kkmeans() return objects of S4 class
## "specc".
cl_prototypes.specc <- function(x) kernlab::centers(x)

## Package mclust: Mclust().
cl_prototypes.Mclust <-
function(x)
{
    p <- x$mu
    ## For multidimensional models, we get a matrix whose columns are
    ## the means of each group in the best model, and hence needs to be
    ## transposed.
    if(is.matrix(p))
        p <- t(p)
    p
}

## Package clue: cl_pam().
cl_prototypes.cl_pam <- function(x) x$prototypes ## Package clue: (virtual) class "cl_partition". cl_prototypes.cl_partition <- function(x) cl_prototypes(.get_representation(x)) ## Package clue: pclust(). cl_prototypes.pclust <- function(x) x$prototypes clue/vignettes/0000755000175100001440000000000014715042016013230 5ustar hornikusersclue/vignettes/clue.Rnw0000644000175100001440000016521512734170652014671 0ustar hornikusers\documentclass[fleqn]{article} \usepackage[round,longnamesfirst]{natbib} \usepackage{graphicx,keyval,hyperref,doi} \newcommand\argmin{\mathop{\mathrm{arg min}}} \newcommand\trace{\mathop{\mathrm{tr}}} \newcommand\R{{\mathbb{R}}} \newcommand{\pkg}[1]{{\normalfont\fontseries{b}\selectfont #1}} \newcommand{\sQuote}[1]{`{#1}'} \newcommand{\dQuote}[1]{``{#1}''} \let\code=\texttt \newcommand{\file}[1]{\sQuote{\textsf{#1}}} \newcommand{\class}[1]{\code{"#1"}} \SweaveOpts{strip.white=true} \AtBeginDocument{\setkeys{Gin}{width=0.6\textwidth}} \date{2007-06-28} \title{A CLUE for CLUster Ensembles} \author{Kurt Hornik} %% \VignetteIndexEntry{CLUster Ensembles} \sloppy{} \begin{document} \maketitle \begin{abstract} Cluster ensembles are collections of individual solutions to a given clustering problem which are useful or necessary to consider in a wide range of applications. The R package~\pkg{clue} provides an extensible computational environment for creating and analyzing cluster ensembles, with basic data structures for representing partitions and hierarchies, and facilities for computing on these, including methods for measuring proximity and obtaining consensus and ``secondary'' clusterings. \end{abstract} <>= options(width = 60) library("clue") @ % \section{Introduction} \label{sec:introduction} \emph{Cluster ensembles} are collections of clusterings, which are all of the same ``kind'' (e.g., collections of partitions, or collections of hierarchies), of a set of objects. 
Such ensembles can be obtained, for example, by varying the
(hyper)parameters of a ``base'' clustering algorithm, by resampling or
reweighting the set of objects, or by employing several different base
clusterers.

Questions of ``agreement'' in cluster ensembles, and obtaining
``consensus'' clusterings from it, have been studied in several
scientific communities for quite some time now.  A special issue of the
Journal of Classification was devoted to ``Comparison and Consensus of
Classifications'' \citep{cluster:Day:1986} almost two decades ago.  The
recent popularization of ensemble methods such as Bayesian model
averaging \citep{cluster:Hoeting+Madigan+Raftery:1999}, bagging
\citep{cluster:Breiman:1996} and boosting
\citep{cluster:Friedman+Hastie+Tibshirani:2000}, typically in a
supervised learning context, has also furthered the research interest in
using ensemble methods to improve the quality and robustness of cluster
solutions.  Cluster ensembles can also be utilized to aggregate base
results over conditioning or grouping variables in multi-way data, to
reuse existing knowledge, and to accommodate the needs of distributed
computing, see e.g.\ \cite{cluster:Hornik:2005a} and
\cite{cluster:Strehl+Ghosh:2003a} for more information.

Package~\pkg{clue} is an extension package for
R~\citep{cluster:R:2005} providing a computational environment for
creating and analyzing cluster ensembles.  In
Section~\ref{sec:structures+algorithms}, we describe the underlying data
structures, and the functionality for measuring proximity, obtaining
consensus clusterings, and ``secondary'' clusterings.  Four examples are
discussed in Section~\ref{sec:examples}.  Section~\ref{sec:outlook}
concludes the paper.

A previous version of this manuscript was published in the \emph{Journal
of Statistical Software} \citep{cluster:Hornik:2005b}.
\section{Data structures and algorithms} \label{sec:structures+algorithms} \subsection{Partitions and hierarchies} Representations of clusterings of objects greatly vary across the multitude of methods available in R packages. For example, the class ids (``cluster labels'') for the results of \code{kmeans()} in base package~\pkg{stats}, \code{pam()} in recommended package~\pkg{cluster}~\citep{cluster:Rousseeuw+Struyf+Hubert:2005, cluster:Struyf+Hubert+Rousseeuw:1996}, and \code{Mclust()} in package~\pkg{mclust}~\citep{cluster:Fraley+Raftery+Wehrens:2005, cluster:Fraley+Raftery:2003}, are available as components named \code{cluster}, \code{clustering}, and \code{classification}, respectively, of the R objects returned by these functions. In many cases, the representations inherit from suitable classes. (We note that for versions of R prior to 2.1.0, \code{kmeans()} only returned a ``raw'' (unclassed) result, which was changed alongside the development of \pkg{clue}.) We deal with this heterogeneity of representations by providing getters for the key underlying data, such as the number of objects from which a clustering was obtained, and predicates, e.g.\ for determining whether an R object represents a partition of objects or not. These getters, such as \code{n\_of\_objects()}, and predicates are implemented as S3 generics, so that there is a \emph{conceptual}, but no formal class system underlying the predicates. Support for classed representations can easily be added by providing S3 methods. \subsubsection{Partitions} The partitions considered in \pkg{clue} are possibly soft (``fuzzy'') partitions, where for each object~$i$ and class~$j$ there is a non-negative number~$\mu_{ij}$ quantifying the ``belongingness'' or \emph{membership} of object~$i$ to class~$j$, with $\sum_j \mu_{ij} = 1$. For hard (``crisp'') partitions, all $\mu_{ij}$ are in $\{0, 1\}$. 
We can gather the $\mu_{ij}$ into the \emph{membership matrix} $M = [\mu_{ij}]$, where rows correspond to objects and columns to classes. The \emph{number of classes} of a partition, computed by function \code{n\_of\_classes()}, is the number of $j$ for which $\mu_{ij} > 0$ for at least one object~$i$. This may be less than the number of ``available'' classes, corresponding to the number of columns in a membership matrix representing the partition. The predicate functions \code{is.cl\_partition()}, \code{is.cl\_hard\_partition()}, and \code{is.cl\_soft\_partition()} are used to indicate whether R objects represent partitions of objects of the respective kind, with hard partitions as characterized above (all memberships in $\{0, 1\}$). (Hence, ``fuzzy clustering'' algorithms can in principle also give a hard partition.) \code{is.cl\_partition()} and \code{is.cl\_hard\_partition()} are generic functions; \code{is.cl\_soft\_partition()} gives true iff \code{is.cl\_partition()} is true and \code{is.cl\_hard\_partition()} is false. For R objects representing partitions, function \code{cl\_membership()} computes an R object with the membership values, currently always as a dense membership matrix with additional attributes. This is obviously rather inefficient for computations on hard partitions; we are planning to add ``canned'' sparse representations (using the vector of class ids) in future versions. Function \code{as.cl\_membership()} can be used for coercing \dQuote{raw} class ids (given as atomic vectors) or membership values (given as numeric matrices) to membership objects. Function \code{cl\_class\_ids()} determines the class ids of a partition. For soft partitions, the class ids returned are those of the \dQuote{nearest} hard partition obtained by taking the class ids of the (first) maximal membership values. Note that the cardinality of the set of the class ids may be less than the number of classes in the (soft) partition. 
Many partitioning methods are based on \emph{prototypes} (``centers''). In typical cases, these are points~$p_j$ in the same feature space the measurements~$x_i$ on the objects~$i$ to be partitioned are in, so that one can measure distance between objects and prototypes, and e.g.\ classify objects to their closest prototype. Such partitioning methods can also induce partitions of the entire feature space (rather than ``just'' the set of objects to be partitioned). Currently, package \pkg{clue} has only minimal support for this ``additional'' structure, providing a \code{cl\_prototypes()} generic for extracting the prototypes, and is mostly focused on computations on partitions which are based on their memberships. Many algorithms resulting in partitions of a given set of objects can be taken to induce a partition of the underlying feature space for the measurements on the objects, so that class memberships for ``new'' objects can be obtained from the induced partition. Examples include partitions based on assigning objects to their ``closest'' prototypes, or providing mixture models for the distribution of objects in feature space. Package~\pkg{clue} provides a \code{cl\_predict()} generic for predicting the class memberships of new objects (if possible). Function \code{cl\_fuzziness()} computes softness (fuzziness) measures for (ensembles) of partitions. Built-in measures are the partition coefficient \label{PC} and partition entropy \citep[e.g.,][]{cluster:Bezdek:1981}, with an option to normalize in a way that hard partitions and the ``fuzziest'' possible partition (where all memberships are the same) get fuzziness values of zero and one, respectively. Note that this normalization differs from ``standard'' ones in the literature. In the sequel, we shall also use the concept of the \emph{co-membership matrix} $C(M) = M M'$, where $'$ denotes matrix transposition, of a partition. 
For hard partitions, an entry $c_{ij}$ of $C(M)$ is 1 iff the corresponding objects $i$ and $j$ are in the same class, and 0 otherwise. \subsubsection{Hierarchies} The hierarchies considered in \pkg{clue} are \emph{total indexed hierarchies}, also known as \emph{$n$-valued trees}, and hence correspond in a one-to-one manner to \emph{ultrametrics} (distances $u_{ij}$ between pairs of objects $i$ and $j$ which satisfy the ultrametric constraint $u_{ij} = \max(u_{ik}, u_{jk})$ for all triples $i$, $j$, and $k$). See e.g.~\citet[Page~69--71]{cluster:Gordon:1999}. Function \code{cl\_ultrametric(x)} computes the associated ultrametric from an R object \code{x} representing a hierarchy of objects. If \code{x} is not an ultrametric, function \code{cophenetic()} in base package~\pkg{stats} is used to obtain the ultrametric (also known as cophenetic) distances from the hierarchy, which in turn by default calls the S3 generic \code{as.hclust()} (also in \pkg{stats}) on the hierarchy. Support for classes which represent hierarchies can thus be added by providing \code{as.hclust()} methods for this class. In R~2.1.0 or better (again as part of the work on \pkg{clue}), \code{cophenetic} is an S3 generic as well, and one can also more directly provide methods for this if necessary. In addition, there is a generic function \code{as.cl\_ultrametric()} which can be used for coercing \emph{raw} (non-classed) ultrametrics, represented as numeric vectors (of the lower-half entries) or numeric matrices, to ultrametric objects. Finally, the generic predicate function \code{is.cl\_hierarchy()} is used to determine whether an R object represents a hierarchy or not. Ultrametric objects can also be coerced to classes~\class{dendrogram} and \class{hclust} (from base package~\pkg{stats}), and hence in particular use the \code{plot()} methods for these classes. By default, plotting an ultrametric object uses the plot method for dendrograms. 
Obtaining a hierarchy on a given set of objects can be thought of as transforming the pairwise dissimilarities between the objects (which typically do not yet satisfy the ultrametric constraints) into an ultrametric. Ideally, this ultrametric should be as close as possible to the dissimilarities. In some important cases, explicit solutions are possible (e.g., ``standard'' hierarchical clustering with single or complete linkage gives the optimal ultrametric dominated by or dominating the dissimilarities, respectively). On the other hand, the problem of finding the closest ultrametric in the least squares sense is known to be NP-hard \citep{cluster:Krivanek+Moravek:1986,cluster:Krivanek:1986}. One important class of heuristics for finding least squares fits is based on iterative projection on convex sets of constraints \citep{cluster:Hubert+Arabie:1995}. \label{SUMT} Function \code{ls\_fit\_ultrametric()} follows \cite{cluster:DeSoete:1986} to use an SUMT \citep[Sequential Unconstrained Minimization Technique;][]{cluster:Fiacco+McCormick:1968} approach in turn simplifying the suggestions in \cite{cluster:Carroll+Pruzansky:1980}. Let $L(u)$ be the function to be minimized over all $u$ in some constrained set $\mathcal{U}$---in our case, $L(u) = \sum (d_{ij}-u_{ij})^2$ is the least squares criterion, and $\mathcal{U}$ is the set of all ultrametrics $u$. One iteratively minimizes $L(u) + \rho_k P(u)$, where $P(u)$ is a non-negative function penalizing violations of the constraints such that $P(u)$ is zero iff $u \in \mathcal{U}$. The $\rho$ values are increased according to the rule $\rho_{k+1} = q \rho_k$ for some constant $q > 1$, until convergence is obtained in the sense that e.g.\ the Euclidean distance between successive solutions $u_k$ and $u_{k+1}$ is small enough. Optionally, the final $u_k$ is then suitably projected onto $\mathcal{U}$. 
For \code{ls\_fit\_ultrametric()}, we obtain the starting value $u_0$ by \dQuote{random shaking} of the given dissimilarity object, and use the penalty function $P(u) = \sum_{\Omega} (u_{ij} - u_{jk}) ^ 2$, where $\Omega$ contains all triples $i, j, k$ for which $u_{ij} \le \min(u_{ik}, u_{jk})$ and $u_{ik} \ne u_{jk}$, i.e., for which $u$ violates the ultrametric constraints. The unconstrained minimizations are carried out using either \code{optim()} or \code{nlm()} in base package~\pkg{stats}, with analytic gradients given in \cite{cluster:Carroll+Pruzansky:1980}. This ``works'', even though we note that $P$ is not even a continuous function, which seems to have gone unnoticed in the literature! (Consider an ultrametric $u$ for which $u_{ij} = u_{ik} < u_{jk}$ for some $i, j, k$ and define $u(\delta)$ by changing the $u_{ij}$ to $u_{ij} + \delta$. For $u$, both $(i,j,k)$ and $(j,i,k)$ are in the violation set $\Omega$, whereas for all $\delta$ sufficiently small, only $(j,i,k)$ is in the violation set for $u(\delta)$. Hence, $\lim_{\delta\to 0} P(u(\delta)) = P(u) + (u_{ij} - u_{ik})^2$. This shows that $P$ is discontinuous at all non-constant $u$ with duplicated entries. On the other hand, it is continuously differentiable at all $u$ with unique entries.) Hence, we need to turn off checking analytical gradients when using \code{nlm()} for minimization. The default optimization using conjugate gradients should work reasonably well for medium to large size problems. For \dQuote{small} ones, using \code{nlm()} is usually faster. Note that the number of ultrametric constraints is of the order $n^3$, suggesting to use the SUMT approach in favor of \code{constrOptim()} in \pkg{stats}. It should be noted that the SUMT approach is a heuristic which cannot be guaranteed to find the global minimum. Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the base algorithm. 
\subsubsection{Extensibility} The methods provided in package~\pkg{clue} handle the partitions and hierarchies obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}~\citep{cluster:Hornik+Hothorn+Karatzoglou:2006}, \pkg{cba}~\citep{cluster:Buchta+Hahsler:2005}, \pkg{cclust}~\citep{cluster:Dimitriadou:2005}, \pkg{cluster}, \pkg{e1071}~\citep{cluster:Dimitriadou+Hornik+Leisch:2005}, \pkg{flexclust}~\citep{cluster:Leisch:2006a}, \pkg{flexmix}~\citep{cluster:Leisch:2004}, \pkg{kernlab}~\citep{cluster:Karatzoglou+Smola+Hornik:2004}, and \pkg{mclust} (and of course, \pkg{clue} itself). Extending support to other packages is straightforward, provided that clusterings are instances of classes. Suppose e.g.\ that a package has a function \code{glvq()} for ``generalized'' (i.e., non-Euclidean) Learning Vector Quantization which returns an object of class~\class{glvq}, in turn being a list with component \code{class\_ids} containing the class ids. To integrate this into the \pkg{clue} framework, all that is necessary is to provide the following methods. <<>>= cl_class_ids.glvq <- function(x) as.cl_class_ids(x$class_ids) is.cl_partition.glvq <- function(x) TRUE is.cl_hard_partition.glvq <- function(x) TRUE @ % $ \subsection{Cluster ensembles} Cluster ensembles are realized as lists of clusterings with additional class information. All clusterings in an ensemble must be of the same ``kind'' (i.e., either all partitions as known to \code{is.cl\_partition()}, or all hierarchies as known to \code{is.cl\_hierarchy()}, respectively), and have the same number of objects. If all clusterings are partitions, the list realizing the ensemble has class~\class{cl\_partition\_ensemble} and inherits from \class{cl\_ensemble}; if all clusterings are hierarchies, it has class~\class{cl\_hierarchy\_ensemble} and inherits from \class{cl\_ensemble}. 
Empty ensembles cannot be categorized according to the kind of clusterings they contain, and hence only have class~\class{cl\_ensemble}. Function \code{cl\_ensemble()} creates a cluster ensemble object from clusterings given either one-by-one, or as a list passed to the \code{list} argument. As unclassed lists could be used to represent single clusterings (in particular for results from \code{kmeans()} in versions of R prior to 2.1.0), we prefer not to assume that an unnamed given list is a list of clusterings. \code{cl\_ensemble()} verifies that all given clusterings are of the same kind, and all have the same number of objects. (By the notion of cluster ensembles, we should in principle verify that the clusterings come from the \emph{same} objects, which of course is not always possible.) The list representation makes it possible to use \code{lapply()} for computations on the individual clusterings in (i.e., the components of) a cluster ensemble. Available methods for cluster ensembles include those for subscripting, \code{c()}, \code{rep()}, \code{print()}, and \code{unique()}, where the last is based on a \code{unique()} method for lists added in R~2.1.1, and makes it possible to find unique and duplicated elements in cluster ensembles. The elements of the ensemble can be tabulated using \code{cl\_tabulate()}. Function \code{cl\_boot()} generates cluster ensembles with bootstrap replicates of the results of applying a \dQuote{base} clustering algorithm to a given data set. Currently, this is a rather simple-minded function with limited applicability, and mostly useful for studying the effect of (uncontrolled) random initializations of fixed-point partitioning algorithms such as \code{kmeans()} or \code{cmeans()} in package~\pkg{e1071}. 
To study the effect of varying control parameters or explicitly providing random starting values, the respective cluster ensemble has to be generated explicitly (most conveniently by using \code{replicate()} to create a list \code{lst} of suitable instances of clusterings obtained by the base algorithm, and using \code{cl\_ensemble(list = lst)} to create the ensemble). Resampling the training data is possible for base algorithms which can predict the class memberships of new data using \code{cl\_predict} (e.g., by classifying the out-of-bag data to their closest prototype). In fact, we believe that for unsupervised learning methods such as clustering, \emph{reweighting} is conceptually superior to resampling, and have therefore recently enhanced package~\pkg{e1071} to provide an implementation of weighted fuzzy $c$-means, and package~\pkg{flexclust} contains an implementation of weighted $k$-means. We are currently experimenting with interfaces for providing ``direct'' support for reweighting via \code{cl\_boot()}. \subsection{Cluster proximities} \subsubsection{Principles} Computing dissimilarities and similarities (``agreements'') between clusterings of the same objects is a key ingredient in the analysis of cluster ensembles. The ``standard'' data structures available for such proximity data (measures of similarity or dissimilarity) are classes~\class{dist} and \class{dissimilarity} in package~\pkg{cluster} (which basically, but not strictly, extends \class{dist}), and are both not entirely suited to our needs. First, they are confined to \emph{symmetric} dissimilarity data. Second, they do not provide enough reflectance. We also note that the Bioconductor package~\pkg{graph}~\citep{cluster:Gentleman+Whalen:2005} contains an efficient subscript method for objects of class~\class{dist}, but returns a ``raw'' matrix for row/column subscripting. For package~\pkg{clue}, we use the following approach. 
There are classes for symmetric and (possibly) non-symmetric proximity data (\class{cl\_proximity} and \class{cl\_cross\_proximity}), which, in addition to holding the numeric data, also contain a description ``slot'' (attribute), currently a character string, as a first approximation to providing more reflectance. Internally, symmetric proximity objects store the lower diagonal proximity values in a numeric vector (in row-major order), i.e., the same way as objects of class~\class{dist}; a \code{self} attribute can be used for diagonal values (in case some of these are non-zero). Symmetric proximity objects can be coerced to dense matrices using \code{as.matrix()}. It is possible to use 2-index matrix-style subscripting for symmetric proximity objects; unless this uses identical row and column indices, it results in a non-symmetric proximity object. This approach ``propagates'' to classes for symmetric and (possibly) non-symmetric cluster dissimilarity and agreement data (e.g., \class{cl\_dissimilarity} and \class{cl\_cross\_dissimilarity} for dissimilarity data), which extend the respective proximity classes. Ultrametric objects are implemented as symmetric proximity objects with a dissimilarity interpretation so that self-proximities are zero, and inherit from classes~\class{cl\_dissimilarity} and \class{cl\_proximity}. Providing reflectance is far from optimal. For example, if \code{s} is a similarity object (with cluster agreements), \code{1 - s} is a dissimilarity one, but the description is preserved unchanged. This issue could be addressed by providing high-level functions for transforming proximities. \label{synopsis} Cluster dissimilarities are computed via \code{cl\_dissimilarity()} with synopsis \code{cl\_dissimilarity(x, y = NULL, method = "euclidean")}, where \code{x} and \code{y} are cluster ensemble objects or coercible to such, or \code{NULL} (\code{y} only). 
If \code{y} is \code{NULL}, the return value is an object of class~\class{cl\_dissimilarity} which contains the dissimilarities between all pairs of clusterings in \code{x}. Otherwise, it is an object of class~\class{cl\_cross\_dissimilarity} with the dissimilarities between the clusterings in \code{x} and the clusterings in \code{y}. Formal argument \code{method} is either a character string specifying one of the built-in methods for computing dissimilarity, or a function to be taken as a user-defined method, making it reasonably straightforward to add methods. Function \code{cl\_agreement()} has the same interface as \code{cl\_dissimilarity()}, returning cluster similarity objects with respective classes~\class{cl\_agreement} and \class{cl\_cross\_agreement}. Built-in methods for computing dissimilarities may coincide (in which case they are transforms of each other), but do not necessarily do so, as there typically are no canonical transformations. E.g., according to needs and scientific community, agreements might be transformed to dissimilarities via $d = - \log(s)$ or the square root thereof \citep[e.g.,][]{cluster:Strehl+Ghosh:2003b}, or via $d = 1 - s$. \subsubsection{Partition proximities} When assessing agreement or dissimilarity of partitions, one needs to consider that the class ids may be permuted arbitrarily without changing the underlying partitions. For membership matrices~$M$, permuting class ids amounts to replacing $M$ by $M \Pi$, where $\Pi$ is a suitable permutation matrix. We note that the co-membership matrix $C(M) = MM'$ is unchanged by these transformations; hence, proximity measures based on co-occurrences, such as the Katz-Powell \citep{cluster:Katz+Powell:1953} or Rand \citep{cluster:Rand:1971} indices, do not explicitly need to adjust for possible re-labeling. 
The same is true for measures based on the ``confusion matrix'' $M' \tilde{M}$ of two membership matrices $M$ and $\tilde{M}$ which are invariant under permutations of rows and columns, such as the Normalized Mutual Information (NMI) measure introduced in \cite{cluster:Strehl+Ghosh:2003a}. Other proximity measures need to find permutations so that the classes are optimally matched, which of course in general requires exhaustive search through all $k!$ possible permutations, where $k$ is the (common) number of classes in the partitions, and thus will typically be prohibitively expensive. Fortunately, in some important cases, optimal matchings can be determined very efficiently. We explain this in detail for ``Euclidean'' partition dissimilarity and agreement (which in fact is the default measure used by \code{cl\_dissimilarity()} and \code{cl\_agreement()}). Euclidean partition dissimilarity \citep{cluster:Dimitriadou+Weingessel+Hornik:2002} is defined as \begin{displaymath} d(M, \tilde{M}) = \min\nolimits_\Pi \| M - \tilde{M} \Pi \| \end{displaymath} where the minimum is taken over all permutation matrices~$\Pi$, $\|\cdot\|$ is the Frobenius norm (so that $\|Y\|^2 = \trace(Y'Y)$), and $n$ is the (common) number of objects in the partitions. As $\| M - \tilde{M} \Pi \|^2 = \trace(M'M) - 2 \trace(M'\tilde{M}\Pi) + \trace(\Pi'\tilde{M}'\tilde{M}\Pi) = \trace(M'M) - 2 \trace(M'\tilde{M}\Pi) + \trace(\tilde{M}'\tilde{M})$, we see that minimizing $\| M - \tilde{M} \Pi \|^2$ is equivalent to maximizing $\trace(M'\tilde{M}\Pi) = \sum_{i,k}{\mu_{ik}\tilde{\mu}}_{i,\pi(k)}$, which for hard partitions is the number of objects with the same label in the partitions given by $M$ and $\tilde{M}\Pi$. Finding the optimal $\Pi$ is thus recognized as an instance of the \emph{linear sum assignment problem} (LSAP, also known as the weighted bipartite graph matching problem). 
The LSAP can be solved by linear programming, e.g., using Simplex-style primal algorithms as done by function~\code{lp.assign()} in package~\pkg{lpSolve}~\citep{cluster:Buttrey:2005}, but primal-dual algorithms such as the so-called Hungarian method can be shown to find the optimum in time $O(k^3)$ \citep[e.g.,][]{cluster:Papadimitriou+Steiglitz:1982}. Available published implementations include TOMS 548 \citep{cluster:Carpaneto+Toth:1980}, which however is restricted to integer weights and $k < 131$. One can also transform the LSAP into a network flow problem, and use e.g.~RELAX-IV \citep{cluster:Bertsekas+Tseng:1994} for solving this, as is done in package~\pkg{optmatch}~\citep{cluster:Hansen:2005}. In package~\pkg{clue}, we use an efficient C implementation of the Hungarian algorithm kindly provided to us by Walter B\"ohm, which has been found to perform very well across a wide range of problem sizes. \cite{cluster:Gordon+Vichi:2001} use a variant of Euclidean dissimilarity (``GV1 dissimilarity'') which is based on the sum of the squared difference of the memberships of matched (non-empty) classes only, discarding the unmatched ones (see their Example~2). This results in a measure which is discontinuous over the space of soft partitions with arbitrary numbers of classes. The partition agreement measures ``angle'' and ``diag'' (maximal cosine of angle between the memberships, and maximal co-classification rate, where both maxima are taken over all column permutations of the membership matrices) are based on solving the same LSAP as for Euclidean dissimilarity. Finally, Manhattan partition dissimilarity is defined as the minimal sum of the absolute differences of $M$ and all column permutations of $\tilde{M}$, and can again be computed efficiently by solving an LSAP. 
For hard partitions, both Manhattan and squared Euclidean dissimilarity give twice the \emph{transfer distance} \citep{cluster:Charon+Denoeud+Guenoche:2006}, which is the minimum number of objects that must be removed so that the implied partitions (restrictions to the remaining objects) are identical. This is also known as the \emph{$R$-metric} in \cite{cluster:Day:1981}, i.e., the number of augmentations and removals of single objects needed to transform one partition into the other, and the \emph{partition-distance} in \cite{cluster:Gusfield:2002}. Note when assessing proximity that agreements for soft partitions are always (and quite often considerably) lower than the agreements for the corresponding nearest hard partitions, unless the agreement measures are based on the latter anyway (as currently done for Rand, Katz-Powell, and NMI). Package~\pkg{clue} provides additional agreement measures, such as the Jaccard and Fowlkes-Mallows \citep[quite often incorrectly attributed to \cite{cluster:Wallace:1983}]{cluster:Fowlkes+Mallows:1983a} indices, and dissimilarity measures such as the ``symdiff'' and Rand distances (the latter is proportional to the metric of \cite{cluster:Mirkin:1996}) and the metrics discussed in \cite{cluster:Boorman+Arabie:1972}. One could easily add more proximity measures, such as the ``Variation of Information'' \citep{cluster:Meila:2003}. However, all these measures are rigorously defined for hard partitions only. To see why extensions to soft partitions are far from straightforward, consider e.g.\ measures based on the confusion matrix. Its entries count the cardinality of certain intersections of sets. \label{fuzzy} In a fuzzy context for soft partitions, a natural generalization would be using fuzzy cardinalities (i.e., sums of membership values) of fuzzy intersections instead. 
There are many possible choices for the latter, with the product of the membership values (corresponding to employing the confusion matrix also in the fuzzy case) one of them, but the minimum instead of the product being the ``usual'' choice. A similar point can be made for co-occurrences of soft memberships. We are not aware of systematic investigations of these extension issues. \subsubsection{Hierarchy proximities} Available built-in dissimilarity measures for hierarchies include \emph{Euclidean} (again, the default measure used by \code{cl\_dissimilarity()}) and Manhattan dissimilarity, which are simply the Euclidean (square root of the sum of squared differences) and Manhattan (sum of the absolute differences) dissimilarities between the associated ultrametrics. Cophenetic dissimilarity is defined as $1 - c^2$, where $c$ is the cophenetic correlation coefficient \citep{cluster:Sokal+Rohlf:1962}, i.e., the Pearson product-moment correlation between the ultrametrics. Gamma dissimilarity is the rate of inversions between the associated ultrametrics $u$ and $v$ (i.e., the rate of pairs $(i,j)$ and $(k,l)$ for which $u_{ij} < u_{kl}$ and $v_{ij} > v_{kl}$). This measure is a linear transformation of Kruskal's~$\gamma$. Finally, symdiff dissimilarity is the cardinality of the symmetric set difference of the sets of classes (hierarchies in the strict sense) induced by the dendrograms. Associated agreement measures are obtained by suitable transformations of the dissimilarities~$d$; for Euclidean proximities, we prefer to use $1 / (1 + d)$ rather than e.g.\ $\exp(-d)$. One should note that whereas cophenetic and gamma dissimilarities are invariant to linear transformations, Euclidean and Manhattan ones are not. Hence, if only the relative ``structure'' of the dendrograms is of interest, these dissimilarities should only be used after transforming the ultrametrics to a common range of values (e.g., to $[0,1]$). 
\subsection{Consensus clusterings} Consensus clusterings ``synthesize'' the information in the elements of a cluster ensemble into a single clustering. There are three main approaches to obtaining consensus clusterings \citep{cluster:Hornik:2005a,cluster:Gordon+Vichi:2001}: in the \emph{constructive} approach, one specifies a way to construct a consensus clustering. In the \emph{axiomatic} approach, emphasis is on the investigation of existence and uniqueness of consensus clusterings characterized axiomatically. The \emph{optimization} approach formalizes the natural idea of describing consensus clusterings as the ones which ``optimally represent the ensemble'' by providing a criterion to be optimized over a suitable set $\mathcal{C}$ of possible consensus clusterings. If $d$ is a dissimilarity measure and $C_1, \ldots, C_B$ are the elements of the ensemble, one can e.g.\ look for solutions of the problem \begin{displaymath} \sum\nolimits_{b=1}^B w_b d(C, C_b) ^ p \Rightarrow \min\nolimits_{C \in \mathcal{C}}, \end{displaymath} for some $p \ge 0$, i.e., as clusterings~$C^*$ minimizing weighted average dissimilarity powers of order~$p$. Analogously, if a similarity measure is given, one can look for clusterings maximizing weighted average similarity powers. Following \cite{cluster:Gordon+Vichi:1998}, a $C^*$ as above is referred to as (weighted) \emph{median} or \emph{medoid} clustering if $p = 1$ and the optimum is sought over the set of all possible base clusterings, or the set $\{ C_1, \ldots, C_B \}$ of the base clusterings, respectively. For $p = 2$, we have \emph{least squares} consensus clusterings (generalized means). For computing consensus clusterings, package~\pkg{clue} provides function \code{cl\_consensus()} with synopsis \code{cl\_consensus(x, method = NULL, weights = 1, control = list())}. 
This allows (similar to the functions for computing cluster proximities, see Section~\ref{synopsis} on Page~\pageref{synopsis}) argument \code{method} to be a character string specifying one of the built-in methods discussed below, or a function to be taken as a user-defined method (taking an ensemble, the case weights, and a list of control parameters as its arguments), again making it reasonably straightforward to add methods. In addition, function~\code{cl\_medoid()} can be used for obtaining medoid partitions (using, in principle, arbitrary dissimilarities). Modulo possible differences in the case of ties, this gives the same results as (the medoid obtained by) \code{pam()} in package~\pkg{cluster}. If all elements of the ensemble are partitions, package~\pkg{clue} provides algorithms for computing soft least squares consensus partitions for weighted Euclidean, GV1 and co-membership dissimilarities. Let $M_1, \ldots, M_B$ and $M$ denote the membership matrices of the elements of the ensemble and their sought least squares consensus partition, respectively. For Euclidean dissimilarity, we need to find \begin{displaymath} \sum_b w_b \min\nolimits_{\Pi_b} \| M - M_b \Pi_b \|^2 \Rightarrow \min\nolimits_M \end{displaymath} over all membership matrices (i.e., stochastic matrices) $M$, or equivalently, \begin{displaymath} \sum_b w_b \| M - M_b \Pi_b \|^2 \Rightarrow \min\nolimits_{M, \Pi_1, \ldots, \Pi_B} \end{displaymath} over all $M$ and permutation matrices $\Pi_1, \ldots, \Pi_B$. Now fix the $\Pi_b$ and let $\bar{M} = s^{-1} \sum_b w_b M_b \Pi_b$ be the weighted average of the $M_b \Pi_b$, where $s = \sum_b w_b$. 
Then \begin{eqnarray*} \lefteqn{\sum_b w_b \| M - M_b \Pi_b \|^2} \\ &=& \sum_b w_b (\|M\|^2 - 2 \trace(M' M_b \Pi_b) + \|M_b\Pi_b\|^2) \\ &=& s \|M\|^2 - 2 s \trace(M' \bar{M}) + \sum_b w_b \|M_b\|^2 \\ &=& s (\|M - \bar{M}\|^2) + \sum_b w_b \|M_b\|^2 - s \|\bar{M}\|^2 \end{eqnarray*} Thus, as already observed in \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} and \cite{cluster:Gordon+Vichi:2001}, for fixed permutations $\Pi_b$ the optimal soft $M$ is given by $\bar{M}$. The optimal permutations can be found by minimizing $- s \|\bar{M}\|^2$, or equivalently, by maximizing \begin{displaymath} s^2 \|\bar{M}\|^2 = \sum_{\beta, b} w_\beta w_b \trace(\Pi_\beta'M_\beta'M_b\Pi_b). \end{displaymath} With $U_{\beta,b} = w_\beta w_b M_\beta' M_b$ we can rewrite the above as \begin{displaymath} \sum_{\beta, b} w_\beta w_b \trace(\Pi_\beta'M_\beta'M_b\Pi_b) = \sum_{\beta,b} \sum_{j=1}^k [U_{\beta,b}]_{\pi_\beta(j), \pi_b(j)} =: \sum_{j=1}^k c_{\pi_1(j), \ldots, \pi_B(j)} \end{displaymath} This is an instance of the \emph{multi-dimensional assignment problem} (MAP), which, contrary to the LSAP, is known to be NP-hard \citep[e.g., via reduction to 3-DIMENSIONAL MATCHING,][]{cluster:Garey+Johnson:1979}, and can e.g.\ be approached using randomized parallel algorithms \citep{cluster:Oliveira+Pardalos:2004}. Branch-and-bound approaches suggested in the literature \citep[e.g.,][]{cluster:Grundel+Oliveira+Pardalos:2005} are unfortunately computationally infeasible for ``typical'' sizes of cluster ensembles ($B \ge 20$, maybe even in the hundreds). Package~\pkg{clue} provides two heuristics for (approximately) finding the soft least squares consensus partition for Euclidean dissimilarity. Method \code{"DWH"} of function \code{cl\_consensus()} is an extension of the greedy algorithm in \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} which is based on a single forward pass through the ensemble which in each step chooses the ``locally'' optimal $\Pi$. 
Starting with $\tilde{M}_1 = M_1$, $\tilde{M}_b$ is obtained from $\tilde{M}_{b-1}$ by optimally matching $M_b \Pi_b$ to this, and taking a weighted average of $\tilde{M}_{b-1}$ and $M_b \Pi_b$ in a way that $\tilde{M}_b$ is the weighted average of the first~$b$ $M_\beta \Pi_\beta$. This simple approach could be further enhanced via back-fitting or several passes, in essence resulting in an ``on-line'' version of method \code{"SE"}. This, in turn, is a fixed-point algorithm, which iterates between updating $M$ as the weighted average of the current $M_b \Pi_b$, and determining the $\Pi_b$ by optimally matching the current $M$ to the individual $M_b$. Finally, method \code{"GV1"} implements the fixed-point algorithm for the ``first model'' in \cite{cluster:Gordon+Vichi:2001}, which gives least squares consensus partitions for GV1 dissimilarity. In the above, we implicitly assumed that all partitions in the ensemble as well as the sought consensus partition have the same number of classes. The more general case can be dealt with through suitable ``projection'' devices. When using co-membership dissimilarity, the least squares consensus partition is determined by minimizing \begin{eqnarray*} \lefteqn{\sum_b w_b \|MM' - M_bM_b'\|^2} \\ &=& s \|MM' - \bar{C}\|^2 + \sum_b w_b \|M_bM_b'\|^2 - s \|\bar{C}\|^2 \end{eqnarray*} over all membership matrices~$M$, where now $\bar{C} = s^{-1} \sum_b C(M_b) = s^{-1} \sum_b M_bM_b'$ is the weighted average co-membership matrix of the ensemble. This corresponds to the ``third model'' in \cite{cluster:Gordon+Vichi:2001}. Method \code{"GV3"} of function \code{cl\_consensus()} provides a SUMT approach (see Section~\ref{SUMT} on Page~\pageref{SUMT}) for finding the minimum. 
We note that this strategy could more generally be applied to consensus problems of the form \begin{displaymath} \sum_b w_b \|\Phi(M) - \Phi(M_b)\|^2 \Rightarrow \min\nolimits_M, \end{displaymath} which are equivalent to minimizing $\|\Phi(M) - \bar{\Phi}\|^2$, with $\bar{\Phi}$ the weighted average of the $\Phi(M_b)$. This includes e.g.\ the case where generalized co-memberships are defined by taking the ``standard'' fuzzy intersection of co-incidences, as discussed in Section~\ref{fuzzy} on Page~\pageref{fuzzy}. Package~\pkg{clue} currently does not provide algorithms for obtaining \emph{hard} consensus partitions, as e.g.\ done in \cite{cluster:Krieger+Green:1999} using Rand proximity. It seems ``natural'' to extend the methods discussed above to include a constraint on softness, e.g., on the partition coefficient PC (see Section~\ref{PC} on Page~\pageref{PC}). For Euclidean dissimilarity, straightforward Lagrangian computations show that the constrained minima are of the form $\bar{M}(\alpha) = \alpha \bar{M} + (1 - \alpha) E$, where $E$ is the ``maximally soft'' membership with all entries equal to $1/k$, $\bar{M}$ is again the weighted average of the $M_b\Pi_b$ with the $\Pi_b$ solving the underlying MAP, and $\alpha$ is chosen such that $PC(\bar{M}(\alpha))$ equals a prescribed value. As $\alpha$ increases (even beyond one), softness of the $\bar{M}(\alpha)$ decreases. However, for $\alpha^* > 1 / (1 - k\mu^*)$, where $\mu^*$ is the minimum of the entries of $\bar{M}$, the $\bar{M}(\alpha)$ have negative entries, and are no longer feasible membership matrices. Obviously, the non-negativity constraints for the $\bar{M}(\alpha)$ eventually put restrictions on the admissible $\Pi_b$ in the underlying MAP. Thus, such a simple relaxation approach to obtaining optimal hard partitions is not feasible. 
For ensembles of hierarchies, \code{cl\_consensus()} provides a built-in method (\code{"cophenetic"}) for approximately minimizing average weighted squared Euclidean dissimilarity \begin{displaymath} \sum_b w_b \| U - U_b \|^2 \Rightarrow \min\nolimits_U \end{displaymath} over all ultrametrics~$U$, where $U_1, \ldots, U_B$ are the ultrametrics corresponding to the elements of the ensemble. This is of course equivalent to minimizing $\| U - \bar{U} \|^2$, where $\bar{U} = s^{-1} \sum_b w_b U_b$ is the weighted average of the $U_b$. The SUMT approach provided by function \code{ls\_fit\_ultrametric()} (see Section~\ref{SUMT} on Page~\pageref{SUMT}) is employed for finding the sought weighted least squares consensus hierarchy. In addition, method \code{"majority"} obtains a consensus hierarchy from an extension of the majority consensus tree of \cite{cluster:Margush+McMorris:1981}, which minimizes $L(U) = \sum_b w_b d(U_b, U)$ over all ultrametrics~$U$, where $d$ is the symmetric difference dissimilarity. Clearly, the available methods use heuristics for solving hard optimization problems, and cannot be guaranteed to find a global optimum. Standard practice would recommend to use the best solution found in ``sufficiently many'' replications of the methods. Alternative recent approaches to obtaining consensus partitions include ``Bagged Clustering'' \citep[provided by \code{bclust()} in package~\pkg{e1071}]{cluster:Leisch:1999}, the ``evidence accumulation'' framework of \cite{cluster:Fred+Jain:2002}, the NMI optimization and graph-partitioning methods in \cite{cluster:Strehl+Ghosh:2003a}, ``Bagged Clustering'' as in \cite{cluster:Dudoit+Fridlyand:2003}, and the hybrid bipartite graph formulation of \cite{cluster:Fern+Brodley:2004}. Typically, these approaches are constructive, and can easily be implemented based on the infrastructure provided by package~\pkg{clue}. Evidence accumulation amounts to standard hierarchical clustering of the average co-membership matrix. 
Procedure~BagClust1 of \cite{cluster:Dudoit+Fridlyand:2003} amounts to computing $B^{-1} \sum_b M_b\Pi_b$, where each $\Pi_b$ is determined by optimal Euclidean matching of $M_b$ to a fixed reference membership $M_0$. In the corresponding ``Bagged Clustering'' framework, $M_0$ and the $M_b$ are obtained by applying the base clusterer to the original data set and bootstrap samples from it, respectively. This is implemented as method \code{"DFBC1"} of \code{cl\_bag()} in package~\pkg{clue}. Finally, the approach of \cite{cluster:Fern+Brodley:2004} solves an LSAP for an asymmetric cost matrix based on object-by-all-classes incidences. \subsection{Cluster partitions} To investigate the ``structure'' in a cluster ensemble, an obvious idea is to start clustering the clusterings in the ensemble, resulting in ``secondary'' clusterings \citep{cluster:Gordon+Vichi:1998, cluster:Gordon:1999}. This can e.g.\ be performed by using \code{cl\_dissimilarity()} (or \code{cl\_agreement()}) to compute a dissimilarity matrix for the ensemble, and feed this into a dissimilarity-based clustering algorithm (such as \code{pam()} in package~\pkg{cluster} or \code{hclust()} in package~\pkg{stats}). (One can even use \code{cutree()} to obtain hard partitions from hierarchies thus obtained.) If prototypes (``typical clusterings'') are desired for partitions of clusterings, they can be determined post-hoc by finding suitable consensus clusterings in the classes of the partition, e.g., using \code{cl\_consensus()} or \code{cl\_medoid()}. Package~\pkg{clue} additionally provides \code{cl\_pclust()} for direct prototype-based partitioning based on minimizing criterion functions of the form $\sum w_b u_{bj}^m d(x_b, p_j)^e$, the sum of the case-weighted membership-weighted $e$-th powers of the dissimilarities between the elements~$x_b$ of the ensemble and the prototypes~$p_j$, for suitable dissimilarities~$d$ and exponents~$e$. 
(The underlying feature spaces are that of membership matrices and ultrametrics, respectively, for partitions and hierarchies.) Parameter~$m$ must not be less than one and controls the softness of the obtained partitions, corresponding to the \dQuote{fuzzification parameter} of the fuzzy $c$-means algorithm. For $m = 1$, a generalization of the Lloyd-Forgy variant \citep{cluster:Lloyd:1957, cluster:Forgy:1965, cluster:Lloyd:1982} of the $k$-means algorithm is used, which iterates between reclassifying objects to their closest prototypes, and computing new prototypes as consensus clusterings for the classes. \citet{cluster:Gaul+Schader:1988} introduced this procedure for \dQuote{Clusterwise Aggregation of Relations} (with the same domains), containing equivalence relations, i.e., hard partitions, as a special case. For $m > 1$, a generalization of the fuzzy $c$-means recipe \citep[e.g.,][]{cluster:Bezdek:1981} is used, which alternates between computing optimal memberships for fixed prototypes, and computing new prototypes as the suitably weighted consensus clusterings for the classes. This procedure is repeated until convergence occurs, or the maximal number of iterations is reached. Consensus clusterings are computed using (one of the methods provided by) \code{cl\_consensus}, with dissimilarities~$d$ and exponent~$e$ implied by method employed, and obtained via a registration mechanism. The default methods compute Least Squares Euclidean consensus clusterings, i.e., use Euclidean dissimilarity~$d$ and $e = 2$. \section{Examples} \label{sec:examples} \subsection{Cassini data} \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} and \cite{cluster:Leisch:1999} use Cassini data sets to illustrate how e.g.\ suitable aggregation of base $k$-means results can reveal underlying non-convex structure which cannot be found by the base algorithm. 
Such data sets contain points in 2-dimensional space drawn from the uniform distribution on 3 structures, with the two ``outer'' ones banana-shaped and the ``middle'' one a circle, and can be obtained by function~\code{mlbench.cassini()} in package~\pkg{mlbench}~\citep{cluster:Leisch+Dimitriadou:2005}. Package~\pkg{clue} contains the data sets \code{Cassini} and \code{CKME}, which are an instance of a 1000-point Cassini data set, and a cluster ensemble of 50 $k$-means partitions of the data set into three classes, respectively. The data set is shown in Figure~\ref{fig:Cassini}. <>= data("Cassini") plot(Cassini$x, col = as.integer(Cassini$classes), xlab = "", ylab = "") @ % $ \begin{figure} \centering <>= <> @ % \caption{The Cassini data set.} \label{fig:Cassini} \end{figure} Figure~\ref{fig:CKME} gives a dendrogram of the Euclidean dissimilarities of the elements of the $k$-means ensemble. <>= data("CKME") plot(hclust(cl_dissimilarity(CKME)), labels = FALSE) @ % \begin{figure} \centering <>= <> @ % \caption{A dendrogram of the Euclidean dissimilarities of 50 $k$-means partitions of the Cassini data into 3 classes.} \label{fig:CKME} \end{figure} We can see that there are large groups of essentially identical $k$-means solutions. We can gain more insight by inspecting representatives of these three groups, or by computing the medoid of the ensemble <<>>= m1 <- cl_medoid(CKME) table(Medoid = cl_class_ids(m1), "True Classes" = Cassini$classes) @ % $ and inspecting it (Figure~\ref{fig:Cassini-medoid}): <>= plot(Cassini$x, col = cl_class_ids(m1), xlab = "", ylab = "") @ % $ \begin{figure} \centering <>= <> @ % \caption{Medoid of the Cassini $k$-means ensemble.} \label{fig:Cassini-medoid} \end{figure} Flipping this solution top-down gives a second ``typical'' partition. We see that the $k$-means base clusterers cannot resolve the underlying non-convex structure. 
For the least squares consensus of the ensemble, we obtain <<>>= set.seed(1234) m2 <- cl_consensus(CKME) @ % where here and below we set the random seed for reproducibility, noting that one should really use several replicates of the consensus heuristic. This consensus partition has confusion matrix <<>>= table(Consensus = cl_class_ids(m2), "True Classes" = Cassini$classes) @ % $ and class details as displayed in Figure~\ref{fig:Cassini-mean}: <>= plot(Cassini$x, col = cl_class_ids(m2), xlab = "", ylab = "") @ % $ \begin{figure} \centering <>= <> @ % \caption{Least Squares Consensus of the Cassini $k$-means ensemble.} \label{fig:Cassini-mean} \end{figure} This has drastically improved performance, and almost perfect recovery of the two outer shapes. In fact, \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} show that almost perfect classification can be obtained by suitable combinations of different base clusterers ($k$-means, fuzzy $c$-means, and unsupervised fuzzy competitive learning). \subsection{Gordon-Vichi macroeconomic data} \citet[Table~1]{cluster:Gordon+Vichi:2001} provide soft partitions of 21 countries based on macroeconomic data for the years 1975, 1980, 1985, 1990, and 1995. These partitions were obtained using fuzzy $c$-means on measurements of the following variables: the annual per capita gross domestic product (GDP) in USD (converted to 1987 prices); the percentage of GDP provided by agriculture; the percentage of employees who worked in agriculture; and gross domestic investment, expressed as a percentage of the GDP. Table~5 in \cite{cluster:Gordon+Vichi:2001} gives 3-class consensus partitions obtained by applying their models 1, 2, and 3 and the approach in \cite{cluster:Sato+Sato:1994}. The partitions and consensus partitions are available in data sets \code{GVME} and \code{GVME\_Consensus}, respectively. 
Interestingly, we are able to obtain a ``better'' solution, which however agrees with the one reported in the literature with respect to their nearest hard partitions. For the 2-class consensus partition, we obtain <<>>= set.seed(1) m1 <- cl_consensus(GVME, method = "GV1", control = list(k = 2, verbose = TRUE)) @ which is slightly better than the solution reported in \cite{cluster:Gordon+Vichi:2001} <<>>= mean(cl_dissimilarity(GVME, m1, "GV1") ^ 2) m2 <- GVME_Consensus[["MF1/2"]] mean(cl_dissimilarity(GVME, m2, "GV1") ^ 2) @ but in fact agrees with it apart from rounding errors: <<>>= max(abs(cl_membership(m1) - cl_membership(m2))) @ It is interesting to compare these solutions to the Euclidean 2-class consensus partition for the GVME ensemble: <<>>= m3 <- cl_consensus(GVME, control = list(k = 2, verbose = TRUE)) @ This is markedly different from the GV1 consensus partition
(In fact, all these countries end up in the ``middle'' class for the 3-class GV1 consensus partition.) \subsection{Rosenberg-Kim kinship terms data} \cite{cluster:Rosenberg+Kim:1975} describe an experiment where perceived similarities of the kinship terms were obtained from six different ``sorting'' experiments. In one of these, 85 female undergraduates at Rutgers University were asked to sort 15 English terms into classes ``on the basis of some aspect of meaning''. These partitions were printed in \citet[Table~7.1]{cluster:Rosenberg:1982}. Comparison with the original data indicates that the partition data have the ``nephew'' and ``niece'' columns interchanged, which is corrected in data set \code{Kinship82}. \citet[Table~6]{cluster:Gordon+Vichi:2001} provide consensus partitions for these data based on their models 1--3 (available in data set \code{Kinship82\_Consensus}). We compare their results using co-membership dissimilarities (model 3) to ours as obtained by \code{cl\_consensus()} with method \code{"GV3"}. <<>>= data("Kinship82") Kinship82 set.seed(1) m1 <- cl_consensus(Kinship82, method = "GV3", control = list(k = 3, verbose = TRUE)) @ % This results in a soft partition with average co-membership dissimilarity (the criterion function to be optimized by the consensus partition) of <<>>= mean(cl_dissimilarity(Kinship82, m1, "comem") ^ 2) @ % Again, we compare this to the corresponding consensus solution given in \cite{cluster:Gordon+Vichi:2001}: <<>>= data("Kinship82_Consensus") m2 <- Kinship82_Consensus[["JMF"]] mean(cl_dissimilarity(Kinship82, m2, "comem") ^ 2) @ % Interestingly, again we obtain a (this time only ``slightly'') better solution, with <<>>= cl_dissimilarity(m1, m2, "comem") table(CLUE = cl_class_ids(m1), GV2001 = cl_class_ids(m2)) @ % indicating that the two solutions are reasonably close, even though <<>>= cl_fuzziness(cl_ensemble(m1, m2)) @ % shows that the solution found by \pkg{clue} is ``softer''. 
\subsection{Miller-Nicely consonant phoneme confusion data} \cite{cluster:Miller+Nicely:1955} obtained the data on the auditory confusions of 16 English consonant phonemes by exposing female subjects to a series of syllables consisting of one of the consonants followed by the vowel `a' under 17 different experimental conditions. Data set \code{Phonemes} provides consonant misclassification probabilities (i.e., similarities) obtained from aggregating the six so-called flat-noise conditions in which only the speech-to-noise ratio was varied into a single matrix of misclassification frequencies. These data are used in \cite{cluster:DeSoete:1986} as an illustration of the SUMT approach for finding least squares optimal fits to dissimilarities by ultrametrics. We can reproduce this analysis as follows. <<>>= data("Phonemes") d <- as.dist(1 - Phonemes) @ % (Note that the data set has the consonant misclassification probabilities, i.e., the similarities between the phonemes.) <<>>= u <- ls_fit_ultrametric(d, control = list(verbose = TRUE)) @ % This gives an ultrametric~$u$ for which Figure~\ref{fig:Phonemes} plots the corresponding dendrogram, ``basically'' reproducing Figure~1 in \cite{cluster:DeSoete:1986}. <>= plot(u) @ % \begin{figure} \centering <>= <> @ % \caption{Dendrogram for least squares fit to the Miller-Nicely consonant phoneme confusion data.} \label{fig:Phonemes} \end{figure} We can also compare the least squares fit obtained to that of other hierarchical clusterings of $d$, e.g.\ those obtained by \code{hclust()}. The ``optimal''~$u$ has Euclidean dissimilarity <<>>= round(cl_dissimilarity(d, u), 4) @ % to $d$. For the \code{hclust()} results, we get <<>>= hclust_methods <- c("ward", "single", "complete", "average", "mcquitty") hens <- cl_ensemble(list = lapply(hclust_methods, function(m) hclust(d, m))) names(hens) <- hclust_methods round(sapply(hens, cl_dissimilarity, d), 4) @ % which all exhibit greater Euclidean dissimilarity to $d$ than $u$. 
\item Provide mechanisms to generate cluster ensembles based on reweighting the data set (assuming base clusterers allow for case weights).
@Book{cluster:Barthelemy+Guenoche:1991, author = {Jean-Pierre Barth\'el\'emy and Alain Gu\'enoche}, title = {Trees and Proximity Representations}, publisher = {John Wiley \& Sons}, year = 1991, series = {Wiley-Interscience Series in Discrete Mathematics and Optimization}, address = {Chichester}, note = {{ISBN 0-471-92263-3}}, }
Hubert and Geert de Soete}, title = {Clustering and Classification}, year = 1996, pages = 490, publisher = {World Scientific Publications}, } @Book{cluster:Barthelemy+Guenoche:1991, author = {Jean-Pierry Barth\'el\'emy and Alain Gu\'enoche}, title = {Trees and Proximity Representations}, publisher = {John Wiley \& Sons}, year = 1991, series = {Wiley-Interscience Series in Discrete Mathematics and Optimization}, address = {Chichester}, note = {{ISBN 0-471-92263-3}}, } @Article{cluster:Barthelemy+Leclerc+Monjardet:1986, author = {Jean-Pierre Barth\'el\'emy and Bruno Leclerc and Bernard Monjardet}, title = {On the Use of Ordered Sets in Problems of Comparison and Consensus of Classifications}, journal = {Journal of Classification}, year = 1986, volume = 3, number = 2, pages = {187--224}, doi = {10.1007/BF01894188}, } @Article{cluster:Barthelemy+Mcmorris:1986, author = {Jean-Pierre Barth\'el\'emy and F. R. McMorris}, title = {The Median Procedure for $n$-trees}, year = 1986, journal = {Journal of Classification}, volume = 3, pages = {329--334}, doi = {10.1007/BF01894194}, } @Article{cluster:Barthelemy+Monjardet:1981, author = {Jean-Pierre Barth\'el\'emy and Bernard Monjardet}, title = {The Median Procedure in Cluster Analysis and Social Choice Theory}, journal = {Mathematical Social Sciences}, year = 1981, volume = 1, pages = {235--267}, doi = {10.1016/0165-4896(81)90041-X}, } @TechReport{cluster:Bertsekas+Tseng:1994, author = {Dimitri P. Bertsekas and P. Tseng}, title = {{RELAX-IV}: A Faster Version of the {RELAX} Code for Solving Minimum Cost Flow Problems}, institution = {Massachusetts Institute of Technology}, year = 1994, number = {P-2276}, url = {https://dspace.mit.edu/handle/1721.1/3392}, } @Book{cluster:Bezdek:1981, author = {James C. Bezdek}, title = {Pattern Recognition with Fuzzy Objective Function Algorithms}, publisher = {Plenum}, address = {New York}, year = 1981, } @InCollection{cluster:Boorman+Arabie:1972, author = {Scott A. 
Boorman and Phipps Arabie}, title = {Structural Measures and the Method of Sorting}, booktitle = {Multidimensional Scaling: Theory and Applications in the Behavioral Sciences, 1: Theory}, pages = {225--249}, publisher = {Seminar Press}, year = 1972, editor = {Roger N. Shepard and A. Kimball Romney and Sara Beth Nerlove}, address = {New York}, } @Article{cluster:Boorman+Olivier:1973, author = {Scott A. Boorman and Donald C. Olivier}, title = {Metrics on Spaces of Finite Trees}, journal = {Journal of Mathematical Psychology}, year = 1973, volume = 10, number = 1, pages = {26--59}, doi = {10.1016/0022-2496(73)90003-5}, } @Article{cluster:Breiman:1996, author = {Leo Breiman}, title = {Bagging Predictors}, journal = {Machine Learning}, year = 1996, volume = 24, number = 2, pages = {123--140}, doi = {10.1023/A:1018054314350}, } @Manual{cluster:Buchta+Hahsler:2005, title = {cba: Clustering for Business Analytics}, author = {Christian Buchta and Michael Hahsler}, year = 2005, note = {R package version 0.1-6}, url = {https://CRAN.R-project.org/package=cba}, } @Article{cluster:Buttrey:2005, author = {Samuel E. Buttrey}, title = {Calling the \texttt{lp\_solve} Linear Program Software from {R}, {S-PLUS} and {Excel}}, journal = {Journal of Statistical Software}, year = 2005, volume = 14, number = 4, doi = {10.18637/jss.v014.i04}, } @article{cluster:Carpaneto+Toth:1980, author = {Giorgio Carpaneto and Paolo Toth}, title = {Algorithm 548: Solution of the Assignment Problem}, journal = {ACM Transactions on Mathematical Software}, volume = 6, number = 1, year = 1980, issn = {0098-3500}, pages = {104--111}, doi = {10.1145/355873.355883}, publisher = {ACM Press}, } @Article{cluster:Carroll+Clark+Desarbo:1984, author = {Carroll, J. Douglas and Clark, Linda A. 
and DeSarbo, Wayne S.}, title = {The Representation of Three-way Proximity Data by Single and Multiple Tree Structure Models}, year = 1984, journal = {Journal of Classification}, volume = 1, pages = {25--74}, keywords = {Clustering analysis; Alternating least squares; Discrete optimization}, doi = {10.1007/BF01890116}, } @InCollection{cluster:Carroll+Pruzansky:1980, author = {J. D. Carroll and S. Pruzansky}, title = {Discrete and Hybrid Scaling Models}, booktitle = {Similarity and Choice}, address = {Bern, Switzerland}, publisher = {Huber}, year = 1980, editor = {E. D. Lantermann and H. Feger}, } @Article{cluster:Carroll:1976, author = {Carroll, J. Douglas}, title = {Spatial, Non-spatial and Hybrid Models for Scaling}, year = 1976, journal = {Psychometrika}, volume = 41, pages = {439--464}, keywords = {Multidimensional scaling; Hierarchical tree structure; Clustering; Geometric model; Multivariate data}, doi = {10.1007/BF02296969}, } @TechReport{cluster:Charon+Denoeud+Guenoche:2005, author = {Ir{\`e}ne Charon and Lucile Denoeud and Alain Gu{\'e}noche and Olivier Hudry}, title = {Maximum Transfer Distance Between Partitions}, institution = {Ecole Nationale Sup{\'e}rieure des T{\'e}l{\'e}communications --- Paris}, year = 2005, number = {2005D003}, month = {May}, note = {ISSN 0751-1345 ENST D}, } @Article{cluster:Charon+Denoeud+Guenoche:2006, author = {Ir{\`e}ne Charon and Lucile Denoeud and Alain Gu{\'e}noche and Olivier Hudry}, title = {Maximum Transfer Distance Between Partitions}, journal = {Journal of Classification}, year = 2006, volume = 23, number = 1, pages = {103-121}, month = {June}, doi = {10.1007/s00357-006-0006-2}, } @Article{cluster:Day:1981, author = {William H. E. Day}, title = {The Complexity of Computing Metric Distances Between Partitions}, journal = {Mathematical Social Sciences}, year = 1981, volume = 1, pages = {269--287}, doi = {10.1016/0165-4896(81)90042-1}, } @Article{cluster:Day:1986, author = {William H. E. 
Day}, title = {Foreword: Comparison and Consensus of Classifications}, journal = {Journal of Classification}, year = 1986, volume = 3, pages = {183--185}, doi = {10.1007/BF01894187}, } @Article{cluster:Day:1987, author = {Day, William H. E.}, title = {Computational Complexity of Inferring Phylogenies from Dissimilarity Matrices}, year = 1987, journal = {Bulletin of Mathematical Biology}, volume = 49, pages = {461--467}, doi = {10.1007/BF02458863}, } @Article{cluster:DeSoete+Carroll+Desarbo:1987, author = {De Soete, Geert and Carroll, J. Douglas and DeSarbo, Wayne S.}, title = {Least Squares Algorithms for Constructing Constrained Ultrametric and Additive Tree Representations of Symmetric Proximity Data}, year = 1987, journal = {Journal of Classification}, volume = 4, pages = {155--173}, keywords = {Hierarchical clustering; Classification}, doi = {10.1007/BF01896984}, } @Article{cluster:DeSoete+Desarbo+Furnas:1984, author = {De Soete, Geert and DeSarbo, Wayne S. and Furnas, George W. and Carroll, J. 
Douglas}, title = {The Estimation of Ultrametric and Path Length Trees from Rectangular Proximity Data}, year = 1984, journal = {Psychometrika}, volume = 49, pages = {289--310}, keywords = {Cluster analysis}, doi = {10.1007/BF02306021}, } @Article{cluster:DeSoete:1983, author = {De Soete, Geert}, title = {A Least Squares Algorithm for Fitting Additive Trees to Proximity Data}, year = 1983, journal = {Psychometrika}, volume = 48, pages = {621--626}, keywords = {Clustering}, doi = {10.1007/BF02293884}, } @Article{cluster:DeSoete:1984, author = {Geert de Soete}, title = {Ultrametric Tree Representations of Incomplete Dissimilarity Data}, journal = {Journal of Classification}, year = 1984, volume = 1, pages = {235--242}, doi = {10.1007/BF01890124}, } @Article{cluster:DeSoete:1986, author = {Geert de Soete}, title = {A Least Squares Algorithm for Fitting an Ultrametric Tree to a Dissimilarity Matrix}, journal = {Pattern Recognition Letters}, year = 1986, volume = 2, pages = {133--137}, doi = {10.1016/0167-8655(84)90036-9}, } @Manual{cluster:Dimitriadou+Hornik+Leisch:2005, title = {e1071: Misc Functions of the Department of Statistics (e1071), TU Wien}, author = {Evgenia Dimitriadou and Kurt Hornik and Friedrich Leisch and David Meyer and Andreas Weingessel}, year = 2005, note = {R package version 1.5-7}, url = {https://CRAN.R-project.org/package=e1071}, } @Article{cluster:Dimitriadou+Weingessel+Hornik:2002, author = {Evgenia Dimitriadou and Andreas Weingessel and Kurt Hornik}, title = {A Combination Scheme for Fuzzy Clustering}, journal = {International Journal of Pattern Recognition and Artificial Intelligence}, year = 2002, volume = 16, number = 7, pages = {901--912}, doi = {10.1142/S0218001402002052}, } @Manual{cluster:Dimitriadou:2005, title = {cclust: Convex Clustering Methods and Clustering Indexes}, author = {Evgenia Dimitriadou}, year = 2005, note = {R package version 0.6-12}, url = {https://CRAN.R-project.org/package=cclust}, } 
@Article{cluster:Dudoit+Fridlyand:2002, author = {Sandrine Dudoit and Jane Fridlyand}, title = {A Prediction-based Resampling Method for Estimating the Number of Clusters in a Dataset}, journal = {Genome Biology}, year = 2002, volume = 3, number = 7, pages = {1--21}, doi = {10.1186/gb-2002-3-7-research0036}, } @Article{cluster:Dudoit+Fridlyand:2003, author = {Sandrine Dudoit and Jane Fridlyand}, title = {Bagging to Improve the Accuracy of a Clustering Procedure}, journal = {Bioinformatics}, year = 2003, volume = 19, number = 9, pages = {1090--1099}, doi = {10.1093/bioinformatics/btg038}, } @InProceedings{cluster:Fern+Brodley:2004, author = {Xiaoli Zhang Fern and Carla E. Brodley}, title = {Solving Cluster Ensemble Problems by Bipartite Graph Partitioning}, booktitle = {ICML '04: Twenty-first International Conference on Machine Learning}, year = 2004, isbn = {1-58113-828-5}, location = {Banff, Alberta, Canada}, doi = {10.1145/1015330.1015414}, publisher = {ACM Press}, } @comment address = {New York, NY, USA}, @Book{cluster:Fiacco+McCormick:1968, author = {Anthony V. Fiacco and Garth P. McCormick}, title = {Nonlinear Programming: Sequential Unconstrained Minimization Techniques}, publisher = {John Willey \& Sons}, year = 1968, address = {New York}, } @Article{cluster:Forgy:1965, author = {Forgy, E. W.}, title = {Cluster Analysis of Multivariate Data: Efficiency vs Interpretability of Classifications}, journal = {Biometrics}, year = 1965, volume = 21, pages = {768--769}, } @Article{cluster:Fowlkes+Mallows:1983a, author = {Fowlkes, E. B. and Mallows, C. L.}, title = {A Method for Comparing Two Hierarchical Clusterings}, year = 1983, journal = {Journal of the American Statistical Association}, volume = 78, pages = {553--569}, keywords = {Similarity; Graphics}, doi = {10.1080/01621459.1983.10478008}, } @Article{cluster:Fowlkes+Mallows:1983b, author = {Fowlkes, E. B. and Mallows, C. 
L.}, title = {Reply to Comments on ``{A} Method for Comparing Two Hierarchical Clusterings''}, year = 1983, journal = {Journal of the American Statistical Association}, volume = 78, pages = {584--584}, } @Manual{cluster:Fraley+Raftery+Wehrens:2005, title = {mclust: Model-based Cluster Analysis}, author = {Chris Fraley and Adrian E. Raftery and Ron Wehrens}, year = 2005, note = {R package version 2.1-11}, url = {https://CRAN.R-project.org/package=mclust}, } @TechReport{cluster:Fraley+Raftery:2002, author = {Chris Fraley and Adrian E. Raftery}, title = {{MCLUST}: Software for Model-based Clustering, Discriminant Analysis, and Density Estimation}, institution = {Department of Statistics, University of Washington}, year = 2002, number = 415, month = {October}, url = {ftp://ftp.u.washington.edu/public/mclust/tr415.pdf}, } @Article{cluster:Fraley+Raftery:2003, author = {Chris Fraley and Adrian E. Raftery}, title = {Enhanced Model-based Clustering, Density Estimation, and Discriminant Analysis Software: {MCLUST}}, year = 2003, journal = {Journal of Classification}, volume = 20, number = 2, pages = {263--286}, keywords = {clustering software; Mixture models; Cluster analysis; supervised classification; unsupervised classification; software abstract}, doi = {10.1007/s00357-003-0015-3}, } @InProceedings{cluster:Fred+Jain:2002, author = {Ana L. N. Fred and Anil K. Jain}, title = {Data Clustering Using Evidence Accumulation}, booktitle = {Proceedings of the 16th International Conference on Pattern Recognition (ICPR 2002)}, pages = {276--280}, year = 2002, doi = {10.1109/ICPR.2002.1047450}, } @Article{cluster:Friedman+Hastie+Tibshirani:2000, author = {Jerome Friedman and Travor Hastie and Robert Tibshirani}, title = {Additive Logistic Regression: A Statistical View of Boosting}, journal = {The Annals of Statistics}, year = 2000, volume = 28, number = 2, pages = {337--407}, doi = {10.1214/aos/1016218223}, } @Book{cluster:Garey+Johnson:1979, author = {M. R. Garey and D. S. 
Johnson}, title = {Computers and Intractability: A Guide to the Theory of {NP}-Completeness}, address = {San Francisco}, publisher = {W. H. Freeman}, year = 1979, } @Article{cluster:Gaul+Schader:1988, author = {Wolfgang Gaul and Manfred Schader}, title = {Clusterwise Aggregation of Relations}, journal = {Applied Stochastic Models and Data Analysis}, year = 1988, volume = 4, pages = {273--282}, doi = {10.1002/asm.3150040406}, } @Manual{cluster:Gentleman+Whalen:2005, author = {Robert Gentleman and Elizabeth Whalen}, title = {graph: A Package to Handle Graph Data Structures}, year = 2005, note = {R package version 1.5.9}, url = {https://www.bioconductor.org/}, } @Article{cluster:Gordon+Vichi:1998, author = {Gordon, A. D. and Vichi, M.}, title = {Partitions of Partitions}, year = 1998, journal = {Journal of Classification}, volume = 15, pages = {265--285}, keywords = {Classification}, doi = {10.1007/s003579900034}, } @Article{cluster:Gordon+Vichi:2001, author = {Gordon, A. D. and Vichi, M.}, title = {Fuzzy Partition Models for Fitting a Set of Partitions}, year = 2001, journal = {Psychometrika}, volume = 66, number = 2, pages = {229--248}, keywords = {Classification; Cluster analysis; consensus fuzzy partition; membership function; three-way data}, doi = {10.1007/BF02294837}, } @Article{cluster:Gordon:1996, author = {Gordon, A. D.}, title = {A Survey of Constrained Classification}, year = 1996, journal = {Computational Statistics \& Data Analysis}, volume = 21, pages = {17--29}, keywords = {Model selection}, doi = {10.1016/0167-9473(95)00005-4}, } @Book{cluster:Gordon:1999, author = {A. D. Gordon}, title = {Classification}, address = {Boca Raton, Florida}, publisher = {Chapman \& Hall/CRC}, year = 1999, pages = 256, edition = {2nd}, } @Article{cluster:Grundel+Oliveira+Pardalos:2005, author = {Don Grundel and Carlos A.S. Oliveira and Panos M. 
Pardalos and Eduardo Pasiliao}, title = {Asymptotic Results for Random Multidimensional Assignment Problems}, journal = {Computational Optimization and Applications}, year = 2005, volume = 31, number = 3, pages = {275--293}, doi = {10.1007/s10589-005-3227-0}, } @Article{cluster:Guha+Rastogi+Shim:2000, author = {Sudipto Guha and Rajeev Rastogi and Kyuseok Shim}, title = {{ROCK}: A Robust Clustering Algorithm for Categorical Attributes}, journal = {Information Systems}, year = 2000, volume = 25, number = 5, pages = {345--366}, doi = {10.1016/S0306-4379(00)00022-3}, } @Article{cluster:Gusfield:2002, author = {Dan Gusfield}, title = {Partition-Distance: A Problem and Class of Perfect Graphs Arising in Clustering}, journal = {Information Processing Letters}, year = 2002, volume = 82, pages = {159--164}, doi = {10.1016/S0020-0190(01)00263-0}, } @Manual{cluster:Hansen:2005, title = {optmatch: Functions for Optimal Matching}, author = {Ben B. Hansen}, year = 2005, note = {R package version 0.1-3}, url = {https://CRAN.R-project.org/package=optmatch}, } @Article{cluster:Hartigan+Wong:1979, author = {Hartigan, J. A. and Wong, M. 
A.}, title = {A $K$-Means Clustering Algorithm}, journal = {Applied Statistics}, year = 1979, volume = 28, pages = {100--108}, doi = {10.2307/2346830}, } @Article{cluster:Hoeting+Madigan+Raftery:1999, author = {Jennifer Hoeting and David Madigan and Adrian Raftery and Chris Volinsky}, title = {Bayesian Model Averaging: A Tutorial}, journal = {Statistical Science}, year = 1999, volume = 14, pages = {382--401}, doi = {10.1214/ss/1009212519}, } @Manual{cluster:Hornik+Hothorn+Karatzoglou:2006, title = {RWeka: {R/Weka} Interface}, author = {Kurt Hornik and Torsten Hothorn and Alexandros Karatzoglou}, year = 2006, note = {R package version 0.2-0}, url = {https://CRAN.R-project.org/package=RWeka}, } @InProceedings{cluster:Hornik:2005a, author = {Kurt Hornik}, title = {Cluster Ensembles}, booktitle = {Classification -- The Ubiquitous Challenge}, pages = {65--72}, year = 2005, editor = {Claus Weihs and Wolfgang Gaul}, publisher = {Springer-Verlag}, note = {Proceedings of the 28th Annual Conference of the Gesellschaft f{\"u}r Klassifikation e.V., University of Dortmund, March 9--11, 2004}, } @comment address = {Heidelberg}, @Article{cluster:Hornik:2005b, author = {Kurt Hornik}, title = {A {CLUE} for {CLUster Ensembles}}, year = 2005, journal = {Journal of Statistical Software}, volume = 14, number = 12, month = {September}, doi = {10.18637/jss.v014.i12}, } @Misc{cluster:Hubert+Arabie+Meulman:2004, author = {Lawrence Hubert and Phipps Arabie and Jacqueline Meulman}, title = {The Structural Representation of Proximity Matrices With {MATLAB}}, year = 2004, url = {http://cda.psych.uiuc.edu/srpm_mfiles/}, } @Book{cluster:Hubert+Arabie+Meulman:2006, author = {Lawrence Hubert and Phipps Arabie and Jacqueline Meulman}, title = {The Structural Representation of Proximity Matrices With {MATLAB}}, publisher = {SIAM}, address = {Philadelphia}, year = 2006, doi = {10.1137/1.9780898718355}, } @Article{cluster:Hubert+Arabie:1985, author = {Hubert, Lawrence and Arabie, Phipps}, title = 
{Comparing Partitions}, year = 1985, journal = {Journal of Classification}, volume = 2, pages = {193--218}, keywords = {Agreement; Association measure; Consensus index}, doi = {10.1007/bf01908075}, } @Article{cluster:Hubert+Arabie:1994, author = {Hubert, Lawrence and Arabie, Phipps}, title = {The Analysis of Proximity Matrices through Sums of Matrices Having (anti-) {R}obinson Forms}, year = 1994, journal = {British Journal of Mathematical and Statistical Psychology}, volume = 47, pages = {1--40}, doi = {10.1111/j.2044-8317.1994.tb01023.x}, } @Article{cluster:Hubert+Arabie:1995, author = {Hubert, Lawrence and Arabie, Phipps}, title = {Iterative Projection Strategies for the Least Squares Fitting of Tree Structures to Proximity Data}, year = 1995, journal = {British Journal of Mathematical and Statistical Psychology}, volume = 48, pages = {281--317}, keywords = {Graph theory}, doi = {10.1111/j.2044-8317.1995.tb01065.x}, } @Article{cluster:Hubert+Baker:1978, author = {Hubert, Lawrence J. and Baker, Frank B.}, title = {Evaluating the Conformity of Sociometric Measurements}, year = 1978, journal = {Psychometrika}, volume = 43, pages = {31--42}, keywords = {Permutation test; Nonparametric test}, doi = {10.1007/BF02294087}, } @Article{cluster:Hutchinson:1989, author = {Hutchinson, J. Wesley}, title = {{NETSCAL}: {A} Network Scaling Algorithm for Nonsymmetric Proximity Data}, year = 1989, journal = {Psychometrika}, volume = 54, pages = {25--51}, keywords = {Similarity; Graph theory}, doi = {10.1007/BF02294447}, } @Article{cluster:Karatzoglou+Smola+Hornik:2004, title = {kernlab -- An {S4} Package for Kernel Methods in {R}}, author = {Alexandros Karatzoglou and Alex Smola and Kurt Hornik and Achim Zeileis}, journal = {Journal of Statistical Software}, year = 2004, volume = 11, number = 9, pages = {1--20}, doi = {10.18637/jss.v011.i09}, } @Article{cluster:Katz+Powell:1953, author = {L. Katz and J. H. 
Powell}, title = {A Proposed Index of the Conformity of one Sociometric Measurement to Another}, journal = {Psychometrika}, year = 1953, volume = 18, pages = {249--256}, doi = {10.1007/BF02289063}, } @Book{cluster:Kaufman+Rousseeuw:1990, author = {Kaufman, Leonard and Rousseeuw, Peter J.}, title = {Finding Groups in Data: An Introduction to Cluster Analysis}, year = 1990, pages = 342, publisher = {John Wiley \& Sons}, } @Article{cluster:Klauer+Carroll:1989, author = {Klauer, K. C. and Carroll, J. D.}, title = {A Mathematical Programming Approach to Fitting General Graphs}, year = 1989, journal = {Journal of Classification}, volume = 6, pages = {247--270}, keywords = {Multivariate analysis; Proximity data}, doi = {10.1007/BF01908602}, } @Article{cluster:Klauer+Carroll:1991, author = {Klauer, K. C. and Carroll, J. O.}, title = {A Comparison of Two Approaches to Fitting Directed Graphs to Nonsymmetric Proximity Measures}, year = 1991, journal = {Journal of Classification}, volume = 8, pages = {251--268}, keywords = {Clustering}, doi = {10.1007/BF02616242}, } @Article{cluster:Krieger+Green:1999, author = {Abba M. Krieger and Paul E. Green}, title = {A Generalized {Rand}-index Method for Consensus Clustering of Separate Partitions of the Same Data Base}, journal = {Journal of Classification}, year = 1999, volume = 16, pages = {63--89}, doi = {10.1007/s003579900043}, } @Article{cluster:Krivanek+Moravek:1986, author = {M. Krivanek and J. Moravek}, title = {{NP}-hard Problems in Hierarchical Tree Clustering}, journal = {Acta Informatica}, year = 1986, volume = 23, pages = {311--323}, doi = {10.1007/BF00289116}, } @InProceedings{cluster:Krivanek:1986, author = {Krivanek, Mirko}, title = {On the Computational Complexity of Clustering}, year = 1986, booktitle = {Data Analysis and Informatics 4}, editor = {Diday, E. and Escoufier, Y. and Lebart, L. and Pages, J. and Schektman, Y. 
and Tomassone, R.}, publisher = {Elsevier/North-Holland}, pages = {89--96}, } @comment address = {Amsterdam}, @Article{cluster:Lange+Roth+Braun:2004, author = {Tilman Lange and Volker Roth and Mikio L. Braun and Joachim M. Buhmann}, title = {Stability-Based Validation of Clustering Solutions}, journal = {Neural Computation}, year = 2004, volume = 16, number = 6, pages = {1299--1323}, doi = {10.1162/089976604773717621}, } @Manual{cluster:Leisch+Dimitriadou:2005, title = {mlbench: Machine Learning Benchmark Problems}, author = {Friedrich Leisch and Evgenia Dimitriadou}, year = 2005, note = {R package version 1.0-1}, url = {https://CRAN.R-project.org/package=mlbench}, } @TechReport{cluster:Leisch:1999, author = {Friedrich Leisch}, title = {Bagged Clustering}, institution = {SFB ``Adaptive Information Systems and Modeling in Economics and Management Science''}, year = 1999, type = {Working Paper}, number = 51, month = {August}, url = {https://epub.wu.ac.at/id/eprint/1272}, } @Article{cluster:Leisch:2004, title = {{FlexMix}: A General Framework for Finite Mixture Models and Latent Class Regression in {R}}, author = {Friedrich Leisch}, journal = {Journal of Statistical Software}, year = 2004, volume = 11, number = 8, doi = {10.18637/jss.v011.i08}, } @Manual{cluster:Leisch:2005, author = {Friedrich Leisch}, title = {flexclust: Flexible Cluster Algorithms}, note = {R package 0.7-0}, year = 2005, url = {https://CRAN.R-project.org/package=flexclust}, } @Article{cluster:Leisch:2006a, author = {Friedrich Leisch}, title = {A Toolbox for $K$-Centroids Cluster Analysis}, journal = {Computational Statistics and Data Analysis}, year = 2006, volume = 51, number = 2, pages = {526--544}, doi = {10.1016/j.csda.2005.10.006}, } @Unpublished{cluster:Lloyd:1957, author = {Lloyd, S. P.}, title = {Least Squares Quantization in {PCM}}, note = {Technical Note, Bell Laboratories}, year = 1957, } @Article{cluster:Lloyd:1982, author = {Lloyd, S. 
P.}, title = {Least Squares Quantization in {PCM}}, journal = {IEEE Transactions on Information Theory}, year = 1982, volume = 28, pages = {128--137}, doi = {10.1109/TIT.1982.1056489}, } @Article{cluster:Margush+Mcmorris:1981, author = {T. Margush and F. R. McMorris}, title = {Consensus $n$-Trees}, journal = {Bulletin of Mathematical Biology}, year = 1981, volume = 43, number = 2, pages = {239--244}, doi = {10.1007/BF02459446}, } @InProceedings{cluster:Meila:2003, author = {Marina Meila}, title = {Comparing Clusterings by the Variation of Information}, booktitle = {Learning Theory and Kernel Machines}, editor = {Bernhard Sch{\"o}lkopf and Manfred K. Warmuth}, series = {Lecture Notes in Computer Science}, publisher = {Springer-Verlag}, volume = 2777, year = 2003, pages = {173--187}, ee = {http://springerlink.metapress.com/openurl.asp?genre=article&issn=0302-9743&volume=2777&spage=173}, bibsource = {DBLP, http://dblp.uni-trier.de}, } @comment address = {Heidelberg}, @Article{cluster:Messatfa:1992, author = {Messatfa, H.}, title = {An Algorithm to Maximize the Agreement Between Partitions}, year = 1992, journal = {Journal of Classification}, volume = 9, pages = {5--15}, keywords = {Association; Contingency table}, doi = {10.1007/BF02618465}, } @Article{cluster:Miller+Nicely:1955, author = {G. A. Miller and P. E. Nicely}, title = {An Analysis of Perceptual Confusions Among some {English} Consonants}, journal = {Journal of the Acoustical Society of America}, year = 1955, volume = 27, pages = {338--352}, doi = {10.1121/1.1907526}, } @Book{cluster:Mirkin:1996, author = {Boris G. 
Mirkin}, title = {Mathematical Classification and Clustering}, year = 1996, pages = 428, publisher = {Kluwer Academic Publishers Group}, } @Article{cluster:Monti+Tamayo+Mesirov:2003, author = {Stefano Monti and Pablo Tamayo and Jill Mesirov and Todd Golub}, title = {Consensus Clustering: A Resampling-based Method for Class Discovery and Visualization of Gene Expression Microarray Data}, journal = {Machine Learning}, volume = 52, number = {1--2}, year = 2003, issn = {0885-6125}, pages = {91--118}, publisher = {Kluwer Academic Publishers}, address = {Hingham, MA, USA}, doi = {10.1023/A:1023949509487}, } @Article{cluster:Oliveira+Pardalos:2004, author = {Carlos A. S. Oliveira and Panos M. Pardalos}, title = {Randomized Parallel Algorithms for the Multidimensional Assignment Problem}, journal = {Applied Numerical Mathematics}, year = 2004, volume = 49, number = 1, pages = {117--133}, month = {April}, doi = {10.1016/j.apnum.2003.11.014}, } @Book{cluster:Papadimitriou+Steiglitz:1982, author = {Christos Papadimitriou and Kenneth Steiglitz}, title = {Combinatorial Optimization: Algorithms and Complexity}, publisher = {Prentice Hall}, year = 1982, address = {Englewood Cliffs}, } @Manual{cluster:R:2005, title = {R: A Language and Environment for Statistical Computing}, author = {{R Development Core Team}}, organization = {R Foundation for Statistical Computing}, address = {Vienna, Austria}, year = 2005, note = {{ISBN} 3-900051-07-0}, url = {https://www.R-project.org/}, } @article{cluster:Rajski:1961, author = {C. Rajski}, title = {A Metric Space of Discrete Probability Distributions}, journal = {Information and Control}, year = 1961, volume = 4, number = 4, pages = {371--377}, doi = {10.1016/S0019-9958(61)80055-7}, } @Article{cluster:Rand:1971, author = {William M. 
Rand}, title = {Objective Criteria for the Evaluation of Clustering Methods}, journal = {Journal of the American Statistical Association}, year = 1971, volume = 66, number = 336, pages = {846--850}, keywords = {Pattern recognition}, doi = {10.2307/2284239}, } @Article{cluster:Rosenberg+Kim:1975, author = {S. Rosenberg and M. P. Kim}, title = {The Method of Sorting as a Data-Gathering Procedure in Multivariate Research}, journal = {Multivariate Behavioral Research}, year = 1975, volume = 10, pages = {489--502}, doi = {10.1207/s15327906mbr1004_7}, } @InCollection{cluster:Rosenberg:1982, author = {S. Rosenberg}, title = {The Method of Sorting in Multivariate Research with Applications Selected from Cognitive Psychology and Person Perception}, booktitle = {Multivariate Applications in the Social Sciences}, pages = {117--142}, address = {Hillsdale, New Jersey}, publisher = {Erlbaum}, year = 1982, editor = {N. Hirschberg and L. G. Humphreys}, } @InProceedings{cluster:Roth+Lange+Braun:2002, author = {Volker Roth and Tilman Lange and Mikio Braun and Joachim M. Buhmann}, title = {A Resampling Approach to Cluster Validation}, booktitle = {{COMPSTAT} 2002 -- Proceedings in Computational Statistics}, pages = {123--128}, year = 2002, editor = {Wolfgang H{\"a}rdle and Bernd R{\"o}nz}, publisher = {Physika Verlag}, note = {ISBN 3-7908-1517-9}, } @comment address = {Heidelberg, Germany}, @Manual{cluster:Rousseeuw+Struyf+Hubert:2005, title = {cluster: Functions for Clustering (by Rousseeuw et al.)}, author = {Peter Rousseeuw and Anja Struyf and Mia Hubert and Martin Maechler}, year = 2005, note = {R package version 1.9.8}, url = {https://CRAN.R-project.org/package=cluster}, } @InCollection{cluster:Roux:1988, author = {M. Roux}, title = {Techniques of Approximation for Building Two Tree Structures}, booktitle = {Recent Developments in Clustering and Data Analysis}, pages = {151--170}, publisher = {Academic Press}, year = 1988, editor = {C. Hayashi and E. Diday and M. Jambu and N. 
Ohsumi}, address = {New York}, } @article{cluster:Rubin:1967, author = {Jerrold Rubin}, title = {Optimal Classification into Groups: An Approach for Solving the Taxonomy Problem}, journal = {Journal of Theoretical Biology}, year = 1967, volume = 15, number = 1, pages = {103--144}, doi = {10.1016/0022-5193(67)90046-X}, } @Article{cluster:Sato+Sato:1994, author = {M. Sato and Y. Sato}, title = {On a Multicriteria Fuzzy Clustering Method for 3-way Data}, journal = {International Journal of Uncertainty, Fuzziness and Knowledge-based Systems}, year = 1994, volume = 2, pages = {127--142}, doi = {10.1142/S0218488594000122}, } @Article{cluster:Smith:2000, author = {Smith, Thomas J.}, title = {${L}_1$ Optimization under Linear Inequality Constraints}, year = 2000, journal = {Journal of Classification}, volume = 17, number = 2, pages = {225--242}, keywords = {$L_1$-norm; Ultrametric; stuctural representation}, doi = {10.1007/s003570000020}, } @Article{cluster:Smith:2001, author = {Smith, Thomas J.}, title = {Constructing Ultrametric and Additive Trees Based on the ${L}_1$ Norm}, year = 2001, journal = {Journal of Classification}, volume = 18, number = 2, pages = {185--207}, keywords = {iteratively re-weighted iterative projection (IRIP); Combinatorial probability; explicit machine computation; Combinatorics; Trees; Graph theory; Linear regression; probabilistic Monte Carlo methods}, doi = {10.1007/s00357-001-0015-0}, } @Article{cluster:Sokal+Rohlf:1962, author = {R. R. Sokal and F. J. 
Rohlf}, title = {The Comparisons of Dendrograms by Objective Methods}, journal = {Taxon}, year = 1962, volume = 11, pages = {33--40}, doi = {10.2307/1217208}, } @Article{cluster:Strehl+Ghosh:2003a, author = {Alexander Strehl and Joydeep Ghosh}, title = {Cluster Ensembles -- {A} Knowledge Reuse Framework for Combining Multiple Partitions}, journal = {Journal of Machine Learning Research}, volume = 3, year = 2003, issn = {1533-7928}, pages = {583--617}, publisher = {MIT Press}, url = {https://www.jmlr.org/papers/volume3/strehl02a/strehl02a.pdf}, } @Article{cluster:Strehl+Ghosh:2003b, author = {Alexander Strehl and Joydeep Ghosh}, title = {Relationship-based Clustering and Visualization for High-Dimensional Data Mining}, journal = {{INFORMS} Journal on Computing}, year = 2003, volume = 15, issue = 2, pages = {208--230}, ISSN = {1526-5528}, doi = {10.1287/ijoc.15.2.208.14448}, } @Article{cluster:Struyf+Hubert+Rousseeuw:1996, author = {Anja Struyf and Mia Hubert and Peter Rousseeuw}, title = {Clustering in an Object-Oriented Environment}, journal = {Journal of Statistical Software}, year = 1996, volume = 1, number = 4, doi = {10.18637/jss.v001.i04}, } @Article{cluster:Tibshirani+Walther+Hastie:2001, author = {Tibshirani, Robert and Walther, Guenther and Hastie, Trevor}, title = {Estimating the Number of Clusters in a Data Set Via the Gap Statistic}, year = 2001, journal = {Journal of the Royal Statistical Society, Series B: Statistical Methodology}, volume = 63, number = 2, pages = {411--423}, keywords = {Clustering; groups; Hierarchy; $k$-means; Uniform distribution}, doi = {10.1111/1467-9868.00293}, } @Article{cluster:Tibshirani+Walther:2005, author = {Tibshirani, Robert and Walther, Guenther}, title = {Cluster Validation by Prediction Strength}, year = 2005, journal = {Journal of Computational and Graphical Statistics}, volume = 14, number = 3, pages = {511--528}, keywords = {number of clusters; prediction; Unsupervised learning}, doi = {10.1198/106186005X59243}, } 
@InProceedings{cluster:Topchy+Jain+Punch:2003, author = {A. Topchy and A. Jain and W. Punch}, title = {Combining Multiple Weak Clusterings}, booktitle = {Proceedings of the Third IEEE International Conference on Data Mining (ICDM'03)}, year = 2003, doi = {10.1109/ICDM.2003.1250937}, } @Article{cluster:Vichi:1999, author = {Vichi, Maurizio}, title = {One-mode Classification of a Three-way Data Matrix}, year = 1999, journal = {Journal of Classification}, volume = 16, pages = {27--44}, keywords = {Cluster analysis}, doi = {10.1007/s003579900041}, } @Article{cluster:Wallace:1983, author = {Wallace, David L.}, title = {Comments on ``{A} Method for Comparing Two Hierarchical Clusterings''}, year = 1983, journal = {Journal of the American Statistical Association}, volume = 78, pages = {569--576}, doi = {10.2307/2288118}, } @Inproceedings{cluster:Zhou+Li+Zha:2005, author = {Ding Zhou and Jia Li and Hongyuan Zha}, title = {A New {Mallows} Distance Based Metric for Comparing Clusterings}, booktitle = {ICML '05: Proceedings of the 22nd International Conference on Machine Learning}, year = 2005, isbn = {1-59593-180-5}, pages = {1028--1035}, location = {Bonn, Germany}, doi = {10.1145/1102351.1102481}, publisher = {ACM Press}, address = {New York, NY, USA}, } %%% Local Variables: *** %%% bibtex-maintain-sorted-entries: t *** %%% End: *** clue/data/0000755000175100001440000000000014715042017012132 5ustar hornikusersclue/data/GVME.rda0000644000175100001440000000221414715042020013351 0ustar hornikusersX=LA^PZ\DCLQ p 9= jmC46$&&ZoL Dxȡ6Zhe77}ݶ5і4-b-Myմ2-UzFFM8]xtn;sGbi2T{?3}s8M㟯bz;krBLC?CUbf7^b?U}Iy?@#D Cuާ+FC]WhJFl.t +T3ՑpįtW[өnkf-ٟ ~O~HX\)#=t96#6{z"IK Cǯz˰\ҩbW6c4a7薞o63Ʈ =nu`6 eYiRYpTG|g`s(ve3 -ټ۽6;TnZȧbiV5y dKVEʫG -Be5hl?86oopǍ y*fYxrU& i1VuR} i%HZB B2J$ܠ"&h|շ; ͇$PA_(]/~U)IDLu!5="?6KOՁctX^|yt?# hW EgRtBƶ?3S%m}%3ٜ"THQz9yvP7r*$ U쬿 ldT uGyo<::8({)`@ #ԃ-o7. 
H0?\Au q+zSG<oQf޴apIh,TZBsI%1H A<6g}DB~w`) *d3 'B%Iؘs[BP]7eUeXJrPupnBխ )Cclue/data/Kinship82_Consensus.rda0000644000175100001440000000143314715042020016434 0ustar hornikusersV?LSA?J"LuX\ 4014+}}qEYXHHLN*+޽﻾ MM ǧ⌱F#,bȩu(;'=y|s8uU1 EPn\:?k{ag`A1AI~fkw[%]~oO篬%DyZΈ`x⍧gV^U3!C}}^3?|ߩ0t.ż16C^o;?zU v]owR\*S0doXm!RyM͊kvɑ8w+rr1V1mUSA,]$ڠ"WBכW?^=O:/W+p,>o_<n rqvD k肇Ve<{ea hڀ}XBk,mTBF!q_ OILMlBkh҂Q(׆]K^лF9,N3z#T;a 9z-e(P!LRo[cOJQ}HXz"* -Nx'a"He8.[v4m`Jʭ6v6;8 q|l(r^/^ ?z$WI7Hd@_[=~HTRr4Tm$z.cUtOUd"9j"q\J<h{ap3CTɿ~bQɩM]NyYaCM$?O 1y XgFT NB^|5*?IZN+X1)qjq̽ TFjkNud":r5.bi4e}'HMV6y-Kū2@CO74GA {aZ<4B[`><ǡ'%&nC퉀TB?z_k8Fq}v+kV<))_B X |2{4bgR?j32E= ´,ACy+JȾ2G:㨩o !ANRtl;~RUw tU]ږJ9TfU(> U{ M 64sh(RmSgq `@*20t%v,(I삌TVg;{`Aiv]N: E:H/mj,`YIY;YG\$oQݑҳ*%M*{3hD/*<v{,@s>ro),⽈n!X2Jؾ= >giMw[iiZYsƃt)66I*q#. '3XQۃ_wF唝׷һVz ۼWr ,7Zǀ04%195A! /7ʢC9B*T~ԃsT(T8!G!Pg !4#d[ EJl>IX]|zP3bQ- L~.20SIb^HU ?l_f#~0*3[EϕvV-`?&{R  bX6='"r =Ny!:酳rd<⧡\.C|m]$f:h1/ x0rN3(dub$D߰4=ai&sMV;A@9E߷? -ZZ (|Ɗs/&1Wj'C>SUn'ws!3ELSc.iJw'(<4>BkC je˂VӮ'ѓ!26U1cb2  ESec$K*:=: ZkHRn)N(9Wg\Ƀv{7 5M=v*LDlDbcd&ŪWmO|(3R.T@£,V m6\(Cj?R)}ՃSlu p~mA37aY  ⋠e>Z}럓;` OGL&F#nȹB.(6z2Aݯ%_]\ΡW`6kH( zsF=1kWYdq)U6VCii{֤݁beF.C% (śX]jT:EB`OK}OG8gyČ ^di3$ߌx>| h6$hش%'PhiXx)"Rl N!ʾ t%Л㔞I\q![j J*q\3ޗh{xWO TlyUmwB*;> )ED3db|fM؞:oiUͨ2U)*}|dCE& R~>wݭ.jzk큯T9dInoC7Vn}KM~&:fn ˸wz]7(اQ?$]WEȔdac]?J$t.Pyj>cJgɷZ7U[jCjzl_"zF#S?;QXJ^u5p 4ur?.L W&XPAuHJ-hhhn9a*m!Z޷|y^z{}iQhk@9:Bp>V4E8`=\)Lk/CCETEhї#v(x*D)d_%Ƅb Q׆ $: `ͺ42>Qji,sw+Z.$PuC.iM2^#|>Uu M{2PNÊ+ _C2 V|;h&|bPBe< ~B:0W4M ]? 
VGpپ۽ COfya󑋚G z A0媯phS ~PS& ߹"Pw|=Eefd][>R$q]\%̱`0_sQ\Vm>pnzvgc |ﱃ; uLS:GZ`_ݓ|hGTO%ÓwN ,:f**5}xyXa-Ri]@Ppڷ2꽸PM4|v=3HU(kSSȚ_& Lx06&U؉D;l3v sv$dpLl.O[/ڨDS^R4#7];W01@͝1:9T|ԇyڍV`HþXCĂ@Kjt(|cY@ x=ODJn~}Nz7 ܱb[搖{)U> H䟧ֹC}ֶ1/^&a&~@ <5dH~3q˙+:_Q,.ܓSYIM:x' WbH,N:꾊w 'lhP= =2%l,'b T-v@H+ۣV^eQ`)(& S>0 YZclue/data/Kinship82.rda0000644000175100001440000000235714715042020014402 0ustar hornikusersBZh91AY&SY9ڜRJTT@>@PBHR2h14@`M4a4@S5CɈPh42h14@`M4a2dшbi4&&h 6I< F)I"ii2e5= P =< ,ʓFf rN'|d$ڊ`ImfDX ^%2XшFIl&%mFMbض)>'Dԓ06$O&䓪II$ܥъ >vRLotԷn$Y$eFTbVV2̫#&{:FqCHY Db(Rs3YF'dJS$S$2IؓܤI8aZ qؗe]lRYINN\rW*'(]DE)&P,$ʅa&dI DiN#F7X6]lI ,Lk YYd1fbkoNj#ȼ7ߩ%IiIm$R\ - ' &I2IM$ͼdI=rM&IPLʩK$J"bI$L0RYED2FcS|լƞ쩻Q@?J|RLR$RbTO] ܻoAP| '}B*X…)yڻ[ C)aQkTb,|cnr@b;-Hŕ9S1"uLIH4382*!BH 3v`qҴғ77#LVG^kRڼ1ז M*1,gf>^G]g uh5he00'&1'~7ȄC DZ#HK$_n-I1}YiO$fإzYbYD^+"SsOo):I'IruGВ{Ԙ$&UeIo$\^y-I.)&$ .夰 IIh$$괤6 -z%I}pRlRpN$t|ғ$'I$lRpjIII9I6I4RgnrI;;MdI$v=ѧMs3ޮ4 x:ڑ+/-?^#t螑0a ~NuS7|KQ5aEʖκ^ k, _KwaEu{1ՙ9{SP w(yS乼Q9ͳz˜f'sI~{K(VfZEKO\qAUbQw }qVn.Z H`.k/ޚz酮Kf9ڝ nGauz.ݬBķ.M?UcZ(J4Y#,Du3L"lKDqH~xs{{=nv8g;g.;K4.9Ƹt~1֭i>Nw(=͂K<"}^2 evrϱ;\noL{ WxnteUe`и6){B6?]=K=&gA{Ntm~VbǾY9c\V>_/k%W,mczJ.={ݫV2-EQh^MA$8P .^+P E2 |r!=]C-ʹ@= YbdD^.v"v]c&\M۶Jfh,^TpC VKt0 FpR@>x5"q>6} ,x`ƒt~' !#3gHh!ZHaQTZrדRN,RO+'Hgc 8b%4W|',Gp$$2)&|T1-~IJ00k0%2PtAGIPVMRǕdj#=  8i/b!RHh=].:` }̐z<7$u<@rY\xPIl'm ;&tە,n:څQNӋ( ?{!pC, ;6w?CِU6_*X22z~7 щ5*kV=%IQRQ1!$&&pltDTk1.-V#<ցXGK"{k|kIޣԝ`{@vޫ&~5e>1:z- N3%(?>J*ܮɌtyBVL&'_A'ہ})Ƽׁׂ`gca$IyKQa,qJ9(5A*#AF0Asy+rX i@ɄA_h D|  7ĵ"ҬGeAV!܁٣zuc89X؉AzGe!wЎIlj\z2vBu y6y\O6L_Hw&0!I ev:UVB'IFI2~dt 4txX>1Y߀(3lWP 47Q"՟UXr̵_$3Y 2oL# 9 zapw`F41ȯ Aȓ;>oT!5H̟}ֺl RvLFld[gj,fE*T;&c(\UPd$FFY)D2NTKTQ =<+5C & UV]J#>pc l% iP ' J*28 eGj4|8R'(Dy 0bR %Y) ~_ +/g[8˩%>?+O@~,A&RVpbe y&oI2JQ7 q UU?P0 Yΰ>}rM+P'l:?Xtmf5N m[r*#clue/data/Cassini.rda0000644000175100001440000003170414715042020014212 0ustar hornikusers|gTTK.Iňx D14 H1H01DDQTP,Y`K< S}}k?W=X xxxx9rZ..n.>.N˯iqƅ+x2.Ŝ iTԷE9oXZ::׼fG ,]K^L:?ÔC5pgRFZu/U7o˺nJЭ0wScPՅv%}$]E,9v}տ.#t9/,r;oڒ6Qe+Sxteo 
nt_׉gP=j1[fe4+I1.{כh_z f%xQ{~DN{:9B# ;jlõa^uD <.<,qVB+lqvt[;,\ΈnsqձwL\pKdϪ(a\(҆rN^G֯SP& { h*u^o [;_* m+g3O>ʓ_ZBtmBCNڢbDﲚUw-¥"޼>;5v\6By|LPHeÏ&7k-E \W"ʻ?;ށLST/J ,Q#!L5+}|l=ɓ!9mX8[FeuKE¢hh}&4=ȇemGt#}V˰=s ۍ7CJ' v0}%wPh0F]N"(.̆q.1E}F3=A}}RIPe{/<g#.~j8U%F;L>3_',rB,Yi\~8sb%jTҹKE4ПT9>2v!ye*O gZ+*0]SQcQpKlj\{\8WmPqB4=?ថWJQiMD*v[d  nEuǦ6j\if/Ŭw| Π"ڽFhTL;LO|6>f]5?M]_}0Pa YGGL{OiK;k`O.n9UQy 6h1S[LY.u1{1HJ1ͪ3oo<qE_ʠ<]Lu+ܡ2#w֏1ͪ}o`)G2#rkԙ2s%~UsQ)a PR3ƈvCjѶw(CڒFDo`ځhD)c(+q񿮿U"P©M+nyk1NZ+Z $@u8,<4VH4Y2ASe_^uu?c_D:Yc&/ތ!Veb(Z|In]=m%p4mG)FXKbZ{Q_>m;BhZ<ǩGS03k0 WOe\6V}{P^ԑ^Ag)fR_QjՍPgKt+8@e[ f8 5PPO9w1㴼Z;ϼסw̡MF}|m ' !}!>^}v1DU ?qO!Tlc6j7T 1{eq55K,DNf}Bq3"T0T|"ueYny,%Ì~<`[asٸƹsn( n U˝7ş)7"mjpDW~NLo=oSeO ?sOQ+|Д.t]H܈zu+WA $JUa&E=Q][R354xīfc:]RO#̺j(,gԐ,EH(>Y#T64n*yJh5Px\(Bmn4Y4F~g^t-BVTE QCsE1z-Vb'S6}<uoK^z,<̐r1;67!FzXYo#ś)Ŝ:k}ڌk4vԡOTXh#oQyAsw ڜC3RX}PV돆kpl]0ek^-| *:TA yR>ui, Z]ōvqQCD-3Z/]*+HQAC޸~v8PG^vqoрb+xn+;O.=jӵZ?Q߆D 9[npQ [pHx!=҇VDwtぷ^'ypO`:1԰-v<ZsWfru{wBA'[iX˯6Y]ţ,Pܪo0r+O_?ZsF)n(AR˼v5Uh0JӓtY\tDNn:?C~_U eǸD)b.]@B#m?!Z Gqf^阽κp6+qr{c4#7}+ٿ/u\>%>(\E ^n-rxXv1}Sd>^}g_t*fVQ/ezb_[w bh?8̽9WIt.̳KŒ!Q=Z8Oܵ_K+.?j^"IMxw(S/;({Tpl[xDѢ0wTJ%PܱS&ˏvWtqYޕ?[vXo 9#yw$ZƒΠs\̎Lz_7UZJL "Pユ qh瘺hjxt"2;ߣ-[I:{,~S6$ǯ܎zCymٸcRgvH >)GQА^d*> %=wKRgyʜ)Xp7|an7)Rw^#]Y2^Y<3>ڋ]0ж,_&w<0 ۡn' 3xYڪ!]"q>h"1qF/q= n\PW?<R\+QqpKmL<2@ pGmgxvB׷IʃRuco#?zo0A]S.sK.K2Q%趯ҡw9JEpݟ_|*$.Q7DRx=*Lj%פ'fUc݈K7+߱388PĢP[d`ܶ1Jʧ6RpۧÛ얉(6Zlh25#7q恳&Tm21 "|*7sO# 3,M6^E#WSuoi`&c7ܿLvV0҈,s)"Bf–!~=ԻCfY{w~7\ DH%fS{"Ż([{NG_x Vu7_r ~x 5 &F"gs_:0݃ӅK{2`*f>뽉)+W7kB],#7aT]3WƭqɊe@@y|k ,n;kԷf%9\7؟Z`XLxgbmQgJha4h^G U0OU Lov'N_jU^'_;wH7 ,u6wpT X4<ڪ \a+^VM^E_6l]G:tI9_n3n{/ru9)\Nɳc?9W_x2񻯌כQbW,q{0G uv=7fEERsbQ:L+89WW{d ]GLy=*5U $:F-EiOJJfa (J"zh}YErzo+DъPmO㆓%["f |M2rxxܛS݅e_h4ʾ7>CۨPngc7Yh0=Š`Vᘖ\:}x9e666ɡ-(aBLjʡa;8nZ 8 5D)))f./?Y@_&̙,WʙUPkY jqDl9^hkEmm- U9"AG\)q &&: Msʿ)NpSҪF;`R[-ͼ B_^?8Bi pc?i iu]7 h9%C[ xT^FqFލ'_%S[ANp'Ȉu}χPE+"|նoa=o\U&@_;?É+?}WTSS_7! 
~桼_y1J@OaCt#;wvl`qKwFt:oڶ7z@$Jx-'!Qz׷p_Jz(U |8M":Ŭ |A~Vo>b8" V>N09$+|>9>+?zz xL$]q|'jc;t O~\)GO =WZs ?d}_XiwH~O@_]Zm]ZN|u^/jMצz72+ S\Wa t^yj=x' 8u(-tg KxO[=Wxa}n%H{I[0Om Е~{~?#jV߯-@Wh-]@'W'D8 pGr7$xpzZ|ėC^3 OCKl 7~U))G@O __@jH57 tD6X6kן^ȑ3;v;x4 CG_OP/QzA353ߗ:_G π>W 5*58ttW+вu߂yVt3{:'õ@nWWDp-MyU=гnj?[swߐ]@߭-v''~?3n 87;-SOցSVo|cLsi>"OJJʸ/efO2 @;yy AƘK1M{g>L1~}0O{l~/RM$~< 3̧Wo;,懬#Ƨ |ί9 I0 +.]Bțyܑrw@l+qϞ³ w|#gvuU } Sq˕hq#q&9DDvLx r ~NS{B/k7ȵv2\<d^sԱp^9qR=KnDգxZKNY7u>ROsI?Qwe\j@;)gb:w9io$"r787&~`< us:{&W';BKrҮX8P^㌡1{@+3E W_NQTH>汱>I7"yo ?K}5 "~wۘA I&q6 rߨuPn~Ȣd5#LI$6:"-&c=o%k$Ζz=cΐ dpD)Um'=pRr&g$zoZy]'~6Egu쁖fd m 1%d|B*7S 6caOxob'6F$^6ϐuL1H׵]7^;Nk'yXKÄRG@r{ rawn`9c0յy|d ;^BZg/zw'[kZD#^z9q&[?/&:DK%A3mIaTDykɾ"Yogyb9R^$Fɹf8%xjv NK$3~M/߅I^Dm'`l̳FlHRF*[\Cvak -2J67gR<) ' e)uR9AI^2 YDpQ~Dɗ+U>u]@7f?xwY2;n~MWctg?Ui1;s 2H<]@]Q=E7xq'Tk5vpUF3ٷ$xQ nQ?{O͸]d4o ?dhCeUX-S8'~N <* R~@L*nznð*yڈk,O$ .@ڒ& El75@?VCR>js?;zC TZ>L!י+7W3Lg|)u5&8_$0AP1?\qSaF̣ $h1O$I֙^R$\IJڗZz?95hxt&}Y@,dzWA;mĞ ޤލ%:s$(NqNTb{Yk)/Ro;I[dJp;%u!wRIu9̴`LV=Fe?qd?i#N$f>@FNjݸ_A"wl.OMR}k I?#_Hj5%NڳO&rb ֆcϐ&2nYG cTr$KeJ'ڑd_>"y!6OI^*] |%,-'@?-D~ŗ;$N`mmKRx6:oeiC9K.v3n#c~Rb_9M% &yb7, ?3Z./#IST~(:4]>F5F$ZtQJfo]9RҾď5$.`w» *]G|dzN"؁srzĽv=!֕5 =]&bO߷|n*A gZ9]՚%^>)krqr:ߙkg6r %D=I-YyԟGDo}h?uÝh N #矏c\0ԫW)5ζ$?]I]LTh#zQr [J{{O[kr}"#(M*M{^ܫ\X 麅U@K蚴r=-ʔI7P5vȽpԙU'9JW}!3M@W7|@?yImxh Bp_EN; |(q~P8]oTP+}$)8 tʂoB8mBhSm}l*SOOrϬ982;p: )$U:NY6 :Ƌp; p)f2_NfqS#|T˖Ŵ{eOA\ T KBd6L%E|X5/= 0޼ti,~~/½3+Mx^ݞ#G܉#p@#[7{7/ R@~ʱFⓗ T*~/yg󣁾l$.8"HqAjv/.$OX=ee5Ͼ-5)Mp/gNP E*= 㞤4RGΪ[r}}Q]ES 1^7ǔ>OE8qXZCFpt]U{7vB]*ਖ;%€sQor.W{&l ύ!s) +y'W%_zG%9+fY7C+ghzM੢dПxܬI\ClƹqrSu2Kr.s.H~lPgk8cU?gpS`O!Ν$A^~Wm f({F \;W69Gֿb\>Q#uvI"L랆K'ϿϿϿϿϿ:?/%Lf?bgu4 +ǫ׹WwDߟE+VvrV/qI3dOclue/data/Phonemes.rda0000644000175100001440000000152714715042020014377 0ustar hornikusersV[HTQSP"=)"")ȘFY "⇉E2)"zhKSKS2^Y2DHDғ=9$}70s眽^{3w+}#aͰm- ͫaL2TWgNCO] #!\|4+vXN5*w'WئmKWԧJx\vTj2~,ooj YVfjgf|L&vtvK:(yzWxL tG,y;U #j7Ém3ѵ@ 
rqK3e\Q}4Yv4r]ԏTGRK3e\7Zpx]#nwkb2~BucJAj8o-+:^,Ը=TmB3x3}_#W+<%<>1D8ADΎ|2]BF7œݮ^!am)ND6IdORBhQ;IK(Q4DvOӃ(#(Y-oa~ clue/src/0000755000175100001440000000000014715042017012010 5ustar hornikusersclue/src/assignment.c0000644000175100001440000002370411623271641014334 0ustar hornikusers#include /* error() */ #include "assignment.h" /* main routine */ void ap_hungarian(AP *p) { int n; /* size of problem */ int *ri; /* covered rows */ int *ci; /* covered columns */ time_t start, end; /* timer */ int i, j, ok; start = time(0); n = p->n; p->runs = 0; /* allocate memory */ p->s = calloc(1 + n, sizeof(int)); p->f = calloc(1 + n, sizeof(int)); ri = calloc(1 + n, sizeof(int)); ci = calloc(1 + n, sizeof(int)); if(ri == NULL || ci == NULL || p->s == NULL || p->f == NULL) error("ap_hungarian: could not allocate memory!"); preprocess(p); preassign(p); while(p->na < n){ if(REDUCE == cover(p, ri, ci)) reduce(p, ri, ci); ++p->runs; } end = time(0); p->rtime = end - start; /* check if assignment is a permutation of (1..n) */ for(i = 1; i <= n; i++){ ok = 0; for(j = 1; j <= n; j++) if(p->s[j] == i) ++ok; if(ok != 1) error("ap_hungarian: error in assigment, is not a permutation!"); } /* calculate cost of assignment */ p->cost = 0; for(i = 1; i <= n; i++) p->cost+= p->C[i][p->s[i]]; /* reset result back to base-0 indexing */ for(i = 1; i <= n; i++) p->s[i - 1] = p->s[i] - 1; /* free memory */ free(ri); free(ci); } /* abbreviated interface */ int ap_assignment(AP *p, int *res) { int i; if(p->s == NULL) ap_hungarian(p); for(i = 0; i < p->n; i++) res[i] = p->s[i]; return p->n; } /*******************************************************************/ /* constructors */ /* read data from file */ /*******************************************************************/ AP *ap_read_problem(char *file) { FILE *f; int i,j,c; int m,n; double x; double **t; int nrow,ncol; AP *p; f = fopen(file,"r"); if(f==NULL) return NULL; t = (double **)malloc(sizeof(double*)); m = 0; n = 0; nrow = 0; ncol = 0; while(EOF != 
(i = fscanf(f, "%lf", &x))){ if(i == 1){ if(n == 0){ t = (double **) realloc(t,(m + 1) * sizeof(double *)); t[m] = (double *) malloc(sizeof(double)); }else t[m] = (double *) realloc(t[m], (n + 1) * sizeof(double)); t[m][n++] = x; ncol = (ncol < n) ? n : ncol; c=fgetc(f); if(c == '\n'){ n = 0; ++m; nrow = (nrow < m) ? m : nrow; } } } fclose(f); /* prepare data */ if(nrow != ncol){ /* fprintf(stderr,"ap_read_problem: problem not quadratic\nrows =%d, cols = %d\n",nrow,ncol); */ warning("ap_read_problem: problem not quadratic\nrows = %d, cols = %d\n", nrow, ncol); return NULL; } p = (AP*) malloc(sizeof(AP)); p->n = ncol; p->C = (double **) malloc((1 + nrow)*sizeof(double *)); p->c = (double **) malloc((1 + nrow)*sizeof(double *)); if(p->C == NULL || p->c == NULL) return NULL; for(i = 1; i <= nrow; i++){ p->C[i] = (double *) calloc(ncol + 1, sizeof(double)); p->c[i] = (double *) calloc(ncol + 1, sizeof(double)); if(p->C[i] == NULL || p->c[i] == NULL) return NULL; } for(i = 1; i <= nrow; i++) for( j = 1; j <= ncol; j++){ p->C[i][j] = t[i-1][j-1]; p->c[i][j] = t[i-1][j-1]; } for(i = 0; i < nrow; i++) free(t[i]); free(t); p->cost = 0; p->s = NULL; p->f = NULL; return p; } AP *ap_create_problem_from_matrix(double **t, int n) { int i,j; AP *p; p = (AP*) malloc(sizeof(AP)); if(p == NULL) return NULL; p->n = n; p->C = (double **) malloc((n + 1) * sizeof(double *)); p->c = (double **) malloc((n + 1) * sizeof(double *)); if(p->C == NULL || p->c == NULL) return NULL; for(i = 1; i <= n; i++){ p->C[i] = (double *) calloc(n + 1, sizeof(double)); p->c[i] = (double *) calloc(n + 1, sizeof(double)); if(p->C[i] == NULL || p->c[i] == NULL) return NULL; } for(i = 1; i <= n; i++) for( j = 1; j <= n; j++){ p->C[i][j] = t[i-1][j-1]; p->c[i][j] = t[i-1][j-1]; } p->cost = 0; p->s = NULL; p->f = NULL; return p; } /* read data from vector */ AP *ap_create_problem(double *t, int n) { int i,j; AP *p; p = (AP*) malloc(sizeof(AP)); if(p == NULL) return NULL; p->n = n; p->C = (double **) malloc((n + 
1) * sizeof(double *)); p->c = (double **) malloc((n + 1) * sizeof(double *)); if(p->C == NULL || p->c == NULL) return NULL; for(i = 1; i <= n; i++){ p->C[i] = (double *) calloc(n + 1, sizeof(double)); p->c[i] = (double *) calloc(n + 1, sizeof(double)); if(p->C[i] == NULL || p->c[i] == NULL) return NULL; } for(i = 1; i <= n; i++) for( j = 1; j <= n; j++){ p->C[i][j] = t[n*(j - 1) + i - 1]; p->c[i][j] = t[n*(j - 1) + i - 1]; } p->cost = 0; p->s = NULL; p->f = NULL; return p; } /* destructor */ void ap_free(AP *p) { int i; free(p->s); free(p->f); for(i = 1; i <= p->n; i++){ free(p->C[i]); free(p->c[i]); } free(p->C); free(p->c); free(p); } /* set + get functions */ /* void ap_show_data(AP *p) { int i, j; for(i = 1; i <= p->n; i++){ for(j = 1; j <= p->n; j++) printf("%6.2f ", p->c[i][j]); printf("\n"); } } */ double ap_mincost(AP *p) { if(p->s == NULL) ap_hungarian(p); return p->cost; } int ap_size(AP *p) { return p->n; } int ap_time(AP *p) { return (int) p->rtime; } int ap_iterations(AP *p) { return p->runs; } /* void ap_print_solution(AP *p) { int i; printf("%d itertations, %d secs.\n",p->runs, (int)p->rtime); printf("Min Cost: %10.4f\n",p->cost); for(i = 0; i < p->n; i++) printf("%4d",p->s[i]); printf("\n"); } */ int ap_costmatrix(AP *p, double **m) { int i,j; for(i = 0; i < p->n; i++) for(j = 0; j < p->n; j++) m[i][j] = p->C[i + 1][j + 1]; return p->n; } int ap_datamatrix(AP *p, double **m) { int i,j; for(i = 0; i < p->n; i++) for(j = 0; j < p->n; j++) m[i][j] = p->c[i + 1][j + 1]; return p->n; } /* error reporting */ /* void ap_error(char *message) { fprintf(stderr,"%s\n",message); exit(1); } */ /*************************************************************/ /* these functions are used internally */ /* by ap_hungarian */ /*************************************************************/ int cover(AP *p, int *ri, int *ci) { int *mr, i, r; int n; n = p->n; mr = calloc(1 + p->n, sizeof(int)); /* reset cover indices */ for(i = 1; i <= n; i++){ if(p->s[i] == UNASSIGNED){ 
ri[i] = UNCOVERED; mr[i] = MARKED; } else ri[i] = COVERED; ci[i] = UNCOVERED; } while(TRUE){ /* find marked row */ r = 0; for(i = 1; i <= n; i++) if(mr[i] == MARKED){ r = i; break; } if(r == 0) break; for(i = 1; i <= n; i++) if(p->c[r][i] == 0 && ci[i] == UNCOVERED){ if(p->f[i]){ ri[p->f[i]] = UNCOVERED; mr[p->f[i]] = MARKED; ci[i] = COVERED; }else{ if(p->s[r] == UNASSIGNED) ++p->na; p->f[p->s[r]] = 0; p->f[i] = r; p->s[r] = i; free(mr); return NOREDUCE; } } mr[r] = UNMARKED; } free(mr); return REDUCE; } void reduce(AP *p, int *ri, int *ci) { int i, j, n; double min; n = p->n; /* find minimum in uncovered c-matrix */ min = DBL_MAX; for(i = 1; i <= n; i++) for(j = 1; j <= n; j++) if(ri[i] == UNCOVERED && ci[j] == UNCOVERED){ if(p->c[i][j] < min) min = p->c[i][j]; } /* subtract min from each uncovered element and add it to each element */ /* which is covered twice */ for(i = 1; i <= n; i++) for(j = 1; j <= n; j++){ if(ri[i] == UNCOVERED && ci[j] == UNCOVERED) p->c[i][j]-= min; if(ri[i] == COVERED && ci[j] == COVERED) p->c[i][j]+= min; } } void preassign(AP *p) { int i, j, min, r, c, n, count; int *ri, *ci, *rz, *cz; n = p->n; p->na = 0; /* row and column markers */ ri = calloc(1 + n, sizeof(int)); ci = calloc(1 + n, sizeof(int)); /* row and column counts of zeroes */ rz = calloc(1 + n, sizeof(int)); cz = calloc(1 + n, sizeof(int)); for(i = 1; i <= n; i++){ count = 0; for(j = 1; j <= n; j++) if(p->c[i][j] == 0) ++count; rz[i] = count; } for(i = 1; i <= n; i++){ count = 0; for(j = 1; j <= n; j++) if(p->c[j][i] == 0) ++count; cz[i] = count; } while(TRUE){ /* find unassigned row with least number of zeroes > 0 */ min = INT_MAX; r = 0; for(i = 1; i <= n; i++) if(rz[i] > 0 && rz[i] < min && ri[i] == UNASSIGNED){ min = rz[i]; r = i; } /* check if we are done */ if(r == 0) break; /* find unassigned column in row r with least number of zeroes */ c = 0; min = INT_MAX; for(i = 1; i <= n; i++) if(p->c[r][i] == 0 && cz[i] < min && ci[i] == UNASSIGNED){ min = cz[i]; c = i; } 
if(c){ ++p->na; p->s[r] = c; p->f[c] = r; ri[r] = ASSIGNED; ci[c] = ASSIGNED; /* adjust zero counts */ cz[c] = 0; for(i = 1; i <= n; i++) if(p->c[i][c] == 0) --rz[i]; } } /* free memory */ free(ri); free(ci); free(rz); free(cz); } void preprocess(AP *p) { int i, j, n; double min; n = p->n; /* subtract column minima in each row */ for(i = 1; i <= n; i++){ min = p->c[i][1]; for(j = 2; j <= n; j++) if(p->c[i][j] < min) min = p->c[i][j]; for(j = 1; j <= n; j++) p->c[i][j]-= min; } /* subtract row minima in each column */ for(i = 1; i <= n; i++){ min = p->c[1][i]; for(j = 2; j <= n; j++) if(p->c[j][i] < min) min = p->c[j][i]; for(j = 1; j <= n; j++) p->c[j][i]-= min; } } clue/src/assignment.h0000644000175100001440000000334511623271704014340 0ustar hornikusers#include #include #include #include /* INT_MAX */ #include /* DBL_MAX */ #include #include /* constants used for improving readability of code */ #define COVERED 1 #define UNCOVERED 0 #define ASSIGNED 1 #define UNASSIGNED 0 #define TRUE 1 #define FALSE 0 #define MARKED 1 #define UNMARKED 0 #define REDUCE 1 #define NOREDUCE 0 typedef struct{ int n; /* order of problem */ double **C; /* cost matrix */ double **c; /* reduced cost matrix */ int *s; /* assignment */ int *f; /* column i is assigned to f[i] */ int na; /* number of assigned items; */ int runs; /* number of iterations */ double cost; /* minimum cost */ time_t rtime; /* time */ } AP; /* public interface */ /* constructors and destructor */ AP *ap_create_problem(double *t, int n); AP *ap_create_problem_from_matrix(double **t, int n); AP *ap_read_problem(char *file); void ap_free(AP *p); int ap_assignment(AP *p, int *res); int ap_costmatrix(AP *p, double **m); int ap_datamatrix(AP *p, double **m); int ap_iterations(AP *p); void ap_hungarian(AP *p); double ap_mincost(AP *p); void ap_print_solution(AP *p); void ap_show_data(AP *p); int ap_size(AP *p); int ap_time(AP *p); /* error reporting */ void ap_error(char *message); /* private functions */ void 
preprocess(AP *p); void preassign(AP *p); int cover(AP *p, int *ri, int *ci); void reduce(AP *p, int *ri, int *ci); clue/src/clue.h0000644000175100001440000000210114245105414013102 0ustar hornikusers#ifndef _CLUE_H #define _CLUE_H #include typedef int Sint; void solve_LSAP(double *c, Sint *n, Sint *p); double **clue_vector_to_square_matrix(double *x, Sint n); void clue_dissimilarity_count_inversions(double *x, double *y, Sint *n, double *count); void deviation_from_ultrametricity(double *x, int *n, double *v, int *max); void deviation_from_ultrametricity_gradient(double *x, int *n, double *out); void deviation_from_additivity(double *x, int *n, double *v, int *max); void deviation_from_additivity_gradient(double *x, int *n, double *out); void ls_fit_ultrametric_by_iterative_reduction(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose); void ls_fit_ultrametric_by_iterative_projection(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose); void ls_fit_addtree_by_iterative_reduction(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose); void ls_fit_addtree_by_iterative_projection(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose); #endif clue/src/trees.c0000644000175100001440000002401211304023137013266 0ustar hornikusers#include #include #include "clue.h" static int iwork3[3]; static int iwork4[4]; static void isort3(int *i, int *j, int *k) { iwork3[0] = *i; iwork3[1] = *j; iwork3[2] = *k; R_isort(iwork3, 3); *i = iwork3[0]; *j = iwork3[1]; *k = iwork3[2]; } static void isort4(int *i, int *j, int *k, int *l) { iwork4[0] = *i; iwork4[1] = *j; iwork4[2] = *k; iwork4[3] = *l; R_isort(iwork4, 4); *i = iwork4[0]; *j = iwork4[1]; *k = iwork4[2]; *l = iwork4[3]; } void deviation_from_ultrametricity(double *x, int *n, double *v, int *max) { double **D, p, delta, A, B, C; int i, j, k; D = clue_vector_to_square_matrix(x, *n); p = 0; for(i = 0; i < *n - 2; i++) 
for(j = i + 1; j < *n - 1; j++) { A = D[i][j]; for(k = j + 1; k < *n; k++) { B = D[i][k]; C = D[j][k]; if((A <= B) && (A <= C)) delta = C - B; else if(B <= C) delta = A - C; else delta = B - A; if(*max) p = fmax2(p, fabs(delta)); else p += delta * delta; } } *v = p; } void deviation_from_ultrametricity_gradient(double *x, int *n, double *out) { double **D, **G, A, B, C, delta; int i, j, k; D = clue_vector_to_square_matrix(x, *n); G = clue_vector_to_square_matrix(out, *n); for(i = 0; i < *n - 2; i++) for(j = i + 1; j < *n - 1; j++) { A = D[i][j]; for(k = j + 1; k < *n; k++) { B = D[i][k]; C = D[j][k]; if((A <= B) && (A <= C)) { delta = 2 * (B - C); G[i][k] += delta; G[j][k] -= delta; } else if(B <= C) { delta = 2 * (C - A); G[j][k] += delta; G[i][j] -= delta; } else { delta = 2 * (A - B); G[i][j] += delta; G[i][k] -= delta; } } } for(i = 0; i < *n; i++) for(j = 0; j < *n; j++) *out++ = G[i][j]; } void deviation_from_additivity(double *x, int *n, double *v, int *max) { double **D, p, delta, A, B, C; int i, j, k, l; D = clue_vector_to_square_matrix(x, *n); p = 0; for(i = 0; i < *n - 3; i++) for(j = i + 1; j < *n - 2; j++) for(k = j + 1; k < *n - 1; k++) for(l = k + 1; l < *n; l++) { A = D[i][j] + D[k][l]; B = D[i][k] + D[j][l]; C = D[i][l] + D[j][k]; if((A <= B) && (A <= C)) delta = (C - B); else if(B <= C) delta = (A - C); else delta = (B - A); if(*max) p = fmax2(p, fabs(delta)); else p += delta * delta; } *v = p; } void deviation_from_additivity_gradient(double *x, int *n, double *out) { double **D, **G, A, B, C, delta; int i, j, k, l; D = clue_vector_to_square_matrix(x, *n); G = clue_vector_to_square_matrix(out, *n); for(i = 0; i < *n - 3; i++) for(j = i + 1; j < *n - 2; j++) for(k = j + 1; k < *n - 1; k++) for(l = k + 1; l < *n; l++) { A = D[i][j] + D[k][l]; B = D[i][k] + D[j][l]; C = D[i][l] + D[j][k]; if((A <= B) && (A <= C)) { delta = 2 * (B - C); G[i][l] -= delta; G[j][k] -= delta; G[i][k] += delta; G[j][l] += delta; } else if(B <= C) { delta = 2 * (C - A); 
G[i][l] += delta; G[j][k] += delta; G[i][j] -= delta; G[k][l] -= delta; } else { delta = 2 * (A - B); G[i][k] -= delta; G[j][l] -= delta; G[i][j] += delta; G[k][l] += delta; } } for(i = 0; i < *n; i++) for(j = 0; j < *n; j++) *out++ = G[i][j]; } void ls_fit_ultrametric_by_iterative_reduction(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose) { double A, B, C, **D, DQ, delta, tmp; int i, i1, j, j1, k, k1, N3; D = clue_vector_to_square_matrix(d, *n); /* And initialize the upper half of D ("work array") to 0. (Yes, this could be done more efficiently by just propagating the veclh dist representation.) */ for(i = 0; i < *n - 1; i++) for(j = i + 1; j < *n; j++) D[i][j] = 0; N3 = (*n - 2); for(*iter = 0; *iter < *maxiter; (*iter)++) { if(*verbose) Rprintf("Iteration: %d, ", *iter); for(i1 = 0; i1 < *n - 2; i1++) for(j1 = i1 + 1; j1 < *n - 1; j1++) for(k1 = j1 + 1; k1 < *n; k1++) { i = order[i1]; j = order[j1]; k = order[k1]; isort3(&i, &j, &k); A = D[j][i]; B = D[k][i]; C = D[k][j]; /* B & G have a divisor of 2 for case 1 and 4 for cases 2 and 3 ... clearly, we should use the same in all cases, but should it be 2 or 4? */ if((A <= B) && (A <= C)) { /* Case 1: 5080 */ DQ = (C - B) / 2; D[i][k] += DQ; D[j][k] -= DQ; } else if(B <= C) { /* Case 2: 5100 */ DQ = (C - A) / 2; D[i][j] += DQ; D[j][k] -= DQ; } else { /* Case 3: 5120 */ DQ = (B - A) / 2; D[i][j] += DQ; D[i][k] -= DQ; } } delta = 0; for(i = 0; i < *n - 1; i++) for(j = i + 1; j < *n; j++) { tmp = D[i][j] / N3; D[j][i] += tmp; D[i][j] = 0; delta += fabs(tmp); } if(*verbose) Rprintf("change: %f\n", delta); if(delta < *tol) break; } /* And now write results back. Could make this more efficient, of course ... 
*/ for(j = 0; j < *n; j++) for(i = 0; i < *n; i++) *d++ = D[i][j]; } void ls_fit_ultrametric_by_iterative_projection(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose) { double A, B, C, **D, delta; int i, i1, j, j1, k, k1; D = clue_vector_to_square_matrix(d, *n); for(*iter = 0; *iter < *maxiter; (*iter)++) { if(*verbose) Rprintf("Iteration: %d, ", *iter); delta = 0; for(i1 = 0; i1 < *n - 2; i1++) for(j1 = i1 + 1; j1 < *n - 1; j1++) for(k1 = j1 + 1; k1 < *n; k1++) { i = order[i1]; j = order[j1]; k = order[k1]; isort3(&i, &j, &k); A = D[i][j]; B = D[i][k]; C = D[j][k]; if((A <= B) && (A <= C)) { D[i][k] = D[j][k] = (B + C) / 2; delta += fabs(B - C); } else if(B <= C) { D[i][j] = D[j][k] = (C + A) / 2; delta += fabs(C - A); } else { D[i][j] = D[i][k] = (A + B) / 2; delta += fabs(A - B); } } if(*verbose) Rprintf("change: %f\n", delta); if(delta < *tol) break; } for(i = 0; i < *n - 1; i++) for(j = i + 1; j < *n; j++) D[j][i] = D[i][j]; /* And now write results back. Could make this more efficient, of course ... */ for(j = 0; j < *n; j++) for(i = 0; i < *n; i++) *d++ = D[i][j]; } void ls_fit_addtree_by_iterative_reduction(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose) { /* Once we have ls_fit_ultrametric_by_iterative_reduction() we can always do this as well ... See page 67f in Barthelemy and Guenoche. */ double A, B, C, **D, DQ, delta, tmp, N3; int i, i1, j, j1, k, k1, l, l1; D = clue_vector_to_square_matrix(d, *n); /* And initialize the upper half of D ("work array") to 0. (Yes, this could be done more efficiently by just propagating the veclh dist representation.) 
*/ for(i = 0; i < *n - 1; i++) for(j = i + 1; j < *n; j++) D[i][j] = 0; N3 = (*n - 2) * (*n - 3) / 2; for(*iter = 0; *iter < *maxiter; (*iter)++) { if(*verbose) Rprintf("Iteration: %d, ", *iter); for(i1 = 0; i1 < *n - 3; i1++) for(j1 = i1 + 1; j1 < *n - 2; j1++) for(k1 = j1 + 1; k1 < *n - 1; k1++) for(l1 = k1 + 1; l1 < *n; l1++) { i = order[i1]; j = order[j1]; k = order[k1]; l = order[l1]; isort4(&i, &j, &k, &l); A = D[j][i] + D[l][k]; B = D[k][i] + D[l][j]; C = D[l][i] + D[k][j]; if((A <= B) && (A <= C)) { /* Case 1: 5090 */ DQ = (C - B) / 4; D[i][l] -= DQ; D[j][k] -= DQ; D[i][k] += DQ; D[j][l] += DQ; } else if(B <= C) { /* Case 2: 5120 */ DQ = (A - C) / 4; D[i][l] += DQ; D[j][k] += DQ; D[i][j] -= DQ; D[k][l] -= DQ; } else { /* Case 3: 5150 */ DQ = (B - A) / 4; D[i][k] -= DQ; D[j][l] -= DQ; D[i][j] += DQ; D[k][l] += DQ; } } delta = 0; for(i = 0; i < *n - 1; i++) for(j = i + 1; j < *n; j++) { tmp = D[i][j] / N3; D[j][i] += tmp; D[i][j] = 0; delta += fabs(tmp); } if(*verbose) Rprintf("change: %f\n", delta); if(delta < *tol) break; } /* And now write results back. Could make this more efficient, of course ... 
*/ for(j = 0; j < *n; j++) for(i = 0; i < *n; i++) *d++ = D[i][j]; } void ls_fit_addtree_by_iterative_projection(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose) { double A, B, C, **D, DQ, delta; int i, i1, j, j1, k, k1, l, l1; D = clue_vector_to_square_matrix(d, *n); for(*iter = 0; *iter < *maxiter; (*iter)++) { delta = 0; if(*verbose) Rprintf("Iteration: %d, ", *iter); for(i1 = 0; i1 < *n - 3; i1++) for(j1 = i1 + 1; j1 < *n - 2; j1++) for(k1 = j1 + 1; k1 < *n - 1; k1++) for(l1 = k1 + 1; l1 < *n; l1++) { i = order[i1]; j = order[j1]; k = order[k1]; l = order[l1]; isort4(&i, &j, &k, &l); A = D[i][j] + D[k][l]; B = D[i][k] + D[j][l]; C = D[i][l] + D[j][k]; if((A <= B) && (A <= C)) { DQ = (C - B) / 4; D[i][l] -= DQ; D[j][k] -= DQ; D[i][k] += DQ; D[j][l] += DQ; delta += fabs(C - B); } else if(B <= C) { DQ = (A - C) / 4; D[i][l] += DQ; D[j][k] += DQ; D[i][j] -= DQ; D[k][l] -= DQ; delta += fabs(A - C); } else { DQ = (B - A) / 4; D[i][k] -= DQ; D[j][l] -= DQ; D[i][j] += DQ; D[k][l] += DQ; delta += fabs(B - A); } } if(*verbose) Rprintf("change: %f\n", delta); if(delta < *tol) break; } for(i = 0; i < *n - 1; i++) for(j = i + 1; j < *n; j++) D[j][i] = D[i][j]; /* And now write results back. Could make this more efficient, of course ... 
*/ for(j = 0; j < *n; j++) for(i = 0; i < *n; i++) *d++ = D[i][j]; } clue/src/init.c0000644000175100001440000000373613340544526013135 0ustar hornikusers#include #include #include #include "clue.h" static R_NativePrimitiveArgType solve_LSAP_t[3] = { REALSXP, INTSXP, INTSXP }; static R_NativePrimitiveArgType clue_dissimilarity_count_inversions_t[4] = { REALSXP, REALSXP, INTSXP, REALSXP }; static R_NativePrimitiveArgType deviation_from_ultrametricity_t[4] = { REALSXP, INTSXP, REALSXP, LGLSXP }; static R_NativePrimitiveArgType deviation_from_ultrametricity_gradient_t[3] = { REALSXP, INTSXP, REALSXP }; static R_NativePrimitiveArgType deviation_from_additivity_t[4] = { REALSXP, INTSXP, REALSXP, LGLSXP }; static R_NativePrimitiveArgType deviation_from_additivity_gradient_t[3] = { REALSXP, INTSXP, REALSXP }; static R_NativePrimitiveArgType ls_fit_ultrametric_by_iterative_reduction_t[7] = { REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, LGLSXP }; static R_NativePrimitiveArgType ls_fit_ultrametric_by_iterative_projection_t[7] = { REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, LGLSXP }; static R_NativePrimitiveArgType ls_fit_addtree_by_iterative_reduction_t[7] = { REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, LGLSXP }; static R_NativePrimitiveArgType ls_fit_addtree_by_iterative_projection_t[7] = { REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, LGLSXP }; #define CDEF(name) {#name, (DL_FUNC) &name, sizeof(name ## _t)/sizeof(name ## _t[0]), name ##_t} static const R_CMethodDef cMethods[] = { CDEF(solve_LSAP), CDEF(clue_dissimilarity_count_inversions), CDEF(deviation_from_ultrametricity), CDEF(deviation_from_ultrametricity_gradient), CDEF(deviation_from_additivity), CDEF(deviation_from_additivity_gradient), CDEF(ls_fit_ultrametric_by_iterative_reduction), CDEF(ls_fit_ultrametric_by_iterative_projection), CDEF(ls_fit_addtree_by_iterative_reduction), CDEF(ls_fit_addtree_by_iterative_projection), {NULL, NULL, 0} }; void R_init_clue(DllInfo *dll) { 
R_registerRoutines(dll, cMethods, NULL, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } clue/src/lsap.c0000644000175100001440000000033311304023137013103 0ustar hornikusers#include #include "assignment.h" #include "clue.h" void solve_LSAP(double *c, Sint *n, Sint *p) { AP *ap; ap = ap_create_problem(c, *n); ap_hungarian(ap); ap_assignment(ap, p); ap_free(ap); } clue/src/clue.c0000644000175100001440000000135211304023137013076 0ustar hornikusers#include #include #include "clue.h" double **clue_vector_to_square_matrix(double *x, Sint n) { double **data, *val; Sint i, j; data = (double **) R_alloc(n, sizeof(double)); for(i = 0; i < n; i++) { data[i] = (double *) R_alloc(n, sizeof(double)); val = x + i; for(j = 0; j < n; j++, val += n) data[i][j] = *val; } return(data); } static int clue_sign(double x) { if(x == 0) return(0); return((x > 0) ? 1 : -1); } void clue_dissimilarity_count_inversions(double *x, double *y, Sint *n, double *count) { Sint i, j; for(i = 0; i < *n; i++) for(j = 0; j < *n; j++) if((clue_sign(x[i] - x[j]) * clue_sign(y[i] - y[j])) < 0) (*count)++; } clue/NAMESPACE0000644000175100001440000002447214503541633012454 0ustar hornikusersuseDynLib("clue", .registration = TRUE, .fixes = "C_") import("stats") importFrom("graphics", "par", "plot") importFrom("cluster", "clara", "pam", "silhouette") export("cl_agreement", "cl_bag", "cl_boot", "cl_consensus", "cl_dissimilarity", "cl_ensemble", "as.cl_ensemble", "is.cl_ensemble", "cl_fuzziness", "cl_join", "cl_margin", "cl_meet", "cl_medoid", "cl_membership", "as.cl_membership", "cl_object_names", "cl_pam", "cl_pclust", "cl_predict", "cl_prototypes", "cl_tabulate", "cl_ultrametric", "as.cl_ultrametric", "ls_fit_addtree", "ls_fit_centroid", "ls_fit_ultrametric", "ls_fit_sum_of_ultrametrics", "ls_fit_ultrametric_target", "l1_fit_ultrametric", "l1_fit_ultrametric_target", "cl_validity", "n_of_objects", "n_of_classes", "cl_class_ids", "as.cl_class_ids", "cl_classes", "is.cl_partition", "as.cl_partition", 
"is.cl_hard_partition", "as.cl_hard_partition", "is.cl_soft_partition", "is.cl_dendrogram", "as.cl_dendrogram", "is.cl_hierarchy", "as.cl_hierarchy", "as.cl_addtree", "kmedoids", "pclust", "pclust_family", "pclust_object", "solve_LSAP", "sumt") ## S3 methods, sorted alphabetically. S3method("[", "cl_agreement") S3method("[", "cl_dissimilarity") S3method("[", "cl_ensemble") S3method("[", "cl_proximity") S3method("Complex", "cl_hierarchy") S3method("Complex", "cl_partition") S3method("Math", "cl_hierarchy") S3method("Math", "cl_partition") S3method("Ops", "cl_dendrogram") S3method("Ops", "cl_hierarchy") S3method("Ops", "cl_partition") S3method("Summary", "cl_hierarchy") S3method("Summary", "cl_partition") S3method("Summary", "cl_partition_ensemble") S3method("as.cl_addtree", "default") S3method("as.cl_addtree", "phylo") S3method("as.cl_membership", "default") S3method("as.cl_membership", "matrix") S3method("as.cl_ultrametric", "default") S3method("as.cl_ultrametric", "matrix") S3method("as.dendrogram", "cl_ultrametric") S3method("as.dist", "cl_dissimilarity") S3method("as.hclust", "cl_hierarchy") S3method("as.hclust", "cl_ultrametric") S3method("as.hclust", "mona") S3method("as.matrix", "cl_proximity") S3method("as.matrix", "hclust") S3method("c", "cl_ensemble") S3method("cl_class_ids", "Mclust") S3method("cl_class_ids", "Weka_clusterer") S3method("cl_class_ids", "bclust") S3method("cl_class_ids", "ccfkms") S3method("cl_class_ids", "cclust") S3method("cl_class_ids", "cl_class_ids") S3method("cl_class_ids", "cl_membership") S3method("cl_class_ids", "cl_pam") S3method("cl_class_ids", "cl_partition") S3method("cl_class_ids", "cl_partition_by_class_ids") S3method("cl_class_ids", "cshell") S3method("cl_class_ids", "default") S3method("cl_class_ids", "fclust") S3method("cl_class_ids", "flexmix") S3method("cl_class_ids", "kcca") S3method("cl_class_ids", "kmeans") S3method("cl_class_ids", "kmedoids") S3method("cl_class_ids", "movMF") S3method("cl_class_ids", "partition") 
S3method("cl_class_ids", "pclust") S3method("cl_class_ids", "relation") S3method("cl_class_ids", "rock") S3method("cl_class_ids", "specc") S3method("cl_classes", "cl_hierarchy") S3method("cl_classes", "cl_partition") S3method("cl_classes", "cl_ultrametric") S3method("cl_classes", "default") S3method("cl_membership", "Mclust") S3method("cl_membership", "bclust") S3method("cl_membership", "cclust") S3method("cl_membership", "cl_membership") S3method("cl_membership", "cl_partition") S3method("cl_membership", "cshell") S3method("cl_membership", "default") S3method("cl_membership", "fanny") S3method("cl_membership", "fclust") S3method("cl_membership", "flexmix") S3method("cl_membership", "kmeans") S3method("cl_membership", "movMF") S3method("cl_membership", "partition") S3method("cl_membership", "pclust") S3method("cl_object_names", "cl_ensemble") S3method("cl_object_names", "cl_hierarchy") S3method("cl_object_names", "cl_membership") S3method("cl_object_names", "cl_partition") S3method("cl_object_names", "cl_ultrametric") S3method("cl_object_names", "default") S3method("cl_object_names", "dist") S3method("cl_object_names", "hclust") S3method("cl_object_names", "mona") S3method("cl_object_names", "phylo") S3method("cl_object_names", "twins") S3method("cl_predict", "Mclust") S3method("cl_predict", "Weka_clusterer") S3method("cl_predict", "bclust") S3method("cl_predict", "cclust") S3method("cl_predict", "cl_partition") S3method("cl_predict", "ccfkms") S3method("cl_predict", "clara") S3method("cl_predict", "cshell") S3method("cl_predict", "default") S3method("cl_predict", "fanny") S3method("cl_predict", "fclust") S3method("cl_predict", "flexmix") S3method("cl_predict", "kcca") S3method("cl_predict", "kmeans") S3method("cl_predict", "movMF") S3method("cl_predict", "pam") S3method("cl_predict", "pclust") S3method("cl_predict", "rock") S3method("cl_prototypes", "Mclust") S3method("cl_prototypes", "bclust") S3method("cl_prototypes", "ccfkms") S3method("cl_prototypes", 
"cclust") S3method("cl_prototypes", "cl_pam") S3method("cl_prototypes", "cl_partition") S3method("cl_prototypes", "clara") S3method("cl_prototypes", "cshell") S3method("cl_prototypes", "fclust") S3method("cl_prototypes", "kcca") S3method("cl_prototypes", "kmeans") S3method("cl_prototypes", "pam") S3method("cl_prototypes", "pclust") S3method("cl_prototypes", "specc") S3method("cl_validity", "agnes") S3method("cl_validity", "default") S3method("cl_validity", "diana") S3method("cl_validity", "cl_partition") S3method("cl_validity", "pclust") S3method("cophenetic", "cl_ultrametric") S3method("cophenetic", "mona") S3method("cut", "cl_dendrogram") S3method("is.cl_dendrogram", "cl_dendrogram") S3method("is.cl_dendrogram", "default") S3method("is.cl_dendrogram", "hclust") S3method("is.cl_dendrogram", "mona") S3method("is.cl_dendrogram", "phylo") S3method("is.cl_dendrogram", "twins") S3method("is.cl_hard_partition", "Mclust") S3method("is.cl_hard_partition", "Weka_clusterer") S3method("is.cl_hard_partition", "bclust") S3method("is.cl_hard_partition", "ccfkms") S3method("is.cl_hard_partition", "cclust") S3method("is.cl_hard_partition", "cl_hard_partition") S3method("is.cl_hard_partition", "cl_partition") S3method("is.cl_hard_partition", "cshell") S3method("is.cl_hard_partition", "default") S3method("is.cl_hard_partition", "fanny") S3method("is.cl_hard_partition", "fclust") S3method("is.cl_hard_partition", "flexmix") S3method("is.cl_hard_partition", "kcca") S3method("is.cl_hard_partition", "kmeans") S3method("is.cl_hard_partition", "kmedoids") S3method("is.cl_hard_partition", "movMF") S3method("is.cl_hard_partition", "partition") S3method("is.cl_hard_partition", "pclust") S3method("is.cl_hard_partition", "rock") S3method("is.cl_hard_partition", "specc") S3method("is.cl_hierarchy", "cl_hierarchy") S3method("is.cl_hierarchy", "default") S3method("is.cl_hierarchy", "hclust") S3method("is.cl_hierarchy", "mona") S3method("is.cl_hierarchy", "phylo") S3method("is.cl_hierarchy", 
"twins") S3method("is.cl_partition", "Mclust") S3method("is.cl_partition", "Weka_clusterer") S3method("is.cl_partition", "bclust") S3method("is.cl_partition", "ccfkms") S3method("is.cl_partition", "cclust") S3method("is.cl_partition", "cl_partition") S3method("is.cl_partition", "cshell") S3method("is.cl_partition", "default") S3method("is.cl_partition", "fclust") S3method("is.cl_partition", "flexmix") S3method("is.cl_partition", "kcca") S3method("is.cl_partition", "kmeans") S3method("is.cl_partition", "kmedoids") S3method("is.cl_partition", "movMF") S3method("is.cl_partition", "partition") S3method("is.cl_partition", "pclust") S3method("is.cl_partition", "rock") S3method("is.cl_partition", "specc") S3method("n_of_classes", "Mclust") S3method("n_of_classes", "bclust") S3method("n_of_classes", "cclust") S3method("n_of_classes", "cl_membership") S3method("n_of_classes", "cl_partition") S3method("n_of_classes", "cshell") S3method("n_of_classes", "default") S3method("n_of_classes", "fanny") S3method("n_of_classes", "fclust") S3method("n_of_classes", "kmeans") S3method("n_of_classes", "partition") S3method("n_of_classes", "pclust") S3method("n_of_objects", "Mclust") S3method("n_of_objects", "bclust") S3method("n_of_objects", "cclust") S3method("n_of_objects", "cl_ensemble") S3method("n_of_objects", "cl_hierarchy") S3method("n_of_objects", "cl_membership") S3method("n_of_objects", "cl_partition") S3method("n_of_objects", "cl_ultrametric") S3method("n_of_objects", "cshell") S3method("n_of_objects", "default") S3method("n_of_objects", "dist") S3method("n_of_objects", "fclust") S3method("n_of_objects", "hclust") S3method("n_of_objects", "kmeans") S3method("n_of_objects", "mona") S3method("n_of_objects", "partition") S3method("n_of_objects", "pclust") S3method("n_of_objects", "phylo") S3method("n_of_objects", "twins") S3method("names", "cl_proximity") S3method("plot", "cl_addtree") S3method("plot", "cl_ensemble") S3method("plot", "cl_dendrogram") S3method("plot", 
"cl_ultrametric") S3method("print", "cl_class_ids") S3method("print", "cl_classes_of_partition_of_objects") S3method("print", "cl_classes_of_hierarchy_of_objects") S3method("print", "cl_cross_proximity") S3method("print", "cl_dendrogram") S3method("print", "cl_dendrogram_ensemble") S3method("print", "cl_ensemble") S3method("print", "cl_fuzziness") S3method("print", "cl_hierarchy") S3method("print", "cl_hierarchy_ensemble") S3method("print", "cl_membership") S3method("print", "cl_pam") S3method("print", "cl_partition") S3method("print", "cl_partition_by_class_ids") S3method("print", "cl_partition_by_memberships") S3method("print", "cl_partition_ensemble") S3method("print", "cl_proximity") S3method("print", "cl_pclust") S3method("print", "cl_validity") S3method("print", "kmedoids") S3method("print", "pclust") S3method("print", "solve_LSAP") S3method("rep", "cl_ensemble") S3method("silhouette", "cl_partition") S3method("silhouette", "cl_pclust") S3method("unique", "cl_ensemble") S3method(".maybe_is_proper_soft_partition", "Mclust") S3method(".maybe_is_proper_soft_partition", "cl_partition") S3method(".maybe_is_proper_soft_partition", "cshell") S3method(".maybe_is_proper_soft_partition", "default") S3method(".maybe_is_proper_soft_partition", "fanny") S3method(".maybe_is_proper_soft_partition", "fclust") S3method(".maybe_is_proper_soft_partition", "flexmix") S3method(".maybe_is_proper_soft_partition", "pclust") clue/inst/0000755000175100001440000000000014715042016012175 5ustar hornikusersclue/inst/CITATION0000644000175100001440000000103314366206623013337 0ustar hornikusersbibentry("Manual", other = unlist(citation(auto = meta), recursive = FALSE)) bibentry("Article", title = "A {CLUE} for {CLUster Ensembles}", author = person("Kurt", "Hornik", email = "Kurt.Hornik@R-project.org", comment = c(ORCID = "0000-0003-4198-9911")), year = 2005, journal = "Journal of Statistical Software", volume = 14, number = 12, month = "September", doi = "10.18637/jss.v014.i12" ) 
clue/inst/po/0000755000175100001440000000000012213262407012612 5ustar hornikusersclue/inst/po/en@quot/0000755000175100001440000000000012213262407014225 5ustar hornikusersclue/inst/po/en@quot/LC_MESSAGES/0000755000175100001440000000000012213262407016012 5ustar hornikusersclue/inst/po/en@quot/LC_MESSAGES/R-clue.mo0000644000175100001440000002064013143661614017506 0ustar hornikusersSqL/AHa?X ^ j5v2,X We V : 5O ) , / ! ;. 8j  % + )5 (_ 2 ) # * *4 _ %| )  & / I T1_*+19"X&{& *Eb%~  94.+Zy"07% ]+j(> 1,F's6/H"k?X  +572m,XW&V~:9-J0x7%C8K ")+)(H2q)#**H%e)&  2 =1H.z+1""*&M&t& #>[%w  94.$Sr&0 7" Z +g  , B !1!,K!'x!4I%R52'M+ D0.N O3,L"AS1)$=E*B#! J;-? > KQ8 PC<G(9:F&7@/H6A hard partition of %d objects into %d classes.A hard partition of %d objects.A hard partition of a cluster ensemble with %d elements into %d classes.A partition of %d objects.A soft partition (degree m = %f) of %d objects into %d classes.A soft partition (degree m = %f) of a cluster ensemble with %d elements into %d classes.AOG run: %dAOS run: %dAll clusterings must have the same number of objects.All elements must have the same number of objects.All given orders must be valid permutations.An ensemble of %d dendrogram of %d objects.An ensemble of %d dendrograms of %d objects.An ensemble of %d hierarchy of %d objects.An ensemble of %d hierarchies of %d objects.An ensemble of %d partition of %d objects.An ensemble of %d partitions of %d objects.An ensemble with %d element.An ensemble with %d elements.An object of virtual class '%s', with representation:Argument 'weights' has negative elements.Argument 'weights' has no positive elements.Argument 'weights' must be compatible with 'x'.Argument 'x' must be a partition.Arguments 'x' and 'y' must have the same number of objects.Can only determine classes of partitions or hierarchies.Can only handle hard partitions.Cannot coerce to 'cl_addtree'.Cannot coerce to 'cl_hard_partition'.Cannot compute consensus of empty ensemble.Cannot compute join of given clusterings.Cannot 
compute medoid of empty ensemble.Cannot compute medoid partition of empty ensemble.Cannot compute meet of given clusterings.Cannot compute prototype distances.Cannot determine how to modify prototypes.Cannot determine how to subset prototypes.Cannot determine prototypes.Cannot extract object dissimilaritiesCannot infer class ids from given object.Cannot make new predictions.Cannot mix partitions and hierarchies.Change: %gChange: u: %g L: %gClass ids must be atomic.Class ids:Criterion:Dendrograms must have the same number of objects.Generic '%s' not defined for "%s" objects.Given ensemble contains no dissimilarities.Hierarchies must have the same number of objects.Invalid agreement method '%s'.Invalid consensus method '%s'.Invalid dissimilarity method '%s'.Invalid function to modify prototypes.Invalid function to subset prototypes.Iteration: %dIteration: %d *** value: %gIteration: %d Rho: %g P: %gIteration: 0 *** value: %gIteration: 0 Rho: %g P: %gIterative projection run: %dIterative reduction run: %dJoin of given n-trees does not exist.Medoid ids:Minimum: %gNo information on dissimilarity in consensus method used.No information on exponent in consensus method used.Non-identical weights currently not supported.Not a valid membership matrix.Not a valid ultrametric.Outer iteration: %dOverall change: u: %g L: %gParameter 'p' must be in [1/2, 1].Partitions must have the same number of objects.Pclust run: %dPlotting not available for elements %s of the ensemble.SUMT run: %dStandardization is currently not supported.Term: %dUnary '%s' not defined for "%s" objects.Value '%s' is not a valid abbreviation for a fuzziness method.Wrong class.k cannot be less than the number of classes in x.x must be a matrix with nonnegative entries.x must not have more rows than columns.Project-Id-Version: clue 0.3-54 POT-Creation-Date: 2017-08-07 11:31 PO-Revision-Date: 2017-08-07 11:31 Last-Translator: Automatically generated Language-Team: none MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit Language: en Plural-Forms: nplurals=2; plural=(n != 1); A hard partition of %d objects into %d classes.A hard partition of %d objects.A hard partition of a cluster ensemble with %d elements into %d classes.A partition of %d objects.A soft partition (degree m = %f) of %d objects into %d classes.A soft partition (degree m = %f) of a cluster ensemble with %d elements into %d classes.AOG run: %dAOS run: %dAll clusterings must have the same number of objects.All elements must have the same number of objects.All given orders must be valid permutations.An ensemble of %d dendrogram of %d objects.An ensemble of %d dendrograms of %d objects.An ensemble of %d hierarchy of %d objects.An ensemble of %d hierarchies of %d objects.An ensemble of %d partition of %d objects.An ensemble of %d partitions of %d objects.An ensemble with %d element.An ensemble with %d elements.An object of virtual class ‘%s’, with representation:Argument ‘weights’ has negative elements.Argument ‘weights’ has no positive elements.Argument ‘weights’ must be compatible with ‘x’.Argument ‘x’ must be a partition.Arguments ‘x’ and ‘y’ must have the same number of objects.Can only determine classes of partitions or hierarchies.Can only handle hard partitions.Cannot coerce to ‘cl_addtree’.Cannot coerce to ‘cl_hard_partition’.Cannot compute consensus of empty ensemble.Cannot compute join of given clusterings.Cannot compute medoid of empty ensemble.Cannot compute medoid partition of empty ensemble.Cannot compute meet of given clusterings.Cannot compute prototype distances.Cannot determine how to modify prototypes.Cannot determine how to subset prototypes.Cannot determine prototypes.Cannot extract object dissimilaritiesCannot infer class ids from given object.Cannot make new predictions.Cannot mix partitions and hierarchies.Change: %gChange: u: %g L: %gClass ids must be atomic.Class ids:Criterion:Dendrograms must have the same number of objects.Generic ‘%s’ not defined 
for "%s" objects.Given ensemble contains no dissimilarities.Hierarchies must have the same number of objects.Invalid agreement method ‘%s’.Invalid consensus method ‘%s’.Invalid dissimilarity method ‘%s’.Invalid function to modify prototypes.Invalid function to subset prototypes.Iteration: %dIteration: %d *** value: %gIteration: %d Rho: %g P: %gIteration: 0 *** value: %gIteration: 0 Rho: %g P: %gIterative projection run: %dIterative reduction run: %dJoin of given n-trees does not exist.Medoid ids:Minimum: %gNo information on dissimilarity in consensus method used.No information on exponent in consensus method used.Non-identical weights currently not supported.Not a valid membership matrix.Not a valid ultrametric.Outer iteration: %dOverall change: u: %g L: %gParameter ‘p’ must be in [1/2, 1].Partitions must have the same number of objects.Pclust run: %dPlotting not available for elements %s of the ensemble.SUMT run: %dStandardization is currently not supported.Term: %dUnary ‘%s’ not defined for "%s" objects.Value ‘%s’ is not a valid abbreviation for a fuzziness method.Wrong class.k cannot be less than the number of classes in x.x must be a matrix with nonnegative entries.x must not have more rows than columns.clue/inst/doc/0000755000175100001440000000000014715042016012742 5ustar hornikusersclue/inst/doc/clue.Rnw0000644000175100001440000016521512734170652014403 0ustar hornikusers\documentclass[fleqn]{article} \usepackage[round,longnamesfirst]{natbib} \usepackage{graphicx,keyval,hyperref,doi} \newcommand\argmin{\mathop{\mathrm{arg min}}} \newcommand\trace{\mathop{\mathrm{tr}}} \newcommand\R{{\mathbb{R}}} \newcommand{\pkg}[1]{{\normalfont\fontseries{b}\selectfont #1}} \newcommand{\sQuote}[1]{`{#1}'} \newcommand{\dQuote}[1]{``{#1}''} \let\code=\texttt \newcommand{\file}[1]{\sQuote{\textsf{#1}}} \newcommand{\class}[1]{\code{"#1"}} \SweaveOpts{strip.white=true} \AtBeginDocument{\setkeys{Gin}{width=0.6\textwidth}} \date{2007-06-28} \title{A CLUE for CLUster Ensembles} 
\author{Kurt Hornik} %% \VignetteIndexEntry{CLUster Ensembles} \sloppy{} \begin{document} \maketitle \begin{abstract} Cluster ensembles are collections of individual solutions to a given clustering problem which are useful or necessary to consider in a wide range of applications. The R package~\pkg{clue} provides an extensible computational environment for creating and analyzing cluster ensembles, with basic data structures for representing partitions and hierarchies, and facilities for computing on these, including methods for measuring proximity and obtaining consensus and ``secondary'' clusterings. \end{abstract} <>= options(width = 60) library("clue") @ % \section{Introduction} \label{sec:introduction} \emph{Cluster ensembles} are collections of clusterings, which are all of the same ``kind'' (e.g., collections of partitions, or collections of hierarchies), of a set of objects. Such ensembles can be obtained, for example, by varying the (hyper)parameters of a ``base'' clustering algorithm, by resampling or reweighting the set of objects, or by employing several different base clusterers. Questions of ``agreement'' in cluster ensembles, and obtaining ``consensus'' clusterings from it, have been studied in several scientific communities for quite some time now. A special issue of the Journal of Classification was devoted to ``Comparison and Consensus of Classifications'' \citep{cluster:Day:1986} almost two decades ago. The recent popularization of ensemble methods such as Bayesian model averaging \citep{cluster:Hoeting+Madigan+Raftery:1999}, bagging \citep{cluster:Breiman:1996} and boosting \citep{cluster:Friedman+Hastie+Tibshirani:2000}, typically in a supervised leaning context, has also furthered the research interest in using ensemble methods to improve the quality and robustness of cluster solutions. 
Cluster ensembles can also be utilized to aggregate base results over conditioning or grouping variables in multi-way data, to reuse existing knowledge, and to accommodate the needs of distributed computing, see e.g.\ \cite{cluster:Hornik:2005a} and \cite{cluster:Strehl+Ghosh:2003a} for more information. Package~\pkg{clue} is an extension package for R~\citep{cluster:R:2005} providing a computational environment for creating and analyzing cluster ensembles. In Section~\ref{sec:structures+algorithms}, we describe the underlying data structures, and the functionality for measuring proximity, obtaining consensus clusterings, and ``secondary'' clusterings. Four examples are discussed in Section~\ref{sec:examples}. Section~\ref{sec:outlook} concludes the paper. A previous version of this manuscript was published in the \emph{Journal of Statistical Software} \citep{cluster:Hornik:2005b}. \section{Data structures and algorithms} \label{sec:structures+algorithms} \subsection{Partitions and hierarchies} Representations of clusterings of objects greatly vary across the multitude of methods available in R packages. For example, the class ids (``cluster labels'') for the results of \code{kmeans()} in base package~\pkg{stats}, \code{pam()} in recommended package~\pkg{cluster}~\citep{cluster:Rousseeuw+Struyf+Hubert:2005, cluster:Struyf+Hubert+Rousseeuw:1996}, and \code{Mclust()} in package~\pkg{mclust}~\citep{cluster:Fraley+Raftery+Wehrens:2005, cluster:Fraley+Raftery:2003}, are available as components named \code{cluster}, \code{clustering}, and \code{classification}, respectively, of the R objects returned by these functions. In many cases, the representations inherit from suitable classes. (We note that for versions of R prior to 2.1.0, \code{kmeans()} only returned a ``raw'' (unclassed) result, which was changed alongside the development of \pkg{clue}.) 
We deal with this heterogeneity of representations by providing getters for the key underlying data, such as the number of objects from which a clustering was obtained, and predicates, e.g.\ for determining whether an R object represents a partition of objects or not. These getters, such as \code{n\_of\_objects()}, and predicates are implemented as S3 generics, so that there is a \emph{conceptual}, but no formal class system underlying the predicates. Support for classed representations can easily be added by providing S3 methods. \subsubsection{Partitions} The partitions considered in \pkg{clue} are possibly soft (``fuzzy'') partitions, where for each object~$i$ and class~$j$ there is a non-negative number~$\mu_{ij}$ quantifying the ``belongingness'' or \emph{membership} of object~$i$ to class~$j$, with $\sum_j \mu_{ij} = 1$. For hard (``crisp'') partitions, all $\mu_{ij}$ are in $\{0, 1\}$. We can gather the $\mu_{ij}$ into the \emph{membership matrix} $M = [\mu_{ij}]$, where rows correspond to objects and columns to classes. The \emph{number of classes} of a partition, computed by function \code{n\_of\_classes()}, is the number of $j$ for which $\mu_{ij} > 0$ for at least one object~$i$. This may be less than the number of ``available'' classes, corresponding to the number of columns in a membership matrix representing the partition. The predicate functions \code{is.cl\_partition()}, \code{is.cl\_hard\_partition()}, and \code{is.cl\_soft\_partition()} are used to indicate whether R objects represent partitions of objects of the respective kind, with hard partitions as characterized above (all memberships in $\{0, 1\}$). (Hence, ``fuzzy clustering'' algorithms can in principle also give a hard partition.) \code{is.cl\_partition()} and \code{is.cl\_hard\_partition()} are generic functions; \code{is.cl\_soft\_partition()} gives true iff \code{is.cl\_partition()} is true and \code{is.cl\_hard\_partition()} is false. 
For R objects representing partitions, function \code{cl\_membership()} computes an R object with the membership values, currently always as a dense membership matrix with additional attributes. This is obviously rather inefficient for computations on hard partitions; we are planning to add ``canned'' sparse representations (using the vector of class ids) in future versions. Function \code{as.cl\_membership()} can be used for coercing \dQuote{raw} class ids (given as atomic vectors) or membership values (given as numeric matrices) to membership objects. Function \code{cl\_class\_ids()} determines the class ids of a partition. For soft partitions, the class ids returned are those of the \dQuote{nearest} hard partition obtained by taking the class ids of the (first) maximal membership values. Note that the cardinality of the set of the class ids may be less than the number of classes in the (soft) partition. Many partitioning methods are based on \emph{prototypes} (``centers''). In typical cases, these are points~$p_j$ in the same feature space the measurements~$x_i$ on the objects~$i$ to be partitioned are in, so that one can measure distance between objects and prototypes, and e.g.\ classify objects to their closest prototype. Such partitioning methods can also induce partitions of the entire feature space (rather than ``just'' the set of objects to be partitioned). Currently, package \pkg{clue} has only minimal support for this ``additional'' structure, providing a \code{cl\_prototypes()} generic for extracting the prototypes, and is mostly focused on computations on partitions which are based on their memberships. Many algorithms resulting in partitions of a given set of objects can be taken to induce a partition of the underlying feature space for the measurements on the objects, so that class memberships for ``new'' objects can be obtained from the induced partition. 
Examples include partitions based on assigning objects to their ``closest'' prototypes, or providing mixture models for the distribution of objects in feature space. Package~\pkg{clue} provides a \code{cl\_predict()} generic for predicting the class memberships of new objects (if possible). Function \code{cl\_fuzziness()} computes softness (fuzziness) measures for (ensembles) of partitions. Built-in measures are the partition coefficient \label{PC} and partition entropy \citep[e.g.,][]{cluster:Bezdek:1981}, with an option to normalize in a way that hard partitions and the ``fuzziest'' possible partition (where all memberships are the same) get fuzziness values of zero and one, respectively. Note that this normalization differs from ``standard'' ones in the literature. In the sequel, we shall also use the concept of the \emph{co-membership matrix} $C(M) = M M'$, where $'$ denotes matrix transposition, of a partition. For hard partitions, an entry $c_{ij}$ of $C(M)$ is 1 iff the corresponding objects $i$ and $j$ are in the same class, and 0 otherwise. \subsubsection{Hierarchies} The hierarchies considered in \pkg{clue} are \emph{total indexed hierarchies}, also known as \emph{$n$-valued trees}, and hence correspond in a one-to-one manner to \emph{ultrametrics} (distances $u_{ij}$ between pairs of objects $i$ and $j$ which satisfy the ultrametric constraint $u_{ij} \le \max(u_{ik}, u_{jk})$ for all triples $i$, $j$, and $k$). See e.g.~\citet[Page~69--71]{cluster:Gordon:1999}. Function \code{cl\_ultrametric(x)} computes the associated ultrametric from an R object \code{x} representing a hierarchy of objects. If \code{x} is not an ultrametric, function \code{cophenetic()} in base package~\pkg{stats} is used to obtain the ultrametric (also known as cophenetic) distances from the hierarchy, which in turn by default calls the S3 generic \code{as.hclust()} (also in \pkg{stats}) on the hierarchy. 
Support for classes which represent hierarchies can thus be added by providing \code{as.hclust()} methods for this class. In R~2.1.0 or better (again as part of the work on \pkg{clue}), \code{cophenetic} is an S3 generic as well, and one can also more directly provide methods for this if necessary. In addition, there is a generic function \code{as.cl\_ultrametric()} which can be used for coercing \emph{raw} (non-classed) ultrametrics, represented as numeric vectors (of the lower-half entries) or numeric matrices, to ultrametric objects. Finally, the generic predicate function \code{is.cl\_hierarchy()} is used to determine whether an R object represents a hierarchy or not. Ultrametric objects can also be coerced to classes~\class{dendrogram} and \class{hclust} (from base package~\pkg{stats}), and hence in particular use the \code{plot()} methods for these classes. By default, plotting an ultrametric object uses the plot method for dendrograms. Obtaining a hierarchy on a given set of objects can be thought of as transforming the pairwise dissimilarities between the objects (which typically do not yet satisfy the ultrametric constraints) into an ultrametric. Ideally, this ultrametric should be as close as possible to the dissimilarities. In some important cases, explicit solutions are possible (e.g., ``standard'' hierarchical clustering with single or complete linkage gives the optimal ultrametric dominated by or dominating the dissimilarities, respectively). On the other hand, the problem of finding the closest ultrametric in the least squares sense is known to be NP-hard \citep{cluster:Krivanek+Moravek:1986,cluster:Krivanek:1986}. One important class of heuristics for finding least squares fits is based on iterative projection on convex sets of constraints \citep{cluster:Hubert+Arabie:1995}. 
\label{SUMT} Function \code{ls\_fit\_ultrametric()} follows \cite{cluster:DeSoete:1986} to use an SUMT \citep[Sequential Unconstrained Minimization Technique;][]{cluster:Fiacco+McCormick:1968} approach in turn simplifying the suggestions in \cite{cluster:Carroll+Pruzansky:1980}. Let $L(u)$ be the function to be minimized over all $u$ in some constrained set $\mathcal{U}$---in our case, $L(u) = \sum (d_{ij}-u_{ij})^2$ is the least squares criterion, and $\mathcal{U}$ is the set of all ultrametrics $u$. One iteratively minimizes $L(u) + \rho_k P(u)$, where $P(u)$ is a non-negative function penalizing violations of the constraints such that $P(u)$ is zero iff $u \in \mathcal{U}$. The $\rho$ values are increased according to the rule $\rho_{k+1} = q \rho_k$ for some constant $q > 1$, until convergence is obtained in the sense that e.g.\ the Euclidean distance between successive solutions $u_k$ and $u_{k+1}$ is small enough. Optionally, the final $u_k$ is then suitably projected onto $\mathcal{U}$. For \code{ls\_fit\_ultrametric()}, we obtain the starting value $u_0$ by \dQuote{random shaking} of the given dissimilarity object, and use the penalty function $P(u) = \sum_{\Omega} (u_{ij} - u_{jk}) ^ 2$, where $\Omega$ contains all triples $i, j, k$ for which $u_{ij} \le \min(u_{ik}, u_{jk})$ and $u_{ik} \ne u_{jk}$, i.e., for which $u$ violates the ultrametric constraints. The unconstrained minimizations are carried out using either \code{optim()} or \code{nlm()} in base package~\pkg{stats}, with analytic gradients given in \cite{cluster:Carroll+Pruzansky:1980}. This ``works'', even though we note however that $P$ is not even a continuous function, which seems to have gone unnoticed in the literature! (Consider an ultrametric $u$ for which $u_{ij} = u_{ik} < u_{jk}$ for some $i, j, k$ and define $u(\delta)$ by changing the $u_{ij}$ to $u_{ij} + \delta$. 
For $u$, both $(i,j,k)$ and $(j,i,k)$ are in the violation set $\Omega$, whereas for all $\delta$ sufficiently small, only $(j,i,k)$ is in the violation set for $u(\delta)$. Hence, $\lim_{\delta\to 0} P(u(\delta)) = P(u) + (u_{ij} - u_{ik})^2$. This shows that $P$ is discontinuous at all non-constant $u$ with duplicated entries. On the other hand, it is continuously differentiable at all $u$ with unique entries.) Hence, we need to turn off checking analytical gradients when using \code{nlm()} for minimization. The default optimization using conjugate gradients should work reasonably well for medium to large size problems. For \dQuote{small} ones, using \code{nlm()} is usually faster. Note that the number of ultrametric constraints is of the order $n^3$, suggesting to use the SUMT approach in favor of \code{constrOptim()} in \pkg{stats}. It should be noted that the SUMT approach is a heuristic which can not be guaranteed to find the global minimum. Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the base algorithm. \subsubsection{Extensibility} The methods provided in package~\pkg{clue} handle the partitions and hierarchies obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}~\citep{cluster:Hornik+Hothorn+Karatzoglou:2006}, \pkg{cba}~\citep{cluster:Buchta+Hahsler:2005}, \pkg{cclust}~\citep{cluster:Dimitriadou:2005}, \pkg{cluster}, \pkg{e1071}~\citep{cluster:Dimitriadou+Hornik+Leisch:2005}, \pkg{flexclust}~\citep{cluster:Leisch:2006a}, \pkg{flexmix}~\citep{cluster:Leisch:2004}, \pkg{kernlab}~\citep{cluster:Karatzoglou+Smola+Hornik:2004}, and \pkg{mclust} (and of course, \pkg{clue} itself). Extending support to other packages is straightforward, provided that clusterings are instances of classes. 
Suppose e.g.\ that a package has a function \code{glvq()} for ``generalized'' (i.e., non-Euclidean) Learning Vector Quantization which returns an object of class~\class{glvq}, in turn being a list with component \code{class\_ids} containing the class ids. To integrate this into the \pkg{clue} framework, all that is necessary is to provide the following methods. <<>>= cl_class_ids.glvq <- function(x) as.cl_class_ids(x$class_ids) is.cl_partition.glvq <- function(x) TRUE is.cl_hard_partition.glvq <- function(x) TRUE @ % $ \subsection{Cluster ensembles} Cluster ensembles are realized as lists of clusterings with additional class information. All clusterings in an ensemble must be of the same ``kind'' (i.e., either all partitions as known to \code{is.cl\_partition()}, or all hierarchies as known to \code{is.cl\_hierarchy()}, respectively), and have the same number of objects. If all clusterings are partitions, the list realizing the ensemble has class~\class{cl\_partition\_ensemble} and inherits from \class{cl\_ensemble}; if all clusterings are hierarchies, it has class~\class{cl\_hierarchy\_ensemble} and inherits from \class{cl\_ensemble}. Empty ensembles cannot be categorized according to the kind of clusterings they contain, and hence only have class~\class{cl\_ensemble}. Function \code{cl\_ensemble()} creates a cluster ensemble object from clusterings given either one-by-one, or as a list passed to the \code{list} argument. As unclassed lists could be used to represent single clusterings (in particular for results from \code{kmeans()} in versions of R prior to 2.1.0), we prefer not to assume that an unnamed given list is a list of clusterings. \code{cl\_ensemble()} verifies that all given clusterings are of the same kind, and all have the same number of objects. (By the notion of cluster ensembles, we should in principle verify that the clusterings come from the \emph{same} objects, which of course is not always possible.) 
The list representation makes it possible to use \code{lapply()} for computations on the individual clusterings in (i.e., the components of) a cluster ensemble. Available methods for cluster ensembles include those for subscripting, \code{c()}, \code{rep()}, \code{print()}, and \code{unique()}, where the last is based on a \code{unique()} method for lists added in R~2.1.1, and makes it possible to find unique and duplicated elements in cluster ensembles. The elements of the ensemble can be tabulated using \code{cl\_tabulate()}. Function \code{cl\_boot()} generates cluster ensembles with bootstrap replicates of the results of applying a \dQuote{base} clustering algorithm to a given data set. Currently, this is a rather simple-minded function with limited applicability, and mostly useful for studying the effect of (uncontrolled) random initializations of fixed-point partitioning algorithms such as \code{kmeans()} or \code{cmeans()} in package~\pkg{e1071}. To study the effect of varying control parameters or explicitly providing random starting values, the respective cluster ensemble has to be generated explicitly (most conveniently by using \code{replicate()} to create a list \code{lst} of suitable instances of clusterings obtained by the base algorithm, and using \code{cl\_ensemble(list = lst)} to create the ensemble). Resampling the training data is possible for base algorithms which can predict the class memberships of new data using \code{cl\_predict} (e.g., by classifying the out-of-bag data to their closest prototype). In fact, we believe that for unsupervised learning methods such as clustering, \emph{reweighting} is conceptually superior to resampling, and have therefore recently enhanced package~\pkg{e1071} to provide an implementation of weighted fuzzy $c$-means, and package~\pkg{flexclust} contains an implementation of weighted $k$-means. We are currently experimenting with interfaces for providing ``direct'' support for reweighting via \code{cl\_boot()}. 
\subsection{Cluster proximities} \subsubsection{Principles} Computing dissimilarities and similarities (``agreements'') between clusterings of the same objects is a key ingredient in the analysis of cluster ensembles. The ``standard'' data structures available for such proximity data (measures of similarity or dissimilarity) are classes~\class{dist} and \class{dissimilarity} in package~\pkg{cluster} (which basically, but not strictly, extends \class{dist}), and are both not entirely suited to our needs. First, they are confined to \emph{symmetric} dissimilarity data. Second, they do not provide enough reflectance. We also note that the Bioconductor package~\pkg{graph}~\citep{cluster:Gentleman+Whalen:2005} contains an efficient subscript method for objects of class~\class{dist}, but returns a ``raw'' matrix for row/column subscripting. For package~\pkg{clue}, we use the following approach. There are classes for symmetric and (possibly) non-symmetric proximity data (\class{cl\_proximity} and \class{cl\_cross\_proximity}), which, in addition to holding the numeric data, also contain a description ``slot'' (attribute), currently a character string, as a first approximation to providing more reflectance. Internally, symmetric proximity objects store the lower diagonal proximity values in a numeric vector (in row-major order), i.e., the same way as objects of class~\class{dist}; a \code{self} attribute can be used for diagonal values (in case some of these are non-zero). Symmetric proximity objects can be coerced to dense matrices using \code{as.matrix()}. It is possible to use 2-index matrix-style subscripting for symmetric proximity objects; unless this uses identical row and column indices, it results in a non-symmetric proximity object. 
This approach ``propagates'' to classes for symmetric and (possibly) non-symmetric cluster dissimilarity and agreement data (e.g., \class{cl\_dissimilarity} and \class{cl\_cross\_dissimilarity} for dissimilarity data), which extend the respective proximity classes. Ultrametric objects are implemented as symmetric proximity objects with a dissimilarity interpretation so that self-proximities are zero, and inherit from classes~\class{cl\_dissimilarity} and \class{cl\_proximity}. Providing reflectance is far from optimal. For example, if \code{s} is a similarity object (with cluster agreements), \code{1 - s} is a dissimilarity one, but the description is preserved unchanged. This issue could be addressed by providing high-level functions for transforming proximities. \label{synopsis} Cluster dissimilarities are computed via \code{cl\_dissimilarity()} with synopsis \code{cl\_dissimilarity(x, y = NULL, method = "euclidean")}, where \code{x} and \code{y} are cluster ensemble objects or coercible to such, or \code{NULL} (\code{y} only). If \code{y} is \code{NULL}, the return value is an object of class~\class{cl\_dissimilarity} which contains the dissimilarities between all pairs of clusterings in \code{x}. Otherwise, it is an object of class~\class{cl\_cross\_dissimilarity} with the dissimilarities between the clusterings in \code{x} and the clusterings in \code{y}. Formal argument \code{method} is either a character string specifying one of the built-in methods for computing dissimilarity, or a function to be taken as a user-defined method, making it reasonably straightforward to add methods. Function \code{cl\_agreement()} has the same interface as \code{cl\_dissimilarity()}, returning cluster similarity objects with respective classes~\class{cl\_agreement} and \class{cl\_cross\_agreement}. 
Built-in methods for computing dissimilarities may coincide (in which case they are transforms of each other), but do not necessarily do so, as there typically are no canonical transformations. E.g., according to needs and scientific community, agreements might be transformed to dissimilarities via $d = - \log(s)$ or the square root thereof \citep[e.g.,][]{cluster:Strehl+Ghosh:2003b}, or via $d = 1 - s$. \subsubsection{Partition proximities} When assessing agreement or dissimilarity of partitions, one needs to consider that the class ids may be permuted arbitrarily without changing the underlying partitions. For membership matrices~$M$, permuting class ids amounts to replacing $M$ by $M \Pi$, where $\Pi$ is a suitable permutation matrix. We note that the co-membership matrix $C(M) = MM'$ is unchanged by these transformations; hence, proximity measures based on co-occurrences, such as the Katz-Powell \citep{cluster:Katz+Powell:1953} or Rand \citep{cluster:Rand:1971} indices, do not explicitly need to adjust for possible re-labeling. The same is true for measures based on the ``confusion matrix'' $M' \tilde{M}$ of two membership matrices $M$ and $\tilde{M}$ which are invariant under permutations of rows and columns, such as the Normalized Mutual Information (NMI) measure introduced in \cite{cluster:Strehl+Ghosh:2003a}. Other proximity measures need to find permutations so that the classes are optimally matched, which of course in general requires exhaustive search through all $k!$ possible permutations, where $k$ is the (common) number of classes in the partitions, and thus will typically be prohibitively expensive. Fortunately, in some important cases, optimal matchings can be determined very efficiently. We explain this in detail for ``Euclidean'' partition dissimilarity and agreement (which in fact is the default measure used by \code{cl\_dissimilarity()} and \code{cl\_agreement()}). 
Euclidean partition dissimilarity \citep{cluster:Dimitriadou+Weingessel+Hornik:2002} is defined as \begin{displaymath} d(M, \tilde{M}) = \min\nolimits_\Pi \| M - \tilde{M} \Pi \| \end{displaymath} where the minimum is taken over all permutation matrices~$\Pi$, $\|\cdot\|$ is the Frobenius norm (so that $\|Y\|^2 = \trace(Y'Y)$), and $n$ is the (common) number of objects in the partitions. As $\| M - \tilde{M} \Pi \|^2 = \trace(M'M) - 2 \trace(M'\tilde{M}\Pi) + \trace(\Pi'\tilde{M}'\tilde{M}\Pi) = \trace(M'M) - 2 \trace(M'\tilde{M}\Pi) + \trace(\tilde{M}'\tilde{M})$, we see that minimizing $\| M - \tilde{M} \Pi \|^2$ is equivalent to maximizing $\trace(M'\tilde{M}\Pi) = \sum_{i,k}{\mu_{ik}\tilde{\mu}}_{i,\pi(k)}$, which for hard partitions is the number of objects with the same label in the partitions given by $M$ and $\tilde{M}\Pi$. Finding the optimal $\Pi$ is thus recognized as an instance of the \emph{linear sum assignment problem} (LSAP, also known as the weighted bipartite graph matching problem). The LSAP can be solved by linear programming, e.g., using Simplex-style primal algorithms as done by function~\code{lp.assign()} in package~\pkg{lpSolve}~\citep{cluster:Buttrey:2005}, but primal-dual algorithms such as the so-called Hungarian method can be shown to find the optimum in time $O(k^3)$ \citep[e.g.,][]{cluster:Papadimitriou+Steiglitz:1982}. Available published implementations include TOMS 548 \citep{cluster:Carpaneto+Toth:1980}, which however is restricted to integer weights and $k < 131$. One can also transform the LSAP into a network flow problem, and use e.g.~RELAX-IV \citep{cluster:Bertsekas+Tseng:1994} for solving this, as is done in package~\pkg{optmatch}~\citep{cluster:Hansen:2005}. In package~\pkg{clue}, we use an efficient C implementation of the Hungarian algorithm kindly provided to us by Walter B\"ohm, which has been found to perform very well across a wide range of problem sizes. 
\cite{cluster:Gordon+Vichi:2001} use a variant of Euclidean dissimilarity (``GV1 dissimilarity'') which is based on the sum of the squared difference of the memberships of matched (non-empty) classes only, discarding the unmatched ones (see their Example~2). This results in a measure which is discontinuous over the space of soft partitions with arbitrary numbers of classes. The partition agreement measures ``angle'' and ``diag'' (maximal cosine of angle between the memberships, and maximal co-classification rate, where both maxima are taken over all column permutations of the membership matrices) are based on solving the same LSAP as for Euclidean dissimilarity. Finally, Manhattan partition dissimilarity is defined as the minimal sum of the absolute differences of $M$ and all column permutations of $\tilde{M}$, and can again be computed efficiently by solving an LSAP. For hard partitions, both Manhattan and squared Euclidean dissimilarity give twice the \emph{transfer distance} \citep{cluster:Charon+Denoeud+Guenoche:2006}, which is the minimum number of objects that must be removed so that the implied partitions (restrictions to the remaining objects) are identical. This is also known as the \emph{$R$-metric} in \cite{cluster:Day:1981}, i.e., the number of augmentations and removals of single objects needed to transform one partition into the other, and the \emph{partition-distance} in \cite{cluster:Gusfield:2002}. Note when assessing proximity that agreements for soft partitions are always (and quite often considerably) lower than the agreements for the corresponding nearest hard partitions, unless the agreement measures are based on the latter anyways (as currently done for Rand, Katz-Powell, and NMI). 
Package~\pkg{clue} provides additional agreement measures, such as the Jaccard and Fowlkes-Mallows \citep[quite often incorrectly attributed to \cite{cluster:Wallace:1983}]{cluster:Fowlkes+Mallows:1983a} indices, and dissimilarity measures such as the ``symdiff'' and Rand distances (the latter is proportional to the metric of \cite{cluster:Mirkin:1996}) and the metrics discussed in \cite{cluster:Boorman+Arabie:1972}. One could easily add more proximity measures, such as the ``Variation of Information'' \citep{cluster:Meila:2003}. However, all these measures are rigorously defined for hard partitions only. To see why extensions to soft partitions are far from straightforward, consider e.g.\ measures based on the confusion matrix. Its entries count the cardinality of certain intersections of sets. \label{fuzzy} In a fuzzy context for soft partitions, a natural generalization would be using fuzzy cardinalities (i.e., sums of membership values) of fuzzy intersections instead. There are many possible choices for the latter, with the product of the membership values (corresponding to employing the confusion matrix also in the fuzzy case) one of them, but the minimum instead of the product being the ``usual'' choice. A similar point can be made for co-occurrences of soft memberships. We are not aware of systematic investigations of these extension issues. \subsubsection{Hierarchy proximities} Available built-in dissimilarity measures for hierarchies include \emph{Euclidean} (again, the default measure used by \code{cl\_dissimilarity()}) and Manhattan dissimilarity, which are simply the Euclidean (square root of the sum of squared differences) and Manhattan (sum of the absolute differences) dissimilarities between the associated ultrametrics. Cophenetic dissimilarity is defined as $1 - c^2$, where $c$ is the cophenetic correlation coefficient \citep{cluster:Sokal+Rohlf:1962}, i.e., the Pearson product-moment correlation between the ultrametrics. 
Gamma dissimilarity is the rate of inversions between the associated ultrametrics $u$ and $v$ (i.e., the rate of pairs $(i,j)$ and $(k,l)$ for which $u_{ij} < u_{kl}$ and $v_{ij} > v_{kl}$). This measure is a linear transformation of Kruskal's~$\gamma$. Finally, symdiff dissimilarity is the cardinality of the symmetric set difference of the sets of classes (hierarchies in the strict sense) induced by the dendrograms. Associated agreement measures are obtained by suitable transformations of the dissimilarities~$d$; for Euclidean proximities, we prefer to use $1 / (1 + d)$ rather than e.g.\ $\exp(-d)$. One should note that whereas cophenetic and gamma dissimilarities are invariant to linear transformations, Euclidean and Manhattan ones are not. Hence, if only the relative ``structure'' of the dendrograms is of interest, these dissimilarities should only be used after transforming the ultrametrics to a common range of values (e.g., to $[0,1]$). \subsection{Consensus clusterings} Consensus clusterings ``synthesize'' the information in the elements of a cluster ensemble into a single clustering. There are three main approaches to obtaining consensus clusterings \citep{cluster:Hornik:2005a,cluster:Gordon+Vichi:2001}: in the \emph{constructive} approach, one specifies a way to construct a consensus clustering. In the \emph{axiomatic} approach, emphasis is on the investigation of existence and uniqueness of consensus clusterings characterized axiomatically. The \emph{optimization} approach formalizes the natural idea of describing consensus clusterings as the ones which ``optimally represent the ensemble'' by providing a criterion to be optimized over a suitable set $\mathcal{C}$ of possible consensus clusterings. 
If $d$ is a dissimilarity measure and $C_1, \ldots, C_B$ are the elements of the ensemble, one can e.g.\ look for solutions of the problem \begin{displaymath} \sum\nolimits_{b=1}^B w_b d(C, C_b) ^ p \Rightarrow \min\nolimits_{C \in \mathcal{C}}, \end{displaymath} for some $p \ge 0$, i.e., as clusterings~$C^*$ minimizing weighted average dissimilarity powers of order~$p$. Analogously, if a similarity measure is given, one can look for clusterings maximizing weighted average similarity powers. Following \cite{cluster:Gordon+Vichi:1998}, an above $C^*$ is referred to as (weighted) \emph{median} or \emph{medoid} clustering if $p = 1$ and the optimum is sought over the set of all possible base clusterings, or the set $\{ C_1, \ldots, C_B \}$ of the base clusterings, respectively. For $p = 2$, we have \emph{least squares} consensus clusterings (generalized means). For computing consensus clusterings, package~\pkg{clue} provides function \code{cl\_consensus()} with synopsis \code{cl\_consensus(x, method = NULL, weights = 1, control = list())}. This allows (similar to the functions for computing cluster proximities, see Section~\ref{synopsis} on Page~\pageref{synopsis}) argument \code{method} to be a character string specifying one of the built-in methods discussed below, or a function to be taken as a user-defined method (taking an ensemble, the case weights, and a list of control parameters as its arguments), again making it reasonably straightforward to add methods. In addition, function~\code{cl\_medoid()} can be used for obtaining medoid partitions (using, in principle, arbitrary dissimilarities). Modulo possible differences in the case of ties, this gives the same results as (the medoid obtained by) \code{pam()} in package~\pkg{cluster}. If all elements of the ensemble are partitions, package~\pkg{clue} provides algorithms for computing soft least squares consensus partitions for weighted Euclidean, GV1 and co-membership dissimilarities. 
Let $M_1, \ldots, M_B$ and $M$ denote the membership matrices of the elements of the ensemble and their sought least squares consensus partition, respectively. For Euclidean dissimilarity, we need to find \begin{displaymath} \sum_b w_b \min\nolimits_{\Pi_b} \| M - M_b \Pi_b \|^2 \Rightarrow \min\nolimits_M \end{displaymath} over all membership matrices (i.e., stochastic matrices) $M$, or equivalently, \begin{displaymath} \sum_b w_b \| M - M_b \Pi_b \|^2 \Rightarrow \min\nolimits_{M, \Pi_1, \ldots, \Pi_B} \end{displaymath} over all $M$ and permutation matrices $\Pi_1, \ldots, \Pi_B$. Now fix the $\Pi_b$ and let $\bar{M} = s^{-1} \sum_b w_b M_b \Pi_b$ be the weighted average of the $M_b \Pi_b$, where $s = \sum_b w_b$. Then \begin{eqnarray*} \lefteqn{\sum_b w_b \| M - M_b \Pi_b \|^2} \\ &=& \sum_b w_b (\|M\|^2 - 2 \trace(M' M_b \Pi_b) + \|M_b\Pi_b\|^2) \\ &=& s \|M\|^2 - 2 s \trace(M' \bar{M}) + \sum_b w_b \|M_b\|^2 \\ &=& s (\|M - \bar{M}\|^2) + \sum_b w_b \|M_b\|^2 - s \|\bar{M}\|^2 \end{eqnarray*} Thus, as already observed in \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} and \cite{cluster:Gordon+Vichi:2001}, for fixed permutations $\Pi_b$ the optimal soft $M$ is given by $\bar{M}$. The optimal permutations can be found by minimizing $- s \|\bar{M}\|^2$, or equivalently, by maximizing \begin{displaymath} s^2 \|\bar{M}\|^2 = \sum_{\beta, b} w_\beta w_b \trace(\Pi_\beta'M_\beta'M_b\Pi_b). 
\end{displaymath} With $U_{\beta,b} = w_\beta w_b M_\beta' M_b$ we can rewrite the above as \begin{displaymath} \sum_{\beta, b} w_\beta w_b \trace(\Pi_\beta'M_\beta'M_b\Pi_b) = \sum_{\beta,b} \sum_{j=1}^k [U_{\beta,b}]_{\pi_\beta(j), \pi_b(j)} =: \sum_{j=1}^k c_{\pi_1(j), \ldots, \pi_B(j)} \end{displaymath} This is an instance of the \emph{multi-dimensional assignment problem} (MAP), which, contrary to the LSAP, is known to be NP-hard \citep[e.g., via reduction to 3-DIMENSIONAL MATCHING,][]{cluster:Garey+Johnson:1979}, and can e.g.\ be approached using randomized parallel algorithms \citep{cluster:Oliveira+Pardalos:2004}. Branch-and-bound approaches suggested in the literature \citep[e.g.,][]{cluster:Grundel+Oliveira+Pardalos:2005} are unfortunately computationally infeasible for ``typical'' sizes of cluster ensembles ($B \ge 20$, maybe even in the hundreds). Package~\pkg{clue} provides two heuristics for (approximately) finding the soft least squares consensus partition for Euclidean dissimilarity. Method \code{"DWH"} of function \code{cl\_consensus()} is an extension of the greedy algorithm in \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} which is based on a single forward pass through the ensemble which in each step chooses the ``locally'' optimal $\Pi$. Starting with $\tilde{M}_1 = M_1$, $\tilde{M}_b$ is obtained from $\tilde{M}_{b-1}$ by optimally matching $M_b \Pi_b$ to this, and taking a weighted average of $\tilde{M}_{b-1}$ and $M_b \Pi_b$ in a way that $\tilde{M}_b$ is the weighted average of the first~$b$ $M_\beta \Pi_\beta$. This simple approach could be further enhanced via back-fitting or several passes, in essence resulting in an ``on-line'' version of method \code{"SE"}. This, in turn, is a fixed-point algorithm, which iterates between updating $M$ as the weighted average of the current $M_b \Pi_b$, and determining the $\Pi_b$ by optimally matching the current $M$ to the individual $M_b$. 
Finally, method \code{"GV1"} implements the fixed-point algorithm for the ``first model'' in \cite{cluster:Gordon+Vichi:2001}, which gives least squares consensus partitions for GV1 dissimilarity.
In the above, we implicitly assumed that all partitions in the ensemble as well as the sought consensus partition have the same number of classes.
The more general case can be dealt with through suitable ``projection'' devices.
When using co-membership dissimilarity, the least squares consensus partition is determined by minimizing
\begin{eqnarray*}
  \lefteqn{\sum_b w_b \|MM' - M_bM_b'\|^2} \\
  &=& s \|MM' - \bar{C}\|^2 + \sum_b w_b \|M_bM_b'\|^2 - s \|\bar{C}\|^2
\end{eqnarray*}
over all membership matrices~$M$, where now $\bar{C} = s^{-1} \sum_b C(M_b) = s^{-1} \sum_b M_bM_b'$ is the weighted average co-membership matrix of the ensemble.
This corresponds to the ``third model'' in \cite{cluster:Gordon+Vichi:2001}.
Method \code{"GV3"} of function \code{cl\_consensus()} provides a SUMT approach (see Section~\ref{SUMT} on Page~\pageref{SUMT}) for finding the minimum.
We note that this strategy could more generally be applied to consensus problems of the form
\begin{displaymath}
  \sum_b w_b \|\Phi(M) - \Phi(M_b)\|^2 \Rightarrow \min\nolimits_M,
\end{displaymath}
which are equivalent to minimizing $\|\Phi(M) - \bar{\Phi}\|^2$, with $\bar{\Phi}$ the weighted average of the $\Phi(M_b)$.
This includes e.g.\ the case where generalized co-memberships are defined by taking the ``standard'' fuzzy intersection of co-incidences, as discussed in Section~\ref{fuzzy} on Page~\pageref{fuzzy}.
Package~\pkg{clue} currently does not provide algorithms for obtaining \emph{hard} consensus partitions, as e.g.\ done in \cite{cluster:Krieger+Green:1999} using Rand proximity.
It seems ``natural'' to extend the methods discussed above to include a constraint on softness, e.g., on the partition coefficient PC (see Section~\ref{PC} on Page~\pageref{PC}).
For Euclidean dissimilarity, straightforward Lagrangian computations show that the constrained minima are of the form $\bar{M}(\alpha) = \alpha \bar{M} + (1 - \alpha) E$, where $E$ is the ``maximally soft'' membership with all entries equal to $1/k$, $\bar{M}$ is again the weighted average of the $M_b\Pi_b$ with the $\Pi_b$ solving the underlying MAP, and $\alpha$ is chosen such that $PC(\bar{M}(\alpha))$ equals a prescribed value. As $\alpha$ increases (even beyond one), softness of the $\bar{M}(\alpha)$ decreases. However, for $\alpha^* > 1 / (1 - k\mu^*)$, where $\mu^*$ is the minimum of the entries of $\bar{M}$, the $\bar{M}(\alpha)$ have negative entries, and are no longer feasible membership matrices. Obviously, the non-negativity constraints for the $\bar{M}(\alpha)$ eventually put restrictions on the admissible $\Pi_b$ in the underlying MAP. Thus, such a simple relaxation approach to obtaining optimal hard partitions is not feasible. For ensembles of hierarchies, \code{cl\_consensus()} provides a built-in method (\code{"cophenetic"}) for approximately minimizing average weighted squared Euclidean dissimilarity \begin{displaymath} \sum_b w_b \| U - U_b \|^2 \Rightarrow \min\nolimits_U \end{displaymath} over all ultrametrics~$U$, where $U_1, \ldots, U_B$ are the ultrametrics corresponding to the elements of the ensemble. This is of course equivalent to minimizing $\| U - \bar{U} \|^2$, where $\bar{U} = s^{-1} \sum_b w_b U_b$ is the weighted average of the $U_b$. The SUMT approach provided by function \code{ls\_fit\_ultrametric()} (see Section~\ref{SUMT} on Page~\pageref{SUMT}) is employed for finding the sought weighted least squares consensus hierarchy. In addition, method \code{"majority"} obtains a consensus hierarchy from an extension of the majority consensus tree of \cite{cluster:Margush+McMorris:1981}, which minimizes $L(U) = \sum_b w_b d(U_b, U)$ over all ultrametrics~$U$, where $d$ is the symmetric difference dissimilarity. 
Clearly, the available methods use heuristics for solving hard optimization problems, and cannot be guaranteed to find a global optimum. Standard practice would recommend to use the best solution found in ``sufficiently many'' replications of the methods. Alternative recent approaches to obtaining consensus partitions include ``Bagged Clustering'' \citep[provided by \code{bclust()} in package~\pkg{e1071}]{cluster:Leisch:1999}, the ``evidence accumulation'' framework of \cite{cluster:Fred+Jain:2002}, the NMI optimization and graph-partitioning methods in \cite{cluster:Strehl+Ghosh:2003a}, ``Bagged Clustering'' as in \cite{cluster:Dudoit+Fridlyand:2003}, and the hybrid bipartite graph formulation of \cite{cluster:Fern+Brodley:2004}. Typically, these approaches are constructive, and can easily be implemented based on the infrastructure provided by package~\pkg{clue}. Evidence accumulation amounts to standard hierarchical clustering of the average co-membership matrix. Procedure~BagClust1 of \cite{cluster:Dudoit+Fridlyand:2003} amounts to computing $B^{-1} \sum_b M_b\Pi_b$, where each $\Pi_b$ is determined by optimal Euclidean matching of $M_b$ to a fixed reference membership $M_0$. In the corresponding ``Bagged Clustering'' framework, $M_0$ and the $M_b$ are obtained by applying the base clusterer to the original data set and bootstrap samples from it, respectively. This is implemented as method \code{"DFBC1"} of \code{cl\_bag()} in package~\pkg{clue}. Finally, the approach of \cite{cluster:Fern+Brodley:2004} solves an LSAP for an asymmetric cost matrix based on object-by-all-classes incidences. \subsection{Cluster partitions} To investigate the ``structure'' in a cluster ensemble, an obvious idea is to start clustering the clusterings in the ensemble, resulting in ``secondary'' clusterings \citep{cluster:Gordon+Vichi:1998, cluster:Gordon:1999}. 
This can e.g.\ be performed by using \code{cl\_dissimilarity()} (or \code{cl\_agreement()}) to compute a dissimilarity matrix for the ensemble, and feed this into a dissimilarity-based clustering algorithm (such as \code{pam()} in package~\pkg{cluster} or \code{hclust()} in package~\pkg{stats}). (One can even use \code{cutree()} to obtain hard partitions from hierarchies thus obtained.) If prototypes (``typical clusterings'') are desired for partitions of clusterings, they can be determined post-hoc by finding suitable consensus clusterings in the classes of the partition, e.g., using \code{cl\_consensus()} or \code{cl\_medoid()}. Package~\pkg{clue} additionally provides \code{cl\_pclust()} for direct prototype-based partitioning based on minimizing criterion functions of the form $\sum w_b u_{bj}^m d(x_b, p_j)^e$, the sum of the case-weighted membership-weighted $e$-th powers of the dissimilarities between the elements~$x_b$ of the ensemble and the prototypes~$p_j$, for suitable dissimilarities~$d$ and exponents~$e$. (The underlying feature spaces are that of membership matrices and ultrametrics, respectively, for partitions and hierarchies.) Parameter~$m$ must not be less than one and controls the softness of the obtained partitions, corresponding to the \dQuote{fuzzification parameter} of the fuzzy $c$-means algorithm. For $m = 1$, a generalization of the Lloyd-Forgy variant \citep{cluster:Lloyd:1957, cluster:Forgy:1965, cluster:Lloyd:1982} of the $k$-means algorithm is used, which iterates between reclassifying objects to their closest prototypes, and computing new prototypes as consensus clusterings for the classes. \citet{cluster:Gaul+Schader:1988} introduced this procedure for \dQuote{Clusterwise Aggregation of Relations} (with the same domains), containing equivalence relations, i.e., hard partitions, as a special case. 
For $m > 1$, a generalization of the fuzzy $c$-means recipe \citep[e.g.,][]{cluster:Bezdek:1981} is used, which alternates between computing optimal memberships for fixed prototypes, and computing new prototypes as the suitably weighted consensus clusterings for the classes. This procedure is repeated until convergence occurs, or the maximal number of iterations is reached. Consensus clusterings are computed using (one of the methods provided by) \code{cl\_consensus}, with dissimilarities~$d$ and exponent~$e$ implied by method employed, and obtained via a registration mechanism. The default methods compute Least Squares Euclidean consensus clusterings, i.e., use Euclidean dissimilarity~$d$ and $e = 2$. \section{Examples} \label{sec:examples} \subsection{Cassini data} \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} and \cite{cluster:Leisch:1999} use Cassini data sets to illustrate how e.g.\ suitable aggregation of base $k$-means results can reveal underlying non-convex structure which cannot be found by the base algorithm. Such data sets contain points in 2-dimensional space drawn from the uniform distribution on 3 structures, with the two ``outer'' ones banana-shaped and the ``middle'' one a circle, and can be obtained by function~\code{mlbench.cassini()} in package~\pkg{mlbench}~\citep{cluster:Leisch+Dimitriadou:2005}. Package~\pkg{clue} contains the data sets \code{Cassini} and \code{CKME}, which are an instance of a 1000-point Cassini data set, and a cluster ensemble of 50 $k$-means partitions of the data set into three classes, respectively. The data set is shown in Figure~\ref{fig:Cassini}. <>= data("Cassini") plot(Cassini$x, col = as.integer(Cassini$classes), xlab = "", ylab = "") @ % $ \begin{figure} \centering <>= <> @ % \caption{The Cassini data set.} \label{fig:Cassini} \end{figure} Figure~\ref{fig:CKME} gives a dendrogram of the Euclidean dissimilarities of the elements of the $k$-means ensemble. 
<>= data("CKME") plot(hclust(cl_dissimilarity(CKME)), labels = FALSE) @ % \begin{figure} \centering <>= <> @ % \caption{A dendrogram of the Euclidean dissimilarities of 50 $k$-means partitions of the Cassini data into 3 classes.} \label{fig:CKME} \end{figure} We can see that there are large groups of essentially identical $k$-means solutions. We can gain more insight by inspecting representatives of these three groups, or by computing the medoid of the ensemble <<>>= m1 <- cl_medoid(CKME) table(Medoid = cl_class_ids(m1), "True Classes" = Cassini$classes) @ % $ and inspecting it (Figure~\ref{fig:Cassini-medoid}): <>= plot(Cassini$x, col = cl_class_ids(m1), xlab = "", ylab = "") @ % $ \begin{figure} \centering <>= <> @ % \caption{Medoid of the Cassini $k$-means ensemble.} \label{fig:Cassini-medoid} \end{figure} Flipping this solution top-down gives a second ``typical'' partition. We see that the $k$-means base clusterers cannot resolve the underlying non-convex structure. For the least squares consensus of the ensemble, we obtain <<>>= set.seed(1234) m2 <- cl_consensus(CKME) @ % where here and below we set the random seed for reproducibility, noting that one should really use several replicates of the consensus heuristic. This consensus partition has confusion matrix <<>>= table(Consensus = cl_class_ids(m2), "True Classes" = Cassini$classes) @ % $ and class details as displayed in Figure~\ref{fig:Cassini-mean}: <>= plot(Cassini$x, col = cl_class_ids(m2), xlab = "", ylab = "") @ % $ \begin{figure} \centering <>= <> @ % \caption{Least Squares Consensus of the Cassini $k$-means ensemble.} \label{fig:Cassini-mean} \end{figure} This has drastically improved performance, and almost perfect recovery of the two outer shapes. In fact, \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} show that almost perfect classification can be obtained by suitable combinations of different base clusterers ($k$-means, fuzzy $c$-means, and unsupervised fuzzy competitive learning). 
\subsection{Gordon-Vichi macroeconomic data}
\citet[Table~1]{cluster:Gordon+Vichi:2001} provide soft partitions of 21 countries based on macroeconomic data for the years 1975, 1980, 1985, 1990, and 1995.
These partitions were obtained using fuzzy $c$-means on measurements of the following variables: the annual per capita gross domestic product (GDP) in USD (converted to 1987 prices); the percentage of GDP provided by agriculture; the percentage of employees who worked in agriculture; and gross domestic investment, expressed as a percentage of the GDP.
Table~5 in \cite{cluster:Gordon+Vichi:2001} gives 3-class consensus partitions obtained by applying their models 1, 2, and 3 and the approach in \cite{cluster:Sato+Sato:1994}.
The partitions and consensus partitions are available in data sets \code{GVME} and \code{GVME\_Consensus}, respectively.
We compare the results of \cite{cluster:Gordon+Vichi:2001} using GV1 dissimilarities (model 1) to ours as obtained by \code{cl\_consensus()} with method \code{"GV1"}.
<<>>=
data("GVME")
GVME
set.seed(1)
m1 <- cl_consensus(GVME, method = "GV1",
                   control = list(k = 3, verbose = TRUE))
@ %
This results in a soft partition with average squared GV1 dissimilarity (the criterion function to be optimized by the consensus partition) of
<<>>=
mean(cl_dissimilarity(GVME, m1, "GV1") ^ 2)
@ %
We compare this to the consensus solution given in \cite{cluster:Gordon+Vichi:2001}:
<<>>=
data("GVME_Consensus")
m2 <- GVME_Consensus[["MF1/3"]]
mean(cl_dissimilarity(GVME, m2, "GV1") ^ 2)
table(CLUE = cl_class_ids(m1), GV2001 = cl_class_ids(m2))
@ %
Interestingly, we are able to obtain a ``better'' solution, which however agrees with the one reported in the literature with respect to their nearest hard partitions.
For the 2-class consensus partition, we obtain <<>>= set.seed(1) m1 <- cl_consensus(GVME, method = "GV1", control = list(k = 2, verbose = TRUE)) @ which is slightly better than the solution reported in \cite{cluster:Gordon+Vichi:2001} <<>>= mean(cl_dissimilarity(GVME, m1, "GV1") ^ 2) m2 <- GVME_Consensus[["MF1/2"]] mean(cl_dissimilarity(GVME, m2, "GV1") ^ 2) @ but in fact agrees with it apart from rounding errors: <<>>= max(abs(cl_membership(m1) - cl_membership(m2))) @ It is interesting to compare these solutions to the Euclidean 2-class consensus partition for the GVME ensemble: <<>>= m3 <- cl_consensus(GVME, method = "GV1", control = list(k = 2, verbose = TRUE)) @ This is markedly different from the GV1 consensus partition <<>>= table(GV1 = cl_class_ids(m1), Euclidean = cl_class_ids(m3)) @ with countries <<>>= rownames(m1)[cl_class_ids(m1) != cl_class_ids(m3)] @ % classified differently, being with the ``richer'' class for the GV1 and the ``poorer'' for the Euclidean consensus partition. (In fact, all these countries end up in the ``middle'' class for the 3-class GV1 consensus partition.) \subsection{Rosenberg-Kim kinship terms data} \cite{cluster:Rosenberg+Kim:1975} describe an experiment where perceived similarities of the kinship terms were obtained from six different ``sorting'' experiments. In one of these, 85 female undergraduates at Rutgers University were asked to sort 15 English terms into classes ``on the basis of some aspect of meaning''. These partitions were printed in \citet[Table~7.1]{cluster:Rosenberg:1982}. Comparison with the original data indicates that the partition data have the ``nephew'' and ``niece'' columns interchanged, which is corrected in data set \code{Kinship82}. \citet[Table~6]{cluster:Gordon+Vichi:2001} provide consensus partitions for these data based on their models 1--3 (available in data set \code{Kinship82\_Consensus}). 
We compare their results using co-membership dissimilarities (model 3) to ours as obtained by \code{cl\_consensus()} with method \code{"GV3"}. <<>>= data("Kinship82") Kinship82 set.seed(1) m1 <- cl_consensus(Kinship82, method = "GV3", control = list(k = 3, verbose = TRUE)) @ % This results in a soft partition with average co-membership dissimilarity (the criterion function to be optimized by the consensus partition) of <<>>= mean(cl_dissimilarity(Kinship82, m1, "comem") ^ 2) @ % Again, we compare this to the corresponding consensus solution given in \cite{cluster:Gordon+Vichi:2001}: <<>>= data("Kinship82_Consensus") m2 <- Kinship82_Consensus[["JMF"]] mean(cl_dissimilarity(Kinship82, m2, "comem") ^ 2) @ % Interestingly, again we obtain a (this time only ``slightly'') better solution, with <<>>= cl_dissimilarity(m1, m2, "comem") table(CLUE = cl_class_ids(m1), GV2001 = cl_class_ids(m2)) @ % indicating that the two solutions are reasonably close, even though <<>>= cl_fuzziness(cl_ensemble(m1, m2)) @ % shows that the solution found by \pkg{clue} is ``softer''. \subsection{Miller-Nicely consonant phoneme confusion data} \cite{cluster:Miller+Nicely:1955} obtained the data on the auditory confusions of 16 English consonant phonemes by exposing female subjects to a series of syllables consisting of one of the consonants followed by the vowel `a' under 17 different experimental conditions. Data set \code{Phonemes} provides consonant misclassification probabilities (i.e., similarities) obtained from aggregating the six so-called flat-noise conditions in which only the speech-to-noise ratio was varied into a single matrix of misclassification frequencies. These data are used in \cite{cluster:DeSoete:1986} as an illustration of the SUMT approach for finding least squares optimal fits to dissimilarities by ultrametrics. We can reproduce this analysis as follows. 
<<>>= data("Phonemes") d <- as.dist(1 - Phonemes) @ % (Note that the data set has the consonant misclassification probabilities, i.e., the similarities between the phonemes.) <<>>= u <- ls_fit_ultrametric(d, control = list(verbose = TRUE)) @ % This gives an ultrametric~$u$ for which Figure~\ref{fig:Phonemes} plots the corresponding dendrogram, ``basically'' reproducing Figure~1 in \cite{cluster:DeSoete:1986}. <>= plot(u) @ % \begin{figure} \centering <>= <> @ % \caption{Dendrogram for least squares fit to the Miller-Nicely consonant phoneme confusion data.} \label{fig:Phonemes} \end{figure} We can also compare the least squares fit obtained to that of other hierarchical clusterings of $d$, e.g.\ those obtained by \code{hclust()}. The ``optimal''~$u$ has Euclidean dissimilarity <<>>= round(cl_dissimilarity(d, u), 4) @ % to $d$. For the \code{hclust()} results, we get <<>>= hclust_methods <- c("ward", "single", "complete", "average", "mcquitty") hens <- cl_ensemble(list = lapply(hclust_methods, function(m) hclust(d, m))) names(hens) <- hclust_methods round(sapply(hens, cl_dissimilarity, d), 4) @ % which all exhibit greater Euclidean dissimilarity to $d$ than $u$. (We exclude methods \code{"median"} and \code{"centroid"} as these do not yield valid hierarchies.) We can also compare the ``structure'' of the different hierarchies, e.g.\ by looking at the rate of inversions between them: <<>>= ahens <- c(L2opt = cl_ensemble(u), hens) round(cl_dissimilarity(ahens, method = "gamma"), 2) @ % \section{Outlook} \label{sec:outlook} Package~\pkg{clue} was designed as an \emph{extensible} environment for computing on cluster ensembles. It currently provides basic data structures for representing partitions and hierarchies, and facilities for computing on these, including methods for measuring proximity and obtaining consensus and ``secondary'' clusterings. 
Many extensions to the available functionality are possible and in fact planned (some of these enhancements were already discussed in more detail in the course of this paper). \begin{itemize} \item Provide mechanisms to generate cluster ensembles based on reweighting (assuming base clusterers allowing for case weights) the data set. \item Explore recent advances (e.g., parallelized random search) in heuristics for solving the multi-dimensional assignment problem. \item Add support for \emph{additive trees} \citep[e.g.,][]{cluster:Barthelemy+Guenoche:1991}. \item Add heuristics for finding least squares fits based on iterative projection on convex sets of constraints, see e.g.\ \cite{cluster:Hubert+Arabie+Meulman:2006} and the accompanying MATLAB code available at \url{http://cda.psych.uiuc.edu/srpm_mfiles} for using these methods (instead of SUMT approaches) to fit ultrametrics and additive trees to proximity data. \item Add an ``$L_1$ View''. Emphasis in \pkg{clue}, in particular for obtaining consensus clusterings, is on using Euclidean dissimilarities (based on suitable least squares distances); arguably, more ``robust'' consensus solutions should result from using Manhattan dissimilarities (based on absolute distances). Adding such functionality necessitates developing the corresponding structure theory for soft Manhattan median partitions. Minimizing average Manhattan dissimilarity between co-memberships and ultrametrics results in constrained $L_1$ approximation problems for the weighted medians of the co-memberships and ultrametrics, respectively, and could be approached by employing SUMTs analogous to the ones used for the $L_2$ approximations. \item Provide heuristics for obtaining \emph{hard} consensus partitions. 
\item Add facilities for tuning hyper-parameters (most prominently, the number of classes employed) and ``cluster validation'' of partitioning algorithms, as recently proposed by \cite{cluster:Roth+Lange+Braun:2002}, \cite{cluster:Lange+Roth+Braun:2004}, \cite{cluster:Dudoit+Fridlyand:2002}, and \cite{cluster:Tibshirani+Walther:2005}. \end{itemize} We are hoping to be able to provide many of these extensions in the near future. \subsubsection*{Acknowledgments} We are grateful to Walter B\"ohm for providing efficient C code for solving assignment problems. {\small \bibliographystyle{abbrvnat} \bibliography{cluster} } \end{document} clue/inst/doc/clue.R0000644000175100001440000001736414715042016014030 0ustar hornikusers### R code from vignette source 'clue.Rnw' ################################################### ### code chunk number 1: clue.Rnw:40-42 ################################################### options(width = 60) library("clue") ################################################### ### code chunk number 2: clue.Rnw:310-319 ################################################### cl_class_ids.glvq <- function(x) as.cl_class_ids(x$class_ids) is.cl_partition.glvq <- function(x) TRUE is.cl_hard_partition.glvq <- function(x) TRUE ################################################### ### code chunk number 3: Cassini-data (eval = FALSE) ################################################### ## data("Cassini") ## plot(Cassini$x, col = as.integer(Cassini$classes), ## xlab = "", ylab = "") ################################################### ### code chunk number 4: clue.Rnw:889-890 ################################################### data("Cassini") plot(Cassini$x, col = as.integer(Cassini$classes), xlab = "", ylab = "") ################################################### ### code chunk number 5: CKME (eval = FALSE) ################################################### ## data("CKME") ## plot(hclust(cl_dissimilarity(CKME)), labels = FALSE) 
###################################################
### code chunk number 6: clue.Rnw:903-904
###################################################
# Plot a dendrogram of the Euclidean dissimilarities between the
# partitions in the CKME ensemble (labels suppressed).
data("CKME")
plot(hclust(cl_dissimilarity(CKME)), labels = FALSE)


###################################################
### code chunk number 7: clue.Rnw:914-916
###################################################
# Medoid partition of the CKME ensemble, cross-tabulated against the
# true Cassini classes.
m1 <- cl_medoid(CKME)
table(Medoid = cl_class_ids(m1), "True Classes" = Cassini$classes)


###################################################
### code chunk number 8: Cassini-medoid (eval = FALSE)
###################################################
# Display-only chunk (eval = FALSE): scatter plot of the Cassini data
# colored by the medoid partition's class ids.
## plot(Cassini$x, col = cl_class_ids(m1), xlab = "", ylab = "")


###################################################
### code chunk number 9: clue.Rnw:924-925
###################################################
# Evaluated twin of the previous chunk, used to produce the figure.
plot(Cassini$x, col = cl_class_ids(m1), xlab = "", ylab = "")


###################################################
### code chunk number 10: clue.Rnw:934-936
###################################################
# Least squares consensus of the ensemble; the seed is set because the
# consensus heuristic is stochastic.
set.seed(1234)
m2 <- cl_consensus(CKME)


###################################################
### code chunk number 11: clue.Rnw:941-942
###################################################
# Confusion matrix of the consensus partition vs. the true classes.
table(Consensus = cl_class_ids(m2), "True Classes" = Cassini$classes)


###################################################
### code chunk number 12: Cassini-mean (eval = FALSE)
###################################################
# Display-only chunk (eval = FALSE): Cassini data colored by the
# consensus partition.
## plot(Cassini$x, col = cl_class_ids(m2), xlab = "", ylab = "")


###################################################
### code chunk number 13: clue.Rnw:950-951
###################################################
# Evaluated twin of the previous chunk, used to produce the figure.
plot(Cassini$x, col = cl_class_ids(m2), xlab = "", ylab = "")


###################################################
### code chunk number 14: clue.Rnw:984-989
###################################################
# 3-class GV1 consensus partition of the GVME macroeconomic ensemble.
data("GVME")
GVME
set.seed(1)
m1 <- cl_consensus(GVME, method = "GV1",
                   control = list(k = 3, verbose = TRUE))
###################################################
### code chunk number 15: clue.Rnw:993-994
###################################################
# Criterion value: average squared GV1 dissimilarity of the ensemble to
# the consensus partition m1.
mean(cl_dissimilarity(GVME, m1, "GV1") ^ 2)


###################################################
### code chunk number 16: clue.Rnw:998-1002
###################################################
# Compare with the published consensus solution ("MF1/3") on the same
# criterion, and cross-tabulate the two class assignments.
data("GVME_Consensus")
m2 <- GVME_Consensus[["MF1/3"]]
mean(cl_dissimilarity(GVME, m2, "GV1") ^ 2)
table(CLUE = cl_class_ids(m1), GV2001 = cl_class_ids(m2))


###################################################
### code chunk number 17: clue.Rnw:1009-1012
###################################################
# 2-class GV1 consensus partition (m1 is reused for the k = 2 case).
set.seed(1)
m1 <- cl_consensus(GVME, method = "GV1",
                   control = list(k = 2, verbose = TRUE))


###################################################
### code chunk number 18: clue.Rnw:1016-1019
###################################################
# Criterion values for our 2-class solution and the published one
# ("MF1/2").
mean(cl_dissimilarity(GVME, m1, "GV1") ^ 2)
m2 <- GVME_Consensus[["MF1/2"]]
mean(cl_dissimilarity(GVME, m2, "GV1") ^ 2)


###################################################
### code chunk number 19: clue.Rnw:1022-1023
###################################################
# Maximal absolute difference between the two membership matrices.
max(abs(cl_membership(m1) - cl_membership(m2)))


###################################################
### code chunk number 20: clue.Rnw:1027-1029
###################################################
# NOTE(review): the vignette prose describes m3 as the *Euclidean*
# 2-class consensus partition, but this chunk passes method = "GV1"
# (with no preceding set.seed()) -- confirm against clue.Rnw.
m3 <- cl_consensus(GVME, method = "GV1",
                   control = list(k = 2, verbose = TRUE))


###################################################
### code chunk number 21: clue.Rnw:1032-1033
###################################################
# Cross-tabulate the two 2-class solutions.
table(GV1 = cl_class_ids(m1), Euclidean = cl_class_ids(m3))


###################################################
### code chunk number 22: clue.Rnw:1036-1037
###################################################
# Countries classified differently by the two solutions.
rownames(m1)[cl_class_ids(m1) != cl_class_ids(m3)]


###################################################
### code chunk number 23: clue.Rnw:1061-1066
###################################################
# Body of chunk 23: 3-class GV3 (co-membership) consensus partition of
# the Kinship82 ensemble.
data("Kinship82")
Kinship82
set.seed(1)
m1 <- cl_consensus(Kinship82, method = "GV3",
                   control = list(k = 3, verbose = TRUE))


###################################################
### code chunk number 24: clue.Rnw:1071-1072
###################################################
# Criterion value: average squared co-membership dissimilarity of the
# ensemble to the consensus partition m1.
mean(cl_dissimilarity(Kinship82, m1, "comem") ^ 2)


###################################################
### code chunk number 25: clue.Rnw:1076-1079
###################################################
# Compare with the published consensus solution ("JMF") on the same
# criterion.
data("Kinship82_Consensus")
m2 <- Kinship82_Consensus[["JMF"]]
mean(cl_dissimilarity(Kinship82, m2, "comem") ^ 2)


###################################################
### code chunk number 26: clue.Rnw:1083-1085
###################################################
# Dissimilarity between the two solutions, plus their confusion matrix.
cl_dissimilarity(m1, m2, "comem")
table(CLUE = cl_class_ids(m1), GV2001 = cl_class_ids(m2))


###################################################
### code chunk number 27: clue.Rnw:1088-1089
###################################################
# Fuzziness of the two consensus partitions.
cl_fuzziness(cl_ensemble(m1, m2))


###################################################
### code chunk number 28: clue.Rnw:1109-1111
###################################################
# Phonemes holds similarities (misclassification probabilities), so
# 1 - Phonemes converts them to dissimilarities.
data("Phonemes")
d <- as.dist(1 - Phonemes)


###################################################
### code chunk number 29: clue.Rnw:1115-1116
###################################################
# Least squares ultrametric fit to d (SUMT approach).
u <- ls_fit_ultrametric(d, control = list(verbose = TRUE))


###################################################
### code chunk number 30: Phonemes (eval = FALSE)
###################################################
# Display-only chunk (eval = FALSE): dendrogram of the fitted
# ultrametric.
## plot(u)


###################################################
### code chunk number 31: clue.Rnw:1126-1127
###################################################
# Evaluated twin of the previous chunk, used to produce the figure.
plot(u)


###################################################
### code chunk number 32: clue.Rnw:1137-1138
###################################################
# Euclidean dissimilarity of the fit u to the data d, rounded.
round(cl_dissimilarity(d, u), 4)


###################################################
### code chunk number 33:
clue.Rnw:1141-1146 ################################################### hclust_methods <- c("ward", "single", "complete", "average", "mcquitty") hens <- cl_ensemble(list = lapply(hclust_methods, function(m) hclust(d, m))) names(hens) <- hclust_methods round(sapply(hens, cl_dissimilarity, d), 4) ################################################### ### code chunk number 34: clue.Rnw:1153-1155 ################################################### ahens <- c(L2opt = cl_ensemble(u), hens) round(cl_dissimilarity(ahens, method = "gamma"), 2) clue/inst/doc/clue.pdf0000644000175100001440000145753414715042017014411 0ustar hornikusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 5038 /Filter /FlateDecode /N 90 /First 762 >> stream x\kS9m;U %Ld'&#}@3[QwK}ttRD+)Ce*-Ue+]`M+)B%U* JYiQJ7SptA:VNDY)Q9Cd3RuU@ <ӕUP7<$TBDUȈ**eutVU F*l@!\ >NA&uVV)JJk|eNJE*@V 03S 6VcpHеd#@j!4N8dhH k@Jq@F`4ilSZAF@vt> =N,ьy @vf1.@ѓb< {f '{@l9y@ aZ< cІ!DQ[aLE|A.mx0h :jv|IP! vP/Gq3e uQvǽAd!퍫&9GuՓt{T6_~] NL>ՃP3;Ng\Wv 8M ITBiGϼԇ:>F(Nׁ{=]vk{Et0΄am\~Tw Ao|VY`{p= K1˩T\2pʗk0Z#_|mJ 6Mey^zv1_ Ge sYA ^ȵ`}.Tef&*U!]5KE) ZM) <].ugD?@L~<|A ze ף ꈴ]\Yx}8辩_b,o$?8;,KE(lgME_/ $&(|i{''zۧ!N \:)M?I8uI˦߻V!JSU 2H)H, PIg ChCcQГ# NNTO&R;1r +l kԐM.\YB2x)u$qttJM=7G(rdY_2:  a".+w6g!ķ ʟ/+7wa=xfn˻A;׼t/(_^:S~̏ߗc=?V?3~|0r ~A>aG7`\}:#||:k>:reuÚ_7;O=꒞RpYeejḺ4rnstj[/; }I˽h 9F-hF-ħ.mU^Qozc/B.x%vu3"NWj$\ȤTGs&IJ1'|̮b[fʩ;iVyR@=p'%YrƝt SBj;] ȢQI'_ NSv09˳3j8R>;N(eIpƘ0OE9%ږn&{3& ɃeHD+Rd i"aPlb3Y4#}O1w]ұļ%,G‹WtZᣂr.r g(>Czߟl{[/Q )r%u_2~$Ztv/{8A\ɣMKPp\ljC03͔MtlJLj?"KNS:hc^獺."%ȸR#Jk:1&c6{ 3[S-ڎ-HGooW"3&d֚Ą #WD|޽xN-2,3bw]F(+]5w6Gum3},cQB\y8Қf62$7zrÂMl~]-h-BdoφyEZCUR$E֪I_EqE&,]gq &Xo' K" WXX DT^ۥ 8ԋ>E8M5v cй5nrtnﷂokoOkv i,$m_@%]×lb87\AInr4nO>)1'7B 4R0oP48 J2-o r"F:&Ÿ}t,:&z sZn'iLm-5Gug=%In{[ΤFṄu*DHt jըޠ? ;^w?O- 1]&XmݬR(: ki0<c HM2inp æ@Fd2[`d!ZAIXU+JeuFTvK e$k)mwגF$isj3sӤ{;1?~H)%2VpQ$^kFLMzH! 
A6S߂ ut&uwFLKV8j#0h:펱+G]#Ӵ'?1&GQw 8-B,ҊeSВda%BI E(EHFJ''gC) ] "򛥗t}^*^@l8|C˖@_m02;iiꁂf.ɘ8pT8Zq11iBY mshOdԒ)_d-9I˺j#2"blSƱH_BI-.)DO"a=*!IZX'a0po̳i9yS}~H3%E;wN)'l-=,L㏂dľɑ]oD1@0hO+ֆ}"nH_R**gVWb9yވ`D儶g_&EQZXܼ4w̓ͳwQzj +Mp(J` r䲒˄'ZIC% c;85n5AL,tݵ^S\ۤ %o=c ם6AT+e{Fߞڧ7!hHЇ| My4> stream GPL Ghostscript 10.04.0 2024-11-13T07:15:42+01:00 2024-11-13T07:15:42+01:00 LaTeX with hyperref endstream endobj 93 0 obj << /Type /ObjStm /Length 3278 /Filter /FlateDecode /N 90 /First 826 >> stream x[ko_1؎UCJԅP""HN}ϝ]rGB}{} aEld18b`Z0)pUPJ& ;I9:c 8ϔҸUt3)=&*㙖i;4LۀHtp$e ̘HwEf%V;K)fJ3" sA*^ǜ!3\ M@*B%^KM +z)1Kk0I2I@X,(M(`hhZ<n,b!<ǀA:6E@BH٨GxҸ&xBÑIi ># d&O4d8/dՋ| f`ǬntW5~Y]}U;~;!`F+:\^SV}_Z{KLk4f_ j!Ga[lus|҈P*9]>gtN= n0ЃA 7iz0;vuJsHU֢˅2;˰xSƤBɤJ1^WI,!f4!Ȇ5k!BNnEdt {F7vt֒1yH#GB.9B+QI%;%]ˋuEmXoBHe7>0?#jTl/*xPnÖqd ;}5JF =z̨0 qC~ٖL!m/sȗ`6r/8 !rm<.jG8L?r1Ǣh9x{!5!pbYu4f`P9RGgFxTO&hʺpⳬMݫUs}ޟgwHl>_$mC&u0ې$Է4[dTDϞ>M/>=t$q\\OQVIMTo>&I말mL>ԛ_R--vkfؚ0·h\[\&:B&26HuJFgDItDI!kHNxr}%oW(.$!eT7rA?g~oz8o9;~s뀾3}Pُd ,OoPؤ)T%gy4|ۗ_5 l6r>ߋ5$!zvZ\.( V'7WIr_bi>k?y;;[]>U].tL615חRikQ U>tL:!2mO;[NI ^L^ճ!d%?U/wOΫc5uV'Sɇ%Q}}ML?m;]*aN}?Xf5M? 
- /iDdU9{DVdc{^6^L^ًm/m(+ًV8{~G XOJ++to։KAog4f8ʻ<FMR \T6~ :oTt:)0tʑ)mIF hUaQ`w#k pK+QDPv)hd h :ϭhZ`mTOiNwYkpZ0 uZ!iWY1Z<^b-'6VrVmž HCxW<,Ӫ1 FcZϢd{XwQZyhPK7¦FZ*R`R`1 u /)>:QrtLʺ;yU_Vfg;Q**eLA-JmhxW+6OHxQv{){kjmIġס<ap7Kգc­ѡ%p4 @ Jɠ٣u!rZĺ%LXEn?&I PpqX6ːS m*z@h0t͉h1# W;Ct\ޯ0zpZ(8H^?;44TI3KvnaXCmi3h\uɆܺoڶ 7`|y@QnzC 0?%G'n;4)IjwdkxFp(cxƐ Mb}C *Zt>xE}}ׂV?YMVuE7c~sPy_ ET剭(TW`Qj B*hrץIO5Puybl)5 HsGngHBFlesSLÿQG>t,< h <r&q/[aiZN?jKƕ~+)›~FҐ$ -qIC,VtMwt5[̛S]VWل_-^͔g7?.)ToTm:;@[) eѷኪvt#!|HԽ:n8Ma8wl?$tendstream endobj 184 0 obj << /Type /ObjStm /Length 4115 /Filter /FlateDecode /N 90 /First 847 >> stream x\Ys7~_1ILeS%)V첔(>@6E*$;k pHShTFe(dpG_p XĨ 1'ވPg8qu6JP8jCX8H90\*>Mi }*s/|Pd 5Q }RJX.C ރ Q˶¡vOEԞ>E-e 9 ]+ I !Qa8ڑ"CO@RJ"D`(!b4$$Cㇳ@Ѐt 4<.[PNӷ<\Hi( hh /4t4'C#ġ,hI!J1 U#mi\aG,hT@&hXNI 10 鞡3("muh8jUi("v:з,)(JHjR|.xxpxǂ oRiQ':M:+h8-xӲ>{Hgakx3ƫE-?/'qD$Hƣr&Y~~߷%5uU>'E:܍r7]߅:.ۋE bAbЙ=swuչ:wW]kr{&gr{&gl`.btY̏:Ou}yS;?g/yxɯ5|ohX[~[ ,"=~<\M/*N2'URgk=x^DϦA{0x'U7.47s01blܿE`i;2~wG;awCəTǓ) umYI~ N&V!|rt1==g3.)3I\rA"?S'Y% OGd l[Xf!@BCtWT/Rp&I` -N8/<m-x8\2fNu0B;OIKu wXMyg}~'h8t?AǴ>gp;3sO$IgU ` T Z~+ǣ⨨ hJ|KV,z77 +1M+isYi%td&ۄʛι~hAK~[ZZ2hg<}]*mq{Y=ÍYJ Vg7!4y!_*'x&OOif憶mfku>xy" }mVʹ}Fe1#yؠRoFZQw.+iʡ *.T.)\Q dz^ݎ7xy.$R mL[s!~FYF^932 LR'hB$[,n0/NLb}3a".2edB铙6j>`F{A2g^؋-!bA5ii`X42Y|tse^qwcN+MLYA&OSɳ!THR_GBbeQL%cyf05geiES̢2{,c J- e 7MsW!fY ,VwB H^ s+~g-,ƳT~i,Ff[>h˂kt9—̦I8ss;6l?fٞmgf{ٞmgs=s %6رed\`}' <;Dli0۹4WqcSuǝ6фx[M֨-AVRj ˴j΁$ģ?0 \3wp\hV#kw4xzBuU!̸,w*h弝;)%7oR+RNET:$lŒ;ᘽp'yU(+60kkn ;43_6·JtdΛ%sJjDݹT:U^?ϙT*'[IpRB6x \X^pTe.NSE_ U=ZmNk$1 1<>k@NɢYweW;{7.fZ!cs0K<=>~߽8>?MVDij63ЛO덋[Fl\6% du9K&/1l~YkD#14*X kg+Xgu8s%Cuy MPQSm SGQy3p|q/ܵgmֲy$c<š{,Cؓ#E*`Zc:_;s@5wZfk X;ת3ތR%?94My̘ g1jZ+*.m-ǵCxbW" {/R }QgYz:yor?ee_wAɥWIuTߦHr\!DvJa3\M[2?ٞQW\AXcsOa}T|k~eߎGA=u7&0*-VxniDp.UhzC;9iO&/kgXM!He"mO楱\#!ݕ^Ba; qeBʝ2ϴFթH@\}Y*ϭY[9y U1<><N IU4R#7Vۼ\& 6'Bq%pq:%pPֹh ;5HV%*QXW_8/7aZ  `U^FRhxj1J )/Q$;uIV ХU%PWՂzlo .azq61NmmQyD/Dx :v&h{xd촾zC'Fìј]>)]BUbi'`nf2 o|ϲ V!h> stream x[mo7~PoW($8I_EBeɕ6g\J%K8r3ÑAhacg\̟0'4 +LȄ&yMVkn5!dq 
88ȝdǽd c0[A6-H'G";A^Pd9oI8@M @"ms./$\B_p9B* |x 7I1&#46Zl <Ѹ"8BC"xi"d<'DlGdD$L ,cZPd)LX4dnD"jH. RpMM"m@iy9*Y T*5+Y#b4%nXq5+H 3n4zT@"Gx9!>}H.0%5Vky,P2cBWgb|Xt3j8Su:3ըҝj~Uq8\VY3X43ukc䴙͇Y~WOM%-ї$xp6#kiK+|(Ih4n[#jplI-lܠz9ϱco׷~K`[;aTNbM6B'e\lL[맟NSσYMcn٨ ;de/:HK `:Ҁx\/F_;n o:'{{e'3Aκ<pSᲗb)}#-{ЁHW.{:Ŗ7ysfF R,e B lCFHS $p 6$}N^Dom J% [8;xȄXJdoF$+9DD$}mc.鷅wl֘@9m?pGXcGt$6S'(c{v^w-w#lVa1sp}ng6h}ԘHrv`=ݯkLEq+pjvq^4 뾡tQK/؛TlQ+tFCs.3;9h nj准gj j|xݼɼƯ&[X'ixz EX}/-}Ԡc{tˠw,{8iU䦽VyʳUUyTQGUUyTQGUUy!=m;wI%MƩ&ݻ +m=X&x{v@`}Z fw3-=6d1:-²`Ny`t6yɧ:=]'MKX  &1/[TᆇnztQ 1^, f*H k+>9.9KA)V%.>+w[C}=:+ 8G0<eC)`qȕ'8NYspx g;bcBɓu2Ϡoƶ>KZYxf7Ŗ ĕpXvF {h쀎4D*cAј`ͨ )rRkWb"K\}GnIEdžnZkdn>Dnybl1+v.vkt&6+teAutn4)6JBcR`1_',)d/sB/3mqu@'$x>sT|KU`ۣ3dI)Ewh! H^?[xqbSGɀڡsh97xߥצ -/-08OLZW mq9.iJ [NenofݕkhtWa/)3X)%8Xn#%˵9O0G˹`z>L :~j ]2p=L,qM<t8Z͝M[Nu_3־f}X5ck xvmF} ?^_+]UeR>h)U9X]6es]/dz]tkue]SvnE][ a1'kί*W`n˃s=Ӕ~÷Be4~1&q7h V L'͵Cd'3?'̼j $#LAY hGfGX9FS$j"b`t|c u=tS*=e!4nzjv>&ˮ,Mp{@edR[y9Ѡ8{UFWJL\Vj:X {j+Ξ<t]fgv }n凰{?_nnn.` g Acˆ #VNvMrX9m-Ic|h0-< E-!9Hendstream endobj 366 0 obj << /Type /ObjStm /Length 2659 /Filter /FlateDecode /N 90 /First 832 >> stream x[n}W (օ7`7f$! 
G ,9}ahiy%h"faUxXc=!)T5Zh5H2tD: 5ɩ)k܂xAЧѧIP胯lb>k,P _ƷJ0-9EkĄٱ1N0i=ztTO S2B6.CxJ!,Ǔ\1z -OŅ}J(O *PZej2HY\$Ts`5c0Dj)G<`NɡvHԱ&54⤅晝{hE&@oeAj rc й&zzŨ5th0%t|$@&xRX뒔$ZSDzEDa"3 \%ci1&j(*G:D)Yw̡]̡UFC;WŔ˅ѐ)DX5u1ȴT9`W7|^\]]ބL*N|BpVFhhhhx21tc<O~{}uʬ۱y{yQ MEW5l:;\fs<ƫo^on1٫߇?o~~gá~Odc6ac6ac6aC,V,=e9o/n7A| bBq{ }p c8VY 7ݕP8ft&=6ck%ͪAԨJ04 gF7k֋D%Ceg ASY-0Q^m'F 1sUל7|b G5*b сڡyMBȁO򮀻kܪ%:?q"8fɃO O \ O!hA`aCwmh4/zNCFoc{f1?xfx<0X<N&ۜ4-Oi+k~zn&i۶2ZGG[F;c!vi4hr49 cF8&Ǎ?5>zύ/{!]G^(cC .(>'5)11;xt x1d9D3|G^ 3˓$c_9x:ˀ;ޡp,}r@FҺtf$ '$'Ȍ\qƁvp91Eޞ+`p\O I]ES'zMTkBiJcڳČM"S'*l+%`աN : 4N$2& 3 IO^]G,¶N%O?X(i@Exwf3C)RLW1g?lws{\i*ޭ<1e]RӱET@yu/xRd  Ow S̕ր.OGy/Zb'ţI.rnUђڌѝڭ{7K?4vt8#\{=,_3Vp+ünYS]> stream x}YKsd1)̈́x8k'Tk%+;C|H@rV[)D@Fb/7ou՗ͷw7}oKWҚ Kqu(7w7۝j~EvW + (k}ޤ0~9};V9lx8oVNVa߻P:rU((7;ޗasw1I rS׷ )h>x,=6x$)TQ(}pLT ;7;mR::0U=n}'dK,ưE^ƀs ',kx9Q1m Rڼ?fJA4ynSu^yZC'дʟ86(EQd"/_^*5^M{:e@ ~P@"SSJbt^t}(hcᆡ_q|hvhO_;T>)H7wMU{qFϖdKz.M]@Z݉;rm ćuD1G_zts/D dq.QW$%%7?R # `ӴK>-^wWb$U1rLzUiW׾k/!f8n{$#> \F\ ~Zs:02r%yii{*T$+|Sh'HʩDI/Z\PJ'L XL&qIS|;5zB%@7@%Xn5C S:X5MX~h㽳b[Ca!I6N9>\Dsԓwlą@"bVD|/}aTyωՁ "N\($&pS>̅!BaaC/[ř`>s<'KL"~ōR!bbZy7^hewZz/q2y|.)}]*ƺG LQU u Pœh -' ɼ 9 8f`%a;NNvp:ԯS͘ s%Y[(JX4p#<WG9;`J.Q<WThӽ&>ƈ-TÌqTރ>u<16AogYbSfP8@R @H:!nop;::jυ҅a6k"$VD[΀IT3ސ,e>㉝iLXY"|Lz&Dng܆_IAۋB2do\Bg%g: :U[" xh{,HiTdg[h@hc>O_8t3#%\"RK]#jGuCy[+;e&huLٹ,UDE }Y2cǾ͈hRpH*PT$XJ^]p)]ucj#Tx* e MriF%`Oݰ=duVczFѠ@G̉Mۙ$I?֛1.ICDho~bk;9`Nj*/hS7-s;ߝaT0%=$c5oZgލUmQ_fU [%wV(#:w'/ i0.Fvq\@Ҿ w!!u8;sJ,]La)zܔ(u^uO )w$(MU )J!TdrZXWC" Nquڼ"KH049K,mՑ!nX'nͱjK ˽H^P -"o*uNgNjo-R5 X13Qҗ(1 (@KIF|{d,W3lk.O>"[ @*t|'@6F:wPr8 yNv0CEfڹ*2[ ;v]85㸆@8mTɣƀ;./[W! 
>1fup yy/gMT(}7=q0kҧ Pb5 )=)HI|MXq9kiHqB2H:Xcr#a4x'+Nq56 -dmm} si|bmf F b/avs##O"U-"DR:3OwWmendstream endobj 458 0 obj << /Filter /FlateDecode /Length 4413 >> stream xZMoHri/9˘db`6xv0Z tKSmϯ$K,Y cXdf22>^7e!oJ7Ӈ_?H{m7q_>Zs }fiꢒ٪P?$}֮,JimƵ)Cz(o+ oq=X{qxL8m<_(8ZU^Wv~>KTEk e]7lۇ0^tY4&PNZ7;L%ٰZa9+>5VT(ke|SYYp&渲{ȲLQBVN%Yc}(;5pBAww"-3BcNK5D7J Dͩ!EYRNeg~؜f?xmX0-OR<VPa:;mKV:,.jYpN!iCʊ?4K`KP4+~J`Ax TkA\\;=6 e/ Kvu*ocZ qj `Y!,µy8Ǯ<,8ܯQTK9 Q-ms}(CZK{ע9ۘl!-x;uCvc!lkߞ՚@  6RMkwmTuYVc-MHyeiV5I 3/EI 5gzEWFj~o_욁-kN|y"n+-oʉ2I oJ!>&)!0!}-~˥N`*q mU](ImM) eL\@B0UBWhG6KWF̃,+QPV-n}?0ǛaS +gM_jQdm]?"Am{Ή=y䦖7cnVivم {-Oy,@uþX (E~(čM3YTI?-Duȋo%z4h@p96 vZGxGxdc=TZ^9o *nm?LKZ)ĐY bH4МT %a\ rA88oW6p+<\&y|"=.W#=JX8/<=vN ˃oZX98b$USkV5Y0^ݔVA))凹5^p@܋=IKi?Sx7LUv=q)EOyȽ-Xi"3"7zB`] lԧ$>ܝ:zS0 swr]hAcE8Q}mN~=r><ƻJS:c3nfl7BtM d(}hio:}t c!/ &'` MXT7TT 8+U2!46Vu n2liN/gH0Ц ڀ_qЦ[u RrG$r UV I^NTBW6C5$҂<\KV\ɄtR#]kG*&>I.èp3=mR 3ZvWR1<ԅaΛχǟ,~M!Q+Lat9ڒ|Fr.֘պ ߨ[r   (cs!pCCpr! ڼd7YYd̮C =nIB(l+TQrN2a!*DxSx1vTS^a+ 5yUaAigĺ -OgN|mU!򹖧 H\NZ(5MT<1.V7j9/:FZEU9햁L]XEpCۧ&qD*nsh=aXY˲(?X_NGc}G0tgC KPCPJ}3_ΓS8Hd&L8Ďm. &?aE82Ho!Y~"7o(TڜJAsL\dɁʢV1F:'t'W=['vRX/^G%n+b[l]Z_O,:@ zUvZifNJ^OՀ֐m6_)j<\{c_<:yzmsL]:ܑ2$ ; NR$uf0jXnpkPI%WF?Yyof9qrϲ/ [x =uK&ح/Fq_:rF2Pkˉ);ŭDeE~x.m7ٖj 72ٿx}.Kzh+e}^aKXNʾ9.iu_#o; FFkHs@ӥs\Kx>_O~EMVJ h+nmHTB$Qjm5}q8̗ǽu)gk(ޣr~fkg%*o?G_ m: $ga;WZyVmas(iyT_Zě9EB.ԣoVS͵k^D HsLk(_7Oc"S41^ka(U"E{y.&4ܵ 6*|q<&zo$>Ҩ ݦɄvt jRdARcyn t ԯ|(0pTfvRboPIa!S{ž+ ^E}"Zr>"d?$sS?ҨEF5hNS 혱Q .7=aeHt+c>[4J+:Sob'?e9kR.}r˩?^ģl Yu<D~[-t}) 2U8N\Bqy _K [_fYq>m\:!_ja?* SR5Om Y?jo5G>K\wQŃ@,o<i6t^;D%}17ā\*ڎ18J`*u0\7HW˦V6=eSg]-:Z45Jm}fĦ6biR5i+"XjJI@ul^͐*/ڞ2BϜ/P @OX_Hendstream endobj 459 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 9577 >> stream xz xSպ.{ofh DTDQ<2@۴I6&i9i66mt҉"Y9"*NQVx>4YY~BG ذdsf l`lӡ1oa`p(̧#`İÉ~!!#'.' bVcb)wA[u!?r %iҗ#$WdYwUV5qkOؐ1ios~g^`f׋3_zYs޸?i AcC6+K0. 
ig󨳦08KDY,Nniwr@CY AE Ƞ7Ã~>DYo@"JfK ,'/GjIJuk,j FʽQ)B5l +.;s[#X> F<|pEe2sf:te g'jq3tt &KPh?]0{sHR +žP55 InҬ oJ r{%uaXPFt޴z~8a݀fh,Tk()֍<o^1Aڬ][ȋɎ қ"ѿMEANQVvrN:WڬJvv))K/znJj/t]}@Ł'+e E7a>|%n(eJJFIC/Ć8Y^(Mc}=9c@ ;=%LH Fm,m3t$!pNmJ RUR @4sl;yAzw=w?SGwd? j2J]b;PH@_%X"bDžWxÊ/F jr =`;{~@h;j?փ7;! 쏞%㞣`R5o7["vp(ԥO^:M'0OzUp+1W` gE0/i)8zepmI"GS'秣iJBԮ.bX`#KaEG俽m]tqO%Y?W?$bw']$71,Z׌Y5U2g8᳟a09qэ]$WY 8Qo 4\y(af KJR@"_&2-~܀}:TJCkCٙy=qz|Ǭ~"vÊp3lOl|"hb'4ޥDt yX^8߸lp4/(pQvRAH޳f<8` fRs1*ruyg]qpKfL m}0O{kDUbd / qd޹c"}0Jdz:U-hRņJ+ܝ: b#݆J"/s]K,%07_ WFZY`7.c7-& |s+E.rz*W9U)sWq[uC.jr?a?^S [ H uwzТ+OuaKZU3B쳴lr 5uem?,-XE=+l/(֝Imy퀾︅roN/wkޱ2aݳ`ʦu(rdBHQiUfߙ6qu5ۑ(E/Z#Ʌ'.i +2mԴ(/t؝V+cZmV6ۤ܋˾ lgjK|1;:C3c0'RPjvY+_J.d@Fۻ rϧ;WEpyLώp(d\%h3]{ߝ[q#Mh|-7w?{^I3O]͆GD8H0= }c_bCv4FEr9It&5G1,ْ(L{7c{p=zɏX^-/jF[ I73 TZqqh2{ 4j&no_!WL]Bc&:V8W-ɼIj67hraeK-e̙!S̉3m1euxS ԉcǏ:u%(GUKٹR+WrZsĒ $O&˓ixh035~P @Hy;\⪔T"y'ǬHɼkw~=WY'SM\xʞ G/<ןAELw?#3CkQ`p^o=V\ul Yl@.7K!Iu5AҬ3bXW`u@P >ޥƖw#{!gc;rΌh.C[];%^7}{H|2=Z)Ta4zM>} /Ljrɋ[9ee s9| {8W*QIu0mȶV[%M˓y"IԡΟg1?`¼ƣ(gAZL^p]u1%|{15<p2?Hr5Z-\vkh箔߆$7[^842cZ6mP5}@ӭ^b;TZsTp*08kX.}M8&F;RYj0WZ.0.V^IR%IsM?gt%ʳQsJTY:zϑ)>5Z^R3>!82zP4*.SίP'Ix-ⵕJZV3GkDR"Bo;ppj1J]Q`}P}%^ :P[ETo 0 CGtjp +4sLR(ׇzv1q| *6 šNV(傘 W)-Nu*}?0W"Pʰv  uv,;ԙHFE556UZlFE&L)ikA8\}6J4\W zB_@ͥPH,6.2=mZ,8PW8 !s`@wQP I&&혮wH \xbx5u͇:ΗL&,ԕTxV^jtFNJj }+x]"A.8$ѠQF+7lj HѴ͞47~ךM hw-\uʽpIџ 'GAnf)D 8Η#c'l'W?hx$f&`Nt`0Ɋ5 m?M*j-j*\2:8.(-nH)MLɅbVVc!I*AzIfMfc缲af\鞺4.YmԱ_wA0Gwu"ZIBj.|q]H(Yw֧_! 
G롟^~&`~)Rl>4P@i:w裱Y .VԱx;$$+1Q NT{X9 3JaC/@sOi4밦ĥs=~CcgKyt (NZ>mƧy*:c˷U\w=p8z$+Cذ.(a> ?)F P{DpIG?kҁAG꨽}$d6%H5su0,U#ǤTYcv`7x|(t]*u+7v}^ +K8V9xOt]:z?-X).28QvM!\P˶vʄEhش Twt~(t 8>)\ gPoV: N;c-:,EW?gR?HlXisE ޤ43ff \/%ɕi𯻘}/Z KC]`S`Kz'GXjݾ`٫B9 /'cb0>wY0ypUiK2'hLY{&AK%uuX'8;/t (`[8-{U:1ʪp<:;CaO%46TZ[s$fXJNhK4 ĥg-]4XGRp"ZoH1)>4,B( Sޠ>8ak]'3 L|\BI599D2IBp m~j"x0;+n>h@`6֦JP/Ibk5h5ޚeUZ9=; {m*?&UBUeUpLj AmhN4t+Vf1jUEMpml-@Oſ;~bC/1΁'rerm@EZWc$vo)k"ť5 =/#ǖTJS5gXqfԿu9"ȠʌZKq|S.: WrLÈ$i&{hdkk{g\LG `t@ dp ^b0>p|^M;ZN(N┄ɉSYYڬ޸hB`GTbghxd7(T-rWT:فFFHUڥ.|,%x/ћuL|}jv(So8W΅^x`u: YuCmfz,Z9PJL`Uec X..ဓp!ob BIeQ=tz73[?f?x`R0MF:HY +MڪL"XkǢm#S4Y~Yߥ|/)/uʲM4f[s2˞+5nzٓ}L0<a"q}"Ksn^ֆ(~ ?S.X./{tro޵" pH Cc~$WT{4Z,҉3ʊZD]_v L΢bx\$b}#J`YƊHF.-N08LϯMpCe]rbKa!Dɓ, cL޴JJS9Әe hfL`+Fkݗh+\"d[mX0f|H!U죉 yB`}a'U!! [>Z˚kŵZk'5! ^a8uY egyyII)IIz*ȵZ,f:o7Ѭbu_IJdiOwJYP[Sˁ:Kmmc.W-˒)Z/cq$[$$CL䫲b O_ujZ ~FL6d?p0<|8̪ǂ+AxmX|l4;{/z T+.w,| .߬h ЬD > > @=ہ9prj>XY6[*4a[b2DHm B͍*9`}:.c0K_r#N,7&ǑRa #VIA{9 O95Viv^"ّQG!G~c\l?ʟ[$b5W[Q)- (/\' cU ZH b s.:{y㮠Ͽ0oBbUWQۄHkSka; ~ x2pS] oL:Ңug:X#T]Ub_ ɫpg7p, iօƦqP& 7x)*[[UW:=r;ĸ w{>$$bz NH~b`7v#7-f>3-D\?W1Uz?u~uh?XGֿ - ף _Z yS?u{8 `(e} lLHa$U:.uXV`Ϊ11&UJ]R=`.RިVcu hzP Yfa| : idEϬ\![͐bl&Hoyyeih-z q|(+ۈ) аf2DxEF(k VQok00 @:\S> |rWuĝЯa'X, >SWi XUp )JSIe>L9 5XhR6>Mӱ#Gk9V\I))g`Y/;{SGp=<  q0$!y ͎wֽw=O^_a+rf2 ,ov|5C{80^dH0K;i Ix&dMex2~$?'t8,Eb, @#hw p Gʎ:8 $[I󧼀*8mϵr]2K!L䀆7!Øm]4<՘H]#j?rGO}O=zTM5Acj ?&aΒ1^X ir>2P6\iڕ5E3u9+U%eNJ5>YӮQ:qP&Ŋb\qV,TZ}fϻIV؀@sYuÁNp>vuɖDL@}4 <{z4Svќpp<ƾʵKm{@n{V/,.Nא:'yoz.#+E+O+R{ H2Y;'?!8 70Ugf A,ZUcbb:ny!WͰ$8W|.0{o%nz]0+.,1G/z?"p lI %55e#[*w'f&2$@OC"Qܠ .lT@UC@ VU˲ed92KUo&/ؘp=&uGy";'<w]<\ݺ߫߯m7VGjET]]fMfvF+`H AǴ nSurlhPT:IHyf]EPf`hTendstream endobj 460 0 obj << /Filter /FlateDecode /Length 329 >> stream x]n@D{?cص%kq(JX" ¸gvlH1'if-8yL׼nt[glSݐqMvΊk;̞cw~k/^|4~(M_6Ҏߞ}oݿO~?n7F%V`e Fk`ǚXM(>7 d& 0DƐ!J LFFQ@ T3 D"a*Tr6 xLFS`g t6F?=TX(,QPFHF#Ee#FX<ݖǕFyTe#endstream endobj 461 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3244 >> stream xmyXSwo^BL0o]mgƶZ6WDEDe @@ $aQ@1 
RQX;V:kљ"C{ǹQ;ܿ .MMNΊ LHIM͊M?RrJjFfV\ILlP\M R 6Py|*LP[)jGS7ZEޢP@j=F zT/F~"3ww:9)մ3@?d;ws~ύNcM>=xF#^ryj{և^O قDfZx]$&7i %˃UkYRklGՓە6YC﨑v"|g-Jcw(Zdh"@ހUF <08]FpqⰏW4C'E4|pWvJF L& aMɱqq 1YnPaIB6͂^Xpb>hmS%‰mX'|1bdapXnB$;Lh#ш*|3@DvdQ0(}\U #OnKs&g[W86 Ec&}Eq/ ~M s[[խF/#2o&ٕ9 yDqK V2=|H,W:.b\n7/t?6; JWo PKx`NnB0}h@*$dIw(و-P%J>A9<}ьIf.xl +0} +\8KqNilQΜNөPcD`覠2V΄c}J˱t;| b/ w,Js:Z~>hhszmjuVa. g25:AVW,Qhgݡ mY^A6E m̭ʯj.. ̇QF+jzҾUʠ "PL\hP gʪktzVW8x;IV 8ۇ/v^e/ޗO6~s 2)q80_.P)FR:d[]89L?[n/!e__~@2!4֤}iW'hly r*b dNVU"&tź>D׍} Z r1.;05N$W+qtO݌1탸átĥ܅7pmȤ3֘93%N4)\Kg/k'NOn3r+,G,VZ+y{Qj2wYڋsq3-n [ GN=Da:nSV^mHrF P]-ɻr *\!'|i* n 5Uqt6%aSJsS/BG<׃IA^W]1sUKU3c%$E^[!/}kΫ4h儑w==CHؼE{K KmNfkM;d&Nl ;B#VJWFlAVYΖ r8{&:ݨEgtP(ψaͨszlYv-c-Ma*@BB--_0 79̄-R59ZG A}8衼.ZQ-T0wWQqu-Ե%pڀ[R*ǹ-‹j]=R[QipD-M3jP-ɕ!gSctA cϾnN.koFIQ"z,mdHU1,:p$`d?8|[?o FRĒ"nz9tY k UBĄP`%44P4,;j6e'4|[(:pHNQUsBG+a$8;^6e{8bwwd:-gB3}LȀtJp&,J>v;P]x) ee'B"K>^Q f~1,(bu10;w iMΚS:qOG Lfq{m{]FQn*lilNmO޵@ƪTJL_^*iL7,ݿ'[!"RWB<`mx˦pVFo)H7/`W*ۘq89|Fr&gNwv@xp 4RB8>Tq⶘d5UbεbmWX Ȩ4Ifj'؉Z+rm?.=h?4ǫF-T16@uńO oJ$M,OOO˘8n]g'0iQ/:%o󁸘X7m $Pؼi#q"Z%2]C\9'0ynkKmcD.9l =nC1E2׾+E&^N$N hzϯ0SL]YRܴ,89?ؚT7ky10VCTs%+^)q=Dŏ𕱷>}V,4j9`ɴޥ%:S0dv&4h~N^S|ߺ$hPp5d9E{^%ā4xr0*9 9k ntgendstream endobj 462 0 obj << /Filter /FlateDecode /Length 336 >> stream x]1n@E{7`q~4H%Y l˘"ϟHGzVovVqr1+/e휯mNwyhvZZ7/JM!֎)ĆXVsqmJbƨ)č#/(E1B0ǭ)ĭ1BL.)ؙB> stream xytSeH7 <&$MҔl4i !`ݬ7lɲorjj˒lɶX$@b &4&tΤv\17g2ܿ#=}["guK7o^9S v;n}~ký0>(>_;.RR2*e}C+um-UVD_oW,T4]~gCu-?VV=&Z'mm=%$Z(,z]EHT%Z,-!Z*Z&zABR4MEɒJ>rtX#yErS޷i_{?t:oЌ3Ce=JGDӾTdMN)!ˑu*_4o] qG<F5av 4Y[2R9hN 1}i?+z3O_^2_),ЎUkh8{u]ՠX 0; HﹾӉsl&mÕ6Ӿ+oOG.^X)~l|Tx2/F^}Orl6h>[9qKC ;?9w`''>޻EMV~/VTEn5<xᅕA ?2W/ye)A;)8;7|7$~V+w!TYbSfV^UvrXG>ШT6;izx t?nt.t..Ivx`=Z_k 6c9QzMiXX7 YA2?@gX6Ձz y~$|qw=R..[inMVSll!ohD eѨ,Ov{NHDwTr >oSn/؃/HvX[ve6-b;岃sI<1󡃖iK'i,&.;y|/$ M0 "2z7CwupA`aSl/m'878]Ii6 u]N(3v;&<#p'O|H3=)-jG3#&O bi\l\{22Гa\ (ƺ4{"x2pCE |6lݳmw\&Ġ֫- $nlc_r+ˊA2e3 3'ӧ&}s{9:tv\r{V6Qq1=WlzB?s|Q*=UZ4%윓"G<>> r&\kC BĮU'*Q *L6zswߌ4ohRl[a!hU#9@c)xd}74:9t&+$= |T AŦ 
L$L$II)FãbδѬp-(jr'8ڬTu8裖`v9P,L}ΚaK<áܾ6Ymh'Jڢ[>߹*ΦmNlv++LA +x`ի /Vqǹ^@ԏwebUbj%lho|H],4#~.AQW;%F_#{cDPi&H;Þ0 JNUy@5́ETcjqle!w!8'=)f"&C m]6K;D%v:~fۉd&^/+[{黒CVbzЇb*ֵ~D7Nvh$Gj&*(}[=Yv7I\WSi3Zdy_>9}~6qH"V2/"۠~{Kbgu}[7+7&@#PG"ҹtnab2=tq& &J&yRT(zDӼ=X'c(rw%Lj_4c2.̼ٴ6bgq7/lkgKݍ&.RՂ5]6#i@krJ#beݱL:XEŽt'rRϫOrݶ=_ܟN1pL-k{;ʿ+rABMv 6o`/2?z D{SOT.l|n_NN.<:afn*园[r=L_ٷPSܥ%Hە&jNs'm~܉G'6WB$Kˎu!5Ck&Bl/  )n=N#Sv m+Z ^{qg\{Q+$@*ʴ9 Qa|:)movo(Wu~?ȱ*@3FfMy T6VuG r~Mom1"6~QJu1~EwlիꟇ``## ב(Lp'd0 G. ꢵb63Gʼn,m mVxATUcUOgz[^_ЈKsdVgaA\acS~9~a=dfsXHmŀќ:r4t#~$G}}|K~^If e__ML}-OKU<=Ey e1I9¢BȥV 2\O,oG0qP&kum/y$>c ~ƨ`gEl0B!š#ܑan4pA*jZ/s01-Nvݥ.X=~w\_\uE:ӯdL7&Xح崺5H0PA㩽Dpȅ xkNһޝ4mk>bN2&aOFnz(qxx,4JDO':c iR^K2@8*݈U+aAepZ_̤7fflyڹנ#o+ToVS4anIkA  ՊeUhH4= bclA[j;s])oEz^jkqۈ\˻G3<jVMIt[MЈզjO"ۯ;w^w,čd;#=Gn؋M(+|TjЪEt?^v)ii;oC(?lTf ="9k1LI[BF鷟޼/#vJhP,8_~%ۖ+a~-׼> stream xztT !sEc9 HQA@)BI&dILId&mB Q@Q '^oYݵ L{~s=^g.JxyF NDֿ:41o45xXc0QQO )I IHJ ,H(?hpŴ陫fd%=+mlMs%߼`­R.I[*X|Oը _~bq{է^0_  b!:1XDу@ $EzG!$Q58 Czy"fK#RL>5{v[M}G%?6cC\>ޏ(x⧸:~ dpcO>d>(F̣f!ԴO>='>}xQ ow K GJy(i`Si:<6oy@Q1S,Jj.ߛU $Tg' `]XVEP+Y/,g1uB NMN@/B$<]^aaNV]0{` (4|? { 7]kL JȦՓ2ip۞moe`)( A=`,m}^@sf x{HnAHfᡭb b0G;1~T.i` G&?(6҇;FA^_ؼb)4M@8WAϐ33Թ$<fMk,:]W/n~Dؔ*@[^H٣5]=@dx(eX|+m ވMn2ٜl{ cS7#KQ%7maΑ xiICG)gH fh|:/($ ԑO YϞc/P1ㇻc %DŽ8㭭m{8\;\D!(4lz*5* }1u8JNq)k֮]y3?W\"H*ĐnOގQBPi[vLt\jU_Qڙز`ٲ* K x užcd!7je}M7fT!,[2@o.Wm4WZq8yּP"oKM`;qkpf͆$P-)Li3J[^-XƊ i tZ!-zaRPZd,7Uit&|^sꣳ˘a\{߻P睋kw{fTϘF@%a9PW˝yR[ nNoVoteȳ,#F^,/jMis8N m5=t .@qEtIN=Ŷ>]Rݖ |v׿a_^R^U Cq9BBs 5 bОe'/ i_.BpP0yț)D $mTkj0?>){i@fjjʂEK{iCI0 nτᶆN\q' t Bn̴f`9<:;ƸVc ;~p*ݵ RPElMaNlP#q-N<{tN}TLցz3\ZMT\Xb? vUZ]^Vdˇ|1an7=֌ɩ^[,,~*ds0@s]-d䧩 6C#Qb}Pޠ >+>othl2A9xd֡Z2j|]>_[ԫjemmC|vwM%`2 [ajU-+Z({F{T}&oAqtOVzp 1AKc/uMowuԷ6wuGX[SWe0 =K1PΚlmǢG'_TƢ4U(()򖖔}^?>r<4-~ݳ]*"dN6V 7l yҍg >_ټ6#}pOyw (kmpk5Yyyr%T 0`Gr6)OZ GGЪ*T Bv[5h?sY1+Lʋ+KJR8}̪Bķ]Z B<2Zd {Ǘ%mee,ea~cfQZ$`5aլPT;;Sr6w g؈ujH3Q(D!w*Gj9@-Ilq4f\&RcY>cޏz4.%PZJJW g%:EJ@@V.RA!&f =3zhᕐ}UUĹrurM$\֪6`+!lmE&uJqk5ʘ5[ ja[7Qy~_ Y/R H\&yᔣ Y. 
Ng3oOdUlvpbFF T_-\6}s]Rk'w~tVcbg "d2o͛6 Л8p+\Yp.uG#ެR>+#$0)n{I]8:K3B4 34 ̄eL嬆}3/DhɂMNИ.t4ilpڇMG)aj\^FgoBsk/6[5f5FAZ/%r]߻\33Xʚj;Yi*֊[S Yh0̘Au@(_YA^Y2:՟^ .=dK",śd2yM>J9zʴ!٣.AGC<ӡ4'g4|N"cVxE|P?qnl -*#-K"V v_cSp*z.Eihw= VvUi&/UOeNJYqc#cА0N#kf9`.@D"GE<'#6:GϟT7߭tYb=.э,Jj桕Zsfr!Zk&YC$.6[o~%?hGH9yp*5zU?޿;|M;p]z3/iYcb( 졮_8tK+m2cf9Ȕr CĠڴkMۀ>l>$LRW?.?^ zsW{$c=IQ&j%:vz\^ YAtAg:JGs& ,y Z5Ke)j[Qťٴ+^|:{NKwuLvn=mWN*ĩ)MLmcq¾>/-+p: 2A-xT3Z )\Ca(Ty:mSg(B͗V2%jZ"v[r%a坌>ݡL�EuO ߈&4xE}Z CP;i ZYUTcaA8?scM,쯷:`m Z!`pWu3-;Ȳv'*  &,3D&BEE AK:x4:efWVW4=͗ӦmU3TW7$46%^*xVE W&0RQ\=ৃ~jr{+]R[[R{E 7cJ k#ߚg 8 l@Q<1x>qJ0*Ыq& 6M;&Xq{=Xa{#ٕsp<< rU õuUa[U#KG!{Kx!_4X|_UWK8&LȘ2(֣~R_ Q*.C[ OC˧eto㳺\gdp5\~η,İ`f+sqXþq[_Grx>൝ kk]b)Rk3ʲ6줂`5H{GKq/mT6A^Pt[\`Sɮ&xhi)pEژ|,  ^}T3ݻ#eip nڳ#x UMm;F>Ϭ!AE)x>Q4]G4t|Al;dR.)?uG7*q+Fz.ea~FI.tڞG_ ݠ\i:x2N$ j=tP_?yTf3ޞpZmбU1jc%DЌ $c_t<>M?A1/6DMP]P+` J"@ _~ l wH;yђ:Y;\ɄLn [L#iѺn@q|+ռ4vjr3쌎! cV|!yoҕ1 ܵ}6чGNy~㈴`nmmueCX\) a80cW?|d Gz"ߤUjhHKh"O!@DzUqAcyHvWEZqYz80̽'/ar˩M{擉?G>L2oVuy[S) ꂺ28߰tΰfǍSXcC16z+(lKzT۴"_*ɏz7,+S 4y|<fx \ _pGݏ*EK6l0zջ":V(d4Uˋ]E { 74BQ/xg;^ohj͠I^iy\iVVڬ|G[\ǐ{ޟ{>  1ԛ%/!{eAy:2nnȭ~/ T{Hj8APm bY6}񴍓Vŀ q=mXʪ@=Uz> z+}˨"U` Nc=X"9ױ.KϒdBi3V-&L2rpVc[⇌kTKq[UB{<'q@&{znw{aM|+8~3Ӳf &PJ_~ o ݞ|lFVzN~j~c^JZޱcǶ٭'!\#-7W>}ʾϕ4=[/",V ׭ àjUUY_}pk 2&v0 ,#=eƛ/~+#5%g=H:6Tʯȑd5E8# 9vҜ8iYƌ́q`w8YB!48POk ۙw PhbF%iZåN{ gS]g=>RY.>['ݍWtz%{nʀ꿸)~)2Jr:x]z'!ZTnnr׻z߭cɩfǤZSWc(7VbyJcuK+cY%WA/kYְt߼οnR⇓l+I ~+'R0:5*yK2lw:!nSoa8Mf35[Џ8۩7^d]WtH+*ϭi(T\V)%]Eze-,Q:žlgRrFP6j* Sy}{75-(@6xRcI,+{}4a ;= @]ްp-+iye[8zmT> stream xW PSg>9xY[9-nvZ DD0+~Ir!!$@ Pԣ޺ :֭b_/@;'̜|}}y""f"hvT؎WXx]0ͪY8?#E,@?QO$ Y>;3G.K[5;1)/kюYB3 Bse RRwJȌ/&DID;hbCzbFAl"^"‰Ub+x" @"XS'("#NeoʬP7 7,0;ncwfd?#\:avnD#2.P aC&+\aRT͓̕0O}t?Bf)> R[٫}4sHr ݐ"c'F%*6Z&.8F$He3\,xbߩ-@$ty \4_~N$Rjfrڋ$ yPOO&Dh6lE٠hp>*, mܹAQݿ#1E9R`붫@'cPծC@hӰ> *rR=,8IȬl튿> jxsB-#a? 
O1͋ڼbi1,QnSeCaDX{@[xz*A$(I:Rd Ÿ O`V"R QxzѻBblncN6Z$%0|u'2C9<@{H+2 ~ z1zmj{ֈdnh-w~#b/un5ګi0g8Ouaқ9 ʲ2TEJ@)/-2-Ed1~0H^)RzN`Z zvozEbTG6jΕ`S-< F5D ijǣߓ ю1h38dQW)*,9(^Kkuo>rdz Rz1y) վ/t߻Z̑YݖIν2u2١Č' ݑU^XiD__Yφ7IRI$ג{sۆX[m54-?@vҤ3TLNZZT.ajoqve$U~+M5TkDrd_w>jvf>Wc\rS|'Kp l[Q%Q5Bke mmlfŽF1_w3*lޱ=8f9eUjOqL,)9v2)_ܟR4]I1VY?OB!N_nXKZ*r&vfpM8Կ 5QGbބ\o~̔IDCBj^ (T Z! )1hMڢ²Qz(= bf4ɐDbOɡ7f13HAVAAqT:MTfG'^`hпSt: ޜ%&[goJ5RV؅T:ڡjmi 6-6 FV`[>yB |n6՞jIU"{zrh'|~)bMZMXmq8G)U[Y¢Y rZ uZVJIaku6䧭;bJ? Fm ȁqWfֲ+Z#E0x8zuMAcT5UeVO su/ _<Ԓ6qe*P@K 2,UoN_Ȏ:KQjz={A$lb i{5+a fw ;WOt\3ހmz/ 36?9<`/!5aQmA1(b:Wʩ"PhixW"BA p5tb]?(D(śdG]%Ej08dJmyZ|̜x;т^|%A&'-ǪNøf'ɬ}>/ ̥sm ~SrfD׵447{{muheZlJ/oml7endstream endobj 466 0 obj << /Filter /FlateDecode /Length 3789 >> stream xZ[s~oTOl+kqU &Bdثî$-`qE{f$$Ż믿<9?ާ{当7/K]dBb)ɜbWhqܱmwMz Y٬W8v1TuV6݇ۅUsy(0zU}j8N.[/A,juUbi)br^tixyv= kb5e Yz.|q! 뎗6 d[6 N[Ha5 س> kcƷïU fjMsQoM-,^9ЗwuV1ƜVa4i,Tg+\t>etď|LV}vF`:lѤK#$,/3BgtNQ m#5(shj0ue&^UZO=m,q5U{K&AM˯v!lM0o`"G(͘o!'˦|kq7Pp߱/4o~,۾]e3xݾmJIm$Ykw_Mu}Jx?<,>'0!3~qsy7l^웸E<g7d'0Fa'EKOcw"S˖TKAD]oP]ƕ~x;ο@ ܜ<ÂeDn|S)rAWoB5p,xs[1K.{,-xN\q|t| F4Շ0,~NfTQA.@wԢF}Xh :aH-ρ1A3IIG03JSxAz#] ]]7Q*d-A.l6R6ܡ `aoWuH@%Dh?.'Rzm\ a!r#` ¾c.,&ݸ,ȉN C .p!,@܏:ą׉j5M}CḐ53S]* GMph+=+@9.$lMb@+1PMĊQ8Dj fO!$݅z@KX#iB?Ҷ*D552Ai(! 3&=*z?L=fރClaR 6TsؾW]|l*i$uՓ Y0ݍNwR±UUw%d}C.<2e5#07pt/719@ҿvH "BJQ)Ti1bXV7-iHVe ./Q"!٣z@Fi[ܡb tˏ< )" (JZ@֜\%=L9uR.%Ηm@qn[#Y1)4iI "ME6ĈI6aRrG N["LL|ֈkU>6iM`?jߖmM:I4n -Rňʯ-UK%TH袾:+~^ p/ֆ\~ rbHLCB՝%\7)It5ėWMXEzϚb Jd( p6t ȱG!y;BlN.vtt~;ƏJ}a۴0 22T>݌ܖ'>X$J ,RHd!WX2.@]o1Î ZQYSk8RupB$bqǾ@wRj]7/ށ?1Q 9вŽ1IL@}8TS5I jRx~1G ^Mpl.u4VFO8%eZqHB2šbL`v_cA yZUKuMA)hCΫ3:d$@``vBGElp`xI2Cz5MzmSiP qqiH4AP UVRvs>bBi݀4*OFTU릆 9W/ MaAQ~uBVM2B ,;#qgrؔf& V2ŏȵϘ# U&LL`qrs(쀤5 X5?/ RN8M=oi>(T0#/May6ł上E2SgdeY0腲\hnf队 ZJRb&4Q=1E0^ u[R(i<8r Q ^r"E*7UzC" pyipڷDg>l$fL7\ 4 +[T()1O 9aav] >T=z V)H!8zkBVI. 
qX@\.t!<z*9Jݒ*<NA)IQMy M,c֓f=v9Xb4 w9+IVBuѠVapx)1Khj^:V*d+S0Lc\&)9vTPۋRpX܏~ANnqb קű%LFF kD*yqA [ ^ʟֿ 5|#niFQ#BIXi74қa.,(¯["SCk;\D;GM8q) b1h h$S짍"Sj`PyN-5'PS7xS>>ZSAWI"::&T\pbOBh<'IŐendstream endobj 467 0 obj << /Filter /FlateDecode /Length 3611 >> stream xZKs뮛<%)<XDzXrQ. kXXo!_X䐈U$y{,YL?r(>[?H3g;{vڤgY9{F3ud,Mltrv{4Z'obiO,8m/Q)Q,(ϥZԋszN$0ߏWUEU5 W/Bc&CחmUοժ8X.ʨw&ml~!i\TyW?o^b9^'gfkdR'#rHڜ%/a:fȬhsgߔi72UC*uWM]lg%L[YL&ijz|W,i5o+s%Vxfx]L'57BOşڛRQbSca=YkCľXJ$e*}X<%tbujK(e_"Ejb26$q[;XW^HgkgĞr+j,ɜ$)n!MEW%7^Qk[z⦺6U[>$Cd V 3)+  S\WXrڡ8xu[vq1JdO0&תJo)k [96z[0AfBBސD!_s |CV4,a]$2*؎Ma쓀]jmr jccP:d@84E]H(TInKd Ə[Yd̆ AʕxQo3זo )!xp9lu>%p\5>M~bG^N+I 6T,V=,2*dC/UF7a>J '#:N.]d&r%4eO9mq[e ÝSr&D@[1 ^F6i*Nl^EeHJP,omV672[ᎄMRv\(KŦX-T\HZue!11;& p-t&<<Ո޴$-Aݗ!9Cp#mHrK@T+;J- lV=b{U'8NbT#t詁*4<%z=8s^.2NzeJIȓbM%Y ^DhgD@wó#˲h]8©M#3)glP r 'F> $ɠ ->\ġ'%8uȦׯ_۪Y68D6jŬ42 (\wK\U*>/ ny,RgApVBH5uN@@!U jrK}ώh)tdն@sAp3Ju9ՈMg;c+䷕lW!-g!T,0(D]}Ch'?lZArtVБ&8;rlppRӜ@ l 9ʄ*Է6 R[a8Vi; ]= +TyO +L_7і]Җe?`@y M,K \z 2Y4 &B Yg/$@LсQ&C ?!}a}hN:cAĪNډmOBa;۷0q6?L!Ů!tVp״~$w}9`Ve+x=X턲gx/7ͮsAhM(I\ Kӑ;jTXIr6ًg?`dnyI?$Rfƙ#jI Њs&F I@$`Dd Xi*C5Z3r1jL.*Mͼ}ajgѐjTw<*О,dfZjv.zOm_]Yw>Y6U_Q22~6p1q5}R]X< r87" <-,#ǖ,,_x1Ox㠋 38DC]]qJ|4TtI>XA9BE!.,N{Jj$"]n0E;\!)|S5)CE4'#}UTwWL5_JiU1Gʞ9RH)3 $72Kej[)zbcԨrOf{`18 JO jL(t&qbﳐ*)N 6 l=Dpj~XX:JqTAg$uVHsҵ `dGO^h55 ON6 s'i69%a~2޻䳛EM3$ȰP|>;SKӹOq@Ib᠕bU9i71 TԊl~~1_ ekl|hGIIxijG,*'fKr "Il5ESTv@d v-%Xr]BF#9{ B$<ѯͥcdhVD)@5Jbq;rHGWu@$>|@n01P a5 Gg :wѡx%eP4-lh=bTbc9W?5fmuF)ÆkLR$zV?ҙ"ENi%3X}~z(VY2ŚgoOQF#Dz,V])g*nJo~KF7xZ׎ LHДp9N҅w+_{ _]vK5_m7?:522$-~B&Zk0>Lʼc/AMxKp#W-ۢuP#6#B#f'+4)9N5l +šMCAsI%PKΖ2Yek_r:e[HHvyFtex擐Hx7̹zdu}_:@|1P6Ƿm|t]+GTbb0c`k ˦ߠ[c\Nq~:_Q5ms8$P.^pJĔ`-r닣R)=]gND Ʃ,ΔQ:+uLdqs -:F3pI3wI4IC^tN /loމpmy;Uɵ1w1 z+"]-j{fAAՃKR1Sʱ4NuʻqwCl۲©mݗCr`B 'lv߷eb6 K>OC|!bS 灎Xz}"_6.BU]s%[WU!!J*JR y0r6ws? 
,u9l@GG=‒i]<&7W_̍9Hܭk1 K `ʢd ee.*%o": endstream endobj 468 0 obj << /Filter /FlateDecode /Length 3686 >> stream xZMsSrq2!r{%ۑJS)H$Hiߛߐs^LR> =8qLxg=/\Fy㫛g)q,w!_:b>\/֤#%VGճk\2F*gCG+Hu9I|'gGD)k@!< Cm^4'k&F<[Y'j'VcEhDF֢kZv[UsFN&ҝdpʶLhˮ,YTw9F>b!flۈ<`V:)|`ig͢Nq(CKqw?Y5{h*0flˎU65 /YUGpmT|VM .\Z [ʇ0 N'`Ϯx[omyO1ϐz 5;]*$>5{ya,0dYk`jMNk6;:OgG'N4rr 3abhb[Lۡu>0 -f?re *jGXE{*^/GxJvڇF(mx1s|`%&rg?QHUey:ӑn%P/!yI@ruH:q_8>#dƸ<߀mC7R,H>庨)Mlx MgZA7,';gv\@m!Bΰwmn^< @'q,Ͱ(E95gIaoWm\[f!ƕx ! CxDIOx/?djC@fǙxSڗb~Wja=9Sn9'HVF A&)&@d <8LE|^:HqU!JS/`@X&U9z)BIl +JMxV[N.]וmN:1%&ܶǜ{ dnmov.a%2i07?/JvR!U7(f=Rsy=06v,8fDk>T G?u&̝u}Oٵ=gB݆ȿseʭ@}Q$ܣQiX,4lһg]~G6Wd7+&}Vt.7@x6B7 sE9IP. 3.czRc;!pJ˂p&C?#;@AfYCSzQyH*)#M"vEdx?;Ny@^K9_OBF# mYX9'{3}V?'] NGn\Ty؜qgHDwU]v]GJ}QC]И/u҅&vߙb=,>ȿZbdOJ'TSԗ|}嬲#ArNzF'F_:VIk9r3aqi':YvV%MruC\17ՠ:fBh ?Ҽmh z2v/ 0Ap=mVuO:FqP wc~&oDric_WAGiߌ5ǚ?3v~Fϐ(W46>Y_WHj/o2Oem=k㣯{^VRŢ11OK8Yԋ' L|8 wtj \#@pVwE&0dr ;?rjluSm4A#YWKf',{W ?g1G>PSn9T#DUyQ>JtT&4BtO'JL2@8mjT 47y[MnݤIM_*]0/f{W2!#=x##%,ej~hMuv[M^byO)+$2X1dLd> stream x]1n0D{B7lS`4){)r'[l1ĨieN2nڞۼ4C;M>6a֖u^>iT.cXRayͩirS񿟎8Oߣ1;]ǓXC>;]Ǔx > stream xWtg ճDoK %%@cB506r܋pmu˲en\ EcCJdCB a/ lNξs99G3swˣ|(gm;aO6ggU_"8C_g{&AH |x<ɉ)iW$ŧ% 'ygŻ2𒢨I SҗddF/"ne[پcS^>c,ZEFS!Z*OSPB*ZDMSS j :L-fPR˩ԛJj 5 ԟ(_r2vQ3ۧW[ߙ$?F}7_Y=AW8X88j!!we@ 'ep{q!ȿuȳJ f괉b*ÿ$ApOmJdi"mhӵgb%0?:3#ϟIׂЎ|.X'MR`Lwg.i:WXn W)Smuum%*7)bԆ&oHqWђNv௾VP~n4,_,~J0K1/wQeof߽6% */q keuvHNTXds', V!^h>}䛿nͪn Mj'b);q[RQaNia Ֆmk˿(Tq8 lh Ge_ڼ٪ejw2_^#nKu<ػG4v9ḎJ#7]2!փ>̫L:6KqagkGkk_<3֚jX705]ZXπbUDrLs<o6 Oz=-^'d2 `GMV4冘bV.^ARPtצO3f/bHAK*HRd\ŤA_&M7r;=יXvobQaVC˷F~čs,90`>}їܻ~+U*ӟbr{P(Ty¥ܴTe92oCsAG&oB6I-|=yfFf\?J(ח~MMoomliiwzmSؒ?gKSŖ8w{ 1r2E#ȷ6U@ 87j'(|sEץ`;` 7%n͈<1\D+Nss]iGF >)mʀ(O8bh= Pی)a)ܡSr&RבJm7;k:(WUT67 IF&+L,bf^.Mlդ=/%Ugkd:ÖMfݧm}qa#揿lCph\Q^F[n0!Ff`Gr bujHɿ{*Y2WoBo͑ݥ eÅg ~;$\:8w+l߭/+ӕ J6D]bV a6XqNS =oq䘜rrI\k_ܸи)l9)xԘ/=xw\tۜGs>^ uKD v5TJN_^3O,y'[)[_ovDdmMv&E~kRb ^c/{9?mt?L$cUP%d2!۴k7W 6Meő/lv Ӥ+ (f9/ND~ v=s W!3p6_^gv^iV ;vM=M#tKjv ,u/q׸X26Nz Fč_Mvx wMw?Nʛ_Dk~n\z'_w$|˹n_vsն 2Vzʒ X aۢ e[zy>]{z\|WsyomѾ8&rR棿﷞ 
z>P7S+XY$W @Y$WXG¡  Z[۪Ѻ"&6S@"4 ö@6I}>  ִ}[S_lV;)8H/ =׈wX%gm/sc ^^5'8ojYMW*:@8g 6(c҆Ku+[$;%\xǻ[D5:o_'3 ,M{y)np9N(- Ɨ䉒t N`Gs< ^sOD{g)] PdmZ!\2#hfo (OO~FViȟ߅( ?>mI).N%.@41W8VqI,N`HwpKbY5fr\pGV*ʠ5wYSh.])u9XAC^eRYq $2zúé%mrwtendstream endobj 471 0 obj << /Filter /FlateDecode /Length 571 >> stream x]=n@D{7~l@n\$\" S-}fFvC`$xoruun|Y_?uzYvc7]gs|۰?h]/שضaymS<][q?z> stream xy xUo@U(M Z(( BB{;I'$!@d"YtXQqӼüN5}oޝ_NsoWѷѿ`%ǍgD*OL#^ :},]D?UUkϫx}mu%ʵ  /^WBՌ5Y"ht\ٚy]n %KJ_rG?bcǍ''<&O} F BbxXLؖ_'ɖz7PR-JO2Nّ{L|'+\C}#Zv,zj]ƮEeipMf(MߍM'a2$cg%i89%x.P>I8>6J.%J.hH6]+g?=y,]N5ZaXO,KUG/PJK[['aa2^(}oׯRהeX4-Ԑ&s'q!8dfR4ܰ<:E"RQRP@A1\G3X9 {hFXԾdU~xF W&-}}S$$q$l̈́\Zʆ2.=Gӿh/U3' Fy_>ݗQϽl=cɺ7M\Ɍ+"ZCGSFt,|@~5ke5FNSz|ҿAġmt-#TLWJGUI86 Ox̷8C.KlSHN( hq:p 'ܿH|tl/! Dw6pІ /{~2 }o#~߻g|7oj1NIօ^…B(N1R_c=Nn 8d^|\8N A8+ .K fCq8id8 /#@%TzHz' RGS"QU2nˆUhc#qei޶Cq(_6\O|z}s9مy.I2ux]BjubQIj|18sS, 0'}k|BXa0"P)`hz,lCMe 8vC. ,ʆ+I4-$BHח`QP){.Z;^-?R2[ujxUJhU,*tb_C JN ׅjLS8&awe 8YDAr?dDSchuYJ,?fIϽpW6hSB-_s6lRs a %hCwY^e~`E' z!ôc~ٛ1΄dJp2 AK]G?l p΢&vuZmN޹f]H;GE5~\:T\ IO)x,Ι9EV1aتܲz{/L)ȧ8a'{vI' Oe)0P5p}fS¡ hZ)y1mҸj4=W~ssh{ޜ\hh`F0<FgK[cm"ȧ4D8nH X/T`YVK%A HC~9hџ}ދjd"Z-{Ypچ;Wġi_V_~{^XKit)ScĒ& ]N$#Nw<#b1#q9o/H)(FOQb\W\)[qYsE f]zl;*DŰg(@ 783&f# {Hj@Y*o[}P[ZiF*~wIe7JgIBUŹdLY\p$ 1\>9B' |9T>̡?@5u*o5ƅ C YiՄ_i6WGh!?r[|6柺175:AFBn*‡x(ݞ*6J)ގYa/@r  9-hfh+tD -`I8-dmd,.wְĸwe PaKP_\W*RiW-s%c4{TE.cY{ 'J  ֨{'S84嗣;|̾9bBAoܤnœj )reTb͘{88NO>ד/O4*˛hsxSgWF[xlnnP2NkFf3/n2ΟK)^M|b`5G~bI@9=XӑrP{Y{bu'Lv 8h3eq8VO0+ w&Ø8j,hR*To9IEH+rqk$VJP]vA5\MgWM&5?<%:AuZf!I2uX`u6m)ϴش.%:I#{E:`uZIB5JhΒ?Ϸ[!!G{!@HnyB85va(oD _iH_L_٤CH46}0 S x4kN-8:UzG\@}'?j:nJՒb`T%k<>Xf`=ڞ#hfo:P6il  )_+z9Re$)rv.;9-gmM&K|M1q:[HECJYAh0_oҚZ4j6%eGrjw+,^H 1|i?=?ɷ@wȧ~rITJZR~d$jNS K*U&v PWO_AHиjX,j n)i.kOmu=ۨYֶ^TJk#E(4e}anJ+(\U7!#aWa&nOXR+`Q?Uuz~4&1UGpVˡ!GȷwEw:u)u@cC\57 Mi1fnZ1%8ȑ<Jr%p.N"2Nx8wd@Dg<^^KWTj*AM#׎9$YJxq^6~zTH ]=鹏U85n4FR ^f׬,]6l$}ؾmwh(v>i4כfZdkcX=\Bժmouo[ u3`1ف aMW@D8moXZD"5X-J~}<2 1: X_TS)Q/:Gqʻ!bC(pQw;Z}7:uU_fMr 46zJ0\7oක$YiZy'DwQXKn_Ң8]qݙ.e#\\_) 
Ե`Z!ѢpE{ge6̯&Lt+?9p0&8X|DN/(/nFS~#X?':+MU m8ึIRXM"KrŇ^|kn|aRl&ւ59l>K$2 D%PQ+{=ZE'GKPmhP,+71z7JhS2.I\b8I2'Siflf?_0r!)݌WXR$s<u;00EŃnlPbqd5.f %"E1A3 UrĘ//Iq +pg2hu5&:vrfEZ+gjI 6W#/_Dw![VW1s9h{\a?.|R#/ҸˑYFP 4siiRœ)=b.dbO*~R@yI¥lA* _FY bRt_|G=:?8|6R` ݈$ 8WqˆwWG'hgB䐳L{XL:C$)N#K8HpL{Db{ > 2QeB-mIl|^2sk*6ītseCezʫw 1=A8:n -DžcalCLn_eQh2?e2oQSߌ.JShqn%X6j- s鑂 7c!x+LY:[+wd]U757R@f Cdk)҄q_ JMyRQTZ(Z,W) *ӟܯ 2nvܡ_fz={އ8?L,l埔doڲb-@ 1EM=pYY tXg& }4em-0Lsqո~a"jcei+E9 DtE!+l.*fWH 6һj-nǛ|TpѲ|/(MYɜ6gQ~t>+@,6IFڵqsn+ٖn=ؐ!qc.r`");nP'F7G+@5J#(ۙ*S?u}JgNHf| p=Űr?Gpx*o-.-dC Jfmjve @_?k,vMS{5g 44ZS%}_7#86QL-ZNwԓuTy1/9_ ?hWu@U6[SySSh4lAҿMыWH׾E 1؇|rz GVx= Q^qdd*.fD$Kk(, ,qs5ѐpakXn-X+Tj?}/nڒ+Y{ za fVO[ gV p.+f;90pccl.+O83Dܾ}Y'M^;>Q48$'u9vdSl(Xu&D,o0'35F{'; ;mPr'endstream endobj 473 0 obj << /Filter /FlateDecode /Length 338 >> stream x]n@ D|HȗCTCICcZ3uq:qˋuiˇqtokLy)ەy?A.Yqzm%8;T|偟v8麴1&k4~8Z&B@͚ΨPQqoppWQ`jDh$n$Q&B#q#QO;c4 {#.Fx9#Q@\nF9DGPW>"ry3+*3gVUfFW|>?sx[4m8[Żr(endstream endobj 474 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 10225 >> stream xzpeVG&%clp'[4ь&4Fђ%[rccrZ,챷oDײ^W]*Z/Γ\y$//ů,yu3Sܙ+"wk~zn:͇khMz䊼ILoRnhmn]ن^UWo*/H$SվT7(f*g5zxdtnKټ 7.(Xiq咪5zx#>SNW"C2_rd.BݒE{$%%K$JJ~/Y&O\rd%%$/K.!yXdd,ɫ$I̖qמnucw` LA˿YvI!— GoM-ֽO6p펵w|~wo{|?wֽg~>x̓ϫ{Dr߯kV!]_ne\lt$emKu*4Ʀ'jc<S&wv ʶ(c1[K+ Ϧ 74y#z6Eh3;¼5+L jC!7(DA&A C~}FhO;bB/㧏dh1[|^ش s6wO6/D>ɘ0tOh. Ns Pɓ[x&ʮg eK!ġ=7Q<}WCʚ5/*gӖCԓk*v}oc125>9 # O Gڎv#+zYSv* ',ꛘo(&aE];;?×~K ӧ4#2VC3:Ω[?Δ4X :.BA.9R6-M -zMZWs%f.x`Пy:~4IaGmԌϫՎfPtRPY8ry2vݝ= HQ$%D%rWV|3!tֶ3 w;8tg!7r9F.ٽv]邺zb =8upL-;;I %M{=G;߅>2tTL6N4XL{\>UKԼ~LwG3Sg0Z00HՖ+5pM`BEbO]wL(wЍrO›p'Hġ̱l`r nDyBt .Ή!2"R[R2XIgg|,#a2KHŶ2dyxSi5G1>ao ¤uTlT)0U@jEoUm)gF`k{Oy 2lE˦˴즽|[BCs;u&3٥8) $$\:ڋү9B;z'Nܛҝ=D{`]m ȁSaDIr8XgхRBzEFLKodβZD M%gPMs~" tn? qٲNJi(W*ZQ>SOF Aw~C7!& Դ%ܑO@%n?\A굨9If֩Vq*IªT!relƦՁTdwЌ`ͪ%e͔="TcH/AKk:zж{øcMZL"tP3)7e݇P0jx Hnq6\hh`=D{=mhykTzHؽ8#]9#h)Y6Jb*Mes om%#(0p<ݽ[ T3@3y+f,]GuN-h!ؘV(At+3Jmx=:@_WL4З s_#|tYBG@x?K+q"4-rw%ɴ.hU٩X-P8p 1XAgOJĞ`&v9 NUaWʭ5.ͫ? ٩kok9(A,!G#EFYåhwk mERyl"AҤd&zтk#& -&(.١Y-,X{r. 
YiŸy3s6$1]QW~ƭUP 5|-٥ץ M{E᧦]ͶfZ .K#8ll^ =^o$ihVsq ;ȇ`!=5gRQǐ"qb1^6q :'e.ofGg'憐"p1MES i5LMUŦ8yuǻSeYݲqtVˏ܈(*:-òɴ$exO|8@|ʓ)w<v'$>p1fT:Vpe6l7i cEh X<DlfZUvڧ[VS&-D1tE1rwwE;Iz*OĺQ o?8}ۓdp6(JKAK4F5>lZ<S@ډU @?y'!wcɄ UQe£*sQS`=K~;0hIlY4ckoUzr΋E`koVsa wSqG$-@O>Ai[L7H%w;)5 sBwAR6k5m`Ѣ⒥3`&K~62Q om QVnX0.҂\wǏޑ%EvgE9l >5tduWleBOOlz%i Z-oY|>;{gkώ;6ؠ81\F k Mn/8fVϚ_TOy{{#{OCȈ١UcɣT;NrDyIqF-2X׻YM1v9l6LGduڈE{ />*jA3~ qȇjx?sE[rǞXy@Φvj~uPO =O} d@7B|%u5zYkZ2K F(AVzl'=;Z6.`/ֳt`b0Uٕpͅ k#^aa6 %D73ɽ=ɽx+z,=VӠS AlBQ_Ɯ5:Q"q'i[;| kt+[*g :߳2 KbK 7wqVeҺ-յTg_| [ϡQ e,1uEYBƸt(dt t4>z(^r_|jiߢo0Pm6MjglrھuE۲弣tOCDVD蹹dNmuK h+1cJ_e?}ὴK]+[K־iz? aSQ捿L:q'բˠUD=*&f rV+?@4t5&9©59j*F[yc5`1 a]@]}`&ʃHTS6BX([P[.:?QJ;G `Qw /N^Cxf&lL$w9rz.(4 Y˶} ŖbcRN#Lt=Z#26 N"IH1df.B|Lk-HaUFW=)%{몜=e]~) >]v $DKT޽iGNSQk,5TurK=]`_pF tCP84Lw.;+}x X5Z09 akvCFzrr7XB?>apSH`w>}I,  (-褑%`ߊSH;|H=/V)>NyYrOÆ#zKxxDȓ{*!Nm|CC?y0B827qp`k@ūZ0[{a! =#|!ZkO‘'ס %`=$wĝbK3v;(zv^d2: 3q<ԉh"DA$" s?l?=nH5j،6'nW\i=C-]AIt3,aݮc';޸'d_^.V'~$QÔww9cdBSPB)\p;ReȉѕQPz4$zN^XxBz*wbT-cGr'v{|̂ն?="a>C<֐鲧u\ 4h:V%;w)2gBM3ڼ)}LUoX?~3T?<oy26w S΄̜' _iD7Ș*{K (ɖ'ݵcmۆ;6P](s8؍n,N-/t9 n,4770*qj20-4g*;]@l;*f~\5qd͛XGCP&ЍФC{W%븵8C#R1K~*x?~ܛ]߉Cgu/nx^_<B\q&x@<3{U\ P<$ܑdWRuls494q eGvl #79b^!SNejf zR4YxNfǿCditġD[^mhiratp0<GRvH$&/CfgȻȐA\ыR- J5ƀq25UЂi* ZIT Z@[ Z{}uU~zӏ#7έZ@\f?S8UkLdtZG4 uәwirogԗǐDuYtob=IR] }A{P>|0LLvX EȭVB ua澎,Qt6!>L{ Z AGY.@ 26Z9 Z)+l8} w͑}q3iX/FQƵ5f5:.SS7qzO {h 9r&BP5u+Y s+i >>^in6eeeF}=SOZ V{nxYVP4u҆k[CPgNIMIVHGPJiVAmTZWSүe.nb|;ݿOyŻ Q>߽pd~;=[΢<>[_{NjDGKa32|ލhѦkծQ)Ł[&µKK4_6n&'ʳ3HC ؀m˼F+wF0 ~7޵}|t gwM㌓8>Iz>!Aă>[ޥQ@#_|ӆrh 漸%Jq8HK{z*=ݜk\$~&g-@R8ii ؃m$OGd0R_Fcyr e۝o+`9) ZZV+tvӝ68xJwIJ`jUYXuc>8-l 9YWjimjX*S"txۿ@_/u+6 ~زim-Na9cK-qSlwK@E|̋UWEys*9?nN"HE[eeIs~+ #aISOgĮc2HD|YFxqS8>G:cpH6B6s㛭2UR7kB>W/MV,XH$ɦHY m6 w#DXGɲ2Q՗tvEi V5.'3W-:E;CUA:Cf40aaQtĆdvt3>|=_UHR`t:B:rp"3.I"DlW Kc>–|`{ӗ|>f91XqjgƟgeWY k4:9bq7v=@qD؝IE󟡉o j뿶 HG?rz^:{.E½4L83 up0jQcGm!vKqzw`݈r6$ˠ[;P'kZIp(lv*aܩujQz Gqme|4(y,JPh]inֶv'|6|V F >»#;#v![`խP95"K.qmZM--2*tG}UTfbӎ=ńB:"dgCYcFWdm7oT;7L{Hs caw(ŝ"J"QPTȟuJ3)J3B 
XHZz[T)=!557"'qմD|ʓU;jT0Xy{д#>kΎѴQ.*^f urƁE.@$=px诹z~FIGYe.>J f 몄;++a9!tת;;]xO`/#?g!@Uf}ͫ-/YlXPŹH`88z>cwpg=o 9r?ƼM*Sf`)3ox^8-/O(NT4FV G?tC bws݉;k\i,`⾤'vGb d1V;UbźWL [ PjRUgV>̢P d'g++hIҽяef_kNrk5"p¬'ßFWXvL`u;xYի_AIgc\T/ģ\nh)KD%Q]Hq?}G9`ͬnatNK7>pp7o_ bdUkV 1,IG8{ah`k6<6VTXpp{dl2Z 鸾LtaHżI2JNu?q{?$ Пe,o>v6Ŷk`PGJl :z[{/9rJJyt$v߉&D!CyFY_?g*(k۽-s8NnYS*oޡi1e£!`ۅ/x>[H6g,)z 9Lvhaư2^%8Uendstream endobj 475 0 obj << /Filter /FlateDecode /Length 404 >> stream x]1n@E{7)X0ݸ$\,L\N/$> stream xX pTU!pE0ז`ϻy , Q}!$wKzKz_t dd ;HMDqGGQu74YMTW%Ͽ|- :d3&Ќ֟^=!}hQ ͭ!I7G.شY\>D%ڬP"D*+oظi7l}tb ON4噩3(jZJ^^Jרשeo9\9j5O-^&Q/R/Q Eb7j(5FGQ,u?UL=H )5I Z=\«?85GȠcd{{6a ko}Nc_~oPCn}ڻWlnSMv3ddjTԧVvd1lA 1~kbZȩjϊa]c;n[76$Sjl"pM6li5g,/ź^ B\~b h4 *DPQv :+ɳ{Ҟhz-+@2a}g]LWF)M?Յd2;{b{!٬^cE)-ҡZJ} w;,{ =>;v$cq5q _=QtC{*g<rn C6{{ sr=oD:O>g1>s8 4 k\//ox 2u`%\VѹKYxt79?dWҸĵ=`IBf;ѣtZTT*peFgg  ~3F/}عcdР~3 ʍ@I͆` tim&vT[T<'EI0̐D1 {cFgm\>ʣ"lY|O/vě;.W;db Z^դ ӜH4<\&Zm[Y2nA85ʑM<9 zZ5qŚƯ$ ~m1.*\ hI`޳}Ճ+` P v40/9{ƫL`>#lvDTc)[y` γyBp+s)rrD}?|I ½JF(qXyNd(en 5aGQ h_Vly+ rn*4:99DX;lABzELcWAFa = oNv'W{ep)Ep6yZ/ S.óz gQF腞<^ j($!kʝEH Xi8lꠔ$`)ԅ E[$j~yO֭%lG5l C`}ŊKho)λTz2/m?= ؠ*\` Я:hܳ:j czIus9|ʹ 2􏼏@}Gx4g[JkdFZaйٯ JwpE Ahԫ:#|1aksBgp ߚ?W,;F?+wcGn\kش.h<~"e/zC'ٹKu^BE7z")$7x$W%PuC)pC%RHR6]GNikiV/Ow?QyƬMkTC=*_m] fW~?{$ZIHMli4[z~vxZm*aW~"A<'݅E:B㯊t+?ENWo LUc5:lZJi=n!RfvpV*\nzzsX`Z ɀN:4*)D\ =;ogC7qq&$ݮp)9<8K8bjјc }FL^%D"VJhvLU1Ӡ9OŴ_>G:/HRIܘ %u ?!|xm^aK H̩d@ wj~ꪩ ؼI&G)@RB&~7c =v2ꠘ\Ht,\DШC%wOwۚe Ȭ/D[tV/iD  U>ȹnK@C$d1LȌ.9_nUPro$% [y] kFi;RIu) /zvWqXnUH @,yݍ˔2T mURyn}1R:CxQ 3HlgD[9q4.Ʀ8G&iOwyI' π8([/TGW7uж\nXmXc\]Saˍ/ d@q~'Ӓ "bJH}]E oe'˰]`5 mC@*! e2MBH_!*hS_Om4j?آuY rI Ȝo=ϊTKz{5Y)c Dä"S8- ng$Y"l ^307[e୮]8}ؑ +L ? %#x9n;mコfØtLr*e̎5ơA<;)ZPt0[w5hb.#hy%Wnי*]2M}2}31ӳ]= m;8bđ|$(N7mZҾh _F+./Á:7zs<

2$䚜LA_Wyd6L1.@rxJ<⇙J,Cvc0E*E /9h}dDAOdvaD"fQIxԓ&=E٢(vcd|F2i1!3cMA5*EJy3פSߡI T$Bㅸpd|ϨQa-'=9Sps[tK׿U ;G;,v]R%5qLj >R~z[+M%Ͽ"wa= 5~N2!4A,85xʠ)" _:db`Ja)> stream xXA۸=w)>{EĢ]]ANzӅgkcIo%9o_o%;N% ofx-1f/.]f\/ڦ*]nn\JmڤdkYݱ.6?-VҬ<˕kk\n;[k炗fljPa^oIii܆ j;h9eƢ߾6Y *dzCJdK-vݩT SM&ƾHd ha+펑^[X ÏuHR?M8b̒<M{S.j!իwח&YeEq8֢]1Kx Z%͕xV*=}0dS C5}z;&Zl !]p.AqіዙbiŋhcX f"RtB=3 VkGR|3x#0eE݃vNy {|M5PiG5 H+o n$c (pt'[A1"¶Ӥ4tU-VI.cE^rzvRBJ$J ǚ4at- nHN 9C[-;vs$ʈtE*@ bS ^dp]Js1f!#HcqKdN< 9KD%cH.D,B!8Y_9Dsf~_RCUܠlE Di~|ݐMrzFfV=H{J0aiR uSȎY@{V.VtYԩBPƝwM13[+3u5PwcQl }Gj$1Կx$؂(cqhvW4Q&z88dSRnx'"M=e#ՈL&8 DEy*FND*h)Fil˨Kp1*O}ͤ0rZr"fmqIY&\I+&MS<IsF_+ `JvSUld/$]_81'sIt'Zes"ߝYJϘML,=.ӀcXqޟOmt))V:'hjW]{i c֐E{_@M \+#Ρrh_|Do*#d5Prl3E`WGS\sRu%Etdj*%Q',+R*[wNl.$:y2)#Yu`^i脌,e(+ .ZRzu.2n[$F;Tu?=6z*!l1B* R<0!LDB˕O{s˞Feߟ֮>]I7)Ua"Φ=;&2<PLqW8{*Nk;9pT M.fuڇx)ys:m=8!bxtR)ƈhW?;tbHkdԋ JQ,c o$KFe#I[54][aq]omM2q-Y/'XJAwηWc$ՑvW`U5 a㤝eN] K/ NHVeL\UU:7d9|.93P2L /@5X;1:X [r?cmt̗=9P5gnбaDz:vW%fq Bǎ6\Ɗ± XbVb_֙͝}4AQPvҙٞ+`Bg7pFFP]\ $ӈ;ђ%ޝl׆fEn>JW*jDp306n53JI܅엽irx'}Iv2{p:7$)٤M,E0=QƸuچES:.;[M`po24j01_d-oߙ4ןgN0ɳtfT(^n}`endstream endobj 478 0 obj << /Filter /FlateDecode /Length 2063 >> stream xXݓ۶S\gFs/:GAȴ}tę|(uxtB"e뻻HJ9sG~ Yf)_folxum_eԩVB-—O;;tx@'BMjP;}uQ/ARF/`618tKK:R]kY J>!Elo߅u'B ␜fx  V89zcp| 7 6K7^xƂG@yVFХpjaZyR|G\0 QLF8x)Mpx|UbGLU`'sHppȰR! ƇZEZB_w8O0ù`AkdGh϶T05L9K.&h6v@达M5sYXa65)6j6HiF9`@!1zXDVQ`hk7z˙Z!Y[A7h*ٻV !Ok(D-BČ@ 8$CKzZG:G&``dw~m=񠡛lGrmX=5)zh3Sy3iM9X"帿>RgPh5r~ICU)Ze8 !ₚNhO 6|&gʠǼ%˧05\~>F_ I"o\/K fEP#\lqH*o |OP>PF)t&@0 Cmn#4'N[3-qF>h;WƬ~ҧoџomg4kঝgEnk`/:ghn/֋'4endstream endobj 479 0 obj << /Filter /FlateDecode /Length 3151 >> stream xYɒω:[ ^\`@DP! 
ò=LCjM,G___efaa3Ę-+/]Atҟ_6Wջ^es˿'684N,qr}\}>r cY`m^Va&I:wC* z*uv(QU= uS4}rn@< <#o*HYZe2Jld5pha$׸f lemz޸.r3a RZ?h+3 AkǾK%I^ $hDffZ #_3v!CݱkrXZj=|/}nw6LZ˷arz!UwOB6]+j[А[aQ yr0Tƪ#B~~b;/7}suTo:GL"|Q#c1͠~vWDbBV}Xֹv 6_y׍Lu7KtĨ(~b==<#lnݝFB~]WBݭnw/hH.H;=.hesnMmuK"`hHqVV54s/^' #@}AExƬDvΒ\ lw'FȨ%DUޛPa`T?EE_ 5jj(Zn9QGY&d82* d+XV3WU @Spd =fYb28u שsVGnwS=(ǧ( Y)K$m9_AGcȯ+'06y,b@w?.3sշ6AQ0q-6 }ٜ͒zb*Ac+rp!<͑P?0*f7֛423K;"Zk'b9R?KF}642SΊ` 4r(xvNH#1 CyG֢yOcNF$0 ?S/i}ВIԩ5*BYb[^b,xAI{R;a%Gw6ӹ:B 5U?(u\n!{c> 'M[9&V>z-oLTԄCJDA+23T,uKDO[q2Z-5cIa%#s*4x˓ Lfdqe?\8L;L<+:_lpR0b+Q*#.=^l q4!-ָvB%)Gϩ>s$ST!I*,$tZ)ycԖ- {p0_Ƃ~8`U qO]ȳ!BCF/{:- c؏ɬ䗴;{ ᷨ*Jldx9!^ kGHգ(aX% L6Y(p Kɨng+#s4Dǖzfz9\ E3&x <ƹ݂:BØ!%]|{W բF0qzhH6 wDԙuhLAey,!T$Z~D-Y\pXdQ9Q9X0LW(mNoT ' V?ņQF1o(c!=zm0|v"ÚLa_oAtuJ,}ڢw`UT &^ ao]ܱI%/{@c\2ur@6~>8`0R\R0\0I {W'ۘCt}U7q^0d\*Ɯ҉u7·n =׾@N8F1O'-:\"@mTLuя?7bHIL[_#稹Yހ_4!QYvEI06->EY2Va-/V8f1w[27Ld,S_~Obp tG @GZ{dIB9Y3QS(D?r9ROf gv 빏HgZra7TX!8Fsq y&Oӝ8#J&}~uޢ2xrn:W~GM$*0&f /Eɍ%_OdI= W>R7o^}=G4vmT_=_/`ucr3bNGl;8@~}9!|^BOQK3q^>*q, 79i[ yC|N a`ZEnӋv$̦lɿj ͡^7 {E&L[/mg<HF tqt,z g/Ŧ&Z8ߢ& ~&|2gJkr\ߓ3pi+>\H)_Jendstream endobj 480 0 obj << /Filter /FlateDecode /Length 251 >> stream x]= {N ƣ&ۤNfw/@@d\3)d-f>?sw8~˲KI E.ENK^$5BUoEM0n}aO~) 5Dl\Q3(,%N=J8pd "DВcŵDP%$lju "tI+ǭ\abD"c!j4 σ> stream xT{lS{.4s6SV{&6Mm5Җ&8 '!rlv;8?;G$Ct4#h(T6i4{qt#ftu{}~HM!D"Ѻ½<?[Sb0H M i m姉h\RUV5 B?A澢o8WYU\&8@|" "%~L%^%v~ihȔHy %I:%_Yo7)U*.,p6 ƃܽ#@-s+p۽Mڈ:RF)FZ9+: eZf2^˰`Z6JrPx/v},Ka7,z4ϋ&vt(6l;qTc(j9#{\N|4߉j.5mK:~RwRRB?j)dIXt"ה8,}?c]B.̯nLj5~ތ%8no< Ć _?|^ rF0B3^]mC-^W/%P>>Mćˎ|7_  ? 
1 Dq-.'vnᥰvǏ.\4܃}uꃷ޻tiCYa*F>ﰩ^WiUD8cQ#ETYt;N)5]*Y$q@b$yQ.^tngLChBPZ4 T.c(!N|'5Y$ au$.4H08GlfeivQxՀJ~DQΰXwGilCVk: ҽ4+`'ha[@E8i\NAk"@Y*mfbbٶO9Do^vD"VqápAoZ]@͢/ŘNy#V ԋg2ev*o>P<'q&g|$M_ӊY[ϬTWhyox\ԽG#F*6cMod+Cgp6k]hc'I']APp MqJ;̪-\]IUck6ʧ9e`MA33hf*p!L"\/6>Ox_~.am.k Kpu:-:n,/9PEGK{@p&srxIUIGJ9BQ"W mo {v$ %:Ogy`S6C)batlFӮh`/Fț@ X@xe~ ~K!:]뙥9u0XT  目&(G H v&KuXMf 6uf,kB&.N"dj4l9\Ns9Qu96h8#ƙ  %-W*Nb[~Rmz8R:7\}endstream endobj 482 0 obj << /Filter /FlateDecode /Length 249 >> stream x]1n0 EwB7V \%Cl 1f&9T3`RpڐmD{R'E@j:+)ΡBkIh$W yV%ڊv@;VICr}hýNJK,S_YUendstream endobj 483 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1879 >> stream x}P8gT+ dWf Zch,%j})US%x9wr'wp- c15/&:ccbcyy{v2>>[ EI$9;6do{eprT8E ڙ3[b V -)xdFQQOW)ԕmSV.V)+&EQÕ[JXڪIeQ;TJͣP2*J8 QZ*,)\]ԴVhu(dC&5Yw dSW@x\ًPA)UbP1oAF?뱬,ԣBE<_ʭ8Ej{;ra.i?Zo${&JC5Wx& %p|ar 2_)Nı_ ae!$foN&q)[!r\ky:BݽS0Vs\1zv~K~҂|X 5~. Н\+Bz`JT9AȍZm-Vpw#ALeog+WŚLci~"pG+m'W}o3$TZ$.PAʹ]_zMgtV5_ ؇S@R6$wGϏ? )mre F0rOQ¨^ {2i{M/R\tF נ9~Ҵ//K·!%%d7᤯^Q,ɕñ4|wƃBRpB^fStz}>ERVb-ɢȧwfpL7;&GѾ1`pJ9{Շ 1$3[FxZL 靰GN]U׽u=~RSH,766H&03a1j1yo.@٤ W݄ӏ ꯖ5'pw^; F8}='Mw8< qTƉюSj 0U>{o$}%I}|9m>1O20Cbwm-XZ] N; ۚ2jU&-«(7_247Z ^RG;NifOiJUVWk 7s=jz#~Պ.oW= NMRQ9Lv6 hf7^u.AvgreeZcC FxNPNQ^ksZ._*ӆB>I}mY7<}-}!\xqns>Xt72_Yv]A `_8AG?5*Ysl*<'{KL> stream x]M S+ib؍6M @P}QMxL7Y];g=/Xn05j=ֱNTڛ Ol|Wdsybk~9( QX1F2pß%/B+kj<ք5a)Ie“$!:\\ W[IbER# keLѬߚeendstream endobj 485 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1086 >> stream x]}LSgWaENM1l:B52&J/ҷ6e8vvVcg%۲eq8 +ϓs<'y"@ T,Lݓ1$"Bqk8FcDg"& ^ZC3VSש9jmMbfmuyF\]%B|S[+)-C( )Q>:2Q:?" 
FY¹uU$–id BR0Tde*1Fh8@?dpu[;U$hV> ^P>brc%j13;{\.l4L &!KB-a>H)K/Y2?+SI[(QIz-  ڛ`+ӈ(z1[t98We̱(> $Q 7fl>SP:)+*T*p.wk&s7n.XIV,fsLuUGt56LG 1e#X;)zejs6AwKqK8|}%Ƴfq~NׂvP 9) /%WXhlkWwZ1ҚXjJӅG[LkZw7Hݨ"=8,$7ؾ]X_\Wa}!Hk0QHELendstream endobj 486 0 obj << /Filter /FlateDecode /Length 182 >> stream x]= {N DcИ"L 8"ZYPSxocx+yppyq{gʼnNaԁSڍzDJm&sЀQY#jU +@oYeIINX,*!,B R*yZ~IG@k|V:'/] endstream endobj 487 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 534 >> stream xcd`ab`dd v r240q~H3anq7k7s7ˤs ~g((土[PZZZ&R )ccư&LV76P>*;w\F.`׽w{w~o̢5aWZt޿b&==[=5r>Ey#\R N} xb-C۶S$lZ]R\?Ek[*+92mܸl[Z[+-R3+N> stream xWKs6Ԟ{ek1@DœNrGqDb#IͿ J5L }($Hu$O>Lu[ӟ

duvS4zpǠ:")((kԮٻo%ۢڹڹ/S(v^EU^@˽bPD)l /Ed|,~ C.L\ÏSF> stream xYKd}%dH!pb#(9KghA>_wpgi06+ALuMUuW_׌V򊅿j7䢺m&o' eUA8j?T;dn«  ҌK*D;K4Z^m'/.i +`,)S\ZCϵ:I7/sNka熺L:~2lE&yLrvhYijCr7P,AA2JUwQ6I)=7"8,Fu+B+nxwgɏl6Jx(WKKEܻ6A34eZKjlR0*yaj,+8tؙCut8 Zڲᥭ,2*~Pgg*hKQMeSJ?JYʨHkqsN~j]zlSgFAK[{ ʭ%Lf=̧{&?f6T0ow,˴_jdeZsaA_D₩tØ;:Se9 cXŔ P:ERS#KK#٠W SCC'cю88eZyV@w,z:4#Ωgi]Sp:_X{Tb8ι3Z~eajS2-ae3f-5t7 ZyM/)mdC¢8N,uǮIe0FHr?[^zH&L>JѠS!2]*EOQlWN_l'||5aNz$ i1{rTP"BRdVeik$0P0O~I))G9Ԫ 0]jBH ^7+XӱδFsCgә8$&_p{\uf(Ń<<=Y6-T') \rZ[|rV=8i^ɓ7m}YA'&þ9)&GjF|b0Pkc7&"þ[􆬗F3>8G$pmz] Gn6ӶEY-zPD@-\JnvA~oMU 0gb\umgSט5>Q}w(x;>acx8"+T }rN8aҵ%w},ƨ#C ,)ll,kIf;q<@'.4?!I2| xyl66@22|<)Hr. JaS ۋȓdOj ȋ`.$IG׋e܎R T,yy,9z +YmOM[QϦz^h9f3l,N|@R[<1fہgI,t$,o8CNq( @s8h]98vF+;upnvgT) L9./|#>eOC?*jY}ߒE@>6qR.;N8y 0ٓd}8Dչ7j`Vt4d> stream xZK> [`ŰF<37O`p$kIʻ>nHjx[馸}Ft5J{{g[T^ZwkMKenN7e,s~84V6/N7ox_&Nd~ߌMVg3.c=䛭-Vz qyM۬{ؠOmFy!mtx7m]S`l75Feo; L0ٮ;/cY:;T>ٹGRs=<4(mdj]U}(jۦ}O|6vqF2b/dj Us_. `T41e {RDrCO5p Guoe;V5~z l2^/(!}abSP|RwߺfՐߟ}{ڵ}k`V|j)Hv=djׅwd<%QY$EQ M4.Xq ذIVq]!MӝYc&gFYKFǡ )PpP]fxY pKF qX/5:! 
/nĆ5r,6mYl\'o,Di˩ANOgbJ MvmW2C]N`{dq1cFgFc+|ߴ gKv$88GGǻ{wy%X2z޾p)N7OQP*bΪ0ݫ z v ;t4dCo+Fbo`,3sz,} RkX\I>!@8հEI4z0jѫ]BX%H?V R>xdT$+ MCP=}~-R a 'O-cѩa-8 -19?qJp35 5qQ)mƒ͵^ҘyBhhʩ\r\ϸɎ0e'DBq87,c:BR*pDYOJQkh=E!=q$[Un_WmB_.%8>a.Ń4"D&7¸Z 9 t>WE:؍Ogf 6ξϾՓ~×s&st[Hwy?7[F0iTIu;LQR\@[D2@P.21O<9YF\ یQEIOBA_9kpR<)LXbB+k]Li(zh,:$D3bE 5/j@r^X"7JG {zQl*͕D[ Iy+$"4Qi W1H\K}e9ܞ8|,)P.|* hZp>S?@vYАfK6'uNP~cM˥.85KbV]}?1EhZ+H\xD@ﯫ]'Ezt (PHRg$=ob^_55ŔX,ltc4 Cimߝ1~RL.¡_ZsQH?͑:u07ڨi!M_< JXt!mzۙkh* bϘrV$J_rWhbJXL&P+.T|5kYرߤlQҿ0)~ @M'@iv*p}}bhff73IGDj&tI->4[H]&v- -zv߄iHj>ֱp^p+rs΁޶N9 guz\YN›,<ٍ/u(@)S;3ܗW&N9Nlc).\+ Y3u 5r9U선e^>~HH'1zҰRKS}Zp٘xZ{H}5u5WZ[Н\ۙ̑=W8no~&W|D~EpX_޷kszB<+ Vi'XJkݙٙ$AB&N)z~P =P}'x;҈U;ݝ򄋐z(uL(|J5|XP0IQyČet|H>Vd HqJQ6yŲvњGp@)}S' SK$;SMat<#)x{'q,W༈>}7"hL jT :'\+VK`n,wî"d IUw` 2_`h'ecpqofʬmc3=* c6t ~Eƥ>Jn/,o5$yOZ6Xc%!ȲVRѧ$^77e<J^C$^ 9v@8*wC~{^|[")Gp6(Z{bje|7 f}_ $PVV XxakZy3%~Wp29X Hm7r/[r؜hSPKZ"I*j&wX~@48dNʅ͝xoM3OWꠁGKB>]}ĀxgÏD Ϧ]ݮeB:8(^5i# d_ƿ!~O҇\n6TFeٴ'`x؞r ty(.@[;]t̮HIZUXܣM'/ɵEBHH%fi TWTJ e\_J}FqEd[7epv,.w%"cs(z dӮV Vu M/ t5R_SiMC)0Jt)\MH؇H-c]ERS=q\j!tWht*\H7)=n/`=.td퀨E!=#+L"tb. 
1]|3{ ``ܴq$2mQ\iׁ}U4T/0Q&Q@w Uvt)1M-@oVDsqYR_Or9!',f\B  ֒Af֋(,$y0 K¼ Õ<FN_}nG!R(eÀ׮H+pL_^-؄3f pW u~{LLSl[l TBҷFP JXK ~>;0&P(Mb=[NeXOY @$OTww:+*lnѢ0#P}+ ϊRfҳk8ӢeL/f3Dctz#V`ƿ a5M_;0<:_Ѻwgp}X^zWJ:u/} R7 `'f9¥7]*Vp?LM2S(n O\endstream endobj 491 0 obj << /Filter /FlateDecode /Length 4089 >> stream xZYqs?gh %u;:i5^Yn1v;,EWDd&jLk2adq|Ed3~tnpz{۝n{iu3o 3-2m3!m6[E=gn#\ҳf+\͖g9J3Y.XOyp400%Uˍ 8;Mz%ijWw&}?c5~oF¾޲9W- ךzÆSwpIL-@U{P̰\4$w6 ~laceP~?aA#ab+e2)[иl.* s*ýʲ/7 2 IJ.ffYX]I˜_KK2 ݹˤubwЇvFͅzՆ2,^ܞU PO &/͜@!83<kEa2-SEO(/Ьʢhtw+vhsf  #ZYWe.WӲetrީ4"}r-=[ PŽæű!X49@ Xӎˌ{" H3 = sBpbB' t|f4va{ ^ʩ:J$IAm6 C agmvq~3rrN~ 4& Zτ[D2H/2ǥ0AzsWN3oq$L+C2%48^veG&cE]A2gAt~ʆx븝| I8Ek:'lr^+J3!*CUԏ˄a\5JC"_EijGR ib^COZ9-voKm]T2)J!L:A9Tc/>(Az¸uk!Nqڎ%fRuGRmS,3E׼!ٗx@nHyn"V#t cqΡ>9y,1yʓ&QTBiI]"Ejh)X*c;K1(6bݳtAҊXȑ-_G@9P @8 LP=X>IN$ic ;@:Y=v6e9H9O WΌCT{qP!+ JF3*-G@%}#.0z}hFƅ|43 Olȷq>CWiwiƮhtt4I,; Ew'4TR J?hd}'<@(KLUm {""DWSsjۧyRX]wB`wThAPP`ns!m=|FJpPO '=;E& $a6g!= 2PHL3 D$35{( |m$ꔥ蛇MEj{Á#{U_6ꡪ7$(5t(mh- mI8͟=WLz ?-{*&!],&~dWegM_6au/%=]_EIP(4n7Zϑ[yMCZ#skO$(!:ڈC %M)UhEOQ,}@(\= ݊>Cُ19A&Bni󪍦\t6oRa`5RM81|vM5](]֠v/[/[HnU|2t `bU̼ a7qĮ=A$'\]dI\v(F 'D$oɳZBTr;7()9}fEvLKT'} l2>"adPۓ 8~`)?-s̿ZcR60[G9c5f %Z=CЖ\br>Urb48?`̵?5phQ >[qZBS;kU0겶xX&Tqeê&sF"uOm]FG!,8wD3ؕ ų N/gWigA48e@\EDg\HVn,X@Ǣg T9C9j-zeok"ka6YZ"6 -*TEX.Aw]$;ԛ_1/CqO$z;H5?^U&s60͘Uь0@0SK?˫=ٌe! tM iqDz;"!@͕ϖQ5|jfFb _)/ÿ[@t{j6غ8VT_Å)Î5eVrss0=(LPȆߜo|9Y2iYc^dlyF`R1pf<O~eL1lV OWwa N#!AeK3'.} D+T eoM#%o:449qE_ qendstream endobj 492 0 obj << /Filter /FlateDecode /Length 3442 >> stream xYKo@'P4u6vdfP3g$ǯ_ |Ur@DWDV۳|uO==HLeg~<%Yn3'JOfIQhQ~fW/g} 9Md:iQK,$KS]RsɵIԝeb #_#iML:DBL4rhQL`ݦҜ։ 65D#u^\!>"wt-z\ GX+Ŷ^6宏s]hyH%%/ZSsT,-!wB2T!_7{u/wKzx)amk }vBA |.sQo>ϜJ f3Emye5C'nG~Z3mԉ~\4B0z.^ΜƁؑ,(%Mj} 4 xN:*8FCom^YP"^9#W#!DO'j ޱ8 5A'wQb`>3OtߐNN|kI"-æJE‚fTKd(  `&5E0CQu,GD:Q]~q>ZakJ} M0J2׉]_tIFZ^fVܴÚ0 ]U[' $׌W fcť2 'ۚPLwǘŎc(a6CXNC'ʮYnKTI&(XTCE]C &Ň1?Μ؄6IFèRۙҹBIRDI&™@ CBWXO)F9R`qNv2d#`tcJKfȱOtYP琹T;~? 
粓ږBX/gd`3M)#Z~4qS/W[$ Md1../"g) W]9M}1/,q=âd:/_'az۪ Ј= Ƨ=rv^v!^kb76HB8X,I4 /36/݌G\#wWTMGMveX$Nы/F$鯀_6rQ(ralSNq~ّy G`7jcm,/C~!I#A? &zk?5f ]Paiiq#a ,:/l6y,  YvT)+16(; s><~U?P|U k /q~c$YZNBV8*(_kNGijuSj7̰aG% kQ&%d&1u+pw,$iLQ^ߴ>.yIl.ueRI(^7p5S76W$ rI`Mً|hǾeKCTBz2Bg8> k֣7#i<1 }b˪N(PfuzyX#6v%ZbC c~i! \p˄PSsϞ"]_DZ`'Лr@r{v㇂p܈|Q[@.X5ր! QBnH9|snwj!tQG ίIy|WE>/smSbYMHYLW\lЇ퓈&2w2><(΃$sp#"a 6i8eAC0#"fv;cq 6B\v]8ϕ*;J@LgeGгPqy:T`dk('RaRي0 cuyM|ˢb]C[rIeY?lj@v}:cE-jH|E/ҍy(h RB[?~Sw_1J*_>+Y3 ӌ;iK@, }"QUn؎s @_P=-_O|squr3,$c!i{k}FZdRML l]_iW#_;ry .a7[xDɩ"Zu#̆(0- .2g%m?V(!9DOb()ODW쭐T~R Ȉr{*fbV8ĴFs}-TH m-:tN.`X@^J;e(ߞ?_oendstream endobj 493 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3265 >> stream xuV TWC d`Mr֩ugEA  B<C!B ~Pv[gPکk{\3kf:++d}[@k׬Y,u-p-Q'x kVy]xh!J#$+g)33RӒATy)Z55-=(#S͎;uLOt"9e_E-B}~jF©T uFEP۩ *ZKPS;j/5Db2GJǨP%9U_,&]Xss{mR{=z@0~epR F2,㘔P`TgZ-Z9ͯ'k*4v EPZn,nۈ^Эl $U;+8Y+7( 0\4 Mzt}ZܑX ;nuM\1 LUZ =Ȇ=rɤ;a'm ;3~V >ZZME!dA('=栊g!"3Ϋ0Ȟu~ӹza1Λwd1Zv\+}ʻz8wJ#Zy"{}1|20tUg$@!>.UcI'2|9wPQXOHaXIGR]\w6kb&@4/$(z\NuHv@(K6e Zk ;'z8rfvJZͥ%Z(a3䴷57v&w$O.ϔzĚh=-6]G38sl6[M̈aD= _X2CWyH2.R*KsR!ͮͤNa r'޴+L[ݔ2:Zǥ/13;o3SjJ x){XC~I*rx_@ א9lw=^e.~K;ݯj=aD(]7$ʟwG;SFV<懍'u1g0zeuoYQQPSzPe1A[nS Ŏ\U_O{]F.c: rxxd|=Ehڛ{7&.Ѣi6n쓏h{ٌ49Cw +JLiVZ6$k#!/2MZ4q7wqȤ(/o>vTa;uD4aQa{$*e+[\yu]Oq1^CٞX%'#5^endstream endobj 494 0 obj << /Filter /FlateDecode /Length 396 >> stream x]n0{=). &פ H,(NQY3Ӻ\ӟR^뵝us/۲6ݹr&>lj u>yxQLcJ݇6!<{S鿣|:8vP#8\ aL FB8W"\)`!nLD |۟] ]>d=\{9t8dP! 
$)Qb$DthhllT*]rV`c2YaNY!1'Ӭԑ#C7dqVduQ\dUdfeUYU0\in[ý-^׫Xk\ej6˨endstream endobj 495 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4774 >> stream xXy|UNNu2Hu"P( ;[M-i~ɛ}O6Mt}QVE(ʌ卵Ӗq\6Msy<<rW|;=(7؈llүy0~y}9MDœ8r2P'%ҘsL~Ο?oV3sΏY*LN'I3Rs䏜DarfT>dQT*Z2lNRn$}.bdҌ yԔB4fmRnjps sETIL0%U"He$Hғrs$B3MlkH"/@VwMr|J꺄 Yٛrr.xEENxYǙYy3*g g+'333y g5g yN01|.BX Af2 }ɠ#kV l2~N'/h`B5LL3\57C3\g=d[ T&a@S5FIEqOWz5Jzboۘ8c1|>h't=|)W9:ZO+ׯL\S*PSNµ'?c*5@9SLV:2 xskWg5R(Pmp0MBRl`jV/IJ|<<{4g Qh2S{jӆ@3s}T)(b.)ʎPTw}FBB-Oe2WlJjUه6 /%Ƿ629YPDM  P=Y]~R: Q 5E$J}3m1M7mƾw#p:',HH lZv+İ23[ě326CA(Soz3> V2N&<]l>\Y%7@M݀fĴpp S o5XKٔ(ɕwyf=i)}@}׋ (S+4z&+G8|ODE5\{τ#09DE"`Ewͨ:=>Q8Lzoa6p1E'e/ t\flC fQ:f7z,|(8&h7V{xF1:pDŽ3qo=كڼn"MՖ:AއW,KfepKд8OOϭz7prq$ud5-zE(7-~fhƽB;p M;ʄ^D2ܶ(G#>Z-ptjuQGRBJ^Ӓ{@eiBS6#a!DãzS}VIE"WT_ZJ/}jr~MCvkhQwaxO, 5ϙK&k3Wwit}6T\䅧jJy\iW\D/\}C"Q;[JY# ғ_W gIݛԱuچA;ps@+ ?y~5'5 C- JF[=(~P @l^?O6Uu#sBe-eNRX`ywO!К `0鷳P ucK@t.N8:35%9tb2=u?16u}3ek#fB&5м`z V_b`cqT&~@&h#E8zPn@tcYFqdr`p7Z@LJRl+ 5Br%4 BQHtJCD!Ef%xXUNMhUv%Rd( Z.l2@ҥ벲%d[~0/H8ʩhX-K'^G J56+SO}Ϸ֒J"VhPwX G]Vk=fmqzOGsD= zꛝG=?'xb2-6]@:o{fuJm=0x:f{(MV2z)uj(AйuaAWc"Vr;nƪ1AdP!muXUi8|Qevr@)U*ʪipȫTӂjfؽdryp!{#%9?%^^wڻy_NPgBZVòoK'ErZ&sEC$p /]XIk]nϥv9vl h i4H `ln ëxrS!]|b}'dfޓ@*h E`9.bbkԒZx ;[7A*/qw.vh'g׆/\iDvbF# -Z+Șe1j^Gpk &%4=s#V ~uV !;)_]H>+r'Q=|hu>?a:Hׄ861GSV.{O\M©;-րK0;>|d0TW`F)dٞJ=v1kkendstream endobj 496 0 obj << /Filter /FlateDecode /Length 269 >> stream x]An0E>o4RMɢU"1C} .HaݜkWݼ%~Ҫ9JY#nsVm_w!h-ܩ> stream xUkpSe>!Bm ]nS. 
Ҵ &MMӤ_\{ӦMCܑ .*..Ⱥ _©Þg939~<| ,m`0W6w YL U43Mz}"d2!3euQ|=j {$9Err,.%jnOR$Ow/i/YR&ϟ/=ƺ%e Z_VKkx>wuP Jbn/.Kx* OTaش+EUg84 l=aal 6{aaÊMҰlƵ U-i>ؓ^L|i#C54RAQ_zm)7k.WBdtѤ^;7G{MeljQ,^ &?tX`n';63 Q:9drY퇁*q|I*n,VV}ҿLϞ<5d)2 3TirufZCXoJ=HiJEQSS_>ybspfC(fQ\j*೩haO Z&W+\*MhҁT{I7ʎ'8#YsybuBpe5{N1~8g@%M)a, ͠_' yB)@)4dkA+`Zu$|-RQYiѓW쑫'Z~`zuzR}ak:HVҢ?@-י-5m`ŝFJ,&& MANS%d|%4+9ZP ^gh⠙({]5"<.γY`$hu{t}$y|mɼlրKաP$>g3{ӵqE]#`։ǾvmlB£'k^+rڏC?;û{XU7V‹qDH1J)6а~go;5u"R'RcYF^ }C6wϡo_|ĺ^lR6m$Y6U؉I 'D{Blؤ_o4L: n5U]5oDC!݆44d "{z^18ő9x0a C yO>G͚CԂ'u^ 8p[m#sPuF䢅Яψn'0\ǩ¤3VC#Zu+n"GYh  kջ[6^ͼGg<9ub؅SdSZ5kP8-sP:)Bx*{?ܟG>aj6,?bSyw/9w W )=⍪1$ZG:*m}ʶ[ +]k yc-&b_OԌM]O+Qw.2=􀍦+Js` 8E.ih[+uz/Y?0 B/hZz;z!p=5 'FP:FR4*q'?x"K&l& :b4A7}%X܎ 8Vendstream endobj 498 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 490 >> stream xcd`ab`dddwu041~H3a!-C5eҏvb;022V7N9(3=D! H*ZZ(X*8e&')&d&99 ə%`-6%%VzzEv St3K2RSRSJsS nՃP%E )Eyť KjS+JₜJ.nfFF/?:~mWsfVǻ]aJ\~e-b+r3˫ns+ǖuKvwN}Gl߫Z.XfEy>%}$q7 ' [;+@S疵i/'M>Aeꟴ?v}~V;`[KǭZVIl%]]] ?͐3.X*y80γendstream endobj 499 0 obj << /Filter /FlateDecode /Length 339 >> stream x]Mn0F`pl)MɢUƘE}CEόǞ|yLVV>Vԯ>?֔._ǩk:[\%ؐ-WTJsKLy5'c4 RԴnxnucQvl(JS7ڋbLDt̊b F(ڊm(jqEbe^0VCY(ІjEZ*nDڋI&rLtq#DMTDk, 4SQ|ZXo@qA ,0DxŠ/Qi=/;ezk6m'mv8忎[慧JPu 6endstream endobj 500 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2966 >> stream x{P7+*VҽJ*'bEmUDJĔg A"$' "Hy)B-oZj}`Z{[Z6QnRm{sqdf';9 WB V47I=M6s`Ƹ?97ơ@c(9T2I!Qx>?x^!c"G*b$H+Qd8,Q(dMOO)M$^8;=VI"Ivy%%*7DJ%\8+TD>iD,DGnK$\[ }֫~?hs"H"FkXB! OBL:QѴ% b'{,߹ƒ3%_8w;{2|"6~ 5 PY!@.-{ý@AS0-<1'4 (c$~E.eHW)^_lc w`m-@Iz؏>gB]6t(lBn6tp! Qtaw6q#ӫ07Q660@@4u(4Y{%lh]A%@i ~$!0y¥/?|zK_(ӻ`%{;pކ`L}a jzi(,uU/*}6Iw8k'uk@`ֿ8⁇? 
9~L|5?+l549E<>☵0߿c<}_ .!z.E;gG/${`s煞CeSY{mFmLj3-g~rmK6gfRKHQ t {yk!ՙj9yف\ -1&ItOx?̻Fs0P2֞fLф$j6Oɇ> stream x]n <od*_Km/SPhz;4!}`tNf켤XK`;eIơK~Lpi/clkϻWn޽wF1pӅLSwTҎiS* :v`ľTUv\IA'ўh[Ѯ%˨ $:R5N _=/p/Ӧyj^Ӓ7f _ vAendstream endobj 502 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1420 >> stream xu}luK/ג $,ۭ-v-^uӗe]6Qcd !0$@fhD~z[h~=~$&$Ifm+ٕ/\%y<a:g7n@gSH%8="uQ#t2//,\BT 6)LmdR6 QLtUV4 y6On AaFϔU:[%#e"FhDFu@)?P1RZUH2F6p7'(F=THQ2*j'AL,ھ +K/XH!8Fh>CzI'^Gd0H^$'x yYY٦ %1NIz= dׂ>])qN1_#ts#!!m:c(_pz&miCn6)Th٣>4n|r)P YfA@34SNww^aW%4JAEw! FH\3~?DF}E#L@UuɯZ^huX >zsx,\}Ulti8b .\)l)6Gyv3¾'JqmzGm]Hlh=TwfP\XTw>|_7/\? nBKMM B779tzؘKS-hA.dJXK-F&AgbF0 | .h3g<. z JUڞD8V$X9GC}Go 2!@El`Y,޹MCPee*t\]ݾqޝ ĜQ[ A{#|ӿa)5GlhU#}GIg O@YVL3%)K?0FM> stream x]=0 "7h+,ea!i M=vJb,OKQGY\So8JCpdQ6A [oǏv&>xE4naq[Moqb2⠔>8 dJ$+!% nE *K#"U^,+8rSJ:,>_b% ub`endstream endobj 504 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 826 >> stream xUPoHg_c\ڵ:ӻ+R:10v2`Bjhs3 3y<$hLte0ei >lc@ܷ{`FϿèa޾O.VF6i t/j TW:IV^# Tq0GOW>hme;;{i6MRK,N T~Rlr/>lJ5P"U|dѓ-AZ q&<DqG> stream x[KoFsl`Y΄v13;Ob$NC&nG&dԲNj>H32/"~JqUzzx+Ao՟n_oF_2Xin_qq%*WָR*su{}(WFkW^ xk- Q.Cn^?]lZXkj}5O]7k*Go:-C< hᩒki ЄFqxY]pwbѰ/WA~i7‹Roib8Tii{]s ͷڔN諕e0FD xwޗJtu_>xID).6VʖJ>JVxPi>mE{eub:!áH3ک;?tI qjm.m d4)l@٢8" )[(al+| 28r=@f Z$ZpҶxQl?Q - HoGd|¬R(DsFoP%j3X)['xT\McN%l>~񰕊pt4-#At`&s҄JoݛN.|&ߞ%JLlsw}/VJNLt 4M} J\4$#wxȝY:5 K )Ml77ߚg4Eݡ6x x )k&;UHTKIV-vj)Emb߀? M DI\h`M_**ґ(1ʛ"fB$,Jź^AQIt#f.=K>-+L|Kk|sևln= rρ6^ zWM'[ ~t?~Ũ!nYm3藁CR^9 {B4},A"䬈bKP!X+ x'.vM3lJ+橊];op0XNx !mfW5'Bn=k%|0HUF| PyP? [+ZҦ]&K8#Fl\Jnң$]!%Cu*i{HsyF4%gUg$8j9R*Ozba errrl4 *;Eǩb|Xa*#QŨF:5Eؼ yO`g9LB.6c,`R_e@~!}sˣXmGS" l #\L'bNxz߯g cT:,mKIævׂ3~ęO'U \ "}?30w9}jlq`ȲI g8QQYCm݁UC@Q9vqyܾ~П#ᙇ5 ֨8ŦaA ȸ2@B3Q؃l7Ycb~5z Fᓛ|z;_oo'3%T( %!] 
9X3ŧ&b'8,m-t R>۬sQ(7]єt?ئ6.iz_+fúm 6'"uVms/4 %l M@}R^4tlw4:q &gBA[r_TS;DsoƈbI<~c##%;/9޷4M+XIr 0HCm,!ĩ&8X =Oon04$ҍo}߬q.?e@Q(w_;8;X 7ocbbCWŕY Z:<ItBZB⎴*{řgNd(S?b9PS2$Ե=pF)<,gI”KH-a8_:o1> [I ` ʔƹ1ƒ&-AQqj8PBCq`=Q]_ף웎?딚?uܤPay1F9o/42/qM1/v{4m^ml$o#EZDИXEGn0i*Q8w_w |;T6?dqJ c~Ð]ph[F0 f]Io9= |ܷө>?o58l%MD֟!e]Ga[`h0)]@b|livÚgw:^qsx  =6PXk$|պ;% CX(| _z7"!Nab/ۏd@Τ;ͶRJ9`'9B`N u`ſZkO; w<+L#OY5Q-K1ST^S|T Y$/`^27m{»n%l+;얷ƨ߷!&MiGx\%/!&7Ta@.z>/YT Atyg!ʦLS:{/΃ MywHqC%BNBNmul/@u^8 viyg0 XM|at4&&~|fn^ 慑eh(obiV^`#0Si}8";`ؔncZ#RTγh H jT0WY&{jRP_?qtOL;u`0J_eMfΜ اB i@q7-I(-8rn?\5z̹\J:SK5PM"2Lgz]=p8(`z zjPjE_hg蘞K[% ${y]S﫜 I ߰Q?"Er/fLo{ՍDZp(# RN8+vX#?P9]G]$v9blN.u|b %m&|:Sՠ?!}b4ÌXmtQ|Sb:ڝz][,) !VOV @,"PX+1ea U(mZY&^f=U-5E,%]E.-2s$=RSRh8LG%惂OvşOmi0ˮSEc35Zܾ ZCNh$rĸ cfܵ_/JR%+'S)<2KA=a h>ž}ƛfWxz*O*NE(jkX>f2zxg3!Qf9 !p4G?X>%Z(d{_oSuC<1q!+ Z{+4|7'˗*V̼F܍gmx.{=L]2//0Obv .7Ee"tOVq.a"\G3UY͇@b̿I} .YAց=v1XxX#w ϳ N43֛kObN/ Ō9g]>㐱;(}V:q~)B/5Zxs뚤`gBZDzESJi-GV$,;>,D|9d,@R(Wjw:4ck0zd|餶+<FHӲ|I TyZf畁\jPP&iƒjV ^T`=MGZPVj}.gIM;J ! D0XƗ_Neyi pju\7MK1S@Sg!*s]JaN̎!;D 傧e_,`fZb^`qöZdpz; \6K/JKg &WةhͺvUi9 {E?<=?2y  uH+2goX/byV8BbɊ zT5G?Rcqe }*Tr.> stream x]Mn! F ?MH7&FU 0`"a,rڦE 曎ӹUO}_\jx_=ZN_5BS=gCM_ ز). -D^QCΠG֌9o;`4{%uo C+= LUWy eu :VgM &{al;UD8R/Զ4҄pbendstream endobj 507 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1200 >> stream x}Lwzp=":-N|$*L]P|aS'e϶8v:Z"kPZLɈCt3M\de'o?P4MGF?/:Ie@\TͤhzpAHc1`y>X~~0S͗4@*,Eev\" ;#pR.{7gNqk[9LJ[u wxOĽS0yp+H~;Lutvf#2ޮٟ`1IdIHK{;ϝiRgNRVZ'zvtGG6Ԏjx]7ԳZ$ ͫrSjCFW{#6,o8r[w0sVϷ|&[9wt=7R [$ٳmxy\=_\7. 
ރSx ԍBx)=Xx2-Vj @(g$<1:vk2r00?]ܵ >JLJ{w=P}!`z^wzC+ 9{.twdd[7$1́|t}k"+OJ~ga$=`kk[3 r"Fa bT:Iz=e_NJT%l|v4 a&P}櫽O L,> stream xZKstۃ*¼gTծHnm & .3xPvS><0_4')du'bsrqwtz̉K1QO5hO6GfWlRxg؛l.<6I9gg$}S/c˶Kl*ػ~RϮ}ɶ?wpe)/haywKX:znzhL'S'XVv4M =&߶Y[T[ kUsjGOF[)GZ՜kܪ94ڇ\gm],`L;Kg?:^x/-='IT 6^d[c:kVe/Į͐c 󦪷è `8#+/ʼh *pLxm>j;)GHԜE {^?VI#`a2*|J u[6qgŶ{t04Qv~W-bVsEa$ۀҦ%x!Vq[}nf5Ak)`)Y'?H$wTDЀ^ap צI<8h>XAxS`j XH^nI@,18ȡ+yqQy4y7Fl3Z{VW;FUۼaYSraآ4H7y6hf_|︅*`9|0Vzi%>6uXCzmHse v&?ڼ֣!kEdA}z r>Z(H()(D{0|E>ÈA.陈I<2iJ9m^ge{>>4P^<=)m9,H#t3/;yٷrߴy]lW<|MC Zn XMkhw8X"[lMRɖY SgM> !p<'(%lx|e.)[$tY]HRr'c#18sab&#Z*Tuh#Dxea=C ݎ#.߷8LnT+ [XRGhdIz7C+@-R"2CV7F mIbxH/E._Rbm,W}Fj]8&h u_'.ŭ^71 `O!1lQh@zMAyYUݼ1 4@=a_pgzlk6yKq!ƒ܁ 0bMwhǞ Al/E^~v=S%,g[8 @zaF薊H 6A~|&\24q~@ ZaOkOV/X.xDB3# @$AZ;Þid H.!~@67e {ob8Lr~,EtKQ0py |?w<ʶ+ĺ< |jtF&8%W:oRu70دdbңF z_H_QK?1r,Zt Y)d\6q_xЉ7}17/iGMc=o߯ڝ1d\chC[9 r)6l4$[$Y{Z,g ݞradrPqPr\M VJv&QQhyP? O0b:tmbPZȎML,]Eq n@:.lΧh̉`UE 'K>r}\L ,I@o)At_xCKO1p)}}iֽB,"< R70#WnJrrs`XQfN=m, Dy(Vo]oK8趙Ds)-&<_t(7J,I(mEB(\ׇ)"e77Ze ^ Fg %tcٔD5Y,Sȿ; q shz܆HpzCr35 @_ETt- D ́0ƒcz.ڏΝo:Bjz US K!7NN{$^Bq媬g_Рm(^V.oQ~neԠV~4oЉCΙǜi9 QCR*/rxj: )Ry1vHY|h6b/ڈ}H좑&3'8o///a %|dYT` ̊,H)YhAxPӤ@ٴB c=O02q(y/} b}b"?{{P3OQw <%KLQQe֠Aa`!3$V'i Hk;qY6QOsCੵѿlztluhbO72oI$p$OH8vwiܹ(Ϲ*l]M:S^WT2=!qODY0Q^?+J{̟%]L9veNrɂ6/!a2RN0$iMxՕ㱛\ ! K T<ukavAe k^klBPdyO!P hVof] JW`)E RA-b.H)a_دECⱆ@UzxF,T z J&\ \K1\5QAƄARb}ELcC YJ#thcYYW7sU^b2[RdpqGD p]e6JKQnӗIY"/C'@38mMo} k1S*@c],xmd@J+s6[ܮʂ. 
e}(O-L1iuB^0㥦<ha?kendstream endobj 509 0 obj << /Filter /FlateDecode /Length 669 >> stream xmTo0 v蝣# ;$$uTe&;PczZоi8}Dghyw}~MW}Hޗ%> IyO`dEF&a3Bȡlo];T]sPu[ <\.tP]33pfX ~)ھEq@Ӆ|'̍:Z<%?1ӵ \o۷g@ CB&Vsl,^$>` ,y_ՍL6Jr+áҸi\R,ep$YC3z!n~a@BUCݯ!Lbj_k긠Mߵٜ֮i$ۖofrsci^`F{IesM!1a8GBVґ%^f=KfxG[ˡ 2E^y'5A#endstream endobj 510 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 !U%BB 8QC_Ct;̻֓K&LůQ#89bt:XzV6ڝjF^DYU{H{KP Y#DX2$'κn O%Gs&5FT&#=|)22S> stream xcd`ab`dd v 5400q~H3a#ewo_ 0012:)槤)& 2000v00t;gu`Í ʅ|(YQϡ;çg /xk76i>9.i< `D> stream x] w76֩aƨ/@h hprwp\M=W/$j_1YGjT 7ޟ0ul%5 tHϹNڭ0=yEyf Q-j Ӂ@#\XmҢY MWnendstream endobj 513 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 640 >> stream x[HSq3N"BR0(3B(!24ngmn3wKtΎl]UX 5SKAͽ||?E%UwF @sǝx2EbpY6wx{K"OAƱ?u] N 89E 56 .0.`ki_N}* aѐ< %! _/4ėf2l>zp ļ8\Xe,9usGG'?T[,NLI?jendstream endobj 514 0 obj << /Filter /FlateDecode /Length 4116 >> stream xZKil")J\lā#q1| M7[^ԖԞԃԫ5`_bW_m&=r#;~uw忬2ʍQw7rLl̉7Dތπ%Kߋ+y1 ɂæIQh[\Д>L%:FQ P a2 x+n׶i>h,ʂ`EW jpDИd{tuQ7n~]:Dƣ:g(T3q)36GI ~HI8v=U*TUN?e*) Kٻ=>Ǥ h.>W ޺=6ѬoʶbWCRi<pIM3Mtrx0+zLldl6XԒ,. ( B oPZ@=(5Zpؑ׀( mC qB( 87rB,u.Z; uHEX a 0Y/|c1PPM#}U6l;`A,Cn 8et?8}pxf#~H<8WFJhh(WSA0" SQhӱ\Hf `,-X__ٸI so/A(x9K+`Zb&]][+#*V&cg`FsD Lg/'.¿uV@ םӄΖ# {M-N+L8P]S]Nk7*8 \L 8kbtf@1Wc&AM\k8[hQjpѐHqAZjj$^mgp#x})@4`r'w:@:P90ʑ#M|< -$ڡ zM{8CSQYL},^9 d/8î@)*W-JtV^S0ہw- #f[HGC&ʤN{Hzr-Ǧړ@ B&+!O[H~-  w^ԹQzz=1~PC 8w;IL|V!Cp^EghH LPݥmp1n`Gí8.d2 %h$>1K;L~d i.SRWs`t6O·eN Qbv)ܛ\R,e[3q2"s93/>iq EL磴-y G;|Ch D j_6姄7"MF.LاpŒ #dx+d5` ` ]RM\ڠ tB02NS|,h8 2Ž>ibEh,$G#+qyE)4-#tuG&(1 B$\3=DL"R\ɀƙ$X 3 -L,'ptul.&-o]!j[{򩴵t4[(!p̠x jRpQ6Zs4s9R(wY`΄$;ȥ\GZ݃I J,- ޏUa9D0mӽHdwnғ6ѹf%UI;-[kz)cueR+M+"yuck.Hƙcz5Qk`_"}J&},A1c)KZ*9gwfDjjxCZ/ItY AHd+ާ!62,1w }&'K0f^\M#0ci*Dyt$HZ:OIA0^e!m|K~ Dr Zw]9h.`;s1AܧvkSKcշ`15+,e-Ƈa[@S.䁄EٌfJE.Cyy dms" 8y%Η:M?ޠ#<9(ʻA*"ڊsF;w|EV o1 Ʃ"/3cw3 /B@~XP84 $mQ3)ą9%pSȟF%W-tY&yBH`bD z}h'rHIՎsq;t,B³'SfxZM\=cR,_qo5qepε R3qlǢ>=!@s&74!T(n ਻Rrm~ЊTy:"P̒RWq[9Y*Cu! 
7ˠiuO'L< ,ٯ>& w)_Qt=r ɣr 0SݜKP⪶+5xL,c5})G?~[QT9垚7 )75HXh.N=[-D,iZ3=g|1 _X\?ћXiV,1]}HЌ}J~&}L+Cw2U,U !"9?Sc_TGS9]'`Z|]ר9_~e2?w ,繍0P|#뫄*wu+X?Xצ[DY7|{yE)TR\z4O$9d/1%$~9$W~|3 ׹ҼIT|^k1LWUވsQb*w bl/UO$ 0Z34aDzsR|Ae/[مo:Lev/5'+>\bfc?ףxi,{3c<(jsH o Jd['ȇzRme$,P;;4mn_84M-'+'fE E!.,{r-bSJfE{k$+ Hܕ $JC>ٕh.Y,#-CBq,m2m0sT@ȄەӺ'G= v|ץ-H-A^äBJˏ~zW\/<-e!A{_,q]=Θd Po|hAP%NŇkm)(&n늮0$v^,U&sendstream endobj 515 0 obj << /Filter /FlateDecode /Length 4965 >> stream x[Kuv* 4z`xa'rri#o76ndk,-sU|5If1z:Sfb*<敠_7y7~z+ͻWŧqɾ9?]zFe*JE(Q^H E7Ud%ѼE'0jwMEWƧAO۰[;8ǹ7*)=X}}  zKOCSIapI]ˮ+|`yWoˤ:z`Cy9yxx$3ڴ'39yNPX")( 8NakMft o릮)# NvZz"1E&ebo[klGȃ,dhsd7((;E{Enz,x͊xӑ'hڄr+/7@nw09q7#!K g\)|NcG ɳPr[߲2V+)D4N{x9tXb_fW 8Hr5_Mw|= Xϐ4V#xYg@4?TeS5q53:$>lnJ;i+^U*h'm@ھ"$S@U@ȊI IBS[=՟%.bY\:o:r1 g°p7x9u4V[JS$C8R5uGD4890 j;]aci!8G`Dqh)ic{G7.ڇ gK~ D{z֏zxawkAXhˎa/r# 'CtCDcV\w%|T Skꗡ?dkgGrdA9q873;.G;J Co˧SMVZ+}t0fq77W%TpkeZYⰋruM` It/Ne%4ÈD2u ΂G{&4vQ9E=R>N([ēδk&⣎u逬䡡l?,Nϥ"y*͝C4c *F hdP!bP7Bȹr 8-4QFVhHhvS+~"sH9M$-1]Hn5{yFXvexKpuT˅bWv߾$7T(3H G L\ݥ-?aDE#T@~:DVOSLZ?C:_iXMa쇸|^PrE+cxd4@r^,N$+Z-gG_ \'%Bl{?8AX]353RQZCsHw.<,ȫa :S ŵ>TXA{ @r'S)]IWˉq'Oj_dJܟ2&zWg+5'8HErw* <)~5X9Fj*b=$*7EfBS & z9Kuq2& Ա '1'jO}h-~Ȗ%E%HDB.s\vRZǿuXeK!8 张S&局HV8t]Y-8c\%p5<10.g6P%4ژ|pHİ%I1{S0)2.IQs>s.BlS2 %_B& cbBBFN^F^AF!D $x.X'{ \e QRԅ< N5vVxͲ2`ꡊ*7LBD3S >4mURKQ' ;$_PLP)a'jGޜ#۾u8./? +XkxykSz W $ :fyR·c>'Lj,K%I{ )^WtrPb Vt٣6⭋_=ۡšf{cH<ȁ% Y ұ4BN:{"QB͡6Axie;wJJ/+:%[ta?J t?kg$TD,- +eg A:}B$pı DuKx iuq W23-靖؊\=*0|*i4bl?*-("bGwNKV14eƘnHx* E:r1Kdܮ˖Lw ;Ш6tK9EW; mt1+Э }6}TIn<ۈ B@=7t&7+;B&'&׷C_[Z:ʋ.ҖuvQUL=wxJqzi Flt,̨—N!X4_ͯ:qbh]S,=ěYş᾵௤% CŃĿcE;bc= &:Tɼϱڈͤ/ZL(ThnK_5Y;G -}⥢Zpdn*GYd+6q{vmopEk8#ix5z&^rΖGբzrKId/ n܏nkm-@Sy^ kfG/~r!55ge6ufhUQzN>{0FVl&6t sbyFO-q8"ÅˈT:r,Ns-! 
9V|^xZX - gvp6 5sO6y~QN9}LS=~|شkJZUtJ YGw|,s ,"Sy,Msi6`qR?y?ZcJendstream endobj 516 0 obj << /Filter /FlateDecode /Length 4353 >> stream xZYq6yZ?ɆQofTއV2Z6li`WX_82$Z0dq|W+fF||#*W}Y::Vox\IkY_+mW7v;UzY߈ZXihQhS;T|>gfaEq˗// զۻuc?h P9SoHYGkje?Fʐ;'sl7Oj??)-LlS=}RŵI:T㲓fuCdJociĸݮ9ULAnXY _66m4F(p+)- {AF: a8C/Sp83U5y"*&"BwAn$T\{y؂T{p5rX46=r #;8+k OsUF:~CpE+[Z&"w-:5͚Li += _ cϠ[:Pc󻵔X@;E-ApQW[`;\HͶԡ/'T@% m$"%p,h@ƥJ IX0sAl/ݙZ3ZF?LesYN`6xνjdbg;$UYTTT[(azϧq_v k@4#BrREЇA8WS.p UW>lH@8c7 J/('АgzQ'G2ssb!AMD45K )_j`M H(86CQ]>k  2ޏY^%#x ʠJVhj3tMT*':$Ewi| [/RV셀ށ 'Xj)@4Rr|i?7"ׂ֑eyGz`&h0P/dF(mH /¿aDmwSTP Nǭqz"tI@ {7d%&$SJQ?UQ4T-(luQ$Wpo:;j4"jS %рk܌ULYm^_y=b?}(zoҦ5Ѯ'TTDvL{^"?EjhH,60vVpMraeͷ "6N8+uISj7]Y''+IӲM;K}o V9}6e jq~}*ϛDR kqcJQQn2/OMO b)!ѬHiA:t ^e4vډ@&|bjAʓV+Rե`yCOS=H2 U+*͉o*79_(bӚf9eL.w|Pb J_=lqr&%ǾȵX & xh`QLro+.FuHɹit1LF %!LǥΧ첬Lfr7Fbgf>&cʲQg,0Fja=9 \t`J/ FlPW{FןԡIcI0TZ@x38wCJ#gO]p8Ž6`c6F MWԆp(_?,AXk~\#;2 9y١-D3m솲;-}3gLtSAiLXIP6!.wfE p&Ҭ&SO'V-vpEޤ AAfj͠qa]?O$L劎"=>yB) k;3T[J7xhB]qa$շ[ mif7RuҦ>߯C @t Kǀ;}cNz %Ғ $W &djMZ ;vfMJ;WTg>@G{vz,=C3@wE8Br4o[iӷU)kMC7:Zjp>&i]s{X)~ϭ]}Uե:n.)KsZW\&޻~ zgW%u9iYe _I{ }g ~$u Y׋R>vgu&Џ/šQ2`K{E -ojkW?=6_*?"tB,^n80t9˜` ,mqpLJ<=ǥ>((Vs,C(.s-s}Ȩ̚ Ձk0cқY@ݡ>%xSAHa@IVs#랉z9uG9CJ!Xa8fpMI:Q:mFj̡xlPАͿ۽endstream endobj 517 0 obj << /Filter /FlateDecode /Length 555 >> stream x]n@D{}@vkE PPa""[Ľ7wruun|Y_?uzYvc7]gs|۰?h=[OKuj0mX^4uז8ϟ?=D> stream xyw|e9""#'fPTPAAқt*FB{/齷t $4 "Pစ"`]]us>{}{<+o6ΙV?:󣦏(PTUYc: ~jc|u3| il3ύdg8]7\>4Z Sԏ8[r[E`0SNgCGgݦ]ݖ9] >^4" :=x=XIP,)6\ h5BZiq}-*JFIݓx18wۿ.e,}q&JNsLb9Z!iR t`5.+E*MYx<ET!!U;=ݮno/7tKa*Q`R|̼H& >"TLSp[ҙ'p0exr.ZK֐f_mK94G=#'{9_=rkLGB覭;3+w'ʅ=FsVgU8 #E֚ԫ hި"~:VsL1[mx *0UǗ'vu-[eQICښ){!;*U ٿ#>K즋Pv ؉:@Nj/OyI8T',JMʚMĬʏO?k0YL]=x dQ΋[pB!u•dwVWQv,펤Cmu7yŵE4 |cx7pƤnk2f'(BK\zZ =yK4&}uؓI=aWy1Ԭΐ3k g'awAh%$}/.B5u8ebH$N:QXF]X"EIWkL@iLm6gñ߶+gڸ,ٜH6vֱEŋ_n`梹 *_t)R𽰪bBB%. {ac~~Gң/E7 Cw'۝.383;95=À#t&QG{y\rj$އ]zM[0=jorJXd%i¥vWb$V \=`d`/_"x>;#rIZWLla{. 
㩰/5EzF ѿb; 2 ULY@o}\a/!Ր;j"%EGi6=ۻb{Zb>d}:X99ʹfIiQ*/|wG5dKRD F\)(wG1[ |w3XW"Lӎc/ф"RLrIk1k@s8չ8LQ&3LQLAR.RzؠĴ˲GHqD#+"Q )5B7pm^, DgXx .Ay:ViWWKU2@HA l5)vdxv%,}i5TO}Ʉ@I֪+j)xcB&x6Mhj3m x*u#_pn‡HDqEGvy25_ 6"٨ u,sE^P-°Vȯqyץva4ӇQ%z-ΗAk/:K$^d*xN  V* _u ϵQ'RqZXv2#.I rَu~=z; 8:|D9%Ƣj3ȗ+Б&Mzp` \Qϔz)L5Y_Cv!?;3;K/&nnEN=k+DNPit!vB.qx%DnIL#ڝXOr{%뙬L|?zPc4(b⍾FK}BJ*wг޳]Ƶ m B1QmW]wmgo5Ds(03 sy77)df]jOhkNOqZ@}y24:"hhHXZsM{9J5 ˦f"; o=;| +oH:Cppp<Z-'ڠۡĵ3m^))d?XOu5A!= y=z>BsZ^ޞФ) $uXIa}4/"j+ޯ>nœŀx  }A'9;P=N^O] e8f3OCv& |>|nݎ]?=O!]K0G'QnY K IWolǾ`iƓ..ZPLAc: 4*"G5S-<E*5fMQ=3 ؛=C辨-n]h_hRhMIcFzx #znw4#́o"6i1ByT-{qd?Z+U/nG& baHN&h*  /r*m 3fEgNN;ó/?Y/|} xб|CNew#>" AMȲ\ǣ'DYKׁʍ5b79ޤ.*bHǓejNh60+}U;DVB? DN[Bٌ6@+:Q\șO]ڳg;h Ì?zü*4Gx@TvBt:uY@]kXİם$/Q…$q5x 27 +;~|q${?;@5"JP}[@$Bb<j Ҭ ; ؍&I U_a"mAN 1Һ"ġʤQ_?E:KQ1.,>~ k]PT@ /yU,ߔ!ӫID\ңqZ*CAzO dG֨1']P@JBO"TPE[!a!4pLB0dt[}dAPq>dwsު:&? AmLQ:h{485ck3I8<粢2)Ғ78M,Ի+uYɳ0.! #(2< rFҠkL]#\kw$<6kY@/5 U -H `9bF PHpsHW քAf3StMDCw{3b(-hTCA q1Pօn8bfǁ8p8)ќh'\@eu?xyo!D)B/ :Ua-`g`J$H +w;Ot_*jz1W_-`NגK5Q}>}Θ=nao'@(JRFΟ354)Վk5ڝ zaMuȰ/8eY΄= q]{=]Ԇ\ʭ(+dN#+٭9D"vIOeGٓ}~-gDzVmO!$qr-wRBbgK-bzH(V a.d]EFW=S "K4FEnbgfwO \s9m @Jnb@F%7][p0=0gcD) M"<cPxpK@<,e#%r$dsm6NGVm)o%jC;@)ofOxRo.%PYP^]=OOc*=_6< wK 6CijָۭEfŜi(7y̻9g 0j)%s~iXA5,Z&&sӘD0o)w3Jb b~u鸃:ur<8,5LR\ڤ*IX-"cp@!p)ְ#җ@>, J)fEYP0ʇ{lU֊J 1da3q;2e;jJku]8K9Cc\= R C^1Th 78ij| '3x t LϹh} ‘ 'B؄'E\Lp(UEEt0gpX7QPKLjF6(= \WT#ըe1Ҁ4#~>as Eb JD! yD΅|zh*-SG!pN[(QzO$L9:XKOyxY5OEW-y]}9]sn@[EUT8NY 50@#f]2(hE2 YbhP",5: ^pk^w^N>á'c8 ėˮܧmb iFGj.)O-^xI+r^@dUϢE/[G-޴djV8J/xzΎRxAPoqh;7eg/ȼx ^>&M:0Yդts"l#EZѓ?F^S+Ez !D~Ui]x =h7USn:v_~͙Urb`O -*>DҲfBJApL QDXRW&A ICv!(q=e 3^Xݯ4*ZKxqIz9==LA_a] ڗzΪ<-<L{~HN9;\ W6,WJxջ..u臹BUcmq/ 8xA#\SK+6pJ\gYYٚ/}ڧ ‘+Q,:/KCmz|"u%Si?I 'TXب[\%,DD BE0.GJɨ7Vw+;f8D% $ TՄ46\QEaenɥ I_݈*t7Oрt<ۧCBq Q[饦Xy|8}Sp=%"d99dsendstream endobj 519 0 obj << /Filter /FlateDecode /Length 3948 >> stream xZoF.^U!!)R E/ Wl+O+m$s^IQZKD͌~Rq8e/w? 
zzN<{˴4?> q/<7ڦ2^a׹NB&U@ O05Sw#>3i&R%w,t^ i"2-T/Xu x4czh Kv?QiQ<Q$}8f36P QR~/DZj-;PFϦJ 7ΦLZ*)gs~r4Pwp05vMc|v=uGd/H^+ZS=^mt}'8dN?N:8yr9 0 (K,F7Pk1Jm'`Lw&t"9qd-`CU_fFj"i+A$>Hf$zpdGm~m-˓k@ // V֠p8Z+GhҎ]p*i:f2yӖv2wMҀXoHHg/aƛ3*Ph-.ؔ9j^+(+ܥԖ7ӼE'-l"xB>sШY 1 9R7D_~o0{:OlEg6hho[%nt'ޤ%Iš~h~Q2ޫ:HVsk ]|?A2y{T$G.T5݃{%rg k]ߵX(m}XC3zuL.fL^h 9H rA$ovhzu?#8=Щ}jpѥE t7/_&G WY E40V IINW"kPaŤ蕧1 Lu4&k ,8rӑ@LdіD,IZ戽h^fNb211sim^K6 D >|i z>iNnXngȊ"Fp,J/?0Ұ`cV#b&]E( ҊxixL~vnc΅<tS*C|*B!p>A8؛T6ܐ0c& %T/p"S&x^!BUhU Zа֬W86p[)+8Jaf#).)L\hUI:T24rSrCF'a"s(Ѷcf}ʿN(prA!f ћrcd9'ltx/U#0t~}16s{\ .*(N f@A+FfZ%1聟X3?$_`a5WV|g5w6GR01o&74  +aC~b4<5DaIR>P"&npm4c ijܠ9YߢH% }1Y^F%1jk|&fv隟/7f@xA ]ɁѴr`U-aI[8Cp5 #(c6^- .ٿn)H||',$? TH LPZB^kH@G` yݞ_t(\53яs` nk`\9{%C%HASE (>A/ldnޡ恇E3n ֏x@*B|ik5rX2? 0̚ɂp: 's>зm}D“Ȫ;"6g˜4SjzWJpo _I>:g27.eUB;X\2qWW_gՍb*Ƣ"f2H`-Kj I{81znR5=WsR~z MUz`ȬخiSD5nVv&79dr>YP$,J(Z+ Բ wxݐρSI$QϲֿĪL:yS&U<`_1YėAxPxstl[|A h#:j"NiCx,[0֥$R')Ao(pQ's:Y"7g5X#wg`f x-aZ΄]p(Fq@~tA噸r yUKO]wI%W%9_+8+RJ.D@B{&52,at n 8\耯b\)uvEy zXMwaiޝ |_\irs T(rW:YZv*gr0Cj%unݩ2W: 3_}^Sq%Mp3&LCdžCۏ5j7A@S:&4M `iU/47W][8q{ mSG <NܫK7^!o ɡU&m] KAbf*\HHͥ#-N"_cas=loE&RWNc6i@cuoT9da_h󥛕VpFj喱:9H Bpj=>3|)V 5d22`27N )'b(Pf6(>w~&"y7&9(?%qlL0n4+ a0_r}k\qRQ4Z]%0]-z/ ?hHendstream endobj 520 0 obj << /Filter /FlateDecode /Length 4508 >> stream xZK䶑I>[v袉'_$YXԡlC홃7 Դ舙"$/_&MM*9_uoH +^!oIgDi{sw~7t66Ie&j\RZ;kHETM@tڧʒNԻ}Vژ#mg ދiw"qlE ~GE<OCs.a.p\tq\sEމrep=@É3?)Nln^J8~Um}MJ.c94eCFA4Q,!J<N(<SFXJV.,5b+>+:lKԜi=5%M){ 5"\u%D,\׷( H'fI,_gp)elԢoq-BB4秶>WXyuHO^KqV&R~QOeW ̇N@ɦmb`)dvϧ"R!؜[\.PwUz})E'bS`DB+ 3p"fX0 -g'q/ۣ@RnR`Pɻ>-7ɒ JՖXa/rHgS8] TSS݉_dYMJCV6 Z˩`f@F^";I֨QUDka@8@KJ$E)&kodG^`']p8_a1qbACfTroV;0Eb_,t+d|PձhN3 1X[LKR:\6S4>"GGΤf&ceCӁypYG8N4Ȳdui u?HV 3/Pq$un:mn=4e1Haxduc^WH(Zɓ:]ԙ`~Vyb2čJQYA)y,{d@f3+U\;Td6ͣGy8𪰜&>Y僋{VY4s=MRL;vDϪmn1pd`9~ɫa-')h~.9C͇M RL!6)&Ɩr{ZGǎpFl02-_86 ŚZY;e|d@aT0uW>PaGxk\Ur׊LZNf>|Cx?rN2WXP8wS ){I9.!4ǎx t>"Zq'L,zۀ #vEzG]M0PN)"x:V:!&8ZT[ {&ZMh*\-MRp /6U`j>[Q 7(60xfr|mH]!MК]1,qF*&uZOMj~_.i;?j!ƂdTjCv`9iie|\X@&%H]K3'̶oe!I9rclՃSmLuuM-|M :娌0wEE@v 
%CI:2%1dJ.'Vnc *ޜԬXMO*RGou2@"tsOzɒUP%/Ѐ0:*P1CΡ̡lg<I&9CfZ pl\ ?#MmZo@禎2e.(1H5` QzrfK_T l}"\`bS1mB"W1~ǝh'j]K8ӵxyMq1ʙڶjCF5;‰B4ie;y^FKؼ_6453Az 9YQX͗Pٞ6*Tspkvh߮EpƯg*TZ*|POe-^#%Sa}KX( "eH%Α @ULEӗ% :y)€b(hfF$sT9tb*wjo խ'&(`T }57C64MʂmnA(d|գPxl:TstjbyԷ)Yrji5h&qXpAv2Lv6*2@ 0I|YclC>vÒ`L(Q)H} {e67l[.ͥiaj6Kj xݴ6A4⾾}) ~  J/ }࿒PЃ<GB[DUy !u&F|~חU>o x:V@>6͖Zh2H]ǡ\d0z0D4%|X'mL0 {Βoj0i%R__1\%* 'b:,cHzRG1 (<9Cs~ C$7TyA.,uc]Cs dg:$3x1*p2.TC M:!/fE#(2⎌7GpwkO5'ex,ǡ?{ 9} j kV 9"C0R!D)w{7w8>$_t'=\ "y,> stream x\Kȑ^KtL[4_6f ?ʆ.`I*Q-JizGD&C%zчddRfָ6Rn77or}>ys6\jaTܹ;υVz|I a\8iў԰W>y^xkWI޹p~9w2s'nCs@,n&WWj77g^k>߲bzK\-\޻BqA)cˊ{ AbtT"Be[ dsdNqs.gGI%Lf\9?92aNO.2wfͩHD"ۗ~Oa*yw#Iδ t,6\簊QlURRF^~~ERL-VS.$7vhE);lpϪ)i uF>Cf?WO}f@ *l)=McMGq݌Q*h"$,ZJ&^oC\ݱ^:>Vq(}c`* `}E|'^)IăJ$]G NDiyj⣖<%L!%mJmY#`G8InB)f Mj=,d 2.yB~@פke'j4d {jB&T'&^!ݾQޕCLs'@S8g$4`|9Ty &f'{IF޲7h7,ͧ"wYނy%+.4SQ]iK>~w,:6 xI5[F \1h_D1I2n5=IAml5)" K6*b|J Yf]"X|p,&` +9 p,gõCLO=rΊqSno>L)"j"iX_!シDa>0'IиRiF] !A,KbXVB ܡ肕 ,mf1-ģ#?Vâơ ;zp\EYzykDMZgp[@ĖGh2IL)CEC+LkFe!)A]HnV7rX_n|zwCE^rJ!hYa̲ HF6K{(vpWi䢢%q͑L *-M<ʾCB،֔spUա@~tmA!sI* RBV7,Upρrk]m(YVkB#? CπlMFְq !eWcѤO\SdUW/KQfy۸JAy@Ntk<Օ9fcaMֽ];kj b0(]HqdDqb ! C(cu)(s9')(r!D* @*~p'pB@HϹXXs4: '2kPEƂ7h۠qLPHmfeXEՏtdVG:i:SL]Hw:nVwK@($%`kOE9RsջC|.CK)\F8kҭ[:%’׻60Jq7)Yb@WOwŨBtQP:|p Q ۶K.fZ ~ghCe /EޔL$AjH],\lg9cSH@JvDJr olUnw2Y\i2zp혪ǚF!E xCNSCH`C3&7C}<( %* aVY"WU(q]zE_+XI6)}$%m[Ah"Q*y*7hxKd]YYa- :#mY=R:ӗۦlŠzL;ЀA(0rn'^gJ sXFsOsO:~3mB 'I M5hivLȆ8?p>@dH"!WjB*fpgiÁĕb乵͙#lا t.:{1$cɜ6*mA)VA\?XR79]hΜYXoj'Nbo"iY9o-**+7JbP{[-cXeøR.'􀵺ǷvUJDFa`Gh̟A6"EQ)\ Q;(Ӿ**{Mp>v p#ܑ88wɤ $;Gɇ08&N0&!$da~6 ?}(*VA>9إE#iG=Lho3,3.@H+KyKw[ƹ0ytSS҃Xt,h*ɩS! 
QYNbM/v&b5Շ#EN,b[uyt/KuȂk 0֩\.r?kx '!T'gѼPO 8)5H.Olbn*A{l{`0qTnlU,h_5&LSG|&|rz Qu^lu&4o 3-Yh ai:o/VN_vB7Ksj^#  V38>=i d::9#+j`%X`N])-?ؾUixJ"i&=-/b8$؄-^~_߶;)7x=Hb=1cvN_@ǽwFeȇ mX=Dc& ؅:}GtgÐ|GX"A \ %cq~_/&2qp3΁рBs B!NmkSj!&"\|عjk/ ?(ӢA~u-!m>vpdB|*]94N*a#3WuB KƂ}JS rTF!g73zI9oCy_u6v|j=5|ohFr68ϲA0$SNԑ[,ϱcH… LJZxXޗCAbkp]*<¶@j׿o/U$8Xg݃;& m>Pc/~;&~- ~V>E8e}VKV͡PFMpԥG_Pr#M jnZCϤ֗il9h}BümȠ~[4h`띋 Jҽ*ֱL]Nx,bbeQFǂUTW j!M}Iq'}Ԓaӫej["1|?BXr]sYgE#Գ"~R'#/aV={/_ƽ66͡}A}S +ZRkL\R[bW / 0UrD sw"*+TuR"Cnw^Ùk8ḢTd?wO&3ѢU; R(8> stream xP{HSqmk-{1RfBier-WU[wi933Eeك’0*z=~M9!*aFڹinb hQF_eX,%*npw*$7!OHNNZ''j.0YF$XRs UͿl5IRiJ\\eeerο];p-~<~N#\ ~=ji\}hs4h1yuqzn^n %)h6C4d\) C׃1tQ:]ʅ7+}7Πr|UT`\}PKNU;ۜ--A^d4^~kp@9:;Fc$߬1j_Fo|7p3}rŧ޺]vĶ831 8_|]>YE|S*c福qtEpt-ф!V/N;dhVu_iV-ڪVP+׹ε`13hfnVGNN_]K~cD p: -Tф O\endstream endobj 524 0 obj << /Filter /FlateDecode /Length 161 >> stream x]O10 b C 8QC_C;ɾ;,ڳK X&었EUq0N:txj w=|^sYU[9hy$(6ֶI{`TJqp8I.~r V/+S%endstream endobj 525 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 301 >> stream xcd`ab`dddu 21T~H3a!cO=nnIw }O=V9(3=D! H*ZZ(X*8e&')&d&99 ə%`-6%%VzzEv St3K2RSRSJsS@%E )Ey B L,A?:~to1mߏ=gS@teGwon9l]=AQ.2P@P^Vqd`hendstream endobj 526 0 obj << /Filter /FlateDecode /Length 5190 >> stream x\IuEA@l@8fc_$0G#̌h10dUF"uo{/"23ɑ:+3o p+q_;[?7wO~x"M3[|rP+'JUk\%]?a.^i*xn'߱?L !t;hS[$J-t󏓩{X,{$J+'DA/@ 3bPY#NHPp?`Bp5R >Hxiٮy[Ʊa>m'iUjkx0Odo=S*/BJQzxEm#+nQrP2$2@uʙky[yuSty}.KnkEC+BlGVW,N/\~>eP nt%q'z #Md}D|̚Vw$>I4r'XW_.pd$c W« Hp!j{vD(qHMۺR(Q 2JpjR,R]744(& WHhR4I,MBlTRF`sù~̲$>FqF%nHgVJ=4,DS䒳qB+".3!0' 5֣k^Ep~ ^TkaXBnd qamнGxͬo۾ɝNZ(W>|n6vS_<׽_m!~71)S^5Ȏ: 6Q /^M9L!bEcG~1I.3 lL͡lc]{8llQcPXˏ_L<=O R_xM i&vr| S FtQyX ;m ԁYy@ a8;|D 3kCS8w}ןWAqf`oxzsɾwͻؔ^oyd~]l4R`T{O#Ŗ٭ܪN=m=q"lGɎ/¤ vիUJ8č->wW<ŠX#l6nz{Λիy dƸ IL1'~o [3RYC((=ҬϝRa? 
?5C~Hox;l>wM;΃FqC!ZLi qy\"`te"zJb6Ty\6迀X9~E qz0q7ḩ]z<x67 L zWp 0RzePxP~9W>ApN5Xf#Q^0P2cխɉb:q$CU.^Iz3r4[׭-HӢ3M(%l m"lv@Pf~ d8e~3kQs0k[j N't9b\4 J=su&W; յӤqܥH =YѳIZegh3ORѡE@j߾9Dd2&/tbF\ԲH;0#*R{`cʡ>,\+CZvEamg+ƱrUnnA FgnusXlDg8ag}g#-CPdP*9nٍxz1JVp.F RI= Ú{y>~t^9$d4vSn4w7l-"d.[1кhWU2ij%1u\tnplmn1>ѽFSOfCQ<8/Iű0~~yn{[tEO$5Lx-CQra6 $k:_(UF85H S"Q(4=]܁ ,qfhdDK0Zě?c%nD߅ղ^ܟbN RK ) X1:йU"'}ԟ5*ZWˤtc gE.<<pZtU 5**B(7Ũ츚qޥnss I#htף\ʁpHFa &R"A}f.jA':dĐSMY2n/NIA#B3s%VO,& y0|xaӸrf@'(Wi#LHˈϐIqS| ;1Ȁ@H&FKmUm.ch(óݤo`K216 |A/9Y$"z6`/t\P (aM*sA]N \V8cNm0jFۆ:4_ NS0 2W'0|GFlec)\/@'=W&'R#|elyd>{jv\bbb^U}לN;}bzM (Ê=\.wDz;3YH=wO™-6gֳyPztlAXN3Շ3VC,$/Oߒ@+:s\DWe6Rx*{TyʈDVTjWT6y vxa;)n 5 WyycQ}Ͽ],§'`/,UǢSʎ)v^]u…{ܿO R.QOXRV,x*łTzUOAR&+I ԫ3Ə[%[dD+Vp O o =f&S8fѡw6T]ֈQ8Bd\ Gl pзng#$FaʔIOMX"AK+<{fcB@}lE,G(N@}wSŰpj+`0zч@g|{8FN3]i u-Qx.i d.ʱDt1mNJvZ> stream x[Kqŗ=w&tƱ U#҄/ۍُ!r띏*F6zfe~efwU)*?=o~x#]g{{_l]( wBYg+2w7-KW|zQ(+劗z{/}YUC78͆7U)0.*+SIe]0[+3}yѰ2W?rG,]s6qMU(rU9[\ОOz]{D% E?ԧ]72,7]MWw8v X1si7$u%Im( Mמ'ƴHӃ0i+G:D&؞7PpPڝЮeBۗzڟ+Rpe~۝i@fw隴4U_¶_pٝہ<7 W Xz/ЌyBŒZvtQ?8\,J >;#BQPw F<*^s>,AG}l)S: ϭ̖cK! 07R[ba!&,(V{ ,=Z)ĝr.ce]pà;:hoLrkK-vv»OmcWͺECŇ4@:A%WjB)P[%̨$b.7Jc{jP%c{7lR2G|QW+&M  MtdiHk4CnօL v/~bDWpk7klOKU!6PRH?~?ҹ/RzɛB;JVr!ji ]6= 97C'rV0<"pl]/qpӎA3@=ɎPd>mEO]}l>|q|U= 65H%e%kEhkh:΀I+KSOy{iW*l`FpCokG^8h4awh "uː=q"5 (pӥ(/-ÞvߞD+vPHp\;m2C G2, }?t e6U_ k;gvx|μ-sTyBkAxxnqM1ES8h8` Q=5<63nbH)^EP_Rc)'>rU[F*' ?s԰@z Z(xVn Aר?+E9+NwQ_&El~GZh&uiw9NEApJ 6@еd&95yNY|)]dfju;#(Nn$#ޞ!aP{T@ fpLˠw>(Tb6aj{ErPԗE5d%HRhWz?+W<z iAúxjz߯/w!M04FS#~hW6.rx  f=9(ϐ 1u#SAJZ89㏹hOrm B"ǂ[ GF*p\ր›O[>om6ѤaasV~LPЎ4_SBnGQԊaOa1l(qt] 黧0܁lh U4徜_<#-/L،#"׌6>gZLGZ@k=kW.>Xr -M8ntU 7T)PHC bۤ~h#~#OI5 {Z+!%\!Y1VSy`44{)+S`%|W~y0=fE +YݯƴRo*{7:huý>1R8ʪf2f.g1%?f3~e.ȴS~~}S&y\BXk2xҺK߬":Q8t@Q[yp|v1y$S{L;Rq)Y-b_%}c64ɜY+3fD 85}MQhr %G.йx8JBE{A|WHYIJRPx%)w4E0teYʘMcJW)90r. 
q3ʉq$kq0G^ ¹ɩZ{Oة-PR|"5Ux~7Uhm}!be!x2RJhI%Qk=G~@!Ԋ`c|ܴS^-5\KSSK4(j"Z07l.BB Va+6eK6EcaZanz0 YI#\#~/GLǜu 2{h䀹ɉw5Nt3enҘD"Eʗ J@%'s1:T.tXZ!]7욆 7{@:^iYSUDCYKdkFވw$BKqaO 铺fM/ 'XE6fVT<&\M*O㊔’0@;\\vB.z6N;OIUi\bm]˖.ܶ'ʫw*ifOwHsw>=ek rSt@4,הdcCILӼ|yߝ{rK&'z#ve!]u}vi4*ڲ)'sΏ'5i&I6vMܶzOW#*}z˗lH)=v%XOY 1h3b*Omʭ8)N|:gܐ46Pj5nNNڥy6O ڋ,W(%L(5?ݳ\ WAopMLk f56C>i Oi]#_0Z (،zyº gk ŷ~L3;NweӒ旎z5%d|_t&D8Wl8%?;mD fe YoG-|@:j*"**cg5u3^RrKC{H,\ jƞᐛUxAmtۘ t qJ> stream x\}I,ռVqWPw+@b3`)Hvu^ycwGO~??Z?=<~Wvz^׿)9:jQ{}oh1=gI%Q}Icڀ(0|  {a{i q|~OA뿵m]~[ O{_gϒD5$%$濭aʦm {a{i q`lY;h󊱙Ҏ-|=su $^vkIe i HrK li {%=KKy6TTH{^8µ$惃kkH !aJ%0AEc7IYziX^zxYu(`yC %]6@ v Casiˡ7` W |빝:x=Q׺m≩t$eډ??۾9ds$[ᓃ1awEqw,|_*oT`L!!J[3F$ qC$L/`(t߶o^&Q[:ޠqE^+w\m+HL[2%!~> ƵJ0up}Py,-w2z׍·1 )%CJ{[q MaR]xK9y?j| 7Ǹpפcڀ)q)A qq#ʆpHȋ䐗=^uƺ}PTxv_*z.z/NYlm+0PۺqO)gxo/<=^rߠ9⯼5='ǔ5$0e}w׀AXoǧ AKm] fgz3";eIG5FH$e͗h m1^0G- qn'f#{p|>će5kt|{΅דOR  `h pH2^b]}P.~ec5p+.=Z0q;ԇc3cJN۔H=[RCW)Xы,A I㚙^1\S1i4%?1G[/wQKY%`c(cƼ<氏VWb}=1Y63 ,|>XF$kpJ 5$L40m!a/̑~K{jc=wx>{$)˳Ԏu/7׺c-3qK<N i`92iıc'a'(2>路)/fl s4޳^\C\`+0 -A/7-}0}(^U꺬w'ۄ+>#_`/W!b;Ni‰]rtIXCbE_ihm҆pHȋ䐓V k7 aMJ_|ԑ8ķА6 J߂o ;D^^Z{߱m9-eMN} ;[* s/v6':!E&??NK= ;Ř`)W|joFf]7B_l_X'FHҭSbr>l08rl#޸-;OnX"$Cz-Q6)q&م/B`M~B+9.IXl@KPCܵmH[uΘ8;:IFkCMq<qSky]$oJvI=~p &&|Fa31ַsۺ= ucm L%]m݀ڴqN7%銋y`3l,|c=[*6ꭕ2xѼ ƴ Sʚۂ! '#ݦ(x˭ qm}Py1ፋ.7=#a%+N㒡H@ $LgjXS&qJ@ k*3 xaS ,MolqNZ]Q%a ٔJ61痄9+V|TbF}R.\Hon3?q?7L9SNB= qq39ɿ׮zWp%c7o\!h~X6?s?Y6=qN(ފ1cn ?OM˲]0>Uf-p_bSZPTC.Z{ \ɣAYSkcjqv( gSׁOL8,3Wg+h2h 8 H=>O \xsb_-)xe`b{ 3C}|0L H1M@BҰ<6AY`= nB,ż~냓xqL3'Xp w6Egа&vgfϔAn_I"awaucwLlS֐MšK)80(viFn(IY_#Bz-Xxb7Vx:! 
qbԐ6B")Q٦ldž0GM 'wX|Fyg{Z#ql J$Glkac؀(%|w4"mٞ0Gz) 7Vwmq#)Ǥ>v;c \O/CZ(Nj   6/ZF@=X7JArl9[DUO+@0$`0s>$\na8lCN$}(0`W':ifZ|1 80 kךgej]H ,76ʩ3n 9v^q}c÷L=65D Ǝq^\O1^u0z!I95 Ib95ubAPh$tڻ6NxS7E&؎M ,e}}% n {%=-d0%}P"|"v:ůP2yfV`4BG@M{ Ƹ9g*x5ԡ^ĈfZKƦ3_b)=&p`IJ?Ǯ Ҁ0/h8ss!Dę̵v=ıT_!hcpZ`LIXD`ж1^#?$dONI)^X'gLl!>SG}`_DYM 7k x[ ]0]?K?6/u ␰H=ήv%xO Bk:Zp|$1!q\ɔaa8$95ĬAZ}k82@8v4l\8cݔ߱"v&m ybl߼ZF _y ɻ {+Ús# a"Gl[ڑ 氋Vo/pL?VChwI ĸO:hTi0ܼG>$n2AvY CРQwrxc"e9Q9Hq1x!90|h0 "xPS:#9oixCϘ7>Z ^X|5ZC-԰qsR.2\6 L1?氋W,T"D5V Ķ,ŶMrDG>>c+0,ޔ+[mPvޣ86F Yx 79ۃIWEq$em a{i qpŤ{IzL@8X8qq=8c('Nm#)N>Z{6_S{L싄-}Pn" ߿qun#nP 9ײƗu{,5!t"k)x"Of8rqMWqHz5%6ephD̍Cdž&JH'x L..Z{ڹܹ2e"+ثz|lq cJ戵W{hH|8֔oPlS6\ sKkx_k((y Ms#d(-|aL8n8CYS.Rv"-bB]!)J?&!IG<4q#Z JNveD))h␰ 氋v bM5OYkt"wע(Lԧ:j) H.`=F>|q7⃊ b#aWX !7Z46C^^Z{ wk}O$O O\d;yh q ƴ Qb!t0$aĊU気amS6zm odp|uzypLKHqMix& h_ Dgs/)LWabLgaN؄7};!sʰBL_&&b37{,Pa!&$$Ȇ⢯cs-ZIR Gqp M.Gi@X YBrEkxBŠwY=ΗϟmbB1`=1$aiH|hg-)}D|vAXZ"ڇH,BxcsX]%a~c$ ƴ S5ӍoU ;at ޣtgmDN'#VSL^8;^/dI㤀 I(؃m02hg(*m4 Q DgiPlYSzܒ> kR?vڃY{aut.Vo]FdY{p^%v(!i`GE6@*`dW=(2"_N>81Ú# ?mz}~5D5M„ C2G+x+k_4`5]=؝ۅ4- kHlMok @\.wyh^ʸvlSYf8#YŒ}^ĘuTj0t\[F)Ht ޣgw¿O e+q0ŻavK{?)O1/a}hO/a/=zd-8*ms')t=nyi{}}YiJ[ɖ Ƶ,#p..ZßA"k}R&p WcqƥIl By!M95 ۔ bm-B (Kj;gT>)x>SϻqjCM-1#Gmn $D[Á'j F]ya{) ơE4KAAkĚz00 =v!pX :Lš^SqԋTxXXwBm 0&L X M 'a'5 pcnޮ*to+g)1/[pzVc#GY@ %*RtoRN$nn M!^N ޣ``8@YOs\8"YksLKH1m@BFl g}Vov΂o%ûeUMX+ %XwŌVq?)QgKz6IŸ; ޣPS%3Rݔr ֈvԉY8癛Uq{R ,!aJ(Nl6CN$(xbwq1MN5x ea1ƈL8ҟ ,%\K9v[ ޣ K|*wUo+J8wf̑P-aGTDb9&SI(5D>Ar ^ǙZgHatd#ʮ5l|4Ц 딆 ḇ%a{i qϸJaMVgd蒰c)I Ǎ/_Ʊ%#8'0 HStX;Q脎H@XCd]ƴQ% 6bsE˿=znLJeHGPفct8 !y^樍)0TAJY H!!'̱?'ϓ6eb͍?'v!'#~hIlb/qR$a4 `(FFVle;,٢Jb|YsHs|yP֟Tq;`tY C\9RMnͩ<ǹz@qֳFsܒUec4tIqH sKkxC5*EgS-pF! 
'e _W72$ yf/n!`a= Tl*ѥh(M.;qGc5$ HC(<ψuq̺95*:W@Ai #?PObԗ9u>4$L4[>0G- qQ**_n-)s&5R-̤.a<0U0d EJEջpئlg{a[=@[KΤ bBbV$ ћ g @8DNXχf϶m +n-!Sq7 #3N8f8ǭ֐p,$ G_!`ϵ l5cY6zק )Kc h ƴQ$c¸m~気J-t6JJ9ps s",|F'D8{_9ʫk.֥ZjxɎQV֖TRƩ ?Ί@* 6p)5vYRIl̑.K{|#F Z'* <(ȷnɃYjф&Y:*+H\$I@}2IDZ2u8c8$6>?$:A$̑~K{PqsW53(5ŃK|!$!db"Mc@搓5 l :W6YSk>8doXEqhc@B(kk)]n sKkxC∆9%cOUȤ?qϤ†dhc(ɣVF1d, hQ%J|.(Ut 1)H Q~(0 #(8252kȽL9@l)3o+-,xJx`-T?\mŰ?tuqOjS;)QtJlydynArժ)rwdNȒU%#}-:Ι,e;U#)眪-0(^9߶CZ)kuX-]Cyl`w@}6e܁hS DC|>:P惃N[6X%!tZA(wrjL^,չ*"V6Ǵ4$Da! blYBp*xd'OySE)bD[BzK阃ydy)g.gp\w:#"g.&MX :EG%~WeHܘqrgJAD[ftWr!9FYxV{Z\;ACQip$vKa$s\cڀ(%!· blXB`=Y%-Gb<4'ǹ(i\#1b1 Ƈ&E]|SC6M/C^8 [T\n|S&d- uiZ``- ƧnMfY1$}(Yc, KA&e"q,7nŊlBy\blU5ǺzT' HR-! e\mǗ^#?P-S ޛOJ}| T?*.}oRFҨ4$.ޛOʁlM ғa/8o9tcEt˱/>+E7:\_VG8.I 0&4k&tv^^hm}]컵),ӢG $L֐sS,A QݴmCX.HcM ơNX2q<]X툎9dݵ .dS֐P D&㿭`H&QOE氓RttZ$a5hnS@<6>h7;E9\>V Gr9Ei3^N()D֟C;+{>i qrk0%pL9%a Bm5p95>Y)vFnJj8|wJTĽ#BzpL?R`\ݝ,):WǶQN>Z{gcBb'wʇXeJyptIXC뜔ݱa};A2+ e"$L$! 6!U`= m LmEFÎA!+#18aj§bka5 ~:4GOJejF]h18|}h0 HZ6pH6C^^Z{h>x^L'-G֜*C9 TqKfqc@)R(ümY#}p2$'88K"&anm(㒄5$4L˷jA68@s͒-:^JG!d2WQoHG=E(ıtχ~b9g*xAbmL[OQLM_6z >a2p]0]{ *gT~9).(ݗp`8h;bqYBO- 6e0(5 IД}{a{i q(ӈ!t2ٔ BfVA&1r ۖd`LD/JPCT{ D\L (YPSL+Nsa1LvI4@+7u̹M~~fVK]}P&-%[M- s79`L0TD V+ݦ(>y,)7DZ<>zqj Y `1o+00eZ4ڶK,{ּ0Պ[є8nWքpRb Aϳ82F<M/G!( IAEn4j ȄJ4Wk.sIHƞ%<Ҁ=mc7NⰗgVu(RvS& 6<Or̃1-֐Xڔ6eIP~ǥtI@j$]tLh ߡ)~.qc@(qZV0$a 85J,T"טhE[o̲5g|6Avv^rJ,'X%aZCAfߦ5zh&}[UNix>4$v֤xo C] sKkx_R#ܝ5DU^Z0R14vL4yPd[YCC? 
[ 9]{rN)&e*Nl8w\ɸ+{@D͢%a gH$a(6Zo/̑~K{GnVP4FMo'NY:(nR V+%qy5_n) kh]mL7K/a/= WmX))7Nyr&y]f23B'5 gk´B4i> c/Y..Z{*$H(cMA§IQE1,9Γ)kH\U"@YC, !¸Oś+IX{bW#}R>msh]sSQؑ6:GLJaY`.J T)>| fh(\# <<"GK`(4Q N B(`?օUZD.HQ @=v8k#-$VMFQyt ޣPO+LƀQ8QŽx㤀S_h0Żm>fH=<~AH)>~!E *Mc4 U)DߔX C'f6 3Ka8\ CమK͛L FC4{1W\49 a%LY5D AeGz) ;sT Ĕ /j[_0%Jh0.g INi8N/d8$9oix;cj$MYSXvX'ܔV`<]oSRԡ&NK̐^K{58>(7pXW P{ CL H~޲bva=,|KŤDу9H[k0"VcJiC%rC!9G- qh:U\wS(wzC:UTG5}99nYި]ܖn ??sAkxAu b2"y9.l9.j?ӱ9N X^ocsK:~C?1j`S ,8oZ* x㨪β0?,!/H ƴIs+glZ,8%Gx`Kbm)+g/~M a!sTIXCCtI߳!a/a/=nEE}hJā"NA"ڤa<8$ˆ0$aCǜ sGVKNj4ܔj`A+QhU-< qQM9Nر ğ8G)3 ~jC}*3΢ADÞwclJc EQTno{a{i qp^8{.ծ/IIq酻GK߯B$7.mUжbʗ~z_IPbga ᨎ!k>C0:$ޜ*= H=EC$AcBK\DG&`ʫ)`UUҹ#cM@ ޣ*z}:xhKgSflD!ڟ(wo^wXFXNnuRACtlJmMe]v!0>l2|D B{] GP?l*0)Q4ˬۤlt;Ǵ=r84$fΆB tZ&<'0GMQEv<8)̀.Ҽn!J*~T:$@yCX6AS\@p ef_6ޔ5ld_0Zsb.vsK'HJ2J آn X73l4vkBF'm1U#?e`+toqcW-K-_lOq-a5NY?_moBgD}U5h]|_G5,딆z2F6(Z{! ރPi44 `I,@S~o kriQuJCeشA\iġ|/6N&+T+晵1~Ne˓{>8⁽ 6 a%a=la0~O%a5Дk0"V0"OfZ.'[C6e i0T" Qw*"`J&lEnQnFtoJ=} MuLH!pР%5ӆHAdְO={UXźs;<(mV$*,9J[%aDb~",9ZIA~§iP1wS$M֐0vwS ^[! 8508 ¸}X%*ZAX%| b-0I w~敬C@He $W{3"2sJ{&%*jؠX,*e|ԪIWi#$Li) s>l084Я+ "gк=6qT_'ivSgSN)z40@ Iv6P+ދ66H`#,NkLu8s [l*0\ TDfۂ'}0GLt|wSʁ~#-UQ3' Bc:6цsAX6i RCBB6CB^$G- q(HOn&og]P pA"4]7vloB˳F6@_ Ç׿j$0[7wBM)G$T"7qWU8(o>p8S/)}DzHr!9%=^RTeR)7Crj1VpGQ\a.I4$ ,MNj(7>;iC8.0=b+)kʃwWma`rD*aX4hTmǴ^cM  j&GI,+Uw)2]C8Y95$v2+6)5 sI)xBSo>(v% qP$1&BEƴڣ$ağ9oix"U~SPhFZnя"~%$q#?  SPt@?G ! {{gDě&2:S0D=!Ǖs< @PB*Å`4!r"95G*9TȔׯSbṊ=t#WBgS.锆Ũsa=U tX| dxp~*xER$c&??3G+x8>kʈվ% }4[ #fPT`L !wohw=,|0}k\tV16,8ĉ`ιˤ cr>S`L0[CcִAoI#8EjMuzRτ-𨽋hV`X؍_O l(]=]v!0^@:n\Ä| F'TH3ԓ  ` a*[ `,TBI.Ah@\8ؚ 9ure}X8O%guPqT-I좈zǝ8:1CD?mIQD&Qt$9CP>X~dכD`0k$ ɄT`L خ!%N&j:ͣ[uT:}H.ɿ|7"npQ]>Sn*5ֽZjE1C:ZA9rRP4DѽM~~fVWaDds7)ɖ+tĚ1^39$x>ÃRVN]t8H=N#leJdN|"cVn(k8,. 
`7d!!#=bRehR֫ <۩ނ bu4/]4՚)H qHs`YP=Ge(>u/]XebF/>)p.iJ,AogTЂ+ftSbCgbɳaXB׭`<-)a8$95دRuA<'[4wz5]#LL)P, wPgLTy |D^V`= !6'cuPF^")/Bk*1TLEpXz^lğ9}a-<6n>) K16&GS"V樓 %X@ ؾmfq95G|sTd$D<<2d/1Q^qMQq1NƊby<1%ۂBLK5m4 SӾO Ʃ a1esLՓcIxۂXNg%?soBIjJ-((֏7gߺ-hL"3)^$a!aHw|~"]Į`M"g2IbZ1֐6 !(zϦ"Aϓ0?éx_V>NojlixX'%`[h65$9aJ T&?? sG+xTDM`e X0$a~+dO IC6qu:a;i Q;F nJGAȿ 臅`Ŧ{cx_mJVP$a СfV*2)9-gkTpq} Tp\8U;teF3M'(k}P"zQ^kVaY/-4Q$:`jh=0=[}d믓D;JȃGa&nbP{r'<VpNF!$`{h18,oD0f^>YW>eC li s _eM5"a`遈3{TĈ_2G4DH$a Qa;Vүh5H}RX:fNh7- c$ sVٙp@,ţ() Ҁpȅ䰋*>޸J'aJ _ǷfUOT( cZBoJNih Lq8~zDr#;}8HXtKՂ6eZBfSh٘H08*ʥܔ+KQo(P|Fl}+HXIJC&''a= ZM{IZ71ktBl%q|ݖ=^EX#m&^&v7ڶf„Щ?صf8-! %lH]6.]0]2I}PصFfլFvğ]a09T/5A|k`mz$a[˅jw底˅ǣ¾.v0qpNjHgɗ 畗 Ű\Ȯ6]*.UA%e=Z7S]|iEQ 72-@J@ cⰇVoaq%"R$*_ۮ W1m)0.*)UKʬm`= E !K)>LuN!o _KDqӭ63$JٮmFknf/a/=OR{Md$4c#CLfB7`K7Ն41\q Q B$H^~AÀJN c$Ɗ}蓣KWkLʐ5p`3気W+ m6qfgS"fO'L=mna`'88I -|~6ģt l̑NK{*5ʀ=|QMHl$3&4cy=2h*5*ܜ * kMm:"0?P ';le:"8w1K2c35gS.KȋHۆplvȋgYTYNyrRUώ3y$w[d&WRrDfyh0J!5tIF?]^#8q2XAK{$!De-l2- kHR^ґ54I1B_$a{i qRȲ i>gGQ1>8 R+XCM Vr0$a/=innJoP@(6\=(Hs=57iuqH sGajD~xVrvSG8 XnP` AVԺM [ى#(e[HCv|L֐z߸In.SmQQk0  pZ(MxFN#(4zgh:]6vt%j6]! χcZhD@Y!p#n4 lJŮP dzOM6srLetKHC:a*^^Z{›=2lw2&6?B{_jq݊LiwGBj ݲq:euiR*&r8h:2q|0Ƣ>c CXB j3RNHʍ mc/(l ƴDS*fIZmCQ*Xo[P1ċPS0ШNl-Zt(Gf{rD+s+0 DV)P[ l'a= 8:.>(7ķhHq``ws_/yjy0Z1MA{4 Rk)xBOZY\mʅ*:5\:e]Y劭qs3$LaIj8%axk)xBo0,Q7Tv<ΐ9N֪I ²I4unŸ"9oix#}~G9jegYYy"dSR ( ˔TP. O:4G jn%8r',&3)H\շ0)e/S) D|0}(+!-mL8 e :w'Q0\>ߺ9E㏛G!Xcae{Z=#yc IfxGmeJz`;h!mx|)]B`:fJp >Q _1 8Gi3 03"w/~Liqڵ>Бdڵ%(od֭!1m8?2i gFuIPJ)Ox}R&rDjZ(J`͌Ch1͍H7Væ4l b>cLHB 8ԤrRLRL52$w54+1‡,!VJ ,MagV8 OzP@%[EIj.F#sތ TXCbUHJ,k ) ᐐ!/S{j nLLʉ<tOEbkXVQ|h0)X5tIg9oix穓57`&TcG٫07Q! 
Z#i@Q 9NKP{ ; 'L>ё5$>a+3GTdauI0-5rl_CIG*)4$%"W/<zy0q+0.j)%@'mPg9N(8 iE^br1M6rGزI9X[ b^ Y~J~S!ֽ0'nyvlwA&^Š*u~*H\)4L]YqMϑVazgc9^j-Oރ:gUI閐9Ao0 d$uMŴ46ުS ^A,ߧ-nLz[AևbԾ9iixB8uݔ8;l넊cG++l h7&J*8)6!P:aQB80K dv*:<~Mp#Ozv^C;c:МwS*^RbF;.OC}jr ѨC60ܤZ{<;AJ@+ ke\ǵvu1AZ@ q1Sٚ-VtCr"6BJO DThcv E)m>t8vsIWt%%v#CfKa)/u>)v9%6B”Hko{(?:)^p a 1V|yqh S2ᔄm^^Z{\VHTU`ө\} bSm[ܘ !i iXRioY=ܰ3N=`S - q>n H>aq(HlRFZ  06w=nJpFYC\HI;aHNS &a؎N \Hj ixB1z0KAX(l gI9"Ըo R , bd~A]*\,4A75K6Xzd8SCI)V6q95qcW&ru'lЋGT69FxA[1m@Ȧٴ1\9oixCZzؔ{"֧;0v+Oɭ6ZCJUr4I19oix,"p ērWvͥnZ@ 1 LIX҄fV; !B/LiACCD_Ǝi1Lc(!H lil-j5āP3Q.ooqg[tM),a $dt Gl'G& 3Zea l%b1$a ƴS(Si`FѶAI4ǡu>ǟerERnܨwz2Zq(7Ǵ4FH,+ ƬY)ⰗWM̪~w5>-އ,q'C2yf EQ!L/a/=np:6XWƔGT) ƣkMbBgTQq* 0(TO(QRL߅% Mᘈ_s4IXCӤ(q6}N ' rcP24JHƴAdxa{i qpl7\0M”‚ϑ۵Gtd+sb9R/[Cb$%*LDIuk-ڤ aemg|5p;w42#\O& {+7㒄5pS56 ^#88W Bx Yզi,qq!+Px OIJF[ی4c񳽲mդbt)Bg/hu;7ߘ#pTqQ㤜i ζ bk 3G+xB}"b7Q"Zfa55i"Tl?'`LwO-xk P_ړǑy3d-KS@]ФePPdQ Z=.] Nń-HHb˶]:<G</Oeibx:a;i Q]Т+_IcϿ#Yq-1ޅ0d!ԐbLR n $l8$95\NJE:mVD?i6EDm@6AN#ݦ(T ◳%)]t# !p91tHG/IVb98Z@?AKcl&Cۖƺ$Dyj5(TqxaB8^Y%G\ 2PP"gC ߶!I/C^[kJh:M<)b 1 z]v׃cP4~.q]nزe)xALdWBmTn4V_Vz퀷f qtۧv:͔* klQ95uKu~p_ q=IgH 0R #k4AÚr>8kixB{%!KJG7@2cI/|X֛cZBiC{9p)1"mkⰗqo0_zcTQb3b$Ϧ44LeIA5kC.J=ފ͡u7GlJ5c77Mr]XKP9n[S*)<>ɐJWg7-D uFqǐ$dlJ4!m0yߣKPe&_&\uWc).sD&^$l[:J>v魤_Abh0MW7JG或[cn ޽2%>#ӆ0&1?ЮdS&5b].Ԏ J=Kc_|h0 5%24N\Jr95ǡŐݔ[$~=Du/7F j( eX *k ɑ^K{YDIcG(rSL]ς6 R(?7 [` H)#m@.$\L 0#K%uf&5M$B]8JcJB'5m#D搗=-D>)QV$o7wy 6?lSw/AX6>?rq}mŸ"95ǡXCrOOBE:j8]S f/@cZ)?o88  cY QzRXcrwlI5=b)!8i ƴx Q$VӶA(Ey!RC>LN$%rjbqˤ86ACK" 5mP!-|~ "k;NblB8$Dr(T~ -[L%٠jdVNe;ǚ]6) kH|)kslS695ǡPEX}- #Ń/o׭z S5oUԺlDԶU #1gǿHX?ss@i3<i0(pDh;UclY@qޱ剤[t$\tnC» OїP{<]M+-EI,Bi)p3o;4샴R!m ]Pk lV\ b4ΨI8E?Mt q}c-{K>L" K0cnBY1"vOB&gq<Éb mqȑx ԅcFk捷4$9L4ę%-wY)'NQ M>Fߔ$QLzpޏb:ʥ ,m Ew|WclLű/q|39i&-h3;B -Ea Gi upm 8wq4E3@g.ޛgxj᧍&J?iZF˙9'?ծd4.=V^kM[#J̉*^gS-FDl:#P@Ip5d!qAsb@(VvZFO `c79LաUuV/"4"a8c!]#O C1F*7-Y0NV nFn_4ԁ.oF?QӁiHa9#&c59Zq2c:E+t?@x?zcYLHC:??'K8f鿖-WCP%ko7x5;5IoC`_/ $m4*),m /TEf/g2!ɑMԑh!x q&;.iiL( K8J[x#ϊ ѽ@I xn YMH )M5$m d#61$k&y@\$RGѤ :qHbgᬦ)YҰ 
ޫІ 3r8Fr"ϠR]kV45$ `>J*zwR) K8J[xCbҝu_kKd4ʽzAĖ-l 0S>H~NȸeAQMZ%'`(ˢCz5?hڢۂ|?%gvXXҰҡ(,(mbܪIƍš 4#ېrFi$@;^Hzn~q֭&>@Bs'w(fs ,N R]sK(A다m/Mҟ *ZɈaQ?TĀ.*/bx9US4͇8=(4Ep uI _g6']{r";}F*0\Mv'u]Y? t)؀/…˓tA22jxB̩?#n4ik06WxEqUJ q"^{OiB92c,kt8-R@& $=aMΰ K0F0b$^N5sETg'i:2-@OcG+'h.o/6V |wA cE!" [x/B:οqn}2;F]uh$?s:ߪzHI_B3cZA$|Bǎگ}`Y;V4 wn .IMG]a!*|4Z UA-)nF 3*j,ZT#00N'}?Hx<mhL̆1e841%-ב NQo@")v#ieuT2%5d4}|~'7;b! Y8Y<,/(bmqgXV(a޸Nu Zg]9QrXDI  H] %AapbIs/&Fc bs<@hRҶ% W!܂50"@m">gNZmаD*-σc YW]ѻ[%2nYxC0%n,!oMТRR7]\\@9.iv;>WRW-h`gZX7%X%h=,$m۔Vi|F):Orט6#yW sIȵ=spD}$͢XLKpT-@+ZӐ,*gU67 0ƽ9Z; L<?Lp7! ?:'x,>8e0c;uD&i\a'%zq,h@pҐLQ)q{L\9>Fd0 x&Xd_+g>ozҕO3ePt@:% m~;A>q"A_*4ҰD[y Cr.\BAC>D~2ȸe,Fr!P5nPTՄH,1! '3qZt,8YC {+@bvakuёiH kЀIOf?aSOP6Z+H[4;%$p̖/Bazpx|t,,q?W%-בw.k +`DQҩMވ;i]<;r1r ZW.>$2_ kz+vmNhFn ,̞m}9xw! [H ZTՇ`*CC z>8B'`uq޲x okH{:`>!N,HAW!}^D<8hfZp pRۤ"Ko[="9zΚLM @_x̀мyz6O/cCp9;:K!Ф`TK+TcweȠ^l\cYY`;ȨGߨHԙ7ՀsUJ(uƨ#[rP,LWpZ: &7KKm\p=v]֠94B5vɅh.! GU`e 8Lȯ((nL bǹ sMS▂ID%T|; K H!{ <44G<8@wj+Gk3uMCbrI؛\cSs M91sh"*u!({@Y=lKKinjڦ,TTk[5;>W7RQk~7QPLlwN1`k\# F %pK850-&q '2I QAļLp6䁤w!^3 N%q `DL:2݌G}t pl` .fbD+ 8BxA=:cv>9ҺPnfvC٭#QQ6r,$hH q5.!:4EJ8J[xCsun"4N>8ZhZEc؈& [0Mg4bߟ4샴a[v KwkD%G:]#z(3m Y0Mm,B}(,(m_xEnJ͸Q/s4N <66!P 8d|w7O4xk'Id2^CbqbiaD5) e uԐD\ SlSJ#ܝ}'ep^6[!#e>YaƼcCo%Il/R``41`.$_v1f,Q{sA S+1b -A]cIic!i% Y7P҇hEJ8J[xCx!a0'= /^: B)QH?ſc < ҡ $AU ( ,9_} 9צs~d>/Ca&=#;4Хa?2ROL*mjhS?64.cR[a[W&7ݗ(|wTiBňtAyH`(Vę(CɉǑ=z 9f"!}$0pc hHu) K8J[x. ~Pѣ I苃#]SJVxـhF&yGPNKyNW GQUpScnbT6uo]8"Dkc+NEñ 'Hޫ`PV=8(/9d.jW9'J&/%B.'KN,\~J3 K8J[xC(hҘG&0Urn Q4N!pKjbNDQ :CJ(4^iLg`+zUW_[#F!Jph >!mIFGQ{,*Mnq:0'd,"]t)0 s8U2/ Cshh)ĭ> `gguB@EIcK! K5pwft+@ۼʶ&L$2HYx*fΜPxN4To}YbXCLq,۔$-ס(ĕx%Z]T^ O]fݓجURJ.2'ǢOfQXQŸu > 1 4芯OJ!TZ`r9m o.HK}쐭^o92-.Zcrw@}$Ώj6M!oNw)~  š.+^$(]xjeZwHt҂隀 >僴 Q%-ס'+>81h.-+F{4,;`l wie =1 ZEpa1O尼4c^#܆K/^#"`s`$(YO˫M3 ڎ!vA֐*T{Tpq%MNU *?~X֐t rpDmRՃcQ{d I04Z~oi 9B80 0)H"8AsH C )^51k@m7ќ@YݠE'|8A ,?@rdP? 
+!%2fx/A5U] Τb?~Xj0M/''Р6A4@ hVTG+J SUAEi[UkY;*Ē-$}Jp4lx%-סP1"̃$ FlJuf)a- [HR_brm癩Z(,(m\LG e(xΤAe&% [ 6` o5I9Ӂi@L]R9$,5yȃ`{|65MYW eڧ L(rP=?^>ڦ}v|{)z YK+b4#ŊlT;hK̆[R"Z1GJir0 $pB?Kk)EFMdH]x';*X`,NƔ1_HTݑP.@MĘƞi}ӢXͤ{LS`hVc Y}|(,q{<<$obNrgQ7MYT>i)K8Bx/A$5ZCU\ƶS 6t>œ/iq$o (X֛z;xpi7n&^my'הh(ZIkGp84XKe2K8FxyNrrk2&pZ7mtQ.LDvr$ϫ]ׇD-^|5J0>HrPȸe὎b/b/st~uŎ{hKpv߱`z,V&6-t=QX"㖅:3'́{^kTR.$J+U:+lre(zQcbz'G 71QWÁ-(QblzYZd-a I{`Kr|f Is&4,(m&DH%3?趂vL4Y84$6*ޫЖ 7[xNF@cĹDk{H,#@ȂbÑ-\@)NJ~ln4*~ c#ю f2>z qWmoT}B((i-,0M`Nu]F塵R p6^EGD#4U&Hg߆5$1DvM7KK;>i%-vtnraj^F3ړ<[ ޹t9^I4: =1)FxB7S9 gdzEMA藪q=&}(͚Ԁ$ə?&=ڃOZ"S&&[IPw0 (jHB!9Rӏq!:N` "%2j^?Z#!NeS.z ֏E |*Dt9i40 2Id2gjf /NGKWt6]!HI򨛦yB}gR2d@cUr6_z;"384N&ļA I/δ,D/ż\CEʅ R[pS } 'fxt8^J7o*6mo'JNI#¡7^) 1bp_tbEpGnװmXٻ&;b[oq I0MXiC'K8H[xB%WnZHřćb-Oθ?u7(ڇ8fc:I~Np^!U*Ùc\œ*cFGƳ@,CL%9]p%r|4_dp^=zExNܥDJ0\jC-"2KEHwzL;a6ߚS"ÕwRCKCgq[MpICǀIzĭ}DH!X"VHE(!v5&tXM)b9vy*i)u%QЊzjpѼϏ݋y*_ Ni}p+rign#!b. [0ِ*EbAJiX"㖅:Z| " / P"i,:5(QԂh I'0wrmA(,(m>7/#.1sZb`۬UI-I 4}&}ȗp@m? @CY9qrlc[3"QĒEv_d\_'/ 8@x//hsь0' A81>rH̭Ài,#3 J]G`(CD(  m눒3DR_!GWK9ev!nNwtkvI#K>qFY 95i?6*ֆ"y05(**C_Q@c٥*2K8HYxO&D{Zd9 kN)!VN4`>?SmR*id! h U(BR,|>@4f`$Qa I tp.T^dC!RC/l&PD:JZ%KD|;BJaf]V[ c*|-zP7'6wv `vU$i)Ѥ }DҌ-qU=џGKJ$_b:49;mF+F*&  -Œ:_1ჴ ay6ⶮ8KO63vOxq!_&[0M'84qp^AJ1f1ڨFC+hh<L4̱,즎"% Y ?jp97@mmJP5&s*6Im[0ݜA’}F:X5껑M?5?h6Լl("DWJliֽp,d|/|` h%rp"yUK75M+rA}MB qX(?.H jI8H ^?jBONǹ`P7F后`>BÜj8*Da Gi u0CYJے8pK =xWOf_[1`LK#KOs"5`4xdKO-u#(}{!M``Z_\Rp@p] +`m wg8 pkSOK]a脢$z菁Μ֛l%N?? 8jxB%*ZJTܪ؛{܅FeC.+%&,]P+s-m H ? %h4~Ŷ_Klc&@mtiǏ.,%-*\LͻzaW4$1.bB5גCP>Oȸi:ʩݏd]2Yu>{!Q6ze-d|<EJ04^2ɻ _`T(wO+i &)O`>>?ɹniB}Id ?W^P255>O@v]FIiUYtL1ݎ%y; $me ě@oqsS $fguNY-iX"p*BU)+3.+u1e`OpZogVt$ ή,Uz5NeY]\6`5׶ߗjw^1X1{ymV%YLi./}oݤa^vj Iz#9UP,f9 K8J[x'JԎcN98SI =l[DZUG iZ:iǐ[1O=8u~Jz 8Ȝx ը /-itmf*Mp5T0:im,1%{/qFpݜU4@+im^$H"! IӇZaɉczyFa[%֖6_/dEU֓p(iM~\H ;4 k`;rr91U4aWfX.L8ZYCLO&gh,ԇt;K8DWga 0ʞQ9[ q*p? /UrMt14`h4m ]*UEuK[!t)Lyč~H \*9M@E}<Ő %+5*91p7-& 絬!Qitw39MP1y|4[(,(maJ`g܍bm[[@d>#XHzOgYlCI K>c Da G) ֡JF]we!vOGSFo-,Ԡrqb},\>CΨ! 
EmEp]\Xr06m 0.I) S Z'Fl;8Q;=kS Gdlg{ؕȩo̱!4YiG$9U 6[pA[kZI{ -v;틚-&Bkk%%5*uh! #4aG0aCQXQ{Aj 50ㅾ Q'OEChηKaY0}i Qr_W['4C4EJ(ʴ^.\݉p87!m":H?#[I_R?-x(А% {XF,mKmm~%gbIA~N:Vtl5$QQd@̌NܨT*'K8D[x/~LTYac0dzb|9K-Ұq86e!pp^.wcżL^& *PcppIkX(wN~3 o0.H AH"c?ZlIjK=R[o%04xχǂi@8Q tۊgR~ [+g>81&T!1wMT)H[yb2 욅X.'4ҰDF- UhU`1HBz͔nVuS Ѥa If>`Q.H~NP6\3ԍhB''OטoUxߖtd2uL4̱,=| Ga[{zj= WFKSXF\] W릎Ķ,h3!  հDy~^P~_GbrQbǶܫ ,d ed]YHҽnrbΊIn%S>H%a\I8J[xCu9-=%X@%z4vO_^G_`Cܟ;l}n X6V%QX0=Fc煄:҂i@mzȾD-D1z-LJh|)Էf w{ ߼)`oU7uT`%fRӗ7svLҠ R rX } wIwH}I^l 23?$K4`>?hQ@9`YkcU\j;|Ei֘ɡ cZZ81s4l)}\Vepg2]~r3;ИF&I~8siC0Mg -|{}$e,m >xp;A `G!ݓs% Iற >iPHq,Lҟ%,YVEflRqzƆ͸Rmpͬe<3aq ӧnò;p4EX>MNb&BnѸ9LRT  lk\ IcG;dk%ThEGy 5rUEDBŽGP7iUQ!K8DWpq,= 1ӑͿ5$:Z'q$hŊ1Ms[;|A=,LbW]ؕQV}kyd Bup"VxY.DH ^PN״}q[0$GӴS^&g6q:P.HJ1H c?`-[4Ac28y$KQw֎C_Ƨ2K~ |RCu l#$c#~ƁTPH!`1`piP!gޫPv٨ۮ(fUX!}G^)=j–@b}s!"  F ^UB94MNE?>^*z燉 4{H<sZ|1,߃"zk_5d=d\mk@}<] 8QUUhHG {s[r&%11t4\) We-}\9OAX1{uU"&(݅(" TkH0R0Cr Xl- ^%EKÉbO61ڥ4e􌶴D- hsq*-m 9QX"㖅:<ҵ٪<LHb H  R70@U͘ɉLT`B!],m ]ljc7h3Qs\"G$H.{:?afLEN}:NȐe PWa9Yqb:/Z򖸉e$/΅D6D<"ȏ `Q+,^%[|F?JUt!p5h lҡ,mཊ,ڮ=4`x6>>^!14ߚN=b2>ot%֑& 91oPdLJIWdA" {(ade"]|50F_#Jp}%Kl IRE)RHA;@rޛ B40ҺEIq  HP;jM@a7O( ,Q{9[M5ylmKHVKB4|@Cf Y`[As:shL9α(M؜ M I s# IOE>4  q}49cߋ|1'ƾrj8uxs0_:2Z1˵̤Kdr]gXR>H~2 d2^&t3p/ro>*g@V)ζ,x}i7H8n[xCw_{3p7 Aa)XYIp@=Ԧ PpbAK΍ pk50`,5A$6X']uI 9lぴOb!m.'lM8hZ:=Rw|U5Msm!ia5b}-\%-סV]1~ FJϝ9 ERӿi I*Y0MLЩV6er :} 21d8-Ѓ;cF 盭$:QxaFjσSpn/=tgRǯ@J)q U/h[QOFrJ`>2.@Q)I˿+Pfo73q)e$n !WHJ[C)j J&1l 1OZ[ {8/TpJ"U&C`ID4̱(sB$*`Zޫߧ)Vadjy`"Ui$dI_JNnWLCAc*ɄrQ19 E~qjJd3V@HC}Q@ZlG"?Mb`C!;:;ZBddȁzgv@2#=cH H?K9~*$gSNrhljxZCi,0g"7-,ii%-ס7f }k<9.5pSsU sO݄ ئ! Y%bn<Hni'j/ĥET6I!.ƒ5)J}X'w dp8ș|5wTqN /)æ@И1<,18;D4̉4B4JC d?yT`F, gƤ+Km+=h`.>?Υ }5r}(RQkz^K͈\=mWD" }l-ζ2hr.N!@:%eu]%5"1WƹF%~ H\*xsbuP)2(}CX{ rt _C$eܧ!04t 4,4&D@[ MKCÿ`K8J[x;ʪhK99.&F*Icgj IJ )dlK=4џ<ދFneh>ag~5jOZۢ61D=,t#t[Ұ# Ii:0[5hȘIؑ)K i؂i9{ nn-DMU055 #d?YeP-Ժ4O!uL7QIF T ;.F؝ , m 8M.gh=RUk4V>Kbvxق,۔ ヴ%-בp3̹٘4iVX/^H9XJj)Ѭ! 
K%'`׿c!pfl9}^ĹW.$82ƣλI[KldМ4Х`)ᘥZD-$1 Hu'Zln CKBҗA[[E^Km+TL̈́V!= ;ue JD /rYH>!β,w}F?P£(5I<9PK }:.J4y~ph&wޔc#;Aߺ09vGGr`34GoM[1Ll@m8`>-N-T\~Ql I8J[xC>\ܜډ ti| Ik2*[`xHeR|ٸ.ԃ ?`k8^vJ:f`" Y8ZmR{Eso! Rޫͦx{hNVϣCۚX&n5$ ʂh58cՠ!CCQ㶅:T>uVKEM 1:B}u)gp6^U)$gq|AUp Ⱥ@ cD  .cx@(VT uvCF` `jRA7e cH҂hP93pd!!АW[Gen5S9%*jڱ@zjze%),lٶI e([>Dco I8n[x yb̹ 4 mn Y0Mqn偅f}&(,(m#= dx:(.]Iv$,<% XB>Q,G(e:{N*+=3Q,'ͷQU~MՕ*9D-$]SlN}w 4)S^  p2^:nѸ89ޅ=D>Y 6rV8 NNgoZҐ>ޫМK1AbL`~h96JF=iW<=F84< }jClۣ\F1X c͸ f͑ׯ*: ~Tq[@4`xC]Tq!=t[d p-T.b̰37ZQc@wG8 )91JoڥH"}Mp>v~/AD@/W9)%JTY"ÀiPpQI4l ??>v~/;;b25rf(g\UXҰ=b&GC-m4XLGa[?Rh |՜i̦M$r;CbIL4̱,\AiX"㖅:xl$pKm a4-GN>ڶ$ku \TVӴpmѡ RQ{|4F$"L`#$KryHR!@Gh˕^k~]VwSs+f!j?ǹCbL]Lʂi8KK KKeUP6Z{T֒V>@ Lewx-1Nvxx--`XҰҡ(,q{~}ϬJxqv4PrإhT݄γD-$]HRҠ蔉<=nI8jxB|]Nך&g3F6n4u4'wZ0=]kz8{0a%2nYx#xP/yfs`!`Ɠe~uFI||ʀiPz3b }AZ,σSj ].%2nYx<؍;H}9S#sDTbx%n*X ?9HG` YKk-Gц/cro-Mlʑ$*&bX\1f?]BDދ}Jv ,nknlؾ8yr`.g L n| E^1bYe W9qr0jxM#[#ѥ!I '9q 4*Cv_ܴ3ׄ,FJN^ fXѨekbJ,kȂi@*9qYaYD64C'H E`nݡ1ɑsEܘ~IBRs'a.)9m4(Qsa dQؘ2(Rs .lPZ2tctΫV4 "fVVs,Qvj%Y8z,Ӳi`,NE ͍Sft8QGH@)7hV%,EzH|_/>-a#˱pI>H㭬(,q{}w 2l NQw*;_^Ei !mlbUqvhLh }~usKpiO*zLc[~I_;p3WROZ)4p١o!X!{lvW|[Ĵk_IeoY▆-$ aNፂ \R ҡ , e ~ /+W᠇Q!Gq9hG)BGn4$ZPcF9rp^otMz OvM*hdibDԄ_Dɹ0$& =X?H"+q'? P/.0I1g;<9gEr^җỷ>Ԏ~amџt - ׂ9F>9JnH2qh=q-$}ۜjP>Q3 K8J[xUtǁnQF7n ˄QD+)X"g0Ix#l+JQ ,Q{muά W6s8y3-1y itiHs -_.nh6v^@B}F[:xH+)ɉaެ]*{=ߧ+l *,)_¢Q| ,m V *2wx[ }sj뵒FIpՓfȸ:ۿTSF+TlT#nclo^:uIņQY&NR:GDˇ.ڈoD~2ȸe6 ʺ{sJڿgpsHMC0ВhW![аAnC"㖅:4K`.z8ID̞xewCqxw('{JuƓ 4r YKPiyG F/'(c=$J;`a0ݍz8׸iA"]4<1X *xJDiMo^(SˀZ@bB5QUaE @sbc*#kK~8@ y4ĭ5zpjHLkȂeL 04MFa Gi u@fJׁŌ j^na#JXt^ А6}Qi4]|~gئ,¢ 9QHA{HM?4~l*)\ ![W L:'bF<;d81X{ OotqksrGq[CL81aM4`>?NŬд0.m$??' 
8f鿖:fa<'^rl&!)XN} 49ĕ,4iidDsLR?PƗ ZBT7k҂*-HCt[ W%d@P_44R׉~<&;vk Cybūf"DXb[CL6tӥnrAs+HA{|?oA,q+eFUTuÒ8sSq_Rc!i9" {'0gFq$5 GO-Vfl욣m)uAqc \ҠaxƐ %0+u_9?,/i9F)T* hȃhN))avb_{\W[4Br 7dCРӡRM vK$9qԢ|W.!!J&u9͆F_>zUL4 uCNTah.m%r[Ɠ^G`)@t9Ґ3 80S!2j^\.C*x1^BC qAZ%ZJaNԕb$OUUTnrҀϳ$:JЎA{|BY,9:C4 |EJ8n[x89`K>:Y`c kl3{xB 0ڤ,:.H~~N p6^C=!1z3bO!ޟQ {1IP `.`F살#;,V$CZ)`4R "ذp{XKeO ($$ Y@"e燣]|FElNMH(7HC\*8OΒfdwp|g(+A6m@_t in Y0M(:g;-||>H|(A\˰=4c6^]SR p(ƤcS8;%/C\t;~? 0N|4g(rpPp }101M5S5P$Q0`RL(p8R ދѥ˓U S3)Qbf@`l)g"{t!Yp[Q{.A~D bOA$]7@ ޚw8lt,, 2F pzN 4IXq zgy#CЗ;V%/$tgZ0M0MWCK! {%a-q{ pUq5\t}Nd/ r?h!\T_VYH>pQϥ|Z|?>D ȸe>yxa>9(l?#^^He@8ַc!K%Ұ802ÐY@ cpFsC0,EA"g-q}Nhh|rFgⲷ><ƛX{ m0nAoK!@͠$al@$=nW{6(〴~|H!{(gׅ閜V~RurבH$Ke4}|~'&7;a )Un+u#} 'LnŸtSX*p̪UyQv)3VȨeᵊS2x?Db7]B;Sq$~,{l_QC ?>vD- uhsm ʠ* oO%Â^"Mu=:Ӂhl%AGN'8r{$^>j`4lQ.>Ϡ^T)ۇRK/RcY``dtbё(T~0}SǨToR)@E=Ple; O=}k#`zpA >L8,ס-졵%g[[%6]Iko~DZ-Ӷ) pRۭߋPgHt$s|L͋@͝'[Ұ+7m4}3i%8L<1D- uڮ?cm_THel: zѢ) zQ g}l5ŵb n, y㲁w8̈&l 0C}0r Ӫ> S"@Bpy0.lr)??DJdԲZEj}u˸ZlE.&K kQC{_ s8wt”}Lfp^X>"g*rڎ#ϰ(CJ4'*"0-0|-Db t!")li*'ZX%g)= F!KD߅[#9&-n:l-. n(,q{Jcm m;9b#u}&fWh,i!Qv%뤁G pp_y7H(<19߽9:|7FJ}OM3&L#& I.s4}hHm˜&i+ Kdܲ^/nFL3 d2^2=`lQ/mp$Zqo;%4ltdTa@K ?4,Qk|`hfc WFwy 6bЃSqd }IC>|D d2^ |aypxy͈kM n<=.l0g,> Kস9ܿN* 柣9s[ޚćqwK4Vn+yNa~ĨGM4gKP~[R?o~7^;1V12NI:)X wc:rdaLR $+WnG(7=(RIWn7;'6Ŀ+C࣢Eq͈1)1X0ݍ MьQXQ{> qafEO K8XHÜkG,LrB,YF!9{dlvuTN kIc$`<,$]0bFI 8.Hg , m J N`9U1a(*#>S`>L@NćP:>Hȸe:؊]]"|iԳcG▆->kBamv((,q{,Adum=v$yIK*.k X~3CMؤf]"zƆmKVfd\ؗDghvf85#LQ! °/uJX7qHiERj l@ L<ڇ%fBzˊ5adL'EAF}1q@S%|AI;P:hP0(Ut"+qЀq!# ^ yhR .N_2fCG0@*mٯP˅;}SBc]YEendjR>1rb%3&el)QR H): XXi-ND55bkرHS,Ⰴ:رH" 8La#-2 ~۶'ncxԢK@H3gNñ'M `* 3 _q6"(CyKnք-O`:!L1${i{i+La+-:Za54R1@D ",t6Ŋ.0uC YPuZGVVZu pm~>c2x' 1]aЉ*]:TՓpjžFXUxbfͫEDk^XbhWN  P`̀ @cd_aw'1#c˦/J]hL S$f;H&w=MuDF߰ĨF o|]Y 6 =N&` 5*b`hQeML. 36J逢0\VHBI0UWG[Mm1p\E8ڣØ3fio A⣁)nF#-8^)j*Sb&˔uyVVZu:_pA)*͒;?jPy/"7{j51jRFj]è6SDK" \K3Kv̌ZEIq ďthPsyqmxRtC1,D:}X8. 0/1#GxFħuK03`Ú 0L-1 ?u5Zq9Y=)FqP@s{à)`wT M(FIx5 ,1& | Nj4DXK`pPĖ+wgZd!gm'yf3˜0eHdTqN,`^ ? 
IVKu*F(oVhbQ[/#kkc &~fn L 3ï%V'/ggt03f)QQevC3ѵj:]`1qVZu LYBͨm;p3Q6/I(&:Ra*@d֎M0MXӇsNl4xbe\pm.=8%$\TT)1M3SŒ]pG82 *?0YxX|MؼL#{@EKHxLKK:3 SݒpGz@9a|oN&.t_N=$ܵ319$aEaAXNu?QJKm,1a|Xp%HN ԸbOұf:S!La$;> !GljB*[a$`lhɑm5K0 r  qR!4")jIB1b.)ƬgfMV>)3($@ 5CFDkay҉'l5ქ`YEp3CrA9Da+%86 G̛"׃QޠfIcSj}Z1a?nȏSW %hAR@_dT}dyF>!=hv1o 7󪻀Gwd2f6et>n6" l\GMyl"Lx{+fW/x+\n' bD63a(}Nk}><LXn?K"a;Q wM`xP ̻9(!bMHb BUE FxX[tLVz"XYM8V/+)]Y+WJL!ߍGj >wL̓Hp=ʺw C~M"L!Ia-:܂OY7EKL,Xc pExFuKX/L8@u7;Έ-]GVP1^ ψ-N%IVA vOS뉢.^? Sw\3qM:Ɗ42\6ƇU~žϘS4zBEaE&,Yy=أI>@D2n~[Z)OoiU\dsa+9 0"'\cE Ze9@-ӧaVP)w"RKؓWHa%'T &:ecOl 1E8nb#Yzl#:L.vwcD}Z"t}fA K!v̆cC7(fqHA*1( 6Ta ,!P<}d>K4. Ɏ6-){@K(;I$+S`&2/hjF0Qn٢a *BaV)0UY4DkPS:+TH):+ƌ6F)d&s:gdհCj`Х:qVVJ«qȟQ3@wnPSyLfbC,*;!x+w]E.Ƚl\ S_5sD SQį%Z*!Add[~ grmXvflAE1AUAĭ 6Tc،*%TqHApj(K|mnkĚ\8j6Lf1$DvAi_@6\l|°U4\\7V, ,ﲮ' `*iSO0.]bTAP6dm(̆V[d3ώY8s7M[ե`͖.!A)3xKa&+La+-:T .>0 -/9b:ƣRa)QFt 026P:%ky>aox)u yX/ Sque80.'ONqp5_f 7s_SТ}u:ѿ9rS8zDD'~Tos=Xėg"H" zNTq_uÊ"PSL8K~ƍ"7{)x*>'OLa,:y٪αTHc68D ݘ PK* 0d⨲*##|YpT (7:ԨpTDq w`ڃLK8@|]16xB>[4ҙʁ3"sV`фnùV)`5Vp !kѣJ[Okѳ q^+m (`Xe>Qp6%؏ud5bUR^CcΖ%*Akæy-0҄R9˳%뤔1e}6[!nVZq(苝Ӣ)|#*C#b%TӰ  0Е0IULkO#LVSug>p٩n1DcE8êiz"]׉_X]]%d}>'xܺ&pBF31n֪ {PUmJXDԘ"jq:fd 3w5, [i q=Ԝ r 1& Ucݮ'ecg$|XRh! Si TBfvA3"햄8Tci55|,j ۊ)\+]bH U˪xEEAos z: گTߎ Wc ,FhZ6eD&MC S?i(|A-sVZuvy- D7\ krR҃b68BsRM:BqT蜢d_Pe͢B!X=c'8]xsuDTiģ**:,50(Tq2f vzU&6'f  ;uLa-4ZXXOHG0ϬwrAL~0 y6 gDNX훚R>aLf y:M}:ff@_xW9DbݞR<D {a T@` h Ah]f(%fG 'eiNcÐ)։I,0usrP-%axqVVZu "0xT8j;fG.adJBa}Ge) H ZarZY`4ZțպS% :kºi0)0uc.>+W:DvKuY 5+baLDFӐmLX@dE*yʝw/>mzp2^XKc·Vь^٠ k6L*a }d?xT<pAD[c[WxMV q%d 0H-1QNmA=~ ۼ]`x9! 
.C`؃DEE$31D3fA5@X/l gKNB ϖ!bI)j " ђ|G) MYUBdGTN$ `*[ǔ+IK(VI[JWj'fztWoA;ů:XYɝkQ0YK]@ AΛkBS1I8nvnĝ"O' C7[Xtb^VM ơIQ *<0$=Wkx[u2nŦ,K0<Ș(2I(rAsC8֓8tґ|L՛(Ԣ|5| 1 9+Š H%2Kpx+ޘm,.ž2 1ڰs ;+ b%^1 ճ:cYJo+LvKuJ]U bJąbz }x ju D 6>^xm: % i=)nIC, { X?0m]L|:CKq ͽK0L#IuF0E^Cܺc=#TBF.?/ٔC Ίb,F  ,j@.`UVR$dvZQhM5ؤX9 [T nT513Aà~"} hVw37 # MK;wkI3pM+ܓuw1s# 5}B)2 H%:a⭌IB;_eaͼS_SL|SB'S 10a HT涰:< [q3Dꠑ 諭 ̦[]@]֝0(l%\!N,^#&y6|)_i䎎1 Cw@ :,NYXHb*(5zweR˖i*`O8+AK8E܅ES@_plWa:󽫃ڪ::Yr4(G HpҕBĻ_UVP'{mBI B'j5>ى5]\%[I\&lI0Lǭc: ZBu>n SJK#Ob3Wp<bxV `:sa K/" =Xa [i qd+ nĩ!iQ;cC:) xkyp01j.AnS@ An1LDnN 3D!$<('1<. A6$m(Y&d2f3B"'SYDŽ39RB£(Y&%DR-H [i qQv#E[ONzsN KꄷРW3eT/f%ct袹62[v ܸ3rs x Q^?,nPoH%:aFb'LEpYq%a0ż)_ S8ڷIA8u5E- qplQ2kV'$feaFZTFG- 8. XM 08%cSBu|8UT' &H%: N_;Ϙ۫: pz~l㚰%)!S1DG$,+:H [i qh:1~$fU@u0aZk[Q-T 5g)ǭ` HWcPaOO;f0鱞YذӉ`ܘX# l* 4F̖xiW,xW͌(^H^A~g Ξ eE*(^xpB/3TbFdʮ嫫N :(ʪ(`:[bbi*.x *waFj<,\ UndDnVOhabI,gNԖڹ|gi$gȂCHJ =0qz`E7g Qx51'a*8MݱC03&La-:etsL~Gd`BLj( ,Y k:<:7ʘU"s*n lA:>}8gtm4fGUXtʇ > 92%$\u13躄:IZa [i qhboNV4wF H&|Y)bun$$DM"$  IOxN(L0yc\r P"H  Z@Ta"v {0H`HWcP>jZ`ߣi[w&a*A1MjDKh@'2!TE,cDU6G@$*XƄcQn$@ `OxXkܓI$y.CP ܚGa.fHeu"S,l ,@sH¬"1g[a[&x2yg @s1D- >L$$< 1+Jt 8cSa#L`-: y'a"шHG,y9Je,d>|5svYcI)`5m0L4e_kA"Ve阽ps?<Dcf>X؎(ZKaP9`>t k:EZ) Ƒo>c6$ʆGj x)捽-!і KNVVZu&cpjYE3֫a]D+[ HLtg" A Ia-:c6'L<[#;m$w<]wbsHw]Gv SJIx5$m&'^UY}QTKFD$棆ĬI3@o` hQdl :c,rb׺0k.  c (Q=$$<8LΘ0uV4ZpGfǽlYE1f^=+WBqT/bb`:[搄sy |ܺpG+Sj/ l䯮S:-B ! tőlC0:N SJKCU%&>.9.k!D5a Aa/ W&.aN |ܺpwIc=/.ȹ#tE3iL H 8"`PˉI #<(qFL tȉu]5̘-j#QkxQL\.hRl!(09tK6.f+v[ufo/ " UA'6%# &)ڵ]ԡU'02%㮣KPNʱ<01xh1֮&~I/Lq:}e 8cPC7uc^Y*1OSV>q81\j`s>Ţ! ufݖpGj;ŘXgk~p-\yr6(vqXaƆsH´l:.i$\Ǒ»Bsha`ŊgO\G:ZPN;:uɮaJ\T4E+ W5=NtEFd6gG#p]ջfY0fGMM*'m 4A*TpjOCٱ,#za+E:GUD$b١B+(#/`) L`-:[ߩUn j9 V=|WT^$Br&JLE$zszIL'ze$~lgw<||۶~|o3-nTC>K_nݥq{ {7eJDXx MuK̙N:T\⺠wR sZMULe̅JN_^oz#gr#f]׺y[uo%FòG~.yy8hߞq.Hw@s[ߟ)4o_>w>}?~} (Nw1:L6=>~m4}O_a ~yiߴo(e?.obz NX/O/qOja?<}z"Tק/>@6㋘o/Fb ou.^C>>~MYc_N?vOw.bjmwW1}ǕėۼCm8o_ ҲYSlߓ})p? 
\:L&vq.%׏Ym"('v0mlZey屽B<2ͭ%w<ݏۇlwm̛!<<"ۢG?^ڄ=s0ѽ}nK6Xx?ӐߠmROO|}VZjvWѦ7zg\eR;q)h+poUsrGDvOn.GjD~?M%rʉ;Kϟ6#}s~znso /YY-8㇘"wyOdS;+6'nMϏǓ6Ǧ'/-rװ ۧ?|,iJƇfsdq[5o̿qF۳w,jk5,eg_z}R}n_/*#ϟq>o_~zɘŞރnV ?x#ŶpP~^BvCCYTUPS,}o+.!x2S{goMi6ߟ?ӻ36r扣u=5โ=?G/"ێ9\Dx-eo )>ob"nkzh]K/;~=cJxDhs0Զ#x5\Rm Cƥa*{ w~ qeÜ1-X;ȹ]ĺ"{+6]qp~yz]ݘ]U^)ޓnԦ+~kdz!vE}W2.^`:ogsDmް]>u"Q^۰&}ߵ߄? &?݇O_~|lf> stream x|I,ISSx X‚R! #JT],m|w|Gn뫶ǿ~wR^oWۿֿ}ͱZ{?_u]^<<(=뿅>X8ğ%їDyp%iLKH\a8$95_WAߵm][ o[Я3gIKnvXƒH\0e6Æ08b|8{m a#$L){[ñ$&>sh 0~r?byROҾ},/{]7kI:hׄf_ץ <0p=[浴dS0Z>u?ۯkxQ׺<ץ≩t< %eډ?_۾9d듃1iz.4S k8mS6ş^#8r_nL1MY&eY\;ii" i0 Hb j҆+0~յnxŒM~,MǭQbZ\{bLG<C1me ʔ5s6q sKkx3bl}5IhxG!awX^li i0 HfV`ZXkmXoF NJ{y aYf.RsWWRepyKsd[Cbڀ)q-]!7ؔ a,Er\_{!fWLUTvL2MTCsUgrX8^}I<& ʘ1Oma89Gf~Кٷ)b;Ë%q6$̱XXZ1m@KHÁٶA49}yM o֫ wHg $Cz-QmjYU,"^I[t] _Ěj/|m\r\aـ) k y441)]Wұ}3LE:x54. kBͻIIoJvMJ<.p )|e71s= uco j%]m݀,q}N7%C&σRɴ5tؔVསR~p.B}h0 HRd!Ğ@r"9m*xuxPθTuqly\?7[#~{&-NH@ $L^kjXS&qJ@ k*5 ^xQ,ͶolФqJߝsJkM) 1[Cmc sI+x XWϓrg1ڭW372ۡK~0MaCh; E^P('KO`$/9WpW1cR7|8lg맹C0qѼ!)+lLC51愲l{hc&F/>5\ .vT|=Mh9s,[ `{h1py2':f?ʚ\6#0@_fS<1 Δoɠ-T.# x?s҉};'k.z-`c(%a ymi >Z{:q |b'^L'Xp w?6Egа&vxfϔA NI""sppwuT_&6) kH\y&aM|RpJ&Nun'a'51wiV.1IYo#B% IXxbWxF! qը!1mDR LRCM 9{a~oC$b0%_cWccP"9b{o $D,;iC89Ki5>1;kCIYυ}bGc`yuWm)a *0xZA[ĆQh!ֱ'+W olQ*II;9Pr)uRCfD2G+x"o`Iaư~&-oxsE+x>o.CCfwqt>ݽ` xqh&e8$9/'[AMc-'a'= no "):iW7~O'a_~n KyHȆpHЉdH=箑.ӟ'ڰjJʠљ#fy5aMVg&~ \0] x3e",z@l cK[{hH|8+ٔPlS6q sKkxc[FllJū:ng$.!j rq  ɇBr!4Ki UıX%)`). :e n~~qlqc(^&M@vvރЎ[ gS"yݒkE$Eo]gQHթu*`S'l7pY!##:6  .[{Lm4:1yRp-6nZ%[OfIc].8ҰJߔjҰnz  {a{i q\U\'1&FFKc=|h0 H! 
V-08v$m 5¯-ﭜ̤7G/i i0) 7$aM^^Z{QED652uU$}7CPul0XJPb41Kxɾ9#E( 8d Yd\TvlEL`+#Ϭya*N:Rav ep"=+ +-\Hh ARdzaIy٦nC&b#PYćv2," @-}n-ܪ l-SlO8ֱYZ;0GRcڀ)GGSM;/M0GMQՙQerᖊöƥB̘`ƴp=qR$ nC6@ Z{47N(3 d(.o,) QpNjnIJXJ`=ѹuLoޥq=eMyA8Z{w);;M!3XlXMNKﳚ㖄5$VϦD5D ~.syh``MY d_Op2/@X{#c>8oۃr O "j,"mBt"9m*xz﬇]'S] z n#1^O蒰>H)"'54l95]嬅Ns0_op7 bO"YG( pQ}Xl˂095ğila?O06Qj U;n:[ LH2G:5$>XSaSNn 6eX[~Cs(+69~& n8E"F61#on $Dx[Á;m Fya{) Ʊc:;݃A%/͉5T`,%`y$V9㧳r$mc]'A>(xE kǹ|͜FPHpP]'V`H!`JJ)x Q0\ j$ bTAwG5$.M1%a{i qpƲV 8a1%/Z:MmG[X8PHx4h0 F)'6ma0eke a*,Nln6 a f[âGzP 顼0G- qۜ5ٰIJ&3AQH0B*^b)mL14|m'!={ xevnJ̭.kFd hCcT`*]E1",!6+4$D#RAB@&C@N$ݖ(xWx5)'Sc*T.@d`|4JRo QS~  {a9iAp=/Ww̙bu{$`>*#5l|lӦ 딆* b%a{i q7+q%<(8[.W">qxnFKiwX'57ފiCwH[, mLJBd-bIV'tDaI ǎ-1DP`L eZ bSzX Fl|0\kL̻"/]I'Ϳe \wze*rbpHDޜ9jB /U{PR'v҄pH sl({]؋^bg2~׉Ήf[-ppb:õ>8)C0wdP|0}_oYwR*fm ljj1 `]U S)nKSAAi?$`sR7ìm$ySnN穇:XsMc/㖄5$*_K6C^^Z{5U9:j${5iJGV$a XsEkxQS%2ήD loTŝXHW1>X56ep˟*GE &^YsҲ$)K[{sT}FMsR 蝉MYdk A4"qF$ f,WN\p\i ƴI9P)54t뽮i9oixz)j$ݢvˆ @'0 ?'>S/IS?L#]> c!={;`Ji쀙NNJy:Ź5kS$a,A^ya{i q0뾕qJJ5B8^;KC{-jsSTkT+nSONRCmTm/đ^Jïq0>!_q7;#Z5yqY85$>0)qICF:mǨ91şG:``ZM02UʭF:%ıCl=Z1m I. ktnğ95ǡZ\Ƕ=% ď9V Dy7\ W4 ]K5L؅R!p~뷶2NeqV-`"\WnXqweMM`i;`tY COCu-6ШQe-6ИXdP%`n4MU@AFG*3Gq܄ p@2fGM`LEPa Ʒ' sv*|:N`ILoxM ÒlH7- kH|0iۄH-Kle9djx=h<ٔ^5QDR“c5G烣YBiDOۦltn/a/=GȡX>aI؏lG72It R`L"I7ʈF[ a= VeσQP@v  !4{IX@M41Tj 5EnSF]R&-6 7A5#SCu6(ğ9"#cAX2ixTEqK#LUL4)Vڢ,TUG>#(8u7^^R&r%[c`6?䶰J8I8f҃unRp% ~k[r]A7O;Pf+J"4.WK ؛BBGOYbV)+qo9Ej$e8ݵ5jXB`O>O%ٜH vbcRgΕ58% ! l q=lc​ Vy#< C\;ο͸w#2C|0M$#KIyY6=:H<׹~a(52}Qc:lݸ]1!qcI)VmiE4;ɅR^?)KUN?"u(zض),fe`L崄4DAK^Z{Gb<UD(t0q~n,HjL6G,CIQPol* a|Ӌ䐗=}cH/<ߔ^k Lu5@+c-Tw0Sަ KP@õm  >Z{ڶ,3>29JK9Jw21*cU' HR-! emǛ^#_r4sjgI@ݳ❅J3GEHgI95)Z²84Q~X!u›R;{C/# p [.cUJAo&tv^^ke}]ݶ)άӢCL֐BuS,A QmCX4HcM ]_[&eX&:@8FyL]$!IL[Mna'=T^Bvcm5S#@N>7_E9\6X G29Ei5ʪ;)gפ̦>]=[B6Fv~0D?miɼa)8Xcga(w4uEe%"sqߣIo;mn֢8p*8T1w"svɶM)԰Vl0yf3GV9a S.IH&굀G/vBlgr'dv"NVt:ГE|\` q_5D? 
EeVH{m Qp9Ѕg~R*ST@v "FK%C1m@_CAQl^^Ia{ nр񅞗*YܘcJq Ə,̑>S{*I{2ԑg?O q<]FĬ_ލ% kHMio 6eXl8-{i qp}+')[tTKImE}|ƣ@8l@/B# a>c'̑>S{#x6I};Vbznz{/ǰɝ m(bݔ@/C9cPyMIENF +p`|j!)RaH6 sKkxCW S詓I䕠2 B6CUp'Kc|%%Q !lbGb(x`q| aP*0 0]m}0}(fʔeѝx^2lQqܒ0}sc ƴ SNzI b>Bm*x:6lҦHiq^J%fM-3܃Eܭ&B”i;t~byCV+^ĉ9vj셶&CD|V&>_JE5Q5iB}HZ ޣЮ3;+?O *߅D&HڸHB6,ɔv7vj칏mCz_{xݔF9?_y`܍5$V[]M !08, K&%đd1q@y$D>r6či)-qDdZ-#ⰋTU8PaaY}ŻƤ<Vs3fE` Mc' ␰氓#rل6?O\?M"昒ʽNBq )8$`Jl'a'5 4H~[E8 AQ88U=9Аsr5 IFvK/a/=ُKGBrwiTiQ6|xKNU6A9uf }GltY!vFg|9fxzupT'wuG5+$)P ƞ$̑~K{q#Jݬliʍ:52ODSoS`JT]k0ܘ1)S0*qۘn̘^^Z{{>,s$VRnDܳF=TAdF)#j<:?لi| ^\0] C3@1XrhS Y>oQ0bHE5$*9dOĬ! a\ڧh$=Mn_yRnIFF&͉X@=Tfe{sv5ǥ8RȢxDM>tQwjmB?ʅ94EC= Wr_!+0d>(VI'!铊G8+\44σ=+G$h?p: T& 7R)[ V|_˽ND0VgSPIE x(ȎzŸ/9{*׻)PPw(* N>Z{#*mm3rAekjTA(jP8)`yqt6L.~e)xa?Ii]vıx0AyeIO|h!aJԨ!ƙХJqJ!p&s]j0~8ŦT0 +4D(s^a,/i`6>\@4D Ay!R~+xTSLƔl5u1jpP|0%Ph0.g2INi8NSd8$9oixcwSs&6ؔ4E8qO5W\sqJ WcY+0)V) ' %3RE6\8qyPn$H`,i@?n@ȫqˇ8xEkA{c0*.MVR&Cmw$/wزx_1$a ƧOF! :^#8J>ҥT‹hh[s*GK*P,@JAnq4n|X |mđ>JQigmu[RZe^tDr-;6F$c`P`LM)H6J$n/a/=gq&IXW}D/DŔ ~~U"$qQ0p+(f-+<0+Q*72([@a8~$F޽;C - lP8C71ry9iixBE8?zPc'SыҹGl? qUܤtYA-}0}(vσa-Mh&E:tzrK eHJ&VdQ?_}n-f*LaSd{Hp?u :f#nMF3N>Z{)Ѵ7:bѸ5fTVjۍU?܊`gT H zu/k`h AMm5Ea_ԅٯ,nH%9J,XBEE!1m@Bv1E8Ki5 D)GNRuQ0;xG`icP5gxZ8عߎCNÌsvR:k/̱^ȣ1 `MZC6V˿@"MM\ܔ(sj&DTAYHC3. ,0^Ax$̑~K{_(o PlD[G{ ,KCCrSj)Ĩ$/a/=Qm؈bӤ\FY8q VL.Oգ70VpJfe qEkxy1aNCRnWq, 'Gd>s ƴ-4L]u,rh& kH\7)ᐄm|08vH &T+CbX.=Ir0dK*=H ؛ܵ2, gԳ.qcj|m)_YDq=I?<t[&mR4E?Bs(/*0CeWT`L >z *:W`!Vw+ǟM)zߴ8HF|Gʚa@L)L   y4ǡ,eY C;+/Ґ Ъ>b1c{ X5hbYK>l((Y#[ V+0 $q#"bbBkM/!{ $g_(2:03E20ǕsB u@PB*Å4!r"95Gw*ATɔ8SrJ>v#cHkS.锆Ss|a=U2X\<('6"' q'D J ,|Xp!tIXCz%M~fV(T1 $BW>l(< A]S1-@Ru @CY!0Tn-bTtwνo&XbW덖wpcڀ)H blK4Ճ$!?&l4&Gf6n9X_UI] |{RP(` E%}n-{SHZБXف2:x֨5g'1=)`z[1-@qMU@Y!(IMFhA vk}X8O?O%kuprTI٣쮈ڶǝ8:1CD?mIQD&Wt9CiyP^Os:`*N%l+S$->.beJE((YٱlxvK{wEhT4TPo1}AY~&8U|hAc)LLns2M&3C+xQf7a"}uW/ݬzv>9RnX :oC69kixBƷӚ>m{ ?N;1u,! 44IXCT}Ď MfVIY7u >W WOjEz$ OkqC+xz'DӸRyK)!xnm]ϘᶀDo j饉CfV# Hu,hhl׏mw" X@[B'n ƴ&QvlS6ZsCB{a{i q0r# IYhf̈́:P- Ɨsn +Dm ibA JAϓ`=?81dXyw7G5$*ޙ! 
kNöul/a/=ixY9;7뽢`*o[WkO;ٷ Ƣ&y0DJ i Uu%k`tZރPIZ*A!EkL@#AH8~8d(KJ͉IPm:#0_-xUvr)Q֮#]Ȃ89*ZSl|6岄$mǦH- Oa/줜'+`U@{f2Y%G_Ax. r sNN#IB-80Dk%a UmS:& F$a/=Y*ن->"G8-H:*<Ǒt!qVRR`Ek()J95DZ R-Qb w 3EyqUAp7)5T'6C^#80m#AhɺD×vKg' 16&T XAG`6ADl(Z^1ϚtD:#g…MR{-]@/2P%cqUJ18Iix?grwFƺ5uGzdB ), kt=,}0}k;8c4 6lJ ƮPyd&sr+;9jZЖtKHCǚcv^^Z{ل"=-2 z2n'6""!hjqݴLi#"k&nnY `I2Պ7)AK4G8 cy g[V`\qyS,! tk)xQTŭ(ZQm1F (6iJe7i@+m#jZ ̑~K{|z֋PS`4h-ZHt(J&rDs+0/DV)P{ |m'a=  N:L~EC˟= ?lU˃a=-P iBRgIDR̐^K{qwuߦ\n0LG!_˹ze+\ T`L0 8ᔄmfH=G%{RΞ\*;v#ǤiH'aHBpH< Wz4>#1n|Ozx5z1C<2) )׃eP*(؄+HZޣ_RNtAAUu`v\`d7 %lHAI,NI!*3G+xB9{rs>%#樇KCbBNAӆ0E?H=sfaP5)ƅ'Nt;+M]?'XИ.J-c,q0ډƕqs9kx3H+C'boȌeX"ĘWI i.pJ6+)/̑~K{ >tHNKVtcYKF,:y-JCB*f a)!>~/"e6f! {F+$Z (#zGLJK Q+6o|nH/8uu;oLȣ(6*X5!2͸ݑ UV`LܹM~fH= &71&fBtaR8bU$a +s0t k;a;i Q7T'2kuDx W(Jyܛ`Lb i8Ne1u0G- qg!*Q\,-slO䀎fP2R*H\\Ɣo+8l1c!Ywӑn,䤂zQ2k6bٲq8<$>RAD˶nC]{b^SQZ#B{ ;z2T!N$,؟KO\YFm}T?_sEkx"sVˤ@~pLD/j$a%)u·כ[Cm+K^#8§SUQAt^c32??U4jR"!$_Y& #$i *x!ネ8?{Ů Q#s1f=sD%S}Qף~` CRC<_6Aٵ<{>JӉQR"/2CV:(nEI6-}0GtP~@craS Ң5ZQ[3rNØvEQCmYcj.1?,h??Nw%v?ʐ#Pw 职|FL;*!1.aECB*H\)4LMʨ\Y#,i~1mDbcm:m4$ sHJ44̟ b93Ahn4"pSS، xbRaGxd?b1 H ^nCYQPH=HU\:HoJ迄HE֣ì|6G4}[AC}rX%DF0G( F0]Q$%o [;lnv S?EPKX\qP=xwRYyzfHF ƴ#R{H6 /̑~K{ 8TCM^7BC΅ 񎶀bu?_zlʔ5p-Mxon`hQpHgQSZcP&ۆc~Z@ '&+/J`km[@AKZx M0T O ZZ{,ެ/6BziSeA䏠\IęK:X S:B O\χK`N}dNNZk|7ԁ$)wY8?ef7a"%3R,WUJJA-ʭᐄm086KI@G٪cJ?6O#](`\<3ޔ&?_ sT3V$=ώUM7jC[IA.$v0()*]P GCPurc;hh+Xg;OC)*NG/ǣb䈘AX&t J5 x(W!6?_Erk+x B EUyP.% tz>"߄?ԯǠi"] `H3R# TAa16*7 ;,|+v`8:)v:PД* kaQ95Ouq g}ɉgI 0)R mk4Aær>8kix?K{͗ 6@fc+`Lړn~Y.iX{8[=N^VU>w*lTĴڔF٢,#YfMHpEɿK9$fsM)X;6c(y,ƺxai*lJ&%Ҁ0 _)x.qϘ Nx8Qh@a|p IHAߦDo+h&?_ sA? 
^Uu`5x5, #|2l^ߧCn&$i[:J#?v魤_?n06ߋ ؔ|Яĸ(ah#7ְ]3SeG9;m&S sKi5-l ?ZNjFz%ޏ%ıe>4QWT0mdm=`:?/Z6Sm_xc`QF`r@X& `%VDZGE$:vV7I,Xi ~}pdpS:,.~6tj=0=t-u8uMtCD731Y;9d:5$>NcRWr15\֔9Ijh)ZCP1- h4@(H+(,(mi^DQ}ps16M{`dx5G[$C\ \RO(KdIJ^QqqZ£jZ/*Hb$0MJ}߱PahU踃_'g!f_sFmkMÉhҰf3@la[aGiXQ u19#h%Tsrdz^@H-vIǀKMfի.Oue"xO7 Ys64Ysa@͂Ӽ$6 q[ҰbFa Gi u]V0m IVt!ic뾏ыfIt8rci)K8F[x7Q_Ĺ-9sqdQ0|n|Lආ$nNIJ LbmkwAovkhFG#qh7OMEӴ>sP[ԫ]:i\ʹ% bF6vPgaҧx GXTkB QBО>1<]\0u[pM~f(%6vTrpr, iY(rܴgWk(ݼQ`ropg7EI|^p@fH`>5ֿ]ĉ,A[;Q1&^';&ęOgCW=e4=`l4dc@sbck Ra{=$XrqC: ]S<6@FKNIY j@ޫ`Sw])NE_ߗ3PGH4֐DGÂi8 445%-GvYQpbb༃=jft?j*cC[CDbl8.,L.Dcf I8j[xBpوM'5^͎\ .؋*#AhRb: $}SpVS,Li+ahUhCւ9%9 gPE 5fƎQ[0MRSa =Lil%-ס5(gܐůYyQmg= GbKL6pNZ)??' Kdܲ^nmZΒPH^x e!~=ŃK4mmt7{,,ipp^_i}Sn%FAO݄mH;4R/} $8VC# D ;~ t}fQ WIQKT%ĠEҁ6`X\&OF` YK`ḪFسdXHub`/>mCAĴ?8z(/_9qmF0u-;ɨYKL?89.\ ,Q{`'`5q1q'$qg} Rj@!ng )1 5_M]7R;A#_⏝47XVLL5M9fi O)K8Fx+< =9qp#?@mgҰ򁿅9M Q*  .iX"㖅:NYo_49RXk6" j\ľ #4# Y@:>D70ȸeݷvjDo!+( -uK,IWߒSa  ҟ ޫv ;&(3foa. L" `@ k %)֌AQ%]#)=_Da}p*⚦#6}"q>POb9i5w0VL~‘(Ұ"@n'cּoDaߍfIsSʇ4%C3d N()}ZBCGƥ-h4zD~$o5NMNDZPaѾ(,(m>8f0΢EE{<&~W#/za#;1ڦ$t~81 8>x-@@]HNtH7"| MH^y$5d4}@C;.)i[A)HxB+S"<:M44_75YHl!F֐_ǂi@8`⎅>4 8%-ס<0 F }x8H8]u$nWD,0],1 $AUv}2 G>9GƷ- J7)QaAklܺ+>J\t(i:xY1ī%9(.Cp#[q=itԡ#0Фt $]bN'ѫHM Tvbc*j\pPO)Tcqb$<پV4,J XCzU}@Ga[72+7^6(cm P(:xZ\˜ h8% 8ycg U;[;]Mhj6xk1BacSlS[eAe`^1jҙ6#&yx_ I=spD}$͢XLKĝpT-k dQٰ8a`6=Qke֐ȃc4}|~g>XҰC( K8J[x|]ra&S3vSWmLlB_~Rǂi9WK - %E,$HlDNgR`Ef"J|&&q']Yt8SxdOCX{ 旼/'r?hxA0Hc/ KWI Z>t0 -M4C'H[౬u#E9eKѹ7 i(jBA EU֐􁓙8Dc:}iά! 
Gi uhm99L/+bvakuёaH kЀI:8 20OCAXB!k Ls D:>8oDN2[ҾyWLHMTҙ3cĉ^P}>gp=bW+ (D-$tr5\rh>Ngp^G޹Į%.X\E7.K6y#ur?bLӃ\ sǸKˁh]pT-@魌wIvDţ*0{q$4l!/hS?V& +~mž@*^8Źܦ1!(jӂi87 C{v9UZьʹIEŸzYErPY55V,Si%zlN~(gdžVrvt;CIL ͩ'~%X:,ASե9ٸƲt3"wQ7^S3o笳?I,QTү Y,muLo=4HM$X{L$1A"s8i0j쮓 8]ZC:^S!g"@1A=4z?%n)H@Mt_(`CaI)dx/јFXN4آvMz:Mw{H,_.B=t8Mt,`rqF'}^_!6xy8 6KpmvI#bqI0~M^T۔j4zcg XF*j\cF Né!P8Lw}p!ߨ!=$1Sn &&9Ɯvڄ7>;nD&)#*H ÀiN@<=k`vtqaaө99.!q̑IGr[^oΔ L!aLl`Gh5G2w.'U_ZUj- nh2u$*Fռq8i!أ>DH Gi uhV Ud= G Mv~Lr#Ѥa TB}ֵ64,q{oXĥ x HCr GR&/F]/}ӟ/ tԊȎʠHo "+Ւ4h6h@48L:{ĩ) ÛVp^. &`c#IɹG䄇T7MY19}/`r؃i|suD- EԞq')J/Ca&=#;4Хa?2ROL*mjhS?64.cnR[a[a&7ݗ(|wTiBHtAyH`(Vę(Cɉґ=z 9g"!}$dpc XhOZA:6 QB=QWYr61? |[ ]y?$D-$]8& [@8>Hu) K8J[x. ~Pѣ I苃#]SJVxـhF&yGPNKyNW GQUpSnTgT6uo]8"DkcfVNEñ 'Hޫ`PV=8(/>d.jW9'J&/%B.'KN,\~J13 K8J[xChҘ_&0Urn Qq4N!pKbNDQ :CJ(4^ig`+zUW_\#F!J싸phX>!mIFGQ{,*Moq:0O'd,"]t)0 s8U2/ Cshh)> `gguB@EIcK! K5p~ft+@ۼʶ&$2HYx*fΜxN4To}YbXCLq,۔$-ס(ĭx%Z]T^ O]fݓجURJ.2'ǢOfQXQŸu > 1 芯OJTZ`r9m o.HK}쐭^o92-.Zcr6y@}$Ώj6M!oNw)~  š2+^$(]xjeZwHt҂隀 >僴 Q%-ס'+>81h.-+F{4,;`ڣz wiss =1 ZEpa1P尼4c^#K/^#"`s`$(YO˫M3 ڎ!vA֐*T{Tpq%,MN=U *?~X֐t rpDmRՃcQ{d I04Z~oi 9B80 0)H"8ፇsH C )^51/l@m7ќ@YݠE'|8A ,?@rdP? +!%2fx/A5U] Τb?~[j0M/''Р6A4@ hVTG+J SUAE)_UkY;*Ē-$}Jp4l@%-סP1"̃$ \Ju)a- [HR_brm癩Z(,(m\LG e(xΤe &% [ 6` o>I9Ӂi@L]R9$,5yȃ`{|65M/ʙxDo3ɑtz(OGa[&0[ 4 y!zQIH(T>ʝ>\$nE靌 ^`2D] GD5y ejt{`KdkĒ-$-M-&>yҰ:#X-#::~d@H2 ؛!0MgXXҐ ب( 8FxB[*#+l9: [9&"BE!!  Gpm;)+wŻѨ}2xF;2lTƒfaTE vQ\A i*oUn(}8xp$#ȦF t8Dtۓ I!,lo`}i)8đh! c4}96eqWŠߋeA4ڛ9D%M\iQ =l4]9u 4{K1X1{sӈW0js m{֐@7,->i%-vtnraj^F5ړ<[ ޹t9^I4: =1)FxB7S9 gdzEMA藪q=&}(͚Ԁ$ə?&=ڃOZ"S&&[IPw0 jHB!9Rq!:N` "%2j^?Z#!NeS.z ֏E |*Dt9i40 2Id2gjf /NGKWt6]!HI򨛦yB}gR2d@cUr6_z;"384N&ļA I/δ,D/ż\CEʅ R[pS } 'fx|8^J7o*6mo'JNi#¡7^) 1bp_tbEpGnװmXٻ&;b[oq I0MXiC'K8H[xB%WnZHřćb-Oι?u7(ڇ8fc:I~Np^!U*Ùc\œ*cFGƳ@,CL%9]p%r|4_dp^=zExNܥDJ0\jC-"2KEHwzL;a6ߚS"ÕwRCKCgq[MpICǀIzĭ}DH!X"VHE(!v5&tXM)b9vy*i)u%QЊzjpѼϏ݋y*_ Ni}p+rign#!b. [0ِ*EbAJiX"㖅:Z| " / P"i,:5(QԂh I'0wrmA(,(m>7/#.1sZb`۬UI-I 4}&}ȗp@m? 
@CY9qrlc[3"QĒEv_d\_'/ 8@x//hsь0' A81>rH̭Ài,#3 J]G`(CD(  m눒3DR_!GWK9ev!nNwtkvI#K>qFY 95i?6*ֆ"y05(**C_Q@c٥*2K8HYxO&D{Zd9 kN)!VN4`>?SmR*id! h U(BR,|>@4f`$Qa I tp.T^dC!RC/l&PD:JZ%KD|;BJaf]V[ c*|-zP7'6wv `vU$i)Ѥ }DҌ-qU=џGKJ$_b:49;mF+F*&  -Œ:_1ჴ ay6ⶮ8KO63vOxq!_&[0M'84qp^AJ1f1ڨFC+hh<L4̱,즎"% Y ?jp97@mmJP5&s*6Im[0ݜA’}F:X5껑M?5?h6Լl("DWJliֽp,d|/|` h%rp"yUK75M+rA}MB qX(?.H jI8H ^?jBONǹ`P7F后`>BÜj8*Da Gi u0CYJے8pK =xWOf_[1`LK#KOs"5`4xdKO-u#(}{!M``Z_\Rp@p] +`m wg8 pkSOK]a脢$z菁Μ֛l%N?? 8jxB%*ZJTܪ؛{܅FeC.+%&,]P+s-m H ? %h4~Ŷ_Klc&@mtiǏ.,%-*\LͻzaW4$1.bB5גCP>Oȸi:ʩݏd]2Yu>{!Q6ze-d|<EJ04^2ɻ _`T(wO+i &)O`>>?ɹniB}Id ?W^P255>O@v]FIiUYtL1ݎ%y; $me ě@oqsS $fguNY-iX"p*BU)+3.+u1e`OpZogVt$ ή,Uz5NeY]\6`5׶ߗjw^1X1{ymV%YLi./}oݤa^vj Iz#9UP,f9 K8J[x'JԎcN98SI =l[DZUG iZ:iǐ[1O=8u~Jz 8Ȝx ը /-itmf*Mp5T0:im,1%{/qFpݜU4@+im^$H"! IӇZaɉczyFa[%֖6_/dEU֓p(iM~\H ;4 k`;rr91U4aWfX.L8ZYCLO&gh,ԇt;K8DWga 0ʞQ9[ q*p? /UrMt14`h4m ]*UEuK[!t)Lyč~H \*9M@E}<Ő %+5*91p7-& 絬!Qitw39MP1y|4[(,(maJ`g܍bm[[@d>#XHzOgYlCI K>c Da G) ֡JF]we!vOGSFo-,Ԡrqb},\>CΨ! EmEp]\Xr06m 0.I) S Z'Fl;8Q;=kS Gdlg{ؕȩo̱!4YiG$9U 6[pA[kZI{ -v;틚-&Bkk%%5*uh! #4aG0aCQXQ{Aj 50ㅾ Q'OEChηKaY0}i Qr_W['4C4EJ(ʴ^.\݉p87!m":H?#[I_R?-x(А% {XF,mKmm~%gbIA~N:Vtl5$QQd@̌NܨT*'K8D[x/~LTYac0dzb|9K-Ұq86e!pp^.wcżL^& *PcppIkX(wN~3 o0.H AH"c?ZlIjK=R[o%04xχǂi@8Q tۊgR~ [+g>81&T!1wMT)H[yb2 욅X.'4ҰDF- UhU`1HBz͔nVuS Ѥa If>`Q.H~NP6\3ԍhB''OטoUxߖtd2uL4̱,=| Ga[{zj= WFKSXF\] W릎Ķ,h3!  հDy~^P~_GbrQbǶܫ ,d ed]YHҽnrbΊIn%S>H%a\I8J[xCu9-=%X@%z4vO_^G_`Cܟ;l}n X6V%QX0=Fc煄:҂i@mzȾD-D1z-LJh|)Էf w{ ߼)`oU7uT`%fRӗ7svLҠ R rX } wIwH}I^l 23?$K4`>?hQ@9`YkcU\j;|Ei֘ɡ cZZ81s4l)}\Vepg2]~r3;ИF&I~8siC0Mg -|{}$e,m >xp;A `G!ݓs% Iற >iPHq,Lҟ%,YVEflRqzƆ͸Rmpͬe<3aq ӧnò;p4EX>MNb&BnѸ9LRT  lk\ IcG;dk%ThEGy 5rUEDBŽGP7iUQ!K8DWpq,= 1ӑͿ5$:Z'q$hŊ1Ms[;|A=,LbW]ؕQV}kyd Bup"VxY.DH ^PN״}q[0$GӴS^&g6q:P.HJ1H c?`-[4Ac28y$KQw֎C_Ƨ2K~ |RCu l#$c#~ƁTPH!`1`piP!gޫPv٨ۮ(fUX!}G^)=j–@b}s!"  
F ^UB94MNE?>^*z燉 4{H<sZ|1,߃"zk_5d=d\mk@}<] 8QUUhHG {s[r&%11t4\) We-}\9OAX1{uU"&(݅(" TkH0R0Cr Xl- ^%EKÉbO61ڥ4e􌶴D- hsq*-m 9QX"㖅:<ҵ٪<LHb H  R70@U͘ɉLT`B!],m ]ljc7h3Qs\"G$H.{:?afLEN}:NȐe PWa9Yqb:/Z򖸉e$/΅D6D<"ȏ `Q+,^%[|F?JUt!p5h lҡ,mཊ,ڮ=4`x6>>^!14ߚN=b2>ot%֑& 91oPdLJIWdA" {(ade"]|50F_#Jp}%Kl IRE)RHA;@rޛ B40ҺEIq  HP;jM@a7O( ,Q{9[M5ylmKHVKB4|@Cf Y`[As:shL9α(M؜ M I s# IOE>4  q}49cߋ|1'ƾrj8uxs0_:2Z1˵̤Kdr]gXR>H~2 d2^&t3p/ro>*g@V)ζ,x}i7H8n[xCw_{3p7 Aa)XYIp@=Ԧ PpbAK΍ pk50`,5A$6X']uI 9lぴOb!m.'lM8hZ:=Rw|U5Msm!ia5b}-\%-סV]1~ FJϝ9 ERӿi I*Y0MLЩV6er :} 21d8-Ѓ;cF 盭$:QxaFjσSpn/=tgRǯ@J)q U/h[QOFrJ`>2.@Q)I˿+Pfo73q)e$n !WHJ[C)j J&1l 1OZ[ {8/TpJ"U&C`ID4̱(sB$*`Zޫߧ)Vadjy`"Ui$dI_JNnWLCAc*ɄrQ19 E~qjJd3V@HC}Q@ZlG"?Mb`C!;:;ZBddȁzgv@2#=cH H?K9~*$gSNrhljxZCi,0g"7-,ii%-ס7f }k<9.5pSsU sO݄ ئ! Y%bn<Hni'j/ĥET6I!.ƒ5)J}X'w dp8ș|5wTqN /)æ@И1<,18;D4̉4B4JC d?yT`F, gƤ+Km+=h`.>?Υ }5r}(RQkz^K͈\=mWD" }l-ζ2hr.N!@:%eu]%5"1WƹF%~ H\*xsbuP)2(}CX{ rt _C$eܧ!04t 4,4&D@[ MKCÿ`K8J[x;ʪhK99.&F*Icgj IJ )dlK=4џ<ދFneh>ag~5jOZۢ61D=,t#t[Ұ# Ii:0[5hȘIؑ)K i؂i9{ nn-DMU055 #d?YeP-Ժ4O!uL7QIF T ;.F؝ , m 8M.gh=RUk4V>Kbvxق,۔ ヴ%-בp3̹٘4iVX/^H9XJj)Ѭ! K%'`׿c!pfl9}^ĹW.$82ƣλI[KldМ4Х`)ᘥZD-$1 Hu'Zln CKBҗA[[E^Km+TL̈́V!= ;ue JD /rYH>!β,w}F?P£(5I<9PK }:.J4y~ph&wޔc#;Aߺ09vGGr`34GoM[1Ll@m8`>-N-T\~Ql I8J[xC>\ܜډ ti| Ik2*[`xHeR|ٸ.ԃ ?`k8^vJ:f`" Y8ZmR{Eso! 
Rޫͦx{hNVϣCۚX&n5$ ʂh58cՠ!CCQ㶅:T>uVKEM 1:B}u)gp6^U)$gq|AUp Ⱥ@ cD  .cx@(VT uvCF` `jRA7e cH҂hP93pd!!АW[Gen5S9%*jڱ@zjze%),lٶI e([>Dco I8n[x yb̹ 4 mn Y0Mqn偅f}&(,(m#= dx:(.]Iv$,<% XB>Q,G(e:{N*+=3Q,'ͷQU~MՕ*9D-$]SlN}w 4)S^  p2^:nѸ89ޅ=D>Y 6rV8 NNgoZҐ>ޫМK1AbL`~h96JF=iW<=F84< }jClۣ\F1X c͸ f͑ׯ*: ~Tq[@4`xC]Tq!=t[d p-T.b̰37ZQc@wG8 )91JoڥH"}Mp>v~/AD@/W9)%JTY"ÀiPpQI4l ??>v~/;;b25rf(g\UXҰ=b&GC-m4XLGa[?Rh |՜i̦M$r;CbIL4̱,\AiX"㖅:xl$pKm a4-GN>ڶ$ku \TVӴpmѡ RQ{|4F$"L`#$KryHR!@Gh˕^k~]VwSs+f!j?ǹCbL]Lʂi8KK KKeUP6Z{T֒V>@ Lewx-1Nvxx--`XҰҡ(,q{~}ϬJxqv4PrإhT݄γD-$]HRҠ蔉<=nI8jxB|]Nך&g3F6n4u4'wZ0=]kz8{0a%2nYx#xP/yfs`!`Ɠe~uFI||ʀiPz3b }AZ,σSj ].%2nYx<؍;H}9S#sDTbx%n*X ?9HG` YKk-Gц/cro-Mlʑ$*&bX\1f?]BDދ}Jv ,nknlؾ8yr`.g L n| E^1bYe W9qr0jxM#[#ѥ!I '9q 4*Cv_ܴ3ׄ,FJN^ fXѨekbJ,kȂi@*9qYaYD64C'H E`nݡ1ɑsEܘ~IBRs'a.)9m4(Qsa dQؘ2(Rs .lPZ2tctΫV4 "fVVs,Qvj%Y8z,Ӳi`,NE ͍Sft8QGH@)7hV%,EzH|_/>-a#˱pI>H㭬(,q{}w 2l NQw*;_^Ei !mlbUqvhLh }~usKpiO*zLc[~I_;p3WROZ)4p١o!X!{lvW|[Ĵk_IeoY▆-$ aNፂ \R ҡ , e ~ /+W᠇Q!Gq9hG)BGn4$ZPcF9rp^otMz OvM*hdibDԄ_Dɹ0$& =X?H"+q'? P/.0I1g;<9gEr^җỷ>Ԏ~amџt - ׂ9F>9JnH2qh=q-$}ۜjP>Q3 K8J[xUtǁnQF7n ˄QD+)X"g0Ix#l+JQ ,Q{muά W6s8y3-1y itiHs -_.nh6v^@B}F[:xH+)ɉaެ]*{=ߧ+l *,)_¢Q| ,m V *2wx[ }sj뵒FIpՓfȸ:ۿTSF+TlT#nclo^:uIņQY&NR:GDˇ.ڈoD~2ȸe6 ʺ{sJڿgpsHMC0ВhW![аAnC"㖅:4K`.z8ID̞xewCqxw('{JuƓ 4r YKPiyG F/'(c=$J;`a0ݍz8׸iA"]4<1X *xJDiMo^(SˀZ@bB5QUaE @sbc*#kK~8@ y4ĭ5zpjHLkȂeL 04MFa Gi u@fJׁŌ j^na#JXt^ А6}Qi4]|~gئ,¢ 9QHA{HM?4~l*)\ ![W L:'bF<;d81X{ OotqksrGq[CL81aM4`>?NŬд0.m$??' 8f鿖:fa<'^rl&!)XN} 49ĕ,4iidDsLR?PƗ ZBT7k҂*-HCt[ W%d@P_44R׉~<&;vk Cybūf"DXb[CL6tӥnrAs+HA{|?oA,q+eFUTuÒ8sSq_Rc!i9" {'0gFq$5 GO-Vfl욣m)uAqc \ҠaxƐ %0+u_9?,/i9F)T* hȃhN))avb_{\W[4Br 7dCРӡRM vK$9qԢ|W.!!J&u9͆F_>zUL4 uCNTah.m%r[Ɠ^G`)@t9Ґ3 80S!2j^\.C*x1^BC qAZ%ZJaNԕb$OUUTnrҀϳ$:JЎA{|BY,9:C4 |EJ8n[x89`K>:Y`c kl3{xB 0ڤ,:.H~~N p6^C=!1z3bO!ޟQ {1IP `.`F살#;,V$CZ)`4R "ذp{XKeO ($$ Y@"e燣]|FElNMH(7HC\*8OΒfdwp|g(+A6m@_t in Y0M(:g;-||>H|(A\˰=4c6^]SR p(ƤcS8;%/C\t;~? 0N|4g(rpPp }101M5S5P$Q0`RL(p8R ދѥ˓U S3)Qbf@`l)g"{t!Yp[Q{.A~D bOA$]7@ ޚw8lt,, 2F pzN 4IXq zgy#CЗ;V%/$tgZ0M0MWCK! 
{%a-q{ pUq5\t}Nd/ r?h!\T_VYH>pQϥ|Z|?>D ȸe>yxa>9(l?#^^He@8ַc!K%Ұ802ÐY@ cpFsC0,EA"g-q}Nhh|rFgⲷ><ƛX{ m0nAoK!@͠$al@$=nW{6(〴~|H!{(gׅ閜V~RurבH$Ke4}|~'&7;a )Un+u#} 'LnŸtSX*p̪UyQv)3VȨeᵊS2x?Db7]B;Sq$~,{l_QC ?>vD- uhsm ʠ* oO%Â^"Mu=:Ӂhl%AGN'8r{$^>j`4lQ.>Ϡ^T)ۇRK/RcY``dtbё(T~0}SǨToR)@E=Ple; O=}k#`zpA >L8,ס-졵%g[[%6]Iko~DZ-Ӷ) pRۭߋPgHt$s|L͋@͝'[Ұ+7m4}3i%8L<1D- u/@%#8lwKϛ_4h\{,cutedۯ #\'.3b PK| # rnƴOoi!i<Nf6o џ "%2jYx"qOفJe-p6"$c{A Ѵ!}9;caJ>K3 K8J[xCU+\#r 9Q] gVߡ(%D @ hs" Ԑcr4g\@,[3ټ#?R$-؜wLTB}tTeȸeܾ/:Mzm q;9d#u}fgW&,i!Qv%뤁I pp?**Q/hr{ttn*LF:M}^<m!iN۔1 (Wȸe^5& Ax4,[ nm5~'LpH=ḺMY(M>3 d2^4M`!m/mp'$Zqo>%4ltdTa@K ?4,Qk(GMS׎U9_>Ư )10m|t8Ȩeཊ2q'⌸rjݤH8JC;sR۝,m.{0.rVԨB-(L^&!\`R7M۠,9JS:8%b?SxTVgUciz卌DRO `eeF)?^>duvTDpSjh+IE 7jofIUtǣdཀot'8Dg"ŭEIk"Sbcα`U#9 :4U#:14q9.aƨ4ia  IWHi1]QH"?%IG`񼐌*CR3iWlD"Jh6)id%-V}[Ir áUeUE`,؀%!hfc6կwwN2quʌgx,(*%&`^+`q#gb%%+SQ"%XuSݒpNXԭ8:<1`(c0`]8,pqtxǘCF~i)nIC5Q㰸FeDl{];VfFEԑTRa:%8cu")d\{6%&$b^MIEz02|&䓢 KHxϘ8n> Ya [i qFWNaPz1~VpH}F:>1>A8\2ĕ.)anU D_d/ vND۴v`Ee&|Cc%"K,(gL4HMVيS* LfSu1[D0X2hk0۳9&քczXa X5~cE VA[AqFZec߶^q{k8;rͬ 6F;KeU8lbTqNP` [i\G JMk`Zb l;EDڰ& @RLa9$UU[a [i q,eD/Ƕ ]:&1X!XჅrXqz%pS*X? SJKpĕ\ktmz獞& _ل#aSD 8(ژ~P|mؿ , <+ZFhRxY#x]% ʄ2I\5E ,xnUp Ku!bޅUjXE<}u :SpBDqpAn]Ahf AY6YD11.UQ{\[WzĦ. ^CQj Y" ,( 4usiѢ„\ޘ2+ gR1C$|V)*{!#La#%: ~.fc%4E0@ DeJXe?Q{PI%J L5Al`zLsS% kTu UR(d8Ұi aVa& GY!9P8u+ 6I0⃝v(z+ |ܺ Mj ?ceȍ]g̊S2Gbq"Sե%$SL) <:-08q'ݤmA3榨Z4OA9 rI#u Z wLa-:_Kfv2p̨ZԨfahH"e?܆'EtL S>A-I lg3;}ԕ7nDQ|46mTqb-MHj_ПX|M^bVtaL |]c,"|MQ~d~A:b`9sW[|USԯ^_JE,<(k~L 5i8V| a 㸝0搄8\N:(Sݒpǫ3Ezpr xE(<8(:჎D0zX` 3"`e*z+ C` [,G^n5:A" DA,3Dg6q:!V+L-0E, ! 
Scelq3ͺƾg`(m,K0L0:Rh1uV"햄8^wfp_EFùݾA%AL K `*[ē &7sHL`7^|r'.Qhqo~Wd`} b߹&PxTOW{ukY =0ca=V 8`ZVV6@ r\@󎢷h ȀfSp]\֮>Z$!EVĂ3(E%,Ha mpF/-8Ԋyprcv$hU&q`7=<3l .!aa^MJ@uƝ)l%\8E$3G`if8 &{͋p'`#M(Qed+(yeLa-43c꬏ 8 =>K0XJ0LXZt)aN 0E- qՈ옝!q >k<Մž_- 7ULN*VVm6wh ^E2d,ikς^JD֌%0r"qœkF!r 9m dǮ_]1C9dlgF]x>GhLD E<D`pH`cI4_(nfG=1ӠM߃1!mM`_|Dy䠄Œ搄tjQJIjoC7*s℈ (E+إEќVn@L@~tޝ)B b76JJG* ۉb'Qz͌ʛ]HnJn?Zm:&ƼtL_`|[N5D1/ܹYA%3栄#C06a0" $: Up"xO mQ{OX`e`X V&we_]P- 037 Nj >wL̻HpPn<)Q K,:-%,!BR![Z}n꧗X 0ꖰBE;

3j"v[٭bBA*j$-t"qΙ=e+5a#zⰄת2W%:LR0LA(x3ftr:kX0E'KV0!bj+#Ex2@>_GP+ܖ@v7l "IK˹$SL\!ZAg5< ;]/;Z.CPwjI8L˸QB$ @eΒ)(/$-aXKI@.٠mٯx}-~6S~66ϰ_;rb7,mj.ãlO&8LOJ$SJK㴹(B skO(rsN0Uu;Dp`*S"K()UQ Qp?1n|JL8E}h<' sV:B;qP$JH:feJS:n)l$|5o_ S wfHB#j|iM&уc=Ql`H~EeMo19⮫%_s.1N^`8 (犊e=a-e 0L 3/ʋ0>2!(g"^a4bWaU6F >Xdi;## 0L D%y!)l/C3)q6ok g3wѸ)VqXB>uML1$Da夃qVVZuH[NvFD|U2*lJ翊 "U`*E-au>P\pA&rdٙ9! UQ &ҟ S8a#P!v!LVKu}IG* }Dζ*Wğ cCmrQk80CIaN?}bjW'Zf\as1n|wX #Ta)x(>ZX&&Zu;fֽYq|(yN}[YYO#xI0^Z31Wq^N ?Ia,j,>~F'Qy9GPN ?AdRB `uO_| -&uE>1ppRȘ7X-Vd'b2Xry`L+RAu|A` g 3Xub5CWw%Npnߍ a$bĠM6bp i\Gaci6C F W؝N |=Ace&H ǭc:W}bRAL닶igϬ:(#rf>]4 FNQFQO"ɷSăSN M*7fԓ & p|X[3Vx|\SFgFʪ+|t"@ jK`q6"mFqJ]0+'f3a}x-CN( \HvL܋7?) ⿎#(j؞sP+#ئ&oIX7K0L0| Xf3 `#LfSufY)' , ѽN'*K0LrtWq% ))nIp#A"pw$2~߰nl_B3'< 1f 5?6!}Ⰴ x3gLr0hęZ7ޖ7Ki/Qy6}}-'R@uLa-:o| >¬H,;+j(~A | AiPa1GUi=u*LQAQ&y ZU)p$`Q#Xq@6]`^"(l%\qPxVFl,bga癈{&Xp:f@0+~w8SD_=Pm#=XN 02(FH S8Yy\l:UZ*Y) _Cj繲`bv(̋ 滬,8J K ,-RRSǔm(Z]ǡ:v QI3(' . ŦUg0TI*brAax=Zu#Qa6"K`XLANNB ,Q> 0 ϜvǬ8LVp&A')afSe]SN Sqyc%DrAX4袰pGE5|SbZ؍û, }YZPqZrx8-ut ~(H%:t]p>0 J <C uޏ@<@JW@V.*. 
z' glg3ʠ Wt [Fau[6eE, LC SqiєQ|A6sVZu,D -i "omzX>d\0*ݔ ozʆ.v F"&2=* "P.Z ߣ`K>'L@ԬwuANE0A37y6 gcD_u)R>aLf ^G<)ݦ>~3PQcsWX'H"O* oL0M N{5;1_31;Fq6:)N ]LNld CG%p`+ Ļo[xfv@-v'L5#i:qa}G4 H ar[d`4ZPk\<0q ٺa`}ܨ[& S Sn$1& ndOi$\!NU`yV&y6&M'29LQ`~<λY X 83Z,fc1AFbBlP[jjq|},cS@ A}]Y!S`+<㦪}=b2㖘(m^B0<+ML!0[Cꍢ<3:U ʬ:(lԆg9OB0M ` )SpEr%E3*ASD͎(s.XK$0U)+WP*n l\G] ֑e-j@̦5B t;Q5)`>* ( ?"p)Fgᚥ hƃ1uGP1]GxNIaM f %,))vSW,Lam g*-Cq3(6%Y)GD[JB+et:+9֓8j3WWvCa':cw00EA.0us`8$aS!#nJj8{u;ւ4bA4d_5lPJh(*atf1Xa%KbNVlsw7UǛ0h`x1Gb P_*76Wn^RD%b$]U=ၙ +RSaM N8eQ玘URTtB*l{b+[gO7kM ߁V@84Sb_J81gUiqjNl"A>TiJa~/[=Nc*/.g.NBb='Φ`Y'zwu=\U uPW5 ETI .YU {'6$s]>;5 $&|}`K2 *j0ΈFN`:[Tu * |ܺpǩej=X$1yJniERJKC^J7+cV~h){&kZγK(vrO7l+|")l%\dFVq]U;)6Ƒ IHoH@'$䟥 n)l"éHF' LvtL@X,mXD0nLpRaE&hA#0hNܴIVKWxIaufRcp4k^69Ak~g+Ξ0 I*(^p©+}dY%idA]WW >T%x>QLUv,0ugu`܈T&svŅIl%0 MYagF4(!?>0Zo &,?>9HvKWx=.72΄\'L|\ x,wMn NؐLM0)La-:*q+lt 8njAۻo3&jݺ ǭ`(_8m7fU}aYt2 > A6%$\uj135:zNZa [i qvTDAsxTfa (܂zq(VKB>OLB"yΡa+La+-:o<<-sU)oCJ>bf*2 M H*!LspmCUKL`J*ƨxga 61p:B}u Q4H6B&2u#h&qB¢b(م˘0! x&Okr{>ɟt`-0-UJXd-c($D*U@5,0Ų!%Y&1搄Yjcj6i$\:ٖ#' o /u1goIq`E%, _YQrKXaahQ|#u)90<UƠ=Iv Om' N6fEME6B&2WkUn:f/tZ 3Ov8CQA6n$(ʬ`:TWEopuoV"Ƒ>쥷c6d! j >)fw Ԙ1h]BupP*q#aO4E7T ,z}WՑDV6*KqK&ZunyG:*, IMD1栄ӛ} Rǩ8mRDb5iwEnDd0ʻ$A[N o>Ije P pyR~)ǂ*v #F"IBƒ! YasY' [i qdx&0b {V >X'Ra8n3Cf~R֭082JUL"y]Ծ"JQp%.@LLt 8408zKx&&Y=.iDC5aNA&_! W& .aN |ܺpǩ>si [յghw3 >zYRT4xJ'<@DjJ9 -7[W d(Q8 xݕK]wW3f~H )^mԱSuc 9$At7 fݪM񲢲يݖpۿ~ w'̊,WxK7ٔx6SCh|O SVT˔ܻL SJK:Y;V& /j6:۵ք٧>) /`:[ǴR,au JVVVZujK1Ic,5)~+D>O"6|nQq:fMO^A(ubQnK/ݹMƴO:s?Uy3mF:D S6TĘCz1-NoH%: ]GV=mw7Ġbk|oMJojh}ήFਸ਼*µa̮YX,!auHDj&53TA8La#-2~\'Y[cugnO!3JMA[!OV0&pצEIJiE ivc0foWp *25bRF7UʔF$EZ- QFb:P `-_Tɋ``x j &UT?׸ 6Oc<s?ɦ-7e``x֞mh_1Ym: g+^z2}xc"DH\DI"3Ĺy{NO4D_Yy;7.8}bk߭]j4ocvo"o]۹}Wlw{b*!~j%~/~׷}K%bnʖf)U{-1g:qSsrI%̙jP4<21*z:YϼqF>G̬uyǵ.{Ke\~qcѾI\2ʷvn06x&ԑ|z\>w5~ӏ[NO~rP~xyy6[OVD7K)oۣkbq[i C[PÇ(P9AỨ6PYc|˽C8>kA 6,?FSzy,yCUOMgs| mFfSOxom"tmdmaOAR)HS(7R~t__=xok޿c(R_^mM4fa2?=EB~:BPv>|1m܄/^>?߽}dO&O! 
O?y\??7hˋ~gt??| umlSV{xE?LhD?޷G~|\~xol/7a_ joߛu臷~oE$ !(<4QsXg+j!#8=Ѐq}+.ve˻=H0t?S_9ޥHFx9{w\#V燧9lw?T4xj~O}2??OƉ7m? QcxaZF)㧇x5(5ޟcv._b0o/S>il q?e.bn-11cJmcBOGl;gLiDoԦ8 >~I>??x7_>=?Ǐ_,3K>C ?Ү:Ƨ4 JOQij/̏xLϨ(?e폢j]5%>@K\1AT\ۻ/ڷHk~j^,rlC;~g,<ڕ5. f\PPoOǯJq #6LnK-诫Է0/K7?ڜãmWRD"5ͱ3W~~Q[>w- sk3^ /0tq?&C|)y\+Ƒ3]'/69QàVVmy< `y맣]k]9Ǹic1YW-$5ϟ>^o| < yʙ?,袭>ӷ&~1I;>KfeEi+?b='jݳA_^g(f'=Ǽ/ϞD96Qځ_{oڂ-mjA/|6"?[{_3z#SC WR>wKD,p}<3dqyX=c1_o>OFp/Q% [%(!bbsendstream endobj 530 0 obj << /Filter /FlateDecode /Length 5306 >> stream x[[u606/ҋ^bo3+l8VQ9D@m pݴ2Kvk=9*f4ɺsΥ)/__7^z{7n_ܾ}=čP*/̥27fj!]^gR)-wgŲ #ٹaatVeى;xoCo~lm4cs4,v"~qGl.frPY^ٛ ΥC^Ďދ2#h60˛pbf616-s FF Kg\&FL\k/ǻ%/%QwĮEᴙYq܌0&k]eOLXj<@vSmw*ky]!MwMI_êɃNz趩pG<4=[zHD}nmsgT$҉A_h`˒0/]/a k_BUP.P }ԝújwie=~|( ]N0E xvYׇUݽDZ WYՅdnչ# ~4!=_oqR{Ej8Re.ce_Hms4Mh@͠**%P@`,E!t L*}kWE+{m> [ ͐6ī)4-oNy!m{0Oc kO#98cF=ޝ*Om22]'Nunva`b.km{:ʈ46f{nՇ^JI's_OX2Hd2ʡģհSdaF\GԛM^~ax| T"ԺٹcI>}ȹ910k35d/Q]]#萏 J$&T!O "9մ0a6, TH`~(p]wYuyȢ]ԤK jrt oIeFoOVu-=&}@!? Bo`BRSuVê:ןppa0 ` v~T9U$%|UoÉr YQٗ:WOU-Gˠ[Q9N {\;M[60 ߟOI5푭Q6Nj8/ui7%/3ԅW`qT}(0 6?>UU^MRdh9:&h4=`4wz@/HR*o@y`7x7 ,AՇlt21̀j,]xc ǖ#K{e[@Ł)/Gq7p}bAt0C犀3@kh',}j{lj5NZ3BAe(}@ԋ0`FX'lgZ:)Xg}^0rɜ7ǣ>a_ЂY2^ތLZH9;AR~_@n*'Epe8cGoPDou`zKİ.6D3n2y"诒 z}J$] 璎]'2@ͨKZueUdE>IwcX=4}:u";0e{8rXvYk&BG2i pj  dMQ35e3_en <*}9@=$6IPpz=#Rf㙁(\^ *8Ab d9054W1b<5u`!:8-XNMVn3,rbS@}FZJ4s7Pޙ4˒+z  il9wì.^TM:e']"dQx10k`:n`mR4Upr_H 8@704=KD4ڟ ? aNp7tSX,Qsƀ5Hvpna8!8 |dTZk3܀/w, 曲|Hs YQ&elcDi>F4 a"e4MQqi 8/J{rG%$sJ$XEbPoSß48^5GJ11=NiĘ3EA{"!wv"YeoA6w:1Ozn լHqkn#|cHz h)=灞!]>fσt5 3n$1pb=Ԛ̞ʀabďv—&4fCYi=&Wv :{\U z+Đ.dќRKzKar2b LQxDmjW$h e.ew(3ejh\ɆuChŸ~ޝi9jz_IRܺ j9tuևԬ>ʼL" 4O4m䛪SNIMs)Q7`}a3P!2mwӖ|6rZkg:9n:KsQ '$f \TZWJ 8ʪ#U]gf)sa,i!J;I%w-^Iz!sZ597󓁅Rs~3ϤtI9PҝIAT%eҧWc37g/0kA3GF `m2!6LxTs6!=(/* >Ed$PI`g3hc7G9AIDz%: AU1= ksը˯A=2/A gzh8I-գ8PD Gŧ)0%>F`9 /ƒi5 y*#iz Mg-1%wޜ陘oAPy2(j;o\D4mWi"4}$\Z1* M~ET2 4v eUp&`,[~d%ܞ=*Sf@ R.+q}! 
R dWZ]be~IȪp N.nXW"NGt_I|Y0 Q`PءD~:P(6V>&%xq8X^xiZ?Qr_HK Xf $2چ|B#eF =Ta(,tU9wȂ#ӯWYS ZuF q%g\-~Efi;WxopӘk%&&=& s %NYfOÌ?eI>2`:PϵU/B#˅K`-|$Pb=/0^j[ق|>,= nAZɖ  с~QClg.X1T>mX܃币Q3(Ծ+dbp?J6\DcCUzzby);ja;+Gew v='EM8 k K&9wu~Wm lL ZK\ AS81=: NkR^נJ8s+Ny5 _:]QJJTcG]NqepC !d]G[p{H92m|]LCQ%CiBuV2endstream endobj 531 0 obj << /Filter /FlateDecode /Length 2422 >> stream xXn}S !@SK6HL;d!y&{dN$sk{Oߺ{C[p~v?K b.2dWp GK@G j 0 .u dom墌qF\CJYZ+z{4BLҶ5QF-DoZ_N)CIрzד1)]J.&b|SB =D,? *֋I:TEzy] $DKȨT:5]eAF =D E[R]Z!&ڤZF9GQ?r #ޅ1i4*2R] *22]xP%л4SF>d۹kH㞝8OfWj &~ͻ1U]< Դ&yv7F)K?c3!/`<{yxo]3r *Nyj*#6OǞ€K+wL~;_е;˾aOQT8v9?+v~8${nr?ʎߜ7^t$:l^n$N\G C]`)ֱBcayQLBR`ً%ߤΰ̲mspR;d-TH06S 9=8j7o؟ M .W3&e`pO)zO7ǽNH yw?MB]=e.Dtث~xyz*-܅4RUHC^ፅ!Mqq)z3)|nF4B;cn?'Et&ޡ9%r~)FOt/4sCԁL* >b?vDu=`Ndm4~uQI*99>z8&ҰGj/0ՠdl8!2ŎЕCP +% gapuI4Y5Z {> stream x\}I-A8xİ;ۿXo]{X?q3}u^o~eo/^o79SJ~[uwH 6I49cD7$M„ b BmK˱ޏee :WtEqO %ж4(SR`\+{ :[ 8{ܞe=ǒ:;rx.X}x|@2(`Qm89a]mPt&_ۅY_8Ɨps wMj0 H7lHy%[oܮ;eIg8/ܶCZnY|͇~ Sj?ߦ_"xz7县A_s_y+o{OFk) kH !am m8$95ߎORzƷήgDwʒ. k0Hʚ/k۔ bH%a[XODɷG8,|k*醱1$a 0\_ ' 4@vv#{ez:\ƤkV\{u3at?8v ƴgƔ¿5)O/̑~K{xʍ=3(Sb]D >5\oNcf8\HBeXbB!I/̱?cMExB%cH@ wC*0n6btAC=19P=*QJJ,D+}'Zw4%1֍qm"ؔҋn8.=0=?C&xkB%A)XTsbсILpr&DH$PjiB"}HZ ޣljSL >Isݳ _څ)6q/4Vr\aـ)k y4'1q\wuұ2 :6:x33. ƧBͻ:IҕV_{+,Ǜ/BF`MLacyʍ­gboϳu{@ƊJۺi nJ3qfA)YzTl[+oe^ӣy?8b5_i5)H/C@N$GMQ[7cs]nzG\{K%V%C$ H=԰M6TgbTXx%^ ئE םsJk=) "[Cmc/ sI+x WČ\#&XwsfG>"nr!"n {?&fs{]>Kᮍ)KF%.w1o.F֥C0q )˱lLC51~l{Pc~譗e7`|Py[>aĦt ĩ-]0= GMd!5r3zQ Φ߯ǡ)pXfJVdqT)̑NK{|bľZR~;g.,`c(%a ymf >Z{:&u݄X&ċyb'^ߙg>7O;$~mΊa%M4l`)ܷ6?D`8.>18$!q孛5;חS6qj?r;a;i QpCҌdݜ7QG&([nuCyŨ!1mDR :RCM 5{ag{O>)&Xɕ%F*H[ư Q.KiD=yaRo&FRI)}vlǞH=^~1,AQ@l_ā{)|]oʕs(kVhaIa|(H\rݔ:!qWن01HhQ`dW6NtӶ xʹcp`֮59&7)XnleSgF;6!r"95FW~džo{lkra;o&2b$a 7RCbO?rbok b. sKkxłj=H Xwm^oZcMޱ%AX6 !2.5K!J~[{[`JxǛDDt_d0)ōi30@qsTkC'/\5'eʹMg/;FgSz0L H1M]als_r1p\wBA3uCǁk3XzډcC84zfHImc:)Gz) ơIZQ߅S>NϘ@C|! 
7뛺o@3`( ~l_\ExƒrX؞d_N ጙ~8qt=` y|Hg&e8$9ǧ)"}#"}V`X߽)Wġ95G1*s/qlʍlb001Jov#rG5$H=5t۔ b-@ I _3^Yq6ϳqL zp $8m4 QN5w FR0}(8m=x 6e27v/"` g1[%au!4KʈK&1氓Rc75:vH@@z=U[HS8fK ƴ)n 6e8$)8ޗjNJu}f^<蚉}s KH1m`؏4D}m88ejxߴuxA.{l24l{]/?≫w56E %3"Oa#} [)kܸE+ "C\:Gݠ@re9 /6Xj%8$dC8$D2RE͚q'⚮HǑjJl؉љe 5aMNf&~ Ƒ\0] s51seDWlW ->$kАp֭)ޭئl㹔気PP lG:QZ†p*qa\LEZ ļ.w,?CSV,LBxhF(pC307k)8n.'F®$̱n+XCb%oJNiXu=l08xH>(1F5v c#X/9i66BaH6 /a/=,^?ۦPKm1.m M495Ǒ8(_R&2ĘHx o9vC<1 (aLLR;1>fogX4pчBMH 2H E_Zӵ8d!mp]'RҀ…䰋R{0/)ܟ?cń3bzbIá=ҐZR%40~߃໱2E3LY3Y*cZtk{KHiKkߪ4Av6GDۈ:LOʅG$1qv^ȓ6I.P1r (`;h!dPTh \; B;,Ҡ8ٲ8(.D Y%}פ~bݳr]:7@Yߺ8a@ɲ&: KbXQB녁5$DŋmUrv,zP.eDB3!:0>-}p:c5 G$. ~ڔkhk AЇdVW|޹h 8jz8; i[֐X.9!\ү(q9٦?_7pF.1`| A9&-6!0S:6Grg-LW)`Vkwx'G5$R֟ j+.#-kYF]0]?ȭE9"lLbSAfAK9*tSC⃹r",sk)ZdPw< X}R8,|r! wԆ!ZcF6 H9OԶARChdۃ,5Te `,%`yZ$VɅYCl6 S{|GO ̄qn2]N("N$$੸@ҭR b8&qT#'!5^u_xaVxPxXzjvX6H<8$!qߔ:m tp>l#*N気g khXnũkjjczΌςcQqbӑ =]kٯՆ-M8Gb#nFHIJڔޥ6J/^Z{<VhM (qS͘#Zt sL%82P{k|a8$9ҏ3g}-.5fBG$]}k`iM) 1ĘcK5+9q'}š>XQ)Y8CHg5BO6?,0%nLeH@^CJ{TT!KM)(QQ$9\X+w$ckHL %V݇r'Q96yusKk3Ut!HeP+5AFL:ăĨ/% sa|h0 Hb i8xC÷|za[6TT[R,Lk8ZI]x $a0`wM8N?4{ŕ*VͷIń qKI7", 0qrğm-WYyŋ)[x*Cpm Ǔ0GC1m@”Ň5TI149oix˗]v"@ۦLLc97Qm"_# #CCCDXXCm~気r%慯\ S&'d+5`W s'>hJGV$a XsEkxBUJIJ_DU ݌n8oYcjnS)dTdnbȸ9ke)sS  32%7^ ߭8Ϥ@; s> j?]HtZރPq1.*%8.܄[1m|~6eHJn؊気æ[Q v{PVt&֢n,7-Q65!6#5TIFUB42"9ELjq8x3 2:?csj !hxm nN{te1*yayLJ \ V35[aw}ZpO9H˱8 ۀ`LK:&g{a{i q( Bgê71'‡ktB%1 @ʞCzab] 8eomI%e(ZDa믲PJi Rc%v7bu|R*} " UE)c,)Y񇅀 a:!1SovvN]{w!}6n~EUHAdPT`LX3{U -BU9k)x#S#kKD [> Ķ1f‚$ሇaKRIVl^ _*?A_6nE롤fܑJ4.WJ ؛)pG,Yu\8ϢZS:2q}ΩR;_\`c⅛#>m+0rV `.K=FvgkS6`HO$L;êe>85jXBq`~O%q7k8+7ԡR"bqmcpLKH1 @B"z ֚%-pȉG yo7EQ8"F%1`/9HJGƝql{f upw*\s="zQb4ϰ!C^tTwUč'wdYAeA_p%.KkIgձ)KY': GblL2e i0 HrZB"~| %a/=F,s<}Pr$Csr5#ӡ `|oR75iC"9ejxCEO7eO qp]M PfFR`|ڔa *h-cAbqG+xBj5®dR&brVYF)(%ƶZeiZsnJ~b؀(P&˥q|8Ki3բ>TlpaG Bљ,e$JCV۔ b-=YARC:VdHRtSemeJp}b` !`BJ)hiH'a'5 ž[yʁ2-zLp1ɤQ(a //<7M65낄8gqN`؊5)#Q߅Վ3J];@2I8%a OJd2 lT$`;)Q~@WJIRQ6cf^.qcScAY ~$#]?%2ߘr@d9[ 1)]ǔS.Y\^^Z{jg4~owDM;"/ɒRS zulAG}6{@.~P. 
]qr |UG5$.Iiַc^˓95ǡHTљ)ga崋vXe#UL$ '_s[ }0}?hFj-%"qУ9j'/anEmN8p{!8S-1!sv˓mRȽVo6xX^#@X&B”KponXZ@ ޣЖV qk4tj2c}o q`k p&|*)&6!-:^l8JSqTfm3qK#ׇcڀĝѮec $l8$95냷tRydͩ=a/`AKd7$"o8q9g*xAq'Cy}RI.bf}؆8.IXCboJô|k)Ċ{`_qKkx 9WO,Iݢcꥤ~YB&sf$pYC -_KKGH|'M#}4,F 1o u]^a9P )Q_6(>sEɿǠrM'r,}Y #Vݼ%Ԣ`SRÐm Mٷ気2I'M)ȱ@/df@[0l#wm9NH ƴIJT5D B$)\BE 509 =9!^dǘT`L rSWL!:˜g`hQ(!ޕ?eёx^kQqܒ0}sc ƴ SNH bh>Bm*xB[׮?r#M7 /𛵞pc SUJCm @@?AKa S8Mkv|ؿlmM'% 8 1M|~.ј:bO]Mt9PE +Z/UCcT`L@54OzeIq/TbRsEPD,]`|fH*mc4ǡ|QQ>fESnvbn&`՘RBZFyIutyeіr,7jl"ua&3/tQ_y&L+xL195B2 2$l-|*[cxI#(tpVc5<C #qnb*HL-+I>JQc]X%N)d c3n=RAb~ݔj i((mM'`= ĨJp¤l OC;QP{[!9N X^p>5SC_y`tY CRJh4NC\,oMM50Dpbo8f(5:=ԼPM b a$7DNsUa,/E`6>\"/[CԐX qg 1G5@L\|K)C aXry딆B6C^#8C<9Jߔ5uE8۸lpxM jiu6*AmtY GZmr# G{UQnGP_>TIʔ- agP C9wZLJ=,ʾQ3-bA9$6T"lj< l. *Lq]5q7rq:S[NuZWF半;m醭=0T'*&,.Gޙ|12ʖܬ;㤀O66#<6̐Kk@BPrـ=, #P`L9g(y6 :˒Zw;FD(6ކ(ˋ"~Wij9۔P2G5$>H=kAF\TgL(bD|/M1X6<ʃcHOA>(J C1t̹0G- qdtxKc M, ̏X˃X ܔTi*@>#}?W69ԗ+)2,:!O?yG qPk0ϦA8P$aE気Uq~IXŸ^{Z+*DIzIa:VPPm[ V|y0A+xA7M %v6 `v9P H (N͹2@S`tZރPQ4A _;&t5)Ntd v*+H\U^5);2V$` !`a= קt6eIɚrwu'n4LV'$Z{hsl)7̳]f톖vDTs#f*Ov̌ymɺi]0] T1^!ꎟ , {Q.̾RDH|P^8f(*. i4hG6qx(/đ^Jßq+%#lJ{-HpK8^g>8XVT1-WR-^vUs:*JUc(]=)QS9fʨͅX^/UV5XmCXKTޥMMߢ.3 @H2n ,0Z@ipA$̑~K{j]vԫՈF r/561<ćZ&d0lS6QG^^Z{|mhD dR.|, Wjfv@rbl+8%a H859GedC(KaTcF烣ϜA1m@blM!@5i"s sT/Ydhǃ:Λ* "Gվ;9C7teT9U9 d g{ r _fUac NMYSΆIvAA =!b):tb.- u<0v A i&Dit9j&L}Q<\2K!7~%ЂPd^C&tFWYÁF?lPόqTIXqINi'cdmH=FcMp Baj4Gb-!*76qZ4T֊MĨ%/a/=FYbSdzLڼbY'TֹtZApNPM93!b dy\r5q؟js\K,u l6K)9i Q>fDԓ8G8Ա^DŽX|p ZX1mhKDl k.4iZEQk>WaP."цmEH~’T`x\Fĝo.v'~,’J{z{e1˯/|*s7AN$a cw755CY sKkx1k+WQ5Qbͷ &i֏üv: *`of3roUdRfNSiϤD}W}U  EOZ53X1m):!x· ␰気Fb5\xD Z&NKcdQ>.qcjl)_YCq=I?<Ju{ węE}oB3TvMƴࣘ Ԭs[H)NoUnJ9oš*jDu_hXc>8Pv4b5MA^jp[HȆpHȋH=ḯ J. B?ԎMS(`y(+s\fQN)刄J":5mvqܷngJT%;ţXCB.$Ǵ4G t] L ;fHnT- (+%! 
sĴe)I g'mǥ~SßqptG7p6eMyPj5^#q|LH%[Cb딆j 6 ql88P(^9)ae^nW4pCkH'2.pZf&eF|:a;)QhT cnW0 w$D8|Vݘ6T{T$l0G- qPtR*o -@k>6Qdrد9nC77![|}jxwg4!daxȔxD&VGr f2T:rgqBrY|&CBN$(8wC%ǚ2}P,Xucl%unŸ"95ǡ r2^!bvxwBOHJ5t3g`hQǃ:gMڷf5aDQ ޖ i!mT 㮇bϒ k*Qֆ%89:`TaZqJi rkhܚ6- sVNXOJCrxeҘwQ9_=W ՓIA- Z=KHǍKБcWՁ/:!֨S$1 )`zALA?TP%` !`a%j\\c>)1"Ӆ;H`[3>4GSqN T] *%Iqt4>]Q[g#fGղU26) p$$ |0\{ zs_ɆWIݡp( q!vԍT5'ċ9N&NyN)wiq:Cup: OA݉߸2)GũIx .IB BUb'a"WTGyZLv0Е i5iDWyx΂j^% TFA .gCƺQ@ (FcW2PWG+H\5GNJJ! ޣ*̕ly&:R 9yenX1k&G5OgxP*Séng4asLi1w҉R3q۪e 즘]/~l@8$Br_BMzāg;5[BaB2r[ܘZ36[AAY  `tZރP, " εZ 4LH'8E9P)=#Va, Z`alnJPbVL챴9yv#KU_B%[B"W=l08u^.5cXƶkZ] "]Y )Q[*/ēoY 1G8"DƖdqN0ȋ]8EWuY% A P@ϋm]0U jOnÓ211 H0Y D `=7磔MHF27j翝c>8Cxh0VR񃀺[M8my;aZ^P񫬩_$b>=z6vʝyXhc S$!>Lb]Z5 |O KbEa$aUê4;x%AvރPrW$ V+l*IVsaqLKHqW9Mi) i9Ζ 2/UhSnd'"7r)+P`|ZЦLKHlj-S気Ր\Err) Eˆ9bõo 5IyדQC195G}Qi0)5Bk@2fQ$Ck-knsv_lVUPC:0G,%_ R[ڥ 氋_=ɾ L!>\Ҏ+ &jc<(o smZ$̑~K{yP.\2|x\ط#9NI ;rr 氋_qޥ*:3:Bq\Kf ~/-1(Z4~Q(^ ȷblZ@ cm^ 4T>ADD >bU*f- E6[ÁjiPG!d"g)Z?ى"ck q(n5bDi7۵YCMh 95ǡIq^j)7_FsldL9 R`|ᦰ0&+nĐ>JQhܕ }81QѩaX1}rtIXCbIl]wj&LbJ߬m-6Ǚ#)&TUxğ9iixBF8 ͘dF_q p0QMF!R%a )mT'C気gUy#d-RYG6 @80ƃ` rTflcle y sy[#*kI9OVN1q&n AJH, CWA>. 9 s>K5nčКM*asLYݜfN`<ߔn iX'l][ sKkx#Zx@RFNF[Ghzc+28bZ-nг[) n]HA8~[|;UZlOhesnu(*`j!ğ95GG!ޅF )}N%9O-V@U+0 7H9~[b&aBR 7Cz-QI+/xvH9G-8=P<=+OqJB e` >#6!I'#(XWA-ѭxWA=W򄂑%$`> &eej8%!\fVr\777T̢#樇*?KCbNϭAӆ0EG-H=.:gaI)˷';TN8;YcͿݓ4jXJ| Nvb5evs>50#X7){D' Z?LIg[1m@NI1b9oixCÛф|` RP8+&h `] ^Ymf{߲k=0=~y9ޔ FHw(cD[B=E!1m)'zl ' b$ qgvR>S7Ēwյ#r<8$!񑵾E)a2pvv( h tLI`= /5 Q?@gZWt:r#wTBaR0Pk F~G( F=frFo)8-NG:ĆϺ5$ b_F:lڨ.I^^Z{J)剽ODhBX E q-&`ƊrؔU鴬)7 gXQeA@ɫ?8jU,4$nʅTjKykur0Gz) ơ<*[5钢 (v Tg&DfJ iXB: G=~e$z"Y3^$!qewE. l'a'5 Tb&2DcD;J 2W(Jy숛`Lϝb i8Ne`1c0G- qd!a{MFNTs)De kDPӛ/Z68yTgDc^/J$_Yk$ #J$i *xA3wG}'9:2$ڇc6~eJ,,28x_m3bwy(xV%EDZ\%8ǖY/0nE#6-}0G_Pu20ؔ=˵P^\0]&}T[֘C )+`KAk!! Ÿϟ]x*ĺm"펚<ċCXTйO S6ѳFiP~@? 
W8κI9 8L{,+Y{pSҐ?)06搓V߁ Dw)A5ޠ_[v ;+yōi@4RPp+@w0G:- A(QݡNg-`PQ|hw%0 VPؤVI=1&C'̑>JßQX&B`[,NE]`WG]~ b2 I.[k|gLGTn BkVjBhǥIxo@M7<45![|T3A+x|#pSI(`v%!|-K֮Nq4=R H \#!.f6[vb|eoqHNFAixA(҂ =q|RbR`Lڎ7RCM ևⰗ7nn| Dndl),§6nW7G4FHuc Q=a0O%g\RvBbq6B 9a5,$Ɗ8/Nx*RfS5S8 sKkxJ l:/{\lJPmvq0u$D9-! kP*mM???G}Ɖ=vrsw"]=-',_*@PH ! A`M)\(?89~ )c'[BI֩@p $)!5 Q-! A(Fv0Pf ?(˜m" :0G aq#3S*[AEYD\v!zKe독&7覆}ɦkb\/`pjH\U3)E1.087X:nSℍzh4Ha/HcPxk0 Hp"v F0G- qhBK5tOxݱ2.b)9"?4ӆBkHJ." Ɓ0G- q0`EΰxR\ !?C5@ܔ) kV ޣvGA9S)1hhPkq>- ɓT`L`ھ5D-myCP8*~&ʅ-m:4.|Ly>؃z+0.t)>ᔄL.t>!(42VQ>DaFQ,,ᢒ1UL9$6tRq (616I#8L(wU:í#NOW #7昖c S_ra]ژ5+ZBjYu4| >Տ^5ǹeГ%d45WR&ҬH6?95ǑՍ3XrNGզwpk Xޘj>xtILl : ?n]!氓*i%SB)W0ۢDI#Vk& kH\cG & blIvR ޣP-VhubIăr! 5fŭU@7ӥ.bpqv0}(O֝~r!-ZP$@ƥt)(5) s]S⴮ekJeml NqSZaYk:{H"z#Ll`xJc4OTcTQg ᦔPm~気G` %n\CPoTV‰T$ݘdA`LJ&F i8ߘ6L/a/=fXkIRX9r{leN" GTekHLD(n qE! c|qYF5}rGFuPxof}p\n RFF~ s*AUH$;t#B%1Nu"q~WTWPג7B~=]lbGfsD 6.jtS:aٶAsm:ahQ[cO\,&JDK,ƿ&?U#iCio UA^^Z{Y{84t=|[eIaq #V`X}, ,BQtAAKaAaIXp~N IpٶcCZCIV0Y'mF~B(`\<ݔm1&?? sT*]r6$nb1C=A<.#v%)*],G+Pug;hb̈́hX;O &*N/Z+bߵ䈸AX&t Jt(!6??Erk+x B -U'bZݰ!wZqK7zp X^/%Πm[v0C,!(Ѓ*}PXʍ J,!t`NR%a 16js08nn0\dI]f,ŔOu0CzsLKH1mh;N#eQ1fX buSBW:N[͝SUU}w*JlT콶Ĵٔ早٢,#)f>pEɿ[9fM)Xۢ^|2t,ƺIai*mkJ=%Ҁ0 _)x 8=\Z((Q "՝M&VM~怃8{t ݄kpj,Xؠ>"es(DًBM#}{KGڇz.9{=HlT-&)=_(Qrk̭aûWćbdsBwä21気gڵlzdrf[+qQ^c qk ƴfDUfՉKYI.08PKįƈAſCL@K@Ae- !9k)x#(i_n Y[!^ Eq'`aw~')ڦt X]C@ P CPbaq0<ѐMBKeh4˪'!AvB)x^ ~҅䐋1v}dҤ0=0ɂDT(?Z G)~pLIXCbyCѶ aăH25Ǒ}t'%ʊ-y:Ot5gS.[CmC^$8kH)_I_PR 'rZaQ:}S R`L 0Bmgrvd" u5JO k{WΡn-㸦'V"%66/!J54j6(/đ^JßqԞɩDNM4n&H{h_dqm*0ORURAdmIMHO!{s) TY̊ةlgX 8%a W5eeT/AcZ׿ڶjarru<sQ$S,J 3NRfqƖ'~lɑp[Q / 95`5iU#!)7{TE]¨bWöSkJN3JИ. \lqډ??۸95J G\RxS.ďE^z QPI2VHukHM9Ije aTJa/=a=[ߔu*1o쀕X[l0ǸtHC)p+޶A]6xa{i q(y΃jJFS앜V2tUݖ--H`KJ'ƫS͑K{/I\ǡ)ę:A oP M#%25ںI Gu)ui 0c4]( NtB}V? W  $R~BL5~GI.w!θoLPqoiЇiWa Fy,MG )FI,X82[L495`nhWA]$!ŸEpg? 
arهsaǂӼᖆ$61ӂi8ssp-Liy/:.+i5ĩ):D3ٰ)G蓲$:ZӀI_'U1@ 4Z%-W(/j<8%=7tcwp[C7'ˤ q&1¼x'hё(,(mn6ft4HZDѵ{lO-DQ4Mh13Gڕ%[+v͟xkD]cw91Iq ,}jňאH%Pw{h iN,D>.H~~N e*>V艁k>29;CC3F4,'q,$= iaH>\E1㦅?п 9| S!(m:^#EہȑA_9j:03 Y1tПbL17+NBwLwş@z,ˀiz>i@@` ,{Hة`Mt߷s& Ryl륁:fP)%ՠS WJȤT;SяL&9*5$`>!Nµ%  Ea Gi udYQSb5s3Pg5i!ѭ! i`16p@*% $-V;&~ww#PH?D#NI_d>% S1BAX1{P~ZHN~TQ< s*2$zsLTTiBEA[*Ea Gi u йV71kmV^3~iHFyW7ȑҰ􁍰9y}ω,![[֪d#|Yx_OM[t[0, KA:4%-WZl_[8ɸQHS7f$^R2$(p {Iӽ/κմGHP(}_; lnũ8]`dTUR?n %1`}t3q@X CXCJ+1;%3~hEEE /~jP'e DKWdSONdgohIk$+g.0]TXpY.Hc@F-U(__㏃B 瞀_c Ɲ4R}1`(NJ>=Fޞ1Ĕ_Ks%9#5j'H r2F Jـ<ٜ1i ^=bc*r ;N~yFI- [-[\ҴАр%2nYxێ֜;sJ/F0.~H+: Z>B#9ҐCn?x#[} nf@0v8tB lZ(P1tU-9EpJ ,m Ck/)MFf,Wp B]8E (83֠q HS$K FR{,T5MG&i}hE-y}"sj9.ha<8#Qa E97έOb}wG7ۈ+͒`L[Ui K{fL+@PhQY p0kG-Cc3i H|߶k>,$]ǙcJ>He@QXQ{|p44aFEMEJxIG_ϟGvboDj>&9HLkȂi89w \R R2^VDxtAhfn g"9ŷE- J%7)QaAk8lܺ+>Js(i:xY0$9($Cpc[=itŤ#0t $]0bN' HM Tvbc*[pP>8\q\lI |Xyp! >zwD- u6^6cm RP2xZ\˜ /% 8~xcg T:{ %~"TlB FQаD4 bp*(Up^Hf!#/ӪTա;ix4@`.Yˁi<|8xE+rEŸ7G \U֤Ql I @ s8p}BQXQ{,b i~2jcGf>4X8L4XZniu&(ȸe& .L͜@#sr PJ<o,2CT3I7[=ʂÙ2(z:  PK6 D8 @ztqD|iXJ9.Pl!n!??EJdܲ^u#E19ecI7 i(jBA EU֐􁓙8D):}iά! Gi uhmUE=q_j1H{Ф{$5h'ΰ p*q)'K(Dx-ir-~ȝ C7fK!JҀi.=8<ɀJ:C}f 8QU+JXӇs Xls%aVEҰ}NN:yXN;صK0(peT&oD֝\XLڀizН 9msy9+U5Q6Ɏx4c#Q}f϶@;Đ-$}-rCv0d!X!c|O] u~@T ݺ8_t<5$=EMyZ0M[Cf>wV/"PTV4c3-u8:)`mRnĭtqVTg gM&K /Vf@f<='ٱ]%hRtBss*z~_ĕ2KdAuhD6, *idԍOT$̛j@9z*%KF:ct9w+K8B[x-A~qpӥ~. 
8S.ILkЀHyNvLm;B4N֐n*G}m\{sbOW7 P\)qKA$dl"[ %mGc#tB;5#r5:!|$ Ikpα)9DžC{|_ߺU=,q%Ċ%4Q7MxSmS*+`U5O̤DsbV(V;'c@Ez05 | 4@sbDL%hHsOik` Hbs8uboU @һp 8pM_丄e0Gwj&myInƣI:8S 60l31 נ1V}iV]B 3C֑(9Vl4gb"%-ס9: 7TCnp 'p-4{ޢ{1AlDD-3SO EAZаD- uP}b;և%5"^#z@j}_خF=ޙGⶆ,h6alI>H~Np^j/"Gf(㍗9n ~?Y(RCkl2'zq`|$2Dx!S40"CTdLC:jH"f,I)) >˓2 K8J[xCŋ2,݋X0c>N$6c)00 0M^S @` /~d Ȩe n9Nx`)~ـVh1؄ʖ.Ʊa4αP璆,sC4v"%-סU0W5*_ Y/y$4l!KxYx >4vVҰ"6obFpCdWխ cIHq[<ֱp,۔bA4@ʉ" k܏I"h0*"q, rZ}Iz!3CH^+1<^8QV8& {=& &hYI%9c.DT p6^70PWE\dQRIrkeQ#Q`}JA FiGKrE19"-y2([H$&% qmBP4%-ס Hiqsr15Dn&q@d~>9 !MnVAiNO `\=-A{*ggx8|D>DN4al Ivy 41:)I-K_&J͉|jd&c )Vw,L# =GtWŃcuʹ;sȠe1kF]Kv7~m{gBPJT'd4}9ONt(8Id4gBy- ;KzuAεܪYh!AI" tiCqT>g ۳G`IMe D.e iݸ8UP1(]DR1X1Upq&erbqd=H7HH{\H$9_?| c?PdOfUMlNfHVyBW!ѥa In$I*-x]J«/B@Hה$&U"a6 .IQTqp2gTmX*s8qMEݸ[$Qn|؀yʇSp,. *cc*dl*F 0mˆUIKIp񱐴ɒS! e"4& LgEA%_yR5QԀk4=Ő1 WuqZ9؊^8y#MMpZfEiC"Âi@K9K | E,ס#|`>8S1!:,\:C~HlkȂi&`*Ա٥O֕pxC:F3Ti=>8^tG CŊX(M}p6u"N-%4::ᖨTH:V R}ڟL9 ix cQQS<x$I ت+1 x̗'Uw$ Pgw`3y81Vg1tߴ(j3i=Ĕ-.MXC.4F??' Kdܲ^[$س";J![bYg}MpO4rZPKP".u8nVfW- $]0'ƯKir@y[!JVNm;l|uI5lx(&T t)ppZ_ s`8/9wtȸe>o 5@s`pJNJi+y>E^Ԙ8&MLp`p4JX#^FDAK,iBؒߤقoc. K8J[x?Al192﫣GD $ t q!% bc*2éA;qn"" Q`pa z8frR|8['Hj, !/K%)--LGhSlSJ  qpU !Z] OO^jUU JC` +LE(n]@Gyh-% W-=g;x} ?va I ]~(O/}E` Gh %0]{EڪW6Ɗ?h*~ւ8wn,kNoW&{ BrLޫ5TliQSP5j\OI#jJD&5` hr&:珁I">wV/'Tk69ǶV`@%o8LG()4zZPHd`Ǵ~\HZAW6uv~@hho7唋HcE߷ *l4]|~g$ LB#iA YuӑDxU j@*,@H3R,<@=`PaC+8#X >3 )1/kBKh3! p1/58}g>raV'B_BI@$瑈SB&(B̫>HҢ$<$2nYxClMPѭ8{pdZܠ!G鳵حk=ӳ8:ڱaƾ։g^ioLeJ--?ILkȂpr^|;p~\ 3}^.SƜaS 7XqNeJҸL$L_U.go@F MϭY5U(İ,ס͛,l[k`p;jpWrmBF~C{ѷ[5l[|0z. Ά[~DBl,TЙ4vƖ>jxcS@:\ҐDlk1`>?!q,9ұ7qȠ{J{]=2VySD]^JZ"i]Io"Cg4/cbdt27~WS F@|sqߙۈuHXKL{6䃃Jcا|ƱRȸen}p l>c-FH?m J B ̝j@s; K8J[xύHK̜8X/6+nxtKqB,$M8Cee=FۏBC,+Pk2lxX֌-`A# }tiKAK ,ڜtD4, EIC$Nq=5s0 rȌaRQ:2$!ŸEqt:$' ѮWE}`k]Sk]ȒOQCD @ZOd_ͻ Hg* $D &$ eT0Phvƭ Rޫ0fࢥ '9ўr΂FS qAH#Ǿİ TʆtAYkH1{@ !p4 !Pm&-,ITiB86լY>FfH"? + ~8(x{8wI)ߎ$3dfeւ ,m v_KC#͉ ]8H(Xl}IF~J4)H)4cK\qUy@C<#MN9J=ѾICGB} i!"D-vx :xX^i;9,ғ͌-$teokH!ŗL4ĉ** C@%-zPA/t6x76 y$/t s! 
Da G-E}Ek΍pA(%ԓRW: c`Ɵ3'?[@ ӇϏ8ޫPE8U%9*^wQِC *rɹd!*A" \ 8Fx/gE-'_zmn[EX @[0=}8]@ KDa Gi u𩸻J1,C0/}^pq* IXt!-"@%2nZr/p69CVe^xH͟,$8s|Yg n ~E 2 Ld—)8yJEJq>,Or[sqAb/c0~|7DMMS8Сp@4AyZE AL~IH. 8@x/AB1fP[T- f]SVKtUʌ 2p]q%X%-V#k@4uv^u xS}Yp n͵-D)e㝗C*mq p6^wEv BprKK:yc7iXWB҆HN-+(˅YɥSeT{hcq,.zHh롎;~Z1$VS-?EtgRǮ^;/N2'^_E5cKKnqY ut9f Nx[3KL^vD7g@=96P'J-F "hBD zlarbyiQX"㖅:뺸u%'׶ YUõ?J(}t8-RŽ8 B#㎜诜=kjlMoX,;U b+h֐# !q; Yȭ=gzΖ?z~vC !Ka00>] M?1X@!ZMaEjBJkgQAj%cҖu] cqJe0GzCA1Bk CJitͧJNL!E07FI8y-kHbsEZ0ݝLN-4TL3 K8J[xh4ؙ9w#|{ǖz%!YĈm<YcsP’OX6>QXQŸuQ]YFCrwoQEK% 5h\AX' ׶O3jHBQk\'n ;0,C&h7̭KaJ&ּÉNNu!H C5+ .v,r=s@2mFVZ"<7I`N ?\ZVp6^fDfPD`AgquJ:pf%nkȂi sz, ipp^nZv x/}Bm`У6-=nXL_Hx ;$F2-ס uw=MHH<=HVWTc8 ?34d`IQv2K8D[x/gR[_hənҢ~Up1'j If9:37*ջk ދUF}roEg6^;_R $4l!ianδMYA:4%-Xj1/6S=8 䂊<d`>%ʝ_; +@ `@e*P-Noh=]Db,_–/NLDUrH]Ӈc7RhL&f?8V6 4,Q{,6|19XL4ҮP5}3UH4iB҅d,)@z ҟ% Po$Qb Q^!_ Yb^v7oJ%|[Me꧙s=l\]f)4Bd((Ȩܱ?VDa G) ֡UY6=l!@g3|[p1\3k>- @Xu75iy[찬u&I1v4nc.գ=n+i8BҘ@%d GQl^C*pFnl;iUQ!?tq9=Mv@ZUTp;:2tKqqf">tqo I։{y3 ZLpp_E1D.:UA;vehcFiU_Ě|قiPh hc ѡ RBA*T5m{f9r{}t- 0#Q4fɀiMA, R e*Xo pok)aI$zԝcЗҰh5>HG1?Ftۈq%${Z-TiE&cc*T]}k6*붫/&ʶYzH_Jp%;o`!f\HBC>DhQ"ow"P(=nN,vS쏏JabECp$M??v$F螰5Ҩ*O}:Ŝ_t ?l Y"0-j 0)}}$N4g|-$2Qޜ@d4րIxr $]|DFKWIR{bp⯳XS lv)x=-$ѥa \ g _>H~Nȸetm188r5CD­, =:6tU3fr,XPic$??' 8FxBeEƘM'L**^쏆i؇SS/%2dYx-A;xFTCr-zNVnN񋖼%nbDY?ɋpxsa8 g!cgf xɖ`s6e5Fe+!rc4] i  t(8K8Fx"k XD~MxOm&ϩ1xH̡=,$ͷatS/?< y)]k z+sd;&u$ xGb|LlC1#S?>zҕ%fcHs$2DY=3:~-u_%g'` m׈R2s~ߤ`ƒ;HRje`ʣ<B%$:ftzpb.M@j=9LGCD\BAԎc2P͓.}2KdԲ^|g*LρEp{2nAYbvy@5Y6).z ,m 'Fi$%2dE^P:Χ洠x%j^BړD-$]5a:96e! C1sh"1A-OVfM@ҟG4UƒП<-$ YCz|(,(m὎)Socus,Jf6gGBHDdBSMBz..MEy-"_ {)N=Vr-3i4u\/?.ҟ  W!< =c)*@"K\uٸĴ,~U ncgڍ8^ 9=,zp!HAX (FC=A#4P` i<%`PAs{ Z .M>y4EIW@}g8x ~H8D[x/ [*΃i~]8_ǗDj[HZ5Ccاg_ Da Gi uwUksgN~gQo`ZCJLtMY\9>H#(,(m὎lL.>w`4ΘQf+ `ԁ?^&i܃*@*+9%RrJ1sUe Vӭ҅% o>,AK߭394rT A' žۍd܀FJHH94֐gڂ魒dIkE>wV-VC.7"NaK/`HXRr sƜ%J")X *ti=8xuFHUwZ}$ *@җ.[U(q)mཊ,E2\nTLB_R9 U7(ҐC_nT4h"i! 
8>D<Ǒ(e:4;vX2ؐi,dNև+-r`}ك>4HRB!Rv ٔxr4޵֐Do& #4YM KA;Ea Gi u YzObNKt \x&9Sb7aȀh91~ q8C{ "؇-R牺KFc'q)y4MzdMʁRI]D<r&GUK.tʰi34fuL##K $Ύr s4 -GĐ4kk^/U;QK:1JRCJZb{n,σsB_Ms>D~2p2Z*&$j3"WOȻc_@F-4p蹌5d`ȣ=Gcc&{ yY]~Wib̕iqQ 7^Xt5@r JPpp2^]C&ɦ(ei0 $)) K, ɹ0e҇/η.RNN|{ˁɱѼJؙZC;H'[R,M@'"㵆[6'%dلw pړֶ͇D k ]4샴B'nZ"A6zFgm'g aNtt1[ ,a{LM b¡)3:}iָGu 'SkHb,hMsTQ ꎋv'K8H[xBg1Nep1t2&؃^`zt296ea FjDa Gi u0-sn6& u`wRZJ4kȂR XX5=\[A;Dq #8owV=$0=4< t)ǭzJ8f鿖 (o!Q"x€wI[Ҷ%7qPVFQ4Ұ{x- < .4=s3w`xcc'wY‹\h! d(,(e:(JuMONh/39fw!p·˲5Me޸_8@u7X;|bз. tф،[V 1PD,4ySa _i?[C&7'v/v]qu_%C J▆-.@z8Ұ~@?/g~6 `0ظlqGHCdttAH"*~6ުS/`;Đ嶦*Ii I1 Z>p,&>-X5hȇP)mO]U8D鬤q_q%1w $NmPaE] Wvy Y_i9 .<详$80==Q!9(l°K $1U(1h.TPbnfYҰଇ Z>TCN Yg4CU9Q9nYx{TDũ*v.ڡ^Y=yuJ! |qmB*ʖ[C.8%mrD!*snC7 %߭Aq[C[CLӇnjy`Y*}t 2 K8J[xﳻj,gsٹſ2N7p׻{Ҹz>Ile!i8 caI>ХOQ)JYޮ%JLFb;ImT_FuJN,Ѥa IWsM Ԁ€% W![p4Nznwa-h8QcxV+Hƛ4䢻8cc*4'hR1dpLؠZNDQqC(qO H(?e Șeെy3nYsdF룊{iNUi2 y3PW?U*kH%_Kzu6UlNE*FÀDIk:0`.<\TR [OK`;αľnEytL ʙc1WeU%4ltϱPK[ Mh9QX"㖅:O 6_5Gi?a(ɪfXҰ s! $}Cȸeiơ%)RB?}EˑS Z4-\.Cth0p6^}(|0)j/:9t<`q\ThFf1Q.r%bC_lU!ݔ Y};qS`.!҂idY% $1s3fGKS??^Kn 8X*:4th8 Kdܲ^n~c3+>Ep v)o7,Qa I'ۦ,DWx 9QH"?PCq̙3k2{`ڗq-bKҰC3lK>z7"D- uyp `69}_wU]U^ҰDQC^FU? ;B=|_CQHM  hF:Nd![@Z])0 Tr4h :e"aC[Aޫ._ӵY,쌑۬$9~ݮ5 ~,qɝLOךN:- ipiFa[>}}r)Ei]QA߇72`.^gnw9|V#x8FxB||nwJk5F•IǙpJ B} Da[`#9vc$9!@_Ԉ/)"Uk ֿ'>OX CZlQ˘曭j wS(F3ir!nǀIzzLq,A+"z]Ú:q?)?D^$>,7B۠!CCQD,>sxV"lUomL^SHƖHti@.INo.n !sgW,7̟5!Sxh¾DV4Aٚ`>PJN\VFX" (RBQ:awhLrd7_2u+FI z EDN0 J\e1r@46 !%\@00( @Ĝ3K+AhI(N pl2S%q?|sTnN+P Zjn k etqKr,\Ұx++ Kdܲ^] SG|WQZCHU@ļ%B; b!m]t|'\Z|^8n=_җ7ԓopJG: \v[vwgp^07'8&1mWfsٛ8za IGhSx`t(8K8HxKUmt8>fntm{qfN:%Qt5‘9 -#Q%-ס+z>9;^BS]ӫ DF.)5'Qr. Iq֏Ȋ#}IT.KLRŅ@َ#{YDǣjs";pm[']BKൠbѴO'90&FǢqRLZj&k I_.6ڦ,b0bƌv"q}pf<|э[2aT;J Y9LH*ۊi+KdԲ^[3kbepsL u ~B]ҰB˗ͩжPaі(,(mRJ0CJrb7ty7=^ĊH 59K 60Wh+K8F[xBU Lָzߜzc%\"eY%2/cсhd .3ՈDBE0rafT-Ƒ򡫋67U7Аџ"%2nYxCM8ޜoپp $ڕcAa4}V'4l{,ჴȸeR(iNR =`17^P4J sAl?. 
cT!0~{ޑ9 H~Np2^2RO23p4MJ$*HHΉ%?Ndp2^]ܚkF@Q_f)(No@X`- σS1+4- KωY%0;7v >9@nY ׯ"EIH (¦C_MN5q%" MA}d*T%͚sAx b g>Ė•`'k&P1% c9 $m5uɎPި3Ŀzj1 ֐Ͳ8tiۥ\ p2^[KfJkqQ!q݁鰤csz5ܔDܗXH.agEI>LQ GMESfhli,t,awu`CƷH9 94h r:D1Bk JW+9?-z\?M\(V90X Q)m#3[f ܃3X212,&gCW+IΖ Tz0 0p$ Y8]峗fh6tmw=0x,jI#y Ē-$}9cNܤG"pYsh"1|7ճք݁@&z3(tư$0m4}|~'0! >th8 K8J[xCĢ*9ѽi. @asϋũnZ>?Q J k8$- 'H79ي,udWoH쮆 YH0Cr8By:(.YzTpq>|2n&d@ ]z[J ʦ#cR hShZ^\b|z;F)rxqVu18]1"8Cbn4l!iP&8% L""Xxm?Gcܗ&o? #o#iI7F XCQ<~|ƵH[P,nєT\eNkZ Zjlů֐Xk؂i8fy,T @cz8Э<.3kӁ8M`>>?@]tĝ , e =5TkmhMF>|CeURوs$ӓL.UK3R}J*N!9hZRu~. ' Ӣh(m/I}Àd`qat`:‘֐"],q2n 6f(Dޭ\PO_J{zaVp"4aK' K(Hx-YJlqQ&)%xWG)iJP4-SJJ"O!i~AKA<%y#\Q8"J%lnga I0 r+EF.J߇t(ؿ%-u ?m!Pd4}@CDZ8k2}V :VRvAuS-~m4!cA|PV( {cNF1NؒffXJP)2l͘v\&1N*鋄%m펁>iH Yf3<T'}%B|_jLLLST1( D? T5S2 *%y;T0uBC"xmtFk@JدYj6z?$X%D:46yŞ<]F\pԶ^v ikkp0QClfU ͙4֠i!iN>}G&:TSYms,1c-*_(s4js:BXNT,S±+|c}Y4؉Vza #p]ET B$ YʍRiirn6@F$~a[0M*7"`ᲀ]IuG"㖅:te<1N J, h֓%%ʌS`(9 >N%~1T!3pwnފtQX[yU11A @wϷĭnp.mdCy k?$ Vt\Thȶ6_5:KQ%ԠĎ c-BH;8%‘pж^R:w,yέ6[^Z !eؓHL+H 0Vc@] #F-B!Zbr2g*p߬'vH؜4I> X$0 $IP'YKa$_ A;@Y@z-#zU[Y؂' ]Yⶆ, (,Zi r!/=kHBA*xތKY8cl#C@Fݓ@$·D,EEs٦,\mR#B,YA^`Ӝ w(A.Yⶆ, hCcI>HAk~$J\Ghwwb7w|[ntdnkP"9Lrf\y.B.|HdԲZ(&{Ϝ*T)EqP#Лh gZps"%0xqh̚=u7E[߼]8*I8rI7 m@$=@fqdH ,m* 3zbnx[¯S[0M- ;>Hp^KGmT(%8+tׇ&@wClqJu<,mAm3 Pb4knk\wCHqC`LpZH39߯w T) B$ Y&Y-ޔ/ℙT׼xYliح!4FL/qL+է4*$2DYxܰ?8kфvYK\'.љǵf`p6^[}g=ţ9۫kЭҶ{(/P3!E$I՞ J}8 /# WԐ(,q{ܣ$qq["2{4SkId $mS]t z`'/<:pm3xQ' y^/4,O,v# [xGmƠ3hz!9ҋXp:3]8Ct$ Ed|1j;՛tAs@ P,hQn?}8|([َ5SOt$>,};iӡ(D- uh/ "{hmY,V-oM@Wڛ_Cql msmB7?\v.sh"ԙ3+*ɩ_z"cs'ɖ4,M[0M1Gp >.w(,q{*$Pt8i@ER/7(j&x8D.yY&kec5GͿþ&̈&l 0C}0r Ӫ> S"@!`\8@R.D~2Ȩeᵊ>u*q,ҋ\L47?Dע$Lp&莅) .(,(mU}DU,D+F6aQ"hOU,?E 2`Z.a^[4B$RER؎QUpeu#NlJd3S3 d2^<`!q90mp$Zq3 %4ltdTa@K ?4,Qk(GMUU9>֯ )10mdv|t8Ȩeཊ8K\ЃssFܜ^InRP%|tuQ9HiNI~Np6^vgֱe}pi0̱2$>D [\t8>[w CgwܷD=Sl69J_;#R2>\̱̑qHI ߻( 1Y[Bގ n\ mHIH&] n,~V;x #^+ ؝P`+}$KL:ǂn4XhҰf;rUp済;q3[2t%X~,$]\AbNcY}ti[F!RCcF C26;;G06FOͤ_m8ѫ0.B31٤ Tt+VpZ|Gܑш/D5w)9QV\ń4?h&HLi؂iP39'B nhX"㖅:3 G<9DQҨ'~#$- [0]]A~8֐Q\AQX5=y_1/~'ّ",$fb[YX9ƈӫ[Gv6><R7A>=XUvKuՋf:h];3[fx 
jȌ+STsH`0썝&X];H YmAp/8m&)b_MSE 0&'EAHའ1ᤋI7tF␬08^~WN8aP 1VM{G#= ̪b iR .N2SvJH*mٯȕZ$)nMVj-FKMxg;KE X:P2ϘhVٞS*4LfSu% \D02r;`8ۏgatW cw{Xa ]uKE VA?qFZe/W}PN&&NfVqF6FZ<ˋU841Xao'̀B b`ZОk>0-1\FT6"jmXƦp@RLa9$O[m)l%\Ǒ_X:DLslYc⛂] z$Ym,'.0uC3." 508^ߙr O?1]MxՄq^6Ml>t0{^5P*5ZRw !]ol,<+FxRF+:'.KNF! *b%$߅jZ|B-Nv]`M}BNnjk M6!(3sЦ9 )&ƅ9#=T޼D" HQfI9(n}˟N> G(2rY1OS loQDv`x%1mR@UAFFJu<<ŠeK3">i  DEeJXe;QÃ˖&%̬:H Y*?)qP#ft;Բ) ao'LA*񏢷0$ gcf\cz:AξOKo'c1(PČPep6(>-A| m`*[b"=p{M p IP@^~: #^c1yYљ17u]ylT|ZEB"$Euft$F_bBD,f1ʚӂ(dRSԱNsHB(8Q)nI#g)ybð+ڍ zX'a5 j=̨ Q2#F&<J[FǬb]BOX.wI)6sHѕ3nRi)d\=EQfM>_0:qIY+?)FqP@sc̀r)`6 _^i$<~M』؉`Ityc]cS23UK *!xbUq7uᒦ~n~yݝFOmv^eQCh9*I`* ̫A0Ӵ!)j ?$#rV@ob^s#e; \?OOg7Lxq~%V%/gg^:LÏgF'3]`e;;?}cD)x='y8)ek!wRp~V3G#;lvLwqس(&UEIf/#{)XA,-` h ^\)1HY:!sm%$\T0+1 QSŒ]pG&ΨUOx-t.#__ UNfv /Y?1i 9]Gu{Za[P( !DGp:ngYRӰsDI掬Y! +<:]8xڬ|f􎙰wv 9)P=JTQf/|R@OL0퓄g#…g.BlAT X#)e<-0TfSl~ pЈH%2tyvcV E(_S0>{ExOR>zDTXa-SDKBrΫ?qYt8qz|~a]XJ0L(Úu)aN yYa[Pav옍q >[5jZyˉb[u- *ujI''+uM4u/o#I25ʍ72Ⱦ5c#6x_`͈r#DnY?g< 0A^>aF<"L6ouJs]@2E.1i1@=!  
fh%<_V[3#iPhGaaFH[Y&5QD~9(!a09$!:-'CR`@bxGeQAr%6H(Su S>ȏSW HhAR@_?m6,ix(ŽG$0`.~.^ƴGF$m(^/< y6&N8(jS8VHHm' bDC73c(o:w!T֊j'&[abLDŽ+m4rD1/ܵYA3o栄7C06e0" $:_N`sVz#IbaVVN;`EjmrW_KS>wO &}옘GT3\7 OSJK,m4FK KpVpHV$8^~\mekS:viXW`W[+bjEl.v̨}ჷtb"J*j7-3_uN&58fz;q1[B.ߙ=AA88l)l%\ʊoMıjj\/ܝFxx\-Y` Q1%躀c a#La%2.FV9aзDQ{+*l!xgu 51M2%a]O$SJKMpA+u,]Cmܱƞ( S[cJD5w :JZ!#>ՋZt%L;ci8sQ_"H C뜕nlT3[a [) ʹMaƌ)]߸D 2ej]$%z|'7XT&Epw]E.U8`;> ԉͨ;yb3o|,*+YO$`p/̼(fk@SD_|ԈMur( DkH&hS;#+ 0L Deyﷴ!)l/CС0AoiD ̕ơH׺)VqXB&CprAxu+La+-:Szp6v3"b=Qefӛ+2O3L^dދ(VqXaP41趄QAXKupV;$H6f .)gEbe'UIW$ wxPIYSR䔡46N#P0kqƊgb mbIY➂w6rN( q,5oQlANM4 }~{'LlG.$;&͛eSƍSD_p=uU4CU c3!C0J0Ŋ>$]a1S2Ua4ȃnl/tLAYrN%atӉ Sd uJaui$\rw$2c8!l74ėPȇb7_(mYC͏Mo8,:H84b94v0VN0.2bj>K{6Nξ#) ߺ p U>{¬H-(evA |AjPaGUiu*5E:9zF95_"n޳hY Sc$`Qu#@])6B[򲈭Jl"vV`Doaù֝)`5V|p !{CDб]` s-V (aXe>Qp{\:UZ*Y) ơTU\}01Ƕ(O΋ MZVa sL T)AiةcBnʮP;Fm`FTG 0 ^fNcV&Vf&AU)ڔ² 2EԻ)' cuee t֔pQJKߠ!cc"c ^Š(b*pb!o\]ψ*>|] pˇ#냫 %5]oqLxݽ{ < H%:m`\#h)KVF5a8pbe`:t㈱L7U-uxH%:m<"^@:ge:1am0aJ t wEĖf&Hc%d+`IÑ"all$L4Zf*WS0嫼'0e𣭮xOb~sԱN.aN q"햄8r[7koCK̶>Zq*SC SN$,w:#jVM Jȑ:X'҈Ԑ A%&d3jQ(PEb J(ߖĜ2SJyST-báT`S܆Ǭo V:R@XD`@.nn ` `'m-6גf.="V'FE1lR0/ `LFKu/s4LQĘGh,Y},`*[N1qH vH$*`m)d#Cxtasu`yfEqz*G*AT.*PU:51 v]@]֟E0(l%\J6;1jNJ껊RHCsטz_VLu{PhC-0-&~DpVF R 5J\2M gcV1qp2P{ v!ʖ2ƬO7uK\UuP75ETI .YU{ɞ Pm96}.P$&Ό'xeSմ0\'lI0Lc:ZBuo SJKҗ"bQFo&qҷRD́NŋDŋ$0uB Q/,$ ,o&08^Md5MW8>$ a~ȓD1"r?7Q;c×  0 ^f`v_01hZP*6tQlH ia$aa!88dERJKOn׀?YT݈|˜SI E4A$ÃތAbsRh[76J!O;f1m躀Y۰Nlؘ% ´E)tA#Шeo$%(^]\f*6/#J&ՠ ^GgO Lb2&/tSB ᕮP߲B1Rϐ̓~KP<%B|*+XaoŸ1+5Z?M(ƫ=/ncI(\v v7Qd5lu41E1$Kc kty?9HvK³q,HC +0:Wயׄ솩ITt80>26KKOdbD6GNMpg=0}[9oD%!"t[WApuL`%(^|"T1c7fU}qc 6 i6%$\.aΖCVVZu|f63Qiw6QfRQ _dJaVVZu^8ov>0ePA䕠,$f,H1 򴀄D@đ84Za<6Ơ+s%McT8[F "`o.05췎9DV( 5 ysR_e:!jayT BeLWEOBN,O=YOr4t La -2@E ,G1LF"V9@S5 ,0h ,psH¬51g[xyp*z,G~O^4c ܒ0ʸY@# O:?)68N8W}N(F0>&"RfG1eKxg|"U$ IƬ31X@SD_}$$Ҵ-VlLцCQTGj!2Ƽ$V&B.a-\[H+%82(N`z:$t$T6A` +(%$rT5E+ WOen'F>gS#=Mfa̦kM+f$j&58TYpAVujk22@n\9vw?8#/b^}xk\Y`h!pXxT^۵g- u1z(C*R`JՈL V!+Ui$\FrOptXZ VèߺE0Vd0A$ ZFz?*Ѹ 
6Oc<s[͌6Ms~~xo~Oso_=#6؄ۂ&Sc5eq3<!p9%&PΈN~9=$=lw o;.8}}b;Vb_yc7&2Ҏ׏;J,vwO`T"|aoS0KэTQ[bTu*2B%J3O4E21*z:Yy2덼9i7]];whZ?/^^<c,;o8~qXrjK>?}._mtw7_=xy.Bm,c9_|ᇧ؎GOOf` f[t{_Pv8F帪i/cC~ѽ{|xüǷ(uy],c .ʢX~xCL۲Xeho//c]cċL/#z:.Lom j?~?<}?.P8֏ǫz(ߎ y_%Bxa:D =ߵ>to1mm?С&H*a&خ|WɼJ6Fm?n]űzD|?~2>3.[c 8CQ'~ӓçLJ/9c{tB=˯I㼔<<=PԼ'/wG0Cf<˯P|3}|yy)nּmr<[%\ǭϭ5\FOwxoqy+}U?ᩝ߿z1@~z_>~?}&Xob_y|<t>~<Lxq q7x m5Q}<|~S~h )|s.YD).qo~Ϟ=QK ܨl-S-I)x\)s|9& /ﻬ/OUy| 8ۯ\ΨAcH,O/r?ͣ>,{{_:Bnqݾzn"(D6sjqu7=Q% C1={1 w#X͕a{v=^xi#0&n qݨߎxso4!&_Z=bl|租>?YɊ` 6dz?G~vOo˕Ŀ}>>>|֨uq\Y>7:̯~q K.X{خeXUػ(16_b 7c((/w-OO!V@|9>̱ikm?tyy%LJӷtO_ɦx~_==<|G.,[WiǮ?cv,|~_>:nE<~{lg?>}&֢ xu*OO(_.grǘV.a;_{ޟ> }ԏ*Y2o3V VO?[3.s^\X:cxa%a!_rO?_qrҳoR>k~Gr]B>t#/X]\ag;e);ob^=xw\?1S>fPǟSkQ[A>om] Pxm欱Ťh@endstream endobj 533 0 obj << /Type /XRef /Length 341 /Filter /FlateDecode /DecodeParms << /Columns 5 /Predictor 12 >> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 534 /ID [] >> stream x햿+Eas9s$%7LJMdRAAIY%lwCŏ JYL1)3|zz>}97+/WŽF^'dUywqLvгrʰrd_҆w-ږK}ybA+)QyC5uxW ~Ǩ1sģbSԢ-|%/VPx.my"4 lPl癸-O)3?@ĶY1>D8oOك:XgQesxXZx])~ C endstream endobj startxref 417012 %%EOF clue/build/0000755000175100001440000000000014715042016012317 5ustar hornikusersclue/build/vignette.rds0000644000175100001440000000031114715042016014651 0ustar hornikusersb```b`aab`b2 1# 'H)M +Gt -.I-Rp+NMI-ƪ % M b fa(D@bK0% 5/$~N,/AQU▙ 7$apq2݀a>9`~sMI,F(WJbI^ZP?X)clue/build/partial.rdb0000644000175100001440000000007514715042003014442 0ustar hornikusersb```b`aab`b1g``d`aҬy@D?M7clue/man/0000755000175100001440000000000012734172047012002 5ustar hornikusersclue/man/sumt.Rd0000644000175100001440000000670012734174226013265 0ustar hornikusers\name{sumt} \alias{sumt} \title{Sequential Unconstrained Minimization Technique} \description{ Solve constrained optimization problems via the Sequential Unconstrained Minimization Technique (\acronym{SUMT}). 
} \usage{ sumt(x0, L, P, grad_L = NULL, grad_P = NULL, method = NULL, eps = NULL, q = NULL, verbose = NULL, control = list()) } \arguments{ \item{x0}{a list of starting values, or a single starting value.} \item{L}{a function to minimize.} \item{P}{a non-negative penalty function such that \eqn{P(x)} is zero iff the constraints are satisfied.} \item{grad_L}{a function giving the gradient of \code{L}, or \code{NULL} (default).} \item{grad_P}{a function giving the gradient of \code{P}, or \code{NULL} (default).} \item{method}{a character string, or \code{NULL}. If not given, \code{"CG"} is used. If equal to \code{"nlm"}, minimization is carried out using \code{\link[stats]{nlm}}. Otherwise, \code{\link[stats]{optim}} is used with \code{method} as the given method.} \item{eps}{the absolute convergence tolerance. The algorithm stops if the (maximum) distance between successive \code{x} values is less than \code{eps}. Defaults to \code{sqrt(.Machine$double.eps)}.} \item{q}{a double greater than one controlling the growth of the \eqn{\rho_k} as described in \bold{Details}. Defaults to 10.} \item{verbose}{a logical indicating whether to provide some output on minimization progress. Defaults to \code{getOption("verbose")}.} \item{control}{a list of control parameters to be passed to the minimization routine in case \code{optim} is used.} } \details{ The Sequential Unconstrained Minimization Technique is a heuristic for constrained optimization. To minimize a function \eqn{L} subject to constraints, one employs a non-negative function \eqn{P} penalizing violations of the constraints, such that \eqn{P(x)} is zero iff \eqn{x} satisfies the constraints. One iteratively minimizes \eqn{L(x) + \rho_k P(x)}, where the \eqn{\rho} values are increased according to the rule \eqn{\rho_{k+1} = q \rho_k} for some constant \eqn{q > 1}, until convergence is obtained in the sense that the Euclidean distance between successive solutions \eqn{x_k} and \eqn{x_{k+1}} is small enough. 
Note that the \dQuote{solution} \eqn{x} obtained does not necessarily satisfy the constraints, i.e., has zero \eqn{P(x)}. Note also that there is no guarantee that global (approximately) constrained optima are found. Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the algorithm. The unconstrained minimizations are carried out by either \code{\link[stats]{optim}} or \code{\link[stats]{nlm}}, using analytic gradients if both \code{grad_L} and \code{grad_P} are given, and numeric ones otherwise. If more than one starting value is given, the solution with the minimal augmented criterion function value is returned. } \value{ A list inheriting from class \code{"sumt"}, with components \code{x}, \code{L}, \code{P}, and \code{rho} giving the solution obtained, the value of the criterion and penalty function at \code{x}, and the final \eqn{\rho} value used in the augmented criterion function. } \references{ A. V. Fiacco and G. P. McCormick (1968). \emph{Nonlinear programming: Sequential unconstrained minimization techniques}. New York: John Wiley & Sons. } \keyword{optimize} clue/man/cl_membership.Rd0000644000175100001440000000462312211412501015065 0ustar hornikusers\name{cl_membership} \alias{cl_membership} \alias{as.cl_membership} \title{Memberships of Partitions} \description{ Compute the memberships values for objects representing partitions. } \usage{ cl_membership(x, k = n_of_classes(x)) as.cl_membership(x) } \arguments{ \item{x}{an R object representing a partition of objects (for \code{cl_membership}) or raw memberships or class ids (for \code{as.cl_membership}).} \item{k}{an integer giving the number of columns (corresponding to class ids) to be used in the membership matrix. Must not be less, and default to, the number of classes in the partition.} } \value{ An object of class \code{"cl_membership"} with the matrix of membership values. } \details{ \code{cl_membership} is a generic function. 
The methods provided in package \pkg{clue} handle the partitions obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself). \code{as.cl_membership} can be used for coercing \dQuote{raw} class ids (given as atomic vectors) or membership values (given as numeric matrices) to membership objects. } \seealso{ \code{\link{is.cl_partition}} } \examples{ ## Getting the memberships of a single soft partition. d <- dist(USArrests) hclust_methods <- c("ward", "single", "complete", "average", "mcquitty") hclust_results <- lapply(hclust_methods, function(m) hclust(d, m)) names(hclust_results) <- hclust_methods ## Now create an ensemble from the results. hens <- cl_ensemble(list = hclust_results) ## And add the results of agnes and diana. require("cluster") hens <- c(hens, list(agnes = agnes(d), diana = diana(d))) ## Create a dissimilarity object from this. d1 <- cl_dissimilarity(hens) ## And compute a soft partition. party <- fanny(d1, 2) round(cl_membership(party), 5) ## The "nearest" hard partition to this: as.cl_hard_partition(party) ## (which has the same class ids as cl_class_ids(party)). ## Extracting the memberships from the elements of an ensemble of ## partitions. pens <- cl_boot(USArrests, 30, 3) pens mems <- lapply(pens, cl_membership) ## And turning these raw memberships into an ensemble of partitions. 
pens <- cl_ensemble(list = lapply(mems, as.cl_partition)) pens pens[[length(pens)]] } \keyword{cluster} clue/man/fit_ultrametric_target.Rd0000644000175100001440000000621411304023137017022 0ustar hornikusers\name{fit_ultrametric_target} \alias{ls_fit_ultrametric_target} \alias{l1_fit_ultrametric_target} \title{Fit Dissimilarities to a Hierarchy} \description{ Find the ultrametric from a target equivalence class of hierarchies which minimizes weighted Euclidean or Manhattan dissimilarity to a given dissimilarity object. } \usage{ ls_fit_ultrametric_target(x, y, weights = 1) l1_fit_ultrametric_target(x, y, weights = 1) } \arguments{ \item{x}{a dissimilarity object inheriting from class \code{"\link{dist}"}.} \item{y}{a target hierarchy.} \item{weights}{a numeric vector or matrix with non-negative weights for obtaining a weighted fit. If a matrix, its numbers of rows and columns must be the same as the number of objects in \code{x}. Otherwise, it is recycled to the number of elements in \code{x}.} } \value{ An object of class \code{"\link{cl_ultrametric}"} containing the optimal ultrametric distances. } \details{ The target equivalence class consists of all dendrograms for which the corresponding \eqn{n}-trees are the same as the one corresponding to \code{y}. I.e., all splits are the same as for \code{y}, and optimization is over the height of the splits. The criterion function to be optimized over all ultrametrics from the equivalence class is \eqn{\sum w_{ij} |x_{ij} - u_{ij}|^p}, where \eqn{p = 2} in the Euclidean and \eqn{p = 1} in the Manhattan case, respectively. The optimum can be computed as follows. Suppose split \eqn{s} joins object classes \eqn{A} and \eqn{B}. As the ultrametric dissimilarities of all objects in \eqn{A} to all objects in \eqn{B} must be the same value, say, \eqn{u_{A,B} = u_s}, the contribution from the split to the criterion function is of the form \eqn{f_s(u_s) = \sum_{i \in A, j \in B} w_{ij} |x_{ij} - u_s|^p}. 
We need to minimize \eqn{\sum_s f_s(u_s)} under the constraint that the \eqn{u_s} form a non-decreasing sequence, which is accomplished by using the Pool Adjacent Violator Algorithm (\acronym{PAVA}) using the weighted mean (\eqn{p = 2}) or weighted median (\eqn{p = 1}) for solving the blockwise optimization problems. } \seealso{ \code{\link{ls_fit_ultrametric}} for finding the ultrametric minimizing Euclidean dissimilarity (without fixing the splits). } \examples{ data("Phonemes") ## Note that the Phonemes data set has the consonant misclassification ## probabilities, i.e., the similarities between the phonemes. d <- as.dist(1 - Phonemes) ## Find the maximal dominated and miminal dominating ultrametrics by ## hclust() with single and complete linkage: y1 <- hclust(d, "single") y2 <- hclust(d, "complete") ## Note that these are quite different: cl_dissimilarity(y1, y2, "gamma") ## Now find the L2 optimal members of the respective dendrogram ## equivalence classes. u1 <- ls_fit_ultrametric_target(d, y1) u2 <- ls_fit_ultrametric_target(d, y2) ## Compute the L2 optimal ultrametric approximation to d. u <- ls_fit_ultrametric(d) ## And compare ... cl_dissimilarity(cl_ensemble(Opt = u, Single = u1, Complete = u2), d) ## The solution obtained via complete linkage is quite close: cl_agreement(u2, u, "cophenetic") } \keyword{cluster} \keyword{optimize} clue/man/cl_classes.Rd0000644000175100001440000000126711304023137014375 0ustar hornikusers\name{cl_classes} \alias{cl_classes} \title{Cluster Classes} \description{ Extract the classes in a partition or hierarchy. } \usage{ cl_classes(x) } \arguments{ \item{x}{an R object representing a partition or hierarchy of objects.} } \value{ A list inheriting from \code{"cl_classes_of_objects"} of vectors indicating the classes. } \details{ For partitions, the classes are the equivalence classes (\dQuote{clusters}) of the partition; for soft partitions, the classes of the nearest hard partition are used. 
For hierarchies represented by trees, the classes are the sets of objects corresponding to (joined at or split by) the nodes of the tree. } \keyword{cluster} clue/man/cl_ultrametric.Rd0000644000175100001440000000440311304023137015266 0ustar hornikusers\name{cl_ultrametric} \alias{cl_ultrametric} \alias{as.cl_ultrametric} \title{Ultrametrics of Hierarchies} \description{ Compute the ultrametric distances for objects representing (total indexed) hierarchies. } \usage{ cl_ultrametric(x, size = NULL, labels = NULL) as.cl_ultrametric(x) } \arguments{ \item{x}{an R object representing a (total indexed) hierarchy of objects.} \item{size}{an integer giving the number of objects in the hierarchy.} \item{labels}{a character vector giving the names of the objects in the hierarchy.} } \value{ An object of class \code{"cl_ultrametric"} containing the ultrametric distances. } \details{ If \code{x} is not an ultrametric or a hierarchy with an ultrametric representation, \code{cl_ultrametric} uses \code{\link[stats]{cophenetic}} to obtain the ultrametric (also known as cophenetic) distances from the hierarchy, which in turn by default calls the S3 generic \code{\link[stats]{as.hclust}} on the hierarchy. Support for a class which represents hierarchies can thus be added by providing \code{as.hclust} methods for this class. In R 2.1.0 or better, \code{cophenetic} is an S3 generic as well, and one can also more directly provide methods for this if necessary. \code{as.cl_ultrametric} is a generic function which can be used for coercing \emph{raw} (non-classed) ultrametrics, represented as numeric vectors (of the lower-half entries) or numeric matrices, to ultrametric objects. Ultrametric objects are implemented as symmetric proximity objects with a dissimilarity interpretation so that self-proximities are zero, and inherit from classes \code{"\link{cl_dissimilarity}"} and \code{"cl_proximity"}. 
See section \bold{Details} in the documentation for \code{\link{cl_dissimilarity}} for implications. Ultrametric objects can also be coerced to classes \code{"\link[stats]{dendrogram}"} and \code{"\link[stats]{hclust}"}, and hence in particular use the \code{plot} methods for these classes. By default, plotting an ultrametric object uses the plot method for dendrograms. } \seealso{ \code{\link{is.cl_hierarchy}} } \examples{ hc <- hclust(dist(USArrests)) u <- cl_ultrametric(hc) ## Subscripting. u[1 : 5, 1 : 5] u[1 : 5, 6 : 7] ## Plotting. plot(u) } \keyword{cluster} clue/man/solve_LSAP.Rd0000644000175100001440000000331014241725155014234 0ustar hornikusers\name{solve_LSAP} \encoding{UTF-8} \alias{solve_LSAP} \title{Solve Linear Sum Assignment Problem} \description{ Solve the linear sum assignment problem using the Hungarian method. } \usage{ solve_LSAP(x, maximum = FALSE) } \arguments{ \item{x}{a matrix with nonnegative entries and at least as many columns as rows.} \item{maximum}{a logical indicating whether to minimize of maximize the sum of assigned costs.} } \details{ If \eqn{nr} and \eqn{nc} are the numbers of rows and columns of \code{x}, \code{solve_LSAP} finds an optimal \emph{assignment} of rows to columns, i.e., a one-to-one map \code{p} of the numbers from 1 to \eqn{nr} to the numbers from 1 to \eqn{nc} (a permutation of these numbers in case \code{x} is a square matrix) such that \eqn{\sum_{i=1}^{nr} x[i, p[i]]} is minimized or maximized. This assignment can be found using a linear program (and package \pkg{lpSolve} provides a function \code{lp.assign} for doing so), but typically more efficiently and provably in polynomial time \eqn{O(n^3)} using primal-dual methods such as the so-called Hungarian method (see the references). } \value{ An object of class \code{"solve_LSAP"} with the optimal assignment of rows to columns. } \references{ C. Papadimitriou and K. Steiglitz (1982), \emph{Combinatorial Optimization: Algorithms and Complexity}. 
Englewood Cliffs: Prentice Hall. } \author{ Walter Böhm \email{Walter.Boehm@wu.ac.at} kindly provided C code implementing the Hungarian method. } \examples{ x <- matrix(c(5, 1, 4, 3, 5, 2, 2, 4, 4), nrow = 3) solve_LSAP(x) solve_LSAP(x, maximum = TRUE) ## To get the optimal value (for now): y <- solve_LSAP(x) sum(x[cbind(seq_along(y), y)]) } \keyword{optimize} clue/man/cl_bag.Rd0000644000175100001440000000644214715041246013503 0ustar hornikusers\name{cl_bag} \alias{cl_bag} \title{Bagging for Clustering} \description{ Construct partitions of objects by running a base clustering algorithm on bootstrap samples from a given data set, and \dQuote{suitably} aggregating these primary partitions. } \usage{ cl_bag(x, B, k = NULL, algorithm = "kmeans", parameters = NULL, method = "DFBC1", control = NULL) } \arguments{ \item{x}{the data set of objects to be clustered, as appropriate for the base clustering algorithm.} \item{B}{an integer giving the number of bootstrap replicates.} \item{k}{\code{NULL} (default), or an integer giving the number of classes to be used for a partitioning base algorithm.} \item{algorithm}{a character string or function specifying the base clustering algorithm.} \item{parameters}{a named list of additional arguments to be passed to the base algorithm.} \item{method}{a character string indicating the bagging method to use. Currently, only method \code{"DFBC1"} is available, which implements algorithm \emph{BagClust1} in Dudoit & Fridlyand (2003).} \item{control}{a list of control parameters for the aggregation. Currently, not used.} } \value{ An R object representing a partition of the objects given in \code{x}. } \details{ Bagging for clustering is really a rather general conceptual framework than a specific algorithm. 
If the primary partitions generated in the bootstrap stage form a cluster ensemble (so that class memberships of the objects in \code{x} can be obtained), consensus methods for cluster ensembles (as implemented, e.g., in \code{\link{cl_consensus}} and \code{\link{cl_medoid}}) can be employed for the aggregation stage. In particular, (possibly new) bagging algorithms can easily be realized by directly running \code{\link{cl_consensus}} on the results of \code{\link{cl_boot}}. In BagClust1, aggregation proceeds by generating a reference partition by running the base clustering algorithm on the whole given data set, and averaging the ensemble memberships after optimally matching them to the reference partition (in fact, by minimizing Euclidean dissimilarity, see \code{\link{cl_dissimilarity}}). If the base clustering algorithm yields prototypes, aggregation can be based on clustering these. This is the idea underlying the \dQuote{Bagged Clustering} algorithm introduced in Leisch (1999) and implemented by function \code{\link[e1071]{bclust}} in package \pkg{e1071}. } \references{ S. Dudoit and J. Fridlyand (2003). Bagging to improve the accuracy of a clustering procedure. \emph{Bioinformatics}, \bold{19}/9, 1090--1099. \doi{10.1093/bioinformatics/btg038}. F. Leisch (1999). \emph{Bagged Clustering}. Working Paper 51, SFB \dQuote{Adaptive Information Systems and Modeling in Economics and Management Science}. \doi{10.57938/9b129f95-b53b-44ce-a129-5b7a1168d832}. } \examples{ set.seed(1234) ## Run BagClust1 on the Cassini data. 
data("Cassini") party <- cl_bag(Cassini$x, 50, 3) plot(Cassini$x, col = cl_class_ids(party), xlab = "", ylab = "") ## Actually, using fuzzy c-means as a base learner works much better: if(require("e1071", quietly = TRUE)) { party <- cl_bag(Cassini$x, 20, 3, algorithm = "cmeans") plot(Cassini$x, col = cl_class_ids(party), xlab = "", ylab = "") } } \keyword{cluster} clue/man/cl_prototypes.Rd0000644000175100001440000000265311304023137015170 0ustar hornikusers\name{cl_prototypes} \alias{cl_prototypes} \title{Partition Prototypes} \description{ Determine prototypes for the classes of an R object representing a partition. } \usage{ cl_prototypes(x) } \arguments{ \item{x}{an R object representing a partition of objects.} } \details{ Many partitioning methods are based on prototypes (\dQuote{centers}, \dQuote{centroids}, \dQuote{medoids}, \dots). In typical cases, these are points in the feature space for the measurements on the objects to be partitioned, such that one can quantify the distance between the objects and the prototypes, and, e.g., classify objects to their closest prototype. This is a generic function. The methods provided in package \pkg{clue} handle the partitions obtained from clustering functions in the base R distribution, as well as packages \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{kernlab}, and \pkg{mclust} (and of course, \pkg{clue} itself). } \examples{ ## Show how prototypes ("centers") vary across k-means runs on ## bootstrap samples from the Cassini data. data("Cassini") nr <- NROW(Cassini$x) out <- replicate(50, { kmeans(Cassini$x[sample(nr, replace = TRUE), ], 3) }, simplify = FALSE) ## Plot the data points in light gray, and the prototypes found. 
plot(Cassini$x, col = gray(0.8)) points(do.call("rbind", lapply(out, cl_prototypes)), pch = 19) } \keyword{cluster} clue/man/Kinship82_Consensus.Rd0000644000175100001440000000307212734171653016114 0ustar hornikusers\name{Kinship82_Consensus} \alias{Kinship82_Consensus} \title{Gordon-Vichi Kinship82 Consensus Partition Data} \description{ The soft (\dQuote{fuzzy}) consensus partitions for the Rosenberg-Kim kinship terms partition data given in Gordon and Vichi (2001). } \usage{data("Kinship82_Consensus")} \format{ A named cluster ensemble of three soft partitions of the 15 kinship terms into three classes. } \details{ The elements of the ensemble are named \code{"MF1"}, \code{"MF2"}, and \code{"JMF"}, and correspond to the consensus partitions obtained by applying models 1, 2, and 3 in Gordon and Vichi (2001) to the kinship terms partition data in Rosenberg (1982), which are available as data set \code{\link{Kinship82}}. } \source{ Table 6 in Gordon and Vichi (2001). } \references{ A. D. Gordon and M. Vichi (2001). Fuzzy partition models for fitting a set of partitions. \emph{Psychometrika}, \bold{66}, 229--248. \doi{10.1007/BF02294837}. S. Rosenberg (1982). The method of sorting in multivariate research with applications selected from cognitive psychology and person perception. In N. Hirschberg and L. G. Humphreys (eds.), \emph{Multivariate Applications in the Social Sciences}, 117--142. Hillsdale, NJ: Erlbaum. } \examples{ ## Load the consensus partitions. data("Kinship82_Consensus") ## Fuzziness using the Partition Coefficient. cl_fuzziness(Kinship82_Consensus) ## (Corresponds to 1 - F in the source.) ## Dissimilarities: cl_dissimilarity(Kinship82_Consensus) cl_dissimilarity(Kinship82_Consensus, method = "comem") } \keyword{datasets} clue/man/partition.Rd0000644000175100001440000000525312734174303014304 0ustar hornikusers\name{partition} \alias{cl_partition} % class ... 
\alias{is.cl_partition} \alias{is.cl_hard_partition} \alias{is.cl_soft_partition} \alias{cl_hard_partition} % class ... \alias{as.cl_partition} \alias{as.cl_hard_partition} \title{Partitions} \description{ Determine whether an R object represents a partition of objects, or coerce to an R object representing such.} \usage{ is.cl_partition(x) is.cl_hard_partition(x) is.cl_soft_partition(x) as.cl_partition(x) as.cl_hard_partition(x) } \arguments{ \item{x}{an R object.} } \value{ For the testing functions, a logical indicating whether the given object represents a clustering of objects of the respective kind. For the coercion functions, a container object inheriting from \code{"cl_partition"}, with a suitable representation of the partition given by \code{x}. } \details{ \code{is.cl_partition} and \code{is.cl_hard_partition} are generic functions. The methods provided in package \pkg{clue} handle the partitions obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself). \code{is.cl_soft_partition} gives true iff \code{is.cl_partition} is true and \code{is.cl_hard_partition} is false. \code{as.cl_partition} returns an object of class \code{"cl_partition"} \dQuote{containing} the given object \code{x} if this already represents a partition (i.e., \code{is.cl_partition(x)} is true), or the memberships obtained from \code{x} via \code{\link{as.cl_membership}}. 
\code{as.cl_hard_partition(x)} returns an object which has class \code{"cl_hard_partition"} and inherits from \code{"cl_partition"}, and contains \code{x} if it already represents a hard partition (i.e., provided that \code{is.cl_hard_partition(x)} is true), or the class ids obtained from \code{x}, using \code{x} if this is an atomic vector of raw class ids, or, if \code{x} represents a soft partition or is a raw matrix of membership values, using the class ids of the \emph{nearest hard partition}, defined by taking the class ids of the (first) maximal membership values. Conceptually, partitions and hard partitions are \emph{virtual} classes, allowing for a variety of representations. There are group methods for comparing partitions and computing their minimum, maximum, and range based on the meet and join operations, see \code{\link{cl_meet}}. } \examples{ data("Cassini") pcl <- kmeans(Cassini$x, 3) is.cl_partition(pcl) is.cl_hard_partition(pcl) is.cl_soft_partition(pcl) } \keyword{cluster} clue/man/cl_tabulate.Rd0000644000175100001440000000105311304023137014532 0ustar hornikusers\name{cl_tabulate} \alias{cl_tabulate} \title{Tabulate Vector Objects} \description{Tabulate the unique values in vector objects.} \usage{ cl_tabulate(x) } \arguments{ \item{x}{a vector.} } \value{ A data frame with components: \item{values}{the unique values.} \item{counts}{an integer vector with the number of times each of the unique values occurs in \code{x}.} } \examples{ data("Kinship82") tab <- cl_tabulate(Kinship82) ## The counts: tab$counts ## The most frequent partition: tab$values[[which.max(tab$counts)]] } \keyword{utilities} clue/man/CKME.Rd0000644000175100001440000000071011304023137012771 0ustar hornikusers\name{CKME} \alias{CKME} \title{Cassini Data Partitions Obtained by K-Means} \description{ A cluster ensemble of 50 \eqn{k}-means partitions of the Cassini data into three classes. } \usage{data("CKME")} \format{ A cluster ensemble of 50 (\eqn{k}-means) partitions. 
} \details{ The ensemble was generated via \preformatted{ require("clue") data("Cassini") set.seed(1234) CKME <- cl_boot(Cassini$x, 50, 3) } } \keyword{datasets} clue/man/cl_consensus.Rd0000644000175100001440000003671214503010223014757 0ustar hornikusers\name{cl_consensus} \alias{cl_consensus} \title{Consensus Partitions and Hierarchies} \description{ Compute the consensus clustering of an ensemble of partitions or hierarchies. } \usage{ cl_consensus(x, method = NULL, weights = 1, control = list()) } \arguments{ \item{x}{an ensemble of partitions or hierarchies, or something coercible to that (see \code{\link{cl_ensemble}}).} \item{method}{a character string specifying one of the built-in methods for computing consensus clusterings, or a function to be taken as a user-defined method, or \code{NULL} (default value). If a character string, its lower-cased version is matched against the lower-cased names of the available built-in methods using \code{\link{pmatch}}. See \bold{Details} for available built-in methods and defaults.} \item{weights}{a numeric vector with non-negative case weights. Recycled to the number of elements in the ensemble given by \code{x} if necessary.} \item{control}{a list of control parameters. See \bold{Details}.} } \value{ The consensus partition or hierarchy. } \details{ Consensus clusterings \dQuote{synthesize} the information in the elements of a cluster ensemble into a single clustering, often by minimizing a criterion function measuring how dissimilar consensus candidates are from the (elements of) the ensemble (the so-called \dQuote{optimization approach} to consensus clustering). The most popular criterion functions are of the form \eqn{L(x) = \sum w_b d(x_b, x)^p}, where \eqn{d} is a suitable dissimilarity measure (see \code{\link{cl_dissimilarity}}), \eqn{w_b} is the case weight given to element \eqn{x_b} of the ensemble, and \eqn{p \ge 1}. 
If \eqn{p = 1} and minimization is over all possible base clusterings, a consensus solution is called a \emph{median} of the ensemble; if minimization is restricted to the elements of the ensemble, a consensus solution is called a \emph{medoid} (see \code{\link{cl_medoid}}). For \eqn{p = 2}, we obtain \emph{least squares} consensus partitions and hierarchies (generalized means). See also Gordon (1999) for more information. If all elements of the ensemble are partitions, the built-in consensus methods compute consensus partitions by minimizing a criterion of the form \eqn{L(x) = \sum w_b d(x_b, x)^p} over all hard or soft partitions \eqn{x} with a given (maximal) number \eqn{k} of classes. Available built-in methods are as follows. \describe{ \item{\code{"SE"}}{a fixed-point algorithm for obtaining \emph{soft} least squares Euclidean consensus partitions (i.e., for minimizing \eqn{L} with Euclidean dissimilarity \eqn{d} and \eqn{p = 2} over all soft partitions with a given maximal number of classes). This iterates between individually matching all partitions to the current approximation to the consensus partition, and computing the next approximation as the membership matrix closest to a suitable weighted average of the memberships of all partitions after permuting their columns for the optimal matchings of class ids. The following control parameters are available for this method. \describe{ \item{\code{k}}{an integer giving the number of classes to be used for the least squares consensus partition. By default, the maximal number of classes in the ensemble is used.} \item{\code{maxiter}}{an integer giving the maximal number of iterations to be performed. Defaults to 100.} \item{\code{nruns}}{an integer giving the number of runs to be performed. Defaults to 1.} \item{\code{reltol}}{the relative convergence tolerance. 
Defaults to \code{sqrt(.Machine$double.eps)}.} \item{\code{start}}{a matrix with number of rows equal to the number of objects of the cluster ensemble, and \eqn{k} columns, to be used as a starting value, or a list of such matrices. By default, suitable random membership matrices are used.} \item{\code{verbose}}{a logical indicating whether to provide some output on minimization progress. Defaults to \code{getOption("verbose")}.} } In the case of multiple runs, the first optimum found is returned. This method can also be referred to as \code{"soft/euclidean"}. } \item{\code{"GV1"}}{the fixed-point algorithm for the \dQuote{first model} in Gordon and Vichi (2001) for minimizing \eqn{L} with \eqn{d} being GV1 dissimilarity and \eqn{p = 2} over all soft partitions with a given maximal number of classes. This is similar to \code{"SE"}, but uses GV1 rather than Euclidean dissimilarity. Available control parameters are the same as for \code{"SE"}. } \item{\code{"DWH"}}{an extension of the greedy algorithm in Dimitriadou, Weingessel and Hornik (2002) for (approximately) obtaining soft least squares Euclidean consensus partitions. The reference provides some structure theory relating finding the consensus partition to an instance of the multiple assignment problem, which is known to be NP-hard, and suggests a simple heuristic based on successively matching an individual partition \eqn{x_b} to the current approximation to the consensus partition, and compute the memberships of the next approximation as a weighted average of those of the current one and of \eqn{x_b} after permuting its columns for the optimal matching of class ids. The following control parameters are available for this method. \describe{ \item{\code{k}}{an integer giving the number of classes to be used for the least squares consensus partition. 
By default, the maximal number of classes in the ensemble is used.} \item{\code{order}}{a permutation of the integers from 1 to the size of the ensemble, specifying the order in which the partitions in the ensemble should be aggregated. Defaults to using a random permutation (unlike the reference, which does not permute at all).} } } \item{\code{"HE"}}{a fixed-point algorithm for obtaining \emph{hard} least squares Euclidean consensus partitions (i.e., for minimizing \eqn{L} with Euclidean dissimilarity \eqn{d} and \eqn{p = 2} over all hard partitions with a given maximal number of classes.) Available control parameters are the same as for \code{"SE"}. This method can also be referred to as \code{"hard/euclidean"}. } \item{\code{"SM"}}{a fixed-point algorithm for obtaining \emph{soft} median Manhattan consensus partitions (i.e., for minimizing \eqn{L} with Manhattan dissimilarity \eqn{d} and \eqn{p = 1} over all soft partitions with a given maximal number of classes). Available control parameters are the same as for \code{"SE"}. This method can also be referred to as \code{"soft/manhattan"}. } \item{\code{"HM"}}{a fixed-point algorithm for obtaining \emph{hard} median Manhattan consensus partitions (i.e., for minimizing \eqn{L} with Manhattan dissimilarity \eqn{d} and \eqn{p = 1} over all hard partitions with a given maximal number of classes). Available control parameters are the same as for \code{"SE"}. This method can also be referred to as \code{"hard/manhattan"}. } \item{\code{"GV3"}}{a \acronym{SUMT} algorithm for the \dQuote{third model} in Gordon and Vichi (2001) for minimizing \eqn{L} with \eqn{d} being co-membership dissimilarity and \eqn{p = 2}. (See \code{\link{sumt}} for more information on the \acronym{SUMT} approach.) 
This optimization problem is equivalent to finding the membership matrix \eqn{m} for which the sum of the squared differences between \eqn{C(m) = m m'} and the weighted average co-membership matrix \eqn{\sum_b w_b C(m_b)} of the partitions is minimal. Available control parameters are \code{method}, \code{control}, \code{eps}, \code{q}, and \code{verbose}, which have the same roles as for \code{\link{sumt}}, and the following. \describe{ \item{\code{k}}{an integer giving the number of classes to be used for the least squares consensus partition. By default, the maximal number of classes in the ensemble is used.} \item{\code{nruns}}{an integer giving the number of runs to be performed. Defaults to 1.} \item{\code{start}}{a matrix with number of rows equal to the number of objects of the cluster ensemble, and \eqn{k} columns, to be used as a starting value, or a list of such matrices. By default, a membership based on a rank \eqn{k} approximation to the weighted average co-membership matrix is used.} } In the case of multiple runs, the first optimum found is returned. } \item{\code{"soft/symdiff"}}{a \acronym{SUMT} approach for minimizing \eqn{L = \sum w_b d(x_b, x)} over all soft partitions with a given maximal number of classes, where \eqn{d} is the Manhattan dissimilarity of the co-membership matrices (coinciding with symdiff partition dissimilarity in the case of hard partitions). Available control parameters are the same as for \code{"GV3"}. } \item{\code{"hard/symdiff"}}{an exact solver for minimizing \eqn{L = \sum w_b d(x_b, x)} over all hard partitions (possibly with a given maximal number of classes as specified by the control parameter \code{k}), where \eqn{d} is symdiff partition dissimilarity (so that soft partitions in the ensemble are replaced by their closest hard partitions), or equivalently, Rand distance or pair-bonds (Boorman-Arabie \eqn{D}) distance. The consensus solution is found via mixed linear or quadratic programming. 
} } By default, method \code{"SE"} is used for ensembles of partitions. If all elements of the ensemble are hierarchies, the following built-in methods for computing consensus hierarchies are available. \describe{ \item{\code{"euclidean"}}{an algorithm for minimizing \eqn{L(x) = \sum w_b d(x_b, x) ^ 2} over all dendrograms, where \eqn{d} is Euclidean dissimilarity. This is equivalent to finding the best least squares ultrametric approximation of the weighted average \eqn{d = \sum w_b u_b} of the ultrametrics \eqn{u_b} of the hierarchies \eqn{x_b}, which is attempted by calling \code{\link{ls_fit_ultrametric}} on \eqn{d} with appropriate control parameters. This method can also be referred to as \code{"cophenetic"}. } \item{\code{"manhattan"}}{a \acronym{SUMT} for minimizing \eqn{L = \sum w_b d(x_b, x)} over all dendrograms, where \eqn{d} is Manhattan dissimilarity. Available control parameters are the same as for \code{"euclidean"}. } \item{\code{"majority"}}{a hierarchy obtained from an extension of the majority consensus tree of Margush and McMorris (1981), which minimizes \eqn{L(x) = \sum w_b d(x_b, x)} over all dendrograms, where \eqn{d} is the symmetric difference dissimilarity. The unweighted \eqn{p}-majority tree is the \eqn{n}-tree (hierarchy in the strict sense) consisting of all subsets of objects contained in more than \eqn{100 p} percent of the \eqn{n}-trees \eqn{T_b} induced by the dendrograms, where \eqn{1/2 \le p < 1} and \eqn{p = 1/2} (default) corresponds to the standard majority tree. In the weighted case, it consists of all subsets \eqn{A} for which \eqn{\sum_{b: A \in T_b} w_b > W p}, where \eqn{W = \sum_b w_b}. We also allow for \eqn{p = 1}, which gives the \emph{strict consensus tree} consisting of all subsets contained in each of the \eqn{n}-trees. The majority dendrogram returned is a representation of the majority tree where all splits have height one. The fraction \eqn{p} can be specified via the control parameter \code{p}. 
} } By default, method \code{"euclidean"} is used for ensembles of hierarchies. If a user-defined consensus method is to be employed, it must be a function taking the cluster ensemble, the case weights, and a list of control parameters as its arguments, with formals named \code{x}, \code{weights}, and \code{control}, respectively. Most built-in methods use heuristics for solving hard optimization problems, and cannot be guaranteed to find a global minimum. Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the methods. } \references{ E. Dimitriadou, A. Weingessel and K. Hornik (2002). A combination scheme for fuzzy clustering. \emph{International Journal of Pattern Recognition and Artificial Intelligence}, \bold{16}, 901--912. \cr \doi{10.1142/S0218001402002052}. A. D. Gordon and M. Vichi (2001). Fuzzy partition models for fitting a set of partitions. \emph{Psychometrika}, \bold{66}, 229--248. \doi{10.1007/BF02294837}. A. D. Gordon (1999). \emph{Classification} (2nd edition). Boca Raton, FL: Chapman & Hall/CRC. T. Margush and F. R. McMorris (1981). Consensus \eqn{n}-trees. \emph{Bulletin of Mathematical Biology}, \bold{43}, 239--244. \doi{10.1007/BF02459446}. } \seealso{ \code{\link{cl_medoid}}, \code{\link[ape]{consensus}} } \examples{ ## Consensus partition for the Rosenberg-Kim kinship terms partition ## data based on co-membership dissimilarities. data("Kinship82") m1 <- cl_consensus(Kinship82, method = "GV3", control = list(k = 3, verbose = TRUE)) ## (Note that one should really use several replicates of this.) ## Value for criterion function to be minimized: sum(cl_dissimilarity(Kinship82, m1, "comem") ^ 2) ## Compare to the consensus solution given in Gordon & Vichi (2001). data("Kinship82_Consensus") m2 <- Kinship82_Consensus[["JMF"]] sum(cl_dissimilarity(Kinship82, m2, "comem") ^ 2) ## Seems we get a better solution ... ## How dissimilar are these solutions? 
cl_dissimilarity(m1, m2, "comem") ## How "fuzzy" are they? cl_fuzziness(cl_ensemble(m1, m2)) ## Do the "nearest" hard partitions fully agree? cl_dissimilarity(as.cl_hard_partition(m1), as.cl_hard_partition(m2)) ## Consensus partition for the Gordon and Vichi (2001) macroeconomic ## partition data based on Euclidean dissimilarities. data("GVME") set.seed(1) ## First, using k = 2 classes. m1 <- cl_consensus(GVME, method = "GV1", control = list(k = 2, verbose = TRUE)) ## (Note that one should really use several replicates of this.) ## Value of criterion function to be minimized: sum(cl_dissimilarity(GVME, m1, "GV1") ^ 2) ## Compare to the consensus solution given in Gordon & Vichi (2001). data("GVME_Consensus") m2 <- GVME_Consensus[["MF1/2"]] sum(cl_dissimilarity(GVME, m2, "GV1") ^ 2) ## Seems we get a slightly better solution ... ## But note that cl_dissimilarity(m1, m2, "GV1") ## and that the maximal deviation of the memberships is max(abs(cl_membership(m1) - cl_membership(m2))) ## so the differences seem to be due to rounding. ## Do the "nearest" hard partitions fully agree? table(cl_class_ids(m1), cl_class_ids(m2)) ## And now for k = 3 classes. m1 <- cl_consensus(GVME, method = "GV1", control = list(k = 3, verbose = TRUE)) sum(cl_dissimilarity(GVME, m1, "GV1") ^ 2) ## Compare to the consensus solution given in Gordon & Vichi (2001). m2 <- GVME_Consensus[["MF1/3"]] sum(cl_dissimilarity(GVME, m2, "GV1") ^ 2) ## This time we look much better ... ## How dissimilar are these solutions? cl_dissimilarity(m1, m2, "GV1") ## Do the "nearest" hard partitions fully agree? 
table(cl_class_ids(m1), cl_class_ids(m2)) } \keyword{cluster} clue/man/n_of_objects.Rd0000644000175100001440000000221212211412717014710 0ustar hornikusers\name{n_of_objects} \alias{n_of_objects} \title{Number of Objects in a Partition or Hierarchy} \description{Determine the number of objects from which a partition or hierarchy was obtained.} \usage{ n_of_objects(x) } \arguments{ \item{x}{an \R object representing a (hard or soft) partition or a hierarchy of objects, or dissimilarities between objects.} } \value{ An integer giving the number of objects. } \details{ This is a generic function. The methods provided in package \pkg{clue} handle the partitions and hierarchies obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{ape}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself). There is also a method for object dissimilarities which inherit from class \code{"\link{dist}"}. 
} \seealso{ \code{\link{is.cl_partition}}, \code{\link{is.cl_hierarchy}} } \examples{ data("Cassini") pcl <- kmeans(Cassini$x, 3) n_of_objects(pcl) hcl <- hclust(dist(USArrests)) n_of_objects(hcl) } \keyword{cluster} clue/man/cl_ensemble.Rd0000644000175100001440000000462311547637750014556 0ustar hornikusers\name{cl_ensemble} \alias{cl_ensemble} \alias{as.cl_ensemble} \alias{is.cl_ensemble} \title{Cluster Ensembles} \description{Creation and manipulation of cluster ensembles.} \usage{ cl_ensemble(..., list = NULL) as.cl_ensemble(x) is.cl_ensemble(x) } \arguments{ \item{\dots}{R objects representing clusterings of or dissimilarities between the same objects.} \item{list}{a list of R objects as in \code{\dots}.} \item{x}{for \code{as.cl_ensemble}, an R object as in \code{\dots}; for \code{is.cl_ensemble}, an arbitrary R object.} } \details{ \code{cl_ensemble} creates \dQuote{cluster ensembles}, which are realized as lists of clusterings (or dissimilarities) with additional class information, always inheriting from \code{"cl_ensemble"}. All elements of the ensemble must have the same number of objects. If all elements are partitions, the ensemble has class \code{"cl_partition_ensemble"}; if all elements are dendrograms, it has class \code{"cl_dendrogram_ensemble"} and inherits from \code{"cl_hierarchy_ensemble"}; if all elements are hierarchies (but not always dendrograms), it has class \code{"cl_hierarchy_ensemble"}. Note that empty or \dQuote{mixed} ensembles cannot be categorized according to the kind of elements they contain, and hence only have class \code{"cl_ensemble"}. The list representation makes it possible to use \code{lapply} for computations on the individual clusterings in (i.e., the components of) a cluster ensemble. Available methods for cluster ensembles include those for subscripting, \code{c}, \code{rep}, and \code{print}. 
There is also a \code{plot} method for ensembles for which all elements can be plotted (currently, additive trees, dendrograms and ultrametrics). } \value{ \code{cl_ensemble} returns a list of the given clusterings or dissimilarities, with additional class information (see \bold{Details}). } \examples{ d <- dist(USArrests) hclust_methods <- c("ward", "single", "complete", "average", "mcquitty") hclust_results <- lapply(hclust_methods, function(m) hclust(d, m)) names(hclust_results) <- hclust_methods ## Now create an ensemble from the results. hens <- cl_ensemble(list = hclust_results) hens ## Subscripting. hens[1 : 3] ## Replication. rep(hens, 3) ## Plotting. plot(hens, main = names(hens)) ## And continue to analyze the ensemble, e.g. round(cl_dissimilarity(hens, method = "gamma"), 4) } \keyword{cluster} clue/man/cl_margin.Rd0000644000175100001440000000165011304023137014211 0ustar hornikusers\name{cl_margin} \alias{cl_margin} \title{Membership Margins} \description{ Compute the \emph{margin} of the memberships of a partition, i.e., the difference between the largest and second largest membership values of the respective objects. } \usage{ cl_margin(x) } \arguments{ \item{x}{an \R object representing a partition of objects.} } \details{ For hard partitions, the margins are always 1. For soft partitions, the margins may be taken as an indication of the \dQuote{sureness} of classifying an object to the class with maximum membership value. } \examples{ data("GVME") ## Look at the classes obtained for 1980: split(cl_object_names(GVME[["1980"]]), cl_class_ids(GVME[["1980"]])) ## Margins: x <- cl_margin(GVME[["1980"]]) ## Add names, and sort: names(x) <- cl_object_names(GVME[["1980"]]) sort(x) ## Note the "uncertainty" of assigning Egypt to the "intermediate" class ## of nations. 
} \keyword{cluster} clue/man/l1_fit_ultrametric.Rd0000644000175100001440000001111313140644252016051 0ustar hornikusers\name{l1_fit_ultrametric} \alias{l1_fit_ultrametric} \title{Least Absolute Deviation Fit of Ultrametrics to Dissimilarities} \description{ Find the ultrametric with minimal absolute distance (Manhattan dissimilarity) to a given dissimilarity object. } \usage{ l1_fit_ultrametric(x, method = c("SUMT", "IRIP"), weights = 1, control = list()) } \arguments{ \item{x}{a dissimilarity object inheriting from or coercible to class \code{"\link{dist}"}.} \item{method}{a character string indicating the fitting method to be employed. Must be one of \code{"SUMT"} (default) or \code{"IRIP"}, or a unique abbreviation thereof.} \item{weights}{a numeric vector or matrix with non-negative weights for obtaining a weighted least squares fit. If a matrix, its numbers of rows and columns must be the same as the number of objects in \code{x}, and the lower diagonal part is used. Otherwise, it is recycled to the number of elements in \code{x}.} \item{control}{a list of control parameters. See \bold{Details}.} } \value{ An object of class \code{"\link{cl_ultrametric}"} containing the fitted ultrametric distances. } \details{ The problem to be solved is minimizing \deqn{L(u) = \sum_{i,j} w_{ij} |x_{ij} - u_{ij}|} over all \eqn{u} satisfying the ultrametric constraints (i.e., for all \eqn{i, j, k}, \eqn{u_{ij} \le \max(u_{ik}, u_{jk})}). This problem is known to be NP hard (Krivanek and Moravek, 1986). We provide two heuristics for solving this problem. Method \code{"SUMT"} implements a \acronym{SUMT} (Sequential Unconstrained Minimization Technique, see \code{\link{sumt}}) approach using the sign function for the gradients of the absolute value function. Available control parameters are \code{method}, \code{control}, \code{eps}, \code{q}, and \code{verbose}, which have the same roles as for \code{\link{sumt}}, and the following. 
\describe{ \item{\code{nruns}}{an integer giving the number of runs to be performed. Defaults to 1.} \item{\code{start}}{a single dissimilarity, or a list of dissimilarities to be employed as starting values.} } Method \code{"IRIP"} implements a variant of the Iteratively Reweighted Iterative Projection approach of Smith (2001), which attempts to solve the \eqn{L_1} problem via a sequence of weighted \eqn{L_2} problems, determining \eqn{u(t+1)} by minimizing the criterion function \deqn{\sum_{i,j} w_{ij} (x_{ij} - u_{ij})^2 / \max(|x_{ij} - u_{ij}(t)|, m)} with \eqn{m} a \dQuote{small} non-zero value to avoid zero divisors. We use the \acronym{SUMT} method of \code{\link{ls_fit_ultrametric}} for solving the weighted least squares problems. Available control parameters are as follows. \describe{ \item{\code{maxiter}}{an integer giving the maximal number of iteration steps to be performed. Defaults to 100.} \item{\code{eps}}{a nonnegative number controlling the iteration, which stops when the maximal change in \eqn{u} is less than \code{eps}. Defaults to \eqn{10^{-6}}.} \item{\code{reltol}}{the relative convergence tolerance. Iteration stops when the relative change in the \eqn{L_1} criterion is less than \code{reltol}. Defaults to \eqn{10^{-6}}.} \item{\code{MIN}}{the cutoff \eqn{m}. Defaults to \eqn{10^{-3}}.} \item{\code{start}}{a dissimilarity object to be used as the starting value for \eqn{u}.} \item{\code{control}}{a list of control parameters to be used by the method of \code{\link{ls_fit_ultrametric}} employed for solving the weighted \eqn{L_2} problems.} } One may need to adjust the default control parameters to achieve convergence. It should be noted that all methods are heuristics which can not be guaranteed to find the global minimum. } \seealso{ \code{\link{cl_consensus}} for computing least absolute deviation (Manhattan) consensus hierarchies; \code{\link{ls_fit_ultrametric}}. } \references{ M. Krivanek and J. Moravek (1986). 
NP-hard problems in hierarchical tree clustering. \emph{Acta Informatica}, \bold{23}, 311--323. \doi{10.1007/BF00289116}. T. J. Smith (2001). Constructing ultrametric and additive trees based on the \eqn{L_1} norm. \emph{Journal of Classification}, \bold{18}, 185--207. \url{https://link.springer.com/article/10.1007/s00357-001-0015-0}. %% The above web page has %% \doi{10.1007/s00357-001-0015-0}. %% which does not work. Reported to the responsible DOI Registration %% Agency on 2017-08-03, let's use the URL instead of the DOI for now. } \keyword{cluster} \keyword{optimize} clue/man/cl_fuzziness.Rd0000644000175100001440000000500414021342416014774 0ustar hornikusers\name{cl_fuzziness} \alias{cl_fuzziness} \title{Partition Fuzziness} \description{ Compute the fuzziness of partitions. } \usage{ cl_fuzziness(x, method = NULL, normalize = TRUE) } \arguments{ \item{x}{a cluster ensemble of partitions, or an R object coercible to such.} \item{method}{a character string indicating the fuzziness measure to be employed, or \code{NULL} (default), or a function to be taken as a user-defined method. Currently available built-in methods are \code{"PC"} (Partition Coefficient) and \code{"PE"} (Partition Entropy), with the default corresponding to the first one. If \code{method} is a character string, its lower-cased version is matched against the lower-cased names of the available built-in methods using \code{\link{pmatch}}.} \item{normalize}{a logical indicating whether the fuzziness measure should be normalized in a way that hard partitions have value 0, and \dQuote{completely fuzzy} partitions (where for all objects, all classes get the same membership) have value 1.} } \details{ If \eqn{m} contains the membership values of a partition, the (unnormalized) Partition Coefficient and Partition Entropy are given by \eqn{\sum_{n,i} m_{n,i}^2} and \eqn{\sum_{n,i} H(m_{n,i})}, respectively, where \eqn{H(u) = u \log u - (1-u) \log(1-u)}{u log(u) - (1-u) log(1-u)}. 
Note that the normalization used here is different from the normalizations typically found in the literature. If a user-defined fuzziness method is to be employed, it must be a function taking a matrix of membership values and a logical to indicate whether normalization is to be performed as its arguments (in that order; argument names are not used). } \value{ An object of class \code{"cl_fuzziness"} giving the fuzziness values. } \references{ J. C. Bezdek (1981). \emph{Pattern Recognition with Fuzzy Objective Function Algorithms}. New York: Plenum. } \seealso{ Function \code{\link[e1071]{fclustIndex}} in package \pkg{e1071}, which also computes several other \dQuote{fuzzy cluster indexes} (typically based on more information than just the membership values). } \examples{ if(require("e1071", quietly = TRUE)) { ## Use an on-line version of fuzzy c-means from package e1071 if ## available. data("Cassini") pens <- cl_boot(Cassini$x, B = 15, k = 3, algorithm = "cmeans", parameters = list(method = "ufcl")) pens summary(cl_fuzziness(pens, "PC")) summary(cl_fuzziness(pens, "PE")) } } \keyword{cluster} clue/man/cl_medoid.Rd0000644000175100001440000000331211726357356014217 0ustar hornikusers\name{cl_medoid} \alias{cl_medoid} \title{Medoid Partitions and Hierarchies} \description{ Compute the medoid of an ensemble of partitions or hierarchies, i.e., the element of the ensemble minimizing the sum of dissimilarities to all other elements. } \usage{ cl_medoid(x, method = "euclidean") } \arguments{ \item{x}{an ensemble of partitions or hierarchies, or something coercible to that (see \code{\link{cl_ensemble}}).} \item{method}{a character string or a function, as for argument \code{method} of function \code{\link{cl_dissimilarity}}.} } \value{ The medoid partition or hierarchy. } \details{ Medoid clusterings are special cases of \dQuote{consensus} clusterings characterized as the solutions of an optimization problem. See Gordon (1999) for more information. 
The dissimilarities \code{d} for determining the medoid are obtained by calling \code{cl_dissimilarity} with arguments \code{x} and \code{method}. The medoid can then be found as the (first) row index for which the row sum of \code{as.matrix(d)} is minimal. Modulo possible differences in the case of ties, this gives the same results as (the medoid obtained by) \code{\link[cluster]{pam}} in package \pkg{cluster}. } \references{ A. D. Gordon (1999). \emph{Classification} (2nd edition). Boca Raton, FL: Chapman & Hall/CRC. } \seealso{ \code{\link{cl_consensus}} } \examples{ ## An ensemble of partitions. data("CKME") pens <- CKME[1 : 20] m1 <- cl_medoid(pens) diss <- cl_dissimilarity(pens) require("cluster") m2 <- pens[[pam(diss, 1)$medoids]] ## Agreement of medoid consensus partitions. cl_agreement(m1, m2) ## Or, more straightforwardly: table(cl_class_ids(m1), cl_class_ids(m2)) } \keyword{cluster} clue/man/cl_pclust.Rd0000644000175100001440000001036512734173132014262 0ustar hornikusers\name{cl_pclust} \alias{cl_pclust} \title{Prototype-Based Partitions of Clusterings} \description{ Compute prototype-based partitions of a cluster ensemble by minimizing \eqn{\sum w_b u_{bj}^m d(x_b, p_j)^e}, the sum of the case-weighted and membership-weighted \eqn{e}-th powers of the dissimilarities between the elements \eqn{x_b} of the ensemble and the prototypes \eqn{p_j}, for suitable dissimilarities \eqn{d} and exponents \eqn{e}. } \usage{ cl_pclust(x, k, method = NULL, m = 1, weights = 1, control = list()) } \arguments{ \item{x}{an ensemble of partitions or hierarchies, or something coercible to that (see \code{\link{cl_ensemble}}).} \item{k}{an integer giving the number of classes to be used in the partition.} \item{method}{the consensus method to be employed, see \code{\link{cl_consensus}}.} \item{m}{a number not less than 1 controlling the softness of the partition (as the \dQuote{fuzzification parameter} of the fuzzy \eqn{c}-means algorithm). 
The default value of 1 corresponds to hard partitions obtained from a generalized \eqn{k}-means problem; values greater than one give partitions of increasing softness obtained from a generalized fuzzy \eqn{c}-means problem.} \item{weights}{a numeric vector of non-negative case weights. Recycled to the number of elements in the ensemble given by \code{x} if necessary.} \item{control}{a list of control parameters. See \bold{Details}.} } \value{ An object of class \code{"cl_partition"} representing the obtained \dQuote{secondary} partition by an object of class \code{"cl_pclust"}, which is a list containing at least the following components. \item{prototypes}{a cluster ensemble with the \eqn{k} prototypes.} \item{membership}{an object of class \code{"\link{cl_membership}"} with the membership values \eqn{u_{bj}}.} \item{cluster}{the class ids of the nearest hard partition.} \item{silhouette}{Silhouette information for the partition, see \code{\link[cluster]{silhouette}}.} \item{validity}{precomputed validity measures for the partition.} \item{m}{the softness control argument.} \item{call}{the matched call.} \item{d}{the dissimilarity function \eqn{d = d(x, p)} employed.} \item{e}{the exponent \eqn{e} employed.} } \details{ Partitioning is performed using \code{\link{pclust}} via a family constructed from \code{method}. The dissimilarities \eqn{d} and exponent \eqn{e} are implied by the consensus method employed, and inferred via a registration mechanism currently only made available to built-in consensus methods. The default methods compute Least Squares Euclidean consensus clusterings, i.e., use Euclidean dissimilarity \eqn{d} and \eqn{e = 2}. For \eqn{m = 1}, the partitioning procedure was introduced by Gaul and Schader (1988) for \dQuote{Clusterwise Aggregation of Relations} (with the same domains), containing equivalence relations, i.e., hard partitions, as a special case. Available control parameters are as for \code{\link{pclust}}. 
The fixed point approach employed is a heuristic which cannot be guaranteed to find the global minimum (as this is already true for the computation of consensus clusterings). Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the base algorithm. } \references{ J. C. Bezdek (1981). \emph{Pattern recognition with fuzzy objective function algorithms}. New York: Plenum. W. Gaul and M. Schader (1988). Clusterwise aggregation of relations. \emph{Applied Stochastic Models and Data Analysis}, \bold{4}:273--282. \doi{10.1002/asm.3150040406}. } \examples{ ## Use a precomputed ensemble of 50 k-means partitions of the ## Cassini data. data("CKME") CKME <- CKME[1 : 30] # for saving precious time ... diss <- cl_dissimilarity(CKME) hc <- hclust(diss) plot(hc) ## This suggests using a partition with three classes, which can be ## obtained using cutree(hc, 3). Could use cl_consensus() to compute ## prototypes as the least squares consensus clusterings of the classes, ## or alternatively: set.seed(123) x1 <- cl_pclust(CKME, 3, m = 1) x2 <- cl_pclust(CKME, 3, m = 2) ## Agreement of solutions. cl_dissimilarity(x1, x2) table(cl_class_ids(x1), cl_class_ids(x2)) } \keyword{cluster} clue/man/lattice.Rd0000644000175100001440000001235614021342636013717 0ustar hornikusers\name{lattice} \encoding{UTF-8} \alias{cl_meet} \alias{cl_join} \alias{Ops.cl_partition} \alias{Summary.cl_partition} \alias{Ops.cl_dendrogram} \alias{Ops.cl_hierarchy} \alias{Summary.cl_hierarchy} \title{Cluster Lattices} \description{ Computations on the lattice of all (hard) partitions, or the lattice of all dendrograms, or the meet semilattice of all hierarchies (\eqn{n}-trees) of/on a set of objects: meet, join, and comparisons. 
} \usage{ cl_meet(x, y) cl_join(x, y) } \arguments{ \item{x}{an ensemble of partitions or dendrograms or hierarchies, or an R object representing a partition or dendrogram or hierarchy.} \item{y}{an R object representing a partition or dendrogram or hierarchy. Ignored if \code{x} is an ensemble.} } \details{ For a given finite set of objects \eqn{X}, the set \eqn{H(X)} of all (hard) partitions of \eqn{X} can be partially ordered by defining a partition \eqn{P} to be \dQuote{finer} than a partition \eqn{Q}, i.e., \eqn{P \le Q}, if each class of \eqn{P} is contained in some class of \eqn{Q}. With this partial order, \eqn{H(X)} becomes a bounded \dfn{lattice}, with intersection and union of two elements given by their greatest lower bound (\dfn{meet}) and their least upper bound (\dfn{join}), respectively. Specifically, the meet of two partitions computed by \code{cl_meet} is the partition obtained by intersecting the classes of the partitions; the classes of the join computed by \code{cl_join} are obtained by joining all elements in the same class in at least one of the partitions. Obviously, the least and greatest elements of the partition lattice are the partitions where each object is in a single class (sometimes referred to as the \dQuote{splitter} partition) or in the same class (the \dQuote{lumper} partition), respectively. Meet and join of an arbitrary number of partitions can be defined recursively. In addition to computing the meet and join, the comparison operations corresponding to the above partial order as well as \code{min}, \code{max}, and \code{range} are available at least for R objects representing partitions inheriting from \code{"\link{cl_partition}"}. The summary methods give the meet and join of the given partitions (for \code{min} and \code{max}), or a partition ensemble with the meet and join (for \code{range}). If the partitions specified by \code{x} and \code{y} are soft partitions, the corresponding nearest hard partitions are used. 
Future versions may optionally provide suitable \dQuote{soft} (fuzzy) extensions for computing meets and joins. The set of all dendrograms on \eqn{X} can be ordered using pointwise inequality of the associated ultrametric dissimilarities: i.e., if \eqn{D} and \eqn{E} are the dendrograms with ultrametrics \eqn{u} and \eqn{v}, respectively, then \eqn{D \le E} if \eqn{u_{ij} \le v_{ij}} for all pairs \eqn{(i, j)} of objects. This again yields a lattice (of dendrograms). The join of \eqn{D} and \eqn{E} is the dendrogram with ultrametrics given by \eqn{\max(u_{ij}, v_{ij})} (as this gives an ultrametric); the meet is the dendrogram with the maximal ultrametric dominated by \eqn{\min(u_{ij}, v_{ij})}, and can be obtained by applying single linkage hierarchical clustering to the minima. The set of all hierarchies on \eqn{X} can be ordered by set-wise inclusion of the classes: i.e., if \eqn{H} and \eqn{G} are two hierarchies, then \eqn{H \le G} if all classes of \eqn{H} are also classes of \eqn{G}. This yields a meet semilattice, with meet given by the classes contained in both hierarchies. The join only exists if the union of the classes is a hierarchy. In each case, a modular semilattice is obtained, which allows for a natural metrization via least element (semi)lattice move distances, see Barthélémy, Leclerc and Monjardet (1986). These latticial metrics are given by the BA/C (partitions), Manhattan (dendrograms), and symdiff (hierarchies) dissimilarities, respectively (see \code{\link{cl_dissimilarity}}). } \value{ For \code{cl_meet} and \code{cl_join}, an object of class \code{"\link{cl_partition}"} or \code{"\link{cl_dendrogram}"} with the class ids or ultrametric dissimilarities of the meet and join of the partitions or dendrograms, respectively. } \references{ J.-P. Barthélémy, B. Leclerc and B. Monjardet (1986). On the use of ordered sets in problems of comparison and consensus of classification. \emph{Journal of Classification}, \bold{3}, 187--224. 
\doi{10.1007/BF01894188}. } \examples{ ## Two simple partitions of 7 objects. A <- as.cl_partition(c(1, 1, 2, 3, 3, 5, 5)) B <- as.cl_partition(c(1, 2, 2, 3, 4, 5, 5)) ## These disagree on objects 1-3, A splits objects 4 and 5 into ## separate classes. Objects 6 and 7 are always in the same class. (A <= B) || (B <= A) ## (Neither partition is finer than the other.) cl_meet(A, B) cl_join(A, B) ## Meeting with the lumper (greatest) or joining with the splitter ## (least) partition does not make a difference: C_lumper <- as.cl_partition(rep(1, n_of_objects(A))) cl_meet(cl_ensemble(A, B, C_lumper)) C_splitter <- as.cl_partition(seq_len(n_of_objects(A))) cl_join(cl_ensemble(A, B, C_splitter)) ## Another way of computing the join: range(A, B, C_splitter)$max } \keyword{cluster} clue/man/GVME.Rd0000644000175100001440000000207412734171420013024 0ustar hornikusers\name{GVME} \alias{GVME} \title{Gordon-Vichi Macroeconomic Partition Ensemble Data} \description{ Soft partitions of 21 countries based on macroeconomic data for the years 1975, 1980, 1985, 1990, and 1995. } \usage{data("GVME")} \format{ A named cluster ensemble of 5 soft partitions of 21 countries into 2 or 3 classes. The names are the years to which the partitions correspond. } \details{ The partitions were obtained using fuzzy \eqn{c}-means on measurements of the following variables: the annual per capita gross domestic product (GDP) in USD (converted to 1987 prices); the percentage of GDP provided by agriculture; the percentage of employees who worked in agriculture; and gross domestic investment, expressed as a percentage of the GDP. See Gordon and Vichi (2001), page 230, for more details. } \source{ Table 1 in Gordon and Vichi (2001). } \references{ A. D. Gordon and M. Vichi (2001). Fuzzy partition models for fitting a set of partitions. \emph{Psychometrika}, \bold{66}, 229--248. \doi{10.1007/BF02294837}. 
} \keyword{datasets} clue/man/cl_pam.Rd0000644000175100001440000000515312734173110013520 0ustar hornikusers\name{cl_pam} \alias{cl_pam} \title{K-Medoids Partitions of Clusterings} \description{ Compute \eqn{k}-medoids partitions of clusterings. } \usage{ cl_pam(x, k, method = "euclidean", solver = c("pam", "kmedoids")) } \arguments{ \item{x}{an ensemble of partitions or hierarchies, or something coercible to that (see \code{\link{cl_ensemble}}).} \item{k}{an integer giving the number of classes to be used in the partition.} \item{method}{a character string or a function, as for argument \code{method} of function \code{\link{cl_dissimilarity}}.} \item{solver}{a character string indicating the \eqn{k}-medoids solver to be employed. May be abbreviated. If \code{"pam"} (default), the Partitioning Around Medoids (Kaufman & Rousseeuw (1990), Chapter 2) heuristic \code{\link[cluster]{pam}} of package \pkg{cluster} is used. Otherwise, the exact algorithm of \code{\link{kmedoids}} is employed.} } \value{ An object of class \code{"cl_pam"} representing the obtained \dQuote{secondary} partition, which is a list with the following components. \item{cluster}{the class ids of the partition.} \item{medoid_ids}{the indices of the medoids.} \item{prototypes}{a cluster ensemble with the \eqn{k} prototypes (medoids).} \item{criterion}{the value of the criterion function of the partition.} \item{description}{a character string indicating the dissimilarity method employed.} } \details{ An optimal \eqn{k}-medoids partition of the given cluster ensemble is defined as a partition of the objects \eqn{x_i} (the elements of the ensemble) into \eqn{k} classes \eqn{C_1, \ldots, C_k} such that the criterion function \eqn{L = \sum_{l=1}^k \min_{j \in C_l} \sum_{i \in C_l} d(x_i, x_j)} is minimized. 
Such secondary partitions (e.g., Gordon & Vichi, 1998) are obtained by computing the dissimilarities \eqn{d} of the objects in the ensemble for the given dissimilarity method, and applying a dissimilarity-based \eqn{k}-medoids solver to \eqn{d}. } \references{ L. Kaufman and P. J. Rousseeuw (1990). \emph{Finding Groups in Data: An Introduction to Cluster Analysis}. Wiley, New York. A. D. Gordon and M. Vichi (1998). Partitions of partitions. \emph{Journal of Classification}, \bold{15}, 265--285. \doi{10.1007/s003579900034}. } \seealso{ \code{\link{cl_pclust}} for more general prototype-based partitions of clusterings. } \examples{ data("Kinship82") party <- cl_pam(Kinship82, 3, "symdiff") ## Compare results with tables 5 and 6 in Gordon & Vichi (1998). party lapply(cl_prototypes(party), cl_classes) table(cl_class_ids(party)) } \keyword{cluster} clue/man/ls_fit_ultrametric.Rd0000644000175100001440000002076712734173740016201 0ustar hornikusers\name{ls_fit_ultrametric} \encoding{UTF-8} \alias{ls_fit_ultrametric} \title{Least Squares Fit of Ultrametrics to Dissimilarities} \description{ Find the ultrametric with minimal square distance (Euclidean dissimilarity) to given dissimilarity objects. } \usage{ ls_fit_ultrametric(x, method = c("SUMT", "IP", "IR"), weights = 1, control = list()) } \arguments{ \item{x}{a dissimilarity object inheriting from or coercible to class \code{"\link{dist}"}, or an ensemble of such objects.} \item{method}{a character string indicating the fitting method to be employed. Must be one of \code{"SUMT"} (default), \code{"IP"}, or \code{"IR"}, or a unique abbreviation thereof.} \item{weights}{a numeric vector or matrix with non-negative weights for obtaining a weighted least squares fit. If a matrix, its numbers of rows and columns must be the same as the number of objects in \code{x}, and the lower diagonal part is used. Otherwise, it is recycled to the number of elements in \code{x}.} \item{control}{a list of control parameters. 
See \bold{Details}.} } \value{ An object of class \code{"\link{cl_ultrametric}"} containing the fitted ultrametric distances. } \details{ For a single dissimilarity object \code{x}, the problem to be solved is minimizing \deqn{L(u) = \sum_{i,j} w_{ij} (x_{ij} - u_{ij})^2} over all \eqn{u} satisfying the ultrametric constraints (i.e., for all \eqn{i, j, k}, \eqn{u_{ij} \le \max(u_{ik}, u_{jk})}). This problem is known to be NP hard (Krivanek and Moravek, 1986). For an ensemble of dissimilarity objects, the criterion function is \deqn{L(u) = \sum_b w_b \sum_{i,j} w_{ij} (x_{ij}(b) - u_{ij})^2,} where \eqn{w_b} is the weight given to element \eqn{x_b} of the ensemble and can be specified via control parameter \code{weights} (default: all ones). This problem reduces to the above basic problem with \eqn{x} as the \eqn{w_b}-weighted mean of the \eqn{x_b}. We provide three heuristics for solving the basic problem. Method \code{"SUMT"} implements the \acronym{SUMT} (Sequential Unconstrained Minimization Technique, Fiacco and McCormick, 1968) approach of de Soete (1986) which in turn simplifies the suggestions in Carroll and Pruzansky (1980). (See \code{\link{sumt}} for more information on the \acronym{SUMT} approach.) We then use a final single linkage hierarchical clustering step to ensure that the returned object exactly satisfies the ultrametric constraints. The starting value \eqn{u_0} is obtained by \dQuote{random shaking} of the given dissimilarity object (if not given). If there are missing values in \code{x}, i.e., the given dissimilarities are \emph{incomplete}, we follow a suggestion of de Soete (1984), imputing the missing values by the weighted mean of the non-missing ones, and setting the corresponding weights to zero. Available control parameters are \code{method}, \code{control}, \code{eps}, \code{q}, and \code{verbose}, which have the same roles as for \code{\link{sumt}}, and the following. 
\describe{ \item{\code{nruns}}{an integer giving the number of runs to be performed. Defaults to 1.} \item{\code{start}}{a single dissimilarity, or a list of dissimilarities to be employed as starting values.} } The default optimization using conjugate gradients should work reasonably well for medium to large size problems. For \dQuote{small} ones, using \code{nlm} is usually faster. Note that the number of ultrametric constraints is of the order \eqn{n^3}, where \eqn{n} is the number of objects in the dissimilarity object, suggesting to use the \acronym{SUMT} approach in favor of \code{\link[stats]{constrOptim}}. If starting values for the \acronym{SUMT} are provided via \code{start}, the number of starting values gives the number of runs to be performed, and control option \code{nruns} is ignored. Otherwise, \code{nruns} starting values are obtained by random shaking of the dissimilarity to be fitted. In the case of multiple \acronym{SUMT} runs, the (first) best solution found is returned. Method \code{"IP"} implements the Iterative Projection approach of Hubert and Arabie (1995). This iteratively projects the current dissimilarities to the closed convex set given by the ultrametric constraints (3-point conditions) for a single index triple \eqn{(i, j, k)}, in fact replacing the two largest values among \eqn{d_{ij}, d_{ik}, d_{jk}} by their mean. The following control parameters can be provided via the \code{control} argument. \describe{ \item{\code{nruns}}{an integer giving the number of runs to be performed. Defaults to 1.} \item{\code{order}}{a permutation of the numbers from 1 to the number of objects in \code{x}, specifying the order in which the ultrametric constraints are considered, or a list of such permutations.} \item{\code{maxiter}}{an integer giving the maximal number of iterations to be employed.} \item{\code{tol}}{a double indicating the maximal convergence tolerance. 
The algorithm stops if the total absolute change in the dissimilarities in an iteration is less than \code{tol}.} \item{\code{verbose}}{a logical indicating whether to provide some output on minimization progress. Defaults to \code{getOption("verbose")}.} } If permutations are provided via \code{order}, the number of these gives the number of runs to be performed, and control option \code{nruns} is ignored. Otherwise, \code{nruns} randomly generated orders are tried. In the case of multiple runs, the (first) best solution found is returned. Non-identical weights and incomplete dissimilarities are currently not supported. Method \code{"IR"} implements the Iterative Reduction approach suggested by Roux (1988), see also Barthélémy and Guénoche (1991). This is similar to the Iterative Projection method, but modifies the dissimilarities between objects proportionally to the aggregated change incurred from the ultrametric projections. Available control parameters are identical to those of method \code{"IP"}. Non-identical weights and incomplete dissimilarities are currently not supported. It should be noted that all methods are heuristics which can not be guaranteed to find the global minimum. Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the base algorithm. } \references{ J.-P. Barthélémy and A. Guénoche (1991). \emph{Trees and proximity representations}. Chichester: John Wiley & Sons. ISBN 0-471-92263-3. J. D. Carroll and S. Pruzansky (1980). Discrete and hybrid scaling models. In E. D. Lantermann and H. Feger (eds.), \emph{Similarity and Choice}. Bern (Switzerland): Huber. L. Hubert and P. Arabie (1995). Iterative projection strategies for the least squares fitting of tree structures to proximity data. \emph{British Journal of Mathematical and Statistical Psychology}, \bold{48}, 281--317. \doi{10.1111/j.2044-8317.1995.tb01065.x}. M. Krivanek and J. Moravek (1986). 
NP-hard problems in hierarchical tree clustering. \emph{Acta Informatica}, \bold{23}, 311--323. \doi{10.1007/BF00289116}. M. Roux (1988). Techniques of approximation for building two tree structures. In C. Hayashi and E. Diday and M. Jambu and N. Ohsumi (Eds.), \emph{Recent Developments in Clustering and Data Analysis}, pages 151--170. New York: Academic Press. G. de Soete (1984). Ultrametric tree representations of incomplete dissimilarity data. \emph{Journal of Classification}, \bold{1}, 235--242. \doi{10.1007/BF01890124}. G. de Soete (1986). A least squares algorithm for fitting an ultrametric tree to a dissimilarity matrix. \emph{Pattern Recognition Letters}, \bold{2}, 133--137. \doi{10.1016/0167-8655(84)90036-9}. } \seealso{ \code{\link{cl_consensus}} for computing least squares (Euclidean) consensus hierarchies by least squares fitting of average ultrametric distances; \code{\link{l1_fit_ultrametric}}. } \examples{ ## Least squares fit of an ultrametric to the Miller-Nicely consonant ## phoneme confusion data. data("Phonemes") ## Note that the Phonemes data set has the consonant misclassification ## probabilities, i.e., the similarities between the phonemes. d <- as.dist(1 - Phonemes) u <- ls_fit_ultrametric(d, control = list(verbose = TRUE)) ## Cophenetic correlation: cor(d, u) ## Plot: plot(u) ## ("Basically" the same as Figure 1 in de Soete (1986).) } \keyword{cluster} \keyword{optimize} clue/man/ls_fit_addtree.Rd0000644000175100001440000000676612734173577015271 0ustar hornikusers\name{ls_fit_addtree} \encoding{UTF-8} \alias{ls_fit_addtree} \alias{ls_fit_centroid} \title{Least Squares Fit of Additive Tree Distances to Dissimilarities} \description{ Find the additive tree distance or centroid distance minimizing least squares distance (Euclidean dissimilarity) to a given dissimilarity object. 
} \usage{ ls_fit_addtree(x, method = c("SUMT", "IP", "IR"), weights = 1, control = list()) ls_fit_centroid(x) } \arguments{ \item{x}{a dissimilarity object inheriting from class \code{"\link{dist}"}.} \item{method}{a character string indicating the fitting method to be employed. Must be one of \code{"SUMT"} (default), \code{"IP"}, or \code{"IR"}, or a unique abbreviation thereof.} \item{weights}{a numeric vector or matrix with non-negative weights for obtaining a weighted least squares fit. If a matrix, its numbers of rows and columns must be the same as the number of objects in \code{x}, and the lower diagonal part is used. Otherwise, it is recycled to the number of elements in \code{x}.} \item{control}{a list of control parameters. See \bold{Details}.} } \value{ An object of class \code{"cl_addtree"} containing the optimal additive tree distances. } \details{ See \code{\link{as.cl_addtree}} for details on additive tree distances and centroid distances. With \eqn{L(d) = \sum w_{ij} (x_{ij} - d_{ij})^2}, the problem to be solved by \code{ls_fit_addtree} is minimizing \eqn{L} over all additive tree distances \eqn{d}. This problem is known to be NP hard. We provide three heuristics for solving this problem. Method \code{"SUMT"} implements the \acronym{SUMT} (Sequential Unconstrained Minimization Technique, Fiacco and McCormick, 1968) approach of de Soete (1983). Incomplete dissimilarities are currently not supported. Methods \code{"IP"} and \code{"IR"} implement the Iterative Projection and Iterative Reduction approaches of Hubert and Arabie (1995) and Roux (1988), respectively. Non-identical weights and incomplete dissimilarities are currently not supported. See \code{\link{ls_fit_ultrametric}} for details on these methods and available control parameters. It should be noted that all methods are heuristics which can not be guaranteed to find the global minimum. 
Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the base algorithm. \code{ls_fit_centroid} finds the centroid distance \eqn{d} minimizing \eqn{L(d)} (currently, only for the case of identical weights). This optimization problem has a closed-form solution. } \references{ A. V. Fiacco and G. P. McCormick (1968). \emph{Nonlinear programming: Sequential unconstrained minimization techniques}. New York: John Wiley & Sons. L. Hubert and P. Arabie (1995). Iterative projection strategies for the least squares fitting of tree structures to proximity data. \emph{British Journal of Mathematical and Statistical Psychology}, \bold{48}, 281--317. \doi{10.1111/j.2044-8317.1995.tb01065.x}. M. Roux (1988). Techniques of approximation for building two tree structures. In C. Hayashi and E. Diday and M. Jambu and N. Ohsumi (Eds.), \emph{Recent Developments in Clustering and Data Analysis}, pages 151--170. New York: Academic Press. G. de Soete (1983). A least squares algorithm for fitting additive trees to proximity data. \emph{Psychometrika}, \bold{48}, 621--626. \doi{10.1007/BF02293884}. } \keyword{cluster} \keyword{optimize} clue/man/hierarchy.Rd0000644000175100001440000000735412211412651014244 0ustar hornikusers\name{hierarchy} \alias{cl_hierarchy} % class ... \alias{is.cl_hierarchy} \alias{as.cl_hierarchy} \alias{cl_dendrogram} % class ... \alias{is.cl_dendrogram} \alias{as.cl_dendrogram} \alias{plot.cl_dendrogram} \title{Hierarchies} \description{ Determine whether an R object represents a hierarchy of objects, or coerce to an R object representing such.} \usage{ is.cl_hierarchy(x) is.cl_dendrogram(x) as.cl_hierarchy(x) as.cl_dendrogram(x) } \arguments{ \item{x}{an R object.} } \value{ For the testing functions, a logical indicating whether the given object represents a clustering of objects of the respective kind. 
For the coercion functions, a container object inheriting from \code{"cl_hierarchy"}, with a suitable representation of the hierarchy given by \code{x}. } \details{ These functions are generic functions. The methods provided in package \pkg{clue} handle the partitions and hierarchies obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{ape}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself). The hierarchies considered by \pkg{clue} are \emph{\eqn{n}-trees} (hierarchies in the strict sense) and \emph{dendrograms} (also known as valued \eqn{n}-trees or total indexed hierarchies), which are represented by the virtual classes \code{"cl_hierarchy"} and \code{"cl_dendrogram"} (which inherits from the former), respectively. \eqn{n}-trees on a set \eqn{X} of objects correspond to collections \eqn{H} of subsets of \eqn{X}, usually called \emph{classes} of the hierarchy, which satisfy the following properties: \itemize{ \item \eqn{H} contains all singletons with objects of \eqn{X}, \eqn{X} itself, but not the empty set; \item The intersection of two sets \eqn{A} and \eqn{B} in \eqn{H} is either empty or one of the sets. } The classes of a hierarchy can be obtained by \code{\link{cl_classes}}. Dendrograms are \eqn{n}-trees where additionally a height \eqn{h} is associated with each of the classes, so that for two classes \eqn{A} and \eqn{B} with non-empty intersection we have \eqn{h(A) \le h(B)} iff \eqn{A} is a subset of \eqn{B}. For each pair of objects one can then define \eqn{u_{ij}} as the height of the smallest class containing both \eqn{i} and \eqn{j}: this results in a dissimilarity on \eqn{X} which satisfies the ultrametric (3-point) conditions \eqn{u_{ij} \le \max(u_{ik}, u_{jk})} for all triples \eqn{(i, j, k)} of objects. Conversely, an ultrametric dissimilarity induces a unique dendrogram. 
The ultrametric dissimilarities of a dendrogram can be obtained by \code{\link{cl_ultrametric}}. \code{as.cl_hierarchy} returns an object of class \code{"cl_hierarchy"} \dQuote{containing} the given object \code{x} if this already represents a hierarchy (i.e., \code{is.cl_hierarchy(x)} is true), or the ultrametric obtained from \code{x} via \code{\link{as.cl_ultrametric}}. \code{as.cl_dendrogram} returns an object which has class \code{"cl_dendrogram"} and inherits from \code{"cl_hierarchy"}, and contains \code{x} if it represents a dendrogram (i.e., \code{is.cl_dendrogram(x)} is true), or the ultrametric obtained from \code{x}. Conceptually, hierarchies and dendrograms are \emph{virtual} classes, allowing for a variety of representations. There are group methods for comparing dendrograms and computing their minimum, maximum, and range based on the meet and join operations, see \code{\link{cl_meet}}. There is also a \code{plot} method. } \examples{ hcl <- hclust(dist(USArrests)) is.cl_dendrogram(hcl) is.cl_hierarchy(hcl) } \keyword{cluster} clue/man/cl_object_names.Rd0000644000175100001440000000176712211412557015404 0ustar hornikusers\name{cl_object_names} \alias{cl_object_names} \title{Find Object Names} \description{ Find the names of the objects from which a taxonomy (partition or hierarchy) or proximity was obtained. } \usage{ cl_object_names(x) } \arguments{ \item{x}{an \R object representing a taxonomy or proximity.} } \value{ A character vector of length \code{\link{n_of_objects}(x)} in case the names of the objects could be determined, or \code{NULL}. } \details{ This is a generic function. 
The methods provided in package \pkg{clue} handle the partitions and hierarchies obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{ape}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself), in as much as possible. There is also a method for object dissimilarities which inherit from class \code{"\link{dist}"}. } \keyword{cluster} clue/man/cl_dissimilarity.Rd0000644000175100001440000003665512734174403015652 0ustar hornikusers\name{cl_dissimilarity} \encoding{UTF-8} \alias{cl_dissimilarity} \title{Dissimilarity Between Partitions or Hierarchies} \description{Compute the dissimilarity between (ensembles) of partitions or hierarchies.} \usage{ cl_dissimilarity(x, y = NULL, method = "euclidean", \dots) } \arguments{ \item{x}{an ensemble of partitions or hierarchies and dissimilarities, or something coercible to that (see \code{\link{cl_ensemble}}).} \item{y}{\code{NULL} (default), or as for \code{x}.} \item{method}{a character string specifying one of the built-in methods for computing dissimilarity, or a function to be taken as a user-defined method. If a character string, its lower-cased version is matched against the lower-cased names of the available built-in methods using \code{\link{pmatch}}. See \bold{Details} for available built-in methods.} \item{\dots}{further arguments to be passed to methods.} } \value{ If \code{y} is \code{NULL}, an object of class \code{"cl_dissimilarity"} containing the dissimilarities between all pairs of components of \code{x}. Otherwise, an object of class \code{"cl_cross_dissimilarity"} with the dissimilarities between the components of \code{x} and the components of \code{y}. } \details{ If \code{y} is given, its components must be of the same kind as those of \code{x} (i.e., components must either all be partitions, or all be hierarchies or dissimilarities). 
If all components are partitions, the following built-in methods for measuring dissimilarity between two partitions with respective membership matrices \eqn{u} and \eqn{v} (brought to a common number of columns) are available: \describe{ \item{\code{"euclidean"}}{the Euclidean dissimilarity of the memberships, i.e., the square root of the minimal sum of the squared differences of \eqn{u} and all column permutations of \eqn{v}. See Dimitriadou, Weingessel and Hornik (2002).} \item{\code{"manhattan"}}{the Manhattan dissimilarity of the memberships, i.e., the minimal sum of the absolute differences of \eqn{u} and all column permutations of \eqn{v}.} \item{\code{"comemberships"}}{the Euclidean dissimilarity of the elements of the co-membership matrices \eqn{C(u) = u u'} and \eqn{C(v)}, i.e., the square root of the sum of the squared differences of \eqn{C(u)} and \eqn{C(v)}.} \item{\code{"symdiff"}}{the cardinality of the symmetric set difference of the sets of co-classified pairs of distinct objects in the partitions. I.e., the number of distinct pairs of objects in the same class in exactly one of the partitions. (Alternatively, the cardinality of the symmetric set difference between the (binary) equivalence relations corresponding to the partitions.) For soft partitions, (currently) the symmetric set difference of the corresponding nearest hard partitions is used.} \item{\code{"Rand"}}{the Rand distance, i.e., the rate of distinct pairs of objects in the same class in exactly one of the partitions. (Related to the Rand index \eqn{a} via the linear transformation \eqn{d = (1 - a) / 2}.) 
For soft partitions, (currently) the Rand distance of the corresponding nearest hard partitions is used.} \item{\code{"GV1"}}{the square root of the dissimilarity \eqn{\Delta_1}{Delta_1} used for the first model in Gordon and Vichi (2001), i.e., the square root of the minimal sum of the squared differences of the \emph{matched} non-zero columns of \eqn{u} and \eqn{v}.} \item{\code{"BA/\var{d}"}}{distance measures for hard partitions discussed in Boorman and Arabie (1972), with \var{d} one of \samp{A}, \samp{C}, \samp{D}, or \samp{E}. For soft partitions, the distances of the corresponding nearest hard partitions are used. \code{"BA/A"} is the minimum number of single element moves (move from one class to another or a new one) needed to transform one partition into the other. Introduced in Rubin (1967). \code{"BA/C"} is the minimum number of lattice moves for transforming one partition into the other, where partitions are said to be connected by a lattice move if one is \emph{just} finer than the other (i.e., there is no other partition between them) in the partition lattice (see \code{\link{cl_meet}}). Equivalently, with \eqn{z} the join of \code{x} and \code{y} and \eqn{S} giving the number of classes, this can be written as \eqn{S(x) + S(y) - 2 S(z)}. Attributed to David Pavy. \code{"BA/D"} is the \dQuote{pair-bonds} distance, which can be defined as \eqn{S(x) + S(y) - 2 S(z)}, with \eqn{z} the meet of \code{x} and \code{y} and \eqn{S} the \emph{supervaluation} (i.e., non-decreasing with respect to the partial order on the partition lattice) function \eqn{\sum_i (n_i (n_i - 1)) / (n (n - 1))}, where the \eqn{n_i} are the numbers of objects in the respective classes of the partition (such that \eqn{n_i (n_i - 1) / 2} are the numbers of pair bonds in the classes), and \eqn{n} the total number of objects. 
\code{"BA/E"} is the normalized information distance, defined as \eqn{1 - I / H}, where \eqn{I} is the average mutual information between the partitions, and \eqn{H} is the average entropy of the meet \eqn{z} of the partitions. Introduced in Rajski (1961). (Boorman and Arabie also discuss a distance measure (\eqn{B}) based on the minimum number of set moves needed to transform one partition into the other, which, differently from the \eqn{A} and \eqn{C} distance measures is hard to compute (Day, 1981) and (currently) not provided.)} \item{\code{"VI"}}{Variation of Information, see Meila (2003). If \code{\dots} has an argument named \code{weights}, it is taken to specify case weights.} \item{\code{"Mallows"}}{the Mallows-type distance by Zhou, Li and Zha (2005), which is related to the Monge-Kantorovich mass transfer problem, and given as the \eqn{p}-th root of the minimal value of the transportation problem \eqn{\sum w_{jk} \sum_i |u_{ij} - v_{ik}| ^ p} with constraints \eqn{w_{jk} \ge 0}, \eqn{\sum_j w_{jk} = \alpha_j}, \eqn{\sum_k w_{jk} = \beta_k}, where \eqn{\sum_j \alpha_j = \sum_k \beta_k}. The parameters \eqn{p}, \eqn{\alpha} and \eqn{\beta} all default to one (in this case, the Mallows distance coincides with the Manhattan dissimilarity), and can be specified via additional arguments named \code{p}, \code{alpha}, and \code{beta}, respectively.} \item{\code{"CSSD"}}{the Cluster Similarity Sensitive Distance of Zhou, Li and Zha (2005), which is given as the minimal value of \eqn{\sum_{k,l} (1 - 2 w_{kl} / (\alpha_k + \beta_l)) L_{kl}}, where \eqn{L_{kl} = \sum_i u_{ik} v_{il} d(p_{x;k}, p_{y;l})} with \eqn{p_{x;k}} and \eqn{p_{y;l}} the prototype of the \eqn{k}-th class of \code{x} and the \eqn{l}-th class of \code{y}, respectively, \eqn{d} is the distance between these, and the \eqn{w_{kl}} as for Mallows distance. If prototypes are matrices, the Euclidean distance between these is used as default. 
Using the additional argument \code{L}, one can give a matrix of \eqn{L_{kl}} values, or the function \eqn{d}. Parameters \eqn{\alpha} and \eqn{\beta} all default to one, and can be specified via additional arguments named \code{alpha} and \code{beta}, respectively.} } For hard partitions, both Manhattan and squared Euclidean dissimilarity give twice the \emph{transfer distance} (Charon et al., 2005), which is the minimum number of objects that must be removed so that the implied partitions (restrictions to the remaining objects) are identical. This is also known as the \emph{\eqn{R}-metric} in Day (1981), i.e., the number of augmentations and removals of single objects needed to transform one partition into the other, and the \emph{partition-distance} in Gusfield (2002), and equals twice the number of single element moves distance of Boorman and Arabie. For hard partitions, the pair-bonds (Boorman-Arabie \eqn{D}) distance is identical to the Rand distance, and can also be written as the Manhattan distance between the co-membership matrices corresponding to the partitions, or equivalently, their symdiff distance, normalized by \eqn{n (n - 1)}. If all components are hierarchies, available built-in methods for measuring dissimilarity between two hierarchies with respective ultrametrics \eqn{u} and \eqn{v} are as follows. 
\describe{ \item{\code{"euclidean"}}{the Euclidean dissimilarity of the ultrametrics (i.e., the square root of the sum of the squared differences of \eqn{u} and \eqn{v}).} \item{\code{"manhattan"}}{the Manhattan dissimilarity of the ultrametrics (i.e., the sum of the absolute differences of \eqn{u} and \eqn{v}).} \item{\code{"cophenetic"}}{\eqn{1 - c^2}, where \eqn{c} is the cophenetic correlation coefficient (i.e., the product-moment correlation of the ultrametrics).} \item{\code{"gamma"}}{the rate of inversions between the ultrametrics (i.e., the rate of pairs \eqn{(i,j)} and \eqn{(k,l)} for which \eqn{u_{ij} < u_{kl}} and \eqn{v_{ij} > v_{kl}}).} \item{\code{"symdiff"}}{the cardinality of the symmetric set difference of the sets of classes (hierarchies in the strict sense) induced by the dendrograms. I.e., the number of sets of objects obtained by a split in exactly one of the hierarchies.} \item{\code{"Chebyshev"}}{the Chebyshev (maximal) dissimilarity of the ultrametrics (i.e., the maximum of the absolute differences of \eqn{u} and \eqn{v}).} \item{\code{"Lyapunov"}}{the logarithm of the product of the maximal and minimal ratios of the ultrametrics. This is also known as the \dQuote{Hilbert projective metric} on the cone represented by the ultrametrics (e.g., Jardine & Sibson (1971), page 107), and only defined for \emph{strict} ultrametrics (which are strictly positive for distinct objects).} \item{\code{"BO"}}{the \eqn{m_\delta} family of tree metrics by Boorman and Olivier (1973), which are of the form \eqn{m_\delta = \int_0^\infty \delta(p(h), q(h)) dh}, where \eqn{p(h)} and \eqn{q(h)} are the hard partitions obtained by cutting the trees (dendrograms) at height \eqn{h}, and \eqn{\delta} is a suitable dissimilarity measure for partitions. In particular, when taking \eqn{\delta} as symdiff or Rand dissimilarity, \eqn{m_\delta} is the Manhattan dissimilarity of the hierarchies. 
If \code{\dots} has an argument named \code{delta} it is taken to specify the partition dissimilarity \eqn{\delta} to be employed.} \item{\code{"spectral"}}{the spectral norm (2-norm) of the differences of the ultrametrics, suggested in Mérigot, Durbec, and Gaertner (2010).} } The measures based on ultrametrics also allow computing dissimilarity with \dQuote{raw} dissimilarities on the underlying objects (R objects inheriting from class \code{"dist"}). If a user-defined dissimilarity method is to be employed, it must be a function taking two clusterings as its arguments. Symmetric dissimilarity objects of class \code{"cl_dissimilarity"} are implemented as symmetric proximity objects with self-proximities identical to zero, and inherit from class \code{"cl_proximity"}. They can be coerced to dense square matrices using \code{as.matrix}. It is possible to use 2-index matrix-style subscripting for such objects; unless this uses identical row and column indices, this results in a (non-symmetric dissimilarity) object of class \code{"cl_cross_dissimilarity"}. Symmetric dissimilarity objects also inherit from class \code{"\link{dist}"} (although they currently do not \dQuote{strictly} extend this class), thus making it possible to use them directly for clustering algorithms based on dissimilarity matrices of this class, see the examples. } \references{ S. A. Boorman and P. Arabie (1972). Structural measures and the method of sorting. In R. N. Shepard, A. K. Romney, & S. B. Nerlove (eds.), \emph{Multidimensional Scaling: Theory and Applications in the Behavioral Sciences, 1: Theory} (pages 225--249). New York: Seminar Press. S. A. Boorman and D. C. Olivier (1973). Metrics on spaces of finite trees. \emph{Journal of Mathematical Psychology}, \bold{10}, 26--59. \doi{10.1016/0022-2496(73)90003-5}. I. Charon, L. Denoeud, A. Guénoche and O. Hudry (2006). \emph{Maximum Transfer Distance Between Partitions}. \emph{Journal of Classification}, \bold{23}, 103--121. 
\doi{10.1007/s00357-006-0006-2}. W. E. H. Day (1981). The complexity of computing metric distances between partitions. \emph{Mathematical Social Sciences}, \bold{1}, 269--287. \doi{10.1016/0165-4896(81)90042-1}. E. Dimitriadou, A. Weingessel and K. Hornik (2002). A combination scheme for fuzzy clustering. \emph{International Journal of Pattern Recognition and Artificial Intelligence}, \bold{16}, 901--912. \cr \doi{10.1142/S0218001402002052}. A. D. Gordon and M. Vichi (2001). Fuzzy partition models for fitting a set of partitions. \emph{Psychometrika}, \bold{66}, 229--248. \doi{10.1007/BF02294837}. D. Gusfield (2002). Partition-distance: A problem and class of perfect graphs arising in clustering. \emph{Information Processing Letters}, \bold{82}, 159--164. \doi{10.1016/S0020-0190(01)00263-0}. N. Jardine and R. Sibson (1971). \emph{Mathematical Taxonomy}. London: Wiley. M. Meila (2003). Comparing clusterings by the variation of information. In B. Schölkopf and M. K. Warmuth (eds.), \emph{Learning Theory and Kernel Machines}, pages 173--187. Springer-Verlag: Lecture Notes in Computer Science 2777. B. Mérigot, J.-P. Durbec and J.-C. Gaertner (2010). On goodness-of-fit measure for dendrogram-based analyses. \emph{Ecology}, \bold{91}, 1850--1859. \doi{10.1890/09-1387.1}. C. Rajski (1961). A metric space of discrete probability distributions, \emph{Information and Control}, \bold{4}, 371--377. \doi{10.1016/S0019-9958(61)80055-7}. J. Rubin (1967). Optimal classification into groups: An approach for solving the taxonomy problem. \emph{Journal of Theoretical Biology}, \bold{15}, 103--144. \doi{10.1016/0022-5193(67)90046-X}. D. Zhou, J. Li and H. Zha (2005). A new Mallows distance based metric for comparing clusterings. In \emph{Proceedings of the 22nd international Conference on Machine Learning} (Bonn, Germany, August 07--11, 2005), pages 1028--1035. ICML '05, volume 119. ACM Press, New York, NY. \doi{10.1145/1102351.1102481}. 
} \seealso{ \code{\link{cl_agreement}} } \examples{ ## An ensemble of partitions. data("CKME") pens <- CKME[1 : 30] diss <- cl_dissimilarity(pens) summary(c(diss)) cl_dissimilarity(pens[1:5], pens[6:7]) ## Equivalently, using subscripting. diss[1:5, 6:7] ## Can use the dissimilarities for "secondary" clustering ## (e.g. obtaining hierarchies of partitions): hc <- hclust(diss) plot(hc) ## Example from Boorman and Arabie (1972). P1 <- as.cl_partition(c(1, 2, 2, 2, 3, 3, 2, 2)) P2 <- as.cl_partition(c(1, 1, 2, 2, 3, 3, 4, 4)) cl_dissimilarity(P1, P2, "BA/A") cl_dissimilarity(P1, P2, "BA/C") ## Hierarchical clustering. d <- dist(USArrests) x <- hclust(d) cl_dissimilarity(x, d, "cophenetic") cl_dissimilarity(x, d, "gamma") } \keyword{cluster} clue/man/kmedoids.Rd0000644000175100001440000000414612734173215014073 0ustar hornikusers\name{kmedoids} \alias{kmedoids} \title{K-Medoids Clustering} \description{ Compute a \eqn{k}-medoids partition of a dissimilarity object. } \usage{ kmedoids(x, k) } \arguments{ \item{x}{a dissimilarity object inheriting from class \code{"\link{dist}"}, or a square matrix of pairwise object-to-object dissimilarity values.} \item{k}{an integer giving the number of classes to be used in the partition.} } \value{ An object of class \code{"kmedoids"} representing the obtained partition, which is a list with the following components. \item{cluster}{the class ids of the partition.} \item{medoid_ids}{the indices of the medoids.} \item{criterion}{the value of the criterion function of the partition.} } \details{ Let \eqn{d} denote the pairwise object-to-object dissimilarity matrix corresponding to \code{x}. A \eqn{k}-medoids partition of \code{x} is defined as a partition of the numbers from 1 to \eqn{n}, the number of objects in \code{x}, into \eqn{k} classes \eqn{C_1, \ldots, C_k} such that the criterion function \eqn{L = \sum_l \min_{j \in C_l} \sum_{i \in C_l} d_{ij}} is minimized. This is an NP-hard optimization problem. 
PAM (Partitioning Around Medoids, see Kaufman & Rousseeuw (1990), Chapter 2) is a very popular heuristic for obtaining optimal \eqn{k}-medoids partitions, and provided by \code{\link[cluster]{pam}} in package \pkg{cluster}. \code{kmedoids} is an exact algorithm based on a binary linear programming formulation of the optimization problem (e.g., Gordon & Vichi (1998), [P4']), using \code{\link[lpSolve]{lp}} from package \pkg{lpSolve} as solver. Depending on available hardware resources (the number of constraints of the program is of the order \eqn{n^2}), it may not be possible to obtain a solution. } \references{ L. Kaufman and P. J. Rousseeuw (1990). \emph{Finding Groups in Data: An Introduction to Cluster Analysis}. Wiley, New York. A. D. Gordon and M. Vichi (1998). Partitions of partitions. \emph{Journal of Classification}, \bold{15}, 265--285. \doi{10.1007/s003579900034}. } \keyword{cluster} \keyword{optimize} clue/man/Kinship82.Rd0000644000175100001440000000352014323456600014044 0ustar hornikusers\name{Kinship82} \alias{Kinship82} \title{Rosenberg-Kim Kinship Terms Partition Data} \description{ Partitions of 15 kinship terms given by 85 female undergraduates at Rutgers University who were asked to sort the terms into classes \dQuote{on the basis of some aspect of meaning}. } \usage{data("Kinship82")} \format{ A cluster ensemble of 85 hard partitions of the 15 kinship terms. } \details{ Rosenberg and Kim (1975) describe an experiment where perceived similarities of the kinship terms were obtained from six different \dQuote{sorting} experiments. These \dQuote{original} Rosenberg-Kim kinship terms data were published in Arabie, Carroll and de Sarbo (1987), and are also contained in file \file{indclus.data} in the shell archive \url{https://netlib.org/mds/indclus.shar}. For one of the experiments, partitions of the terms were printed in Rosenberg (1982). 
Comparison with the original data indicates that the partition data have the \dQuote{nephew} and \dQuote{niece} columns interchanged, which is corrected in the data set at hand. } \source{ Table 7.1 in Rosenberg (1982), with the \dQuote{nephew} and \dQuote{niece} columns interchanged. } \references{ P. Arabie, J. D. Carroll and W. S. de Sarbo (1987). \emph{Three-way scaling and clustering}. Newbury Park, CA: Sage. S. Rosenberg and M. P. Kim (1975). The method of sorting as a data-gathering procedure in multivariate research. \emph{Multivariate Behavioral Research}, \bold{10}, 489--502. \cr \doi{10.1207/s15327906mbr1004_7}. S. Rosenberg (1982). The method of sorting in multivariate research with applications selected from cognitive psychology and person perception. In N. Hirschberg and L. G. Humphreys (eds.), \emph{Multivariate Applications in the Social Sciences}, 117--142. Hillsdale, NJ: Erlbaum. } \keyword{datasets} clue/man/cl_boot.Rd0000644000175100001440000000455211304023137013703 0ustar hornikusers\name{cl_boot} \alias{cl_boot} \title{Bootstrap Resampling of Clustering Algorithms} \description{ Generate bootstrap replicates of the results of applying a \dQuote{base} clustering algorithm to a given data set. } \usage{ cl_boot(x, B, k = NULL, algorithm = if (is.null(k)) "hclust" else "kmeans", parameters = list(), resample = FALSE) } \arguments{ \item{x}{the data set of objects to be clustered, as appropriate for the base clustering algorithm.} \item{B}{an integer giving the number of bootstrap replicates.} \item{k}{\code{NULL} (default), or an integer giving the number of classes to be used for a partitioning base algorithm.} \item{algorithm}{a character string or function specifying the base clustering algorithm.} \item{parameters}{a named list of additional arguments to be passed to the base algorithm.} \item{resample}{a logical indicating whether the data should be resampled in addition to \dQuote{sampling from the algorithm}. 
If resampling is used, the class memberships of the objects given in \code{x} are predicted from the results of running the base algorithm on bootstrap samples of \code{x}.} } \value{ A cluster ensemble of length \eqn{B}, with either (if resampling is not used, default) the results of running the base algorithm on the given data set, or (if resampling is used) the memberships for the given data predicted from the results of running the base algorithm on bootstrap samples of the data. } \details{ This is a rather simple-minded function with limited applicability, and mostly useful for studying the effect of (uncontrolled) random initializations of fixed-point partitioning algorithms such as \code{\link[stats]{kmeans}} or \code{\link[e1071]{cmeans}}, see the examples. To study the effect of varying control parameters or explicitly providing random starting values, the respective cluster ensemble has to be generated explicitly (most conveniently by using \code{\link{replicate}} to create a list \code{lst} of suitable instances of clusterings obtained by the base algorithm, and using \code{cl_ensemble(list = lst)} to create the ensemble). } \examples{ ## Study e.g. the effect of random kmeans() initializations. data("Cassini") pens <- cl_boot(Cassini$x, 15, 3) diss <- cl_dissimilarity(pens) summary(c(diss)) plot(hclust(diss)) } \keyword{cluster} clue/man/n_of_classes.Rd0000644000175100001440000000347612211412701014721 0ustar hornikusers\name{n_of_classes} \alias{n_of_classes} \alias{cl_class_ids} \alias{as.cl_class_ids} \title{Classes in a Partition} \description{Determine the number of classes and the class ids in a partition of objects.} \usage{ n_of_classes(x) cl_class_ids(x) as.cl_class_ids(x) } \arguments{ \item{x}{an object representing a (hard or soft) partition (for \code{n_of_classes} and \code{cl_class_ids}), or raw class ids (for \code{as.cl_class_ids}).} } \value{ For \code{n_of_classes}, an integer giving the number of classes in the partition. 
For \code{cl_class_ids}, a vector of integers with the corresponding class ids. For soft partitions, the class ids returned are those of the \emph{nearest hard partition} obtained by taking the class ids of the (first) maximal membership values. } \details{ These functions are generic functions. The methods provided in package \pkg{clue} handle the partitions obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself). Note that the number of classes is taken as the number of distinct class ids actually used in the partition, and may differ from the number of columns in a membership matrix representing the partition. \code{as.cl_class_ids} can be used for coercing \dQuote{raw} class ids (given as atomic vectors) to class id objects. } \seealso{ \code{\link{is.cl_partition}} } \examples{ data("Cassini") party <- kmeans(Cassini$x, 3) n_of_classes(party) ## A simple confusion matrix: table(cl_class_ids(party), Cassini$classes) ## For an "oversize" membership matrix representation: n_of_classes(cl_membership(party, 6)) } \keyword{cluster} clue/man/cl_predict.Rd0000644000175100001440000000411412211412617014371 0ustar hornikusers\name{cl_predict} \alias{cl_predict} \title{Predict Memberships} \description{ Predict class ids or memberships from R objects representing partitions. } \usage{ cl_predict(object, newdata = NULL, type = c("class_ids", "memberships"), ...) } \arguments{ \item{object}{an R object representing a partition of objects.} \item{newdata}{an optional data set giving the objects to make predictions for. This must be of the same \dQuote{kind} as the data set employed for obtaining the partition. If omitted, the original data are used.} \item{type}{a character string indicating whether class ids or memberships should be returned. 
May be abbreviated.} \item{\dots}{arguments to be passed to and from methods.} } \value{ Depending on \code{type}, an object of class \code{"cl_class_ids"} with the predicted class ids, or of class \code{"cl_membership"} with the matrix of predicted membership values. } \details{ Many algorithms resulting in partitions of a given set of objects can be taken to induce a partition of the underlying feature space for the measurements on the objects, so that class memberships for \dQuote{new} objects can be obtained from the induced partition. Examples include partitions based on assigning objects to their \dQuote{closest} prototypes, or providing mixture models for the distribution of objects in feature space. This is a generic function. The methods provided in package \pkg{clue} handle the partitions obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself). } \examples{ ## Run kmeans on a random subset of the Cassini data, and predict the ## memberships for the "test" data set. data("Cassini") nr <- NROW(Cassini$x) ind <- sample(nr, 0.9 * nr, replace = FALSE) party <- kmeans(Cassini$x[ind, ], 3) table(cl_predict(party, Cassini$x[-ind, ]), Cassini$classes[-ind]) } \keyword{cluster} clue/man/cl_validity.Rd0000644000175100001440000000673014714611112014571 0ustar hornikusers\name{cl_validity} \alias{cl_validity} \alias{cl_validity.default} \title{Validity Measures for Partitions and Hierarchies} \description{ Compute validity measures for partitions and hierarchies, attempting to measure how well these clusterings capture the underlying structure in the data they were obtained from. } \usage{ cl_validity(x, ...) \method{cl_validity}{default}(x, d, ...) 
} \arguments{ \item{x}{an object representing a partition or hierarchy.} \item{d}{a dissimilarity object from which \code{x} was obtained.} \item{\dots}{arguments to be passed to or from methods.} } \value{ A list of class \code{"cl_validity"} with the computed validity measures. } \details{ \code{cl_validity} is a generic function. For partitions, its default method gives the \dQuote{dissimilarity accounted for}, defined as \eqn{1 - a_w / a_t}, where \eqn{a_t} is the average total dissimilarity, and the \dQuote{average within dissimilarity} \eqn{a_w} is given by \deqn{\frac{\sum_{i,j} \sum_k m_{ik}m_{jk} d_{ij}}{ \sum_{i,j} \sum_k m_{ik}m_{jk}}}{% \sum_{i,j} \sum_k m_{ik}m_{jk} d_{ij} / \sum_{i,j} \sum_k m_{ik}m_{jk}} where \eqn{d} and \eqn{m} are the dissimilarities and memberships, respectively, and the sums are over all pairs of objects and all classes. For hierarchies, the validity measures computed by default are \dQuote{variance accounted for} (VAF, e.g., Hubert, Arabie & Meulman, 2006) and \dQuote{deviance accounted for} (DEV, e.g., Smith, 2001). If \code{u} is the ultrametric corresponding to the hierarchy \code{x} and \code{d} the dissimilarity \code{x} was obtained from, these validity measures are given by \deqn{\mathrm{VAF} = \max\left(0, 1 - \frac{\sum_{i,j} (d_{ij} - u_{ij})^2}{ \sum_{i,j} (d_{ij} - \mathrm{mean}(d)) ^ 2}\right)}{ max(0, 1 - sum_{i,j} (d_{ij} - u_{ij})^2 / sum_{i,j} (d_{ij} - mean(d))^2)} and \deqn{\mathrm{DEV} = \max\left(0, 1 - \frac{\sum_{i,j} |d_{ij} - u_{ij}|}{ \sum_{i,j} |d_{ij} - \mathrm{median}(d)|}\right)}{ max(0, 1 - sum_{i,j} |d_{ij} - u_{ij}| / sum_{i,j} |d_{ij} - median(d)|)} respectively. Note that VAF and DEV are not invariant under rescaling \code{u}, and may be \dQuote{arbitrarily small} (i.e., 0 using the above definitions) even though \code{u} and \code{d} are \dQuote{structurally close} in some sense. 
For the results of using \code{\link[cluster]{agnes}} and \code{\link[cluster]{diana}}, the agglomerative and divisive coefficients are provided in addition to the default ones. } \references{ L. Hubert, P. Arabie and J. Meulman (2006). \emph{The structural representation of proximity matrices with MATLAB}. Philadelphia, PA: SIAM. T. J. Smith (2001). Constructing ultrametric and additive trees based on the \eqn{L_1} norm. \emph{Journal of Classification}, \bold{18}/2, 185--207. \url{https://link.springer.com/article/10.1007/s00357-001-0015-0}. %% The above web page has %% \doi{10.1007/s00357-001-0015-0}. %% which does not work. Reported to the responsible DOI Registration %% Agency on 2017-08-03, let's use the URL instead of the DOI for now. } \seealso{ \code{\link[fpc]{cluster.stats}} in package \pkg{fpc} for a variety of cluster validation statistics; \code{\link[e1071]{fclustIndex}} in package \pkg{e1071} for several fuzzy cluster indexes; \code{\link[cclust]{clustIndex}} in package \pkg{cclust}; \code{\link[cluster]{silhouette}} in package \pkg{cluster}. } \keyword{cluster} clue/man/cl_agreement.Rd0000644000175100001440000002370713761714616014734 0ustar hornikusers\name{cl_agreement} \alias{cl_agreement} \title{Agreement Between Partitions or Hierarchies} \description{Compute the agreement between (ensembles) of partitions or hierarchies. } \usage{ cl_agreement(x, y = NULL, method = "euclidean", \dots) } \arguments{ \item{x}{an ensemble of partitions or hierarchies and dissimilarities, or something coercible to that (see \code{\link{cl_ensemble}}).} \item{y}{\code{NULL} (default), or as for \code{x}.} \item{method}{a character string specifying one of the built-in methods for computing agreement, or a function to be taken as a user-defined method. If a character string, its lower-cased version is matched against the lower-cased names of the available built-in methods using \code{\link{pmatch}}. 
See \bold{Details} for available built-in methods.} \item{\dots}{further arguments to be passed to methods.} } \value{ If \code{y} is \code{NULL}, an object of class \code{"cl_agreement"} containing the agreements between the all pairs of components of \code{x}. Otherwise, an object of class \code{"cl_cross_agreement"} with the agreements between the components of \code{x} and the components of \code{y}. } \details{ If \code{y} is given, its components must be of the same kind as those of \code{x} (i.e., components must either all be partitions, or all be hierarchies or dissimilarities). If all components are partitions, the following built-in methods for measuring agreement between two partitions with respective membership matrices \eqn{u} and \eqn{v} (brought to a common number of columns) are available: \describe{ \item{\code{"euclidean"}}{\eqn{1 - d / m}, where \eqn{d} is the Euclidean dissimilarity of the memberships, i.e., the square root of the minimal sum of the squared differences of \eqn{u} and all column permutations of \eqn{v}, and \eqn{m} is an upper bound for the maximal Euclidean dissimilarity. See Dimitriadou, Weingessel and Hornik (2002).} \item{\code{"manhattan"}}{\eqn{1 - d / m}, where \eqn{d} is the Manhattan dissimilarity of the memberships, i.e., the minimal sum of the absolute differences of \eqn{u} and all column permutations of \eqn{v}, and \eqn{m} is an upper bound for the maximal Manhattan dissimilarity.} \item{\code{"Rand"}}{the Rand index (the rate of distinct pairs of objects both in the same class or both in different classes in both partitions), see Rand (1971) or Gordon (1999), page 198. For soft partitions, (currently) the Rand index of the corresponding nearest hard partitions is used.} \item{\code{"cRand"}}{the Rand index corrected for agreement by chance, see Hubert and Arabie (1985) or Gordon (1999), page 198. Can only be used for hard partitions.} \item{\code{"NMI"}}{Normalized Mutual Information, see Strehl and Ghosh (2002). 
For soft partitions, (currently) the NMI of the corresponding nearest hard partitions is used.} \item{\code{"KP"}}{the Katz-Powell index, i.e., the product-moment correlation coefficient between the elements of the co-membership matrices \eqn{C(u) = u u'} and \eqn{C(v)}, respectively, see Katz and Powell (1953). For soft partitions, (currently) the Katz-Powell index of the corresponding nearest hard partitions is used. (Note that for hard partitions, the \eqn{(i,j)} entry of \eqn{C(u)} is one iff objects \eqn{i} and \eqn{j} are in the same class.)} \item{\code{"angle"}}{the maximal cosine of the angle between the elements of \eqn{u} and all column permutations of \eqn{v}.} \item{\code{"diag"}}{the maximal co-classification rate, i.e., the maximal rate of objects with the same class ids in both partitions after arbitrarily permuting the ids.} \item{\code{"FM"}}{the index of Fowlkes and Mallows (1983), i.e., the ratio \eqn{N_{xy} / \sqrt{N_x N_y}}{N_xy / sqrt(N_x N_y)} of the number \eqn{N_{xy}}{N_xy} of distinct pairs of objects in the same class in both partitions and the geometric mean of the numbers \eqn{N_x} and \eqn{N_y} of distinct pairs of objects in the same class in partition \eqn{x} and partition \eqn{y}, respectively. For soft partitions, (currently) the Fowlkes-Mallows index of the corresponding nearest hard partitions is used.} \item{\code{"Jaccard"}}{the Jaccard index, i.e., the ratio of the numbers of distinct pairs of objects in the same class in both partitions and in at least one partition, respectively. 
For soft partitions, (currently) the Jaccard index of the corresponding nearest hard partitions is used.} \item{\code{"purity"}}{the purity of the classes of \code{x} with respect to those of \code{y}, i.e., \eqn{\sum_j \max_i n_{ij} / n}, where \eqn{n_{ij}} is the joint frequency of objects in class \eqn{i} for \code{x} and in class \eqn{j} for \code{y}, and \eqn{n} is the total number of objects.} \item{\code{"PS"}}{Prediction Strength, see Tibshirani and Walther (2005): the minimum, over all classes \eqn{j} of \code{y}, of the maximal rate of objects in the same class for \code{x} and in class \eqn{j} for \code{y}.} } If all components are hierarchies, available built-in methods for measuring agreement between two hierarchies with respective ultrametrics \eqn{u} and \eqn{v} are as follows. \describe{ \item{\code{"euclidean"}}{\eqn{1 / (1 + d)}, where \eqn{d} is the Euclidean dissimilarity of the ultrametrics (i.e., the square root of the sum of the squared differences of \eqn{u} and \eqn{v}).} \item{\code{"manhattan"}}{\eqn{1 / (1 + d)}, where \eqn{d} is the Manhattan dissimilarity of the ultrametrics (i.e., the sum of the absolute differences of \eqn{u} and \eqn{v}).} \item{\code{"cophenetic"}}{The cophenetic correlation coefficient. (I.e., the product-moment correlation of the ultrametrics.)} \item{\code{"angle"}}{the cosine of the angle between the ultrametrics.} \item{\code{"gamma"}}{\eqn{1 - d}, where \eqn{d} is the rate of inversions between the associated ultrametrics (i.e., the rate of pairs \eqn{(i,j)} and \eqn{(k,l)} for which \eqn{u_{ij} < u_{kl}} and \eqn{v_{ij} > v_{kl}}). (This agreement measure is a linear transformation of Kruskal's \eqn{\gamma}{gamma}.)} } The measures based on ultrametrics also allow computing agreement with \dQuote{raw} dissimilarities on the underlying objects (R objects inheriting from class \code{"dist"}). If a user-defined agreement method is to be employed, it must be a function taking two clusterings as its arguments. 
Symmetric agreement objects of class \code{"cl_agreement"} are implemented as symmetric proximity objects with self-proximities identical to one, and inherit from class \code{"cl_proximity"}. They can be coerced to dense square matrices using \code{as.matrix}. It is possible to use 2-index matrix-style subscripting for such objects; unless this uses identical row and column indices, this results in a (non-symmetric agreement) object of class \code{"cl_cross_agreement"}. } \references{ E. Dimitriadou, A. Weingessel and K. Hornik (2002). A combination scheme for fuzzy clustering. \emph{International Journal of Pattern Recognition and Artificial Intelligence}, \bold{16}, 901--912. \cr \doi{10.1142/S0218001402002052}. E. B. Fowlkes and C. L. Mallows (1983). A method for comparing two hierarchical clusterings. \emph{Journal of the American Statistical Association}, \bold{78}, 553--569. \cr \doi{10.1080/01621459.1983.10478008}. A. D. Gordon (1999). \emph{Classification} (2nd edition). Boca Raton, FL: Chapman & Hall/CRC. L. Hubert and P. Arabie (1985). Comparing partitions. \emph{Journal of Classification}, \bold{2}, 193--218. \doi{10.1007/bf01908075}. W. M. Rand (1971). Objective criteria for the evaluation of clustering methods. \emph{Journal of the American Statistical Association}, \bold{66}, 846--850. \doi{10.2307/2284239}. L. Katz and J. H. Powell (1953). A proposed index of the conformity of one sociometric measurement to another. \emph{Psychometrika}, \bold{18}, 249--256. \doi{10.1007/BF02289063}. A. Strehl and J. Ghosh (2002). Cluster ensembles --- A knowledge reuse framework for combining multiple partitions. \emph{Journal of Machine Learning Research}, \bold{3}, 583--617. \cr \url{https://www.jmlr.org/papers/volume3/strehl02a/strehl02a.pdf}. R. Tibshirani and G. Walther (2005). Cluster validation by Prediction Strength. \emph{Journal of Computational and Graphical Statistics}, \bold{14}/3, 511--528. \doi{10.1198/106186005X59243}. 
} \seealso{ \code{\link{cl_dissimilarity}}; \code{\link[e1071]{classAgreement}} in package \pkg{e1071}. } \examples{ ## An ensemble of partitions. data("CKME") pens <- CKME[1 : 20] # for saving precious time ... summary(c(cl_agreement(pens))) summary(c(cl_agreement(pens, method = "Rand"))) summary(c(cl_agreement(pens, method = "diag"))) cl_agreement(pens[1:5], pens[6:7], method = "NMI") ## Equivalently, using subscripting. cl_agreement(pens, method = "NMI")[1:5, 6:7] ## An ensemble of hierarchies. d <- dist(USArrests) hclust_methods <- c("ward", "single", "complete", "average", "mcquitty") hclust_results <- lapply(hclust_methods, function(m) hclust(d, m)) names(hclust_results) <- hclust_methods hens <- cl_ensemble(list = hclust_results) summary(c(cl_agreement(hens))) ## Note that the Euclidean agreements are *very* small. ## This is because the ultrametrics differ substantially in height: u <- lapply(hens, cl_ultrametric) round(sapply(u, max), 3) ## Rescaling the ultrametrics to [0, 1] gives: u <- lapply(u, function(x) (x - min(x)) / (max(x) - min(x))) shens <- cl_ensemble(list = lapply(u, as.cl_dendrogram)) summary(c(cl_agreement(shens))) ## Au contraire ... summary(c(cl_agreement(hens, method = "cophenetic"))) cl_agreement(hens[1:3], hens[4:5], method = "gamma") } \keyword{cluster} clue/man/pclust.Rd0000644000175100001440000001551611430740706013606 0ustar hornikusers\name{pclust} \alias{pclust} \alias{pclust_family} \alias{pclust_object} \title{Prototype-Based Partitioning} \description{ Obtain prototype-based partitions of objects by minimizing the criterion \eqn{\sum w_b u_{bj}^m d(x_b, p_j)^e}, the sum of the case-weighted and membership-weighted \eqn{e}-th powers of the dissimilarities between the objects \eqn{x_b} and the prototypes \eqn{p_j}, for suitable dissimilarities \eqn{d} and exponents \eqn{e}. 
} \usage{ pclust(x, k, family, m = 1, weights = 1, control = list()) pclust_family(D, C, init = NULL, description = NULL, e = 1, .modify = NULL, .subset = NULL) pclust_object(prototypes, membership, cluster, family, m = 1, value, ..., classes = NULL, attributes = NULL) } \arguments{ \item{x}{the object to be partitioned.} \item{k}{an integer giving the number of classes to be used in the partition.} \item{family}{an object of class \code{"pclust_family"} as generated by \code{pclust_family}, containing the information about \eqn{d} and \eqn{e}.} \item{m}{a number not less than 1 controlling the softness of the partition (as the \dQuote{fuzzification parameter} of the fuzzy \eqn{c}-means algorithm). The default value of 1 corresponds to hard partitions obtained from a generalized \eqn{k}-means problem; values greater than one give partitions of increasing softness obtained from a generalized fuzzy \eqn{c}-means problem.} \item{weights}{a numeric vector of non-negative case weights. Recycled to the number of elements given by \code{x} if necessary.} \item{control}{a list of control parameters. 
See \bold{Details}.} \item{D}{a function for computing dissimilarities \eqn{d} between objects and prototypes.} \item{C}{a \sQuote{consensus} function with formals \code{x}, \code{weights} and \code{control} for computing a consensus prototype \eqn{p} minimizing \eqn{\sum_b w_b d(x_b, p) ^ e}.} \item{init}{a function with formals \code{x} and \code{k} initializing an object with \eqn{k} prototypes from the object \code{x} to be partitioned.} \item{description}{a character string describing the family.} \item{e}{a number giving the exponent \eqn{e} of the criterion.} \item{.modify}{a function with formals \code{x}, \code{i} and \code{value} for modifying a single prototype, or \code{NULL} (default).} \item{.subset}{a function with formals \code{x} and \code{i} for subsetting prototypes, or \code{NULL} (default).} \item{prototypes}{an object representing the prototypes of the partition.} \item{membership}{an object of class \code{"\link{cl_membership}"} with the membership values \eqn{u_{bj}}.} \item{cluster}{the class ids of the nearest hard partition.} \item{value}{the value of the criterion to be minimized.} \item{...}{further elements to be included in the generated pclust object.} \item{classes}{a character vector giving further classes to be given to the generated pclust object in addition to \code{"pclust"}, or \code{NULL} (default).} \item{attributes}{a list of attributes, or \code{NULL} (default).} } \value{ \code{pclust} returns the partition found as an object of class \code{"pclust"} (as obtained by calling \code{pclust_object}) which in addition to the \emph{default} components contains \code{call} (the matched call) and a \code{converged} attribute indicating convergence status (i.e., whether the maximal number of iterations was reached). \code{pclust_family} returns an object of class \code{"pclust_family"}, which is a list with components corresponding to the formals of \code{pclust_family}. 
\code{pclust_object} returns an object inheriting from class \code{"pclust"}, which is a list with components corresponding to the formals (up to and including \code{...}) of \code{pclust_object}, and additional classes and attributes specified by \code{classes} and \code{attributes}, respectively. } \details{ For \eqn{m = 1}, a generalization of the Lloyd-Forgy variant of the \eqn{k}-means algorithm is used, which iterates between reclassifying objects to their closest prototypes (according to the dissimilarities given by \code{D}), and computing new prototypes as the consensus for the classes (using \code{C}). For \eqn{m > 1}, a generalization of the fuzzy \eqn{c}-means recipe (e.g., Bezdek (1981)) is used, which alternates between computing optimal memberships for fixed prototypes, and computing new prototypes as the suitably weighted consensus clusterings for the classes. This procedure is repeated until convergence occurs, or the maximal number of iterations is reached. Currently, no local improvement heuristics are provided. It is possible to perform several runs of the procedure via control arguments \code{nruns} or \code{start} (the default is to perform a single run), in which case the first partition with the smallest value of the criterion is returned. The dissimilarity and consensus functions as well as the exponent \eqn{e} are specified via \code{family}. In principle, arbitrary representations of objects to be partitioned and prototypes (which do not necessarily have to be \dQuote{of the same kind}) can be employed. In addition to \code{D} and \code{C}, what is needed are means to obtain an initial collection of \eqn{k} prototypes (\code{init}), to modify a single prototype (\code{.modify}), and subset the prototypes (\code{.subset}). By default, list and (currently, only dense) matrix (with the usual convention that the rows correspond to the objects) are supported. Otherwise, the family has to provide the functions needed. 
Available control parameters are as follows. \describe{ \item{\code{maxiter}}{an integer giving the maximal number of iterations to be performed. Defaults to 100.} \item{\code{nruns}}{an integer giving the number of runs to be performed. Defaults to 1.} \item{\code{reltol}}{the relative convergence tolerance. Defaults to \code{sqrt(.Machine$double.eps)}.} \item{\code{start}}{a list of prototype objects to be used as starting values.} \item{\code{verbose}}{a logical indicating whether to provide some output on minimization progress. Defaults to \code{getOption("verbose")}.} \item{\code{control}}{control parameters to be used in the consensus function.} } The fixed point approach employed is a heuristic which cannot be guaranteed to find the global minimum, in particular if \code{C} is not an exact minimizer. Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the base algorithm. } \references{ J. C. Bezdek (1981). \emph{Pattern recognition with fuzzy objective function algorithms}. New York: Plenum. } \seealso{ \code{\link[stats]{kmeans}}, \code{\link[e1071]{cmeans}}. } clue/man/Phonemes.Rd0000644000175100001440000000215714323456560014055 0ustar hornikusers\name{Phonemes} \alias{Phonemes} \title{Miller-Nicely Consonant Phoneme Confusion Data} \description{ Miller-Nicely data on the auditory confusion of 16 consonant phonemes. } \usage{data("Phonemes")} \format{ A symmetric matrix of the misclassification probabilities of 16 English consonant phonemes. } \details{ Miller and Nicely (1955) obtained the confusions by exposing female subjects to a series of syllables consisting of one of the 16 consonants followed by the vowel \samp{a} under 17 different experimental conditions. The data provided are obtained from aggregating the six so-called flat-noise conditions in which only the speech-to-noise ratio was varied into a single matrix of misclassification frequencies. 
} \source{ The data set is also contained in file \file{mapclus.data} in the shell archive \url{https://netlib.org/mds/mapclus.shar}. } \references{ G. A. Miller and P. E. Nicely (1955). An analysis of perceptual confusions among some English consonants. \emph{Journal of the Acoustical Society of America}, \bold{27}, 338--352. \doi{10.1121/1.1907526}. } \keyword{datasets} clue/man/addtree.Rd0000644000175100001440000000364312037226501013676 0ustar hornikusers\name{addtree} \encoding{UTF-8} \alias{as.cl_addtree} \title{Additive Tree Distances} \description{ Objects representing additive tree distances. } \usage{ as.cl_addtree(x) } \arguments{ \item{x}{an R object representing additive tree distances.} } \value{ An object of class \code{"cl_addtree"} containing the additive tree distances. } \details{ Additive tree distances are object dissimilarities \eqn{d} satisfying the so-called \emph{additive tree conditions}, also known as \emph{four-point conditions} \eqn{d_{ij} + d_{kl} \le \max(d_{ik} + d_{jl}, d_{il} + d_{jk})} for all quadruples \eqn{i, j, k, l}. Equivalently, for each such quadruple, the largest two values of the sums \eqn{d_{ij} + d_{kl}}, \eqn{d_{ik} + d_{jl}}, and \eqn{d_{il} + d_{jk}} must be equal. Centroid distances are additive tree distances where the inequalities in the four-point conditions are strengthened to equalities (such that all three sums are equal), and can be represented as \eqn{d_{ij} = g_i + g_j}, i.e., as sums of distances from a \dQuote{centroid}. See, e.g., Barthélémy and Guénoche (1991) for more details on additive tree distances. \code{as.cl_addtree} is a generic function. Its default method can handle objects representing ultrametric distances and raw additive distance matrices. In addition, there is a method for coercing objects of class \code{"\link[ape:as.phylo]{phylo}"} from package \pkg{ape}. 
Functions \code{\link{ls_fit_addtree}} and \code{\link{ls_fit_centroid}} can be used to find the additive tree distance or centroid distance minimizing least squares distance (Euclidean dissimilarity) to a given dissimilarity object. There is a \code{\link{plot}} method for additive tree distances. } \references{ J.-P. Barthélémy and A. Guénoche (1991). \emph{Trees and proximity representations}. Chichester: John Wiley & Sons. ISBN 0-471-92263-3. } \keyword{cluster} clue/man/Cassini.Rd0000644000175100001440000000237411304023137013653 0ustar hornikusers\name{Cassini} \alias{Cassini} \title{Cassini Data} \description{ A Cassini data set with 1000 points in 2-dimensional space which are drawn from the uniform distribution on 3 structures. The two outer structures are banana-shaped; the \dQuote{middle} structure in between them is a circle. } \usage{data("Cassini")} \format{ A classed list with components \describe{ \item{\code{x}}{a matrix with 1000 rows and 2 columns giving the coordinates of the points.} \item{\code{classes}}{a factor indicating which structure the respective points belong to.} } } \details{ Instances of Cassini data sets can be created using function \code{\link[mlbench]{mlbench.cassini}} in package \pkg{mlbench}. The data set at hand was obtained using \preformatted{ library("mlbench") set.seed(1234) Cassini <- mlbench.cassini(1000) } } \examples{ data("Cassini") op <- par(mfcol = c(1, 2)) ## Plot the data set: plot(Cassini$x, col = as.integer(Cassini$classes), xlab = "", ylab = "") ## Create a "random" k-means partition of the data: set.seed(1234) party <- kmeans(Cassini$x, 3) ## And plot that. plot(Cassini$x, col = cl_class_ids(party), xlab = "", ylab = "") ## (We can see the problem ...) 
par(op) } \keyword{datasets} clue/man/ls_fit_sum_of_ultrametrics.Rd0000644000175100001440000000603012116170572017711 0ustar hornikusers\name{ls_fit_sum_of_ultrametrics} \alias{ls_fit_sum_of_ultrametrics} \title{Least Squares Fit of Sums of Ultrametrics to Dissimilarities} \description{ Find a sequence of ultrametrics with sum minimizing square distance (Euclidean dissimilarity) to a given dissimilarity object. } \usage{ ls_fit_sum_of_ultrametrics(x, nterms = 1, weights = 1, control = list()) } \arguments{ \item{x}{a dissimilarity object inheriting from or coercible to class \code{"\link{dist}"}.} \item{nterms}{an integer giving the number of ultrametrics to be fitted.} \item{weights}{a numeric vector or matrix with non-negative weights for obtaining a weighted least squares fit. If a matrix, its numbers of rows and columns must be the same as the number of objects in \code{x}, and the lower diagonal part is used. Otherwise, it is recycled to the number of elements in \code{x}.} \item{control}{a list of control parameters. See \bold{Details}.} } \details{ The problem to be solved is minimizing the criterion function \deqn{L(u(1), \dots, u(n)) = \sum_{i,j} w_{ij} \left(x_{ij} - \sum_{k=1}^n u_{ij}(k)\right)^2}{ L(u(1), \dots, u(n)) = \sum_{i,j} w_{ij} \left(x_{ij} - \sum_{k=1}^n u_{ij}(k)\right)^2} over all \eqn{u(1), \ldots, u(n)} satisfying the ultrametric constraints. We provide an implementation of the iterative heuristic suggested in Carroll & Pruzansky (1980) which in each step \eqn{t} sequentially refits the \eqn{u(k)} as the least squares ultrametric fit to the \dQuote{residuals} \eqn{x - \sum_{l \ne k} u(l)} using \code{\link{ls_fit_ultrametric}}. Available control parameters include \describe{ \item{\code{maxiter}}{an integer giving the maximal number of iteration steps to be performed. Defaults to 100.} \item{\code{eps}}{a nonnegative number controlling the iteration, which stops when the maximal change in all \eqn{u(k)} is less than \code{eps}. 
Defaults to \eqn{10^{-6}}.} \item{\code{reltol}}{the relative convergence tolerance. Iteration stops when the relative change in the criterion function is less than \code{reltol}. Defaults to \eqn{10^{-6}}.} \item{\code{method}}{a character string indicating the fitting method to be employed by the individual least squares fits.} \item{\code{control}}{a list of control parameters to be used by the method of \code{\link{ls_fit_ultrametric}} employed. By default, if the \acronym{SUMT} method is used, 10 inner \acronym{SUMT} runs are performed for each refitting.} } It should be noted that the method used is a heuristic which can not be guaranteed to find the global minimum. } \value{ A list of objects of class \code{"\link{cl_ultrametric}"} containing the fitted ultrametric distances. } \references{ J. D. Carroll and S. Pruzansky (1980). Discrete and hybrid scaling models. In E. D. Lantermann and H. Feger (eds.), \emph{Similarity and Choice}. Bern (Switzerland): Huber. } \keyword{cluster} \keyword{optimize} clue/man/GVME_Consensus.Rd0000644000175100001440000000343512734174635015075 0ustar hornikusers\name{GVME_Consensus} \alias{GVME_Consensus} \title{Gordon-Vichi Macroeconomic Consensus Partition Data} \description{ The soft (\dQuote{fuzzy}) consensus partitions for the macroeconomic partition data given in Gordon and Vichi (2001). } \usage{data("GVME_Consensus")} \format{ A named cluster ensemble of eight soft partitions of 21 countries into two or three classes. } \details{ The elements of the ensemble are consensus partitions for the macroeconomic partition data in Gordon and Vichi (2001), which are available as data set \code{\link{GVME}}. 
Element names are of the form \code{"\var{m}/\var{k}"}, where \var{m} indicates the consensus method employed (one of \samp{MF1}, \samp{MF2}, \samp{JMF}, and \samp{S&S}, corresponding to the application of models 1, 2, and 3 in Gordon and Vichi (2001) and the approach in Sato and Sato (1994), respectively), and \var{k} denotes the number of classes (2 or 3). } \source{ Tables 4 and 5 in Gordon and Vichi (2001). } \references{ A. D. Gordon and M. Vichi (2001). Fuzzy partition models for fitting a set of partitions. \emph{Psychometrika}, \bold{66}, 229--248. \doi{10.1007/BF02294837}. M. Sato and Y. Sato (1994). On a multicriteria fuzzy clustering method for 3-way data. \emph{International Journal of Uncertainty, Fuzziness and Knowledge-Based Systems}, \bold{2}, 127--142. \cr \doi{10.1142/S0218488594000122}. } \examples{ ## Load the consensus partitions. data("GVME_Consensus") ## Pick the partitions into 2 classes. GVME_Consensus_2 <- GVME_Consensus[1 : 4] ## Fuzziness using the Partition Coefficient. cl_fuzziness(GVME_Consensus_2) ## (Corresponds to 1 - F in the source.) ## Dissimilarities: cl_dissimilarity(GVME_Consensus_2) cl_dissimilarity(GVME_Consensus_2, method = "comem") } \keyword{datasets} clue/DESCRIPTION0000644000175100001440000000151314715050616012733 0ustar hornikusersPackage: clue Version: 0.3-66 Encoding: UTF-8 Title: Cluster Ensembles Description: CLUster Ensembles. 
Authors@R: c(person("Kurt", "Hornik", role = c("aut", "cre"), email = "Kurt.Hornik@R-project.org", comment = c(ORCID = "0000-0003-4198-9911")), person("Walter", "Böhm", role = "ctb")) License: GPL-2 Depends: R (>= 3.2.0) Imports: stats, cluster, graphics, methods Suggests: e1071, lpSolve (>= 5.5.7), quadprog (>= 1.4-8), relations Enhances: RWeka, ape, cba, cclust, flexclust, flexmix, kernlab, mclust, movMF, modeltools NeedsCompilation: yes Packaged: 2024-11-13 06:15:44 UTC; hornik Author: Kurt Hornik [aut, cre] (<https://orcid.org/0000-0003-4198-9911>), Walter Böhm [ctb] Maintainer: Kurt Hornik <Kurt.Hornik@R-project.org> Repository: CRAN Date/Publication: 2024-11-13 07:13:18 UTC