clue/0000755000175100001440000000000013435050401011213 5ustar hornikusersclue/po/0000755000175100001440000000000012213262407011635 5ustar hornikusersclue/po/R-clue.pot0000644000175100001440000001223713142031604013510 0ustar hornikusersmsgid "" msgstr "" "Project-Id-Version: clue 0.3-54\n" "POT-Creation-Date: 2017-08-07 11:31\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=CHARSET\n" "Content-Transfer-Encoding: 8bit\n" msgid "Argument 'weights' must be compatible with 'x'." msgstr "" msgid "Argument 'weights' has negative elements." msgstr "" msgid "Argument 'weights' has no positive elements." msgstr "" msgid "Non-identical weights currently not supported." msgstr "" msgid "All given orders must be valid permutations." msgstr "" msgid "Iterative projection run: %d" msgstr "" msgid "Iterative reduction run: %d" msgstr "" msgid "Cannot coerce to 'cl_addtree'." msgstr "" msgid "Cannot mix partitions and hierarchies." msgstr "" msgid "All clusterings must have the same number of objects." msgstr "" msgid "Can only handle hard partitions." msgstr "" msgid "Can only determine classes of partitions or hierarchies." msgstr "" msgid "Cannot compute consensus of empty ensemble." msgstr "" msgid "AOS run: %d" msgstr "" msgid "Iteration: 0 *** value: %g" msgstr "" msgid "Iteration: %d *** value: %g" msgstr "" msgid "Minimum: %g" msgstr "" msgid "AOG run: %d" msgstr "" msgid "Parameter 'p' must be in [1/2, 1]." msgstr "" msgid "Cannot compute prototype distances." msgstr "" msgid "All elements must have the same number of objects." msgstr "" msgid "Generic '%s' not defined for \"%s\" objects." msgstr "" msgid "Wrong class." msgstr "" msgid "Plotting not available for elements %s of the ensemble." msgstr "" msgid "Value '%s' is not a valid abbreviation for a fuzziness method." msgstr "" msgid "Unary '%s' not defined for \"%s\" objects." 
msgstr "" msgid "Hierarchies must have the same number of objects." msgstr "" msgid "Dendrograms must have the same number of objects." msgstr "" msgid "Arguments 'x' and 'y' must have the same number of objects." msgstr "" msgid "Cannot compute meet of given clusterings." msgstr "" msgid "Cannot compute join of given clusterings." msgstr "" msgid "Join of given n-trees does not exist." msgstr "" msgid "x must be a matrix with nonnegative entries." msgstr "" msgid "x must not have more rows than columns." msgstr "" msgid "Argument 'x' must be a partition." msgstr "" msgid "Cannot compute medoid of empty ensemble." msgstr "" msgid "Cannot compute medoid partition of empty ensemble." msgstr "" msgid "Class ids:" msgstr "" msgid "Criterion:" msgstr "" msgid "Medoid ids:" msgstr "" msgid "k cannot be less than the number of classes in x." msgstr "" msgid "Cannot extract object dissimilarities" msgstr "" msgid "Cannot infer class ids from given object." msgstr "" msgid "A hard partition of %d objects." msgstr "" msgid "A partition of %d objects." msgstr "" msgid "Partitions must have the same number of objects." msgstr "" msgid "Class ids must be atomic." msgstr "" msgid "Not a valid membership matrix." msgstr "" msgid "Cannot coerce to 'cl_hard_partition'." msgstr "" msgid "No information on exponent in consensus method used." msgstr "" msgid "No information on dissimilarity in consensus method used." msgstr "" msgid "A hard partition of a cluster ensemble with %d elements into %d classes." msgstr "" msgid "A soft partition (degree m = %f) of a cluster ensemble with %d elements into %d classes." msgstr "" msgid "Cannot determine how to modify prototypes." msgstr "" msgid "Invalid function to modify prototypes." msgstr "" msgid "Cannot determine how to subset prototypes." msgstr "" msgid "Invalid function to subset prototypes." msgstr "" msgid "Pclust run: %d" msgstr "" msgid "A hard partition of %d objects into %d classes." 
msgstr "" msgid "A soft partition (degree m = %f) of %d objects into %d classes." msgstr "" msgid "Cannot make new predictions." msgstr "" msgid "Standardization is currently not supported." msgstr "" msgid "Cannot determine prototypes." msgstr "" msgid "Invalid consensus method '%s'." msgstr "" msgid "Invalid dissimilarity method '%s'." msgstr "" msgid "Invalid agreement method '%s'." msgstr "" msgid "SUMT run: %d" msgstr "" msgid "Iteration: 0 Rho: %g P: %g" msgstr "" msgid "Iteration: %d Rho: %g P: %g" msgstr "" msgid "Not a valid ultrametric." msgstr "" msgid "Given ensemble contains no dissimilarities." msgstr "" msgid "Outer iteration: %d" msgstr "" msgid "Change: u: %g L: %g" msgstr "" msgid "Iteration: %d" msgstr "" msgid "Term: %d" msgstr "" msgid "Change: %g" msgstr "" msgid "Overall change: u: %g L: %g" msgstr "" msgid "An object of virtual class '%s', with representation:" msgstr "" msgid "An ensemble of %d partition of %d objects." msgid_plural "An ensemble of %d partitions of %d objects." msgstr[0] "" msgstr[1] "" msgid "An ensemble of %d dendrogram of %d objects." msgid_plural "An ensemble of %d dendrograms of %d objects." msgstr[0] "" msgstr[1] "" msgid "An ensemble of %d hierarchy of %d objects." msgid_plural "An ensemble of %d hierarchies of %d objects." msgstr[0] "" msgstr[1] "" msgid "An ensemble with %d element." msgid_plural "An ensemble with %d elements." msgstr[0] "" msgstr[1] "" clue/inst/0000755000175100001440000000000013435045064012201 5ustar hornikusersclue/inst/po/0000755000175100001440000000000012213262407012612 5ustar hornikusersclue/inst/po/en@quot/0000755000175100001440000000000012213262407014225 5ustar hornikusersclue/inst/po/en@quot/LC_MESSAGES/0000755000175100001440000000000012213262407016012 5ustar hornikusersclue/inst/po/en@quot/LC_MESSAGES/R-clue.mo0000644000175100001440000002064013143661614017506 0ustar hornikusersSqL/AHa?X ^ j5v2,X We V : 5O ) , / ! ;. 
8j  % + )5 (_ 2 ) # * *4 _ %| )  & / I T1_*+19"X&{& *Eb%~  94.+Zy"07% ]+j(> 1,F's6/H"k?X  +572m,XW&V~:9-J0x7%C8K ")+)(H2q)#**H%e)&  2 =1H.z+1""*&M&t& #>[%w  94.$Sr&0 7" Z +g  , B !1!,K!'x!4I%R52'M+ D0.N O3,L"AS1)$=E*B#! J;-? > KQ8 PC<G(9:F&7@/H6A hard partition of %d objects into %d classes.A hard partition of %d objects.A hard partition of a cluster ensemble with %d elements into %d classes.A partition of %d objects.A soft partition (degree m = %f) of %d objects into %d classes.A soft partition (degree m = %f) of a cluster ensemble with %d elements into %d classes.AOG run: %dAOS run: %dAll clusterings must have the same number of objects.All elements must have the same number of objects.All given orders must be valid permutations.An ensemble of %d dendrogram of %d objects.An ensemble of %d dendrograms of %d objects.An ensemble of %d hierarchy of %d objects.An ensemble of %d hierarchies of %d objects.An ensemble of %d partition of %d objects.An ensemble of %d partitions of %d objects.An ensemble with %d element.An ensemble with %d elements.An object of virtual class '%s', with representation:Argument 'weights' has negative elements.Argument 'weights' has no positive elements.Argument 'weights' must be compatible with 'x'.Argument 'x' must be a partition.Arguments 'x' and 'y' must have the same number of objects.Can only determine classes of partitions or hierarchies.Can only handle hard partitions.Cannot coerce to 'cl_addtree'.Cannot coerce to 'cl_hard_partition'.Cannot compute consensus of empty ensemble.Cannot compute join of given clusterings.Cannot compute medoid of empty ensemble.Cannot compute medoid partition of empty ensemble.Cannot compute meet of given clusterings.Cannot compute prototype distances.Cannot determine how to modify prototypes.Cannot determine how to subset prototypes.Cannot determine prototypes.Cannot extract object dissimilaritiesCannot infer class ids from given object.Cannot make new predictions.Cannot mix partitions and 
hierarchies.Change: %gChange: u: %g L: %gClass ids must be atomic.Class ids:Criterion:Dendrograms must have the same number of objects.Generic '%s' not defined for "%s" objects.Given ensemble contains no dissimilarities.Hierarchies must have the same number of objects.Invalid agreement method '%s'.Invalid consensus method '%s'.Invalid dissimilarity method '%s'.Invalid function to modify prototypes.Invalid function to subset prototypes.Iteration: %dIteration: %d *** value: %gIteration: %d Rho: %g P: %gIteration: 0 *** value: %gIteration: 0 Rho: %g P: %gIterative projection run: %dIterative reduction run: %dJoin of given n-trees does not exist.Medoid ids:Minimum: %gNo information on dissimilarity in consensus method used.No information on exponent in consensus method used.Non-identical weights currently not supported.Not a valid membership matrix.Not a valid ultrametric.Outer iteration: %dOverall change: u: %g L: %gParameter 'p' must be in [1/2, 1].Partitions must have the same number of objects.Pclust run: %dPlotting not available for elements %s of the ensemble.SUMT run: %dStandardization is currently not supported.Term: %dUnary '%s' not defined for "%s" objects.Value '%s' is not a valid abbreviation for a fuzziness method.Wrong class.k cannot be less than the number of classes in x.x must be a matrix with nonnegative entries.x must not have more rows than columns.Project-Id-Version: clue 0.3-54 POT-Creation-Date: 2017-08-07 11:31 PO-Revision-Date: 2017-08-07 11:31 Last-Translator: Automatically generated Language-Team: none MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Language: en Plural-Forms: nplurals=2; plural=(n != 1); A hard partition of %d objects into %d classes.A hard partition of %d objects.A hard partition of a cluster ensemble with %d elements into %d classes.A partition of %d objects.A soft partition (degree m = %f) of %d objects into %d classes.A soft partition (degree m = %f) of a cluster ensemble with %d 
elements into %d classes.AOG run: %dAOS run: %dAll clusterings must have the same number of objects.All elements must have the same number of objects.All given orders must be valid permutations.An ensemble of %d dendrogram of %d objects.An ensemble of %d dendrograms of %d objects.An ensemble of %d hierarchy of %d objects.An ensemble of %d hierarchies of %d objects.An ensemble of %d partition of %d objects.An ensemble of %d partitions of %d objects.An ensemble with %d element.An ensemble with %d elements.An object of virtual class ‘%s’, with representation:Argument ‘weights’ has negative elements.Argument ‘weights’ has no positive elements.Argument ‘weights’ must be compatible with ‘x’.Argument ‘x’ must be a partition.Arguments ‘x’ and ‘y’ must have the same number of objects.Can only determine classes of partitions or hierarchies.Can only handle hard partitions.Cannot coerce to ‘cl_addtree’.Cannot coerce to ‘cl_hard_partition’.Cannot compute consensus of empty ensemble.Cannot compute join of given clusterings.Cannot compute medoid of empty ensemble.Cannot compute medoid partition of empty ensemble.Cannot compute meet of given clusterings.Cannot compute prototype distances.Cannot determine how to modify prototypes.Cannot determine how to subset prototypes.Cannot determine prototypes.Cannot extract object dissimilaritiesCannot infer class ids from given object.Cannot make new predictions.Cannot mix partitions and hierarchies.Change: %gChange: u: %g L: %gClass ids must be atomic.Class ids:Criterion:Dendrograms must have the same number of objects.Generic ‘%s’ not defined for "%s" objects.Given ensemble contains no dissimilarities.Hierarchies must have the same number of objects.Invalid agreement method ‘%s’.Invalid consensus method ‘%s’.Invalid dissimilarity method ‘%s’.Invalid function to modify prototypes.Invalid function to subset prototypes.Iteration: %dIteration: %d *** value: %gIteration: %d Rho: %g P: %gIteration: 0 *** value: %gIteration: 0 Rho: %g P: 
%gIterative projection run: %dIterative reduction run: %dJoin of given n-trees does not exist.Medoid ids:Minimum: %gNo information on dissimilarity in consensus method used.No information on exponent in consensus method used.Non-identical weights currently not supported.Not a valid membership matrix.Not a valid ultrametric.Outer iteration: %dOverall change: u: %g L: %gParameter ‘p’ must be in [1/2, 1].Partitions must have the same number of objects.Pclust run: %dPlotting not available for elements %s of the ensemble.SUMT run: %dStandardization is currently not supported.Term: %dUnary ‘%s’ not defined for "%s" objects.Value ‘%s’ is not a valid abbreviation for a fuzziness method.Wrong class.k cannot be less than the number of classes in x.x must be a matrix with nonnegative entries.x must not have more rows than columns.clue/inst/CITATION0000644000175100001440000000165012612741643013342 0ustar hornikuserscitHeader("To cite in publications use:") ## R >= 2.8.0 passes package metadata to citation(). 
if(!exists("meta") || is.null(meta)) meta <- packageDescription("clue") year <- sub("-.*", "", meta$Date) note <- sprintf("R package version %s", meta$Version) bibentry("Manual", title = "clue: Cluster ensembles", author = person("Kurt", "Hornik", email = "Kurt.Hornik@R-project.org"), year = year, note = note, url = "https://CRAN.R-project.org/package=clue" ) bibentry("Article", title = "A {CLUE} for {CLUster Ensembles}", author = person("Kurt", "Hornik", email = "Kurt.Hornik@R-project.org"), year = 2005, journal = "Journal of Statistical Software", volume = 14, number = 12, month = "September", doi = "10.18637/jss.v014.i12" ) clue/inst/doc/0000755000175100001440000000000013435045064012746 5ustar hornikusersclue/inst/doc/clue.R0000644000175100001440000001741013435045064014024 0ustar hornikusers### R code from vignette source 'clue.Rnw' ### Encoding: UTF-8 ################################################### ### code chunk number 1: clue.Rnw:40-42 ################################################### options(width = 60) library("clue") ################################################### ### code chunk number 2: clue.Rnw:310-319 ################################################### cl_class_ids.glvq <- function(x) as.cl_class_ids(x$class_ids) is.cl_partition.glvq <- function(x) TRUE is.cl_hard_partition.glvq <- function(x) TRUE ################################################### ### code chunk number 3: Cassini-data (eval = FALSE) ################################################### ## data("Cassini") ## plot(Cassini$x, col = as.integer(Cassini$classes), ## xlab = "", ylab = "") ################################################### ### code chunk number 4: clue.Rnw:889-890 ################################################### data("Cassini") plot(Cassini$x, col = as.integer(Cassini$classes), xlab = "", ylab = "") ################################################### ### code chunk number 5: CKME (eval = FALSE) ################################################### ## data("CKME") 
## plot(hclust(cl_dissimilarity(CKME)), labels = FALSE) ################################################### ### code chunk number 6: clue.Rnw:903-904 ################################################### data("CKME") plot(hclust(cl_dissimilarity(CKME)), labels = FALSE) ################################################### ### code chunk number 7: clue.Rnw:914-916 ################################################### m1 <- cl_medoid(CKME) table(Medoid = cl_class_ids(m1), "True Classes" = Cassini$classes) ################################################### ### code chunk number 8: Cassini-medoid (eval = FALSE) ################################################### ## plot(Cassini$x, col = cl_class_ids(m1), xlab = "", ylab = "") ################################################### ### code chunk number 9: clue.Rnw:924-925 ################################################### plot(Cassini$x, col = cl_class_ids(m1), xlab = "", ylab = "") ################################################### ### code chunk number 10: clue.Rnw:934-936 ################################################### set.seed(1234) m2 <- cl_consensus(CKME) ################################################### ### code chunk number 11: clue.Rnw:941-942 ################################################### table(Consensus = cl_class_ids(m2), "True Classes" = Cassini$classes) ################################################### ### code chunk number 12: Cassini-mean (eval = FALSE) ################################################### ## plot(Cassini$x, col = cl_class_ids(m2), xlab = "", ylab = "") ################################################### ### code chunk number 13: clue.Rnw:950-951 ################################################### plot(Cassini$x, col = cl_class_ids(m2), xlab = "", ylab = "") ################################################### ### code chunk number 14: clue.Rnw:984-989 ################################################### data("GVME") GVME set.seed(1) m1 <- cl_consensus(GVME, method = "GV1", control = 
list(k = 3, verbose = TRUE)) ################################################### ### code chunk number 15: clue.Rnw:993-994 ################################################### mean(cl_dissimilarity(GVME, m1, "GV1") ^ 2) ################################################### ### code chunk number 16: clue.Rnw:998-1002 ################################################### data("GVME_Consensus") m2 <- GVME_Consensus[["MF1/3"]] mean(cl_dissimilarity(GVME, m2, "GV1") ^ 2) table(CLUE = cl_class_ids(m1), GV2001 = cl_class_ids(m2)) ################################################### ### code chunk number 17: clue.Rnw:1009-1012 ################################################### set.seed(1) m1 <- cl_consensus(GVME, method = "GV1", control = list(k = 2, verbose = TRUE)) ################################################### ### code chunk number 18: clue.Rnw:1016-1019 ################################################### mean(cl_dissimilarity(GVME, m1, "GV1") ^ 2) m2 <- GVME_Consensus[["MF1/2"]] mean(cl_dissimilarity(GVME, m2, "GV1") ^ 2) ################################################### ### code chunk number 19: clue.Rnw:1022-1023 ################################################### max(abs(cl_membership(m1) - cl_membership(m2))) ################################################### ### code chunk number 20: clue.Rnw:1027-1029 ################################################### m3 <- cl_consensus(GVME, method = "GV1", control = list(k = 2, verbose = TRUE)) ################################################### ### code chunk number 21: clue.Rnw:1032-1033 ################################################### table(GV1 = cl_class_ids(m1), Euclidean = cl_class_ids(m3)) ################################################### ### code chunk number 22: clue.Rnw:1036-1037 ################################################### rownames(m1)[cl_class_ids(m1) != cl_class_ids(m3)] ################################################### ### code chunk number 23: clue.Rnw:1061-1066 
################################################### data("Kinship82") Kinship82 set.seed(1) m1 <- cl_consensus(Kinship82, method = "GV3", control = list(k = 3, verbose = TRUE)) ################################################### ### code chunk number 24: clue.Rnw:1071-1072 ################################################### mean(cl_dissimilarity(Kinship82, m1, "comem") ^ 2) ################################################### ### code chunk number 25: clue.Rnw:1076-1079 ################################################### data("Kinship82_Consensus") m2 <- Kinship82_Consensus[["JMF"]] mean(cl_dissimilarity(Kinship82, m2, "comem") ^ 2) ################################################### ### code chunk number 26: clue.Rnw:1083-1085 ################################################### cl_dissimilarity(m1, m2, "comem") table(CLUE = cl_class_ids(m1), GV2001 = cl_class_ids(m2)) ################################################### ### code chunk number 27: clue.Rnw:1088-1089 ################################################### cl_fuzziness(cl_ensemble(m1, m2)) ################################################### ### code chunk number 28: clue.Rnw:1109-1111 ################################################### data("Phonemes") d <- as.dist(1 - Phonemes) ################################################### ### code chunk number 29: clue.Rnw:1115-1116 ################################################### u <- ls_fit_ultrametric(d, control = list(verbose = TRUE)) ################################################### ### code chunk number 30: Phonemes (eval = FALSE) ################################################### ## plot(u) ################################################### ### code chunk number 31: clue.Rnw:1126-1127 ################################################### plot(u) ################################################### ### code chunk number 32: clue.Rnw:1137-1138 ################################################### round(cl_dissimilarity(d, u), 4) 
################################################### ### code chunk number 33: clue.Rnw:1141-1146 ################################################### hclust_methods <- c("ward", "single", "complete", "average", "mcquitty") hens <- cl_ensemble(list = lapply(hclust_methods, function(m) hclust(d, m))) names(hens) <- hclust_methods round(sapply(hens, cl_dissimilarity, d), 4) ################################################### ### code chunk number 34: clue.Rnw:1153-1155 ################################################### ahens <- c(L2opt = cl_ensemble(u), hens) round(cl_dissimilarity(ahens, method = "gamma"), 2) clue/inst/doc/clue.pdf0000644000175100001440000153015413435045064014402 0ustar hornikusers%PDF-1.5 % 100 0 obj << /Length 2705 /Filter /FlateDecode >> stream xڅYKw۸WdEc1$L;紉MEQjIԐ$ί},,l}~~G/L[T6έ[&]e,W.1D};鯖f?{>8}GS-,xzibSʖqY>obf, ,8s%l)m~+ J>7u2.$I+yZ&Zxj_eseˬUZDQK Dɖр.?NVN(vHb\82kbCDq/~ h) )ye\\0tʢiQ=O2햫4w'4= 򈓖dF;yD~ koi L% U*i{"'! R@d ճF"z)b>'>OJ{k2+iS}5/@N cTb)h0QV>{DY{<˹+oTc'A4xLG>G&jvmzj6!C39+2;Xw x+Vr1ENVVykd_4 wߞei2ʤ-2<=xu# nl)9jR7l +c<:ාIH?'qE|3%@!3Jnu(5Qݐ(Ed+)+G(-3U\{GR;G&e553ӛUM *yQF9fr<|UBi  OqrR=:'u?-5dس1`@@9EVwZ=Ӆֺn0\Vy~d21fHnL2ꞚYPC4?G$哙ڷ/SU'XSƷ<֔vrOjbZsߗYƁ0'>^9 (#9H|3;H@P dKTXȺoyRBv(CxD [/"f Xhw|=hI:1zz1q IR)dZ51;ВB׃5 q+gsQnhuWRi&Љ.wt>JAD=v%ˎ$[ܧ )*D~׊y0@'݃ y2K} x[Y{C ֿp(M(j!@BWjn.3}^Xr&'A1\laK>?ӌ+_VK=}P =Ћ4w @Z_&Sn [*cP_:+' l;=c*v}6|;{qi-H{ Xl. mU3Oea%Κvޫpö>r]yl37bt^ EkA/2zF¦c=WQ[Ҋ)f"w2$ܣ35t>bqE6]VöM^@O:f`ЬŴOܸyne9x&e^B Z;=H . 
rROwe[өF\&0c/e]΁ˬr[#NzdO&k136_g^%R!qo]WӋ wA75n'߲bɡ_fI_z3^5u&ЙVqY Ko>> stream xZ[o6~$Ǭxb i}hBħe-iί߹%N=}H$9|PM#FZTR8[ ovdqok;[~+n>³G[ol-Cc,mg*ڞ==0ʰqvfqñ7sU{3R4k䠛̪FUBA6xYR WZI~*m ?HW{iUC##m±~VqH5q-o(f UEiN ] i,?=2hSH~@s-RYt4)ip w8Kf5^{kLs0`KȠp荐̀-.o>Lgskhh0{6%={q`a|Ua&X!T)[?eZ"Gp{vESbh؝y(-81 [| y칙l#lrL+14 &G)zO}2 .m~EKxvoM(%Ykc54Ê&)>G7-mj`%VW q'ι;o ZJ#@t*wȷy?O\vk"-b#LbYE&9 d@hFC Vx&~Bǰg-;Rжy4)'^q*+1x Ibcxa|21;Gzh O>"r=-ڑP3Y\[D.Em#fr6|~8QmÜ ;ul ^ ,@7FeQm0vVŊZe60*-#KNO-_o`ď֕>3ҬjHoxٷI:6((HFȗeqB fNHYS!!E]F9*M+} RP+iT(94Qrھ?7}W1 |m/;nB2MfBejF o\zl@b._JuݟMJUlw^}%u.+g  As `wFA#A=)ci9Ñ'ցe܀ma[_MG$ikQc#-[.A ГGS4_'_>>6"AQ\ASҠ 쵂]R^.Ax!T!T#yM66-]I.0}o剱O˸njK󙐱Ϗ!|̶%PRL3)ZiW bhE̖0̰>1i 8E )y`Vp!&O*œn(T*{xCث{Y,M뿄(}͌Ώ c@;J@+sH=y4qBⲡ$F2&?DJ9e/EU2C{H Ħ>6vi+=ڕgxk!+ h*/}_+ VgGsM]ըef(Tc 蠒r~YBz|$tg'd\$OIEàP{O4νݥ^WJ`[REc]̣#9M;|2#)+N*M\ƶsk0b|SjcBV.7I#"5~=s=u \8lH`ޟE`Iw>d\ٲ'9Ěg>b pˍy<?G)ew3o`3R42Flה0\uxFد/X?=Y(:y5N|I5sd\-s 2XHi=TN\9Z2]AvfU> g4Uj_XtS G#QͱTC!eT=:e)\H s8Ԝ ` Utr@6G=ɤ:*L}1nʩ!fCCkr/ ! t>|$4e&gg'UFQ gSxLX~.$)CG)c.'lC,,@:głUћ:znT@r9$aStpXi|4sU2nX]3$mpR?5ނ.tXdrloJI >Wιm1:7䉅C_pJ,'J@7Xi/f6,z%>[j?p7uC@A`"O\OC % qk#att; >,ڊ*Yk<& {q+ uc!B|%54#C=O H0F! *s拣iD/Z[7tBj7l㔍? '_%R8; _im 8JTڝ D}ilMj;tg HO`r輤.E0G+Mk% tI-\ Wϥ endstream endobj 150 0 obj << /Length 3704 /Filter /FlateDecode >> stream xڽZ[o6~*F E_Z{Xy.>Ƕ}s\Iʢ@)j8$sfI*L'&Y^=x_fբb-E,lOo7 ?khCwh3vt#KM|52/l}V/>)3gZ^|=5Jur_һ1ISʾ-5vxM~'SA qvXs]c{ )}f4zE0{cS gӘ&/VH{g氢}|xu&JLk 'mbRc:&2mFKB3t޸ߞf'iBɇB$ LISY-"U3šT H@ ̔t⩽⩃U8|mY rĺGw-f)Oܫ8skZzY/,9hFLGa;Y'Nx . mߩY}յjDD T!qNŻ@J*$HJqU,Iki[s~2/g!݉"Di:[Z:ѪGkNSgǞ-˂F3~ai|*:f6C8jbI~يo6qGAܝATB#-N|-?џ,c\6\EoZ -&ܚpH/'8Sx|VϦc.,11 M=-\.5z֓*^'kUXºy C7R0nN7"6iN[Лh8NYgk".zKH!A2<'ˇG9:,wn08.qpxEujdlz*ggHD`O4u NPuK=^р/tK$M iqJy0xmW02+W+!OO]c)KGR~.N6fL($vq($]46LR’)4T$T'Q*\*,v/-T"'Xur@;,5I-)&% tiRb!BE65*'eYH(GjҤWx>\ eۑG2О2/,k9wdVZ&#GN)ŠTeդ(v$ÚG\zA+4d;D/ ?8 }4i28#B!a|<$qg+W;0[MQF6"tj"5NRk134, "? 
h@7X-l9" o9p2h^iuՓ9'*,J`¡6 {^x!n񏏱?)&AkLI=V?cGPxdֹL.m8 LaRVj(c!6AxLD\ h=@HFԣp>9Syr/42Hrm&ѩMR7.trfѽ^U$*-+⛃@B2*gYCG^eIQ nZ!kg^F^H) NA$9zOP& K?kG~7(:Q- *D 84l]JX'[%' ~<9ˍ@]͝%`"NwJ pخ#!#-wޑuNnHvq,t(]M.YƑRS YQ'0 /}tchXp&+lU\2ifNq8B,]V 'ҾL0 f]9#eCAhI斧- H.~+b(yWːZz`hwYqN)"1\CR9pzL`T4)G(BJT~% ycѽlFkG0+_ʫ*:xcʖ!!6Z>$zgT}}fjv%7eLet pׅ((_9V"ΉeH=F.*Ac-invr+yq̅bRERN\IL߸UC(e4 C nq/R;onZU΁r 6i" E ZVyxqvk sfr'LC&%bZ\iEpYKlLq_*Fa.}IкR+]Z"ʑ `xBnZ5hʷF}EY XW8(_a}.E.rqocP!ɬ.i.~ :;E2Ht3Hk/#r =sv"RY)&IaO(X&d+W&:iF)!=PzvF%[&c; sIђ>[ݝu2mD:,t.ՃxU4 R+JDl9ȅ{d%%?3L8pC$0IV;Ivw'>)E!#OTJ[IIxsPU_F٦.ޗYwSAUĖtI4DŽ`Q1kyc6 }qGךсgl= Mckg\=|9L4_yį%8\ h5~t-wQ>c{rHnr3a[|}nPv]qÝMP1|P$uP<);9-+6#/oÎ'9kE`4̓-`쪾|j닠5\$Unc5֛\$kjq|vcͭz]Oa!<rIU@!  an GU5'Q"_|7![ endstream endobj 171 0 obj << /Length 4385 /Filter /FlateDecode >> stream xڽ[s_'zzbdҙӛ_9OMCKX\Qs/ پڇ;X,݅~{ŗE[N eL8ԦZ]ru]/u]x~ .~/5.g3H'hJпGR߾R7@YzP-NyX%^_j_|o^m#\"\ت)p.u_60<6l=$hm}-!p= W bTֲ(CQ"^楉U]uה3z}ۚ=#I" t^= t8l!O H˺uS;Ԥ'VEH&)G`a/V 6=?Z:Wf,AcwVtAԦ/ *|ۯ+tFB# @6,ÁՂ> zmP'S=vA!%zȏ(<>HIqdlL< TX)M]O\}ɸcZ;8ZYXϷDOm8v2-qX=UvPn& |4YeGj&?[YD64ΘQ.J6or))-[k3jtn$AS_lprU#2Ɉ(זs9BNPOTgZԢ*]+"a-SGS9!|+l/wb̦j*;ZSTګEbxp!<> cMǞ> dtZkɃNd&ܓGPyEBu4.& ?%/С~G%!*E 0G D$X]tdyJN[6~/e LitVO 鑋>9r{upZUj#b56W`6,z f `.fvN.Bnތ.?4"2np~awBD@qׄ!t6M&Qg* KL&;{95M`Ď8?> !sȩGbzG"wNoӘ5Ϫ E[eÅ섓3&@jߒ $oHs4ud@1j"'S|r`robz2Ŵ]xM8zzq. 
R=f,$)"eꄁGa`"lmgѲm1sBeiY{Hܳ EN&FqBʏɞ pK߲Dv SZ3\8,p9?HeuGHt?FoXP7߇u3M37.>u|̖JIB&?TIڏR()b7V֝%$j?4uaCp4?oO&Wу_06z mUi +;| Q8Nq[d?[}tWHE#Qx`@4m}d&ԏ!h|CImLƒ>*bp^q36%X€hP#Z4&e;nY' SI-xG]f+~ i)o5Jm9T}+rvl?$ i;_VqJ1S$~~5c0";pPŃPUؼ&>d'5h)$F?H#`4" c.Ų SKxD]LΟ+jb%fb51 K~C"0mj8{R-Q~Ga J1IfQАj8{TZe3\$AO$L3ՏfuE^!>fS'0Վ u9g{~#Xt)ʫҌb|Y52$ʚ>$.=Ru֦2GvR]H3dhVw\%;qEg4@}~_ i> I5m46gKZE2b Ow5̙me+6?fZU2ꇪ5c-蘝lvV|4 1)a:dP(@Z@^;b6,3R@(T`C&҉5end]urU1=X0oR?Tת2%.јVaLL%CBןh[[*fktPjwRD$͝~p]xZ3S鶴?e+-:N {; 1Q!,4W ԅ1c|Nʨ4͐pLӒ~P]XPogm?Hgu ~e*^ŴqƔZCTU\+w 6E%z u Y$Ѿ(lrk14C( ,zɱK 7.W B6I|6Jec%Ӓak]mIcqq,NR᧼M#o:{.SoyqmeY^6VqD8MB,MS1&bD~i%Uޖ@dMMb<@`,HzVb r41?M_ JPVWIUet+5G>wd0VKj03HNhDR%cSk bű>8ʙ-}~ϞL$fMnk+#]'4yLoTLSj:q2GRX t!mJyeSB u+kGζ_O.&V|xe 0HyVCI5$M"דr" VM*](Ļђ[^6j^&7}WZC8*[~/jL!i=)W% W<6Sr G4h7REҹX l}kx6IKhߦ|Z/nAOR&܄k FpI)0%g&Jq(+\lmhCC˅r .Rؾgcp.K@ `@(,߂s쁶D]>-Dr1=߉js7er? 1 3uBT%wv&:&Ѫ|39чO7r5N٪j2TJ= wY 3[ t}p D&LyL[.V`6/ߕNr8.ئkSx(!=pq@&I/-Vy|IX6z<ӥ<ȦHU3dT>@NJtg/0Fw&7rmttE-}jLoM~Xi'Zqlzs91/U3U+H21tT櫺*#6JTIVM\O~S{~6.jtY<tB!To10^+_B3\]*L3+W_G endstream endobj 2 0 obj << /Type /ObjStm /N 100 /First 789 /Length 2303 /Filter /FlateDecode >> stream xZmo8_0J(hK]V⨍[ $y_HFMKc 9|fhL 昱,2o Lj& 3)e)ggJ1# -!E :_3CfHfŬ3499f̡c0k$`,8s, (4ç'<$:%4QHLVDȤTq`d Ǽ ЏK / FP )H{3TU/*&h0EôŜ#D0 &=^3# I2 A$aWuўotߜn5 궠W}"z qG_+)?t/_ڼlbUWL1 4m^3ק>nuXczcpI\q-;~zפWv6ip iزwQ~e?[֋>Olm5MQ ɐ*_blYW*u$<`Ӽb.9/.fnhI=,V~bP4U-<|ӛO|~Τ0]UW>"@ vE{B-_->\]l>p0vN9E-g6#a~Vdo/#5q>4芀,v˲ݠABB"{-{-{-{-' vpfi=(/f7XyD,.1Nh˖}{LAENsN ǣ5hvDzhs˷K׮s4R܃mg(3)-縐7`rDpL0åM$XS:Y+ŕtp;"B s@ Ѱ"?A,(N,^*!Ą/'<F=[ZC6$8cI]f ,jiA>-a_HD}G!a%au~;s'rwyVrlL"Y$C u?ΗJ92!>c޳GG   h7QPcRťۈtR?'@i >.[܄q ai@+7ɝMu9AF5Pm݁טЉK,6a lW0U.u񾮖9v->_4v<*,Kb8PFI%=fA>IeIN|HA$ӜY]ga 2[X5O K%nՂ#4Mo;)4? 
n'ɉnib;oH#b2 wͷ)2\,Ks@5R-L:/`j!R|!&36"'KnMl 9#6^q) mwpkuYpoQ3!zst˩:G_ ]S"m-g=YGL[%'] 0= "-TՔn kTD$}bTF6ľ*8 _|L].TC*hڭ&WYt4c6VwޝO?[нW{=&k>ɔٔ\>0el̈Xm8BQq~mXYY_:ߏSMfE5,]D$5r Okǭ~M}<~,a-PݠE3$䈈RuvZ̦c=w iMQ{(i_IF\i;̏ͧ^g]6^Ê?9,XGei9YrY{U.\xtŜoy5K!so/j7des݇)8a+?m endstream endobj 197 0 obj << /Length 3386 /Filter /FlateDecode >> stream xZY~ׯXc *vc'NuR(傖X.$!HK_fq*y`'_g gWgZU,B.gȮf:;l=-Nݵ|"mUf]Mo*5NifʬKگDiFRӂ}Kp:.ipGyp:{>'r݊6{g\kUy9 V†t@ڶʾ tC}yM9*&dck`y9yJWa:!~pSt7aO1A!6l q@LS`xKIu>bsId=e ryJpd݁B- 1gX%D^f_AD9 +]T*bgE|΍-ժl:i =Uٗ6t` %$LK?b0^%:PP߀ΟOT@zXe$Z_[L"cm?dqb9ܳU'?N!ڍ҃D$U3 L ̌OefU2Lp$qk8KNOZtA>K0B& (:Yp] S1 nixb|HGxˍk@~n:xT,]`IʡA~5ٷ(kw 2KC*_%(ҏdHOQNqby*zUv$q/kjAT)&)C%gѨnB}GLk (7X uȩ&u:LMFoD5?}]K-C2U^U~IҜKUG~vMDE¼#;U)ӎE`חkGt& i a2Oc~2E-T~y TĠ7 ygDĠV07wTd9kT-˚䛎:v3cv.qңXlO}REDraj{*ub$~W,C=@ 'A$X@xI91HH8眀eýs2u|'(Ng#GCGE)u0kbsZ;FXǷm]3%oI6nj.p5Ew'혛WhBX ;1 N!څXq z@ PpgTʾ()/S"US£uh&2t] w}ߋ<!G(ӝ>Ra[kVl)RnΰPzFIl ߴA0T#s"ϛÒӚ(/៤9`azKL)Zꈰ?PLWL^_Q Po<>$v 9H$;DuDj|Q$;\nv#&&40ܻ0AcdLbByt 5/ [UrBzg/q"akį5BsZXo7nQkp?\~Dʛ:èFDK yvAV cif l(&LGؕ)]Ā]VnPPģw3~lR{ ֤j/A;#iw<[lV}l]X츭W_a~J̏9oAW&1 {hOS(}WISIz?;>X8y.2z1;a$=ʶ'{Y^(N0+LM|׃S.O99D4Ml,APH-'}>bj.5'Yc]ϏEter0GJ@c d:jm/(W=M`vD.FbKMf1G3D ݼ,L;ryv)!!"< |5@+} FS,qSO^ ؕZpdm+%\*!:T> stream xڵZKoFWs&b"6hYVq~ֳ)Q${E_UokM[-KSW]kSqq_/gln /k~wJ~wAo}-vu#5XW)%ޛ*rߜvٯ𷇿& ~}?7J9nSZy6Yǭ9A=QSnV8g\ͬ5I}mpiT Ts{r \H3 T/_ǵ-ȼn@@*2C#?:t'xYJg-z,.-8YWO0|DArISͼ>;k0yD{PLQdAI=㊸R>҇K)J`5Z~mڊt.zdag_VsAJ06l{YgHn3Jݚk|Um'➙UY"}Iǥ.gcG6;&\@A'gvwuD]d딹ɛϑMDok$n5tptU6x7t *3EYUM,AqVRi9 Sg\ǂ; (r+{SՑT4NlƮ*^w ypk!z7[)θg[~!\%|zIO*rʖc$G|ay?~NM_]5)˫):G-8`K =\0=wLcF|Xw>06LĤYݞG0Q(4'咼1Οpt:u2m_㞲[zYP7=@[:e%ĺ !Sql6]Ʃ&,T箃Fo3@RJ,=`-|_,.-GF liĂ Bz3:|/k6ڮKs3513!uS)6*IJA#@H- <5\\r)5 .]rZEPN9.BB]K0cJ69>EXʬe4>m:&Y$$MWspu(B=n G܏J&n#WyiR2'pM Abl^⨯.{oJਢeբT@Q{5VBÙÞ׼kFX ?iR6;?)L^e)w\,=pp8!G9lü3zh\p;\@qUR+w4UQ sD$҅mU1G6q9]MDe ~~`ij\! 
ȷb9GjF"=qBa_Հڴ*qˀ.\sN*N8fk'T]<9 8IfI%Gl9Dj/{5xٰHqy) ۝/5ǻX?R;"ؐwyZ=?j!\J{8r$)c;M k%5cha,8::KY˒yGA1JnqZ@|n$ D嶳hr*JMd#~-͉ߘToCJ#Gu\lJc:"JLׁG)}Fl-'8 EuE\QXG6?CO"%uwJs]DU~gBG6V]5)#/-wǸ )!LRXJX-F`ıjaC3*!N20R5qm0q`sk> stream xڽn=_a ΍}I-Rm8C(hH!Z|}u8){l q8sf̙s?櫯߅peLބ`nsyUWer_ͯMWX}z*=kgO[uoj%_\L;|}K/mG-.p6".F[Æg 2"6#u8S؊f o& mnW0{u;k4Ȇ1(Akw0*q&~]քm /= J ɱǭ [h!죸!񺘙wؐ/Fv>^. 0u<ȲEt='ڟ!ZlQQlnFW'һ5|y[a<.nxkeG"Egn #4tF .>ie\~A-^Gt؀=ΐq$ DLסNx97ǯp ]q&gw~;$Z# ܏&&nN90^noJMi;жt\'r$$zF$7*g܏q#_( m z$U !IV|HP~ `&e{(fu*vz\+g 9~n/ސB4҅! >lTPSw#ē$t$TA w'u,0"%ے5B VM V"Gb,0;5roٔb'-.klX2I~QS*6\ĵ1̤!݉VɐQg툛`uY^Π9D4b߫/SwBo@Ʉɾd 8ÎXM'F8W<ɹx/~vn *8l opKf<>kzF?D!DmOiStL~NUFSZ"ľ;Z 鄤ʿ9KS ط{i}DT?sG;fdȋm.h4T '9'~acfl!ϐp9)!гn$xਖxv![WtO%6d?].lx](.}`t v[k|Fs7H2gN K_`ڏJmj;&Hr:FH7cPF~˅Y|A脢aZ/ؘ nqB`O#ض+DO ] 㕌˝D_Wۗ56#>sh:v;PFcJ)]TPہ`jV5]B"I ׊ }!|QeVܰnx+yXphIr4*>CVi >˖]BeTs /27Q|16T"-ˤX. hHY6r ώr "į'bʴ+/q s~VH>OSo47CJ,; j:\'I`;ѭ=[E䛼R0ұ+f<^ R nN/+NLj ɕŵ ˷1;p!;QcS?8 Xi:ѩվͦkP/K;U\.IAh 8u"N+. B ʫhN_4*%QiTG)~N Y.r,Y⤘yCȞw*`%32տsP*ux5ąKgZ\^噯e2ɸYH/΃[͕ðSӉwhNnԇړ%SDCŭRxwv!hfN iMlq"+4yݔԽX9.SlT.7ʠ}fLQցɧǰOrr~ %?ӺcMIoy$ZGzD|2gUܖ~RK"4*G{,K(&Μ֡:n\n JM)4e ׉ZL =ˁxݒBc%`MjTwG}s٫ vάuZmOfT^W KUr,Nb@ ʉbHshXoN|^7 kEg~w Po͂́%2ڲ̭7IޏX p.&>-M +Kb,<įK  ^ K\X%,L,kRZ^ Kd`k^L׍\(.yxhƘ xU ;]BN_;|uZ} 7`3IS 9})]Ss?Fr) lEsV H-=_4 KMx/|ihAȹF1//}H9es A%~yҼt5ϼ*3Q@s^b+̒ + d Ps\=P羲R&u3/#-!,:3;a`GT%,0>Bbz\gäj |AO1% ZI, ^uȣ_ $zBmOٷ]lr ^lV iC' .4J0S4Roa@|,oQ!YQewa}VUHKdGdSV@ dQ|Byh+E?M X } 5`D endstream endobj 242 0 obj << /Length 3944 /Filter /FlateDecode >> stream x[IW0>q"^*;'TrE"1vo[ d)a@mowJW3\R,ϊĺl|50kw}sa9]_oƷ9G֫x @%t>?͌O*g c*y(|B%vɻ[]G&),NKhzqFaqivyop(ryC#gU[hNÍR C`KrPZhXKZ[ =T9x S=_8¾8G+pve9_^mv\JK 뫇FMˋE4z=7Gw>oƑh us6Sj3FI@r'2|AxE[zE'+fR%Ҥo_YtdEJ|moq=HVĞ7c%|>a3xKNXEPxJ9[R^7\{S"K;@RŴ^m{K6]"&2 < grOR IqKN ݓP,TΙn:W?'J}`XSi+fxLkXuh歌$ڵ0"t郞 .?I \r]V qkb帱MӬ d/R٧Eh /RirUv4٪Dz C3 FR&6Ƒæ$RptpDP3,Qhɶ˓^Moe?B!/41~l:8:YWdax08L"CD"(W2I.RwwЋZbB#?ln@$ l Sef|Hsvԉ2R2˹:Ԙ9>lHDaˤ-,Z̢;v@/O/Ztqi {vl`ܭ#/Ll+M75 hn$*i4iHH ӤJb[:\.f{ WE]@MOx*Γ)-3:Ln)8>p璉@\%&gp.j/0J`r 
LJ4n'UJjhH}$7k %7$< "JofDBDd^ :^.! .X;to#!#(I kb6@|i. 2LNl;>=dĩ=pi==|Om,lu,v@q@ʂO\%xY.6m`ho5hコjYhI0!NLCbcBuI=v L[EπnpގBT,W<Q8i 0lO? IHG$ F] Kf Po8`aV:~}@4@$0IaI/5tCC$B ۊ6CfNi&E Pݭ A 'u40&V7-Tst$jdֺQ.o-Pq$x qg4{BB5QDKlѰbUӄޙ3jLDi"tDP2=\>^EipljHzS! _,aVWHsӰ8I%$-n8;[E{x#[*#θDDuYtW@ܣ9";eІPG*S ~0,++9Ip2-b lv2.F}XeN {L7{>`jƒ 5U[#/j`~'"r2I]5ڽX/byCg!!:>2`2_Yf4c1{䑏Jm|h-(J `g͵|pf9ƦoXljqٰ # ԮWv dn:i4 WVg۵LN чb/y#_UAm~pIC5 ;ִ.5%=w;&fH<UuU$noɼ@!bF|`A۩o:Hz8с/<_xa\4qCeT)r+ Pj(}X#؁+~05&ϼr$hm*HZā9&39xv$?)~4圯P gzq~&R3LFCm?%0]hU?o0':?%TIji4$;:ٽ +,2-=Z0N ~ K#%)R/E]JtLO\r+l4(oUYl>1d8v<1@rY͇V}"u)tHE'rOݱ(G!YG|5͍DF{;g0ǛVOØ&eepсOy XG_|gXVqC2b~^'qCV?8UXNuՆy!&rb'.d?"rYP=LHRL:xNJ7K6\Ef%\ ,秉賎#mC0[.2J5N#HA>b3˳oJƑZwE[MiQxm+LC5Ʋzr &s6Xe mX/{2S&(Ӄ/ U9MHe^sMEBNz! whP|-\!rk2m>XF"nPBwng"T3X)D=/Y -Ė&y8ֳyc@KX "jMtnx endstream endobj 269 0 obj << /Length 3811 /Filter /FlateDecode >> stream xڕZI6WrZ A Y5'g.g&s$ZZ:/_DC@[ooE=inr{71֦'ܖzjtf*yfU0-]ҮnEiIvx-ci^'R-RWw~ =:nx+y@g̒9èKƍo[\v'h uL}t=O$t{!"3SLfM]&3cҦ-hYQ8no;~:E|su0x(i==;.6LAF9G NqيēdԘJ˥@#M4LPM\T2-3T8i,""U XۤR|Z_Q[R%[u?!U Zr-uH E'Sg]$$;Y: tur.#DGUmG2Y[eӹI yUInOˢ憰;gDp9M7 "Y~T1YUtR`ױ780BdrT{Yxcέ|V<)Kqc(K-AAά)\j"DxeA6A..YcEQ <ֈ!i2R|.Ѹ);#}SߦlؑAϬk@呴̕Yݐ2GԵ W`lG9!L=wt{2PBxb";lD}/1;D, T/+B)_7 \.P3N~JsQ5E1%|)R#a]-dum!v!ةL3Op4tm)&ԀOm,=z8@[|'pep\KcYM5LV&|DqSjn9"R@Fǵ"5I|ч(<%"Gw<͠23yP^h O2W>[$S\4U=SȲ{<2a x hx$}pccFaY&^3DȦ0D>z=#~h.AЈ5Ov0Ӥ6AWY=n}Υ=F(DDЇ̢2g CA~oIƀ$v%C]C },[v_#9_"QFېYY^yh7WŮb1]\b}'@v $Qo4*GmB,C{Z @i+QL8㠁6û\~La'^Hn;F+s晙ii35ngD6,J%=6+;_vrl㼧ҪG bO3~*=H\54 |Fa(^ =\ EExɈZp>} "/yYiK~=#:# EJ*0-s-)eh Q- PUYwd{K !w24x7S l:!U֗Sbt2. 
4A.TBd E|W6p=q ))/t/NJp(k쵚2^z ׵B6Z]Hb-U4!7Pbx83."z~ŠdE'70a`%LdjR陭KPQCM4>+{4K Mn{2ː\K ݤyuU+rsfJKLW^P(>R=NƲDvB0!jw/i'd!lݼ[0@fqy9P_iMF&iiя/fx3t^w^u\g rs=y3H6\Maԩyw\/q?IEbԝWiѢ@7hI8 l!B9XBH8 oZF?hb߄ k k ?Sw W :h!L_ ,tLcvs%NےG3mAuKuQ(.DLH.=*.xBG J2䪀Em%a oTg7${8{+!BOͳe"AF!BD$ˠ%v2Nםh:EA@?wG?}(7MPpo31L)<rY"1w>  RWQ;'ndQ}tN4 o,J $iNŤa]}V'.c{>rpRٔܝRaYiw%+O#4K^foXԁ$swnq̗4 Fէ,|`9`'>//7GI3}GjN.^?.hߴ;xs"S.e4Xgr,( FC_=L] endstream endobj 295 0 obj << /Length 3650 /Filter /FlateDecode >> stream xڝZY~ <8za{H ^#Hy%q,Jx}VWUoܢS]cB[-U֋b-uҴ{xaySYSeոp R?AfF|,aM[^6~6>Y>-p#vj_FkrtS7-yЯ_Yp&`w\$꺃֦>۔aUFۊxܺB#C 6Mml6F ĺ}BZ_KMoH xI|mC/l?KU)~Dkm參|s*o6Q:ʵi28X]%[52WKXq r aa{w,]iG7P;e _DVmje6,NŽF;Q7?[@`vp=;yYx= D4ث 8D:YS&GVɾ>jLC\FfKD=s`Iliv5`@ O!jݑ~؆Ofkؚͣ@4[:Oi&3eE2߳,8vg[=E}{zy$}8 g'bnH@*vœh:$>=*?*O{n fz2 Ϗ[ Q4FhYp()t;Egƴwޔz&۰"g>[c-sV0@cdnOPQH u$%\,YTB1nfotd3n-&LLp[5cl=pwq 1ak )^OCG(ZaJL *5<ƀpQRFTT]ۨجFuu_)Xaa; Tt`XlU ^ɥ_cNn ,2m:|^OlDPVmjǃmp!izPCyC0$Ŵs|Kj3xh? g qmQ&2`zg,;aiOxDh7fĻ*eތЋ"!n' |N8A`oA^~i5jg0eN'~,BЫ4uYe97]9[R6ilbT[W^OrTS~u5ua/UJ:RJ̀z~1h#{m$h'B!SA;\R:e:( M[tI6vq~l`{F]]*rB  cf7@McNլU[Z`:/?nݮɰ?'+kF5lM8ceOb֝ lJ9?G3%4ǝbułb̩q=%!٠?תN[׆+Bd]I$C8ЖV=%GGX51 2;gRi?F]s>dAy f/#Hu{Tv&%ovv2iFCVd= 7&z4vy^0eY/[֜jI¬*%v<ע _8`,WvQx3I*6( 9aKOǔ /$h:!`@L*WN\VU>wXF]'ED]4PM:;wb, ;1=9MDzcDmU)A5rP?`Q^~I~lR {uSߔ)><:& ȼlBbyY6 mR՜lǀ$ss2QW3-Jkb͹}ݩZUF5>"އ4-C̝rUJoÉȠVy0Źj/i%45<RѺ!]\Kx | 0ǜ mSsbU?Ve2<7/=$2j+ћOP)ėAZHx#޷L,coL³MײSR|{ f?0ʥ; hM_@nuEV|UYw9XX7Qն;+d Bm̿=.seAT{Vb3p]0$)/Wtif}n@9c"+-DTcF]sQ;#bDM%&8ӆ^['L<=2Zөu~&_3v0qfUWNO.5_)MxMhR>κ 222͢) @\U&J]APUGARDisĹh_VB{ضVؑ7mCl_ ՌzH k)JUSWsž|I5lgrK>}reV&_6LiR*_Ogsh,D36BnměV4 4u]SoӜКyۧc\ƪ> gq8dR?w:dր57;RۤU/5>PuJ ]>*)ՕBHj<^’N9^o̶C fQʿ֝IA|} endstream endobj 306 0 obj << /Length 3685 /Filter /FlateDecode >> stream x[Y~УY컹 8^@&F88gFؑ4JקnI]OQd_U}Ul}}ŗYUTVL(U8gָB*3^1_>\=ki w^-T%]Z~+q>xWGXw lqM~ W{=~ZH;a46j+;\Cht~.Adt}_ (Tqhشvq_{r*0;T ZfMHTzw6 fY0\YQ¾PmQ}:5>rY텾k9xg8fM͕>IEF%:dtNTаlož l st)ՅFN%Q`i!|#फoAaND=F1r>Mw1,wZFc:: 4{ܙt᫑~T};ci[BǢ}7B#= wmCK|%Jzb0FH2X\pK2T8LP 
ˆpejpWe_pxG.umz@d@2U]rDpR.Ё:E3WmIPz;F2ѝudt ;S%C_(m FGLЀ_o1+ؓd lQ/gZ1Hvj#gpԚ%˂08t0Vqd7FZ~R!kZ5dBoY1Sacy `{cmy BWC S n{zҞ 0nmֽSl1hbY)Y5cûps݋I)ݶ}n;46&A.U]+pw,y$8C9҉{ݦىĨ2u K;03v1 4sx j 3@Èw%1ӎ*켃ұkrPx硉 0="XPړ6N±-6Y/.f dথtr3.<2%5|N?cK2s" &N ٷ}W[o—8cפ2*):PqH+24/-S)íO"A]Ŀ9݄\$;H^)8?H%:F&a-߅}ɱrjIi@GƘQ )/LDY^ y"iu$EA~e9%6dmtɡkwv1nmVPSe>CUy&t=-ũ)>&Lu.ˇ4gCdJ?הr /O&QKۭbHoo⺚}WK^O;Uar>Yj:SQunIg t'Sg3*d-awpSMcE$ llQhc#ܘ} kˀPY9֕겮o޻j-Ʈdz Tn\[gI03|qWC!(kf8667Pq<|p?ɂ!S(GE|:wd -d8 > #q;bq֜sМToNgmsɁꓼ(>C\Eӳ~kj9cxg Es]Q0-?k#dh0ZSE*.zs#-e{~<4>I/MZ9 $F'6%܈ோ :wXٌ1oRY8)ݯLF6vr)^YB=KSXe!9T]P.oi#I[sïZQň. T!󉂤GBUZvj\wИE_{%9Q? ElUJXzd\QL=߱ N>ȈB3tϏg/XEO:oTҳ'`/sI 7 s-h/I 2S endstream endobj 180 0 obj << /Type /ObjStm /N 100 /First 914 /Length 2819 /Filter /FlateDecode >> stream x[m9_wU~Eh%`nF;ćfD(/bSN2anD34u۩~lm(PFRLYq格ԓUѡM"mDʚ$NYrh l z/-AHF%8eEd!/Ely+(̊R))lNGQB`d nb!ʢȢMc %118"tȉ\(%^9K 'd z+N@M9W1DEã!}bYF 6@2^1uFG2,Iwh"0(/&0 &;Dobq*(mJIv19ɵsaT .ʤָ 2 g,cpU@"\D#`͈<%3C25kP<+Ѭsc2w}7?b AeGMaHz QGܓv)=h EXAҨjP(Q ((i^@A?P-XH\9,vQ.G~&_>jdh֣>[܌&Ou|kdپc ̻u}VoّuGQGSi_OfOjr&/FM߮fZ_7u|Z.|||/*o9ufV'ox(^ɿ_ @xum7u*[~<R񢔱TZͥbVXS"s [0^|\]59?{&kpϫh 8z{ʕ\5UjM`{U}=6v fjڵs[Ŵ 1]ɥt :%VRz6,rs}7.aKItj96}Uz,f=ղZm>̛#2& VgI"M^S9-7ҧ9@C׮d8Uc-> [ XcNgx-8K5lUݬ ZÛaVsΧ.ŤC2HL0 Y̶tpax3Xm؞P jv@["&/=P !iȨ s!\npENSpXX-Jâ㋏ͼ*``ti3Y mm"\b^s܃jrT6zÛaSuvl%L?ҷ,.f!$k VⓃ5ti׫ӲszP*h ( D& =6/;?Vi5C<v2g\K:mtCZ5v k>%̾]?k?TceC5:ȲוTJ.+/egd㯫RͧA5t GspVbij]2)8ۃ+L$Z#vlAz^7͞I4ƞ4V+1z^Wk`v :>09CI6Nom|ۗ#Ĵ@ZB2_M/icmTH)&e@ooCZ[crX `X)8۟XGGMHwH%"\Hve!"TJGʸ_GPJ\_QC,r}\_"y E^(BP0h(@F B pzh~[ 2\Cpa MFȰZ3( , ŷ0=`WV~ĺ}_tn@kF zp `x-`NIRvtSYO>e:N:bў:5Kf.JeX vA$㸨-mmOtN?Ljhͮ3f<ʼng @ N[ Dv엳Ja<ESo/6dB[GnK{sHTPR u=ʒ '̳26s Õ%c I($ޱژ6#P-KacVϯ$-*'h ċÌ%P$N1!z|ԫj>o>d2Y2$'>Kl,Oxz}મװ*%;Xwcrz6ba!0q^͖7РAGI I{&"JGBEЬ$DnqFirdYL!Yb$s`^I\ 3]'T܀m=gdp9nI`>$c}~HTχlkdS*ɦTM$RI2d:ԹKiH>1M5n?aHCUK94XUp1@1Ǿ'"~+Ñael^`qD/$rmFh́Y0mI1/`3J{>8H9_t홋0§v, d[=MS+[k~/S endstream endobj 331 0 obj << /Length 4038 /Filter /FlateDecode >> stream x[Ys7~ϯ`剪Lṕ+9"$[F-*y߾}!Ard@F|5ySjtrʍ΍-GW? 
mgg?_|ZGo2U1R,HQ7u(QNQ)XYVK Vԕ6mk2uw5;h2 ] ~.B6j ^fd &@R*!t\=ʪ 73zi\WU:jYfO%?l` 4-j|s{y+|3LȢ>3Wu3ʌ<Y* wĊRƸy_n%nRU* ʔ4qkQh(e}:}uV},n" )acFu [EUmN4 c ղ_%D7 Z;خ;ޟʭ l[H LxGN8\h5SRSS`x~] vωF7je bzi=!,15hDX2pKN*=myNG2fSzM@H:+ ~"{ VDo+*FW2i˃UL3~uRW\*B%䨭ǿa7-lD `/_ʞ> }-TWUy3xg_>KU K/>ϼqn&JV4hƄ iyjAWk\U;MyZ^s,HxM33Cqu3FNg+93Ƨ / o)"˞lA: /Gh> $.U:rLNB2 4ECy#[y%Lyɗ[ދ? c ZV.$J tBYYkqiXDj׀NT ;&yɯzLK|V\@Ս_DZ|8顗Ƣ{:f&`ArP|V/ɶI5 .!B[ݯONV卵6r*OR {\E+b~mܑ7s?nD,p1w+rWxU(- Q鸯h.7$.@P [R3Ns/(QMM<ʿcAT v)fƇu IoY%\_{YzUy"~O;퍴%g0quJ3>~[ɷwC0Z=ӭy>Ս pLpf0Bgϖ/$s Z3xe1k< ;]'>*g^xڸ.D5-~xQnAl1 9a~.c;B@6·ðI<<uͲX eYF++N[)yl*:\*7&`rUP4j҅sÆ:ӫ?J fiV\r7ychH@<ʐU UO+ϓؘ՟EzrVM"Q?q4m}Runa/z(opo V%`}ٽ#xۙ? A:%@!X5=ftA:JK|$r3wɣUX{l3y}!JkOpp+1@;g!䭗>5b TA茩i2%G44Ϋ@d6['Yذu|Ċ}6JÈy&N;zMHEq;f4{ 6 KKpjaTYDf؅ 'GO0GAH~rB9aPi\@ ';YeTquSOBarCY|/-8`÷3ԍ,ŐfJ9inMUDmIx&Γ"z.Ey| z<`t2a94{MMVٰݑr7V<9h ɝ=/Oqd8Z5j}BܧO꡿'0)v.H8{ B^ U{8 u%jbTn"/ǩN~=#,' |sO]>3]8tڭebiCش?pIAN{鋌`5Q[F3|`shW7b~k"MFE_zj%%gW ?Y@C {`|SːZ*|̳&yK ;nmz ϴ< ^͖t!_סH…V(7@<'Pc v qwVDYD6vv}Aizpb lNT9lw|"Ԛhb VvƷA, m]G Q0TE`8AOrQbmИiOќK' 6lbxg:fXi,فWI+*j|ކ_pYHnÞL;&~'PTqO(pAc5˝Η(\e [a fsP5,aytŠzmB3HD2Ln\10U j;3efx2}GRos2qv^%6.B%C9K OTS#&tKOo()#J.GJXCFiA ЪT.9>pc Ec?}^ŀ;X^YQ*fRnbz|_MSS]kU]U!Tvm|z'j^GmFOF)zFMRIfB7+<o6G>#7v`8uq(O u~I#ٝy{⫷Pv:Hw;pN>s~d'#np}=C:uJ@ZA _QI@f>@ƺ 7^ .\vp-IOVQBW.b㯵v &tv%Km gR߇,, VY, d&<1TxHMHae'!e..e|y$7 َ}t><,!=ҀKx{0KAGtoB~TߖLz@ȂdOl[3#&"Fk H +MwkhvrFӠQDwO+aQz/ =}$B 9Uf=uT-?rw ƃ#!F NZes1Cm|! 
7nCٝ'r7ݓA0, 4^|_[O endstream endobj 362 0 obj << /Length 4267 /Filter /FlateDecode >> stream xڵ[ܶO{W@mҸ53P)ݮ>.8_yޝS}8yf8g/E[VڋBš\\-/={s)fkru/a)~4fmઇ;c;e;ۮiߠ?W?{!t:'c+gIYu2XԞH9m |~UiFFFc+!HSקl;v2 t"<u;M%Z3Yt_o¢ŅL#KѰyB@׳(}ܒl 7dt7~ ON$n$?ptVƧ':˄"]4 rUuTTe]2ѱ~@v$=OPr;F9y˞GHV|p;a״ywmt^HޘU͸=r*isyzE>HMk" (h6+@:%,U^?o(&Wl`yS뿄=+LmK*bb@@ϔ 64 ޱ:=JjlJlB 2HޡщQdž#F BY$CNW>J_n.TtOe_c`O7 Cd&\'L@hR^p.BGDO>J(bYcRJhfKr(Špkf_-R}2'A λƑg¨μq> I0qTG"k uQpwl2ԩ,azk?O3 V*әJ]+[&zCUJ Xj !՝|B%6Zmr}03U֮F<]-[B̕@vf|\]Ѣot8&'oH=!h&z|E̺[/} 5\n P!gq ;g+k-UBє-|=4xe>''1B~4ًfpw{/!u8r,Pd h:Bm*Mgpĺ:9/z_+uehRm,=*א |rގlPmWȪޮw [)lJ%ׄ}zN / 5D( 3s^q.+B@)27eܻqJ7}y9Xӄ?_Ò`6(ܑږU, 7A, LO|[fǢ0䣭6:bI)QcF^څ3'y@䜻{v=Ç{yŸn<pλ8>&his4۽njȩ]kI)'{*r̮j̎Sd~ރD1*@u&o>l?I_H;yd -*Ɣ|Vk[S*u QѓӇr"d x5ס-b @Di$@4M;[2@>>V O$DR-B=-=lh I} XQqGc )P/;#Hy`jDr"IrA M} 0kp*f7? BC;XؑzWIH]Hi?cT Yd(4J8D{ DAȚHHKY;MXNj.$3(^me {Ƚl8kY NB,3C74Xc#" 4z&{tqp ˥\yBd+hcGDzK< h!19q,rdԆ} ږ } @q*)r+.PLᶡz(ʚقܤ/xADcBo_^X rW 5\SϏfs67;n;$~B2.ޠn1?GE<sZCH-\DH!Muf]o1DBc|MA-Y"`i5e~xRZ(ֿ^]hTKGMN⌓Dc*83| /8ucK6R~;@ 6B5ϻ*E=1j M „&p$y`6^_[9^'% JmAP@C6HᏒ @SuSԕ.v4-t&jL4K ӤnxZOyc&[?s à;=~</W 1?ŁY} ;۠cڲ׫CB3([5s[/N/! 
7`\Ƶ ?yNU_9AJ(ٿܯ}jg_C NKvN7)[gL][ڀE> stream xڭ[[o~_!Yg8$hM)ZlѠ) Zlm,嵝_s \ϜssY:WL\ݨԳٿ-^榬~ookLQYcWx?:YQ[c~k[-x~6WUC3Az_Il3>qG|BNakϋ7hHlj6ZuOC@\6xGB{\72/}+ߑ|3Q}s5?>smaϤpn@q;"Nwr5u@)Vs%NyC{]|*>VeiQ~%&|<c({c%:*WM̜bVlSYܯI9UUGH2Cf*uk\5Tvo:T\vv2UUVe5KqufF٦t:5ӝ7+Ω5Y&y M3Q%0e"|A6$,sSY w^v6'ɚܴ!jjԍUEy,z&;^O ĊU`JM0 W;U3=[r4~JbsDPXʺ.п.B0.I/({#DĎsD}rã#"%}#>_Z˔ׯQyO(Y]8]Ԫve TS"R5ݍ7tV86*Rà 򁎔'c|YL ꞛ㝌yCrKk7w=/:2P[8P2 a3]$$/m1O0'_b-ۿ=#K y-<ȰMSBnׯ1N<7h=АJ_Z:+o)RjԼhwd5|?6h7$ Vwx$P2dOdt~5Q3׹p(n$<&Sh挛iڡ A\v;N<jپE /-zD֟Qu >h ϿV4MeD%l:tL\EN׶5\"x9@`Ɯ/8aTz+" i[)Yk;Yq<` BHkz ⁻ji?^u@@ .6 TEWȹ8 w!af͙Z+n'k>&QlC\nޥDlRGٵHkIMxj5,1Nş@g̢趈\s<~i+'l ܑ"}/w~ܑ۬9k *>sc+| a1ȯ@G"` NzBW>vj!A ^Fĸ!:6rXGzh{ Kf\'|{#.RkJZT1#@9!"xmJL{fkA3ЪisV: vf:Be(;"]l(٫kCC8V,^;G  ]M=1 P,AC:U}_>":QHPwrS%'Gn^O1G;0aƅ }o`B,>8*ܲQ|:M=Zq[Os'4Ok*>Pc&>^5H= v]42tLLe-elnu,e b 8V)glJiVگ N+L<~-2*DT M >U)g.u 0Y[lʚYMiwziH&;=g40mlQ`Tg)mzAy%ifC-D OKpL Hf64=u{fr9d& 3S VEzh$53}#xVȟm=3)N냶0],)X41xRTtـ:MRO Cv;3b=Am2{ٚMqf|L[Y|`j@l&SŒqB-;zb_65| }^ϘPS !,JlOՍ; D (|s0O Muc\sA G۴X"aUɥߕ×/k✶N!oxҁ*ɹa/LI4F9>~WXr6wITL|x˂D:Skyy*oCr@̬,NOa̧9UEFIE8 rM6k^VmWn~@p,tjiM˾,O) A ^WQSssm]e5-z(e7ԓ,R"$h^x>k^!~d@HZaBʹqS%\Dܰ8"e.cNu&|>*t`@˙p <&4e~yh bjq1c4%n%N+UgT 8'͠< 88h3W]ƜQ|r`5񒌭2\.C+!|*#m7Ie{g&kz~ ڂ!R[{@c)}DjkF+o=},T endstream endobj 405 0 obj << /Length 1619 /Filter /FlateDecode >> stream xڝXYoF~ >PكDz@Pڤ@%ѲrD1ǿ\bҢd13s}3ig"LZ6++`2-#lDW¿d"}TCd]2J]to+O^%ιJ- Cږx`L!5ߢhQ | Ed '7ćW-í2)S=rtp2JtHܢ>gxměƔD K$S{i T.󁬾STa%V1kR"ebLi NU%r#[ilFvBz>u7J!01lHFV_S0 PU]"하 z؃41[4~3 3v˃^YA>xLY#E7̨P9͘So9H`u2q,#3/)xw '){Tk\3e4H8RAkz)2Q?Kd&'(3(o(,OM-9ގIxqUjU$-1)$/jBN @j( d %Miʝ6,4*ck^ F y<jDm@"")Y:77iZp-yF7~w k `h"6Fkp"$:t r_#< 'o98Vyl wډ7< .]bץ~T+[K=ʦL<'%#6<MLH 2FF:΍5A0niE);yAk\LU3^LnqMzzij ׁ5+:J2+7Mwн #,G.'y(ܰpx?YMC0z8+pb'.0==c"ILzj5p ͢ƾj Cnl /hܢg|dl5+cc?2._ S0ǽǩC!α?* endstream endobj 401 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpnLEEOK/Rbuild423813aac6e7/clue/vignettes/clue-004.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 408 0 R /BBox [0 0 432 432] /Resources << 
/ProcSet [ /PDF /Text ] /Font << /F1 409 0 R/F2 410 0 R>> /ExtGState << >>/ColorSpace << /sRGB 411 0 R >>>> /Length 7904 /Filter /FlateDecode >> stream xM&Yq+ޥ|lA%$[ , }gDnNjrNY_x?~돯yevꣽw{{߳_|ϧWϿTޥ~w~o^OOU?O?ǟ^=N%G-]^~}C?}mwwUGCw?=>\~߽=ַ_^ykw_W}_c&_u{n}K>`Ծk%Qzs=SY^9z{xKCf~g\&*6zmCGW-,Y m?l?_kkkfl״c6rcTm`յ5w~.>{ޫ,߼,{_}j.v`ac~][jع=c9p3d7諗>d+xy5OlzoYdb4dMH+vM+I#,\MӘg/ؤ8X,MCV۹yh`%dId={=.;k,|[u ~ポSׁel]40Uѝ8hKWq%8݋p@ߡ &gkQ,-y%`6{[Qo~SCЭW-stxx8Խmimcwr:"ZX{x4Hc .3rрwwUgEMj}g8(q '#|໷p-K9Zu-A(jzy%m'˻u[kF^g2 "tjDF@,Pd$Cx F˱Q6uF\L3$6%@VTp9o͋fnŊu'VIt'jfG >$B%|䨆rJTѐ\~RB<Y%Ɇ]wbLopc :Md։̘_)QwE0*Js\FS.4wP*qJ w?ćk9ՎWzxv(ԟb=^,CX0St!4ئi|7d)'*go$/\2\Gm83# mx["9ng&bX0. Z]2@D&uPU\U.޶T%ZI+hnY!9^VHצ&,"mD(Y%_ω P[JÐ36[QY:ٕ^GQ3P ",#|XTy'tsb@[H"džP*,"%)B83+LVuCݩ/4+ܕggU^VѸ-f/*YSEh ]s[e25-5#!l%pHկD3HJn|oM蒜cT菨cYl"oּw% "&ioɟ#& ] ʛ0(r0J ]:vg*݌".lZcuQ M<:l DL|Gٮ CEś!KH1՘]]<{ Vf8v SS%1Yy-9ddRw2lIB S H##P;fHP>NK؍'cΞ~%J J7\` ɪk ],aʥl&pJ/~kΪnlYkU4׊OZq±Qeh}nf f[磐C=,vZ Q,$rȹPTۨmKJ 3tưgNG!9nIwIG=1":F~i;{p0'cYN*D(oFxwѸ&DN'FrVYSNr?X <,i|!1eD-9u3r=GXI1K-`BX2)`m6[Ra~gMN(ƥzHJ2MXb>i"*rXb7 )F6~V*ަ姾:"XZ 8P UA0TIA*Ϊ;][Sd!)sjv"q<65kEd=b 0)~vapt7 Pʕ o:bc#vB=u=Qᮣ9uJ1.R6]"]Kx|>*|Գ뫡-'}+{q-c:m-jOuôhKeC`<"nbUS`5TsZ4mIW¶SyŪk3Ou"*R\~&IB{$k:)ֵCvnіN8cM[3@"TP\VQ*,9Uw0&Vmne f9<90ݻm#&h:j`EB(Y@ kTǺcҹ+kzHs] ߠ&bح ـc`X fq2ũ# &\" ̖kY}$ f>>:PcR+?&{0R5!=ޠH]a`v3A DIn5JN|ĥ\cPeO l$UQ䨇G.*ЦkpNjPMqFKjݶJlf %l󣨨(hbFF'S6D2W&K29/P8vYۏ:q>Tc 5-ՄI0+Q6M8'; g#|vmX9HceP w=#miv K[RyVOT+sɖGYMf%=,tSmyGfܸ #q,QlV(8:@-Jm1G϶{GU471,u@i]wQð|,놼:+8D%O JkJ@eFGzCkML AW#50[H_@(Fj>;ƈnvȷbi$6e ..ˏ{FՊVn%ջ|$.[s'6GֶA:'6iG)R\'WjTiIh<*ɊZ) mR:q6;U}\y ?"dm3ue!2_!, tF5i)A|TWYn]rNBzh?O6bV9# Y@96kY>$(2$iE9"F2сΦ^zةh'<U׏ -]>\Aٿ-z/1u)֒2 {&PBdܒԃbϕL|Y}`2NjqNϴa=na'`ͤ"|f!T7gS!օd"h6Ų$-tMs82U#UN hܾiF<#lLT#ÎLN/Beكk r/娸ww&4eK%qG) Y(6Y`Ѧ(TP:*ѫ# NW!Mƛ%W4:/E -"V_7CŬSvT&llt$q6+'%(Q=%=k^>Ey0p,aԘ_i ug%!DzLzdp$"`]WSqBht.¶ RƧ83 uN?}c4_>*>¶,\|qG/,n)@S* Ven-!@6$ . I7FI+3?-z\H(!5 92aIJ]V^V|D!5cZ$eCܞ$.p܌ DQZ (!RBP<\g2> [UDov''ydqwuEd%YN쳴lT$jl7"f.RdUъ kfI'A6e1 :* E2z]jTP}l٢igj_'v{7ؚTޖЮ+e?`ή. 
%.Hr KAuQ.ktI>+]9A{Uؼ⦸{bT=ڨP!da-k )IH1ӯ#w覝UHpJeD}4'biHSE,ZfC# Xf4XOjPgTM-Mk=G}-47eU\NZQ(M8VE;u3w QݵSoXSQ%.n⤟Hll'Lv"A!W=) J*U:FĬKxL0Q}kEd Wo֩J*N֪tC:0t t)=W:DEmY"޽)Lp2~kA4G&N@x&RKmTk䴛6R!#\0ZdލVOι˳]371鴫=^l(, 6BU|@%rmX'w](ZQ^ #tjԩhx# 3J4LҾ%!~k" : ٻVWcRT yMY@Uk@9F giKwhfTpVq e=PeV̚> "$ hft낰;'ai#ʣTsf9 +fգHHD]L^rh#}rК6W"G,WU3ihD̯gV48GPՒɆ* Љ'7O\<\sb ˒wYE߉K]f!vTj TQY{8`Á2V?=UUTY=X-1 )XI:] 4iFz 7^EHapR2m u]FImŒ a]97H~(Qh?Ͽ~֊/¢׿~~_x')HtxՇ[߿??j*MϿ^.~ǯ~r>?ߗk?Y^YzJgO6j?}y8_]^ܑV-7BR"㫫z̴f^5Ъկ=5<:~_Ͽyt_Wa{E~'o^l1loZ&?;>wO/g?m^_&闟d  endstream endobj 413 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 421 0 obj << /Length 1214 /Filter /FlateDecode >> stream xڵWY6~}J+:@z䡈<4E ۊcԒKnsѢnZ0$RǙ\V^?H4 4V,LiI6!\kv;R bv{|[|1ak)(ka-< GvNe3!] ʃ-htjBU`/fqN$Rv=E3ݳ[^$0kiQxRaP"cߋU:i8%"̂\tZ4YW](JDRy?̃D2e)>#)Wxs/4XVV4Gr *#Yfl>=oy-Nd<_3<R^<|ZxȎ5<օzhyfT"o9] 7(Xq̥3?F6-Zp1y6wdhr'¨bHJHʭj{L۠t 25)|h;lJvy[1~z+x0?"ۃT_y  {uǗĜ#$A #+Y Rn'T0? 
U/Ņ(wr$~1d>M,oWըrG@<h<@;9*?g> ?yi;RZUs}穸q3T:⮔Зqܕ,GH /NI Zgo;$XrVt|.>Ca2LUl1~F|K(lȘ 8V^GީZ$XyP,vg"1JyZU&_m4wƬpbf7=w*qlg3rllp8tӔ`ReGi!zla_th`.O*-&esdvE`i:o;K*J^E C{L`vOӐƫqŰi(sآ\Eqδ8;\4̳*HHqvT `_ endstream endobj 416 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpnLEEOK/Rbuild423813aac6e7/clue/vignettes/clue-006.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 423 0 R /BBox [0 0 432 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 424 0 R/F3 425 0 R>> /ExtGState << >>/ColorSpace << /sRGB 426 0 R >>>> /Length 1152 /Filter /FlateDecode >> stream xMo7+a$q.R nqP& ݙM BxD/gYJŽkx?οZ\o%R^ /J Zqx;HD2$1ͨuz'(c|ei{ ^`t 2F 2"Һ``GY "Rhd9(Fg00VzGIg0A*$0i@IAF&L@9LbPvH cb!gkogm4 oH_hnip;( h:tX$QZ 2 }&'b|A6߭ʹe0QZV)Y)a42Zf e= bIr]ιJLbe +9|h\0.AO ;l6}q.K A?Y3Uq_5ӌ~0JӀjy@Ewo[e#-!iOTݘ{7hPyRZfgN& $Mι9-ώ^_fШp8Z^C mW2eBE6߬ާ-0$0 k TNW`,uiF@D& l"Q oiI``oiI>o*2D4 4oiIg0>Y #2}"Ud&@]E`D`=UD2]*2:]E5 ȯvIF0lc4bUӪVf]u}Z2 "=o$ɥpjˋ#Fk}L3Qc{³M=<<>HkӸlv Ҹۿq2^|8zW??GK#oaɟ8]:6F]fM @G\~iA|iE 67emsVEͬ|mj[Ԗ`fӲVfEX (,~s}xx?=\ XwݴaP8<=zwߋӟs9*4L sx>#R{9>{~.NunFyL?f8 endstream endobj 428 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 438 0 obj << /Length 1524 /Filter /FlateDecode >> stream xڵWYoF~ >Pk#(J+R_߹i =ڙo:*-r^NqZv-s%ǗM68VxQ '@-zh#MfmgT%*L#)%U7^YI} y ݲW_2wj{8-?\I*'yNA=$5~ bZCuY J \q eݵ;X-,De0YUez97j- k6A-W6,=Ko@cvnÓS jvpCZm- tԄ=֒e) q&j?پ=de+9~p7Ԕs%(=~X3B<&R#9\5י_ ҃;N5Nw3W2RJ}wC Tv6sn;6& \NQM{̥ \K9Y_YO2R ΃|`Ԅ{{gnqVwd 
/IZ`a7$WD}E64Uy*ϥy<X endstream endobj 418 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpnLEEOK/Rbuild423813aac6e7/clue/vignettes/clue-009.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 440 0 R /BBox [0 0 432 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 441 0 R/F2 442 0 R>> /ExtGState << >>/ColorSpace << /sRGB 443 0 R >>>> /Length 8571 /Filter /FlateDecode >> stream x]M$K" EBb%xOÊ g (rۈÛyʌss#?ۯyL1ʳG?ӿ?{L)=7zK>oKo񫷟u?c<rϴg????~|Kۯ|K۳/yK_\E\/+>KϹ`./ٻ KxُUÓo{o<) מ?n9Gg{oCZaXrϺ{"ms,#lLnPd0tV,^6D qjϺ; ڝ朓wi^^vi+ݩR?j'jr'5 7q*_:LjYQ~y|/S;w^.y{_Q_^P%I9'O1Vro~k_0[~:hLƊWn^-iј)-PEY8;6iQK{ό.]1mn~dLD*fj%_˵enՕ\٠4snOݩoiȄaPrAai`ws)ÞX,WyMŬ;@/Xq흱E4=$mָG<=-;RN5c.[X(P!~ٓ9=c0)-DnQodm7!wU—\|;(κ,f|$d{g‚ ا;ܰMH1_29XlH#pJ?Wvk(s[Uk &aP<(vcۂK%,"4^:dFR*A1+b¾1WyhbɈ=+[5?K-d,Kנis3RNK0i]"JNˁ`*ȃ\so323B&םKiw( hk+Qطsl+)HӆLǦU6p| 1>@ czǠ>AB>1͏yV 0̧ԸT*.% eYbȘ~3bǀϛV Xis)Pr b◺cSL:DJ[ F8ܥ*dq]/"9i(~yj/1F!S=lƯ=/"E"]R"MpW,ȩgt fSxwHZh*I3Dm%~IoZG;&#hwsa;ӏHEYM倸$æj Z.M7ԿDU}$}S*""?,R5 x8YtmqI5:PZւm߉,,G  hruP>~j&tuģk`yϘX57K 7n_.sG9' qhkEEGWf+Jn9XBg}>TG""W7V8Jv*"{PH^4%N9xY* R5lm!m{D~jFc2nP0ȃQtu"KO+,|p1),J%~jT9T Ɇ6Q~"wnIX[(YݘlY:~ϴ6E G@duˠetʤ(T2̩@I(&IdX+!pS -$*;J53Gsr"]x qtĠ/jeW8)`$^(yaR,[\{ R ڱMg4RI.K A+_ROHamcH+)΅U)t.9K5kRk'ĤoG쟵2VFpD::7fQ=T,TڥxNͭ  QbX;J/u0nzj9(!NTh3~>Dx9ZW⥌rAu}kOrcXV$*@0ga`9C㦮, `m\EHJ [ζ%b-p Ri#[^7šb|u19oXIA')A*\u_ 7p6M`Quy4x!⡍ {S$4odNxdnl6AP &<`·,Gs N4auvҖ)Qӳ@S]j.G26kWd4b<بԌ)5TJ8f'Aqtυ9fr>hF N> CVq:i^$\igc:}>~DQ;#翌^qtC,fR?L!}%WufEIKw;Zlu}DM sü܍jɩY7_"acrN`3LE,>({Yudlp{[VW@QUm23_%I_mˠDFE ,dE<෵*G}<nB j lS,dpIK4 d99/]U(~и!99J\zh̀KOOz*kIp7'R3#&.S:n\JRڙcBK9܅ Q?MUj=?s8sH͸mK;;Z] uUl:7⹞V|)4:TXSb`PR&O4ԖkƩ f_OW5W6Hqx(sG)1 [$m8 b-֤Q/mM_"ii{sa)5TRK.* 68w&V_iB[6]0{ܷk1B-Α=G i D6_ f]uӍKL"ʶ!NN;3فs jP=_ *yld,f)&kfC15(iNJdbj=KKB) UrZes٧l6u@4Ѓ2+-ϬsG=D_yHʑIfE&]VCt 5mZ폣\g] _HXW)碛:*`%[2ghB"cQOX0VHa4{9bJI-e[ 'gVJR(6fֈ%%hR#(]ڠR,.(Ovy!Wh]V\gW3s.@L(8Y(D@cHzDlbB"97mLSL<dU&0``[%E8ʩDljSDVP9nغ*(jc|I]atqLz!0Pnl,uw [\,fJ-;/:!#qrX1b1,3YϾYSW!x(ϫ$ 0!)(R]ap@H]S.~ RuҋK# -N\0sVBIGaBR [z@;P /3b.fq׸e2K~` 
G"|aIQ]e*:]8]KܲUm)|z64 Z(bg+dZ:LU vNOJa4:Ry4 ⌒2]cE&T þryp" A1@ meYJ RI3sW3:ft kx(`x 8.ymCՕELj ) R>{b;V*Kf$SۂOYSQ;+tN,dI_ f4I6dJ#5Jaxq@V0.GT*J)"Ӆc3)YT1؞CB]q2&$R]2YwP:%-²!qы*S$Fq-+TpUA9VW"32`3.V`)#ZI;P.ObJ jhA@I%N٤/VCVj=%,HHdqwj/CE[eIeuc{r^[_@#jUfg건fJ\iA@8qʮlgCpu)K񻹨0N7! \.E uR;"v]@:G60dֵr"+f`*(zwb4rVLpJhN@Iky']TKv͐sB)-Z. Xf!+JqPkTl8@Io$? ~,q,01NL1!6v!J$ YӨ*EpFEd.l`6( yT8Q yҁ ŽMgNk3^iRQ}E=Vrsx83ipk-3!)U5REG紖Aթ%A9uMH%IoK}6Dv*KW 5raC啋q0tJ1 -%&*!1Kh0<2QSlc.R1 [.%]6KHZLh@':oؐG~*UUҽʴDd,n-Nbm2ix9lh=*A mҲ8TbPJvd]4#Wyh x(XV`4hlhbU2JJ梈,V~kx_V3pJdg ]%j_Yy?լ6A%+:7$_+|ݨ4Nbg(eSZP.cBjuD ™X!AL@Ҕ' JisX82%9h&QW01̀-)*EF81 \uCò 8,$Mϡ>NUwL\F&g7$SW0"8@ň|7/=(w~y/SpSh_4 퍂0X^? XlbVGXm ^4Vz;-s3¢*/W-u'N]P'Z1D+dNBn ޢ)\ĖlO&b `ˑͅEtmXJ79{.'.l|' OO"-mٵsB$}vP!bR8 z'WM  &Z&@ܮF\ݰsotdzKux"n]*ޝfݣFfչ0T&&NBBZFo!<4Gk-(uٴWEi7#!B<)7.?g!7brK7JK=Ob+ѡ`ęɨm(Uv-ESTk׊]yሇi2Z!u܆F.à5zAc<#Eonmo7廣Ysh=hXfP(M )9[QL:"KS"|)P `z"g/d7rEof4VRӴOEm) RJ%L+H؆տ~/~7GIo߿/?eǯ0~hǟ4~3Q|?~?yAB_?~7~ok2CI_VZҪX/oSXӳ}rbg:VW⻡87WQ1v?=ɝV>ΨO_?͕Մ~Gw7//Ǐ_?noGy2Tv=|wS\[W(_x¿1*40|Rr>O;[MeP_~|?$uO endstream endobj 312 0 obj << /Type /ObjStm /N 100 /First 923 /Length 2999 /Filter /FlateDecode >> stream x[mo7_ eV줗\&FZ{T%ˎmpBp IYeVGI(\[KVXm7% 4J/V\ƣbdQ t:x$tc0Z%T0&T0d*V- 8artZBc X0Yg 'aQTR6 9B:@D 2FH$(FSSu*:D\w' ⮋ i#Xx O+Nx2T^w>yt*1`YQ."=;: Lp',ל#}<:OUHNFp cahgGf[?@r4JAG$q (MPcܠD &&gLGdd>NvbasT_˶.jsUϪ5`'%O<3Ud:1Xq! )=nS= Ca%aH p?4pH <m2~!eqHWجl̥v/9LdwcB[>4RK3$1$r-9`3>@RkTۙRee|dn2k2wʬ2k)vʬ2kPKYey.sY^7/H,Ϲe@WҒs}oTIrD;0D)@3Uh_@2$/n천O;C  O6y1oj~6Yu9𪦉$yKi͂%IJvk_n^-&s1|Ewd+n|?+++eeNB,'d.fy1ˋY^!p4I!#R׼)\wKV >%0<@ZAz3)Wy[~Fb[ψ 2f{`h=2'Շ6P 7T@@!½Aq}DwmkȪH?%w-Fn]ˣxOPŇ$RD3JRNRNRNRNRN:⃝\OxU GtF+R5>{S/gx8iˏd!~\-?:MWV:i躦ؖ;.7{yP{R.ůӣz~\. 
xV~O)ugYx)=>v|?_W c;䊋zZe5NB:\,-rZVŤn'Y{gӪ('m3/޷'d(5XԋB/iѢbolWr,f^}(&%[|,'ջa#S|0< sV h0zdL *vÇ{Bcy-(k,/( E)v̕)s*E51G|~*#U{Exc: g9tςgr;V{Vw?6AKg2>@_q$/Z{ȁabyv2z h('Iˏźe/M~d!RuB>tVy̦%*Q'*N6Q&ތϿ3;bok>pn/=nƃ utOF5\;P`"?/ < endstream endobj 446 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 458 0 obj << /Length 1423 /Filter /FlateDecode >> stream xڝWnF}W~А[o@Ah5}HӀKm )us[R0h.gfgΜ])/?QYYmE<)J4n[Fjq~Qx%|ǙTe{:)(VοX*Ce$\Z' Ȫa(!Ӂmø(¿ D]=?ҺW?_(`UeA[6qHhvIƀekܡM~ K2^{#J#te\vSߘCp,r@c6~Bejٻe`Ш'kZ ]Q_- 1ejny\]1xw^T e D lH! 
ƌpŨsƨ $q)QQõ P١[cVT n-|Ğj$Yѵ Sn6`.v3.o7TuPURj>7m8OEs}*ZOP] ~/L re D\LS(RT$B3'h33e'} 3Ef`h\By+HgA˥sk ,Vq6B]r,*'E돭XI*I΅F3' L B A&z'͓tڬ'6 /֪.Xq?ՄSTదp+ 6ޓO>]`pDeI|IVPE4e'%Rn$-qYq&kr #l=ޭ;nYT2w|!Čb9O 6Rt(CHzx01_3I x]ZE>^x֔*q; DnLI=P-;EM0+3',=ν;!8q)oE0aʯ䘒9JqdPj6l =Ͳ14r R|Z k=k4caUp /t^NP[*uЃX ۉQb ^vcv'@+=ۛ 줣9j,1hS)9O jdkv5s인yN O.B}*II.UſvoΗ endstream endobj 431 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpnLEEOK/Rbuild423813aac6e7/clue/vignettes/clue-013.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 461 0 R /BBox [0 0 432 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 462 0 R/F2 463 0 R>> /ExtGState << >>/ColorSpace << /sRGB 464 0 R >>>> /Length 8166 /Filter /FlateDecode >> stream xO$I){ W.+ v 8޳eo0]SfϞ=3_۟_~}m{chnloz_~KRno/ݭRo~x _g_n>v͒R˸s}}ۿ|.oW|Տ_]: ]j=[\~u{k+z{ۦ۹WQo"މ6M~]~kqTkzǞ),ܼ_KDǥ!PGr׺g\&*6zmCGG-,Y m?leon8g#/KbxٮiݗmǶ6u+8p[^e?咉xo֧L|kX_iזmvЁdWKoyŚ'Fw=,tmk2$uoؤV}Pj A.\Ѧi̳jl,SJ!+\<4V2L$`2lĞ5Afr-:I?vز[6.* W[v⥫`Bۋxq@/?C LT^X3䕀8aOE_<;N=Bb\qY+KSײQv\ɷ[Y/vL/3D@:ln03)759ȿP#Cb؎c8ekg(ıЮk @W3+dm=ۂp[Efјþѷ$$B%β;䨆2|d*ю hHD.S&74eց$l@]) r+AAg)` :13Ɍɾ_7u&@Eifm4BsG)}e/ST;^*1vfꏴN{xX =\\l91ZX4 bn@J~ G8 {5}l1v=&|\!*Hԑk[!.D865ex,#@̊\2!<'}2@VMDYO2 XG^hk+jF1w*VAн7˖/|B_.e HQ-PmEibfRv ePzHA$1[/vdo%W RMF!b4":"oyB8.yj i_d6xb%/$%AAyŁs240RAE2\K!,]lpL+4.-L$%y iē>fў[=pE>w͇䲯y`ȳR-{!S2Q3VgJ4939䉮)i]S{ T7Д8ў/k^b[z:1I@qY7o-N{K450薨dz.!ڵ랃2o'(nԧ ]:vgJ"lZcyI M<:, DLX|Gծ DС͐%oMjL.V=+u2U;)T zqLFfxK Y8bT]+-Xը0Bu bǞ _Qi{I-G1@ \B8dI\2p-[s㣈(|2Y^ Xog`,V%Kb^aiҼrRפ9rH#j1bEօH"e{k mR q- P[D~ 85WA+nVug` HZ㬪V}Ҋ[ĵ%G.[$,Css3 4T9زG=rȽюW!JCX$AR+NUJA*Ж-&g5VBp=C[Hj xnDEs ZG=b$5BTzi;uYp0;c'VsvhN[$]2 ¨Uf.]Th_rV7%xXkê$Ԟ#O%UT~"L X0XE~gEN،K>J\B2MXb>hl%uZ+8ajdfeY~7B]2}B(b2 UR3Bd,',@NAԁܲ[c9;j5kEfT=b 0)}va\t7\ Pp5ʕo}Bꄺ+L x{ pGrj>tʗ媬-q o6L/&d]6^ʚ:g1niE7.ReQ*`EP2%%cTǪcҹ5F5Hs;] ߠ&bحjNA؀mi0Tdo,HfqmZ&zIb\3#$PxaBIkX1͇ @@p ԛaaBZf[Sh)B\ a ʰ +IU9摋̂Uh5hwzFS\ac n[|6ya zjl((hbBFՔ i+d39/qĈn u|"쮦c T5BaU4!9l@pNv$Fʁ?xڬr*BUebP  \*zCLk]> X>S'[~ng}:̚l#=ܸd"[ qE 
G⺓,QlVKq$tJڐHcdzeVuܤD!sXc7ۗjȣ(b1RӚTax{)'6iG)R\'WjTiIhZe;xɊJ) eR:q6;}đV(EGO{l,+W=Wel@g^#M>;Esu}C 4e{+ɵT'ͱ?QɬϱYSO!yrf 4 +H*a4JCK;\VGo*rC/d| ߖ:kIH =!dA &>\3Ӗힴf5na'`%$郄2_i " !wT"u"CsMSڸwk E\8NLHuۍ|pܾv6<#lLR-ÎZ* _ʲA! :ݓ_xT.rhpM`ɝ2RH\Z/(ǚ¶=f(4`B@ePJq4}UEEvm @7=SO9*I,2ӥ”L{=qHE{X@,i6,hSL2 ё@ UOQM-W4A߮EH-"U_7CɬSb*yb9l:؋#vkIO!DHv;li ;@$J!Dz,bG.YC W^u5;#PaEXn8]b|mTy,GvF-lQs"o.qnt&Д~ KBqZ !@M ,rP>ͥSZpx %$>֚T^v>D9GFrᎏ( [UDoV''ydqu KJXNՅ3,k X.>5E[[BlVgب&K4dS.x8  E2t5k!@Nry*9Yp|4ըٝcžea56xR:qdUۻ ̠2z[nBT]&KvP9v]@3QywYjG\\ŝ;,A0L7O*BVn0OIG+bC2Ńkȷkt,$pJeBi)4$]L"y! Z3'~5gTE-Mkj ~eU{@[ir1j4WrQ+euy|@QĈNZ\ISO'PB88SDr`/8@M#N>HPHU:1ו=&`aTFaz#Ebʋ4[(yT6*9ݫy$SyVh=sJj| 0UN;~Ik ͉hIseͣ9Lsy Rw]Y9"HyY.44C mHݕpN6n.Ct*YUΆJ$?Ӗi*ӑdJh{vJ@E5&cJe; 6lz[ I @PG']K^X0p 6# =e\tueb@LR%GMsi@:ښvf0]vAsvd.fռo!u/R,4aj,bQuy[\|-s!E35D."~_&Y<ݳʁ=YM3[6⧝a4h|`IaRWWtQ m-\1k!Y^b*R&4[l~g_'U@򼊦V1Dž[muOlP(ZyCW0歶熉-0?+Z5disW{ƽڷ=Efpyv&^q}Cy8Kl7zKQ17 9w ~(rNCo_k8UMvt`eBB["[zZas>/gdw=pAy<+uA.6OFOlNZލD QMDLF iu^ٻc=sq[}yXjE;y]҅Iks6!5>kLeb2]o{hZ=WOnT+Gz5%;/`Qfd,NXJÜJjecKI 4;8tcŤ(OaRVmi0['i5gҠϻ_e>hJL}J`&@hS+YF-O HhF,N[*+ Wm Q/DyEbe@"ptV# Dk[z*4tOO;JjGaR7:s ?:*C'3izGt;̆b]a"Кl?8bUTfS,%r0"!s-ܫH kWx7|V1edjsE ٛH:eDUm"Q̼֩v7f v3r}^ MHlbWeQG;ث jzb8V ts%L WDڶO!Y`,u=!gimF0]3 |3ŰOqBT hZl~`(u-"},}+bwv}xCGDT=8|.ZܶUhgK9 *>kCN>IH{OXp9"U]J ۔`ӏw5t+z ++PHQ)o 3ďzbTV@҇іӛ(@]%IS]QW aToظ;QԻIGfq|l0Z~k#ЮdD;N6G<ٌTFţǧɋ4dM9^|Fuv/(:^=}OCR!rkܥ~:ɂ#+'SI̚7&q[<^=ǪσsYh*ߎ8#k\ǀr>&)/?o~Ͽ{c܀o/]\zeK_E@ևVo_5ay|e~)ׯ }kX|#WöƗȟĆy;_N97/7P|5nu\㛫ydqo[NW󷯟,Ǐ_~Go>?6_>_~Ͽo70\MOڳu{b7-e be~0|xy^F_䏯>?~Y/g endstream endobj 466 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 
{{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 472 0 obj << /Length 1262 /Filter /FlateDecode >> stream xXo6_aI"E$Yl{ؐҬ-6jK?%Ja6ha#y<w?e6˃x/jRI[> stream xڥYYs6~ϯpDO-v&$Ӈ>$i)HjD9N?߽-bGgEJ/iJ'M*n[÷zb'a*"/`5|"Q(ZG1^ I.('Cs X8Uw(֚ߒ*eDDys\wG|X(fmD2sxkjq8_A;nz~T+~L'khh$B+C jg  OW .VW u#Īb2BE:xkcͺF;].i?q17ʮJV{hf0n5J sMFR +;"V(Si#DQN#Yx0Gۗk$Dn3 EQB$Km$s鼳"Be&Pq=l!w8Zqa5m uql礅yn~ԗ}I$[,"ldU C 82^wbg٥, KnQC+P1;H($`Ά |Q+nYx݀Z= 0t( ]s&K=5J|@zmhggFT?R>z&īuVB74ݡޜ0Яꉇp;ȻmCJkp 'zq`*5\8Npp# $!XsR3?!)Pٗio; 420ˣ s*%Nl\f_u'5Ǔ]=QW=`4sӔ@TH 9b\\<9<}7K#ål(F ucjX j(Cq] Dڬ,`$nh.e%0K0KnhnMJ%(Z[ԫqLt&In{t.S7I"7AJu_PjTyTbד\A\E1]s& {KO& =& v=.K{ L}aEA[D4$0M9גbDQD!p䭄f߉ 4)\r<"^^Sg)0ΝyhctZVIanӠ:F`'xvpP~9ͭDGu`WSBY-b+/1$xFK&Bӕ%JIRRZ+A>gb u!yZ=~dse߀YH[[h#*`Fs.צ{Jжn0`NjxP%ٴk ^Cw/$8/׹u^:@J?Ga> stream xڭX[F~WDԑ>r҂*5@;>ï\' k7ŃW?+" b= |&FlQ^;?$j8%^oćChAAkuж~-B{)֒Qy`v| dɞu-x- y+yKvب촂v-*TZ' {y^ԃe䔏l3ׇD+4'Z^x|FaEY̧F3d3 yϨO/k3s:r篻:LGx( rS i T#U!xOםD#θ;o(m_oȐa"01#3#]s%mo[+(k/~J܏uk~;ٽcŎ4 ~&)ӱsy^I^}VKD"OJŢ7'ڞsc-[+J,nd)`TUDDZiYtuqgp~a mMXkj(D4IdqEZ+]>W$B,2*.ڵddƖ 9 μ,˄ض0v^Pk*ܗxU ÓWgmZe N' }7܋ҙj8b8;ʞ!~ \PGٖCq};LbFwʣr^] r\h5/SXzhݰHl'- D "SHkWC kr6*}Mn;ᚫ^N,E`UR2ݝ8ptM(9] 4q!Rҗo ) IhJ hAMLЧ >οw<~⪴"GfD|Mx(  } =_5bY:7ww~!;A{Y+}{ҳ1p }UX "x:X`Wk&߃UƲFI , b;+Wc9!dړR0pЛ\d2(_{~)7ܔ]%`ɾyu)2 7nۭ-/n!vz9ZFExST2 VQ"WSFwy{Q##_b 8֠ zc <.cO\-#8L)Y2@yOzld~jZJB+|jAgSv"y7~vA8:vAwCRHkwrEC!AY{/I((~J~AN ]|g.C$Q$B% |X1dC}r=AmRF3ݐxDu| e Y/9;{˥U&|NsD_!Af"=rq`T{WЖG+M]8p9} $!ICOєTB[_9vͤvvM M> stream xڥWYo6~ϯ0$WZں&[ ؖײw.Z-g%3zOLbTG.KTrynK">iK ,|e>5Wo"2d407h4֪-U&їXGK8NSqQ-h37]f'[G+$=گ0;[Kٞg{>5dԨ107-= ~+"Zn Zkhٗ0uŢ<^" \H %V o0<\Dvtƅ#c 9Pa[ڠl܁fsN_Qjo8Ԋ{^%։S6isɶ,q5.]x:5ɓ3Ќm@&NmH ]t):8[-g8N#~DO凞`{AǺ~վ+(_{O >.ߛ:'ړݴߎWh9dӤnJs7i~)a{KOJfp9(tN`hI9>Kt=Icx74SeBEQ ?r endstream endobj 502 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpnLEEOK/Rbuild423813aac6e7/clue/vignettes/clue-031.pdf) /PTEX.PageNumber 1 
/PTEX.InfoDict 508 0 R /BBox [0 0 432 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 509 0 R>> /ExtGState << >>/ColorSpace << /sRGB 510 0 R >>>> /Length 959 /Filter /FlateDecode >> stream xXn1W9`\8, JDH8" r){ 4a???knP ԝ>\㻳cj3mQ{2>w|Ʃ䴷^#Ơ|tڍ,8И4d9i}~yNaCl~6߯x;oaxyd )b% '*`Qg7-C iy{5VC2Mw=FnjGns#*Y{T?B2j)(,JxXn\C"WL#h \C"AF˨z&,(JtS '_S8+h!% Fju ;*1Kb4LRɴBB (cVi%h"QGG8kH9i fҹBB %()joF}bgOtABB͵7L5d**f9$Hc0%2mi@oCט%782QE~H4L)IH1l*P#;N}RiFJqWQtd&YSk pO KE%+'wUi4{]ݿO$ȺT@cAt`g\qˈ ftL1;J Qg#cX_D:T|Qv!cRbH1X8% p 4F,% X.v|@Cң!%̒s R@c"lqlZT)z]Ow{+I:,Hcz2#z4$"eR-9FvHct>f.!X:B,Wc+FS;C`604s\2J&!XW?7tBl {~] endstream endobj 512 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 520 0 obj << /Length 2795 /Filter /FlateDecode >> stream xڭZYo~_a̓ufd5@!$;׊=_EQt-Οe˃q3Mhyϛ?}VNhU*&["1V:``ɝ +̌*RHǗ2Uĩ̫Ͷ2ItgcMiC~i8#^WۤUW[Uu8 Tٹ'6/e L'P frO7zy$8"`QT Yu` yEGCp{zGL΁b Va{$+̪ '.=ǽ]/ 'h2ڤX^՚"eWl!dȸO?MǦLo:KrnuVX^ќ++KZ,]7gttη?7G_NmTj/X"MIDK$ "\S1yVe\ .טI~|i(-T\䍸m]JJ 4'D Oӂ0څA1Cc^ZNTuA{k0>0{F'2UX\DPnL@SF pC@0]u1]YZ[B8"DŽK`%o5dPA42U>1X3_kL}F$ -89S`>cv= ݔ:[:  h̀ ㋤İtTZ[wh@OlwqB^$. 
QsQϘq~ō9h.Tf,O4!r{<h&oD/5bcQ2EĞgk( rAdM{_PۻrXLrTtE.˞ [c;nje$W+& ]B 谶F=gse4' ?@6BeA;G*  B [YO:`qvw ss9g003N=CGŝͼeP\+7/&A=I)`ЦAbҙE?Uu+(f-bL[фrRQR$5Fԃ '0N/Sx'.hnZK"@f GGAJx\D`w:;0&+%%)G~lrT0A%@EdUgd__ʔkx +4qnLA< F(_OGFɣ:PWzUdY6F8LciZB<ʾSxSԖa7?ҏl94%=/wFZ^Q0HfY/ٮ u|s IhLxτD!8}_ iWt;Z{l#!aM#N:3֭FVB2S$s60H0&E,6{-GtqS >~f,cd&92\y{?5w]B5;7kk},w> jUe\_#P ]$sJ}7JDZ17T ~3 r+(z}C?h2QLY"{[PS'Sq@nB$0Nop7BrT}rB)&Gƣe͜LhQƄ[Ǿ2FІo` 7Y^ LC_3m+Xᢵ N QA }JLA0d3]Rzu?~gxEe_i$i{4W&T*^m$ADc-g8AK2u^MIV(!T?nׇ7vrl䥚[kد6VJ)085amł t(~;1ECavؐ“/Q Zd[nUN{{Y4M_wEĞuV9,vxJb3 ѕŬ-:L)(\ĶCh_rD w%z%leZTȕ[!}^#ncEb,%rDqy= ?+L6td޾܈sKO,໋wo endstream endobj 542 0 obj << /Length 3027 /Filter /FlateDecode >> stream xڕZKsP00<|ج#{UM*N "i %{_O?Cvy3====_wtzӳ7Iօ)NnNr_EO _ɿ$O6u2f*}h0)] 9cgYmJ6XXhVK{ƲWA{f^ոn4lj=MJ "#}U6 uʓߢ^*I&|ѿ2KU.:H /!U0B0C泊tDw?8W 4&ߝt(-ιqE\[NK9r,:9qx<椅Oslg| ö% Hۉi]*4EZo#Q@uǜſgάv}y(B88{;. ^] 2Eg7pUV&gAύTjO,R[љnԷ ȕsq!/H^8JKe}0{[)cjH ™X(V:K1w{ `헎"F ~=vlIͮ<a.^9]FwԲxB冠1W^>uy|&1./[)VZbQAQ!UwGC(`yl %x|$F9sva۴=VEϝ 8}O.Od>8 k(n9ypfYR 3GHxmo,}1_XقIQ&\ D3{:Nos5әE~FQXpĐO4}"TIW|XM1}u?SZ|REjl7QɡIBG> gg!dft9/)yr.䦒 vQPy8tNH B3UDݕ$u>G\ ayZrJ`N$ճUn*+ ]fO z}b endstream endobj 444 0 obj << /Type /ObjStm /N 100 /First 905 /Length 2865 /Filter /FlateDecode >> stream xZmoF_XA'6s^@SV"]JPq ^ܗggv癙]kPZ#A)F J-FxQZw"y2 "Mj~nQC69jA>g 6 ӢODU6 X&FtHB;:Kt"*ZgQMHYa)<<)X@ۀodr,5 da Fʤtps$00 "z"%/pIWS!ÉM}ޢ MS0@ xp}')|U!1aAc0sX^ tow Ѫ9, GIDBEGGX1'zv"O-%0p2Pd%@D,8LY| Ьh8f PiHY,(e'~S!J%<啌0E" 8^M"~X"J[`4gykGY^2Ж{p܂#?a'Ȟ4u7ɎV]_$)_7=˒Y9WvUOE3~+j7Ǐ o=}0_^V-\7X٢WK#st<Y͚n^.Uv,/V]U['򯬫2[Eq[EQ]60IJZfnYy;+Y5ۗg4oWy洩?"yQY_F$cU6l(ȃfϲ7${UxcKP- lHK}eI";HdO׍ŽtՖpMxv$;Mc9c(a5>Rղ+բ*5/% 鳦?h W2Id K0̨d$iاNQZX-6a3l⍅-%ٯM`YW۫j-%+4)Z,OzO SnC ?@>Òc%?]ǿ฻ o/ۦ8*!T<|KZՉOldVh)3zBfղcס(%zypk2q;^T5vbҩoޛB|ܖyW5aޕ!I+@ 7l@jfW~{]{6??y՝`flNr~_ߕ>Cl[~C_4YJSxGeĂ5!`<؈q(Lt'Khk/ FCЎdIaYY#6(2ɨ qs! vQQ"dЂvy.E`C lLxnJ*ax2{c A_bF7"Oف7]pJkbP >LNrn"HûەS6đޚ=ah^ʹҮ溻. 
- C90qKǡ^ܝʰ^Kr|,qfAgeY\'%qpFwoq,̛:y,01 -8T(hfh+}JlآEi@]NΚ[v 9si@woNrЫqwᄁ}7 D"]ldaM+ӓ~1IE;ܭAyW'[J-[h+ P2߃46L6i`_qD,vSU0s7ɧm[hVI0Z>{k$ 0cY)$12:H>V1,y)ߥu׻hJD[TۖuGgݲC4,W3|{9Ha}h#kK`"R _:4ܣ3a0L'#v)bw̥>$sXJ|aۭ Kbkǚ]府KFMK5 _`,|7c]'73ֿPk‹|9E0mj͵*[mw 0}*8uS I-D,[ ~CycGNmأc&vӃ6?r5_5{_^j`Ec} !w# aa gOWaQ ݢ&3;x!;(ɎzκA\/?grU YVٲ=_wqR~_VFr6^K3Xp14n';'N{hiӆMoF`/zk8k|/JJ=yMT@&@.z)A|H}-|U‘"7/3l@ / f45?`Kk`2^5;_H!]|{uLJ|b4լ银?l >Ƚ3pk=^FgU|װeL\yx-_ h]3F|n},՟frQuK0[_=ky-n8N]|+NPeӞf$)m(*ga0#CoKG,GZޱN?M~_.;U\_5z/"lJ2ahU˻esߑ1҉'ZDIeƹE4u7#P7bqm++~&xmUF ah.*tl endstream endobj 571 0 obj << /Length 3381 /Filter /FlateDecode >> stream xڽZYw۸~_V:gD ~K;8ӜiLdhI):'wAr(b]W?QTGWG&˒—GG$˪3=Ѵi6Nq { B{ øHtlYSw[0,Gwc[B3aX ;V0Ҍj4/%q?֧a6?^Up8ĤI*>Nf^,ږXωTW8yb0YR(.@.t6/Ĕ$;\畲QJtvaOi85d?~ޥȍ#|Jt"x gc@_yNdJi= ?zVhݜH[jq2ҠN:pZFE $P"d}#X!{͊G 7*H5fΕ*<װ gԪPTzR.\+;5vS->r* 2o Tʜ2I`:F3b͉<Xi@t4t2u8Iel_SodxDSFkp1FOF`T#\GSXygS2iaK磫;#6(e%M_ 8,Hyj,=N@`@6 ;3d#Ml.,j}'5gIvFE- s2-Y])e`H; KyĸZ${d6Ȕj)@'X?`cG\<21Hiƌ##~i[3r]).kW +0j1@0t6dgZ DVLpK8gy* r<5LwkDZO-V ե)w: O S !nXKcXGˡ-FDIA MY s.O@(ds  %̕]N* IM~̔!Jl?2xN\)0rL?^ T8gzMBrInHo[yyNIQ_0DlF9wc X"uÊ`NCM&zn41n zN 9q%1w ~ W>O*xKu#GORCPnNlmPT[v(J0v]Yۑv6"_(Tl=He9$d^v1z")E28g_d =`nړGVfH1O5Ujnƃ߰AlԪbkx}M}42 z4cMwbN&'c0n.|"T Z ss\&`!q/* ޏ1$+L JڅAF&Qo/jFQg?o1g U4z;CA\i 6DQPY>PHW{ ukU5t˶P4zuq4YڤHw(ֳL]JR+B`˝ј6(L.CfF,@ 1{Zh3IQ!m^fC~, W&aʾ]I \OU%[PDIa2SҲ|G6ky1K|&\IG7 P)qQgeik)4̦ZEUTa!߅\AFcJN[E+0)BNPr?뮜Zc.e첶ܳ9]Hm{<n*AIf@y⫲3*<T~szU7LU?@Fkduf>oIrf8NH#Ƴ')QgY4 JHol(rU{Uvj(쪪XM]E*uo;xP Q*q;+Rl/) Eo Y7i'/]MZx:)kֺ̮^-XRGyV[+D?_"~4;в_vqS.Kwq2t"#H{&tD2+eU:zu( pB-y<ʎ3NhfTTZV"Sj)d{|0yO5Q-Fn&(qU[<8+#Ew*mMHrDBE岽 ytdi6OGV(Y)Fw">AT'öx@ܒ,BDyK}7(l7]2hҴn=ǒF4j,lޅ\YW7` 2;?DTW/z&Lp"i&M@^mĥI.Ŗ?EZyKGZEG6CG`[2đZ&6[F1ȶ24 -\ϡgIpao[<םG$u :"Z_W.-*-v+[yE X ֋H zws],<;=%O=PE+Ŷs=YF]#}aa2rҋ >dI27 SxW-ϑhu.uO9 h2$ǯfUW\\i:4&BR!/y*b$9Bs7#zU"Z,r?࣍s?rvpl(9;vyJnPHAd.=67V(-߲%mZw=aP-zuōlcL/U0XQe V١$MWGD̯4d^:dM<-C0zi$_Ş;ڃӋw[] F 6[sQN%M&ɼlykYZ,jPIvҰoe;h8"kE{HYuHO= endstream endobj 592 0 obj << /Length 3356 /Filter /FlateDecode >> stream 
xڭZKsHϯi%<7.ۓv KdyE)SNvhlRT^5T ׿Pemʳ 2\OÑ)z|\|"y $U$,}\ǢהŸۧ"K;xq}pR8H–\˒p:fq9f*<Y]$'56&5> 0W(ZYIA/Ozn#`SU2/fѸ^Elz#.%okXfſG< y!MVV]r u=L~h:u=oV:1([jL4" +h ]&,JQAˠܸqȵσ#71g*`! 0ߺQ|' CP~zЪcwZW+roo *Fz0kQe{PAwY.zQmC7x`LrE3|c!lbkW{yyFD [(uWSޥ1Tǀd}Iji.x^$.9O6E 1;/IΒZ˛#XRvP+7;HwF :`&%'h>?)*}nʖ/Aqո߇ȻF;tF,#8rWm@<5E0p0(#\ Beэ:鑑hU;茯ztb;Z;ރ7xcbcqY=g9z(WC@zh` KhH; Ew)˘I$k^j'3 of%b1L1ƫM=G6NPv5TQFn GDqʥ&ߋ/W7^;ѯ'(pcFw$#ǡ M#50ζw.lg@S ΍|./]+Z:XEn4oا Ԇ W _ *{deBLw+\D7e>A 94vH gE"Au:_|N(jiTtL'Vv;E{^_cospY > stream xZKSFW{RG UJRq1x`;=444L_|JoaT/37o\䡪Sy]5;s[gh4M~d5\][gbů+|Q3icjbi ?PwpR~ِG~VלƢ:Kl:’?|7XC6[l\|"6/q5 8ϣ\| rgye~p! `?t (^qwSo%qGKwNs8) uE>i"( xIt{BtdL+~_jC7PR iWgrjxX kP ]?zte^ֺ7 g2@=,\xozě.A.$kI%kAp[)vrxTiGAX 1gxXWjv2{hn jNuX.E7Z#V@mlmiA8Uu^F~D;sR:Єa @[bW9huJ+,h$cTTh'cJ\zUUONGu5Å;@)Q&ZT'Nod٘d C>~u˨ nx}DZZMk6U^US C뜤ǂŽVJ`%o--~تnGa\<>f.dV8M{/.d dz{ת=n\9"ﲳALE+ܳ?؛*M*CMxXU3Ub} 5{#Pu"@!}/VbȲ-qa"xh:'QHwXUoqp-&qb{rX%+yfϹe LZD3EF:TαJ*Rn6&&uLPKyl%[ڼݫŕb@eޑ]5%o\ͼ:M 5=sDQBr Hn뤫"\_+/#0&QXqmM&jDW*8 l`S v5IPzPWJ_ Nk-F'$!&g\P\SdtWKƨ%̝K.Aƍ7PceEY+uBrtc` d5h0CI* *atɿrR~u[*QlP 20j^`@8'Dͬ:QK(?:/('ZP,hםi~O-2 Ĕ,f/f unW >{NE9êy*f٩F##YnM¹4"蓦[8?zP5UNúˋier_*"CHW{Fce ^e'ƅ,u"Vp\$ V0\RM{0҄#b3Py9X}"YsMt8Դ~R5^aIC `8]tto`CLWF"](u\QIn4ZnR/9HY*(& iDЌN CݎNEbCAht.9edlrZ0+~m`;2!nk-C\ZʦB l,usbTw)mRF\X&syXU2FmX[@X&.Y QYD őh3䷟@΀ I{f5.U z)pELV<+v:QԆ t]\TWӶbd":ƿEAD'w7zHuLvS_!hBCfFE1^)rqP2Q‘X6gt|v*H&ݙ 0(%t$ޥH@mӒk-+GsX 'R$5AgFĸɋsh$gP||Ds~4GQEX /;f@-bğ`XВ#Kg퉘5' K81q?>)XѡaUWN_Z1;TH9@ 5(zo^Мj+"`+чE5g*z AVBrxߣ!J'ĠdkWJc#,~KxdxtWs'\ QW/ʌsƩ jK>$Vl|+=q; 8DELɂ;+B-N~.tH0ySn4zfk}m4({\SUׯ[1 Ũ\]4 z`>4 >&TBADUʛHetdltL,k95I'Џ,`(#t>dk8`[;CO6NLj=:)L;"D@3=T> stream xZ[o~篘Gs.s+"]%)yow+Ŏfek#gv3,88HbN"ڨ.VA]Nxh# iE.&QiWDE\P.)c 6<~ɰcŠ1'ILFIL]hD efC1*ƒX1F.NA&חn t%DJ4r;D,T< Ԧ *`ȍPƦ* /W֛Yo,HWPCZ7Jx5Ey.]B#l/ _a܄ܟ6Cc5[TbSZ-=BZB4HJ&# @V?S q>JO٘t(U_*%2lx0 Ihu,,Q  !yqA$ ,%~M_TCexh9@hȖ U9(_]+]þ;4;:߶=.=.=.=.=.=}[zZ]SHڝ`D%KZS͍߶̶Y^us9޶#A9&Pۺ8b_m;6vˋI,&bڸζW4m#{5R.2Yaqu%#*S:E5^z1u,wUƸ5|:Xʨ\C_ 2m*j hGTWV.!m+g  (oG("I](ܝrx;X{2)bC5bB=u۱SLw^V4= ).A|Gh̀2 beeBm*yu7 
wF&^ɜ Ums 0m!&oJ v#ܗJu NRԎPuzD(3@DiH#1Q0P%n(iN^9KK9ߪCI~|bgs?k_/]-6 ugt_ $6 {,ع2KMH cNvbiu}?J),1yAa-i5B|Ϸ3TQ{;}ZT, =1-V߸[IE׬>E&:RWolŘJ(Bs>%k1PՈ٧/CᗲÄQ* C1GhhY0af*zτ endstream endobj 631 0 obj << /Length 3516 /Filter /FlateDecode >> stream xڝZIwFWBn4dؑHrKr(HM#0}jm4HH́Ш^j/._ըJG7#\Zըej]^~O^I^䘮69d57N~x6D_=a89AuےvHD,]n L dl_h$)G^ħillt}e% |rL[@ߚ-xnz!LZ{CL~\pѻSZOSsYmH! JLRq괪.p2=)Ȼoin;^敛$VwSWYKd>3|ˑRHYi./+ZVr[> ePN 6bFL%*2(CdsU BF䝰fb>~{F ЁpIWH+uG:e+Rtd32[W'jlRHR^je}Z:r(ub4V7j qg|EB*LE=;=a%I$`2E}Sʋިzصb$:oL.q5ii RW~Nzb_#ЖnE\ S r:}s0 (/6AOz25&|_[>lYxMY u&rEB<T섦CTA# H}mnZGw 䒏p{?vN99E yBY'3jhkV;3Pv"z`M"j·IY=2bx/Mًs|k| ~5]^ԆW` ;VH[XlUxIʄ r^r.kkC;|ۙc bѿFsVѓ~:*7荂фu(!jmTڟJEZ!e*r&Q ywl}dEOF{ "=,Q,ʲ<ʦ3j"<7iq%L!k#(hwgPGo9i[vP^\\B)A26R|0a[pUh<(ɹ(dS!=wUj6)VIߥJIL 먂 deNdks XAfv6q <ȌlQ#-i^?5:W"]\+btqjYLTFUi&nĴT9qL\;t`,0f(B~FYtI@VxvCNwXG `2L3ktKud)$N5[v4,B"8%9W2Zi,͍c5RڢTDD} a&~m(>XN,A)вlbU-{|&,-Tm޶L:ŬA僛P^idg_u cb5JהߟR!P}fWQ`@e֫iplyh6I% o4ObTvV-[i רH\|6-!AzBp(ȻY!eIKMf=oҪNWlf}IɄL|E \5:HET4&q΢>m }&Wgš l_Iҍbȝ !yBY\!F8J+x^IɥP_nDxEG?`8~.L ExQ:J(*OP-->6B_t03:D/q݃m9պ{_6밇rX &( r&Zo%[f/|,+Fd+8Z7Y!:IQ0u!iˁRQPlӫ gUBrn$wã(77LXm>cuYi"N|x [x4f_I2:% r3 w9~>6+up2;,;oM30"+Tk@=o y>^O‰4 #X` O?rH4xOD r9_m|@}zUiop HW)cw"P3Rk}3.W(~)?!}&VV؁<$t&BgVVibX endstream endobj 638 0 obj << /Length 861 /Filter /FlateDecode >> stream xڥUn0+$ [6)(b-rK +|gÊF@EķUQ%U-2Z祭k\iɷ"i̾{ 9~{i`3s cᑟq4S\Q*\}t-z0w cӇ9`z6[A v`Ӑ<1_UE.NFR8߬΀v^gBU}L<3g=62?>.sSI%|F'vkM%c ڐKxa<A0qy˓b[n[ U<tIFLͭS{d~w t͖J11y >z({5)7CWc8'G4?qJ&B2 XI4a18R i$xy2h-j'Cҟh)24_N kCp8CߡPMo=GM=r!TKrsO{OK]Rc 0گ>X!%ȏ{‰7$n_fpʈp|#M&=I.n -WkحN:t>|;RR`Kd j"U|?c.SrK^#9`&eD.&+|z dtׄA)K\cs'U2*lRggg& endstream endobj 665 0 obj << /Length1 1756 /Length2 10838 /Length3 0 /Length 11945 /Filter /FlateDecode >> stream xڍT[-Vw Xpww/wCXqw(r{yޑ1g.s=NBM,n1@\YJl66N66djjMo326 qW3&eq(9<ll66 8 %L- qr[Y.?: =t= 4s(Z_W4 @0J Y: zxxٻ@D`Wk:0@Wg,Mk_v 3jA.n g yE#`ſ ?d3 bhvX@EWOW&fv.|3w3k2jn vtuaq"e^wYBborpuA|v/ֿN7;XXф# $/wȫ 9@@k?kz9ti~|m~ n ?;!,@W9 OW3/z`O۫ld*/ ?/6_'!0p9l>wU34+` u{8]KZq_g&$fgOmf;Unykh% 
v'j:VvD d vZZ̘ qqu˫ t^濗vB,0n"n$Z<0xm` qF<8BV/U? ~+?U`V?𵨥ݿ_ j_"^mW"L]?uXUU{}~ Wj<\@ e u|%xeW7/nu@7g+ |=ާ '8 wՊ{0LR3,:w=I|#2܇%Mw-Ds&=I$A}zdёx7R$fM]g'_[6nONn|w_e=U,~yT1mP:Go'ۼ2FdbuޫU.=ToǦi|$S|Jׄry#↖&~MHA9Y8b76UO0N8 %k@ .L.3!?Sj$B[0v9>fdŸҸBmL+U"kJM+87*"ecJ6^^fY2wG;VvzYjfgn!IK bwiV´Ch+qvi*;fMm k'M{_#_Z '+mpG<5Eֲ0PUK?l@WO:@h|+w,s[e |i6DOSN9*7j^K맰ez f8_Xx #Y鄋A; /;s@USZh ˆ]<(H3}U՟oHwg+W9OاM.,%E;ZfDs9$~rg@BI=Џ?Uz o?:Vpx< " -(P"([;zHWA!R6❖o5nVlVJs^hw# iRص1>Y@ѪZvWgV^.8OC([Yg8 3s y !(UD5mꯃFvgRa9IpzR:ҟ6w\s77:qҁP@By0?N >F4,0S u)J>A=ik,ߛ+2P0^Q_: :+53nq .U{A <7kGt1_A##JIK ɶiF1U bT]حC=vzD/ž&RoX]jњ 0!#=ke/MA>gU;NBͰr0els2~NcָGdӯ tg_އR[D29s[z2ss~"|6οM]}K#PrMd~[ ;`fg Y;I%p׃&`OB#7׺nLkmGH\'IX]TR lpoM{]cD+i&ڷE: an6(38c ˿0<iG\ 2 p#Nc@@l/'E8+[f\U{$ &l\oeFk 읟6 Z:2AIi T;hBDN0;ܑޛ}K:m׫+Z^pV%0N+A3pG3 ɕ.2#^1oDGmƆՒ!;zT\ۯӭ3<cl+5bDn_M7u@gyviQܣ]Dr[%3R$x3";᧡d3D]4"֖PBW5L77PM‚v5,rj_|kq?I~* ::ثv ):KM>51ysX^.stX:'!aERc*V:Y}?RiڙW lF$vSa&G#%\0rVK{z! t s0mtIO ^W߻XjU0Qm;mUL@1{:t?41`'lTy1; cV-EGfus | bțsu$̀=*Y44Y fXe%Ewv7YvA⡇IB>%ȇ7fE^#91]#.S? Wyt{+l5gQp_,JВCsJ,.w7 6 }/(xGsY9c e2!~f^MZ^Dw(~T/Uo\O.t'}$F#/V9Eu!twdcXIЃJ&F7{G_4XCy mè)nԧ2Xl5 n\>OQgVlv˵]o%7;\,t-ߑ )CNb _aʩ-K b2OfƾuJ4r.sZǫaW?CIꭸ;dj*IlMe7ʿ=ቇ#s|8(q!mY8B^04dPIN~=U\X)i3ޗϛ:n2~g0ӑhJc >>uR/ ֱiʆvOkPKx6`DFݪ <ܼ5pv:A'H.>=LG#e>EÕYs1Re?PlE$HD FXD,L1dUnjm].dtŬ'P4" a rȾ ] b|ɅҚq49snod0hq YddO Uuq..{qHoapED#kQA8"6>i)"Eػ(U&x'%*_ 'ܓ$6 gy:\{kdFPs6D9)oԏUD"y4qe+YqI$Mn% 5a?^#=ZN |NKrR?t=B#-Gk~﹖!~->1Oڏ`ul}rL~>lTU% E1D)'%ai!cnh2[itcɐ!GGʸk)ͳeW/ம¤1e=G'MUI55 otz{8hU9P! KfM~kQy7|@4cp_Z`;Ȓ\hQ"G 乺^QBk: 6߂&hlx@qut?Kܳ9ͤ{HfpOn{6[} Tg`鸱s1js҃{20]< J:ҥmW`ab֖?`4&O,>ڞI %pnk{DBʺPV8ر//;U_6ۈ0.Uh9bZMxbQ13|6HYuEpxާv_we9ubNlAN)? 
8/䫧4C=O1W6玟:oKIt$TTvKW+E;큣[צMXĝ0r 3-N/Η`R[ZҖ~Zsa؏ |夙sYm|k)ںɛdQ]ESEqNpB[?rh#0;allUG&c|Hn1m*ПDS^v;{-rgӴD:}?woHmڄV#-Dzh)Drq d469;-{ނ*<^Vi!|KXDur^%=`ڂxAxj̆ )Ze̩' cPwzt04˔b /h.fzusa/ jܽ\Ox:({ُ8nP3h+ z+;[CC4>|L<~] ZMTp;E,2QP@*W(o(` H>rf!%]tj}2\|FyQ0qլVg/(0r; ΋$AѦaz̲ܥ6,oα(41;VY괁4RamK0|P ӳ:yQMLeFŵz͛nz~XNHäfފSU'H7,Ȅ'ʩp0R]gGI 5(Vh+øtQm'wbrOUbz=|I&{,yY;7YeEO7ٸxjH7nY!N<|y}IYSpʍøHU+ngcdO{8u[a"A~PX+YOiŋaaf>o}n_ζ?}G7pƒF_kmmP@aR{dORW lv-%!Y#ʀ蛀]wωm;d&{~\za8R~v0Nz/Plr\-!31?{ |rnxφ|HW~U\iAQ %4IucnXό#.Q dj%@PU[0')~o _# !,+bn}sm}F廂D(;nѬ yaMx@ r(Jy_F6TqꮷZ47urL/"}}ߝe߲G4Gܭ3PUNWhf{2]]'sJb|:)qn+ R-g K\Ҭ߇TNW;Y8i;N0LƤ:__ v*_.I,KYMn8OR @??SS_dB/ժ}xt |Xcnz?O [tdZ%ĸ3YYʹz#!/wܲVUB$!M3K4aϣsa늌s!oK;ҙ+`eJ`خa{E Oy󦴴eYy*,~zI=g*rHe$M[S`of.i:)!ƥExH1EB*8nA>#u_h~j2X'z4pT[mTp?hH||҈U\^̌K9=WsnzU?`|snmyCc)B %Uy*öBQ囃VCk#oT؏},1)O[glOو2SyB3v-X6n _< qV1{ODL>'IRL; ; RQ?(q,3༑ԉΕ-z&q^~& nfb;EsUD}٥-۠z_T~@RJ]ˠPoZ%rdL)LѫtǖpH_ds 7)wNlEV>~Aᆚ7$ i\ z_ЈT$,^/Jv/iJ+FQM !YF{}:DY-dG$+M.S/=R=D6I*!Tկ1%DOq֢gN[9'8~|z78eOЙr|^$koKGWY{>aQxAUJuWh>)0Fat"񚢙Io Lk'EQLC[+ERic&ok5Iw{$.!>y렄0z n=@kPctYH9YŒ6Ǒee@xRI$bίm'*Vټ2% Pf&w3nqxWYʍDǐzYNZa߰ L1(,i:/Ow/9LeN.md>+WqK'At,ax?53qCgZ/Mu$Ҧ\6[)?),(>l<;7'j6gJxېقuNsSB.c" 2qHeq[9:+>a\RƾSS އgM:2S#/8~ б.Ώl"Lq^CB:ez[)c"0*>} AFϓwZ+{'@zcr}ƖTuX5dDmN1s]׮Ba[UD*'ꭉ}6"ÕYR\TsS@" }b}|p&VOW͍R8bs&w\kS-m4\^&/0{rRkei`3`xaIOXZbXFH,lH7 4g3rc4sxp Ŵ&[/Ⱦc19LM,ٖF)#] k*dzd!k9IG(]^|C[_;'>WBZ\ M wV XJ!V4hLJk/w[񼅷LTSޓ߆|{ד~[Uln*zTHc>zO |%;Bc2hPár,=)k@x.:j'%֛Rda+ȢC$ZRĘD ;̀a-t߭Md2%H/qP.aʩY=>S7'Ѡ6iɱ߃n(Z;yhe9a},~zh91TE$Ǧ`{nwKo mCuZrvM|h\[3sߤVDEIKM)ܔk7#vO,z GgipP yϊ _}T?x6ZG9X 5El2=qW4J%4dQR2T l]}'eNQ.1NB~G-5V=X`ɠب8TcP6(A3!6彰I$Wt CD +|" U7ek5orv.;p,׷BJbusOUA/Zw)20n)iIi5!D>s8/ެ3T7JοK|N]dNMpdPٕ@A6l#zb1n{o>9PWG/^3+\h6BE?#fct"~0򓛈R#7v}δp9\;KO^?HX+T<`^`bfC턘 ?tdǴk 8r!2q8c Xqs$m\Q(>11AfvF9/ScX.`N Tȇ%H)P PXanC71NM,vѾ%ݜM"1PpI971D'EO{뢳vk+NYR. 
wj4wyͩy\[irߣYn;RHr0wTh#uV]Q]ol!@+Ŧd¨ ;/aZ7kyǸ S%O9Z.{0: N3pmDZ@ykT@5v`Ű vG*f_ԠKbkidNQz<皈!m8I*eH  #L@yaQ*ZmBAgtEbcHKn1 9{XOG坐A!3;'>GLI!noq_sfRi%*2gZm0y:;R_ endstream endobj 667 0 obj << /Length1 1777 /Length2 12983 /Length3 0 /Length 14106 /Filter /FlateDecode >> stream xڍP  4!݂[p -@eg{^c\sEI bfosa`ad)j0Y()խ\l@#Pjxe!ā. vYW C{'^8 9#P;x:YYX@mJ` b r2. @BP[8213m,iV.U3 de"wkuK+j.@']`ce s~wq39޳dJ 2}9FG +?@;O+; $)Bڙaql&Hݟ3=2%mmAv.'n2}wOkmgndnegfGfLvV mEY\\#ajGuOПJ?=z;;Zn +ߊE,,3+S  <zc0d03{;9b& - qQ[G)*jf0r0XX8\ ؙx*S{AhK laL?](DIG❹.[` vT ۘ_ }D,lF+gI+_tKǢXف흭xZ ,G]χ;'TޗSJؙڛe:9=އ8,h&F;{w{ss{'?&`C0IXL nf? /0G1ٿ /`d0Y g/_=Ϳ{^%`|kd}~ojwN^ӿ{i.w3X:}L.rx]/O?6uurz\w.‚@ S{SOu!" Zi4 N]Q`ij6nE{v%oWH^O[`%?<ūN#,M},:''dP>yq lstFQ.w/_rP)\>X2Go=K@Gq:ws;;F"O{[w5akRչOcl[(Egѻ8jS`1+c'O_|]l7x=q'L/~|튳kźB3 ۝ra}eKlB/Ê#"{H6my֙PBkJLp_e%gЪ9Ojt4lc8PVy1okxm!qv=}(aþ<VFh<Cr[% c"C'i od/S5qnh]9ex'L; ق >iFEӲZG8yY:iO ;.:nr$Ap*1 2" '<XT'Z&[F6|:08wcqh.Hѱ))5E<K/ )(ɮFK Қx=UG~؏k#340,LڨSV^!^"RijH6eM^jbfK9S\I! l, G[',@9-@shk7Dж4S+5PR/S0!R Y"R}-wdW#_I(16Qm+HHnƈvmuj6XHxe8'm؂o+2Q{tM@eE٪K#×'2=ʟ?]>q!±+{xo ^6 41\΁}נؿYzؠ.2VbMl})]2afEaU׬74 ´AN!zfR ~ *(%-aSIb9 77FN FrQ$@co wGO5c GL4fHyS e'鮵P5{u!z ֺp]H u5̽l:,tF콾N*Jgj35ڦb.a7P5&B=ťȅ @svtWI[@`5hS wT6BBY* +HA23Yί>m\ĂVa0aM@îlřBZKl=Q/sYG ZZ;{m69M&E^^OKhkdt] ̧֫ǐ/$kȤDX_ר7G]߂ J [ߕp&b\FBZT l{s3io˰2%bXzf쓾Nay./ӈE\w XGM $B GvPs<\HE-SU% F N+6˱A9{mK.HPb3v&am}IN8 x U5eQ@[(϶'k1u]\SWpZ"wɴ\DEJ nq==}O>k}*"NC @?w^]l]B5{KpKP2 5":24hЯ115bT1#pY%$ WЊ $Գ=>1^kߎ@q='`J9hdOӜ֠Elt?^w{ad?IuZEtk0wAFW>S#/מhhq2/ xǥA9f+$*j|b] /ѻ&<0N&~SϚ䅫߾ eu&lQ81VD6$^6'ygåxԾiDWZVp! |,]HJy"ڸ0B_X)Sdqq҇ ֋ϴ%'c8OJ[zՑ~K' tZl5ۏ"g&OrqkU{$%'وstxY>lZF Æ*a OrG`K`UՀ~F?By9zeɸ񛃚eh䃋..)F)rwV n3&sq?=X |u)*0&^JdedT^(\^_w lt&0ǒ&eHuwc#n_<lh^7r3pˬE`4ٻ^[x.nVC$H{/rQ=GW~K^v;k5<] v(ߊ61b)~N@ d1I>%gR'IiIOvAjB$ "MjܢS)"t}Β pIP/4]u~څB>)T_gma@@.HCn~Y QUKU>5@nIgY\*=rIYp*Yf/emwZXlQE3X#\v.|D$MAr# 9x}[EJk3 iݒ_x3 ;hk? 
0'l'" 6lhmW1CŃ~w8XAYS*`-4IRه?{gg_ZKrşJүL ҹwzؚ ?f LR3퉀!s~Z߃  \$xg_[Ȯ1gWe-nmCs''F5R"sq">nEf6Z&{ uL>JJb]A NCC,!ghJMU-Q<ʇ RcŖfb~ҫ #gԨ5"0a MB""ב,W= 6 W\8.=31氪rmTZ_l2s*!-~}*!RFy DmHgS.T;r )_|"UvȬ,q 6q/Orι`a `UeOAMׄw D0y} ﻞ4 P#>\HHZG06aLuPQ7glLCn9'OZ 5ާ$Ӿ-7xCU2\1ݷkۈ;tj]0Ԁ#`&ِUjA^ ϳ OiMcpNL=8=kDrqXjr8zTHצHyxп3ZIР~m ,<#TQ;uAHzͱ=^fX#rvJ0_p-teaў?I77Jp2_:fXĕ4*¹Q׊15\țTG.\,VJe ehnzV5[meFmODBOyȚ!9ڙQofyn7V/Z \%R~{)\LB[< nJTp׋f*} A誟\҅X[ODaaO$CQ ,\vn.>㽞9ݝ-mFӄVUB^6JՈK`whD5oD9W[rl2ʥT! %x"$q}̱5ODja. =z -Cw%CY)NCDEΰ}up1ٺ˯mݟ(tUzlkruh,An⸨{Уq~Q4&:֨PB&N7R9؇Asa5)/DVGX?hIXm*,Au'SDUa$ͧH۲>PMX+:?PNt5\ c.Kn;a`ܵxarɒ$Oa/ yGfOp<:NB-jkL` vPJыdҶ2fIi޸u˛=J盜ԿK|AAgZAl(@Ӹ[Rzf\-L|NZvjS2I˖3iGL*}Ʒ+\ś =)BOeA)j!l@JBO0! lEsSZ'oֳ&jsx,'f2Ygi G6YA!`K d錫RW3oF?3۹P*mzI 4#>=]9qN& `DH ELp/\8ky3"q`|iぇHDQpt'pEJO+qHAR@4<#L]3\_?ULoXWm+5g]nJȍPV}!a11 SF(7%fMAWS/{dʅLh S4vLTO2is/W>;)HF3~hпsHR"RE]G=LOxbe۟"W"Q>Kk3,Jڠ2ᠳgI|VK3q,B1ZZ a$_Ȧ:vnŚFy3_YKη/ƫ-Q]Hm hΊ 9?2D҈f!Mq.qӴ,l!N;BJ!Y>l 8@`䇾Ĕ! Ȋ PYQVL5;rהCtxkAHP=x!) ~9+2G>=ƻ7il[v LW8VX!:hB5}gV|, 4P4J뼑MޣU '21.q܍7%,G0Y&&x1C7 m!ɚ+xdr&Gѓ~"+tǷMЫzmlг`:NbeT~;uyԿbUh1, 'MYL9c qE;CV4413:"(TD`BXF,)VYȟ0I*&DځJ9Iƚ&,~$LW% N谇_fT ^6䴱לۗ$* V%ܩK|ΏKcX#Uu>+JsF ,HL H9,@rvq[SPnW!d#,4wVdi֡>Тjy/jR" O"%pi'B a2g^"$mqZXT^Z B㗪HmTz<tr73KS~.ۙxl=*cKo? b"픰(Vr9a}"|悚>YhQWd!Gۊ`d{Ќqˀ 500 |M jvWNn~VʚXCTq's.'3͔l±h]WՇƑnqš/ t?&?ruH.Kf<{&kBy837˥iyWDh Gq+!oO-:j9TI!*@g"2D>*I q|Zei9O?Y\OwitVP%w>X "o~! $)Pp$,%r^:e%6-8>|>Z[qr{?2GWQٓ- gHLMU6å?+(oʒ2!r˪7"^up6,ߎn&k-%ڔ ZF)Q _2D6?^n T`Tvď?$~ : x);SU+Hbͳ-.W`A>oNKMQ7Ky6>]g.,FƪRYHFuTP1lի}ѐP"j$~_%L?4bR =R #><,:IkeLqե}`]I'x.UXo`/G5Dghoaj% t.׸A<S\R [$jlgF|cdXG\q΀JGN7©%:ĉF,:i-)"Xj;Jh9 2 Sk12 ?XlTʮ6Og}!H6(R19'N5pO3C8V4V]GkF!J "V;eOсcRaÄjP! 8؋8H_򛫟4$d{CL^Q<އ R8Ź4?p&Dl7۸9c>zXD ?^W^Nog]1}.b U,ܸ]'+. 
^͔i `Vf'ezDZV%ͱ L=bR0v+p}RVhISh8h\ȋҙtVrGV=U l['wL9ϽKSJc^J)%t;P<&ŔG8\Άˡ0- } YK# Kp>ʯUω[-jm˶EP^7 =AO$,Q 5Or=x*hUxAi|tԻt;1OO;ζ[6bq)b$vt3n!ͭ8Co*K[Dg~erF-T(Zl"#bQbGF~X֓X?.qKl ·!XTd5er|&DґN`Ii{qgUef1;}?4b’< -wN] \C>z|N=ͺA^%-m3Ʈ奺'n9Oy"%+6ʉ 2}Uڨ`U/TK Mfn:ʘG+\ Nz9 i'E,)B\^s:J*ixV~LC)R6a.ٮ!ٮDFߜ|~H:045W]ƻ'eL |zA߹|^7W4Is[~#o}xX25TJ1ǕG-!T0t˚Ŵkc\IucMAq'>Lܲ4@򹼒%1Npq%itMH&3N5gS؏Bo'<8vveiud2m@߅B8QĪKe2;1Ћ EV2KDWenDHJN荍c~s+r%1rLbC9%*` *X/ѤXmU6~ cMb0/q5Jc *)CdBP1'HJ+;16b)tjz] 6͑q¼$=`^{}sr~੥3T`O}T d"S㱊Su  !4%9Y9ȐKe k4ŏ@o|5v^gz1qE{Lʡٯp 'ЅOZO-"ak$uk't=yrk}"hd,n<#;Z3t2 J_~6 frʎt];MH mJbMb*²U_]n/37Z?ʋ)2̀':YW$NKD6ɶ> ]ٗV k7 5sR!" ס^d~䱲3Sw-ҍp s"_<8kMy~ր&F713xR6i, g ` evSNv;7Oէt@DFK<>=Wuǡ8.o$< XNGHyjxZԾdS¬`q~3kf=;lŋ9:5٬P7E C4iLd"oTɽh|{TS "Z<+,Hiy*B3{񋙆 WϤ@ t(k?WO8BI͋oxYWȑb$O.߄\(eھb<-ԛ=kWV1-=qA I0^I`3Day8-x`VUyڋxims4o&hOMaLt%| h\ᮂW&XƸ”[c%4_Zܝ)x+:r42ׄ'˺{ Z_ֿ5bl>!e5>$?hf^p|0+10^n]1;9 $"v[%$+^87w~ rPg]iI[" 3$UXtCѫ~p0Ŭq10O%e2Yg_pũc n cɮ ELF7hnV.#U&Apfj!Pa \x7}*cQ=^An]IIdS8 H7E )X1ͩXUY v޼Z'3?$ȭPpO)?cBtNѣcjLjJ"+=?ܽeE.g4~U[#h].,^xJ.J]j+b'kPf2m#`̱,+FW'ܩqtvheU"2Ym l'/Jߢb9/< \Ts}7k~ȤX: |dy{g&J=3J*ѓVy)ZygčSmhk (![ΠN;f ?Oc1G|FSiQT6*6%O2rONhꡛ1yA[y泫ci~ܫWx$^(s53թ=2Wwn<J\\AèI#]$8r }nqt!x-5nH˫aEc9PɁ^_r}{[~e?? uPHtWXt" &&& EknkFPvAnn0oMZJ~݄h30ŵx% bvi mj 5iB%Z1 H(=<%{gEu}H *TV=枤ǔJ@d9ᮅ_> stream xڍP\-;B]h=Hp $8w q䞜{z9טko*2u&1 G3+3+?@BQ\ʎDErGDe!$M]_ rnv67??++?`~; st QI8:yAV֮y@kN`a fM@׌vuGsB Z:xxx0ڻ0; WkvZhdj5f$*5/)x؁́..n@05;@]VtX/F1o?t657ww2u9X,Av@+#CS;WSwS٫Tݟ9G?¼YBG} 0u^,%6,ܜX4@n@Yɿm^EHeV@W+++7 z[@ k~>NN6~ K; v[ `2w@H<c+,~y,:Jrb R\`bbqxX~Gw}e,|:7h^:Rr|e.@\_ltD+vSOGoj╹n[ k ku,W'j bVv" Z\ͭ_r?TqtqXYG]涯ׇ+'T_SJ9;Ze\S0 J\\uzb 9?#H! X$#>? 
"qXd#.oS7zTXT~#vo `4^cF1AL_nkVXP;jb/ `r6ndzMx_+|-_vߐz*X'{}}KZo^7_ͳV^^Ks|pk2Wk0_x_z^zQy?ϧ 4GZw4~W#F=.4CNpBW|#2ceSZt簥>5Iv$ħC~Db& 'g_ [.9Ϻ۪;ȏeL Jfr\H0 rd^`w&$# K_)- )$1o@7u%Z,X!7(Cߜ 7Wݠ޷S$ʫ)~b3.DrQ}󎩜 غGd&ڽ ܋K]^$,󟦩O| ncz sw71!QJCXXcx5YG*[aHϱSP6?亼^z>, ܋@\pv ](Ne8?DS'爛aV(B;~.55U@9V~t$EElI;#HE )[N9vRawe3B,`yp(B)xc*FEpMύupCG&c=eVDִq^fL4Y1 ;c ϊ*Meȝq^^]f?1zMHnT6jt Bn֟悧$Ǘ[{TfcJIagJǽ5Ǿ|W⽭57 F6>Ɯ+$]wX")gؼOs>\vKK!jC6_^9chDmBp}DCF1M!ᓥJfq&=Hayu8.Z2iPZem#KvZda)/dȷ_j襡0}K]NgId$+|uB4S.~xFV2-i|~A^~d#cc86]1 M7J݃]g%c s 'E-Q.pB%!Vݧ_?+;>a-( &3V9CRvq';1~3 Gp {OhI"(οʨ|RSk͟?U& 8ab6.=>s?M|t/W~2&27ƻiGaT,n%|W.d-mC;tOd߂k^`M , EG" FM\yg⬡N>Ak%;Q"~]&abafևEuvXmlY~q:VbT8|)v2U <dP^ ;r)bp;6,ɹ= :ye'HWW~uĚGzvYW{_hp>zm%#TvX>b[P,NCuR򂦽9pW99g^ՍX䨃l;9gZt˧t!*<6TTT+0\G xaDᥞEgԄ/1}žJǟ ֜6]sZd/sgcfJstC, 1#>p|jK7oG08Ys{ Ue@H@u[H;`d# -IRwԿO`InHtF{/7 hc-zMtt \Vp6- AiK[9,e9-2EU^}+n(`ɹcHUtF~xꅛOQ+,[*Vm*k&T3_:/P[k xeo*)ۍǮzaKDޝ>?lǰ"QJw15JKKYBdGQc1 Wk`,؊d5 0ýC<_xsY C@.8ËznNCq j!aq$&h~ s\̲[" H]xn!#ayAkMWUZ\~L=\tR,[NI~)q_>m]D5,z}\TsFd0kҜMEv5a.OufX&Tی}V;pM^)e`В|ÿZ&RhđiW*0zC*x# Wn5i\y3a #W]on__lwKsQB_#e6ܛDLF6!獬 ; 5V^ A"\=EADš%A&9\ O;\9EŃHzR-tY%FMIf@(gN|ޒ! RS,7(FMOsn ShM!'a"f2]u+w* L7(oBtc" ,on51x>o7n2L Ȱ7I<1٬vJ+mSRÏ3cWVenIBN=v(,2_cP|e 1;]F߾0eo$2);A @]ů~޽Nih˶+9âQYHA8&kCC<8?uʷ(( n4YOi^AkNoqZd%&D2!a9Ӱ7Rjͤv CC#Y-LߴYrV& !d4<2ޤ͉>Zߺ0`D0np8?gԵ Y{>5 ~n |(-Z.P[^:đ+i;dmk vq.p j#2+F,1v }M٦d1+ؤ2W _wh'ټ&]QBw39dkD!>~R2.qexg@̏ `VYJZ\j!dPH:Wp4}J<̓s 9Z'8j9wdQgؔտ- lsO^{P3; RNx ,`xB܅n|ܭ\7, e`ϛǁeCT6oObE hi@)P\JM#y<%YO|"cFҡ[.ؘJ*S6ۛc L|Hy 4oRRh+]ł~<]EQ%O \ݔCǶ`60j]~ٚ;E7~ߑvw5EsP"Lʨj3ƯeACaM? M4H9FMx$uw;(dĴ\ %,P;Y7~~2¯ N!:,NQm"BzCm=ɝܹ{8{2]$L#7=^eaB$lerF[fiP<㐅IBu\DmqXPx/N36ȱ44(QZ. lƣޚ*8jHLOT{[!z>y&ximoQ RrVgg ĻǾ64M>mH qCTqH~ }?q@]Ԡ @W'/`:ak! 
̺ybhdСc Y_pe3}T\w2U߸8keGx~jge}d*b@} #ů^2- %d_ʯ9ԃV_[囅 w/6xrxNXy/}'?:v,QU~ȴɍ;)FZbe{TU-LLJaC8 |NV yOES&F<49QN2Al#3gjh=/;S%n}8zbJA%ބ`I܄C=-(Õ|RJTgGnƄN_`I0q M8ل~V.,J9f.ՇU3ІrỖ]@y!w1EV}b/CSxk 'K_xo#%"%LmRZwToӕCYQTUfi8lBqUg8ç'"_<7/] /IЋnSe$z2dFG8"#u{<AQPOc6p(},=t"Uπ|>^W=@m!*O"in'}(bK Te3R<i&AcWO!Dn5\ 5~7C(͎}4m*YZY ιQ 3{\׮M}uɈW Ep, 5PJ淋+,ϗH/vdr@ Uadsٷ O Gf"/@Cl#:R 'fq0Iդ=]'0CX?227(uDi>ei T;a;KԢK="ڜ VgDz6=j:=]RK}?w muK_l<82;o rfG?]G[. kϳŘR=/~'7w)ƥ0T޺Gչ&;\ӊஷ(sŒKN'R8VfK\gz8}RmKr]u4b\jqe HXt,s4 D8X I0/juHs1:+zF4; 5v<]}" 1{Hv ɮw_ɴGJ 1`8AKmaّ,WDno5=4G1Ծ}Ϸ3˄r}& 9'wjC;=F"c?j*Q͍TM!ZlAӱS$h}Gyϒ03N6dQ "(pSjeOÓZ.Z c#^7Fd2`yTjv2h:8Sժ7ȿǃ\(Oc{qm 4Zi<9Wйq&e8b7Zav_4H팱qvQJSm˶Ĉ 16-Q#(# O㭷5Xf׈YH0X;RU?TMPU< ~dIR6?@R c.&O5v +bIk0ݗ.G=x;q "ً9iMu4#`x=W}!-}{_~{t$VXQTW,dqՒ,E\Q3ŁYʹ_gR2 Cc{~ht-&*戄Rl#P $TO%8VEWzbK0Rʿysi"7ʰЙC$L`_ݮJf˶F?Cg lMaT&7_-pEIhb!WqEZt㤤*2~ @^0jx'6H](хYM͉Fj[ZJrн,[1(y>oesU`0zűl!F @E#;Qs> TuM?IWqoL-+5*b g=c"?{m?51鑜z {?3Dh\'\$'XC5 |g]㛑^.'56x,%ZY}L|?/T iz Jr'{lԲ~rzy0^M !h8Ӧ!˽ ׍4n.:!2gK$/hğPEnZľEiaZlkMo5^A*5oX:pn&Qz;P$КBrtJT&ұ25q~sZRISq"U)Oa.V¦?]\h.uv̯ KV1P7u3-6.v7bm5qN4Vl/PQ^\d.4aVPPbf䙇u},W;0z7Ei,$( .+Rd\Cq!ߖ?EV&BV[ VBdO,S)bhvԙC>,Dey1JNsu7_S#utbbGhhU4ՇM^ÅEˑt!%Ա8G-z:[x;r @N1 ޗenO7\f?u+Э߁ u*[c]{W޵0Vz%N ưk D6kjr?.o8#5l(@d^cmOb YDd #P7I\c4}ɉ +"! 5(vď^F[PC!Q@`A);j1>m|@- {ֆ<1~>b%M0y qټ,Zj-.0^k&^6YH?z~4KyJ [;v #N:LȢR u?ztM U{>f!XY;NψI6yF[aNRPq3`,&V#[oBk~zpar tf|j>xLX6 JgI9֜ 2x鴆J=eė»=JZOӆLwS 7gn Gy bx@w ߁ ".UifX'HY惝5#\,V9}{N<)Np%bO#TC'_If#)=Eg|::g)ད8:Ǭ\hA~D/xoT*3*0ŷދԤsoS2R_%\.6'E ("#H9tD#fa)!(@Ix޺ $/oZi:i\ޮ2ئ`!f:9ٓ̇l'~L*THdIkm>h/'exS@;rWCOC7La%E(/H>S5T40YW wt(ҰQ!,%GxʼnԶ:ųJR2YW#B%ܿGq FD3N[ߊxK#e0O"Nuo]=zwr[r] 取PPv\ilE]KT˪>mONž{ڔp+b7:/#Z/ɐ_Gm%$g* k[V܎ ean! 
b qofV.Tg :x4ƊEϬ^꽔N`CWh]H2cGY;"{,F?3~?V県U{ u]:rcn0hӛ VuQȢtL[L-;+~+5pVU-3xKc2UhT .Un!Jț pJK1uۗTV:YjsE`Ol k$K9XeI_/{^l\"%;ZM#'HMٚЁw]f8!bٳ"i-rraĪݔ}7>~ۛ K/ HQis##ʂS=ދIK:FL BDXbvO(al۔p<+5u-^hŎ r-lÈ.Oe=}ZmWoU3w}mnS@FSU!sPa!<&6å ͵z;j0H$tO36FwǷ1M;@WMQk 51oGjä8($&CZX m%rrTT 8)UL**m^BMzq+kRDj<͞\Agnlp7L xؽgjs2̉iNo$0}i%‘aBe|i'ڏ#XUzT^1`g| ]9J8tRp_̡^NeӸ*53Y>۝j @,@&qؤb7ɰcڹmEzI%tr&8:Zħ|ݺGdOe2 ~J3w^dĢS5g\}S43-q-3B8) Jظ#_}m !y P cƷe(Sj8?kTq]K<|=+qvLu)WEa~QbVbSH#>U?w)ٷVi55-)Pm߄؈zC%AS|ʌhdd]݁ZaTo]cdâ8rI۝MR<iQ] Js/˱na zڔ1Y!TP%=D6b 'FMF=U?u PRGx*7 io86 ܓ?qeu "T]DŮ55ﯝ> @4Cv*3Ye**E޿'Ht＀U}?Ssm0,nnW|G -|{ǚE- 1HGzEa x5[ [rv:GOY:heo;[»p(AEU=>µ0- l[^"yWQܪQ]e)ؗO~QӯIݝ:ZL|,lWx\@' 8 E*"U.b^{TJ>h?Zv[@6w61bE j;Hs]p?Mڨ=& )'J]hG3MY'g\A/qP9[샖`|gHܮ%;X#&חm4I?@"oL>qѾTE\B\e q ,iM[e m^զYGR y\,ίq?:J%R["|50ix7seGu4[: ^H3JM{iw>r~1҈*85T2R?,ml*o2_Mw7j?yIvDqJւۿ~">A:ڙ),]!8[j^j,p @._&KtU\ +FuO,)GGMq| G![?u|8{COwIWZUsN#22F0DebvEYFۯX 1I>qC8А X%D=+5R O :yװ:EI5d[-!(bejc!l΄wjQ+H cuP(eY_\.Y$7ΝZ)<> stream xڍw4]6тDFeFE] 3 w;E'z zD I>}^ksvgƬ#AZAW>^8@ASH  @ll0W8/1(D vEh;M$ DA ?$/C$JvA5$BtBl\k pv9BQ0k0 v:wzHk!8$\]ā@^ /e+ t.P;U0@ S/@Gq"\n@oS|` Bs{e{m|J54To<#$ Ą"?haWaݦe8N?ci!Ѭ8&Hd?Sῢo$jG v2@=H ghpT]1CD2 цZPm ץK,k&o=7R a0~!a{>|I@=SE ].tq~$y rDV#1߈ODDq( Bo6K}7@PkDgrt<_F&پr̡.^rVe<[BȽ|Mq,vs^xKN_Z fǨ?l1nt}Ռ١Ɩ&JG~ѫYW:?6EgJXt' ,x-*s>+dS'dn^p ,??^,we5a:&gFѧ(N-,ȆbQ,X1'Y{oOJuV}eӐԊO{OӾ;xOH"PռƁD{V L_nM, Z0U}>Gډ*J{WEP"rt8Ho5:!ֹ{DC%ڗ3Fzu9$3 goM:iEMΚvkM8@uРV45rS@k|U/S*:;# zr=/Ea,a&QP[ Q-z{'?_짷OvRe20){`ȶVRqU,G&?|ׅ2;΅(UG$'Ho ۷4Iy_SJi}WRE"}&:g{ѠQ(m߁Ҿl-~ҙ [#CqEǹ5,ɉ&簃#qA"iۤP5H"Ԓq*H{ lJ|,@H;QrѨ:pe=` ʐO鑔Ҹr[]`#F-vR-uNj%cOlZFYIore utl7aE݅܊4(rT'*OyF\MǍh{bUt*'SP$Ҫ âF sC-KEDQld>ߚq Ȕd=QӥoqQL^ E8uEpoઽYZD [F5:%3N LwB>lN/^5Y@>?(-+u ˜!ieaQ>(nadb&n~X\!Y66O,%Wχ^drz <_86X?ר뙨aB"?]-;}LD;Z41s93EZ2&(|M=MwkpBBum .ّc)F=U|!xsy2;oBr/Ec2R(]x0 ET#h"J, Fլ=wM`\'DsAƮ8 ^:r!J,<`b_ta Ӧ!S'Bn% ^3Z: 1kyrvIOVKtᶘC'ƷbӨ)5moalW'͎g[%I^ߍ"XYS];*lӁlMc{ yZte>>KRzJ%6Ke!1׋5C@noG%d cM)ڪzOxGmG^KS9E׸ne1#(>)M1aFclkqtGj>S?ۜc9}dJw8Čթslc8m2G,=ih<,k‰PMB5QFC=y qn=X 
lM&=am;pbō'Rzǥc8`p+e"a/SV}7Y'AlOgQf3}WN,gӥډOwlݡ{^w΄3w 5,#ɡ|docjT@ jwֶٌ_-{ЈQ {E͇@/,?^lUme7-ݳ.i<.R_R)*y3sK#&<4Cԩh=(ҌQ~w|n)6+ZeqgGvKN֫oK_}V=US;3ͯO9V x.Q< L\?!BuwJu9x"/8ڷҥG6 awpاzuq75x#>ty܍Է鬀1ZT.s{ƏuDd"9>8$NŨb4.f[c$f^|>ը>om햴C #.FJLod)p53áB xM9EqD'WO+*HɅ&U5aoW7UvX}I@YߊOVdG=U͵T1џ>58'՗mA_1j'+*%HTnGWcQitdx3g<|[za4Ve. &-ci%z[K0dqu7P(?_QQىq;-c!6ƥk%vt|PK-3~*oLHYvȏ.:e _^{NM' tDj|X.o庥p>6TMJ>=`h3]Bd 3l. _32<8,Nܽ"w1O%?],Ȼ%w8oIXaBzI$pdjMwOɛJ %}{Yz4(M0(&=Ϫ>:GXu'dzYeD]I~5qxFP,LS;k)<,E M4~QbC5YHHM+4Ť@{ޗy(E͍> ǯ SviQneU!EzCBS7. 8q$.H'l#[UV.(;tDpn,U*휟GI$JZ#>0kSCD9-{FG-g3o&z:GG dֺP?91p!eK|73x8TwٻSghtmF ri uVK'psw`p475U^/KuG1{D*cw-Bɤȳ_J#e}ɽ+9v6b3' &'OHEשUS=Qm~0 .qܺlk7"Q +$<0{22CM|fzԪ,KdT.BZa61ŶkŋȻvϘԩ*B*}sFWlJbZ01)'stil bxLQe 2⬰8LTEZ4If]Lx= '^1D,/>q*׈=ۇ> L1((|^<8uyZŴp+~Żi|':t"sl\>ZCX994 K9#H{q|[L9(*qgR8ÃNB<c=7]*Cv2+j2c=qs,uGv7ojpY8My/TXŀcq̈́OLGZyyl [Lr9 dJ{8%df4mqB>fUAbX]:_+OS6\8|o"+I]z57n-ca^[fcG(-pJ醥WGWECrҥ \ eGrHrp.^.4I 75/`9]0) .D,kU~9Ei19 nz}:/#~6nLnu`szMT~N(|ǮԘlOai_CYZApMƑ84@4D&z|,x40X3"ɾШ~^Py3] PhBuz}3!ٮ (}DBOG%5t-,^ Pft,C!11]+'2Wl4y!yƓl4p~-e){F+}=ۼՂo/:=:x1=5cV^s(T\Q3GKԳD;_(vH&s(QӓUQē(N>MS e08+g~Yb1.wDZa/PIFԁٶx)dM/o䲕P␁ ;;N:JP?c@3J$ eM5U/ʤI[% kL0ev8fƏ='ER2-j~V &qa.E`J+ɉmgisd\eA!o8Z,|f4rIgMEMx;l3 8QpMNNC* ۉ/\ HZ2ʜY!fM?4hK`ZqG]ʻ#K^F=['L#] W+7(}1s9E͓Wdw@.ysazNԤ:wI57\]vS19` Cr6K\~;O\Q!*|᷼U=;W3@Vm<`ȅL0$@a@V^t4u*/b<}y PĨy$=)wkz;cU""IX\3Zizi.~xNY6-,avFnGMdIqShÂ:٨Ht`خ?K{8fOc@@v&"Ɠ]tDӔR Rm\!_MOӍf쥍v^yF?,duФ+xnO56>(0=#k҅cb=J*[8% endstream endobj 673 0 obj << /Length1 1413 /Length2 6118 /Length3 0 /Length 7081 /Filter /FlateDecode >> stream xڍtT]6 RH!Jw0 0 C CH#%"t)J"- H*)!J>>>}kֺ>>{s]7' !B"5] @DDLHDD'4 |p0&Bu0=_P JɊDEDd&u +A|8^(y߯0/(##%; A@ @txy`(G yW$KVX_#G( HW8~ yB&D 0qÝ p!0_`"E9Pr~'`\P@_SG`N M>9~h@7z!}|fU5`NjpOO C?u(9wuaWP1|MaPo_M1 !"#*%) x `W_ A h/f H0rGAH/$?WD@ F!.Pѿ߄!o Xכœ0Կ鿯XXB@UU8@PTFRHIY$kÜ9{#?FϿn#"!yz*#M_8_yB=P7E@~cS!yWoT _ ф@ H_+nkPuED 1 s#?R;2$@PD7|7t2@XGޤn 8DTZ ~1EDBE n[#7z$&"j#[NUW>sg gdiU#g4x^7c>1l>l4L45LxSIDy5;, g4A!ՉV@]ϫ'V W$_L4M {1Y3Nώd& 5vx25D[b%hѤrQ; V؇GЪk&ѥ%sEclI^ZJG&aHwln 
Z}ǠPմ-UH G38\XMr5]L\6&hrR< Z!$b=%QϤzrhe| lU A(P~ŒM/nū=I`S> _?gݤ /O\&M/v+UJzX r8m9 IyZi WBGnJOR5u OBOf )̯[X a{SmBw$ znb}9cɎg{Wf?|QRb3@$WZE'4#۩g픅a5=sE*i#.-NCX# z^JQ7/m3`fcgTLnc4wk̢tSKCBV+xN֠%ق-a ZżNpvFVYTl4&FF(%g p"?x{̱H~E &&E|:yLJ:L*1&Wrm*x؎l=˾~]Q$Ŷ#OG}·V4'ʥJϨ6(6Nj%dW[݋b5aΝ&d-.:E*m([Q~zدǡSɍi#3+ʠ+ܷЊ-e=\@U *-_g~#<-+汛Iѵ5m}i\jݶ88Ο!FMV[S)-)g:u^;U츘L9ԧ.6/'"o?껷6xtnK+۷Y>-;ڳF'\})PX2'RC?KΪ$Yf4(9~ړ%> /4]Ǭ,]%c]hO{6uo%y|65DYm*ԿWBkdՋa[&V2Tr G<""J՘*gf;CxI !!{GIsYsXT_l26 5v/ZggJz5p.KT9͹eM . #z.h۪Oe.|xhH]+/LWOC]s 1a~韕a39X-+? oTWF=lu+"+| H^;r ~G͞?Ͷ겔AN/6Do0 WݗpTR$'СQ9ߦ&ҫ:ȭ*joԭ4eCצ2}.tqp|T:l (SwC2Ŭ~S㮬.܈Tkm#JPob]؄w1'-/l˒٠}EmUd5VOH3c*əoB} ;AKӊ#߶yMBڿ=c^*N $MH.3r'pHPƄjL u#%9dc2+=RSyf[p~~_<yGCKfHv@W ;ofz鹣,'h)>ePR&*ڶ%6"ʁ5m\`csrÒh4.@Qy+CxұO=ƅw՜FA6$|z{!dehQ.\g<2+#(I_Y8v1kқİږ" rs&ѽF/mb2jgpx@wPuO \# YqP LPW'j<<˺Hg>^!}0IbS[l݂ KRơ;4Pou0ȦiQ[.\FUmna ^n$`/4Op& Ҥ>]_姃cB2݆bA2Jc}熙< -D"m ۦ:BWIf^O7<..$2.iԏ%T$,%P ֘ {8Q:1HK3%(јșwdl§\ x;FN[AyP T;V${NaMoEطWI|r|)uuQ ҿxF`yиn/p4Eh9yJMVuiƀʮ)&w S~P oRL@L=/ JREժAc[T|}+Mzm^o~sZB?9 eX2G·~ *8#~UT׸5ufwl-(&W6sÙϑWG. 
N2,a jmLIX qbeopnAIFn'QQ6Ǚ(?Yf mYoIuz44jL֖i?\O7-Ux{jiӎJR%hŠQIdG]4'PP]g_ڀf}?37)BS&gSdERptR{P52V&+ڴ0l[@Si99nmwn$fGDD<҃Zg5B=c2ZRjGe*BƱG:E³SzVBTcp8evVꥈK:$n1Y.w 1#וpN&FG r TևƯCWOr"S;XE,h9m_̙7"ux5laq-hIj|Wϛa2=a<"){w,=}{_b, jPT~5s[di܆Luc8 tx8th9nY.v+VD*༉bh6Sޜф!'ϧ԰v(Yh gkD71\6kwaIS[ToM8rrs&—[eB?p_;`BEi7͊<} BBsMzmtRa{>&ꝛ]5ѷ*_S2MJ}"M,PNZ|E"8Y؞4KpaX=^Ƥl_3tIrJ4`G-Pj)\28YC󀆪xEV!H.۲ɇr8${t+L#TtW/]ڎNadlˆ ӄXwy{\_:u7^Ml-UӋਔm7BOSh/F=IdV42`nKSASYEyeefF~qXˀ8|U.fމػnq9FϚRԗqEi^{|6_hB3s1]wsP!-;/*>uMz%5^c}~.Sj/TWԈ j>Ny-rh㈐v^*Yr"Qh:T=nihONCCiCkY;{91ͨ~dM8-H$U _k]{-=z'B(m+nKL6ĭ];c"w涄a4.}JCxCy$d6N}~ \I4>>/ : ư_ (u) $J;^bpsZ05c$dlCQZ ~&qo[=Ȩy伏1-ɢ9G+ZIOXEZc[ttw3||aZ [ryLo$ ƲOE"t䌻wlNHb =kD5diŇŧJwE 1F_*_-jRy+* PkϥUM/R "XRL~2sdHϵK8>yq0i}ҌWp5);ݱ'~)Kjv b_I- |c$vjv1EjH]*:n [*%6cġe.Tw$ggKn\ZSH[7 ,ZWkwfx@8XO W*u 9KagP5> w7Ƙi7Fo,sYȱ~A n>#=1.]ydMI)u7XF=ԬU]1Q[\Se`HdhLcO/J5,5j4R CRU;=v5-Js@WQG|6=U7oI{E@w{"e!vh/+Kc\bav=xWob~`t{  Al199) Ebg5}uI\.be$c;&1{YdO&=-Jڣ 5/ZP6zLQΜkS8{bn} )mE328QY}b,"ΠЁTn <^1j8wڴrTeV> stream xڍP\ 4n!hCh%8n 9ܛszc1m1wCE(j6I!L,|qEEYV ; 2;2 l/#yI!/DE=@`ccavH]L9= Jhea yG)_Q;)X^*m`S+R XB |̮L@;'&;r9G(@ƄLаrۡ6A)% xPU(;&+MxpLMODVMMv@{w+{ -,q~Nx hBu @JT|L NLNVgd%vv {$A/؃]=̭aio$+ńf8Yxy8%/3x{:/cA/_ȞN@ 2++0YX#b_ "?V ^fuC늙e%$4eN11 `epmG5+-J^d5hɒ+zwXF}u <1գfN:Ձv@NWo Χrxb75gm97rgD8(+=zl%_ EkH~ 6 \.U:a30_q? :qq_4*#?_C7@}Co 7Si立9\z ݽ 8Xl,FZոc_G|Y~و%? 
& lFWù`c{mI1Rfg6xH@^Y6~AR¿6bnZ>b*TOajU4M qQq(^m$rCm'1hGV`)seD ͒[hJ*KoS% 3?kWtޗrd데8X INՒu)u4 kV_enCm4d3{ANpKY`v1/R@AGR%u2A&HC3/ ,G?0s\nrܑk]kddMmtkn֕ǩni to[vIn_1sp=FjReapٿQ}HPZ٨L ߩ, $@YQ*XWDYhZP5_!/y]p|[%uūטaۨgs=%yv#)}M$:4Dol"'Kؚң;HN.u^,,ﻔ2Fw}\(_>fDD9[u Tn|%=ь:>E/ǽ[l)V`\H^-簃(Qߣy;IGs֑@d-)Nwmy/SFB^`N7NѴF>Ojȣ,~6 ڔڅ٬EQNh=mсwL+հ$za/_{l:V£MQ(0Gký̮QIfO:s8#'\${e&r>2bڴ}iO)Z"ORi,bT(J3{;J5ݴlEtWii8+DG;Q˟YpfJ[>x&J:VۮR4Kz헨4SӻG8]06cFy .IFąxm(JX =H0UɊUe\tv4]oiWuw>~7DF|d$Yb}e Ի[.WEر?jZj{SF[+pdO_lÓ=tF6_Z?aBE - od*q0 a n5zR) !,3BXc~00ě$'.=3o{ڛLgbŎnHSa;};oR<|gMiw@IVʒ L%7Wq~i~|Ab X y@9-mӒvVLl2+n,Tο;oJZE2m} pEu(ͪ:BjHsUYRƬOB%hWwY%'vOL?=//5Z80BrcO1^u},+2H0ph$tye-WR¥?!U"$:?h۬ƽG0NX6զ2G2oӣ90ŀH!H@1L0f~ *.n*=IGY>%UΡ qf^CEt0&(CF "SnyN&,yH{ !_g];t:/}C߸z/Zo"8>a6:=#c(.[#7i 'C-rgnKpkatگ8o0U|ԟI嗟%ow 33 )ِ(梘f;6t5?7:>_T$jPn$lmMDUGCpI(!XNvp=+iS#({^,7`|-INd%a/ siUB?geƴk \fH8AKw')~眉_eMHӘ̎HND{M l˩l=.X2>N?%Tĕ8KRO]'P,\$yɡ=_~4),!@7rbPXx4|ceOzbk"4i.UªkمI_QҼM0.50s=|11a B*w EլFt 6#SkpQMw&wi$koo5(:dW Et*Ŷ餺~I@|u*:4V ?W*cIb-3phάC7~'ǫ_&aeƙucqT Bb"HmT5DC[8hꔃIٍ-'Y̍N3~n41K,vȰ+6_pump??;}( r\Hͧۖش(-lP0 GDg?VW+)B5fB5yjKBKCRN@.<9L>e^B §d[+Fi^K$MĢx tأ ͨK+TD?v4Ur8fٶ{%XUz6s@5G"'^raqJPj8SXxTMJmifb3fߡ|a ݬ%HKOٽRXW#e ~Xܝ~ٖw_)C3CeRZvL- lWzZ1 ()#$J"1`pxnF wDfXw|,'ln(* Գ/9ta]KjeқOQך4?P dw2* <zjJ[-O9Q_l9'U;W$L[UJhjh#mElU{_vi)^$Ff]%t@?|ڕ7h5K}wCDG1ʮDNde̮@_ԣB{vYcHKh3p4{MᒩB0ٌE45-򄬷<P絞[cl@?F\;gLB^ӠHܚ[PH^L!S0 i :oV t纕>Rb1&9X@=)1i,}"L0dMZAڣ1Ѽ&AcscVpoQו c y fn01Z'&/LEtD=@!:E ~l]v5ed'HA`&#>]3gDy0Q[ںV ; ӻ ;*c؝; @]aiDOgϝv̩|9/͜ߎ,6UI!x,n D3qOvaY :r1=sWHiַN߲d:g|w>0O=}@CQ?`*R hAּRߴJE>ksKq F.v Ʉc,\PePPb&=IycWedrw % m>x$Zэ VFb0MΗ|3&:pF{߻EjU+(C,5s?p[_MKÌ\,%4ǖyC)Q d4$){䞸 G~&j 'M ,V~ Y{-[d0rSaYtj5x]ey/'֝ 1x'vK])Ąl0!9PPghCl̵JllݲYF4U$F[fXL) Ֆ%|q)SocDYGQL#̮)"WN1%7cX 8B}5yߑ8N쀹ǙN67A{tD-ab|a %/Sʷw(,MU "ΥVm_":3Rg4l#=.rXI5j] ö&(DA4\(0f0- GtČ%W1ؙn [>YODۜ3SQk0t $u RrhzzbgSيhF:3+_hK KU+gMHOV{ޞmg`LEUxBJY@ĥ Izȹ˚[4qQo-Z0jJNkmH8=-7kMӨQk띥jaЦѨZqP&^%)ѭ+W ipj~j4ҫt>3A3>c%]c`\MN 2&`dVQ؟U@}ͻs#$vhG‚&zuXKӭ7oL*NAv%+H\bt4"+ԕxtamx[)7=- wcH(Wڝ" G'kPFW"E]ĵ>4@sg1s@ j{Aa(+$˛.w&. 
(iNEt Ôeq[ϓCo4t+w*( zݽ$M+9lhOu@L'KF4wܚB+qtw"vnDqZH`ZEL; >+>qGkuk"z7z[Xk} ,Hf6A`(U yh*w h@~fç`B)eAhPo7#e ~oj>:ؾR=䁇MLqL Ɇc!L>]bâW9*KL-Gg>>Q'#z83~0xZ>!/ W!"S%UUvł2^ɿ`(dh`͑~ ,&E )̵#6m{3a;VP8/0 +4Sǘ.85jNj{mA;s>1e`eX> Urřkw5ZRcl Ѓ?]RQ]VD |_(k7@*- ӀN͠ %<`H;AZ}7H5W޽ִ%Md;նngr ; &Vx ,A|j7Zݹnv0pw#%a?0dL"W{|p~m X?*M|g@w":c "=H3~94UV#,B%ӦU D[CDVN')LBh1Z\u>0 9Uey5ZHuK̨m>"ZV4}0fiK|ҭvE>g"[&B|T-§͒>Ѷۊ&[;!C 5tT[s*qdв1NV8Q/eN,ZZj# (\z*e$A\^-re}/b̷cnw~у>{9Xk@d$o7PuHZ1ƣ)B/cFlza<8Kd +u HTӮdt^,Qn2$pGǾJΘ=ȔiyqX;$H  8m&*g/FhG&>JBt{*Cva"=̻k}`RG2~ۣ+F0mZޒ_S Bپw*"G G6&cYO0{8g[?7ER "hCq{\Rie#/-UE\aS~&0&^S˂i;K[[NW jE[<LKtudۆS-M,٢ {WYkbtljB['te#t")K̦w]zO~ZH_WK@/Tx~R_ڑ aF6hwȤ]N.ާpO9lR(rqe4{BzLkfsL+UO-&# &leKȂ?ve7 /.?޻lN۽S?v)9c,@*r:"?5鲪,tC׋1M :oxi99PhЂoUClV^ C]2%dA/԰[&u<[sz7OFp"8ETWfuYuKGXۺm_Un!E(Et*&j#NKHS`ͱuew ;P2[&xc|}yn4?ٺFyPz&zh$ j*͗O 57O¼(Ol Z8 @$(}@Ҷ e( ڴx ~>nRM*η%N\3N Ҡycu=]McnIx? Q&Ѷ +Ӧ|$6PѺ~.u:>BxCLI_C2ҡ?&Gz]:8%(hL,NܰJ;WOLHߒbnBI6dW K;Vu?^0uu쐤twl7…d)J@Wqydְy}i$yG2(% ۉf_#UZq7R&+xEw):${  j?պr0hЅsk^wD5%SK$zDFߑOM.AU䘜D}h6h}Ck?>#eYp!uttE|RE:rvƹ+_UVz$x2x-"*'v^u1K0*\{ i#|:Xf6A{b'q y7H ֌-D[\[HR5Y,vN;p8 cCLfMIFc#h^aN%hJ:m}Zj Gl% 5xZm7фΉ#};(^Rjo ч?.˙FM dž/v9,}K7s@Nk&R]K <FmTAӆʼnj/s՗#TI ŮHMy&@g$2K`mi #}}/RZ`K)<:qZW,UCLH1 ~ٷ }mMo4&u=%|G~m9h#*fۍ˨n6#IΓc, gMUѡ#2cEԀ;Pq[fjdqyjsTncm6@GPDqy#R^)؄M{~-m8IGe_X_47=`4.xo$tsJ:[,o>znX?vT٨3,\uuft׹;ں4][Zcv_͎C3kr3gkzU~d 59)~JGu$Aj ZQKWRbSy$qOSg$I'?`hXJ4&+Z"[W?'k{ q3y![ +ӊ7X9L+3F89J.bҡ2}[d/E;ϊWRH4s˷H*UʫO\wKkt:*9(lGL5$Ulk{r9C[,jFC,zR"^H k" k5G_\(;mX麯!ăQaDA7z;n3-s+6{ +v4>عd\S,%X,آ Ոjf]Ӥ-Evخ_~׍ sjAlq> 5:fIvs%ԁi@087>Ʊ 5?/f|}ko0~~8f1f3"%rnJq*$쓟`  [3tFE, [55gl/+7ž5Vڙ͍E70.7)XdֻVr|nL͡RR{.DӗMrox*ώCiq"2),ӔlxRm,U˜t:9} WCA?ZκumnhF )iq$,UOMa9q{Q>w29 U3RƆL22ܘwTB& W'`ȿdxayPQboG;sOyҺ/KHg/:P R ~|'~0_Dd~ E*{M$q}ĖVa' ~A7d 2E4J8Xp }OsW%Yo [ -J>=CX8q-^u]Yƒ `0 $t${8j9IXCkFIC,x{Ă K;d>Vw6Db†-PR+zpgoǎm]RL2Ak┞T̲2+Qi,K:AisR.߃N_jd]l^EyZ~+bY"lgAꌢ٢QqMF.H'.K _)J}ڋk$ ,)F95~fS2<L* г~<—7ૄk.kZ?AOc; ITp," ߖt]kۤJߋWUѝ-Ajz[mCֻ(!G'Kk{:{RVڷyKi0[52iis.!ʝF/rbabec?N r(񁯢lU+nj~?(AxsˣſC6*:a`Ba3\:}Q I-O,N33ӆr/< 
ѩ#HJ+E3^nF)5 .$0Kۖ~Fg,BKV[W&K$}֪K]F4>soG3$׿aJ<<tFFkh΃-XIA%JL|~JOJyUcu &|ݓ;}ZJ oEܙ(3hkXjB&= >;rO ; OCh6n&UL$_OU!Nv\w$k` z&RW@3=#.D{^BuC 8KM&սW=uLZ))k8yJYE/K䢜Âz7VT^d$`jQ"YRlmZxdVj>O,nWc7P#?e( j~ca-0Op endstream endobj 677 0 obj << /Length1 1415 /Length2 6470 /Length3 0 /Length 7436 /Filter /FlateDecode >> stream xڍtT.)%5tww0C t# !! HKJ#) {׺wZ|X y` JՅ@ /(bE8A,&7w(&  S!Pqp@ /@n%' Ѐ ,p7#u̿^~qqQyghgԉ '! A#" rv煻9ppG r`u^ 78Am!0wT@0T@`p f*N]@0(`utUxnf+G^JfYD mP T[77T[ɂj_oC [Id蓚*y/!)t#k\D$Yͷ*3*O>("_/L[e:aPt`=EO;e$nD5MU(vBӣ!'keBV1JV"A窒,- Zq&pњO* !+Qi:LR*IzzH;bkx> G*K* T]{dޗ۱*[HE;X1rvn-RMzߗ$7X#r<@?I682z)$IL߇hY۸Gi[;Q 7 8jK *OFcGE@v6͖8J>N:ێ ,F~.x/FeITR_w`ΫFo~%{a\JLd$BG$/zp]/L>f`j ؘ̧CM.xY : T]bdp6yԯ %11|V<slTAꯪم[9i9+ ++pGO*Uɣ5ra+NCy*gfXD6lw2+ )|I_O [t80yj2w bl |fjPޠyŎg ^+D Px(Te ?햯i= 5 Ot(/)ʋJzȎP -V# ve{;N$2r6m$!ޣ }k'.κMsb3 v-[]~zuj6Ҿ2JwZv ӉCu,{8W k2CqNlv0ZἛz*I&ExhQZ˼uhGКEϻ9\.+w.U]z5E?w:P"?qUXM' Bq6^OSK׍z$ΐ)TKڤ ք* ŠcZ: OS(d`MˎЪӛjh_Unݳ/I4׵gΨdqH~Vk2q$*=V9ZvSfOauἸW&37uYE嬔Yd9Xv%B +WL3 9˵b = r) mu6'e~ah'SJJ/k\ӸK5I/G欄p|DZWÂ.u iGe,wu+Ρݠ4݈ꑫ @k.P6 .sRmI-3B;\Di޸ xsfQQqRyɿsۨߔ dofz,}V` imݒXzOc}?Dx(!D[Sְp]Guض hD{l/@UJUr!}ueL6/VLf5mwGb>ؠ칓*hFZyxTNԘHE4ԃ~y!pSP`dk⚡D0} "][c,'vwj~#fa bMm~rt-445t;aR/2c(hrHH ']͑BstT V˷\$TmixI\AovL%a{MSu GG uE IUҥ[zF'Mdѩo{U۟Ntb9gEJ U5AEw~ƶ Fwr.nTHP/Vf)/\zSbB4/h5lWE8j3 JĦiOCb21_DCs~ sȲ'Iz=,w3韥DȦfD; q:/#9!ǓJu󤬷1dlk|gRE( OǦtXG?6jfKg-[/=aW72Ѡ o(u/.M=nN{u~;qQ*D%aoӝl.D_# 3J!sjX^m -7\SM@z}&˛1v]wCcyH}8!H67QU +gɧJ zr6짛7`nZﲻip2_Rg5W QOfc+[HD~}TY'x@OO#Ȧ#ܓ,oh޸vWu) *@18Ǫ%C59vO,x?ЊzCWO?L{="LxDRnpt~-炙 DroYGʩ;Ę?`5K_U3ra=1ts<-te*e}]q{(g:!NX4)X~w]z5T]%yZIB*l\}5Ef(7qd$gI։8j|'`{p3T#Za›]lKRxquY ̇ԍa?KVYsȸ~N$lTUIj߉Z}*˷4NC`DMd bi[xA~W)oO=)24kyx }ň:3'>TWݺgsů=x]T Yor;`)ȃrQ.!|x;[x(P un_##{`>ysnU]]DkޥEG⤇:COkbYK7Ih1j l$ͪGdwTtm(e>Ig14UG }( b޲Ey'qbd-GPx\|7hObHpEg(A{7XB O5-m$+yHQI~RI&jtˊȶ i1fe&,|_DENJ3z`ᄍCUc+A_-pcArkfktƬ;$aS—5Q%SCg-'\T- )/?Ow̼/țePdZI gyb48_HE I-V#Vj1=ZGM\]ޛEb㧀'Z{6L5ln8ƛ%$vB rꎘq;cyzaɤeߴ@Q`HbKu,k`O^=b<zm>IՏ\o<\wY'z\\uL<˓_^ r2LbӟHDhj8xNJ:"#+TZsK'󴭚jvzm*[%O&6V96ST wwMeMp26hXʾ|nOꂤ3HUA 
.<{}ʩ:w[?!ܱ#,zLw@2.(ymuQoK^R8ax"a\?b:-&VH {"N7nu>nU8E aTR nF8gs wClZ6ƽCLq4ĩ=Yf:M 6Ak!/P&r9<Ipm8d{)W]Y[y!,`o" Wwn(.צFF{{=04k;**bl(WW֟3(Va9)!j 4~#eK[Ղ4W:Hi Cŀ1U! (?ؾPlAcoHd?{t0s7?ʇC8rcxSDzغ-9wRAfil)hU5eVVJ};Ki-0Qp ##>Y-Y]֐nFGfgI>8fy(WgJ=z)9:u?MTfXj;xI{Kk!$G;:U +7 ɂfbf{*!ik&韍^3BTp'7ImbF>iݱ7_fI~t\w\ǫ$ -$' N)pcToۋ>RfyBB\ode%j6>؝o%D~"Ԝ=n}+p$}8C,}+ ;ղX>RFVo{rջ!`p E@EHÉi|#c_\]ZHWrSkx8̳p~ÿ/&/4Y,'/QSglZm*-_OSj,"R(A+ 5IS;:pkrkt^}h,LQKFɷDA( 7Gļi?<)>=q(ɀ7y[A28LBջN)^g7ȒGTx¶UkֹŘn țLxq endstream endobj 679 0 obj << /Length1 1632 /Length2 9317 /Length3 0 /Length 10391 /Filter /FlateDecode >> stream xڍP-[pиK.AhH#{܂B.5瑙3sUUWu}j:* m6I+G #ԍ )VUUprrsrr@.Gп ] g N Pr@>! ''S?.B Pr]] 6ݞ`dYpH:] (@dvݼ+k77'!OOOv++vxvƎFy qSh rK0jv<'h+ԝ?U4`5w@pYZ::8 b˩y@P߆ {Wgbx6r@NRzn\-] Nn-r yެ@7aġ 8e2y#x9`g5:N?|M 4_WX,6(?џ`?@F8><jˡn"#g뤤l@/'~E \֎ >?{Eƿ Y 0CrcN^N/3pc(7oAr _Ϥuw{^U5>ϥU[AWz^I3ـ<<!r/S{!P+?{~?\y ln\o ~^Cjh{x 7?#^/yA^0ut{v<vtA}||ߢ??CoTFݿozF`7^ALrsȿse >|]?<:=os.?8-]\ sÑ$A[.ļ+8xk!'d4͔\է^3^$A}F >,lsㆺeKʧab@jSܗ 5 rQ[(P.WՇVؒC#* /6koJm^=X71GJ͔ ҉ e,TU]l+@2o@H̺$jnCwʜ[_U7mv?Q%*4(oW.:XV'ذk"]!si̋H*mx\YRt3}{Nab^ 3(V˚\NAH=ZB 3x"#DӺ0,0?d *Xy cY`RњZZB>Tg7ʴ` 1V, QYww03YP޿OL@J3A~}a{Gνh0v/IcG]OL"gQo*ex6jx~ssyo/'4N҆\ͱw /6ӢIO$Ƹq6qrY yAwKT3KW`4~]XO#,jwg "/}oޞqE $*=z/R/ -܀(ϧP IPq_ڨg0;W߄"mvAvK s'КE?rіm4|Rz h86MՅ SvlV(;byHDdFٱN0rP4۔k/V֎[M̼2 f >"aaxkDZ1? hÎS[?oOjͧDvQB ):>Xnq |nr:iwX*e,) j=#'"/MLH`:c3eƶdܷTw:yj78gׂ5#[tW:@3 l#qk-}XvO|ʣGܱ敢B:X&PBi1W'oxɘ c҃Jo#2=.pIEaM^8;1t2^`_ӕCD a+i}f+h[\)m(p>[(*:ɎSZCYp;AiHZxJS;N A9^:2ҶُuNnO/XfJ"+.07My0Y2ƩCh2TM?KFt?I2 `?\S~$#+hSw9W(J!U~'JiR܎9O,^u I⯪Y4DJQY|TV^/jԚ1.f< -Dl PYvp#eX&? 
~/?:#ɹɭ*S~ZP~ي~b%mmi,^u 8H)q- >KPN;]]"΁Pr֤7$M۠Іđ^Ъa„rth!2 oCmRVQ珢Gj_ZE Ea 9&U3 R?; g>1j[%n!+ice(ĥFm4H/4cIgaº[6\T_ jΈgy8#qէm@mGjL5N>xKZyZBqN?)*H|oWNL9XG 8;-S %[я98SdVY (~=hn C`+ Y{]ݲ;0퍥U\'\[Gop:\iȌus!}!_K8M'gi7,8mCUL'%4qԑB#.~?6w?U ߰O9 +aن a+:Hu?$]OCbh,%J/嬒'>5DUR UނsLr $t7txpߤeꍿ0PVݯ}xSgcC.ޥ%KK>jMo9[Ӣ |%Vlr8nM"}}Yr>k6xMwrUi/%b/η%YS/.(OHc?ԺiM{KwT~%RAYC#7VxG,=[=V/Žw(Þ抁,Etur4'ɠE"ZB7ekdtk;Ûm_t<<3 Ύ?CC#B,r=Y-[Ikjps fiP1-6Jj0~z:ADbUWtS;ٖ7/Hr9K0Sr[?%I a%৔MnZ8;@=YMyJr0'.Mҟ!2L)K#rS?j1bc|r"' ] 8!7 R\/{Lo64aWu~HʽjQ_ :C%ֶ|uIr/W&pAF7rPɮtI bKƣeQ@ugv]ˉCMu5ގ$)=0N~xk`@~,`τo,K_n`m ɜU1|x:qo䨓jCqn*ӺN@ekhr)#LCM&fŔq3KG>-wʊ^5 !yFD%f%?ҬY^P_<%y{_UJz--:jB >QUgYU&=7']_ypUAn4&[^jz2nM8:_fAQ0*e[C,ⰳr*t)1*'76jK9%DwPDbW4*o.ͺ"0M7qp;_є7Jf!6'&1.OZJhz#SN܏`!?v( qzfb>3N+hf];YӻT w|ζGr{ q] E%#. e9Y5آ[*gfbLBrT~Tu:aۃ[au*Dz[CyGBu۲{X'윊 e-^_'"ֿ󉮍sGn|vyZ.DcH@9N儋OL62BifR&<MCX> L­H{'%Cj{Xvn6E_D$;)>ISHQqm+˄lϱЌ 7Ӌ:$NƊ{3%:ti-Κ܆MY}xރsթ/D_{Vw\OnYVˋd2EJ"QЩrXU,_VF&3¬8z]hLR#Q,*zfݘ_aX7:G[yTZap+ ĽL F,5@GgI^ؓ0;+d"=#S kU7Շ%E0Pqō^h  R?(PCfg\1QXǽf ᥶_ Z(0~?B3tU}C:2++.~$=KF}{R`[K:>pǙ:'$>0/C,h6t8qiR1_[*Y!v B{R!:j.W#<%nTw  +gR\OHS(Zf1KmN 3įKɭG͝1ŝHj >F'?+=z:)3o%~+n@tو^U%pqqK r Gtn}Y7Tfwjb7|["VJ4l/pJ.a6bR0W&5sp$;6!0]Om'K1lZP0SI 2 naފ t0}9DY\>&nǟp̼OsyKԩʂM!,&|#@P)+M|"-|{Y s: W^Ľ c(P&ט7ˏs l_%~a}۸ {| s!OƲh|SG0TI1uW@ڕnfaf0Bɬ|;~a#VyyM{mTk+s#8Wxԍ>.Mۮdg>SJ]UhBqu TDt2&"z{#R92UX8[q75G?/՟dy^NxޟVhʘg}gL~|5CNvWF NBf/j{*R`@i%ّj "nmPHǩChv2.йUWġ3-2 V[k 7(0-qt?]/9FPx7~d1] < o"Wtx"p2AGc|eG\% YS, UpDFF.FZe6mfl"9?=>M~! a[UN}骥,I[΁kQS3R$9JZ\:v]{) .u͐qIޥvMY㍣u_RSDD#B&oN.㌃#rgώ͵tSY#Um~]jrY|M?['DtJ85j`F jl#$Nwi*Lujvŀt-`U[(XXK {[]ڶm/+Eܞ# {q7I8YkYDr\IMeev5MƋ/Mh~a T}#Q] k >! 
c= LTZ]q<Ӆ 3 0&lGvm98ٹGNoC?q+ !Vhtϫ}x ;V*AxbQnq(_\<9z.$fB&wUilݧ ƳOhiIHڙG=ìڅ79gp2yrf~=5E֊Od=W1,13E=(/Z|%?kKqҰifR5V-AwUG nr: qt>R2'ңrNs3"6eTseXeJ?6pH,qr§ .CCBY@ѢJ`бbtUL۬n2^߻`;oBn C^B~=Kh!ZZ709f(ψUyBsPr),FP5_9KC(PL5UZl5llBX:O!t=;2wPnn0?L5 ru2Ry'3ֺk _0w~Й͵ރ1)n&QCj2ѫN\j҂JQ4[6Wxr #L27RrNYMa'կ5oOL#$9->u~8jF{"ϙG bkC.]% 5 ʼng]>5I9q@b%~7 0̽pGlj)}C;EcHU]Cɛi}ȟmr.GC7svKJv+J {QiUvҒ#ff&/KKCY'E+d]BٔRk%Ky4%E?l:7 Y땽G5XrC51n rU0~H 0Մ  ^zm~טr̹$.l>n"OZRrW̰90FwSDqzj& +]~, υs5u-(}EIG4a?e  S`ɖgIW)ѽ欮 ⿜cӹyi`9nt#P·MRᰡl`s.V6KUG:Y[MY/]ӽ§ni 30j"s\ľPC 1/y{)ƿ(h);n2mJX6$K,ۼoٯ_ѓ98UǥӮߗ(7y^[L.v+[wٻz.+߯-6q84sޮ4GIS[x/W38ᨑe9%yqyR?̜\C+t;k>\V.6<ޤw)]C?vԪPV̀Q?X*TE+a,+@Qn|53jw3Nzt_ݚvW0 TLU``"'iBjڴWJMޚ1\+}gN^] F>/T`C0b/@J=!9L&VU5] !i:[Ӏ4܆ < ;Y a"2R/iw%g4{V'vIm!~J} _്˜& 8xwoб˶v鼬zYKƜ agQ<~mKj"-cQ'fG*([C[}Q;BUZClK2[ؗB*$ "RhJ52i}^У t׬8?\ _a.0PF[[v"O_f5~_{@t%$i'XFBbvJ#AsElƇ?zܤMY5ڝ0|d$nT&"S_ -OwF07)?a;('nVnQ(r ڠ$rv&z(py.΢G Ķx~Z&CeP|m>Sh߻0z BmIs<)tp$VfN3σnGFXYhk=W'-*`Il 0Zv8H2@Č)%\ AʌUt3zѦA`)|mrӧyjގ*AHM嚉l(CyB%Ö9a;7ˉyB:rӌ8"Y\O0ZyHM!hO&7[h3}^ЫM!>^m"3!L"Ι;6F WZhjFrӛSj*uڷO?y +?K{'eQTXeUR-J: F2fU)M S bt8Y`/,.tl|[7GX썹>>5]nma`J~EIVR~yimd_bns"a)5d.I|~ :`sfnNJu].u;~IDstk_C'a1VڝYpd?nFKਗ਼K%3 8=2/ȟ?@g cIuHSujMymoUDE~ KgIrڰ0,v@q-ڧ+8`G\ˆAf}'6Z<Ƿ_bOM?[7=8 󷊿⪮11s˒4~pt2mt xolyNkZ!OY1fGHd}`5  Ϫ?T3ؑ@+Ш'_PL4>`%/QEpRa5g>#ɕꇏ؂1=Yk'kN7UtSw]]x|) s"| ыɕUD#ӘqsxEJ/cA3 iEG/ V(P/3l09LZ225;vZfȣ*<l_6cKo.ML *I+sJ,e-%NB[f. endstream endobj 681 0 obj << /Length1 1401 /Length2 6331 /Length3 0 /Length 7291 /Filter /FlateDecode >> stream xڍt4^A!a({/QF2hA$zM w;ˈ!$R9߹w{׬ͷ~~~|\z@y5TGAEmmu $ qqNпn2.c(C%@ >%0FnNAa}IA1I I @J06?@ȸ.^H={̿^<^ؽyg(`={"0@@`P?JH٣.`g?i'{CBQP;k`g2~2.= oE{P@(l`kt]?`?{wwٿ ࿓ 0'(@WE퉾m~N(69ߝ*0v H ŏ9QW-+mP8E?% ^:pᅥ- nck7#8 ugEDAb"+ U;(ˍBd(;FA}w Av08cP?6vH' 7K,lp'WXCTM)( <@! PHİ/,m\u-]=e ^? xr (}sw⿪X 99a3/Z74V }Zm h0V p;," ?~J у!8oKkN08T`@aq~@PXb QX }(!_#`/2걖([Poj46`@Z3$_?&?!>~ÿbB=ȃ` a)½c!pctjAFf.A: ǙK3߽1շjDT>ݲ:ĝL>bᘦy&ٍAF*kcmEss6@EL#I ]Exg!pC㝬5瘠HI:R-'d+} 4]8i(/UD>ƚ 04Y?6*6v%Q'sc2ɐ1턟,];|FF:.c(-!pNc( e4֘ Wr(?g=? 
Ό#MEZ'KK'(p.d7[0EP\1aU> &uW~%Q̜B(x*1i(̹l}"AaE!'q_RmZT63<<_9fb)w#$)2Im`9kҮ%sc'։֨F8-H{!"o[Bf֫&<טܫ_2h`yvi"T3%M3AHEop;Kvopȥ(p5%vH_MfLsO#7$<񂂐dɴ[4 D_Ͼ>No;L~7x|L B4~4 Amrz|S$_'sl7ZSrr=t圔"ֶPex-.GUdk)9|5Y?EUu+ݝx|5 OoL /H5RI-2G35O9je~-D~FJ;m vǷ~ Stu0{񕛷#}|Co++ zAzȜ_T8&]XTlU*l PɴC nOX/P4|yDyQg g B6֧ěw{&ȣކ;HG͝G3D hyͽr`jUˍ q%Ъ35!ϛ/)W4D_DsuJaڳM9HeZ9$pk*Xr+B ʊf٤ܺ(0IB>4_U#/7v3޵$r9OnU=Djr7E"%9w5(Ƽk=KTbݡ$]N \|Da6(,;B}:u5T APP1?iq0.IH2ۦr&F#9a<uuME]ʛ5 }n0M=$G!k\.˼jJ(v"ƂVG~PvͲr\ږ {"kxzG n* $Zdo8gG26{Ma33yӤ;TO6(îQ@?Y!+9l)4Ykr]N  3>e>" g/i}>9bY\ L!KuA!mf*l$.CRI{x_Z.}MX(I˹d:^/6~Phd&Fhꝉnp[{J%m ӾZ6y<ǃs˲7ED]ǝݯҁ%Yu+zra<\c|7*eiߣ$0a:OIÌYtSu*5|',ov\_lӫ4@†khIw)Bfԗm 5~!}OxJ2g&X^H;ޚ"LG 61 ެay .)};ܟcN]ӗ)0МN;O1d=oa)B 9~ ۋaz>W̃1z 8LcGGʹ-&KV Sg&lqFtLLbf`wO|bCߺ CDI4aO/b@~ч=3-C1gtQNBC @/9?߾X{%e5;ӒB뙊%TxH׬9c'Ž~Ջ\ڦqjQ=w$)7U(h1&3cf䪄nWD%QRA'5Дo8eh '81+Ά=8k ?0oT(&< ḩڳ8]"8f0gHQ)Z %ŰWfMi'!C/eؓ@kgplt[ PnB=Jdٕbdp49ݙ8Caoض߽X²k#܍2Kw/d$u汓֩/Rl_1|m~$70BXl\Tb7Ծbm]0+Jds\C(Pv0+M̑"Y{Vys˧L*(N#`+ڧw}[2_I+*WPø -#_+/2rY$dr&sso4P1 ^Rm /QqGH_7ж<*[3.M\N 9Z DqM794Ky3  ӳoQy6~/i>WC-A &9[ R"1ۑRE_uye]N ( Dlb'| ESJʇcBWdPګ_ _RߎhJdxR2bsC` Rݏ`ov7,60QVȲ>\cSz,E*&3?|[gŵ{%n$:U̫#@~6wRW _^$k,{c+i_nP!&.>?\$ Izj+áh9&!V橾gՃG ~NntuFZ`~6re9Π"隲K2kyWk=O8} \+a*JV RuC}YLE!J̚a$wyܷi |ZS_f'k1C3ټDt .%"eޥ'@2I\E 2nICfS(rUS|ҭwփ~039wh|f,B܂xXk*9"z0WTS>Q]9[~_/:&6ϋ f4cc$ ZX ]^)J>o}cSQ.m9h{V3禟Z]ёݨ?EW.JgL/[h586T'DRkVvz*.TR3}K$uxITjC9w-Ʒ?<8 =K9*r{7rr:7[hÙou;$[H)+t/"h^2n R~^|ߩoESa,R1˗O =(A%ҀLg&Úxɡc!>|Eq :-sw:w/LeN&fGf_ݍzY]}]FEϩ?|KwXNH%0eZoMM~y).8E OTsMU -7bu7HӑOx^O(NȦpAK†J88Zs/ rnmvH )lJDjy͐;JH-? 
D}Jq=DE+./)[ӎE;7N9oH.柑x ʙ7T5 avUF *?Qg1b< lr.]j9`K`R:d  8!TO#|j8}&όTލϔi\Z:` D9ВڕER H(l9Ԝ)Gǯu$>zA{7u5W&\8 /MNu :q:mJn f8Hqg3btU&Jр  .^b:tc~~ybJ\R9#f}c(CKFUj^FQ)rf]h{ѭi6[}()άO 4I [?&97dMJFtetbSV@^l㔬; =.^ #&҂!oyH#5@83S endstream endobj 683 0 obj << /Length1 2840 /Length2 24289 /Length3 0 /Length 25875 /Filter /FlateDecode >> stream xڌP Aww'ww @pw' 46U[T s>}J2eUFQ3 PdWx`aagbaaCTrcF:9[wmo]q {-`ca_ ȉ )ANV.2 1r3u jt25(XMm S+RX813393,hV.w@g aΘ)jVUA.N@`ke wp7:2%0 tƦ ;c{O+{ -$)07hl 7v356 )07O{ΦNV.LVZdk8/.sv_'3ق? /d6 ͬN@gwo`2S hgfl ?m\8 05x)! l_7Y_8\&޺%,iXw`/wl`2vƦNlCNp6{c?x@qS:v9?܈y -ll]P¯q:+hfbG\xǮXM|hg_s1Npg߮CuVY~Ę],̕W<[w9i]ͻd>W/8Ry7:|-zMfAֵբ;S;) N(i2ޯ9]&v-oI\,>yhhnb q~XѺ>b"F5]'G@/9<(yRu}Ká;*U\rH?OSdù0ayN_]Oad#Ez묳Ex9wSC_a OPy'y.h($FfH^fDcgKzaS6ɺ&_S҄eD]fRn$4[ijnL;YFֶqe7ƛ[_}5? YIX//'Ö@uieyaCߟn@pGV4)vOٺcq)|I>;+d0M1w5Z0T|^ēT>nsCFggr0\ァo>ӉFz[ g Q~CXPH/uYH`Ԇwpw]JRPE7[3=&o욢Q5FJeRR.(,Ͽ-xVۡtK:m%2cw a`3muɑ-- o ~OжTds[ik4ŸÛWEjҧ׎a޼`g=PgcgYozeܓF7 V:ծb] uiCR~0ȕ=!b_FmUE9E67([~UOKz<Ė 5@mt;Lev\`FtjqQY*&؅ZqQYRon7=]?ffg/"KHYKr&D~z &#!M5 d؝7gZU74TW  _p'A2΁MP`!tXDEJmiuse=`HSJév#}3%[s< kmPp#M7iͫ>zkAF\SAN![|8͛k /aV8FΎ/x6-Ǵ,w8HW껜q1Db%p96%)J.e ۇfxu.~x|9*6J*aTWB9[/תk\jN4]Z]r_7r 5|SZ 3l *ᒭ{89fzJ׳c"lCiw88mХʙL00ExZq/.UrՔ_܂r -]qc?zUnfc Eq ה&G G锱ЕnGbTE+8{#Vkh綏|BAƨTbRZbh)% : \!\U5yHh2YBּ_:43tg̡Š[=(t(l<\s69s6JG "oH> _? )04Mmg7/3Uᆽ-'d,BRO9#JQxBW'mnwF!4wXiVV &Y<^҅3쐈L?usLbR+D[ImOU}lԊ;[Ay<iR~Sd+k'%kƺޛ*{pH`|_"B$g\xܽݐ#*ӅeMM=6T-ubm{hpf> o,Va@qҭrk#}GυEγ֪?mZ8B;1c/?< s3j9>W,aŒj@m /~`.Y[%]5J8J(`t{VENz갋LAE; ӂ6ѹȼ=vSr`7}h{Wå*uZ8\#}\a&Ȇ!Ȧ!e৵w(M %He TXoo.Z-QuUB-Kw:={sJfIxa"aJ&p!p"BD8V&ho3b2{J[m9c;aXNBeZI\Jz#VCk>-n$%c !*M$wt ̔|OQSwa\!Jپw>EeJqdLy/ZR-r$=jsWӤZO Mڽ Hs#dҝJ/#Qwa?c7erQj R,*E]LV{"n;z._f\^*oz` ҄W ܸ͒-qQ+mΓz NcՔr#<8SQ}yJ6FM'[+x\=)~&\$":s~#{Zџɼ':BQgtɡݵөһDSiT < |v*w_eq.G~6A$I.Wc]K6q=jkNZd9&|,ɦӏ'7~;& խm x8yf* \9 _6,=0Fz^(@׭ G3v' {dLB:kꧼq0.A>SFW;aQ),t((U:%^1"xI'O匄FŔFIy'+_K[݆ھ=~Þ}RQhbB޹"G\5qaqh1bZ/3*Ѓu\Z>uO5`q!ŦQ,6>3븣<T~|0.n[[E}LTkq(7wl.O }w;E62Zq%Л/cO EZG7Ⱥ{dTQIaO&De_ӗ~!xCVJ^| =t1룛;I\3 )Y=grukHO}ww:$^oK\D^1ҳEi4-? 
m׏5$jN|w 6 :dN>.w=f4V- E^o`:?^{diiL~7Q|&K.*d;iv{W nfmͼuK1(RScZ(|)==r9N%w,2/yJU K@9m,\`y&%:5(ZU F{w':␗U7HH` ^gz.FѸ~_#ճ|"ÂDPgѸdHS5 fxdX|qȽOnKfC/\)Q0ޝ3|qoMaK2WH d8D9h)- 0O/g3|3vퟻEOgTzcgu̚e3pH*~-<ޡ tZN|KszǮ욨|uf#DY2\*U⒰)5z ±b۲*'om:,ތ:}m>x%Z: 'w}ZO[!XdN=^TNf2F}\^zC?Q38FV4 Uw#F*EKzqzq'%NǥVkok2JbH( }̦?gPiۅ4 :>mV>L auouH uQRտT6: ؠhWQsP)T|9F TUdm@lo.y_ԀwOrYeyx6vWY+n3f.kQi7&S,~RwHQ$0ɓ]f;˾M*; (8T}V>zo8#T~ Y:+ix!(OH4afxuj.0BmKĺV2b8~OM)P:ut El'ĵ0:mϗ T4E>Ŷ/A6]yvs2|̡Z f#\El99G h1}#rXl=ה9t ^BUu~&H~उhitч^l%7lK5pe?6׹iQ {^`Ɍ_ٚTpvi"oZ&ڬeHՆr5=X{=C[{]/[G5lv`tyME0}L 5|M)r}|1m6"9Q`mz>* BpHR_ `P)a(h^+JvpuWK$ 2_!x|m-Z"Yj/ߌWӢgd?BMR]. r5|M9 0:%}1YiXg"KQʲ,__IJ5[`})c8ؚɺYadďtI#@JDq=fdc<3bmMO;F) m2LvFx1XiF)m"孾DDS$M0_偼`V'#_4{Vc~1-m_ ټZCKn7oˮg;uP+(=ˎ?}=.:}Fs VKFll;ATO\jTAjmWQMJ6pl?6.]`r3VD92V}wF9~S=TK]ћ\:Ns[R A:U]һ!0mwK[,wC _dpͯ&eq4(j'xe|Ҙ "*!2RoiV5Cј'vFf\y T5/e9U6Up,)U A=[ ~wk%Vp$qVז  oc[0C|ղRpIụ2}ĺ f\us4!v3qZmZWH.G·J^޻3n8QKG^Ǣn0~-E?rD'K\|wT\rWL.@g[\; oRiGܪ eǴQ攚~t:Oe|t̓ sDBP>LjL`wvֻšϧ1fl%Nçw[:!VB6_K *NaQUUרʱ}%~j$K' k+Z^$JŢSKjvYQ238xx(`~\/c/5̈́Y )}g*k ^^>gv@=,]ZB@qW$s-gIDCB)݄֭32WE.쟶^Wlpr4O~VY越&~Ev_P7'_|lY&ʄ%aUhgD^#dnȆ pdKЃ_QJ,^zw}-&:,#S¡SR#HS;.b~J6MK$Sz|i mԃNW`cQe~%ឫ`fqyJv׎A|=x79&IC;׉yl,'VNy2Cĩj0Mt~ɦx\{e؍1^ܙuV>LPUYnyuM%9 ~jU^):7$wiN>Uе0 rqU*Qaƾ3ԦwZrZ –t5y=;Q)1w\hpV;FyG5IUɇ7[= GZ^Ԗ4ӶՇRdbVE 6Ϳtt["6ocWۚ'L;ߕel1~U e P5Ex(Deu,yte"i1?C[U$vMԇhl QAv#fC㼮zeHMd)}gH,4GsW}kLK RPк ٹKϒB)32_*opZ)7ImzF%՜ΌdW}lMCMivSh9gza"i048ּ%s/j YDu1wO[8RdNC[DbhƬ֎hr\D"}$9&ɖ04~ - s0K08R/l2i5B'f'C܀%>zQ&*hp}fQ5ְ 9{=A{JϹvGvXc.Yy7tH| A3ƭU%P7\rOtab~xG'>|GA6b,=I&Aݧݟtg/[}s7`ւ K C?ba WֶZ\PT~<mI\@M%_tA],=m_:b^>.r=;&n೫ :d$QAQ ]+OXeY\$Rɑsmf[6㆞g% nG~鬫y::~t9tb(3#-!9ʉL⡬[Nں, Yxߛ: 3aԱ$:=OC_O_Y2a9L#V+$5rqVE^wލ$~rW'SgX"`uڨtcA|حU[4*۹gJGJ[N&Wx1Ut2:ӝ{X6e=-}5⡈VW"Y#2Ϧe:+cN CL[7&DM-؋3Yȧ _֥}(tQ &=L_- LXړ=e+Gkٞ'U'J6rSBQ*C;YLR&_wToIm+-4"$o zZ!@xY;,4z1ѧ1&.!** ~ap3'c=o-(+ FU5Yi ǘbHN)?F$qGߚ;9RUҡ*.Kӟ= ˰33& KFL y2Ru,FsC[[d?U?>ziƹ@xv]a-s5MqK%##jw6akK7԰? 
ޕ*FQ @-Ro26"S\fO#xBˮCۘP!z, oYR#Gw;+ev9Kްpvot*ލ) z;k(OOO%ESlX̭Rg(]# ֨l&zgA*.[Ajj#Og1U Nt.f$;OEETHTȗcLst-]%?]hC3"@ߓRO]Zq5㝏rv6 Qk߳U^w/M<ЁD~ܡ.>7L]m0пC{So<eUXիpJS.~ANHQMʼn{6C'Ud"tm nV&0d:DHV6aƆOr ]cicN=dy5;5Mw۟ꜬiJ5mZDSNޠf=[#B4Tk>_y,JNAstH4QK"\& +iȶ.LK*ZF-b|\#bX̴~V9p& VPD0ްG>L6Y}@֛yDGEƽh7yr>bb-xQOhWtoiQwW̷&tCp]lwuiGƺ]rJ5.YNK3qubYٌ/2nvDW+YvW9UگpH"D hv ` ܉0YaI(Jik"<> Ƽ(s玞=3(;"}֕;P8e#++=.#(<9 #ͼGrOZԺ1ԓz^HӅ**D|TwFn)?Nc&4D^}(~Y àƹM;$`ŝ۔f_-a":_0 =yR,Qx>.qQtm8]G1G4 ҇ vD1+Z3"{wfsYBm8XyJxy.:4A*+4"yjم5q՞[k☫ei_$MPl8B+PDg)-ݟ$?2V衅G7%@uqdco=$SQ 1p>!aAKulHLH3)%t-|gubPؕw#@ &OdRBޮ Z*/LC7#4lds6r>1U ERFa}!i+h{ "S iVdO[mxވ['va($af֗|gLz-f>P.V /3>/?y&rMXIfW|zھǔ`(kr} _ Cӥtd9N RI€ vR\[x,䃜fU;Lu&..WLmM @- :T.<s3,mDl!Vq)t_&bLAbQ2'%q=]m` Mt^r986\1r^k #i;m"T~oXIRMؗܗ./0V=BJ6!3)ۤm&i5s!n3J9檟oO1\1I6aUDrr"ՊBgijpәB~]+W/CU &\,F~g(~ZU瑎,Lu8 .kyP0M"#%FLaJ 1hZ=Wc,4A+&zSed[m0L{%#0ٍ;>"8%'5M/2He| ^_]9[c•Ph Fd hw B'muKF">![7ν~9"o('[z:PʀPteOWH ֧3)I1ElPA~2;U;>^<) oȡia-ظGRv*87佞A$%0gB]^Y˽YڙuSo=E*dwdJ^-⡿T51IW`-V@eU~|Ċ8 {z N"nv%7"DJe عM_)64ʜ!9qA嚪(lF÷Ͱ=:7j!uCMO u[Rx rx=DYu3R6@CY t|*n!EUVm&V{>š`ǑfIreIkAO*{\Ѹwڢ,OZri_+CPC-S{)5ch S+7NtǏyB8KePtjx&8jN*b)#6]^Y+wa,B7W/(Z;}j)S'˫|?:}sAп}bCp5+׊)g"M9@5 7?N8'=_&6cnJ:*zVkۍ2V9 sJKxp<}ǚr;.*Y*އQZ&{-T^?5ծL):O\[\:l14?+quV?m\4wy&C}U=_ʇ+3|X(Ƞ; %Ė2Tdsn0+ 2L鳨v5sg Z){./8L𐂗ڽ$XR]&_Nҗ@/zj20~;63P߀T j}Rn,+1e`nKvlzŨiUF}ݬERtYE7j>ӢsxѢY%#y:UDŽJ^Wz˵ķ:-IL:٩)mG3H;<ʶ;O1!x{mަ_<&>S|?i=xRpߘ~نPJ$3jY$'0_kjFߜ|Y쀺T{\oÛ=?fsX.Д.q}_P餄U98Hi@LHAԎWX~-r749̥c;D 3&-xWOE>S,~hY+n%lrmɡ:m/? 
;SGVҜFlkRs`|ER"S o62Z:ڿ=jnf{'=8xgbu ύ}]+HYN:|Qig/$-;ﭰ!Uzj9ʍ0fXUjzn`AŃ*~ƃbvM% dnhwVzS`2r k>o3̗'B\)r o7q24.0-Wg {E#d7l!gj`A J{$G OeVU>"yt"zCE ښ+m`EÃjمLN'Z|j)S#zJ7椟0^jUaV >@3ODɟ8ȯH]XـTڲ*a`|Ĩ$7=)74| ˽G@Uc[EǓGmԪiLo W=jxQVv=g g1UU1=֘#1Cl.BGJ4CY!JG|?_Zc1 W"pR,:#w1zƎkx|}EzN*bMKS>y]dm^ X\hYf"22{]QNCGL2MBQ śj` yV3nEz֖NuUĜ;XE(L#X|Ah my ]`/IBU%cݡ+ugAA.S$4=):\ӋY%|b Fرlqxrt؜ jH57i{Þ)MulM[BoEs%A'qpqp9-Q2} *7Ūk(dX`?Cڨjrnd ַCwI~ @LnD}'L{2x|,9=з{zZyFC.3y:.GZ#xzpD+3nZo2p8FxCR"5CMSY'+q`c)] [=73TDZQ82ji35wɆ/'roU 4ὺwHb 01Mڑ*T xP zUA>{UlxL"&xXZjF|PIpFf,&z뛴_mwlٿ4j`n9V![u< WH5"dPg CVdfs7+2\k) uG:|fmoc"%L3m/#J~S/i;6eh~(J "DӡYA6@Q7<?-8`M)~ #]9, DEݗ4r̻u?ZZZLpE(;Ch/_4\Ŋe"[i>MV~)կ`~Nl$wxSAi"UBbu aƺk,u?I@Ka qc`̷T4.-avB'"F6uY MELj} ;ZuVcZ\,~7~F6 {U8ifNKXu>qeIHB"\ ~sbxK\]Tq+^PR%Clɧ},YEJO}JF,1O! !h*scʋFTz4+ڢR$ܤ zfl@MB(e#Od.Qcn`VϿAaz;>6b t5v Y&ݑ`^=Z1YE~TNq=o5ONhpjT`4=Fbx,NKn/YW~L$LX]HFuD/JZ2C4Kȩ eaMQms8vҜhorgBoˁ{3~ '6Z'9ss{;9a#( W $;O:2$ˆ@ɾ K& nBSSTA+ꇼyx -|) }C6QN0`VŶ++c5rB?ic?YUE(8*z`gH*Wq} +$_ܺfW_u'gh߄ZI``2=Ņ[QsTcxBR[+FAJJc,Jn>E46%7V bI9߳cHp^TYlа% .ڞxbwM}}<%_I>0XhC6!.qh'4<<878ᤘÅp(9:w3#MGxDЅo{$5t-', /-́ѡ]ob6R]3fx=P5`FIBZ5|7Hӹ)fF hufAqT>uCdgÒ44": IK=\9Z#;t  UE(r⨗WƳ~D)Ơ&p{h_'[OHIԽP]yb\lB'5墼MdF9+)dc`>ѫjuo~f_):4~˩0L&2n Y$P ,Z!pU[ =~R|BTß:uN+|{KpL+r{N3SIz~hZs9yh܁z0g] 19Y#\!I$16Wv5jqgw' {#oWV,z)#eyiMw+aAo@rQ_x[H =1`%h>9r+BY=>`Chy͔FPT?V&nF?ťh..qjBjXf%q*DHƓxj-͆v2N C kqnE/&S~9wTD;#J~\(nCxK&MӄsR$(Ϲ$Hha@"߷n |: c.ph`0:6!ѵZQi%?ww"R9L5fCȤsK6=,r;8O_2跔~Ū"h錰J9`譵5%ry*0Ki iЦ6?ڨW#UJ|&$ !M`[ 2 jn2:!GKk)*dѣ㞹[陂OkU㛫n[VR(M(ҏ)V(@CFίQ~UEIlsСH2eWn,h mqAr y=}Z>.|")KQgR০R%WV l+*[wOgu L251K%'=/i ^9ƒg Wd5Y~̡Cyl trqUo^C/~Pذ |j8!8 ?%ߩ&:k$Mj N47^$pGj9uG}Ȉ,Ul.e 4C.Z:>hw,|'xH@lJz1-z[È iU&/+y|xy^7PF|"?* P?/r7J i,#x~Vt匩hz_/|f?~(YXz!ȧ lc<&Jz6o0Iiߩpq6l&Oh7\w) cM/=5k^6[H9'-2َ@wT?Yd1E0Fƴ 1IƼW T̆`Y䒈Q|Rڂb8<V(=b~o]jm:bOLZBx44}oUAݼ"kCLCggI`^9ֵw05ys0g4fY/}4/< |=P3c(B?2oM9>KO-6ע_/T+cu18O4Ҁ  zB{vp=vMJq_a|=ŷC_Cu]YoX[[_GtN 2 a5CUYؘuGz:ir:U)xE0Aa 2O~1ƞN?*W@ŗ4JQnv2_)US3 GELn]̃E>Qnt$+Ok/COz "¬e%4gpaC8HI[$ s> U6ͣr)/cGiGxvFKM~= j#$~RIZJ/t:^観( 
䀢\ַ_%S]\JTh/jه,09 ?,d}}3-SY. w|Mǟĺ`kTKm#$r'd_$rP%Kx dzo۟Qp ϱ` kz4j KG\3']=5B["ۜieʳD8C=}6z~yXwsq$^“s)Az4L)}t+;KD<.L[e˓B,e$GcgM=V{GhBxϾyzwBKc_%K'2UkFX @[ 0_ kZ y_xT;6^*s]1~浟XRIE D¼҃Z٪G Ç/-n%=Yܐ.i~zp>s74(/4nE'PwV =26[:-Ya߀~f@._D}NsYWp`n6,Rrd^@BzXpҜSr@dif4Ң= WVo>2o}cG84\hAshȝ.L6-\_)emޣ;To mc|S'I/|ac7'_'9=ԫ<#Q?#S^]F}9ż_yxU7IgoЅ9<ǖm L^ґvu}D#֗ٸ[ Ne_/Is&# Ej:ߥ-:o/{}Z, urN8k7Y%%a[IJpP_xՀ aT2^ߎdG}u[&?N_T\X@T~0t!8{¨ĨZP?Ά96vl*`U='"q/!w` [`֙uѼIhҪ0g98sC.lfgL(XA6INTHH !%AZ(9`+.)kbU~%7]!kBi^h?fږ1>䂟կtŹ)V /`l׊MJޚp)P6ظw~4 k CuhCXSs7K~Z lhKEɮ:[ uaJ)TJ?MXJ^ endstream endobj 685 0 obj << /Length1 1585 /Length2 8585 /Length3 0 /Length 9637 /Filter /FlateDecode >> stream xڍT6LHHw Ct# 0 HH7J7(*twHJsZ߷f-f~޽w-emJ0$7P x@~ff} a6#\!0GA{Lpܠ>88(!6 3 , B@'͚'&&W:@ X` t5 s^E&iDyy=<V {EV"L6s'ܪ"۩ NS-VDsN˕Ez wޡٚ:lZn} _ G&65<7Q|_ʞ5=e:e#ܱ1/Jǘ)`"rxѫ%r ,]{Ͽwmb2C?%fLUySH'8潮 ud<$FEɋN2fY%ɩ! S0`NmSHʕK+ r[̭b#6е'Ku EСְG]R,賞2OZ(oH1p.#hk^gءH*:ӹmcƉ5.fesJ.'Kl:3& XƔ&:m> \͖̊_=۫eewDU."ȕ/+,G,Eݲ}<toV )ӓ#ӄ"1Jx?'Dc.VEz+^́S '8Χ+ 0X6ȵHsҊF!h湓Zsjܵ ,AeY`\jj1_a%_|xIBh= 97zNf'ڿsBK#l?w<.)7L(̦Zų./OۘiV%=X\5 crxk`&?ȽG3x^&kгJ~n~=j(ɧʨe:_ݯtnVٵGn0ˉ up(I'j< %vž |+NЋM 3ތPxdZ!%_:^luJ뤳g utqRG9t /O`DYІ 4f\pn̛maxVwn ?1d׷._} 5$f(3c@H_Is/M?f+Ph^-rHyBHN<-ؼ>.47I>X^?΋7't9Գ[cs4/eZnK|=4֤bdlk9-X)e"}*Ʌ i3*-B>bNԑMr)r̅Ąѯã)C42| I]a ?wbF8Xp V\FȒY@GCY.Vצq{/`ԭ]x%]~%3usSVݓ>j\ >:2߳xNdq:sqd(djߩgz4^B-`$WyEu, v+'5<&u1ZW\=ˆ_*lLy *p1TBΌ˥*h≅01k*w9KG}zf9]BSܦlve\1sx&e1Nyxf/":#Gt"XhRy(&mF~~n8e ZkR>CWXHtxUN~HMuFMy){h q\Fq?-ճQ g^Cص G]\WuyAmkbyz= 6$S]IVL}eTA?{!p₊f$[j1V0R1><'.\SkrEK?:w [Z]'/Eo*'d&y^ E q'+<@Lo}p .}QLi7X͜f-3oLz'KP~syP_α6V1ܰĈ $3ƺ^QVB6$ZB /mb-4cTLp̌:BF8+×IZ/}Hky`f\wu`l=[I|jx$F~I+A.D/*.YٿZN}S_Yg3!en`Rhҋ!%b9Ck> Jh rBiK2Wh wRy`&R]Ft;yٯGƥrxr`r2.n:m)KZJ92㜪O-?Kt #y5(oS?b٢|A!an{3wCČjkըuǷ]tp q1Q3'” U7c.Ϣl193X(_ꄣPE? 
̯~8̙r46EmݻKÃzXYX.*ijf%a8s^3pTC|},1 SYr )'~ϦfzPA}knܷ&ŵ;^1C#f(4كh-4j3/8;1 % c؃`!Xi5p EF}4MuR\K+w{\V8hr0\wMn/'(Nxt@.")t:Eh-P-D]cͅk/~o~HFS??zޮ*ٽ&:n7(n djºig_j"ܝj(O/ZrkFH?Ֆ s9%%aEݒW)L&aԱmN<ÎZ3%+}H *;$D;d Lkʾ,Օm^04 y&WV  %ݔun+hhʤ쐦Vt_/TuB&"̴$Ã"/D6VE9~V?y'pX)7K=Wxh.QW%:{_pSab,R<ƵLjRL%]) a'džy~ U=˝5mQH,Yių8]\9|E4 !_7MtLQ 4B)\K˼c !ij/SR:Y&Yvi>c*Rl_k݌>{fԮjVr, wa X(1Lt(X 8N 6,(DHنQe閠aA|TkqPhz[qD?qOMn}$淛IyA*r>os˚OƙB$r2IL3cc`B7tLP92,O[[ʶL_7tǺrX۝L;9ںG葘>D&ޜ(d44"ox_]HuQv];{S04gadžwQfC7M9]Ԛ8C2c1pq1o/=Y?15ns7}ʏ"_Q,;iD1k"nt÷y<[H.Nm9QeA0$S.wL`(c@nL8LրjM:F7-jѮfmc n =]vGneU#6;̱ ;PE]ƎPL14^؍ Gã(Ehf"L8.Y>fW2APf'k}1>}RDE􉙾oFޏ 0y̋؇X_JSV/NW7U7>5/g*Tm_9rM-ϰ Kžծdw`.o,ESlMy4,:;b|[h{SeQÙccǘ/Ĭ_h=Qf{׳RvqsZ񹽃4e;mLn@FҙƐ2I3Tu|3*+ ZG}FuM`됯s F[cxWG4K{i5Mlp.3Z^[>a:&f9fJІWoRz?%Jee ;a%m#YZl>5@Е>4ƐX!Y^ūW߉Tc@ϘtVW`RS3fɊ;0i>AQM`d1?F^/_Jqbp x˼![Ȏ.79}cStܖD1)QK}ߞj mbys?1X=B'XRDl0YnR #itAv:ңCˍC#ut;±*. Mge`rSȰ|N|=l֞mePP &u&ƷBa-8 xpMya1خყk(+Fu4> $t4٦&eDu7%LCS?)/u[!AY}_QD 2?~Nv%<tޝL&tnl)Yz:Iv{etiShT&$SU)oWRj .7V1v%#kS2V w2F)7,=6U8H\671h;NW1vuÚ c J+@lMݤ$qOt<gt[;7z`Zmg%(~rzl-@ WY q^ғ֊*o\n!uzq*ewisWkU5f WwDžlP΋=̳|y^S7{Sz+u6ɗ&~gQDr'Ue~-9q(N3ڕzhwff6/I6?C\GB@f#2aKo8<蛷Y,ϒ^q5ReΕK*#i T9mɖ/ƤyndiGAT.%~bm<P*Je $ .Y7L["xMDgwFmeחE*C,"ê*u{v /??d2O~SJ\f\Y0ue1hw{sxApvG"ڷ9jVi6n P^Y,M̉ImCXYZ,#R3bWbm>RANkY[]{&hj0xۀc1q?(^\(aAA~Ī|cxf˰--@KeAyqzFo@˫ͼ=ctr^|JX5L4md7'o~9 --x2-t𭎐'kEGfUWg\ ~F{ѫyʘi @yTH6xk9F{0vx_+VZr{v(9TbvAtf%y͂"+3 uUO7,\c`i{H0!SHXH&|I0> stream xڍTl6LI*+,- .))ݠt7"))݈"!><|sؽf{fk u-V +Hue` T49@ ȉNK v2ꂜ]0R sG#O(A\^A>A   sH*lEN+srغ>KFpliڂO4```uuudg`3wpa9ۈ2<M d0@36tZ-] fa < `K1 jr<RP9,f`;_ѿ--aP/0` jl,s/9on[<~Wn?6W{.`GW60W݋CaP5je +7Gv( ф |<ii+#跓?G# b:|o[,@6`(? ?#8_? xC}: Z:') rqX9yn￳OSS_n0;*Q ? 
Z>V?$v3`_Gͺ>_ ^W=@#E RZί% u ׫`Y/ˣ"@G@-aV6`l|'q@5 `g\Can.إAv/]o7[8u[ >!89vc*ؿ 7_ ]+[ŏ$@ K9P]]hes֝qitFVNkL׌k?a/o0/R|om@hKh5MԜiGD88Q]~U[n=b+|"m?z!ޥ{9/ռJSq:/Jfh ,rf]YQpxb͜OMvJ(tΑ/~Ȩ?e;Pcy9hR>Y"`w^κ(Nd-V[_83'yjv)ro41[S{-XdI= k%KY)ώv8!L`9Ym͛ 7*۟' :S,c_ho]; ?#LuņhRZ|@5[eֶF~{`uM)'.X>w[tJ֣|gc>yH,A.01lzToZLMfTnpˮMmuGᢧV|Fi_!Mu*t3k-1Š/VGa;NW;.E>e"?gw1e,$ ݛDh+,Vяq?Xbg+lAF?L!,P7L("&Ѕ8gVz{F~ʖeGLSί9辤=&gަ ց7_nie;j&N_LƌxH'UcPB-pċ={7!h] 5\'wBRRb]$TC Mq}i'*MjL$P͟I eoĽ$/|S+rZ\3}es3W0/3xt?t3Wֱ|)O:⹹|\/UQ1'd(샱kY Ks?U8Ѧ7m>,.6 h=48+'y~XcR=C?}gmQX&mx*,hf<_cQh>[NvlYRxG.џY`޲6Xs h^DUa֙EM,7Y#Á,a 5}BO/4y+M8!]\Qa* *HkgT`/}9Y֞ 9J۶D?ZQ:KQgRZguðhxcax$Զ+8{ݛJkg* :ѫ1LcL ?z=rk)6b'I:5C%z\s] BL.l{?zAٔBF_8(f$ip [!̀57FFc#,i.\p߫LTᩨAFKbz0}Z /N:F.`h1_ 7IF~q`pKdg3/2ВrHa(e%rlI(͎[`) H$T{KٹvK|<N֖/t47n 鿹!il ?(g[[ft ·%Gh*P8[ {ʍW,>F侨<׭U\Dv2cVNj wBݮ 2K xfrmqzQ\^Ub+Vh8d1)("XĨ`Td#i`Z;ёmDЛb187Yʈfʊ7/;aRvJ+mS Dòf 5HA8EwO%C&TZ&`$*=ʢv'%yaY+}NvNϦ,̼}-ѲDvYPdSqJ'%h;΢Ms},?jϘ8')4ڳG~+gE'?5JiǏkfW~*)GiuU]*e}M.fP~O~WVRIẼNDMՁnX<{Ut=a$ 9b$"3(b'4~F+|Yd/[<@%_xpMjolp!M )R9^Z6 øœ̄fg5T#S }lP.[ʏ vs7Rpmnwy`iH'rV} XD#o &l="/mԳ+SКFut~J^ X _VKq);֢IsĠ41ݧq 9#1)!Rr' SopѓxW+Nz'ZN3-G#ڑ$Q /إ/ph^d{|XWr!E^ҟ#_дZsN,Zx y)|[l1=|-:zc>'kg4v֞cLWO_9>c`L*,]aJ,wh{?F:u!הq=1T&|*#)bѬ+Km1o1& !aAWhLr$G}Nas%\p vg[jfʌD"HJ[-'IUPd_2Xͽ5ٷ }կ)ΆRF dZW/U cf \^}1KusuU/bFJd=`|Bq+Y6〇nAnJID%һ&SDTi!CRZ*=mg_PYS qJ%m|{*6;yf6Ay*غA6mݳb,hcvQ|7FfJV`hɄx,={X9znK?/!ۙb{j7_@n{ E " c>pӧo ކ+y^+'^qlzu޺&˨!"E}ـW eDs@}z>9G-extK9fSgDSId)uUA 21Vƒ*_|;0݆-J,JK \TI'i@*Q#D8 ү]_? 
P>}bY+VRrjȓF.X+ip9ǾTHhOXOg5ߖD%4^a8g{mzVh9x`nb66Rދz57^qXֱʞbهH j= PNxEU^ȹ V~tֳ76@L\y4&DneaFmUnR];Yru^֢hf.G*7%O,B@jH4p2O4'֘I9})L@_eNn&24}ھfΥpv9k*y,xݍYf u&Tj?$ I^e|810[SH.&ÎC()JidN!߰2ZOӟwAGbcښ>>|ڣFwX%8[M|bqm-k[]$ٻ 377l*u~д/e^BŁ#o$#3]ȴ_^ ]6<^[e-^:7.E꾖S q$VAK^(E,?ay݊/T3n);<7۪$,Oue@~< \$IŒɜ7 M0vJRO^S8h&<# n z$GQ{1ΏUZ~k,__պ [SssC@ ]d~Z`lx@h&d-/ie&bCTfV#hZ!N5[)R(^ޱ8i+ifGbwL6F l)ʨ'#>k[uZ|S6p$N)şWPf[(F*,LɟHNxrBupD!E&=[zr>0[_)4{(O}+vs Gw%a97/௃?E|8.O|8Y 3+-ԉZu1 #"W依/(Ӎ*'|G%T^cLPQéW/RgۯCA]p#)4O7rs (7vCT)"S7!"99*TMx>o4q>WfAf-HT f^X)+[-J&"#E^KE1D+`c8E_ Ζ<.UFj:WC@ν3P:{$PNe/$Bfޕf,,S Wm dRsbh4&QIg4`U{&뾫)raArI1Пy+wJe/Nւ+~vcFn_w4RLrS0'갃58^[݊mv>03!M/e '3~1$1z1)#sCQڲn/qZn_߱^3}!}YN9HZCٔt9hyӉI>XJ>楩;!mf&Kr(P#绤Ǣd9\H[ۤP:4m! s`d1Bvҩ~@ gIRDWjPNȬSRbRLol}%F`=ܑhŞYg/>Ȱw]:_tV~#G`{c=LQ&}OC~[<\w=IWڹqrn5 HܩYmAMA}RdԌ&LLP84SO0#5>_km~E'n8뻇+U*E!8ƄE,!*ըR2&=wL C P-5D*4wxSnְʸ%kq ŃF:n_rg_"btzbϦ?:{ϝժY"Zf W'\{hۓp0\V='9 YҙC~U2 7a|Hr?Pe!(N_ P IE2գͱ6U5RF4~ic#_StyȆtɰBfMC'tK\'Uʅ?D2 f =LDz^pҩUf 6D͆(>])0 >%/x%ˌK]Zk&$ ;2(#ⱐTEHDՍw0$2u<pnIZD(d5Ʊ2qF.X0̑$#0Xd7 U7Ö"RH(6*LӰŘ\PX+DCY[A6qj2 9GF J GWŒz_2%yA-xh8Iz]ZjK%yfP &8lFo? v%HȬyJu+t̬*_% _)vFrb$`S< Dh {S+;Wҥ"{}͋&r7+LCԉ+&#EwFܣp5k=GΈZN/ZQV!h)|[E_ϋX{\<jx)-:ܶ@J ~OIoV&mx)q2&i6@~LTXgV_;@A-[!41ѫmP0wj"^:S)llۯyNC[5E9ЊXJ6J}/t1E 1_q{U΂н*7AYOierqqjJڬ{Q>T( F^N=tZNd!ޘ2t߶X8a{J3s<ʨưAO.5w)ez(rPfDO3Zu <_r7]d]Sk VN@jNJB6m=q/N(ԙE cZSD.\lƵp> stream xڍVTZ[@Jj$C:``I%钔nSDiި{kfs>{}g -"xǚ" P$`cӇ l`w8ocw0)(7M J IA P_0wIb `8c;:_K @@BBw8@A u >H)@J{yy\|0wG\</ =_\#`;@z0; @!6`(PgT5Z`g?< 'tEJ\\AP`q4j A 3p@INBWwpw+8W%+Bm\\PW} w } YAvZp7B< yL`o_}\A_fT~0Wp!<8l!65%;; GM 0'{e- kk>!yy7W ̢ UXU X-`Ͽ0b,7̀"@ԗC7v?Qpvr r8@q_R?] 
x'4 w%BJo6a* apȯ?0lP%R(!@ dh `?@P`),G=KHlN`o 4acuXEgM4.^9V$1n2WEfȊ\@"kM:W?-u7 f?Q0՗v7 vlDoWcv'ΡV)Y~1U!Nd7 ,p)Z/#$gߎS&؏3Y|u9T/OgBˈyJ><'F3W;(E^RmݗUsm_a _)-Y*-_R mvf| Y{mlq=qǹx#yU"4@_^7 Bwm ߥ3vJ LIžzr+Rjr 5bIzJ!H|1i{U'2p}s/ cZaLMB1pۤ} [ Tܱ f5BQ$#ܫCw --io!r8]n-= x5LbEh{Ÿ͝Y_x:<QMGzl6{]Xϑi40Ԓ@ھfQ?%wֵ Ÿ\RRI*ڢF,xfY_%@(D5PYfi3e12Ey}q(l`+XIK%[tNvMCfK+v^n߀ P.:ɶ&OsD :&mއ'HBmՇvz1"߉<;|}>#0RjږRLLj_bG & 1;( .zSFOe͑=Cnry Y 7v Q(6Hɵw{dnP+vUb''>J% M\u+2iAvܠ'Rl'񵷾y>-31N„F0S0cQLRO !^yu4'p?ΠL ԜtΈ 3&$HWc|jw˛. Ώ; Մ8~;Y S |$5?[ɾjx!8hX Zľi\ƨޓ1:en:<-S(G0rHp|[,ؒP=Ǖ !Y jZ}7xCT{E* 7\DB\sSyr=4ض "XU@`ZZ K~8wu4xLSM 4'Yaҡb< 䅮^Q4R_|3%MR9Vl(Q/ik"o~~:A>,{F#~#kQH4iQ)s;H&b$z4zZ6NZcĤ6O=p`T'SH~,۽R]@ ?@Ftp~iBx]e#hԆhB*5M57u7g,'ld˵PruʰvR"G`t2+Fψ N[o\7L>OQս 6@7+t9D39nRޥ-18\1 !~?mdk!w%Y Jpo"%K;>aw2:+,N-Jw*u R#^͕ox*oIˋHp|kgfUVƾ4×)yt-B`Kx6!uP7]0avTdܬvOhJu!*{5Ђfk:iyńd9ƲA E'J" V (ڒ Mo48Z{|pl3}mumgп (\:&Eኅ6+&5i{IDiRf@iVO~I|j)Pٸ K)1E.(>8?hL#5溊[I6TN~c3*zPT$813[{bGE#K9Dv BÏUv*U;Y~mVrUNY1>x¿`;"D~W%稀ډ+#v΢fIۧSϢzЬ+08e-N2MͫNʂ/-T2\)>w{R6;&#xWpw"̉g3|m44/Ȟ?Zy \1K8`}OHV٠i2ʵ-x ޣ#8*zdn|3Hi@M׽axQ|~7G²inmAƎy_4-Ź]FEH"+PCҹfXi(?:ъ|u+iK&lǑP ݮlA9^s)D&8C_5l"``)G 12&zm3x9`iyc30)7.'WĔW4؍K+S588 bI()9z#{n&ovl?EkOu0EIgpݚ$KJ|`9ЭVD,2,0if6vͶ&}-K9`f)a.evMH~U\"䩑UA [ E\}l;}¥ =|PmV$UK37ڐ]&)ECN遊^* uV(|7d$\pqFp8ʲQo.IK[R{sR]Aex<uI=[Nxa9zbN.5Db9j^D.phr }t'C:d?E#x s2^'Жd#Qlny$'#藢V`.@O \}ԡqB򦧇w6)hhf2A}r1w&~㞤9n(MIo͵X\9_lC:w‡iFOTYT/ËpDɄ%E\o)UAmjꪍ7Q^sA~|K8gBYãWL>H:_1`X9ͣh_tqI} BvCu… Jcϋ/hTB{l/)ݫCOA -)lj|gxkk:7T}fy?-iM\Iܽ{6sEls+ge޴$4.'>*4мMH&rȖ0/=Q ;x\ [W,O4e3:/|%QW⫲铓uסMA6ie/ 'l3S>SI&|Mn/WaKvPw\;] AkGU?M4$n 7L [1`<ZyNp]+lMlJTTBT>>Y-tu?;]hSxE{wT\;F?Pqˤ;̝:6m "鿆ak[5&3)8Id`澩Ld)V5s-Ac9e6eSDQs> QMv{7~xcr[]gq)]o7DAe]wdI{im~ןq*C.vLN~xAYX =fo}dガRG<߃BdLޮIW}᷹-O0jxDHY?Ĺ,,{ X ye~R!W)Qj]yeI%D"qDjCǖvӲa^x+n'mOWSYJ~-v8/ֿeo){:g/e& V'UzvO)8W ew)74ד=:+ QSf_'N!,†(\r:=&XCɟhIR_}P\ѩY/0{ $B+-sU>ds<~j,JurвNb4,xHL)NM$!buHr$<~wEi#3ydv$|DSvy?(-鴐m= ;^ 3i"} xb/qkv|(VnzlaݨPiRnRQ\-UWdZۑkc''l*^ryÛ6~l0cI=?%C|$\e,F)H{&h6aK^qQ?G"^(>R$9[k'7F "Jfiܛk?K)6ܨRey0,R8[ROبTWitQ|44ɥZ#yo endstream endobj 
691 0 obj << /Length1 1520 /Length2 7259 /Length3 0 /Length 8283 /Filter /FlateDecode >> stream xڍvTl7%1eRh.1 F(iF@PR A?yw]Mc!%G\e*zF@0XT ps |PF/T>X3=4 !2I0(K%T!z@m4 VA{z!]|YsiiI@%$ zP8uaOBʺxȀ@P0YOq^~pGvP$wcn lvzX(Gh G1c ja H(*r:!;>>@(!AP¡@u%C _yü>_~^QDQ>ހ_"0~ Ga(G_-8zLQO__X `Ii0 \@z+!`l! <0P?8ߊK舀XGN  }rrpAjƪz[bD$B"` ""B ?Z('4PO[O~M;>X8[0]oAGBX`nMUEV%ߗVG >0?TZ0w nFzPB0tحa o,إwJ5 kD%P//h ;b$@k` H^_󄀁;H3 K 鋝1i #c#x`;gâXlX;/?񰝂|HAAp?zza&b#~f8 03ݎt}F_hmXn{ fƫ'Q_uvĢױR@էU5#Y VkÖsd#Ϸj{HXL׃/<7ksyJQОhϽZ3\!=/ 4^wóIFB!6b~ʉqܑ+vd@C"ՒȣɠJ&.&+F6#c<tmҢ kXO86S+$d 7םl65bfӟ_VUyorèv1Sl48ϟK~J"yYNV*W,!Sz(9!:^' >"7&3.N+O[xN"m[Q@p&¶{@K8Ox$n<)v O_np'cRrR'i;IuǥpKnwݛlߑUשVaʇ&ZgOꪬ9x`$aԎk _UGiGLVNbd9]Kˋc i+ni ZYM,!a[sk&OB鹴<hq Ѥl Stn+.˦ZԖp {2+4[8P3 0׼@Y_RvS6 0N8r'0ݐê^miUwJG>mrۖD:ۘ}wqDt~!Dy=lHH; i=6KE$_jj~; ZJxsU*o#zw?K )tOѴ_*/ᘘPaۛBbQv#ZOggzĵByj;@OO^O*EYza9! %wOKd>ZQ'!S;u;'> ׬p{x.g(rۇJ2 )H%cB>!}YwD~V"%eluهT=P+Ԡ:_ӦJ&}-%U@M▁k\ mpV^qx4e-VuNrԁ{W|gۤ~$}j2>WL 9K1oP/.*>Q=$:; xu%a Lb朆Y%x:,Vc_d!U**xگ*fLDYkB9 1'tu ˤ ]0th5)jnSv<-eN Mx˚M@S>e%O{h//XSjWkd.F# :%Ls'?"6r6[fSTDN\XOMͫ f|3dk,>-.fa_ÉRJ|+mBaDO1GYTN}`d KZp] }ŤKWHjU?GC'􋨱t͡C RoHWOgJ6 \W=o9:\>@ejzf$ّ oK>ˆPE+?Ζ Cq8Лuggn7ټz_ [k0V"^Sv'gzt'Y-5;ok칞qx 8|d]pW%)p"zכ䘏仳N3Neӝrҷ)nˋ,'3NK9NT]\_gN~{O~QUZ匲\ '^{ K!DDQ?:\/`|1:T_u"߯ck.{[MsSʫ7a[pЎAŗ՛dQng{`o}IV&Mvj\Q%Q1]+{_)J8}/ᅛKDTH«!aV 'i6irT[/zZKk1zu:Q&P*} !bQ{]MaBU.gx s+ee uNJs I}B+uKou:L2c$yC‵1Xأ덥ٱKg}m })8P1/n:b5K]AN~.)GeP_!#3W/-w3:$'mM}f,gxAř$ds6Q)T <G01"Bm=_ CxpJY /+BgYAw-MUl}oul늣 *7۸h"KwnkUŊXj.Q&ipmiMdܩG*>w Lʭi,%skdi0VCIGtI&!9uρ{^wnИ*v뢍MUoY#uT֛#O#8QA9\Z{%Cbu``~aw]&ŷZ<ҞJ@8{.'nM8X/cQ<\nﱇ)Q|7=ꡓ7w:G]p2$$Vszm~* .A&QB梘ȫz)c.et@Hh¼ _c){B{:aLݝ6_ImP[3!FM7'@˪h -ݣZP˜=.$=D4${@4Wpۛt<ve#3Jŝ&a߁T=#Ӫ<7J7B`s%Dӏ'+ f]W7fI, f7|MKa>tqBAeqoRVQ nLQJc8A5P5zH?񞂊ҾC=I-JQF5w#ɯ*<"FSihS[ 3+wWh$NIi2y8r?{yd\-hVp 0h&zE?Uȳ9XJn\D%ei%UlNCv)I[` 2n9M|FhBg$1oȩ{Ahrm,Y(>Ek'2j^Zw3Nܚ^ݺH:"W-N8 8?VX$[KX*vdz:zd Rhgo=0p}F#6J'{QQ^'mB?]}CYKY{U4X'};]C;ZۖNNPqU*2w礶z~=P>?U.%K#QmJ) 8>. 
~:!e"gU=+L,MNzp_YH;_"iŃ\V aIf7udZ[1k2'}ψpeŎ`EVnDQpmnZ.#)[N\TWVoHjQ!FI" /OF>fntmQl}B8|/q=$V:'U=>TOGZΕmz0a5v6I:q-$=kMOYf--L$,Wֆ.:q+3+r9osIny3[^ˈ|&$(+PjfWVE*XQ&1Y~PUHnDMMMU?P;̈E{EI˖% hBV³c2 b'.Euٙ%7f2"7ԑzn};v9{ @x-@:fjT>}.6uw=Cܠ̓+Zs85RN~dB3Kxܳ9Ew+=&`,d1,Bzi3FX>xvZ6+ݍ4'P1tvZﷱP>ھ~?D HC6g1[iS|?*+@%x R~20O- ci4W8SP)]@&ҕ{uNŵ$0NBi7<}R{l;&d(~}p5Ẹ!Q hZ {3mM>4 i = ǀ$E2a(/5gϻ/9]C9)@26S\??rvB$N`BOƶ8# ۞s̏_{x=[YA76i4r?Kd>K=~zC؝KE@N:ww/̪OmVt2SUW~eԹ\D~7#(%gMS4:Gݺt&i!P55-QnÑcd5=k=My5:M (yqv>%rN*ާT P"a"mwiF ڏ'֛bd?^9I5ȱWXm9 K4-ȎnU`O;L2)eH{U|Ak!*\񍗏I}ΞS@}/=n2_ 2pڴ8+#n h) 6b()&~7I('գaϏ۝ X8[?3\F#.1=;| ٵu=Ű6xԓL~J_i:gj$!b m5ApHR3!ܩڞR۫L'dwD׌Mfq}T:!Q`8`DMak즆hA}NVʧ;D COp͠$[3M_o݆}Ub\](}T~'5o)o^s͟ޖ9 !b WM)e w͸[<3zފH Of7mm;]4^ i`/Vhؕj0qSH(xH%ly}|cRڧ_N5Gri&'eU[ Dԓ}S96Zsj]AdAfFyhY맓/?JĒn_7>k;ۘO,c6HXK5z}t4h.68N$G@@aW uZC>jy`x`Ӱ=&5u\8[%sصDwV5lx4E#Fmq4Fj7V^Z _˲ .EnnJ?X\cW&c,n[=M&>6zcM2}ֹbIJ;GcRڟo:A]o __B;]Mn194|&8p~ E:lJVs1ooDWsz5!)7{{DG;5dxs]FEd1f{P\ 1;dl/So{%$NXbbk0v mXԗ}~ڐi,Zc|S.fySV8l&a?ެ%r y֮=g@>qS^"]bz޷9$Gt狲xЌ:o endstream endobj 693 0 obj << /Length1 2579 /Length2 21541 /Length3 0 /Length 23020 /Filter /FlateDecode >> stream xڌP.ܽqwwwwwq \ww'8vf7սU|6%:PWT3!PRjظ"Pj]\mA|ҋM2 S7# n`errXXxх aacPd9N.6Vn,1r3uؘn@pFsS{ BX913{zz2:29X 2tzX%T9cT@YhKHG~ᅲp{7?J"H^꫷sgZ͵O}bW[βoyЊW@)I>C#*7ZhPkT@(5~r}E+ 2Vmq4[=K;^Kt5޹W"7ir=R^_;9%ض;xO8ϲgLvƛeVzwOd^#+.gr 1Rk\?rq#A\Z"S }I4KO"Tポ %nдkw'2˾ \m}5rt}ccD" c]U;G)e(E 8o. ]ә_FmnkįwsQ)Qi]0?/Sg!S^o<:6Dzݍ[XNf:i.>q\$l(sS0.'PazMk5`eēM/ud x%: )޷w5>9+)2CYDX {@3j". ·2j_^I)ddIhW.VK8&,ۤs#}Q XËz23YZb++?}+% pщPy}P}G5uRyLp:X/ Mw=ݡ+b4T"}h{$;|ޢo(#?Ηa|s}>X @2l4o2_2mqO*&6zCO;ͷRY,+2V #ϙE26 t_pO05DžrDڽ N3 9 W^$N1m}w0^Ύ!fPk0qJKUhJ[A!BY_&zThX9@oX? 볅Θ.JCiXIUK+6|IPsq6ЩTdSM[(0HUNK\O"{l aOq`)}skEQPsR=,~ "?851l4O~;@5`G/1QjOr.,~tͧ-2h,:yRGsm53atGS~Z& 0"V Zװ˷5Tn-Jɂ魨ܪl Kc+qrLoccy9y;q*ЩyP17t3EAaZ~ ங' #C[Av. 
|dQt 8eΐ}dULƁNNZ>ǧ9Rؼviy0PRrɒ iڿw;WY|~p;E(8/;A=Y,h}۸/[/Mu ·}4'6Yo9]㍷VU31n`W};=8gUlc-]nk;%.Q=PMAK#/Ɠ]VVyԭ ~ku~Sȁ(KvIOjS4z5-4ͬ54q,{nB`<6Ȕyq~Mlۯ83 !(,#b h#Q.}|$mzB] ~O e]P_?^@ DPa1 a~gh>nDxSl[UcXՕ?͡2ೠ:}o}ègpRp;Y̧AzCc~}Q\Y4FwmTn#\^sl_p!VMč'mF,nzcs򕂍%[{Q+TQּoك BEg:`JkWî=I"£㟛w&pZcCP袮oI/C\%0JkCZ7brv+-43&Zٕh/@JE@t VRoҪx}h'{8뤺3+(E+qc\t V/nλc*^P"6ģڵC顃#R/t热}쀰Ĵ9j3,Zmͅ'_:d,%ktu]`l`e0($>.:a2HxrCȮay?Vt{em(PI:NGzy<1Px)2xI@'? {9$O_>a_H,NH"}716]Dԥ6·J;6cF sc&,|zjNgMdTk'oGU\D :CO' ryv޾8ܯhE3mWyJ 1Ѿ%i잺2PWהVYJt&2m ׏O7c}(0n +rxhn|7hm\Xcy,rtGtYE4.LXk카@1*GMm1kt _OdB5E肋Y-'F/ jlA kdJ.''SаsLkr 5;yBbxڡ (A2gthG4/H7sY4rqm7 nҩ֦f6קo=y[ h[O'/pOaH,Z30Mk۩]>\1O!0!,MzvM-6/̉؀EZ^jٚ}258d!|{>fD5̣ bsB)QTD7ɆX qf̓`hYP8R? P%{A[g_4O,W7;%Qf?9v [[0 .(Q3=%\~ 띢[|ح9le# a4\Wxw۠8T]orWR=D2R)tPq̥O5)m~N&'^ iU:5l4{M ZNtR2=!$tG_R(8}NB.=#4$}X[milvK~YH$~jMG$Tn›X `.08V",1pcX.;UlXZ,mWAv}nPlߋt7h tc$l4ϔƬ;5\NRQmWBۨ_Ž :hhBo6 Vf${+̺V\ xfL{('$қ%#׏!ݷF3׸R{a!edv~Dal|W=Qazsq#QVNb}EIo3`xW̡)>ZteOv;45@(U26d oc 0DD&$X9 7:q_:q |Zцė'Q#D_-!5h*ŝG!V#}k<,Ԟ r!AM#r`*G}p*٦ H9o1Q=?)A}3u<G!tOf"Oaً6HEvœΦ:ṌNڽY* ZXDKmcEȲOsYܸ2Df[;촋 :`N6K! RLyvAFB j  f2 !GK8,LYql[sGMZQ;PZ-9GKVGw_s"V%x\* $" ;=`&k΁+Y(lM/eYu t"Jnqyr 90.1t|q#UR.2lŲ't(M԰E)8nd)KD@(N~s,Q(6/8;ԑ8;Ux]Y3o\OyD=3|%5Goo(*\}Ŝu mR-_t\NCol_s68T768!Cza }}!? {+V_b: 7yk +Qv\Cdel6E%1eT;ak꾡1 zG ~SZߡ4[8(E/aߥ(O#k廗"tIt3QȚ _ǵ̔R}yO@.MpۈmbhaQ7<5YAK4+[djp31 ڽR8yRBD9z:zf>A ^"u5_lJ!do%o0N))KXb_ qE3qš ^)ڂ΍[K֙r _Mhx9HZZWqoAf{_Y)Ʉ ɭ'r$0"]S<]n'tRxY TvS~@{eU[-S6Z2~Y4uu}SC?#Bh00kؓkHii˴;Ke|}&cAR"]7q˺k"3UT(70w'r y|5WZ[U^rkVe5Br `W8P8.' 
HE[mkEw4 Y^!VB70KQ(:a}ܭBkH؉El6݆-{JM#=:eoЪk~L9"1vd'^*G^ Z q C{?D2Z͟Lo^!N(D(\`.p`IT<(:^FeA/\ߝuZۢ︎K2ZNkuV[S}$~/5t8jŘ}Xy[GY>5AX6fKHv1ALVRA/*ȝo}'Ƹ+Mw!oZBj=.N.Oj Im( `bG,`owQdCL$-Â9.B&<)uS{Ly28!*|kyqkKnko 8FJXPj'4E<<@ߢݗ%Hy~LZg:*Qo8nܤS|3+g(M!Rqm_y5,wb=nMF4|VZR;vlLEK(3A8oT~(@{b3fCp*aO'"l1ݬuZJ>z(PT:k3Zo`CR:׮A9"kV2?pHRϓAԃR]m+?JIPli2FC \!mK3Hy3]>8 pi}sUs&]3,;2_5"pjp-b=J&E{ᙕfO#:k# ~NUXF#Á%+QeS-Xj;儻p`2?=arzHQ>Yj/>@)fuZI/k7͸p$ť l'5;UZuy2f\dBОM萺J3&[-EY 7|/xh&^fՈ9~GwH+'xl&'D:u#p9{Z${FK@vf?eVZkܣ^D{D'$^p/@`@([50A;Gsnˮ*U*t`!ρ$Z:-E#5.?k<i,1[6C>5*eGUJT2WtgdSg֊*I >H."g (ֲ[[gZN[#JkWB {Dh 7ȁ1u>e^X<,xA=s%okD /d_M#!%Sjڟ9r(3|xB#קzM[Q97z1y#~|N-B&a{3ݍ>pr !9̎6xlz>{"!LLoFѴdi(96"JW#kϡ!9\0Σˎl\'vuh NfdI:\kNym?`t"CF+@wzytL?5TyUz뙔0#K Q ^\,wtj*ݩz\9yWs+`6\=^a/(5rՐΒo'vGy0cɆ1tr  Cb=/SBIkfF'1 l7X@9/kYWe>$o̩BJ3mJO:0o k" iv d*|IwݪQL6c@-D'DFjJ3p uQm Qy,)TKI=$%>7-3֩4;Q%@S^h;q!`cqv]ncX垘ќʝ#.cSBv1S:dl,6my 1Rv&ELsr,-JUJ)Wh-\>_j*7LkxSlj}tG$…1B(b 2ݛ8yl:/a%7IH0Bd oaFCA{Z*6}g}IȺѢtcB:tLrC?gJeOBFQOP"ƠG->\89 s(ܱM)8 ;3k݈Zȓ$9y16~W,R!fXM~Uk ' 8sNKBEtvY977YuO}s{4~x |ۄ-xBA%eL;k|uFJJL$&W_*M`ĥ[8K))/gCyZ2H{uEMvW(aSpĽ>r[6=7C_f ˒Ǚ%O@` Ai`:pWF/Rߓ`X怹 ')-)iQ Sk<)6%bAK+kRo8#ElttJSwrk?J a=*n-ĩ,&oSIc~}RP C wE3/&=nGyw' 1Ħ ~-J>É3l}%bL4:1bܘ!Lf&Ovݯ1iA%bˊf@{:#^POD+?m3㣜)E>?RNk zM% XڗaF&ԟktJGZ8cG*MMk_zRXJ;GḂN8LZ1!? 
3A 9ƹ7,?MV:VPPV^wP ދXuJHC϶tYaؼC(y2`O WwUܝqIcavhk"GO/t z?lN}\>r/r4{jMA$M؛Z#BGI|W̾ ,9|IÔ& >Ŵ`k}7d`MI霪̓ G4Z1sv܆k$(8>'%A~V*ofH[_ZI>M mc KswxվGy*.%7?KrGZQh=B+KXmk(7iH+g"b*u)rQ[)kb\l{$\N/FhF6Bɮc n-$jRO$ϏP4@FyҔD#LIj>!E2ͬcC"2,18Fƒ2IsL꯸۩ ~Nk/7:;骨whwoPwVǗ7M"Dodb%%utcFQ\e&|\$@xM Ya5*[19 h3g_ihU]ݞN9a|x7Qe:ƫji&-p xϧ#Rq*i _Va2=,< E(uU9D\?~5"hҖ}ImEs E[K%S2g3-2O<9>Izt:e p.:6~Du0*C,\ōuV>wq[PS~3 po&\m_\BB8ɱ7bbH쵫:ˡc\ \ڄ JͻW; q|CU&/0_Qf 4*S_/IKYuȉyU1~jZnyc쵡EK[ ch`wk?ʩzw'0uRcp)KWP:c)4e7K}\Bj%ɂJ$elDW=~IPKKSC]U]E:xP>V{Jz:5}#8bQRU\ðL* N+hhaJx?X,˰noϸ{BDns3%GMef-\0VOw'/>dj;))wha~hAwԪ*rRG!3Go <.TCfVx@'q$q]nGƥHȿj>)073̋2Z/nGM*S&FV(4Sϭ\'5 Scl_GkS`7RbŹ&DBL 7Ix#d+E|h#z;0$S=3M̏+] ס) th!+L ksWhQdG6Lǃc_Nwt+xClc-aԝuRnIci/ ?C#1O4?W}X[ڞf9 _ v` 뱞<W;2Q+Y f41佗E>njI7bG>s0w"_M{&'I@;w)+^ k}.&9ҤFOM +N[?rn'kQCA?V4fZJ&%GU ټk1y)QpY#VUcO6!'6Gr2#!d乩Ħ4̴&?|_lniWɺvKĥ豒e#\'yac{p._q.~֜|u}nï jӖZ <3| }i=^WgzA.aJ~^ZH7@ľklq~<%}GlU-eː@a~ H?F8VV5:4VȠy/Sʛt+U\G2i I;uⳝ_JO/N#{ѐ3;vWQ}ީia$*p޼vɜeLs~V A7:}wnAieT_#ghZ#CĻg X sd|g_Q \pNé!u1VÙvdc=9zFbd=D?)G(M}j s)J@NvVIL!U.O &Qy*)C\FVuxӔGXd*?nNP1gkzj.h.뼤ȐboyaqAEO+7{:tP{>oeO:e.=XA Ĝ,s*LE۾?{eDp_k2?~W*uY}+q1j$C-qI `/3@uvք\YubE2!Qsa4t/Mƨh^Y zc`QTH%c2Y ˺J ~ h624sT _SG81]؅ !K=ӕ1nIGMP "=fw |ctu^1R3GG 梼S20܏P3Kr_1aR&,V0xa xM tnK5QH |I/͆~Beh#]uLʎKHedUl^fX*Y: I}dT{!],ebKoi}qYLL/H`__cjj%0ePvX.CUFb[w2o  lgtp$)xMFPyv\PE *|G.?S.ӔC$c:@4v / tW~ĸYfku[ʞ:ҭzB^PW/W\xYaV$ F-hJ%y<Q s?bvb0j]ǨR{Qm Ƚcʝ8/7[ \ΐcP`R󋏎Aa,4NBu7NU,G'bՏ T1L2Zl? TEUB{CF2: ։PI:ul.au")Y}CChAv+뽅6 "yx *nT(W4YZB<GRLi8PWg$68 qwr$iw]CS\HNFni7EU|XQZn!1 wݧ^Hwc;C2*Lv֦VkNMNuOp4pi[eVhK~dYnVy`q =wt:yO7ЎiҀn_]ox_^EHB>Fz‚Нʰ+SZ8t5d(0f,e/uБR.F7(Qǀm PUϙCe۶tP:$V~}I|H$GH$۫Z @03b89_}8C%'c4Gk7FD. Ѫ?/j~zF{QUKBm"jA(HPЫVn{UDϞfE"b9p&Kx=J/_uHy)\uW ., kS.k<;rytѭisu'3U%l?L?0kz.Gq})m@$ϯ$\z^=wEl u+z:[88=?QqT\_|&S9# i/#t:ii6W E(3zDnxEmbHP b0"NhmDxuDyOc6~>i;)H'e&IidGM#cKӸG#,HyE3V-|lGBxގ:.!zV˟BS!-Oz!$-[ϑdy~.] 
Sˁ Xdmte8p!Db7|\ӣ,ž5Nzj]` 䩭5[aFg 4]!Q*)wgV&Ϫ GV*`DQ!Y[~CO1R>y1"4"$}ǤOD |F˳9kQ(VOx5(S@YM'n?,SaU66t4QoaY8& q ԃ9݉!v!7{k jGcY(؅Ԯ>D8D( P$QM$T^oN%v@}jckAESЧlŻ0s(f1vŖ:d'TlmAFO 5Ωvo#֧@il[1E#b>[h (jrJIξʆ_N/Qm{y4QKE"6L WqYlM>,S.WɭxZs&½R-D$+x9}2uG9Z\B"d[8 nTxsH}(cma+H )49.Q xo:ysƎN*XJtm#9LZxQ0 o!ETgb(O "(rN3.TNi3،~!{l(7)|Tr|{)\V趻t+5NvB}?6D p,`3"n\NA޿hb,Aj&șJ=6-FךKBCvN_[oj9O*ZY1*MlthIo0=c.>,;Ak.s,Ҍ/>$z-Yt~~,h".3 jG3+n3:=lس~>0} =#L>|ț.]yY  i۽JDK-kF+ǾnseciIiu^03FgNg{@Iަ‚BKO~{E 6)ѹ0T4MT:\2/8 pl3/5!T}?w` 4TSތxwfM2OŅ| ~Y&Ӓ)7b ro2ZSlrsND,{iZ1e*̒(m6q}4N|>psǻhF$5uTkLyŒf'vq`P0R|K.73I3B86 xdUV,U|눜?AB25E4!+wg VўK$) _t?j1YAY꫖n/XKd~:l{V$0 k/t~k a?l%'id|}X?,!d "n$$hx0O+!1X*E q KP'R1˝|G8U1|D=U8192+hU=R즋NsJA@B=g4""O7 IO!ŪQI%uXRyo U65Y/(ioFKTERXl6STn.cd)83Z{'F_':si-\^ ҸJ8OE15VK.iq=̇RyQ@XM~-ܜ:bsqvf)H쑔G9ө@9NB \}),Yl4//QTEV7NzdƁiآM?-id= k.Mu 厡Gx `N Q讎n ŵ_-?pvW1?T)ʵ4ݽ@ ZlK[WWZ9G4&3w,Q?."@cNMSY^\#>"ƀTzfF$;нNY}mg UGQL]Zjk{!Zx*ΟYkX\59ZA" 1kG@aSGY]~,,eB|qpA25`1;tۺ Md/3o =@))e n;Sly?ZU]?;H|v=)O4ITR*oD@SB1HRPKt 9]b79XLtx8ŖB67sJRn]:b,kSUºB[g?)Z`70&JfOwGe[Gӝy'MM\z$A8ң;n\i$7x2oFwLX || t#;"`A%,& dk3$KmɁxmF{u,. daYT +ܤ9PK%wsg3Gc\b.M[qP :7jSmBGqR ؿw4?~;!M3:FP)_w9u2v;gXH99k{0CZEp:7}mq4v./n@5c( LaVT8g; m  >;8Gra<V!;Gh +Tx̲܈QHZZ2{+ilȾDsmdٗb٧Ml{O с([Âwl>WθS֘[L*]1M 8#e`~䟗d$ np[U 9{urldɐ0e2\U{0G|knAw%7 #Kf"(0yW@wZ,#VHL$V] ~n4)ìmJΙAqN>MZeU}V-xe:Mb'p7X.A%4u﬇m,֟WYmL7CLW:2q,+Zkq;x.vhʭ׿m(s*avmR@&S:\l. Ss+}2:/VRcl%!V4b^} p֢}?,Yy2?`(õl.}٨ `~)6PQ[.5=Y`M)Utϔ!,r+\N9 ڦ2JeV&S-lZ|5;h/agTl ΅UN4s85a=3M`c'I*(($~Q -xa)zEd "n/8˓XUAC y%C*3Y*I~,J/! FT)O &X$0$4Ws Z`$lӃ(іBfGv &=sJT X60 @3K! 
T64+XPϓcIWuNoϵ $]NŲ K&Ճthvdn=On:AϤ/F+d_}H,rV- qH|w_WۑԠK l=|vlT͹YAlrr#YR^q:Tj-D4 E`Em3\C@{v1-).Y*B8ΦbY@{nfТYܐ6r[U>: p|> B0J+)uFk!u+C"Ra?Y؉,cDu(-5ՑK&L$etš")_(dDP\?q&&"z Ll|s) Y6Hkw^zķNN\D%`FLXӨG endstream endobj 695 0 obj << /Length1 2405 /Length2 15114 /Length3 0 /Length 16526 /Filter /FlateDecode >> stream xڍvctoi4ӸmFÉml촱m7jl4m>缻~+kM!L+hlkqec*˨02T̝8dj@Gs[; @4' @ `dbdb`010p@ K:“ ڹ;9VtatmxɂAat@u?h@OX@հrGp'OAwuZCDGAcT G+G t5( 'W(l8-=ʎF,.R;O*ǟA<ΉN[?_94_5 jwgeS{w&bߦñ,gɹHQ1Ԇ/j)A!wBW#DžxK9yЋP;M2>1t[L_{yhOeyp_8%|g:E,BS)O1*:X}gCj/ 5Kع:Ǻ::Dؿ)_!!u`Wj8$͟1JFڹMRq#ZIxdHPG2'م.Ƀ#I*1az(,t{n+UNe.qi~OEJlHR^{F_@;ԋz2}@zs6G;c[6bQߴEw^{H݄#u\T' f ܷ6RxTA}OO3R_]Q1zUOg$[KG)ۡ*R)ȍ?jpO0h,1̡E-گӑ`yQĝ.UAL`"!7|ٛ@r?R\]򫞨#srW.*'j72#<[,e+2e ާ/tȺuJ_g-*ug1]{~6ж:宾S;`%\<$kq:\j qCz'F@z)^>}71>SO/U@BAaQutEi{E%r-oݬaQ$9:&֣fc3ƫm^PF0kU8iN xSraq 4hM l.-7a;fXNq _<lDA@+`^qHΖ<8:THXe3LL{_=G\Oqr.r>ݨ$7f|E}3qZ{ M#ֺ†/I^.增k#l.|{nީzĖvwyGWىƸsۊ˶ A)Su lmC|n FJ&?Prً̠}OT} 2c+&/O'_Z$JU~5xijs[a"ȒJRB~D8}3Iu okغsL/2V9PKc?&jmUOǞ°,+E,f74)HK%;e i%JLYeYp,Dً9sh!8e 6(y[O FN)? Lؙՙ RLwNgxE҄Xg`f`Iײ%?Dq ӱi}Q]OnBʓ?R #Lhȉ%6 XzW) A<BNa($n(fQ0+*X#jᘇQC+Ui)޳"meB"?&mq{; ЯxE;J Pi-zQ_n0Q {PwDB}I?mAMqr6]f{Zj(ް: &2<7fZ_Sa)H"Bх0{eҰl?ԍ E-@zV:HTr|^NKHFRJ̘$s paoPDoIOǕiGFzs[vzKvEHH+CPƾtMbowީ9A.#lz0 #ML̉5:8/MT5}dgX(cv_59{ 872#=C.C~~AzgezGl۸pɁpONK)FZ_e,j.+p-6"<Fh u*$MaA̐ƮRLrx-oeyG[l.Hr`{? 
vٓ {"j#rOMu\*W7c/x<*Cu}\\]u)ٴ{@~m[M:nNMR 4q{rD9H0SN;s͸4c3ؾd|IW"+AzWZTmPZ7g}΀'K  Un ,*m.)֧s yw8qDM>0R*A[m-AUU_֔99BWƑ:7CC&`eHk,cLPj$[VLReF{u:=.€6Wkc523$EeSgVzHϢk7ca8¢lO"a0Zw1wl-ʖNS)ͥ Gx6Sb@䄵x* +9͙M`5@-sXp |6{ w;3 :gėå0a@%=d'BI48bSM4T wq_>1VcF a+uG,G/ϙ٣>sc7|BTE*& \b?3kkwv'-Urzr~_]e'Cd `:KF^u4,Pqo7Bov 'IǍ+ +r(1'b2BbW A*ɚ%OS0b)B&pIb2 DOWHdWp̀xEcUhޭ5mUHsEanhh,9dlLdZmc̬1 S7vBNh1 ۝vaoc8VsJF %6R Z:scd[Qfld^m>6ИJXaAǐSf"xaBD?+*sGo >.q۸nh0&@P+1dn3pX@Z"e7XyxL$l=b8Қ?]E*~;CRڿ Mm;ǝXJ5l'=Aꒊ!oFm:ozI4~i[[[nIZۿ>pkB ,t'\5[+S/%niJ 7eM,q̍}F*BjMӊS5Y .דQO?AOhpVJY-ȸ>hŔ%Tݍw9cGf3'ˇ,"1؏ݘt[9Y1 ![?PՊ9 E_\ѧ 6E rD!`.G+,3Gh> Nֶȍ {zFWK2T2 CyhF3'PZ:m 9;ho 8dEhMW4iMaɀu&2ݜr I>Vچ S2r6m9Ƕ5fM9Znjg/{򨑯xTL`S7/؜$0mz#I RsA-on)"tV5od$+XpqdD 4[9}O!ΔRW9.ҍzk5QтQ[`pfO"}1(/yLR?L8$m~& $9 / wYln{}ǡH/kɳ6ǐkqo˷ei=Vp3$8C ||;5D 1H &Iؗ1E͜v؄NVrAOQד т*Iv$荒 Cq]}hqҬ1SKqpJܝΝ9kwI1Ԩ٘/ X94i-\FA9M?EréTɧqfn%z1+Ґfcl74>I!2N1,C*޻MD"nZOytZtx]2g&gT-h_RlAAF8(ý7 ãǤ1><,}^+mu,Gظէ2QW1Ҿ9!ahr%ppNd:]q^ 8{B*~#QB/zFe.3N2 =O;!deY\U9聆˪PPߦX?s)P_'Rؚ̙[|#hc ђ>|z;=ҟ~=jBs!cdd~[NH_oONs2k>͘4$_ M/xG{*a{N=yDj goq` itOGU\ֵyEj!X&T M_9D[mᒲQh"p1aG2yvm='c@ޅ3Aꣿ4ʛD2u)eq5:)g9s_6B DRv9d@2fzWO.8>^//lO~6̣1Z{_}=]^ҍpQv׬bz+u97Ūj\E'9 }KE(spݒNΔΧ凞ީ  ~oا,+QKk #a+m&cb:_isviߚYf^$j+K^_~ <[\GhA9~_X78=a&tt+ X?حWҺgG[e4ë́RL9d{V0Y&(b߷ ԭ%^ā\6=mO]|s7w#Q]7;}тOcA]yH<ﶒpCSˆZg\Z3 Ƅy)/ΣΩlƀൃOtC=JEHt_ 'Gu /k'g P40(,  D8r/Ký,.ZWnq6} ֿ!nǥ\)cM7.B} 6Bڧ'cEqAAE J&mwu}7+xa[b "+Lla΍zԊݐM.~ute]t6ԀXp^/6֯ 8^<[1 zI-֤ra#KD_pdl5tX guIݢj$1RpJ}O@ǬaFVZ]f]^JQĬULQ/*4$Loa^ mdޯ5G (39ox( X 3Գ Pvei~@ 0ѐ88-GѺ;nWG$0 ruvUF<>qpz$! dS<@Ae{~5 #ǾO8T^]XIU_ik.WT-iE>h[P%ffqZݐT]UY2P;&|My 3ƗM8C읧G(y `?@Zad|$ "j|bWR0k NbrVYЯM&+s2߱QOivFYݥzƓxŕcσ#m槀3G2_@h)_3D1) Ewt qEz:VY%"̹i$V ޗ=_µ\96. Aφ@N\džV2f 6gcWo5À :CaǏ4H~"`T︢'zrvdE S4uFj=*)XOXLԿ]\|> וKwn.@VRf]k-6E}%Vkb:xݜɧ3e=^ bHp% -k<* }՗6:ٸA'vM &^J9dtH5b:LE[-ο =f+yjUvyUf-ټX.xV|[MċҜXMd\eBX.rO.#`4Ɗq-ZjY$/:Or FU+R9\py*@ ټ~>en>hٍdW%>Fy[Nk!!sWB XRv|ye㍏+x4WiStif:P,mtVy ꧶_NT0&hc2>HMUM8ݔg!,T"y``%nPddbuMK+jھ aHueD _Y7|b;c3ݷ)9D.4M-}/6 zloFR"J2WZH_|Zf#ٜӸ]FD#h-+j8J +ꉪꞝRK? 
͙QNZYt*v_CaU27+e:n_0T!"r_gWlE$׺#D:Kn{6FYӳi3Su_'c8JW^;moڀww;3ZwkLp6;|U݆ shڴj=ˍlCbΞkr{])V 2[QHBnX;m)U CEF C쯍1|Yx Fe$9[34h"&|+(Hfz.e&Ô-ـʲ#7G?s=I0JthoJ)*bϣDn <ፋ j1VJKG0}/ac(2~`:x֏ԅ8޳r[9\;̓;Ca j}m/w7\-A;&:5Y*$>7y3I$Y("z%}ptu#4&coEvS >#KxZwJ\0N\)3_DTdz 4(Y27KKK3{# a!' !/%c2[Ǭƛ`. 9B[` W^Jc)y012wlūW\tA U^bMZ#3ֱXXv{4wӎ+"/(G$2gtݯ|E?&IA1syKX,Mv L0 UAAT蘤WP'x1" +/Ianw'k3l#fZ]t/s\~JA+e+k٤o})x\,Uv\/}B >~rc{ĐGqcfe^!*30Ch;6mA1hGfj,isZ򵥙nV3YBp!$:  5Ok={h\4KE;֝~\@c2ڸ'ВC/9fۖ'p/ZGS!X sTdV"o zTht/]グ'izLCҥ]te,u ;鯶3V[Y߉W5ZlR/Tۭ4K:d?T~v+WqFTQ"s.΂2J2OBh@%yK\blnkXe*tR]1 ,{A>jaU3,DS_Oߎj|/ mCy;=̓DtXX_M ɭV6eS%Wm_kߋ') ]$n) N2|{!b{ {:" R!X]%9@f>YRY5 Zef)w%!uGo:ҹ)%And]ز7ͣ,.+ޟ9K^tIc2̐#*=BNص b|Cc4?wZ*Ns3~Pũҏ9paqJx=Pqr55~mfb8r<>|).RBYoe` +#eni%,Ixėga؛PV@^gyԦC@!Iԫw'5لC JnokJLXhBI7|;ؑ 6\y#S]h <²SIv\*HQQOKID/ٶǛZɟJ2ʪ1<psk sAт ~5G5Ts"XBp֓$HI/{{xI;l.y؛n vcWCf2Q-*fꭽҋ$r?FI52lD/|cf!aV?ԉr=(s5I@ zP' s"/7 nLfBi= +,~}Y6z/x&HF|FREg5zb}CLgLtS+9orX(}1X,"2RVJB0k[:i;P[Y4:cu+nʉ.+VA cg UC#auN-Kޯ*WDv &gR i#,,ntZ:[y|FYdրZ~M4˭CoZx== _% JفF:0Oge_qU /ڭ[׳l@b\[29 .ꅰP+D,oXAM`S z&9Іqzj7 :MKOߡweY|F>ML@B&^_ӂ˗cS|~ ۵+iNn3<L*y7UDo^F &G2aVNLu̼ RM:SE\Mg}=ixx ~5ɶ?k4hL;dN-tzf읠xF?w F֠kMމ"'&Ǿť&"]+zȮ#n;b@jJ<}B 5ayka[2I-2k  6Sm 0 L#ݬr*qCލ{{tJ3b,9Ľ9<&~w%XŠS@w*}|ߞFi.9nw5  (t<\P돪f麳+v΂L+ZcM__޷wgL*]'0 , Q~cM`}1k*͇zޮ :()PƠE`@ m!G'Ym\"|S<%dJU\VRT- li`17]h&}_A2$+74бZQF/?BYGZ90A@t"%r;Y֍VS*x l_YOXj2"nP^ ]x W}w- 1mF_A mHJ,Mo/A asaS"BEvo oKgxfy'Wq*#$t]"m_> & g&Q{ȉA⣅(EWwv"<Jtk!|Kgtsѡ)!LsOϜ ܬv9}ԐϹNcHv>vγ>ۥe+#հ^FKi/b[pIW11HY0+- ƕGk{o Rp..͉*"8ء.3XR:_˂hVx< ũ:W[N[[۞CHthU qSdg4K gd(%Q̣ZC&n ys&8lP]\ ZWY,ǚyu ]2.DYcN+>0ZxYĄ(VqI%r:]53wF6# չ҄XRQO)zdv8Irg"l>ar?uk endstream endobj 697 0 obj << /Length1 1619 /Length2 7678 /Length3 0 /Length 8756 /Filter /FlateDecode >> stream xڍT[6NwJIH#  3Cwwwt#!! - %(7z}}k֚g?{yY̠%k)p.>n^q)/W q@`T [8S#0(@ yy& ^`{&7@ y0|Np:?؀>11ǿ w0 д;\+m!} +'8MۛՃ( ;@ w/=W-[WПָqN`0; ` ڃm7/_ǀ?O?ѿm@- u8! 7` Ex^`t[.<`7G_i۬p_)A@ľ9\(;jӍ ~ RUA@8` 8@WLXXz x~-` #zwm w @`(?0/q`9/B~|_bYUc?-)'sĄy|||!@؂_P@rw^4g@ΥC(`GB@W"%O䷟/o a GL& 1 ]M=p[4B {(}@:`8/7 ҁy0(^! 
肸E<!f*B0_/$ uwA5![n(  z 8q~G 2¼ >^D;_?s k#tuAO=m͏ Co@A/l?Uxկ=I;[m}@> ;P"eDu,7FolHs' + P7/Z'n7OY"4CXb>m FrrhنH /90go#nvz9D󑤳GmxZ$z#D[,8`)BfFzyT;qiS;-kܺ*J?<%>k_'e@ =^bYQnG KV^'E1_=R@h%le +a0@9 "MXa< ֫lYM\^=>Ġrozcܾ7_s/M7V`G{.VKY:OS1۔hf`6{/9Ank^,s @W `AqdaD;Ȯ굎JusrI긊 J &.n,X/1Ft!%LA\a )uw:"}'pRZD7 Lzr5Ms=O%-|)V#߲u˨,W~YThW,Kҟȶau=N"@mL^d}$yZ-8e (g{8uBdzܶ71dL7*E}Wȭ-LGFt-VL ʥv팎8iZ cZ6R'9T\Pދ!!ݵ Ve21qv$v>fHAH@_Og_СV#ױO+8OP,w!wȯ Ã:SGr+`Gb 30#KNnSלv|^~R͇(h蚡 ^pAag=OfM{1@\|< E" B’-嫥Qfc8^:Нri݄MMDGԽu?&pYqy6=xЄat+>)vߒ&ʺ^&jۭr$n# Eȗ$<f~g#K{>7qkJ~Oc_%V1E,XbSlO"TLu6$R\\)3,(۟,}buWi*7ؔ&ncgc8VOMfy9>#3Ţ] kL i9Oy#y/DߺP,?.ķ/.=Ak @_ȪZFSh^|i m KM^dJKe n>ey5d}z4nj A]Hۓ _VC'{bn ֽ(mk+lHTN;[< HI/+Hj]G"AiRؙI1.r|rm]#BcU7;-KCzS+M {.Sns3m:Q .\\\,N?ьd&o]fCt~΍Eۨ$q;Gn=NcWiN~Q8z@gz?ēžBP׳-sț򖃂_Y9 CO\x]) 'g.:276K}m50&`֥+eXpؕ]t?Q\?1}pVI}$)է2@v"Kj,EE$f@!ܽB y9n=|wEJTD}1HZx2զEV;Z%}5}q {"[ǏV\MN^,m W;ZHmf:ݝ3:\ꪊ 㳛^0vOGEqtϛDHyk2/y-.bLUR 6ߎ%lv?b?0UKoRoӳ2ˣ$xIm\vdWب\4eSsVy;~L<֩ڼ.PdAķ";5L6za-*C7]Jlaj"ς[μ6.W 4''ViY~7 %O$A7ٔ@OÎ0!IP.b0(p ?D4ܮO3ǽKj qфKJ6DqlɣYGeݛ!m#噷WKE4(:,"p8!ʥD:W{?V3z&Oo5Kw)1 r7Jb\p.DR2N,I~[; Ynfx}+;00~0,D|Zb9YYK\V$#,]ըp:AQЭ!0־8̏K ^;luיף|i]7#Rb\ ^$՝bR8?u;WGD/HOxi M| $}Eb+HV5tjD&6j{<`֙BTAz,A9Kݰݻ];*/9 gb4޸Py D¤CGjx+.=XJ7Ԟ8˻} u1RVRՃ2i򘶜laZ+0{DRxfYM28EN1rDG,\;DS~%*zDVfOEoauyO s#eUy&$ {83C(yVǘ?.,azdY8BBJ7Y*]{@s' պ("km н[u@.12]fPH|c+2ѡ6e\i^~>멬QN4C8`uLAS Z}L)fj>u #QR'ڎ^B:r0}xmIA2CWW۫Z;Ϛm!LB:Cֳq kV;sދ69(I1JO8s({-Onakz``--ǿ}RUB>KNǻ+)f[z9yvroO崑Jk,AۊRSQm6h#x^7 ̏0,O@(qنTΉ5qp/EqO{TrO3_kҗ/8w, >X϶M^)icRD;9x[:N$)}ߣ"oiGcоcw#ݣ|d|?ߙE&4_ˢP H$Ti̶ԏԆ]_#?; 7~`KaHEb uWN y.S$˾ms{ip 3cNDԊ oK"P 싹\;'ҫl!y>Fw[q6uN0}5$O)GwB=~{&+ AKsD,\03m7jƇ894kF 3V*,Ҧo9vbt_R]6C9\R(0(B6I,H"vo1o%5)`$\j1OTsR|1_3w>׾6*ȡw l?wfԘܮ.-͐$K<0}ryttBu]).@M\wϽb bN%RY'Dk'Ok/wǽSZJH*zVE q~Vqyo7IV2OK븎ďqQY#U\ఌO)k"5EczRfڥt#;@&ƚ܀S':5]/,9V*gSo_ |iPcǔ sxi0x[\q5]'8LJɐiUUqgg hYzQ0c;Yj$/7_K£@CmIZBNvj(]i1 Ɛ'zj%ꏲBB{[[4j-Z]`'MfܨV m:Qc"IV⤣4!Xq"$-}Oy$K}v҄HZ+#s)Zmnd! 
ʴ3k eb=־3bo?j Gl$-FVr$m\KNM@VUJUBLIƳO`!Kt-*3孌ΗӦF'.w$ٱn؄L?/N&R4e inŎк1*h '%g1^\ʫ7CO},>-x"2yvqi OPLsT]|R/߫[rp>K){s^'t`L.1ף`xUoJ mE149pt' ՌSq[5^.̖W־2yS.\g/C:%8>Q6XPyC4ڒ/61,_qa_5:3׺Si}{L4nUYZOrEY2n:^½e(= o8#弪DO3$RB5XMaN+֨IDaТE )ǘ͇FtGKQDW\A|T"S O*TW ^ ~}B዗ѷʏHq/qFZ1̓g›xVQ)T HS|%(DIi'z~Be2FGwh )@R*ֿX;.2hb䬣5S^ò!E2.S+F/W𖇧,pٶ.߫RK܂`VxO_uR!:6Q px,]ѳ~PS$tKF["sDܢT~]YlZ'8r\O#ǘ ٸ7׫\f9)2Jh_&DdWK}^4lCy -` b4K .3m^G%/:KuyNĽM;dxHsҒqpbd'Iu$x%E_@׹S@ֆ1ZG3ņt;Ǧ ]wNJD%y M(yr2r-ȳ p 62wu.m- q޴ڹ{uA!w\Κh1ѫؽZ7x GL5$>6(SUwWk o(iTBHIF@9׆o Ia}Mk|a-}Nh:*'$Z+ i%?e}MHdob3ob*n9zC|"*][ְ2"I FJu>)#I ES=;ԯ`Zp•™P'8TC(eV؊; Y{V_? 6tT |α^cvu "D1BZ! JR[]i( G{#P=or&>D9Kյt-KE4yUzOca&7){Tq&ނ ֱȝ3*L񂺏C u QOόz5-)>J!$poQ<?G endstream endobj 699 0 obj << /Length1 1470 /Length2 6650 /Length3 0 /Length 7646 /Filter /FlateDecode >> stream xڍtTk.(1( ! ݈0 0 Cw7Jt)%t ҈?wZYxEGGnQÐ<|yM}cQ / &`3 \prG@@HB40#_/"/*{G8BrZ4yjpĕM셀!Q?z$w8@ A0&iqB9`(vH8 rr#l9`ֿApT<uY~W(@j:#]y]ZJ"Z!] ~էE@{lu¬m~5a |ATrAA`$@x퀿x9C~|T? G r76D+-FOv sF-^p? 76T699'_H/z F bUa6p?բ"_;Z Qo1W %7Gf r:z"%M8Jv} #ZM5鿭HJ0[yxPW%'Z/9Ba+׷_6+M|}" 3ay$ , G  / DP=l_kAO *Q+j0PWԪQE!_BCANPJL@g5_ȿ!Cu/ N-߫e=x6qVڢ{"3y>v.rU4:ǧ}x} ZLQE)<` aَF&2޵42}0Nכm[&No$,)8ٜ=c䀱I<4@5F 4B%k4>}`a?'UzprZ o5>Z,FW4v9oe h1jXi% H$]!~I^8ylRwZ P(U2ضv\4>2f͎;!hrrGK*zyCzstkaq9yZ"_]Hl'M3ydd46g2Ѓ</qfbyeiqy9%R &_sY2jP#m>4Z#U_bvcr:vDOJ# *gզQ㳸Q^KO[+l`>Ry\~Nt2]ssw8C*U䢆*7+7Vis|B;WU,bQ餝8|Hr9U-H슣׃FRCH40^c7q N}+KUa^lw^]6y| " 6]k,$eGtMFXTZt x M9hA}6f O-py'E |Q9~kRxOV:e6L_-e='>|/gon(ne~{r~Ǻx?6sP"D*xz^G&|Jt;VNPJZQUoh&l9ÄD&Z}b5A v9"iy9wr{os~>8;IBtлA2$~ 't~)Ԕ5FI{~H5P{\wuYyd8΋:a9mnr@Ol3TP˘~ݱ2CP\\}-đKܷvB';x.8 #RDp1AqtII`hwz1sux-5Vݞ\ )~{ QS6{G.ܸG"_z.qE5W3&}נ26;l/Y'׿/?8R(bNZ Va;֕QP7:`׳F6<aJtc[+娾@ȍ"V*Pa"\(X@FYeuȑQJW9k̺J%Kt~~.Tw+ AOYWޮmzo8uo8 }F ] 3!y Y5 l?M\bNR -L"7McNLѡ ZKvP(R#,«nCځeщCwr3@s-Zx g߭2Dð H?--ro~J]!S$9g)'%!{Q>¾;&%,YF,wj:䧦^&C2K#eo{n]8\qVLuFد=d]$ f{HFu\/v?4Ux9$lY`F1rXY_3oqmE('Vz8GEXן8}Ŷ7]Y8FWlfS.l,MiF{؝c2Xd) P{_a/sC|d*z}WEKoESJ Ew辑Z9L1L! HIF*/f\۴|8A@ǎp~O}9YyZ 'aץIzhw."(+ `K6ͨVh9-?] 
~eBл(f1UOB,r!m |xs<-gwrj>&K8(RfpzN8&[)[؋>vKE=++C7b*iu}0Vbuϯ22ȏ3|8QwѹHou#duv0}SRWl}!ZcU`̆Q%q5Bdb&2aJ묯e{dhME`waE6. 2= 0 gҸ|&SL_+<9Dd[}n[*8qÔ|S-C!Ѷ-òְ殙@]K6-- d_̚o1j73CGKՎaͬ"ibJ2b߮|>{M%2-lFB?ؿd3m%j3uT$?Ю?-ڶI2}V|iNv!ze i|iab<t+Œ߼$:E$)yski ͘>]JZAmR%=مG6`R Q7}\S`K5IxmsFpG>8\nvMX)?IܯrHchup&0ƨ{|p8]SW]T^VcKؿtsp3Mr]tt6D{H\Y+VK%#:! F6lӫhFķC@ړm̯l !> !hB+?l ?x42\2\djXjK _R?*'>П9y3lXm4dN0;=q`ual>?v|rosCW4=nGrM=Y.Ǎ,nQG _9滦ݱA^fa֞Z"DeMDwAE"q )7Mեs䚠;̽Ys}Ӣ's]|kztML?/z8㸛}Rx^%n9QxP˝YoRSr B[͉)*C7\/](FjLwG#g"I[Ѐ^݉ jՒƥ0pBhw硪gIQmڇKFjB[ƙyB} [ߌGӭ) {SoQ:jz#@_<*y'X6gA\!q x鯥yI/5aȩШXHht\[OދLhTa4?>H։J BT-(z:'?ljjo݀2(Nm֭'I2ݐ}]`~ Ie{SOFTG,eC:v Hpa,Ӕ7ZMHݔ9̇շZjPrßo'o7ϗmNfHJ_o&?ȥ]nLG~OLz{N5{$ZJu)pl9h̋,!GDLTc)ʼnW )^<h5D,\$ۧ9xgɵ uzLj~{`~cVY.g)Y, G"Uca_{vv^X_tj:>%i`. = 1Go 9ηo"hk!]a#1%0xBAjSuܷg$L\q=E/I^wI}jޣ vL0ל8-`B'˹:+Gs^GuJ7n~$ gD&-IZlֽ"d@63\i-}5 ?+1- OwkVy+/ kֲ[oJtD9V='^at,4"n>#N;Q2zoYuzlPg jjћ )x<&H@KD0#7:Ե~G/t:)SH~Ǘ_F]\ŇrZ8`8 ń1+{ DHJKJ:~˚#ohb~CkNըMb9a|G'(Iΰ2ãe$;W/te_)2r&^;d7vÀ-߀vuu:m>t! gvg΂jlGNQ>ôbq 9J〦[bӘKEc,7Ȏa(s/dm̈́ jOˮɽstuG>r! XqH&kU*f~E$xyrmi$UO&YEaZ2 CQ%83ُSyYLn'P\C` !g(U m>OFh^ȿ?nA!ww:1{O6Zp.|Ak^Nɖ$r,4ggprcmoǓ*[ 4J5=x-#Gpp`x0I}}=/·mJAg$G2"Vl&=~kyLz]2ƒe5܈s65]YLѣ.zZНR=N~#xJ*{?zjl\,Ւ6Wssu'5w,D},j4M9#I:.[ xl_WNM^ d;ڼA[1ue :iwӐ\B<;˷? 
: Ӷ˥5z1hdo(/ƹ ߼CSᤥ0 HH־ӄ{edidԾ|Dx`qlpEwp;e;<}zkJRم2LNM[Q˰Vb͝=Y׻H{;v^Q5fI ΆvV( .O' ;Tcuޜ=#I; ߄S6*{V_]blg3zSԇpFލKlU*ڌ endstream endobj 701 0 obj << /Length1 1794 /Length2 12602 /Length3 0 /Length 13729 /Filter /FlateDecode >> stream xڍPMCn 88݂ $syew﫺LEQ l9325dYY,,L,,lTT gTZ@G'؎@c7" b`errXXxvHL9 Jlt~_ZS:+//7ßQ[#hl }6=+=3Br@3%mƄHа96wv3v 6 S[;@]VlY/߇`ebwH3lkolle)&gwg6NxcWcɛßҍR dF?Ҽ8h> #=\k;׿dgfGf.̚v >o&,N6t7dc {j́o]gG?YYf Sg dof_v w[X[l<3ˊˊ(]/FvN#' yTAG9ܷsd׿{w.%[iO,,oXϐ.#W͟9֍mA6{u(f]" bo jgьL,ANR w ˮǼـ*`'/[ +֚.f31ll\cGGcķ~#NTlf3-Vrq0Ef7Y`V7x&,?b6@6?m#y[MxoJloRloطdߤߤ8ߤ8uro|S|Sd{S|S|S'ś8:N[W|r@w)ؔ?Ȫ&JЍE$j%{~h-cF8,i|8um& SN .l?;;u,pΚԆܮDfp~<nnyO j":*]4BZtvCBd$X>sg`Fxtb/y$zE o4{ j Lk4.L%7u!s-f$3ݟ-G~N ͎N$BOvNriLAwH}K臘0}EVN0pJʚM+dlPQZd6}7bk'S ȎZ1\t,hEA"CHrLU@5y:& N{dnanH`>>5{/^iFJ- gW#*C?$3Y78_nxS'N^g"z9OgwFC_j<^3mεS^5#}&s LE%yՎrm Gȩw֔պ^S~ՒSQBK'/| ~qQ̥dt7 bn~2ujqExs&١]ViY_{sI 1Ve>)~88WY='AϢKK9q8ѕvS ˲];ˏ/LƤg8|.6\Q%>EELȗc:Ή!ݺ1c*϶6z3X$1j3nR6Egzf,_]vUs~ʽ>l;5ai6{iu_Ҋz/Fχ[EkwQSȧ-O[^c1*,)r(5&'#ra!I,ZԞV}M}z. ?IV~bs_p奀ZOSCB1+֯h():P15jigAj^o הּD'_?#[5d;KWo!5QK}/ZȚqy|&wė8VKr/wK,b:&L飯sJ6Z|dh H(1Wqfлtr={ؿK>y$ U>8C8I9Wpp=9k6l ҼIZ B?CڑٟZ;ldh "'w"Ԭ*O$* (Z GӝHy Z%/'R|yOZ>.ۘ_3>HMx/zuHx!IUl/<uG)fZbzrM2_Xb]WQ:F4tX#e|,Q|qn'KjV;ZBf[xUl'S)Ey}UoRmY4/-Z{Sa›\\Ggf{aK;k0*ˠ|[k͂3"6/Ÿ#<iGMd%/hf`[|}݈0o=A.ipe\%Χwg.F)٘O P.rRѩQ,[r'ڞag; J2;%7νJ|~Q[;0D1obj DּYv$zv6>uﳀ5"k,bLͥ^mتt‹CԂ*tMmiEZ K .0WguOմr٪N|]?r^LȢב1-m>p>I$.繝5g. xq&,cV:1qH[ hF# A\ u:DIP\`$V XXi3ۉ,>=ՔcoBOK;vDKc+bлLD?-o'mGex3:xc/pBwG%%'kWR%q&ّ~@f@|og:toNےc?gbG؅EѰC|*9Plw#lѵ90p Eop}GyB5hђ~901c*hY}@N&#MJjAT,[gLm1Lx4Ľ"T]Nx`;2X1ZЯ 5eW=K.o8@tҔIܤFL %>Ƣ^OLvB\ }L#쟣&wwdvmZo&7X8<x* Qپ"25c.>2|xsuNSpR4Nvda$u.ͤ EgT(z;˭~]Z`M}H]x̓Xnp Efiql 8~>ik՞|TTSlt5Vt|*b$8e$Rx[֫4#f9~$-rUbio1s)o?yz 6rsstՀ n#XrD,E0sD[>7ژy_kJ 3 #'}A9eçb>`/vXg!=uXԡӘ ;Ժ;/f?:^SNC0P@<2x{FNuF,b%K*i &w||2Vsd-u|zE2jDqn~U3`) w8D}$,=0#L(U]D{HN/׷D=ɕ/m$Ѻ\ɂhVn&P_$;4Y㌁RtjtUw*sc9h~Tad9mFtfԫ wZ=Գ$>*-gϱ(u}UPxxpdʗyx-k'AW|H(aOT^3%BԞDؙU@gZ C> mOY@Aso -2@覱pM5c]ީ5W3w?lK}_GG>!Q;? 
&ꏯ1sÅ빫1 "nH/s_O !.p?2{?ԋ~d"sMEh=4L( >]nc_,$\~p=x C_~{S&q4epI g]"${xiYh0L-aL[~ ~v,GYIM׆ݰjg4CFPk\dd&^aW ``/үň a\NxP|K3 ZsG = To{\j*ˑβ0xFŶ3zj_θS1_RF}aGc6Dhg7چ%@wh?fMX{ց; K}sg~2 t$MpgL]d p/^~GW&ޑSxߑ[w1e^{kw9~4E(l5+Ï}с'\Pii])4e!ʹDO0Ӵؘ2\W c#)r9CpOqx!X E׮3ꔧ/1L=c ӕsk6kz6ԙXjnbdW vNU]% ;URjҶƙwk-O&S暸 q誷"!k\zYځ$s2ms|F[Q6fgyiUErT鈺T (>Y҈S&yJZ#j7 8+Ni@rVh)uVk`|B5+d"P#-A9q,Wr~[pS27+2Cb&sZN0VLVڱMH]Qi ~BZ` @h'zdeyZ \ @ÜtT28 F-!s)+o!z'O6&IMzu\4XS1_pȖM(ϟR _: @v{$΋DFARN㯼]yoˎґ̶DGgYs)MވdV_N'T 7ZpNp0wT57 ʵ@ Գc'am&P;88brQj|VJH8xPk%¼ ZY$]٧tx AǫG.țraZs)wkL'ɴdK&DcpVT'M:'=g :"9L~n$FSuh?6dJM1ӒTt2ݗme%Z&}[g0 .^fHôoEI[|c Dz8}ԐW5Y#勼%a䭴친CV=b\fgCNO)9YBx/en&BFODcRNҷ +sY^{S98m|~`Svҵțzl+%k#{W?pa^$sޛDeZa ٿw&>8{29P ThPsF ɺ!K1dU 6ůߔ$˪?m8仍UW1.i"G.G`7]-ͻx.@YF)olʶT՞ǒ<)!9g_Xs0Sя쵛ѣ'Z=#lg5'(nQBOv{L1Hxsӹ∦HAץAnKC}y^/LvawF>0 ]:KGt4}-+#{l+i.6.+X H&3JwRP*^64L`ёmEYna:}IFa&Ǽ\6cR|?BZgfBI%H4<AVV.VEBQ,H[iI3v]&4&^tRPho`}6DiBᐳMH߃JU8(@#=4 %qv)O9yIz277N [=KE!Ѵ&7$a G[pTT%Um1a=-QaK9WAB@xH!eBT|TMΕ=5fz&ҵB <{di}b'8y_nVx޵۠Lf0yn9~kVFUW0X/W"EٯX`kfiyK]sykդaYa_Ɠ1kD|Nևjni%an]sۻF"(H] pz*CcB)>w"%="t>þ\4yM}%{!Ո\xʥ3:='KXޭZ,;ӯ/+{L߲ߏ?az\Pk]ˏr ڲenVN)׊إs#媊k?m2a?RvYGen+#qV|^d:mue␂ڮ^ǗDX'*Ah,9uQ[v-X)Fjſ+;sW,R %BVѸM;}G†Ga32!&1)z[D­y.RJ\6A>֐d\,>$5{gvX[o5\hV.% 77J&#FSՊXjg)[2RAC[Tw Ή3,*ڹ#6L5b!/GhFˎqo3rVCo(E)Y`1-3сgTa_X%K~}PC #QDUP"N  bxL.)z.9N}zfb)kˀsU&N.ŋ>|FcF(iʻn'-h(W8_얌`A+`kZףЫ24$-KOj3' #+g);eZNaZb&U:[]fŃ.8Gw|4%5 QEn~+ W@\ս2=JE&TDg,Iz ,o C6ōu=p%u-)Xz}VXل_qH,-d9m;F Z`c&n"J/К@Zʂ߻Lâfq8 *[zg|?m؉IMǸq3ͣS.@<SgYo -P^ˣQ%엯BkNBVt!0f\*#e-\C,k  ? 
#O#œbjB?p>U}{mޯ VU<'2'i+~#@e2>{?:I̓s 휓d,*JW'-MLXgbp!^Q ՁpM”ЛKj_=X"HP55+Χ4 N4$ܝ?'^S7ͲmZ~̃ת!c\b{x`Y gK@eFKSv1%L-5{le4M8JI;D XÆ7lVY "dz07}cga|U"ꍼ^euI[ 1S^p}-bFL|E;,Y :+;PKQkrj ƻgP^*oi ec~{򶥅vl,잧+ p#W~,{ޕBs-؏zbm'CLIV@}jIU=˛ݓzjN; z]%x* $;b|ѓw_t4O$p AͶmaѨy03[# ɟ_ Ј'H ѶmeҨΐZ)6g6g=55-")r{  <>> Jcvo,vB ۘO!jJ P1E"}pC&<|n;ϽJ6 [We.ۈE[=tbe}a)7MPKlf6b~,B&_n@?J1dR }yNM,,Z/u5TSmOE̙[Ogsz-Hu1ʾvGmzKp+OC(*OL~iR(yb`?,CRNg 8(&uKD6dA/ #$I\>$'[Dd|rY=6 .׳&E {Liߋi W:Фw$$2Q5aqL*C,|~ >t$(πVgqL!Z,6H9곏dy(3^/97=NZdMCSKk>J.y@rYK21r!t循sв0dtITd1{vv{`E|p Y0=@Ï=li9s{,ٖH~w=v_&~pnfEU!,d tm"FP/#D[j4BjNSe1橞zjs $ W99PfJkw;*ƙ[[gY?hz,}tK!sh) _]tKT홿`Pc@6n >ĺYmyth$>NxyQ# y̨Ԫ--rݺR26mrQ34ZM&0dos 5H|"lw?СD}{E48# 3s`񈻄5V{+Ӈ(saFO<$>^rSaV۪^mk8yt,+e+ՐK&k!#|#VenF1ⳌXFP8 1[@nssGfo~Q÷ixe «7/\{{8,f]dRq~n.jһ12r|mo+v6Ȫ^1{ŲRF豞\up03dl MUf<'|2JjGVͭBCVgIe2zޞ ö E"HQvS=tQܻN/|zR/U#W<wmuR"u bkPyPʪ Tj_G,S'e*~7=B7P%ݔ>TP T6]>͐%`*ZTL Vm' |I:m٢_ &}1:|K ]SDj4~'@L4AG[s#.+=N'$s@f|Ώolr Gf4n|KTSӮ\qMQj43/xEwe09#4|~RTAؤ *neĂRUk6$ }fwޅ@5uYdv;17QB<[3VɭW3аo~7EE}7ɮ4$EeEKnovI̾F\bؼS-l;@<*eܟC D#2lD?yd$(_e eeVqH<* EW"I9[z>@{!^F}ŴSH0k9&w~Jat7:w\H Q"4jOĸmhZ"aTv|\&PhP{(noDI ?!/3y/J'@Eg{Ie`7VpuH(y OVW ?umB 9dC{\%i ^{Og:HQݱeWWP>FF[?T; Q7U O$`̟J5CaW(|u{x {y(Y 8?&"/ yELA2c>NJ:6Tt56`yߡCg4-ZÇXU`k|#J7BzFZS?ju%#uQ2rd | f 2[k^ָp59:ߔ,|Hsjqϫ<ƣsYf.;F\uxWP:Nyk$Ҕ@w$I> stream xڌP۶  ww܂;=xpw^}ޫR79+d*ꌢf&@){;FV&>,/ Rb#doq' X&aSȹX\||,,6QwHL9{;3"~hLi 2@vEcK-8 @t/4..|LƶLNB w%@ tr*dl 2&DJ%o؀Lv` W;3.Pv߽2ݿr'`\<\vf)8ۃ݌A6&`en U wyΦN g&g_%2eI;3q{[[3_I{2}vvs_E:0kځ]V,.N6n0d˽_z;;E}A@?n@+M3 hC,wytY `~YSY[TYs&&ffd0qXY9xF4Xڙxܦ@/%{4\oͿiu96x[<.PV@3>u1/xY9X8@@3#\UUA-`+u/Sk q%L36N.'" f/_ `fw5V.N_ ,,x̒!nb0K!6b0!"(!p.8GCO? 
&?nMC3S{u/߳?TeCL?1B{W]X)q.h .ndOl.cp 叔ҿI:Ifv9ڃW b;˟eevq1n??.lW7O*`O^@C`˿o4k zMMCBjD w'8P"ov$U E70JߚqX ~dD,5Øl3La>;+wuۙY ߈28^gJ P|J?"7RJ=ݴҘ8 ϑݙډU3CjuFlgOM>!j'(\CDY F-&3":18u*.+I\e{{;fzӔAK2z.pJZU9Z0ǡo `Җܭuqez"$h>^^",7X͙ef80Je"tٷBƴBo2s>[ 'j9r5//3 _{ܔDpُ}[9mZ6Ij0aKzS3fܙ^D^esx3 |Jet6cgg&\'+f].BG4!߲ON886[ YW̱p&*T\Zc[(nkCc9;^ xy}XXRI#?s\ܱF`'i \$BGk^4NuqX^^㈲,3rCbp FnG#FwU 3\#W;,&SRɗ"aVa8-es!Cq; 7*M ߶[ 0ԁ̥k}|FaQaRB9JSM1Sdgk^>bB kcVR԰Bş6 x.h*&rEjK9`+R,2v |57g31 *QR۱zSlУ= }e?$@ nEd^ͦ'Q:}Ʌzk qx(`Q wM)V"a?Etcz'gdΰc5 aLjOJZ'n7,ne44UIz/܁*e 0P/a*|p(fMjVF{gEg (\ezh(E[a#[(ox l.r2ξ51ĉSۚ%Fj]Y),et#36g^R)eM. H e8%ץ\TM6tany4M4.3,6gq68)?Y&\&68GNu~D9k8Q_Y o{cr`yYVn K8 p`68% ڧ< 䩭* I1p׳:Oݰnh+Ae=-pzb(r\3FֈѡUoT4s@Sk%2PTQ*i+g:zcp?rC~1'HaVJ԰^.PӰ,^?ehPA֥%lwp:/H|Qk Iw>,ۆ CeVAsd:GNr TVBϰM#\*']JTBۊHc 2| g;̿~Rd{ hӝTݾA^^]{h={5BäJKIrLo$Vvy(Dv&dkoEdֹuNѩ.QoCMhWuLg>/ưK/7 &:*ȇV> f8ֿL|j0xӰL5"25kЮdr(6&2M ]Z3`?2 `;9w{*f's? t(7RhZKp?~-aظ ._}94Ը3 [E &`Tx"7Y_$jMQV& -E?Om񥘶 6b2a<ͳ3䂑p!"ؠpt}P`'} A^ +_?nqQ+g# e"coƼ?Gc@^ЕR҇f4%sFJH_޼k)xsfiKx@Olq&;?Sʉ1#oI)$P3-^S/N<^̾F;5[ nMr#{Z,^o 15c?/J.)G\Z𩤃 'ni\K)j{3AGhRӐXdB<9@͈A4mTWRkz Sd(scLyD`|<Xt/6C\HKx ῲ4[߬b!CUy\v*3irI V& w9U1>gqy0#+axGnombGzQԈVr8hGDٷfqjl{uvD;MN?tLݪ,]Ip/8Z4EڝLWw {W~j_h>Ϙ=Yl/=M|T[ &Q M";(PdְDݴbFMO0{#L5#5n9fh]6MT_T+tI89FϐDUpMKj_Nvo-Wf73BW+-BW@ݪT=2jҳMtG$brq"Sl݀ /zbWyeVF#ްوgFӊF/9̫ho}̮]0w rxP^N])6ϊmb87X\ඓo)Ӡi+*X9p*7Iߕd^ }]j@ [y1„66ڊ/r%5 Օ-Q 5C*!2hST5iK3ҝύK^nm#ZƅP<"-S%l K6Ֆ7Y rG%Izg"R,JV, 8cpž`(-u(*M2l_Y4 /hI22\ex8mk J午Q TKRֽt"6D5Հm*5ie&cvKMfy֠#+Ptt֪md]dQ?>Yܥk$ǰum%cߨ\Zo%G2mS4vr.uvpK~{xi{)$oKQ6ZY>!sҕ4%? 7/F;s(>ɳX^^3&l sRcrT},ϴX@XnTٓ]J UqQGb, hM`^:N(ȸCꔸ9\5_U͢w @yXO꿜/}o e: P%qA#l !:91ða\昺ĕ7v՝s)a]sw :bcj6 1}lxq;ҁͰClRr[Q-4DZ7|9JxF1ìTXο+I>XՉ&'-enr}Ŋb>DD#3w $[. _l޵1t1xfO'|. 
TiU-0V$m<l|QW.@U`2f2݉o"+zNt8>6͔ kۓ*yOeFE/'*5wIOs;F?g o`p6+^`cVH)6f1ToܝCtX39[ɓS&yq1 1IԺo ΢527hOeh`Igԃ@ r'NEV 7 1}=qp-8¿4"oPVvVVF!jVŠOux@*65 wzeQ>F#N9BuP3_H-ʰ#xө䒵 {& N`<.|6rl1!O'["qlK`?l- /(@`$mcG ݜޠ)ԂԖۭL@XYq;E9xpj#hII*"_Q?ޯyU ^wZT*WN9r|*pg5w5hRWc)p"[hB*WK< QA$Gy$Gg})T ȅtH<1 +˫xIea]d47EYqCx.jf@PނBʖg[_*H~Wᦕvps poG] vt1NEt?PK/7fG{]YYd8{Bl|VH `+>, ysЋ*ʚ陕oAu(ZO޳ba&_! _20vlj0\d_r &Bjg0&TԨSO׾rSc~,ׅ`_(X5(nLn:nωϧ|..<-s=6Ts'8B8!yE@`_!="\j Uc կt YOSZ2u[ٳTSyA<c)硆;>8"Mײ)ORvc-f-|g*kUwxC﫮$=XH=>K&3[x7tS Χ3}|(.o#:Hfv"n^T07;23:L <`kO[:P2LJ!U&ʻicv/αeYD=愚ny^fH6v hJڂ&v0v xa@4Sy t]d24l,]wSҗ0j3ԝTV@΃z;VR9Zq+n݀;xTR~Rwz_E"m! 85\8櫡ZJ4P_(7ְhMyU阧/ "}'NNF<ɀ"޶]TڸsqB 1訥1;*,gB8,~X>ѻk^~A|Q³01ymёlq0{ uA1 Ĵ,!`C*6!1v?+yɭ:"r%_7@r]mL1jb̈dA=y\uퟍ.L1i8g_,}DtU2\͟=k6m[0 #ܟE1H+T= hwQ萺J iR}Po Z UC|eZ$[PM 68+sv[F?@7UYHsb,9ڣF#?Sc)\F>,5IT-lTYo?A}@ڝ˯{X á IK?%%8#-t9-b#oc7 *kXAR͂4ԏr8>,+:N_F*"Kq坩S+~?; ϰ7P*QߴQ 4NXgqfW.Tr U9BiX^/

PAx&A0ʂײp.<7dO =g eRFVePD6M=P1~rAUPLmv88pY&ZTDwAVa1]Y/KH0>Q:5lh&DRK׊hA]HhDwVRՐ-u"ޏp lR[ K"lzux>3#5;jtCѺA_V{`I ư083Z$׋>%vҖnfnVr +B R"9w/'H?2B\=,7oC+m_1Fhm^86w@FŇz5U|5Zпu l _66?pe5G-CP)-دhijj/:Ǽ ٪Dл"@OЯuict*4H^U#j֜(I ql;z5 JI`ӬeOj4* (R9V7xe{r}?0Ғا5w Us >5~3KX7lASwrZo9Bݘ1P߿}[<~vԵR֛v0vq"j%oOfb=z^SvFeW}Y۰[BD+8jڕH*IIv hyщa:@fܔ;$e3@M֘_Dv:ͮFR5ծ${?uiCgOsJID]ʅJ$?E47v6a%4BCϳ_/VS_Y@RW\u8e~Eؒ5(;iR0s v+!B C.YJ\ J}j(01tC"\JĝGbscaj`art ?$,|#jxA~5R\ff<Ֆwaq36u&`;&!e.vS4vh\N\:CuK3J79=8;ty]{:Z{YD$Rw췦ځW[P1f-]Z1|O AI8F_2' 93_kOOY 12#`uEspG+kOwmωjtx'ϊddtPqI)I߈\9IK Wץ=r?6$-CYu;ִ}Htx4 i-P}uً4=?Ij})]w/q 4!S.U2lFݰ{=v$~Q0?L>9v]ŠT 3{B'KO *û[N(2;'B!ވ+w|9~[5jBWmfjGDRqY"/duxI𵕑QS+(S 2M-P/l6s]u`h]1Gj#}QaE/%c(!aFlu|$ʹtpX)Y3ye*xwHBڙSJ_N(_\}ʺpK?bh'?y'A'XpBYq1]z$20CRr3XegZQK+~2՚0@7ևZ##r CD&v~x`jhwg 3R+Ͳѡ8(Q (Ѭm6<4w7쨍 ۰UcAPuqW"Z?{b,mL^55~g~ IJGAthp]Z8M4cItdB{'}t:As7` Dh"7Ls#nB6'ؔAlLƥ⟴Dx Wu`Ͻ؈.fȕWmkV)$9SgX}/y*^@ԇ2+#1渎h*{|$+˵C֪AbpB&ыX!RJ=ǴE?#ó gaKR'89Q 0 8έhn Mj-(cz //+ך kI|3˻QcnUnvkGt G"ї+E6) KPAW1E0'0A=,0NgOoi{;jgRn :o[I÷CVG|:9uMh7%z:ȨJ}?@+1W+ԃmWFt9R}_FvddGZԼ(f~ugAcm;6fs'oQ8w}"U'JܙR]ݔ3pMY镡!'2qm ~fN{\*4 7葻E( gxk5C\.:=v&aO NHwρn 1 H!TP:bՎ_8+#9Nʑeg\w]VJg&'N6> ;E1cd&<_⮶\2>GEL. aWV LV^`qzSKx{:/#K]5U7"/(-昱y32$۬jKfi0h]D* 1ڨm+ ǭpnD'l J2g2r>}Ql < >_hT \V-:Xغ&ۙɏ,m["qH9M "? 
=`"|cZqY10 \= /}Ac/)¾YВ(}dLةfR\ʒV(m?rE7e2Zw.&dd{|ǭoOQז#LkBF%w.\/p{>a}lɼ$4J IQͥ8YӪ32aE$2*ڟ,L>|G6K%x6t>'Or:ߎ/1wYh-R3\fov[u %N)݌{z.$ ,֌(SSX%0^4{Xg6hb:S 9fa38&' ^T@'\7H3I5x_k3Y֜&xZo/Xowp-ϻjJEitʂ> NDNɯe,R2C+b=l2-&Qmi}»6n)N}D!F\_ё{|2 }w&D>*ΌeD' Vt-3)t1z)=->4xf[nQCRY2p=” 2-da"GL*Y %b~ t*tDka9&wf$8Owl|5BGlWu_ᄏ1*uvvNz%tXmu`-JUw# )Δ c 9D(J$kŠ93zǼFIG{r]\z]cp9$I+gc+**9g]Vqjz\e0<L-+_`,\uU(V UV{:2- \/':^@J`r'T쉢),Ց/n{F.)ii e֜pslRoWzKF_{p^|vΕpAN0CRGMvhB-Bz\"HTQ5rvgXhH/"{|x֧gPs`#8 #F#I Iss7PB?/o | DYs.;սmTGuvBcѺCgyZqn:؉Oi,9,/4@&E!JtYo]tܽ[ ^8KgQ1'd(˺&`Zq3Kr }'Qw<tj Lg{]rnЇ]Й9 +!mp%.tԝRQS kE_5zyQ(|?TwZ-H|{,[A{w~93c|Uk'GXGݜra{VGH/_ D֥^SgoAR0&?z:>JLWHM{dXD×.kpDeo$ϒ4+lQ5-}KGzյ7r*Iopm hľe7uQU>Xwj 4D5p^OE*bԡ$*~k@NL_i[)HdDYy㚫b tS@I}s.r:tl*C:؊Os ҩ`~)`?Vc5VGfL0R$&HۈRIZV n,6IfڨQ!*W !!k b'i8nsLN!ǻ=H_ sߌ:1E{@S8'Lm~0+-L[8]ڐQݰ^A5M)sS F>^k#8 (<[=Vȹ0gƶz. %LsHgٓ RW4tzH{ɏX\r}Bp+^rz3极Z쟨xBKAHϾ+EBףpN&$g1tȽ/pd5j_^y26(<${UDz\+h9M }@wk1!ͬAChz8@6f(DlZ>ϤװNsYճcbyYF~f|+3-Yt$>3tb= q Mi) vⳤ %9 U:,倪@69bp4 E^A`b}S(zN'❡kA̪Mhzٲc͑Z7։0iApVG|YOnTɵȖ=7FCg56Qc*]ٍ5 ,j%yJ, j&e8a*I\lʓ!jʛZg+te3};7 ^{?Kl%K9N~Wvʏ#'1*٫%ABs.‘qJ1gs+2xUZ%!;0bIcBצ/f ѧ4 Vǣd Lv1HP9K4Dſr)!ԕW"0a 0D*id nL~-RV[ ] ڞI65WJRq['09wLsKv w"ə(EA~٨%j`:%sƳzfӀhN~S~6|6IHjRbr:))8_hqy|?/_rcہ6[R:aV|M0:H7ծøOcͮHkY4+Li$4S.V0!2,E4H背nucEAV^˗j>^v!xWĘ# u:D9c^@[~q0J0KG=ȩCt$^|jKmH1f)Go/ # a@I`Lh_KþpO&ъ < 0*Ql;'*3kvK*G@[˪f5bw ,7*M8\ fFivn#:g,1WJw;D1;%sѽع=:] ~g$68\-zؤ"alՌh\31#M ;W^f__VQ$?耆cT }x>*V!r!ѡZ<>Voەܛvu 6chnf:vm0ّ #J#]jQ|u) :OVz%_=ʠY~ႩvKI/\3 ~~(.KNU1I{ qƸD-VK]iD,z((Fh|:'2ىw\zJ"* WzVi4N<10Ŏ>{S6()-Ty je Ժѳi㒡 M+z5@ 8։Ά.Ym:G+C!` _7W%رb@*^}W2矨ymkQ0~̠^ y{0Me G,Thyj@/NQyF rhLUNu9&݄|M7:kڀϣ D WD$h [j\ #.|ڀAHzDّ"SjTz\h \A2AXaTgGVOpHՖFc}YsHAMu|'i\4HgB304=sDSTu3#D>XF0Í${:Tuc5^w fqAlk w%7ir"(-n~`@ BJF#,TUxǖ;2mbHW %U#p[H\ZpNL=[=- U Ơ[(ѱɛfƒxpu{MA3vӹ7:V귕ar*xs;#?6#Of˂$sr#f"*gOH!7b+& k.a)غ?{?,2.)6vk#$=l޳qcD2hHEԮe'aI+x|o*0C+cf龚K]RW|Q.A'#Oi|~7Pe@hǘ4B$Ya !8fQ75X NʩAjyɂXY>ZןV{b~ͺy)ff>I7W+7ZJ9>h뽦894I+7:z-wP9 ñdd533|s4@B}{7VA7S_Y &Y콍|fY$|1L0-|EY,', 漖hszF 
+hDYB>tRH8Je5K͢}pH[^U&[RC~]y2u4wML#QԒf[QfB^+\QFmex0z^%MWCBY:AmIJ\wrF]X:nBXn.>8Zy<:|Rl#gtKP;o&?Xgm=H D7eШTbhl‹?] n[il@;l)~f8[WCg>l0g* ),*)͋տ:1_we)T*L4vW0jΟVXp\c.:}Keșѫ8?Jso> stream xڌP c3%www\C oιszYmw9 1P΅ "`bbe`bb''WtGOtr$5r]m̬ffN&&  xFn9\*j377'![@h b41؛X]<'#;35 t:M 74xr*f.FN@H`cisڙ v,@h/c3@v;:yZڙ,mqY:_F6 #7#K#cߩąF ]3_52f1;S{[[3_Z:M@}dZٻyYڙU+PJ6 9:&z:V2%`04{;.N@_*33L-M\@sK;?Abٿ0,=:Lc0Oz 3c3J)+*K*=lzVV;+'Q,?<Jԥ$ zP7=hn?cdyv7Ezl-m". `TOA >"n2@"c'#k 0s#g__(]"vP0{G_hJ? ]Ai?.,/?@&,꜅ ? (+@PAU3;PKl@ '2;4Ѓ ԠbA@/{6l@W? z YAptwfcO!?2wlclL],81PY.pp3d οkg{x@P@PP@? ^rE:+wL\@ui]J@~}ބ7Ī1;,F5S3L*u}NЎӃP ց Wfd>/ G]_FJ΅FU?:ZCvI8r!)?Kx4TmN)s||ZU ,_&/4]!v'A@^XB˟{'Ne-e]܏KCy6@-|&]QÿD3=B3Grɣ#+d-1M9sb@pCk..Q^6<Dk*#}uu~/s~85c&-!Mߧ*{jaCgZ $5$|"M NwZgwC\mpZ;K淗;AM\1ZI6f Rf]2g~DLӧқu/gO ~2m|>o;8Z 7ooK Ljdb8n-9-) I)ՙ(8vsc`m!SC%21yW/>(h)S٤9_S_3M0_ƴҀWI@ Qĥ`h-G+]|{wW ^gc 6$[cϞF'ږhW;cgx*W6Ef6ABc+;z{̛V|o09c%s5A\O +3*ЖoE(޸IZ0͆<^Ƥs ߬ciRlc18Zd }xϿmdTU>._0SnX[s?KC|؝d?ay8pRu1V8hGÑDrн+ &hpBzAU5^N2hsZ3t8T`z rA⇪b8~,P[`Px OnHqAB#3 ra9#]!9B/aNq_5L3t߆xxۑ.MkdN^Q{X QXê\ DߗM!!`/h{:{'OS-W2`TiBxrT)_\40 ōr}nr/_Hԡ~[Rdk^X sVzC5|n(/ @_5Z86T,@U,bH>k~[WaxL_kDS7cz1 E1q- *!^$DsL nux<~y8[U'݉ G׷_`vR52EeGz㼷. 1MS}B%߽rN%){B"2͖pŖŏjLGāqx:|CzT&GG;b T/yeh~w=-V,Qƽ!kh;Tc?-^%'njxF_u\LX]wh=]?vL7  wjnE$q#eH>O[j&gZa( 03gcN4VTo!TfȻBro qF{J S- V"GzM#+ Ԙhx n"٨vo,p"@: >z}]\ }=[UL^MAoƇIYIaj\s+^'ze!_xdy 5ɧ5SBȾ`Op;4f J+SsЫ\.xjRʵRA~CV4jJjFC %|L,HR;W"p6 p+hAE1(lbLr&7 üCA;Por䕬 pR){D6[tj}K"-?6VP{yv.X+}g*lz*?Z% AT nV"[+% `,8+ˀh{sW{̹`K1LkYeAES.-ׯkgڱt0|QΩ[ܰY߾ qT[QW:P Xԋp ٔb4η!!!VڇIi PavhhS 9|>i0T|<fx[UMFDC*7 M}mߊ[^JC({62nrԯCY"ezs;J.̉:l<0J^·Pf,@AXΤ?%#Q AR/YiJ)V٘+̏M樮?K r$'63MV%&b椮U `vTecΘUE~rtIE\ C)NЯޥGkeibEkKG UsY$,Uo]&Տ3DGRp/doS47M1>0 Dsf[K^˴*} ĭ 39,jLf¯h \EB^{Zl0_U$'3Sc*ݎb+tղ iIq\BhBPF$D'}i ȞhCww?'oh`~&^oFJC Np[.)p{ i9CO_IAj~es|$쉳`B'jYqpi Qa"쾓ϳ[NAӌ05 ‘ Jtz3q 71/ԭH"8NKŬ,ݡO[ ᔻl|j-/nz<$"2ݼnOGolcJnjNjuz#L"A,B.h#Eu)LNYٳ*"uu/З1}aM&*E/"un)M"S*2S`nJAMƁz dFWCyFCr,,.SpT:ß|xEv8V4C4 E"." 
+N w,ʗos!N0͍ =֌]dydK-ط㻯ޱ& $*: TS>W" syN`>chX[0Yx>vxjZq6m|x{ NK iqKkdB F@d?bpR hvBޙJ`9SxΠN#Gz_ Qulv)xo0푦C:\'D* ؏xvhEܖ;ƨDGthV4ܚptޤ ǚ;x7'/ARpirC9<}p1"|hkALPMtTjQ :$l|X$!xs4( t4 v nr VIh'񵃙x~/AW::6҅D}k9 ֲg+hNaǐ2H&X~5L’Q;5uIV0&Etl [Htp [gx^.촦%PxƧsz)Ǡ&"w x{tJ~_a~m ܗzS2}V5< ndhmUIx,{w,%']-]DjrH}c2F2I%6ǦCHc#At"ڙL="Lfn"2tF}zIsjyN#M6 5"m'؃u=_HqoݛwQTb$RBU_na}\yj:Iwj nIwf,ѕXG/?!1'{GcV9XpԼ3?&O"ǔI^ϔKfְDvJ`=蜦KI#Wrp}Յ?!"V% 2pxD2=$\T¤u'= /ژ b$ S;jaYk 4LF<:$,peK^o"s;]H֝/z"QIEH 7FVl*؎/SN$A^')GxmX\c+Z#' 2JJa!̧[ErEjJvҠ,Hy1E`p"Y~}+zɩIeֲXk^xFyT{%9(l Ro2F jB3c^8qƌvZ@ g5yJxS!F0*W͑}~k*UYYdpץٛ60XJ)cW$݇@H]>Ģfڱ\w W((wڵcF򆕾zz]eUDQqيL$Ld{?fRڬ|L@łMmR+&A61אb+_Y7= Vd)T7Өdlӓe`!C)meaUs1pϐ@I4a2![RiMܨ!itG\c4W*WS9Yt}nNCx}0 'fQPs +QyU) Uh); dxq6Gc,d"S~ ø=R9 DFn\p)[U\+݃Mڈw$qޞ1Q}k((}y?Sa,.ö)sM'⤗@Zv-1p(Hp^q߮pUh<ܡeS;r!x]?=" qjn)r5XIMMH5ZIb,B` l$̓~% ˆU6(9ku#{rl uioc4-\ίYV\G("ɞ.4Әa=${HD<)b¸t8'Ȋe\wމGW4$)$u7HE瀼mg`ͼ$x__~qW*F,O#BR>A ceqYK~ jC$+7凒 {u_,qЏFԛBS杽y7prd bSsS/2JO5؎Ǜma JUcE&7}{KlJyV`^-H~'H}7{l $F㧨77`?쨀.6 ui&wi0) MQ }6p_# cPoMK÷ Kr~(g'{%Ŧl>+K嶥9>ݬWD<&c)e)w{0-U ^j49؟weR#/o'F|rH+cu W^׃ldӘם$ ]/I 6vb$9\~w^A JS~ iFSyz!qiW}w +eF)u\agBgk+acᥗްaI8 c̜ա6Ԇ߽P ! .Q6:%>3<vܐ֕ yHN ͜s(X]\30v`OdGq޹i_=#Tt0p^܋0r\RK )&ȅ஄ UWicZfcV':\Wڎ=( '$crIj!5n g}2ˌ%k!In%VIEݚ+wa8^_ܦJ©~DŒTG\Apn){)V bÿjލ*t&z"\ڑ47ja^a ]lQ.#?˸ڍEgD6qחbm9[uG^^c{¡ MaJG]!Iu[IAo@ t^WTS-*AzL扛[Q!M;Ip_o?1$p:.I5s喇Rybk%]4lNmr2FIڎ`o~By"[#}9n-lͥsUP&LѲnjwwG NX[j.c5v2YM.p%: +J_D"]S02#Pf\9wbkq]~DRv7Qz&<:BԀvs.j0g0}InLz2w1b4G)KB8@YaF9<ݺ.b/oҁ$wXs2ҝTT1[T.VEL/<#Y,cOߌ3 ^dHτ1qldO{#ZQr9BlYyfTlMN(n}>!j IRknJOo^Bд'x1Gr6Fux,tf$*!7Bkz7p RV͔]B0GyګT®4JwpدxPr{LU=m{JSn1*% hN-:'Sf@o #LqZrØE+~㬊7Ԅ.IoJjv;35{eҭ^3]5~_{WP '+ QBN\ՔsI%DJ -+d;9E܉0w0kw&)mI[A&Z␖' ot;.8y{6F/X/Q?Tpą|b&^Q_҆UqGl;gbo=m`Dxa>cˍz0Iɢ G |)Q`did 1xڶ pf|aWD2n:RlQL>;&>K~zGqBgvŧ4F4_"''߂{m|FoxT2C5 t \m_קNޝ*oVE(NFGqC{(ˇl$ܦ䴭~;Y](!R)z͓\ȬhS*.eQP5U 62ᐔK3Sb.Y1~*',p4A? 
NLnWrm۫}iwD=ܽ@Z6Ҭ]5 9zeDETDo k@#r39ZONtveޱ/7_+|xk@(<r/?: i+VR ܄¸ye oizh.2|ׁ|̸7.ʨ8d z5r|Kd;{H)BFjy WgGEfjIڻ 6N1nAӺP lY2<h0hEt,6oi, 0xD5oyLP$_#Dm_򃖎EQn^Iʚ:}uFƏBP}uTB&_E$)kfp H7~6E/]/҇9O7iہ?s?{חqM*jK{C$X4PY  { d?vADu GH 8Ô~{YPBPB0&|/᲌5y*Nf-|AnI -@#l$bg{tCᝫ6%o>^)rz*ٱwm>ϊL,6kă2$?1/; ~teeo;i}*O#Z5/1@!Uu^vőNBjMi=O)hτ;!5ʼQ9żrĜoDT]nA; =Um|K*re̿,A3e,嵋OapVW[B@;Ov#0 s8h Nָ*(#c k(/aDULr~b]9 0 --_pXނ ޿K7J"Evmu$ :1iF}zTJUS h}, bXUTd]eF1h{Z~ӵd,֋'4*(&uXihX:mq : qܗ3A177붠D yͨbזnn"Clɮɀ M -VYyY+̯hO"/SNj.PhVql V}.κ uO gΨƦA|_9zr6{]w/6丅+b࣏̊>PI1lw*SWcEM,7 %E!n6Bz/3O'΢s mL9` sȌ ifʣIW6wMTOte]pFB0^\^Sm>!+}?YQRg0$I47ɠr.Q*lhaWMDN DHXa .j-xiO'RZ#aCKpȱvWcv8@vHXQ:'* v+fffE-?Ģ4SRt[H Ȯ L0Z;D&:woC՗b ^\K"gRrNil Om3 sczHyEhҺ9  W'r[n[>td1?VVc5rH .4y/<^A/FѠ?/]~Y_KwQ3r儇Ԣ.\,I@6(, --O$: L7|D~PEtI2Rb7H-ey, ؕ%!ݜ|tW˞uMݷe~ ނޯ0 :tV׮2^NKЩU0(lƷII0YM1piv.0?qBݤk=i1 Wr UNbn ˠ:Km)JH~7-Lt\Ijz2:etӳA`$x{˽?5\HmdM rMTEe8 >ZKaoGF[*9GJc3W^`FpfţFYק=\bpPzǃKV7ii` Hџ n 7M>)KWXR~jgYORLELaHѽin,6 ij.\_&FH^$+xԎaKO'R`t*rѷk}>mGik4:l@ v֖Oӌx ϚT_&5zW=W /]F /ؙ O}ȣE|$z Ԥ6#ZEQDZ.CxԈQ0a\\h{m [v%BZQ6P;|pmo. jEgTpbb"\(h"-{(x0-{fV%,gh~wJ`Y=t Ξ Z233tK ^VB:F@j_%3zGa[%RtC)>0P'i&Sɀᤇ'B2/riVRjPϱEǑ&F'[{E 'TϮhk'LY9@Bީldwb21WIrd O 'Q&YeSS1&:λgLl4?ֵNT6K+,vsl1Ԇ̓HTxC8‹`ό?$d'aaq."z^cS :&ja ;u4gCc_4L#:,U SBj>lY mg,>vcya_*J3Զ?D)Gl8 _$ @ \nke.Kp? ZJՒK5vehktll3+E?2! CY7ލ؍SI\zV .vlKǓaWɽ wݯ^ME&vvZ <`9\@Xor[0ә}kj4s˽5׭W1p1/?}QaRʳ2b*w"@鍤k?CgMŤ2Wz(Mp,NhOt;d.*qGBJb>W׆-L (DE endstream endobj 707 0 obj << /Length1 2020 /Length2 11859 /Length3 0 /Length 13108 /Filter /FlateDecode >> stream xڍTk6Lw% Jt0 1twHHtwwtwH7H ߜx}[3ϵCO*a1BPVN6! ^-Ơ:9 ` PL So\N>!N~! !NBiSW@ :cKA<@VPX^38YtH@`)hhnjЀP J ubgwssc3wf8Y1@Pk: Alj=@\b u3u`;9 p[ E_,{d't657;=@`+%PUdCY`? M!0SWSMjS9;l ?(e:cQ4 hk_'k  $,\ص G&0ƿ2+ ! 
:pS cqXH}@@+urxo Cf@+01/ ;|';@6{?~e#%tw+wUSEp֤{5KY տ#na{p?.GۈoA.vv_MAvF %l k*-@.U@leOAβ w*jn`%c@`*ǝ`lma3lTake?`d#.^^'l-N0 \0v>K `CK .?HfiAgs_+X  әC`n Fk+_n7`Za` " `Q 9al kva f9CK _5,l[ Vo8a:t /vVo!`Y/lY١N: urp68N' oKoU0[O_9k]`Wwl@spMmp},FVeDƪ [^mW7W[QZǩOa,M=<,Q7@Uv-b |lG\{~9ձЅ]*?JgX s2IiQhLs7Y1cDsxmr|RMFGGJxC06%ydѫ0jCt#'B薽Unl|`&+=[q| I\LqM 1bm~DHjQ {$,F?Hm*cE7BsQž`&"3ps2Mbp.gbt?F-}\f2].V}Gƈrq7#lmZy+/R h5|MRh`* g7΢~Z۽H)ͷ5~?J>=kF22m5p5^0Nwʊʖ/(O/p!*ޑ=RwdT˜?2Noc,a`#R I7Slzn3gH '5 lx6o%[ .bL&ԍY~8?+o3}왘ғSa= 8=Z>-'kpPAGSL#դ_:R>`1ez7MpqD-M<|9 UDL{ $%=#jRW=Hx"}<%ч\SS?j pۧ$ }Pμm^ .~YhD-u sm gxAo*'+_:&_bz5!HA^N7:kB{hJaCdY1>xxte㧰';>li1@각z]luBw ִ2yɴ`+Xm:ś7!XW: /)n'W_6 ] /M(_`ph`!)xSو= E.wtDgM\Ssȥz޽a2t|ŧE`zGyիQqЁj_6bѦy#|kx9DOw(Y<Yk#ўR{eҊτŠԯO[_]'z.tddP TЄ:>oc[WMXd8| DpleQB +ʤ44I='G ~k6BWiոT*؉M(@d yg2# *8סӾ/OQ bEںs+fRz*;gV8գp>h õ㵻C{D$BQJ.ǯ;66%Ճ_yV#w_DfbiQ"Vl*_Bx -'2q<_e4aYV_Y|YVʃe:tif?1`l.vdטi=*jxs~C,9AŒL ^QCdށfzr?F-7''%0l[KPېl61jd! M1p]1UK9 n$ 6OUq.uGBՔK\AmBބ9+&؃|)G3-aN9^23eCXiD=i2'8& ss)Yy\ݨ&+^FJ|ejWz`PFBLeGoD19d_o14MFT϶{=/ 37+@rx\C`nUojGL+t.T1* KُæDp5,j2f1m\,K(HHq,uIgtO;FtCG[Ȣ9)v0K4~PK^@ql0{v ݉ KP'HX]v_㧜jp.)yp<d!ok ino8 Jh/pYʏ7X,K=,s] ?()LV Kd#ɗTn"~!ylҪ"i%|>qVvnI ׾ÞB on=. 
3\S %[T ~~ؾ+c1b,(N)8-v,o-"o?JV`wufc ZZMbmbZ뀻t]][XQu|.CjdhW3 NJ˻m&ӛ:Y~We uɻhZ|hj w382l_E:52A{ S >aO &GV҄(j%+SZZH_4ǥvWk\oG,ܝxD)Mf|\:LV`2L+j˻@"­]D3!@JballSq~{+ k2\M^!-ٳ5)(.v#soҼ@tN2 :VV #C eu_I2P4!Z TiLӖ(8ORл\צ7xC;?v^12ou592QfG="6Ud}$e<'Uxۣ*_/Pk"24ւa#}) Q7|]v- 2%\֎ S#󞬸ҁqn&(*%rَ֝(<"l.8b7vIaGw1FhtV*wW9Gl?s%W_겠xh"u7TB`b@uα[ qBo6V2̉ Ӷj/TvRe85Ewh;eG2ޟ??}Zvh=gb5Z#䛗ngy"OV*_LTj 6[* 22]?U6[}ԻpݑkzV4oG-TOd[[B hfHMo49`G}|d5&HMx4 ׇ$wR|5|lӛ֧`P}wlQ\R>PJ/a GBl9hQ];;܈LG: y t n ;[ qL=XoG}&j$S/|H*Or3t_Ѥ㯩ER}bb;\|Lvtu$aQ_Bw᯸tӪ`; H&Һ-OθK3@tx̋y㇓3<}+ 2í'ݟm5^ShjWqMٌl17_LaZ]ڹY׼=KZ@Vѳ'LcES1wI^RIl>O K~qb>nUTDž-vP pz.\8Qhb29̐JH8_Jn~(e0V @͵Er썒'[m )?uQ54Co { 3?Z#Ƴ]GTg4qS-1۱^uxf]ۧ=mD?\DN|Yh@Mq|ھ(V(j_Ӏw5!AX?WLZs{ݾK!>/¿)*yA%l% Qg;vM'D;6:"ﰡfՌnfBeb R 롙{YM+ Al\^J-G0n^$"T\__O~Sxz7VTP=,ۥG tD XΈhT0mgXP' _#k_"R>9#OJ᪛_}@M{S.y쵧旙|a>ߝ-Փ}J*j+@P4 /׾ ӜypBImH2+7DgtZgBHJ!?L%n .ø^u\0"! _~f wU,sR4.Jfq{>使w/8,$8uW^pj>ER:zII:v"3=2:U /V!JRߟO&Į4&lUds"QC&CmE{d^IuRM ]EJ ;Sʲw YDy%Wd8 t0뼩 m*1q7W]=tbot SKc,6)1iT ;>Ol xLLH=VjiMؔ/g$c擑U4d?T]- VwTZvU .p8~DB,栛QIZi29ePW:"0h]A7CǾ,,i"e*Y傤ZĚuB48঳dhr^zAtMI׿#׳X֦KXI1O%>¯\7ve:q+dЖhS%5CmU!Eu;4EAqՎ)(]ޏ]۝C ƛrс?lTyBl 52 lYLe\3HվmiED+6^}A , yժ*Bn)Ku=Qo@?8BWn$E(ef-z",; з,4/r)-7?7LZ%] 4do:(;bb~ζ ~;p'T"P+a<[vkʹ6EL@XsjyH퉅,~~\s&`O(䬢T9:~1]N$Ur=Ro۾"]Z%'jE"fO#n{}-&'40Q׳=2|zks7FwF!8huaji34MpRY(=o>4'j{0o0#~[<^JGUU[-Hmh- 1iGkܯ@JAMBNr4K_6?{"~**UU7Ζ.Oc3ѐܱk}N'zO_XkQ-ǵ(r]~ͦ]{  ]/ r2e=Q6F׏efS7ę g ӂavv$S*D?kEjzOhOEhTC0+.1gomwyKZ vЕtQQ) 1':+Z+Wat6I϶F/os<GDM*gg"6긆'G\>FTvf\ {g쀗ZO)3RJ(Sx٩!xϪ4jpKUsA33$j s})if6B:O~!  
.ߝ0 07YR6-Q~ާNӉߢ_S6/GqfߥZP_"{1HF|jAe5f?ux\+!l>.1ؿ-选u@P#pqruY93zO}}uq\cGY1X>u }Se7̮шP.v!oF\͈?TGJ4Ј+\yO_\:$ M#FVjIjݟdKݦ}m@zwuXA25*v$&TBb5YЁμ.FΨH+cAv/;luKϕO18X)m~%`3^Zm^[K:f?u2|r=$g2Eb~S*ʖ+v1|6_}̵OM?<[ @Ļi}-`7pơ R( u7Z \,]ȄeGܡ7$Mr#NSױR*"''ȗI?/Rj`<̙)7z{WRͧ+cT}F=Nd)'cPB]#~N|pZWa]s9N2E]23 eBW>[gp1I`z = ή5}r v.oBM VVѧ7fˆpKDܼ}J#J1tZV)Q g3,;G&W4)m KJh:I(}=}ʚoiB,q7!To|Ii=Gp^_nX5"ʒ3:/SLbjVҲycQ4ShSP±{(Bkh8һfDv#-aU +gmkoaHoh>X{k@< / S~5 v*SD%O96-#>#%/6_ nP#͒ZJʂ*1=LtEGv׵I[݉8= =WBb.58gJ7ccZ̓-9 ,rP(~s"8r̼Xst:--'d44ZCHO(\mz OEt:ǔaWXTѳˤPc՗C*/r<">mD|#M7j~W ĸCwң@9~e6EP#؍ !a+V(A%hg%ǎL iOq0}I?Xjzwp wp мd9QLw|8f4@Kg4 7f$}g Ao4$*:*yRdʸ=K:J%sSĊ]\Oyq7hBN~(0>Jޘ5'R cemz}4,=(9,x0p:Aҙ3DaG&2U^ae2pnKy.G FCI-PtFIwǧʋuŵ:3ُضwLM6s V oHD5&GFY'{^R6?x]ogIdOFYY%,]06d̔СfG%sc%xY[yJ>m2W#5a6󅛆݂b媱઄39J_Q'sӇyouJlՔQ>MЅ-۳:R$=*eB-lC$?Lw> 8Txo[M8#ܟ?Z׾̳T3M㗽JF;B,Cq՟M 8zN 70}aާ23_%V@̤j% ]Cj\o^Qqç|qgQFofEcjyv.zG3y!xP^_f#? 1#@ZݙO))Xo~H2SnZLS+-x!yX33uK'ĮUO Û'ʚfS+vu0B}N^"dGL LL'V>1aR3bD<͖sk 7r SԱRX&N0Զ^+#S0{Jdd<R"!d\5/sI5BxC%cLPfAO+=di F2D7*j /NOy2+B7ïc՘w?_>N'=&~o\\ОMPݨ-nO/w5hT >}t+£@7R+=ui<1&qkU.ٽs0! _}ެyꆓޥ3e n0w|FE&ESxwp⹘Y JrJ`m{DZh09 S؇W.g3cfG,YŦ endstream endobj 709 0 obj << /Length1 721 /Length2 4672 /Length3 0 /Length 5264 /Filter /FlateDecode >> stream xmrg4ju :ѣ D%.E13 3ѣN"D'щ5DF^7]Zz>쳟˥A!0HDT`n `P<V2`pb 2^ `@D!c ȹ*➋`+\7"=`tBTʹ @F`N6NH@ CqA- p'0h8oM8?Ю,Z-A t4x5â>_//u'!p$ A!dM m<?wt-w p f?wrCQ t1p 0YP_z9 $N醀#VB- ]O?ڏcN;z?<50 ⯽bP? \""X7Oa#i|žc4׻9$ #d |r o Y {igKX /(lok} (V{"B-XOΞuZjuӘ'OM{$ަ,}'OίmE3;1|KyzI!TB3`eda0$3;6/3?=KqrytnEGu2rHtn%MbԈpsڧ BJ ;`e`FX(8WD"Q/]*\ұaRƨoV@~CM…bԙe3'3'>]}TJT!{QyŦr؞{ } 2%.Evpz#J, Jc9u}-*;\pf4ѫ&wϯ,3o;!@ LGl** 7$WWpYQ5Ϛ5# o9-ͰEq?sHf =R=]q'b."_{88  8ixxs=e26R>-MԜy$l$Hr*ReK\w:(_``M:ǦBԲmhR@NP >ѝU%' 13atLjgt4O ")<u@VoYA38IG 4_?)o~[u.ᅬpLw$,ttQ[ \6Qb})Ŏ72K@w>T8~5,N乁c-Tlv#$I2<-fJLZ摳lru^Pd<=.m1MMf+km(=[3/71,(m}!\.·ڔe=D{ωM^ E2 !w/3+H6= M4A'Z,Dƞi*s\F. 
ONޜՍ 6 ۹,W!#%Xfo߷90 )!Us*@>i}ޟ|Gv-z C-d9Du1N,tA po%ǞMݩvIeʾ&Ĵ6flVk;;v^-YlM.#&l^D3 KYOhlu9ZM:IQtf\jwwŶLaG|-;+qm@٧ N4 8$ZTcg3-KVn*?CmY;S^cyס8'"R\R.E(/^,j&Ny[뙧}x0Q;>vdJKo7f>!ʏs5hr\TesnX͈S)lY,W%!%?b:I9;D>b60*/꘤p&8y\/+5D 8ǒܚsϩRXKIHdݢxN m& V}ih6{͎Q z|yń'<3reh;Xy3E ="A`.jbZ_+2f%vI^ف7Ҥz3q|Po_-g畈 eWGߚ&PJ/$/32pDqDwu&:`O#4) =lp7X\~\m+r-]hQ"eG>xTh "#Ud5i\*!' xAE@}oU4gnş5Y,tl:/IZo8io'"v){gdXߟ;ٺE+u7{</&Uiѝ*v|0l (kN1S#k>w?{Y9Ay|'?8*Yf dW(jP ]~:e!=0iټ౱]PEf-|ѝ6%~R)'ryhz`v,z5bphѵ1[$1ʪ{Jb~Կ s;_<9|9t*ʝX|Jy~>M۩^L(ݡ ֣KHڪzԴDjt³ޘy&m=t9+r[lS3΄QDgy+3f^x_hiޠdd357hm Oڻ;=F!}7;\+9n"jqK5T灁?"(l ,A]Dn,,fhaP)Feɻ3o52i@{;H8dg%lo VUÜ{#gZ#K 2f}{UZIݴzEW1M;7I^_w󱛍^1cŐ=!m endstream endobj 617 0 obj << /Type /ObjStm /N 100 /First 919 /Length 4989 /Filter /FlateDecode >> stream x\YSI~W㮍)+cM6fj]:[&[%ZB@{GUez;)ws&u!j`Cg}<6\]l|JKsLKsbJt):؈kj]rcC@1Ju6ura"bgk#,:,y,N #ɀ4O"F2Gj78BF.W5ƕbHZ Vyc3c F A!ֳIx G'dVBAa>97LevEPl<0G,xY8"qa"`b ,fQb Q7&{t@Fel.ἲ%X4].l}ab ged6+z)x}I k+&0% ,RX=K\1q=L% U&DR gԊ1;`RAGqC+d,/nH Eb8R~efdV6!vÌ"{)يs( z1ZbI/Nj7"$ϧ[wnMw@!sy3g\8~r~1+"+6hMj`cX 6EݳnxBIgBbh U F@7y~~{M&/yVzoGxphkUN//ӷ&Gg?]a'G{%\{,sx]YOOπ]-zzumvu5kԫ> mM<:d7PCh{ȹuk;%PNd]Y`tr0yǿ|y&g Zg't1ӴW)fmFi?vk76ܜZ>ӃT}%S^Zw}z}s`{TN.+dj16bC1q7 6`^F 'HϥaCit [cӊaI=C<.=,3u~xғ tƙ1 C>2 P Ûup ` 08F=(֐NRm\*a3e S +6}`SbJzr.T~;vt~1#?{؂X-gOv 4yà\%cX]@i4l:v8X{œ5#\CQ҆ iaj;&5H]ɂ0!0=wVn=$`H1DK `@<H]AL:e$[נsOKp=xMdt\@"8߲!D{UsIvbDmDoqV|8 xep[E&Bܪf"47['ڌ$A8*G2k8 \-ӱ/s'(KTvң]XJ[a\k=΋$buu:2[I,$vi2è9\iֶ8J!M!]b%RYGKJtS;y%2dⓅcو2Hr% 67x\Kvh/6i8 ibpM̶(I((jNF{vdtLG;{bزv M\&rM}TuX[3lsZƜg;&rJV8S5=Q1c nnTLY+uIuV<2&9O]A!UH` =9=HHFwt>6Q*mh!_@$8qRشU< 9 B [3a&W-2@#),,e c~y|Sf}U7>VT7'>6T,jJQu(j&I.zK(,/K-Ii)79>tG94/9ܲLU}/cZW@h~qHX(Rё z]ŖHղL cl E!Y۶!M!mZAJn=k8ᘌ֭dW97S*ObW8iw CIpl&oO2܌eFRG5{~HLŬΗs y"gV'&Z^ :|O|>RdjISfJ5Yh&0%n%i'S]rKgڏgIdym(+ɬ٩6SƓth3 J` G߬N{~k"?je%ZɞgpK&uPݎ1iv1bo~G4G)3M{Nr\f]Eb>"KeUK Ro"R+0 Ak~}D{+[HښV)- GXʃN9H(`R&N.S0mKV#|1ls8X}?Ȋ?p~\?zzO=~o;OYO]P{YDzwbґߚl_|lj[o?}d {kUýÓۧN:?<5ݭɛ6 N[W=LJbĿ|'$15m C uы{Jޓz+?%q:?xB}!LY_->:y0*8.~4н9 7iCҽ] 
ݯE_'{cƆ?b8k?o܈Ռ<{=!>_z;loblj_Ka_=OBº:oKn(e=}t[P?&L7 LeS}-}s}vr;qo=G${e{ L ʤңnH_KY?{I#:o(40+q@`R|ppqH"׵G?32>&?<_ (49|+<y;9d" V{~HߎKHh_JϩAqZo>{?[#g0LK 4vogo-QYl%٬/~#?bJ3Nz!~AG6h A6k)G;>GlLf-}}'ovo5GT~ej,f,.U}p' >ףSiW7: Y'`νË#Ird-W;TlMNo>xBеl˅!-,oǸ˕峹|]RayuzZXݦ/ 3Z-_`tuI endstream endobj 743 0 obj << /Author()/Title()/Subject()/Creator(LaTeX with hyperref)/Producer(pdfTeX-1.40.19)/Keywords() /CreationDate (D:20190225210404+01'00') /ModDate (D:20190225210404+01'00') /Trapped /False /PTEX.Fullbanner (This is pdfTeX, Version 3.14159265-2.6-1.40.19 (TeX Live 2019/dev/Debian) kpathsea version 6.3.1/dev) >> endobj 711 0 obj << /Type /ObjStm /N 73 /First 633 /Length 2960 /Filter /FlateDecode >> stream xڭZ[s~ׯqs]3w*Z{%;^}DLD Pί?̀A 8vXb랞F0 "P&=*' =BO#$ yJ=XC`-MhRTp0?@'$%AУ[z0$ģ4TpX('Lrr Rp[E@K LPOA*)b6pF`,GƘ"q A}^WȅOpyGA/8 (\)_\Hx | =!+ap+Rr8!Db EE!L FUE4@;Tz!WB/T"/F(Hi@$"( F PHb܋xQ>3wE`r2 'cr`W7L~mrpE{ٶ,veudr>.4ΦglvFI%>2OYIHua=ɋhP;˸sMIe7Χp=tBgp׫Wꃇ(8&tzӋ =rz:-^ v8+!߇Vo` :{E;褅.BwWt.[o(뫻ۏGWt^T m }o\ ^o/@_WG|@=ViDq߯:>Y >|V_x>>韰L=`mcVv +{u198vh_ǹo8VɪkO `:?oafͧiu00o`pl妜a 6qp̧9߄ࡵmZK|V:b&hqzCj3A1)ya;vl|3y<(Ѱ-l-$7ds9hG7\.׵ ,dmkp͸ӌt80s$>Yת$Y$.m\5m]I16\pXI5N3Fi&Nb^Ҧ6Fv[knDwքFw6ۇ4[(|~T-eR_(u{ѩ6[n$;pIu^1.Bo(yWDB0GLɲs!AdEZu(sR:>E]rSi)RA 7sS-cOQ 2hJ;^IDm)dLVϩg$"(4~槠6y/0`lO+V%ET/2]Z7'*YMKM֝WNB3uXApel[eI*}+kT2J3%?bz:9gT-[fg=$^.3<͙,qb8XDG]8MobN<y shu10n:~a5N8sWylmQ0.O )ob6A5ՃO?f[dT-Krx1G.O8c՜NvŧDn} S%F9u?lmS`筒5dΓ*vS8q=[AʩMqaNv0ay]'ATa0E081/gEqY)p7!Nh/euZ̤pK~џeyaG!NQHڀF~'L~j԰H09*"0 ө#!6qfq8B!pg:Iciu0,Nq' L8{,zk.t6aX˹ lR'4)[^uW@<8Gw ] /Length 1767 /Filter /FlateDecode >> stream x%KlUEw(m4Zrz)m(7tn $3`gEOs` a0 -y1+UZriO66>̗-| _l݀wE08dDiՃ)ӀOa%v0g{CjJytc}P+JPV P ր ,ir `@Q6ڐ뀶-ͷrKm N v} T[͛yH mzpP(8@7 ZzIw"g` 0 }rL>KTn\pGeټY)uj.i0fdָD7̮&JE(Q*JE6*gf*J͖*FDf"jp_kfo+j!pDc[?:q-*y^o3z dzuX.b]ĺu~dv pͳ!{8q2"WD4ُH1,}rVTSdـQ*cDLj_>#"2\5}O7AVwD/ԋh/ q Kė/_E1b,g`-ql;otP֪FӝA T^ZJPg f='t ǥ(ϘFH+`ow,bl9&֋:A8J۵93 y0..np8KBtܫ$9PΊ7ǤR4b[*_YYE-qO_St/|HфMZ\є+fpEѬyhΊ=+d ݟq7pP4CLV Ș Ȩ}F3j Ϩ}F3sпEjQg>g>W? Yo$Vw#! 
endstream endobj startxref 436357 %%EOF clue/inst/doc/clue.Rnw0000644000175100001440000016521512734170652014403 0ustar hornikusers\documentclass[fleqn]{article} \usepackage[round,longnamesfirst]{natbib} \usepackage{graphicx,keyval,hyperref,doi} \newcommand\argmin{\mathop{\mathrm{arg min}}} \newcommand\trace{\mathop{\mathrm{tr}}} \newcommand\R{{\mathbb{R}}} \newcommand{\pkg}[1]{{\normalfont\fontseries{b}\selectfont #1}} \newcommand{\sQuote}[1]{`{#1}'} \newcommand{\dQuote}[1]{``{#1}''} \let\code=\texttt \newcommand{\file}[1]{\sQuote{\textsf{#1}}} \newcommand{\class}[1]{\code{"#1"}} \SweaveOpts{strip.white=true} \AtBeginDocument{\setkeys{Gin}{width=0.6\textwidth}} \date{2007-06-28} \title{A CLUE for CLUster Ensembles} \author{Kurt Hornik} %% \VignetteIndexEntry{CLUster Ensembles} \sloppy{} \begin{document} \maketitle \begin{abstract} Cluster ensembles are collections of individual solutions to a given clustering problem which are useful or necessary to consider in a wide range of applications. The R package~\pkg{clue} provides an extensible computational environment for creating and analyzing cluster ensembles, with basic data structures for representing partitions and hierarchies, and facilities for computing on these, including methods for measuring proximity and obtaining consensus and ``secondary'' clusterings. \end{abstract} <>= options(width = 60) library("clue") @ % \section{Introduction} \label{sec:introduction} \emph{Cluster ensembles} are collections of clusterings, which are all of the same ``kind'' (e.g., collections of partitions, or collections of hierarchies), of a set of objects. Such ensembles can be obtained, for example, by varying the (hyper)parameters of a ``base'' clustering algorithm, by resampling or reweighting the set of objects, or by employing several different base clusterers. 
Questions of ``agreement'' in cluster ensembles, and obtaining ``consensus'' clusterings from it, have been studied in several scientific communities for quite some time now. A special issue of the Journal of Classification was devoted to ``Comparison and Consensus of Classifications'' \citep{cluster:Day:1986} almost two decades ago. The recent popularization of ensemble methods such as Bayesian model averaging \citep{cluster:Hoeting+Madigan+Raftery:1999}, bagging \citep{cluster:Breiman:1996} and boosting \citep{cluster:Friedman+Hastie+Tibshirani:2000}, typically in a supervised leaning context, has also furthered the research interest in using ensemble methods to improve the quality and robustness of cluster solutions. Cluster ensembles can also be utilized to aggregate base results over conditioning or grouping variables in multi-way data, to reuse existing knowledge, and to accommodate the needs of distributed computing, see e.g.\ \cite{cluster:Hornik:2005a} and \cite{cluster:Strehl+Ghosh:2003a} for more information. Package~\pkg{clue} is an extension package for R~\citep{cluster:R:2005} providing a computational environment for creating and analyzing cluster ensembles. In Section~\ref{sec:structures+algorithms}, we describe the underlying data structures, and the functionality for measuring proximity, obtaining consensus clusterings, and ``secondary'' clusterings. Four examples are discussed in Section~\ref{sec:examples}. Section~\ref{sec:outlook} concludes the paper. A previous version of this manuscript was published in the \emph{Journal of Statistical Software} \citep{cluster:Hornik:2005b}. \section{Data structures and algorithms} \label{sec:structures+algorithms} \subsection{Partitions and hierarchies} Representations of clusterings of objects greatly vary across the multitude of methods available in R packages. 
For example, the class ids (``cluster labels'') for the results of \code{kmeans()} in base package~\pkg{stats}, \code{pam()} in recommended package~\pkg{cluster}~\citep{cluster:Rousseeuw+Struyf+Hubert:2005, cluster:Struyf+Hubert+Rousseeuw:1996}, and \code{Mclust()} in package~\pkg{mclust}~\citep{cluster:Fraley+Raftery+Wehrens:2005, cluster:Fraley+Raftery:2003}, are available as components named \code{cluster}, \code{clustering}, and \code{classification}, respectively, of the R objects returned by these functions. In many cases, the representations inherit from suitable classes. (We note that for versions of R prior to 2.1.0, \code{kmeans()} only returned a ``raw'' (unclassed) result, which was changed alongside the development of \pkg{clue}.) We deal with this heterogeneity of representations by providing getters for the key underlying data, such as the number of objects from which a clustering was obtained, and predicates, e.g.\ for determining whether an R object represents a partition of objects or not. These getters, such as \code{n\_of\_objects()}, and predicates are implemented as S3 generics, so that there is a \emph{conceptual}, but no formal class system underlying the predicates. Support for classed representations can easily be added by providing S3 methods. \subsubsection{Partitions} The partitions considered in \pkg{clue} are possibly soft (``fuzzy'') partitions, where for each object~$i$ and class~$j$ there is a non-negative number~$\mu_{ij}$ quantifying the ``belongingness'' or \emph{membership} of object~$i$ to class~$j$, with $\sum_j \mu_{ij} = 1$. For hard (``crisp'') partitions, all $\mu_{ij}$ are in $\{0, 1\}$. We can gather the $\mu_{ij}$ into the \emph{membership matrix} $M = [\mu_{ij}]$, where rows correspond to objects and columns to classes. The \emph{number of classes} of a partition, computed by function \code{n\_of\_classes()}, is the number of $j$ for which $\mu_{ij} > 0$ for at least one object~$i$. 
This may be less than the number of ``available'' classes, corresponding to the number of columns in a membership matrix representing the partition. The predicate functions \code{is.cl\_partition()}, \code{is.cl\_hard\_partition()}, and \code{is.cl\_soft\_partition()} are used to indicate whether R objects represent partitions of objects of the respective kind, with hard partitions as characterized above (all memberships in $\{0, 1\}$). (Hence, ``fuzzy clustering'' algorithms can in principle also give a hard partition.) \code{is.cl\_partition()} and \code{is.cl\_hard\_partition()} are generic functions; \code{is.cl\_soft\_partition()} gives true iff \code{is.cl\_partition()} is true and \code{is.cl\_hard\_partition()} is false. For R objects representing partitions, function \code{cl\_membership()} computes an R object with the membership values, currently always as a dense membership matrix with additional attributes. This is obviously rather inefficient for computations on hard partitions; we are planning to add ``canned'' sparse representations (using the vector of class ids) in future versions. Function \code{as.cl\_membership()} can be used for coercing \dQuote{raw} class ids (given as atomic vectors) or membership values (given as numeric matrices) to membership objects. Function \code{cl\_class\_ids()} determines the class ids of a partition. For soft partitions, the class ids returned are those of the \dQuote{nearest} hard partition obtained by taking the class ids of the (first) maximal membership values. Note that the cardinality of the set of the class ids may be less than the number of classes in the (soft) partition. Many partitioning methods are based on \emph{prototypes} (``centers''). In typical cases, these are points~$p_j$ in the same feature space the measurements~$x_i$ on the objects~$i$ to be partitioned are in, so that one can measure distance between objects and prototypes, and e.g.\ classify objects to their closest prototype. 
Such partitioning methods can also induce partitions of the entire feature space (rather than ``just'' the set of objects to be partitioned). Currently, package \pkg{clue} has only minimal support for this ``additional'' structure, providing a \code{cl\_prototypes()} generic for extracting the prototypes, and is mostly focused on computations on partitions which are based on their memberships. Many algorithms resulting in partitions of a given set of objects can be taken to induce a partition of the underlying feature space for the measurements on the objects, so that class memberships for ``new'' objects can be obtained from the induced partition. Examples include partitions based on assigning objects to their ``closest'' prototypes, or providing mixture models for the distribution of objects in feature space. Package~\pkg{clue} provides a \code{cl\_predict()} generic for predicting the class memberships of new objects (if possible). Function \code{cl\_fuzziness()} computes softness (fuzziness) measures for (ensembles) of partitions. Built-in measures are the partition coefficient \label{PC} and partition entropy \citep[e.g.,][]{cluster:Bezdek:1981}, with an option to normalize in a way that hard partitions and the ``fuzziest'' possible partition (where all memberships are the same) get fuzziness values of zero and one, respectively. Note that this normalization differs from ``standard'' ones in the literature. In the sequel, we shall also use the concept of the \emph{co-membership matrix} $C(M) = M M'$, where $'$ denotes matrix transposition, of a partition. For hard partitions, an entry $c_{ij}$ of $C(M)$ is 1 iff the corresponding objects $i$ and $j$ are in the same class, and 0 otherwise. 
\subsubsection{Hierarchies} The hierarchies considered in \pkg{clue} are \emph{total indexed hierarchies}, also known as \emph{$n$-valued trees}, and hence correspond in a one-to-one manner to \emph{ultrametrics} (distances $u_{ij}$ between pairs of objects $i$ and $j$ which satisfy the ultrametric constraint $u_{ij} \le \max(u_{ik}, u_{jk})$ for all triples $i$, $j$, and $k$). See e.g.~\citet[Pages~69--71]{cluster:Gordon:1999}. Function \code{cl\_ultrametric(x)} computes the associated ultrametric from an R object \code{x} representing a hierarchy of objects. If \code{x} is not an ultrametric, function \code{cophenetic()} in base package~\pkg{stats} is used to obtain the ultrametric (also known as cophenetic) distances from the hierarchy, which in turn by default calls the S3 generic \code{as.hclust()} (also in \pkg{stats}) on the hierarchy. Support for classes which represent hierarchies can thus be added by providing \code{as.hclust()} methods for this class. In R~2.1.0 or better (again as part of the work on \pkg{clue}), \code{cophenetic} is an S3 generic as well, and one can also more directly provide methods for this if necessary. In addition, there is a generic function \code{as.cl\_ultrametric()} which can be used for coercing \emph{raw} (non-classed) ultrametrics, represented as numeric vectors (of the lower-half entries) or numeric matrices, to ultrametric objects. Finally, the generic predicate function \code{is.cl\_hierarchy()} is used to determine whether an R object represents a hierarchy or not. Ultrametric objects can also be coerced to classes~\class{dendrogram} and \class{hclust} (from base package~\pkg{stats}), and hence in particular use the \code{plot()} methods for these classes. By default, plotting an ultrametric object uses the plot method for dendrograms.
Obtaining a hierarchy on a given set of objects can be thought of as transforming the pairwise dissimilarities between the objects (which typically do not yet satisfy the ultrametric constraints) into an ultrametric. Ideally, this ultrametric should be as close as possible to the dissimilarities. In some important cases, explicit solutions are possible (e.g., ``standard'' hierarchical clustering with single or complete linkage gives the optimal ultrametric dominated by or dominating the dissimilarities, respectively). On the other hand, the problem of finding the closest ultrametric in the least squares sense is known to be NP-hard \citep{cluster:Krivanek+Moravek:1986,cluster:Krivanek:1986}. One important class of heuristics for finding least squares fits is based on iterative projection on convex sets of constraints \citep{cluster:Hubert+Arabie:1995}. \label{SUMT} Function \code{ls\_fit\_ultrametric()} follows \cite{cluster:DeSoete:1986} to use an SUMT \citep[Sequential Unconstrained Minimization Technique;][]{cluster:Fiacco+McCormick:1968} approach in turn simplifying the suggestions in \cite{cluster:Carroll+Pruzansky:1980}. Let $L(u)$ be the function to be minimized over all $u$ in some constrained set $\mathcal{U}$---in our case, $L(u) = \sum (d_{ij}-u_{ij})^2$ is the least squares criterion, and $\mathcal{U}$ is the set of all ultrametrics $u$. One iteratively minimizes $L(u) + \rho_k P(u)$, where $P(u)$ is a non-negative function penalizing violations of the constraints such that $P(u)$ is zero iff $u \in \mathcal{U}$. The $\rho$ values are increased according to the rule $\rho_{k+1} = q \rho_k$ for some constant $q > 1$, until convergence is obtained in the sense that e.g.\ the Euclidean distance between successive solutions $u_k$ and $u_{k+1}$ is small enough. Optionally, the final $u_k$ is then suitably projected onto $\mathcal{U}$. 
For \code{ls\_fit\_ultrametric()}, we obtain the starting value $u_0$ by \dQuote{random shaking} of the given dissimilarity object, and use the penalty function $P(u) = \sum_{\Omega} (u_{ij} - u_{jk}) ^ 2$, where $\Omega$ contains all triples $i, j, k$ for which $u_{ij} \le \min(u_{ik}, u_{jk})$ and $u_{ik} \ne u_{jk}$, i.e., for which $u$ violates the ultrametric constraints. The unconstrained minimizations are carried out using either \code{optim()} or \code{nlm()} in base package~\pkg{stats}, with analytic gradients given in \cite{cluster:Carroll+Pruzansky:1980}. This ``works'', even though we note that $P$ is not even a continuous function, which seems to have gone unnoticed in the literature! (Consider an ultrametric $u$ for which $u_{ij} = u_{ik} < u_{jk}$ for some $i, j, k$ and define $u(\delta)$ by changing the $u_{ij}$ to $u_{ij} + \delta$. For $u$, both $(i,j,k)$ and $(j,i,k)$ are in the violation set $\Omega$, whereas for all $\delta$ sufficiently small, only $(j,i,k)$ is in the violation set for $u(\delta)$. Hence, $\lim_{\delta\to 0} P(u(\delta)) = P(u) + (u_{ij} - u_{ik})^2$. This shows that $P$ is discontinuous at all non-constant $u$ with duplicated entries. On the other hand, it is continuously differentiable at all $u$ with unique entries.) Hence, we need to turn off checking analytical gradients when using \code{nlm()} for minimization. The default optimization using conjugate gradients should work reasonably well for medium to large size problems. For \dQuote{small} ones, using \code{nlm()} is usually faster. Note that the number of ultrametric constraints is of the order $n^3$, suggesting to use the SUMT approach in favor of \code{constrOptim()} in \pkg{stats}. It should be noted that the SUMT approach is a heuristic which can not be guaranteed to find the global minimum. Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the base algorithm.
\subsubsection{Extensibility} The methods provided in package~\pkg{clue} handle the partitions and hierarchies obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}~\citep{cluster:Hornik+Hothorn+Karatzoglou:2006}, \pkg{cba}~\citep{cluster:Buchta+Hahsler:2005}, \pkg{cclust}~\citep{cluster:Dimitriadou:2005}, \pkg{cluster}, \pkg{e1071}~\citep{cluster:Dimitriadou+Hornik+Leisch:2005}, \pkg{flexclust}~\citep{cluster:Leisch:2006a}, \pkg{flexmix}~\citep{cluster:Leisch:2004}, \pkg{kernlab}~\citep{cluster:Karatzoglou+Smola+Hornik:2004}, and \pkg{mclust} (and of course, \pkg{clue} itself). Extending support to other packages is straightforward, provided that clusterings are instances of classes. Suppose e.g.\ that a package has a function \code{glvq()} for ``generalized'' (i.e., non-Euclidean) Learning Vector Quantization which returns an object of class~\class{glvq}, in turn being a list with component \code{class\_ids} containing the class ids. To integrate this into the \pkg{clue} framework, all that is necessary is to provide the following methods. <<>>= cl_class_ids.glvq <- function(x) as.cl_class_ids(x$class_ids) is.cl_partition.glvq <- function(x) TRUE is.cl_hard_partition.glvq <- function(x) TRUE @ % $ \subsection{Cluster ensembles} Cluster ensembles are realized as lists of clusterings with additional class information. All clusterings in an ensemble must be of the same ``kind'' (i.e., either all partitions as known to \code{is.cl\_partition()}, or all hierarchies as known to \code{is.cl\_hierarchy()}, respectively), and have the same number of objects. If all clusterings are partitions, the list realizing the ensemble has class~\class{cl\_partition\_ensemble} and inherits from \class{cl\_ensemble}; if all clusterings are hierarchies, it has class~\class{cl\_hierarchy\_ensemble} and inherits from \class{cl\_ensemble}. 
Empty ensembles cannot be categorized according to the kind of clusterings they contain, and hence only have class~\class{cl\_ensemble}. Function \code{cl\_ensemble()} creates a cluster ensemble object from clusterings given either one-by-one, or as a list passed to the \code{list} argument. As unclassed lists could be used to represent single clusterings (in particular for results from \code{kmeans()} in versions of R prior to 2.1.0), we prefer not to assume that an unnamed given list is a list of clusterings. \code{cl\_ensemble()} verifies that all given clusterings are of the same kind, and all have the same number of objects. (By the notion of cluster ensembles, we should in principle verify that the clusterings come from the \emph{same} objects, which of course is not always possible.) The list representation makes it possible to use \code{lapply()} for computations on the individual clusterings in (i.e., the components of) a cluster ensemble. Available methods for cluster ensembles include those for subscripting, \code{c()}, \code{rep()}, \code{print()}, and \code{unique()}, where the last is based on a \code{unique()} method for lists added in R~2.1.1, and makes it possible to find unique and duplicated elements in cluster ensembles. The elements of the ensemble can be tabulated using \code{cl\_tabulate()}. Function \code{cl\_boot()} generates cluster ensembles with bootstrap replicates of the results of applying a \dQuote{base} clustering algorithm to a given data set. Currently, this is a rather simple-minded function with limited applicability, and mostly useful for studying the effect of (uncontrolled) random initializations of fixed-point partitioning algorithms such as \code{kmeans()} or \code{cmeans()} in package~\pkg{e1071}. 
To study the effect of varying control parameters or explicitly providing random starting values, the respective cluster ensemble has to be generated explicitly (most conveniently by using \code{replicate()} to create a list \code{lst} of suitable instances of clusterings obtained by the base algorithm, and using \code{cl\_ensemble(list = lst)} to create the ensemble). Resampling the training data is possible for base algorithms which can predict the class memberships of new data using \code{cl\_predict} (e.g., by classifying the out-of-bag data to their closest prototype). In fact, we believe that for unsupervised learning methods such as clustering, \emph{reweighting} is conceptually superior to resampling, and have therefore recently enhanced package~\pkg{e1071} to provide an implementation of weighted fuzzy $c$-means, and package~\pkg{flexclust} contains an implementation of weighted $k$-means. We are currently experimenting with interfaces for providing ``direct'' support for reweighting via \code{cl\_boot()}. \subsection{Cluster proximities} \subsubsection{Principles} Computing dissimilarities and similarities (``agreements'') between clusterings of the same objects is a key ingredient in the analysis of cluster ensembles. The ``standard'' data structures available for such proximity data (measures of similarity or dissimilarity) are classes~\class{dist} and \class{dissimilarity} in package~\pkg{cluster} (which basically, but not strictly, extends \class{dist}), and are both not entirely suited to our needs. First, they are confined to \emph{symmetric} dissimilarity data. Second, they do not provide enough reflectance. We also note that the Bioconductor package~\pkg{graph}~\citep{cluster:Gentleman+Whalen:2005} contains an efficient subscript method for objects of class~\class{dist}, but returns a ``raw'' matrix for row/column subscripting. For package~\pkg{clue}, we use the following approach. 
There are classes for symmetric and (possibly) non-symmetric proximity data (\class{cl\_proximity} and \class{cl\_cross\_proximity}), which, in addition to holding the numeric data, also contain a description ``slot'' (attribute), currently a character string, as a first approximation to providing more reflectance. Internally, symmetric proximity data are stored as the lower diagonal proximity values in a numeric vector (in row-major order), i.e., the same way as objects of class~\class{dist}; a \code{self} attribute can be used for diagonal values (in case some of these are non-zero). Symmetric proximity objects can be coerced to dense matrices using \code{as.matrix()}. It is possible to use 2-index matrix-style subscripting for symmetric proximity objects; unless this uses identical row and column indices, it results in a non-symmetric proximity object. This approach ``propagates'' to classes for symmetric and (possibly) non-symmetric cluster dissimilarity and agreement data (e.g., \class{cl\_dissimilarity} and \class{cl\_cross\_dissimilarity} for dissimilarity data), which extend the respective proximity classes. Ultrametric objects are implemented as symmetric proximity objects with a dissimilarity interpretation so that self-proximities are zero, and inherit from classes~\class{cl\_dissimilarity} and \class{cl\_proximity}. Providing reflectance is far from optimal. For example, if \code{s} is a similarity object (with cluster agreements), \code{1 - s} is a dissimilarity one, but the description is preserved unchanged. This issue could be addressed by providing high-level functions for transforming proximities. \label{synopsis} Cluster dissimilarities are computed via \code{cl\_dissimilarity()} with synopsis \code{cl\_dissimilarity(x, y = NULL, method = "euclidean")}, where \code{x} and \code{y} are cluster ensemble objects or coercible to such, or \code{NULL} (\code{y} only).
If \code{y} is \code{NULL}, the return value is an object of class~\class{cl\_dissimilarity} which contains the dissimilarities between all pairs of clusterings in \code{x}. Otherwise, it is an object of class~\class{cl\_cross\_dissimilarity} with the dissimilarities between the clusterings in \code{x} and the clusterings in \code{y}. Formal argument \code{method} is either a character string specifying one of the built-in methods for computing dissimilarity, or a function to be taken as a user-defined method, making it reasonably straightforward to add methods. Function \code{cl\_agreement()} has the same interface as \code{cl\_dissimilarity()}, returning cluster similarity objects with respective classes~\class{cl\_agreement} and \class{cl\_cross\_agreement}. Built-in methods for computing dissimilarities may coincide (in which case they are transforms of each other), but do not necessarily do so, as there typically are no canonical transformations. E.g., according to needs and scientific community, agreements might be transformed to dissimilarities via $d = - \log(s)$ or the square root thereof \citep[e.g.,][]{cluster:Strehl+Ghosh:2003b}, or via $d = 1 - s$. \subsubsection{Partition proximities} When assessing agreement or dissimilarity of partitions, one needs to consider that the class ids may be permuted arbitrarily without changing the underlying partitions. For membership matrices~$M$, permuting class ids amounts to replacing $M$ by $M \Pi$, where $\Pi$ is a suitable permutation matrix. We note that the co-membership matrix $C(M) = MM'$ is unchanged by these transformations; hence, proximity measures based on co-occurrences, such as the Katz-Powell \citep{cluster:Katz+Powell:1953} or Rand \citep{cluster:Rand:1971} indices, do not explicitly need to adjust for possible re-labeling. 
The same is true for measures based on the ``confusion matrix'' $M' \tilde{M}$ of two membership matrices $M$ and $\tilde{M}$ which are invariant under permutations of rows and columns, such as the Normalized Mutual Information (NMI) measure introduced in \cite{cluster:Strehl+Ghosh:2003a}. Other proximity measures need to find permutations so that the classes are optimally matched, which of course in general requires exhaustive search through all $k!$ possible permutations, where $k$ is the (common) number of classes in the partitions, and thus will typically be prohibitively expensive. Fortunately, in some important cases, optimal matchings can be determined very efficiently. We explain this in detail for ``Euclidean'' partition dissimilarity and agreement (which in fact is the default measure used by \code{cl\_dissimilarity()} and \code{cl\_agreement()}). Euclidean partition dissimilarity \citep{cluster:Dimitriadou+Weingessel+Hornik:2002} is defined as \begin{displaymath} d(M, \tilde{M}) = \min\nolimits_\Pi \| M - \tilde{M} \Pi \| \end{displaymath} where the minimum is taken over all permutation matrices~$\Pi$, $\|\cdot\|$ is the Frobenius norm (so that $\|Y\|^2 = \trace(Y'Y)$), and $n$ is the (common) number of objects in the partitions. As $\| M - \tilde{M} \Pi \|^2 = \trace(M'M) - 2 \trace(M'\tilde{M}\Pi) + \trace(\Pi'\tilde{M}'\tilde{M}\Pi) = \trace(M'M) - 2 \trace(M'\tilde{M}\Pi) + \trace(\tilde{M}'\tilde{M})$, we see that minimizing $\| M - \tilde{M} \Pi \|^2$ is equivalent to maximizing $\trace(M'\tilde{M}\Pi) = \sum_{i,k} \mu_{ik} \tilde{\mu}_{i,\pi(k)}$, which for hard partitions is the number of objects with the same label in the partitions given by $M$ and $\tilde{M}\Pi$. Finding the optimal $\Pi$ is thus recognized as an instance of the \emph{linear sum assignment problem} (LSAP, also known as the weighted bipartite graph matching problem).
The LSAP can be solved by linear programming, e.g., using Simplex-style primal algorithms as done by function~\code{lp.assign()} in package~\pkg{lpSolve}~\citep{cluster:Buttrey:2005}, but primal-dual algorithms such as the so-called Hungarian method can be shown to find the optimum in time $O(k^3)$ \citep[e.g.,][]{cluster:Papadimitriou+Steiglitz:1982}. Available published implementations include TOMS 548 \citep{cluster:Carpaneto+Toth:1980}, which however is restricted to integer weights and $k < 131$. One can also transform the LSAP into a network flow problem, and use e.g.~RELAX-IV \citep{cluster:Bertsekas+Tseng:1994} for solving this, as is done in package~\pkg{optmatch}~\citep{cluster:Hansen:2005}. In package~\pkg{clue}, we use an efficient C implementation of the Hungarian algorithm kindly provided to us by Walter B\"ohm, which has been found to perform very well across a wide range of problem sizes. \cite{cluster:Gordon+Vichi:2001} use a variant of Euclidean dissimilarity (``GV1 dissimilarity'') which is based on the sum of the squared difference of the memberships of matched (non-empty) classes only, discarding the unmatched ones (see their Example~2). This results in a measure which is discontinuous over the space of soft partitions with arbitrary numbers of classes. The partition agreement measures ``angle'' and ``diag'' (maximal cosine of angle between the memberships, and maximal co-classification rate, where both maxima are taken over all column permutations of the membership matrices) are based on solving the same LSAP as for Euclidean dissimilarity. Finally, Manhattan partition dissimilarity is defined as the minimal sum of the absolute differences of $M$ and all column permutations of $\tilde{M}$, and can again be computed efficiently by solving an LSAP. 
For hard partitions, both Manhattan and squared Euclidean dissimilarity give twice the \emph{transfer distance} \citep{cluster:Charon+Denoeud+Guenoche:2006}, which is the minimum number of objects that must be removed so that the implied partitions (restrictions to the remaining objects) are identical. This is also known as the \emph{$R$-metric} in \cite{cluster:Day:1981}, i.e., the number of augmentations and removals of single objects needed to transform one partition into the other, and the \emph{partition-distance} in \cite{cluster:Gusfield:2002}. Note when assessing proximity that agreements for soft partitions are always (and quite often considerably) lower than the agreements for the corresponding nearest hard partitions, unless the agreement measures are based on the latter anyways (as currently done for Rand, Katz-Powell, and NMI). Package~\pkg{clue} provides additional agreement measures, such as the Jaccard and Fowlkes-Mallows \citep[quite often incorrectly attributed to \cite{cluster:Wallace:1983}]{cluster:Fowlkes+Mallows:1983a} indices, and dissimilarity measures such as the ``symdiff'' and Rand distances (the latter is proportional to the metric of \cite{cluster:Mirkin:1996}) and the metrics discussed in \cite{cluster:Boorman+Arabie:1972}. One could easily add more proximity measures, such as the ``Variation of Information'' \citep{cluster:Meila:2003}. However, all these measures are rigorously defined for hard partitions only. To see why extensions to soft partitions are far from straightforward, consider e.g.\ measures based on the confusion matrix. Its entries count the cardinality of certain intersections of sets. \label{fuzzy} In a fuzzy context for soft partitions, a natural generalization would be using fuzzy cardinalities (i.e., sums of membership values) of fuzzy intersections instead.
There are many possible choices for the latter, with the product of the membership values (corresponding to employing the confusion matrix also in the fuzzy case) one of them, but the minimum instead of the product being the ``usual'' choice. A similar point can be made for co-occurrences of soft memberships. We are not aware of systematic investigations of these extension issues. \subsubsection{Hierarchy proximities} Available built-in dissimilarity measures for hierarchies include \emph{Euclidean} (again, the default measure used by \code{cl\_dissimilarity()}) and Manhattan dissimilarity, which are simply the Euclidean (square root of the sum of squared differences) and Manhattan (sum of the absolute differences) dissimilarities between the associated ultrametrics. Cophenetic dissimilarity is defined as $1 - c^2$, where $c$ is the cophenetic correlation coefficient \citep{cluster:Sokal+Rohlf:1962}, i.e., the Pearson product-moment correlation between the ultrametrics. Gamma dissimilarity is the rate of inversions between the associated ultrametrics $u$ and $v$ (i.e., the rate of pairs $(i,j)$ and $(k,l)$ for which $u_{ij} < u_{kl}$ and $v_{ij} > v_{kl}$). This measure is a linear transformation of Kruskal's~$\gamma$. Finally, symdiff dissimilarity is the cardinality of the symmetric set difference of the sets of classes (hierarchies in the strict sense) induced by the dendrograms. Associated agreement measures are obtained by suitable transformations of the dissimilarities~$d$; for Euclidean proximities, we prefer to use $1 / (1 + d)$ rather than e.g.\ $\exp(-d)$. One should note that whereas cophenetic and gamma dissimilarities are invariant to linear transformations, Euclidean and Manhattan ones are not. Hence, if only the relative ``structure'' of the dendrograms is of interest, these dissimilarities should only be used after transforming the ultrametrics to a common range of values (e.g., to $[0,1]$). 
\subsection{Consensus clusterings} Consensus clusterings ``synthesize'' the information in the elements of a cluster ensemble into a single clustering. There are three main approaches to obtaining consensus clusterings \citep{cluster:Hornik:2005a,cluster:Gordon+Vichi:2001}: in the \emph{constructive} approach, one specifies a way to construct a consensus clustering. In the \emph{axiomatic} approach, emphasis is on the investigation of existence and uniqueness of consensus clusterings characterized axiomatically. The \emph{optimization} approach formalizes the natural idea of describing consensus clusterings as the ones which ``optimally represent the ensemble'' by providing a criterion to be optimized over a suitable set $\mathcal{C}$ of possible consensus clusterings. If $d$ is a dissimilarity measure and $C_1, \ldots, C_B$ are the elements of the ensemble, one can e.g.\ look for solutions of the problem \begin{displaymath} \sum\nolimits_{b=1}^B w_b d(C, C_b) ^ p \Rightarrow \min\nolimits_{C \in \mathcal{C}}, \end{displaymath} for some $p \ge 0$, i.e., as clusterings~$C^*$ minimizing weighted average dissimilarity powers of order~$p$. Analogously, if a similarity measure is given, one can look for clusterings maximizing weighted average similarity powers. Following \cite{cluster:Gordon+Vichi:1998}, such a clustering $C^*$ is referred to as (weighted) \emph{median} or \emph{medoid} clustering if $p = 1$ and the optimum is sought over the set of all possible base clusterings, or the set $\{ C_1, \ldots, C_B \}$ of the base clusterings, respectively. For $p = 2$, we have \emph{least squares} consensus clusterings (generalized means). For computing consensus clusterings, package~\pkg{clue} provides function \code{cl\_consensus()} with synopsis \code{cl\_consensus(x, method = NULL, weights = 1, control = list())}.
This allows (similar to the functions for computing cluster proximities, see Section~\ref{synopsis} on Page~\pageref{synopsis}) argument \code{method} to be a character string specifying one of the built-in methods discussed below, or a function to be taken as a user-defined method (taking an ensemble, the case weights, and a list of control parameters as its arguments), again making it reasonably straightforward to add methods. In addition, function~\code{cl\_medoid()} can be used for obtaining medoid partitions (using, in principle, arbitrary dissimilarities). Modulo possible differences in the case of ties, this gives the same results as (the medoid obtained by) \code{pam()} in package~\pkg{cluster}. If all elements of the ensemble are partitions, package~\pkg{clue} provides algorithms for computing soft least squares consensus partitions for weighted Euclidean, GV1 and co-membership dissimilarities. Let $M_1, \ldots, M_B$ and $M$ denote the membership matrices of the elements of the ensemble and their sought least squares consensus partition, respectively. For Euclidean dissimilarity, we need to find \begin{displaymath} \sum_b w_b \min\nolimits_{\Pi_b} \| M - M_b \Pi_b \|^2 \Rightarrow \min\nolimits_M \end{displaymath} over all membership matrices (i.e., stochastic matrices) $M$, or equivalently, \begin{displaymath} \sum_b w_b \| M - M_b \Pi_b \|^2 \Rightarrow \min\nolimits_{M, \Pi_1, \ldots, \Pi_B} \end{displaymath} over all $M$ and permutation matrices $\Pi_1, \ldots, \Pi_B$. Now fix the $\Pi_b$ and let $\bar{M} = s^{-1} \sum_b w_b M_b \Pi_b$ be the weighted average of the $M_b \Pi_b$, where $s = \sum_b w_b$. 
Then \begin{eqnarray*} \lefteqn{\sum_b w_b \| M - M_b \Pi_b \|^2} \\ &=& \sum_b w_b (\|M\|^2 - 2 \trace(M' M_b \Pi_b) + \|M_b\Pi_b\|^2) \\ &=& s \|M\|^2 - 2 s \trace(M' \bar{M}) + \sum_b w_b \|M_b\|^2 \\ &=& s (\|M - \bar{M}\|^2) + \sum_b w_b \|M_b\|^2 - s \|\bar{M}\|^2 \end{eqnarray*} Thus, as already observed in \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} and \cite{cluster:Gordon+Vichi:2001}, for fixed permutations $\Pi_b$ the optimal soft $M$ is given by $\bar{M}$. The optimal permutations can be found by minimizing $- s \|\bar{M}\|^2$, or equivalently, by maximizing \begin{displaymath} s^2 \|\bar{M}\|^2 = \sum_{\beta, b} w_\beta w_b \trace(\Pi_\beta'M_\beta'M_b\Pi_b). \end{displaymath} With $U_{\beta,b} = w_\beta w_b M_\beta' M_b$ we can rewrite the above as \begin{displaymath} \sum_{\beta, b} w_\beta w_b \trace(\Pi_\beta'M_\beta'M_b\Pi_b) = \sum_{\beta,b} \sum_{j=1}^k [U_{\beta,b}]_{\pi_\beta(j), \pi_b(j)} =: \sum_{j=1}^k c_{\pi_1(j), \ldots, \pi_B(j)} \end{displaymath} This is an instance of the \emph{multi-dimensional assignment problem} (MAP), which, contrary to the LSAP, is known to be NP-hard \citep[e.g., via reduction to 3-DIMENSIONAL MATCHING,][]{cluster:Garey+Johnson:1979}, and can e.g.\ be approached using randomized parallel algorithms \citep{cluster:Oliveira+Pardalos:2004}. Branch-and-bound approaches suggested in the literature \citep[e.g.,][]{cluster:Grundel+Oliveira+Pardalos:2005} are unfortunately computationally infeasible for ``typical'' sizes of cluster ensembles ($B \ge 20$, maybe even in the hundreds). Package~\pkg{clue} provides two heuristics for (approximately) finding the soft least squares consensus partition for Euclidean dissimilarity. Method \code{"DWH"} of function \code{cl\_consensus()} is an extension of the greedy algorithm in \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} which is based on a single forward pass through the ensemble which in each step chooses the ``locally'' optimal $\Pi$. 
Starting with $\tilde{M}_1 = M_1$, $\tilde{M}_b$ is obtained from $\tilde{M}_{b-1}$ by optimally matching $M_b \Pi_b$ to this, and taking a weighted average of $\tilde{M}_{b-1}$ and $M_b \Pi_b$ in a way that $\tilde{M}_b$ is the weighted average of the first~$b$ $M_\beta \Pi_\beta$. This simple approach could be further enhanced via back-fitting or several passes, in essence resulting in an ``on-line'' version of method \code{"SE"}. This, in turn, is a fixed-point algorithm, which iterates between updating $M$ as the weighted average of the current $M_b \Pi_b$, and determining the $\Pi_b$ by optimally matching the current $M$ to the individual $M_b$. Finally, method \code{"GV1"} implements the fixed-point algorithm for the ``first model'' in \cite{cluster:Gordon+Vichi:2001}, which gives least squares consensus partitions for GV1 dissimilarity. In the above, we implicitly assumed that all partitions in the ensemble as well as the sought consensus partition have the same number of classes. The more general case can be dealt with through suitable ``projection'' devices. When using co-membership dissimilarity, the least squares consensus partition is determined by minimizing \begin{eqnarray*} \lefteqn{\sum_b w_b \|MM' - M_bM_b'\|^2} \\ &=& s \|MM' - \bar{C}\|^2 + \sum_b w_b \|M_bM_b'\|^2 - s \|\bar{C}\|^2 \end{eqnarray*} over all membership matrices~$M$, where now $\bar{C} = s^{-1} \sum_b C(M_b) = s^{-1} \sum_b M_bM_b'$ is the weighted average co-membership matrix of the ensemble. This corresponds to the ``third model'' in \cite{cluster:Gordon+Vichi:2001}. Method \code{"GV3"} of function \code{cl\_consensus()} provides a SUMT approach (see Section~\ref{SUMT} on Page~\pageref{SUMT}) for finding the minimum. 
We note that this strategy could more generally be applied to consensus problems of the form \begin{displaymath} \sum_b w_b \|\Phi(M) - \Phi(M_b)\|^2 \Rightarrow \min\nolimits_M, \end{displaymath} which are equivalent to minimizing $\|\Phi(B) - \bar{\Phi}\|^2$, with $\bar{\Phi}$ the weighted average of the $\Phi(M_b)$. This includes e.g.\ the case where generalized co-memberships are defined by taking the ``standard'' fuzzy intersection of co-incidences, as discussed in Section~\ref{fuzzy} on Page~\pageref{fuzzy}. Package~\pkg{clue} currently does not provide algorithms for obtaining \emph{hard} consensus partitions, as e.g.\ done in \cite{cluster:Krieger+Green:1999} using Rand proximity. It seems ``natural'' to extend the methods discussed above to include a constraint on softness, e.g., on the partition coefficient PC (see Section~\ref{PC} on Page~\pageref{PC}). For Euclidean dissimilarity, straightforward Lagrangian computations show that the constrained minima are of the form $\bar{M}(\alpha) = \alpha \bar{M} + (1 - \alpha) E$, where $E$ is the ``maximally soft'' membership with all entries equal to $1/k$, $\bar{M}$ is again the weighted average of the $M_b\Pi_b$ with the $\Pi_b$ solving the underlying MAP, and $\alpha$ is chosen such that $PC(\bar{M}(\alpha))$ equals a prescribed value. As $\alpha$ increases (even beyond one), softness of the $\bar{M}(\alpha)$ decreases. However, for $\alpha^* > 1 / (1 - k\mu^*)$, where $\mu^*$ is the minimum of the entries of $\bar{M}$, the $\bar{M}(\alpha)$ have negative entries, and are no longer feasible membership matrices. Obviously, the non-negativity constraints for the $\bar{M}(\alpha)$ eventually put restrictions on the admissible $\Pi_b$ in the underlying MAP. Thus, such a simple relaxation approach to obtaining optimal hard partitions is not feasible. 
For ensembles of hierarchies, \code{cl\_consensus()} provides a built-in method (\code{"cophenetic"}) for approximately minimizing average weighted squared Euclidean dissimilarity \begin{displaymath} \sum_b w_b \| U - U_b \|^2 \Rightarrow \min\nolimits_U \end{displaymath} over all ultrametrics~$U$, where $U_1, \ldots, U_B$ are the ultrametrics corresponding to the elements of the ensemble. This is of course equivalent to minimizing $\| U - \bar{U} \|^2$, where $\bar{U} = s^{-1} \sum_b w_b U_b$ is the weighted average of the $U_b$. The SUMT approach provided by function \code{ls\_fit\_ultrametric()} (see Section~\ref{SUMT} on Page~\pageref{SUMT}) is employed for finding the sought weighted least squares consensus hierarchy. In addition, method \code{"majority"} obtains a consensus hierarchy from an extension of the majority consensus tree of \cite{cluster:Margush+McMorris:1981}, which minimizes $L(U) = \sum_b w_b d(U_b, U)$ over all ultrametrics~$U$, where $d$ is the symmetric difference dissimilarity. Clearly, the available methods use heuristics for solving hard optimization problems, and cannot be guaranteed to find a global optimum. Standard practice would recommend to use the best solution found in ``sufficiently many'' replications of the methods. Alternative recent approaches to obtaining consensus partitions include ``Bagged Clustering'' \citep[provided by \code{bclust()} in package~\pkg{e1071}]{cluster:Leisch:1999}, the ``evidence accumulation'' framework of \cite{cluster:Fred+Jain:2002}, the NMI optimization and graph-partitioning methods in \cite{cluster:Strehl+Ghosh:2003a}, ``Bagged Clustering'' as in \cite{cluster:Dudoit+Fridlyand:2003}, and the hybrid bipartite graph formulation of \cite{cluster:Fern+Brodley:2004}. Typically, these approaches are constructive, and can easily be implemented based on the infrastructure provided by package~\pkg{clue}. Evidence accumulation amounts to standard hierarchical clustering of the average co-membership matrix. 
Procedure~BagClust1 of \cite{cluster:Dudoit+Fridlyand:2003} amounts to computing $B^{-1} \sum_b M_b\Pi_b$, where each $\Pi_b$ is determined by optimal Euclidean matching of $M_b$ to a fixed reference membership $M_0$. In the corresponding ``Bagged Clustering'' framework, $M_0$ and the $M_b$ are obtained by applying the base clusterer to the original data set and bootstrap samples from it, respectively. This is implemented as method \code{"DFBC1"} of \code{cl\_bag()} in package~\pkg{clue}. Finally, the approach of \cite{cluster:Fern+Brodley:2004} solves an LSAP for an asymmetric cost matrix based on object-by-all-classes incidences. \subsection{Cluster partitions} To investigate the ``structure'' in a cluster ensemble, an obvious idea is to start clustering the clusterings in the ensemble, resulting in ``secondary'' clusterings \citep{cluster:Gordon+Vichi:1998, cluster:Gordon:1999}. This can e.g.\ be performed by using \code{cl\_dissimilarity()} (or \code{cl\_agreement()}) to compute a dissimilarity matrix for the ensemble, and feed this into a dissimilarity-based clustering algorithm (such as \code{pam()} in package~\pkg{cluster} or \code{hclust()} in package~\pkg{stats}). (One can even use \code{cutree()} to obtain hard partitions from hierarchies thus obtained.) If prototypes (``typical clusterings'') are desired for partitions of clusterings, they can be determined post-hoc by finding suitable consensus clusterings in the classes of the partition, e.g., using \code{cl\_consensus()} or \code{cl\_medoid()}. Package~\pkg{clue} additionally provides \code{cl\_pclust()} for direct prototype-based partitioning based on minimizing criterion functions of the form $\sum w_b u_{bj}^m d(x_b, p_j)^e$, the sum of the case-weighted membership-weighted $e$-th powers of the dissimilarities between the elements~$x_b$ of the ensemble and the prototypes~$p_j$, for suitable dissimilarities~$d$ and exponents~$e$. 
(The underlying feature spaces are that of membership matrices and ultrametrics, respectively, for partitions and hierarchies.) Parameter~$m$ must not be less than one and controls the softness of the obtained partitions, corresponding to the \dQuote{fuzzification parameter} of the fuzzy $c$-means algorithm. For $m = 1$, a generalization of the Lloyd-Forgy variant \citep{cluster:Lloyd:1957, cluster:Forgy:1965, cluster:Lloyd:1982} of the $k$-means algorithm is used, which iterates between reclassifying objects to their closest prototypes, and computing new prototypes as consensus clusterings for the classes. \citet{cluster:Gaul+Schader:1988} introduced this procedure for \dQuote{Clusterwise Aggregation of Relations} (with the same domains), containing equivalence relations, i.e., hard partitions, as a special case. For $m > 1$, a generalization of the fuzzy $c$-means recipe \citep[e.g.,][]{cluster:Bezdek:1981} is used, which alternates between computing optimal memberships for fixed prototypes, and computing new prototypes as the suitably weighted consensus clusterings for the classes. This procedure is repeated until convergence occurs, or the maximal number of iterations is reached. Consensus clusterings are computed using (one of the methods provided by) \code{cl\_consensus}, with dissimilarities~$d$ and exponent~$e$ implied by method employed, and obtained via a registration mechanism. The default methods compute Least Squares Euclidean consensus clusterings, i.e., use Euclidean dissimilarity~$d$ and $e = 2$. \section{Examples} \label{sec:examples} \subsection{Cassini data} \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} and \cite{cluster:Leisch:1999} use Cassini data sets to illustrate how e.g.\ suitable aggregation of base $k$-means results can reveal underlying non-convex structure which cannot be found by the base algorithm. 
Such data sets contain points in 2-dimensional space drawn from the uniform distribution on 3 structures, with the two ``outer'' ones banana-shaped and the ``middle'' one a circle, and can be obtained by function~\code{mlbench.cassini()} in package~\pkg{mlbench}~\citep{cluster:Leisch+Dimitriadou:2005}. Package~\pkg{clue} contains the data sets \code{Cassini} and \code{CKME}, which are an instance of a 1000-point Cassini data set, and a cluster ensemble of 50 $k$-means partitions of the data set into three classes, respectively. The data set is shown in Figure~\ref{fig:Cassini}. <>= data("Cassini") plot(Cassini$x, col = as.integer(Cassini$classes), xlab = "", ylab = "") @ % $ \begin{figure} \centering <>= <> @ % \caption{The Cassini data set.} \label{fig:Cassini} \end{figure} Figure~\ref{fig:CKME} gives a dendrogram of the Euclidean dissimilarities of the elements of the $k$-means ensemble. <>= data("CKME") plot(hclust(cl_dissimilarity(CKME)), labels = FALSE) @ % \begin{figure} \centering <>= <> @ % \caption{A dendrogram of the Euclidean dissimilarities of 50 $k$-means partitions of the Cassini data into 3 classes.} \label{fig:CKME} \end{figure} We can see that there are large groups of essentially identical $k$-means solutions. We can gain more insight by inspecting representatives of these three groups, or by computing the medoid of the ensemble <<>>= m1 <- cl_medoid(CKME) table(Medoid = cl_class_ids(m1), "True Classes" = Cassini$classes) @ % $ and inspecting it (Figure~\ref{fig:Cassini-medoid}): <>= plot(Cassini$x, col = cl_class_ids(m1), xlab = "", ylab = "") @ % $ \begin{figure} \centering <>= <> @ % \caption{Medoid of the Cassini $k$-means ensemble.} \label{fig:Cassini-medoid} \end{figure} Flipping this solution top-down gives a second ``typical'' partition. We see that the $k$-means base clusterers cannot resolve the underlying non-convex structure. 
For the least squares consensus of the ensemble, we obtain <<>>= set.seed(1234) m2 <- cl_consensus(CKME) @ % where here and below we set the random seed for reproducibility, noting that one should really use several replicates of the consensus heuristic. This consensus partition has confusion matrix <<>>= table(Consensus = cl_class_ids(m2), "True Classes" = Cassini$classes) @ % $ and class details as displayed in Figure~\ref{fig:Cassini-mean}: <>= plot(Cassini$x, col = cl_class_ids(m2), xlab = "", ylab = "") @ % $ \begin{figure} \centering <>= <> @ % \caption{Least Squares Consensus of the Cassini $k$-means ensemble.} \label{fig:Cassini-mean} \end{figure} This has drastically improved performance, and almost perfect recovery of the two outer shapes. In fact, \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} show that almost perfect classification can be obtained by suitable combinations of different base clusterers ($k$-means, fuzzy $c$-means, and unsupervised fuzzy competitive learning). \subsection{Gordon-Vichi macroeconomic data} \citet[Table~1]{cluster:Gordon+Vichi:2001} provide soft partitions of 21 countries based on macroeconomic data for the years 1975, 1980, 1985, 1990, and 1995. These partitions were obtained using fuzzy $c$-means on measurements of the following variables: the annual per capita gross domestic product (GDP) in USD (converted to 1987 prices); the percentage of GDP provided by agriculture; the percentage of employees who worked in agriculture; and gross domestic investment, expressed as a percentage of the GDP. Table~5 in \cite{cluster:Gordon+Vichi:2001} gives 3-class consensus partitions obtained by applying their models 1, 2, and 3 and the approach in \cite{cluster:Sato+Sato:1994}. The partitions and consensus partitions are available in data sets \code{GVME} and \code{GVME\_Consensus}, respectively. 
We compare the results of \cite{cluster:Gordon+Vichi:2001} using GV1 dissimilarities (model 1) to ours as obtained by \code{cl\_consensus()} with method \code{"GV1"}. <<>>= data("GVME") GVME set.seed(1) m1 <- cl_consensus(GVME, method = "GV1", control = list(k = 3, verbose = TRUE)) @ % This results in a soft partition with average squared GV1 dissimilarity (the criterion function to be optimized by the consensus partition) of <<>>= mean(cl_dissimilarity(GVME, m1, "GV1") ^ 2) @ % We compare this to the consensus solution given in \cite{cluster:Gordon+Vichi:2001}: <<>>= data("GVME_Consensus") m2 <- GVME_Consensus[["MF1/3"]] mean(cl_dissimilarity(GVME, m2, "GV1") ^ 2) table(CLUE = cl_class_ids(m1), GV2001 = cl_class_ids(m2)) @ % Interestingly, we are able to obtain a ``better'' solution, which however agrees with the one reported on the literature with respect to their nearest hard partitions. For the 2-class consensus partition, we obtain <<>>= set.seed(1) m1 <- cl_consensus(GVME, method = "GV1", control = list(k = 2, verbose = TRUE)) @ which is slightly better than the solution reported in \cite{cluster:Gordon+Vichi:2001} <<>>= mean(cl_dissimilarity(GVME, m1, "GV1") ^ 2) m2 <- GVME_Consensus[["MF1/2"]] mean(cl_dissimilarity(GVME, m2, "GV1") ^ 2) @ but in fact agrees with it apart from rounding errors: <<>>= max(abs(cl_membership(m1) - cl_membership(m2))) @ It is interesting to compare these solutions to the Euclidean 2-class consensus partition for the GVME ensemble: <<>>= m3 <- cl_consensus(GVME, method = "GV1", control = list(k = 2, verbose = TRUE)) @ This is markedly different from the GV1 consensus partition <<>>= table(GV1 = cl_class_ids(m1), Euclidean = cl_class_ids(m3)) @ with countries <<>>= rownames(m1)[cl_class_ids(m1) != cl_class_ids(m3)] @ % classified differently, being with the ``richer'' class for the GV1 and the ``poorer'' for the Euclidean consensus partition. 
(In fact, all these countries end up in the ``middle'' class for the 3-class GV1 consensus partition.) \subsection{Rosenberg-Kim kinship terms data} \cite{cluster:Rosenberg+Kim:1975} describe an experiment where perceived similarities of the kinship terms were obtained from six different ``sorting'' experiments. In one of these, 85 female undergraduates at Rutgers University were asked to sort 15 English terms into classes ``on the basis of some aspect of meaning''. These partitions were printed in \citet[Table~7.1]{cluster:Rosenberg:1982}. Comparison with the original data indicates that the partition data have the ``nephew'' and ``niece'' columns interchanged, which is corrected in data set \code{Kinship82}. \citet[Table~6]{cluster:Gordon+Vichi:2001} provide consensus partitions for these data based on their models 1--3 (available in data set \code{Kinship82\_Consensus}). We compare their results using co-membership dissimilarities (model 3) to ours as obtained by \code{cl\_consensus()} with method \code{"GV3"}. <<>>= data("Kinship82") Kinship82 set.seed(1) m1 <- cl_consensus(Kinship82, method = "GV3", control = list(k = 3, verbose = TRUE)) @ % This results in a soft partition with average co-membership dissimilarity (the criterion function to be optimized by the consensus partition) of <<>>= mean(cl_dissimilarity(Kinship82, m1, "comem") ^ 2) @ % Again, we compare this to the corresponding consensus solution given in \cite{cluster:Gordon+Vichi:2001}: <<>>= data("Kinship82_Consensus") m2 <- Kinship82_Consensus[["JMF"]] mean(cl_dissimilarity(Kinship82, m2, "comem") ^ 2) @ % Interestingly, again we obtain a (this time only ``slightly'') better solution, with <<>>= cl_dissimilarity(m1, m2, "comem") table(CLUE = cl_class_ids(m1), GV2001 = cl_class_ids(m2)) @ % indicating that the two solutions are reasonably close, even though <<>>= cl_fuzziness(cl_ensemble(m1, m2)) @ % shows that the solution found by \pkg{clue} is ``softer''. 
\subsection{Miller-Nicely consonant phoneme confusion data} \cite{cluster:Miller+Nicely:1955} obtained the data on the auditory confusions of 16 English consonant phonemes by exposing female subjects to a series of syllables consisting of one of the consonants followed by the vowel `a' under 17 different experimental conditions. Data set \code{Phonemes} provides consonant misclassification probabilities (i.e., similarities) obtained from aggregating the six so-called flat-noise conditions in which only the speech-to-noise ratio was varied into a single matrix of misclassification frequencies. These data are used in \cite{cluster:DeSoete:1986} as an illustration of the SUMT approach for finding least squares optimal fits to dissimilarities by ultrametrics. We can reproduce this analysis as follows. <<>>= data("Phonemes") d <- as.dist(1 - Phonemes) @ % (Note that the data set has the consonant misclassification probabilities, i.e., the similarities between the phonemes.) <<>>= u <- ls_fit_ultrametric(d, control = list(verbose = TRUE)) @ % This gives an ultrametric~$u$ for which Figure~\ref{fig:Phonemes} plots the corresponding dendrogram, ``basically'' reproducing Figure~1 in \cite{cluster:DeSoete:1986}. <>= plot(u) @ % \begin{figure} \centering <>= <> @ % \caption{Dendrogram for least squares fit to the Miller-Nicely consonant phoneme confusion data.} \label{fig:Phonemes} \end{figure} We can also compare the least squares fit obtained to that of other hierarchical clusterings of $d$, e.g.\ those obtained by \code{hclust()}. The ``optimal''~$u$ has Euclidean dissimilarity <<>>= round(cl_dissimilarity(d, u), 4) @ % to $d$. For the \code{hclust()} results, we get <<>>= hclust_methods <- c("ward", "single", "complete", "average", "mcquitty") hens <- cl_ensemble(list = lapply(hclust_methods, function(m) hclust(d, m))) names(hens) <- hclust_methods round(sapply(hens, cl_dissimilarity, d), 4) @ % which all exhibit greater Euclidean dissimilarity to $d$ than $u$. 
(We exclude methods \code{"median"} and \code{"centroid"} as these do not yield valid hierarchies.) We can also compare the ``structure'' of the different hierarchies, e.g.\ by looking at the rate of inversions between them: <<>>= ahens <- c(L2opt = cl_ensemble(u), hens) round(cl_dissimilarity(ahens, method = "gamma"), 2) @ % \section{Outlook} \label{sec:outlook} Package~\pkg{clue} was designed as an \emph{extensible} environment for computing on cluster ensembles. It currently provides basic data structures for representing partitions and hierarchies, and facilities for computing on these, including methods for measuring proximity and obtaining consensus and ``secondary'' clusterings. Many extensions to the available functionality are possible and in fact planned (some of these enhancements were already discussed in more detail in the course of this paper). \begin{itemize} \item Provide mechanisms to generate cluster ensembles based on reweighting (assuming base clusterers allowing for case weights) the data set. \item Explore recent advances (e.g., parallelized random search) in heuristics for solving the multi-dimensional assignment problem. \item Add support for \emph{additive trees} \citep[e.g.,][]{cluster:Barthelemy+Guenoche:1991}. \item Add heuristics for finding least squares fits based on iterative projection on convex sets of constraints, see e.g.\ \cite{cluster:Hubert+Arabie+Meulman:2006} and the accompanying MATLAB code available at \url{http://cda.psych.uiuc.edu/srpm_mfiles} for using these methods (instead of SUMT approaches) to fit ultrametrics and additive trees to proximity data. \item Add an ``$L_1$ View''. Emphasis in \pkg{clue}, in particular for obtaining consensus clusterings, is on using Euclidean dissimilarities (based on suitable least squares distances); arguably, more ``robust'' consensus solutions should result from using Manhattan dissimilarities (based on absolute distances). 
Adding such functionality necessitates developing the corresponding structure theory for soft Manhattan median partitions. Minimizing average Manhattan dissimilarity between co-memberships and ultrametrics results in constrained $L_1$ approximation problems for the weighted medians of the co-memberships and ultrametrics, respectively, and could be approached by employing SUMTs analogous to the ones used for the $L_2$ approximations. \item Provide heuristics for obtaining \emph{hard} consensus partitions. \item Add facilities for tuning hyper-parameters (most prominently, the number of classes employed) and ``cluster validation'' of partitioning algorithms, as recently proposed by \cite{cluster:Roth+Lange+Braun:2002}, \cite{cluster:Lange+Roth+Braun:2004}, \cite{cluster:Dudoit+Fridlyand:2002}, and \cite{cluster:Tibshirani+Walther:2005}. \end{itemize} We are hoping to be able to provide many of these extensions in the near future. \subsubsection*{Acknowledgments} We are grateful to Walter B\"ohm for providing efficient C code for solving assignment problems. 
{\small \bibliographystyle{abbrvnat} \bibliography{cluster} } \end{document} clue/src/0000755000175100001440000000000013435045065012014 5ustar hornikusersclue/src/clue.h0000644000175100001440000000205611304023137013105 0ustar hornikusers#ifndef _CLUE_H #define _CLUE_H #include void solve_LSAP(double *c, Sint *n, Sint *p); double **clue_vector_to_square_matrix(double *x, Sint n); void clue_dissimilarity_count_inversions(double *x, double *y, Sint *n, double *count); void deviation_from_ultrametricity(double *x, int *n, double *v, int *max); void deviation_from_ultrametricity_gradient(double *x, int *n, double *out); void deviation_from_additivity(double *x, int *n, double *v, int *max); void deviation_from_additivity_gradient(double *x, int *n, double *out); void ls_fit_ultrametric_by_iterative_reduction(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose); void ls_fit_ultrametric_by_iterative_projection(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose); void ls_fit_addtree_by_iterative_reduction(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose); void ls_fit_addtree_by_iterative_projection(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose); #endif clue/src/assignment.c0000644000175100001440000002370411623271641014334 0ustar hornikusers#include /* error() */ #include "assignment.h" /* main routine */ void ap_hungarian(AP *p) { int n; /* size of problem */ int *ri; /* covered rows */ int *ci; /* covered columns */ time_t start, end; /* timer */ int i, j, ok; start = time(0); n = p->n; p->runs = 0; /* allocate memory */ p->s = calloc(1 + n, sizeof(int)); p->f = calloc(1 + n, sizeof(int)); ri = calloc(1 + n, sizeof(int)); ci = calloc(1 + n, sizeof(int)); if(ri == NULL || ci == NULL || p->s == NULL || p->f == NULL) error("ap_hungarian: could not allocate memory!"); preprocess(p); preassign(p); while(p->na < n){ if(REDUCE == cover(p, ri, ci)) 
reduce(p, ri, ci); ++p->runs; } end = time(0); p->rtime = end - start; /* check if assignment is a permutation of (1..n) */ for(i = 1; i <= n; i++){ ok = 0; for(j = 1; j <= n; j++) if(p->s[j] == i) ++ok; if(ok != 1) error("ap_hungarian: error in assigment, is not a permutation!"); } /* calculate cost of assignment */ p->cost = 0; for(i = 1; i <= n; i++) p->cost+= p->C[i][p->s[i]]; /* reset result back to base-0 indexing */ for(i = 1; i <= n; i++) p->s[i - 1] = p->s[i] - 1; /* free memory */ free(ri); free(ci); } /* abbreviated interface */ int ap_assignment(AP *p, int *res) { int i; if(p->s == NULL) ap_hungarian(p); for(i = 0; i < p->n; i++) res[i] = p->s[i]; return p->n; } /*******************************************************************/ /* constructors */ /* read data from file */ /*******************************************************************/ AP *ap_read_problem(char *file) { FILE *f; int i,j,c; int m,n; double x; double **t; int nrow,ncol; AP *p; f = fopen(file,"r"); if(f==NULL) return NULL; t = (double **)malloc(sizeof(double*)); m = 0; n = 0; nrow = 0; ncol = 0; while(EOF != (i = fscanf(f, "%lf", &x))){ if(i == 1){ if(n == 0){ t = (double **) realloc(t,(m + 1) * sizeof(double *)); t[m] = (double *) malloc(sizeof(double)); }else t[m] = (double *) realloc(t[m], (n + 1) * sizeof(double)); t[m][n++] = x; ncol = (ncol < n) ? n : ncol; c=fgetc(f); if(c == '\n'){ n = 0; ++m; nrow = (nrow < m) ? 
m : nrow; } } } fclose(f); /* prepare data */ if(nrow != ncol){ /* fprintf(stderr,"ap_read_problem: problem not quadratic\nrows =%d, cols = %d\n",nrow,ncol); */ warning("ap_read_problem: problem not quadratic\nrows = %d, cols = %d\n", nrow, ncol); return NULL; } p = (AP*) malloc(sizeof(AP)); p->n = ncol; p->C = (double **) malloc((1 + nrow)*sizeof(double *)); p->c = (double **) malloc((1 + nrow)*sizeof(double *)); if(p->C == NULL || p->c == NULL) return NULL; for(i = 1; i <= nrow; i++){ p->C[i] = (double *) calloc(ncol + 1, sizeof(double)); p->c[i] = (double *) calloc(ncol + 1, sizeof(double)); if(p->C[i] == NULL || p->c[i] == NULL) return NULL; } for(i = 1; i <= nrow; i++) for( j = 1; j <= ncol; j++){ p->C[i][j] = t[i-1][j-1]; p->c[i][j] = t[i-1][j-1]; } for(i = 0; i < nrow; i++) free(t[i]); free(t); p->cost = 0; p->s = NULL; p->f = NULL; return p; } AP *ap_create_problem_from_matrix(double **t, int n) { int i,j; AP *p; p = (AP*) malloc(sizeof(AP)); if(p == NULL) return NULL; p->n = n; p->C = (double **) malloc((n + 1) * sizeof(double *)); p->c = (double **) malloc((n + 1) * sizeof(double *)); if(p->C == NULL || p->c == NULL) return NULL; for(i = 1; i <= n; i++){ p->C[i] = (double *) calloc(n + 1, sizeof(double)); p->c[i] = (double *) calloc(n + 1, sizeof(double)); if(p->C[i] == NULL || p->c[i] == NULL) return NULL; } for(i = 1; i <= n; i++) for( j = 1; j <= n; j++){ p->C[i][j] = t[i-1][j-1]; p->c[i][j] = t[i-1][j-1]; } p->cost = 0; p->s = NULL; p->f = NULL; return p; } /* read data from vector */ AP *ap_create_problem(double *t, int n) { int i,j; AP *p; p = (AP*) malloc(sizeof(AP)); if(p == NULL) return NULL; p->n = n; p->C = (double **) malloc((n + 1) * sizeof(double *)); p->c = (double **) malloc((n + 1) * sizeof(double *)); if(p->C == NULL || p->c == NULL) return NULL; for(i = 1; i <= n; i++){ p->C[i] = (double *) calloc(n + 1, sizeof(double)); p->c[i] = (double *) calloc(n + 1, sizeof(double)); if(p->C[i] == NULL || p->c[i] == NULL) return NULL; } for(i = 1; 
i <= n; i++) for( j = 1; j <= n; j++){ p->C[i][j] = t[n*(j - 1) + i - 1]; p->c[i][j] = t[n*(j - 1) + i - 1]; } p->cost = 0; p->s = NULL; p->f = NULL; return p; } /* destructor */ void ap_free(AP *p) { int i; free(p->s); free(p->f); for(i = 1; i <= p->n; i++){ free(p->C[i]); free(p->c[i]); } free(p->C); free(p->c); free(p); } /* set + get functions */ /* void ap_show_data(AP *p) { int i, j; for(i = 1; i <= p->n; i++){ for(j = 1; j <= p->n; j++) printf("%6.2f ", p->c[i][j]); printf("\n"); } } */ double ap_mincost(AP *p) { if(p->s == NULL) ap_hungarian(p); return p->cost; } int ap_size(AP *p) { return p->n; } int ap_time(AP *p) { return (int) p->rtime; } int ap_iterations(AP *p) { return p->runs; } /* void ap_print_solution(AP *p) { int i; printf("%d itertations, %d secs.\n",p->runs, (int)p->rtime); printf("Min Cost: %10.4f\n",p->cost); for(i = 0; i < p->n; i++) printf("%4d",p->s[i]); printf("\n"); } */ int ap_costmatrix(AP *p, double **m) { int i,j; for(i = 0; i < p->n; i++) for(j = 0; j < p->n; j++) m[i][j] = p->C[i + 1][j + 1]; return p->n; } int ap_datamatrix(AP *p, double **m) { int i,j; for(i = 0; i < p->n; i++) for(j = 0; j < p->n; j++) m[i][j] = p->c[i + 1][j + 1]; return p->n; } /* error reporting */ /* void ap_error(char *message) { fprintf(stderr,"%s\n",message); exit(1); } */ /*************************************************************/ /* these functions are used internally */ /* by ap_hungarian */ /*************************************************************/ int cover(AP *p, int *ri, int *ci) { int *mr, i, r; int n; n = p->n; mr = calloc(1 + p->n, sizeof(int)); /* reset cover indices */ for(i = 1; i <= n; i++){ if(p->s[i] == UNASSIGNED){ ri[i] = UNCOVERED; mr[i] = MARKED; } else ri[i] = COVERED; ci[i] = UNCOVERED; } while(TRUE){ /* find marked row */ r = 0; for(i = 1; i <= n; i++) if(mr[i] == MARKED){ r = i; break; } if(r == 0) break; for(i = 1; i <= n; i++) if(p->c[r][i] == 0 && ci[i] == UNCOVERED){ if(p->f[i]){ ri[p->f[i]] = UNCOVERED; mr[p->f[i]] 
= MARKED; ci[i] = COVERED; }else{ if(p->s[r] == UNASSIGNED) ++p->na; p->f[p->s[r]] = 0; p->f[i] = r; p->s[r] = i; free(mr); return NOREDUCE; } } mr[r] = UNMARKED; } free(mr); return REDUCE; } void reduce(AP *p, int *ri, int *ci) { int i, j, n; double min; n = p->n; /* find minimum in uncovered c-matrix */ min = DBL_MAX; for(i = 1; i <= n; i++) for(j = 1; j <= n; j++) if(ri[i] == UNCOVERED && ci[j] == UNCOVERED){ if(p->c[i][j] < min) min = p->c[i][j]; } /* subtract min from each uncovered element and add it to each element */ /* which is covered twice */ for(i = 1; i <= n; i++) for(j = 1; j <= n; j++){ if(ri[i] == UNCOVERED && ci[j] == UNCOVERED) p->c[i][j]-= min; if(ri[i] == COVERED && ci[j] == COVERED) p->c[i][j]+= min; } } void preassign(AP *p) { int i, j, min, r, c, n, count; int *ri, *ci, *rz, *cz; n = p->n; p->na = 0; /* row and column markers */ ri = calloc(1 + n, sizeof(int)); ci = calloc(1 + n, sizeof(int)); /* row and column counts of zeroes */ rz = calloc(1 + n, sizeof(int)); cz = calloc(1 + n, sizeof(int)); for(i = 1; i <= n; i++){ count = 0; for(j = 1; j <= n; j++) if(p->c[i][j] == 0) ++count; rz[i] = count; } for(i = 1; i <= n; i++){ count = 0; for(j = 1; j <= n; j++) if(p->c[j][i] == 0) ++count; cz[i] = count; } while(TRUE){ /* find unassigned row with least number of zeroes > 0 */ min = INT_MAX; r = 0; for(i = 1; i <= n; i++) if(rz[i] > 0 && rz[i] < min && ri[i] == UNASSIGNED){ min = rz[i]; r = i; } /* check if we are done */ if(r == 0) break; /* find unassigned column in row r with least number of zeroes */ c = 0; min = INT_MAX; for(i = 1; i <= n; i++) if(p->c[r][i] == 0 && cz[i] < min && ci[i] == UNASSIGNED){ min = cz[i]; c = i; } if(c){ ++p->na; p->s[r] = c; p->f[c] = r; ri[r] = ASSIGNED; ci[c] = ASSIGNED; /* adjust zero counts */ cz[c] = 0; for(i = 1; i <= n; i++) if(p->c[i][c] == 0) --rz[i]; } } /* free memory */ free(ri); free(ci); free(rz); free(cz); } void preprocess(AP *p) { int i, j, n; double min; n = p->n; /* subtract column minima in 
each row */ for(i = 1; i <= n; i++){ min = p->c[i][1]; for(j = 2; j <= n; j++) if(p->c[i][j] < min) min = p->c[i][j]; for(j = 1; j <= n; j++) p->c[i][j]-= min; } /* subtract row minima in each column */ for(i = 1; i <= n; i++){ min = p->c[1][i]; for(j = 2; j <= n; j++) if(p->c[j][i] < min) min = p->c[j][i]; for(j = 1; j <= n; j++) p->c[j][i]-= min; } } clue/src/lsap.c0000644000175100001440000000033311304023137013103 0ustar hornikusers#include #include "assignment.h" #include "clue.h" void solve_LSAP(double *c, Sint *n, Sint *p) { AP *ap; ap = ap_create_problem(c, *n); ap_hungarian(ap); ap_assignment(ap, p); ap_free(ap); } clue/src/clue.c0000644000175100001440000000135211304023137013076 0ustar hornikusers#include #include #include "clue.h" double **clue_vector_to_square_matrix(double *x, Sint n) { double **data, *val; Sint i, j; data = (double **) R_alloc(n, sizeof(double)); for(i = 0; i < n; i++) { data[i] = (double *) R_alloc(n, sizeof(double)); val = x + i; for(j = 0; j < n; j++, val += n) data[i][j] = *val; } return(data); } static int clue_sign(double x) { if(x == 0) return(0); return((x > 0) ? 
1 : -1); } void clue_dissimilarity_count_inversions(double *x, double *y, Sint *n, double *count) { Sint i, j; for(i = 0; i < *n; i++) for(j = 0; j < *n; j++) if((clue_sign(x[i] - x[j]) * clue_sign(y[i] - y[j])) < 0) (*count)++; } clue/src/assignment.h0000644000175100001440000000334511623271704014340 0ustar hornikusers#include #include #include #include /* INT_MAX */ #include /* DBL_MAX */ #include #include /* constants used for improving readability of code */ #define COVERED 1 #define UNCOVERED 0 #define ASSIGNED 1 #define UNASSIGNED 0 #define TRUE 1 #define FALSE 0 #define MARKED 1 #define UNMARKED 0 #define REDUCE 1 #define NOREDUCE 0 typedef struct{ int n; /* order of problem */ double **C; /* cost matrix */ double **c; /* reduced cost matrix */ int *s; /* assignment */ int *f; /* column i is assigned to f[i] */ int na; /* number of assigned items; */ int runs; /* number of iterations */ double cost; /* minimum cost */ time_t rtime; /* time */ } AP; /* public interface */ /* constructors and destructor */ AP *ap_create_problem(double *t, int n); AP *ap_create_problem_from_matrix(double **t, int n); AP *ap_read_problem(char *file); void ap_free(AP *p); int ap_assignment(AP *p, int *res); int ap_costmatrix(AP *p, double **m); int ap_datamatrix(AP *p, double **m); int ap_iterations(AP *p); void ap_hungarian(AP *p); double ap_mincost(AP *p); void ap_print_solution(AP *p); void ap_show_data(AP *p); int ap_size(AP *p); int ap_time(AP *p); /* error reporting */ void ap_error(char *message); /* private functions */ void preprocess(AP *p); void preassign(AP *p); int cover(AP *p, int *ri, int *ci); void reduce(AP *p, int *ri, int *ci); clue/src/init.c0000644000175100001440000000373613340544526013135 0ustar hornikusers#include #include #include #include "clue.h" static R_NativePrimitiveArgType solve_LSAP_t[3] = { REALSXP, INTSXP, INTSXP }; static R_NativePrimitiveArgType clue_dissimilarity_count_inversions_t[4] = { REALSXP, REALSXP, INTSXP, REALSXP }; static 
R_NativePrimitiveArgType deviation_from_ultrametricity_t[4] = { REALSXP, INTSXP, REALSXP, LGLSXP }; static R_NativePrimitiveArgType deviation_from_ultrametricity_gradient_t[3] = { REALSXP, INTSXP, REALSXP }; static R_NativePrimitiveArgType deviation_from_additivity_t[4] = { REALSXP, INTSXP, REALSXP, LGLSXP }; static R_NativePrimitiveArgType deviation_from_additivity_gradient_t[3] = { REALSXP, INTSXP, REALSXP }; static R_NativePrimitiveArgType ls_fit_ultrametric_by_iterative_reduction_t[7] = { REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, LGLSXP }; static R_NativePrimitiveArgType ls_fit_ultrametric_by_iterative_projection_t[7] = { REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, LGLSXP }; static R_NativePrimitiveArgType ls_fit_addtree_by_iterative_reduction_t[7] = { REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, LGLSXP }; static R_NativePrimitiveArgType ls_fit_addtree_by_iterative_projection_t[7] = { REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, LGLSXP }; #define CDEF(name) {#name, (DL_FUNC) &name, sizeof(name ## _t)/sizeof(name ## _t[0]), name ##_t} static const R_CMethodDef cMethods[] = { CDEF(solve_LSAP), CDEF(clue_dissimilarity_count_inversions), CDEF(deviation_from_ultrametricity), CDEF(deviation_from_ultrametricity_gradient), CDEF(deviation_from_additivity), CDEF(deviation_from_additivity_gradient), CDEF(ls_fit_ultrametric_by_iterative_reduction), CDEF(ls_fit_ultrametric_by_iterative_projection), CDEF(ls_fit_addtree_by_iterative_reduction), CDEF(ls_fit_addtree_by_iterative_projection), {NULL, NULL, 0} }; void R_init_clue(DllInfo *dll) { R_registerRoutines(dll, cMethods, NULL, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } clue/src/trees.c0000644000175100001440000002401211304023137013266 0ustar hornikusers#include #include #include "clue.h" static int iwork3[3]; static int iwork4[4]; static void isort3(int *i, int *j, int *k) { iwork3[0] = *i; iwork3[1] = *j; iwork3[2] = *k; R_isort(iwork3, 3); *i = iwork3[0]; *j = iwork3[1]; *k = iwork3[2]; } 
static void isort4(int *i, int *j, int *k, int *l) { iwork4[0] = *i; iwork4[1] = *j; iwork4[2] = *k; iwork4[3] = *l; R_isort(iwork4, 4); *i = iwork4[0]; *j = iwork4[1]; *k = iwork4[2]; *l = iwork4[3]; } void deviation_from_ultrametricity(double *x, int *n, double *v, int *max) { double **D, p, delta, A, B, C; int i, j, k; D = clue_vector_to_square_matrix(x, *n); p = 0; for(i = 0; i < *n - 2; i++) for(j = i + 1; j < *n - 1; j++) { A = D[i][j]; for(k = j + 1; k < *n; k++) { B = D[i][k]; C = D[j][k]; if((A <= B) && (A <= C)) delta = C - B; else if(B <= C) delta = A - C; else delta = B - A; if(*max) p = fmax2(p, fabs(delta)); else p += delta * delta; } } *v = p; } void deviation_from_ultrametricity_gradient(double *x, int *n, double *out) { double **D, **G, A, B, C, delta; int i, j, k; D = clue_vector_to_square_matrix(x, *n); G = clue_vector_to_square_matrix(out, *n); for(i = 0; i < *n - 2; i++) for(j = i + 1; j < *n - 1; j++) { A = D[i][j]; for(k = j + 1; k < *n; k++) { B = D[i][k]; C = D[j][k]; if((A <= B) && (A <= C)) { delta = 2 * (B - C); G[i][k] += delta; G[j][k] -= delta; } else if(B <= C) { delta = 2 * (C - A); G[j][k] += delta; G[i][j] -= delta; } else { delta = 2 * (A - B); G[i][j] += delta; G[i][k] -= delta; } } } for(i = 0; i < *n; i++) for(j = 0; j < *n; j++) *out++ = G[i][j]; } void deviation_from_additivity(double *x, int *n, double *v, int *max) { double **D, p, delta, A, B, C; int i, j, k, l; D = clue_vector_to_square_matrix(x, *n); p = 0; for(i = 0; i < *n - 3; i++) for(j = i + 1; j < *n - 2; j++) for(k = j + 1; k < *n - 1; k++) for(l = k + 1; l < *n; l++) { A = D[i][j] + D[k][l]; B = D[i][k] + D[j][l]; C = D[i][l] + D[j][k]; if((A <= B) && (A <= C)) delta = (C - B); else if(B <= C) delta = (A - C); else delta = (B - A); if(*max) p = fmax2(p, fabs(delta)); else p += delta * delta; } *v = p; } void deviation_from_additivity_gradient(double *x, int *n, double *out) { double **D, **G, A, B, C, delta; int i, j, k, l; D = clue_vector_to_square_matrix(x, 
*n); G = clue_vector_to_square_matrix(out, *n); for(i = 0; i < *n - 3; i++) for(j = i + 1; j < *n - 2; j++) for(k = j + 1; k < *n - 1; k++) for(l = k + 1; l < *n; l++) { A = D[i][j] + D[k][l]; B = D[i][k] + D[j][l]; C = D[i][l] + D[j][k]; if((A <= B) && (A <= C)) { delta = 2 * (B - C); G[i][l] -= delta; G[j][k] -= delta; G[i][k] += delta; G[j][l] += delta; } else if(B <= C) { delta = 2 * (C - A); G[i][l] += delta; G[j][k] += delta; G[i][j] -= delta; G[k][l] -= delta; } else { delta = 2 * (A - B); G[i][k] -= delta; G[j][l] -= delta; G[i][j] += delta; G[k][l] += delta; } } for(i = 0; i < *n; i++) for(j = 0; j < *n; j++) *out++ = G[i][j]; } void ls_fit_ultrametric_by_iterative_reduction(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose) { double A, B, C, **D, DQ, delta, tmp; int i, i1, j, j1, k, k1, N3; D = clue_vector_to_square_matrix(d, *n); /* And initialize the upper half of D ("work array") to 0. (Yes, this could be done more efficiently by just propagating the veclh dist representation.) */ for(i = 0; i < *n - 1; i++) for(j = i + 1; j < *n; j++) D[i][j] = 0; N3 = (*n - 2); for(*iter = 0; *iter < *maxiter; (*iter)++) { if(*verbose) Rprintf("Iteration: %d, ", *iter); for(i1 = 0; i1 < *n - 2; i1++) for(j1 = i1 + 1; j1 < *n - 1; j1++) for(k1 = j1 + 1; k1 < *n; k1++) { i = order[i1]; j = order[j1]; k = order[k1]; isort3(&i, &j, &k); A = D[j][i]; B = D[k][i]; C = D[k][j]; /* B & G have a divisor of 2 for case 1 and 4 for cases 2 and 3 ... clearly, we should use the same in all cases, but should it be 2 or 4? 
*/ if((A <= B) && (A <= C)) { /* Case 1: 5080 */ DQ = (C - B) / 2; D[i][k] += DQ; D[j][k] -= DQ; } else if(B <= C) { /* Case 2: 5100 */ DQ = (C - A) / 2; D[i][j] += DQ; D[j][k] -= DQ; } else { /* Case 3: 5120 */ DQ = (B - A) / 2; D[i][j] += DQ; D[i][k] -= DQ; } } delta = 0; for(i = 0; i < *n - 1; i++) for(j = i + 1; j < *n; j++) { tmp = D[i][j] / N3; D[j][i] += tmp; D[i][j] = 0; delta += fabs(tmp); } if(*verbose) Rprintf("change: %f\n", delta); if(delta < *tol) break; } /* And now write results back. Could make this more efficient, of course ... */ for(j = 0; j < *n; j++) for(i = 0; i < *n; i++) *d++ = D[i][j]; } void ls_fit_ultrametric_by_iterative_projection(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose) { double A, B, C, **D, delta; int i, i1, j, j1, k, k1; D = clue_vector_to_square_matrix(d, *n); for(*iter = 0; *iter < *maxiter; (*iter)++) { if(*verbose) Rprintf("Iteration: %d, ", *iter); delta = 0; for(i1 = 0; i1 < *n - 2; i1++) for(j1 = i1 + 1; j1 < *n - 1; j1++) for(k1 = j1 + 1; k1 < *n; k1++) { i = order[i1]; j = order[j1]; k = order[k1]; isort3(&i, &j, &k); A = D[i][j]; B = D[i][k]; C = D[j][k]; if((A <= B) && (A <= C)) { D[i][k] = D[j][k] = (B + C) / 2; delta += fabs(B - C); } else if(B <= C) { D[i][j] = D[j][k] = (C + A) / 2; delta += fabs(C - A); } else { D[i][j] = D[i][k] = (A + B) / 2; delta += fabs(A - B); } } if(*verbose) Rprintf("change: %f\n", delta); if(delta < *tol) break; } for(i = 0; i < *n - 1; i++) for(j = i + 1; j < *n; j++) D[j][i] = D[i][j]; /* And now write results back. Could make this more efficient, of course ... */ for(j = 0; j < *n; j++) for(i = 0; i < *n; i++) *d++ = D[i][j]; } void ls_fit_addtree_by_iterative_reduction(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose) { /* Once we have ls_fit_ultrametric_by_iterative_reduction() we can always do this as well ... See page 67f in Barthelemy and Guenoche. 
*/ double A, B, C, **D, DQ, delta, tmp, N3; int i, i1, j, j1, k, k1, l, l1; D = clue_vector_to_square_matrix(d, *n); /* And initialize the upper half of D ("work array") to 0. (Yes, this could be done more efficiently by just propagating the veclh dist representation.) */ for(i = 0; i < *n - 1; i++) for(j = i + 1; j < *n; j++) D[i][j] = 0; N3 = (*n - 2) * (*n - 3) / 2; for(*iter = 0; *iter < *maxiter; (*iter)++) { if(*verbose) Rprintf("Iteration: %d, ", *iter); for(i1 = 0; i1 < *n - 3; i1++) for(j1 = i1 + 1; j1 < *n - 2; j1++) for(k1 = j1 + 1; k1 < *n - 1; k1++) for(l1 = k1 + 1; l1 < *n; l1++) { i = order[i1]; j = order[j1]; k = order[k1]; l = order[l1]; isort4(&i, &j, &k, &l); A = D[j][i] + D[l][k]; B = D[k][i] + D[l][j]; C = D[l][i] + D[k][j]; if((A <= B) && (A <= C)) { /* Case 1: 5090 */ DQ = (C - B) / 4; D[i][l] -= DQ; D[j][k] -= DQ; D[i][k] += DQ; D[j][l] += DQ; } else if(B <= C) { /* Case 2: 5120 */ DQ = (A - C) / 4; D[i][l] += DQ; D[j][k] += DQ; D[i][j] -= DQ; D[k][l] -= DQ; } else { /* Case 3: 5150 */ DQ = (B - A) / 4; D[i][k] -= DQ; D[j][l] -= DQ; D[i][j] += DQ; D[k][l] += DQ; } } delta = 0; for(i = 0; i < *n - 1; i++) for(j = i + 1; j < *n; j++) { tmp = D[i][j] / N3; D[j][i] += tmp; D[i][j] = 0; delta += fabs(tmp); } if(*verbose) Rprintf("change: %f\n", delta); if(delta < *tol) break; } /* And now write results back. Could make this more efficient, of course ... 
*/ for(j = 0; j < *n; j++) for(i = 0; i < *n; i++) *d++ = D[i][j]; } void ls_fit_addtree_by_iterative_projection(double *d, int *n, int *order, int *maxiter, int *iter, double *tol, int *verbose) { double A, B, C, **D, DQ, delta; int i, i1, j, j1, k, k1, l, l1; D = clue_vector_to_square_matrix(d, *n); for(*iter = 0; *iter < *maxiter; (*iter)++) { delta = 0; if(*verbose) Rprintf("Iteration: %d, ", *iter); for(i1 = 0; i1 < *n - 3; i1++) for(j1 = i1 + 1; j1 < *n - 2; j1++) for(k1 = j1 + 1; k1 < *n - 1; k1++) for(l1 = k1 + 1; l1 < *n; l1++) { i = order[i1]; j = order[j1]; k = order[k1]; l = order[l1]; isort4(&i, &j, &k, &l); A = D[i][j] + D[k][l]; B = D[i][k] + D[j][l]; C = D[i][l] + D[j][k]; if((A <= B) && (A <= C)) { DQ = (C - B) / 4; D[i][l] -= DQ; D[j][k] -= DQ; D[i][k] += DQ; D[j][l] += DQ; delta += fabs(C - B); } else if(B <= C) { DQ = (A - C) / 4; D[i][l] += DQ; D[j][k] += DQ; D[i][j] -= DQ; D[k][l] -= DQ; delta += fabs(A - C); } else { DQ = (B - A) / 4; D[i][k] -= DQ; D[j][l] -= DQ; D[i][j] += DQ; D[k][l] += DQ; delta += fabs(B - A); } } if(*verbose) Rprintf("change: %f\n", delta); if(delta < *tol) break; } for(i = 0; i < *n - 1; i++) for(j = i + 1; j < *n; j++) D[j][i] = D[i][j]; /* And now write results back. Could make this more efficient, of course ... 
*/ for(j = 0; j < *n; j++) for(i = 0; i < *n; i++) *d++ = D[i][j]; } clue/NAMESPACE0000644000175100001440000002445212537351717012461 0ustar hornikusersuseDynLib("clue", .registration = TRUE, .fixes = "C_") import("stats") importFrom("graphics", "par", "plot") importFrom("cluster", "silhouette") export("cl_agreement", "cl_bag", "cl_boot", "cl_consensus", "cl_dissimilarity", "cl_ensemble", "as.cl_ensemble", "is.cl_ensemble", "cl_fuzziness", "cl_join", "cl_margin", "cl_meet", "cl_medoid", "cl_membership", "as.cl_membership", "cl_object_names", "cl_pam", "cl_pclust", "cl_predict", "cl_prototypes", "cl_tabulate", "cl_ultrametric", "as.cl_ultrametric", "ls_fit_addtree", "ls_fit_centroid", "ls_fit_ultrametric", "ls_fit_sum_of_ultrametrics", "ls_fit_ultrametric_target", "l1_fit_ultrametric", "l1_fit_ultrametric_target", "cl_validity", "n_of_objects", "n_of_classes", "cl_class_ids", "as.cl_class_ids", "cl_classes", "is.cl_partition", "as.cl_partition", "is.cl_hard_partition", "as.cl_hard_partition", "is.cl_soft_partition", "is.cl_dendrogram", "as.cl_dendrogram", "is.cl_hierarchy", "as.cl_hierarchy", "as.cl_addtree", "kmedoids", "pclust", "pclust_family", "pclust_object", "solve_LSAP", "sumt") ## S3 methods, sorted alphabetically. 
S3method("[", "cl_agreement") S3method("[", "cl_dissimilarity") S3method("[", "cl_ensemble") S3method("[", "cl_proximity") S3method("Complex", "cl_hierarchy") S3method("Complex", "cl_partition") S3method("Math", "cl_hierarchy") S3method("Math", "cl_partition") S3method("Ops", "cl_dendrogram") S3method("Ops", "cl_hierarchy") S3method("Ops", "cl_partition") S3method("Summary", "cl_hierarchy") S3method("Summary", "cl_partition") S3method("Summary", "cl_partition_ensemble") S3method("as.cl_addtree", "default") S3method("as.cl_addtree", "phylo") S3method("as.cl_membership", "default") S3method("as.cl_membership", "matrix") S3method("as.cl_ultrametric", "default") S3method("as.cl_ultrametric", "matrix") S3method("as.dendrogram", "cl_ultrametric") S3method("as.dist", "cl_dissimilarity") S3method("as.hclust", "cl_hierarchy") S3method("as.hclust", "cl_ultrametric") S3method("as.hclust", "mona") S3method("as.matrix", "cl_proximity") S3method("as.matrix", "hclust") S3method("c", "cl_ensemble") S3method("cl_class_ids", "Mclust") S3method("cl_class_ids", "Weka_clusterer") S3method("cl_class_ids", "bclust") S3method("cl_class_ids", "ccfkms") S3method("cl_class_ids", "cclust") S3method("cl_class_ids", "cl_class_ids") S3method("cl_class_ids", "cl_membership") S3method("cl_class_ids", "cl_pam") S3method("cl_class_ids", "cl_partition") S3method("cl_class_ids", "cl_partition_by_class_ids") S3method("cl_class_ids", "cshell") S3method("cl_class_ids", "default") S3method("cl_class_ids", "fclust") S3method("cl_class_ids", "flexmix") S3method("cl_class_ids", "kcca") S3method("cl_class_ids", "kmeans") S3method("cl_class_ids", "kmedoids") S3method("cl_class_ids", "movMF") S3method("cl_class_ids", "partition") S3method("cl_class_ids", "pclust") S3method("cl_class_ids", "relation") S3method("cl_class_ids", "rock") S3method("cl_class_ids", "specc") S3method("cl_classes", "cl_hierarchy") S3method("cl_classes", "cl_partition") S3method("cl_classes", "cl_ultrametric") S3method("cl_classes", 
"default") S3method("cl_membership", "Mclust") S3method("cl_membership", "bclust") S3method("cl_membership", "cclust") S3method("cl_membership", "cl_membership") S3method("cl_membership", "cl_partition") S3method("cl_membership", "cshell") S3method("cl_membership", "default") S3method("cl_membership", "fanny") S3method("cl_membership", "fclust") S3method("cl_membership", "flexmix") S3method("cl_membership", "kmeans") S3method("cl_membership", "movMF") S3method("cl_membership", "partition") S3method("cl_membership", "pclust") S3method("cl_object_names", "cl_ensemble") S3method("cl_object_names", "cl_hierarchy") S3method("cl_object_names", "cl_membership") S3method("cl_object_names", "cl_partition") S3method("cl_object_names", "cl_ultrametric") S3method("cl_object_names", "default") S3method("cl_object_names", "dist") S3method("cl_object_names", "hclust") S3method("cl_object_names", "mona") S3method("cl_object_names", "phylo") S3method("cl_object_names", "twins") S3method("cl_predict", "Mclust") S3method("cl_predict", "Weka_clusterer") S3method("cl_predict", "bclust") S3method("cl_predict", "cclust") S3method("cl_predict", "cl_partition") S3method("cl_predict", "ccfkms") S3method("cl_predict", "clara") S3method("cl_predict", "cshell") S3method("cl_predict", "default") S3method("cl_predict", "fanny") S3method("cl_predict", "fclust") S3method("cl_predict", "flexmix") S3method("cl_predict", "kcca") S3method("cl_predict", "kmeans") S3method("cl_predict", "movMF") S3method("cl_predict", "pam") S3method("cl_predict", "pclust") S3method("cl_predict", "rock") S3method("cl_prototypes", "Mclust") S3method("cl_prototypes", "bclust") S3method("cl_prototypes", "ccfkms") S3method("cl_prototypes", "cclust") S3method("cl_prototypes", "cl_pam") S3method("cl_prototypes", "cl_partition") S3method("cl_prototypes", "clara") S3method("cl_prototypes", "cshell") S3method("cl_prototypes", "fclust") S3method("cl_prototypes", "kcca") S3method("cl_prototypes", "kmeans") 
S3method("cl_prototypes", "pam") S3method("cl_prototypes", "pclust") S3method("cl_prototypes", "specc") S3method("cl_validity", "agnes") S3method("cl_validity", "default") S3method("cl_validity", "diana") S3method("cl_validity", "cl_partition") S3method("cl_validity", "pclust") S3method("cophenetic", "cl_ultrametric") S3method("cophenetic", "mona") S3method("cut", "cl_dendrogram") S3method("is.cl_dendrogram", "cl_dendrogram") S3method("is.cl_dendrogram", "default") S3method("is.cl_dendrogram", "hclust") S3method("is.cl_dendrogram", "mona") S3method("is.cl_dendrogram", "phylo") S3method("is.cl_dendrogram", "twins") S3method("is.cl_hard_partition", "Mclust") S3method("is.cl_hard_partition", "Weka_clusterer") S3method("is.cl_hard_partition", "bclust") S3method("is.cl_hard_partition", "ccfkms") S3method("is.cl_hard_partition", "cclust") S3method("is.cl_hard_partition", "cl_hard_partition") S3method("is.cl_hard_partition", "cl_partition") S3method("is.cl_hard_partition", "cshell") S3method("is.cl_hard_partition", "default") S3method("is.cl_hard_partition", "fanny") S3method("is.cl_hard_partition", "fclust") S3method("is.cl_hard_partition", "flexmix") S3method("is.cl_hard_partition", "kcca") S3method("is.cl_hard_partition", "kmeans") S3method("is.cl_hard_partition", "kmedoids") S3method("is.cl_hard_partition", "movMF") S3method("is.cl_hard_partition", "partition") S3method("is.cl_hard_partition", "pclust") S3method("is.cl_hard_partition", "rock") S3method("is.cl_hard_partition", "specc") S3method("is.cl_hierarchy", "cl_hierarchy") S3method("is.cl_hierarchy", "default") S3method("is.cl_hierarchy", "hclust") S3method("is.cl_hierarchy", "mona") S3method("is.cl_hierarchy", "phylo") S3method("is.cl_hierarchy", "twins") S3method("is.cl_partition", "Mclust") S3method("is.cl_partition", "Weka_clusterer") S3method("is.cl_partition", "bclust") S3method("is.cl_partition", "ccfkms") S3method("is.cl_partition", "cclust") S3method("is.cl_partition", "cl_partition") 
S3method("is.cl_partition", "cshell") S3method("is.cl_partition", "default") S3method("is.cl_partition", "fclust") S3method("is.cl_partition", "flexmix") S3method("is.cl_partition", "kcca") S3method("is.cl_partition", "kmeans") S3method("is.cl_partition", "kmedoids") S3method("is.cl_partition", "movMF") S3method("is.cl_partition", "partition") S3method("is.cl_partition", "pclust") S3method("is.cl_partition", "rock") S3method("is.cl_partition", "specc") S3method("n_of_classes", "Mclust") S3method("n_of_classes", "bclust") S3method("n_of_classes", "cclust") S3method("n_of_classes", "cl_membership") S3method("n_of_classes", "cl_partition") S3method("n_of_classes", "cshell") S3method("n_of_classes", "default") S3method("n_of_classes", "fanny") S3method("n_of_classes", "fclust") S3method("n_of_classes", "kmeans") S3method("n_of_classes", "partition") S3method("n_of_classes", "pclust") S3method("n_of_objects", "Mclust") S3method("n_of_objects", "bclust") S3method("n_of_objects", "cclust") S3method("n_of_objects", "cl_ensemble") S3method("n_of_objects", "cl_hierarchy") S3method("n_of_objects", "cl_membership") S3method("n_of_objects", "cl_partition") S3method("n_of_objects", "cl_ultrametric") S3method("n_of_objects", "cshell") S3method("n_of_objects", "default") S3method("n_of_objects", "dist") S3method("n_of_objects", "fclust") S3method("n_of_objects", "hclust") S3method("n_of_objects", "kmeans") S3method("n_of_objects", "mona") S3method("n_of_objects", "partition") S3method("n_of_objects", "pclust") S3method("n_of_objects", "phylo") S3method("n_of_objects", "twins") S3method("names", "cl_proximity") S3method("plot", "cl_addtree") S3method("plot", "cl_ensemble") S3method("plot", "cl_dendrogram") S3method("plot", "cl_ultrametric") S3method("print", "cl_class_ids") S3method("print", "cl_classes_of_partition_of_objects") S3method("print", "cl_classes_of_hierarchy_of_objects") S3method("print", "cl_cross_proximity") S3method("print", "cl_dendrogram") S3method("print", 
"cl_dendrogram_ensemble") S3method("print", "cl_ensemble") S3method("print", "cl_fuzziness") S3method("print", "cl_hierarchy") S3method("print", "cl_hierarchy_ensemble") S3method("print", "cl_membership") S3method("print", "cl_pam") S3method("print", "cl_partition") S3method("print", "cl_partition_by_class_ids") S3method("print", "cl_partition_by_memberships") S3method("print", "cl_partition_ensemble") S3method("print", "cl_proximity") S3method("print", "cl_pclust") S3method("print", "cl_validity") S3method("print", "kmedoids") S3method("print", "pclust") S3method("print", "solve_LSAP") S3method("rep", "cl_ensemble") S3method("silhouette", "cl_partition") S3method("silhouette", "cl_pclust") S3method("unique", "cl_ensemble") S3method(".maybe_is_proper_soft_partition", "Mclust") S3method(".maybe_is_proper_soft_partition", "cl_partition") S3method(".maybe_is_proper_soft_partition", "cshell") S3method(".maybe_is_proper_soft_partition", "default") S3method(".maybe_is_proper_soft_partition", "fanny") S3method(".maybe_is_proper_soft_partition", "fclust") S3method(".maybe_is_proper_soft_partition", "flexmix") S3method(".maybe_is_proper_soft_partition", "pclust") clue/.aspell/0000755000175100001440000000000012462665664012577 5ustar hornikusersclue/.aspell/defaults.R0000644000175100001440000000023113142056061014502 0ustar hornikusersRd_files <- vignettes <- R_files <- description <- list(encoding = "UTF-8", language = "en", dictionaries = c("en_stats", "clue")) clue/.aspell/clue.rds0000644000175100001440000000156312462665506014241 0ustar hornikuserseUz6 S";yu֬nwK)A}sF'$~?'^w_oK`eъF dvzť]q|~|9nBkSb:4QkID匦ѓ-n/QQh哥(!/)1hE&,R6[0,@t* -:,1j~W||RvHC gG=(BZAl yɈтw$ p2f,CZp$noܙb۹fNExwg[:К}d.gip2GpuBQ':׆/&J<RkA[kEG^ ؜.""Np՝ Lɬ (wf1{]7Π;x\Z#0tH`p-(+c0^}62}D*JYXk;iwIΝYYah9PprZ /ʩ8'^ ն ^\clue/data/0000755000175100001440000000000013435045065012136 5ustar hornikusersclue/data/Kinship82_Consensus.rda0000644000175100001440000000143413435045065016447 0ustar hornikusersVMLA^"քz2M2 
Mp@l-]nkg#^̅ȅK!!DAO W ?+uv+5ѓo{H|:iZH#!-i8$>}ZX;'=rs$}ls5i@~Ax܏UE`_s$$>s;ٮTƶ' W2Ww\lgT`x⎧kVYae3!C}sA3/;|0St.ż1!7C^o;?z׺L֮;Hr>-7ڠZQͥ+Fձl ~i#ҥ\1YʘUT =g 8%~b%(?٪aS0Vc JVΨL.c&-QBDc3j+d[fDQ\*ZMب̾adT|NDI!m 3ͬ=GQ z!41n8ZLayYZt摯UJhďQA -r TPAj qQՃ@xM"J{ W xσ)* >?ey <Hv7v7U:#PϠЍтhӤ(EP`E|+&C]WiF^]l۩l[5cJ{E0_JTS9,ڲ?AAHX\!#t-vX96#:z"J Cįzݴ=6bg6c`2l# KaeL ]9~{nPyciRYtU32]09FCY猁o72 MټױͱZUaȧgc51&$FMzXN=V3m;of ڷ ,قVCۊPo/nCg)rl0?bӓ O2yДFY=N[+rz\$IKD(]@ 0W@$OxO|mOD1Eq} a:^1^ԅ8GOTW?B?X/F?U}cya`D! Tl&ra_|WRGO\#,_!D?3S%m/}#3ٜ"{THQz9yvP7r*$ U2D낊{~.XB6 U2*s&!?c) *d3 '|%Iul̖m#H]7eUeڮJerPupv"׭ -njnclue/data/Kinship82.rda0000644000175100001440000000236713435045065014415 0ustar hornikusersBZh91AY&SY9Q8RJTT@E]P e 3Pdɣi0LM4$LH& =Adɣi0LM48ɓF!`i&DS4ɣ& &cQ RBe= $P4F@4!=O)Sb. v.Yr;$LRpqԓ;*)%YpAn߉kƱð$ Ij$F-E'$$)>f'&䓦II)n̤XR%ݸj7IS!)ãL`beP{:FqCHY Db(Rs3l=N'ǤI1Ta&HdբOrq&}N]}iɤ8ҝ$x5qN1ĺI8:JLXI L(%2,#889ljf.^Npk3w2;a#2ɔ󭰶:3nuz66#]w{$,I$I=N&|$$'&I3^MIl@Zb\LlĩВ}|B1lz ℏh6;6&T-(K(&W/ըm k f9E#IQ4'')XOs睋$"TnstTMLXĉ1% 2FX_(ȨI #syu' JJNZFoOj4)_YT2]XI0$6{IIʅ7+0L.)uF^(QNtsF1ez#.RbF)%qjf2a~zݻ2Uk'+ѺGZ-NW q<%'gjW(HC)tC;GdB{PSt0W * t7B5}1X'}c3U\e`Hn3mj%1Sa؃E~TBJAXG3|?I,",\zKΒpK/XXK}k,Lm|zЦܞ7z{*춭Qa6!<S(HAn6_ȻvϰFyXȯbATݩk$6ڌ"$U-ˁ*LwLcՙ(O3+B[O^D߱%]JH_Wd7ekhײJROsm=)j9%'  _jB quIx]SZ_'SEӒ`~9 P%u;GeaRkV+)S61(*p&03!GШXO/һ#D}\r@K@v~3Zb'^M shaK-w6aسn 2 NAB˱?ej 9Z[.QUkV&".=43GzMVSLRXq/kM#*9SbIx1TGUۗ1WޥQ<:]b'-V-ufw@9G#x}swAHfp%-RoQzk*9L"D!98 ƯѸ4@+m!U! 
JM{܄ObjxsxmdzW;?:XmSB"zLymK^{x{+,5= 1N:\l2kF GSP"u P_'jη ͫ!l6ʠɌbfk322lс_wb"lVX2A姹0Zr-V&b"}tU|{&D4>u 4ES榥"~yRҟ[2 q;b 2{A6ET/оL),ztfo_l=xBb rHqeJ$$r3|<?/^IguJ _/>q"N5\$ݦž1rP/_y8i.I2F+BDgN:!0'8Ώbt7T.]sA gQ.$5m33pM@jap-vmaAyXm%UdA&.yc㈛)-5Ρ|y4,.gJ,kd3[뾺Ģ>ƉB[>H0ES[yby)@LrK@qb"q{Vv1_dst( dF$KI7E(߫ۙ_}KW]lAF!"dG_L$OtϙeFbfc͵e'*gn͎D}q|sf{qb Tk 31DS6^A)7- p_H5C@vT9hAxwL;a4!)z;F_ ?m8+J,͡Mftޤm!ey2IwyNF.'ڝ:E1D ~]etYܻzmZTYZC-V&̣!r{VyjxQ%s߶L`l\ -;f($fXYhՖ9<0-jzAxӼg$Lku0 YZclue/data/Phonemes.rda0000644000175100001440000000152713435045065014411 0ustar hornikusersV[HTQ͌aCX(" "B6#FY "⇉E2)"zhKSKS2aFE "Q}D{{aI }9g^ƙ4 fln3pkxrn7'/g9%z=vCvB ԏLJN`Xz}pc}5Uju[Zi.]U *YI/>RUseE^}m+X%0:^(2]ENׅMH5\?xr?iE:^sG58cԾvRݯ/&'T;:0o B_3e\BKCA+8/@X3_zWx@?}E}|\O2CsyzrsUDc}&-+XߌT>G9̸J?ZR\bYvQrŸgJ2uv^s3Ey˩k4@ _a:^ a|׸O^/?_z7oU㟉~>~>i:~_zWx:<W?W/tot}z#2O'87&ÝWg\}?g3/3+aDN|C_!)˜2obܖBhDy܈Dy+QD "rx]?iSUսPS xxprKkݼyrBų3Z;p?k "kQk`ScʸTo|l`q,ZrY<89z!=2вؠg|\} *}m|z:5z9Er#ܳrX \zGc_W[8dapbE t}R?w%oQ#'E iB.*('rE^;63(tkSC+;$PCOhyqN b;(4@]R݂r[]=Ӆ6/$(UEF->q }ս.#t57 ,̲-;Q9mNC赱~smNm4KոROj ^Y CKC= ШX6Nj,}7>ܻLsj}-̛<%$` u3.B_0"A^_˩Wv#,\ܖsFlBm>b֘R"Tb\]s6pc"Uԏoq+6x׸Ce"cwW1͢``)GًpGVoPgWi?^\jue%b,(PBKE"ME2QχG%+S3/ӈP^GV 'kWW<䝹&zHX;RpY^Kz wmg&lXGZ ˛X`QVO%вN%" LF#\T_9 [w,ZP{gQ jԩ1iPMaT, T+/klOCp ŝG-PP.R4ۺVo^67q[৚fOq򱸃I}R1^ŅҮ*=?E(opPH#1(ڦ먳c* W) G;Wu&mSua8m!5z}8+o3nݧhkQ_:+*zJiaD^מtB̌և%QUlܢ%eye,d䐉MުԄ9½Ee)${7Ng|A1ѫӶܝ"Od?Tl"y?eo},%Y[Ì~<`[Aڛ Ʊ l( nU]4_)­>ŏjPDWyNLokS|eƩO ?s,Q+|QTq[ٺx>;CQ']8 V,9mfy9 vI푰CTK<,nz[ݲpmZndq JjֻVU?zcf>z%ͫf 8IYOsTpk`'=`&srnh=>Op9Rp'f \ʻG~}CSEcpͼ@1[OqmI qj{cs~wn W s:czOZLeXS6cfs54lu:~K[ßqFKp8l+]k< 'u8v y;<1Uo|vz-B,˖CJ=gaZ5Tʄes{_mG[^>-"m?pߵ7-.Y0j׎*Wƥnz9eypd鮣&nH|csML?8Ș^>%ۺH]aɞ݇c&-h0^/P}WZKVrHamD%V'F)ON*v6ÍOZ}u+'q-2ը1iH4hl*zWUS*lxKq2Mo<|bG{.a e8mB!v/T ^PYf=oLY'܊#/n20]2VXXBcm9^]pkݲ:84tE{7[PRco u~ޟAUѰZTi.1%̣=4 Ke8aLwQSa~r<)=9jBѢg[ϫENej?{Hm2ǼJΚHX&*k@S|s36qNf=8ϓ^Wޘ-CQࠬ^d*> Kz"Ƶ130 J_p|ajstE+nRF7|ػxս3{nx s}ikUaa4W&m7<0ڠnq3xyڪ_!U"q> h$Q1I nZPU?8o u)܋Es}Ε(ECAL,_ vwCmWx`ROk OG0_zZW9%'%騒y\t0b\RE3sPg`MeDq%.Q#7S/Xo=*Lj% 
פgUnc݈K7+2j/*-7p2D FJh nrdØ2〫p/ fLύfYb۸Ey]q6DL3q!|kIGh`&ܿLV ҈,s)"B>~ԻCfY}w~7Z I%fcm}*([gN[Wx Vt6G^q ~4 f˸F,{s_:?SK{*fƔVvZQ,+;TS3W;ƭ1/@w =nTYG'wȩo_ÅK ѷ.ѿ5_oT3S啁.FIGPӠGmF4PX#>U=,0؜:~!UzOZ="݄S"2ٸlޡQybe`xR{ܵ·t*Nٲa# =1o+4ΒCe!POGܵď["F5}HtJ6O9s?|gԽ܍]2P[Aom4tAV>'#"h\čşvaoMιzwc]=T4tl>/{\diWT)8?x<-F,)͂(+PԣLE2pAѺ2Z3.p<mtM2gq-k਋6 |MOd𲹷Q/ݭ PӖyh4¶7.Et?#1+Ro3@DyAq¬\#Q-Ÿ>;yfMQ.厳JP[߽{&9} uJJѯPS@Kj6Ӝ>n= ujW9%ZFQρAGN<\=$KV BK[53)bi9|_pO~'@n|byo*ϊ*g8iC2OJ/tJ6Z&jK a@WԚ mg`oÉO%/hS=~HC۴V")q>AUG>jt 5]!/8}?ұ z XA~O\O_2͢aѻ1JUeT_Bœ>"Rc> zE45Uu_`z*e ٹֵUc_cu^ƫց\ж ))A"\ '^zVS>zB/gO9}{gRU, jYOp[La臎|1`g1L+ r[t;OṾ$'g9w8l @%Q30L-J b@Oʭ`ww;]J8zmK9%5v027gW%/GSB}M|B hr{ x=5_Xȷ sX: $$ Oq_9yܿtcw?h3? FqOx]~_>]_}AȿK3V'%xK/)$.?=0^Lm<_΂$R[Ud؇Y?J|Y |y[R"5fٻW ?vogI>Z@ j_y]No w,i~< W%%5 ??)79({c,@N6wsXJ`Gs챓en5h x{]i!5_}iwޢ m2 J"i6gL0'gCmrǎ-Ͻ!UCE(OJ(eZq:pUʨFIv~s oCr8X^ 8 : ANpg8s3Bw}3TGkgU5Tw_S9cNzN6Yw9S9~ǣT:&I&o>!}i3qrGq?eo1yC<[Mh,|&{@3~~_FxA(+8X>9=΁@;{A~}Yю~, =8r{;_Ag4U@ϒ6P3fn\@}Sv }ޓ_Yܸ%w LXNY OLK̩Z0?o*))[3=x|7cz d>qe* C'4U:͡s3 mltSFa|h)DJh8 "x;&f!;/y˚/!~ k'r-D3?Sv:Y8G`:0?Zx$~apS$gc؃ %$rfjnw.—ⷜٔC!l >k4R_x--sL 6J9 ہH|}lbیN#5@GϢu:׉Dr;m5]ÝZ֠C0?6" :%39g.r*;%AXBCzz$cS/ g7s-w>K4[ o16$|lH]<ϐ:ǟzM<AROԟc0?aOۍ9K'E%Y] g%\$*f XFNxL}cyh[o^%C0=W!qc >J 5V<18醑\$vM~ygϻ|!}3Zo.҈UA$zD[U-:OYTNQ3̘Q!p~!h(-:&B;+Nlף:v D Jȟ_O^5vc'/nRV?NPyƕu:hSqVHhi MA>;*Ӟ[IZTx13#uܝ؉ -3d]SM/ZGW%6:_~ٟbI=d̓ZAgeQ2>qsh죓TwSlH3~=kTwFpSCw`wx y}gS=:3$ڮ/g5?M9Lj|(䃪5{2ڌfn rwg<_"rn"t +92!Hic_iOa>8;kREd_ej.m}rв+Id8{ z&7^#= wDA?.N#'#K>kl^s=tɻԃAݎ~Go (?B;#_IMl \6Nɶ,0|lg vr^RFr.9ēz~N5B9$RW,_uT6.6|x f"'„w.ٟ̑~4>~4~G?xpsFr;!nшi#2bs~ řmw9k^˟9 :cBA>mѻwPF~c·~e#:_؃_yM?9z^]l~fM=ؼYAcmwt+1PycŮuA.zg|_`=O5O:Zê~hW%n9}ۡN+ ! 1̄؅# 8.O`M64^VթE2N3]WDx@0?q& 8a1=eFwEI=Fܤr'`~bGOn@ޘ+bT? :vxI i $oϙoɺ~, 8&8́:$MoB; ?Ey,mtۖK$vpI-79)6~ O t1@>uې:X_H\'vayuن܉e@NybϯIO tyl%ue'бH=R z|͐2])[O R]`9 ۫ k`τ/(Y Rö$i#` g Yi#πo$Z:[c |Z7&#EvaDsWׇB<9]4^GiLȺ;J :V rj!cʲ:hK[H6yp ^@.9O\Av2v? 
'uRG$6<>sB7V@7㡲w{H]8r-/9Q.0^tcv#7,ԩ>gr)8odЍH:AַSy-CU095hx}r>Ha["ncN!=CGH+'uX@ӝdəR%~'v'|< -I.1fw:ȸ(rO#>J%zA r)'ޙuÑ?'b~4^rGGnIb d/r|zD߉l<4I'@'WRG"f՘;i ?+˩PXO\>C~˸ys"_Ў[ݩ!N8SO}ؼ wMuO hRRBHnOHJW|G$3 {Z'@˔yg^;ǣy s+ϩ%^!u#pq37Ρh*G6Kγ1fiٷybĿ^y`/3G^$Pdp:7|kI 婢$V_ߺ*S_%694}#אq6TQMJVY_cLyM#u`|8XVVp&tncޘs~,_=GyH? 9Cy[0nD֜5"$O,U$ӔSε_=cgsW`Y'w[$?$ޒHi|M=@EwtɻVzA߸?^:\}ݱpC y!a!}!2J])x7gweAOH!x_kOx @k~1܋>1Q} RɀӻG7g{Tٝ 8JxʡN |E.pΩGUok-Nу6'5uGB d=rro,=eQ- ীa~.XFN[ni4d :8Ewe3xKp}3/7/] _ƃߢ@ψ{½3;{Qܝ#G܋#p@# [{ķ/ R@~ʲFⓗTS,9s#tө,s 8«Hq@jv/.d"NY=Eg^3&{p/WNX_ *f- c&5RΪ;r}.8஢)~w~/ܛcJߣ~oN[+"Wa }>*ۯw;JQ*ਖ;F%€kqsPf)xCz&mُMn`N)ϋ_K$'q, )|' SoWs7)TٹA4ȫmZ e yx {e#ʑFW,;J.3$|ݳ\~$gn9B; k9}AHyBVnc!qzY Q{4sor)8іϨKl4可:e^Ro<; u7M8x/= ?(*YG#.`R*Rkyɖ!gO5[?}=u~Ջ/^K3ʽ(8_iVfnp'WwDߟEE; 9y?MdOclue/data/GVME_Consensus.rda0000644000175100001440000000415513435045065015431 0ustar hornikusersZilTU;m) p Q5C EU!:etHL]wD@([)XP`;373⾂aDؘy?w˻ws^o=eDl͖fg4it[ow1Enݫ fofn5WRO]CI}w@񷪮;9-2ɋ-i!:pר1K>) dE9 _~N!Q].mtN=eV.f)(zĺCz4 ysQ9MZ~L7E0N撨㖶ΩP /ʹ0DpmUc拉CX`ywsb@Zi?Gn4iPyiL++<WyRU ꁧDDwɉH(LMt*z.uvq13ߡ;\/qj\3s^ۍ8(sGT)cT{yUExt&8:[GY=R~ _Q3_/k%w,mkzH.#gçVS2=yQx^MA$4X Y#PE2 |r.=[M޾M@'Y>%3ɈXYs1E˳)ưL*n@JIYH-/)"zЍ ʏڛýCQ=(HڀU$Pu >-lMP YCs{e^2<ӛJ0_3^Uډ_ A 6)7@z([s :ʷS~UB"§/=H o:N1 >fPnqQF73bB3Լ@F]*i,fjqH~NlFXMZf6z ZLb38 IQ\~6}"9Va}rj8 { ›!|7BpDž.mRH;>%uz1E'|O`6 nPuG#(2^\ߖ +]D#ByJ/ßvF70b`\R~-j3k$)_  6?,54v =D;19,?fERJyZpkhaP~y_x; { Z< oB;?@O&`/_WwC[ARuiVثBjX+b 4> :NK ܸT;kx IDloJ,b[nSp {U#AF0AsY+rX i#@Ʉ(~_i3D|+ (:EYՀ8CA+G *pr0:Hc|&y#AF;&U'&\rCʌ*2Aԉ br}4I0!uޝj_#$-zbT[ Ȩa+MaPxf~=Dt!aH \B R\OTVT1WQ~ d5Cl 29h, 2h(݉yr&G v761n0_Q. @ҺX%]J1mE $S혌pU}RJQz'g8RS QEqxXLj D:4VEZu%R(MᏁ×=C9tr( H{K{caT Ii ܇j\(!ԯ>|O*8mpr7A@?rXLKY}q>(EF=N4);|DwaDwaBBf%K7Kx4k@͇T`ѽ /h;'e*#clue/R/0000755000175100001440000000000013435044702011423 5ustar hornikusersclue/R/classes.R0000644000175100001440000000460613434542602013212 0ustar hornikuserscl_classes <- function(x) UseMethod("cl_classes") cl_classes.default <- function(x) { ## Be nice to users ... 
if(is.cl_partition(x)) cl_classes(as.cl_partition(x)) else if(is.cl_dendrogram(x)) cl_classes(as.cl_dendrogram(x)) else stop("Can only determine classes of partitions or hierarchies.") } cl_classes.cl_partition <- function(x) { n <- n_of_objects(x) out <- split(seq_len(n), cl_class_ids(x)) class(out) <- c("cl_classes_of_partition_of_objects", "cl_classes_of_objects") attr(out, "n_of_objects") <- n attr(out, "labels") <- cl_object_labels(x) out } cl_classes.cl_hierarchy <- function(x) { ## Assume a valid hierarchy/dendrogram. x <- as.hclust(x) n <- n_of_objects(x) labels <- seq_len(n) ## Only use the "maximal" partitions for each height (relevant in ## case of non-binary trees). groups <- cutree(x, h = unique(c(0, x$height))) ## Give a list with the (unique) sets of numbers of the objects. ## Note that objects may already be merged at height zero. out <- unique(unlist(c(as.list(labels), lapply(split(groups, col(groups)), function(k) split(labels, k))), recursive = FALSE, use.names = FALSE)) ## Preserve labels if possible, and re-order according to ## cardinality. out <- out[order(lengths(out))] class(out) <- c("cl_classes_of_hierarchy_of_objects", "cl_classes_of_objects") attr(out, "n_of_objects") <- n attr(out, "labels") <- cl_object_labels(x) out } ## Be nice to users of ultrametric fitters ... which should really fit ## dendrograms (which inherit from hierarchies). cl_classes.cl_ultrametric <- cl_classes.cl_hierarchy print.cl_classes_of_partition_of_objects <- function(x, ...) { labels <- attr(x, "labels") y <- lapply(x, function(i) paste(labels[i], collapse = ", ")) writeLines(formatDL(names(x), sprintf("{%s}", unlist(y)), style = "list", ...)) invisible(x) } print.cl_classes_of_hierarchy_of_objects <- function(x, ...) 
{ labels <- attr(x, "labels") y <- lapply(x, function(i) paste(labels[i], collapse = ", ")) y <- strwrap(sprintf("{%s},", unlist(y)), exdent = 2) y[length(y)] <- sub(",$", "", y[length(y)]) writeLines(y) invisible(x) } clue/R/membership.R0000644000175100001440000001737311633352676013726 0ustar hornikusers### * cl_membership ## Get the class membership matrix from a partition. ## ## We could use sparse matrices for the memberships of hard partitions. ## Not sure if this is really that important, though, as we typically ## use memberships in a context where dense matrices (memberships of ## soft partitions) occur. ## ## ## Currently, the number of classes to be used for the memberships must ## not be less than the number of classes in the partition. We might ## eventually change this so that "optimal" collapsing of classes is ## performed (but note that optimality needs to be relative to some ## dissimilarity measure) ... ## However, from the discussion of the second method in Gordon and Vichi ## (2001) we note that whereas optimal assignment is "simple", optimal ## collapsing (equivalent to partitioning into an arbitrary number of ## partitions) is of course very hard. ## cl_membership <- function(x, k = n_of_classes(x)) { if(k < n_of_classes(x)) stop("k cannot be less than the number of classes in x.") UseMethod("cl_membership") } ## Default method. cl_membership.default <- function(x, k = n_of_classes(x)) .cl_membership_from_class_ids(cl_class_ids(x), k) ## Package stats: kmeans() (R 2.1.0 or better). cl_membership.kmeans <- cl_membership.default ## Package cluster: clara(), fanny(), and pam() give objects of the ## respective class inheriting from class "partition". cl_membership.fanny <- function(x, k = n_of_classes(x)) .cl_membership_from_memberships(x$membership, k) cl_membership.partition <- cl_membership.default ## Package cclust: cclust(). cl_membership.cclust <- cl_membership.default ## Package e1071: cmeans() gives objects of class "fclust". 
cl_membership.fclust <- cl_membership.fanny ## Package e1071: cshell(). cl_membership.cshell <- cl_membership.fanny ## Package e1071: bclust(). cl_membership.bclust <- cl_membership.default ## Package flexmix: class "flexmix". ## ## We used to be able to call flexmix::posterior(), but this now only ## has S4 methods for modeltools::posterior() S4 generic. Let's call ## this one, and hope that flexmix has been loaded ... ## cl_membership.flexmix <- function(x, k = n_of_classes(x)) .cl_membership_from_memberships(modeltools::posterior(x), k) ## Package mclust: Mclust(). cl_membership.Mclust <- function(x, k = n_of_classes(x)) .cl_membership_from_memberships(x$z, k) ## Package clue: Memberships. cl_membership.cl_membership <- function(x, k = n_of_classes(x)) .cl_membership_from_memberships(x, k) ## (Note: we cannot simply return x in case k equals n_of_classes(x), ## because ncol(x) might be different.) ## Package clue: pclust(). cl_membership.pclust <- function(x, k = n_of_classes(x)) { ## We should really have a suitable "sparse matrix" class for ## representing the memberships of hard partitions. In case we ## decide not to fill the membership "slot" for such: if(is.null(m <- x$membership)) .cl_membership_from_class_ids(x$cluster, k) else .cl_membership_from_memberships(m, k) } ## Package clue: (virtual) class "cl_partition". cl_membership.cl_partition <- function(x, k = n_of_classes(x)) cl_membership(.get_representation(x), k) ## Package movMF: class "movMF". cl_membership.movMF <- function(x, k = n_of_classes(x)) .cl_membership_from_memberships(x$P, k) ### * .make_cl_membership ## A low-level common creator. 
## Low-level common creator: turn a plain membership matrix into a
## "cl_membership" object by attaching the bookkeeping attributes
## (number of non-dummy classes, hard-partition flag, and metadata).
.make_cl_membership <- function(x, n_of_classes, is_cl_hard_partition, meta = NULL) {
    extras <- list(n_of_classes = n_of_classes,
                   is_cl_hard_partition = is_cl_hard_partition,
                   meta = meta)
    for(a in names(extras))
        attr(x, a) <- extras[[a]]
    class(x) <- "cl_membership"
    x
}

### * .cl_membership_from_class_ids

## Build a hard membership matrix from a vector of class ids.
## 'k' may ask for additional (empty) dummy classes, but must not be
## smaller than the number of classes actually present in 'x'.
.cl_membership_from_class_ids <- function(x, k = NULL, meta = NULL) {
    x <- factor(x)
    nobj <- length(x)
    nclasses <- nlevels(x)
    if(is.null(k))
        k <- nclasses
    else if(k < nclasses)
        stop("k cannot be less than the number of classes in x.")
    ## A sparse encoding would really be preferable here ...
    M <- matrix(0, nobj, k)
    ## NA class ids are silently skipped by the subscripted assignment
    ## (value has length one) and handled explicitly right below.
    M[cbind(seq_len(nobj), as.numeric(x))] <- 1
    ## Objects with missing class ids get all-NA rows.
    M[is.na(x), ] <- NA
    if(nclasses == k)
        colnames(M) <- levels(x)
    nm <- names(x)
    if(!is.null(nm))
        rownames(M) <- nm
    .make_cl_membership(M, nclasses, TRUE, meta)
}

### * .cl_membership_from_memberships

## Canonicalize a raw membership matrix.  Zero columns are dropped and
## (if 'k' asks for more classes) re-filled at the end, which may seem
## unnecessary but canonicalizes by moving zero columns last.
.cl_membership_from_memberships <- function(x, k = NULL, meta = NULL) {
    x <- x[, colSums(x, na.rm = TRUE) > 0, drop = FALSE]
    nclasses <- ncol(x)
    if(!is.null(k)) {
        if(k < nclasses)
            stop("k cannot be less than the number of classes in x.")
        if(k > nclasses) {
            ## Pad with all-zero dummy classes.
            x <- cbind(x, matrix(0, nrow(x), k - nclasses))
            ## Keep rows with missing memberships entirely NA.
            x[rowSums(is.na(x)) > 0, ] <- NA
        }
    }
    ## Hard iff every object has a membership value of exactly one.
    hard <- all(rowSums(x == 1, na.rm = TRUE) > 0)
    .make_cl_membership(x, nclasses, hard, meta)
}

### * as.cl_membership

as.cl_membership <- function(x)
    UseMethod("as.cl_membership")

as.cl_membership.default <- function(x) {
    if(inherits(x, "cl_membership"))
        return(x)
    if(is.atomic(x))
        return(.cl_membership_from_class_ids(x))
    cl_membership(x)
}

as.cl_membership.matrix <- function(x)
    .cl_membership_from_memberships(x)

### * .memberships_from_cross_dissimilarities

## For a matrix of cross-dissimilarities [d_{bj}], return a stochastic
## matrix [u_{bj}] minimizing \sum_{b,j} u_{bj}^p d_{bj}^q.  If only a
## single power is given, it is taken as p, with q as 1; otherwise
## power = c(p, q).
## This returns a plain matrix of membership values and not a
## cl_membership object (so it does not deal with possibly dropping or
## re-introducing unused classes).
.memberships_from_cross_dissimilarities <- function(d, power = 2) {
    exponent <- if(length(power) == 1L)
        1 / (1 - power)
    else
        power[2L] / (1 - power[1L])
    u <- matrix(0, nrow(d), ncol(d))
    ## Rows containing zero dissimilarities distribute membership
    ## uniformly over their zero entries.
    is_zero <- !(d > 0)
    nzero <- rowSums(is_zero)
    ind <- nzero > 0
    if(any(ind))
        u[ind, ] <- is_zero[ind, , drop = FALSE] / nzero[ind]
    if(any(!ind)) {
        ## Compute d_{bj}^e / \sum_k d_{bk}^e on the log scale to avoid
        ## overflow from very small d_{bj} values.
        lw <- exponent * log(d[!ind, , drop = FALSE])
        lw <- exp(lw - lw[cbind(seq_len(nrow(lw)), max.col(lw))])
        u[!ind, ] <- lw / rowSums(lw)
    }
    u
}

### * print.cl_membership

print.cl_membership <- function(x, ...) {
    writeLines("Memberships:")
    ## Print the bare values, without the bookkeeping attributes.
    print(matrix(as.vector(x), nrow = nrow(x), dimnames = dimnames(x)),
          ...)
    invisible(x)
}

### .has_object_memberships

## Be nice to users when computing proximities: all measures for
## "partitions" we currently consider really only assume that we can
## compute memberships and/or class ids.
## Note that the cl_membership() default method works for cl_class_ids.
.has_object_memberships <- function(x) (is.cl_partition(x) || inherits(x, "cl_membership") || inherits(x, "cl_class_ids")) ### * .stochastify .stochastify <- function(x) { ## Try to ensure that a stochastic matrix is returned. x <- pmax(x, 0) x / rowSums(x) } ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/lsap.R0000644000175100001440000000147012036747337012521 0ustar hornikuserssolve_LSAP <- function(x, maximum = FALSE) { if(!is.matrix(x) || any(x < 0)) stop("x must be a matrix with nonnegative entries.") nr <- nrow(x) nc <- ncol(x) if(nr > nc) stop("x must not have more rows than columns.") if(nc > nr) x <- rbind(x, matrix(2 * sum(x), nc - nr, nc)) if(maximum) x <- max(x) - x storage.mode(x) <- "double" out <- .C(C_solve_LSAP, x, as.integer(nc), p = integer(nc))$p + 1 out <- out[seq_len(nr)] class(out) <- "solve_LSAP" out } print.solve_LSAP <- function(x, ...) { writeLines(c("Optimal assignment:", gsub("x", " ", strwrap(paste(seq_along(x), x, sep = "x=>x", collapse = ", "))))) invisible(x) } clue/R/addtree.R0000644000175100001440000003111013435044354013155 0ustar hornikusers### * ls_fit_addtree ls_fit_addtree <- function(x, method = c("SUMT", "IP", "IR"), weights = 1, control = list()) { if(!inherits(x, "dist")) x <- as.dist(x) ## Catch some special cases right away. if(attr(x, "Size") <= 3L) return(as.cl_addtree(x)) if(.non_additivity(x, max = TRUE) == 0) return(as.cl_addtree(x)) ## Handle argument 'weights'. ## This is somewhat tricky ... 
if(is.matrix(weights)) { weights <- as.dist(weights) if(length(weights) != length(x)) stop("Argument 'weights' must be compatible with 'x'.") } else weights <- rep_len(weights, length(x)) if(any(weights < 0)) stop("Argument 'weights' has negative elements.") if(!any(weights > 0)) stop("Argument 'weights' has no positive elements.") method <- match.arg(method) switch(method, SUMT = .ls_fit_addtree_by_SUMT(x, weights, control), IP = { .ls_fit_addtree_by_iterative_projection(x, weights, control) }, IR = { .ls_fit_addtree_by_iterative_reduction(x, weights, control) }) } ### ** .ls_fit_addtree_by_SUMT .ls_fit_addtree_by_SUMT <- function(x, weights = 1, control = list()) { ## Control parameters: ## gradient, gradient <- control$gradient if(is.null(gradient)) gradient <- TRUE ## nruns, nruns <- control$nruns ## start, start <- control$start ## Handle start values and number of runs. if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. start <- list(start) } } else if(is.null(nruns)) { ## Use nruns only if start is not given. nruns <- 1L } w <- weights / sum(weights) n <- attr(x, "Size") labels <- attr(x, "Labels") ## Handle missing values in x along the lines of de Soete (1984): ## set the corresponding weights to 0, and impute by the weighted ## mean. ind <- which(is.na(x)) if(any(ind)) { w[ind] <- 0 x[ind] <- weighted.mean(x, w, na.rm = TRUE) } L <- function(d) sum(w * (d - x) ^ 2) P <- .make_penalty_function_addtree(n) if(gradient) { grad_L <- function(d) 2 * w * (d - x) grad_P <- .make_penalty_gradient_addtree(n) } else { grad_L <- grad_P <- NULL } if(is.null(start)) { ## Initialize by "random shaking". Use sd() for simplicity. start <- replicate(nruns, x + rnorm(length(x), sd = sd(x) / sqrt(3)), simplify = FALSE) } ## And now ... d <- sumt(start, L, P, grad_L, grad_P, method = control$method, eps = control$eps, q = control$q, verbose = control$verbose, control = as.list(control$control))$x ## Round to enforce additivity, and hope for the best ... 
.cl_addtree_from_addtree_approximation(d, n, labels) } .make_penalty_function_addtree <- function(n) function(d) { (.non_additivity(.symmetric_matrix_from_veclh(d, n)) + sum(pmin(d, 0) ^ 2)) } .make_penalty_gradient_addtree <- function(n) function(d) { gr <- matrix(.C(C_deviation_from_additivity_gradient, as.double(.symmetric_matrix_from_veclh(d, n)), as.integer(n), gr = double(n * n))$gr, n, n) gr[row(gr) > col(gr)] + 2 * sum(pmin(d, 0)) } ### ** .ls_fit_addtree_by_iterative_projection ## ## Functions ## .ls_fit_addtree_by_iterative_projection() ## .ls_fit_addtree_by_iterative_reduction() ## are really identical apart from the name of the C routine they call. ## (But will this necessarily always be the case in the future?) ## Merge maybe ... ## .ls_fit_addtree_by_iterative_projection <- function(x, weights = 1, control = list()) { if(any(diff(weights))) warning("Non-identical weights currently not supported.") labels <- attr(x, "Labels") x <- as.matrix(x) n <- nrow(x) ## Control parameters: ## maxiter, maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 10000L ## nruns, nruns <- control$nruns ## order, order <- control$order ## tol, tol <- control$tol if(is.null(tol)) tol <- 1e-8 ## verbose. verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") ## Handle order and nruns. 
if(!is.null(order)) { if(!is.list(order)) order <- as.list(order) if(!all(vapply(order, function(o) all(sort(o) == seq_len(n)), NA))) stop("All given orders must be valid permutations.") } else { if(is.null(nruns)) nruns <- 1L order <- replicate(nruns, sample(n), simplify = FALSE) } ind <- lower.tri(x) L <- function(d) sum(weights * (x - d)[ind] ^ 2) d_opt <- NULL v_opt <- Inf for(run in seq_along(order)) { if(verbose) message(gettextf("Iterative projection run: %d", run)) d <- .C(C_ls_fit_addtree_by_iterative_projection, as.double(x), as.integer(n), as.integer(order[[run]] - 1L), as.integer(maxiter), iter = integer(1L), as.double(tol), as.logical(verbose))[[1L]] v <- L(d) if(v < v_opt) { v_opt <- v d_opt <- d } } d <- matrix(d_opt, n) dimnames(d) <- list(labels, labels) .cl_addtree_from_addtree_approximation(as.dist(d)) } ### ** .ls_fit_addtree_by_iterative_reduction .ls_fit_addtree_by_iterative_reduction <- function(x, weights = 1, control = list()) { if(any(diff(weights))) warning("Non-identical weights currently not supported.") labels <- attr(x, "Labels") x <- as.matrix(x) n <- nrow(x) ## Control parameters: ## maxiter, maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 10000L ## nruns, nruns <- control$nruns ## order, order <- control$order ## tol, tol <- control$tol if(is.null(tol)) tol <- 1e-8 ## verbose. verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") ## Handle order and nruns. 
if(!is.null(order)) { if(!is.list(order)) order <- as.list(order) if(!all(vapply(order, function(o) all(sort(o) == seq_len(n)), NA))) stop("All given orders must be valid permutations.") } else { if(is.null(nruns)) nruns <- 1L order <- replicate(nruns, sample(n), simplify = FALSE) } ind <- lower.tri(x) L <- function(d) sum(weights * (x - d)[ind] ^ 2) d_opt <- NULL v_opt <- Inf for(run in seq_along(order)) { if(verbose) message(gettextf("Iterative reduction run: %d", run)) d <- .C(C_ls_fit_addtree_by_iterative_reduction, as.double(x), as.integer(n), as.integer(order[[run]] - 1L), as.integer(maxiter), iter = integer(1L), as.double(tol), as.logical(verbose))[[1L]] v <- L(d) if(v < v_opt) { v_opt <- v d_opt <- d } } d <- matrix(d_opt, n) dimnames(d) <- list(labels, labels) .cl_addtree_from_addtree_approximation(as.dist(d)) } ### * .non_additivity .non_additivity <- function(x, max = FALSE) { if(!is.matrix(x)) x <- .symmetric_matrix_from_veclh(x) .C(C_deviation_from_additivity, as.double(x), as.integer(nrow(x)), fn = double(1L), as.logical(max))$fn } ### * ls_fit_centroid ls_fit_centroid <- function(x) { ## Fit a centroid additive tree distance along the lines of Carroll ## & Pruzansky (1980). 
In fact, solving ## ## \sum_{i,j: i \ne j} (\delta_{ij} - (g_i + g_j)) ^ 2 => min_g ## ## gives \sum_{j: j \ne i} (g_i + g_j - \delta_{ij}) = 0, or (also ## in Barthemely & Guenoche) ## ## (n - 2) g_i + \sum_j g_j = \sum_{j: j \ne i} \delta_{ij} ## ## which after summing over all i and some manipulations eventually ## gives ## ## g_i = \frac{1}{n-2} (v_i - m), ## ## v_i = \sum_{j: j \ne i} \delta_{ij} ## s = \frac{1}{2(n-1)} \sum_{i,j: j \ne i} \delta_{ij} n <- attr(x, "Size") if(n <= 2L) return(as.cl_addtree(0 * x)) x <- as.matrix(x) g <- rowSums(x) / (n - 2) - sum(x) / (2 * (n - 1) * (n - 2)) as.cl_addtree(as.dist(.make_centroid_matrix(g))) } .make_centroid_matrix <- function(g) { y <- outer(g, g, "+") diag(y) <- 0 y } ### * as.cl_addtree as.cl_addtree <- function(x) UseMethod("as.cl_addtree") as.cl_addtree.default <- function(x) { if(inherits(x, "cl_addtree")) x else if(is.atomic(x) || inherits(x, "cl_ultrametric")) .cl_addtree_from_veclh(x) else if(is.matrix(x)) { ## Should actually check whether the matrix is symmetric, >= 0 ## and satisfies the 4-point conditions ... .cl_addtree_from_veclh(as.dist(x)) } else if(is.cl_dendrogram(x)) .cl_addtree_from_veclh(cl_ultrametric(x)) else stop("Cannot coerce to 'cl_addtree'.") } as.cl_addtree.phylo <- function(x) .cl_addtree_from_veclh(as.dist(cophenetic(x))) ## Phylogenetic trees with edge/branch lengths yield additive tree ## dissimilarities. ### * .cl_addtree_from_veclh .cl_addtree_from_veclh <- function(x, size = NULL, labels = NULL) { cl_proximity(x, "Additive tree distances", labels = labels, size = size, class = c("cl_addtree", "cl_dissimilarity", "cl_proximity", "dist")) } ### * .cl_addtree_from_addtree_approximation .cl_addtree_from_addtree_approximation <- function(x, size = NULL, labels = NULL) { ## Turn x into an addtree after possibly rounding to non-additivity ## significance (note that this is not guaranteed to work ...). 
mnum <- .non_additivity(x, max = TRUE) x <- round(x, floor(abs(log10(mnum)))) .cl_addtree_from_veclh(x, size = size, labels = labels) } ### * .decompose_addtree .decompose_addtree <- function(x, const = NULL) { ## Decompose an addtree into an ultrametric and a centroid ## distance. ## If 'const' is not given, we take the root as half way between the ## diameter of the addtree, and choose a minimal constant to ensure ## non-negativity (but not positivity) of the ultrametric. ## As this is all slightly dubious and it is not quite clear how ## much positivity we want in the ultrametric of the decomposition, ## we keep this hidden. For plotting addtrees, the choice of the ## constant does not seem to matter. x <- as.matrix(x) n <- nrow(x) ## Determine diameter. ind <- which.max(x) - 1 u <- ind %% n + 1 v <- ind %/% n + 1 if(!is.null(const)) g <- pmax(x[u, ], x[v, ]) - const else { g <- pmax(x[u, ], x[v, ]) - x[u, v] / 2 u <- x - .make_centroid_matrix(g) k <- - min(u) g <- g - k / 2 } u <- x - .make_centroid_matrix(g) names(g) <- rownames(x) ## Ensure a valid ultrametric. d <- .ultrametrify(as.dist(u)) u <- .cl_ultrametric_from_veclh(d, nrow(x), rownames(x)) ## Note that we return the centroid distances to the root, and not ## between the objects (as.dist(.make_centroid_matrix(g))) ... list(Ultrametric = as.cl_ultrametric(u), Centroid = g) } ### * plot.cl_addtree plot.cl_addtree <- function(x, ...) { ## Construct a dendrogram-style representation of the addtree with ## the root half way between the diameter, and plot. y <- .decompose_addtree(x, max(x)) u <- y$Ultrametric g <- y$Centroid ## We halve the scale of the ultrametric, and add the maximal g from ## the centroid. h <- hclust(as.dist(u / 2), "single") h$height <- h$height + max(g) d <- as.dendrogram(h) ## Now modify the heights of the leaves so that the objects giving ## the diameter of the addtree end up with height zero. 
g <- max(g) - g names(g) <- labels(g) d <- dendrapply(d, function(n) { if(!is.leaf(n)) return(n) attr(n, "height") <- g[attr(n, "label")] n }) ## And finally plot plot(d, ...) } ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/bag.R0000644000175100001440000000276711304023136012302 0ustar hornikuserscl_bag <- function(x, B, k = NULL, algorithm = "kmeans", parameters = NULL, method = "DFBC1", control = NULL) { ## Currently, method 'DFBC1' (Dudoit-Fridlyand BagClust1) is the ## only one available, and argument 'control' is ignored. ## Construct reference partition. algorithm <- match.fun(algorithm) reference <- eval(as.call(c(list(algorithm, x), if(!is.null(k)) list(k), parameters))) ## Construct bootstrap ensemble. clusterings <- cl_boot(x, B, k, algorithm, parameters, resample = TRUE) ## Construct Dudoit-Fridlyand BagClust1 consensus partitions, ## suitably generalized ... ## ## In principle, this could be turned into a "constructive" method ## for cl_consensus(), also allowing for weights (straightforward). ## E.g., ## .cl_consensus_partition_DFBC1(clusterings, weights, control) ## where either 'control specifies a reference partition, or the ## first element of 'clusterings' is taken as such. ## k <- max(sapply(c(clusterings, reference), n_of_classes)) M_ref <- cl_membership(reference, k) M <- matrix(0, NROW(M_ref), k) for(b in seq_len(B)) { mem <- cl_membership(clusterings[[b]], k) ## Match classes to reference partition. ind <- solve_LSAP(crossprod(M_ref, mem), maximum = TRUE) M <- M + mem[, ind] } as.cl_partition(cl_membership(as.cl_membership(M / B), k)) } clue/R/dissimilarity.R0000644000175100001440000004447113435044476014456 0ustar hornikusers### * cl_dissimilarity cl_dissimilarity <- function(x, y = NULL, method = "euclidean", ...) { x <- as.cl_ensemble(x) is_partition_ensemble <- (inherits(x, "cl_partition_ensemble") || all(vapply(x, .has_object_memberships, NA))) ## Be nice. 
if(is.character(y) || is.function(y)) { method <- y y <- NULL } if(is.function(method)) method_name <- "user-defined method" else { if(!inherits(method, "cl_dissimilarity_method")) { ## Get the method definition and description from the ## registry. type <- ifelse(is_partition_ensemble, "partition", "hierarchy") method <- get_cl_dissimilarity_method(method, type) } method_name <- method$description method <- method$definition } if(!is.null(y)) { y <- as.cl_ensemble(y) is_partition_ensemble_y <- (inherits(y, "cl_partition_ensemble") || all(vapply(x, .has_object_memberships, NA))) if(!identical(is_partition_ensemble, is_partition_ensemble_y)) stop("Cannot mix partitions and hierarchies.") if(n_of_objects(x) != n_of_objects(y)) stop("All clusterings must have the same number of objects.") ## Build a cross-proximity object of cross-dissimilarities. d <- matrix(0, length(x), length(y)) for(j in seq_along(y)) d[, j] <- sapply(x, method, y[[j]], ...) dimnames(d) <- list(names(x), names(y)) return(cl_cross_proximity(d, method_name, class = "cl_cross_dissimilarity")) } ## Otherwise, build a proximity object of dissimilarities. n <- length(x) d <- vector("list", length = n - 1L) ind <- seq_len(n) while(length(ind) > 1L) { j <- ind[1L] ind <- ind[-1L] d[[j]] <- sapply(x[ind], method, x[[j]], ...) } cl_proximity(unlist(d), method_name, labels = names(x), size = n, class = c("cl_dissimilarity", "cl_proximity", "dist")) } ### ** .cl_dissimilarity_partition_euclidean .cl_dissimilarity_partition_euclidean <- function(x, y) { k <- max(n_of_classes(x), n_of_classes(y)) M_x <- cl_membership(x, k) M_y <- cl_membership(y, k) ## Match classes from conforming memberships. 
ind <- solve_LSAP(crossprod(M_x, M_y), maximum = TRUE) sqrt(sum((M_x - M_y[, ind]) ^ 2)) } ### ### ** .cl_dissimilarity_partition_manhattan .cl_dissimilarity_partition_manhattan <- function(x, y) { k <- max(n_of_classes(x), n_of_classes(y)) M_x <- cl_membership(x, k) M_y <- cl_membership(y, k) C <- .cxdist(M_x, M_y, "manhattan") ind <- solve_LSAP(C) sum(C[cbind(seq_along(ind), ind)]) } ### ** .cl_dissimilarity_partition_comemberships .cl_dissimilarity_partition_comemberships <- function(x, y) { ## We used to have the straightforward ## C_x <- tcrossprod(cl_membership(x)) # M_x M_x' ## C_y <- tcrossprod(cl_membership(y)) # M_y M_y' ## sum((C_x - C_y) ^ 2) / n_of_objects(x) ^ 2 ## But note that ## \| AA' - BB' \|^2 ## = tr((AA' - BB')'(AA' - BB') ## = tr(A'A A'A) - 2 tr(A'B B'A) + tr(B'B B'B) ## = \| A'A \|^2 - 2 \| A'B \|^2 + \| B'B \|^2 ## which can be computed much more efficiently as all involved cross ## product matrices are "small" ... k <- max(n_of_classes(x), n_of_classes(y)) M_x <- cl_membership(x, k) M_y <- cl_membership(y, k) sqrt(sum(crossprod(M_x) ^ 2) - 2 * sum(crossprod(M_x, M_y) ^ 2) + sum(crossprod(M_y) ^ 2)) } ### ** .cl_dissimilarity_partition_symdiff .cl_dissimilarity_partition_symdiff <- function(x, y) { ## Cardinality of the symmetric difference of the partitions ## regarded as binary equivalence relations, i.e., the number of ## discordant pairs. ## Handle soft partitions using the corresponding hard ones. ## (At least, for the time being.) n <- n_of_objects(x) .cl_dissimilarity_partition_Rand(x, y) * choose(n, 2) } ### ** .cl_dissimilarity_partition_Rand .cl_dissimilarity_partition_Rand <- function(x, y) { ## Handle soft partitions using the corresponding hard ones. ## (At least, for the time being.) 
1 - .cl_agreement_partition_Rand(x, y) } ### ** .cl_dissimilarity_partition_GV1 .cl_dissimilarity_partition_GV1 <- function(x, y) { k_x <- n_of_classes(x) k_y <- n_of_classes(y) M_x <- cl_membership(x, k_x) M_y <- cl_membership(y, k_y) C <- outer(colSums(M_x ^ 2), colSums(M_y ^ 2), "+") - 2 * crossprod(M_x, M_y) if(k_x < k_y) C <- rbind(C, matrix(0, nrow = k_y - k_x, ncol = k_y)) else if(k_x > k_y) C <- cbind(C, matrix(0, nrow = k_x, ncol = k_x - k_y)) ind <- solve_LSAP(C) sqrt(sum(C[cbind(seq_along(ind), ind)])) ## (Note that this sum really only includes matched non-dummy ## classes.) } ### ** .cl_dissimilarity_partition_BA_A .cl_dissimilarity_partition_BA_A <- function(x, y) { .cl_dissimilarity_partition_manhattan(as.cl_hard_partition(x), as.cl_hard_partition(y)) / 2 ## Could to this more efficiently, of course ... } ### ** .cl_dissimilarity_partition_BA_C .cl_dissimilarity_partition_BA_C <- function(x, y) { n_of_classes(x) + n_of_classes(y) - 2 * n_of_classes(cl_join(x, y)) } ### ** .cl_dissimilarity_partition_BA_D .cl_dissimilarity_partition_BA_D <- .cl_dissimilarity_partition_Rand ### ** .cl_dissimilarity_partition_BA_E .cl_dissimilarity_partition_BA_E <- function(x, y) { z <- table(cl_class_ids(x), cl_class_ids(y)) z <- z / sum(z) ## Average mutual information between the partitions. y <- outer(rowSums(z), colSums(z)) i <- which((z > 0) & (y > 0)) I <- sum(z[i] * log(z[i] / y[i])) ## Entropy of meet(x, y). i <- which(z > 0) H <- - sum(z[i] * log(z[i])) 1 - I / H } ### ** .cl_dissimilarity_partition_VI .cl_dissimilarity_partition_VI <- function(x, y, weights = 1) { ## Variation of information for general "soft clusterings", cf ## Section 5.2. in Meila (2002). 
weights <- rep_len(weights, n_of_objects(x)) weights <- weights / sum(weights) M_x <- cl_membership(x) ## Weighted marginal distribution of x: m_x <- colSums(weights * M_x) M_y <- cl_membership(y) ## Weighted marginal distribution of y: m_y <- colSums(weights * M_y) gamma <- crossprod(weights * M_x, M_y) delta <- outer(m_x, m_y) ## Entropy of x: H_x <- - sum(m_x * log(ifelse(m_x > 0, m_x, 1))) ## Entropy of y: H_y <- - sum(m_y * log(ifelse(m_y > 0, m_y, 1))) ## VI is H_x + H_y minus twice the (weighted) joint information. i <- which((gamma > 0) & (delta > 0)) H_x + H_y - 2 * sum(gamma[i] * log(gamma[i] / delta[i])) } ### ** .cl_dissimilarity_partition_Mallows .cl_dissimilarity_partition_Mallows <- function(x, y, p = 1, alpha = NULL, beta = NULL) { ## Currently, no "real" primal-dual solver for minimum cost flow ## problems, and lpSolve::lp.transport() seems to work only for ## integer bounds. Hence, rather than using ## ## C <- .cxdist(cl_membership(x), cl_membership(y), ## "minkowski", p) ^ p ## n_x <- nrow(C) ## n_y <- ncol(C) ## if(is.null(alpha)) ## alpha <- rep.int(1 / n_x, n_x) ## else { ## alpha <- rep_len(alpha, n_x) ## alpha <- alpha / sum(alpha) ## } ## ## etc right away, ensure a square cost matrix so that we can have ## integer bounds for at least the default case. k <- max(n_of_classes(x), n_of_classes(y)) M_x <- cl_membership(x, k) M_y <- cl_membership(y, k) C <- .cxdist(M_x, M_y, "minkowski", p) ^ p if(is.null(alpha)) alpha <- rep.int(1, k) if(is.null(beta)) beta <- rep.int(1, k) lpSolve::lp.transport(C, "min", rep.int("==", k), alpha, rep.int("==", k), beta, integers = NULL)$objval ^ (1 / p) } ### ** .cl_dissimilarity_partition_CSSD .cl_dissimilarity_partition_CSSD <- function(x, y, L = NULL, alpha = NULL, beta = NULL, ...) { ## Cluster Similarity Sensitive Distance. ## Reference: D. Zhou, J. Li and H. Zha (2005), ## A new Mallows distance based metric for comparing clusterings. 
## See .cl_dissimilarity_partition_Mallows() re solving cost flow ## problems. ## Dissimilarity is defined by minimizing ## \sum_{k,l} (1 - 2 w_{kl} / (alpha_k + beta_l)) L_{kl} ## where ## L_{kl} = \sum_i m_{x;ik} m_{y;il} distance(p_{x;k}, p_{y;l}) ## with m and p the memberships and prototypes, respectively. ## If we get matrices of prototypes, use .rxdist; otherwise, the ## user needs to specify an L function or matrix. k_x <- n_of_classes(x) k_y <- n_of_classes(y) M_x <- cl_membership(x, k_x) M_y <- cl_membership(y, k_y) if(!is.matrix(L)) { p_x <- cl_prototypes(x) p_y <- cl_prototypes(y) if(is.matrix(p_x) && is.matrix(p_y) && is.null(L)) L <- .rxdist(p_x, p_y, ...) else if(is.function(L)) L <- L(p_x, p_y) else stop("Cannot compute prototype distances.") } C <- crossprod(M_x, M_y) * L if(is.null(alpha)) alpha <- rep.int(1, k_x) if(is.null(beta)) beta <- rep.int(1, k_y) sum(C) - 2 * lpSolve::lp.transport(C / outer(alpha, beta, "+"), "max", rep.int("==", k_x), alpha, rep.int("==", k_y), beta, integers = NULL)$objval } ### ** .cl_dissimilarity_hierarchy_euclidean .cl_dissimilarity_hierarchy_euclidean <- function(x, y, weights = 1) { if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) u <- cl_object_dissimilarities(x) v <- cl_object_dissimilarities(y) sqrt(sum(weights * (u - v) ^ 2)) } ### ** .cl_dissimilarity_hierarchy_manhattan .cl_dissimilarity_hierarchy_manhattan <- function(x, y, weights = 1) { if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) u <- cl_object_dissimilarities(x) v <- cl_object_dissimilarities(y) sum(weights * abs(u - v)) } ### ** .cl_dissimilarity_hierarchy_cophenetic .cl_dissimilarity_hierarchy_cophenetic <- function(x, y) { if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) u <- cl_object_dissimilarities(x) v <- cl_object_dissimilarities(y) 1 - cor(u, v) ^ 2 } ### ** .cl_dissimilarity_hierarchy_gamma .cl_dissimilarity_hierarchy_gamma <- function(x, y) 
{ ## ## This is a dissimilarity measure that works for arbitrary ## dissimilarities, see e.g. Bock. ## (And the current implementation finally respects this ...) ## if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) u <- cl_object_dissimilarities(x) v <- cl_object_dissimilarities(y) n <- length(u) .C(C_clue_dissimilarity_count_inversions, as.double(u), as.double(v), as.integer(n), count = double(1L)) $ count / choose(n, 2) } ### ** .cl_dissimilarity_hierarchy_symdiff .cl_dissimilarity_hierarchy_symdiff <- function(x, y) { ## Cardinality of the symmetric difference of the n-trees when ## regarded as sets of subsets (classes) of the set of objects. x <- cl_classes(x) y <- cl_classes(y) sum(is.na(match(x, y))) + sum(is.na(match(y, x))) } ### ** .cl_dissimilarity_hierarchy_Chebyshev .cl_dissimilarity_hierarchy_Chebyshev <- function(x, y) { if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) u <- cl_object_dissimilarities(x) v <- cl_object_dissimilarities(y) max(abs(u - v)) } ### ** .cl_dissimilarity_hierarchy_Lyapunov .cl_dissimilarity_hierarchy_Lyapunov <- function(x, y) { if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) q <- cl_object_dissimilarities(x) / cl_object_dissimilarities(y) if(is.matrix(q)) q <- q[lower.tri(q)] log(max(q) / min(q)) } ### ** .cl_dissimilarity_hierarchy_BO .cl_dissimilarity_hierarchy_BO <- function(x, y, delta, ...) { ## Compute Boorman-Olivier (1973) dendrogram ("valued tree") ## dissimilarities of the form ## ## m_\delta(T_1, T_2) ## = \int_0^\infty \delta(P_1(\alpha), P_2(\alpha)) d\alpha ## ## where the trees (dendrograms) are defined as right-continuous ## maps from [0, \Infty) to the partition lattice. ## We can compute this as follows. Take the ultrametrics and use ## as.hclust() to detemine the heights \alpha_1(k) and \alpha_2(l) ## of the splits. Let \alpha_i be the sequence obtained by ## combining these two. 
Then ## ## m_\delta ## = \sum_{i=0}^{L-1} (\alpha_{i+1} - \alpha_i) ## \delta(P_1(\alpha_i), P_2(\alpha_i)) ## ## We use cutree() for computing the latter partitions. As we ## already have the hclust representations, we should be able to do ## things more efficiently ... if(inherits(x, "hclust")) t_x <- x else if(inherits(x, "cl_ultrametric")) t_x <- as.hclust(x) else if(is.cl_dendrogram(x)) t_x <- as.hclust(cl_ultrametric(x)) else return(NA) if(inherits(y, "hclust")) t_y <- y else if(inherits(y, "cl_ultrametric")) t_y <- as.hclust(y) else if(is.cl_dendrogram(y)) t_y <- as.hclust(cl_ultrametric(y)) else return(NA) if(is.unsorted(t_x$height) || is.unsorted(t_y$height)) return(NA) alpha <- sort(unique(c(t_x$height, t_y$height))) cuts_x <- cutree(t_x, h = alpha) cuts_y <- cutree(t_y, h = alpha) deltas <- mapply(cl_dissimilarity, lapply(split(cuts_x, col(cuts_x)), as.cl_partition), lapply(split(cuts_y, col(cuts_y)), as.cl_partition), MoreArgs = list(delta, ...)) sum(diff(alpha) * deltas[-length(deltas)]) } ### ** .cl_dissimilarity_hierarchy_spectral .cl_dissimilarity_hierarchy_spectral <- function(x, y) { if(!.has_object_dissimilarities(x) || !.has_object_dissimilarities(y)) return(NA) u <- cl_object_dissimilarities(x) v <- cl_object_dissimilarities(y) svd(as.matrix(u - v))$d[1L] } ### * as.dist.cl_dissimilarity as.dist.cl_dissimilarity <- function(m, diag = FALSE, upper = FALSE) { y <- c(m) ## Fill non-inherited attributes with default values. attributes(y) <- c(attributes(m)[c("Size", "Labels")], Diag = diag, Upper = upper, call = match.call()) ## (Note that as.dist.default() does not automatically add ## 'method'.) 
class(y) <- "dist" y } ### * [.cl_dissimilarity "[.cl_dissimilarity" <- function(x, i, j) { y <- NextMethod("[") if(!inherits(y, "cl_dissimilarity")) { description <- attr(x, "description") return(cl_cross_proximity(y, description = description, class = "cl_cross_dissimilarity")) } y } ### .cxdist .cxdist <- function(A, B, method = c("euclidean", "manhattan", "minkowski"), ...) { ## Return the column cross distance matrix of A and B. ## I.e., the matrix C = [c_{j,k}] with ## c_{j,k} = distance(A[, j], B[, k]) ## Currently, only Manhattan (L1) distances are provided. ## Extensions to Minkowski or even more distances (a la dist()) ## could be added eventually. ## ## Possible implementations include ## ## foo_a <- function(A, B) ## apply(B, 2, function(u) colSums(abs(A - u))) ## foo_d <- function(A, B) { ## out <- as.matrix(dist(rbind(t(A), t(B)), "manhattan")) ## dimnames(out) <- NULL ## nc_B <- NCOL(B) ## out[seq(from = NCOL(A) + 1, length.out = nc_B), seq_len(nc_B)] ## } ## foo_f <- function(A, B) { ## out <- matrix(0, NCOL(A), NCOL(B)) ## for(j in seq_len(NCOL(A))) ## for(k in seq_len(NCOL(B))) ## out[j, k] = sum(abs(A[, j] - B[, k])) ## out ## } ## ## The one actually used seems to be the best performer, with the ## "for" version a close second (note that "typically", A and B have ## much fewer columns than rows). ## only few columns method <- match.arg(method) ## Workhorse. FOO <- switch(method, "euclidean" = function(M) sqrt(colSums(M ^ 2)), "manhattan" = function(M) colSums(abs(M)), "minkowski" = { ## Power needs to be given. p <- list(...)[[1L]] function(M) (colSums(abs(M) ^ p)) ^ (1 / p) }) out <- matrix(0, NCOL(A), NCOL(B)) for(k in seq_len(NCOL(B))) out[, k] <- FOO(A - B[, k]) out } ### .rxdist .rxdist <- function(A, B, method = c("euclidean", "manhattan", "minkowski"), ...) { ## Return the row cross distance matrix of A and B. 
## I.e., the matrix C = [c_{j,k}] with ## c_{j,k} = distance(A[j, ], B[k, ]) ## ## Could also do something like ## ind <- seq_len(NROW(B)) ## as.matrix(dist(rbind(B, A)))[-ind, ind] ## but that is *very* inefficient for the "usual" data by prototype ## case (where NROW(B) << NROW(A)). ## ## No fancy pmatching for methods for the time being. method <- match.arg(method) ## Workhorse: Full A, single row of b. FOO <- switch(method, "euclidean" = function(A, b) sqrt(rowSums(sweep(A, 2, b) ^ 2)), "manhattan" = function(A, b) rowSums(abs(sweep(A, 2, b))), "minkowski" = { ## Power needs to be given. p <- list(...)[[1L]] function(A, b) (rowSums(abs(sweep(A, 2, b)) ^ p)) ^ (1 / p) }) out <- matrix(0, NROW(A), NROW(B)) for(k in seq_len(NROW(B))) out[, k] <- FOO(A, B[k, ]) out } ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/lattice.R0000644000175100001440000001356713036514161013205 0ustar hornikuserscl_meet <- function(x, y) { ## General case. ## x either an ensemble, or x and y two clusterings with the same ## number of objects. if(!inherits(x, "cl_ensemble")) { ## Be nice about error messages. if(n_of_objects(x) != n_of_objects(y)) stop("Arguments 'x' and 'y' must have the same number of objects.") x <- cl_ensemble(x, y) } if(inherits(x, "cl_partition_ensemble")) .cl_meet_partition(x) else if(inherits(x, "cl_dendrogram_ensemble")) .cl_meet_dendrogram(x) else if(inherits(x, "cl_hierarchy_ensemble")) .cl_meet_hierarchy(x) else stop("Cannot compute meet of given clusterings.") } .cl_meet_partition <- function(x) { x <- unique(x) if(length(x) == 1L) return(cl_partition_by_class_ids(cl_class_ids(x[[1L]]))) ids <- seq_len(n_of_objects(x[[1L]])) ## Cross-classify the objects. z <- split(ids, lapply(x, cl_class_ids)) ## Subscript on the non-empty cells to get adjacent class ids. 
lens <- lengths(z) pos <- which(lens > 0) ids[unlist(z, use.names = FALSE)] <- rep.int(seq_along(z[pos]), lens[pos]) cl_partition_by_class_ids(ids) } .cl_meet_dendrogram <- function(x) { ## Meet of an ensemble of dendrograms. ## We need the maximal ultrametric dominated by the given ones, ## which can be obtained by hierarchical clustering with single ## linkage on the pointwise minima of the ultrametrics. as.cl_dendrogram(hclust(as.dist(do.call(pmin, lapply(x, cl_ultrametric))), "single")) } .cl_meet_hierarchy <- function(x) { ## Meet of an ensemble of n-trees. ## Need to find the classes in *all* n-trees. ## Equivalent to computing a strict majority tree. .cl_consensus_hierarchy_majority(x, rep.int(1, length(x)), list(p = 1)) } cl_join <- function(x, y) { ## General case. ## x either an ensemble, or x and y two clusterings with the same ## number of objects. if(!inherits(x, "cl_ensemble")) { ## Be nice about error messages. if(n_of_objects(x) != n_of_objects(y)) stop("Arguments 'x' and 'y' must have the same number of objects.") x <- cl_ensemble(x, y) } if(inherits(x, "cl_partition_ensemble")) .cl_join_partition(x) else if(inherits(x, "cl_dendrogram_ensemble")) .cl_join_dendrogram(x) else if(inherits(x, "cl_hierarchy_ensemble")) .cl_join_hierarchy(x) else stop("Cannot compute join of given clusterings.") } .cl_join_partition <- function(x) { x <- unique(x) if(length(x) == 1) return(cl_partition_by_class_ids(cl_class_ids(x[[1L]]))) ## Canonicalize: ensure that class ids are always the integers from ## one to the number of classes. n <- sapply(x, n_of_classes) ids <- mapply(function(p, ncp) match(cl_class_ids(p), seq_len(ncp)), x, n, SIMPLIFY = FALSE) ## Order according to the number of classes. ids <- ids[order(n)] ## And now incrementally build the join. jcids <- ids[[1L]] # Class ids of the current join. jnc <- length(unique(jcids)) # Number of classes of this. 
for(b in seq.int(from = 2, to = length(x))) { z <- table(jcids, ids[[b]]) ## It is faster to work on the smaller partition, but this ## should be ensured by the reordering ... ## We need to "join all elements in the same class in at least ## one of the partitions". In the matrix ## C <- (tcrossprod(z) > 0) ## entry i,j is true/one iff z_{ik} z_{jk} > 0 for classes ## i and j in the current join (ids jcids) and some class k in ## the partition with ids[[b]], so that i and j must be joined. ## I.e., C indicates which classes need to be joined directly. ## We need to determine the transitive closure of this relation, ## which can be performed by repeating ## C_{t+1} <- ((C_t %*% C) > 0) ## with C_1 = C until C_t does not change. C_new <- C_old <- C <- (tcrossprod(z) > 0) repeat { C_new <- (C_old %*% C) > 0 if(all(C_new == C_old)) break C_old <- C_new } C <- C_new ## This should now have the connected components. ## Next, compute the map of the join class ids to the ids of ## these components. cnt <- 0 map <- remaining_ids <- seq_len(jnc) while(length(remaining_ids)) { cnt <- cnt + 1 pos <- which(C[remaining_ids[1L], remaining_ids] > 0) map[remaining_ids[pos]] <- cnt remaining_ids <- remaining_ids[-pos] } ## And update the join: jcids <- map[jcids] jnc <- cnt } cl_partition_by_class_ids(jcids) } .cl_join_dendrogram <- function(x) { ## Join of an ensemble of dendrograms. as.cl_dendrogram(do.call(pmax, lapply(x, cl_ultrametric))) } .cl_join_hierarchy <- function(x) { ## Join of an ensemble of n-trees. ## Only exists if the union of all classes of the n-trees is itself ## an n-tree (see Barthelemy et al). classes <- unique(unlist(lapply(x, cl_classes), recursive = FALSE)) ## Now check if this is an n-tree. ## We must verify that for all classes A and B, their intersection ## is A, B, or empty. 
check <- function(A, B) { m_AB <- match(A, B) m_BA <- match(B, A) ((all(is.na(m_AB)) && all(is.na(m_BA))) || all(is.finite(m_AB)) || all(is.finite(m_BA))) } for(i in seq_along(classes)) { A <- classes[[i]] for(j in seq_along(classes)) if(!check(A, classes[[j]])) stop("Join of given n-trees does not exist.") } as.cl_hierarchy(.cl_ultrametric_from_classes(classes)) } clue/R/sumt.R0000644000175100001440000000722111304023136012527 0ustar hornikuserssumt <- function(x0, L, P, grad_L = NULL, grad_P = NULL, method = NULL, eps = NULL, q = NULL, verbose = NULL, control = list()) { ## Default values: make it nice for others to call us. if(is.null(eps)) eps <- sqrt(.Machine$double.eps) if(is.null(method)) method <- "CG" if(is.null(q)) q <- 10 if(is.null(verbose)) verbose <- getOption("verbose") Phi <- function(rho, x) L(x) + rho * P(x) if(is.null(grad_L) || is.null(grad_P)) { make_Phi <- function(rho) { function(x) Phi(rho, x) } make_grad_Phi <- function(rho) NULL } else { grad_Phi <- function(rho, x) grad_L(x) + rho * grad_P(x) make_Phi <- if(method == "nlm") { function(rho) { function(x) .structure(Phi(rho, x), gradient = grad_Phi(rho, x)) } } else function(rho) { function(x) Phi(rho, x) } make_grad_Phi <- function(rho) { function(x) grad_Phi(rho, x) } } ## ## For the penalized minimization, the Newton-type nlm() may be ## computationally infeasible (although it works much faster for ## fitting ultrametrics to the Phonemes data). ## De Soete recommends using Conjugate Gradients. ## We provide a simple choice: by default, optim(method = "CG") is ## used. If method is non-null and not "nlm", we use optim() with ## this method. In both cases, control gives the control parameters ## for optim(). ## If method is "nlm", nlm() is used, in which case control is ## ignored. Note that we call nlm() with checking analyticals ## turned off, as in some cases (e.g. when fitting ultrametrics) the ## penalty function is not even continuous ... 
optimize_with_penalty <- if(method == "nlm") function(rho, x) nlm(make_Phi(rho), x, check.analyticals = FALSE) $ estimate else { function(rho, x) optim(x, make_Phi(rho), gr = make_grad_Phi(rho), method = method, control = control) $ par } ## Note also that currently we do not check whether optimization was ## "successful" ... ## ## We currently require that x0 be a *list* of start values, the ## length of which gives the number of SUMT runs. But as always, ## let's be nice to users and developers, just in case ... if(!is.list(x0)) x0 <- list(x0) v_opt <- Inf x_opt <- NULL rho_opt <- NULL for(run in seq_along(x0)) { if(verbose) message(gettextf("SUMT run: %d", run)) x <- x0[[run]] ## ## Better upper/lower bounds for rho? rho <- max(L(x), 0.00001) / max(P(x), 0.00001) ## if(verbose) message(gettextf("Iteration: 0 Rho: %g P: %g", rho, P(x))) iter <- 1L repeat { ## ## Shouldnt't we also have maxiter, just in case ...? ## if(verbose) message(gettextf("Iteration: %d Rho: %g P: %g", iter, rho, P(x))) x_old <- x x <- optimize_with_penalty(rho, x) if(max(abs(x_old - x)) < eps) break iter <- iter + 1L rho <- q * rho } v <- Phi(rho, x) if(v < v_opt) { v_opt <- v x_opt <- x rho_opt <- rho } if(verbose) message(gettextf("Minimum: %g", v_opt)) } .structure(list(x = x_opt, L = L(x_opt), P = P(x_opt), rho = rho_opt, call = match.call()), class = "sumt") } clue/R/ultrametric.R0000644000175100001440000006365313435044702014116 0ustar hornikusers### * cl_ultrametric cl_ultrametric <- function(x, size = NULL, labels = NULL) { if(inherits(x, "cl_hierarchy")) { ## ## Strictly, not every hierarchy corresponds to an ultrametric. ## return(cl_ultrametric(.get_representation(x), size = size, labels = labels)) } else if(!inherits(x, "cl_ultrametric")) { ## Try using cophenetic(). ## This starts by coercing to hclust, which has methods for all ## currently supported hierarchical classification methods. 
## To support others, either provide as.hclust methods for ## these, or make cl_ultrametric() generic and add methods. ## Or use the fact that in R >= 2.1.0, stats::cophenetic() is ## generic. out <- cophenetic(x) } else { out <- x if(is.null(labels)) labels <- attr(x, "Labels") } .cl_ultrametric_from_veclh(out, labels = labels, size = size) } .cl_ultrametric_from_veclh <- function(x, size = NULL, labels = NULL, meta = NULL) { if(.non_ultrametricity(x) > 0) stop("Not a valid ultrametric.") u <- cl_proximity(x, "Ultrametric distances", labels = labels, size = size, class = c("cl_ultrametric", "cl_dissimilarity", "cl_proximity", "dist")) if(!is.null(meta)) attr(u, "meta") <- meta u } ### * as.cl_ultrametric as.cl_ultrametric <- function(x) UseMethod("as.cl_ultrametric") as.cl_ultrametric.default <- function(x) { if(inherits(x, "cl_ultrametric")) x else if(is.atomic(x)) .cl_ultrametric_from_veclh(x) else cl_ultrametric(x) } as.cl_ultrametric.matrix <- function(x) .cl_ultrametric_from_veclh(x[row(x) > col(x)], labels = rownames(x)) ### * as.dendrogram.cl_ultrametric as.dendrogram.cl_ultrametric <- function(object, ...) as.dendrogram(as.hclust(object), ...) ### * as.hclust.cl_ultrametric as.hclust.cl_ultrametric <- function(x, ...) { ## Hierarchical clustering with single linkage gives the minimal ## ultrametric dominated by a dissimilarity, see e.g. Bock (1974, ## Theorem 39.2). Hence, hclust(method = "single") on an ## ultrametric gives the hclust representation of the associated ## dendrogram. hclust(x, "single") } ### * cophenetic.cl_ultrametric cophenetic.cl_ultrametric <- function(x) as.dist(x) ### * plot.cl_ultrametric plot.cl_ultrametric <- function(x, ...) plot(as.dendrogram(x), ...) 
### * ls_fit_ultrametric

## Least-squares fit of an ultrametric to dissimilarity data.
##
## x: a dissimilarity ("dist") object or something coercible to one,
##    an ensemble/list of object dissimilarities (averaged before
##    fitting), or already a cl_ultrametric (returned as-is).
## method: fitting algorithm: "SUMT" (sequential unconstrained
##    minimization technique), "IP" (iterative projection), or "IR"
##    (iterative reduction).
## weights: weights for the squared residuals; a matrix conformable
##    with x, or a vector recycled to length(x).
## control: list of control parameters for the chosen workhorse.
##
## Returns a cl_ultrametric with the attained objective value recorded
## in its 'meta' attribute (as 'objval').
ls_fit_ultrametric <-
function(x, method = c("SUMT", "IP", "IR"), weights = 1,
         control = list())
{
    if(inherits(x, "cl_ultrametric")) {
        ## Already an ultrametric: perfect fit with objective value 0.
        return(.cl_ultrametric_with_meta_added(x, list(objval = 0)))
    }
    else if(is.cl_ensemble(x) || is.list(x)) {
        ## Might be given a list/ensemble of object dissimilarities.
        ## In this case, compute the suitably weighted average and
        ## proceed.
        if(length(x) == 0L)
            stop("Given ensemble contains no dissimilarities.")
        ## Let's be nice as usual ...
        ind <- !vapply(x, .has_object_dissimilarities, NA)
        if(any(ind))
            x[ind] <- lapply(x[ind], as.dist)
        x <- .weighted_mean_of_object_dissimilarities(x,
                                                      control$weights)
    }
    else if(!inherits(x, "dist"))
        x <- as.dist(x)
    ## Catch some special cases right away: with at most two objects,
    ## or when x already satisfies the ultrametricity constraints, x
    ## itself is the optimal fit.
    if(attr(x, "Size") <= 2L)
        return(.cl_ultrametric_with_meta_added(as.cl_ultrametric(x),
                                               list(objval = 0)))
    if(.non_ultrametricity(x, max = TRUE) == 0)
        return(.cl_ultrametric_with_meta_added(as.cl_ultrametric(x),
                                               list(objval = 0)))
    ## Handle weights.
    ## This is somewhat tricky ...
    if(is.matrix(weights)) {
        weights <- as.dist(weights)
        if(length(weights) != length(x))
            stop("Argument 'weights' must be compatible with 'x'.")
    }
    else
        weights <- rep_len(weights, length(x))
    if(any(weights < 0))
        stop("Argument 'weights' has negative elements.")
    if(!any(weights > 0))
        stop("Argument 'weights' has no positive elements.")
    method <- match.arg(method)
    ## Dispatch to the selected fitting algorithm.
    switch(method,
           SUMT = .ls_fit_ultrametric_by_SUMT(x, weights, control),
           IP = {
               .ls_fit_ultrametric_by_iterative_projection(x, weights,
                                                           control)
           },
           IR = {
               .ls_fit_ultrametric_by_iterative_reduction(x, weights,
                                                          control)
           })
}

### ** .ls_fit_ultrametric_by_SUMT

.ls_fit_ultrametric_by_SUMT <-
function(x, weights = 1, control = list())
{
    ## Fit an ultrametric to a dissimilarity by minimizing euclidean
    ## dissimilarity subject to the ultrametric constraint, using the
    ## sequential algorithm of de Soete (1984) with a slight change: we
    ## try to ensure that what we obtain satisfies the constraints
    ## "exactly" rather than approximately.  We (currently?) do that
    ## via rounding ...
    ##
    ## This fits and hence returns an ultrametric, *not* the hierarchy
    ## corresponding to the ultrametric.

    ## Normalize weights to unit sum.
    w <- weights / sum(weights)
    ## Control parameters:
    ## nruns,
    nruns <- control$nruns
    ## start.
    start <- control$start

    ## Handle start values and number of runs: 'start' (a list of
    ## start values) takes precedence over 'nruns'.
    if(!is.null(start)) {
        if(!is.list(start)) {
            ## Be nice to users.
            start <- list(start)
        }
    }
    else if(is.null(nruns)) {
        ## Use nruns only if start is not given.
        nruns <- 1L
    }

    ## If x is an ultrametric, or satisfies the ultrametricity
    ## constraints, return it.
    if(inherits(x, "cl_ultrametric")
       || (.non_ultrametricity(x, max = TRUE) == 0))
        return(.cl_ultrametric_with_meta_added(as.cl_ultrametric(x),
                                               list(objval = 0)))

    ## For the time being, use a simple minimizer.

    n <- attr(x, "Size")
    labels <- attr(x, "Labels")
    ## Handle missing values in x along the lines of de Soete (1984):
    ## set the corresponding weights to 0, and impute by the weighted
    ## mean.
    ind <- which(is.na(x))
    ## NOTE(review): any() on the integer index vector from which()
    ## works (non-empty => TRUE), though length(ind) > 0L would be more
    ## explicit.
    if(any(ind)) {
        w[ind] <- 0
        x[ind] <- weighted.mean(x, w, na.rm = TRUE)
    }

    ## We follow de Soete's notation, and use the veclh's (vector of
    ## lower half, in S the same as x[lower.tri(x)]) of the respective
    ## proximity objects.

    ## Objective: weighted sum of squared residuals; penalty: extent of
    ## violation of ultrametricity (plus nonnegativity).
    L <- function(d) sum(w * (x - d) ^ 2)
    P <- .make_penalty_function_ultrametric(n)
    grad_L <- function(d) 2 * w * (d - x)
    grad_P <- .make_penalty_gradient_ultrametric(n)

    if(is.null(start)) {
        ## Initialize by "random shaking".  Use sd() for simplicity.
        start <- replicate(nruns,
                           x + rnorm(length(x),
                                     sd = sd(x) / sqrt(3)),
                           simplify = FALSE)
    }

    ## And now ...
    out <- sumt(start, L, P, grad_L, grad_P,
                method = control$method, eps = control$eps,
                q = control$q, verbose = control$verbose,
                control = as.list(control$control))

    ## Force the solution to an exact ultrametric and record the
    ## attained objective value.
    d <- .ultrametrify(out$x)
    meta <- list(objval = L(d))
    .cl_ultrametric_from_veclh(d, n, labels, meta)
}

## Factory for the SUMT penalty function over veclh vectors of size-n
## dissimilarities.
.make_penalty_function_ultrametric <- function(n)
    function(d) {
        ## Smooth penalty function measuring the extent of violation of
        ## the ultrametricity constraint.  Also ensure nonnegativity ...
        (.non_ultrametricity(.symmetric_matrix_from_veclh(d, n))
         + sum(pmin(d, 0) ^ 2))
    }

## Factory for the gradient of the above penalty; the heavy lifting is
## done in C, and the result is mapped back to veclh form.
.make_penalty_gradient_ultrametric <- function(n)
    function(d) {
        gr <- matrix(.C(C_deviation_from_ultrametricity_gradient,
                        as.double(.symmetric_matrix_from_veclh(d, n)),
                        as.integer(n),
                        gr = double(n * n))$gr,
                     n, n)
        gr[row(gr) > col(gr)] + 2 * sum(pmin(d, 0))
    }

### ** .ls_fit_ultrametric_by_iterative_projection

##
## Functions
##   .ls_fit_ultrametric_by_iterative_projection()
##   .ls_fit_ultrametric_by_iterative_reduction()
## are really identical apart from the name of the C routine they call.
## (But will this necessarily always be the case in the future?)
## Merge maybe ...
## .ls_fit_ultrametric_by_iterative_projection <- function(x, weights = 1, control = list()) { if(any(diff(weights) != 0)) warning("Non-identical weights currently not supported.") labels <- attr(x, "Labels") n <- attr(x, "Size") x <- as.matrix(x) ## Control parameters: ## maxiter, maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 10000L ## nruns, nruns <- control$nruns ## order, order <- control$order ## tol, tol <- control$tol if(is.null(tol)) tol <- 1e-8 ## verbose. verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") ## Handle order and nruns. if(!is.null(order)) { if(!is.list(order)) order <- as.list(order) if(!all(vapply(order, function(o) all(sort(o) == seq_len(n)), NA))) stop("All given orders must be valid permutations.") } else { if(is.null(nruns)) nruns <- 1L order <- replicate(nruns, sample(n), simplify = FALSE) } ## ## Adjust in case support for non-identical weights is added. L <- function(d) sum((x - d) ^ 2) ## d_opt <- NULL v_opt <- Inf for(run in seq_along(order)) { if(verbose) message(gettextf("Iterative projection run: %d", run)) d <- .C(C_ls_fit_ultrametric_by_iterative_projection, as.double(x), as.integer(n), as.integer(order[[run]] - 1L), as.integer(maxiter), iter = integer(1L), as.double(tol), as.logical(verbose))[[1L]] v <- L(d) if(v < v_opt) { v_opt <- v d_opt <- d } } d <- .ultrametrify(as.dist(matrix(d_opt, n))) meta <- list(objval = L(d)) .cl_ultrametric_from_veclh(d, n, labels, meta) } ### ** .ls_fit_ultrametric_by_iterative_reduction .ls_fit_ultrametric_by_iterative_reduction <- function(x, weights = 1, control = list()) { if(any(diff(weights) != 0)) warning("Non-identical weights currently not supported.") labels <- attr(x, "Labels") n <- attr(x, "Size") x <- as.matrix(x) ## Control parameters: ## maxiter, maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 10000L ## nruns, nruns <- control$nruns ## order, order <- control$order ## tol, tol <- control$tol if(is.null(tol)) tol <- 1e-8 ## 
verbose. verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") ## Handle order and nruns. if(!is.null(order)) { if(!is.list(order)) order <- as.list(order) if(!all(vapply(order, function(o) all(sort(o) == seq_len(n)), NA))) stop("All given orders must be valid permutations.") } else { if(is.null(nruns)) nruns <- 1L order <- replicate(nruns, sample(n), simplify = FALSE) } ## ## Adjust in case support for non-identical weights is added. L <- function(d) sum((x - d) ^ 2) ## d_opt <- NULL v_opt <- Inf for(run in seq_along(order)) { if(verbose) message(gettextf("Iterative reduction run: %d", run)) d <- .C(C_ls_fit_ultrametric_by_iterative_reduction, as.double(x), as.integer(n), as.integer(order[[run]] - 1L), as.integer(maxiter), iter = integer(1L), as.double(tol), as.logical(verbose))[[1L]] v <- L(d) if(v < v_opt) { v_opt <- v d_opt <- d } } d <- .ultrametrify(as.dist(matrix(d_opt, n))) meta <- list(objval = L(d)) .cl_ultrametric_from_veclh(d, n, labels, meta) } ### * Ultrametric Target Fitters. ### ** ls_fit_ultrametric_target ls_fit_ultrametric_target <- function(x, y, weights = 1) { fitter <- if(identical(weights, 1)) # Default. function(x, w) mean(x) else function(x, w) weighted.mean(x, w) distfun <- function(x, u, w) sqrt(sum(w * (x - u) ^ 2)) .fit_ultrametric_target(x, y, weights, fitter, distfun) } ### ** l1_fit_ultrametric_target l1_fit_ultrametric_target <- function(x, y, weights = 1) { fitter <- if(identical(weights, 1)) # Default. function(x, w) median(x) else function(x, w) weighted_median(x, w) distfun <- function(x, u, w) sum(w * abs(x - u)) .fit_ultrametric_target(x, y, weights, fitter, distfun) } ### ** .fit_ultrametric_target .fit_ultrametric_target <- function(x, y, w, fitter, distfun = NULL) { w <- .handle_weights_for_ultrametric_target_fitters(w, x) ## The documentation says that x should inherit from dist, so coerce ## to this if needed but if not a matrix (as we will coerce back to ## a matrix right away). 
if(!inherits(x, "dist") && !is.matrix(x)) x <- as.dist(x) x <- as.matrix(x) y <- as.hclust(y) n <- length(y$order) ilist <- vector("list", n) out <- matrix(0, n, n) mat <- xlist <- wlist <- vector("list", n - 1L) for(i in seq_len(n - 1L)) { inds <- y$merge[i, ] ids1 <- if(inds[1L] < 0) -inds[1L] else ilist[[inds[1L]]] ids2 <- if(inds[2L] < 0) -inds[2L] else ilist[[inds[2L]]] ilist[[i]] <- c(ids1, ids2) mat[[i]] <- cbind(rep.int(ids1, rep.int(length(ids2), length(ids1))), rep.int(ids2, length(ids1))) xlist[[i]] <- x[mat[[i]]] wlist[[i]] <- w[mat[[i]]] } values <- pava(xlist, wlist, fitter) for(i in seq_len(n - 1L)) out[mat[[i]]] <- values[i] rownames(out) <- y$labels u <- as.cl_ultrametric(out + t(out)) if(!is.null(distfun)) attr(u, "meta") <- list(objval = distfun(as.dist(x), u, as.dist(w))) u } ### ** .handle_weights_for_ultrametric_target_fitters .handle_weights_for_ultrametric_target_fitters <- function(weights, x) { ## Handle weights for the ultrametric target fitters. ## This is somewhat tricky ... if(is.matrix(weights)) { if(any(dim(weights) != attr(x, "Size"))) stop("Argument 'weights' must be compatible with 'x'.") } else weights <- as.matrix(.dist_from_vector(rep_len(weights, length(x)))) if(any(weights < 0)) stop("Argument 'weights' has negative elements.") if(!any(weights > 0)) stop("Argument 'weights' has no positive elements.") weights } ### l1_fit_ultrametric l1_fit_ultrametric <- function(x, method = c("SUMT", "IRIP"), weights = 1, control = list()) { if(inherits(x, "cl_ultrametric")) return(.cl_ultrametric_with_meta_added(x, list(objval = 0))) if(!inherits(x, "dist")) x <- as.dist(x) ## Catch some special cases right away. if(attr(x, "Size") <= 2L) return(.cl_ultrametric_with_meta_added(as.cl_ultrametric(x), list(objval = 0))) if(.non_ultrametricity(x, max = TRUE) == 0) return(.cl_ultrametric_with_meta_added(as.cl_ultrametric(x), list(objval = 0))) ## Handle weights. ## This is somewhat tricky ... 
if(is.matrix(weights)) { weights <- as.dist(weights) if(length(weights) != length(x)) stop("Argument 'weights' must be compatible with 'x'.") } else weights <- rep_len(weights, length(x)) if(any(weights < 0)) stop("Argument 'weights' has negative elements.") if(!any(weights > 0)) stop("Argument 'weights' has no positive elements.") method <- match.arg(method) switch(method, SUMT = .l1_fit_ultrametric_by_SUMT(x, weights, control), IRIP = .l1_fit_ultrametric_by_IRIP(x, weights, control)) } ### ** .l1_fit_ultrametric_by_SUMT .l1_fit_ultrametric_by_SUMT <- function(x, weights = 1, control = list()) { ## Try a SUMT with "pseudo-gradients". w <- weights / sum(weights) ## Control parameters: ## gradient, gradient <- control$gradient if(is.null(gradient)) gradient <- TRUE ## nruns, nruns <- control$nruns ## start. start <- control$start ## Handle start values and number of runs. if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. start <- list(start) } } else if(is.null(nruns)) { ## Use nruns only if start is not given. nruns <- 1L } ## For the time being, use a simple minimizer. n <- attr(x, "Size") labels <- attr(x, "Labels") L <- function(d) sum(w * abs(d - x)) P <- .make_penalty_function_ultrametric(n) if(gradient) { grad_L <- function(d) w * sign(d - x) grad_P <- .make_penalty_gradient_ultrametric(n) } else grad_L <- grad_P <- NULL if(is.null(start)) { ## Initialize by "random shaking". Use sd() for simplicity. start <- replicate(nruns, x + rnorm(length(x), sd = sd(x) / sqrt(3)), simplify = FALSE) } ## And now ... 
out <- sumt(start, L, P, grad_L, grad_P, method = control$method, eps = control$eps, q = control$q, verbose = control$verbose, control = as.list(control$control)) d <- .ultrametrify(out$x) meta <- list(objval = L(d)) .cl_ultrametric_from_veclh(d, n, labels, meta) } ### ** .l1_fit_ultrametric_by_IRIP .l1_fit_ultrametric_by_IRIP <- function(x, weights = 1, control = list()) { ## An attempt of implementing "Iteratively Reweighted Iterative ## Projection" as described in Smith (2000, 2001), Journal of ## Classification. Note that this suggests using the Iterative ## Projection of Hubert and Arabie (1995), which we cannot as we ## have not (yet?) implemented this for the weighted case. Hence, ## we use our SUMT least squares ultrametric fitter instead. ## ## However, we never got this to converge properly ... w <- weights / sum(weights) ## Control parameters: ## MIN, MIN <- control$MIN if(is.null(MIN)) MIN <- 1e-3 ## (A rather small cut-off which worked best in the cases we tried.) ## eps, eps <- control$eps if(is.null(eps)) eps <- 1e-6 ## maxiter, maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 100L ## reltol, reltol <- control$reltol if(is.null(reltol)) reltol <- 1e-6 ## start, start <- control$start ## verbose. verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") n <- attr(x, "Size") labels <- attr(x, "Labels") L <- function(d) sum(w * abs(x - d)) ## Initialize by "random shaking" as for the L2 SUMT, but perhaps we ## should not do this? [Or do it differently?] u <- if(is.null(start)) x + rnorm(length(x), sd = sd(x) / 3) else start ## (No multiple runs for the time being.) L_new <- L(u) iter <- 1L while(iter <= maxiter) { if(verbose) message(gettextf("Outer iteration: %d", iter)) L_old <- L_new u_old <- u weights <- w / pmax(abs(u - x), MIN) u <- .ls_fit_ultrametric_by_SUMT(x, weights = weights, control = as.list(control$control)) ## Use some control arguments lateron ... 
L_new <- L(u) delta_L <- L_old - L_new delta_u <- max(abs(u_old - u)) if(verbose) message(gettextf("Change: u: %g L: %g", delta_u, delta_L)) if((delta_u < eps) || ((delta_L >= 0) && (delta_L <= reltol * (abs(L_old) + reltol)))) break iter <- iter + 1L } d <- .ultrametrify(u) meta <- list(objval = L(d), status = as.integer(iter == maxiter)) .cl_ultrametric_from_veclh(d, n, labels, meta) } ## * ls_fit_sum_of_ultrametrics ls_fit_sum_of_ultrametrics <- function(x, nterms = 1, weights = 1, control = list()) { if(!inherits(x, "dist")) x <- as.dist(x) ## We could catch some special cases right away: if x already is an ## ultrametric then the fit would be a list with x and nterms - 1 ## zero ultrametrics ... ## Control parameters: ## eps, eps <- control$eps if(is.null(eps)) eps <- 1e-6 ## maxiter, maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 100L ## method, method <- control$method if(is.null(method)) method <- "SUMT" ## reltol, reltol <- control$reltol if(is.null(reltol)) reltol <- 1e-6 ## verbose. verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") ## Do this at last. control <- as.list(control$control) ## And be nice ... if(identical(method, "SUMT") && is.null(control$nruns)) control$nruns <- 10L L <- function(u) sum((x - rowSums(matrix(unlist(u), ncol = nterms))) ^ 2) ## Init. u <- rep.int(list(as.cl_ultrametric(0 * x)), nterms) L_new <- L(u) ## Loop. iter <- 1L while(iter <= maxiter) { if(verbose) message(gettextf("Iteration: %d", iter)) L_old <- L_new delta_u <- 0 for(i in seq_len(nterms)) { if(verbose) message(gettextf("Term: %d", i)) u_old <- u[[i]] ## Compute residual r = x - \sum_{j: j \ne i} u(j) r <- x - rowSums(matrix(unlist(u[-i]), ncol = nterms - 1L)) ## Fit residual. u[[i]] <- ls_fit_ultrametric(r, method, weights, control) ## Accumulate change. 
change <- max(abs(u[[i]] - u_old)) if(verbose) message(gettextf("Change: %g", change)) delta_u <- max(delta_u, change) } L_new <- L(u) delta_L <- L_old - L_new if(verbose) message(gettextf("Overall change: u: %g L: %g\n", delta_u, delta_L)) if((delta_u < eps) || ((delta_L >= 0) && (delta_L <= reltol * (abs(L_old) + reltol)))) break iter <- iter + 1L } .structure(u, objval = L_new, status = as.integer(iter == maxiter)) } ### * as.dist.hclust ## Using hclust() with methods 'median' or 'centroid' typically gives ## reversals and hence not valid hierarchies, i.e., distances which do ## not satisfy the ultrametricity conditions. The distances can be ## obtained via cophenetic(), but ls_fit_ultrametric() prefers using ## as.dist() [as arguably more appropriate] which in turn can be made to ## "work" by providing as.matrix() methods [bypassing the need to handle ## the extra arguments 'diag' and 'upper' for as.dist()]. as.matrix.hclust <- function(x, ...) as.matrix(cophenetic(x)) ### * .non_ultrametricity .non_ultrametricity <- function(x, max = FALSE) { if(!is.matrix(x)) x <- .symmetric_matrix_from_veclh(x) .C(C_deviation_from_ultrametricity, as.double(x), as.integer(nrow(x)), fn = double(1L), as.logical(max))$fn } ### * .cl_ultrametric_from_classes .cl_ultrametric_from_classes <- function(x) { ## Compute an ultrametric from a hierarchy of classes (i.e., an ## n-tree). labels <- attr(x, "labels") ## Ensure we have no duplicates. x <- x[!duplicated(x)] ## .get_classes_in_hierarchy() orders according to cardinality, but ## a consensus method may forget to ... x[] <- x[order(lengths(x))] ## Get the objects (unique codes in the classes). objects <- sort(unique(unlist(x))) ## (Could also look at the classes of length 1.) ## Recursively compute the heights of the classes. heights <- double(length = length(x)) for(i in which(lengths(x) > 1L)) { ## Find the relevant classes. 
j <- sapply(x[seq_len(i - 1L)], function(s) all(s %in% x[[i]])) heights[i] <- max(heights[j]) + 1 } ## Next, create an incidence matrix (objects by classes). incidences <- sapply(x, function(s) objects %in% s) ## Now that we have the heights and incidences, we can compute ## distances, using the idea that ## distance(i, j) = min(height(A): A contains i and j) n <- length(objects) d <- matrix(0, n, n) for(i in objects) d[i, ] <- heights[apply((rep(incidences[i, ], each = n) & incidences), 1L, which.max)] dimnames(d) <- rep.int(list(labels), 2L) as.cl_ultrametric(d) } ### * .cl_ultrametric_with_meta_added .cl_ultrametric_with_meta_added <- function(x, meta = NULL) { ## An alternative to adding a 'meta' argument to cl_ultrametric(). attr(x, "meta") <- meta x } ### .ultrametrify .ultrametrify <- function(x) { ## Ensure ultrametricity. ## In earlier versions, function ## .cl_ultrametric_from_ultrametric_approximation() tried rounding ## to non-ultrametric significance, using ## round(x, floor(abs(log10(.non_ultrametricity(x, max = TRUE))))) ## which is nice but does not guarantee ultrametricity (and may ## result in poorer approximations than what we use now). ## Hence, let us use single linkage hierarchical clustering which ## gives the best dominated ultrametric approximation. cophenetic(hclust(.dist_from_vector(x), "single")) } ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/boot.R0000644000175100001440000000255711304023136012511 0ustar hornikuserscl_boot <- function(x, B, k = NULL, algorithm = if(is.null(k)) "hclust" else "kmeans", parameters = list(), resample = FALSE) { clusterings <- if(!resample) { x <- rep.int(list(x), B) eval(as.call(c(list(as.name("lapply"), x, algorithm), if(!is.null(k)) list(k), parameters))) } else { replicate(B, expr = { algorithm <- match.fun(algorithm) ## ## This is not quite perfect. 
We have ## cl_predict() to encapsulate the process of ## assigning objects to classes, but for sampling ## from the objects we assume that they correspond ## to the *rows* of 'x'. Argh. ## ind <- sample(NROW(x), replace = TRUE) train <- if(length(dim(x)) == 2) x[ind, ] else x[ind] out <- eval(as.call(c(list(algorithm, train), if(!is.null(k)) list(k), parameters))) as.cl_partition(cl_predict(out, x, "memberships")) }, simplify = FALSE) } cl_ensemble(list = clusterings) } clue/R/predict.R0000644000175100001440000002456013267260544013216 0ustar hornikusers## ## Maybe add support for "auto" type (class_ids when predicting from a ## hard, memberships when predicting from a soft partition) eventually. ## cl_predict <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) UseMethod("cl_predict") ## Default method. ## Should also work for kcca() from package flexclust. cl_predict.default <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) .as_cl_class_ids_or_membership(predict(object, newdata, ...), type) ## Package stats: kmeans() (R 2.1.0 or better). cl_predict.kmeans <- function(object, newdata = NULL, type = c("class_ids", "memberships"), ...) { if(is.null(newdata)) return(.cl_class_ids_or_membership(object, type)) d <- .rxdist(newdata, object$centers) .as_cl_class_ids_or_membership(max.col(-d), type) } ## Package cluster: ## * fanny() cannot make "new" predictions. ## * clara() gives medoids, and takes metric data using Euclidean or ## Manhattan dissimilarities (and we can figure out which by looking ## at the call and the default values). ## * pam() gives medoids, but might have been called with dissimilarity ## data, so is tricky. We can always find out which by looking at the ## medoids: as in the dissimilarity input case this is a vector of ## class labels, and a matrix with in each row the coordinates of one ## medoid otherwise. 
## We then still need to figure out whether
## Euclidean or Manhattan distances were used by looking at the call
## and the default values.
## Both pam() and clara() show that the interfaces could be improved to
## accommodate modern needs, e.g., for bagging.

## Package cluster: fanny() objects carry no usable prototypes, so
## predictions are possible only for the training data.
cl_predict.fanny <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    if(is.null(newdata))
        return(.cl_class_ids_or_membership(object, type))
    stop("Cannot make new predictions.")
}

## Package cluster: clara() stores metric-data medoids; classify new
## data by assigning each row to its closest medoid.
cl_predict.clara <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    if(is.null(newdata))
        return(.cl_class_ids_or_membership(object, type))
    ##
    ## Add support eventually ...
    if(identical(object$call$stand, TRUE))
        warning("Standardization is currently not supported.")
    ##
    method <- object$call$metric
    if(is.null(method)) {
        ## Not given in the call, hence use default value.
        method <- eval(formals(cluster::clara)$metric)[1L]
        ## (Or hard-wire the default value: "euclidean".)
    }
    d <- .rxdist(newdata, object$medoids, method)
    ## Assign each new observation to the closest medoid.
    .as_cl_class_ids_or_membership(max.col(-d), type)
}

## Package cluster: pam().  With dissimilarity input, the medoids
## component is a vector of class labels rather than a coordinate
## matrix, in which case no new predictions can be made.
cl_predict.pam <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    if(is.null(newdata))
        return(.cl_class_ids_or_membership(object, type))
    prototypes <- object$medoids
    if(!is.matrix(prototypes))
        stop("Cannot make new predictions.")
    ##
    ## Add support eventually ...
    if(identical(object$call$stand, TRUE))
        warning("Standardization is currently not supported.")
    ##
    method <- object$call$metric
    if(is.null(method)) {
        ## Not given in the call, hence use default value.
        method <- eval(formals(cluster::pam)$metric)[1L]
        ## (Or hard-wire the default value: "euclidean".)
    }
    d <- .rxdist(newdata, object$medoids, method)
    ## Assign each new observation to the closest medoid.
    .as_cl_class_ids_or_membership(max.col(-d), type)
}

## Package RWeka: clusterers return objects inheriting from
## "Weka_clusterer", whose predict() method already supports both
## class ids and memberships via 'type'.
cl_predict.Weka_clusterer <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    if(is.null(newdata))
        return(.cl_class_ids_or_membership(object, type))
    .as_cl_class_ids_or_membership(predict(object,
                                           newdata = newdata,
                                           type = type, ...),
                                   type)
}

## Package cba: ccfkms().
cl_predict.ccfkms <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    if(is.null(newdata))
        return(.cl_class_ids_or_membership(object, type))
    .as_cl_class_ids_or_membership(as.vector(predict(object,
                                                     newdata)$cl),
                                   type)
}

## Package cba: rockCluster() returns objects of class "rock".
## If x is a Rock object, fitted(x) and predict(x, newdata) can result
## in missing classifications, as
##   In the case a 'drop' value greater than zero is specified, all
##   clusters with size equal or less than this value are removed from
##   the classifier.  Especially, 'fitted' uses a threshold of one
##   because for singleton clusters the neighborhood is empty.
cl_predict.rock <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    ## With no new data, re-classify the stored training data.
    if(is.null(newdata))
        newdata <- object$x
    ids <- as.vector(predict(object, newdata, ...)$cl)
    .as_cl_class_ids_or_membership(ids, type)
}

## Package cclust: cclust().
cl_predict.cclust <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    ## Package cclust provides predict.cclust() which returns (again)
    ## an object of class "cclust", but does not give the labels of the
    ## original data in case no new data are given.
    if(is.null(newdata))
        return(.cl_class_ids_or_membership(object, type))
    .as_cl_class_ids_or_membership(predict(object, newdata), type)
}

## Package e1071: cmeans() gives objects of class "fclust".
cl_predict.fclust <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    if(is.null(newdata))
        return(.cl_class_ids_or_membership(object, type))
    ## Note that the 'fclust' objects returned by cmeans() do not
    ## always directly contain the information on the fuzzification
    ## parameter m and the distance (Euclidean/Manhattan) employed, so
    ## we have to engineer this from the matched call and the default
    ## arguments.
    nms <- names(object$call)
    ## Note that we cannot directly use object$call$m, as this could
    ## give the 'method' argument if 'm' was not given (partial
    ## matching).
    m <- if("m" %in% nms)
        object$call$m
    else {
        ## Not given in the call, hence use default value.
        formals(e1071::cmeans)$m
        ## (Or hard-wire the default value: 2.)
    }
    method <- if("dist" %in% nms)
        object$call$dist
    else {
        ## Not given in the call, hence use default value.
        formals(e1071::cmeans)$dist
        ## (Or hard-wire the default value: "euclidean".)
    }
    d <- .rxdist(newdata, object$centers, method)
    ## Fuzzy memberships from the cross-dissimilarities, using the
    ## fuzzification exponent and the metric-dependent power.
    power <- c(m, if(method == "euclidean") 2 else 1)
    M <- .memberships_from_cross_dissimilarities(d, power)
    .as_cl_class_ids_or_membership(M, type)
}

## Package e1071: cshell().
cl_predict.cshell <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    if(is.null(newdata))
        return(.cl_class_ids_or_membership(object, type))
    ## Not surprisingly, this is rather similar to what we do for
    ## fclust objects.  Only dissimilarities (and exponents) need to be
    ## computed differently ...
    nms <- names(object$call)
    m <- if("m" %in% nms)
        object$call$m
    else {
        ## Not given in the call, hence use default value.
        formals(e1071::cshell)$m
        ## (Or hard-wire the default value: 2.)
    }
    method <- if("dist" %in% nms)
        object$call$dist
    else {
        ## Not given in the call, hence use default value.
        formals(e1071::cshell)$dist
        ## (Or hard-wire the default value: "euclidean".)
    }
    d <- .rxdist(newdata, object$centers, method)
    ## Squared deviations from the shells of the stored radii.
    d <- sweep(d, 2, object$radius) ^ 2
    M <- .memberships_from_cross_dissimilarities(d, m)
    .as_cl_class_ids_or_membership(M, type)
}

## Package e1071: bclust().
##
## One might argue that it would be better to use the 'dist.method'
## employed for the hierarchical clustering, but it seems that class
## labels ("clusters") are always assigned using Euclidean distances.
cl_predict.bclust <- cl_predict.kmeans
##
## Package flexclust: kcca() returns objects of S4 class "kcca" which
## extends S4 class "flexclust".
cl_predict.kcca <- cl_predict.default

## Package flexmix: class "flexmix".
cl_predict.flexmix <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    ## Without new data, report the fitted classification/memberships.
    if(is.null(newdata))
        return(.cl_class_ids_or_membership(object, type))
    ## Posterior class probabilities serve as memberships.
    p <- modeltools::posterior(object, newdata, ...)
    .as_cl_class_ids_or_membership(p, type)
}

## Package mclust: Mclust().
cl_predict.Mclust <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    if(is.null(newdata))
        return(.cl_class_ids_or_membership(object, type))
    out <- predict(object, newdata, ...)
    ## Use the hard classification or the posterior probabilities,
    ## depending on what was asked for.
    switch(match.arg(type),
           class_ids = as.cl_class_ids(out$classification),
           memberships = as.cl_membership(out$z))
}

## Package movMF: movMF().
cl_predict.movMF <- cl_predict.Weka_clusterer

## Package clue: pclust().
cl_predict.pclust <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    if(is.null(newdata))
        return(.cl_class_ids_or_membership(object, type))
    ## Cross-dissimilarities between the new data and the prototypes ...
    delta <- object$family$D(newdata, object$prototypes)
    ## ... turned into memberships via the fuzzification parameter m and
    ## the family's dissimilarity exponent e.
    expo <- c(object$m, object$family$e)
    u <- .memberships_from_cross_dissimilarities(delta, expo)
    .as_cl_class_ids_or_membership(u, type)
}

## Package clue: (virtual) class "cl_partition".
cl_predict.cl_partition <-
function(object, newdata = NULL,
         type = c("class_ids", "memberships"), ...)
{
    ## Simply delegate to the method for the underlying representation.
    cl_predict(.get_representation(object), newdata = newdata, type, ...)
}

## Internal helpers: this looks a bit silly, but makes the rest of the
## code look nicer ...
## Report the fitted classification of 'x' as class ids or memberships,
## according to 'type'.
.cl_class_ids_or_membership <-
function(x, type = c("class_ids", "memberships"))
{
    type <- match.arg(type)
    if(type == "class_ids")
        cl_class_ids(x)
    else
        cl_membership(x)
}

## Coerce a prediction result 'x' (a vector of class ids or a membership
## matrix) to class ids or memberships, according to 'type'.
.as_cl_class_ids_or_membership <-
function(x, type = c("class_ids", "memberships"))
{
    type <- match.arg(type)
    if(type == "class_ids") {
        if(is.matrix(x)) {
            ## Same as for cl_class_ids.cl_membership().
            as.cl_class_ids(.structure(max.col(x), names = rownames(x)))
        } else
            as.cl_class_ids(x)
    } else
        as.cl_membership(x)
}
clue/R/agreement.R0000644000175100001440000002713313435044376013522 0ustar hornikusers### * cl_agreement

## Compute agreements between the clusterings in ensemble 'x' (or
## cross-agreements between the ensembles 'x' and 'y') using the given
## agreement method (a name registered via set_cl_agreement_method(), a
## method object, or a user function).
cl_agreement <-
function(x, y = NULL, method = "euclidean", ...)
{
    ##
    ## This code is repeated from cl_dissimilarity(), mutatis mutandis.
    ## Not really a big surprise ...
    ##
    x <- as.cl_ensemble(x)
    is_partition_ensemble <-
        (inherits(x, "cl_partition_ensemble")
         || all(vapply(x, .has_object_memberships, NA)))
    ## Be nice.
    if(is.character(y) || is.function(y)) {
        method <- y
        y <- NULL
    }
    if(is.function(method))
        method_name <- "user-defined method"
    else {
        if(!inherits(method, "cl_agreement_method")) {
            ## Get the method definition and description from the
            ## registry.
            type <- if(is_partition_ensemble) "partition" else "hierarchy"
            method <- get_cl_agreement_method(method, type)
        }
        method_name <- method$description
        method <- method$definition
    }
    if(!is.null(y)) {
        y <- as.cl_ensemble(y)
        ## BUG FIX: the ensemble type of 'y' used to be determined from
        ## 'x' (via vapply(x, ...)), so mixing partitions and
        ## hierarchies could go undetected.  Inspect 'y' instead.
        is_partition_ensemble_y <-
            (inherits(y, "cl_partition_ensemble")
             || all(vapply(y, .has_object_memberships, NA)))
        if(!identical(is_partition_ensemble, is_partition_ensemble_y))
            stop("Cannot mix partitions and hierarchies.")
        if(n_of_objects(x) != n_of_objects(y))
            stop("All clusterings must have the same number of objects.")
        ## Build a cross-proximity object of cross-agreements.
        d <- matrix(0, length(x), length(y))
        for(j in seq_along(y))
            d[, j] <- sapply(x, method, y[[j]], ...)
        dimnames(d) <- list(names(x), names(y))
        return(cl_cross_proximity(d, method_name,
                                  class = "cl_cross_agreement"))
    }
    ## Otherwise, build a proximity object of dissimilarities, filling
    ## the lower half column by column.
    n <- length(x)
    d <- vector("list", length = n - 1L)
    ind <- seq_len(n)
    while(length(ind) > 1L) {
        j <- ind[1L]
        ind <- ind[-1L]
        d[[j]] <- sapply(x[ind], method, x[[j]], ...)
    }
    ##
    ## We assume that self-agreements are always one ...
    ##
    cl_proximity(unlist(d), method_name,
                 labels = names(x),
                 self = rep.int(1, length(x)),
                 size = n, class = "cl_agreement")
}

### ** .cl_agreement_partition_euclidean

## Euclidean agreement: one minus normalized Euclidean dissimilarity.
.cl_agreement_partition_euclidean <-
function(x, y)
{
    ##
    ## Upper bound for maximal dissimilarity, maybe improve eventually.
    d_max <- sqrt(2 * n_of_objects(x))
    ##
    1 - .cl_dissimilarity_partition_euclidean(x, y) / d_max
}

### ** .cl_agreement_partition_manhattan

## Manhattan agreement: one minus normalized Manhattan dissimilarity.
.cl_agreement_partition_manhattan <-
function(x, y)
{
    ##
    ## Upper bound for maximal dissimilarity, maybe improve eventually.
    d_max <- 2 * n_of_objects(x)
    ##
    1 - .cl_dissimilarity_partition_manhattan(x, y) / d_max
}

### ** .cl_agreement_partition_Rand

## (Unadjusted) Rand index.
.cl_agreement_partition_Rand <-
function(x, y)
{
    n <- n_of_objects(x)
    ## Handle soft partitions using the corresponding hard ones.
    ## (At least, for the time being.)
    x <- table(cl_class_ids(x), cl_class_ids(y))
    ##
    ## The number A of concordant pairs is given by
    ##   A = choose(n,2) + \sum_{i,j} x_{ij}^2
    ##        - (1/2) * (\sum_i x_{i.}^2 + \sum_j x_{.j}^2)
    ##     = choose(n,2) + 2 \sum_{i,j} choose(x_{ij},2)
    ##        - (\sum_i choose(x_{i.},2) + \sum_j choose(x_{.j},2)
    ## with the first version certainly much faster to compute.
    ##
    1 + (sum(x^2) - (sum(rowSums(x)^2) + sum(colSums(x)^2)) / 2) /
        choose(n, 2)
}

### ** .cl_agreement_partition_cRand

## Adjusted (corrected) Rand index (Hubert & Arabie).
.cl_agreement_partition_cRand <-
function(x, y)
{
    if(!is.cl_hard_partition(x) || !is.cl_hard_partition(y))
        stop("Can only handle hard partitions.")
    n <- n_of_objects(x)
    x <- table(cl_class_ids(x), cl_class_ids(y))
    ##
    ## The basic formula is
    ##   (Sxy - E) / ((Sx. + S.y) / 2 - E)
    ## where
    ##   Sxy = \sum_{i,j} choose(x_{ij}, 2)
    ##   Sx. = \sum_i choose(x_{i.}, 2)
    ##   S.y = \sum_j choose(x_{.j}, 2)
    ## and
    ##   E = Sx. * S.y / choose(n, 2)
    ## We replace the bincoefs by the corresponding sums of squares,
    ## getting
    ##   (Txy - F) / ((Tx. + T.y) / 2 - F)
    ## where
    ##   Txy = \sum_{i,j} x_{ij}^2 - n
    ##   Tx. = \sum_i x_{i.}^2 - n
    ##   T.y = \sum_j x_{.j}^2 - n
    ## and
    ##   F = Tx. * T.y / (n^2 - n)
    ##
    Txy <- sum(x ^ 2) - n
    Tx. <- sum(rowSums(x) ^ 2) - n
    T.y <- sum(colSums(x) ^ 2) - n
    F <- Tx. * T.y / (n ^ 2 - n)
    (Txy - F) / ((Tx. + T.y) / 2 - F)
}

### ** .cl_agreement_partition_NMI

## Normalized mutual information of the two class id distributions.
.cl_agreement_partition_NMI <-
function(x, y)
{
    if(!is.cl_hard_partition(x) || !is.cl_hard_partition(y))
        stop("Can only handle hard partitions.")
    x <- table(cl_class_ids(x), cl_class_ids(y))
    x <- x / sum(x)
    m_x <- rowSums(x)
    m_y <- colSums(x)
    y <- outer(m_x, m_y)
    ## Mutual information, summing only over cells with positive joint
    ## and product-marginal probability.
    i <- which((x > 0) & (y > 0))
    out <- sum(x[i] * log(x[i] / y[i]))
    ## (Negative) marginal entropies; their product is positive, so the
    ## square root below is well defined.
    e_x <- sum(m_x * log(ifelse(m_x > 0, m_x, 1)))
    e_y <- sum(m_y * log(ifelse(m_y > 0, m_y, 1)))
    out / sqrt(e_x * e_y)
}

### ** .cl_agreement_partition_KP

.cl_agreement_partition_KP <-
function(x, y)
{
    ## Agreement measure due to Katz & Powell (1953, Psychometrika), see
    ## also Messatfa (1992, Journal of Classification).
    n <- n_of_objects(x)
    ## Handle soft partitions using the corresponding hard ones.
    ## (At least, for the time being.)
    x <- table(cl_class_ids(x), cl_class_ids(y))
    A_xy <- sum(x ^ 2)
    A_x. <- sum(rowSums(x) ^ 2)
    A_.y <- sum(colSums(x) ^ 2)
    (n^2 * A_xy - A_x. * A_.y) /
        sqrt(A_x. * (n^2 - A_x.) * A_.y * (n^2 - A_.y))
}

### ** .cl_agreement_partition_angle

.cl_agreement_partition_angle <-
function(x, y)
{
    ## Maximal angle between the matched memberships.
    k <- max(n_of_classes(x), n_of_classes(y))
    M_x <- cl_membership(x, k)
    M_y <- cl_membership(y, k)
    ## Match classes from conforming memberships.
    ind <- solve_LSAP(crossprod(M_x, M_y), maximum = TRUE)
    sum(M_x * M_y[, ind]) / sqrt(sum(M_x ^ 2) * sum(M_y ^ 2))
}

### ** .cl_agreement_partition_diag

.cl_agreement_partition_diag <-
function(x, y)
{
    ## Maximal co-classification rate.
    k <- max(n_of_classes(x), n_of_classes(y))
    M_x <- cl_membership(x, k)
    M_y <- cl_membership(y, k)
    ## Match classes from conforming memberships.
    ind <- solve_LSAP(crossprod(M_x, M_y), maximum = TRUE)
    sum(M_x * M_y[, ind]) / n_of_objects(x)
}

### ** .cl_agreement_partition_FM

.cl_agreement_partition_FM <-
function(x, y)
{
    ## Fowlkes-Mallows index.
    n <- n_of_objects(x)
    ## Handle soft partitions using the corresponding hard ones.
    ## (At least, for the time being.)
    x <- table(cl_class_ids(x), cl_class_ids(y))
    (sum(x ^ 2) - n) /
        sqrt((sum(rowSums(x) ^ 2) - n) * (sum(colSums(x) ^ 2) - n))
}

### ** .cl_agreement_partition_Jaccard

.cl_agreement_partition_Jaccard <-
function(x, y)
{
    ## Jaccard index.
    n <- n_of_objects(x)
    ## Handle soft partitions using the corresponding hard ones.
    ## (At least, for the time being.)
    x <- table(cl_class_ids(x), cl_class_ids(y))
    Z <- sum(x ^ 2)
    (Z - n) / (sum(rowSums(x) ^ 2) + sum(colSums(x) ^ 2) - n - Z)
}

### ** .cl_agreement_partition_purity

.cl_agreement_partition_purity <-
function(x, y)
{
    ## Purity of classes of x with respect to those of y: relative
    ## fraction of "optimally matched and collapsed" joint class
    ## frequencies, i.e., \sum_i \max_j c_{ij} / n.
    n <- n_of_objects(x)
    ## Handle soft partitions using the corresponding hard ones.
    ## (At least, for the time being.)
    x <- table(cl_class_ids(x), cl_class_ids(y))
    sum(apply(x, 1L, max)) / n
}

### ** .cl_agreement_partition_PS

.cl_agreement_partition_PS <-
function(x, y)
{
    ## Prediction Strength as used in Tibshirani and Walter (2005),
    ## "Cluster Validation by Prediction Strength", JCGS.
    ## See Eqn 2.1 in the reference: this is
    ##   min_l rate of different objects in the same class in partition
    ##   A and in class l in partition B,
    ## where the min is taken over all classes l of partition B.
    x <- table(cl_class_ids(x), cl_class_ids(y))
    s <- rowSums(x)
    min((rowSums(x ^ 2) - s) / (s * (s - 1)), na.rm = TRUE)
}

## Some computations useful for interpreting some of the above.
##
## Consider two hard partitions A and B and write
##   a_{ik} ... indicator of object i in class k for partition A
##   b_{il} ... indicator of object i in class l for partition B
## (so that the a_{ik} and b_{il} are of course the membership matrices
## of the partitions).
##
## Then obviously
##   \sum_i a_{ik} b_{il} = m_{kl}
## is the number of objects in class k for A and in class l for B, and
##   \sum_i a_{ik} = m_{k.} = # objects in class k for A
##   \sum_i b_{il} = m_{.l} = # objects in class l for B
##
## Number of pairs of objects in the same classes for both A and B:
##   \sum_{i, j, k, l} a_{ik} a_{jk} b_{il} b_{jl}
##     = \sum_{k, l} \sum_i a_{ik} b_{il} \sum_j a_{jk} b_{jl}
##     = \sum_{k, l} m_{kl} ^ 2
## This includes the n pairs with identical objects, hence:
## Number of distinct pairs of objects in the same classes for both A
## and B:
##   (\sum_{k, l} m_{kl} ^ 2 - n) / 2
##
## Number of pairs of objects in the same class for A:
##   \sum_{i, j, k} a_{ik} a_{jk}
##     = \sum_k \sum_i a_{ik} \sum_j a_{jk}
##     = \sum_k m_{k.} ^ 2
## Again, this includes the n pairs with identical objects, hence:
## Number of distinct pairs of objects in the same class for A:
##   (\sum_k m_{k.} ^ 2 - n) / 2
##
## Similarly, \sum_l m_{.l} ^ 2 corresponds to the number of pairs of
## objects in the same class for B.
##
## Finally, to get the number of pairs of objects in different classes
## for both A and B, we note that this is the total number of pairs,
## minus the sum of the numbers of those in the same class for A and for
## B, respectively, plus the number of pairs in the same class for both
## A and B.
##
## This makes e.g. the interpretation of some of the Fowlkes-Mallows or
## Rand agreement indices rather straightforward.
### ** .cl_agreement_hierarchy_euclidean

## Euclidean agreement for hierarchies: a decreasing transform of the
## Euclidean ultrametric dissimilarity.
.cl_agreement_hierarchy_euclidean <-
function(x, y)
    1 / (1 + .cl_dissimilarity_hierarchy_euclidean(x, y))

### ** .cl_agreement_hierarchy_manhattan

## Manhattan agreement for hierarchies, analogously.
.cl_agreement_hierarchy_manhattan <-
function(x, y)
    1 / (1 + .cl_dissimilarity_hierarchy_manhattan(x, y))

### ** .cl_agreement_hierarchy_cophenetic

.cl_agreement_hierarchy_cophenetic <-
function(x, y)
{
    ## Cophenetic correlation.
    if(!.has_object_dissimilarities(x) ||
       !.has_object_dissimilarities(y))
        return(NA)
    cor(cl_object_dissimilarities(x), cl_object_dissimilarities(y))
}

### ** .cl_agreement_hierarchy_angle

.cl_agreement_hierarchy_angle <-
function(x, y)
{
    ## Angle between ultrametrics.
    if(!.has_object_dissimilarities(x) ||
       !.has_object_dissimilarities(y))
        return(NA)
    u_x <- cl_object_dissimilarities(x)
    u_y <- cl_object_dissimilarities(y)
    sum(u_x * u_y) / sqrt(sum(u_x ^ 2) * sum(u_y ^ 2))
}

### ** .cl_agreement_hierarchy_gamma

.cl_agreement_hierarchy_gamma <-
function(x, y)
    1 - .cl_dissimilarity_hierarchy_gamma(x, y)

### * [.cl_agreement

## Subscripting: off-diagonal selections no longer form a proximity
## object and are turned into cross-agreements.
"[.cl_agreement" <-
function(x, i, j)
{
    y <- NextMethod("[")
    if(!inherits(y, "cl_agreement")) {
        description <- attr(x, "description")
        return(cl_cross_proximity(y, description = description,
                                  class = "cl_cross_agreement"))
    }
    y
}

### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "### [*]+" ***
### End: ***
clue/R/pclust.R0000644000175100001440000004307013036513600013057 0ustar hornikusers### * cl_pclust

## Prototype-based partitioning of a cluster ensemble; a thin wrapper
## around pclust() which obtains the dissimilarity d and exponent e
## from the consensus method registry and adds validity diagnostics.
cl_pclust <-
function(x, k, method = NULL, m = 1, weights = 1, control = list())
{
    ## Partition a cluster ensemble x into (at most) k classes by
    ## minimizing
    ##   \sum_b \sum_j w_b u_{bj}^m d(x_b, p_j) ^ e
    ## for "suitable" prototypes p_1, ..., p_k, where 1 <= m < \infty,
    ## with 1 corresponding to hard (secondary) partitions, and d a
    ## dissimilarity measure (such as Euclidean dissimilarity of
    ## partitions or hierarchies).
    ##
    ## The algorithm works whenever there is a consensus method for
    ## solving
    ##   \sum_b u_{bj}^m d(x_b, p) ^ e => \min_p
    ##
    ## As we refer to consensus methods by their *name* (e.g., 'HBH'),
    ## we rely on the registration mechanism (set_cl_consensus_method())
    ## to provide the required information about d and e.
    clusterings <- as.cl_ensemble(x)
    type <- .cl_ensemble_type(clusterings)
    if(type == "partition") {
        ## Canonicalize by turning into an ensemble of partitions
        ## represented by membership matrices with the same (minimal)
        ## number of columns.
        memberships <-
            lapply(clusterings, cl_membership,
                   max(sapply(clusterings, n_of_classes)))
        clusterings <-
            cl_ensemble(list = lapply(memberships, as.cl_partition))
    }
    ## NOTE(review): if 'method' already inherits from
    ## "cl_consensus_method", the branch below is skipped and 'd' and
    ## 'e' are never defined, so pclust_family() will fail -- confirm
    ## whether passing method objects directly is meant to be supported.
    if(!inherits(method, "cl_consensus_method")) {
        ## Get required information on d and e from the registry.
        if(is.null(method))
            method <- .cl_consensus_method_default(type)
        method <- get_cl_consensus_method(method, type)
        ## Note that this avoids registry lookup in subsequent calls to
        ## cl_consensus().
        if(is.null(method$exponent))
            stop("No information on exponent in consensus method used.")
        e <- method$exponent
        if(is.null(method$dissimilarity))
            stop("No information on dissimilarity in consensus method used.")
        d <- function(x, y = NULL)
            cl_dissimilarity(x, y, method = method$dissimilarity)
    }
    family <- pclust_family(D = d, C = method$definition, e = e)
    out <- pclust(x, k, family, m, weights, control)
    ## Massage the results a bit.
    dissimilarities <- as.matrix(d(clusterings) ^ e)
    out$call <- match.call()
    out <- .structure(c(out,
                        list(silhouette =
                             silhouette(out$cluster,
                                        dmatrix = dissimilarities),
                             validity =
                             cl_validity(cl_membership(out),
                                         dissimilarities),
                             ##
                             ## Information about d and e is also in the
                             ## family returned, of course.  Trying to be
                             ## nice to users by "directly" providing d
                             ## and e is currently of limited usefulness
                             ## as the pclust representation is not
                             ## directly available to users.
                             d = d, e = e
                             ##
                             )),
                      class = unique(c("cl_pclust", class(out))))
    as.cl_partition(out)
}

## Print method for cl_pclust objects: header line plus validity.
print.cl_pclust <-
function(x, ...)
{
    txt <- if(x$m == 1)
        gettextf("A hard partition of a cluster ensemble with %d elements into %d classes.",
                 n_of_objects(x), n_of_classes(x))
    else
        gettextf("A soft partition (degree m = %f) of a cluster ensemble with %d elements into %d classes.",
                 x$m, n_of_objects(x), n_of_classes(x))
    writeLines(strwrap(txt))
    NextMethod("print", x, header = FALSE)
    print(x$validity, ...)
    invisible(x)
}

### * pclust

## General-purpose alternating optimization for prototype-based
## partitioning: alternate between assigning objects to the closest
## prototype (or computing memberships) and recomputing prototypes via
## the family's consensus function.
pclust <-
function(x, k, family, m = 1, weights = 1, control = list())
{
    ## A general purpose alternating optimization algorithm for
    ## prototype-based partitioning.
    ## For now, assume family specifies three functions:
    ## * A dissimilarity function D() for data and prototypes.
    ## * A consensus function C() for data, weights and control.
    ## * An init function init() of data and k giving an initial object
    ##   of k prototypes.
    ##
    ## We use k as the second argument as this seems to be common
    ## practice for partitioning algorithms.
    ##
    ## We assume that consensus functions can all handle WEIGHTS
    ## (formals: x, weights, control; only used positionally).
    ##
    ##
    ## We now allow for arbitrary representations/objects of prototypes.
    ## What is needed are functions to modify a *single* prototype and
    ## subset the prototypes.  By default, list and matrix (with the
    ## usual convention that rows are "objects") representations are
    ## supported.  Otherwise, the family needs to provide suitable
    ## .modify() and .subset() functions.
    ## The approach relies on having the initializer of the family
    ## (init()) return an appropriate object of prototypes.
    ## It would be possible to have default initializers as well to
    ## randomly subset the data (i.e., select elements of lists or rows
    ## of matrices, respectively).
    ##
    ##
    ## The 'prototypes' are not necessarily objects of the same kind as
    ## the data objects.  Therefore, D() is really a 2-argument
    ## cross-dissimilarity function.
    ## It would also be useful to have a way of computing the pairwise
    ## dissimilarities between objects: but this is something different
    ## from D() is objects and prototypes are not of the same kind.
    ## A "clean" solution could consist in specifying the family either
    ## via a (non-symmetric) cross-dissimilarity function X(), or a
    ## symmetric D() which when called with a single argument gives the
    ## pairwise object dissimilarities.
    ## I.e.,
    ##   pclust_family(D = NULL, C, init = NULL, X = NULL, ......)
    ## using
    ## * If both D and X are not given => TROUBLE.
    ## * If only D is given: use for X as well.
    ## * If only X is given: only use as such.
    ## Something for the future ...
    ##
    ##
    ## If people have code for computing cross-dissimilarities for the
    ## data and a *single* prototype (say, xd()), they can easily wrap
    ## into what is needed using
    ##   t(sapply(prototypes, function(p) xd(x, p)))
    ## Assuming symmetry of the dissimilarity, they could also do
    ##   t(sapply(prototypes, xd, x))
    ##

    ## Perhaps check whether 'family' is a feasible/suitable pclust
    ## family (object).
    D <- family$D
    C <- family$C
    e <- family$e
    .modify <- family$.modify
    .subset <- family$.subset
    ## Control parameters, with defaults.
    maxiter <- control$maxiter
    if(is.null(maxiter))
        maxiter <- 100L
    nruns <- control$nruns
    reltol <- control$reltol
    if(is.null(reltol))
        reltol <- sqrt(.Machine$double.eps)
    start <- control$start
    verbose <- control$verbose
    if(is.null(verbose))
        verbose <- getOption("verbose")
    ## Do this at last ...
    control <- as.list(control$control)
    ## Handle start values and number of runs.
    if(!is.null(start)) {
        if(!is.list(start)) {
            ## Be nice to users.
            start <- list(start)
        }
        nruns <- length(start)
    } else {
        if(is.null(nruns)) {
            ## Use nruns only if start is not given.
            nruns <- 1L
        }
        start <- replicate(nruns, family$init(x, k), simplify = FALSE)
    }
    ## Initialize.
    ## We need to do this here because it is (currently) the only way to
    ## figure out the number B of objects to be partitioned (which is
    ## needed for getting the object weights to the right length).
    prototypes <- start[[1L]]
    dissimilarities <- D(x, prototypes) ^ e
    B <- NROW(dissimilarities)
    ## Also try to figure out (if necessary) how to modify a single
    ## prototype and to subset the prototypes.  Note that we can only
    ## check this *after* prototypes were obtained (and not when the
    ## family object is created).
    ## NOTE(review): formals() returns a pairlist, so the
    ## identical(formals(...), c("x", "i", "value")) comparisons below
    ## look like they are always FALSE for any function, rejecting every
    ## user-supplied .modify/.subset -- should this compare
    ## names(formals(...)) instead?  Confirm.
    if(is.null(.modify)) {
        if(is.list(prototypes))
            .modify <- function(x, i, value) {
                x[[i]] <- value
                x
            }
        else if(is.matrix(prototypes))
            .modify <- function(x, i, value) {
                x[i, ] <- value
                x
            }
        else
            stop("Cannot determine how to modify prototypes.")
    } else if(!is.function(.modify) ||
              !identical(formals(args(.modify)), c("x", "i", "value")))
        stop("Invalid function to modify prototypes.")
    if(is.null(.subset)) {
        if(is.list(prototypes))
            .subset <- `[`
        else if(is.matrix(prototypes))
            .subset <- function(x, i) x[i, , drop = FALSE]
        else
            stop("Cannot determine how to subset prototypes.")
    } else if(!is.function(.subset) ||
              !identical(formals(args(.subset)), c("x", "i")))
        stop("Invalid function to subset prototypes.")
    weights <- rep_len(weights, B)
    if(any(weights < 0))
        stop("Argument 'weights' has negative elements.")
    if(!any(weights > 0))
        stop("Argument 'weights' has no positive elements.")
    ## A little helper.
    .make_unit_weights <- function(B, i) {
        out <- double(B)
        out[i] <- 1
        out
    }
    if(m == 1) {
        ## Hard partitions.
        ## Objective: (weighted) sum of each object's dissimilarity to
        ## its assigned prototype.
        value <- if(all(weights == 1))
            function(dissimilarities, ids)
                sum(.one_entry_per_column(dissimilarities, ids))
        else
            function(dissimilarities, ids)
                sum(weights * .one_entry_per_column(dissimilarities, ids))
        opt_value <- Inf
        run <- 1L
        if(verbose && (nruns > 1L))
            message(gettextf("Pclust run: %d", run))
        repeat {
            class_ids <- max.col( - dissimilarities )
            old_value <- value(dissimilarities, class_ids)
            if(verbose)
                message(gettextf("Iteration: 0 *** value: %g",
                                 old_value))
            iter <- 1L
            while(iter <= maxiter) {
                ## Update the prototypes of the classes in use.
                class_ids_used <- unique(class_ids)
                for(j in class_ids_used)
                    prototypes <-
                        .modify(prototypes, j,
                                C(x, weights * (class_ids %in% j),
                                  control))
                ## Update the class ids.
                dissimilarities <- D(x, prototypes) ^ e
                class_ids <- max.col( - dissimilarities )
                ## Try avoiding degenerate solutions.
                if(length(class_ids_used) < k) {
                    ## Find the k - l largest
                    ## object-to-assigned-prototype dissimilarities.
                    o <- order(.one_entry_per_column(dissimilarities,
                                                     class_ids),
                               decreasing = TRUE)
                    ## Find and recompute unused prototypes.
                    unused <- setdiff(seq_len(k), class_ids_used)
                    for(j in seq_along(unused))
                        prototypes <-
                            .modify(prototypes, unused[j],
                                    C(x, .make_unit_weights(B, o[j]),
                                      control))
                    dissimilarities[, unused] <-
                        D(x, .subset(prototypes, unused)) ^ e
                    class_ids <- max.col( - dissimilarities )
                    ## For the time being, do not retry in case the
                    ## solution is still degenerate.
                }
                new_value <- value(dissimilarities, class_ids)
                if(verbose)
                    message(gettextf("Iteration: %d *** value: %g",
                                     iter, new_value))
                if(abs(old_value - new_value)
                   < reltol * (abs(old_value) + reltol))
                    break
                old_value <- new_value
                iter <- iter + 1L
            }
            if(new_value < opt_value) {
                converged <- (iter <= maxiter)
                opt_value <- new_value
                opt_class_ids <- class_ids
                opt_prototypes <- prototypes
            }
            if(run >= nruns)
                break
            run <- run + 1L
            if(verbose)
                message(gettextf("Pclust run: %d", run))
            prototypes <- start[[run]]
            dissimilarities <- D(x, prototypes) ^ e
        }
        ## We should really have a suitable "sparse matrix" class for
        ## representing the memberships of hard partitions.  For now:
        opt_u <- NULL
        ## opt_u <- matrix(0, B, k)
        ## opt_u[cbind(seq_len(B), opt_class_ids)] <- 1
    } else {
        ## Soft partitions.
        ## Objective: (weighted) sum of memberships^m times
        ## dissimilarities.
        value <- if(all(weights == 1))
            function(dissimilarities, u)
                sum(u ^ m * dissimilarities)
        else
            function(dissimilarities, u)
                sum(weights * u ^ m * dissimilarities)
        opt_value <- Inf
        run <- 1L
        if(verbose && (nruns > 1L))
            message(gettextf("Pclust run: %d", run))
        repeat {
            u <- .memberships_from_cross_dissimilarities(dissimilarities,
                                                         m)
            old_value <- value(dissimilarities, u)
            if(verbose)
                message(gettextf("Iteration: 0 *** value: %g",
                                 old_value))
            iter <- 1L
            while(iter <= maxiter) {
                ## Update the prototypes.
                ## This amounts to solving, for each j:
                ##   \sum_b w_b u_{bj}^m D(x_b, p) ^ e => \min_p
                ## I.e., p_j is the *weighted* consensus of the x_b with
                ## corresponding weights u_{bj}^m.
                for(j in seq_len(k)) {
                    prototypes <-
                        .modify(prototypes, j,
                                C(x, weights * u[, j] ^ m, control))
                }
                ## Update u.
                dissimilarities <- D(x, prototypes) ^ e
                u <- .memberships_from_cross_dissimilarities(dissimilarities,
                                                             m)
                new_value <- value(dissimilarities, u)
                if(verbose)
                    message(gettextf("Iteration: %d *** value: %g",
                                     iter, new_value))
                if(abs(old_value - new_value)
                   < reltol * (abs(old_value) + reltol))
                    break
                old_value <- new_value
                iter <- iter + 1L
            }
            if(new_value < opt_value) {
                converged <- (iter <= maxiter)
                opt_value <- new_value
                opt_prototypes <- prototypes
                opt_u <- u
            }
            if(run >= nruns)
                break
            run <- run + 1L
            if(verbose)
                message(gettextf("Pclust run: %d", run))
            prototypes <- start[[run]]
            dissimilarities <- D(x, prototypes) ^ e
        }
        opt_class_ids <- max.col(opt_u)
        ## Ensure that opt_u is a stochastic matrix.
        opt_u <- pmax(opt_u, 0)
        opt_u <- opt_u / rowSums(opt_u)
        rownames(opt_u) <- rownames(dissimilarities)
        opt_u <- cl_membership(as.cl_membership(opt_u), k)
    }
    names(opt_class_ids) <- rownames(dissimilarities)
    pclust_object(prototypes = opt_prototypes,
                  membership = opt_u,
                  cluster = opt_class_ids,
                  family = family,
                  m = m,
                  value = opt_value,
                  call = match.call(),
                  attributes = list("converged" = converged))
}

## Print method for pclust objects.
print.pclust <-
function(x, header = TRUE, ...)
{
    is_hard <- (x$m == 1)
    class_ids <- cl_class_ids(x)
    if(header) {
        txt <- if(is_hard)
            gettextf("A hard partition of %d objects into %d classes.",
                     length(class_ids), n_of_classes(x))
        else
            gettextf("A soft partition (degree m = %f) of %d objects into %d classes.",
                     x$m, length(class_ids), n_of_classes(x))
        writeLines(strwrap(txt))
    }
    if(is_hard) {
        print(class_ids, ...)
    } else {
        writeLines("Class memberships:")
        print(cl_membership(x), ...)
        writeLines("Class ids of closest hard partition:")
        print(unclass(class_ids), ...)
    }
    invisible(x)
}

### * pclust_family

## Bundle the ingredients of a pclust fit (dissimilarity D, consensus
## C, initializer, exponent e, prototype accessors) into a family
## object.
pclust_family <-
function(D, C, init = NULL, description = NULL, e = 1,
         .modify = NULL, .subset = NULL)
{
    ## Add checking formals (lengths) eventually ...
    if(is.null(init)) {
        ## Works for list representations ...
        init <- function(x, k) sample(x, k)
    }
    .structure(list(description = description,
                    D = D,
                    C = C,
                    init = init,
                    e = e,
                    .modify = .modify,
                    .subset = .subset),
               class = "pclust_family")
}

### * pclust_object

## Low-level constructor for pclust result objects; extra components
## are passed via '...', extra classes via 'classes'.
pclust_object <-
function(prototypes, membership, cluster, family, m = 1, value, ...,
         classes = NULL, attributes = NULL)
{
    out <- c(list(prototypes = prototypes,
                  membership = membership,
                  cluster = cluster,
                  family = family,
                  m = m,
                  value = value),
             list(...))
    attributes(out) <- c(attributes(out), attributes)
    classes <- unique(as.character(classes))
    class(out) <- c(classes[classes != "pclust"], "pclust")
    out
}

### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "### [*]+" ***
### End: ***
clue/R/utilities.R0000644000175100001440000001015013036513662013561 0ustar hornikusers### * Matrix/vector utilities

### * .dist_from_vector

## Turn a lower-half vector of dissimilarities into a "dist" object.
.dist_from_vector <-
function(x, n = NULL, labels = NULL)
{
    ## This might be useful as as.dist.vector, perhaps without the extra
    ## argument n then which we only have for minimal performance gains.
    if(is.null(n))
        n <- as.integer((sqrt(1 + 8 * length(x)) + 1) / 2)
    attr(x, "Size") <- n
    if(!is.null(labels))
        attr(x, "Labels") <- labels
    class(x) <- "dist"
    x
}

### ** .one_entry_per_column

.one_entry_per_column <-
function(x, j)
{
    ## For a matrix x and a vector of column indices j_1, ..., j_n where
    ## n is the number of rows of x, get x[1,j_1], ..., x[n,j_n].
    ##
    ## This used to have
    ##   if(!is.matrix(x))
    ##       stop("Argument 'x' must be a matrix.")
    ## but that will fail for sparse matrix classes.
    ## So let us hope for the best ...
    ##
    x[cbind(seq_len(nrow(x)), j)]
}

## Replacement version of the above.
".one_entry_per_column<-" <-
function(x, j, value)
{
    ##
    ## This used to have
    ##   if(!is.matrix(x))
    ##       stop("Argument 'x' must be a matrix.")
    ## but that will fail for sparse matrix classes.
    ## So let us hope for the best ...
    ##
    x[cbind(seq_len(nrow(x)), j)] <- value
    x
}

### * .symmetric_matrix_from_veclh

## Expand a lower-half vector into the corresponding symmetric matrix
## (with a zero diagonal).
.symmetric_matrix_from_veclh <-
function(x, n = NULL)
{
    ## In essence the same as as.matrix.dist, but without handling the
    ## additional attributes that dist objects might have.
    if(is.null(n))
        n <- as.integer((sqrt(1 + 8 * length(x)) + 1) / 2)
    M <- matrix(0, n, n)
    M[row(M) > col(M)] <- x
    M + t(M)
}

### * .weighted_mean_of_object_dissimilarities

## Weighted mean of the object dissimilarities of the clusterings in
## 'x', returned as a "dist" object.
.weighted_mean_of_object_dissimilarities <-
function(x, w = NULL)
{
    w <- if(is.null(w)) {
        rep.int(1, length(x))
    } else {
        rep_len(w, length(x))
    }
    ## (Need the latter because we want w / sum(w) ...)
    dissimilarities <- lapply(x, cl_object_dissimilarities)
    m <- rowSums(mapply("*", dissimilarities, w / sum(w)))
    labels <- attr(dissimilarities[[1L]], "Labels")
    .dist_from_vector(m, labels = labels)
}

### ** .weighted_sum_of_matrices

.weighted_sum_of_matrices <-
function(x, w = NULL, nr = NULL)
{
    ## Quite often we need to compute weighted sums \sum_b w_b X_b of
    ## conforming matrices \{ X_b \}.  If x is a list containing the
    ## matrices and w the vector of weights, it seems that one
    ## reasonably efficient way of doing this is the following.
    if(is.null(w))
        w <- rep.int(1, length(x))
    if(is.null(nr))
        nr <- NROW(x[[1L]])
    matrix(rowSums(mapply("*", x, w)), nr)
}

### ** .weighted_sum_of_vectors

.weighted_sum_of_vectors <-
function(x, w = NULL)
{
    ## See above.
    if(is.null(w))
        w <- rep.int(1, length(x))
    rowSums(mapply("*", x, w))
}

### * Containers

## Creator.
## A container wraps a representation (.Data) together with meta data
## (.Meta) and carries the given classes.
.make_container <-
function(x, classes, properties = NULL)
{
    out <- list(.Data = x, .Meta = properties)
    class(out) <- unique(classes)
    out
}

## Getters.
## Access the representation and the stored meta data of a container.
.get_representation <- function(x) x$.Data
.get_properties <- function(x) x$.Meta
.get_property <- function(x, which) x$.Meta[[which]]
.has_property <- function(x, which) which %in% names(x$.Meta)
.get_property_from_object_or_representation <-
function(x, which, getter)
{
    ## Use the stored property if available; otherwise apply 'getter'
    ## (by default the function named 'which') to the representation.
    if(.has_property(x, which))
        .get_property(x, which)
    else {
        if(missing(getter))
            getter <- get(which)
        getter(.get_representation(x))
    }
}

## Methods (sort of).
.print_container <-
function(x, cls, ...)
{
    writeLines(gettextf("An object of virtual class '%s', with representation:\n",
                        cls))
    print(.get_representation(x), ...)
    invisible(x)
}

### * Others

## Weighted median of 'x', i.e., a minimizer of sum(w * abs(x - m)).
weighted_median <-
function(x, w = 1, na.rm = FALSE)
{
    w <- rep_len(w, length(x))
    if(na.rm && any(ind <- is.na(x))) {
        x <- x[!ind]
        w <- w[!ind]
    }
    if(any(is.na(x)) || !length(x))
        return(NA)
    w <- w / sum(w)
    ind <- order(x)
    x <- x[ind]
    w <- w[ind]
    ## For sorted x and normalized w, sum(w * abs(x - x[k])) equals
    ## 2 * (x[k] * (cumsum(w)[k] - 0.5) - cumsum(w * x)[k]) plus a
    ## constant, so the minimizer can be read off directly.
    x[which.min(x * (cumsum(w) - 0.5) - cumsum(w * x))]
}

### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "### [*]+" ***
### End: ***
clue/R/partition.R0000644000175100001440000004352212212427231013557 0ustar hornikusers### * n_of_classes

## Get the number of classes in a (hard or soft) partition.
##
## We generally allow for classes to be empty, unlike the current
## version of kmeans().  Package cclust has a version of k-means which
## does not stop in case of empty classes.
## However, we only count NON-EMPTY classes here.
##
n_of_classes <-
function(x)
    UseMethod("n_of_classes")

## Default method.
n_of_classes.default <-
function(x)
    length(unique(cl_class_ids(x)))

## Package stats: kmeans() (R 2.1.0 or better).
n_of_classes.kmeans <- n_of_classes.default

## Package cluster: clara(), fanny(), and pam() give objects of the
## respective class inheriting from class "partition".
## For fanny, count the classes with positive total membership.
n_of_classes.fanny <-
function(x)
    sum(colSums(x$membership, na.rm = TRUE) > 0)
n_of_classes.partition <- n_of_classes.default

## Package cclust: cclust().
n_of_classes.cclust <- n_of_classes.default

## Package e1071: cmeans() gives objects of class "fclust".
n_of_classes.fclust <- n_of_classes.fanny
## Package e1071: cshell().
n_of_classes.cshell <- n_of_classes.fanny
## Package e1071: bclust().
n_of_classes.bclust <- n_of_classes.default

## Package mclust: Mclust().
n_of_classes.Mclust <- n_of_classes.default

## Package clue: Memberships.
n_of_classes.cl_membership <-
function(x)
    attr(x, "n_of_classes")

## Package clue: pclust().
## Hard fits have no membership matrix; fall back to counting distinct
## class ids in that case.
n_of_classes.pclust <-
function(x)
{
    if(is.null(m <- x$membership))
        length(unique(cl_class_ids(x)))
    else
        sum(colSums(m, na.rm = TRUE) > 0)
}

## Package clue: (virtual) class "cl_partition".
n_of_classes.cl_partition <-
function(x)
    n_of_classes(.get_representation(x))

### * cl_class_ids

## Get ids of classes in a partition.
##
## Currently, all supported soft partitioning methods provide a softmax
## hard partitioning as well.
##
cl_class_ids <-
function(x)
    UseMethod("cl_class_ids")

## Default method.
cl_class_ids.default <-
function(x)
{
    stop("Cannot infer class ids from given object.")
}

## Package stats: kmeans() (R 2.1.0 or better).
cl_class_ids.kmeans <-
function(x)
    as.cl_class_ids(x$cluster)

## Package cluster: clara(), fanny(), and pam() give objects of the
## respective class inheriting from class "partition".
cl_class_ids.partition <-
function(x)
    as.cl_class_ids(x$clustering)

## Package RWeka: clusterers return objects inheriting from
## "Weka_clusterer".
cl_class_ids.Weka_clusterer <-
function(x)
    as.cl_class_ids(x$class_ids)

## Package cba: ccfkms().
cl_class_ids.ccfkms <-
function(x)
    as.cl_class_ids(as.vector(x$cl))
## Package cba: rockCluster() returns objects of class "rock".
cl_class_ids.rock <-
function(x)
    as.cl_class_ids(as.vector(x$cl))

## Package cclust: cclust().
cl_class_ids.cclust <- cl_class_ids.kmeans

## Package e1071: cmeans() gives objects of class "fclust".
cl_class_ids.fclust <- cl_class_ids.kmeans
## Package e1071: cshell().
cl_class_ids.cshell <- cl_class_ids.kmeans
## Package e1071: bclust().
cl_class_ids.bclust <- cl_class_ids.kmeans

## Package flexclust: kcca() returns objects of S4 class "kcca" which
## extends S4 class "flexclust".
##
## We used to be able to call flexclust::cluster(), but this now only
## has S4 methods for modeltools::clusters() S4 generic.  Let's call this
## one, and hope that flexclust has been loaded ...
##
cl_class_ids.kcca <-
function(x)
    as.cl_class_ids(modeltools::clusters(x))

## Package flexmix: class "flexmix".
##
## We used to be able to call flexmix::cluster(), but this now only has
## S4 methods for modeltools::clusters() S4 generic.  Let's call this
## one, and hope that flexmix has been loaded ...
##
cl_class_ids.flexmix <-
function(x)
    as.cl_class_ids(modeltools::clusters(x))

## Package kernlab: specc() and kkmeans() return objects of S4 class
## "specc".
cl_class_ids.specc <-
function(x)
{
    tmp <- unclass(x)
    as.cl_class_ids(.structure(as.vector(tmp), names = names(tmp)))
}

## Package mclust: Mclust().
cl_class_ids.Mclust <-
function(x)
    as.cl_class_ids(x$classification)

## Package relations: equivalence and preference relations.
cl_class_ids.relation <-
function(x)
    as.cl_class_ids(relations::relation_class_ids(x))

## Package clue: Class ids.
cl_class_ids.cl_class_ids <- identity
## Package clue: Memberships.
cl_class_ids.cl_membership <-
function(x)
    as.cl_class_ids(.structure(max.col(x), names = rownames(x)))
## (Cannot do cl_class_ids.cl_membership <- max.col for generic/method
## consistency.)
## Package clue: cl_pam().
cl_class_ids.cl_pam <- cl_class_ids.kmeans
## Package clue: cl_partition_by_class_ids().
cl_class_ids.cl_partition_by_class_ids <-
function(x)
    .get_representation(x)
## Package clue: kmedoids().
cl_class_ids.kmedoids <- cl_class_ids.kmeans
## Package clue: pclust().
cl_class_ids.pclust <- cl_class_ids.kmeans

## Package clue: (virtual) class "cl_partition".
cl_class_ids.cl_partition <- function(x) cl_class_ids(.get_representation(x)) ## Package movMF: class "movMF". cl_class_ids.movMF <- function(x) as.cl_class_ids(max.col(x$P)) ### * as.cl_class_ids as.cl_class_ids <- function(x) { ## For the time being, handle only "raw" class ids. ## Maybe add methods handling factors lateron (if necessary). ## ## This could also be used to canonicalize returned class ids ## according to the docs (vector of integers with the class ids), ## using someting like ## match(ids, unique(ids)) ## .structure(unclass(x), class = "cl_class_ids") } ### * print.cl_class_ids print.cl_class_ids <- function(x, ...) { writeLines("Class ids:") print(unclass(x), ...) invisible(x) } ### * cl_class_labels cl_class_labels <- function(x) UseMethod("cl_class_labels") ### * is.cl_partition ## Determine whether an object is a (generalized) partition. ## Note that this includes both hard and soft partitions, and allows ## sums of memberships of objects to be less than one. is.cl_partition <- function(x) UseMethod("is.cl_partition") ## Default method. is.cl_partition.default <- .false ## Package stats: kmeans() (R 2.1.0 or better). is.cl_partition.kmeans <- .true ## Package cluster: clara(), fanny(), and pam() give objects of the ## respective class inheriting from class "partition". is.cl_partition.partition <- .true ## Package RWeka: clusterers return objects inheriting from ## "Weka_clusterer". ## (Note that Cobweb internally uses a classification tree, but ## definitely does not expose this structure.) is.cl_partition.Weka_clusterer <- .true ## Package cba: ccfkms(). is.cl_partition.ccfkms <- .true ## Package cba: rockCluster() returns objects of class "rock". is.cl_partition.rock <- .true ## Package cclust: cclust(). is.cl_partition.cclust <- .true ## Package e1071: cmeans() gives objects of class "fclust". is.cl_partition.fclust <- .true ## Package e1071: cshell(). is.cl_partition.cshell <- .true ## Package e1071: bclust(). 
is.cl_partition.bclust <- .true ## Package flexclust: kcca() returns objects of S4 class "kcca" which ## extends S4 class "flexclust". is.cl_partition.kcca <- .true ## Package flexmix: class "flexmix". is.cl_partition.flexmix <- .true ## Package kernlab: specc() and kkmeans() return objects of S4 class ## "specc". is.cl_partition.specc <- .true ## Package mclust: Mclust(). is.cl_partition.Mclust <- .true ## Package clue: (virtual) class "cl_partition". ## Note that "raw" cl_membership objects are *not* partitions, as they ## are meant for numeric computations. is.cl_partition.cl_partition <- .true ## Package clue: kmedoids(). is.cl_partition.kmedoids <- .true ## Package clue: pclust(). is.cl_partition.pclust <- .true ## Package movMF: class "movMF". is.cl_partition.movMF <- .true ### * as.cl_partition ## Note that cl_partition conceptually is a virtual class, so there are ## no prototypes and no cl_partition() creator. .cl_partition_classes <- "cl_partition" as.cl_partition <- function(x) { if(is.cl_partition(x)) { if(!inherits(x, "cl_partition")) .make_container(x, .cl_partition_classes) else x } else cl_partition_by_memberships(as.cl_membership(x)) } ### * print.cl_partition print.cl_partition <- function(x, ...) .print_container(x, "cl_partition", ...) ### * print.cl_partition_by_class_ids print.cl_partition_by_class_ids <- function(x, ...) { writeLines(gettextf("A hard partition of %d objects.", n_of_objects(x))) print(cl_class_ids(x), ...) invisible(x) } ### * print.cl_partition_by_memberships print.cl_partition_by_memberships <- function(x, ...) { writeLines(gettextf("A partition of %d objects.", n_of_objects(x))) print(cl_membership(x), ...) invisible(x) } ### * Complex.cl_partition Complex.cl_partition <- function(z) stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) ### * Math.cl_partition Math.cl_partition <- function(x, ...) 
stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) ### * Ops.cl_partition Ops.cl_partition <- function(e1, e2) { if(nargs() == 1L) stop(gettextf("Unary '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) ## Only comparisons are supprorted. if(!(as.character(.Generic) %in% c("<", "<=", ">", ">=", "==", "!="))) stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) ci1 <- cl_class_ids(e1) ci2 <- cl_class_ids(e2) if(length(ci1) != length(ci2)) stop("Partitions must have the same number of objects.") z <- table(ci1, ci2) > 0 switch(.Generic, "<=" = all(rowSums(z) == 1), "<" = all(rowSums(z) == 1) && any(colSums(z) > 1), ">=" = all(colSums(z) == 1), ">" = all(colSums(z) == 1) && any(rowSums(z) > 1), "==" = all(rowSums(z) == 1) && all(colSums(z) == 1), "!=" = any(rowSums(z) > 1) || any(colSums(z) > 1)) } ### * Summary.cl_partition Summary.cl_partition <- function(..., na.rm = FALSE) { ok <- switch(.Generic, max = , min = , range = TRUE, FALSE) if(!ok) stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) args <- list(...) switch(.Generic, "min" = cl_meet(cl_ensemble(list = args)), "max" = cl_join(cl_ensemble(list = args)), "range" = { cl_ensemble(min = cl_meet(cl_ensemble(list = args)), max = cl_join(cl_ensemble(list = args))) }) } ### * cl_partition_by_class_ids cl_partition_by_class_ids <- function(x, labels = NULL) { if(!is.atomic(x)) stop("Class ids must be atomic.") if(is.null(names(x))) names(x) <- labels ## ## Perhaps give the raw class ids more structure? ## E.g, class "cl_class_ids"? ## Problem is that we used to say about extensibility that all there ## is to do for a hard partitioner is to add a cl_class_ids() method ## and two predicates, but *not* to have the former give a suitably ## classed object. 
On the other hand, the recipe would need to be ## extended for soft partitioners, for which it would be necessary ## to provide a cl_membership() method which really returns an ## object of class cl_membership. Note that we can do this using ## as.cl_membership(m), where m is the raw membership matrix. So ## maybe we should ask for using as.cl_class_ids() to coerce raw ## class ids ... .make_container(as.cl_class_ids(x), c("cl_partition_by_class_ids", .cl_hard_partition_classes), list(n_of_objects = length(x), n_of_classes = length(unique(x)))) ## } ### * cl_partition_by_memberships cl_partition_by_memberships <- function(x, labels = NULL) { if(!is.matrix(x) || any(x < 0, na.rm = TRUE) || any(x > 1, na.rm = TRUE)) stop("Not a valid membership matrix.") ## Be nice. x <- x / rowSums(x, na.rm = TRUE) ## (Note that this does not imply all(rowSums(x) == 1). If we ## wanted to test for this, something like ## .is_stochastic_matrix <- function(x) ## identical(all.equal(rowSums(x), rep(1, nrow(x))), TRUE)) ## should do.) if(is.null(rownames(x))) rownames(x) <- labels .make_container(as.cl_membership(x), c("cl_partition_by_memberships", .cl_partition_classes), list(n_of_objects = nrow(x))) } ### * is.cl_hard_partition ## Determine whether an object is a hard partition. is.cl_hard_partition <- function(x) UseMethod("is.cl_hard_partition") ## Default method. is.cl_hard_partition.default <- .false ## Package stats: kmeans() (R 2.1.0 or better). is.cl_hard_partition.kmeans <- .true ## Package cluster: clara(), fanny(), and pam() give objects of the ## respective class inheriting from class "partition". ## ## Of course, fuzzy clustering can also give a hard partition ... is.cl_hard_partition.fanny <- function(x) { all(rowSums(cl_membership(x) == 1, na.rm = TRUE) > 0) } ## is.cl_hard_partition.partition <- .true ## Package RWeka: clusterers return objects inheriting from ## "Weka_clusterer". is.cl_hard_partition.Weka_clusterer <- .true ## Package cba: ccfkms(). 
is.cl_hard_partition.ccfkms <- .true ## Package cba: rockCluster() returns objects of class "rock". is.cl_hard_partition.rock <- .true ## Package cclust: cclust(). is.cl_hard_partition.cclust <- .true ## Package e1071: cmeans() gives objects of class "fclust". is.cl_hard_partition.fclust <- is.cl_hard_partition.fanny ## Package e1071: cshell(). is.cl_hard_partition.cshell <- is.cl_hard_partition.fanny ## Package e1071: bclust(). is.cl_hard_partition.bclust <- .true ## Package flexclust: kcca() returns objects of S4 class "kcca" which ## extends S4 class "flexclust". is.cl_hard_partition.kcca <- .true ## Package flexmix: class "flexmix". is.cl_hard_partition.flexmix <- is.cl_hard_partition.fanny ## Package kernlab: specc() and kkmeans() return objects of S4 class ## "specc". is.cl_hard_partition.specc <- .true ## Package mclust: Mclust(). is.cl_hard_partition.Mclust <- is.cl_hard_partition.fanny ## Package clue: (virtual) class "cl_hard_partition". is.cl_hard_partition.cl_hard_partition <- .true ## Package clue: (virtual) class "cl_partition". ## Note that "raw" cl_membership objects are *not* partitions, as they ## are meant for numeric computations. ## Rather than providing is.cl_hard_partition.cl_membership() we thus ## prefer explicit handling of cl_partition objects with a cl_membership ## representation. is.cl_hard_partition.cl_partition <- function(x) { ## If the object has a cl_membership representation ... y <- .get_representation(x) if(inherits(y, "cl_membership")) attr(y, "is_cl_hard_partition") ## Other representations, e.g. for "definitely" hard partitions via ## vectors of class ids or class labels, or a list of classes, may ## be added in future versions. ## In any case, this must be kept in sync with what is handled by ## as.cl_partition() [which currently runs as.cl_membership() in ## case is.cl_partition() gives false]. else is.cl_hard_partition(y) } ## Package clue: kmedoids(). is.cl_hard_partition.kmedoids <- .true ## Package clue: pclust(). 
is.cl_hard_partition.pclust <- is.cl_hard_partition.fanny ## Package movMF: class "movMF". is.cl_hard_partition.movMF <- is.cl_hard_partition.fanny ### * as.cl_hard_partition .cl_hard_partition_classes <- c("cl_hard_partition", "cl_partition") as.cl_hard_partition <- function(x) { if(is.cl_hard_partition(x)) { if(!inherits(x, "cl_partition")) .make_container(x, .cl_hard_partition_classes) else x } else if(is.cl_partition(x)) { ## A soft cl_partition ... ids <- cl_class_ids(x) cl_partition_by_class_ids(ids, names(ids)) } else if(is.matrix(x)) { ## A matrix of raw memberships, hopefully ... cl_partition_by_class_ids(max.col(x), rownames(x)) } else if(is.atomic(x)) { ## A vector of raw class ids, hopefully ... cl_partition_by_class_ids(x, names(x)) } else stop("Cannot coerce to 'cl_hard_partition'.") } ### * is.cl_soft_partition ## Determine whether an object is a soft partition. is.cl_soft_partition <- function(x) is.cl_partition(x) && ! is.cl_hard_partition(x) ### * .maybe_is_proper_soft_partition ## Determine whether an object might be a proper soft partition (in the ## sense that it is a cl_partition but not a cl_hard_partition). ## This is mostly useful when computing fuzziness measures. .maybe_is_proper_soft_partition <- function(x) UseMethod(".maybe_is_proper_soft_partition") .maybe_is_proper_soft_partition.default <- .false .maybe_is_proper_soft_partition.fanny <- .true .maybe_is_proper_soft_partition.fclust <- .true .maybe_is_proper_soft_partition.cshell <- .true .maybe_is_proper_soft_partition.flexmix <- .true .maybe_is_proper_soft_partition.Mclust <- .true ## See above for why we prefer not to have ## .maybe_is_proper_soft_partition.cl_membership(). ## (Although this is an internal generic really only used for making ## cl_fuzziness() computations more efficient, so we could be more ## generous here [perhaps using a slightly different name such as ## .maybe_represents_a_proper_soft_partition()]. 
.maybe_is_proper_soft_partition.cl_partition <- function(x) { y <- .get_representation(x) if(inherits(y, "cl_membership")) !attr(y, "is_cl_hard_partition") else .maybe_is_proper_soft_partition(y) } .maybe_is_proper_soft_partition.pclust <- function(x) x$m > 1 ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/medoid.R0000644000175100001440000001562611450366117013023 0ustar hornikusers### * cl_medoid cl_medoid <- function(x, method = "euclidean") { ## ## In principle we can get the same using pam(k = 1)$medoids. ## clusterings <- as.cl_ensemble(x) if(!length(clusterings)) stop("Cannot compute medoid of empty ensemble.") dissimilarities <- as.matrix(cl_dissimilarity(clusterings, method = method)) clusterings[[which.min(rowSums(dissimilarities))]] } ### * cl_pam cl_pam <- function(x, k, method = "euclidean", solver = c("pam", "kmedoids")) { clusterings <- as.cl_ensemble(x) if(!length(clusterings)) stop("Cannot compute medoid partition of empty ensemble.") ## Actually, we should have at least k distinct elements in the ## ensemble ... make_cl_pam <- function(class_ids, medoid_ids, medoids, criterion, description) .structure(list(cluster = class_ids, medoid_ids = medoid_ids, prototypes = medoids, criterion = criterion, description = description), class = "cl_pam") if(k == 1L) { ## Simplify matters if a global medoid is sought. dissimilarities <- cl_dissimilarity(clusterings, method = method) description <- attr(dissimilarities, "description") dissimilarities <- as.matrix(dissimilarities) row_sums <- rowSums(dissimilarities) medoid_id <- which.min(row_sums) criterion <- row_sums[medoid_id] return(make_cl_pam(as.cl_class_ids(seq_along(clusterings)), medoid_id, clusterings[medoid_id], criterion, description)) } solver <- match.arg(solver) ## Argh. We really want to run k-medoids for the unique elements of ## the ensemble, but pam() only works for symmetric dissimilarties. 
## As computing cluster dissimilarities is typically expensive, use ## the unique elements for doing so in any case. values <- unique(clusterings) ## Positions of ensemble members in the unique values. positions <- match(clusterings, values) ## Dissimilarities between unique values. dissimilarities <- cl_dissimilarity(values, method = method) description <- attr(dissimilarities, "description") dissimilarities <- as.matrix(dissimilarities) ## For pam(), we need the dissimilarities for all objects. if(solver == "pam") { dissimilarities <- dissimilarities[positions, positions] party <- cluster::pam(as.dist(dissimilarities), k) class_ids <- cl_class_ids(party) medoid_ids <- cl_medoid_ids(party) medoids <- clusterings[medoid_ids] criterion <- sum(dissimilarities[cbind(seq_along(class_ids), medoid_ids[class_ids])]) } else { ## Counts of unique values. counts <- tabulate(positions) ## Weigh according to the counts. Should be straightforward to ## add "case weights" as well ... dissimilarities <- counts * dissimilarities ## Now partition. party <- kmedoids(dissimilarities, k) ## And build the solution from this ... criterion <- party$criterion ## First, things for the unique values. medoid_ids <- cl_medoid_ids(party) medoids <- values[medoid_ids] class_ids <- cl_class_ids(party) ## Second, things for all objects. class_ids <- class_ids[positions] medoid_ids <- match(medoid_ids, positions) } make_cl_pam(class_ids, medoid_ids, medoids, criterion, description) } print.cl_pam <- function(x, ...) { class_ids <- cl_class_ids(x) fmt <- "A k-medoid partition of a cluster ensemble with %d elements into %d classes (dissimilarity measure: %s)." writeLines(c(strwrap(gettextf(fmt, n_of_objects(x), n_of_classes(x), x$description)))) writeLines(gettext("Class ids:")) print(class_ids, ...) writeLines(gettext("Criterion:")) print(x$criterion, ...) invisible(x) } ### * cl_medoid_ids ## Little helper, internal for the time being ... 
cl_medoid_ids <- function(x) UseMethod("cl_medoid_ids") cl_medoid_ids.cl_pam <- function(x) x$medoid_ids cl_medoid_ids.kmedoids <- function(x) x$medoid_ids cl_medoid_ids.clara <- function(x) x$i.med cl_medoid_ids.pam <- function(x) x$id.med ### * kmedoids kmedoids <- function(x, k) { ## ## For the time being, 'x' is assumed a dissimilarity object or a ## matrix of dissimilarities. ## Let's worry about the interface later. ## x <- as.matrix(x) n <- nrow(x) ## Use the formulation in Gordon & Vichi (1998), Journal of ## Classification, [P4'], page 279, with variables c(vec(X), z), but ## with rows and cols interchanged (such that x_{ij} is one iff o_i ## has medoid o_j, and z_j is one iff o_j is a medoid). make_constraint_mat <- function(n) { nsq <- n * n rbind(cbind(kronecker(rbind(rep.int(1, n)), diag(1, n)), matrix(0, n, n)), cbind(diag(1, nsq), kronecker(diag(1, n), rep.int(-1, n))), c(double(nsq), rep.int(1, n)), cbind(matrix(0, n, nsq), diag(1, n))) } make_constraint_dir <- function(n) rep.int(c("=", "<=", "=", "<="), c(n, n * n, 1, n)) make_constraint_rhs <- function(n, k) rep.int(c(1, 0, k, 1), c(n, n * n, 1, n)) ## ## We could try a relaxation without integrality constraints first, ## which seems to "typically work" (and should be faster). To test ## for integrality, use something like ## if(identical(all.equal(y$solution, round(y$solution)), TRUE)) ## y <- lpSolve::lp("min", c(c(x), double(n)), make_constraint_mat(n), make_constraint_dir(n), make_constraint_rhs(n, k), int.vec = seq_len(n * (n + 1))) ## Now get the class ids and medoids. ind <- which(matrix(y$solution[seq_len(n * n)], n) > 0, arr.ind = TRUE) medoid_ids <- unique(ind[, 2L]) class_ids <- seq_len(n) class_ids[ind[, 1L]] <- match(ind[, 2L], medoid_ids) .structure(list(cluster = class_ids, medoid_ids = medoid_ids, criterion = y$objval), class = "kmedoids") } print.kmedoids <- function(x, ...) { fmt <- "A k-medoids clustering of %d objects into %d clusters." 
writeLines(gettextf(fmt, n_of_objects(x), n_of_classes(x))) writeLines(gettext("Medoid ids:")) print(cl_medoid_ids(x), ...) writeLines(gettext("Class ids:")) print(unclass(cl_class_ids(x)), ...) writeLines(gettext("Criterion:")) print(x$criterion, ...) invisible(x) } clue/R/objects.R0000644000175100001440000001310113036461743013200 0ustar hornikusers### * n_of_objects ## Get the number of objects in a clustering. n_of_objects <- function(x) UseMethod("n_of_objects") ### ** Default method. n_of_objects.default <- function(x) length(cl_class_ids(x)) ## (Note that prior to R 2.1.0, kmeans() returned unclassed results, ## hence the best we can do for the *default* method is to look at a ## possibly existing "cluster" component. Using the class ids incurs ## another round of method dispatch, but avoids code duplication.) ### ** Partitioning methods. ## Package stats: kmeans() (R 2.1.0 or better). n_of_objects.kmeans <- n_of_objects.default ## Package cluster: clara(), fanny(), and pam() give objects of the ## respective class inheriting from class "partition". n_of_objects.partition <- n_of_objects.default ## Package cclust: cclust(). n_of_objects.cclust <- n_of_objects.default ## Package e1071: cmeans() gives objects of class "fclust". n_of_objects.fclust <- function(x) nrow(x$membership) ## Package e1071: cshell(). n_of_objects.cshell <- n_of_objects.fclust ## Package e1071: bclust(). n_of_objects.bclust <- n_of_objects.default ## Package mclust: Mclust(). n_of_objects.Mclust <- n_of_objects.default ### ** Hierarchical methods. ## Package stats: hclust(). n_of_objects.hclust <- function(x) length(x$order) ## Package cluster: agnes() and diana() give objects inheriting from ## class "twins". n_of_objects.twins <- n_of_objects.hclust ## Package cluster: mona(). n_of_objects.mona <- n_of_objects.hclust ## Package ape: class "phylo". n_of_objects.phylo <- function(x) length(x$tip.label) ### ** Others. ## Package stats: class "dist". 
n_of_objects.dist <- function(x) attr(x, "Size") ## Package clue: Ensembles. n_of_objects.cl_ensemble <- function(x) attr(x, "n_of_objects") ## Package clue: Memberships. n_of_objects.cl_membership <- nrow ## Package clue: pclust(). n_of_objects.pclust <- n_of_objects.default ## Package clue: Ultrametrics. n_of_objects.cl_ultrametric <- n_of_objects.dist ## Package clue: (virtual) class "cl_partition". n_of_objects.cl_partition <- function(x) .get_property_from_object_or_representation(x, "n_of_objects") ## Package clue: (virtual) class "cl_hierarchy". n_of_objects.cl_hierarchy <- function(x) .get_property_from_object_or_representation(x, "n_of_objects") ### * cl_object_names ## Determine the names of the objects in a clustering if available; give ## NULL otherwise. This is in sync with e.g. names() or dimnames(); au ## contraire, cl_object_labels() always gives labels even if no names ## are available. cl_object_names <- function(x) UseMethod("cl_object_names") ## ** Default method. cl_object_names.default <- function(x) names(cl_class_ids(x)) ## ** Partitions. ## There is really nothing special we can currently do. ## Most partitioning functions return no information on object names. ## This includes classes ## stats: kmeans ## cba: ccfkms, rock ## cclust: cclust ## e1071: bclust ## flexclust: kcca ## kernlab: specc ## mclust: Mclust ## The algorithms for which things "work" all give named class ids. ## RWeka: Weka_clusterer ## cluster: clara fanny pam ## e1071: cclust cshell ## ** Hierarchies. ## Package stats: hclust(). cl_object_names.hclust <- function(x) x$labels ## Package cluster: agnes(), diana() and mona() all return an object ## which has an 'order.lab' component iff "the original observations ## were labelled". We can use this together the the 'order' component ## to recreate the labels in their original order. Note that we cannot ## rely on dissimilarity or data components being available. 
cl_object_names.twins <- function(x) { if(!is.null(x$order.lab)) { out <- character(length = n_of_objects(x)) out[x$order] <- x$order.lab out } else NULL } cl_object_names.mona <- cl_object_names.twins ## Package ape: class "phylo". cl_object_names.phylo <- function(x) x$tip.label ## ** Others. ## Package stats: class "dist". ## (Raw object dissimilarities.) cl_object_names.dist <- function(x) attr(x, "Labels") ## Package clue: memberships. cl_object_names.cl_membership <- function(x) rownames(x) ## Package clue: ultrametrics. cl_object_names.cl_ultrametric <- function(x) attr(x, "Labels") ## Package clue: (virtual) class "cl_partition". cl_object_names.cl_partition <- function(x) cl_object_names(.get_representation(x)) ## Package clue: (virtual) class "cl_hierarchy". cl_object_names.cl_hierarchy <- function(x) cl_object_names(.get_representation(x)) ## Package clue: ensembles. cl_object_names.cl_ensemble <- function(x) { nms <- lapply(x, cl_object_names) ind <- which(lengths(nms) > 0L) if(any(ind)) nms[[ind[1L]]] else NULL } ### * cl_object_labels cl_object_labels <- function(x) { if(is.null(out <- cl_object_names(x))) out <- as.character(seq_len(n_of_objects(x))) out } ### * cl_object_dissimilarities ## Extract object dissimilarities from R objects containing such: this ## includes objects directly inheriting from "dist" as well as ## dendrograms or additive trees. cl_object_dissimilarities <- function(x) { ## Keep this in sync with .has_object_dissimilarities(). if(is.cl_dendrogram(x)) cl_ultrametric(x) else if(inherits(x, "dist")) x else stop("Cannot extract object dissimilarities") } .has_object_dissimilarities <- function(x) { ## Keep this in sync with cl_object_dissimilarities(). 
is.cl_dendrogram(x) || inherits(x, "dist") } ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/proximity.R0000644000175100001440000001244311304023136013605 0ustar hornikusers### * cl_proximity cl_proximity <- function(x, description, class = NULL, labels = NULL, self = NULL, size = NULL) { ## Similar to as.dist(), in a way. ## Currently, as.dist() is not generic, so we cannot provide a ## cl_proximity method for it. Hence, we have our dissimilarities ## and ultrametrics extend dist, and we use capitalized names for ## the attributes provided for compatibility with dist (Size and ## Labels). if(inherits(x, "dist")) { ## Explicitly deal with dist objects. ## Useful in particular because cophenetic() returns them. out <- x if(is.null(size)) size <- attr(x, "Size") if(is.null(labels)) labels <- attr(x, "Labels") } else if(inherits(x, "cl_proximity") || !(is.matrix(x) && (nrow(x) == ncol(x)))) out <- x else { ## Actually, x should really be a square symmetric matrix. ## The "self-proximities" in the main diagonal must be stored ## provided there is one non-zero entry. self <- diag(x) if(all(self == 0)) self <- NULL out <- x[row(x) > col(x)] if(is.null(labels)) { if(!is.null(rownames(x))) labels <- rownames(x) else if(!is.null(colnames(x))) labels <- colnames(x) } } if(is.null(size)) size <- as.integer((sqrt(1 + 8 * length(out)) + 1) / 2) attributes(out) <- list(Size = size, Labels = labels, description = description, self = self) class(out) <- unique(c(class, "cl_proximity")) out } ### * names.cl_proximity names.cl_proximity <- function(x) NULL ### * print.cl_proximity print.cl_proximity <- function(x, ...) { description <- attr(x, "description") if(length(description) > 0L) { ## Could make this generic ... 
kind <- if(inherits(x, "cl_dissimilarity")) "Dissimilarities" else if(inherits(x, "cl_agreement")) "Agreements" else "Proximities" cat(sprintf("%s using %s", kind, description), ":\n", sep = "") } m <- format(as.matrix(x)) if(is.null(self <- attr(x, "self"))) m[row(m) <= col(m)] <- "" else m[row(m) < col(m)] <- "" print(if(is.null(self)) m[-1, -attr(x, "Size")] else m, quote = FALSE, right = TRUE, ...) invisible(x) } ### * as.matrix.cl_proximity as.matrix.cl_proximity <- function(x, ...) { size <- attr(x, "Size") m <- matrix(0, size, size) m[row(m) > col(m)] <- x m <- m + t(m) if(!is.null(self <- attr(x, "self"))) { diag(m) <- self } ## ## stats:::as.matrix.dist() provides default dimnames ## (seq_len(size)) if no labels are available. ## We used to do this too, but ... if(!is.null(labels <- attr(x, "Labels"))) dimnames(m) <- list(labels, labels) ## m } ### * [.cl_proximity "[.cl_proximity" <- function(x, i, j) { ## Subscripting proximity objects. ## Basically matrix-like, but proximity objects are always ## "matrices", hence no 'drop' argument. ## For double-index subscripting, if i and j are identical, ## structure and class are preserved. Otherwise, a cross-proximity ## object is returned (and methods for classes inheriting from ## proximity need to readjust the class info as needed). ## For single-index subscripting, no attempty is currently made at ## preserving structure and class where possible. (We might also ## change this to select objects, i.e., the same rows and columns.) size <- attr(x, "Size") if(missing(j)) { if(missing(i)) return(x) else j <- seq_len(size) } if(missing(i)) i <- seq_len(size) description <- attr(x, "description") ## RG's graph:::[.dist avoids as.matrix() in noting that for dist ## objects, entry (i,j) is at n(i-1) - i(i-1)/2 + j - i (in the ## veclh dist representation). We could do something similar, but ## note that not all proximities have zero diagonals (i.e., NULL ## "self" attributes). 
y <- as.matrix(x)[i, j, drop = FALSE] if(identical(i, j)) { ## Testing using identical() is rather defensive ... return(cl_proximity(y, description = description, class = class(x))) } cl_cross_proximity(y, description = description) } ### * cl_cross_proximity cl_cross_proximity <- function(x, description = NULL, class = NULL) { attr(x, "description") <- description class(x) <- c(class, "cl_cross_proximity") x } ### * print.cl_cross_proximity print.cl_cross_proximity <- function(x, ...) { description <- attr(x, "description") if(length(description) > 0L) { ## Could make this generic ... kind <- if(inherits(x, "cl_cross_dissimilarity")) "Cross-dissimilarities" else if(inherits(x, "cl_cross_agreement")) "Cross-agreements" else "Cross-proximities" cat(sprintf("%s using %s", kind, description), ":\n", sep = "") } print(matrix(as.vector(x), nrow = nrow(x), dimnames = dimnames(x)), ...) invisible(x) } ### ** print_description_prefix ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/prototypes.R0000644000175100001440000000346711304023136013777 0ustar hornikuserscl_prototypes <- function(x) UseMethod("cl_prototypes") ## No default method. ## Package stats: kmeans() (R 2.1.0 or better). cl_prototypes.kmeans <- function(x) x$centers ## Package cluster: clara() always gives prototypes. cl_prototypes.clara <- function(x) x$medoids ## Package cluster: fanny() never gives prototypes. ## Package cluster: pam() does not give prototypes if given a ## dissimilarity matrix. cl_prototypes.pam <- function(x) { p <- x$medoids if(!is.matrix(p)) stop("Cannot determine prototypes.") p } ## Package cba: ccfkms(). cl_prototypes.ccfkms <- cl_prototypes.kmeans ## Package cclust: cclust(). cl_prototypes.cclust <- cl_prototypes.kmeans ## Package e1071: cmeans() gives objects of class "fclust". cl_prototypes.fclust <- cl_prototypes.kmeans ## Package e1071: cshell(). cl_prototypes.cshell <- cl_prototypes.kmeans ## Package e1071: bclust(). 
cl_prototypes.bclust <- cl_prototypes.kmeans ## Package flexclust: kcca() returns objects of S4 class "kcca" which ## extends S4 class "flexclust". cl_prototypes.kcca <- function(x) methods::slot(x, "centers") ## Package kernlab: specc() and kkmeans() return objects of S4 class ## "specc". cl_prototypes.specc <- function(x) kernlab::centers(x) ## Package mclust: Mclust(). cl_prototypes.Mclust <- function(x) { p <- x$mu ## For multidimensional models, we get a matrix whose columns are ## the means of each group in the best model, and hence needs to be ## transposed. if(is.matrix(p)) p <- t(p) p } ## Package clue: cl_pam(). cl_prototypes.cl_pam <- function(x) x$prototypes ## Package clue: (virtual) class "cl_partition". cl_prototypes.cl_partition <- function(x) cl_prototypes(.get_representation(x)) ## Package clue: pclust(). cl_prototypes.pclust <- function(x) x$prototypes clue/R/registration.R0000644000175100001440000003271611754400707014274 0ustar hornikusers### ### At least currently, all registries are meant and used for all types ### of clusterings (for the time being, partitions and hierarchies) ### simultaneously. ### ### * Internal stuff. .make_db_key <- function(name, type) paste(type, name, sep = "_") ### * General-purpose stuff. ### ### This currently insists on a given type: maybe it should simply list ### everything split according to type. But hey, it's internal stuff ### anyway (at least for the time being ...) ### get_methods_from_db <- function(db, type) { type <- match.arg(type, c("partition", "hierarchy")) pattern <- sprintf("^%s_", type) sub(pattern, "", grep(pattern, objects(db), value = TRUE)) } get_method_from_db <- function(db, type, name, msg) { ## ## Keep 'msg' here so that gettext()ing could work ... ## type <- match.arg(type, c("partition", "hierarchy")) db_keys <- objects(db) ind <- pmatch(.make_db_key(tolower(name), type), tolower(db_keys)) if(is.na(ind)) stop(msg, call. 
= FALSE, domain = NA) db[[db_keys[ind]]] } put_method_into_db <- function(db, type, name, value) { type <- match.arg(type, c("partition", "hierarchy")) db[[.make_db_key(name, type)]] <- value } ### * Consensus Method Registration. cl_consensus_methods_db <- new.env() get_cl_consensus_methods <- function(type) get_methods_from_db(cl_consensus_methods_db, type) get_cl_consensus_method <- function(name, type) { get_method_from_db(cl_consensus_methods_db, type, name, gettextf("Invalid consensus method '%s'.", name)) } set_cl_consensus_method <- function(name, type, definition, ...) { ## Register a @code{type} consensus method called @code{name} with ## definition @code{definition}. Provide more information where ## appropriate, e.g., @code{dissimilarity} d and @code{exponent} e ## for methods minimizing \sum_b d(x_b, x) ^ e. put_method_into_db(cl_consensus_methods_db, type, name, .structure(c(list(definition = definition), list(...)), class = "cl_consensus_method")) } set_cl_consensus_method("DWH", "partition", .cl_consensus_partition_DWH, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("soft/euclidean", "partition", .cl_consensus_partition_soft_euclidean, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("SE", "partition", .cl_consensus_partition_soft_euclidean, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("hard/euclidean", "partition", .cl_consensus_partition_hard_euclidean, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("HE", "partition", .cl_consensus_partition_hard_euclidean, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("soft/manhattan", "partition", .cl_consensus_partition_soft_manhattan, dissimilarity = "manhattan", exponent = 1) set_cl_consensus_method("SM", "partition", .cl_consensus_partition_soft_manhattan, dissimilarity = "manhattan", exponent = 1) set_cl_consensus_method("hard/manhattan", "partition", .cl_consensus_partition_hard_manhattan, dissimilarity = 
"manhattan", exponent = 1) set_cl_consensus_method("HM", "partition", .cl_consensus_partition_hard_manhattan, dissimilarity = "manhattan", exponent = 1) set_cl_consensus_method("GV1", "partition", .cl_consensus_partition_GV1, dissimilarity = "GV1", exponent = 2) set_cl_consensus_method("GV3", "partition", .cl_consensus_partition_GV3, dissimilarity = "comemberships", exponent = 2) set_cl_consensus_method("soft/symdiff", "partition", .cl_consensus_partition_soft_symdiff, dissimilarity = "symdiff", exponent = 1) set_cl_consensus_method("hard/symdiff", "partition", .cl_consensus_partition_hard_symdiff, dissimilarity = "symdiff", exponent = 1) set_cl_consensus_method("cophenetic", "hierarchy", .cl_consensus_hierarchy_cophenetic, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("euclidean", "hierarchy", .cl_consensus_hierarchy_cophenetic, dissimilarity = "euclidean", exponent = 2) set_cl_consensus_method("manhattan", "hierarchy", .cl_consensus_hierarchy_manhattan, dissimilarity = "manhattan", exponent = 1) set_cl_consensus_method("majority", "hierarchy", .cl_consensus_hierarchy_majority, dissimilarity = "symdiff", exponent = 1) ### * Dissimilarity Method Registration. cl_dissimilarity_methods_db <- new.env() get_cl_dissimilarity_methods <- function(type) get_methods_from_db(cl_dissimilarity_methods_db, type) get_cl_dissimilarity_method <- function(name, type) get_method_from_db(cl_dissimilarity_methods_db, type, name, gettextf("Invalid dissimilarity method '%s'.", name)) set_cl_dissimilarity_method <- function(name, type, definition, description, ...) 
put_method_into_db(cl_dissimilarity_methods_db, type, name, .structure(c(list(definition = definition, description = description), list(...)), class = "cl_dissimilarity_method")) set_cl_dissimilarity_method("euclidean", "partition", .cl_dissimilarity_partition_euclidean, "minimal Euclidean membership distance") set_cl_dissimilarity_method("manhattan", "partition", .cl_dissimilarity_partition_manhattan, "minimal Manhattan membership distance") set_cl_dissimilarity_method("comemberships", "partition", .cl_dissimilarity_partition_comemberships, "Euclidean comembership distance") set_cl_dissimilarity_method("symdiff", "partition", .cl_dissimilarity_partition_symdiff, "symmetric difference distance") set_cl_dissimilarity_method("Rand", "partition", .cl_dissimilarity_partition_Rand, "Rand distance") set_cl_dissimilarity_method("GV1", "partition", .cl_dissimilarity_partition_GV1, "Gordon-Vichi Delta_1 dissimilarity") set_cl_dissimilarity_method("BA/A", "partition", .cl_dissimilarity_partition_BA_A, "Boorman/Arabie minimum element moves distance") set_cl_dissimilarity_method("BA/C", "partition", .cl_dissimilarity_partition_BA_C, "Boorman/Arabie minimum lattice moves distance") set_cl_dissimilarity_method("BA/D", "partition", .cl_dissimilarity_partition_BA_D, "Boorman/Arabie pair-bonds distance") set_cl_dissimilarity_method("BA/E", "partition", .cl_dissimilarity_partition_BA_E, "Boorman/Arabie normalized information distance") set_cl_dissimilarity_method("VI", "partition", .cl_dissimilarity_partition_VI, "Variation of information") set_cl_dissimilarity_method("Mallows", "partition", .cl_dissimilarity_partition_Mallows, "Mallows dissimilarity") set_cl_dissimilarity_method("CSSD", "partition", .cl_dissimilarity_partition_CSSD, "Cluster Similarity Sensitive Distance") set_cl_dissimilarity_method("euclidean", "hierarchy", .cl_dissimilarity_hierarchy_euclidean, "Euclidean ultrametric distance") set_cl_dissimilarity_method("manhattan", "hierarchy", 
.cl_dissimilarity_hierarchy_manhattan, "Manhattan ultrametric distance") set_cl_dissimilarity_method("cophenetic", "hierarchy", .cl_dissimilarity_hierarchy_cophenetic, "cophenetic correlations") set_cl_dissimilarity_method("gamma", "hierarchy", .cl_dissimilarity_hierarchy_gamma, "rate of inversions") set_cl_dissimilarity_method("symdiff", "hierarchy", .cl_dissimilarity_hierarchy_symdiff, "symmetric difference distance") set_cl_dissimilarity_method("Chebyshev", "hierarchy", .cl_dissimilarity_hierarchy_Chebyshev, "Chebyshev distance") set_cl_dissimilarity_method("Lyapunov", "hierarchy", .cl_dissimilarity_hierarchy_Lyapunov, "Lyapunov distance") set_cl_dissimilarity_method("BO", "hierarchy", .cl_dissimilarity_hierarchy_BO, "Boorman/Olivier m_delta tree distance") set_cl_dissimilarity_method("spectral", "hierarchy", .cl_dissimilarity_hierarchy_spectral, "spectral ultrametric distance") ### * Agreement Method Registration. cl_agreement_methods_db <- new.env() get_cl_agreement_methods <- function(type) get_methods_from_db(cl_agreement_methods_db, type) get_cl_agreement_method <- function(name, type) get_method_from_db(cl_agreement_methods_db, type, name, gettextf("Invalid agreement method '%s'.", name)) set_cl_agreement_method <- function(name, type, definition, description, ...) 
put_method_into_db(cl_agreement_methods_db, type, name, .structure(c(list(definition = definition, description = description), list(...)), class = "cl_agreement_method")) set_cl_agreement_method("euclidean", "partition", .cl_agreement_partition_euclidean, "minimal euclidean membership distance") set_cl_agreement_method("manhattan", "partition", .cl_agreement_partition_manhattan, "minimal manhattan membership distance") set_cl_agreement_method("Rand", "partition", .cl_agreement_partition_Rand, "Rand index") set_cl_agreement_method("cRand", "partition", .cl_agreement_partition_cRand, "corrected Rand index") set_cl_agreement_method("NMI", "partition", .cl_agreement_partition_NMI, "normalized mutual information") set_cl_agreement_method("KP", "partition", .cl_agreement_partition_KP, "Katz-Powell index") set_cl_agreement_method("angle", "partition", .cl_agreement_partition_angle, "maximal angle between memberships") set_cl_agreement_method("diag", "partition", .cl_agreement_partition_diag, "maximal co-classification rate") set_cl_agreement_method("FM", "partition", .cl_agreement_partition_FM, "Fowlkes-Mallows index") set_cl_agreement_method("Jaccard", "partition", .cl_agreement_partition_Jaccard, "Jaccard index") set_cl_agreement_method("purity", "partition", .cl_agreement_partition_purity, "purity") set_cl_agreement_method("PS", "partition", .cl_agreement_partition_PS, "Prediction Strength") set_cl_agreement_method("euclidean", "hierarchy", .cl_agreement_hierarchy_euclidean, "euclidean ultrametric distance") set_cl_agreement_method("manhattan", "hierarchy", .cl_agreement_hierarchy_manhattan, "manhattan ultrametric distance") set_cl_agreement_method("cophenetic", "hierarchy", .cl_agreement_hierarchy_cophenetic, "cophenetic correlations") set_cl_agreement_method("angle", "hierarchy", .cl_agreement_hierarchy_angle, "angle between ultrametrics") set_cl_agreement_method("gamma", "hierarchy", .cl_agreement_hierarchy_gamma, "rate of inversions") 
clue/R/hierarchy.R0000644000175100001440000002341713036464014013532 0ustar hornikusers### * is.cl_hierarchy ## Determine whether an object is a hierarchy. ## Note that hierarchies are n-trees, which can naturally be represented ## by their classes (as done via cl_classes()) or internal ultrametric ## obtained by assigning height one to all splits (as done by ## .cl_ultrametric_from_classes()). ## We typically used the latter, but note that this is an *internal* ## reprsentation. ## User-level, cl_dendrogram objects are indexed hierarchies, and ## cl_hierarchy objects are n-trees. The latter can be "converted" into ## the former (using height one splits) via as.cl_dendrogram(). is.cl_hierarchy <- function(x) UseMethod("is.cl_hierarchy") ## Default method. is.cl_hierarchy.default <- .false ## Package stats: hclust(). is.cl_hierarchy.hclust <- function(x) !is.unsorted(x$height) ## Package cluster: agnes() and diana() give objects inheriting from ## class "twins". is.cl_hierarchy.twins <- .true ## Package cluster: mona(). is.cl_hierarchy.mona <- .true ## Package ape: class "phylo". is.cl_hierarchy.phylo <- function(x) ape::is.ultrametric(x) ## Package clue: (virtual) class "cl_hierarchy". ## Note that "raw" cl_ultrametric objects are *not* hierarchies, as ## these are meant for numeric computations. ## ## Is this really a good idea? ## We can as.hclust() a cl_dendrogram and then it is a cl_hierarchy ... ## is.cl_hierarchy.cl_hierarchy <- .true ### * as.cl_hierarchy ## Note that cl_hierarchy conceptually is a virtual class, so there are ## no prototypes and no cl_hierarchy() creator. .cl_hierarchy_classes <- "cl_hierarchy" as.cl_hierarchy <- function(x) { if(is.cl_hierarchy(x)) { if(!inherits(x, "cl_hierarchy")) .make_container(x, .cl_hierarchy_classes) else x } else .make_container(as.cl_ultrametric(x), .cl_hierarchy_classes) } ### * print.cl_hierarchy print.cl_hierarchy <- function(x, ...) .print_container(x, "cl_hierarchy", ...) 
### * Complex.cl_hierarchy ## No Complex() for any kind of hierarchy. Complex.cl_hierarchy <- function(z) stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) ### * Math.cl_hierarchy ## No Math() for any kind of hierarchy. Math.cl_hierarchy <- function(x, ...) stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) ### * Ops.cl_hierarchy Ops.cl_hierarchy <- function(e1, e2) { if(nargs() == 1L) stop(gettextf("Unary '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) ## Only comparisons are supprorted. if(!(as.character(.Generic) %in% c("<", "<=", ">", ">=", "==", "!="))) stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) if(n_of_objects(e1) != n_of_objects(e2)) stop("Hierarchies must have the same number of objects.") c1 <- cl_classes(e1) c2 <- cl_classes(e2) switch(.Generic, "<=" = all(is.finite(match(c1, c2))), "<" = all(is.finite(match(c1, c2))) && any(is.na(match(c2, c1))), ">=" = all(is.finite(match(c2, c1))), ">" = all(is.finite(match(c2, c1))) && any(is.na(match(c1, c2))), "==" = all(is.finite(match(c1, c2))) && all(is.finite(match(c2, c1))), "!=" = any(is.na(match(c1, c2))) || any(is.na(match(c2, c1)))) } ### * Summary.cl_hierarchy ## ## This is really the same as Summary.cl_partition(). ## Summary.cl_hierarchy <- function(..., na.rm = FALSE) { ok <- switch(.Generic, max = , min = , range = TRUE, FALSE) if(!ok) stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) args <- list(...) switch(.Generic, "min" = cl_meet(cl_ensemble(list = args)), "max" = cl_join(cl_ensemble(list = args)), "range" = { cl_ensemble(min = cl_meet(cl_ensemble(list = args)), max = cl_join(cl_ensemble(list = args))) }) } ### * as.hclust.cl_hierarchy as.hclust.cl_hierarchy <- function(x, ...) as.hclust(.get_representation(x), ...) 
### * is.cl_dendrogram ## ## Once we have cl_dendrogram testing, we can simplify cl_hierarchy ## testing. E.g., ## is.cl_hierachy.default <- is.cl_dendrogram ## should be ok, and we can add cl_hierarchy predicates for hierarchies ## which are not dendrograms on top of that. ## is.cl_dendrogram <- function(x) UseMethod("is.cl_dendrogram") ## Default method. is.cl_dendrogram.default <- .false ## Package stats: hclust(). is.cl_dendrogram.hclust <- function(x) !is.unsorted(x$height) ## Package cluster: agnes() and diana() give objects inheriting from ## class "twins". is.cl_dendrogram.twins <- .true ## Package cluster: mona(). is.cl_dendrogram.mona <- .true ## Package ape: class "phylo". is.cl_dendrogram.phylo <- function(x) ape::is.ultrametric(x) ## (We could also support ape's class "matching" via coercion to class ## "phylo".) ## Package clue: (virtual) class "cl_dendrogram". is.cl_dendrogram.cl_dendrogram <- .true ### * as.cl_dendrogram .cl_dendrogram_classes <- c("cl_dendrogram", "cl_hierarchy") as.cl_dendrogram <- function(x) { if(is.cl_dendrogram(x)) { if(!inherits(x, "cl_dendrogram")) .make_container(x, .cl_dendrogram_classes) else x } else .make_container(as.cl_ultrametric(x), .cl_dendrogram_classes) } ### * print.cl_dendrogram print.cl_dendrogram <- function(x, ...) .print_container(x, "cl_dendrogram", ...) ### * plot.cl_dendrogram plot.cl_dendrogram <- function(x, ...) plot(cl_ultrametric(.get_representation(x)), ...) ### * Group methods for cl_dendrogram objects. Ops.cl_dendrogram <- function(e1, e2) { if(nargs() == 1L) stop(gettextf("Unary '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) ## Only comparisons are supprorted. 
if(!(as.character(.Generic) %in% c("<", "<=", ">", ">=", "==", "!="))) stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) u1 <- cl_ultrametric(e1) u2 <- cl_ultrametric(e2) if(length(u1) != length(u2)) stop("Dendrograms must have the same number of objects.") switch(.Generic, "<=" = all(u1 <= u2), "<" = all(u1 <= u2) && any(u1 < u2), ">=" = all(u1 >= u2), ">" = all(u1 >= u2) && any(u1 > u2), "==" = all(u1 == u2), "!=" = any(u1 != u2)) } ### * Summary.cl_dendrogram ## ## This is really the same as Summary.cl_hierarchy() ... ## We cannot really call the poset specific internal meet and join ## functions from here as e.g. max(D, H) (D a dendrogram, H an n-tree) ## should use the n-tree poset functions ... ## However, dispatch for cl_dendrogram should not be needed if we also ## dispatch on cl_hierarchy ... ## ## Summary.cl_dendrogram <- ## function(..., na.rm = FALSE) ## { ## ok <- switch(.Generic, max = , min = , range = TRUE, FALSE) ## if(!ok) ## stop(gettextf("Generic '%s' not defined for \"%s\" objects.", ## .Generic, .Class)) ## args <- list(...) ## switch(.Generic, ## "min" = cl_meet(cl_ensemble(list = args)), ## "max" = cl_join(cl_ensemble(list = args)), ## "range" = { ## cl_ensemble(min = cl_meet(cl_ensemble(list = args)), ## max = cl_join(cl_ensemble(list = args))) ## }) ## } ### * as.hclust.cl_dendrogram ## ## This is really the same as as.hclust.cl_hierarchy() ... ## Dispatch for cl_dendrogram should not be needed if we also dispatch ## on cl_hierarchy ... ## ## as.hclust.cl_dendrogram <- ## function(x, ...) ## as.hclust(.get_representation(x), ...) ### ** cut.cl_dendrogram ## Not perfect as this perhaps return something more "classed" in the ## spirit of clue ... cut.cl_dendrogram <- function(x, ...) cutree(as.hclust(x), ...) ### * Utilities ## To turn a mona object into a cl_dendrogram, we need to be able to ## compute its associated ultrametric. Hence, provide a cophenetic() ## method for mona objects ... 
cophenetic.mona <- function(x) { no <- length(x$order) ns <- max(x$step) + 1 m <- matrix(NA, no, no) FOO <- function(ind, step, s) { if(length(ind) <= 1) return() grp <- c(0, cumsum(step == s)) ind <- split(ind, grp) len <- length(ind) for(a in seq_len(len)) { for(b in seq_len(a - 1L)) { ## Need both as we currently cannot assume that the ## indices are sorted. Alternatively, work with the ## sequence from one to the number of objects, and ## reorder at the end ... m[ind[[a]], ind[[b]]] <<- s m[ind[[b]], ind[[a]]] <<- s } } ind <- ind[lengths(ind) > 1L] pos <- which(step == s) step <- split(step[-pos], grp[-1][-pos]) if(is.null(step)) return() for(a in seq_along(ind)) FOO(ind[[a]], step[[a]], s + 1) } FOO(x$order, x$step, 1) m[is.na(m)] <- ns m <- ns - m rownames(m) <- rownames(x$data) as.dist(m) } ## And while we're at it ... ## (Of course, as.hclust() should really "know" that a cophenetic() ## method is available ...) as.hclust.mona <- function(x, ...) hclust(cophenetic(x), "single") ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/R/margin.R0000644000175100001440000000060511304023136013013 0ustar hornikuserscl_margin <- function(x) { if(is.cl_hard_partition(x)) out <- rep.int(1, n_of_objects(x)) else if(is.cl_partition(x)) { x <- cl_membership(x) i <- seq_len(nrow(x)) j <- cbind(i, max.col(x)) out <- x[j] x[j] <- 0 out <- out - x[cbind(i, max.col(x))] } else stop("Argument 'x' must be a partition.") out } clue/R/ensemble.R0000644000175100001440000001567013435044575013361 0ustar hornikuserscl_ensemble <- function(..., list = NULL) { clusterings <- c(list(...), list) if(!length(clusterings)) { ## Return an empty cl_ensemble. ## In this case, we cannot additionally know whether it contains ## partitions or hierarchies ... 
attr(clusterings, "n_of_objects") <- as.integer(NA) class(clusterings) <- "cl_ensemble" return(clusterings) } ## Previously, we used to require that the elements of the ensemble ## either all be partitions, or all be hierarchies. We no longer do ## this, as it makes sense to also allow e.g. object dissimilarities ## (raw "dist" objects or additive distances) as elements (e.g., ## when computing proximities), and it is rather cumbersome to ## decide in advance which combinations of elements might be useful ## and hence should be allowed. All we enforce is that all elements ## correspond to the same number of objects (as we typically cannot ## verify that they relate to the *same* objects). For "pure" ## ensembles of partitions or hierarchies we add additional class ## information. if(all(vapply(clusterings, is.cl_partition, NA))) class(clusterings) <- c("cl_partition_ensemble", "cl_ensemble") else if(all(vapply(clusterings, is.cl_dendrogram, NA))) class(clusterings) <- c("cl_dendrogram_ensemble", "cl_hierarchy_ensemble", "cl_ensemble") else if(all(vapply(clusterings, is.cl_hierarchy, NA))) class(clusterings) <- c("cl_hierarchy_ensemble", "cl_ensemble") else class(clusterings) <- "cl_ensemble" n <- sapply(clusterings, n_of_objects) if(any(diff(n))) stop("All elements must have the same number of objects.") attr(clusterings, "n_of_objects") <- as.integer(n[1L]) clusterings } is.cl_ensemble <- function(x) inherits(x, "cl_ensemble") ## ## In the old days, kmeans() results were unclassed lists, hence such ## objects were taken as representing a single clustering. Nowadays, we ## take these as lists of clusterings. 
as.cl_ensemble <- function(x) { if(is.cl_ensemble(x)) x else if(is.list(x) && !is.object(x)) cl_ensemble(list = x) else cl_ensemble(x) } ## c.cl_ensemble <- function(..., recursive = FALSE) { clusterings <- unlist(lapply(list(...), as.cl_ensemble), recursive = FALSE) cl_ensemble(list = clusterings) } "[.cl_ensemble" <- function(x, i) { ## Make subscripting empty ensembles a noop. if(length(x) == 0L) return(x) cl_ensemble(list = NextMethod("[")) } rep.cl_ensemble <- function(x, times, ...) cl_ensemble(list = NextMethod("rep")) print.cl_partition_ensemble <- function(x, ...) { msg <- sprintf(ngettext(length(x), "An ensemble of %d partition of %d objects.", "An ensemble of %d partitions of %d objects."), length(x), n_of_objects(x)) writeLines(strwrap(msg)) invisible(x) } Summary.cl_partition_ensemble <- function(..., na.rm = FALSE) { ok <- switch(.Generic, max = , min = , range = TRUE, FALSE) if(!ok) stop(gettextf("Generic '%s' not defined for \"%s\" objects.", .Generic, .Class), domain = NA) args <- list(...) ## Combine the given partition ensembles. x <- do.call(c, args) switch(.Generic, "min" = cl_meet(x), "max" = cl_join(x), "range" = cl_ensemble(min = cl_meet(x), max = cl_join(x))) } print.cl_dendrogram_ensemble <- function(x, ...) { msg <- sprintf(ngettext(length(x), "An ensemble of %d dendrogram of %d objects.", "An ensemble of %d dendrograms of %d objects."), length(x), n_of_objects(x)) writeLines(strwrap(msg)) invisible(x) } print.cl_hierarchy_ensemble <- function(x, ...) { msg <- sprintf(ngettext(length(x), "An ensemble of %d hierarchy of %d objects.", "An ensemble of %d hierarchies of %d objects."), length(x), n_of_objects(x)) writeLines(strwrap(msg)) invisible(x) } print.cl_ensemble <- function(x, ...) 
{ writeLines(sprintf(ngettext(length(x), "An ensemble with %d element.", "An ensemble with %d elements."), length(x))) invisible(x) } plot.cl_ensemble <- function(x, ..., main = NULL, layout = NULL) { if(!is.cl_ensemble(x)) stop("Wrong class.") ## What we can definitely plot is are cl_addtree, cl_dendrogram and ## cl_ultrametric objects. (We could also add simple methods for ## plotting raw dissimilarities, but of course seriation::dissplot() ## would be the thing to use.) What we cannot reasonably plot is ## partitions (in particular, as these do not know about the ## underlying dissimilarities. But then we could perhaps provide ## silhoutte plots etc for ensembles of partitions ... ## ## Think about this. ## ## So let us check for the things we can plot. ## (Note that currently there is neither is.cl_ultrametric() nor ## is.cl_addtree().) ok <- vapply(x, function(e) (is.cl_dendrogram(e) || inherits(e, c("cl_addtree", "cl_ultrametric"))), NA) if(!all(ok)) stop(gettextf("Plotting not available for elements %s of the ensemble.", paste(which(!ok), collapse = " ")), domain = NA) ## Prefer dendrogram plot methods to those for hclust objects. ind <- vapply(x, is.cl_dendrogram, NA) if(any(ind)) x[ind] <- lapply(x, as.cl_dendrogram) ## Now the usual layouting ... same as for plotting relation ## ensembles. ## Number of elements. n <- length(x) ## Layout. byrow <- TRUE if(is.null(layout)) { nc <- ceiling(sqrt(n)) nr <- ceiling(n / nc) } else { layout <- c(as.list(layout), byrow)[seq_len(3)] if(is.null(names(layout))) names(layout) <- c("nr", "nc", "byrow") nr <- layout[["nr"]] nc <- layout[["nc"]] byrow <- layout[["byrow"]] } op <- if(byrow) par(mfrow = c(nr, nc)) else par(mfcol = c(nr, nc)) on.exit(par(op)) ## Try recycling main (might want the same for others as well). if(!is.list(main)) { main <- if(is.null(main)) vector("list", length = n) else rep.int(as.list(main), n) } for(i in seq_along(x)) plot(x[[i]], main = main[[i]], ...) 
} unique.cl_ensemble <- function(x, incomparables = FALSE, ...) cl_ensemble(list = NextMethod("unique")) .cl_ensemble_type <- function(x) { if(inherits(x, "cl_partition_ensemble")) "partition" else if(inherits(x, "cl_hierarchy_ensemble")) "hierarchy" else NULL } clue/R/fuzziness.R0000644000175100001440000000435013435044610013606 0ustar hornikuserscl_fuzziness <- function(x, method = NULL, normalize = TRUE) { x <- as.cl_ensemble(x) out <- double(length(x)) ## ## The docs say that we should only have partitions ... attr(out, "description") <- "Fuzziness" class(out) <- "cl_fuzziness" parties <- vapply(x, is.cl_partition, NA) if(!(length(x) || any(parties))) { ## Currently, no fuzzy hierarchies ... return(out) } ## if(!is.function(method)) { builtin_methods <- c("PC", "PE") builtin_method_names <- c("partition coefficient", "partition entropy") if(is.null(method)) ind <- 1 else if(is.na(ind <- pmatch(tolower(method), tolower(builtin_methods)))) stop(gettextf("Value '%s' is not a valid abbreviation for a fuzziness method.", method), domain = NA) method <- paste0(".cl_fuzziness_partition_", builtin_methods[ind]) method_name <- builtin_method_names[ind] if(normalize) method_name <- paste("normalized", method_name) } else method_name <- "user-defined method" out[parties] <- as.numeric(sapply(x[parties], method, normalize)) attr(out, "description") <- paste("Fuzziness using", method_name) out } .cl_fuzziness_partition_PC <- function(x, normalize = TRUE) { ## Dunn's Partition Coefficient, see also ?fanny. ## Note that we normalize differently ... if(!.maybe_is_proper_soft_partition(x) && is.cl_hard_partition(x)) return(1 - normalize) pc <- sum(cl_membership(x) ^ 2) / n_of_objects(x) if(normalize) pc <- (1 - pc) / (1 - 1 / n_of_classes(x)) pc } .cl_fuzziness_partition_PE <- function(x, normalize = TRUE) { ## Bezdek's Partition Entropy. ## Note that we normalize differently ... 
if(!.maybe_is_proper_soft_partition(x) && is.cl_hard_partition(x)) return(0) M <- cl_membership(x) pe <- - sum(ifelse(M > 0, M * log(M), 0)) / n_of_objects(x) if(normalize) pe <- pe / log(n_of_classes(x)) pe } print.cl_fuzziness <- function(x, ...) { cat(attr(x, "description"), ":\n", sep = "") print(as.vector(x), ...) invisible(x) } clue/R/validity.R0000644000175100001440000001003011304023136013354 0ustar hornikusers## A slightly polymorphic function, similar to cluster::silhouette() and ## its methods. cl_validity <- function(x, ...) UseMethod("cl_validity") cl_validity.default <- function(x, d, ...) { ## Note that providing methods for classes "cl_partition" and ## "cl_hierarchy" is not good enough ... out <- list() if(.has_object_memberships(x)) { v <- .cl_validity_partition_d_a_f(cl_membership(x), as.matrix(d)) out <- list("Dissimilarity accounted for" = v) } else if(.has_object_dissimilarities(x)) { x <- cl_object_dissimilarities(x) d <- as.dist(d) out <- list("Variance accounted for" = .cl_validity_hierarchy_variance_a_f(x, d), "Deviance accounted for" = .cl_validity_hierarchy_deviance_a_f(x, d)) ## Consider adding e.g. the Agglomerative Coefficient or ## Divisive Coeffcient for more than cluster::agnes() and ## cluster::diana(), respectively. } class(out) <- "cl_validity" out } ## Package cluster: agnes(). cl_validity.agnes <- function(x, ...) { out <- list("Agglomerative coefficient" = x$ac) ## According to the docs, agnes objects always have a diss ## component, but let's be defensive ... if(!is.null(d <- x$diss)) out <- c(out, cl_validity.default(x, d)) class(out) <- "cl_validity" out } ## Package cluster: diana(). cl_validity.diana <- function(x, ...) { out <- list("Divisive coefficient" = x$dc) ## According to the docs, diana objects always have a diss ## component, but let's be defensive ... if(!is.null(d <- x$diss)) out <- c(out, cl_validity.default(x, d)) class(out) <- "cl_validity" out } ## Package clue: (virtual) class "cl_partition". 
cl_validity.cl_partition <- function(x, ...) cl_validity(.get_representation(x), ...) ## Package clue: class pclust. ## So that this works for all classes extending pclust ... cl_validity.pclust <- function(x, ...) x$validity print.cl_validity <- function(x, ...) { for(nm in names(x)) cat(nm, ": ", x[[nm]], "\n", sep = "") invisible(x) } .cl_validity_partition_d_a_f <- function(m, d) { ## "Dissimilarity accounted for". ## Internal function for computing 1 - a / mean(d), where the ## "average within dissimilarity" a is given by ## \frac{\sum_{i,j} \sum_k m_{ik}m_{jk} d(i,j)} ## {\sum_{i,j} \sum_k m_{ik}m_{jk}} ## where m is the membership matrix and d a *symmetric* matrix of ## dissimilarities. within_sums <- rowSums(sapply(seq_len(ncol(m)), function(k) { z <- m[, k] w <- outer(z, z, "*") c(sum(w * d), sum(w)) })) average_within_d <- within_sums[1L] / within_sums[2L] 1 - average_within_d / mean(d) } .cl_validity_hierarchy_variance_a_f <- function(u, d) { ## *Variance accounted for*. ## See e.g. Hubert, Arabie, & Meulman (2006), The structural ## representation of proximity matrices with MATLAB: ## variance_accounted_for = ## 1 - \frac{\sum_{i < j} (d_{ij} - u_{ij}) ^ 2} ## {\sum_{i < j} (d_{ij} - mean(d)) ^ 2} ## As this can be arbitrarily negative, we cut at 0. max(1 - sum((d - u) ^ 2) / sum((d - mean(d)) ^ 2), 0) } .cl_validity_hierarchy_deviance_a_f <- function(u, d) { ## *Deviance accounted for* (i.e., absolute deviation). ## See e.g. Smith (2001), Constructing ultrametric and additive ## trees based on the ${L}_1$ norm, Journal of Classification. ## deviance_accounted_for = ## 1 - \frac{\sum_{i < j} |d_{ij} - u_{ij}|} ## {\sum_{i < j} |d_{ij} - median(d)|} ## As this can be arbitrarily negative, we cut at 0. max(1 - sum(abs(d - u)) / sum(abs(d - median(d))), 0) } ## Silhouette methods silhouette.cl_partition <- function(x, ...) silhouette(.get_representation(x), ...) silhouette.cl_pclust <- function(x, ...) 
x$silhouette clue/R/pava.R0000644000175100001440000000507013036461767012512 0ustar hornikusers## A Pool Adjacent Violators Algorithm framework for minimizing problems ## like ## ## \sum_i \sum_{J_i} w_{ij} f(y_{ij}, m_i) ## ## under the constraint m_1 <= ... <= m_n with f a convex function in m. ## Note that this formulation allows for repeated data in each block, ## and hence is more general than the usual pava/isoreg ones. A solver ## for the unconstrained \sum_k w_k f(y_k, m) => min! is needed. ## Typical cases are f(y, m) = |y - m|^p for p = 2 (solved by weighted ## mean) and p = 1 (solved by weighted median), respectively. ## A general design issue is whether weights should be supported or not, ## because in the latter case the solver could be a function of a single ## (data) argument only. Let's assume the former for the time being. pava <- function(x, w = NULL, solver = weighted.mean, merger = c) { n <- length(x) if(is.null(w)) { w <- if(is.list(x)) lapply(lengths(x), function(u) rep.int(1, u)) else rep.int(1, n) } else if(is.list(x)) w <- as.list(w) inds <- as.list(seq_len(n)) vals <- mapply(solver, x, w) ## Combine blocks i and i + 1. combine <- if(is.list(x)) { ## In the repeated data case, we explicitly merge the data (and ## weight) lists. function(i) { ## Merge the data and indices, solve, and put things back ## into position i, dropping position i + 1. j <- i + 1L x[[i]] <<- merger(x[[i]], x[[j]]) w[[i]] <<- c(w[[i]], w[[j]]) vals[i] <<- solver(x[[i]], w[[i]]) inds[[i]] <<- c(inds[[i]], inds[[j]]) keep <- seq_len(n)[-j] x <<- x[keep] w <<- w[keep] vals <<- vals[keep] inds <<- inds[keep] n <<- n - 1L } } else { function(i) { ## In the "simple" case, merge only indices and values. 
j <- i + 1L inds[[i]] <<- c(inds[[i]], inds[[j]]) vals[i] <<- solver(x[inds[[i]]], w[inds[[i]]]) keep <- seq_len(n)[-j] vals <<- vals[keep] inds <<- inds[keep] n <<- n - 1L } } i <- 1L repeat { if(i < n) { if((vals[i] > vals[i + 1])) { combine(i) while((i > 1L) && (vals[i - 1L] > vals[i])) { combine(i - 1L) i <- i - 1L } } else i <- i + 1L } else break } rep.int(vals, lengths(inds)) } clue/R/AAA.R0000644000175100001440000000037411304023136012123 0ustar hornikusers## Things which must come first in the package code. ### * Internal utilities. .false <- function(x) FALSE .true <- function(x) TRUE ## A fast version of structure(). .structure <- function(x, ...) `attributes<-`(x, c(attributes(x), list(...))) clue/R/tabulate.R0000644000175100001440000000061411304023136013337 0ustar hornikuserscl_tabulate <- function(x) { values <- unique(x) counts <- tabulate(match(x, values)) ## Still a bit tricky to create a data frame with a list "column" ## which is not protected by I(); otherwise, we oculd simply do ## data.frame(values = I(values), counts = counts) out <- data.frame(values = double(length(values)), counts = counts) out$values <- values out } clue/R/consensus.R0000644000175100001440000011507513140644003013570 0ustar hornikusers### * cl_consensus cl_consensus <- function(x, method = NULL, weights = 1, control = list()) { ## ## Interfaces are a matter of taste. ## E.g., one might want to have a 'type' argument indication whether ## hard or soft partitions are sought. One could then do ## cl_consensus(x, method = "euclidean", type = "hard") ## to look for an optimal median (or least squares) hard partition ## (for euclidean dissimilarity). ## For us, "method" really indicates a certain algorithm, with its ## bells and whistles accessed via the 'control' argument. 
## clusterings <- as.cl_ensemble(x) if(!length(clusterings)) stop("Cannot compute consensus of empty ensemble.") weights <- rep_len(weights, length(clusterings)) if(any(weights < 0)) stop("Argument 'weights' has negative elements.") if(!any(weights > 0)) stop("Argument 'weights' has no positive elements.") if(!is.function(method)) { if(!inherits(method, "cl_consensus_method")) { ## Get the method definition from the registry. type <- .cl_ensemble_type(clusterings) if(is.null(method)) method <- .cl_consensus_method_default(type) method <- get_cl_consensus_method(method, type) } method <- method$definition } method(clusterings, weights, control) } ### * .cl_consensus_partition_DWH .cl_consensus_partition_DWH <- function(clusterings, weights, control) { ## ## Could make things more efficient by subscripting on positive ## weights. ## (Note that this means control$order has to be subscripted as ## well.) ## max_n_of_classes <- max(sapply(clusterings, n_of_classes)) ## Control parameters. k <- control$k if(is.null(k)) k <- max_n_of_classes order <- control$order if(is.null(order)) order <- sample(seq_along(clusterings)) clusterings <- clusterings[order] weights <- weights[order] k_max <- max(k, max_n_of_classes) s <- weights / cumsum(weights) s[is.na(s)] <- 0 # Division by zero ... M <- cl_membership(clusterings[[1L]], k_max) for(b in seq_along(clusterings)[-1L]) { mem <- cl_membership(clusterings[[b]], k_max) ## Match classes from conforming memberships. 
ind <- solve_LSAP(crossprod(M, mem), maximum = TRUE) M <- (1 - s[b]) * M + s[b] * mem[, ind] if(k < k_max) M <- .project_to_leading_columns(M, k) } M <- .cl_membership_from_memberships(M[, seq_len(k), drop = FALSE], k) as.cl_partition(M) } ### * .cl_consensus_partition_AOS .cl_consensus_partition_AOS <- function(clusterings, weights, control, type = c("SE", "HE", "SM", "HM")) { ## The start of a general purpose optimizer for determining ## consensus partitions by minimizing ## \sum_b w_b d(M, M_b) ^ e ## = \sum_b \min_{P_b} w_b f(M, M_b P_b) ^ e ## for the special case where the criterion function is based on ## M and M_b P_b (i.e., column permutations of M_b), as opposed to ## the general case where d(M, M_b) = \min_{P_b} f(M, P_b, M_b) ## handled by .cl_consensus_partition_AOG(). ## ## The AO ("alternative optimization") proceeds by alternatively ## matching the M_b to M by minimizing f(M, M_b P_b) over P_b, and ## fitting M by minimizing \sum_b w_b f(M, M_b P_b) ^ e for fixed ## matchings. ## ## Such a procedure requires three ingredients: a function for ## matching M_b to M (in fact simply replacing M_b by the matched ## M_b P_b); a function for fitting M to the \{M_b P_b\}, and a ## function for computing the value of the criterion function ## corresponding to this fit (so that one can stop if the relative ## improvement is small enough). ## ## For the time being, we only use this to determine soft and hard ## Euclidean least squares consensus partitions (soft and hard ## Euclidean means), so the interface does not yet reflect the ## generality of the approach (which would either pass the three ## functions, or even set up family objects encapsulating the three ## functions). ## ## This special case is provided for efficiency and convenience. ## Using the special form of the criterion function, we can simply ## always work memberships with the same maximal number of columns, ## and with the permuted \{ M_b P_b \}. ## For the time being ... 
type <- match.arg(type) w <- weights / sum(weights) n <- n_of_objects(clusterings) k_max <- max(sapply(clusterings, n_of_classes)) ## Control parameters. k <- control$k if(is.null(k)) k <- k_max maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 100 nruns <- control$nruns reltol <- control$reltol if(is.null(reltol)) reltol <- sqrt(.Machine$double.eps) start <- control$start verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") ## Handle start values and number of runs. if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. start <- list(start) } nruns <- length(start) } else { if(is.null(nruns)) { ## Use nruns only if start is not given. nruns <- 1L } start <- replicate(nruns, .random_stochastic_matrix(n, k), simplify = FALSE) } ## The maximal (possible) number of classes in M and the \{ M_b \}. k_all <- max(k, k_max) value <- switch(type, SE = , HE = function(M, memberships, w) { sum(w * sapply(memberships, function(u) sum((u - M) ^ 2))) }, SM = , HM = function(M, memberships, w) { sum(w * sapply(memberships, function(u) sum(abs(u - M)))) }) ## Return the M[, ind] column permutation of M optimally matching N. match_memberships <- switch(type, SE = , HE = function(M, N) { M[, solve_LSAP(crossprod(N, M), maximum = TRUE), drop = FALSE] }, SM = , HM = function(M, N) { M[, solve_LSAP(.cxdist(N, M, "manhattan")), drop = FALSE] }) ## Function for fitting M to (fixed) memberships \{ M_b P_b \}. ## As we use a common number of columns for all membership matrices ## involved, we need to pass the desired 'k' ... fit_M <- switch(type, SE = function(memberships, w, k) { ## Update M as \sum w_b M_b P_b. M <- .weighted_sum_of_matrices(memberships, w, nrow(M)) ## If k < k_all, "project" as indicated in Gordon & ## Vichi (2001), p. 238. if(k < ncol(M)) M <- .project_to_leading_columns(M, k) M }, HE = , HM = function(memberships, w, k) { ## Compute M as \sum w_b M_b P_b. 
M <- .weighted_sum_of_matrices(memberships, w, nrow(M)) ## And compute a closest hard partition H(M) from ## that, using the first k columns of M. ids <- max.col(M[ , seq_len(k), drop = FALSE]) .cl_membership_from_class_ids(ids, ncol(M)) }, SM = .l1_fit_M) memberships <- lapply(clusterings, cl_membership, k_all) V_opt <- Inf M_opt <- NULL for(run in seq_along(start)) { if(verbose && (nruns > 1L)) message(gettextf("AOS run: %d", run)) M <- start[[run]] if(k < k_all) M <- cbind(M, matrix(0, nrow(M), k_all - k)) memberships <- lapply(memberships, match_memberships, M) old_value <- value(M, memberships, w) if(verbose) message(gettextf("Iteration: 0 *** value: %g", old_value)) iter <- 1L while(iter <= maxiter) { ## Fit M to the M_b P_b. M <- fit_M(memberships, w, k) ## Match the \{ M_b P_b \} to M. memberships <- lapply(memberships, match_memberships, M) ## Update value. new_value <- value(M, memberships, w) if(verbose) message(gettextf("Iteration: %d *** value: %g", iter, new_value)) if(abs(old_value - new_value) < reltol * (abs(old_value) + reltol)) break old_value <- new_value iter <- iter + 1L } if(new_value < V_opt) { converged <- (iter <= maxiter) V_opt <- new_value M_opt <- M } if(verbose) message(gettextf("Minimum: %g", V_opt)) } M <- .stochastify(M_opt) rownames(M) <- rownames(memberships[[1L]]) meta <- list(objval = value(M, memberships, w), converged = converged) M <- .cl_membership_from_memberships(M[, seq_len(k), drop = FALSE], k, meta) as.cl_partition(M) } .random_stochastic_matrix <- function(n, k) { M <- matrix(runif(n * k), n, k) M / rowSums(M) } .l1_fit_M <- function(memberships, w, k) { ## Determine stochastic matrix M with at most k leading nonzero ## columns such that ## ## \sum_b w_b \sum_{i,j} | m_{ij}(b) - m_{ij} | => min ## ## where the sum over j goes from 1 to k. ## ## Clearly, this can be done separately for each row, where we need ## to minimize ## ## \sum_b w_b \sum_j | y_j(b) - x_j | => min ## ## over all probability vectors x. 
Such problems can e.g. be solved ## via the following linear program: ## ## \sum_b \sum_j w_b e'(u(b) + v(b)) => min ## ## subject to ## ## u(1), v(1), ..., u(B), v(B), x >= 0 ## x + u(b) - v(b) = y(b), b = 1, ..., B ## e'x = 1 ## ## (where e = [1, ..., 1]). ## ## So we have one long vector z of "variables": ## ## z = [u(1)', v(1)', ..., u(B)', v(B)', x']' ## ## of length (2B + 1) k, with x the object of interest. ## Rather than providing a separate function for weighted L1 fitting ## of probability vectors we prefer doing "everything" at once, in ## order to avoid recomputing the coefficients and constraints of ## the associated linear program. B <- length(memberships) L <- (2 * B + 1) * k ## Set up associated linear program. ## Coefficients in the objective function. objective_in <- c(rep(w, each = 2 * k), rep.int(0, k)) ## Constraints. constr_mat <- rbind(diag(1, L), cbind(kronecker(diag(1, B), cbind(diag(1, k), diag(-1, k))), kronecker(rep.int(1, B), diag(1, k))), c(rep.int(0, 2 * B * k), rep.int(1, k))) constr_dir <- c(rep.int(">=", L), rep.int("==", B * k + 1L)) ind <- seq.int(from = 2 * B * k + 1L, length.out = k) nr <- NROW(memberships[[1L]]) nc <- NCOL(memberships[[1L]]) M <- matrix(0, nrow = nr, ncol = k) ## Put the memberships into one big array so that we can get their ## rows more conveniently (and efficiently): memberships <- array(unlist(memberships), c(nr, nc, B)) for(i in seq_len(nr)) { out <- lpSolve::lp("min", objective_in, constr_mat, constr_dir, c(rep.int(0, L), memberships[i, seq_len(k), ], 1)) M[i, ] <- out$solution[ind] } ## Add zero columns if necessary. 
if(k < nc) M <- cbind(M, matrix(0, nr, nc - k)) M } ### ** .cl_consensus_partition_soft_euclidean .cl_consensus_partition_soft_euclidean <- function(clusterings, weights, control) .cl_consensus_partition_AOS(clusterings, weights, control, "SE") ### ** .cl_consensus_partition_hard_euclidean .cl_consensus_partition_hard_euclidean <- function(clusterings, weights, control) .cl_consensus_partition_AOS(clusterings, weights, control, "HE") ### ** .cl_consensus_partition_soft_manhattan .cl_consensus_partition_soft_manhattan <- function(clusterings, weights, control) .cl_consensus_partition_AOS(clusterings, weights, control, "SM") ### ** .cl_consensus_partition_hard_manhattan .cl_consensus_partition_hard_manhattan <- function(clusterings, weights, control) .cl_consensus_partition_AOS(clusterings, weights, control, "HM") ### * .cl_consensus_partition_AOG .cl_consensus_partition_AOG <- function(clusterings, weights, control, type = c("GV1")) { ## The start of a general purpose optimizer for determining ## consensus partitions by minimizing ## \sum_b w_b d(M, M_b) ^ p ## = \sum_b \min_{P_b} w_b f(M, M_b, P_b) ^ e ## for general dissimilarity matrices which involve class matching ## via permutation matrices P_b. ## ## The AO ("Alternative Optimization") proceeds by alternating ## between determining the optimal permutations P_b by minimizing ## f(M, M_b, P_b) ## for fixed M, and fitting M by minimizing ## \sum_b w_b f(M, M_b, P_b) ^ e ## for fixed \{ P_b \}. ## ## We encapsulate this into functions fit_P() and fit_M() (and a ## value() function for the criterion function to be minimized with ## respect to both M and \{ P_b \}, even though the current ## interface does not yet reflect the generality of the approach. 
## ## Note that rather than passing on information about the numbers of ## classes (e.g., needed for GV1) and representing all involved ## membership matrices with the same maximal number of columns, we ## use "minimal" representations with no dummy classes (strictly ## speaking, with the possible exception of M, for which the given k ## is used). ## For the time being ... type <- match.arg(type) w <- weights / sum(weights) n <- n_of_objects(clusterings) k_max <- max(sapply(clusterings, n_of_classes)) ## Control parameters. k <- control$k if(is.null(k)) k <- k_max maxiter <- control$maxiter if(is.null(maxiter)) maxiter <- 100L nruns <- control$nruns reltol <- control$reltol if(is.null(reltol)) reltol <- sqrt(.Machine$double.eps) start <- control$start verbose <- control$verbose if(is.null(verbose)) verbose <- getOption("verbose") ## Handle start values and number of runs. if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. start <- list(start) } nruns <- length(start) } else { if(is.null(nruns)) { ## Use nruns only if start is not given. nruns <- 1L } start <- replicate(nruns, .random_stochastic_matrix(n, k), simplify = FALSE) } ## ## For the given memberships, we can simply use ncol() in the ## computations (rather than n_of_classes(), because we used ## cl_membership() to create them. For M, the number of classes ## could be smaller than the given k "target". ## value <- function(M, permutations, memberships, w) { k <- .n_of_nonzero_columns(M) d <- function(u, p) { ## Compute the squared GV1 dissimilarity between M and u ## based on the M->u class matching p. nc_u <- ncol(u) if(nc_u == k) { ## Simple case: all classes are matched. sum((u[, p] - M) ^ 2) } else { ## Only include the matched non-dummy classes of M .. ind <- seq_len(k) ## ... which are matched to non-dummy classes of u. 
ind <- ind[p[ind] <= nc_u] sum((u[, p[ind]] - M[, ind]) ^ 2) } } sum(w * mapply(d, memberships, permutations)) } fit_P <- function(u, M) { ## Return a permutation representing a GV1 optimal matching of ## the columns of M to the columns of u (note the order of the ## arguments), using a minimal number of dummy classes (i.e., p ## has max(.n_of_nonzero_columns(M), n_of_classes(u)) entries). ## See also .cl_dissimilarity_partition_GV1(). C <- outer(colSums(M ^ 2), colSums(u ^ 2), "+") - 2 * crossprod(M, u) nc_M <- .n_of_nonzero_columns(M) nc_u <- ncol(u) ## (See above for ncol() vs n_of_classes().) if(nc_M < nc_u) C <- rbind(C, matrix(0, nrow = nc_u - nc_M, ncol = nc_u)) else if(nc_M > nc_u) C <- cbind(C, matrix(0, nrow = nc_M, ncol = nc_M - nc_u)) solve_LSAP(C) } fit_M <- function(permutations, memberships, w) { ## Here comes the trickiest part ... ## ## In general, M = [m_{iq}] is determined as follows. ## Write value(M, permutations, memberships, w) as ## \sum_b \sum_i \sum_{p=1}^{k_b} \sum_{q=1}^k ## w_b (u_{ip}(b) - m_{iq})^2 x_{pq}(b) ## where U(b) and X(b) are the b-th membership matrix and the ## permutation matrix representing the M->U(b) non-dummy class ## matching (as always, note the order of the arguments). ## ## Let ## \beta_{iq} = \sum_b \sum_{p=1}^{k_b} w_b u_{ip}(b) x_{pq}(b) ## \alpha_q = \sum_b \sum_{p=1}^{k_b} w_b x_{pq}(b) ## and ## \bar{m}_{iq} = ## \cases{\beta_{iq}/\alpha_q, & $\alpha_q > 0$ \cr ## 0 & otherwise}. ## Then, as the cross-product terms cancel out, the value ## function rewrites as ## \sum_b \sum_i \sum_{p=1}^{k_b} \sum_{q=1}^k ## w_b (u_{ip}(b) - \bar{m}_{iq})^2 x_{pq}(b) ## + \sum_i \sum_q \alpha_q (\bar{m}_{iq} - m_{iq}) ^ 2, ## where the first term is a constant, and the minimum is found ## by solving ## \sum_q \alpha_q (\bar{m}_{iq} - m_{iq}) ^ 2 => min! ## s.t. ## m_{i1}, ..., m_{ik} >= 0, \sum_{iq} m_{iq} = 1. ## ## We can distinguish three cases. ## A. If S_i = \sum_q \bar{m}_{iq} = 1, things are trivial. ## B. 
If S_i = \sum_q \bar{m}_{iq} < 1. ## B1. If some \alpha_q are zero, then we can choose ## m_{iq} = \bar{m}_{iq} for those q with \alpha_q = 0; ## m_{iq} = 1 / number of zero \alpha's, otherwise. ## B2. If all \alpha_q are positive, we can simply ## equidistribute 1 - S_i over all classes as written ## in G&V. ## C. If S_i > 1, things are not so clear (as equidistributing ## will typically result in violations of the non-negativity ## constraint). We currently revert to using solve.QP() from ## package quadprog, as constrOptim() already failed in very ## simple test cases. ## ## Now consider \sum_{p=1}^{k_b} x_{pq}(b). If k <= k_b for all ## b, all M classes from 1 to k are matched to one of the k_b ## classes in U(b), hence the sum and also \alpha_q are one. ## But then ## \sum_q \bar{m}_{iq} ## = \sum_b \sum_{p=1}^{k_b} w_b u_{ip}(b) x_{pq}(b) ## <= \sum_b \sum_{p=1}^{k_b} w_b u_{ip}(b) ## = 1 ## with equality if k = k_b for all b. I.e., ## * If k = \min_b k_b = \max k_b, we are in case A. ## * If k <= \min_b k_b, we are in case B2. ## And it makes sense to handle these cases explicitly for ## efficiency reasons. ## And now for something completely different ... the code. k <- .n_of_nonzero_columns(M) nr_M <- nrow(M) nc_M <- ncol(M) nc_memberships <- sapply(memberships, ncol) if(k <= min(nc_memberships)) { ## Compute the weighted means \bar{M}. M <- .weighted_sum_of_matrices(mapply(function(u, p) u[ , p[seq_len(k)]], memberships, permutations, SIMPLIFY = FALSE), w, nr_M) ## And add dummy classes if necessary. if(k < nc_M) M <- cbind(M, matrix(0, nr_M, nc_M - k)) ## If we always got the same number of classes, we are ## done. Otherwise, equidistribute ... if(k < max(nc_memberships)) M <- pmax(M + (1 - rowSums(M)) / nc_M, 0) return(M) } ## Here comes the general case. ## First, compute the \alpha and \beta. 
alpha <- rowSums(rep(w, each = k) * mapply(function(p, n) p[seq_len(k)] <= n, permutations, nc_memberships)) ## Alternatively (more literally): ## X <- lapply(permutations, .make_X_from_p) ## alpha1 <- double(length = k) ## for(b in seq_along(permutations)) { ## alpha1 <- alpha1 + ## w[b] * colSums(X[[b]][seq_len(nc_memberships[b]), ]) ## } ## A helper function giving suitably permuted memberships. pmem <- function(u, p) { ## Only matched classes, similar to the one used in value(), ## maybe merge eventually ... v <- matrix(0, nr_M, k) ind <- seq_len(k) ind <- ind[p[ind] <= ncol(u)] if(any(ind)) v[ , ind] <- u[ , p[ind]] v } beta <- .weighted_sum_of_matrices(mapply(pmem, memberships, permutations, SIMPLIFY = FALSE), w, nr_M) ## Alternatively (more literally): ## beta1 <- matrix(0, nr_M, nc_M) ## for(b in seq_along(permutations)) { ## ind <- seq_len(nc_memberships[b]) ## beta1 <- beta1 + ## w[b] * memberships[[b]][, ind] %*% X[[b]][ind, ] ## } ## Compute the weighted means \bar{M}. M <- .cscale(beta, ifelse(alpha > 0, 1 / alpha, 0)) ## Alternatively (see comments for .cscale()): ## M1 <- beta %*% diag(ifelse(alpha > 0, 1 / alpha, 0)) ## And add dummy classes if necessary. if(k < nc_M) M <- cbind(M, matrix(0, nr_M, nc_M - k)) S <- rowSums(M) ## Take care of those rows with row sums < 1. ind <- (S < 1) if(any(ind)) { i_0 <- alpha == 0 if(any(i_0)) M[ind, i_0] <- 1 / sum(i_0) else M[ind, ] <- pmax(M[ind, ] + (1 - S[ind]) / nc_M, 0) } ## Take care of those rows with row sums > 1. ind <- (S > 1) if(any(ind)) { ## Argh. Call solve.QP() for each such i. Alternatively, ## could set up on very large QP, but is this any better? 
Dmat <- diag(alpha, nc_M) Amat <- t(rbind(rep.int(-1, nc_M), diag(1, nc_M))) bvec <- c(-1, rep.int(0, nc_M)) for(i in which(ind)) M[i, ] <- quadprog::solve.QP(Dmat, alpha * M[i, ], Amat, bvec)$solution } M } memberships <- lapply(clusterings, cl_membership) V_opt <- Inf M_opt <- NULL for(run in seq_along(start)) { if(verbose && (nruns > 1L)) message(gettextf("AOG run: %d", run)) M <- start[[run]] permutations <- lapply(memberships, fit_P, M) old_value <- value(M, permutations, memberships, w) message(gettextf("Iteration: 0 *** value: %g", old_value)) iter <- 1L while(iter <= maxiter) { ## Fit M. M <- fit_M(permutations, memberships, w) ## Fit \{ P_b \}. permutations <- lapply(memberships, fit_P, M) ## Update value. new_value <- value(M, permutations, memberships, w) if(verbose) message(gettextf("Iteration: %d *** value: %g", iter, new_value)) if(abs(old_value - new_value) < reltol * (abs(old_value) + reltol)) break old_value <- new_value iter <- iter + 1L } if(new_value < V_opt) { converged <- (iter <= maxiter) V_opt <- new_value M_opt <- M } if(verbose) message(gettextf("Minimum: %g", V_opt)) } M <- .stochastify(M_opt) ## Seems that M is always kept a k columns ... if not, use ## M <- .stochastify(M_opt[, seq_len(k), drop = FALSE]) rownames(M) <- rownames(memberships[[1L]]) ## Recompute the value, just making sure ... permutations <- lapply(memberships, fit_P, M) meta <- list(objval = value(M, permutations, memberships, w), converged = converged) M <- .cl_membership_from_memberships(M, k, meta) as.cl_partition(M) } ### ** .cl_consensus_partition_GV1 .cl_consensus_partition_GV1 <- function(clusterings, weights, control) .cl_consensus_partition_AOG(clusterings, weights, control, "GV1") ### * .cl_consensus_partition_GV3 .cl_consensus_partition_GV3 <- function(clusterings, weights, control) { ## Use a SUMT to solve ## \| Y - M M' \|_F^2 => min ## where M is a membership matrix and Y = \sum_b w_b M_b M_b'. 
n <- n_of_objects(clusterings) max_n_of_classes <- max(sapply(clusterings, n_of_classes)) ## Control parameters: ## k, k <- control$k if(is.null(k)) k <- max_n_of_classes ## nruns, nruns <- control$nruns ## start. start <- control$start w <- weights / sum(weights) comemberships <- lapply(clusterings, function(x) { ## No need to force a common k here. tcrossprod(cl_membership(x)) }) Y <- .weighted_sum_of_matrices(comemberships, w, n) ## Handle start values and number of runs. if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. start <- list(start) } } else { if(is.null(nruns)) { ## Use nruns only if start is not given. nruns <- 1L } e <- eigen(Y, symmetric = TRUE) ## Use M <- U_k \lambda_k^{1/2}, or random perturbations ## thereof. M <- e$vectors[, seq_len(k), drop = FALSE] * rep(sqrt(e$values[seq_len(k)]), each = n) m <- c(M) start <- c(list(m), replicate(nruns - 1L, m + rnorm(length(m), sd = sd(m) / sqrt(3)), simplify = FALSE)) } y <- c(Y) L <- function(m) sum((y - tcrossprod(matrix(m, n))) ^ 2) P <- .make_penalty_function_membership(n, k) grad_L <- function(m) { M <- matrix(m, n) 4 * c((tcrossprod(M) - Y) %*% M) } grad_P <- .make_penalty_gradient_membership(n, k) out <- sumt(start, L, P, grad_L, grad_P, method = control$method, eps = control$eps, q = control$q, verbose = control$verbose, control = as.list(control$control)) M <- .stochastify(matrix(out$x, n)) rownames(M) <- rownames(cl_membership(clusterings[[1L]])) meta <- list(objval = L(c(M))) M <- .cl_membership_from_memberships(M, k, meta) as.cl_partition(M) } ### * .cl_consensus_partition_soft_symdiff .cl_consensus_partition_soft_symdiff <- function(clusterings, weights, control) { ## Use a SUMT to solve ## \sum_b w_b \sum_{ij} | c_{ij}(b) - c_{ij} | => min ## where C(b) = comembership(M(b)) and C = comembership(M) and M is ## a membership matrix. 
## Control parameters: ## gradient, gradient <- control$gradient if(is.null(gradient)) gradient <- TRUE ## k, k <- control$k ## nruns, nruns <- control$nruns ## start. start <- control$start ## Handle start values and number of runs. if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. start <- list(start) } } else if(is.null(nruns)) { ## Use nruns only if start is not given. nruns <- 1L } max_n_of_classes <- max(sapply(clusterings, n_of_classes)) if(is.null(k)) k <- max_n_of_classes B <- length(clusterings) n <- n_of_objects(clusterings) w <- weights / sum(weights) comemberships <- lapply(clusterings, function(x) { ## No need to force a common k here. tcrossprod(cl_membership(x)) }) ## Handle start values and number of runs. if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. start <- list(start) } } else { if(is.null(nruns)) { ## Use nruns only if start is not given. nruns <- 1L } ## Try using a rank k "root" of the weighted median of the ## comemberships as starting value. Y <- apply(array(unlist(comemberships), c(n, n, B)), c(1, 2), weighted_median, w) e <- eigen(Y, symmetric = TRUE) ## Use M <- U_k \lambda_k^{1/2}, or random perturbations ## thereof. M <- e$vectors[, seq_len(k), drop = FALSE] * rep(sqrt(e$values[seq_len(k)]), each = n) m <- c(M) start <- c(list(m), replicate(nruns - 1L, m + rnorm(length(m), sd = sd(m) / sqrt(3)), simplify = FALSE)) } L <- function(m) { M <- matrix(m, n) C_M <- tcrossprod(M) ## Note that here (as opposed to hard/symdiff) we take soft ## partitions as is without replacing them by their closest hard ## partitions. 
sum(w * sapply(comemberships, function(C) sum(abs(C_M - C)))) } P <- .make_penalty_function_membership(n, k) if(gradient) { grad_L <- function(m) { M <- matrix(m, n) C_M <- tcrossprod(M) .weighted_sum_of_matrices(lapply(comemberships, function(C) 2 * sign(C_M - C) %*% M), w, n) } grad_P <- .make_penalty_gradient_membership(n, k) } else grad_L <- grad_P <- NULL out <- sumt(start, L, P, grad_L, grad_P, method = control$method, eps = control$eps, q = control$q, verbose = control$verbose, control = as.list(control$control)) M <- .stochastify(matrix(out$x, n)) rownames(M) <- rownames(cl_membership(clusterings[[1L]])) meta <- list(objval = L(c(M))) M <- .cl_membership_from_memberships(M, k, meta) as.cl_partition(M) } ### * .cl_consensus_partition_hard_symdiff .cl_consensus_partition_hard_symdiff <- function(clusterings, weights, control) { ## ## This is mostly duplicated from relations. ## Once this is on CRAN, we could consider having clue suggest ## relations ... ## comemberships <- lapply(clusterings, function(x) { ## Here, we always turn possibly soft partitions to ## their closest hard partitions. ids <- cl_class_ids(x) outer(ids, ids, "==") ## (Simpler than using tcrossprod() on ## cl_membership().) }) ## Could also create a relation ensemble from the comemberships and ## call relation_consensus(). B <- relations:::.make_fit_relation_symdiff_B(comemberships, weights) k <- control$k control <- control$control ## Note that currently we provide no support for finding *all* ## consensus partitions (but allow for specifying the solver). control$all <- FALSE I <- if(!is.null(k)) { ## ## We could actually get the memberships directly in this case. 
relations:::fit_relation_LP_E_k(B, k, control) ## } else relations:::fit_relation_LP(B, "E", control) ids <- relations:::get_class_ids_from_incidence(I) names(ids) <- cl_object_names(clusterings) as.cl_hard_partition(ids) } ### * .cl_consensus_hierarchy_cophenetic .cl_consensus_hierarchy_cophenetic <- function(clusterings, weights, control) { ## d <- .weighted_mean_of_object_dissimilarities(clusterings, weights) ## Alternatively: ## as.cl_dendrogram(ls_fit_ultrametric(d, control = control)) control <- c(list(weights = weights), control) as.cl_dendrogram(ls_fit_ultrametric(clusterings, control = control)) } ### * .cl_consensus_hierarchy_manhattan .cl_consensus_hierarchy_manhattan <- function(clusterings, weights, control) { ## Control parameters: ## gradient, gradient <- control$gradient if(is.null(gradient)) gradient <- TRUE ## nruns, nruns <- control$nruns ## start. start <- control$start ## Handle start values and number of runs. if(!is.null(start)) { if(!is.list(start)) { ## Be nice to users. start <- list(start) } } else if(is.null(nruns)) { ## Use nruns only if start is not given. nruns <- 1L } w <- weights / sum(weights) B <- length(clusterings) ultrametrics <- lapply(clusterings, cl_ultrametric) if(B == 1L) return(as.cl_dendrogram(ultrametrics[[1L]])) n <- n_of_objects(ultrametrics[[1L]]) labels <- cl_object_names(ultrametrics[[1L]]) ## We need to do ## ## \sum_b w_b \sum_{i,j} | u_{ij}(b) - u_{ij} | => min ## ## over all ultrametrics u. Let's use a SUMT (for which "gradients" ## can optionally be switched off) ... L <- function(d) { sum(w * sapply(ultrametrics, function(u) sum(abs(u - d)))) ## Could also do something like ## sum(w * sapply(ultrametrics, cl_dissimilarity, d, ## "manhattan")) } P <- .make_penalty_function_ultrametric(n) if(gradient) { grad_L <- function(d) { ## "Gradient" is \sum_b w_b sign(d - u(b)). 
.weighted_sum_of_vectors(lapply(ultrametrics, function(u) sign(d - u)), w) } grad_P <- .make_penalty_gradient_ultrametric(n) } else grad_L <- grad_P <- NULL if(is.null(start)) { ## Initialize by "random shaking" of the weighted median of the ## ultrametrics. Any better ideas? ## ## Using var(x) / 3 is really L2 ... ## x <- apply(matrix(unlist(ultrametrics), ncol = B), 1, weighted_median, w) start <- replicate(nruns, x + rnorm(length(x), sd = sd(x) / sqrt(3)), simplify = FALSE) } out <- sumt(start, L, P, grad_L, grad_P, method = control$method, eps = control$eps, q = control$q, verbose = control$verbose, control = as.list(control$control)) d <- .ultrametrify(out$x) meta <- list(objval = L(d)) d <- .cl_ultrametric_from_veclh(d, n, labels, meta) as.cl_dendrogram(d) } ### * .cl_consensus_hierarchy_majority .cl_consensus_hierarchy_majority <- function(clusterings, weights, control) { w <- weights / sum(weights) p <- control$p if(is.null(p)) p <- 1 / 2 else if(!is.numeric(p) || (length(p) != 1) || (p < 1 / 2) || (p > 1)) stop("Parameter 'p' must be in [1/2, 1].") classes <- lapply(clusterings, cl_classes) all_classes <- unique(unlist(classes, recursive = FALSE)) gamma <- double(length = length(all_classes)) for(i in seq_along(classes)) gamma <- gamma + w[i] * !is.na(match(all_classes, classes[[i]])) ## Rescale to [0, 1]. gamma <- gamma / max(gamma) maj_classes <- if(p == 1) { ## Strict consensus tree. all_classes[gamma == 1] } else all_classes[gamma > p] attr(maj_classes, "labels") <- attr(classes[[1L]], "labels") ## ## Stop auto-coercing that to dendrograms once we have suitable ways ## of representing n-trees. 
as.cl_hierarchy(.cl_ultrametric_from_classes(maj_classes)) ## } ### * Utilities ### ** .cl_consensus_method_default .cl_consensus_method_default <- function(type) { switch(type, partition = "SE", hierarchy = "euclidean", NULL) } ### ** .project_to_leading_columns .project_to_leading_columns <- function(x, k) { ## For a given matrix stochastic matrix x, return the stochastic ## matrix y which has columns from k+1 on all zero which is closest ## to x in the Frobenius distance. y <- x[, seq_len(k), drop = FALSE] y <- cbind(pmax(y + (1 - rowSums(y)) / k, 0), matrix(0, nrow(y), ncol(x) - k)) ## (Use the pmax to ensure that entries remain nonnegative.) } ### ** .make_X_from_p .make_X_from_p <- function(p) { ## X matrix corresponding to permutation p as needed for the AO ## algorithms. I.e., x_{ij} = 1 iff j->p(j)=i. X <- matrix(0, length(p), length(p)) i <- seq_along(p) X[cbind(p[i], i)] <- 1 X } ### ** .n_of_nonzero_columns ## ## Could turn this into n_of_classes.matrix(). .n_of_nonzero_columns <- function(x) sum(colSums(x) > 0) ## ### ** .cscale ## ## Move to utilities eventually ... .cscale <- function(A, x) { ## Scale the columns of matrix A by the elements of vector x. ## Formally, A %*% diag(x), but faster. 
## Could also use sweep(A, 2, x, "*") rep(x, each = nrow(A)) * A } ## ## .make_penalty_function_membership .make_penalty_function_membership <- function(nr, nc) function(m) { sum(pmin(m, 0) ^ 2) + sum((rowSums(matrix(m, nr)) - 1) ^ 2) } ## .make_penalty_gradient_membership .make_penalty_gradient_membership <- function(nr, nc) function(m) { 2 * (pmin(m, 0) + rep.int(rowSums(matrix(m, nr)) - 1, nc)) } ### Local variables: *** ### mode: outline-minor *** ### outline-regexp: "### [*]+" *** ### End: *** clue/vignettes/0000755000175100001440000000000013435045064013234 5ustar hornikusersclue/vignettes/clue.Rnw0000644000175100001440000016521512734170652014671 0ustar hornikusers\documentclass[fleqn]{article} \usepackage[round,longnamesfirst]{natbib} \usepackage{graphicx,keyval,hyperref,doi} \newcommand\argmin{\mathop{\mathrm{arg min}}} \newcommand\trace{\mathop{\mathrm{tr}}} \newcommand\R{{\mathbb{R}}} \newcommand{\pkg}[1]{{\normalfont\fontseries{b}\selectfont #1}} \newcommand{\sQuote}[1]{`{#1}'} \newcommand{\dQuote}[1]{``{#1}''} \let\code=\texttt \newcommand{\file}[1]{\sQuote{\textsf{#1}}} \newcommand{\class}[1]{\code{"#1"}} \SweaveOpts{strip.white=true} \AtBeginDocument{\setkeys{Gin}{width=0.6\textwidth}} \date{2007-06-28} \title{A CLUE for CLUster Ensembles} \author{Kurt Hornik} %% \VignetteIndexEntry{CLUster Ensembles} \sloppy{} \begin{document} \maketitle \begin{abstract} Cluster ensembles are collections of individual solutions to a given clustering problem which are useful or necessary to consider in a wide range of applications. The R package~\pkg{clue} provides an extensible computational environment for creating and analyzing cluster ensembles, with basic data structures for representing partitions and hierarchies, and facilities for computing on these, including methods for measuring proximity and obtaining consensus and ``secondary'' clusterings. 
\end{abstract} <>= options(width = 60) library("clue") @ % \section{Introduction} \label{sec:introduction} \emph{Cluster ensembles} are collections of clusterings, which are all of the same ``kind'' (e.g., collections of partitions, or collections of hierarchies), of a set of objects. Such ensembles can be obtained, for example, by varying the (hyper)parameters of a ``base'' clustering algorithm, by resampling or reweighting the set of objects, or by employing several different base clusterers. Questions of ``agreement'' in cluster ensembles, and obtaining ``consensus'' clusterings from it, have been studied in several scientific communities for quite some time now. A special issue of the Journal of Classification was devoted to ``Comparison and Consensus of Classifications'' \citep{cluster:Day:1986} almost two decades ago. The recent popularization of ensemble methods such as Bayesian model averaging \citep{cluster:Hoeting+Madigan+Raftery:1999}, bagging \citep{cluster:Breiman:1996} and boosting \citep{cluster:Friedman+Hastie+Tibshirani:2000}, typically in a supervised leaning context, has also furthered the research interest in using ensemble methods to improve the quality and robustness of cluster solutions. Cluster ensembles can also be utilized to aggregate base results over conditioning or grouping variables in multi-way data, to reuse existing knowledge, and to accommodate the needs of distributed computing, see e.g.\ \cite{cluster:Hornik:2005a} and \cite{cluster:Strehl+Ghosh:2003a} for more information. Package~\pkg{clue} is an extension package for R~\citep{cluster:R:2005} providing a computational environment for creating and analyzing cluster ensembles. In Section~\ref{sec:structures+algorithms}, we describe the underlying data structures, and the functionality for measuring proximity, obtaining consensus clusterings, and ``secondary'' clusterings. Four examples are discussed in Section~\ref{sec:examples}. Section~\ref{sec:outlook} concludes the paper. 
A previous version of this manuscript was published in the \emph{Journal of Statistical Software} \citep{cluster:Hornik:2005b}. \section{Data structures and algorithms} \label{sec:structures+algorithms} \subsection{Partitions and hierarchies} Representations of clusterings of objects greatly vary across the multitude of methods available in R packages. For example, the class ids (``cluster labels'') for the results of \code{kmeans()} in base package~\pkg{stats}, \code{pam()} in recommended package~\pkg{cluster}~\citep{cluster:Rousseeuw+Struyf+Hubert:2005, cluster:Struyf+Hubert+Rousseeuw:1996}, and \code{Mclust()} in package~\pkg{mclust}~\citep{cluster:Fraley+Raftery+Wehrens:2005, cluster:Fraley+Raftery:2003}, are available as components named \code{cluster}, \code{clustering}, and \code{classification}, respectively, of the R objects returned by these functions. In many cases, the representations inherit from suitable classes. (We note that for versions of R prior to 2.1.0, \code{kmeans()} only returned a ``raw'' (unclassed) result, which was changed alongside the development of \pkg{clue}.) We deal with this heterogeneity of representations by providing getters for the key underlying data, such as the number of objects from which a clustering was obtained, and predicates, e.g.\ for determining whether an R object represents a partition of objects or not. These getters, such as \code{n\_of\_objects()}, and predicates are implemented as S3 generics, so that there is a \emph{conceptual}, but no formal class system underlying the predicates. Support for classed representations can easily be added by providing S3 methods. \subsubsection{Partitions} The partitions considered in \pkg{clue} are possibly soft (``fuzzy'') partitions, where for each object~$i$ and class~$j$ there is a non-negative number~$\mu_{ij}$ quantifying the ``belongingness'' or \emph{membership} of object~$i$ to class~$j$, with $\sum_j \mu_{ij} = 1$. 
For hard (``crisp'') partitions, all $\mu_{ij}$ are in $\{0, 1\}$. We can gather the $\mu_{ij}$ into the \emph{membership matrix} $M = [\mu_{ij}]$, where rows correspond to objects and columns to classes. The \emph{number of classes} of a partition, computed by function \code{n\_of\_classes()}, is the number of $j$ for which $\mu_{ij} > 0$ for at least one object~$i$. This may be less than the number of ``available'' classes, corresponding to the number of columns in a membership matrix representing the partition. The predicate functions \code{is.cl\_partition()}, \code{is.cl\_hard\_partition()}, and \code{is.cl\_soft\_partition()} are used to indicate whether R objects represent partitions of objects of the respective kind, with hard partitions as characterized above (all memberships in $\{0, 1\}$). (Hence, ``fuzzy clustering'' algorithms can in principle also give a hard partition.) \code{is.cl\_partition()} and \code{is.cl\_hard\_partition()} are generic functions; \code{is.cl\_soft\_partition()} gives true iff \code{is.cl\_partition()} is true and \code{is.cl\_hard\_partition()} is false. For R objects representing partitions, function \code{cl\_membership()} computes an R object with the membership values, currently always as a dense membership matrix with additional attributes. This is obviously rather inefficient for computations on hard partitions; we are planning to add ``canned'' sparse representations (using the vector of class ids) in future versions. Function \code{as.cl\_membership()} can be used for coercing \dQuote{raw} class ids (given as atomic vectors) or membership values (given as numeric matrices) to membership objects. Function \code{cl\_class\_ids()} determines the class ids of a partition. For soft partitions, the class ids returned are those of the \dQuote{nearest} hard partition obtained by taking the class ids of the (first) maximal membership values. 
Note that the cardinality of the set of the class ids may be less than the number of classes in the (soft) partition. Many partitioning methods are based on \emph{prototypes} (``centers''). In typical cases, these are points~$p_j$ in the same feature space the measurements~$x_i$ on the objects~$i$ to be partitioned are in, so that one can measure distance between objects and prototypes, and e.g.\ classify objects to their closest prototype. Such partitioning methods can also induce partitions of the entire feature space (rather than ``just'' the set of objects to be partitioned). Currently, package \pkg{clue} has only minimal support for this ``additional'' structure, providing a \code{cl\_prototypes()} generic for extracting the prototypes, and is mostly focused on computations on partitions which are based on their memberships. Many algorithms resulting in partitions of a given set of objects can be taken to induce a partition of the underlying feature space for the measurements on the objects, so that class memberships for ``new'' objects can be obtained from the induced partition. Examples include partitions based on assigning objects to their ``closest'' prototypes, or providing mixture models for the distribution of objects in feature space. Package~\pkg{clue} provides a \code{cl\_predict()} generic for predicting the class memberships of new objects (if possible). Function \code{cl\_fuzziness()} computes softness (fuzziness) measures for (ensembles) of partitions. Built-in measures are the partition coefficient \label{PC} and partition entropy \citep[e.g.,][]{cluster:Bezdek:1981}, with an option to normalize in a way that hard partitions and the ``fuzziest'' possible partition (where all memberships are the same) get fuzziness values of zero and one, respectively. Note that this normalization differs from ``standard'' ones in the literature. 
In the sequel, we shall also use the concept of the \emph{co-membership matrix} $C(M) = M M'$, where $'$ denotes matrix transposition, of a partition. For hard partitions, an entry $c_{ij}$ of $C(M)$ is 1 iff the corresponding objects $i$ and $j$ are in the same class, and 0 otherwise. \subsubsection{Hierarchies} The hierarchies considered in \pkg{clue} are \emph{total indexed hierarchies}, also known as \emph{$n$-valued trees}, and hence correspond in a one-to-one manner to \emph{ultrametrics} (distances $u_{ij}$ between pairs of objects $i$ and $j$ which satisfy the ultrametric constraint $u_{ij} = \max(u_{ik}, u_{jk})$ for all triples $i$, $j$, and $k$). See e.g.~\citet[Page~69--71]{cluster:Gordon:1999}. Function \code{cl\_ultrametric(x)} computes the associated ultrametric from an R object \code{x} representing a hierarchy of objects. If \code{x} is not an ultrametric, function \code{cophenetic()} in base package~\pkg{stats} is used to obtain the ultrametric (also known as cophenetic) distances from the hierarchy, which in turn by default calls the S3 generic \code{as.hclust()} (also in \pkg{stats}) on the hierarchy. Support for classes which represent hierarchies can thus be added by providing \code{as.hclust()} methods for this class. In R~2.1.0 or better (again as part of the work on \pkg{clue}), \code{cophenetic} is an S3 generic as well, and one can also more directly provide methods for this if necessary. In addition, there is a generic function \code{as.cl\_ultrametric()} which can be used for coercing \emph{raw} (non-classed) ultrametrics, represented as numeric vectors (of the lower-half entries) or numeric matrices, to ultrametric objects. Finally, the generic predicate function \code{is.cl\_hierarchy()} is used to determine whether an R object represents a hierarchy or not. 
Ultrametric objects can also be coerced to classes~\class{dendrogram} and \class{hclust} (from base package~\pkg{stats}), and hence in particular use the \code{plot()} methods for these classes. By default, plotting an ultrametric object uses the plot method for dendrograms. Obtaining a hierarchy on a given set of objects can be thought of as transforming the pairwise dissimilarities between the objects (which typically do not yet satisfy the ultrametric constraints) into an ultrametric. Ideally, this ultrametric should be as close as possible to the dissimilarities. In some important cases, explicit solutions are possible (e.g., ``standard'' hierarchical clustering with single or complete linkage gives the optimal ultrametric dominated by or dominating the dissimilarities, respectively). On the other hand, the problem of finding the closest ultrametric in the least squares sense is known to be NP-hard \citep{cluster:Krivanek+Moravek:1986,cluster:Krivanek:1986}. One important class of heuristics for finding least squares fits is based on iterative projection on convex sets of constraints \citep{cluster:Hubert+Arabie:1995}. \label{SUMT} Function \code{ls\_fit\_ultrametric()} follows \cite{cluster:DeSoete:1986} to use an SUMT \citep[Sequential Unconstrained Minimization Technique;][]{cluster:Fiacco+McCormick:1968} approach in turn simplifying the suggestions in \cite{cluster:Carroll+Pruzansky:1980}. Let $L(u)$ be the function to be minimized over all $u$ in some constrained set $\mathcal{U}$---in our case, $L(u) = \sum (d_{ij}-u_{ij})^2$ is the least squares criterion, and $\mathcal{U}$ is the set of all ultrametrics $u$. One iteratively minimizes $L(u) + \rho_k P(u)$, where $P(u)$ is a non-negative function penalizing violations of the constraints such that $P(u)$ is zero iff $u \in \mathcal{U}$. 
The $\rho$ values are increased according to the rule $\rho_{k+1} = q \rho_k$ for some constant $q > 1$, until convergence is obtained in the sense that e.g.\ the Euclidean distance between successive solutions $u_k$ and $u_{k+1}$ is small enough. Optionally, the final $u_k$ is then suitably projected onto $\mathcal{U}$. For \code{ls\_fit\_ultrametric()}, we obtain the starting value $u_0$ by \dQuote{random shaking} of the given dissimilarity object, and use the penalty function $P(u) = \sum_{\Omega} (u_{ij} - u_{jk}) ^ 2$, were $\Omega$ contains all triples $i, j, k$ for which $u_{ij} \le \min(u_{ik}, u_{jk})$ and $u_{ik} \ne u_{jk}$, i.e., for which $u$ violates the ultrametric constraints. The unconstrained minimizations are carried out using either \code{optim()} or \code{nlm()} in base package~\pkg{stats}, with analytic gradients given in \cite{cluster:Carroll+Pruzansky:1980}. This ``works'', even though we note however that $P$ is not even a continuous function, which seems to have gone unnoticed in the literature! (Consider an ultrametric $u$ for which $u_{ij} = u_{ik} < u_{jk}$ for some $i, j, k$ and define $u(\delta)$ by changing the $u_{ij}$ to $u_{ij} + \delta$. For $u$, both $(i,j,k)$ and $(j,i,k)$ are in the violation set $\Omega$, whereas for all $\delta$ sufficiently small, only $(j,i,k)$ is the violation set for $u(\delta)$. Hence, $\lim_{\delta\to 0} P(u(\delta)) = P(u) + (u_{ij} - u_{ik})^2$. This shows that $P$ is discontinuous at all non-constant $u$ with duplicated entries. On the other hand, it is continuously differentiable at all $u$ with unique entries.) Hence, we need to turn off checking analytical gradients when using \code{nlm()} for minimization. The default optimization using conjugate gradients should work reasonably well for medium to large size problems. For \dQuote{small} ones, using \code{nlm()} is usually faster. 
Note that the number of ultrametric constraints is of the order $n^3$, suggesting to use the SUMT approach in favor of \code{constrOptim()} in \pkg{stats}. It should be noted that the SUMT approach is a heuristic which can not be guaranteed to find the global minimum. Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the base algorithm. \subsubsection{Extensibility} The methods provided in package~\pkg{clue} handle the partitions and hierarchies obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}~\citep{cluster:Hornik+Hothorn+Karatzoglou:2006}, \pkg{cba}~\citep{cluster:Buchta+Hahsler:2005}, \pkg{cclust}~\citep{cluster:Dimitriadou:2005}, \pkg{cluster}, \pkg{e1071}~\citep{cluster:Dimitriadou+Hornik+Leisch:2005}, \pkg{flexclust}~\citep{cluster:Leisch:2006a}, \pkg{flexmix}~\citep{cluster:Leisch:2004}, \pkg{kernlab}~\citep{cluster:Karatzoglou+Smola+Hornik:2004}, and \pkg{mclust} (and of course, \pkg{clue} itself). Extending support to other packages is straightforward, provided that clusterings are instances of classes. Suppose e.g.\ that a package has a function \code{glvq()} for ``generalized'' (i.e., non-Euclidean) Learning Vector Quantization which returns an object of class~\class{glvq}, in turn being a list with component \code{class\_ids} containing the class ids. To integrate this into the \pkg{clue} framework, all that is necessary is to provide the following methods. <<>>= cl_class_ids.glvq <- function(x) as.cl_class_ids(x$class_ids) is.cl_partition.glvq <- function(x) TRUE is.cl_hard_partition.glvq <- function(x) TRUE @ % $ \subsection{Cluster ensembles} Cluster ensembles are realized as lists of clusterings with additional class information. 
All clusterings in an ensemble must be of the same ``kind'' (i.e., either all partitions as known to \code{is.cl\_partition()}, or all hierarchies as known to \code{is.cl\_hierarchy()}, respectively), and have the same number of objects. If all clusterings are partitions, the list realizing the ensemble has class~\class{cl\_partition\_ensemble} and inherits from \class{cl\_ensemble}; if all clusterings are hierarchies, it has class~\class{cl\_hierarchy\_ensemble} and inherits from \class{cl\_ensemble}. Empty ensembles cannot be categorized according to the kind of clusterings they contain, and hence only have class~\class{cl\_ensemble}. Function \code{cl\_ensemble()} creates a cluster ensemble object from clusterings given either one-by-one, or as a list passed to the \code{list} argument. As unclassed lists could be used to represent single clusterings (in particular for results from \code{kmeans()} in versions of R prior to 2.1.0), we prefer not to assume that an unnamed given list is a list of clusterings. \code{cl\_ensemble()} verifies that all given clusterings are of the same kind, and all have the same number of objects. (By the notion of cluster ensembles, we should in principle verify that the clusterings come from the \emph{same} objects, which of course is not always possible.) The list representation makes it possible to use \code{lapply()} for computations on the individual clusterings in (i.e., the components of) a cluster ensemble. Available methods for cluster ensembles include those for subscripting, \code{c()}, \code{rep()}, \code{print()}, and \code{unique()}, where the last is based on a \code{unique()} method for lists added in R~2.1.1, and makes it possible to find unique and duplicated elements in cluster ensembles. The elements of the ensemble can be tabulated using \code{cl\_tabulate()}. 
Function \code{cl\_boot()} generates cluster ensembles with bootstrap replicates of the results of applying a \dQuote{base} clustering algorithm to a given data set. Currently, this is a rather simple-minded function with limited applicability, and mostly useful for studying the effect of (uncontrolled) random initializations of fixed-point partitioning algorithms such as \code{kmeans()} or \code{cmeans()} in package~\pkg{e1071}. To study the effect of varying control parameters or explicitly providing random starting values, the respective cluster ensemble has to be generated explicitly (most conveniently by using \code{replicate()} to create a list \code{lst} of suitable instances of clusterings obtained by the base algorithm, and using \code{cl\_ensemble(list = lst)} to create the ensemble). Resampling the training data is possible for base algorithms which can predict the class memberships of new data using \code{cl\_predict} (e.g., by classifying the out-of-bag data to their closest prototype). In fact, we believe that for unsupervised learning methods such as clustering, \emph{reweighting} is conceptually superior to resampling, and have therefore recently enhanced package~\pkg{e1071} to provide an implementation of weighted fuzzy $c$-means, and package~\pkg{flexclust} contains an implementation of weighted $k$-means. We are currently experimenting with interfaces for providing ``direct'' support for reweighting via \code{cl\_boot()}. \subsection{Cluster proximities} \subsubsection{Principles} Computing dissimilarities and similarities (``agreements'') between clusterings of the same objects is a key ingredient in the analysis of cluster ensembles. The ``standard'' data structures available for such proximity data (measures of similarity or dissimilarity) are classes~\class{dist} and \class{dissimilarity} in package~\pkg{cluster} (which basically, but not strictly, extends \class{dist}), and are both not entirely suited to our needs. 
First, they are confined to \emph{symmetric} dissimilarity data. Second, they do not provide enough reflectance. We also note that the Bioconductor package~\pkg{graph}~\citep{cluster:Gentleman+Whalen:2005} contains an efficient subscript method for objects of class~\class{dist}, but returns a ``raw'' matrix for row/column subscripting. For package~\pkg{clue}, we use the following approach. There are classes for symmetric and (possibly) non-symmetric proximity data (\class{cl\_proximity} and \class{cl\_cross\_proximity}), which, in addition to holding the numeric data, also contain a description ``slot'' (attribute), currently a character string, as a first approximation to providing more reflectance. Internally, symmetric proximity data are store the lower diagonal proximity values in a numeric vector (in row-major order), i.e., the same way as objects of class~\class{dist}; a \code{self} attribute can be used for diagonal values (in case some of these are non-zero). Symmetric proximity objects can be coerced to dense matrices using \code{as.matrix()}. It is possible to use 2-index matrix-style subscripting for symmetric proximity objects; unless this uses identical row and column indices, it results in a non-symmetric proximity object. This approach ``propagates'' to classes for symmetric and (possibly) non-symmetric cluster dissimilarity and agreement data (e.g., \class{cl\_dissimilarity} and \class{cl\_cross\_dissimilarity} for dissimilarity data), which extend the respective proximity classes. Ultrametric objects are implemented as symmetric proximity objects with a dissimilarity interpretation so that self-proximities are zero, and inherit from classes~\class{cl\_dissimilarity} and \class{cl\_proximity}. Providing reflectance is far from optimal. For example, if \code{s} is a similarity object (with cluster agreements), \code{1 - s} is a dissimilarity one, but the description is preserved unchanged. 
This issue could be addressed by providing high-level functions for transforming proximities. \label{synopsis} Cluster dissimilarities are computed via \code{cl\_dissimilarity()} with synopsis \code{cl\_dissimilarity(x, y = NULL, method = "euclidean")}, where \code{x} and \code{y} are cluster ensemble objects or coercible to such, or \code{NULL} (\code{y} only). If \code{y} is \code{NULL}, the return value is an object of class~\class{cl\_dissimilarity} which contains the dissimilarities between all pairs of clusterings in \code{x}. Otherwise, it is an object of class~\class{cl\_cross\_dissimilarity} with the dissimilarities between the clusterings in \code{x} and the clusterings in \code{y}. Formal argument \code{method} is either a character string specifying one of the built-in methods for computing dissimilarity, or a function to be taken as a user-defined method, making it reasonably straightforward to add methods. Function \code{cl\_agreement()} has the same interface as \code{cl\_dissimilarity()}, returning cluster similarity objects with respective classes~\class{cl\_agreement} and \class{cl\_cross\_agreement}. Built-in methods for computing dissimilarities may coincide (in which case they are transforms of each other), but do not necessarily do so, as there typically are no canonical transformations. E.g., according to needs and scientific community, agreements might be transformed to dissimilarities via $d = - \log(s)$ or the square root thereof \citep[e.g.,][]{cluster:Strehl+Ghosh:2003b}, or via $d = 1 - s$. \subsubsection{Partition proximities} When assessing agreement or dissimilarity of partitions, one needs to consider that the class ids may be permuted arbitrarily without changing the underlying partitions. For membership matrices~$M$, permuting class ids amounts to replacing $M$ by $M \Pi$, where $\Pi$ is a suitable permutation matrix. 
We note that the co-membership matrix $C(M) = MM'$ is unchanged by these transformations; hence, proximity measures based on co-occurrences, such as the Katz-Powell \citep{cluster:Katz+Powell:1953} or Rand \citep{cluster:Rand:1971} indices, do not explicitly need to adjust for possible re-labeling. The same is true for measures based on the ``confusion matrix'' $M' \tilde{M}$ of two membership matrices $M$ and $\tilde{M}$ which are invariant under permutations of rows and columns, such as the Normalized Mutual Information (NMI) measure introduced in \cite{cluster:Strehl+Ghosh:2003a}. Other proximity measures need to find permutations so that the classes are optimally matched, which of course in general requires exhaustive search through all $k!$ possible permutations, where $k$ is the (common) number of classes in the partitions, and thus will typically be prohibitively expensive. Fortunately, in some important cases, optimal matchings can be determined very efficiently. We explain this in detail for ``Euclidean'' partition dissimilarity and agreement (which in fact is the default measure used by \code{cl\_dissimilarity()} and \code{cl\_agreement()}). Euclidean partition dissimilarity \citep{cluster:Dimitriadou+Weingessel+Hornik:2002} is defined as \begin{displaymath} d(M, \tilde{M}) = \min\nolimits_\Pi \| M - \tilde{M} \Pi \| \end{displaymath} where the minimum is taken over all permutation matrices~$\Pi$, $\|\cdot\|$ is the Frobenius norm (so that $\|Y\|^2 = \trace(Y'Y)$), and $n$ is the (common) number of objects in the partitions. 
As $\| M - \tilde{M} \Pi \|^2 = \trace(M'M) - 2 \trace(M'\tilde{M}\Pi) + \trace(\Pi'\tilde{M}'\tilde{M}\Pi) = \trace(M'M) - 2 \trace(M'\tilde{M}\Pi) + \trace(\tilde{M}'\tilde{M})$, we see that minimizing $\| M - \tilde{M} \Pi \|^2$ is equivalent to maximizing $\trace(M'\tilde{M}\Pi) = \sum_{i,k}{\mu_{ik}\tilde{\mu}}_{i,\pi(k)}$, which for hard partitions is the number of objects with the same label in the partitions given by $M$ and $\tilde{M}\Pi$. Finding the optimal $\Pi$ is thus recognized as an instance of the \emph{linear sum assignment problem} (LSAP, also known as the weighted bipartite graph matching problem). The LSAP can be solved by linear programming, e.g., using Simplex-style primal algorithms as done by function~\code{lp.assign()} in package~\pkg{lpSolve}~\citep{cluster:Buttrey:2005}, but primal-dual algorithms such as the so-called Hungarian method can be shown to find the optimum in time $O(k^3)$ \citep[e.g.,][]{cluster:Papadimitriou+Steiglitz:1982}. Available published implementations include TOMS 548 \citep{cluster:Carpaneto+Toth:1980}, which however is restricted to integer weights and $k < 131$. One can also transform the LSAP into a network flow problem, and use e.g.~RELAX-IV \citep{cluster:Bertsekas+Tseng:1994} for solving this, as is done in package~\pkg{optmatch}~\citep{cluster:Hansen:2005}. In package~\pkg{clue}, we use an efficient C implementation of the Hungarian algorithm kindly provided to us by Walter B\"ohm, which has been found to perform very well across a wide range of problem sizes. \cite{cluster:Gordon+Vichi:2001} use a variant of Euclidean dissimilarity (``GV1 dissimilarity'') which is based on the sum of the squared difference of the memberships of matched (non-empty) classes only, discarding the unmatched ones (see their Example~2). This results in a measure which is discontinuous over the space of soft partitions with arbitrary numbers of classes. 
The partition agreement measures ``angle'' and ``diag'' (maximal cosine of angle between the memberships, and maximal co-classification rate, where both maxima are taken over all column permutations of the membership matrices) are based on solving the same LSAP as for Euclidean dissimilarity. Finally, Manhattan partition dissimilarity is defined as the minimal sum of the absolute differences of $M$ and all column permutations of $\tilde{M}$, and can again be computed efficiently by solving an LSAP. For hard partitions, both Manhattan and squared Euclidean dissimilarity give twice the \emph{transfer distance} \citep{cluster:Charon+Denoeud+Guenoche:2006}, which is the minimum number of objects that must be removed so that the implied partitions (restrictions to the remaining objects) are identical. This is also known as the \emph{$R$-metric} in \cite{cluster:Day:1981}, i.e., the number of augmentations and removals of single objects needed to transform one partition into the other, and the \emph{partition-distance} in \cite{cluster:Gusfield:2002}. Note when assessing proximity that agreements for soft partitions are always (and quite often considerably) lower than the agreements for the corresponding nearest hard partitions, unless the agreement measures are based on the latter anyways (as currently done for Rand, Katz-Powell, and NMI). Package~\pkg{clue} provides additional agreement measures, such as the Jaccard and Fowles-Mallows \citep[quite often incorrectly attributed to \cite{cluster:Wallace:1983}]{cluster:Fowlkes+Mallows:1983a} indices, and dissimilarity measures such as the ``symdiff'' and Rand distances (the latter is proportional to the metric of \cite{cluster:Mirkin:1996}) and the metrics discussed in \cite{cluster:Boorman+Arabie:1972}. One could easily add more proximity measures, such as the ``Variation of Information'' \citep{cluster:Meila:2003}. However, all these measures are rigorously defined for hard partitions only. 
To see why extensions to soft partitions are far from straightforward, consider e.g.\ measures based on the confusion matrix. Its entries count the cardinality of certain intersections of sets. \label{fuzzy} In a fuzzy context for soft partitions, a natural generalization would be using fuzzy cardinalities (i.e., sums of memberships values) of fuzzy intersections instead. There are many possible choices for the latter, with the product of the membership values (corresponding to employing the confusion matrix also in the fuzzy case) one of them, but the minimum instead of the product being the ``usual'' choice. A similar point can be made for co-occurrences of soft memberships. We are not aware of systematic investigations of these extension issues. \subsubsection{Hierarchy proximities} Available built-in dissimilarity measures for hierarchies include \emph{Euclidean} (again, the default measure used by \code{cl\_dissimilarity()}) and Manhattan dissimilarity, which are simply the Euclidean (square root of the sum of squared differences) and Manhattan (sum of the absolute differences) dissimilarities between the associated ultrametrics. Cophenetic dissimilarity is defined as $1 - c^2$, where $c$ is the cophenetic correlation coefficient \citep{cluster:Sokal+Rohlf:1962}, i.e., the Pearson product-moment correlation between the ultrametrics. Gamma dissimilarity is the rate of inversions between the associated ultrametrics $u$ and $v$ (i.e., the rate of pairs $(i,j)$ and $(k,l)$ for which $u_{ij} < u_{kl}$ and $v_{ij} > v_{kl}$). This measure is a linear transformation of Kruskal's~$\gamma$. Finally, symdiff dissimilarity is the cardinality of the symmetric set difference of the sets of classes (hierarchies in the strict sense) induced by the dendrograms. Associated agreement measures are obtained by suitable transformations of the dissimilarities~$d$; for Euclidean proximities, we prefer to use $1 / (1 + d)$ rather than e.g.\ $\exp(-d)$. 
One should note that whereas cophenetic and gamma dissimilarities are invariant to linear transformations, Euclidean and Manhattan ones are not. Hence, if only the relative ``structure'' of the dendrograms is of interest, these dissimilarities should only be used after transforming the ultrametrics to a common range of values (e.g., to $[0,1]$). \subsection{Consensus clusterings} Consensus clusterings ``synthesize'' the information in the elements of a cluster ensemble into a single clustering. There are three main approaches to obtaining consensus clusterings \citep{cluster:Hornik:2005a,cluster:Gordon+Vichi:2001}: in the \emph{constructive} approach, one specifies a way to construct a consensus clustering. In the \emph{axiomatic} approach, emphasis is on the investigation of existence and uniqueness of consensus clusterings characterized axiomatically. The \emph{optimization} approach formalizes the natural idea of describing consensus clusterings as the ones which ``optimally represent the ensemble'' by providing a criterion to be optimized over a suitable set $\mathcal{C}$ of possible consensus clusterings. If $d$ is a dissimilarity measure and $C_1, \ldots, C_B$ are the elements of the ensemble, one can e.g.\ look for solutions of the problem \begin{displaymath} \sum\nolimits_{b=1}^B w_b d(C, C_b) ^ p \Rightarrow \min\nolimits_{C \in \mathcal{C}}, \end{displaymath} for some $p \ge 0$, i.e., as clusterings~$C^*$ minimizing weighted average dissimilarity powers of order~$p$. Analogously, if a similarity measure is given, one can look for clusterings maximizing weighted average similarity powers. Following \cite{cluster:Gordon+Vichi:1998}, an above $C^*$ is referred to as (weighted) \emph{median} or \emph{medoid} clustering if $p = 1$ and the optimum is sought over the set of all possible base clusterings, or the set $\{ C_1, \ldots, C_B \}$ of the base clusterings, respectively. For $p = 2$, we have \emph{least squares} consensus clusterings (generalized means). 
For computing consensus clusterings, package~\pkg{clue} provides function \code{cl\_consensus()} with synopsis \code{cl\_consensus(x, method = NULL, weights = 1, control = list())}. This allows (similar to the functions for computing cluster proximities, see Section~\ref{synopsis} on Page~\pageref{synopsis}) argument \code{method} to be a character string specifying one of the built-in methods discussed below, or a function to be taken as a user-defined method (taking an ensemble, the case weights, and a list of control parameters as its arguments), again making it reasonably straightforward to add methods. In addition, function~\code{cl\_medoid()} can be used for obtaining medoid partitions (using, in principle, arbitrary dissimilarities). Modulo possible differences in the case of ties, this gives the same results as (the medoid obtained by) \code{pam()} in package~\pkg{cluster}. If all elements of the ensemble are partitions, package~\pkg{clue} provides algorithms for computing soft least squares consensus partitions for weighted Euclidean, GV1 and co-membership dissimilarities. Let $M_1, \ldots, M_B$ and $M$ denote the membership matrices of the elements of the ensemble and their sought least squares consensus partition, respectively. For Euclidean dissimilarity, we need to find \begin{displaymath} \sum_b w_b \min\nolimits_{\Pi_b} \| M - M_b \Pi_b \|^2 \Rightarrow \min\nolimits_M \end{displaymath} over all membership matrices (i.e., stochastic matrices) $M$, or equivalently, \begin{displaymath} \sum_b w_b \| M - M_b \Pi_b \|^2 \Rightarrow \min\nolimits_{M, \Pi_1, \ldots, \Pi_B} \end{displaymath} over all $M$ and permutation matrices $\Pi_1, \ldots, \Pi_B$. Now fix the $\Pi_b$ and let $\bar{M} = s^{-1} \sum_b w_b M_b \Pi_b$ be the weighted average of the $M_b \Pi_b$, where $s = \sum_b w_b$. 
Then \begin{eqnarray*} \lefteqn{\sum_b w_b \| M - M_b \Pi_b \|^2} \\ &=& \sum_b w_b (\|M\|^2 - 2 \trace(M' M_b \Pi_b) + \|M_b\Pi_b\|^2) \\ &=& s \|M\|^2 - 2 s \trace(M' \bar{M}) + \sum_b w_b \|M_b\|^2 \\ &=& s (\|M - \bar{M}\|^2) + \sum_b w_b \|M_b\|^2 - s \|\bar{M}\|^2 \end{eqnarray*} Thus, as already observed in \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} and \cite{cluster:Gordon+Vichi:2001}, for fixed permutations $\Pi_b$ the optimal soft $M$ is given by $\bar{M}$. The optimal permutations can be found by minimizing $- s \|\bar{M}\|^2$, or equivalently, by maximizing \begin{displaymath} s^2 \|\bar{M}\|^2 = \sum_{\beta, b} w_\beta w_b \trace(\Pi_\beta'M_\beta'M_b\Pi_b). \end{displaymath} With $U_{\beta,b} = w_\beta w_b M_\beta' M_b$ we can rewrite the above as \begin{displaymath} \sum_{\beta, b} w_\beta w_b \trace(\Pi_\beta'M_\beta'M_b\Pi_b) = \sum_{\beta,b} \sum_{j=1}^k [U_{\beta,b}]_{\pi_\beta(j), \pi_b(j)} =: \sum_{j=1}^k c_{\pi_1(j), \ldots, \pi_B(j)} \end{displaymath} This is an instance of the \emph{multi-dimensional assignment problem} (MAP), which, contrary to the LSAP, is known to be NP-hard \citep[e.g., via reduction to 3-DIMENSIONAL MATCHING,][]{cluster:Garey+Johnson:1979}, and can e.g.\ be approached using randomized parallel algorithms \citep{cluster:Oliveira+Pardalos:2004}. Branch-and-bound approaches suggested in the literature \citep[e.g.,][]{cluster:Grundel+Oliveira+Pardalos:2005} are unfortunately computationally infeasible for ``typical'' sizes of cluster ensembles ($B \ge 20$, maybe even in the hundreds). Package~\pkg{clue} provides two heuristics for (approximately) finding the soft least squares consensus partition for Euclidean dissimilarity. Method \code{"DWH"} of function \code{cl\_consensus()} is an extension of the greedy algorithm in \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} which is based on a single forward pass through the ensemble which in each step chooses the ``locally'' optimal $\Pi$. 
Starting with $\tilde{M}_1 = M_1$, $\tilde{M}_b$ is obtained from $\tilde{M}_{b-1}$ by optimally matching $M_b \Pi_b$ to this, and taking a weighted average of $\tilde{M}_{b-1}$ and $M_b \Pi_b$ in a way that $\tilde{M}_b$ is the weighted average of the first~$b$ $M_\beta \Pi_\beta$. This simple approach could be further enhanced via back-fitting or several passes, in essence resulting in an ``on-line'' version of method \code{"SE"}. This, in turn, is a fixed-point algorithm, which iterates between updating $M$ as the weighted average of the current $M_b \Pi_b$, and determining the $\Pi_b$ by optimally matching the current $M$ to the individual $M_b$. Finally, method \code{"GV1"} implements the fixed-point algorithm for the ``first model'' in \cite{cluster:Gordon+Vichi:2001}, which gives least squares consensus partitions for GV1 dissimilarity. In the above, we implicitly assumed that all partitions in the ensemble as well as the sought consensus partition have the same number of classes. The more general case can be dealt with through suitable ``projection'' devices. When using co-membership dissimilarity, the least squares consensus partition is determined by minimizing \begin{eqnarray*} \lefteqn{\sum_b w_b \|MM' - M_bM_b'\|^2} \\ &=& s \|MM' - \bar{C}\|^2 + \sum_b w_b \|M_bM_b'\|^2 - s \|\bar{C}\|^2 \end{eqnarray*} over all membership matrices~$M$, where now $\bar{C} = s^{-1} \sum_b C(M_b) = s^{-1} \sum_b M_bM_b'$ is the weighted average co-membership matrix of the ensemble. This corresponds to the ``third model'' in \cite{cluster:Gordon+Vichi:2001}. Method \code{"GV3"} of function \code{cl\_consensus()} provides a SUMT approach (see Section~\ref{SUMT} on Page~\pageref{SUMT}) for finding the minimum. 
We note that this strategy could more generally be applied to consensus problems of the form \begin{displaymath} \sum_b w_b \|\Phi(M) - \Phi(M_b)\|^2 \Rightarrow \min\nolimits_M, \end{displaymath} which are equivalent to minimizing $\|\Phi(M) - \bar{\Phi}\|^2$, with $\bar{\Phi}$ the weighted average of the $\Phi(M_b)$. This includes e.g.\ the case where generalized co-memberships are defined by taking the ``standard'' fuzzy intersection of co-incidences, as discussed in Section~\ref{fuzzy} on Page~\pageref{fuzzy}. Package~\pkg{clue} currently does not provide algorithms for obtaining \emph{hard} consensus partitions, as e.g.\ done in \cite{cluster:Krieger+Green:1999} using Rand proximity. It seems ``natural'' to extend the methods discussed above to include a constraint on softness, e.g., on the partition coefficient PC (see Section~\ref{PC} on Page~\pageref{PC}). For Euclidean dissimilarity, straightforward Lagrangian computations show that the constrained minima are of the form $\bar{M}(\alpha) = \alpha \bar{M} + (1 - \alpha) E$, where $E$ is the ``maximally soft'' membership with all entries equal to $1/k$, $\bar{M}$ is again the weighted average of the $M_b\Pi_b$ with the $\Pi_b$ solving the underlying MAP, and $\alpha$ is chosen such that $PC(\bar{M}(\alpha))$ equals a prescribed value. As $\alpha$ increases (even beyond one), softness of the $\bar{M}(\alpha)$ decreases. However, for $\alpha^* > 1 / (1 - k\mu^*)$, where $\mu^*$ is the minimum of the entries of $\bar{M}$, the $\bar{M}(\alpha)$ have negative entries, and are no longer feasible membership matrices. Obviously, the non-negativity constraints for the $\bar{M}(\alpha)$ eventually put restrictions on the admissible $\Pi_b$ in the underlying MAP. Thus, such a simple relaxation approach to obtaining optimal hard partitions is not feasible. 
For ensembles of hierarchies, \code{cl\_consensus()} provides a built-in method (\code{"cophenetic"}) for approximately minimizing average weighted squared Euclidean dissimilarity \begin{displaymath} \sum_b w_b \| U - U_b \|^2 \Rightarrow \min\nolimits_U \end{displaymath} over all ultrametrics~$U$, where $U_1, \ldots, U_B$ are the ultrametrics corresponding to the elements of the ensemble. This is of course equivalent to minimizing $\| U - \bar{U} \|^2$, where $\bar{U} = s^{-1} \sum_b w_b U_b$ is the weighted average of the $U_b$. The SUMT approach provided by function \code{ls\_fit\_ultrametric()} (see Section~\ref{SUMT} on Page~\pageref{SUMT}) is employed for finding the sought weighted least squares consensus hierarchy. In addition, method \code{"majority"} obtains a consensus hierarchy from an extension of the majority consensus tree of \cite{cluster:Margush+McMorris:1981}, which minimizes $L(U) = \sum_b w_b d(U_b, U)$ over all ultrametrics~$U$, where $d$ is the symmetric difference dissimilarity. Clearly, the available methods use heuristics for solving hard optimization problems, and cannot be guaranteed to find a global optimum. Standard practice would recommend to use the best solution found in ``sufficiently many'' replications of the methods. Alternative recent approaches to obtaining consensus partitions include ``Bagged Clustering'' \citep[provided by \code{bclust()} in package~\pkg{e1071}]{cluster:Leisch:1999}, the ``evidence accumulation'' framework of \cite{cluster:Fred+Jain:2002}, the NMI optimization and graph-partitioning methods in \cite{cluster:Strehl+Ghosh:2003a}, ``Bagged Clustering'' as in \cite{cluster:Dudoit+Fridlyand:2003}, and the hybrid bipartite graph formulation of \cite{cluster:Fern+Brodley:2004}. Typically, these approaches are constructive, and can easily be implemented based on the infrastructure provided by package~\pkg{clue}. Evidence accumulation amounts to standard hierarchical clustering of the average co-membership matrix. 
Procedure~BagClust1 of \cite{cluster:Dudoit+Fridlyand:2003} amounts to computing $B^{-1} \sum_b M_b\Pi_b$, where each $\Pi_b$ is determined by optimal Euclidean matching of $M_b$ to a fixed reference membership $M_0$. In the corresponding ``Bagged Clustering'' framework, $M_0$ and the $M_b$ are obtained by applying the base clusterer to the original data set and bootstrap samples from it, respectively. This is implemented as method \code{"DFBC1"} of \code{cl\_bag()} in package~\pkg{clue}. Finally, the approach of \cite{cluster:Fern+Brodley:2004} solves an LSAP for an asymmetric cost matrix based on object-by-all-classes incidences. \subsection{Cluster partitions} To investigate the ``structure'' in a cluster ensemble, an obvious idea is to start clustering the clusterings in the ensemble, resulting in ``secondary'' clusterings \citep{cluster:Gordon+Vichi:1998, cluster:Gordon:1999}. This can e.g.\ be performed by using \code{cl\_dissimilarity()} (or \code{cl\_agreement()}) to compute a dissimilarity matrix for the ensemble, and feed this into a dissimilarity-based clustering algorithm (such as \code{pam()} in package~\pkg{cluster} or \code{hclust()} in package~\pkg{stats}). (One can even use \code{cutree()} to obtain hard partitions from hierarchies thus obtained.) If prototypes (``typical clusterings'') are desired for partitions of clusterings, they can be determined post-hoc by finding suitable consensus clusterings in the classes of the partition, e.g., using \code{cl\_consensus()} or \code{cl\_medoid()}. Package~\pkg{clue} additionally provides \code{cl\_pclust()} for direct prototype-based partitioning based on minimizing criterion functions of the form $\sum w_b u_{bj}^m d(x_b, p_j)^e$, the sum of the case-weighted membership-weighted $e$-th powers of the dissimilarities between the elements~$x_b$ of the ensemble and the prototypes~$p_j$, for suitable dissimilarities~$d$ and exponents~$e$. 
(The underlying feature spaces are that of membership matrices and ultrametrics, respectively, for partitions and hierarchies.) Parameter~$m$ must not be less than one and controls the softness of the obtained partitions, corresponding to the \dQuote{fuzzification parameter} of the fuzzy $c$-means algorithm. For $m = 1$, a generalization of the Lloyd-Forgy variant \citep{cluster:Lloyd:1957, cluster:Forgy:1965, cluster:Lloyd:1982} of the $k$-means algorithm is used, which iterates between reclassifying objects to their closest prototypes, and computing new prototypes as consensus clusterings for the classes. \citet{cluster:Gaul+Schader:1988} introduced this procedure for \dQuote{Clusterwise Aggregation of Relations} (with the same domains), containing equivalence relations, i.e., hard partitions, as a special case. For $m > 1$, a generalization of the fuzzy $c$-means recipe \citep[e.g.,][]{cluster:Bezdek:1981} is used, which alternates between computing optimal memberships for fixed prototypes, and computing new prototypes as the suitably weighted consensus clusterings for the classes. This procedure is repeated until convergence occurs, or the maximal number of iterations is reached. Consensus clusterings are computed using (one of the methods provided by) \code{cl\_consensus}, with dissimilarities~$d$ and exponent~$e$ implied by method employed, and obtained via a registration mechanism. The default methods compute Least Squares Euclidean consensus clusterings, i.e., use Euclidean dissimilarity~$d$ and $e = 2$. \section{Examples} \label{sec:examples} \subsection{Cassini data} \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} and \cite{cluster:Leisch:1999} use Cassini data sets to illustrate how e.g.\ suitable aggregation of base $k$-means results can reveal underlying non-convex structure which cannot be found by the base algorithm. 
Such data sets contain points in 2-dimensional space drawn from the uniform distribution on 3 structures, with the two ``outer'' ones banana-shaped and the ``middle'' one a circle, and can be obtained by function~\code{mlbench.cassini()} in package~\pkg{mlbench}~\citep{cluster:Leisch+Dimitriadou:2005}. Package~\pkg{clue} contains the data sets \code{Cassini} and \code{CKME}, which are an instance of a 1000-point Cassini data set, and a cluster ensemble of 50 $k$-means partitions of the data set into three classes, respectively. The data set is shown in Figure~\ref{fig:Cassini}. <>= data("Cassini") plot(Cassini$x, col = as.integer(Cassini$classes), xlab = "", ylab = "") @ % $ \begin{figure} \centering <>= <> @ % \caption{The Cassini data set.} \label{fig:Cassini} \end{figure} Figure~\ref{fig:CKME} gives a dendrogram of the Euclidean dissimilarities of the elements of the $k$-means ensemble. <>= data("CKME") plot(hclust(cl_dissimilarity(CKME)), labels = FALSE) @ % \begin{figure} \centering <>= <> @ % \caption{A dendrogram of the Euclidean dissimilarities of 50 $k$-means partitions of the Cassini data into 3 classes.} \label{fig:CKME} \end{figure} We can see that there are large groups of essentially identical $k$-means solutions. We can gain more insight by inspecting representatives of these three groups, or by computing the medoid of the ensemble <<>>= m1 <- cl_medoid(CKME) table(Medoid = cl_class_ids(m1), "True Classes" = Cassini$classes) @ % $ and inspecting it (Figure~\ref{fig:Cassini-medoid}): <>= plot(Cassini$x, col = cl_class_ids(m1), xlab = "", ylab = "") @ % $ \begin{figure} \centering <>= <> @ % \caption{Medoid of the Cassini $k$-means ensemble.} \label{fig:Cassini-medoid} \end{figure} Flipping this solution top-down gives a second ``typical'' partition. We see that the $k$-means base clusterers cannot resolve the underlying non-convex structure. 
For the least squares consensus of the ensemble, we obtain <<>>= set.seed(1234) m2 <- cl_consensus(CKME) @ % where here and below we set the random seed for reproducibility, noting that one should really use several replicates of the consensus heuristic. This consensus partition has confusion matrix <<>>= table(Consensus = cl_class_ids(m2), "True Classes" = Cassini$classes) @ % $ and class details as displayed in Figure~\ref{fig:Cassini-mean}: <>= plot(Cassini$x, col = cl_class_ids(m2), xlab = "", ylab = "") @ % $ \begin{figure} \centering <>= <> @ % \caption{Least Squares Consensus of the Cassini $k$-means ensemble.} \label{fig:Cassini-mean} \end{figure} This has drastically improved performance, and almost perfect recovery of the two outer shapes. In fact, \cite{cluster:Dimitriadou+Weingessel+Hornik:2002} show that almost perfect classification can be obtained by suitable combinations of different base clusterers ($k$-means, fuzzy $c$-means, and unsupervised fuzzy competitive learning). \subsection{Gordon-Vichi macroeconomic data} \citet[Table~1]{cluster:Gordon+Vichi:2001} provide soft partitions of 21 countries based on macroeconomic data for the years 1975, 1980, 1985, 1990, and 1995. These partitions were obtained using fuzzy $c$-means on measurements of the following variables: the annual per capita gross domestic product (GDP) in USD (converted to 1987 prices); the percentage of GDP provided by agriculture; the percentage of employees who worked in agriculture; and gross domestic investment, expressed as a percentage of the GDP. Table~5 in \cite{cluster:Gordon+Vichi:2001} gives 3-class consensus partitions obtained by applying their models 1, 2, and 3 and the approach in \cite{cluster:Sato+Sato:1994}. The partitions and consensus partitions are available in data sets \code{GVME} and \code{GVME\_Consensus}, respectively. 
We compare the results of \cite{cluster:Gordon+Vichi:2001} using GV1 dissimilarities (model 1) to ours as obtained by \code{cl\_consensus()} with method \code{"GV1"}. <<>>= data("GVME") GVME set.seed(1) m1 <- cl_consensus(GVME, method = "GV1", control = list(k = 3, verbose = TRUE)) @ % This results in a soft partition with average squared GV1 dissimilarity (the criterion function to be optimized by the consensus partition) of <<>>= mean(cl_dissimilarity(GVME, m1, "GV1") ^ 2) @ % We compare this to the consensus solution given in \cite{cluster:Gordon+Vichi:2001}: <<>>= data("GVME_Consensus") m2 <- GVME_Consensus[["MF1/3"]] mean(cl_dissimilarity(GVME, m2, "GV1") ^ 2) table(CLUE = cl_class_ids(m1), GV2001 = cl_class_ids(m2)) @ % Interestingly, we are able to obtain a ``better'' solution, which however agrees with the one reported in the literature with respect to their nearest hard partitions. For the 2-class consensus partition, we obtain <<>>= set.seed(1) m1 <- cl_consensus(GVME, method = "GV1", control = list(k = 2, verbose = TRUE)) @ which is slightly better than the solution reported in \cite{cluster:Gordon+Vichi:2001} <<>>= mean(cl_dissimilarity(GVME, m1, "GV1") ^ 2) m2 <- GVME_Consensus[["MF1/2"]] mean(cl_dissimilarity(GVME, m2, "GV1") ^ 2) @ but in fact agrees with it apart from rounding errors: <<>>= max(abs(cl_membership(m1) - cl_membership(m2))) @ It is interesting to compare these solutions to the Euclidean 2-class consensus partition for the GVME ensemble: <<>>= m3 <- cl_consensus(GVME, method = "SE", control = list(k = 2, verbose = TRUE)) @ This is markedly different from the GV1 consensus partition <<>>= table(GV1 = cl_class_ids(m1), Euclidean = cl_class_ids(m3)) @ with countries <<>>= rownames(m1)[cl_class_ids(m1) != cl_class_ids(m3)] @ % classified differently, being with the ``richer'' class for the GV1 and the ``poorer'' for the Euclidean consensus partition. 
(In fact, all these countries end up in the ``middle'' class for the 3-class GV1 consensus partition.) \subsection{Rosenberg-Kim kinship terms data} \cite{cluster:Rosenberg+Kim:1975} describe an experiment where perceived similarities of the kinship terms were obtained from six different ``sorting'' experiments. In one of these, 85 female undergraduates at Rutgers University were asked to sort 15 English terms into classes ``on the basis of some aspect of meaning''. These partitions were printed in \citet[Table~7.1]{cluster:Rosenberg:1982}. Comparison with the original data indicates that the partition data have the ``nephew'' and ``niece'' columns interchanged, which is corrected in data set \code{Kinship82}. \citet[Table~6]{cluster:Gordon+Vichi:2001} provide consensus partitions for these data based on their models 1--3 (available in data set \code{Kinship82\_Consensus}). We compare their results using co-membership dissimilarities (model 3) to ours as obtained by \code{cl\_consensus()} with method \code{"GV3"}. <<>>= data("Kinship82") Kinship82 set.seed(1) m1 <- cl_consensus(Kinship82, method = "GV3", control = list(k = 3, verbose = TRUE)) @ % This results in a soft partition with average co-membership dissimilarity (the criterion function to be optimized by the consensus partition) of <<>>= mean(cl_dissimilarity(Kinship82, m1, "comem") ^ 2) @ % Again, we compare this to the corresponding consensus solution given in \cite{cluster:Gordon+Vichi:2001}: <<>>= data("Kinship82_Consensus") m2 <- Kinship82_Consensus[["JMF"]] mean(cl_dissimilarity(Kinship82, m2, "comem") ^ 2) @ % Interestingly, again we obtain a (this time only ``slightly'') better solution, with <<>>= cl_dissimilarity(m1, m2, "comem") table(CLUE = cl_class_ids(m1), GV2001 = cl_class_ids(m2)) @ % indicating that the two solutions are reasonably close, even though <<>>= cl_fuzziness(cl_ensemble(m1, m2)) @ % shows that the solution found by \pkg{clue} is ``softer''. 
\subsection{Miller-Nicely consonant phoneme confusion data} \cite{cluster:Miller+Nicely:1955} obtained the data on the auditory confusions of 16 English consonant phonemes by exposing female subjects to a series of syllables consisting of one of the consonants followed by the vowel `a' under 17 different experimental conditions. Data set \code{Phonemes} provides consonant misclassification probabilities (i.e., similarities) obtained from aggregating the six so-called flat-noise conditions in which only the speech-to-noise ratio was varied into a single matrix of misclassification frequencies. These data are used in \cite{cluster:DeSoete:1986} as an illustration of the SUMT approach for finding least squares optimal fits to dissimilarities by ultrametrics. We can reproduce this analysis as follows. <<>>= data("Phonemes") d <- as.dist(1 - Phonemes) @ % (Note that the data set has the consonant misclassification probabilities, i.e., the similarities between the phonemes.) <<>>= u <- ls_fit_ultrametric(d, control = list(verbose = TRUE)) @ % This gives an ultrametric~$u$ for which Figure~\ref{fig:Phonemes} plots the corresponding dendrogram, ``basically'' reproducing Figure~1 in \cite{cluster:DeSoete:1986}. <>= plot(u) @ % \begin{figure} \centering <>= <> @ % \caption{Dendrogram for least squares fit to the Miller-Nicely consonant phoneme confusion data.} \label{fig:Phonemes} \end{figure} We can also compare the least squares fit obtained to that of other hierarchical clusterings of $d$, e.g.\ those obtained by \code{hclust()}. The ``optimal''~$u$ has Euclidean dissimilarity <<>>= round(cl_dissimilarity(d, u), 4) @ % to $d$. For the \code{hclust()} results, we get <<>>= hclust_methods <- c("ward", "single", "complete", "average", "mcquitty") hens <- cl_ensemble(list = lapply(hclust_methods, function(m) hclust(d, m))) names(hens) <- hclust_methods round(sapply(hens, cl_dissimilarity, d), 4) @ % which all exhibit greater Euclidean dissimilarity to $d$ than $u$. 
(We exclude methods \code{"median"} and \code{"centroid"} as these do not yield valid hierarchies.) We can also compare the ``structure'' of the different hierarchies, e.g.\ by looking at the rate of inversions between them: <<>>= ahens <- c(L2opt = cl_ensemble(u), hens) round(cl_dissimilarity(ahens, method = "gamma"), 2) @ % \section{Outlook} \label{sec:outlook} Package~\pkg{clue} was designed as an \emph{extensible} environment for computing on cluster ensembles. It currently provides basic data structures for representing partitions and hierarchies, and facilities for computing on these, including methods for measuring proximity and obtaining consensus and ``secondary'' clusterings. Many extensions to the available functionality are possible and in fact planned (some of these enhancements were already discussed in more detail in the course of this paper). \begin{itemize} \item Provide mechanisms to generate cluster ensembles based on reweighting (assuming base clusterers allowing for case weights) the data set. \item Explore recent advances (e.g., parallelized random search) in heuristics for solving the multi-dimensional assignment problem. \item Add support for \emph{additive trees} \citep[e.g.,][]{cluster:Barthelemy+Guenoche:1991}. \item Add heuristics for finding least squares fits based on iterative projection on convex sets of constraints, see e.g.\ \cite{cluster:Hubert+Arabie+Meulman:2006} and the accompanying MATLAB code available at \url{http://cda.psych.uiuc.edu/srpm_mfiles} for using these methods (instead of SUMT approaches) to fit ultrametrics and additive trees to proximity data. \item Add an ``$L_1$ View''. Emphasis in \pkg{clue}, in particular for obtaining consensus clusterings, is on using Euclidean dissimilarities (based on suitable least squares distances); arguably, more ``robust'' consensus solutions should result from using Manhattan dissimilarities (based on absolute distances). 
Adding such functionality necessitates developing the corresponding structure theory for soft Manhattan median partitions. Minimizing average Manhattan dissimilarity between co-memberships and ultrametrics results in constrained $L_1$ approximation problems for the weighted medians of the co-memberships and ultrametrics, respectively, and could be approached by employing SUMTs analogous to the ones used for the $L_2$ approximations. \item Provide heuristics for obtaining \emph{hard} consensus partitions. \item Add facilities for tuning hyper-parameters (most prominently, the number of classes employed) and ``cluster validation'' of partitioning algorithms, as recently proposed by \cite{cluster:Roth+Lange+Braun:2002}, \cite{cluster:Lange+Roth+Braun:2004}, \cite{cluster:Dudoit+Fridlyand:2002}, and \cite{cluster:Tibshirani+Walther:2005}. \end{itemize} We are hoping to be able to provide many of these extensions in the near future. \subsubsection*{Acknowledgments} We are grateful to Walter B\"ohm for providing efficient C code for solving assignment problems. {\small \bibliographystyle{abbrvnat} \bibliography{cluster} } \end{document} clue/vignettes/cluster.bib0000644000175100001440000012355713020247005015375 0ustar hornikusers@Book{cluster:Arabie+Carroll+Desarbo:1987, author = {Arabie, Phipps and Carroll, J. Douglas and DeSarbo, Wayne}, title = {Three-way Scaling and Clustering}, year = 1987, pages = 92, publisher = {Sage Publications Inc}, } @Book{cluster:Arabie+Hubert+DeSoete:1996, author = {Phipps Arabie and Lawrence J. 
Hubert and Geert de Soete}, title = {Clustering and Classification}, year = 1996, pages = 490, publisher = {World Scientific Publications}, } @Book{cluster:Barthelemy+Guenoche:1991, author = {Jean-Pierry Barth\'el\'emy and Alain Gu\'enoche}, title = {Trees and Proximity Representations}, publisher = {John Wiley \& Sons}, year = 1991, series = {Wiley-Interscience Series in Discrete Mathematics and Optimization}, address = {Chichester}, note = {{ISBN 0-471-92263-3}}, } @Article{cluster:Barthelemy+Leclerc+Monjardet:1986, author = {Jean-Pierre Barth\'el\'emy and Bruno Leclerc and Bernard Monjardet}, title = {On the Use of Ordered Sets in Problems of Comparison and Consensus of Classifications}, journal = {Journal of Classification}, year = 1986, volume = 3, number = 2, pages = {187--224}, doi = {10.1007/BF01894188}, } @Article{cluster:Barthelemy+Mcmorris:1986, author = {Jean-Pierre Barth\'el\'emy and F. R. McMorris}, title = {The Median Procedure for $n$-trees}, year = 1986, journal = {Journal of Classification}, volume = 3, pages = {329--334}, doi = {10.1007/BF01894194}, } @Article{cluster:Barthelemy+Monjardet:1981, author = {Jean-Pierre Barth\'el\'emy and Bernard Monjardet}, title = {The Median Procedure in Cluster Analysis and Social Choice Theory}, journal = {Mathematical Social Sciences}, year = 1981, volume = 1, pages = {235--267}, doi = {10.1016/0165-4896(81)90041-X}, } @TechReport{cluster:Bertsekas+Tseng:1994, author = {Dimitri P. Bertsekas and P. Tseng}, title = {{RELAX-IV}: A Faster Version of the {RELAX} Code for Solving Minimum Cost Flow Problems}, institution = {Massachusetts Institute of Technology}, year = 1994, number = {P-2276}, url = {http://www.mit.edu/dimitrib/www/noc.htm}, } @Book{cluster:Bezdek:1981, author = {James C. Bezdek}, title = {Pattern Recognition with Fuzzy Objective Function Algorithms}, publisher = {Plenum}, address = {New York}, year = 1981, } @InCollection{cluster:Boorman+Arabie:1972, author = {Scott A. 
Boorman and Phipps Arabie}, title = {Structural Measures and the Method of Sorting}, booktitle = {Multidimensional Scaling: Theory and Applications in the Behavioral Sciences, 1: Theory}, pages = {225--249}, publisher = {Seminar Press}, year = 1972, editor = {Roger N. Shepard and A. Kimball Romney and Sara Beth Nerlove}, address = {New York}, } @Article{cluster:Boorman+Olivier:1973, author = {Scott A. Boorman and Donald C. Olivier}, title = {Metrics on Spaces of Finite Trees}, journal = {Journal of Mathematical Psychology}, year = 1973, volume = 10, number = 1, pages = {26--59}, doi = {10.1016/0022-2496(73)90003-5}, } @Article{cluster:Breiman:1996, author = {Leo Breiman}, title = {Bagging Predictors}, journal = {Machine Learning}, year = 1996, volume = 24, number = 2, pages = {123--140}, doi = {10.1023/A:1018054314350}, } @Manual{cluster:Buchta+Hahsler:2005, title = {cba: Clustering for Business Analytics}, author = {Christian Buchta and Michael Hahsler}, year = 2005, note = {R package version 0.1-6}, } @Article{cluster:Buttrey:2005, author = {Samuel E. Buttrey}, title = {Calling the \texttt{lp\_solve} Linear Program Software from {R}, {S-PLUS} and {Excel}}, journal = {Journal of Statistical Software}, year = 2005, volume = 14, number = 4, url = {http://www.jstatsoft.org/v14/i04/}, doi = {10.18637/jss.v014.i04}, } @article{cluster:Carpaneto+Toth:1980, author = {Giorgio Carpaneto and Paolo Toth}, title = {Algorithm 548: Solution of the Assignment Problem}, journal = {ACM Transactions on Mathematical Software}, volume = 6, number = 1, year = 1980, issn = {0098-3500}, pages = {104--111}, doi = {10.1145/355873.355883}, publisher = {ACM Press}, } @Article{cluster:Carroll+Clark+Desarbo:1984, author = {Carroll, J. Douglas and Clark, Linda A. 
and DeSarbo, Wayne S.}, title = {The Representation of Three-way Proximity Data by Single and Multiple Tree Structure Models}, year = 1984, journal = {Journal of Classification}, volume = 1, pages = {25--74}, keywords = {Clustering analysis; Alternating least squares; Discrete optimization}, doi = {10.1007/BF01890116}, } @InCollection{cluster:Carroll+Pruzansky:1980, author = {J. D. Carroll and S. Pruzansky}, title = {Discrete and Hybrid Scaling Models}, booktitle = {Similarity and Choice}, address = {Bern, Switzerland}, publisher = {Huber}, year = 1980, editor = {E. D. Lantermann and H. Feger}, } @Article{cluster:Carroll:1976, author = {Carroll, J. Douglas}, title = {Spatial, Non-spatial and Hybrid Models for Scaling}, year = 1976, journal = {Psychometrika}, volume = 41, pages = {439--464}, keywords = {Multidimensional scaling; Hierarchical tree structure; Clustering; Geometric model; Multivariate data}, doi = {10.1007/BF02296969}, } @TechReport{cluster:Charon+Denoeud+Guenoche:2005, author = {Ir{\`e}ne Charon and Lucile Denoeud and Alain Gu{\'e}noche and Olivier Hudry}, title = {Maximum Transfer Distance Between Partitions}, institution = {Ecole Nationale Sup{\'e}rieure des T{\'e}l{\'e}communications --- Paris}, year = 2005, number = {2005D003}, month = {May}, note = {ISSN 0751-1345 ENST D}, } @Article{cluster:Charon+Denoeud+Guenoche:2006, author = {Ir{\`e}ne Charon and Lucile Denoeud and Alain Gu{\'e}noche and Olivier Hudry}, title = {Maximum Transfer Distance Between Partitions}, journal = {Journal of Classification}, year = 2006, volume = 23, number = 1, pages = {103-121}, month = {June}, doi = {10.1007/s00357-006-0006-2}, } @Article{cluster:Day:1981, author = {William H. E. Day}, title = {The Complexity of Computing Metric Distances Between Partitions}, journal = {Mathematical Social Sciences}, year = 1981, volume = 1, pages = {269--287}, doi = {10.1016/0165-4896(81)90042-1}, } @Article{cluster:Day:1986, author = {William H. E. 
Day}, title = {Foreword: Comparison and Consensus of Classifications}, journal = {Journal of Classification}, year = 1986, volume = 3, pages = {183--185}, doi = {10.1007/BF01894187}, } @Article{cluster:Day:1987, author = {Day, William H. E.}, title = {Computational Complexity of Inferring Phylogenies from Dissimilarity Matrices}, year = 1987, journal = {Bulletin of Mathematical Biology}, volume = 49, pages = {461--467}, doi = {10.1007/BF02458863}, } @Article{cluster:DeSoete+Carroll+Desarbo:1987, author = {De Soete, Geert and Carroll, J. Douglas and DeSarbo, Wayne S.}, title = {Least Squares Algorithms for Constructing Constrained Ultrametric and Additive Tree Representations of Symmetric Proximity Data}, year = 1987, journal = {Journal of Classification}, volume = 4, pages = {155--173}, keywords = {Hierarchical clustering; Classification}, doi = {10.1007/BF01896984}, } @Article{cluster:DeSoete+Desarbo+Furnas:1984, author = {De Soete, Geert and DeSarbo, Wayne S. and Furnas, George W. and Carroll, J. 
Douglas}, title = {The Estimation of Ultrametric and Path Length Trees from Rectangular Proximity Data}, year = 1984, journal = {Psychometrika}, volume = 49, pages = {289--310}, keywords = {Cluster analysis}, doi = {10.1007/BF02306021}, } @Article{cluster:DeSoete:1983, author = {De Soete, Geert}, title = {A Least Squares Algorithm for Fitting Additive Trees to Proximity Data}, year = 1983, journal = {Psychometrika}, volume = 48, pages = {621--626}, keywords = {Clustering}, doi = {10.1007/BF02293884}, } @Article{cluster:DeSoete:1984, author = {Geert de Soete}, title = {Ultrametric Tree Representations of Incomplete Dissimilarity Data}, journal = {Journal of Classification}, year = 1984, volume = 1, pages = {235--242}, doi = {10.1007/BF01890124}, } @Article{cluster:DeSoete:1986, author = {Geert de Soete}, title = {A Least Squares Algorithm for Fitting an Ultrametric Tree to a Dissimilarity Matrix}, journal = {Pattern Recognition Letters}, year = 1986, volume = 2, pages = {133--137}, doi = {10.1016/0167-8655(84)90036-9}, } @Manual{cluster:Dimitriadou+Hornik+Leisch:2005, title = {e1071: Misc Functions of the Department of Statistics (e1071), TU Wien}, author = {Evgenia Dimitriadou and Kurt Hornik and Friedrich Leisch and David Meyer and Andreas Weingessel}, year = 2005, note = {R package version 1.5-7}, url = {http://CRAN.R-project.org/}, } @Article{cluster:Dimitriadou+Weingessel+Hornik:2002, author = {Evgenia Dimitriadou and Andreas Weingessel and Kurt Hornik}, title = {A Combination Scheme for Fuzzy Clustering}, journal = {International Journal of Pattern Recognition and Artificial Intelligence}, year = 2002, volume = 16, number = 7, pages = {901--912}, doi = {10.1142/S0218001402002052}, } @Manual{cluster:Dimitriadou:2005, title = {cclust: Convex Clustering Methods and Clustering Indexes}, author = {Evgenia Dimitriadou}, year = 2005, note = {R package version 0.6-12}, url = {http://CRAN.R-project.org/}, } @Article{cluster:Dudoit+Fridlyand:2002, author = {Sandrine 
Dudoit and Jane Fridlyand}, title = {A Prediction-based Resampling Method for Estimating the Number of Clusters in a Dataset}, journal = {Genome Biology}, year = 2002, volume = 3, number = 7, pages = {1--21}, url = {http://genomebiology.com/2002/3/7/resarch0036.1}, doi = {10.1186/gb-2002-3-7-research0036}, } @Article{cluster:Dudoit+Fridlyand:2003, author = {Sandrine Dudoit and Jane Fridlyand}, title = {Bagging to Improve the Accuracy of a Clustering Procedure}, journal = {Bioinformatics}, year = 2003, volume = 19, number = 9, pages = {1090--1099}, doi = {10.1093/bioinformatics/btg038}, } @InProceedings{cluster:Fern+Brodley:2004, author = {Xiaoli Zhang Fern and Carla E. Brodley}, title = {Solving Cluster Ensemble Problems by Bipartite Graph Partitioning}, booktitle = {ICML '04: Twenty-first International Conference on Machine Learning}, year = 2004, isbn = {1-58113-828-5}, location = {Banff, Alberta, Canada}, doi = {10.1145/1015330.1015414}, publisher = {ACM Press}, } @comment address = {New York, NY, USA}, @Book{cluster:Fiacco+McCormick:1968, author = {Anthony V. Fiacco and Garth P. McCormick}, title = {Nonlinear Programming: Sequential Unconstrained Minimization Techniques}, publisher = {John Willey \& Sons}, year = 1968, address = {New York}, } @Article{cluster:Forgy:1965, author = {Forgy, E. W.}, title = {Cluster Analysis of Multivariate Data: Efficiency vs Interpretability of Classifications}, journal = {Biometrics}, year = 1965, volume = 21, pages = {768--769}, } @Article{cluster:Fowlkes+Mallows:1983a, author = {Fowlkes, E. B. and Mallows, C. L.}, title = {A Method for Comparing Two Hierarchical Clusterings}, year = 1983, journal = {Journal of the American Statistical Association}, volume = 78, pages = {553--569}, keywords = {Similarity; Graphics}, doi = {10.1080/01621459.1983.10478008}, } @Article{cluster:Fowlkes+Mallows:1983b, author = {Fowlkes, E. B. and Mallows, C. 
L.}, title = {Reply to Comments on ``{A} Method for Comparing Two Hierarchical Clusterings''}, year = 1983, journal = {Journal of the American Statistical Association}, volume = 78, pages = {584--584}, } @Manual{cluster:Fraley+Raftery+Wehrens:2005, title = {mclust: Model-based Cluster Analysis}, author = {Chris Fraley and Adrian E. Raftery and Ron Wehrens}, year = 2005, note = {R package version 2.1-11}, url = {http://www.stat.washington.edu/mclust}, } @TechReport{cluster:Fraley+Raftery:2002, author = {Chris Fraley and Adrian E. Raftery}, title = {{MCLUST}: Software for Model-based Clustering, Discriminant Analysis, and Density Estimation}, institution = {Department of Statistics, University of Washington}, year = 2002, number = 415, month = {October}, url = {ftp://ftp.u.washington.edu/public/mclust/tr415.pdf}, } @Article{cluster:Fraley+Raftery:2003, author = {Chris Fraley and Adrian E. Raftery}, title = {Enhanced Model-based Clustering, Density Estimation, and Discriminant Analysis Software: {MCLUST}}, year = 2003, journal = {Journal of Classification}, volume = 20, number = 2, pages = {263--286}, keywords = {clustering software; Mixture models; Cluster analysis; supervised classification; unsupervised classification; software abstract}, doi = {10.1007/s00357-003-0015-3}, } @InProceedings{cluster:Fred+Jain:2002, author = {Ana L. N. Fred and Anil K. Jain}, title = {Data Clustering Using Evidence Accumulation}, booktitle = {Proceedings of the 16th International Conference on Pattern Recognition (ICPR 2002)}, pages = {276--280}, year = 2002, url = {http://citeseer.ist.psu.edu/fred02data.html}, } @Article{cluster:Friedman+Hastie+Tibshirani:2000, author = {Jerome Friedman and Travor Hastie and Robert Tibshirani}, title = {Additive Logistic Regression: A Statistical View of Boosting}, journal = {The Annals of Statistics}, year = 2000, volume = 28, number = 2, pages = {337--407}, doi = {10.1214/aos/1016218223}, } @Book{cluster:Garey+Johnson:1979, author = {M. R. 
Garey and D. S. Johnson}, title = {Computers and Intractability: A Guide to the Theory of {NP}-Completeness}, address = {San Francisco}, publisher = {W. H. Freeman}, year = 1979, } @Article{cluster:Gaul+Schader:1988, author = {Wolfgang Gaul and Manfred Schader}, title = {Clusterwise Aggregation of Relations}, journal = {Applied Stochastic Models and Data Analysis}, year = 1988, volume = 4, pages = {273--282}, doi = {10.1002/asm.3150040406}, } @Manual{cluster:Gentleman+Whalen:2005, author = {Robert Gentleman and Elizabeth Whalen}, title = {graph: A Package to Handle Graph Data Structures}, year = 2005, note = {R package version 1.5.9}, url = {http://www.bioconductor.org/}, } @Article{cluster:Gordon+Vichi:1998, author = {Gordon, A. D. and Vichi, M.}, title = {Partitions of Partitions}, year = 1998, journal = {Journal of Classification}, volume = 15, pages = {265--285}, keywords = {Classification}, doi = {10.1007/s003579900034}, } @Article{cluster:Gordon+Vichi:2001, author = {Gordon, A. D. and Vichi, M.}, title = {Fuzzy Partition Models for Fitting a Set of Partitions}, year = 2001, journal = {Psychometrika}, volume = 66, number = 2, pages = {229--248}, keywords = {Classification; Cluster analysis; consensus fuzzy partition; membership function; three-way data}, doi = {10.1007/BF02294837}, } @Article{cluster:Gordon:1996, author = {Gordon, A. D.}, title = {A Survey of Constrained Classification}, year = 1996, journal = {Computational Statistics \& Data Analysis}, volume = 21, pages = {17--29}, keywords = {Model selection}, doi = {10.1016/0167-9473(95)00005-4}, } @Book{cluster:Gordon:1999, author = {A. D. Gordon}, title = {Classification}, address = {Boca Raton, Florida}, publisher = {Chapman \& Hall/CRC}, year = 1999, pages = 256, edition = {2nd}, } @Article{cluster:Grundel+Oliveira+Pardalos:2005, author = {Don Grundel and Carlos A.S. Oliveira and Panos M. 
Pardalos and Eduardo Pasiliao}, title = {Asymptotic Results for Random Multidimensional Assignment Problems}, journal = {Computational Optimization and Applications}, year = 2005, volume = 31, number = 3, pages = {275--293}, pdf = {http://www.okstate.edu/ceat/iem/iepeople/oliveira/papers/asympt.pdf}, doi = {10.1007/s10589-005-3227-0}, } @Article{cluster:Guha+Rastogi+Shim:2000, author = {Sudipto Guha and Rajeev Rastogi and Kyuseok Shim}, title = {{ROCK}: A Robust Clustering Algorithm for Categorical Attributes}, journal = {Information Systems}, year = 2000, volume = 25, number = 5, pages = {345--366}, doi = {10.1016/S0306-4379(00)00022-3}, } @Article{cluster:Gusfield:2002, author = {Dan Gusfield}, title = {Partition-Distance: A Problem and Class of Perfect Graphs Arising in Clustering}, journal = {Information Processing Letters}, year = 2002, volume = 82, pages = {159--164}, doi = {10.1016/S0020-0190(01)00263-0}, } @Manual{cluster:Hansen:2005, title = {optmatch: Functions for Optimal Matching}, author = {Ben B. Hansen}, year = 2005, note = {R package version 0.1-3}, url = {http://www.stat.lsa.umich.edu/~bbh/optmatch.html}, } @Article{cluster:Hartigan+Wong:1979, author = {Hartigan, J. A. and Wong, M. 
A.}, title = {A $K$-Means Clustering Algorithm}, journal = {Applied Statistics}, year = 1979, volume = 28, pages = {100--108}, doi = {10.2307/2346830}, } @Article{cluster:Hoeting+Madigan+Raftery:1999, author = {Jennifer Hoeting and David Madigan and Adrian Raftery and Chris Volinsky}, title = {Bayesian Model Averaging: A Tutorial}, journal = {Statistical Science}, year = 1999, volume = 14, pages = {382--401}, doi = {10.1214/ss/1009212519}, } @Manual{cluster:Hornik+Hothorn+Karatzoglou:2006, title = {RWeka: {R/Weka} Interface}, author = {Kurt Hornik and Torsten Hothorn and Alexandros Karatzoglou}, year = 2006, note = {R package version 0.2-0}, } @InProceedings{cluster:Hornik:2005a, author = {Kurt Hornik}, title = {Cluster Ensembles}, booktitle = {Classification -- The Ubiquitous Challenge}, pages = {65--72}, year = 2005, editor = {Claus Weihs and Wolfgang Gaul}, publisher = {Springer-Verlag}, note = {Proceedings of the 28th Annual Conference of the Gesellschaft f{\"u}r Klassifikation e.V., University of Dortmund, March 9--11, 2004}, } @comment address = {Heidelberg}, @Article{cluster:Hornik:2005b, author = {Kurt Hornik}, title = {A {CLUE} for {CLUster Ensembles}}, year = 2005, journal = {Journal of Statistical Software}, volume = 14, number = 12, url = {http://www.jstatsoft.org/v14/i12/}, month = {September}, doi = {10.18637/jss.v014.i12}, } @Misc{cluster:Hubert+Arabie+Meulman:2004, author = {Lawrence Hubert and Phipps Arabie and Jacqueline Meulman}, title = {The Structural Representation of Proximity Matrices With {MATLAB}}, year = 2004, url = {http://cda.psych.uiuc.edu/srpm_mfiles}, } @Book{cluster:Hubert+Arabie+Meulman:2006, author = {Lawrence Hubert and Phipps Arabie and Jacqueline Meulman}, title = {The Structural Representation of Proximity Matrices With {MATLAB}}, publisher = {SIAM}, address = {Philadelphia}, year = 2006, } @Article{cluster:Hubert+Arabie:1985, author = {Hubert, Lawrence and Arabie, Phipps}, title = {Comparing Partitions}, year = 1985, journal 
= {Journal of Classification}, volume = 2, pages = {193--218}, keywords = {Agreement; Association measure; Consensus index}, doi = {10.1007/bf01908075}, } @Article{cluster:Hubert+Arabie:1994, author = {Hubert, Lawrence and Arabie, Phipps}, title = {The Analysis of Proximity Matrices through Sums of Matrices Having (anti-) {R}obinson Forms}, year = 1994, journal = {British Journal of Mathematical and Statistical Psychology}, volume = 47, pages = {1--40}, doi = {10.1111/j.2044-8317.1994.tb01023.x}, } @Article{cluster:Hubert+Arabie:1995, author = {Hubert, Lawrence and Arabie, Phipps}, title = {Iterative Projection Strategies for the Least Squares Fitting of Tree Structures to Proximity Data}, year = 1995, journal = {British Journal of Mathematical and Statistical Psychology}, volume = 48, pages = {281--317}, keywords = {Graph theory}, doi = {10.1111/j.2044-8317.1995.tb01065.x}, } @Article{cluster:Hubert+Baker:1978, author = {Hubert, Lawrence J. and Baker, Frank B.}, title = {Evaluating the Conformity of Sociometric Measurements}, year = 1978, journal = {Psychometrika}, volume = 43, pages = {31--42}, keywords = {Permutation test; Nonparametric test}, doi = {10.1007/BF02294087}, } @Article{cluster:Hutchinson:1989, author = {Hutchinson, J. Wesley}, title = {{NETSCAL}: {A} Network Scaling Algorithm for Nonsymmetric Proximity Data}, year = 1989, journal = {Psychometrika}, volume = 54, pages = {25--51}, keywords = {Similarity; Graph theory}, doi = {10.1007/BF02294447}, } @Article{cluster:Karatzoglou+Smola+Hornik:2004, title = {kernlab -- An {S4} Package for Kernel Methods in {R}}, author = {Alexandros Karatzoglou and Alex Smola and Kurt Hornik and Achim Zeileis}, journal = {Journal of Statistical Software}, year = 2004, volume = 11, number = 9, pages = {1--20}, url = {http://www.jstatsoft.org/v11/i09/}, doi = {10.18637/jss.v011.i09}, } @Article{cluster:Katz+Powell:1953, author = {L. Katz and J. H. 
Powell}, title = {A Proposed Index of the Conformity of one Sociometric Measurement to Another}, journal = {Psychometrika}, year = 1953, volume = 18, pages = {249--256}, doi = {10.1007/BF02289063}, } @Book{cluster:Kaufman+Rousseeuw:1990, author = {Kaufman, Leonard and Rousseeuw, Peter J.}, title = {Finding Groups in Data: An Introduction to Cluster Analysis}, year = 1990, pages = 342, publisher = {John Wiley \& Sons}, } @Article{cluster:Klauer+Carroll:1989, author = {Klauer, K. C. and Carroll, J. D.}, title = {A Mathematical Programming Approach to Fitting General Graphs}, year = 1989, journal = {Journal of Classification}, volume = 6, pages = {247--270}, keywords = {Multivariate analysis; Proximity data}, doi = {10.1007/BF01908602}, } @Article{cluster:Klauer+Carroll:1991, author = {Klauer, K. C. and Carroll, J. O.}, title = {A Comparison of Two Approaches to Fitting Directed Graphs to Nonsymmetric Proximity Measures}, year = 1991, journal = {Journal of Classification}, volume = 8, pages = {251--268}, keywords = {Clustering}, doi = {10.1007/BF02616242}, } @Article{cluster:Krieger+Green:1999, author = {Abba M. Krieger and Paul E. Green}, title = {A Generalized {Rand}-index Method for Consensus Clustering of Separate Partitions of the Same Data Base}, journal = {Journal of Classification}, year = 1999, volume = 16, pages = {63--89}, doi = {10.1007/s003579900043}, } @Article{cluster:Krivanek+Moravek:1986, author = {M. Krivanek and J. Moravek}, title = {{NP}-hard Problems in Hierarchical Tree Clustering}, journal = {Acta Informatica}, year = 1986, volume = 23, pages = {311--323}, doi = {10.1007/BF00289116}, } @InProceedings{cluster:Krivanek:1986, author = {Krivanek, Mirko}, title = {On the Computational Complexity of Clustering}, year = 1986, booktitle = {Data Analysis and Informatics 4}, editor = {Diday, E. and Escoufier, Y. and Lebart, L. and Pages, J. and Schektman, Y. 
and Tomassone, R.}, publisher = {Elsevier/North-Holland}, pages = {89--96}, } @comment address = {Amsterdam}, @Article{cluster:Lange+Roth+Braun:2004, author = {Tilman Lange and Volker Roth and Mikio L. Braun and Joachim M. Buhmann}, title = {Stability-Based Validation of Clustering Solutions}, journal = {Neural Computation}, year = 2004, volume = 16, number = 6, pages = {1299--1323}, doi = {10.1162/089976604773717621}, } @Manual{cluster:Leisch+Dimitriadou:2005, title = {mlbench: Machine Learning Benchmark Problems}, author = {Friedrich Leisch and Evgenia Dimitriadou}, year = 2005, note = {R package version 1.0-1}, url = {http://CRAN.R-project.org/}, } @TechReport{cluster:Leisch:1999, author = {Friedrich Leisch}, title = {Bagged Clustering}, institution = {SFB ``Adaptive Information Systems and Modeling in Economics and Management Science''}, year = 1999, type = {Working Paper}, number = 51, month = {August}, url = {http://www.ci.tuwien.ac.at/~leisch/papers/wp51.ps}, } @Article{cluster:Leisch:2004, title = {{FlexMix}: A General Framework for Finite Mixture Models and Latent Class Regression in {R}}, author = {Friedrich Leisch}, journal = {Journal of Statistical Software}, year = 2004, volume = 11, number = 8, url = {http://www.jstatsoft.org/v11/i08/}, doi = {10.18637/jss.v011.i08}, } @Manual{cluster:Leisch:2005, author = {Friedrich Leisch}, title = {flexclust: Flexible Cluster Algorithms}, note = {R package 0.7-0}, year = 2005, url = {http://CRAN.R-project.org/}, } @Article{cluster:Leisch:2006a, author = {Friedrich Leisch}, title = {A Toolbox for $K$-Centroids Cluster Analysis}, journal = {Computational Statistics and Data Analysis}, year = 2006, volume = 51, number = 2, pages = {526--544}, doi = {10.1016/j.csda.2005.10.006}, } @Unpublished{cluster:Lloyd:1957, author = {Lloyd, S. P.}, title = {Least Squares Quantization in {PCM}}, note = {Technical Note, Bell Laboratories}, year = 1957, } @Article{cluster:Lloyd:1982, author = {Lloyd, S. 
P.}, title = {Least Squares Quantization in {PCM}}, journal = {IEEE Transactions on Information Theory}, year = 1982, volume = 28, pages = {128--137}, doi = {10.1109/TIT.1982.1056489}, } @Article{cluster:Margush+Mcmorris:1981, author = {T. Margush and F. R. McMorris}, title = {Consensus $n$-Trees}, journal = {Bulletin of Mathematical Biology}, year = 1981, volume = 43, number = 2, pages = {239--244}, doi = {10.1007/BF02459446}, } @InProceedings{cluster:Meila:2003, author = {Marina Meila}, title = {Comparing Clusterings by the Variation of Information}, booktitle = {Learning Theory and Kernel Machines}, editor = {Bernhard Sch{\"o}lkopf and Manfred K. Warmuth}, series = {Lecture Notes in Computer Science}, publisher = {Springer-Verlag}, volume = 2777, year = 2003, pages = {173--187}, ee = {http://springerlink.metapress.com/openurl.asp?genre=article&issn=0302-9743&volume=2777&spage=173}, bibsource = {DBLP, http://dblp.uni-trier.de}, } @comment address = {Heidelberg}, @Article{cluster:Messatfa:1992, author = {Messatfa, H.}, title = {An Algorithm to Maximize the Agreement Between Partitions}, year = 1992, journal = {Journal of Classification}, volume = 9, pages = {5--15}, keywords = {Association; Contingency table}, doi = {10.1007/BF02618465}, } @Article{cluster:Miller+Nicely:1955, author = {G. A. Miller and P. E. Nicely}, title = {An Analysis of Perceptual Confusions Among some {English} Consonants}, journal = {Journal of the Acoustical Society of America}, year = 1955, volume = 27, pages = {338--352}, doi = {10.1121/1.1907526}, } @Book{cluster:Mirkin:1996, author = {Boris G. 
Mirkin}, title = {Mathematical Classification and Clustering}, year = 1996, pages = 428, publisher = {Kluwer Academic Publishers Group}, } @Article{cluster:Monti+Tamayo+Mesirov:2003, author = {Stefano Monti and Pablo Tamayo and Jill Mesirov and Todd Golub}, title = {Consensus Clustering: A Resampling-based Method for Class Discovery and Visualization of Gene Expression Microarray Data}, journal = {Machine Learning}, volume = 52, number = {1--2}, year = 2003, issn = {0885-6125}, pages = {91--118}, publisher = {Kluwer Academic Publishers}, address = {Hingham, MA, USA}, doi = {10.1023/A:1023949509487}, } @Article{cluster:Oliveira+Pardalos:2004, author = {Carlos A. S. Oliveira and Panos M. Pardalos}, title = {Randomized Parallel Algorithms for the Multidimensional Assignment Problem}, journal = {Applied Numerical Mathematics}, year = 2004, volume = 49, number = 1, pages = {117--133}, month = {April}, doi = {10.1016/j.apnum.2003.11.014}, } @Book{cluster:Papadimitriou+Steiglitz:1982, author = {Christos Papadimitriou and Kenneth Steiglitz}, title = {Combinatorial Optimization: Algorithms and Complexity}, publisher = {Prentice Hall}, year = 1982, address = {Englewood Cliffs}, } @Manual{cluster:R:2005, title = {R: A Language and Environment for Statistical Computing}, author = {{R Development Core Team}}, organization = {R Foundation for Statistical Computing}, address = {Vienna, Austria}, year = 2005, note = {{ISBN} 3-900051-07-0}, url = {http://www.R-project.org}, } @article{cluster:Rajski:1961, author = {C. Rajski}, title = {A Metric Space of Discrete Probability Distributions}, journal = {Information and Control}, year = 1961, volume = 4, number = 4, pages = {371--377}, doi = {10.1016/S0019-9958(61)80055-7}, } @Article{cluster:Rand:1971, author = {William M. 
Rand}, title = {Objective Criteria for the Evaluation of Clustering Methods}, journal = {Journal of the American Statistical Association}, year = 1971, volume = 66, number = 336, pages = {846--850}, keywords = {Pattern recognition}, doi = {10.2307/2284239}, } @Article{cluster:Rosenberg+Kim:1975, author = {S. Rosenberg and M. P. Kim}, title = {The Method of Sorting as a Data-Gathering Procedure in Multivariate Research}, journal = {Multivariate Behavioral Research}, year = 1975, volume = 10, pages = {489--502}, doi = {10.1207/s15327906mbr1004_7}, } @InCollection{cluster:Rosenberg:1982, author = {S. Rosenberg}, title = {The Method of Sorting in Multivariate Research with Applications Selected from Cognitive Psychology and Person Perception}, booktitle = {Multivariate Applications in the Social Sciences}, pages = {117--142}, address = {Hillsdale, New Jersey}, publisher = {Erlbaum}, year = 1982, editor = {N. Hirschberg and L. G. Humphreys}, } @InProceedings{cluster:Roth+Lange+Braun:2002, author = {Volker Roth and Tilman Lange and Mikio Braun and Joachim M. Buhmann}, title = {A Resampling Approach to Cluster Validation}, booktitle = {{COMPSTAT} 2002 -- Proceedings in Computational Statistics}, pages = {123--128}, year = 2002, editor = {Wolfgang H{\"a}rdle and Bernd R{\"o}nz}, publisher = {Physika Verlag}, note = {ISBN 3-7908-1517-9}, } @comment address = {Heidelberg, Germany}, @Manual{cluster:Rousseeuw+Struyf+Hubert:2005, title = {cluster: Functions for Clustering (by Rousseeuw et al.)}, author = {Peter Rousseeuw and Anja Struyf and Mia Hubert and Martin Maechler}, year = 2005, note = {R package version 1.9.8}, url = {http://CRAN.R-project.org/}, } @InCollection{cluster:Roux:1988, author = {M. Roux}, title = {Techniques of Approximation for Building Two Tree Structures}, booktitle = {Recent Developments in Clustering and Data Analysis}, pages = {151--170}, publisher = {Academic Press}, year = 1988, editor = {C. Hayashi and E. Diday and M. Jambu and N. 
Ohsumi}, address = {New York}, } @article{cluster:Rubin:1967, author = {Jerrold Rubin}, title = {Optimal Classification into Groups: An Approach for Solving the Taxonomy Problem}, journal = {Journal of Theoretical Biology}, year = 1967, volume = 15, number = 1, pages = {103--144}, doi = {10.1016/0022-5193(67)90046-X}, } @Article{cluster:Sato+Sato:1994, author = {M. Sato and Y. Sato}, title = {On a Multicriteria Fuzzy Clustering Method for 3-way Data}, journal = {International Journal of Uncertainty, Fuzziness and Knowledge-based Systems}, year = 1994, volume = 2, pages = {127--142}, doi = {10.1142/S0218488594000122}, } @Article{cluster:Smith:2000, author = {Smith, Thomas J.}, title = {${L}_1$ Optimization under Linear Inequality Constraints}, year = 2000, journal = {Journal of Classification}, volume = 17, number = 2, pages = {225--242}, keywords = {$L_1$-norm; Ultrametric; stuctural representation}, doi = {10.1007/s003570000020}, } @Article{cluster:Smith:2001, author = {Smith, Thomas J.}, title = {Constructing Ultrametric and Additive Trees Based on the ${L}_1$ Norm}, year = 2001, journal = {Journal of Classification}, volume = 18, number = 2, pages = {185--207}, keywords = {iteratively re-weighted iterative projection (IRIP); Combinatorial probability; explicit machine computation; Combinatorics; Trees; Graph theory; Linear regression; probabilistic Monte Carlo methods}, doi = {10.1007/s00357-001-0015-0}, } @Article{cluster:Sokal+Rohlf:1962, author = {R. R. Sokal and F. J. 
Rohlf}, title = {The Comparisons of Dendrograms by Objective Methods}, journal = {Taxon}, year = 1962, volume = 11, pages = {33--40}, doi = {10.2307/1217208}, } @Article{cluster:Strehl+Ghosh:2003a, author = {Alexander Strehl and Joydeep Ghosh}, title = {Cluster Ensembles -- {A} Knowledge Reuse Framework for Combining Multiple Partitions}, journal = {Journal of Machine Learning Research}, volume = 3, year = 2003, issn = {1533-7928}, pages = {583--617}, publisher = {MIT Press}, url = {http://www.jmlr.org/papers/volume3/strehl02a/strehl02a.pdf}, } @Article{cluster:Strehl+Ghosh:2003b, author = {Alexander Strehl and Joydeep Ghosh}, title = {Relationship-based Clustering and Visualization for High-Dimensional Data Mining}, journal = {{INFORMS} Journal on Computing}, year = 2003, volume = 15, issue = 2, pages = {208--230}, ISSN = {1526-5528}, doi = {10.1287/ijoc.15.2.208.14448}, } @Article{cluster:Struyf+Hubert+Rousseeuw:1996, author = {Anja Struyf and Mia Hubert and Peter Rousseeuw}, title = {Clustering in an Object-Oriented Environment}, journal = {Journal of Statistical Software}, year = 1996, volume = 1, number = 4, url = {http://www.jstatsoft.org/v01/i04/}, doi = {10.18637/jss.v001.i04}, } @Article{cluster:Tibshirani+Walther+Hastie:2001, author = {Tibshirani, Robert and Walther, Guenther and Hastie, Trevor}, title = {Estimating the Number of Clusters in a Data Set Via the Gap Statistic}, year = 2001, journal = {Journal of the Royal Statistical Society, Series B: Statistical Methodology}, volume = 63, number = 2, pages = {411--423}, keywords = {Clustering; groups; Hierarchy; $k$-means; Uniform distribution}, doi = {10.1111/1467-9868.00293}, } @Article{cluster:Tibshirani+Walther:2005, author = {Tibshirani, Robert and Walther, Guenther}, title = {Cluster Validation by Prediction Strength}, year = 2005, journal = {Journal of Computational and Graphical Statistics}, volume = 14, number = 3, pages = {511--528}, keywords = {number of clusters; prediction; Unsupervised 
learning}, doi = {10.1198/106186005X59243}, } @InProceedings{cluster:Topchy+Jain+Punch:2003, author = {A. Topchy and A. Jain and W. Punch}, title = {Combining Multiple Weak Clusterings}, booktitle = {Proceedings of the Third IEEE International Conference on Data Mining (ICDM'03)}, year = 2003, url = {http://citeseer.ist.psu.edu/topchy03combining.html}, } @Article{cluster:Vichi:1999, author = {Vichi, Maurizio}, title = {One-mode Classification of a Three-way Data Matrix}, year = 1999, journal = {Journal of Classification}, volume = 16, pages = {27--44}, keywords = {Cluster analysis}, doi = {10.1007/s003579900041}, } @Article{cluster:Wallace:1983, author = {Wallace, David L.}, title = {Comments on ``{A} Method for Comparing Two Hierarchical Clusterings''}, year = 1983, journal = {Journal of the American Statistical Association}, volume = 78, pages = {569--576}, doi = {10.2307/2288118}, } @Inproceedings{cluster:Zhou+Li+Zha:2005, author = {Ding Zhou and Jia Li and Hongyuan Zha}, title = {A New {Mallows} Distance Based Metric for Comparing Clusterings}, booktitle = {ICML '05: Proceedings of the 22nd International Conference on Machine Learning}, year = 2005, isbn = {1-59593-180-5}, pages = {1028--1035}, location = {Bonn, Germany}, doi = {10.1145/1102351.1102481}, publisher = {ACM Press}, address = {New York, NY, USA}, } %%% Local Variables: *** %%% bibtex-maintain-sorted-entries: t *** %%% End: *** clue/MD50000644000175100001440000001136313435050401011527 0ustar hornikuserseeda78f55be8251568dbdb21bbe15c30 *DESCRIPTION d9dfb1c0aac7ba6fae4e921fd35698a5 *NAMESPACE 281c7577f564a5acbecf046c5d1b8e64 *R/AAA.R 1956f4cd36476e3223d028f743f41cad *R/addtree.R 5ca7bd63f1ed85f171358c12fcf08e53 *R/agreement.R 61f26eec5666c409d3a7369f9cc0c99a *R/bag.R cd56914218fa9922aba0f76ff8b94909 *R/boot.R 74c617065ccf4f72df1353534f85da75 *R/classes.R c6a0f185f75ff0e903eb69c1e4d3a5b8 *R/consensus.R 6920384b112acd8962490a58131f04ab *R/dissimilarity.R c8a21520e911951d95d7ebd74e113265 *R/ensemble.R 
f4bbabdccc0b0dc31dbf14373ded5d11 *R/fuzziness.R 5999d867614d17cd53a043cbd99703c9 *R/hierarchy.R d67f188882f5aae752df482d3473fbd0 *R/lattice.R 285f76623c207f44342d7d1ca33d07e8 *R/lsap.R d10944e1875825af11bcea65571432fc *R/margin.R 6197a2bae482c876b955d9a418e2720a *R/medoid.R a3dccf831a311ed068578f817f38161e *R/membership.R 27369e3ebfc5ade758ebb2e49bb213fc *R/objects.R 4b8e8ee574a015903622e264e7560aa8 *R/partition.R 00c4dfcc2d401d810d70f246b7628f6b *R/pava.R 6131a8ffa97003764405701373a3bd48 *R/pclust.R 480eb9d4a5ec63da63f059511b5e4dd4 *R/predict.R 44b2e289ec1ed4ea042eccd8816080c5 *R/prototypes.R 5eabc5a234802b0f5a9f4c244ebe9aa9 *R/proximity.R f1a133ffc362372bc24ff24581964b1e *R/registration.R 69049e632bf64e2a11ed5b4f0c276570 *R/sumt.R 8cfa16132f28d693fcd03d396a678deb *R/tabulate.R f415cbecc8d1694bca125998db11a2ae *R/ultrametric.R b43e58c5919eab710f6d598830027845 *R/utilities.R 9c6964efc6762066184463e4c363a2d3 *R/validity.R 64223eaa3dbaa0962a96c54259480446 *build/partial.rdb a2a61cadb5c7323ee55147f6fc2c7b27 *build/vignette.rds 4564b854036a9e1b80eb4ce2aed3fd56 *data/CKME.rda f347cee2e21deedef9a094b961e0281b *data/Cassini.rda a2f10607433ff773185e6254695bd617 *data/GVME.rda bc9aadf92dd6031d5093bb94b4dffb41 *data/GVME_Consensus.rda 4b4f352feacbb79ba8a6f3acb6cc9705 *data/Kinship82.rda db83d3ca198efac51e78ec12977c1cdc *data/Kinship82_Consensus.rda 26fb94912f9b6fe3e6eb271da66ace07 *data/Phonemes.rda adf865dd114dae54c3db24060e42d4a5 *inst/CITATION 2fc643dc007499bced4eb7fde83f5625 *inst/doc/clue.R ec5243c6beee816b6e93e5cbda9f722a *inst/doc/clue.Rnw f2737b65f2eb2b948148a0634db38c05 *inst/doc/clue.pdf fc5c32ebcb85203fa533853fb18d18d3 *inst/po/en@quot/LC_MESSAGES/R-clue.mo 6b382525256a1a059e58ce576eff7106 *man/CKME.Rd 0d61696816866774419c7fda98f05d5f *man/Cassini.Rd b18fd96f640a1001be51feae0fe8e66d *man/GVME.Rd 1b6144d910daf97b691b547c3bcf2d51 *man/GVME_Consensus.Rd b5b6f767a686a52c78824712442fa8f5 *man/Kinship82.Rd 0b423e42f1f2cfba9b9d52e163c0abf8 *man/Kinship82_Consensus.Rd 
45e11496f6cac656142f7d6f01022de1 *man/Phonemes.Rd 50375af82b3d133984605c006831a07d *man/addtree.Rd d916f98ea50f33d35b6da465086c6706 *man/cl_agreement.Rd 6e6b84b94724a71dc1b025a0ba7503eb *man/cl_bag.Rd 5dca26838651ac5caca862e459b4920f *man/cl_boot.Rd d4081e72f3447131afc6a61d0af7f3d2 *man/cl_classes.Rd 488ccf099c041a9cdc00b973935b0f25 *man/cl_consensus.Rd 6e672adfe90c3da3a6ed084d610e1aeb *man/cl_dissimilarity.Rd 872ecad639c4ade222bba29873cb5465 *man/cl_ensemble.Rd 470ed7e017a8ac83fd359967ca29a3b5 *man/cl_fuzziness.Rd af83eebbfd3d600999346facaa4308d5 *man/cl_margin.Rd d4a61b7498b939cd372cf4b181378e11 *man/cl_medoid.Rd e26070e22290e167ec900cdeea0567ac *man/cl_membership.Rd 2ddf43cfa7b4809e1b211e2f89080d5c *man/cl_object_names.Rd 818d072c048b86741b39db9967dae2b2 *man/cl_pam.Rd d9486d40bc389102f8d0b5dbf4830b0c *man/cl_pclust.Rd 1eb04a9edb42f0c3ad50321b36475d6a *man/cl_predict.Rd 9e88e1119f27cc732e7b865471521f1f *man/cl_prototypes.Rd 931b58a667da8aab28dc441fd0c630f7 *man/cl_tabulate.Rd a79724c42916ad2db16343e6539e53b4 *man/cl_ultrametric.Rd 0c0e58d6062025f81c6c34ecf026a3e4 *man/cl_validity.Rd ffe8dcd2639eb402c485d2ae30ff7b55 *man/fit_ultrametric_target.Rd 3cbae2b63263993d541d67892e307696 *man/hierarchy.Rd 7175d60e57286b9735d26ff996592517 *man/kmedoids.Rd d1b212bcbf61720cc380d2aeb01c95e3 *man/l1_fit_ultrametric.Rd a4e59cdaf9fe676f62abff55f369346c *man/lattice.Rd c393ed09b16d0d60bab37dedd95731a2 *man/ls_fit_addtree.Rd 9b0469edf996e2e47e8d3bb00dbb5ea4 *man/ls_fit_sum_of_ultrametrics.Rd 4de00e99c87ae942624b33a73fc10bbd *man/ls_fit_ultrametric.Rd 115623ffe35fcef24928738206942374 *man/n_of_classes.Rd e4822d78d50337d163d881298c234bb1 *man/n_of_objects.Rd da27a64e2cd173b00f81361e10bcab81 *man/partition.Rd 1bc099d43549aa2805afe8e5c5912696 *man/pclust.Rd ebf265d86f6eac729e616a40ef94ac08 *man/solve_LSAP.Rd f9e7119e8be0492354b6605b81fb5ff1 *man/sumt.Rd 6985140eb3d61464356b3a4ad86ec71c *po/R-clue.pot c1cb790e0fd0e4d3f38106f10318b585 *src/assignment.c 914912fa18b8403e505ac5d8e1f4ee29 
*src/assignment.h e2f17003f4c681661ea31175a99503cf *src/clue.c 815d26eb16b0115f4a1e73c71c7d9402 *src/clue.h 1d83eaf5af08f3fc312d6dd0363e5c49 *src/init.c 76301856024f2491f73fee44641b5c86 *src/lsap.c 1db06fea8e5ba8856f5def041c22bf54 *src/trees.c ec5243c6beee816b6e93e5cbda9f722a *vignettes/clue.Rnw 7f7483cf85a37996403431f09cd1a1af *vignettes/cluster.bib clue/build/0000755000175100001440000000000013435045064012323 5ustar hornikusersclue/build/vignette.rds0000644000175100001440000000031113435045064014655 0ustar hornikusersb```b`feb`b2 1# 'H)M +Gt -.I-Rp+NMI-ƪ % M b fa(DXT%bZ]?tWxVaaqIY0AAn0Ez0?¹Ht&${+%$Q/nOEclue/build/partial.rdb0000644000175100001440000032404413435045060014453 0ustar hornikusers |Iv'  $HB $PA7N3{ z8ͪJtfAt5l-ʷ,+/J%{5:wmK>4Z˟em}#ggUT8{~_gQE^DxjmDa[|<=?pчVg{_Gsܔk<ot_:fg,F+_i[̥*r)'m+^sf*~o0{??țM_C~UTk>^x*U|61H &F͑Gr//.G;>6aiH*SFfn|խ[_zy43aWooN~’6{{[eѲ?F=/~1pGBt:\D}.R S3c^  "V?!M i'7Q;afusvfy;o;9jb f *#=Q}j *#FGorGR}mtCxcUq:?rYFX^\fE?dM1㘝;l\e%}_f1`7nyI^߰Z4b,eG} /5fA*!淯ZަK zԌVkNJ?9e|FST7TP;U9+b\ M[oj޴s~_.xyg ^}UeB2ML-8bV=$x4jNqWeX{\+k£ 5\,<15'qȐmSw# fmEܴ}uu< )9 C!="ˌmNʳx%|_nO'{t`)_oB K+@/-Z3;˽u_B&O5fٶokß_+V'[ 1?rqEE߆~ma|[}ǖOQHBb|ttlp}O&ZeN['z5V{\@93:VfVUqzP~ɟiec4~ؕ/IIch需Df:T3B5Bf&BraLT֓IU)cKZti1me`\yCLYymK韂̄⠭ qOeuMy۩ 6WOEC$R'hGxӊlİmU̐fO>M3hQN&öx9eub3ׅ.I2=܄{&Tkj]p/OgVIcﶛ˽lоnq2ލ7;1p~l`=#cɱk#sK~O^bo<^|pvѫ1o< iM,l/a+j`OzT)$ GmUi;>C#FIYVnW28p_owtU~UtW YG/`捑KI]A w_JIwoS3o"~&7EC .!ѧ))Nl+9gյ̧ަ}/N e@os Ղ- |tϹڿDc}@mu@> Aamo˘ކ](?@N-O`jǁymо|*m@AN[+`ґh`_~&<[wk5WBli iFvOHjb۱bY/.=#`҄gLn)䞟2TWBwVJ8ݹ^tiUU]+w,ʥj>h,P*$Y֠]휙SK :[C$e+x4'Qg=Apgt6[yjmo9n(E?#Rbxz?3چvJG`NѪb&OW/H~}ˁW-~m%r_;T+{VT@*>\2A/Xع'{]U>]M^yU[w:Vkq_;ZQn{vbI_ط 9'_7t"rROI;c6V6P_y~f^uYוVյVpv=+N!ʾ/ ,xN غֹmM`@RN6kx;9\>oyTʦ)䪼U:VV~R<3Pj:oUzݳxW[ۼ+Ϳ"Q7NU~of53Sv4Yɳ!q")jZUU`V:׫;V}%V4LTW鎞UC;vdNoC; ]/YOZi:;~f}nW"j 'wջJ[5m*5F;ĶV~;;%:oߕvH_8Vh6-lR՜\f5ms䶳5zo{.TcϾeS94Ōs9([i;_7ܦn+ކ] j׷7﵋/ ަ͘A&}+ys)wwio}jk?JY6@Uu岨^hlVLc.Ǧ(xPZ OkG2ߡ4{V6b}wK+f" YĕmcvFg!ίvjk/`@Vq*~W9OOrPz29BI&#7g? 
$7G ӵFGµ㈷cuxj7sBGR6ʕsGcU"ʔ#V.|/[^^ĀnM޻,~ݐ{nHTyqa\,w2?}%tt:ޥwcd~w7xyw@?c~;|j"?P+g`VnOV7Je*ժЃѐz3ѡ;C{o +;je]:Zx !6B~ p%`^_~=m!p2ķGm@X}{áfwś Ŀޘpf:[!Eo- _D~hڹ4&ϋ<[!. DbPh(oӵ6}W.pȫ;DɼW𫑎UN8 ;/jԱc3pg&ny_}ǷZQAߗ:U c%vVUPx$7FŘbU庻bL\4Z58x4fF1¸L՘nQtUyA5&V9K*$%k*U)zhA @i3BU'AT K2WEcɔ:;TZY9typ6%+jXT4U:SBm3nd}:"ԑr=b,ԮeUR)zAM3KEݮÂaֵb,"XA8T4/7i"R}DXQpJU> KEOʫ՚eff4?^2eZUjH g j^(?UG0$"rޖf]&@'+QJUAЃنUl%-}v^*]WK%`4DdEͶJy^ qCkjyY9!MFڪ\@k(CÖ8!(8 huqC<O> >Q|}PM `ˠ/7wxO,2LN*fޟ%-z2eGANJN.gdL `A+¥Ɇ. Ao z/E//Si>.GA6IcqK-# -r{:e=ԏ =nlVЭlǂvmE? SF -R _;tb!2|ARx1SǦA@$ff&TVAe1Fs{Ϭ`҂qZP$)]%n$Բ(kڢڡ+dWzzM.B|d<-5VZ42 I(M VHZl8b &^=sǙ csMX$tTP͂N%XT+ -*vQZjT90 x1*&:ir-MJe^Yrj5#^2cGAheu&v}sƎВP)M uφj-ﺇe Yt=*wv;]׀k@5 {Pz,j@ 6Ik4mէyթtG3vԀh ӺՀVlЂvZ9,ס Ǝ׀}l;j@+]?䉟2JyE {;%73U>7*MF'xQ`Q$W$T r1Caیc&K;y˵MsdC & XAl۠o+Z} զj`̰9YcoRKCAhY'NwoV??9[+n ϙtph5p R{-w Yr鸄89ģM1Bb6 kcI\֋=,d^p T';b?!F)bG>o[#GǮO^>539:::6jNڀ@ _~MN_)N^|U¨+[.auSOEFD nWkp 6&$1m7hfbSʢO)mVMσO/HiϮ~%_5c^ п!mXo o}o$!*̂܂ܖnnM4&/>㑐΃֗1Fw0$}X8Z\RJ|JV/IVHv+N!Ko)"eeLMdFY$UQK1 KǗPҊ^5;4hFIPBgusb(V|2n }r`o |B^)N[ҥ󎵶fl+o0 :,t*d-:QCݯ,ߴ,faDAMDJO˝XQޣ Y;c-h3VD֠HGT9LҩJ:mf)'ke%QK8Z*]-gɒ؍ ~A[9T!v,%oA/cMzu8TAxƫؿ!k2Yӥ!~Fiy$k 7UJ+sކy=v5*ݠKzX>w+FzO予bm5Vp #4$HiŠj-:Uv=, ?!MZU]e{=(d1jMznoڈx$նWU"qGUXFbcS)L(V_fקؚ53+Ҿf22[KeGz썜\ f[ۢJjJs|( #R=}gp)e(1pjzv`$ۓV,3x2wGAj^j\/ЁNM8f7U ̉8pSnKG+e# V{xNFx mJ kz賓TO8!Jx[%_޾?mn> ӤR1 GbQb244jl9#ty艱kS,Wy4Ǭ盖kųê_SF @)"c|<bd9=i:Y=gi{-غv!>ar aUy} Zji+<ڀG@-fOq]KW@xµ;//MQ^sfFBQ5:ʚfe3+ OcQpIE0 mV3Hvͺm yuL&{lL(Φ4o2_C9EDx%U``_bD$=ssiri0t ›oj +a*+Fzpa[~huk: Q|XjX[[[ɜثAe6DmD3wl&NӞZݚc /vxKиXblqB*41t\>>eJ+ ヾ3d='ǶFU@< {YVJa\PF.tAgoܰ$d ='A++]V3צmEU[D n2Z:}7/S愷!HY2eeqnQrMmu+Lg{aT8]x)}.Th*ԕMV#*`P ^A]iv-9FS9f=AyU22i,력ࢣ1 =gҐ|x?*k/^Z&/ ,y:9M^eTexV":c4u8Pm8*99ģ*ӌ,F>crKbd1aS AO7aa͌z=uT;(LP>jIH LNh뀱5]p\|LYRЮ,I^V 6[=b7|^UOR6dbXS"_i0IEiT2aыʢ.7KwEx VD6]'e EnoPf^8P39\dQXu-&P%&0S6g&!m'%VBìp:6VSVب<@xpzobgFkב$S{I+ƈSA ;~C< 3j#_*׮;I5FGxcSfF[?ڌI6ޒuQ|M{XWA w?S%?uoo:QWMړ^북eεF}<;@tD}A6뀜fσ63S_b__2_j #7:oMmQ+J^[+ivwkr-CtV-]*aYDHL4=Ntˤ6/(agTHL5qT}C̓d}Aڀ@^gº`UK,ߺVƸ-S}1zPQ,سߦn &j,ה 
~mW6gݞv&(`TJ{,!(?b-Lz|'jhhLع1lOƎ9Įxi@'gnDy~8TC=KQ5i-%З4.c @N/%B59*qtlPrOH)N7Xpk67u}ťj@E7@hm?ZTtKu 1kLkm$zIXq6 EʹNOKdFu>NBx9euW"3ׅ.I2=܄˧FuĪS:1:zmdnioRGO*boGQe& +.#p7Я4^Wߣ Miu 40:%#Q-[2UZfs&Ms/Xf[O¼~àXy QgA!v#ρT[͏?Zm@AN[+e٣? 9?2/+Mxv-j֯-m1/힐&G?!MX\ v=t Wo±6Hg a5YOX4o?&rW |AP#BGH΁7a+V)9qV9['k΀QV__Ii*9JHGUuV JG ?^vtljN= ʗKQ5݅EԲ}DLmIpvsznf73U `7l\O OyľC)oG$Ru=ƎZ9Z.QZFؿaE 3- >0.USYh\ymY.J]}Q˿r{('jʯ%nFB#!Nۧ)+*j Zqr]Q Ub!MJI>3~NY3syeOuzM)SUӠFx$U|5-$[.c6CqYHyݦ8YccA\"04: -*6D$BFXQ0RM juP@:\xC8Ӭ*sT)5}x^PG^Þ݊jڙ J*5@)Ys(_,,_ BLUM8U$[e)MSÉ9\lӰV| BkV_xZ%\. oY|-.z ήzAǰ`5M;12YaL BR>a6J+*5uؖX>fYF߹!MKgf7c^^}UY]GY!5ׂMaϔ3 Z3M.@M YUxaJn3;.xQ9߭;uZ7OΞ32q5csY6Pxte#s!j%?DmZVjc8@h ez/ m}Sα*њk]r)[6$Mn]GGwfV܌UqzP=ȍ8tqKR&(.WmVqj;572Z2-"F7AKY.+We{x -eþ9 G +aZIR'EH\BV2g3jM{#9y+4@<sjN{V n&khukOu>!Ƶhly^GIطr6IT[d$&kI,_Jɷt$,032FB_;Z-Y[='k ΈK|x>2W Z +Сohm>r =EȎf٬I$q'p1rL.2kNō-U b *:.z ZjZ :{!s>e8zNYML\OQBjR#L?x0~\N%fFMOǼW͒Rm>}/GzIe&KB)(3e_o7e/r-1'ARZD]!>4A2ի : #\̪1*gMc?/4z}Oiu@< !m֥RK oKi j~ R[#<)?%ml9~ /4ڵXQOc^Awml_P UWA*2}xSAoд~0JEIOH=D;=ƎcLʻǘ=jǘ&/qXqI1 ]VJ?cR;)S҉mT批ZM;t|(9:k_/Ta2"Zk 4$EY(Cw9+:huw瘪8ob=%GbFZasV~˲r s\ @ iFOrGRo+՛NvBEB"7oRO@?Pnn6J%ǙAwk3mbaw/hb{v+Ni!NqGYF#|m7^~taR;m=ᚇZGшuN>=;m`zAK.ru#h-!V'uլ E#cb tDZ3 fxᙄ@)e7Txa$h|SJ=`6 -9 {lF8 hѵRkL]$M+eWBs)žs2{e=FY 3SA,1 '$[+mjW}|jSHh0Yap dlyTp,CxyR\ƞYnPpoj^v& hċlx%Kr%<:S{7斐-=\&A'?#ʦwlb8 Znf p!nA\ŜW!v׀ y4+TFX.Vc|4XD]FA?4G 4kg֜+= wt.> ^breMr<8SGIc>΀1}LorG_) N[LF5 jPfzv9_^P#+P Ԑ8޳ێs 5( 9V="u-`SVɱ!faPIFIgcǴ)0 ER .V_% >x#((Q++L?d|v\c|}q }PP>!z7)i3ʇJ㜿-۳tL$^*ڴuFxDA7`|"vC%M}O,!T6w7Aơ 1uוIT#1f735[E}K 8xm+Į(y=FYz[zWQ:g]I T]cnxK[9#iTš~+7rJ!|?,I6hC}K[_Yp+QYDےS۟L O$!2e6%>ÎcS~B?(T?:m*a'c0n=pyi&?9ϦΗ!*ŋS2Xy?vLW9uLHx&vdHPV_<\+e35ʖ'?<]\Rm8V^mXXZ>e /@Bl Y=ö'V)YC<=+۹G@0yђJB//Z(3 OrxYe Ī7 3ur,WL3l.Z>d`-U9'EE,LQ6Dfըp42x_]RIQl=#( wʈnjښg㮢>uL9S.iRہAt`c,FXVp&.@K|[Q̮ h,LCl_X'Ih5'$ɡDZ}K]5_ Ǝ E]{)e˺ME_+ v&Yq/ysoS2gg b&8],eɯ@`kΥh]\UDν]}GYΊ~5,.>huwLhXK]d yg…*NxԲ@j9!:q;k%<,#I+WcBr**xpZ6NV%8 3|Z}d`qcA޵ʹSbܺy5k2tN'·~qsS>? 
]lstr3I4aD6b5sf>/$ { \g.:P֙$`֙ĭuf{GL&Ygi`י$muB8=u&ro2hyME낼٫Q`x'VhUE$c{)KZr J'U%sCTk:쵵▓zQs͚j%tC< PE w^~0=_/oIeC]a+ k9;+:8Q<h bDBVqђl>nb!&Ā3sk Nmqa =w1R`\.yt6&[¥A\B.ӲilAKՆ7bC= oF6bxFAL캀*Zb$7Q<{hRN/H'j`$ `Q_.-pi5a`//z46_y[v9{{' -0WUȣT۾$aƏIh)_M?~]pL|$Xcd(:)(CXCCc-+ZɐLǁYzDu7bvxzMUO?[*C"^>ٝrU.a떓(3$_AW <GSw^@Ŋk>n!+H^tqu+o*D"-w 4#:^@ 6)5sx\W}Ļ\vf8TZk׌R+28"qJ4-v8zPYiAW`TQ$4@C3*kUxUIgAKŤ+RG?|)@ɄGAK]#Fx i x`s~CdR )..G@i-Ulj.Zd0r&70gK5.ְU֍P;;.̳e(?Ru> uʬLҨ> m>i 8ۼmgb?afO ?,P+Ɇ*ݷ*6.`Q|[r9QgÄeTG<D< ZiյpMO#46rlraZ}?Y梛{E%LBEd&CrOM؝PV>_< hro> $^7'VB[l%k>bܓwh 8'mL<<ZjcH(C:@˝|?K/)q + tbH^[z"h+$qRl~kx;gkvʥ+~k!jU$ipBF7aoϘ P-qD:AKЩsz9<H=7a⩷& $h[@EQ0b#SFGZ8I4 G@4ˌ'AKE?4y$M^jQLoV?`H9sM>*5:t6stĭocӔwfh<;=3)IR#Z<؏pGQ x\2t%3QGb3:7C40>Bf^$8x<a x̓$FʕUH;`@!}LhmY3q'vKxfh*cjZ79l'O즀<}XIq`躟WQELVa $y6 n/~IY=R q(hO/2L}!"8Խ"l@4~EsGQV%T*[AjBJRZ骰6n+=!FFGGQƖP&ZATlö\M 1igUsT3aYZ 'nAzFڼKiܭ*Qa-C)iP 7RzeB&!=abvWVl"qq~:G W_!qD8<ZzseHs`k4p]4i< m8Ճ£Q^|ڷPiLGYHQl[h B*c-37 zx+lOuVĮ x|e/K,=(8IR%BqQ}CrYY$(hh)-ijVaoQt#)@or,ahT)O:JpX퉧C /ݜ^}YY7]Y ;!pv u*ZO<ͼ'%Rq~a]MV/;x؏m>=_< -{&c1[YRBCܺUL2x}wCJ “&&TW԰HJ>p͜Jbn{jf. Oa&ʾdJAK'6YFB1!'nSգT? Zj^,B};^-WTx 3QՂphe;4[AjB3{u=6;hXft0KOW0_43Â;L}%LxeMooT瑱6ܒ xAmc%ݾ]C.˹ӒN(Җ&Kf$E5ꑙٰHn_5hxvExZt=$L?h}1h4^tmr&.*X {PϰŦ 4ᴵ*eK:pDCj1+q uf|껽eB.7o9~GŴ耶GG5z^Og`[,j-@49A[ erjڡTIQXJon@"b^#p8 Z*LyFNJ@)4x+֦,jW\m6 kw"a[ Tn(..CG춵B[ ϟ>\BT'쑕rsUd nަfFE : 㗇i"霠Tߍ66[͍h=5$j^dícIǰ= < Z.O( 0Z!#{Jʍa> Zny*<!4| дT2tf|$nz˛m~ݼd7nz8;՘l}N%v4/zyKF=yy^P5N\:'4~+wo4ԇ#[ߡu`ESDxO壖 /Jn\ wtU~H>9_W?<(c~e };iX5,W|e`o:2:6=>6955jT ۀ@_qʾP~PjQ ^+ig76 -jt]HT7WkWt쏀#Mc?B}A$g@6;뀬J|9%/ڂWxjׁwHͨW ]п+m_@H[tUV]nM?҄0 /4&/`0 Ϙoy-f]sն)qYF8z@YW|'}  c՟oBcU4lxswMŇ?s6-6$mB/*QUK-\6m!9u^L ְ&܋?jj1R$X3^Y]Oצb^ftO>)=:5WΗ\|%4 |+% {]} MiuE? LNHT(<ArpչֈA?n9Bhh'a^?a?ͼT"? ,6Į~9Пj /6]d絵B͠6 m? 9?2/+Mxv-j֯-m1/힐&Qq{H֟1y⧌GyS[~X8Rz8ǒa=hZblژx CBp]^ J,rmJH6eJB{)a⡶ -!T[7!o60oصLgu[g8g4,o63o/-_>5*9>1V5ÀJm@=DÀ_aQ7 %\o_%1QWo~SFzSA-#pS@+$1sKTU6prOe!IER 1?kT[)RC1Q@&Yi#m;3J]_6ƴ~ _A51'}ߢtBM8 >l r>d5gd(8%Q/A8+ zJlΦC{Mnw .nXŬ`!yGe']t۩ C.] 
~ Fy۩POM :@KŠc#kU׌pbxYW@3ꈉ<>9q`O OM w]d8 z ]/j P64FsKc73:=b5~]_-"sJ6W;A w_1v~%QsEl@%jBlp@\& f?E۪Oy)v93v \b+ i`ܿs{4/m?EcL)0shb_&#<wLxBUFx{0ѵՌa6˞札^k-kiTtٹukp{g,/͌r$8rY௃7Z@HS|h6^LYEj%$:3DŽ]pp'#G)Et&&19ģM1Bb7 Z?s a6$݌oBc_n՞]s3,_ߖ~R-֗<dss3(o!VDv% 1'-P:3(Az !# AeynhlHd珨/!,/AJ«nڕ!~2$#7]Wf>(ųXHeeXQfŮs f&#> D&Z/VIdž%|CCT=5 <۾ذ W_!>h?>o7܋@( :\:ᅒQ|>8&Gt|#M0D~\xMxmwjҥW3$XW!3 eY̐Ɏٿh,Qr T37kە9SCxQeѦkLz(l?vJjq[NV_M+“ ɥWX77L9%=S2We۹b=7ə9`/ +6+od̵r F5eSk{q%:4AZ!G8#Փ;v| m$}2[~v~d#_5JiJgJ6!v$e:H̍cT2jMWbޘUzaǎ>K9iߧ+5e)~Wt' NԉߐEmM+QnTD E-5Hi<ZirݻVjsv+ڡMb!M*3iFjۿelZπ>QEy;kڠ³6^E8]UD<*ګ*lty'B*aU<:rAi'gfodE_%\:jdn2/ٚd})Bc/<+?;O Ak7W3R΁U;V)8j} ÉXUݢ͕;]Vn?376>}_Y4F[IA9|چ;oec lσ><ݑ_Oûav9 :9T뎕L9fֺ!OYZgfN]{@߫}M΂V훾3!7OPO~*!V'jCF"}LƜA!ݫl0#,B&?8Z9 {zQY YP(g$}0KOG!g@ M6`[Z(bC< s#DZNCTgJ$rÖ&xd qbKFyüM+emG=]T(p Ns9%r KĶQx :v8|FiF4ʒ{h3͂46i;p*ڤy)-q1Y.-L{a/tNX|z1hȵLa>s0_O+;(!\'t67x,:'qKNN8A>UqLn9 b$OA݄,\-?OqU[hz+ŅTQi`OFGQMԘqѹG؃|yp*ݣ0̟_,Ix]m~HFw#Kr,qގ,Z}[Z\-5VwWeR}N[9T Uq3EXQR>ϓ梴ǵc&sM{lY?em >YOQue p1sDbl9v/V-;3W:Kwn(7Uq%KxNx> cY {ӓج;78^b{X,!>?\(UN5|~ 9Np;qVI?yY џǢYV$I9eqlGXJ! 
K6!C2U8TI%$ \7)K즀A <Ę<>݄_POa/9q fy ǁ &hJ;hJ1$vA}Rg(ɖ?9Hl1x8Z={^B^V.RĦyH欭L/󂗴L*y]_6tz^,~w6_fi}L j I6ZkHxWn;!Hbd:WyA~:xGeDM\n3 H c N  Ow*;-w 'b  xԪ]5F':L[ŝ!~etp 0F׶B 8(MV5Dcֆ*OހP_~ U<*F>PF B!f>!|<{/@xz.pa=,UT~sv~/VP} nS?=X3C0q4ǨޒC @!v?^ esinNnl3 Play۶2=y'iysR0}1 #MS .A=S1riD0  gpu5dT[=-7t x Lrİ xx6% peR,16333HAgA'xRCPI!ryܻt] ْ R gojVj&2*諲] XNR6^B.:YгMBuD<ZDu16A"Cn~JʛvZ ^39 KU"s A-vq(әJLw_NIQo_%?[γֿJ\3} .+@l۠o7aX7z lQLInBL^нИdu1]D[jdTsZ*\U5c309cǭXu2'qxdfΌ$y8&_!{83+'suM۹owwƍG''8?60~Ѥ߮#ej~}tz;vxGƼhQri3$|Ɓ/~y[tjQ\tADR˺C#yDg~~M!]Y21SZ^6##>R J.;v- ~gӷM ړRDπ۠e$:uU2zJz8jT @p15AtϐHWh?YV:16>1)~tTw!J7=Ab΋K&aM%T*5@۸p-Q;jw :qt":H% i/{1W¢!x]=j"TH SRPM/at2ݜ@Re zh Y ,[\vv@"7W]aY?"OW\ӕ ClBn +4 QyOX`N"Ve[y{P@ԽWyNayWTk{_&{@4^+xzpkxmC ̽*w׼DLÚ]~4g&Q7nH'I9nAE#!>@VtK*_e?Q!8ģI1^+0rM7a[ (U ͍1F+sGRaIdoly>YȇJet2 WB:^J|[+H7c%KqFC'a7nmK#b('V,1-U9,w9.rey{'ŗНgh܍хkSaGAUq(&~Ê%[E|+<"!x񇁑+gO8jM#\91 HTǜ^Ow9I<'$DKnBzWq.jm qƬJ0 ,'*hNTVnӠ5~M=pP:mm3!6j !̇®`FfHN+'*qW:9+B0a[D>2[#\n \m=.fHIppj;#r9|j­gik=  *bR7ӗ1c2"Pif+k,c0AԤ;(vLjgb8[V,ף`{a1AGi缼L*nOZIAٵ /4chX*^^XB.G 7ҭ;J==ƽ%d)BF1N) 6$Dtqc/3#$%hmbbj);-?=8;ѡJA }C~B҃1WmF}Zz7l5-=: [zQyydBS}no@Yf(:+'@/=Q|dS+U;_dj31+8j OnZH/{|{H$LfQ,1 g-תswfi+17v^p|JUJn!; =V9 8st61sxm6\51^-XH $ՐQn0Xi9--*휿5E3UCrSMal9N7Yd}?6 wqK.k^!؀p xJ0OoWJ̥:!d<(h= D|ng`t:KBcǀ[cO%46!hlx =΋P!s}$M}0c5h67H&l< C `E%rs6mʂG;tt%9ģ,22C5mpОNjq멸$Z(hA\,&C캀@KՍJ(%ϭc cJh6h˪#IB}:OƬM8K9ģ jP& B*cTNZɡp]\R(Է,3?k+>5!SD"B^q> ܒeg8J(*h5Z3RI14@aQpeR{xhDjq ΂mn@i( : pĥMZ-Ϫ!U3EM+6_nCJ$"vxzeV!iOw311jgG4m:FNFi=4 U~Ѿ LG΢1}!l؞5¾^4@~/:d*>Vэ$! 
cP}@%KU~큢N[yg~IoM2I+yA^YfsZBW5>m7H>4)$j]H;6AQ+{-+@, Ƥ[k"7>~k֧hL L_VNx4& jG="'M7.)l Ĉ>'QWh+9[.KD}IxG?$tV߲Rs0K<=2J3zLl:w SչƤA 4k(#5tLw:Y8HC nF ZqctWHmcXa=:CЁS|yľKv;vSaEYx΁^V~]Z]b&Ew?]xqcoݏ2?jVk%gN4`ڰХAmui^NҞTo蘽^N׮3,Cij/kyimF=G׭lCv;#nb}"!UСTҏ_p{ }A}lZ#rg^a' EhRS> C15*Ε\{-RIXwFkP28%+׬?ٛ*kK-%Q_857' P"ᲡKnOuzچT0IniZRlsrݰ)|v{b6Yd+:1 :cXyRb3K@וik\|AthSlh;ضFw7+O>B=㋌9<'氻 $[2.`/h)?<ڃ+kp;KJx\`պ^T/j$::6XqVWԒH}@Ztue%qsxhD,Mg ]Ɖhw4oOmu7BneB{&9acQAC!3?iVؠ"QeNi8 Z* )R Ru0`$4pe&gu90 r3}Q]Ew?GxtJWT/C0yc rZ[-ic N 56\"MˠCSB*݅{"<Zj:B=ZBg1'AKτ+C`B-w&IpTX$vׁs13?\>I6` =18نԘFꉙ6ZEKȘ[I2nl`܌ufwd.krvOYٱ3ѷxHxеڙdeΤΟ}#D&}ODA'$Anx'Tb9C>n펛-FRmqb~zِ 6\a!wl]'$ϏĽ@Z$M:fc۴Rv4HJQ" _I7(WeØ2i뮵n+g gb[]4ɼ%% 4#Q(1XK TCt)73(:|3"<l'Y`sIq§)oAxʵ94Z tuϡ}9&3 ܬ}vmf@=b4|zB&2Kh=F.#v]@K*Ϸ::[hK`/#Qi}[h.V9ۍz !qpؘĭ|KHC%u$x!KH :6ph%u:ph%u$<.є.qE8;`a4?q;M>r8k4,;-SkNX3|aC0Xu]aI8 zX-qM 5 l`=b.ͫBKF3|l*ZmUUzURgKFϖ|I2dA"v]JȊd=CȊaHHl`Ȋ61dE9 }}⡙0ƪHh ΁ֱUQ*VE1**YRd;|ŪH1"nCq*aXX՘nn"nׁa!.V:`UpR8X5(c0?f45X?u`B57*E^}T|Qf&8!@O@Lzukk`K"$l*ZmUSh[OCU$Z*bHGnIl`61PE9Lh ΁VwU T88lLu߯*}"q8lL ǍH ,PE"6.PEܮ-%U$<.**@8hjρu@C77*G~}T|Af&~ P!&aݲU$$*b63PE TaUTk&B6)PhUa׷\M$[z]P;Q92-UyKHtxemٷb\']m8! 
Uc$ &*<߰ݴhC,AyPN澱1UZ1j[`|T4%x zx `pTĹ!:uw'wpZݱ TCF]CB'1[Rp c$!$I횓yDZ ko x]eK2xl ")nV%CS'I/dE̎gAuO>lgB?q؛OAI>ht-H9s Y(i<&ɉx@5H"Խ mFP=NP!S "e+|qk?D?."EkCY9!:Ϩĭ_YUxUi#*V^| 8 ZjLΓI5<|G.eY9QIJїF5Ze/ ׏Vȼ)UrYj#gQ";Uj@n˰k Q]=*Ğ Zw8i4pz/*\d> ׍9"FfFxAw7`˻ A@ˠUNo2#nσI'@OuCIr !LAOkI GF)M@kY/ ]r p6k5 +fĮx&X<6->4B/O[&}:$]&NhoUdzds8 Z*QI\1IIuW2*O鈢+4\>צm#=k~ "΀ncz:!mi&sFSM,>n9^lktmNڴ֊ZJnt#{~ nMSХcK;׉1Ѿ9 :ҘY(=k}_)ľKh?}}ΊbF;oQK;[Aʃ?]Ho;6+.LLe ]e^8Q"w`q'=PN8W97$s5$nKQ&rtP8nICHYǢiHWX3Y0*|K]gelo+6®<5gH KFzh HF@+m?pB!&&H6hҞڈ#9SeH 5 }~MG`KjlE'8a~b*ךR<x(ǡ[F`(RpLI֟2͌$Cy:;W]WV%:?%.^V~ɨOC6tFeN=p'lm:6M<*O<>B=M2p9jy(,Vmg)v@tIBς>['^|TI&9AKm\j;A GAjSLp TRiDށh&YƁwAUK:CeZnZevQp(N<:Ğ .pH!Rq;-DʌU)A/jX$Wxz y24UdK_LPz"dfm x\}-KI[yFW2 \\JDʱs);-Qga@r]KW]ҡW/ 8]JÏxaUmE_:B=s*&_1P<r8z5>V# E'dc_A6puNj;6qu l6ou yPQvuNB:'v]~Cuu~M}zn!7^\; 9iEbĻAsuwԉ[p􀲶.փJfE$<\,ʐ!oJkfpJu~ E/vheHsO4&9s.^sP2@bρHE E-{)c+کIAweskGIf~ǝzN5~VB%+#^Z3U?m]pWNAfNOf{=2s¡o@/^2\TJ̥,uӴU'g9Sg]s$-;<3!:wxNV?m3X] B wg Z}E}/}B=d DqsPjF;͡,úϑ½Knf$l FKl׵چD%&T!O@c'k*2vڒ$-UPlJ v]ӠO˰ TE./C 7@7+k)F=μx>Xu}J*=z-"Z Ae)m%\:\QF9ZvE].VL_e]J8/a/6> {!h,CDeW_q[B_AdUtGA뫸}[7h+rgg7nH3gMNSwIš=]vD8I0\>,~ ((&ݫh  txٺ q;ݯ*Ue@3pW.;[3 IǀwAmsmhrd'kv 7>JhEib 4҈-@]K:?{*H+|8Bܮ@)/tm3/Z#yƁsOwX+2߅C]vRf4bPO.o]aPK~CR憕vj n͑Hf1%@SV9? ?a)Mv?*Owh-2߫.XB'v]@~wH"|2L/s$-5TZ?q<LVe!iFw@i҂?:~p='5HsFNjF%-;]U8i@ׄgpbCIQ 0 i&\zVx2~;h--"c jZWyoQҶ BOEb- e=MmM;GeQ #yVB6#Er9y <4 z踮$f bLm$w gw4=H{MTI^`ԚX+=U#ʪ;^}%jW$(p ˬ#,hl0+:xl; >F.BEJw oG FBX<\POF=k5l sCL? 
OV_`2I(-&9E@O)+pDkDeʒq MyFޕ5YqM^aճ³M[V*?:/K@űd:S8q?ClKFG٦`蛄g@{ nGgA oZn2*~``"q&9l̘EFƬ`^[oiL©$p$EU$Q~CjRdpV .~'NWw.䳛#^^\|#>6aiڈXFзCMU+kUj_7X^6##w>A }wW@vAYd{mRBuSOEl$o~[F|JYu-iwߋ.&~+o: I7Ik$v $컀?ǴXĔfeVmH7ļ~ A-O{vm75T/h_ToHk=$Z7>Puro0 +k4r13-Í17܁Ғ iBMl;V5+UK>#Ox$ 88}C[Z*,组W'QBw4bivjªcaS$U`R=4|:i<̳๐h2<9>>3<<>y]$nYtݳ]14w\k#sK~O^b5crg O o|=&5Wɕ\|0{*QW@R_% ߔQW蔌D-[2 I砟K7I* _>?Ͷ: y A6R (?@N-O`v!Ɓ'~ȑ"75ƿizpF+"mpHP4H|{rpu2@ :!݀zN-HO tTs%sifh4'CZK9)=2Nn-˹ s3k%dF :"<8uH0z춧${IbP4gz%x+OSR9Rp;|u+>5(TH%*N>\!v._-aey$ef؜dm i;X76vB!vmP8V|_^ iɼ۪'BژШb7 i G4ic"$^H67_FoܛXqݐFowڹ܋; JhXev뚶!(b;v/Q;v/\*!$1S+YslPlw(W;hW?!_l0r !h x?_~ر!1ڽ(QO"VPd+Q7Ɓ'~?[(mwTڵ'J5^-J8 {4pDj-bUq(򲿄Eh슡+dVmLed-NK </n_ع1l;'BZ.7mL1b i슥IRaE Zg㌶$$; T*B֯VfnD؃#+ D;c{U³-gTx$'%Mo/,֊C{  z 2BřufVFb,?] 9!*=?ZYy&[n$bB}GwIyT-3i쬝1]_b@lQ=,91m a<( ̎[wrSI(ehm6pqc!y; OTvP:'&3]Xoqo'^qTL5F@I3Lz/)V P+1 {*ǘ53[s,"Aا!,$&1I: bdž@A)6.SFDe(]\4_GJW$J;]wƻOWOs舫yU 4:Vå7:$%ԏOآU-zhKE?,زغgnQMÒ9'&:K}a8[ax yބА8hX AX[p( ڇfʁDm-ސ!aSCPChv^7 3ykY |rꛏTK?#џE[{P P$2,@}F P"EQﵰrv~y >fKפ CG.Jhݎ2|ҳtbL\%Dfxv**W̦()'͖/$)@4$r ?a  /? Է-.}걂0&أo,d !19KxB}ԡj;U{tȸ̒UI & k5&uŞ+Y.z{ dƯ b>j#o<炫jv֟3;ѣxM1uO(q]h}qjn/ HkIK՚xj~{4ע%ߣӅ`"qZW QN/W 7ĕIɵb "8&Da@M g!l`M~} ,Tύ蓐#[+xB}z:oBb) > 3%Y\;N I}dԨ+ѯ;VeNtÕSH8O[R^r%KufKwW*BN]/kb뿟J TVrf6^T(Oc HQgJ.`7nf\ʡ8T aЇb!MJQ| >qR[3cU\_ 3F+ƊGrFxͰ9+eJ.嚮S>4iT@_Ȁ55^w:x$uZ!- ,f܁QB7X~׍ЩCe ȼ6J+R8Ԋu`%my),(#4yC -9 {l@]W4nԆ隩 9'mZ){mH:끌W۱Qj=% GT9#B @Yy#o>r).ݰ %K2S4?9+WK2vhye-_8/(T3me|jsݴs^Q hcˋ|f:E 5*KdEK xBtP5͠%dKA9EIO4p􈲩tRNf,XyCn0u.߯\kAt8!ˠ5 q;QtFãKhx؟OF P(@bt5X9\Y+v(\^Wqo]ev ֐8޷ێs5( 9Vb$ )R qF>iebf9oY|PE;0t8zLY4qOT*ȎgUMʇwe)t]w ܂t(85h9ZW&PnnBef2uFNx}eh0SX'pnڨDrO %߬Бu~J,uG@4qArLs:^a#R^u h}+R%/uϨtľCfAL`\7g? 
(%qqZIѽlxOZjF82Y3a Il#C캀LG5Ga&ZϑyPӑ0s5kU o3 $LMw4:-$3$CkP߼"EFeà7uH z,':)tv=_PϙgMX;7=?2Lz?P8 zZXp8aZ+eGWFa||yOmV`˂5 6 v݇k-Dڹ"jT$]=O}zbG14Qߧ4 {$_t_yb< Zj{%6ݴ33%5f{#k-΂+MEY<Σ 5J+O(OIB}<Ҩ~Rщ)欔^Zo3767&H9TA?)+ŽӐ}/'f3y͙ uph,wfĪ .L{\+eq) n튢] ] cKA|Yj"U_"ڠ6 ԗ7?qmMu5ň94}M(ismOX7g#"p=^нq]8hcU F&=Baw<>PyaE-d> o*Zh+w;Ê-$]w_"cG4ŗzsih 9 喑5L553!:p UqgiZ5+fZ=xY5sޚf/Fz@ue016A6m;=Xw["n+(k/u5*+J,FM/D߅k5z}㮬x"i*mۓpz\x;΢ ;Wz$[[@캀<֠IV UJ*Y&۠f5L ;e޷`e1qA.ܥrUxt0rOT<=-C=+w,pGԆp h w%8Db8Dp\PLN4;[SP_B0 Zj3%X͍#V9'7:M`iMa&F=RS[L:RHCF!CBW$ /MK>R+\bC<^@@B4UB J fGDA$Z(f9x ^ݳtcpK].Rz`oU4åb<*wcIUvrAў*w.ˊ9ͺm̵qߙ* mDp$ x$h}R%"=$7Kr斛ǬEb31l;@RHk < >mKh8FP_}]9m*rX94B~AuD ^}YB%O#QVȉGjxz"6ͅrh$KR|}R-Q[~#e٨ָ + Q +bDm 6vA!]+:Dʲ3[&DMD> |Z=i k |0'朠hoppڢO}J#*ʞD!̐ (튴$U(3>wt!#VKku}_؄\&YW:f4kFy၆9l@C:&c6tF2Izx]]KqnXCl|Go|MWNДY)F]I^^ ^TG;ᡩy%$-u+9sZ`tݼp !ˠYv3WQ||9k9Acc̋W|sEHIԨW$< _hz'2O%-H$PPLH%SuшpݲTY>-&yVBKOxvJKf￿'VL,&'nG@K_e/0br|]hQ~x0$%Ի?; Z.TXN&cDH@ȶF&8[F=΀?=rc =CI1 |Z},yn5e +||/4eɶ\;+6)|(+hTorq '0m(4L Z3n|xgPP5 H/>{A2|޵W 8`>ү϶o$ZhGZYkR>a@w*!}¨[ma@t&Dh;1}P_m_}Q)rl4x:'!0amXIs--\?ڰSD;5I5\33Dl{i51n8 Zjbn Nt$pz4'@kgRSuQy`0MSLZ۬F'·> [+R2%ބD'@ee.x%r UJoI|8 Z8Fs电*GwF(j[$C?,&Rg" Q 8a1&E\P*!"n력1+73FEA㍊`}L s 63:)n>qKx sxhخv}rtf\ "qpؘzĭxUovzl\TSf!π >6(cT$!xpGQ Y={͚wW@$ZP5TbĮ x TP*GMCϲ\{ 1:в3fPD. `'A7+晃] ]QtB#Vi'ňqZ4֎r<Z}zܟ6r A񣜙Q9' hLՓ[)c=Nn+ 3|2R$u^)[ٴR6K*u%tBx T&\NC[;؞^\x.'g8ӄ|mOXs࡙8[N]N캀' ի* "RH^ Nal80*%K6o݌X?R嗀>tr\!wNwa߆E΂V+`Q YXpkf~pX}=)&qF8S0q=ÔѼ>(Q"ƀ@OHՒm:n>al yKnHB:@羈@//]W]=tj'O%T5-eª Z[vlgP3Cd{-aQ& IΊH#1RBVDAOI^SI̬\3\.7ql n ;}Bv֮a1I:a)M:]O"2^ۃVB7W@׿ q; Zff2tJ,dT<G\mzHމxa)Tr\N[^ 0ǢBxw-߾Ğ3kqX % -Jgt:o23ŭ*amlCz*}w jw_lHxC<:rlH-58c^н^kګ3sRMUr8z@W:@2&@K*LRc«8h)_\+ ZgEmIIh}K}M#ܫ%~CʼcA-FXa*[Ar ^p18nF:.% G`d3\:J%Y! vBO}~{C'of4+pt͊΃V7ZXԴ D zI#g` 19buhM3(LuUϑG'@/,ׁ,@^}QY;Z;Aj$u xt|X86'q3t3,>ޒ%b7WM2 vV$G/x꽈}"qRj`+уI@L\O{0q;Wn Vp^-knrf x ܩiSE/qd} Z A_A6o r"g4U.5TwN! 
hRM%=}aa><*8pW@KyB qc$huu_R$I1TZ'z*Y4@5ՏdCxGR\.V[@Ґg $SflJ@|MISCI/`ЙTx)|pyOZx$Z_ v%%mz@x%م9h{ϱm}f6k O$]hb+;|TPQD]'ڹS,!Q"AOVK^9@»lPAؚo N$(pd'G6~H*L"L;V[7O@/5D7ׁw@KNfbC_w!n= *Ki"PZzhU0 M7$~"sb1 `;z_.3 q{Rcx;ER'fSyM93SJeyKxL΁VXZN(j}#|js_ K}[ .KκSЈx]wqOZʕ C:ocgR뙙*dP#сJ=b7Ys+.V?3'^$[D2 '@KB3+q>'t5gJ}p Qlޅ_@o QYE}6"[/Z>Q60[gTGM b$iM7,:Y1I/[yJs>N!nU+ ?}$K{}[Ȱp/@£ #ն1Re0bΊhyfڴP:O]gR~ bC<!YݥS+٪(2IʿȤ Ž4Db}m,CP@i[&斄@^н:]DTbx\.dAT$sils޲Ð5)0p\& \slpV!ry䊩R> {r|ӛ MW ד~@2֗9LU>O3Fi֗o%mL;^z ӕ4ľCiL j=Z}{%SN ψ>$y$IeN0{n:Y9eH|jP reNOg!g/G(`Uf?!#ZF<#s&5;gp \X /VO/&Tc}/9E`؛A@A~;duѡl^-bصVtY<%O@hSWȰ< y:t A$h|N Lfc x ]@>n_ŸZyd=t?{/xSբINq2هiN(Mٓ oxy->?՘l-+GѲ+-FWN)XD㢤v`hAZwt_[9NǾF?kdUSJm{ a-[f5k)'wTP!9O/=Md|tlzd4SP*S~CMmU+чNYVXfF9;4}}qׇj~ wWA矻OV7֌YY۠ߖzKc)ABp D\JYu-̽np1Iw?d 1\J7Is\ 3VV}H$0/h3}+ߐ07@FC,,u- 4s󠥎#G{+ 0 ZV:bvh ӎ$|nL g@B7jVպqqO =vbjkhh*z Yƒ}; 7^)o4D^]K z][ 'Hk!|2ka q.k ~/$1xn fa\@G/ OIXYxπmV?b^? T[T~ۍ^_k"s~_&TH{% :w@NC]п=rP-_Ն~ ה;Pr|ڬkKM^At|@77ܰ״M ؾc0M>\3$ ÿ;<_3#c1Avܦ #7} z%ㅠb,ՁXpJ*l,gB.}A_/cA YJ ĒxT$=funC>B9gfdrb`|zf23q6Ws+ sf;[ q~|ՙ{3 _B]UcǙ(RiApf%څ3 /pƙ&[e>-yfAsfAtfV̂@[Tp#ӑJ3 Z~fMo;,H`E/P̿FY8PGń3 ^~!M//{fApoˮqfbnufAI%$m^;a,"hbg1ꈉO*#fGfFFes -w)ƈ޲rcVp%\r'918 nC㮄12>:hxtdzxxd|ቸEq- zYR ْ#%Q>iz_w0t3}APnx~vt Ut`4OTȀ-yA 8ݠheu:PGY`?yny_m\?65t wu7?jX44,Kدj=要Ƙ+4 4&ϙ<<|~g3ʾ{<̿aU&Cģ(yFۃK?o6l?*GtA˕e?elɏYI{V킟Nl_>̄y7tQĔxb“ZGAU׌px~1 _ЦI$3@iTGLxqY{Txrtǧ.>fVպn14_x|z,Lr͘7_̼ܭ#e]j=(+_)k/? MoJ+ ` tJFzEl-$i3Ϥ$n~=_>Oh-"~ JǁէY?ρV~YW灿=z Jk5lj 視/힀&="mր&?c #GE#H'AFH(Qي5ʤ%S G!z+W,ϲ9:+Кy]ss#LdD_+c,c'dTI8[AO/bK?uRV=+2/\2r0 ٵ/ UF} `@T<=*!0p$nQGt4gw8ƨ: p_&Iƀ7ڸ }9<Ҫܗ SG:0<232960 y7#|EH]@ 򟨄 ӠeeS]…0:/-pS6m.@pa,?+$qHWh Ia/%m9U?bbO@?mmGB-_~WE{:*QWR!+bzeń |PY1M2?G׌|9_f]oz؄Ax@*VaR[fW Micwjiz= _^4+݋Jw7oVT8PQ=s2^2ݴ 7\dռ S♭MmUO(͐YJHEagNnK<Є"ƌD11ab<Є ʓ6sri'7ް3ֶX42NO;~_k?(uegL{mL`uOg[p&mHq{Syͼƞz[1;3w@|? 
~pU`g3O͵˃  ,a›'!2D;Lgn qInF%.%",0V^ܜaԆ7Gna{G%cb 0vX=zl\ld./sx15DأMꭖ;~͘]<߻^˯ѩ)> ۓի] !* 2hOSH EwJAvǴBkIo1N{5'6fX2dt(mҮفU3oW23yKp x opm|L(M[ٶr2κWIЄycw@Dx$=ހ&Ԥ y_ h3Lbwx"1UPI62=1/yČp%)NdZ={Xv z(5=3<4<302qV30zaX[Kqd-G2F)à */_xo=+IC?$-LMЦD$%AeZcjS&i4z@5ijēO迡ͺT~ !f7ߖj  3?M'r; g?gv׭~@m갸σyY6uxI?mC ݐ>^Q?@? X4/6٨8=_MXwrE,b\|Y[(UYV (aC'%B2B,5ާ`=aeچJy,jgV:ګERRTz)4V^<@ lRrD U^prH<ЄB 1NbdaZmNCf,%G?ambx1QصwO Fn_aѰ f2pYlδW0#Iƀ7ݫ`v}r_64OX›f'>5g! ϣCAm#m"e}Q_B]UЫe.,moK+^nޒ^0@pgMM"$퇁?gLcs_ M%޴!V~ڢV2/kӊ\@_ o5D mпXB1{o2x4|H3,=7늴{oT~z(_%D[2ZqQ&l}l_ hBMl#nMXwQ^^0$׸g;}4 E1w|׆SB'ߥ&g!YC筼s;}3q,=e0VNTT $k5_/q$(9B1h%j_aBQΚ_(jt3;]03]f~'VmTzpK8 0 J UV8@}P!fXbfv׽.FFѥHãޏ&; \ h ^:{D=u2ŬFF d\N qCdp=p|>^:! !)5?_oh1F'F<$yvvx&ŏ7h#hh0{ėUʩ.po2+~/N8~n-?ǵX4fc* __.+q~P=v2uײVNpIK|1`ع'1lہ'Z.T6夣!b<лqC8?U jYf&t 2+ W~bH+NÅWo/Fȓ@GYN;GG\ʮf85;B@wBL3ѷž~a!a8lzmƮ 0C#l ),d# k1QLy.vΘ]N-8 ) @Jah=b6kۉTkr@e.EڇFf'L2zS\QHK𷆓v~jf5dE|q5r͂~<Y !~Pm碛2ɨ T]KYY̜n޶n_\g}֮ Lg/0@\dj`6l5@Ř?+,$&ĀE( Q~s#dUB}'73N!%|ls#VbC#F77Іt1DKLAGLGrǸ{Rqr}v'lМvrR 9$9ohS߄7^Ͽ()r8w[G5,pPjVGsk d#w=g:q!>ŽN,^H9VKtJsܹ릷6$di oX<$zQko2*7}m!Z2?@mQ'F^V%B{>r P3tm$}:jO?릟!iFyTYF?$׎Mϙ4yC`ذk;Ze=-a0lؽm~Vaێ~K9i?5lrJܗr/^WtJׄ^,i[)+z~Zəٸ]Z7|CD ~TDrݻbfl3js\&V &{yྕvt=(Ō(P> (p tDxeDu!1"(}$x$ h, Ý*1»sd.NA &ݳbzGY9қ]Zጒ/a/pXI[ؽpiQ1PpbJvU?C<2Ҵ~nN]g}V1ӖKU,i*}I]xt 8z^Q̛QSFN]vRjvuPU;8ģ*™J }1=UQ8MG\,#j23|IW{YckKtAKkM$~a yKONHF\p{֖0=m̊R@id; =>&Jp׷ V6O~C< E=GX'X٬޵=g\1|ӰjQz l(\NKӉhǫ IGC:At6b O;^ZooK-e!L{d5Yjt͔ete<dkŜ" wt׋u.c8Z%CNPnh0\ֲvchb}) PVxZzzAvW9IHyfYjWJ_ jc k,I߹!zϒH_FgIM4|9KA0 ځdwL}K7G_7|F(?דJp􌲁wEv;f zYYr\8Vz6\]uY&jUB!"1iQdNgR3΀`S,bПYŽz]>3HsS̥Yx,QE)ziE93k,l4n0Ľ ?}WmUt|NvÐPiA]ӋloQC/q7"%=< ;~%З4E̘@캀 e{ROua7$}Kd=pr0;/:EqB7MIY<ϒu[ndɞE5Mh fsT+f>Nu:7tnǎ2nzaW $ $ɐKHGaJ b+y[(ž']WmTev KIľC<#1Uq>aT Y}p vJ"8D s$I94Q8 yu֦k>rVZԒH o* ( q}6 ~ >lV+:(!-à+ed[vjÛ2ޢmy^oKk#"a/N\U.]Hc⾸s]\;P.s+.\PgȨɄ:/|@BAq[CD(xnϧ`çjc))XHQqRѤO{)Vɠ5gpPLzQl/R3SM!hU|&|:iii-\g| σӍKGà K!3!M,n4Lp zh+ۑh#I h%0 񞁢 |4!l@Ky=jC|/D< Z~C`{ʵYxKHv 7!96WuFfhyIrjX~Ioq>Y藰=lwǝBׇ38N)$`;hGd~GQ5GwSAHsP6!_ׇ.Gl*usEx4vg/A/@3ph~on7d "Cwq(D 
Z=*XZ:a?޺2%r6c[HA0ẛqVe ?y$_IQ9=dt jְEO퇸_K'+%tsxT^`Mt/<IL)l᱌gtTĭ8Z=LFp_0%KBa+{QajI H\c\ߣɦocJ}NȻ~Vv;o;x]|jNx\={V`઄}M@[Ҁ1u3步?-WE*:& -2ch*eV:~;t?3r [Op)&CDZ K#c(9hߧ=`3L89Sa5YC@ʪ.A3*jX$]xw:Q_^ VKAm?Kf^i~y)n^m`R}9Wpt1؇ETd>23y[4^Z* $k늕܈a . GZ}w7쌵dX[샎DX0#7( &E5 RSgln S4D U0->68Ý=#'YU 5NB\ cĝG fPu%zQP/5'_8k-f`}J-%etx=GiydBB+$h1Xu2ёdݘ$rtû1wwb7e#٦!3u7uGʛ'}~L{}F #Ã#SC᱉)p<_Wc^ȥ }c2;ܿ~sT/SBjhvVC#y}~ɪhZ,߿|@wvH--yʜef?76 ١!{>~ =w_ZY_jYbklQB蔴-՝]B5 ږ(rRZ9e>o)wㄫq|2ńfb![?I7Is. ^~ WDfaVK!eZ? Y?Iρ9?=\6SC=2_+"~ +ETgh"/~o(|ۄoӑn:3 hBMlV5+) 0>M|FG?mH;Dw*S q@-΀>mm{`ZTU~[G o^U߭ 9#hUj8PO:+}B.xO x 4؂eĮh<1u] 8F18dbVrLmPy)/A/+KŔ[aD^1;,3wbz6qi(oPmk{^qbU2RC!Ql޴Y0}/۹9='~7tD WY~Rm%l,ٴ&!!stOB{9h1!JhbrKgZϺgQ*'r1>[>CY37Lރasn/_vkvގJ/~j|@s>lϽaێ~(]n>ܗrnHM*o˒Nb{93 24a .! 1Z?MݠZ'3cQC3وba< [(n n;Vv2ݱ#EFU%⑴Yd>aUf+ϒI2,z{_=j$?>EX(;(IVr`mƓIH݅}ˆ}Ab\ ݵNwj~HU.KxF Qf#fr=BiXHL^qfyنm 0$"[+\']-B"k57: x=Rh[5~%g9˓C5J3torNe|t96|2w 9\.xfNf8񰄙}д^!㱵G?Z*DҚ&fMN Grq6w *ɤ. M2LoV&)=.P˔=jĊ_!qsXuN+ĭxed-3gffqF(Z3BFB\o:4D͊[ Ymne.[X}u QRyQ<!=E+.8⩳ `]* nj^5,fxfoyZ&E W%O9y\E^r_"HO *WL$9S~A.@(+_.|HA5Rb Z:_i‡ۏ§ [µ@wI/l ˵S쩕*8~7rֺ? y<x -e ߢNetK Uc>UqlޢI-vgcs H;wLQ/sGQ)۳}+y-:a*qʩgm/+OÇ3hy: ITʫΙ~rMtR%-g(QluBB]΁b*M04z^3~C<fm&a/rG_1;D Lt.efH f>$ n9+0A$ˆ7G> }> u>; N2tsݒR0CԠH+%KIŜ3tx^FRBCH5vAa 08=敷03;=w8ZGe2e_)]ǓD׀K [IW\.pc[vj.Kί7JҎ/lN&lя0p@|`>}kF:sUJ5J9:'# I#a*#glͣpz$.nzB`u>z/r(Qnݣz7ՇUrRwKH#KjvDvFHAhrz^.'suwb20pz<1n"Koޅ/rg;CP0w@PKx.ޠD@2]^-^}EzDbPofA@bR |Z=@AtSqg!& Yjv{0B%#@#3gB+SD$^-!f 4 7o&xn0pV! 
T( >*ܚfn*a [|8ZIj ^9%y-\/ρ>W3=cALZjHXRsyE{V.jP6 t.L W` ߴ[i-!];t6gxӵRA4 %VYf=;Q󠥦1&\-i'aɺZ$9D}*Zd(|U棰fqRbsLh'@Dq&v瀓F)Wb:Sd&[3=Z 2W_q;_pBXAP1*!7*mŷj!a^-5; dEhn @-IfZI(}"'*S ֤C(M Z~١m~#qq(}nWh"n-vR^;AwjKŮQ Z*$.\+@#|Ď<ZZ"* ^ƖWVgo PW1 *6J7V[UDlNJ=hˤRISwtǛe|  Z}=#I[.KLO7]}ꯙpooƁxbx8z^Ym,AUDMX.V7FܼM'kڹ|?`m]+< 9Jw}kkh&-zgY͌_Nxz2uC3G]fl$YJ_#y&d$RF}di x R1db7Z穎 Z=$[eltGc QOGZVk3_t<灙Ca6JApņUt|NEuG( <h2cEo暍[3Ϊ7f [2eY-ZapSx|-E" _2tT@;+>e%pQ|\| fExƏN/uzIo`)FV[D#ԷN0GʨT)!,}HHUDh0 xƮmŏ ] s[rfovHvZĮ8Z*^&5їKsC0Uh8ŁѩQnM.VwŻR}T >Yyd=t?Zbn]t'}~L{vD߹w  ɘks _yiЧEߐѱMW<[p _z&F$=#TM|=Y뎞kK|oF}İ~>Y:hB2_ _͠Z,D7;PKCC?{<7ѱ1LMZSC)?;iX5|zʜev?76 ١!>~ MweM{lCbjh-ЖdPKH|DZJYu-I~LY'\^6 6c?&#\4X~n\}@$@6j&SLlJ>c7l~b[=O-O;hj ik֕ KlkMVgh/ >=lj hBɏYyB<M'ch 'P۶{[]GmL&?ceO!w>?4?L:„[0L ˥L};TE8߰7a$R3x$+N!(o;^Ynp0{Zqp}ؗx ^bfr=,={:H$^/,k5βw&Hvm~hͲ7 yiilg;kA|gEn*~NFC z~?$#<6R$㆟R 9%{?m8FJ\@0Ӯ,m߾Fi3/qTnGBrBa+,' } >rJ-Rł'I6F95 A'x[ʼIS F^.a׿2˔.a~r+ ԡ0o"Ԕ|@ Z=|23C;ΙYaV Z8ig* -+oBNНBg#fR|8b~2L51D{Uͭ{cO.JHEx);&g#Iu[0 }\YJNиI#c;$ ! r#ٶXf&r>K oPmSӐMknԲo創Q[ihPR@X+ľClхs)'9},}+Ǐ1Ve/S"* oi%gfBUPCƢ#!Nۧyj TDrݻbfl3jnFK4WJB{8ģI)_h%L-P '4*`2QJ!< dDOqGRA{TšC\D[UE{#0pXI[ko1D,ñ8Ī哠7+ ê 7NT2{IZޓ ys=j.0hKFQHTštx[EpRKo+Q2b c^x4}m|a* Ʈ.^쿄jqUq(ê$[e/c,ዎ^V^4{{UVgzn !q:8+@ڀATVUrND+IJE-}W\oq53.k>hb v1v#za G+u92L"q.r^}IY/G$^}}m4.Vl~` l}*J25$eDɔwVAˆee˜OLs=g@(+;Y &jZ$,p(]ڣ(p9/QtMm7CrOb <Zn-I{=8 < Z.ZMq=o*j-$hZ^xD_c,Qu2[\ʉDi8t{\|h}x4N0 4 Z_@\].V ZcK.h cvem6|ަPpТѨ{*H@iYۀs !!M |x[{h.ݣ`V g2cyzW`.:M?!˹뤋A*%QMRzvh~sG)An1$n_.V1 Q `AvxXD@.2 'HvY] toO&Η&-5LHs- 8eut$ݟU?HpP~xxlb<y'O}J߹>9l?'/q'W]ۓ/.k_+{yc` tJZ}'mжD< ]bIܾct4q@Jy) 1O mrwO6Ĵ~sN-O>P瀿 5Dm?п"mՃE_ Po .NFH@7i} l4&qq2.l:M|FG?mHzq2gКֶqq 7 7D[jGj1%7)!(6%$%Ѥ}TM*f8eGX-3PŌQQj!8#уYah=g6*VbzIof]x\\3k\;%GZ+_.)jPƷ_=?*Ñ)͉fgxCE OĖTUF@-[jHw >V?!MyϿ{nܱm"/~z˫*G١/b/[QsawKbC<0NU<?Ae쮙afhpt-X⾙0 _d.)k2ʗ5A5r*id&]?`\U :^UFu b!zwטEo7'PYj'$g/Y j. 
/ (ۥތZ8*NG*i  ,JBC/j%'8_OH]F00oB Ʌ/SS&Km\Ma|[C_&sWh5ifٶ҃/KNΒ }Z̰7X ˠ/%6:җNH[kP'7GB?I g2\@Kڪ2en ZעŜvb[ ^zck9k3-w[hLK#dLZGi%4|EomCEBv 7JNUBMttۢpSoGa?~sr9kݳV"s/|ʨyꐕLHhYfNΓMk7K2[=cGX\g+DK9b6gYxͬE q쇵GW"hRki&/MrO'#"!wn.}8[Ѥmsݡqt 3 $ o( { UQ.$kS++NX˿OWFw=2yٞ)ftp6x te5o/q\!HnS#paoPfG?fρxd ג<#=mi͗*pvLG9CK&a/p̱|(5Ѩa/rG׾޾#0qc`lU8ԓz{>Ms%[J}8!Mig*!q'{ b-|'Y f<=oTkt[kvo"D lݮ2/r*pє1|1j7GBAh(R$}+furQaFQRP,z6=Ge xMe+~^R~^Yp}P\e{4,o+z$^}MaFE/AmA_`C7@P_3sӛ!M/ A/`S%N?]wʹmqQA A+G+ .yp1Ҿ5aσGq3;D#(({ qIjcI@ vàwa|-7JxT &hW\)c[oiPu:sFV9CP'oiݬ8Z͸X Iځ@Kf ص;@k8,[A`'hX]t28!F xno!+c#=U'w)s~cT)eR*Ia6؆ˁCs+#|!De й{AXDGAK1U;<Ǯ]0lV9~d-o$ 9ѫAXgzV>x$Te6"fh1 ThAM[kHi͹ŜFn@"k_1su˥R]aP|:ZJ|: |(_wДDF6-?b J9k#dFdSSYg?0>0'l/nW;l=Ppd Xaq'8:huAOWwX¿Kc͋ c?BTyjV=1r8Z~Pt"jT#[O5Apb]~ dLBv3%"a~QCWv@evSo5sp'l`~=̶!ǀ7ATwVrBI)?;!9D ,3irLs͘ _ IR 4e? @W[/zf+\lT]8f&P }pW0L>)pV [oS\Nv`iMhMCqHeuHV._k%aEN.x];h}j삇صOVwV:'d7~28;Y'm=HS۠7P.0o!`Yp7%K3sqrW5=upqj 4{FxkL{FO:]5xyt޲Lމ?b4tզ鋪Fp"D<AVqK ,!w*Fog}aRkK#qJ eT[XY)HK8ZМﴡkh=*l9GGi6eJH|T?fd1l[AWWHE rP<:&!Ohu:t ۓ&Jb(.Q\ֵsV]|4S;$ %#\4X_@?n9/X~'V0O?S[Df]83?4?+`j槁9mк)6?/mò?H/~{_~W&<[-j֯M-1_=ݴG۶{"wa 1n F0&_Qȱ(;[ G-NVoA!>eA ט7:!< lW[pr[L[2\gZpHj߀Є\xЉC$ptOk۠55+bȡJf< ާ,x-&lݬb'7^rnP~5;|J6UL0ص mTXM [:bp::ldzb``txJ$3ЍjQ7JN}iy0IN;rCt$+J~~~xxlbjAkn&k pzwHoA%[PFٜO_^)syJl\؆ԶI<ZZSAOr BIY )1گx';(,Fx .rZ)/HM;ނn ]H鸅 5(F6-ji5B V?_.\bC< W:Z)wӷi::g@w?ZRP/8jq P-ɦbCb4>e 54;]GBd΂[$r@Kɵ}󡜴[a o)hR}x4)sg-+6/f!zqݨZیrm[#jt҂-mq:y˱ֶq:=iN@{vW'{M ( QN+|XÝO 4bC.I' zNqGz{V.:랗,6h fO֌+UQ!/rGT>i%ضb6YC:FmP .m)m Aȑ C!{0޳=&D޳WUZ_vrGUPr[aAit<|9.EїZ _f󲨴dyJVWA5"9ha,!(WIt}^Qוi$.mK+k͐KLϔ£Ao_IF㤟KELL{E8w@KHY^j[6]`b!I>+Nl ]>(6K'\gI/,51@ۉ@PWUϡ7ף1JޕoSL-֦k孜,Q =(&GaAߖyq+,5\ .^Tn@K8{ -66hPCv:*q&L]T= @NPW-w:Ɩsk-\e[x&C22c] C8`;[5P[ tj%)ct UN{}v2$qVAA+k v'ӛTeգRB„ ң*+L1)l@a\2J9R؛&wwA;~̿$Ԏ†waP8.2t AK)(8 䪸SJtAwЉ>`eʸ$B/(h}q$9<Lctr @3eURM }-< \b)2=|"AK'IxJ{Dz'Aku}hB?ņ(YƖwyY>H?ER +M,~Hp7;z]tP$B32zPFe42C552Tڝ"0ĪL5HF@3=sCgq[ P߈{2_ $~iЧ_At&/5v%I*jkDqO =]9=okOJitN@XM%@bCX“sC/(̃PsCN{V*c)qpɽezNmFfG˖h/>Пd܆޺:!&eRBxB.ԫ"W=lgKpduRke 
ٜpP&_@<5&lݬGDNem-!e4܊*Įxlc@ ͔mC@% px9moL;"LuC(<1^bR8* 0D LVAYހVߛT8oV~#hfnĮ xD*TH"}\^d#4 xzyx rYZނ_e)N4@eRI$G;x4ٯJ{'@,ՓsC<Ӡ\j$8ݷ=Cn_άL?=N;vH_092<82<<544<2=3>2=b?Z|xn1o|oGў+-XEs\"#nuZwt_[@{~RDǾF?kdUSl{ a-[f5kެ nł@w. C-.C0GǦGL35iM |ב;iX5N#tS,is,{lP؜4C;/.+_)kSJoS[3g"7A)-bu$c5D@d$ʺVrΪkOfz8bH(zKFh+N5 |t4?Ͷ y})Пf^V!eW? 4Į> ,J6i@N[+l_d !W@U$<ӿw7K='n-᪎h^3'^l\:;VkQ3hV1(.[p؝ȹK%v !ݩE(ڵ*A2ZZLSR"e/mo/}+[MP_(ԴcY) e>,<a GA* > 1&M'c lj~ā܍?_;I}'YVnB>B&ym|ru_oCnx.cHT{o'! t.9en;[UfC@‹/* {ҿjgcϚτE,@,“Wg&9Q u7sƪlPO )?@MjٷrܨݧakrG;xچAFg34~(Nem|νġ¡=7Nͯ}1tfc3.x˼8K 8.aywK_[_AzPW4V忕Q`D,8(x$Ĉ3 E tHqs.нg1T15J)ľC<6о>Rvu(h4F&tA5)+Wp;-=>6cIQtC_&ܼxvnKdK"o.)mFa*y˴BK4[VgV0AtIrGR{TšC}}çIu} $z~۱),&sUtQ7s޽76sbU9fYH%%ZΆfY*%Yl˲7 y #5bߢ % [ed<3^/NHD+YgkS9#Z=q#+q cCΒ|'< aoI ˠ/%6:3M%A@b/|$I@OiZ$p\ct3 7Jx)5ɜTZJYޭeLnOa̓D9-5pk5w÷w-p̃TUzj)rD]#(>[b;zO]~~Onµ@Kr3a\1kQ5۳oYsY9'7M[o g=\b-VIEۃz䞙JNzooFEK2[=mEO)arc7.Jd\eҕ&dA{t{Al{CF9D)7f b0{:/u:/AΖg4i\w(%7Yb[>q 7J*ͨtTеR۩e/C16aD]݋!L1#v׀ ("9Sooz6?}G-F}Gx qp@uo:~C$ZonG%Ӛqr>nd+*pU{L2b7w3~C<fNmR0C9ģzџ#x,w٥-hb!>٪*CǡĐ#(Eb R0qOU: ^}Ic/qQ`tBYYg; g`g=<) zYY^V>>N30"^нڴ_x1cWGb̡sMUJ!V+(A+};wKr}c5D9[``<5*Z}ac+ \^uzӞkOO4m"Sg۰Tp9ͰA䡬cF A 7"wFlg~)(N"nHfT&EB.޾< j_4&/VBCǀA_Ԧ= c|HV̔䶣(%8h,z)6e8Z9Sv[] 5+'%ْmRZpܬzd#3ӱ'd 0oVZMo6SGU))G2 i躞f2‡ ˠ#.=/Ř/nn:nA"'er= uz4$%@B#rG!à"v\~fn/'_tQ&Qσ>^}AY)ˮ,,הN 7@PoVP߷졕.݃ ^W-7u5WmW ,EN &~_N?MW]+Nn[x@̔jxJE  wsb!E1.Imfpe*u6`>A3MvC32ofzsV]'TcPصF-%=t( *zӳI\&GI5הvo:.hY{߄r tK)IRH}@ha-;@bz[ex^-;N7sFc[ԲH-kDaz~4Ke)['[0 B.IV! m9 Os!6+˪xBģ*Wd䷴ -MN ~>rjz.VsosR'\wͬ7 ϲ&+V)f=E&Wh.VvB)zl G]/Y͍\gUah3bm:Ycok]dvj81.8z2k.Vk-I^oҌpsWsjLp?h}S <Z0iGN L 7$Ih:rFq_@|:z~BN uݿsw >L?=N;vH_:2<8zkptx||`ژf`\->?՘m^-\ѱ}Ww>o +E> ֳch. 
׃]ig?/f>Yҷu\ٸ|[rH-}S,{odޛeАw}]ltsse e!-OmEpB$$e$ޫX9e>o)wㄫqM.S,pMc?#A>Uп*mkյR :ˠ3f~C_ F-*> HB/'߁wҼ2 #AF M[sl_dWv_@7aMP9xK4>&6PW%0Q5Є 0nnzmۊfeq+ ƙn41-Cr%Ǝ!οEtRz,~ QO,|p Y{A*u]gvD^x o}K[, kn{G[m;mnKfvAFYb1BO[ǘ7a3f)9fGS+l./d_Cy,l.gf#  Z_Rl0صVSڗdx1+:6hL L G%BR:'Ϲv=q5HE=,DuQSeQ}WTCB8Nflj#OfEŗʭD7yҿa -JSBv:kN D/Q.nwwc@%.$9hQ18,3[Mz3p%Iv~WVպnqwSCKã3cqA߅G(}&};a U ݿKh Ww/v>LNHT>zKFh5bi3Ϥ9~ξK4ǀ m$O6Rqg@!vigAV-OO?sZ6?/m͚"_ **Mx[լ_%Zc`{P"B5 Ϙ<F#?aHz仺EĿIRQHVXEe#CJm 7F4!MᆶTj2WjR־'FVж={Z!Ѥ}8'*zɛPśh#77 ;j!8#C CA?릟!8M<_*'rᇡoa0lM~JFN`5ʒ@s]a9@4\/m*ӐI>`% |+Ebܾ'sxC%x b]]>bDF8czna8%#R$ϧr($5GnBB{8ģI)H{W̌mFyܞv}RTţI=G^;4_/+߽]YT @iWA_ը]D z{4BVHsGRձGaqh!VOD_g#oջ-&?a}=;"beũM ż5)WܷKZ'8*A"VOF: #*9DY7DݚᄈG\,_=U\7#f6,rrluq~g)*4{3ӹ;]»*kz tA]Fk՟x49{h^Gd$ Ĕ_Y}T}R1h{ҫĮ_lcV.oe{|.33)aǁ3g4]1 [g q_^zX{QBv 7IN[U2M,aR!x˥Q4OyK@XǢUʇ+u2}xeE nfm+=b*3'wLA64ڦI ˠ/%6|3+(뫃%֚魦SI$p 6H] F΁knuJSh&Rwin-JQ0HNRyVs RɹaE-3WW]& rD}^|ph}R ؎SwEO]{jX'ק !1 ځ]l90Y(s=5='Y9TFtLzIY4DaEg`9"$;˴Ūk7K2[=6A:{xJX\li)fsyKGhz#OpEo"G E5ĽDG!Q"iN]GzO]GEcb)[_&m4tɔ$ o( { UQE=tv*\Ua 7c04Bb]3c!L1#v׀ [D!bC<`vۋv&b!{C/Λp@uo:~iP8RTFM-BB]΁ֹj9KN&a/p wY Mm!WLwOì]ڳ&%Rš}8\.܁6Yh'$Bn R[8-TB31BnE뢜 DxM0]}3jgTšQl-Qg+:AGP3y絍fۡ z=0 :I?o5MO-I[S݌124I _of??D]g+b;vwٖ$8>Ѹ79PBAK-c>}+kNPVxI'9ģ(Ɯ7`f23 䲂5TSQJHEEsu{A*kg01wJ7y,aZI{Ӥi A/uv@X!N쉠]PqkvRWp.9|y&՜ yn+YY$Mo=y_BeǀFna'A=ߡ$I)є=9[9n<`f]|j=II dȦ ܁+%Y*s#OrV/B~%лr 7_U']wQuEځ ucTbx;_P:^]zRD+ {ɸZ3ʻTTсRzt&`“QN jFH렯7DUo/#&[iz"{{j)۰/Pe' Y[rbdH%:D=-8Ct6_QhHx1"< Zn?fvv3eIc/jRD1R9\)mG%[KqG# T"Q&sn+DΗ76cAl6S)'=7ktlͣ7۠WJwXpsI$*oΛg[ccy*zJn镅þ#x;շͦzG\ɶBôJH䓏BDBn#;{-BMg@ ? 
gAKEĦbwx!+u\0q Z=lp w EMv fJDc^"I@5d^RTǣ*li=eOK9:Lf*<6h~Yԗuχ-tSnfX|)~.1Z4 y-(gfFvJ8+$Q7w'{<L/nvn.luQh@p`18x\4_^k6cͭn(2h :&\Ͽq,a>)m x -mzj[If0hC8Z}pF")ˠY`5r' ~!FՍO{l[;X0<W7<]8!z{ 0ʊdA%Mx;mBSܛ%gA('j:30fLg)OBr{Aj4] 0<5/e{T-[d2T)w#+$ƥ=2h ce8Fy3t9j _MX2 -*pzxzt4+tr]E!_U1lg@4~L i_2Ht5ǑC-њ {8h1= C_~Oa8ǜB9SoȤ[u`uL~&v~)flqwMc,y'gfٖYV.üaj[-%2d^6*4kZY3#ZDY*]ɵF1 Z$pX~d؋ L/4;A 2YQ$9<b +F^gfk&HzLKwǁ/\v> wܴa_by[7}x,N|Ch8jr5\\!q-u¦2-)W]^tF뜄j8\΃WVF$p $H})m胨d)>=XԗLd}oڐ*q>w}Й[Xdql^x|&35#3ɑLGAﴉ/_*/kn`Wiy#!Q 4M}3">&|9<ZwvYRR1ص@[>?<|W;'ptkh9"<Ԯ$%eK5`ɽ~$?D͈IV }o0,vd:b-3?IV՝N(#E-^dY(5&qhQLKGCXEY62=R#vGvzq%$Qƀ7A H`$A%i30R51!v㠏+kgxǸD3HM/Ao0rWf":D,Sf/ۜ-f fFx 0gބl$vjG[ݠu2̝X|3( o)z pp^x%{\NE3g=#] @]m-? bybxzjrr͂烲CQnꑨ=Gwo[|ι퟿(Ul,~u}㋙qX?ѩG]MnJM> 37*t<~We7afK{/潀gwAl6Ns+%{gb7f3`!F)@}Rד~AY>Do~\@˕1?ITe73f*L-l!ěCo5g ӠRjZվMY}kE#i<;{=r] {AĨfQ:^m?A*:82s0|l:q.),~$< 6K1Is xQs1p7 /H !C_$|E1x|{T8!f984ѡMcƷ!8[ k#dFFi]<:D8ij #*ڞ#)):l=P}ػcΔ-̰@Tyȳ5ɲE>7F,9AS04OhsإCbFNρ+z>˵JW ɞr EI|+jI{3u,_LmEFx -8T,|f΂0\Hޓ!`{zXx2$G8&< ?eg}O xh}H8Se͌{8z^7'W}VtX2a0VEoq{ڞ@KAZ}< NY,5FY4Ou+g`Ǖ%H.(KbbBdo8n^S}}]D;dYkyF}λ4$>scg?!E1w8s͠ifֻXOp!>C;NZ&4Q_nd =X؝R\ċLE\7Y9%oþo ބJxhةEbݩ1[$VKm1q`Vf[9XwD;W8YaLq׮u+#Q_iOr}NC,S܀ z<&bQ75;/|:6`#0?°0O~':6=2fIkj()ZFEߡ6OMkvƪRZY6?76 ١! 
:»R{Q#b^kc  IPJ*ErDǻ=z1nZ)/>%mCi??Z/ MJHM*$}HJ@:gzf4>v{νܟ9ν]ɜYr+{_GW$ _*, a+ D@JD#ϱv$MDfJ_:᧜Nf#rRGRYNYg$?Q PyWZᛒW ~UUdZf\ nYM{ E޷ WfHD"o(Eoߔ\|ݳG:K.pҡP]H8% +pJc)C?-G"&ƆDecф>im3ni-Wu:6dFr[oC[BuCilxHHN?$k ׂ<'+lNXz4ofx(埑/k]FF7Hw`KBuz+ǁNr{=x^;ƓQ:44u&{if=GXϤ[n-&w) wVQI=͚SÊd y#kX3LOuZ'[JKjp&W!^c@A='l~LO T #VmղOjٌ)nzg%M|ٔ6gx .GW cH$~DX^#Q#c {"c1-O1v֙WC+Fp%@$ո},Ϧ$06?>H*xl]XA4lS!^GPp|9XlB::=m8w ׃/}J*MMVАfpԄKH.ĥ> lO]*͑HnVLDbw+v0r.PMw[Wo[͋hyw4֗ ] 4E4Mµ@Knu˖T k +-U,AY%!)-7 &h$}4 I*Q~ <4uzqKHւhOCeH?+ }g R:k둠+)`IqB];cXEV>3_IG4VJ8[vhɼ\64bKc-+Njǹ+gЈp-c(3C!"xu l9RcI=g9R0 lW@'H*GI~ )-NąY'H> FM;D"=1HN`x23U-kuFk~i^[#[6onԶgKdOϠ itݸ:.x("B~ѩ$IQ*f33"*Vx4"qtSFJ"9Z@}xUzF Vv^΁)s4_^ 7"OU ~UUvx 6eǛwߑǪV,`_[fY x ajcDžh?C}VD]"_6ٜNۜPE":?fs >;q5.TS='czCWaBuX_GpI q Djey9`j .x,l@U ګ?%M '3ӼhҬx8wH0>bp:|H*6+CdO~Bu`:0⺖lucR~&=tX1󾱼GyK 4BuϯrWY*j:e'A6aDi. Y d /-H\ I;'$(hSjHFjqYt.=GpPnvjKVd 67G _-X<`Z=d$z:2lĬգZ`Yv%.LYTR>2,y7|5 "ljR逭dwtЅp5ji57adiGEXFϲq:v=gu6idby%qw(ӿwSڨBngFnyT;}з愻w{s0-AS(ۄk.@ Jy4iv8x%H}s&7Jgd>>_?@k'|S'55'R鴞\[W+Jԉs.B jpҰYe,]a>US2G@X^%NY=Mjځ41d9f0[24Ʀ:[ '(M.bSlxD?+^Q!g! {h$>B9ZK%T2e_m>;J{- Kp[׻"TEރ"`9TNhpe ܩ,$~ qUw{L矓 0zLgXUQ ?k/kAK:'dTSS|Bh#}nhO{\Gdfܘ}vg^ҙ>%kci]Mar͏Gb qqb&]VhbR'?0ڜ=U=v-uG5clue/DESCRIPTION0000644000175100001440000000151313435050401012721 0ustar hornikusersPackage: clue Version: 0.3-57 Encoding: UTF-8 Title: Cluster Ensembles Description: CLUster Ensembles. 
Authors@R: c(person("Kurt", "Hornik", role = c("aut", "cre"), email = "Kurt.Hornik@R-project.org", comment = c(ORCID = "0000-0003-4198-9911")), person("Walter", "Böhm", role = "ctb")) License: GPL-2 Depends: R (>= 3.2.0) Imports: stats, cluster, graphics, methods Suggests: e1071, lpSolve (>= 5.5.7), quadprog (>= 1.4-8), relations Enhances: RWeka, ape, cba, cclust, flexclust, flexmix, kernlab, mclust, movMF, modeltools NeedsCompilation: yes Packaged: 2019-02-25 20:04:05 UTC; hornik Author: Kurt Hornik [aut, cre] (), Walter Böhm [ctb] Maintainer: Kurt Hornik Repository: CRAN Date/Publication: 2019-02-25 20:33:05 UTC clue/man/0000755000175100001440000000000012734172047012002 5ustar hornikusersclue/man/ls_fit_addtree.Rd0000644000175100001440000000676612734173577015271 0ustar hornikusers\name{ls_fit_addtree} \encoding{UTF-8} \alias{ls_fit_addtree} \alias{ls_fit_centroid} \title{Least Squares Fit of Additive Tree Distances to Dissimilarities} \description{ Find the additive tree distance or centroid distance minimizing least squares distance (Euclidean dissimilarity) to a given dissimilarity object. } \usage{ ls_fit_addtree(x, method = c("SUMT", "IP", "IR"), weights = 1, control = list()) ls_fit_centroid(x) } \arguments{ \item{x}{a dissimilarity object inheriting from class \code{"\link{dist}"}.} \item{method}{a character string indicating the fitting method to be employed. Must be one of \code{"SUMT"} (default), \code{"IP"}, or \code{"IR"}, or a unique abbreviation thereof.} \item{weights}{a numeric vector or matrix with non-negative weights for obtaining a weighted least squares fit. If a matrix, its numbers of rows and columns must be the same as the number of objects in \code{x}, and the lower diagonal part is used. Otherwise, it is recycled to the number of elements in \code{x}.} \item{control}{a list of control parameters. See \bold{Details}.} } \value{ An object of class \code{"cl_addtree"} containing the optimal additive tree distances. 
} \details{ See \code{\link{as.cl_addtree}} for details on additive tree distances and centroid distances. With \eqn{L(d) = \sum w_{ij} (x_{ij} - d_{ij})^2}, the problem to be solved by \code{ls_fit_addtree} is minimizing \eqn{L} over all additive tree distances \eqn{d}. This problem is known to be NP hard. We provide three heuristics for solving this problem. Method \code{"SUMT"} implements the \acronym{SUMT} (Sequential Unconstrained Minimization Technique, Fiacco and McCormick, 1968) approach of de Soete (1983). Incomplete dissimilarities are currently not supported. Methods \code{"IP"} and \code{"IR"} implement the Iterative Projection and Iterative Reduction approaches of Hubert and Arabie (1995) and Roux (1988), respectively. Non-identical weights and incomplete dissimilarities are currently not supported. See \code{\link{ls_fit_ultrametric}} for details on these methods and available control parameters. It should be noted that all methods are heuristics which can not be guaranteed to find the global minimum. Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the base algorithm. \code{ls_fit_centroid} finds the centroid distance \eqn{d} minimizing \eqn{L(d)} (currently, only for the case of identical weights). This optimization problem has a closed-form solution. } \references{ A. V. Fiacco and G. P. McCormick (1968). \emph{Nonlinear programming: Sequential unconstrained minimization techniques}. New York: John Wiley & Sons. L. Hubert and P. Arabie (1995). Iterative projection strategies for the least squares fitting of tree structures to proximity data. \emph{British Journal of Mathematical and Statistical Psychology}, \bold{48}, 281--317. \doi{10.1111/j.2044-8317.1995.tb01065.x}. M. Roux (1988). Techniques of approximation for building two tree structures. In C. Hayashi and E. Diday and M. Jambu and N. Ohsumi (Eds.), \emph{Recent Developments in Clustering and Data Analysis}, pages 151--170. 
New York: Academic Press. G. de Soete (1983). A least squares algorithm for fitting additive trees to proximity data. \emph{Psychometrika}, \bold{48}, 621--626. \doi{10.1007/BF02293884}. } \keyword{cluster} \keyword{optimize} clue/man/fit_ultrametric_target.Rd0000644000175100001440000000621411304023137017022 0ustar hornikusers\name{fit_ultrametric_target} \alias{ls_fit_ultrametric_target} \alias{l1_fit_ultrametric_target} \title{Fit Dissimilarities to a Hierarchy} \description{ Find the ultrametric from a target equivalence class of hierarchies which minimizes weighted Euclidean or Manhattan dissimilarity to a given dissimilarity object. } \usage{ ls_fit_ultrametric_target(x, y, weights = 1) l1_fit_ultrametric_target(x, y, weights = 1) } \arguments{ \item{x}{a dissimilarity object inheriting from class \code{"\link{dist}"}.} \item{y}{a target hierarchy.} \item{weights}{a numeric vector or matrix with non-negative weights for obtaining a weighted fit. If a matrix, its numbers of rows and columns must be the same as the number of objects in \code{x}. Otherwise, it is recycled to the number of elements in \code{x}.} } \value{ An object of class \code{"\link{cl_ultrametric}"} containing the optimal ultrametric distances. } \details{ The target equivalence class consists of all dendrograms for which the corresponding \eqn{n}-trees are the same as the one corresponding to \code{y}. I.e., all splits are the same as for \code{y}, and optimization is over the height of the splits. The criterion function to be optimized over all ultrametrics from the equivalence class is \eqn{\sum w_{ij} |x_{ij} - u_{ij}|^p}, where \eqn{p = 2} in the Euclidean and \eqn{p = 1} in the Manhattan case, respectively. The optimum can be computed as follows. Suppose split \eqn{s} joins object classes \eqn{A} and \eqn{B}. 
As the ultrametric dissimilarities of all objects in \eqn{A} to all objects in \eqn{B} must be the same value, say, \eqn{u_{A,B} = u_s}, the contribution from the split to the criterion function is of the form \eqn{f_s(u_s) = \sum_{i \in A, j \in B} w_{ij} |x_{ij} - u_s|^p}. We need to minimize \eqn{\sum_s f_s(u_s)} under the constraint that the \eqn{u_s} form a non-decreasing sequence, which is accomplished by using the Pool Adjacent Violator Algorithm (\acronym{PAVA}) using the weighted mean (\eqn{p = 2}) or weighted median (\eqn{p = 1}) for solving the blockwise optimization problems. } \seealso{ \code{\link{ls_fit_ultrametric}} for finding the ultrametric minimizing Euclidean dissimilarity (without fixing the splits). } \examples{ data("Phonemes") ## Note that the Phonemes data set has the consonant misclassification ## probabilities, i.e., the similarities between the phonemes. d <- as.dist(1 - Phonemes) ## Find the maximal dominated and minimal dominating ultrametrics by ## hclust() with single and complete linkage: y1 <- hclust(d, "single") y2 <- hclust(d, "complete") ## Note that these are quite different: cl_dissimilarity(y1, y2, "gamma") ## Now find the L2 optimal members of the respective dendrogram ## equivalence classes. u1 <- ls_fit_ultrametric_target(d, y1) u2 <- ls_fit_ultrametric_target(d, y2) ## Compute the L2 optimal ultrametric approximation to d. u <- ls_fit_ultrametric(d) ## And compare ... cl_dissimilarity(cl_ensemble(Opt = u, Single = u1, Complete = u2), d) ## The solution obtained via complete linkage is quite close: cl_agreement(u2, u, "cophenetic") } \keyword{cluster} \keyword{optimize} clue/man/GVME.Rd0000644000175100001440000000207412734171420013024 0ustar hornikusers\name{GVME} \alias{GVME} \title{Gordon-Vichi Macroeconomic Partition Ensemble Data} \description{ Soft partitions of 21 countries based on macroeconomic data for the years 1975, 1980, 1985, 1990, and 1995. 
} \usage{data("GVME")} \format{ A named cluster ensemble of 5 soft partitions of 21 countries into 2 or 3 classes. The names are the years to which the partitions correspond. } \details{ The partitions were obtained using fuzzy \eqn{c}-means on measurements of the following variables: the annual per capita gross domestic product (GDP) in USD (converted to 1987 prices); the percentage of GDP provided by agriculture; the percentage of employees who worked in agriculture; and gross domestic investment, expressed as a percentage of the GDP. See Gordon and Vichi (2001), page 230, for more details. } \source{ Table 1 in Gordon and Vichi (2001). } \references{ A. D. Gordon and M. Vichi (2001). Fuzzy partition models for fitting a set of partitions. \emph{Psychometrika}, \bold{66}, 229--248. \doi{10.1007/BF02294837}. } \keyword{datasets} clue/man/cl_margin.Rd0000644000175100001440000000165011304023137014211 0ustar hornikusers\name{cl_margin} \alias{cl_margin} \title{Membership Margins} \description{ Compute the \emph{margin} of the memberships of a partition, i.e., the difference between the largest and second largest membership values of the respective objects. } \usage{ cl_margin(x) } \arguments{ \item{x}{an \R object representing a partition of objects.} } \details{ For hard partitions, the margins are always 1. For soft partitions, the margins may be taken as an indication of the \dQuote{sureness} of classifying an object to the class with maximum membership value. } \examples{ data("GVME") ## Look at the classes obtained for 1980: split(cl_object_names(GVME[["1980"]]), cl_class_ids(GVME[["1980"]])) ## Margins: x <- cl_margin(GVME[["1980"]]) ## Add names, and sort: names(x) <- cl_object_names(GVME[["1980"]]) sort(x) ## Note the "uncertainty" of assigning Egypt to the "intermediate" class ## of nations. 
} \keyword{cluster} clue/man/n_of_classes.Rd0000644000175100001440000000347612211412701014721 0ustar hornikusers\name{n_of_classes} \alias{n_of_classes} \alias{cl_class_ids} \alias{as.cl_class_ids} \title{Classes in a Partition} \description{Determine the number of classes and the class ids in a partition of objects.} \usage{ n_of_classes(x) cl_class_ids(x) as.cl_class_ids(x) } \arguments{ \item{x}{an object representing a (hard or soft) partition (for \code{n_of_classes} and \code{cl_class_ids}), or raw class ids (for \code{as.cl_class_ids}).} } \value{ For \code{n_of_classes}, an integer giving the number of classes in the partition. For \code{cl_class_ids}, a vector of integers with the corresponding class ids. For soft partitions, the class ids returned are those of the \emph{nearest hard partition} obtained by taking the class ids of the (first) maximal membership values. } \details{ These functions are generic functions. The methods provided in package \pkg{clue} handle the partitions obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself). Note that the number of classes is taken as the number of distinct class ids actually used in the partition, and may differ from the number of columns in a membership matrix representing the partition. \code{as.cl_class_ids} can be used for coercing \dQuote{raw} class ids (given as atomic vectors) to class id objects. 
} \seealso{ \code{\link{is.cl_partition}} } \examples{ data("Cassini") party <- kmeans(Cassini$x, 3) n_of_classes(party) ## A simple confusion matrix: table(cl_class_ids(party), Cassini$classes) ## For an "oversize" membership matrix representation: n_of_classes(cl_membership(party, 6)) } \keyword{cluster} clue/man/cl_bag.Rd0000644000175100001440000000640612734174125013505 0ustar hornikusers\name{cl_bag} \alias{cl_bag} \title{Bagging for Clustering} \description{ Construct partitions of objects by running a base clustering algorithm on bootstrap samples from a given data set, and \dQuote{suitably} aggregating these primary partitions. } \usage{ cl_bag(x, B, k = NULL, algorithm = "kmeans", parameters = NULL, method = "DFBC1", control = NULL) } \arguments{ \item{x}{the data set of objects to be clustered, as appropriate for the base clustering algorithm.} \item{B}{an integer giving the number of bootstrap replicates.} \item{k}{\code{NULL} (default), or an integer giving the number of classes to be used for a partitioning base algorithm.} \item{algorithm}{a character string or function specifying the base clustering algorithm.} \item{parameters}{a named list of additional arguments to be passed to the base algorithm.} \item{method}{a character string indicating the bagging method to use. Currently, only method \code{"DFBC1"} is available, which implements algorithm \emph{BagClust1} in Dudoit & Fridlyand (2003).} \item{control}{a list of control parameters for the aggregation. Currently, not used.} } \value{ An R object representing a partition of the objects given in \code{x}. } \details{ Bagging for clustering is really a rather general conceptual framework than a specific algorithm. 
If the primary partitions generated in the bootstrap stage form a cluster ensemble (so that class memberships of the objects in \code{x} can be obtained), consensus methods for cluster ensembles (as implemented, e.g., in \code{\link{cl_consensus}} and \code{\link{cl_medoid}}) can be employed for the aggregation stage. In particular, (possibly new) bagging algorithms can easily be realized by directly running \code{\link{cl_consensus}} on the results of \code{\link{cl_boot}}. In BagClust1, aggregation proceeds by generating a reference partition by running the base clustering algorithm on the whole given data set, and averaging the ensemble memberships after optimally matching them to the reference partition (in fact, by minimizing Euclidean dissimilarity, see \code{\link{cl_dissimilarity}}). If the base clustering algorithm yields prototypes, aggregation can be based on clustering these. This is the idea underlying the \dQuote{Bagged Clustering} algorithm introduced in Leisch (1999) and implemented by function \code{\link[e1071]{bclust}} in package \pkg{e1071}. } \references{ S. Dudoit and J. Fridlyand (2003). Bagging to improve the accuracy of a clustering procedure. \emph{Bioinformatics}, \bold{19}/9, 1090--1099. \doi{10.1093/bioinformatics/btg038}. F. Leisch (1999). \emph{Bagged Clustering}. Working Paper 51, SFB \dQuote{Adaptive Information Systems and Modeling in Economics and Management Science}. \url{https://epub.wu.ac.at/1272/}. } \examples{ set.seed(1234) ## Run BagClust1 on the Cassini data. 
data("Cassini") party <- cl_bag(Cassini$x, 50, 3) plot(Cassini$x, col = cl_class_ids(party), xlab = "", ylab = "") ## Actually, using fuzzy c-means as a base learner works much better: if(require("e1071", quiet = TRUE)) { party <- cl_bag(Cassini$x, 20, 3, algorithm = "cmeans") plot(Cassini$x, col = cl_class_ids(party), xlab = "", ylab = "") } } \keyword{cluster} clue/man/cl_pclust.Rd0000644000175100001440000001036512734173132014262 0ustar hornikusers\name{cl_pclust} \alias{cl_pclust} \title{Prototype-Based Partitions of Clusterings} \description{ Compute prototype-based partitions of a cluster ensemble by minimizing \eqn{\sum w_b u_{bj}^m d(x_b, p_j)^e}, the sum of the case-weighted and membership-weighted \eqn{e}-th powers of the dissimilarities between the elements \eqn{x_b} of the ensemble and the prototypes \eqn{p_j}, for suitable dissimilarities \eqn{d} and exponents \eqn{e}. } \usage{ cl_pclust(x, k, method = NULL, m = 1, weights = 1, control = list()) } \arguments{ \item{x}{an ensemble of partitions or hierarchies, or something coercible to that (see \code{\link{cl_ensemble}}).} \item{k}{an integer giving the number of classes to be used in the partition.} \item{method}{the consensus method to be employed, see \code{\link{cl_consensus}}.} \item{m}{a number not less than 1 controlling the softness of the partition (as the \dQuote{fuzzification parameter} of the fuzzy \eqn{c}-means algorithm). The default value of 1 corresponds to hard partitions obtained from a generalized \eqn{k}-means problem; values greater than one give partitions of increasing softness obtained from a generalized fuzzy \eqn{c}-means problem.} \item{weights}{a numeric vector of non-negative case weights. Recycled to the number of elements in the ensemble given by \code{x} if necessary.} \item{control}{a list of control parameters. 
See \bold{Details}.} } \value{ An object of class \code{"cl_partition"} representing the obtained \dQuote{secondary} partition by an object of class \code{"cl_pclust"}, which is a list containing at least the following components. \item{prototypes}{a cluster ensemble with the \eqn{k} prototypes.} \item{membership}{an object of class \code{"\link{cl_membership}"} with the membership values \eqn{u_{bj}}.} \item{cluster}{the class ids of the nearest hard partition.} \item{silhouette}{Silhouette information for the partition, see \code{\link[cluster]{silhouette}}.} \item{validity}{precomputed validity measures for the partition.} \item{m}{the softness control argument.} \item{call}{the matched call.} \item{d}{the dissimilarity function \eqn{d = d(x, p)} employed.} \item{e}{the exponent \eqn{e} employed.} } \details{ Partitioning is performed using \code{\link{pclust}} via a family constructed from \code{method}. The dissimilarities \eqn{d} and exponent \eqn{e} are implied by the consensus method employed, and inferred via a registration mechanism currently only made available to built-in consensus methods. The default methods compute Least Squares Euclidean consensus clusterings, i.e., use Euclidean dissimilarity \eqn{d} and \eqn{e = 2}. For \eqn{m = 1}, the partitioning procedure was introduced by Gaul and Schader (1988) for \dQuote{Clusterwise Aggregation of Relations} (with the same domains), containing equivalence relations, i.e., hard partitions, as a special case. Available control parameters are as for \code{\link{pclust}}. The fixed point approach employed is a heuristic which cannot be guaranteed to find the global minimum (as this is already true for the computation of consensus clusterings). Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the base algorithm. } \references{ J. C. Bezdek (1981). \emph{Pattern recognition with fuzzy objective function algorithms}. New York: Plenum. W. Gaul and M. 
Schader (1988). Clusterwise aggregation of relations. \emph{Applied Stochastic Models and Data Analysis}, \bold{4}:273--282. \doi{10.1002/asm.3150040406}. } \examples{ ## Use a precomputed ensemble of 50 k-means partitions of the ## Cassini data. data("CKME") CKME <- CKME[1 : 30] # for saving precious time ... diss <- cl_dissimilarity(CKME) hc <- hclust(diss) plot(hc) ## This suggests using a partition with three classes, which can be ## obtained using cutree(hc, 3). Could use cl_consensus() to compute ## prototypes as the least squares consensus clusterings of the classes, ## or alternatively: set.seed(123) x1 <- cl_pclust(CKME, 3, m = 1) x2 <- cl_pclust(CKME, 3, m = 2) ## Agreement of solutions. cl_dissimilarity(x1, x2) table(cl_class_ids(x1), cl_class_ids(x2)) } \keyword{cluster} clue/man/CKME.Rd0000644000175100001440000000071011304023137012771 0ustar hornikusers\name{CKME} \alias{CKME} \title{Cassini Data Partitions Obtained by K-Means} \description{ A cluster ensemble of 50 \eqn{k}-means partitions of the Cassini data into three classes. } \usage{data("CKME")} \format{ A cluster ensemble of 50 (\eqn{k}-means) partitions. } \details{ The ensemble was generated via \preformatted{ require("clue") data("Cassini") set.seed(1234) CKME <- cl_boot(Cassini$x, 50, 3) } } \keyword{datasets} clue/man/kmedoids.Rd0000644000175100001440000000414612734173215014073 0ustar hornikusers\name{kmedoids} \alias{kmedoids} \title{K-Medoids Clustering} \description{ Compute a \eqn{k}-medoids partition of a dissimilarity object. } \usage{ kmedoids(x, k) } \arguments{ \item{x}{a dissimilarity object inheriting from class \code{"\link{dist}"}, or a square matrix of pairwise object-to-object dissimilarity values.} \item{k}{an integer giving the number of classes to be used in the partition.} } \value{ An object of class \code{"kmedoids"} representing the obtained partition, which is a list with the following components. 
\item{cluster}{the class ids of the partition.} \item{medoid_ids}{the indices of the medoids.} \item{criterion}{the value of the criterion function of the partition.} } \details{ Let \eqn{d} denote the pairwise object-to-object dissimilarity matrix corresponding to \code{x}. A \eqn{k}-medoids partition of \code{x} is defined as a partition of the numbers from 1 to \eqn{n}, the number of objects in \code{x}, into \eqn{k} classes \eqn{C_1, \ldots, C_k} such that the criterion function \eqn{L = \sum_l \min_{j \in C_l} \sum_{i \in C_l} d_{ij}} is minimized. This is an NP-hard optimization problem. PAM (Partitioning Around Medoids, see Kaufman & Rousseeuw (1990), Chapter 2) is a very popular heuristic for obtaining optimal \eqn{k}-medoids partitions, and provided by \code{\link[cluster]{pam}} in package \pkg{cluster}. \code{kmedoids} is an exact algorithm based on a binary linear programming formulation of the optimization problem (e.g., Gordon & Vichi (1998), [P4']), using \code{\link[lpSolve]{lp}} from package \pkg{lpSolve} as solver. Depending on available hardware resources (the number of constraints of the program is of the order \eqn{n^2}), it may not be possible to obtain a solution. } \references{ L. Kaufman and P. J. Rousseeuw (1990). \emph{Finding Groups in Data: An Introduction to Cluster Analysis}. Wiley, New York. A. D. Gordon and M. Vichi (1998). Partitions of partitions. \emph{Journal of Classification}, \bold{15}, 265--285. \doi{10.1007/s003579900034}. } \keyword{cluster} \keyword{optimize} clue/man/cl_validity.Rd0000644000175100001440000000674113140644223014574 0ustar hornikusers\name{cl_validity} \alias{cl_validity} \alias{cl_validity.default} \title{Validity Measures for Partitions and Hierarchies} \description{ Compute validity measures for partitions and hierarchies, attempting to measure how well these clusterings capture the underlying structure in the data they were obtained from. } \usage{ cl_validity(x, ...) 
\method{cl_validity}{default}(x, d, ...) } \arguments{ \item{x}{an object representing a partition or hierarchy.} \item{d}{a dissimilarity object from which \code{x} was obtained.} \item{\dots}{arguments to be passed to or from methods.} } \value{ A list of class \code{"cl_validity"} with the computed validity measures. } \details{ \code{cl_validity} is a generic function. For partitions, its default method gives the \dQuote{dissimilarity accounted for}, defined as \eqn{1 - a_w / a_t}, where \eqn{a_t} is the average total dissimilarity, and the \dQuote{average within dissimilarity} \eqn{a_w} is given by \deqn{\frac{\sum_{i,j} \sum_k m_{ik}m_{jk} d_{ij}}{ \sum_{i,j} \sum_k m_{ik}m_{jk}}}{% \sum_{i,j} \sum_k m_{ik}m_{jk} d_{ij} / \sum_{i,j} \sum_k m_{ik}m_{jk}} where \eqn{d} and \eqn{m} are the dissimilarities and memberships, respectively, and the sums are over all pairs of objects and all classes. For hierarchies, the validity measures computed by default are \dQuote{variance accounted for} (VAF, e.g., Hubert, Arabie & Meulman, 2006) and \dQuote{deviance accounted for} (DEV, e.g., Smith, 2001). If \code{u} is the ultrametric corresponding to the hierarchy \code{x} and \code{d} the dissimilarity \code{x} was obtained from, these validity measures are given by \deqn{\mathrm{VAF} = \max\left(0, 1 - \frac{\sum_{i,j} (d_{ij} - u_{ij})^2}{ \sum_{i,j} (d_{ij} - \mathrm{mean}(d)) ^ 2}\right)}{ max(0, 1 - sum_{i,j} (d_{ij} - u_{ij})^2 / sum_{i,j} (d_{ij} - mean(d))^2)} and \deqn{\mathrm{DEV} = \max\left(0, 1 - \frac{\sum_{i,j} |d_{ij} - u_{ij}|}{ \sum_{i,j} |d_{ij} - \mathrm{median}(d)|}\right)}{ max(0, 1 - sum_{i,j} |d_{ij} - u_{ij}| / sum_{i,j} |d_{ij} - median(d)|)} respectively. Note that VAF and DEV are not invariant under rescaling \code{u}, and may be \dQuote{arbitrarily small} (i.e., 0 using the above definitions) even though \code{u} and \code{d} are \dQuote{structurally close} in some sense. 
For the results of using \code{\link[cluster]{agnes}} and \code{\link[cluster]{diana}}, the agglomerative and divisive coefficients are provided in addition to the default ones. } \references{ L. Hubert, P. Arabie and J. Meulman (2006). \emph{The structural representation of proximity matrices with MATLAB}. Philadelphia, PA: SIAM. T. J. Smith (2001). Constructing ultrametric and additive trees based on the \eqn{L_1} norm. \emph{Journal of Classification}, \bold{18}/2, 185--207. \url{https://link.springer.com/article/10.1007/s00357-001-0015-0}. %% The above web page has %% \doi{10.1007/s00357-001-0015-0}. %% which does not work. Reported to the responsible DOI Registration %% Agency on 2017-08-03, let's use the URL instead of the DOI for now. } \seealso{ \code{\link[fpc]{cluster.stats}} in package \pkg{fpc} for a variety of cluster validation statistics; \code{\link[e1071]{fclustIndex}} in package \pkg{e1071} for several fuzzy cluster indexes; \code{\link[cclust:Rindexes]{clustIndex}} in package \pkg{cclust}; \code{\link[cluster]{silhouette}} in package \pkg{cluster}. } \keyword{cluster} clue/man/l1_fit_ultrametric.Rd0000644000175100001440000001111313140644252016051 0ustar hornikusers\name{l1_fit_ultrametric} \alias{l1_fit_ultrametric} \title{Least Absolute Deviation Fit of Ultrametrics to Dissimilarities} \description{ Find the ultrametric with minimal absolute distance (Manhattan dissimilarity) to a given dissimilarity object. } \usage{ l1_fit_ultrametric(x, method = c("SUMT", "IRIP"), weights = 1, control = list()) } \arguments{ \item{x}{a dissimilarity object inheriting from or coercible to class \code{"\link{dist}"}.} \item{method}{a character string indicating the fitting method to be employed. Must be one of \code{"SUMT"} (default) or \code{"IRIP"}, or a unique abbreviation thereof.} \item{weights}{a numeric vector or matrix with non-negative weights for obtaining a weighted least squares fit. 
If a matrix, its numbers of rows and columns must be the same as the number of objects in \code{x}, and the lower diagonal part is used. Otherwise, it is recycled to the number of elements in \code{x}.} \item{control}{a list of control parameters. See \bold{Details}.} } \value{ An object of class \code{"\link{cl_ultrametric}"} containing the fitted ultrametric distances. } \details{ The problem to be solved is minimizing \deqn{L(u) = \sum_{i,j} w_{ij} |x_{ij} - u_{ij}|} over all \eqn{u} satisfying the ultrametric constraints (i.e., for all \eqn{i, j, k}, \eqn{u_{ij} \le \max(u_{ik}, u_{jk})}). This problem is known to be NP hard (Krivanek and Moravek, 1986). We provide two heuristics for solving this problem. Method \code{"SUMT"} implements a \acronym{SUMT} (Sequential Unconstrained Minimization Technique, see \code{\link{sumt}}) approach using the sign function for the gradients of the absolute value function. Available control parameters are \code{method}, \code{control}, \code{eps}, \code{q}, and \code{verbose}, which have the same roles as for \code{\link{sumt}}, and the following. \describe{ \item{\code{nruns}}{an integer giving the number of runs to be performed. Defaults to 1.} \item{\code{start}}{a single dissimilarity, or a list of dissimilarities to be employed as starting values.} } Method \code{"IRIP"} implements a variant of the Iteratively Reweighted Iterative Projection approach of Smith (2001), which attempts to solve the \eqn{L_1} problem via a sequence of weighted \eqn{L_2} problems, determining \eqn{u(t+1)} by minimizing the criterion function \deqn{\sum_{i,j} w_{ij} (x_{ij} - u_{ij})^2 / \max(|x_{ij} - u_{ij}(t)|, m)} with \eqn{m} a \dQuote{small} non-zero value to avoid zero divisors. We use the \acronym{SUMT} method of \code{\link{ls_fit_ultrametric}} for solving the weighted least squares problems. Available control parameters are as follows. 
\describe{ \item{\code{maxiter}}{an integer giving the maximal number of iteration steps to be performed. Defaults to 100.} \item{\code{eps}}{a nonnegative number controlling the iteration, which stops when the maximal change in \eqn{u} is less than \code{eps}. Defaults to \eqn{10^{-6}}.} \item{\code{reltol}}{the relative convergence tolerance. Iteration stops when the relative change in the \eqn{L_1} criterion is less than \code{reltol}. Defaults to \eqn{10^{-6}}.} \item{\code{MIN}}{the cutoff \eqn{m}. Defaults to \eqn{10^{-3}}.} \item{\code{start}}{a dissimilarity object to be used as the starting value for \eqn{u}.} \item{\code{control}}{a list of control parameters to be used by the method of \code{\link{ls_fit_ultrametric}} employed for solving the weighted \eqn{L_2} problems.} } One may need to adjust the default control parameters to achieve convergence. It should be noted that all methods are heuristics which can not be guaranteed to find the global minimum. } \seealso{ \code{\link{cl_consensus}} for computing least absolute deviation (Manhattan) consensus hierarchies; \code{\link{ls_fit_ultrametric}}. } \references{ M. Krivanek and J. Moravek (1986). NP-hard problems in hierarchical tree clustering. \emph{Acta Informatica}, \bold{23}, 311--323. \doi{10.1007/BF00289116}. T. J. Smith (2001). Constructing ultrametric and additive trees based on the \eqn{L_1} norm. \emph{Journal of Classification}, \bold{18}, 185--207. \url{https://link.springer.com/article/10.1007/s00357-001-0015-0}. %% The above web page has %% \doi{10.1007/s00357-001-0015-0}. %% which does not work. Reported to the responsible DOI Registration %% Agency on 2017-08-03, let's use the URL instead of the DOI for now. } \keyword{cluster} \keyword{optimize} clue/man/cl_pam.Rd0000644000175100001440000000515312734173110013520 0ustar hornikusers\name{cl_pam} \alias{cl_pam} \title{K-Medoids Partitions of Clusterings} \description{ Compute \eqn{k}-medoids partitions of clusterings. 
} \usage{ cl_pam(x, k, method = "euclidean", solver = c("pam", "kmedoids")) } \arguments{ \item{x}{an ensemble of partitions or hierarchies, or something coercible to that (see \code{\link{cl_ensemble}}).} \item{k}{an integer giving the number of classes to be used in the partition.} \item{method}{a character string or a function, as for argument \code{method} of function \code{\link{cl_dissimilarity}}.} \item{solver}{a character string indicating the \eqn{k}-medoids solver to be employed. May be abbreviated. If \code{"pam"} (default), the Partitioning Around Medoids (Kaufman & Rousseeuw (1990), Chapter 2) heuristic \code{\link[cluster]{pam}} of package \pkg{cluster} is used. Otherwise, the exact algorithm of \code{\link{kmedoids}} is employed.} } \value{ An object of class \code{"cl_pam"} representing the obtained \dQuote{secondary} partition, which is a list with the following components. \item{cluster}{the class ids of the partition.} \item{medoid_ids}{the indices of the medoids.} \item{prototypes}{a cluster ensemble with the \eqn{k} prototypes (medoids).} \item{criterion}{the value of the criterion function of the partition.} \item{description}{a character string indicating the dissimilarity method employed.} } \details{ An optimal \eqn{k}-medoids partition of the given cluster ensemble is defined as a partition of the objects \eqn{x_i} (the elements of the ensemble) into \eqn{k} classes \eqn{C_1, \ldots, C_k} such that the criterion function \eqn{L = \sum_{l=1}^k \min_{j \in C_l} \sum_{i \in C_l} d(x_i, x_j)} is minimized. Such secondary partitions (e.g., Gordon & Vichi, 1998) are obtained by computing the dissimilarities \eqn{d} of the objects in the ensemble for the given dissimilarity method, and applying a dissimilarity-based \eqn{k}-medoids solver to \eqn{d}. } \references{ L. Kaufman and P. J. Rousseeuw (1990). \emph{Finding Groups in Data: An Introduction to Cluster Analysis}. Wiley, New York. A. D. Gordon and M. Vichi (1998). Partitions of partitions. 
\emph{Journal of Classification}, \bold{15}, 265--285. \doi{10.1007/s003579900034}. } \seealso{ \code{\link{cl_pclust}} for more general prototype-based partitions of clusterings. } \examples{ data("Kinship82") party <- cl_pam(Kinship82, 3, "symdiff") ## Compare results with tables 5 and 6 in Gordon & Vichi (1998). party lapply(cl_prototypes(party), cl_classes) table(cl_class_ids(party)) } \keyword{cluster} clue/man/lattice.Rd0000644000175100001440000001236312734173524013724 0ustar hornikusers\name{lattice} \encoding{UTF-8} \alias{cl_meet} \alias{cl_join} \alias{Ops.cl_partition} \alias{Summary.cl_partition} \alias{Ops.cl_dendrogram} \alias{Ops.cl_hierarchy} \alias{Summary.cl_hierarchy} \title{Cluster Lattices} \description{ Computations on the lattice of all (hard) partitions, or the lattice of all dendrograms, or the meet semilattice of all hierarchies (\eqn{n}-trees) of/on a set of objects: meet, join, and comparisons. } \usage{ cl_meet(x, y) cl_join(x, y) } \arguments{ \item{x}{an ensemble of partitions or dendrograms or hierarchies, or an R object representing a partition or dendrogram or hierarchy.} \item{y}{an R object representing a partition or dendrogram or hierarchy. Ignored if \code{x} is an ensemble.} } \details{ For a given finite set of objects \eqn{X}, the set \eqn{H(X)} of all (hard) partitions of \eqn{X} can be partially ordered by defining a partition \eqn{P} to be \dQuote{finer} than a partition \eqn{Q}, i.e., \eqn{P \le Q}, if each class of \eqn{P} is contained in some class of \eqn{Q}. With this partial order, \eqn{H(X)} becomes a bounded \dfn{lattice}, with intersection and union of two elements given by their greatest lower bound (\dfn{meet}) and their least upper bound (\dfn{join}), respectively. 
Specifically, the meet of two partitions computed by \code{cl_meet} is the partition obtained by intersecting the classes of the partitions; the classes of the join computed by \code{cl_join} are obtained by joining all elements in the same class in at least one of the partitions. Obviously, the least and greatest elements of the partition lattice are the partitions where each object is in a single class (sometimes referred to as the \dQuote{splitter} partition) or in the same class (the \dQuote{lumper} partition), respectively. Meet and join of an arbitrary number of partitions can be defined recursively. In addition to computing the meet and join, the comparison operations corresponding to the above partial order as well as \code{min}, \code{max}, and \code{range} are available at least for R objects representing partitions inheriting from \code{"\link{cl_partition}"}. The summary methods give the meet and join of the given partitions (for \code{min} and \code{max}), or a partition ensemble with the meet and join (for \code{range}). If the partitions specified by \code{x} and \code{y} are soft partitions, the corresponding nearest hard partitions are used. Future versions may optionally provide suitable \dQuote{soft} (fuzzy) extensions for computing meets and joins. The set of all dendrograms on \eqn{X} can be ordered using pointwise inequality of the associated ultrametric dissimilarities: i.e., if \eqn{D} and \eqn{E} are the dendrograms with ultrametrics \eqn{u} and \eqn{v}, respectively, then \eqn{D \le E} if \eqn{u_{ij} \le v_{ij}} for all pairs \eqn{(i, j)} of objects. This again yields a lattice (of dendrograms). The join of \eqn{D} and \eqn{E} is the dendrogram with ultrametrics given by \eqn{\max(u_{ij}, v_{ij})} (as this gives an ultrametric); the meet is the dendrogram with the maximal ultrametric dominated by \eqn{\min(u_{ij}, v_{ij})}, and can be obtained by applying single linkage hierarchical clustering to the minima. 
The set of all hierarchies on \eqn{X} can be ordered by set-wise inclusion of the classes: i.e., if \eqn{H} and \eqn{G} are two hierarchies, then \eqn{H \le G} if all classes of \eqn{H} are also classes of \eqn{G}. This yields a meet semilattice, with meet given by the classes contained in both hierarchies. The join only exists if the union of the classes is a hierarchy. In each case, a modular semilattice is obtained, which allows for a natural metrization via least element (semi)lattice move distances, see Barthélémy, Leclerc and Monjardet (1981). These latticial metrics are given by the BA/C (partitions), Manhattan (dendrograms), and symdiff (hierarchies) dissimilarities, respectively (see \code{\link{cl_dissimilarity}}). } \value{ For \code{cl_meet} and \code{cl_join}, an object of class \code{"\link{cl_partition}"} or \code{"\link{cl_dendrogram}"} with the class ids or ultrametric dissimilarities of the meet and join of the partitions or dendrograms, respectively. } \references{ J.-P. Barthélémy, B. Leclerc and B. Monjardet (1981). On the use of ordered sets in problems of comparison and consensus of classification. \emph{Journal of Classification}, \bold{3}, 187--224. \doi{10.1007/BF01894188}. } \examples{ ## Two simple partitions of 7 objects. A <- as.cl_partition(c(1, 1, 2, 3, 3, 5, 5)) B <- as.cl_partition(c(1, 2, 2, 3, 4, 5, 5)) ## These disagree on objects 1-3, A splits objects 4 and 5 into ## separate classes. Objects 6 and 7 are always in the same class. (A <= B) || (B <= A) ## (Neither partition is finer than the other.) 
cl_meet(A, B) cl_join(A, B) ## Meeting with the lumper (greatest) or joining with the splitter ## (least) partition does not make a difference: C_lumper <- as.cl_partition(rep(1, n_of_objects(A))) cl_meet(cl_ensemble(A, B, C_lumper)) C_splitter <- as.cl_partition(seq(length = n_of_objects(A))) cl_join(cl_ensemble(A, B, C_splitter)) ## Another way of computing the join: range(A, B, C_splitter)$max } \keyword{cluster} clue/man/Cassini.Rd0000644000175100001440000000237411304023137013653 0ustar hornikusers\name{Cassini} \alias{Cassini} \title{Cassini Data} \description{ A Cassini data set with 1000 points in 2-dimensional space which are drawn from the uniform distribution on 3 structures. The two outer structures are banana-shaped; the \dQuote{middle} structure in between them is a circle. } \usage{data("Cassini")} \format{ A classed list with components \describe{ \item{\code{x}}{a matrix with 1000 rows and 2 columns giving the coordinates of the points.} \item{\code{classes}}{a factor indicating which structure the respective points belong to.} } } \details{ Instances of Cassini data sets can be created using function \code{\link[mlbench]{mlbench.cassini}} in package \pkg{mlbench}. The data set at hand was obtained using \preformatted{ library("mlbench") set.seed(1234) Cassini <- mlbench.cassini(1000) } } \examples{ data("Cassini") op <- par(mfcol = c(1, 2)) ## Plot the data set: plot(Cassini$x, col = as.integer(Cassini$classes), xlab = "", ylab = "") ## Create a "random" k-means partition of the data: set.seed(1234) party <- kmeans(Cassini$x, 3) ## And plot that. plot(Cassini$x, col = cl_class_ids(party), xlab = "", ylab = "") ## (We can see the problem ...) par(op) } \keyword{datasets} clue/man/addtree.Rd0000644000175100001440000000364312037226501013676 0ustar hornikusers\name{addtree} \encoding{UTF-8} \alias{as.cl_addtree} \title{Additive Tree Distances} \description{ Objects representing additive tree distances. 
} \usage{ as.cl_addtree(x) } \arguments{ \item{x}{an R object representing additive tree distances.} } \value{ An object of class \code{"cl_addtree"} containing the additive tree distances. } \details{ Additive tree distances are object dissimilarities \eqn{d} satisfying the so-called \emph{additive tree conditions}, also known as \emph{four-point conditions} \eqn{d_{ij} + d_{kl} \le \max(d_{ik} + d_{jl}, d_{il} + d_{jk})} for all quadruples \eqn{i, j, k, l}. Equivalently, for each such quadruple, the largest two values of the sums \eqn{d_{ij} + d_{kl}}, \eqn{d_{ik} + d_{jl}}, and \eqn{d_{il} + d_{jk}} must be equal. Centroid distances are additive tree distances where the inequalities in the four-point conditions are strengthened to equalities (such that all three sums are equal), and can be represented as \eqn{d_{ij} = g_i + g_j}, i.e., as sums of distances from a \dQuote{centroid}. See, e.g., Barthélémy and Guénoche (1991) for more details on additive tree distances. \code{as.cl_addtree} is a generic function. Its default method can handle objects representing ultrametric distances and raw additive distance matrices. In addition, there is a method for coercing objects of class \code{"\link[ape:as.phylo]{phylo}"} from package \pkg{ape}. Functions \code{\link{ls_fit_addtree}} and \code{\link{ls_fit_centroid}} can be used to find the additive tree distance or centroid distance minimizing least squares distance (Euclidean dissimilarity) to a given dissimilarity object. There is a \code{\link{plot}} method for additive tree distances. } \references{ J.-P. Barthélémy and A. Guénoche (1991). \emph{Trees and proximity representations}. Chichester: John Wiley & Sons. ISBN 0-471-92263-3. 
} \keyword{cluster} clue/man/cl_membership.Rd0000644000175100001440000000462312211412501015065 0ustar hornikusers\name{cl_membership} \alias{cl_membership} \alias{as.cl_membership} \title{Memberships of Partitions} \description{ Compute the membership values for objects representing partitions. } \usage{ cl_membership(x, k = n_of_classes(x)) as.cl_membership(x) } \arguments{ \item{x}{an R object representing a partition of objects (for \code{cl_membership}) or raw memberships or class ids (for \code{as.cl_membership}).} \item{k}{an integer giving the number of columns (corresponding to class ids) to be used in the membership matrix. Must not be less than, and defaults to, the number of classes in the partition.} } \value{ An object of class \code{"cl_membership"} with the matrix of membership values. } \details{ \code{cl_membership} is a generic function. The methods provided in package \pkg{clue} handle the partitions obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself). \code{as.cl_membership} can be used for coercing \dQuote{raw} class ids (given as atomic vectors) or membership values (given as numeric matrices) to membership objects. } \seealso{ \code{\link{is.cl_partition}} } \examples{ ## Getting the memberships of a single soft partition. d <- dist(USArrests) hclust_methods <- c("ward", "single", "complete", "average", "mcquitty") hclust_results <- lapply(hclust_methods, function(m) hclust(d, m)) names(hclust_results) <- hclust_methods ## Now create an ensemble from the results. hens <- cl_ensemble(list = hclust_results) ## And add the results of agnes and diana. require("cluster") hens <- c(hens, list(agnes = agnes(d), diana = diana(d))) ## Create a dissimilarity object from this. d1 <- cl_dissimilarity(hens) ## And compute a soft partition. 
party <- fanny(d1, 2) round(cl_membership(party), 5) ## The "nearest" hard partition to this: as.cl_hard_partition(party) ## (which has the same class ids as cl_class_ids(party)). ## Extracting the memberships from the elements of an ensemble of ## partitions. pens <- cl_boot(USArrests, 30, 3) pens mems <- lapply(pens, cl_membership) ## And turning these raw memberships into an ensemble of partitions. pens <- cl_ensemble(list = lapply(mems, as.cl_partition)) pens pens[[length(pens)]] } \keyword{cluster} clue/man/cl_consensus.Rd0000644000175100001440000003671212734174375015006 0ustar hornikusers\name{cl_consensus} \alias{cl_consensus} \title{Consensus Partitions and Hierarchies} \description{ Compute the consensus clustering of an ensemble of partitions or hierarchies. } \usage{ cl_consensus(x, method = NULL, weights = 1, control = list()) } \arguments{ \item{x}{an ensemble of partitions or hierarchies, or something coercible to that (see \code{\link{cl_ensemble}}).} \item{method}{a character string specifying one of the built-in methods for computing consensus clusterings, or a function to be taken as a user-defined method, or \code{NULL} (default value). If a character string, its lower-cased version is matched against the lower-cased names of the available built-in methods using \code{\link{pmatch}}. See \bold{Details} for available built-in methods and defaults.} \item{weights}{a numeric vector with non-negative case weights. Recycled to the number of elements in the ensemble given by \code{x} if necessary.} \item{control}{a list of control parameters. See \bold{Details}.} } \value{ The consensus partition or hierarchy. } \details{ Consensus clusterings \dQuote{synthesize} the information in the elements of a cluster ensemble into a single clustering, often by minimizing a criterion function measuring how dissimilar consensus candidates are from the (elements of) the ensemble (the so-called \dQuote{optimization approach} to consensus clustering). 
The most popular criterion functions are of the form \eqn{L(x) = \sum w_b d(x_b, x)^p}, where \eqn{d} is a suitable dissimilarity measure (see \code{\link{cl_dissimilarity}}), \eqn{w_b} is the case weight given to element \eqn{x_b} of the ensemble, and \eqn{p \ge 1}. If \eqn{p = 1} and minimization is over all possible base clusterings, a consensus solution is called a \emph{median} of the ensemble; if minimization is restricted to the elements of the ensemble, a consensus solution is called a \emph{medoid} (see \code{\link{cl_medoid}}). For \eqn{p = 2}, we obtain \emph{least squares} consensus partitions and hierarchies (generalized means). See also Gordon (1999) for more information. If all elements of the ensemble are partitions, the built-in consensus methods compute consensus partitions by minimizing a criterion of the form \eqn{L(x) = \sum w_b d(x_b, x)^p} over all hard or soft partitions \eqn{x} with a given (maximal) number \eqn{k} of classes. Available built-in methods are as follows. \describe{ \item{\code{"SE"}}{a fixed-point algorithm for obtaining \emph{soft} least squares Euclidean consensus partitions (i.e., for minimizing \eqn{L} with Euclidean dissimilarity \eqn{d} and \eqn{p = 2} over all soft partitions with a given maximal number of classes). This iterates between individually matching all partitions to the current approximation to the consensus partition, and computing the next approximation as the membership matrix closest to a suitable weighted average of the memberships of all partitions after permuting their columns for the optimal matchings of class ids. The following control parameters are available for this method. \describe{ \item{\code{k}}{an integer giving the number of classes to be used for the least squares consensus partition. By default, the maximal number of classes in the ensemble is used.} \item{\code{maxiter}}{an integer giving the maximal number of iterations to be performed. 
Defaults to 100.} \item{\code{nruns}}{an integer giving the number of runs to be performed. Defaults to 1.} \item{\code{reltol}}{the relative convergence tolerance. Defaults to \code{sqrt(.Machine$double.eps)}.} \item{\code{start}}{a matrix with number of rows equal to the number of objects of the cluster ensemble, and \eqn{k} columns, to be used as a starting value, or a list of such matrices. By default, suitable random membership matrices are used.} \item{\code{verbose}}{a logical indicating whether to provide some output on minimization progress. Defaults to \code{getOption("verbose")}.} } In the case of multiple runs, the first optimum found is returned. This method can also be referred to as \code{"soft/euclidean"}. } \item{\code{"GV1"}}{the fixed-point algorithm for the \dQuote{first model} in Gordon and Vichi (2001) for minimizing \eqn{L} with \eqn{d} being GV1 dissimilarity and \eqn{p = 2} over all soft partitions with a given maximal number of classes. This is similar to \code{"SE"}, but uses GV1 rather than Euclidean dissimilarity. Available control parameters are the same as for \code{"SE"}. } \item{\code{"DWH"}}{an extension of the greedy algorithm in Dimitriadou, Weingessel and Hornik (2002) for (approximately) obtaining soft least squares Euclidean consensus partitions. The reference provides some structure theory relating finding the consensus partition to an instance of the multiple assignment problem, which is known to be NP-hard, and suggests a simple heuristic based on successively matching an individual partition \eqn{x_b} to the current approximation to the consensus partition, and compute the memberships of the next approximation as a weighted average of those of the current one and of \eqn{x_b} after permuting its columns for the optimal matching of class ids. The following control parameters are available for this method. \describe{ \item{\code{k}}{an integer giving the number of classes to be used for the least squares consensus partition. 
By default, the maximal number of classes in the ensemble is used.} \item{\code{order}}{a permutation of the integers from 1 to the size of the ensemble, specifying the order in which the partitions in the ensemble should be aggregated. Defaults to using a random permutation (unlike the reference, which does not permute at all).} } } \item{\code{"HE"}}{a fixed-point algorithm for obtaining \emph{hard} least squares Euclidean consensus partitions (i.e., for minimizing \eqn{L} with Euclidean dissimilarity \eqn{d} and \eqn{p = 2} over all hard partitions with a given maximal number of classes.) Available control parameters are the same as for \code{"SE"}. This method can also be referred to as \code{"hard/euclidean"}. } \item{\code{"SM"}}{a fixed-point algorithm for obtaining \emph{soft} median Manhattan consensus partitions (i.e., for minimizing \eqn{L} with Manhattan dissimilarity \eqn{d} and \eqn{p = 1} over all soft partitions with a given maximal number of classes). Available control parameters are the same as for \code{"SE"}. This method can also be referred to as \code{"soft/manhattan"}. } \item{\code{"HM"}}{a fixed-point algorithm for obtaining \emph{hard} median Manhattan consensus partitions (i.e., for minimizing \eqn{L} with Manhattan dissimilarity \eqn{d} and \eqn{p = 1} over all hard partitions with a given maximal number of classes). Available control parameters are the same as for \code{"SE"}. This method can also be referred to as \code{"hard/manhattan"}. } \item{\code{"GV3"}}{a \acronym{SUMT} algorithm for the \dQuote{third model} in Gordon and Vichi (2001) for minimizing \eqn{L} with \eqn{d} being co-membership dissimilarity and \eqn{p = 2}. (See \code{\link{sumt}} for more information on the \acronym{SUMT} approach.) 
This optimization problem is equivalent to finding the membership matrix \eqn{m} for which the sum of the squared differences between \eqn{C(m) = m m'} and the weighted average co-membership matrix \eqn{\sum_b w_b C(m_b)} of the partitions is minimal. Available control parameters are \code{method}, \code{control}, \code{eps}, \code{q}, and \code{verbose}, which have the same roles as for \code{\link{sumt}}, and the following. \describe{ \item{\code{k}}{an integer giving the number of classes to be used for the least squares consensus partition. By default, the maximal number of classes in the ensemble is used.} \item{\code{nruns}}{an integer giving the number of runs to be performed. Defaults to 1.} \item{\code{start}}{a matrix with number of rows equal to the size of the cluster ensemble, and \eqn{k} columns, to be used as a starting value, or a list of such matrices. By default, a membership based on a rank \eqn{k} approximation to the weighted average co-membership matrix is used.} } In the case of multiple runs, the first optimum found is returned. } \item{\code{"soft/symdiff"}}{a \acronym{SUMT} approach for minimizing \eqn{L = \sum w_b d(x_b, x)} over all soft partitions with a given maximal number of classes, where \eqn{d} is the Manhattan dissimilarity of the co-membership matrices (coinciding with symdiff partition dissimilarity in the case of hard partitions). Available control parameters are the same as for \code{"GV3"}. } \item{\code{"hard/symdiff"}}{an exact solver for minimizing \eqn{L = \sum w_b d(x_b, x)} over all hard partitions (possibly with a given maximal number of classes as specified by the control parameter \code{k}), where \eqn{d} is symdiff partition dissimilarity (so that soft partitions in the ensemble are replaced by their closest hard partitions), or equivalently, Rand distance or pair-bonds (Boorman-Arabie \eqn{D}) distance. The consensus solution is found via mixed linear or quadratic programming. 
} } By default, method \code{"SE"} is used for ensembles of partitions. If all elements of the ensemble are hierarchies, the following built-in methods for computing consensus hierarchies are available. \describe{ \item{\code{"euclidean"}}{an algorithm for minimizing \eqn{L(x) = \sum w_b d(x_b, x) ^ 2} over all dendrograms, where \eqn{d} is Euclidean dissimilarity. This is equivalent to finding the best least squares ultrametric approximation of the weighted average \eqn{d = \sum w_b u_b} of the ultrametrics \eqn{u_b} of the hierarchies \eqn{x_b}, which is attempted by calling \code{\link{ls_fit_ultrametric}} on \eqn{d} with appropriate control parameters. This method can also be referred to as \code{"cophenetic"}. } \item{\code{"manhattan"}}{a \acronym{SUMT} for minimizing \eqn{L = \sum w_b d(x_b, x)} over all dendrograms, where \eqn{d} is Manhattan dissimilarity. Available control parameters are the same as for \code{"euclidean"}. } \item{\code{"majority"}}{a hierarchy obtained from an extension of the majority consensus tree of Margush and McMorris (1981), which minimizes \eqn{L(x) = \sum w_b d(x_b, x)} over all dendrograms, where \eqn{d} is the symmetric difference dissimilarity. The unweighted \eqn{p}-majority tree is the \eqn{n}-tree (hierarchy in the strict sense) consisting of all subsets of objects contained in more than \eqn{100 p} percent of the \eqn{n}-trees \eqn{T_b} induced by the dendrograms, where \eqn{1/2 \le p < 1} and \eqn{p = 1/2} (default) corresponds to the standard majority tree. In the weighted case, it consists of all subsets \eqn{A} for which \eqn{\sum_{b: A \in T_b} w_b > W p}, where \eqn{W = \sum_b w_b}. We also allow for \eqn{p = 1}, which gives the \emph{strict consensus tree} consisting of all subsets contained in each of the \eqn{n}-trees. The majority dendrogram returned is a representation of the majority tree where all splits have height one. The fraction \eqn{p} can be specified via the control parameter \code{p}. 
} } By default, method \code{"euclidean"} is used for ensembles of hierarchies. If a user-defined consensus method is to be employed, it must be a function taking the cluster ensemble, the case weights, and a list of control parameters as its arguments, with formals named \code{x}, \code{weights}, and \code{control}, respectively. Most built-in methods use heuristics for solving hard optimization problems, and cannot be guaranteed to find a global minimum. Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the methods. } \references{ E. Dimitriadou, A. Weingessel and K. Hornik (2002). A combination scheme for fuzzy clustering. \emph{International Journal of Pattern Recognition and Artificial Intelligence}, \bold{16}, 901--912. \cr \doi{10.1142/S0218001402002052}. A. D. Gordon and M. Vichi (2001). Fuzzy partition models for fitting a set of partitions. \emph{Psychometrika}, \bold{66}, 229--248. \doi{10.1007/BF02294837}. A. D. Gordon (1999). \emph{Classification} (2nd edition). Boca Raton, FL: Chapman & Hall/CRC. T. Margush and F. R. McMorris (1981). Consensus \eqn{n}-trees. \emph{Bulletin of Mathematical Biology}, \bold{43}, 239--244. \doi{10.1007/BF02459446}. } \seealso{ \code{\link{cl_medoid}}, \code{\link[ape]{consensus}} } \examples{ ## Consensus partition for the Rosenberg-Kim kinship terms partition ## data based on co-membership dissimilarities. data("Kinship82") m1 <- cl_consensus(Kinship82, method = "GV3", control = list(k = 3, verbose = TRUE)) ## (Note that one should really use several replicates of this.) ## Value for criterion function to be minimized: sum(cl_dissimilarity(Kinship82, m1, "comem") ^ 2) ## Compare to the consensus solution given in Gordon & Vichi (2001). data("Kinship82_Consensus") m2 <- Kinship82_Consensus[["JMF"]] sum(cl_dissimilarity(Kinship82, m2, "comem") ^ 2) ## Seems we get a better solution ... ## How dissimilar are these solutions? 
cl_dissimilarity(m1, m2, "comem") ## How "fuzzy" are they? cl_fuzziness(cl_ensemble(m1, m2)) ## Do the "nearest" hard partitions fully agree? cl_dissimilarity(as.cl_hard_partition(m1), as.cl_hard_partition(m2)) ## Consensus partition for the Gordon and Vichi (2001) macroeconomic ## partition data based on Euclidean dissimilarities. data("GVME") set.seed(1) ## First, using k = 2 classes. m1 <- cl_consensus(GVME, method = "GV1", control = list(k = 2, verbose = TRUE)) ## (Note that one should really use several replicates of this.) ## Value of criterion function to be minimized: sum(cl_dissimilarity(GVME, m1, "GV1") ^ 2) ## Compare to the consensus solution given in Gordon & Vichi (2001). data("GVME_Consensus") m2 <- GVME_Consensus[["MF1/2"]] sum(cl_dissimilarity(GVME, m2, "GV1") ^ 2) ## Seems we get a slightly better solution ... ## But note that cl_dissimilarity(m1, m2, "GV1") ## and that the maximal deviation of the memberships is max(abs(cl_membership(m1) - cl_membership(m2))) ## so the differences seem to be due to rounding. ## Do the "nearest" hard partitions fully agree? table(cl_class_ids(m1), cl_class_ids(m2)) ## And now for k = 3 classes. m1 <- cl_consensus(GVME, method = "GV1", control = list(k = 3, verbose = TRUE)) sum(cl_dissimilarity(GVME, m1, "GV1") ^ 2) ## Compare to the consensus solution given in Gordon & Vichi (2001). m2 <- GVME_Consensus[["MF1/3"]] sum(cl_dissimilarity(GVME, m2, "GV1") ^ 2) ## This time we look much better ... ## How dissimilar are these solutions? cl_dissimilarity(m1, m2, "GV1") ## Do the "nearest" hard partitions fully agree? table(cl_class_ids(m1), cl_class_ids(m2)) } \keyword{cluster} clue/man/Phonemes.Rd0000644000175100001440000000216212734171677014060 0ustar hornikusers\name{Phonemes} \alias{Phonemes} \title{Miller-Nicely Consonant Phoneme Confusion Data} \description{ Miller-Nicely data on the auditory confusion of 16 consonant phonemes. 
} \usage{data("Phonemes")} \format{ A symmetric matrix of the misclassification probabilities of 16 English consonant phonemes. } \details{ Miller and Nicely (1955) obtained the confusions by exposing female subjects to a series of syllables consisting of one of the 16 consonants followed by the vowel \samp{a} under 17 different experimental conditions. The data provided are obtained from aggregating the six so-called flat-noise conditions in which only the speech-to-noise ratio was varied into a single matrix of misclassification frequencies. } \source{ The data set is also contained in file \file{mapclus.data} in the shell archive \url{http://www.netlib.org/mds/mapclus.shar}. } \references{ G. A. Miller and P. E. Nicely (1955). An analysis of perceptual confusions among some English consonants. \emph{Journal of the Acoustical Society of America}, \bold{27}, 338--352. \doi{10.1121/1.1907526}. } \keyword{datasets} clue/man/hierarchy.Rd0000644000175100001440000000735412211412651014244 0ustar hornikusers\name{hierarchy} \alias{cl_hierarchy} % class ... \alias{is.cl_hierarchy} \alias{as.cl_hierarchy} \alias{cl_dendrogram} % class ... \alias{is.cl_dendrogram} \alias{as.cl_dendrogram} \alias{plot.cl_dendrogram} \title{Hierarchies} \description{ Determine whether an R object represents a hierarchy of objects, or coerce to an R object representing such.} \usage{ is.cl_hierarchy(x) is.cl_dendrogram(x) as.cl_hierarchy(x) as.cl_dendrogram(x) } \arguments{ \item{x}{an R object.} } \value{ For the testing functions, a logical indicating whether the given object represents a clustering of objects of the respective kind. For the coercion functions, a container object inheriting from \code{"cl_hierarchy"}, with a suitable representation of the hierarchy given by \code{x}. } \details{ These functions are generic functions. 
The methods provided in package \pkg{clue} handle the partitions and hierarchies obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{ape}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself). The hierarchies considered by \pkg{clue} are \emph{\eqn{n}-trees} (hierarchies in the strict sense) and \emph{dendrograms} (also known as valued \eqn{n}-trees or total indexed hierarchies), which are represented by the virtual classes \code{"cl_hierarchy"} and \code{"cl_dendrogram"} (which inherits from the former), respectively. \eqn{n}-trees on a set \eqn{X} of objects correspond to collections \eqn{H} of subsets of \eqn{X}, usually called \emph{classes} of the hierarchy, which satisfy the following properties: \itemize{ \item \eqn{H} contains all singletons with objects of \eqn{X}, \eqn{X} itself, but not the empty set; \item The intersection of two sets \eqn{A} and \eqn{B} in \eqn{H} is either empty or one of the sets. } The classes of a hierarchy can be obtained by \code{\link{cl_classes}}. Dendrograms are \eqn{n}-trees where additionally a height \eqn{h} is associated with each of the classes, so that for two classes \eqn{A} and \eqn{B} with non-empty intersection we have \eqn{h(A) \le h(B)} iff \eqn{A} is a subset of \eqn{B}. For each pair of objects one can then define \eqn{u_{ij}} as the height of the smallest class containing both \eqn{i} and \eqn{j}: this results in a dissimilarity on \eqn{X} which satisfies the ultrametric (3-point) conditions \eqn{u_{ij} \le \max(u_{ik}, u_{jk})} for all triples \eqn{(i, j, k)} of objects. Conversely, an ultrametric dissimilarity induces a unique dendrogram. The ultrametric dissimilarities of a dendrogram can be obtained by \code{\link{cl_ultrametric}}. 
\code{as.cl_hierarchy} returns an object of class \code{"cl_hierarchy"} \dQuote{containing} the given object \code{x} if this already represents a hierarchy (i.e., \code{is.cl_hierarchy(x)} is true), or the ultrametric obtained from \code{x} via \code{\link{as.cl_ultrametric}}. \code{as.cl_dendrogram} returns an object which has class \code{"cl_dendrogram"} and inherits from \code{"cl_hierarchy"}, and contains \code{x} if it represents a dendrogram (i.e., \code{is.cl_dendrogram(x)} is true), or the ultrametric obtained from \code{x}. Conceptually, hierarchies and dendrograms are \emph{virtual} classes, allowing for a variety of representations. There are group methods for comparing dendrograms and computing their minimum, maximum, and range based on the meet and join operations, see \code{\link{cl_meet}}. There is also a \code{plot} method. } \examples{ hcl <- hclust(dist(USArrests)) is.cl_dendrogram(hcl) is.cl_hierarchy(hcl) } \keyword{cluster} clue/man/cl_classes.Rd0000644000175100001440000000126711304023137014375 0ustar hornikusers\name{cl_classes} \alias{cl_classes} \title{Cluster Classes} \description{ Extract the classes in a partition or hierarchy. } \usage{ cl_classes(x) } \arguments{ \item{x}{an R object representing a partition or hierarchy of objects.} } \value{ A list inheriting from \code{"cl_classes_of_objects"} of vectors indicating the classes. } \details{ For partitions, the classes are the equivalence classes (\dQuote{clusters}) of the partition; for soft partitions, the classes of the nearest hard partition are used. For hierarchies represented by trees, the classes are the sets of objects corresponding to (joined at or split by) the nodes of the tree. } \keyword{cluster} clue/man/cl_agreement.Rd0000644000175100001440000002370613020247107014713 0ustar hornikusers\name{cl_agreement} \alias{cl_agreement} \title{Agreement Between Partitions or Hierarchies} \description{Compute the agreement between (ensembles) of partitions or hierarchies. 
} \usage{ cl_agreement(x, y = NULL, method = "euclidean", \dots) } \arguments{ \item{x}{an ensemble of partitions or hierarchies and dissimilarities, or something coercible to that (see \code{\link{cl_ensemble}}).} \item{y}{\code{NULL} (default), or as for \code{x}.} \item{method}{a character string specifying one of the built-in methods for computing agreement, or a function to be taken as a user-defined method. If a character string, its lower-cased version is matched against the lower-cased names of the available built-in methods using \code{\link{pmatch}}. See \bold{Details} for available built-in methods.} \item{\dots}{further arguments to be passed to methods.} } \value{ If \code{y} is \code{NULL}, an object of class \code{"cl_agreement"} containing the agreements between the all pairs of components of \code{x}. Otherwise, an object of class \code{"cl_cross_agreement"} with the agreements between the components of \code{x} and the components of \code{y}. } \details{ If \code{y} is given, its components must be of the same kind as those of \code{x} (i.e., components must either all be partitions, or all be hierarchies or dissimilarities). If all components are partitions, the following built-in methods for measuring agreement between two partitions with respective membership matrices \eqn{u} and \eqn{v} (brought to a common number of columns) are available: \describe{ \item{\code{"euclidean"}}{\eqn{1 - d / m}, where \eqn{d} is the Euclidean dissimilarity of the memberships, i.e., the square root of the minimal sum of the squared differences of \eqn{u} and all column permutations of \eqn{v}, and \eqn{m} is an upper bound for the maximal Euclidean dissimilarity. 
See Dimitriadou, Weingessel and Hornik (2002).} \item{\code{"manhattan"}}{\eqn{1 - d / m}, where \eqn{d} is the Manhattan dissimilarity of the memberships, i.e., the minimal sum of the absolute differences of \eqn{u} and all column permutations of \eqn{v}, and \eqn{m} is an upper bound for the maximal Manhattan dissimilarity.} \item{\code{"Rand"}}{the Rand index (the rate of distinct pairs of objects both in the same class or both in different classes in both partitions), see Rand (1971) or Gordon (1999), page 198. For soft partitions, (currently) the Rand index of the corresponding nearest hard partitions is used.} \item{\code{"cRand"}}{the Rand index corrected for agreement by chance, see Hubert and Arabie (1985) or Gordon (1999), page 198. Can only be used for hard partitions.} \item{\code{"NMI"}}{Normalized Mutual Information, see Strehl and Ghosh (2002). For soft partitions, (currently) the NMI of the corresponding nearest hard partitions is used.} \item{\code{"KP"}}{the Katz-Powell index, i.e., the product-moment correlation coefficient between the elements of the co-membership matrices \eqn{C(u) = u u'} and \eqn{C(v)}, respectively, see Katz and Powell (1953). For soft partitions, (currently) the Katz-Powell index of the corresponding nearest hard partitions is used. 
(Note that for hard partitions, the \eqn{(i,j)} entry of \eqn{C(u)} is one iff objects \eqn{i} and \eqn{j} are in the same class.)} \item{\code{"angle"}}{the maximal cosine of the angle between the elements of \eqn{u} and all column permutations of \eqn{v}.} \item{\code{"diag"}}{the maximal co-classification rate, i.e., the maximal rate of objects with the same class ids in both partitions after arbitrarily permuting the ids.} \item{\code{"FM"}}{the index of Fowlkes and Mallows (1983), i.e., the ratio \eqn{N_{xy} / \sqrt{N_x N_y}}{N_xy / sqrt(N_x N_y)} of the number \eqn{N_{xy}}{N_xy} of distinct pairs of objects in the same class in both partitions and the geometric mean of the numbers \eqn{N_x} and \eqn{N_y} of distinct pairs of objects in the same class in partition \eqn{x} and partition \eqn{y}, respectively. For soft partitions, (currently) the Fowlkes-Mallows index of the corresponding nearest hard partitions is used.} \item{\code{"Jaccard"}}{the Jaccard index, i.e., the ratio of the numbers of distinct pairs of objects in the same class in both partitions and in at least one partition, respectively. For soft partitions, (currently) the Jaccard index of the corresponding nearest hard partitions is used.} \item{\code{"purity"}}{the purity of the classes of \code{x} with respect to those of \code{y}, i.e., \eqn{\sum_j \max_i n_{ij} / n}, where \eqn{n_{ij}} is the joint frequency of objects in class \eqn{i} for \code{x} and in class \eqn{j} for \code{y}, and \eqn{n} is the total number of objects.} \item{\code{"PS"}}{Prediction Strength, see Tibshirani and Walther (2005): the minimum, over all classes \eqn{j} of \code{y}, of the maximal rate of objects in the same class for \code{x} and in class \eqn{j} for \code{y}.} } If all components are hierarchies, available built-in methods for measuring agreement between two hierarchies with respective ultrametrics \eqn{u} and \eqn{v} are as follows. 
\describe{ \item{\code{"euclidean"}}{\eqn{1 / (1 + d)}, where \eqn{d} is the Euclidean dissimilarity of the ultrametrics (i.e., the square root of the sum of the squared differences of \eqn{u} and \eqn{v}).} \item{\code{"manhattan"}}{\eqn{1 / (1 + d)}, where \eqn{d} is the Manhattan dissimilarity of the ultrametrics (i.e., the sum of the absolute differences of \eqn{u} and \eqn{v}).} \item{\code{"cophenetic"}}{The cophenetic correlation coefficient. (I.e., the product-moment correlation of the ultrametrics.)} \item{\code{"angle"}}{the cosine of the angle between the ultrametrics.} \item{\code{"gamma"}}{\eqn{1 - d}, where \eqn{d} is the rate of inversions between the associated ultrametrics (i.e., the rate of pairs \eqn{(i,j)} and \eqn{(k,l)} for which \eqn{u_{ij} < u_{kl}} and \eqn{v_{ij} > v_{kl}}). (This agreement measure is a linear transformation of Kruskal's \eqn{\gamma}{gamma}.)} } The measures based on ultrametrics also allow computing agreement with \dQuote{raw} dissimilarities on the underlying objects (R objects inheriting from class \code{"dist"}). If a user-defined agreement method is to be employed, it must be a function taking two clusterings as its arguments. Symmetric agreement objects of class \code{"cl_agreement"} are implemented as symmetric proximity objects with self-proximities identical to one, and inherit from class \code{"cl_proximity"}. They can be coerced to dense square matrices using \code{as.matrix}. It is possible to use 2-index matrix-style subscripting for such objects; unless this uses identical row and column indices, this results in a (non-symmetric agreement) object of class \code{"cl_cross_agreement"}. } \references{ E. Dimitriadou, A. Weingessel and K. Hornik (2002). A combination scheme for fuzzy clustering. \emph{International Journal of Pattern Recognition and Artificial Intelligence}, \bold{16}, 901--912. \cr \doi{10.1142/S0218001402002052}. E. B. Fowlkes and C. L. Mallows (1983). 
A method for comparing two hierarchical clusterings. \emph{Journal of the American Statistical Association}, \bold{78}, 553--569. \cr \doi{10.1080/01621459.1983.10478008}. A. D. Gordon (1999). \emph{Classification} (2nd edition). Boca Raton, FL: Chapman & Hall/CRC. L. Hubert and P. Arabie (1985). Comparing partitions. \emph{Journal of Classification}, \bold{2}, 193--218. \doi{10.1007/bf01908075}. W. M. Rand (1971). Objective criteria for the evaluation of clustering methods. \emph{Journal of the American Statistical Association}, \bold{66}, 846--850. \doi{10.2307/2284239}. L. Katz and J. H. Powell (1953). A proposed index of the conformity of one sociometric measurement to another. \emph{Psychometrika}, \bold{18}, 249--256. \doi{10.1007/BF02289063}. A. Strehl and J. Ghosh (2002). Cluster ensembles --- A knowledge reuse framework for combining multiple partitions. \emph{Journal of Machine Learning Research}, \bold{3}, 583--617. \cr \url{http://www.jmlr.org/papers/volume3/strehl02a/strehl02a.pdf}. R. Tibshirani and G. Walther (2005). Cluster validation by Prediction Strength. \emph{Journal of Computational and Graphical Statistics}, \bold{14}/3, 511--528. \doi{10.1198/106186005X59243}. } \seealso{ \code{\link{cl_dissimilarity}}; \code{\link[e1071]{classAgreement}} in package \pkg{e1071}. } \examples{ ## An ensemble of partitions. data("CKME") pens <- CKME[1 : 20] # for saving precious time ... summary(c(cl_agreement(pens))) summary(c(cl_agreement(pens, method = "Rand"))) summary(c(cl_agreement(pens, method = "diag"))) cl_agreement(pens[1:5], pens[6:7], method = "NMI") ## Equivalently, using subscripting. cl_agreement(pens, method = "NMI")[1:5, 6:7] ## An ensemble of hierarchies. 
d <- dist(USArrests) hclust_methods <- c("ward", "single", "complete", "average", "mcquitty") hclust_results <- lapply(hclust_methods, function(m) hclust(d, m)) names(hclust_results) <- hclust_methods hens <- cl_ensemble(list = hclust_results) summary(c(cl_agreement(hens))) ## Note that the Euclidean agreements are *very* small. ## This is because the ultrametrics differ substantially in height: u <- lapply(hens, cl_ultrametric) round(sapply(u, max), 3) ## Rescaling the ultrametrics to [0, 1] gives: u <- lapply(u, function(x) (x - min(x)) / (max(x) - min(x))) shens <- cl_ensemble(list = lapply(u, as.cl_dendrogram)) summary(c(cl_agreement(shens))) ## Au contraire ... summary(c(cl_agreement(hens, method = "cophenetic"))) cl_agreement(hens[1:3], hens[4:5], method = "gamma") } \keyword{cluster} clue/man/cl_object_names.Rd0000644000175100001440000000176712211412557015404 0ustar hornikusers\name{cl_object_names} \alias{cl_object_names} \title{Find Object Names} \description{ Find the names of the objects from which a taxonomy (partition or hierarchy) or proximity was obtained. } \usage{ cl_object_names(x) } \arguments{ \item{x}{an \R object representing a taxonomy or proximity.} } \value{ A character vector of length \code{\link{n_of_objects}(x)} in case the names of the objects could be determined, or \code{NULL}. } \details{ This is a generic function. The methods provided in package \pkg{clue} handle the partitions and hierarchies obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{ape}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself), in as much as possible. There is also a method for object dissimilarities which inherit from class \code{"\link{dist}"}. 
} \keyword{cluster} clue/man/Kinship82.Rd0000644000175100001440000000352312734174662014060 0ustar hornikusers\name{Kinship82} \alias{Kinship82} \title{Rosenberg-Kim Kinship Terms Partition Data} \description{ Partitions of 15 kinship terms given by 85 female undergraduates at Rutgers University who were asked to sort the terms into classes \dQuote{on the basis of some aspect of meaning}. } \usage{data("Kinship82")} \format{ A cluster ensemble of 85 hard partitions of the 15 kinship terms. } \details{ Rosenberg and Kim (1975) describe an experiment where perceived similarities of the kinship terms were obtained from six different \dQuote{sorting} experiments. These \dQuote{original} Rosenberg-Kim kinship terms data were published in Arabie, Carroll and de Sarbo (1987), and are also contained in file \file{indclus.data} in the shell archive \url{http://www.netlib.org/mds/indclus.shar}. For one of the experiments, partitions of the terms were printed in Rosenberg (1982). Comparison with the original data indicates that the partition data have the \dQuote{nephew} and \dQuote{niece} columns interchanged, which is corrected in the data set at hand. } \source{ Table 7.1 in Rosenberg (1982), with the \dQuote{nephew} and \dQuote{niece} columns interchanged. } \references{ P. Arabie, J. D. Carroll and W. S. de Sarbo (1987). \emph{Three-way scaling and clustering}. Newbury Park, CA: Sage. S. Rosenberg and M. P. Kim (1975). The method of sorting as a data-gathering procedure in multivariate research. \emph{Multivariate Behavioral Research}, \bold{10}, 489--502. \cr \doi{10.1207/s15327906mbr1004_7}. S. Rosenberg (1982). The method of sorting in multivariate research with applications selected from cognitive psychology and person perception. In N. Hirschberg and L. G. Humphreys (eds.), \emph{Multivariate Applications in the Social Sciences}, 117--142. Hillsdale, NJ: Erlbaum. 
} \keyword{datasets} clue/man/cl_fuzziness.Rd0000644000175100001440000000500211304023137014770 0ustar hornikusers\name{cl_fuzziness} \alias{cl_fuzziness} \title{Partition Fuzziness} \description{ Compute the fuzziness of partitions. } \usage{ cl_fuzziness(x, method = NULL, normalize = TRUE) } \arguments{ \item{x}{a cluster ensemble of partitions, or an R object coercible to such.} \item{method}{a character string indicating the fuzziness measure to be employed, or \code{NULL} (default), or a function to be taken as a user-defined method. Currently available built-in methods are \code{"PC"} (Partition Coefficient) and \code{"PE"} (Partition Entropy), with the default corresponding to the first one. If \code{method} is a character string, its lower-cased version is matched against the lower-cased names of the available built-in methods using \code{\link{pmatch}}.} \item{normalize}{a logical indicating whether the fuzziness measure should be normalized in a way that hard partitions have value 0, and \dQuote{completely fuzzy} partitions (where for all objects, all classes get the same membership) have value 1.} } \details{ If \eqn{m} contains the membership values of a partition, the (unnormalized) Partition Coefficient and Partition Entropy are given by \eqn{\sum_{n,i} m_{n,i}^2} and \eqn{\sum_{n,i} H(m_{n,i})}, respectively, where \eqn{H(u) = u \log u - (1-u) \log(1-u)}{u log(u) - (1-u) log(1-u)}. Note that the normalization used here is different from the normalizations typically found in the literature. If a user-defined fuzziness method is to be employed, it must be a function taking a matrix of membership values and a logical to indicate whether normalization is to be performed as its arguments (in that order; argument names are not used). } \value{ An object of class \code{"cl_fuzziness"} giving the fuzziness values. } \references{ J. C. Bezdek (1981). \emph{Pattern Recognition with Fuzzy Objective Function Algorithms}. New York: Plenum. 
} \seealso{ Function \code{\link[e1071]{fclustIndex}} in package \pkg{e1071}, which also computes several other \dQuote{fuzzy cluster indexes} (typically based on more information than just the membership values). } \examples{ if(require("e1071", quiet = TRUE)) { ## Use an on-line version of fuzzy c-means from package e1071 if ## available. data("Cassini") pens <- cl_boot(Cassini$x, B = 15, k = 3, algorithm = "cmeans", parameters = list(method = "ufcl")) pens summary(cl_fuzziness(pens, "PC")) summary(cl_fuzziness(pens, "PE")) } } \keyword{cluster} clue/man/cl_medoid.Rd0000644000175100001440000000331211726357356014217 0ustar hornikusers\name{cl_medoid} \alias{cl_medoid} \title{Medoid Partitions and Hierarchies} \description{ Compute the medoid of an ensemble of partitions or hierarchies, i.e., the element of the ensemble minimizing the sum of dissimilarities to all other elements. } \usage{ cl_medoid(x, method = "euclidean") } \arguments{ \item{x}{an ensemble of partitions or hierarchies, or something coercible to that (see \code{\link{cl_ensemble}}).} \item{method}{a character string or a function, as for argument \code{method} of function \code{\link{cl_dissimilarity}}.} } \value{ The medoid partition or hierarchy. } \details{ Medoid clusterings are special cases of \dQuote{consensus} clusterings characterized as the solutions of an optimization problem. See Gordon (2001) for more information. The dissimilarities \code{d} for determining the medoid are obtained by calling \code{cl_dissimilarity} with arguments \code{x} and \code{method}. The medoid can then be found as the (first) row index for which the row sum of \code{as.matrix(d)} is minimal. Modulo possible differences in the case of ties, this gives the same results as (the medoid obtained by) \code{\link[cluster]{pam}} in package \pkg{cluster}. } \references{ A. D. Gordon (1999). \emph{Classification} (2nd edition). Boca Raton, FL: Chapman & Hall/CRC. 
} \seealso{ \code{\link{cl_consensus}} } \examples{ ## An ensemble of partitions. data("CKME") pens <- CKME[1 : 20] m1 <- cl_medoid(pens) diss <- cl_dissimilarity(pens) require("cluster") m2 <- pens[[pam(diss, 1)$medoids]] ## Agreement of medoid consensus partitions. cl_agreement(m1, m2) ## Or, more straightforwardly: table(cl_class_ids(m1), cl_class_ids(m2)) } \keyword{cluster} clue/man/solve_LSAP.Rd0000644000175100001440000000331511304023137014225 0ustar hornikusers\name{solve_LSAP} \encoding{UTF-8} \alias{solve_LSAP} \title{Solve Linear Sum Assignment Problem} \description{ Solve the linear sum assignment problem using the Hungarian method. } \usage{ solve_LSAP(x, maximum = FALSE) } \arguments{ \item{x}{a matrix with nonnegative entries and at least as many columns as rows.} \item{maximum}{a logical indicating whether to minimize or maximize the sum of assigned costs.} } \details{ If \eqn{nr} and \eqn{nc} are the numbers of rows and columns of \code{x}, \code{solve_LSAP} finds an optimal \emph{assignment} of rows to columns, i.e., a one-to-one map \code{p} of the numbers from 1 to \eqn{nr} to the numbers from 1 to \eqn{nc} (a permutation of these numbers in case \code{x} is a square matrix) such that \eqn{\sum_{i=1}^{nr} x[i, p[i]]} is minimized or maximized. This assignment can be found using a linear program (and package \pkg{lpSolve} provides a function \code{lp.assign} for doing so), but typically more efficiently and provably in polynomial time \eqn{O(n^3)} using primal-dual methods such as the so-called Hungarian method (see the references). } \value{ An object of class \code{"solve_LSAP"} with the optimal assignment of rows to columns. } \references{ C. Papadimitriou and K. Steiglitz (1982), \emph{Combinatorial Optimization: Algorithms and Complexity}. Englewood Cliffs: Prentice Hall. } \author{ Walter Böhm \email{Walter.Boehm@wu-wien.ac.at} kindly provided C code implementing the Hungarian method. 
} \examples{ x <- matrix(c(5, 1, 4, 3, 5, 2, 2, 4, 4), nrow = 3) solve_LSAP(x) solve_LSAP(x, maximum = TRUE) ## To get the optimal value (for now): y <- solve_LSAP(x) sum(x[cbind(seq_along(y), y)]) } \keyword{optimize} clue/man/cl_ultrametric.Rd0000644000175100001440000000440311304023137015266 0ustar hornikusers\name{cl_ultrametric} \alias{cl_ultrametric} \alias{as.cl_ultrametric} \title{Ultrametrics of Hierarchies} \description{ Compute the ultrametric distances for objects representing (total indexed) hierarchies. } \usage{ cl_ultrametric(x, size = NULL, labels = NULL) as.cl_ultrametric(x) } \arguments{ \item{x}{an R object representing a (total indexed) hierarchy of objects.} \item{size}{an integer giving the number of objects in the hierarchy.} \item{labels}{a character vector giving the names of the objects in the hierarchy.} } \value{ An object of class \code{"cl_ultrametric"} containing the ultrametric distances. } \details{ If \code{x} is not an ultrametric or a hierarchy with an ultrametric representation, \code{cl_ultrametric} uses \code{\link[stats]{cophenetic}} to obtain the ultrametric (also known as cophenetic) distances from the hierarchy, which in turn by default calls the S3 generic \code{\link[stats]{as.hclust}} on the hierarchy. Support for a class which represents hierarchies can thus be added by providing \code{as.hclust} methods for this class. In R 2.1.0 or better, \code{cophenetic} is an S3 generic as well, and one can also more directly provide methods for this if necessary. \code{as.cl_ultrametric} is a generic function which can be used for coercing \emph{raw} (non-classed) ultrametrics, represented as numeric vectors (of the lower-half entries) or numeric matrices, to ultrametric objects. Ultrametric objects are implemented as symmetric proximity objects with a dissimilarity interpretation so that self-proximities are zero, and inherit from classes \code{"\link{cl_dissimilarity}"} and \code{"cl_proximity"}. 
See section \bold{Details} in the documentation for \code{\link{cl_dissimilarity}} for implications. Ultrametric objects can also be coerced to classes \code{"\link[stats]{dendrogram}"} and \code{"\link[stats]{hclust}"}, and hence in particular use the \code{plot} methods for these classes. By default, plotting an ultrametric object uses the plot method for dendrograms. } \seealso{ \code{\link{is.cl_hierarchy}} } \examples{ hc <- hclust(dist(USArrests)) u <- cl_ultrametric(hc) ## Subscripting. u[1 : 5, 1 : 5] u[1 : 5, 6 : 7] ## Plotting. plot(u) } \keyword{cluster} clue/man/Kinship82_Consensus.Rd0000644000175100001440000000307212734171653016114 0ustar hornikusers\name{Kinship82_Consensus} \alias{Kinship82_Consensus} \title{Gordon-Vichi Kinship82 Consensus Partition Data} \description{ The soft (\dQuote{fuzzy}) consensus partitions for the Rosenberg-Kim kinship terms partition data given in Gordon and Vichi (2001). } \usage{data("Kinship82_Consensus")} \format{ A named cluster ensemble of three soft partitions of the 15 kinship terms into three classes. } \details{ The elements of the ensemble are named \code{"MF1"}, \code{"MF2"}, and \code{"JMF"}, and correspond to the consensus partitions obtained by applying models 1, 2, and 3 in Gordon and Vichi (2001) to the kinship terms partition data in Rosenberg (1982), which are available as data set \code{\link{Kinship82}}. } \source{ Table 6 in Gordon and Vichi (2001). } \references{ A. D. Gordon and M. Vichi (2001). Fuzzy partition models for fitting a set of partitions. \emph{Psychometrika}, \bold{66}, 229--248. \doi{10.1007/BF02294837}. S. Rosenberg (1982). The method of sorting in multivariate research with applications selected from cognitive psychology and person perception. In N. Hirschberg and L. G. Humphreys (eds.), \emph{Multivariate Applications in the Social Sciences}, 117--142. Hillsdale, NJ: Erlbaum. } \examples{ ## Load the consensus partitions. 
data("Kinship82_Consensus") ## Fuzziness using the Partition Coefficient. cl_fuzziness(Kinship82_Consensus) ## (Corresponds to 1 - F in the source.) ## Dissimilarities: cl_dissimilarity(Kinship82_Consensus) cl_dissimilarity(Kinship82_Consensus, method = "comem") } \keyword{datasets} clue/man/cl_dissimilarity.Rd0000644000175100001440000003665512734174403015652 0ustar hornikusers\name{cl_dissimilarity} \encoding{UTF-8} \alias{cl_dissimilarity} \title{Dissimilarity Between Partitions or Hierarchies} \description{Compute the dissimilarity between (ensembles) of partitions or hierarchies.} \usage{ cl_dissimilarity(x, y = NULL, method = "euclidean", \dots) } \arguments{ \item{x}{an ensemble of partitions or hierarchies and dissimilarities, or something coercible to that (see \code{\link{cl_ensemble}}).} \item{y}{\code{NULL} (default), or as for \code{x}.} \item{method}{a character string specifying one of the built-in methods for computing dissimilarity, or a function to be taken as a user-defined method. If a character string, its lower-cased version is matched against the lower-cased names of the available built-in methods using \code{\link{pmatch}}. See \bold{Details} for available built-in methods.} \item{\dots}{further arguments to be passed to methods.} } \value{ If \code{y} is \code{NULL}, an object of class \code{"cl_dissimilarity"} containing the dissimilarities between all pairs of components of \code{x}. Otherwise, an object of class \code{"cl_cross_dissimilarity"} with the dissimilarities between the components of \code{x} and the components of \code{y}. } \details{ If \code{y} is given, its components must be of the same kind as those of \code{x} (i.e., components must either all be partitions, or all be hierarchies or dissimilarities). 
If all components are partitions, the following built-in methods for measuring dissimilarity between two partitions with respective membership matrices \eqn{u} and \eqn{v} (brought to a common number of columns) are available: \describe{ \item{\code{"euclidean"}}{the Euclidean dissimilarity of the memberships, i.e., the square root of the minimal sum of the squared differences of \eqn{u} and all column permutations of \eqn{v}. See Dimitriadou, Weingessel and Hornik (2002).} \item{\code{"manhattan"}}{the Manhattan dissimilarity of the memberships, i.e., the minimal sum of the absolute differences of \eqn{u} and all column permutations of \eqn{v}.} \item{\code{"comemberships"}}{the Euclidean dissimilarity of the elements of the co-membership matrices \eqn{C(u) = u u'} and \eqn{C(v)}, i.e., the square root of the sum of the squared differences of \eqn{C(u)} and \eqn{C(v)}.} \item{\code{"symdiff"}}{the cardinality of the symmetric set difference of the sets of co-classified pairs of distinct objects in the partitions. I.e., the number of distinct pairs of objects in the same class in exactly one of the partitions. (Alternatively, the cardinality of the symmetric set difference between the (binary) equivalence relations corresponding to the partitions.) For soft partitions, (currently) the symmetric set difference of the corresponding nearest hard partitions is used.} \item{\code{"Rand"}}{the Rand distance, i.e., the rate of distinct pairs of objects in the same class in exactly one of the partitions. (Related to the Rand index \eqn{a} via the linear transformation \eqn{d = (1 - a) / 2}.) 
For soft partitions, (currently) the Rand distance of the corresponding nearest hard partitions is used.} \item{\code{"GV1"}}{the square root of the dissimilarity \eqn{\Delta_1}{Delta_1} used for the first model in Gordon and Vichi (2001), i.e., the square root of the minimal sum of the squared differences of the \emph{matched} non-zero columns of \eqn{u} and \eqn{v}.} \item{\code{"BA/\var{d}"}}{distance measures for hard partitions discussed in Boorman and Arabie (1972), with \var{d} one of \samp{A}, \samp{C}, \samp{D}, or \samp{E}. For soft partitions, the distances of the corresponding nearest hard partitions are used. \code{"BA/A"} is the minimum number of single element moves (move from one class to another or a new one) needed to transform one partition into the other. Introduced in Rubin (1967). \code{"BA/C"} is the minimum number of lattice moves for transforming one partition into the other, where partitions are said to be connected by a lattice move if one is \emph{just} finer than the other (i.e., there is no other partition between them) in the partition lattice (see \code{\link{cl_meet}}). Equivalently, with \eqn{z} the join of \code{x} and \code{y} and \eqn{S} giving the number of classes, this can be written as \eqn{S(x) + S(y) - 2 S(z)}. Attributed to David Pavy. \code{"BA/D"} is the \dQuote{pair-bonds} distance, which can be defined as \eqn{S(x) + S(y) - 2 S(z)}, with \eqn{z} the meet of \code{x} and \code{y} and \eqn{S} the \emph{supervaluation} (i.e., non-decreasing with respect to the partial order on the partition lattice) function \eqn{\sum_i (n_i (n_i - 1)) / (n (n - 1))}, where the \eqn{n_i} are the numbers of objects in the respective classes of the partition (such that \eqn{n_i (n_i - 1) / 2} are the numbers of pair bonds in the classes), and \eqn{n} the total number of objects. 
\code{"BA/E"} is the normalized information distance, defined as \eqn{1 - I / H}, where \eqn{I} is the average mutual information between the partitions, and \eqn{H} is the average entropy of the meet \eqn{z} of the partitions. Introduced in Rajski (1961). (Boorman and Arabie also discuss a distance measure (\eqn{B}) based on the minimum number of set moves needed to transform one partition into the other, which, differently from the \eqn{A} and \eqn{C} distance measures is hard to compute (Day, 1981) and (currently) not provided.)} \item{\code{"VI"}}{Variation of Information, see Meila (2003). If \code{\dots} has an argument named \code{weights}, it is taken to specify case weights.} \item{\code{"Mallows"}}{the Mallows-type distance by Zhou, Li and Zha (2005), which is related to the Monge-Kantorovich mass transfer problem, and given as the \eqn{p}-th root of the minimal value of the transportation problem \eqn{\sum w_{jk} \sum_i |u_{ij} - v_{ik}| ^ p} with constraints \eqn{w_{jk} \ge 0}, \eqn{\sum_j w_{jk} = \alpha_j}, \eqn{\sum_k w_{jk} = \beta_k}, where \eqn{\sum_j \alpha_j = \sum_k \beta_k}. The parameters \eqn{p}, \eqn{\alpha} and \eqn{\beta} all default to one (in this case, the Mallows distance coincides with the Manhattan dissimilarity), and can be specified via additional arguments named \code{p}, \code{alpha}, and \code{beta}, respectively.} \item{\code{"CSSD"}}{the Cluster Similarity Sensitive Distance of Zhou, Li and Zha (2005), which is given as the minimal value of \eqn{\sum_{k,l} (1 - 2 w_{kl} / (\alpha_k + \beta_l)) L_{kl}}, where \eqn{L_{kl} = \sum_i u_{ik} v_{il} d(p_{x;k}, p_{y;l})} with \eqn{p_{x;k}} and \eqn{p_{y;l}} the prototype of the \eqn{k}-th class of \code{x} and the \eqn{l}-th class of \code{y}, respectively, \eqn{d} is the distance between these, and the \eqn{w_{kl}} as for Mallows distance. If prototypes are matrices, the Euclidean distance between these is used as default. 
Using the additional argument \code{L}, one can give a matrix of \eqn{L_{kl}} values, or the function \eqn{d}. Parameters \eqn{\alpha} and \eqn{\beta} all default to one, and can be specified via additional arguments named \code{alpha} and \code{beta}, respectively.} } For hard partitions, both Manhattan and squared Euclidean dissimilarity give twice the \emph{transfer distance} (Charon et al., 2006), which is the minimum number of objects that must be removed so that the implied partitions (restrictions to the remaining objects) are identical. This is also known as the \emph{\eqn{R}-metric} in Day (1981), i.e., the number of augmentations and removals of single objects needed to transform one partition into the other, and the \emph{partition-distance} in Gusfield (2002), and equals twice the number of single element moves distance of Boorman and Arabie. For hard partitions, the pair-bonds (Boorman-Arabie \eqn{D}) distance is identical to the Rand distance, and can also be written as the Manhattan distance between the co-membership matrices corresponding to the partitions, or equivalently, their symdiff distance, normalized by \eqn{n (n - 1)}. If all components are hierarchies, available built-in methods for measuring dissimilarity between two hierarchies with respective ultrametrics \eqn{u} and \eqn{v} are as follows. 
\describe{ \item{\code{"euclidean"}}{the Euclidean dissimilarity of the ultrametrics (i.e., the square root of the sum of the squared differences of \eqn{u} and \eqn{v}).} \item{\code{"manhattan"}}{the Manhattan dissimilarity of the ultrametrics (i.e., the sum of the absolute differences of \eqn{u} and \eqn{v}).} \item{\code{"cophenetic"}}{\eqn{1 - c^2}, where \eqn{c} is the cophenetic correlation coefficient (i.e., the product-moment correlation of the ultrametrics).} \item{\code{"gamma"}}{the rate of inversions between the ultrametrics (i.e., the rate of pairs \eqn{(i,j)} and \eqn{(k,l)} for which \eqn{u_{ij} < u_{kl}} and \eqn{v_{ij} > v_{kl}}).} \item{\code{"symdiff"}}{the cardinality of the symmetric set difference of the sets of classes (hierarchies in the strict sense) induced by the dendrograms. I.e., the number of sets of objects obtained by a split in exactly one of the hierarchies.} \item{\code{"Chebyshev"}}{the Chebyshev (maximal) dissimilarity of the ultrametrics (i.e., the maximum of the absolute differences of \eqn{u} and \eqn{v}).} \item{\code{"Lyapunov"}}{the logarithm of the product of the maximal and minimal ratios of the ultrametrics. This is also known as the \dQuote{Hilbert projective metric} on the cone represented by the ultrametrics (e.g., Jardine & Sibson (1971), page 107), and only defined for \emph{strict} ultrametrics (which are strictly positive for distinct objects).} \item{\code{"BO"}}{the \eqn{m_\delta} family of tree metrics by Boorman and Olivier (1973), which are of the form \eqn{m_\delta = \int_0^\infty \delta(p(h), q(h)) dh}, where \eqn{p(h)} and \eqn{q(h)} are the hard partitions obtained by cutting the trees (dendrograms) at height \eqn{h}, and \eqn{\delta} is a suitable dissimilarity measure for partitions. In particular, when taking \eqn{\delta} as symdiff or Rand dissimilarity, \eqn{m_\delta} is the Manhattan dissimilarity of the hierarchies. 
If \code{\dots} has an argument named \code{delta} it is taken to specify the partition dissimilarity \eqn{\delta} to be employed.} \item{\code{"spectral"}}{the spectral norm (2-norm) of the differences of the ultrametrics, suggested in Mérigot, Durbec, and Gaertner (2010).} } The measures based on ultrametrics also allow computing dissimilarity with \dQuote{raw} dissimilarities on the underlying objects (R objects inheriting from class \code{"dist"}). If a user-defined dissimilarity method is to be employed, it must be a function taking two clusterings as its arguments. Symmetric dissimilarity objects of class \code{"cl_dissimilarity"} are implemented as symmetric proximity objects with self-proximities identical to zero, and inherit from class \code{"cl_proximity"}. They can be coerced to dense square matrices using \code{as.matrix}. It is possible to use 2-index matrix-style subscripting for such objects; unless this uses identical row and column indices, this results in a (non-symmetric dissimilarity) object of class \code{"cl_cross_dissimilarity"}. Symmetric dissimilarity objects also inherit from class \code{"\link{dist}"} (although they currently do not \dQuote{strictly} extend this class), thus making it possible to use them directly for clustering algorithms based on dissimilarity matrices of this class, see the examples. } \references{ S. A. Boorman and P. Arabie (1972). Structural measures and the method of sorting. In R. N. Shepard, A. K. Romney, & S. B. Nerlove (eds.), \emph{Multidimensional Scaling: Theory and Applications in the Behavioral Sciences, 1: Theory} (pages 225--249). New York: Seminar Press. S. A. Boorman and D. C. Olivier (1973). Metrics on spaces of finite trees. \emph{Journal of Mathematical Psychology}, \bold{10}, 26--59. \doi{10.1016/0022-2496(73)90003-5}. I. Charon, L. Denoeud, A. Guénoche and O. Hudry (2006). \emph{Maximum Transfer Distance Between Partitions}. \emph{Journal of Classification}, \bold{23}, 103--121. 
\doi{10.1007/s00357-006-0006-2}. W. E. H. Day (1981). The complexity of computing metric distances between partitions. \emph{Mathematical Social Sciences}, \bold{1}, 269--287. \doi{10.1016/0165-4896(81)90042-1}. E. Dimitriadou, A. Weingessel and K. Hornik (2002). A combination scheme for fuzzy clustering. \emph{International Journal of Pattern Recognition and Artificial Intelligence}, \bold{16}, 901--912. \cr \doi{10.1142/S0218001402002052}. A. D. Gordon and M. Vichi (2001). Fuzzy partition models for fitting a set of partitions. \emph{Psychometrika}, \bold{66}, 229--248. \doi{10.1007/BF02294837}. D. Gusfield (2002). Partition-distance: A problem and class of perfect graphs arising in clustering. \emph{Information Processing Letters}, \bold{82}, 159--164. \doi{10.1016/S0020-0190(01)00263-0}. N. Jardine and R. Sibson (1971). \emph{Mathematical Taxonomy}. London: Wiley. M. Meila (2003). Comparing clusterings by the variation of information. In B. Schölkopf and M. K. Warmuth (eds.), \emph{Learning Theory and Kernel Machines}, pages 173--187. Springer-Verlag: Lecture Notes in Computer Science 2777. B. Mérigot, J.-P. Durbec and J.-C. Gaertner (2010). On goodness-of-fit measure for dendrogram-based analyses. \emph{Ecology}, \bold{91}, 1850--1859. \doi{10.1890/09-1387.1}. C. Rajski (1961). A metric space of discrete probability distributions, \emph{Information and Control}, \bold{4}, 371--377. \doi{10.1016/S0019-9958(61)80055-7}. J. Rubin (1967). Optimal classification into groups: An approach for solving the taxonomy problem. \emph{Journal of Theoretical Biology}, \bold{15}, 103--144. \doi{10.1016/0022-5193(67)90046-X}. D. Zhou, J. Li and H. Zha (2005). A new Mallows distance based metric for comparing clusterings. In \emph{Proceedings of the 22nd international Conference on Machine Learning} (Bonn, Germany, August 07--11, 2005), pages 1028--1035. ICML '05, volume 119. ACM Press, New York, NY. \doi{10.1145/1102351.1102481}. 
} \seealso{ \code{\link{cl_agreement}} } \examples{ ## An ensemble of partitions. data("CKME") pens <- CKME[1 : 30] diss <- cl_dissimilarity(pens) summary(c(diss)) cl_dissimilarity(pens[1:5], pens[6:7]) ## Equivalently, using subscripting. diss[1:5, 6:7] ## Can use the dissimilarities for "secondary" clustering ## (e.g. obtaining hierarchies of partitions): hc <- hclust(diss) plot(hc) ## Example from Boorman and Arabie (1972). P1 <- as.cl_partition(c(1, 2, 2, 2, 3, 3, 2, 2)) P2 <- as.cl_partition(c(1, 1, 2, 2, 3, 3, 4, 4)) cl_dissimilarity(P1, P2, "BA/A") cl_dissimilarity(P1, P2, "BA/C") ## Hierarchical clustering. d <- dist(USArrests) x <- hclust(d) cl_dissimilarity(x, d, "cophenetic") cl_dissimilarity(x, d, "gamma") } \keyword{cluster} clue/man/cl_ensemble.Rd0000644000175100001440000000462311547637750014556 0ustar hornikusers\name{cl_ensemble} \alias{cl_ensemble} \alias{as.cl_ensemble} \alias{is.cl_ensemble} \title{Cluster Ensembles} \description{Creation and manipulation of cluster ensembles.} \usage{ cl_ensemble(..., list = NULL) as.cl_ensemble(x) is.cl_ensemble(x) } \arguments{ \item{\dots}{R objects representing clusterings of or dissimilarities between the same objects.} \item{list}{a list of R objects as in \code{\dots}.} \item{x}{for \code{as.cl_ensemble}, an R object as in \code{\dots}; for \code{is.cl_ensemble}, an arbitrary R object.} } \details{ \code{cl_ensemble} creates \dQuote{cluster ensembles}, which are realized as lists of clusterings (or dissimilarities) with additional class information, always inheriting from \code{"cl_ensemble"}. All elements of the ensemble must have the same number of objects. If all elements are partitions, the ensemble has class \code{"cl_partition_ensemble"}; if all elements are dendrograms, it has class \code{"cl_dendrogram_ensemble"} and inherits from \code{"cl_hierarchy_ensemble"}; if all elements are hierarchies (but not always dendrograms), it has class \code{"cl_hierarchy_ensemble"}. 
Note that empty or \dQuote{mixed} ensembles cannot be categorized according to the kind of elements they contain, and hence only have class \code{"cl_ensemble"}. The list representation makes it possible to use \code{lapply} for computations on the individual clusterings in (i.e., the components of) a cluster ensemble. Available methods for cluster ensembles include those for subscripting, \code{c}, \code{rep}, and \code{print}. There is also a \code{plot} method for ensembles for which all elements can be plotted (currently, additive trees, dendrograms and ultrametrics). } \value{ \code{cl_ensemble} returns a list of the given clusterings or dissimilarities, with additional class information (see \bold{Details}). } \examples{ d <- dist(USArrests) hclust_methods <- c("ward", "single", "complete", "average", "mcquitty") hclust_results <- lapply(hclust_methods, function(m) hclust(d, m)) names(hclust_results) <- hclust_methods ## Now create an ensemble from the results. hens <- cl_ensemble(list = hclust_results) hens ## Subscripting. hens[1 : 3] ## Replication. rep(hens, 3) ## Plotting. plot(hens, main = names(hens)) ## And continue to analyze the ensemble, e.g. round(cl_dissimilarity(hens, method = "gamma"), 4) } \keyword{cluster} clue/man/GVME_Consensus.Rd0000644000175100001440000000343512734174635015101 0ustar hornikusers\name{GVME_Consensus} \alias{GVME_Consensus} \title{Gordon-Vichi Macroeconomic Consensus Partition Data} \description{ The soft (\dQuote{fuzzy}) consensus partitions for the macroeconomic partition data given in Gordon and Vichi (2001). } \usage{data("GVME_Consensus")} \format{ A named cluster ensemble of eight soft partitions of 21 countries terms into two or three classes. } \details{ The elements of the ensemble are consensus partitions for the macroeconomic partition data in Gordon and Vichi (2001), which are available as data set \code{\link{GVME}}. 
Element names are of the form \code{"\var{m}/\var{k}"}, where \var{m} indicates the consensus method employed (one of \samp{MF1}, \samp{MF2}, \samp{JMF}, and \samp{S&S}, corresponding to the application of models 1, 2, and 3 in Gordon and Vichi (2001) and the approach in Sato and Sato (1994), respectively), and \var{k} denotes the number of classes (2 or 3). } \source{ Tables 4 and 5 in Gordon and Vichi (2001). } \references{ A. D. Gordon and M. Vichi (2001). Fuzzy partition models for fitting a set of partitions. \emph{Psychometrika}, \bold{66}, 229--248. \doi{10.1007/BF02294837}. M. Sato and Y. Sato (1994). On a multicriteria fuzzy clustering method for 3-way data. \emph{International Journal of Uncertainty, Fuzziness and Knowledge-Based Systems}, \bold{2}, 127--142. \cr \doi{10.1142/S0218488594000122}. } \examples{ ## Load the consensus partitions. data("GVME_Consensus") ## Pick the partitions into 2 classes. GVME_Consensus_2 <- GVME_Consensus[1 : 4] ## Fuzziness using the Partition Coefficient. cl_fuzziness(GVME_Consensus_2) ## (Corresponds to 1 - F in the source.) ## Dissimilarities: cl_dissimilarity(GVME_Consensus_2) cl_dissimilarity(GVME_Consensus_2, method = "comem") } \keyword{datasets} clue/man/partition.Rd0000644000175100001440000000525312734174303014304 0ustar hornikusers\name{partition} \alias{cl_partition} % class ... \alias{is.cl_partition} \alias{is.cl_hard_partition} \alias{is.cl_soft_partition} \alias{cl_hard_partition} % class ... \alias{as.cl_partition} \alias{as.cl_hard_partition} \title{Partitions} \description{ Determine whether an R object represents a partition of objects, or coerce to an R object representing such.} \usage{ is.cl_partition(x) is.cl_hard_partition(x) is.cl_soft_partition(x) as.cl_partition(x) as.cl_hard_partition(x) } \arguments{ \item{x}{an R object.} } \value{ For the testing functions, a logical indicating whether the given object represents a clustering of objects of the respective kind. 
For the coercion functions, a container object inheriting from \code{"cl_partition"}, with a suitable representation of the partition given by \code{x}. } \details{ \code{is.cl_partition} and \code{is.cl_hard_partition} are generic functions. The methods provided in package \pkg{clue} handle the partitions obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself). \code{is.cl_soft_partition} gives true iff \code{is.cl_partition} is true and \code{is.cl_hard_partition} is false. \code{as.cl_partition} returns an object of class \code{"cl_partition"} \dQuote{containing} the given object \code{x} if this already represents a partition (i.e., \code{is.cl_partition(x)} is true), or the memberships obtained from \code{x} via \code{\link{as.cl_membership}}. \code{as.cl_hard_partition(x)} returns an object which has class \code{"cl_hard_partition"} and inherits from \code{"cl_partition"}, and contains \code{x} if it already represents a hard partition (i.e., provided that \code{is.cl_hard_partition(x)} is true), or the class ids obtained from \code{x}, using \code{x} if this is an atomic vector of raw class ids, or, if \code{x} represents a soft partition or is a raw matrix of membership values, using the class ids of the \emph{nearest hard partition}, defined by taking the class ids of the (first) maximal membership values. Conceptually, partitions and hard partitions are \emph{virtual} classes, allowing for a variety of representations. There are group methods for comparing partitions and computing their minimum, maximum, and range based on the meet and join operations, see \code{\link{cl_meet}}. 
} \examples{ data("Cassini") pcl <- kmeans(Cassini$x, 3) is.cl_partition(pcl) is.cl_hard_partition(pcl) is.cl_soft_partition(pcl) } \keyword{cluster} clue/man/cl_tabulate.Rd0000644000175100001440000000105311304023137014532 0ustar hornikusers\name{cl_tabulate} \alias{cl_tabulate} \title{Tabulate Vector Objects} \description{Tabulate the unique values in vector objects.} \usage{ cl_tabulate(x) } \arguments{ \item{x}{a vector.} } \value{ A data frame with components: \item{values}{the unique values.} \item{counts}{an integer vector with the number of times each of the unique values occurs in \code{x}.} } \examples{ data("Kinship82") tab <- cl_tabulate(Kinship82) ## The counts: tab$counts ## The most frequent partition: tab$values[[which.max(tab$counts)]] } \keyword{utilities} clue/man/cl_prototypes.Rd0000644000175100001440000000265311304023137015170 0ustar hornikusers\name{cl_prototypes} \alias{cl_prototypes} \title{Partition Prototypes} \description{ Determine prototypes for the classes of an R object representing a partition. } \usage{ cl_prototypes(x) } \arguments{ \item{x}{an R object representing a partition of objects.} } \details{ Many partitioning methods are based on prototypes (\dQuote{centers}, \dQuote{centroids}, \dQuote{medoids}, \dots). In typical cases, these are points in the feature space for the measurements on the objects to be partitioned, such that one can quantify the distance between the objects and the prototypes, and, e.g., classify objects to their closest prototype. This is a generic function. The methods provided in package \pkg{clue} handle the partitions obtained from clustering functions in the base R distribution, as well as packages \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{kernlab}, and \pkg{mclust} (and of course, \pkg{clue} itself). } \examples{ ## Show how prototypes ("centers") vary across k-means runs on ## bootstrap samples from the Cassini data. 
data("Cassini") nr <- NROW(Cassini$x) out <- replicate(50, { kmeans(Cassini$x[sample(nr, replace = TRUE), ], 3) }, simplify = FALSE) ## Plot the data points in light gray, and the prototypes found. plot(Cassini$x, col = gray(0.8)) points(do.call("rbind", lapply(out, cl_prototypes)), pch = 19) } \keyword{cluster} clue/man/cl_predict.Rd0000644000175100001440000000411412211412617014367 0ustar hornikusers\name{cl_predict} \alias{cl_predict} \title{Predict Memberships} \description{ Predict class ids or memberships from R objects representing partitions. } \usage{ cl_predict(object, newdata = NULL, type = c("class_ids", "memberships"), ...) } \arguments{ \item{object}{an R object representing a partition of objects.} \item{newdata}{an optional data set giving the objects to make predictions for. This must be of the same \dQuote{kind} as the data set employed for obtaining the partition. If omitted, the original data are used.} \item{type}{a character string indicating whether class ids or memberships should be returned. May be abbreviated.} \item{\dots}{arguments to be passed to and from methods.} } \value{ Depending on \code{type}, an object of class \code{"cl_class_ids"} with the predicted class ids, or of class \code{"cl_membership"} with the matrix of predicted membership values. } \details{ Many algorithms resulting in partitions of a given set of objects can be taken to induce a partition of the underlying feature space for the measurements on the objects, so that class memberships for \dQuote{new} objects can be obtained from the induced partition. Examples include partitions based on assigning objects to their \dQuote{closest} prototypes, or providing mixture models for the distribution of objects in feature space. This is a generic function. 
The methods provided in package \pkg{clue} handle the partitions obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself). } \examples{ ## Run kmeans on a random subset of the Cassini data, and predict the ## memberships for the "test" data set. data("Cassini") nr <- NROW(Cassini$x) ind <- sample(nr, 0.9 * nr, replace = FALSE) party <- kmeans(Cassini$x[ind, ], 3) table(cl_predict(party, Cassini$x[-ind, ]), Cassini$classes[-ind]) } \keyword{cluster} clue/man/ls_fit_sum_of_ultrametrics.Rd0000644000175100001440000000603012116170572017711 0ustar hornikusers\name{ls_fit_sum_of_ultrametrics} \alias{ls_fit_sum_of_ultrametrics} \title{Least Squares Fit of Sums of Ultrametrics to Dissimilarities} \description{ Find a sequence of ultrametrics with sum minimizing square distance (Euclidean dissimilarity) to a given dissimilarity object. } \usage{ ls_fit_sum_of_ultrametrics(x, nterms = 1, weights = 1, control = list()) } \arguments{ \item{x}{a dissimilarity object inheriting from or coercible to class \code{"\link{dist}"}.} \item{nterms}{an integer giving the number of ultrametrics to be fitted.} \item{weights}{a numeric vector or matrix with non-negative weights for obtaining a weighted least squares fit. If a matrix, its numbers of rows and columns must be the same as the number of objects in \code{x}, and the lower diagonal part is used. Otherwise, it is recycled to the number of elements in \code{x}.} \item{control}{a list of control parameters. 
See \bold{Details}.} } \details{ The problem to be solved is minimizing the criterion function \deqn{L(u(1), \dots, u(n)) = \sum_{i,j} w_{ij} \left(x_{ij} - \sum_{k=1}^n u_{ij}(k)\right)^2}{ L(u(1), \dots, u(n)) = \sum_{i,j} w_{ij} \left(x_{ij} - \sum_{k=1}^n u_{ij}(k)\right)^2} over all \eqn{u(1), \ldots, u(n)} satisfying the ultrametric constraints. We provide an implementation of the iterative heuristic suggested in Carroll & Pruzansky (1980) which in each step \eqn{t} sequentially refits the \eqn{u(k)} as the least squares ultrametric fit to the \dQuote{residuals} \eqn{x - \sum_{l \ne k} u(l)} using \code{\link{ls_fit_ultrametric}}. Available control parameters include \describe{ \item{\code{maxiter}}{an integer giving the maximal number of iteration steps to be performed. Defaults to 100.} \item{\code{eps}}{a nonnegative number controlling the iteration, which stops when the maximal change in all \eqn{u(k)} is less than \code{eps}. Defaults to \eqn{10^{-6}}.} \item{\code{reltol}}{the relative convergence tolerance. Iteration stops when the relative change in the criterion function is less than \code{reltol}. Defaults to \eqn{10^{-6}}.} \item{\code{method}}{a character string indicating the fitting method to be employed by the individual least squares fits.} \item{\code{control}}{a list of control parameters to be used by the method of \code{\link{ls_fit_ultrametric}} employed. By default, if the \acronym{SUMT} method is used, 10 inner \acronym{SUMT} runs are performed for each refitting.} } It should be noted that the method used is a heuristic which can not be guaranteed to find the global minimum. } \value{ A list of objects of class \code{"\link{cl_ultrametric}"} containing the fitted ultrametric distances. } \references{ J. D. Carroll and S. Pruzansky (1980). Discrete and hybrid scaling models. In E. D. Lantermann and H. Feger (eds.), \emph{Similarity and Choice}. Bern (Switzerland): Huber. 
} \keyword{cluster} \keyword{optimize} clue/man/pclust.Rd0000644000175100001440000001551611430740706013606 0ustar hornikusers\name{pclust} \alias{pclust} \alias{pclust_family} \alias{pclust_object} \title{Prototype-Based Partitioning} \description{ Obtain prototype-based partitions of objects by minimizing the criterion \eqn{\sum w_b u_{bj}^m d(x_b, p_j)^e}, the sum of the case-weighted and membership-weighted \eqn{e}-th powers of the dissimilarities between the objects \eqn{x_b} and the prototypes \eqn{p_j}, for suitable dissimilarities \eqn{d} and exponents \eqn{e}. } \usage{ pclust(x, k, family, m = 1, weights = 1, control = list()) pclust_family(D, C, init = NULL, description = NULL, e = 1, .modify = NULL, .subset = NULL) pclust_object(prototypes, membership, cluster, family, m = 1, value, ..., classes = NULL, attributes = NULL) } \arguments{ \item{x}{the object to be partitioned.} \item{k}{an integer giving the number of classes to be used in the partition.} \item{family}{an object of class \code{"pclust_family"} as generated by \code{pclust_family}, containing the information about \eqn{d} and \eqn{e}.} \item{m}{a number not less than 1 controlling the softness of the partition (as the \dQuote{fuzzification parameter} of the fuzzy \eqn{c}-means algorithm). The default value of 1 corresponds to hard partitions obtained from a generalized \eqn{k}-means problem; values greater than one give partitions of increasing softness obtained from a generalized fuzzy \eqn{c}-means problem.} \item{weights}{a numeric vector of non-negative case weights. Recycled to the number of elements given by \code{x} if necessary.} \item{control}{a list of control parameters. 
See \bold{Details}.} \item{D}{a function for computing dissimilarities \eqn{d} between objects and prototypes.} \item{C}{a \sQuote{consensus} function with formals \code{x}, \code{weights} and \code{control} for computing a consensus prototype \eqn{p} minimizing \eqn{\sum_b w_b d(x_b, p) ^ e}.} \item{init}{a function with formals \code{x} and \code{k} initializing an object with \eqn{k} prototypes from the object \code{x} to be partitioned.} \item{description}{a character string describing the family.} \item{e}{a number giving the exponent \eqn{e} of the criterion.} \item{.modify}{a function with formals \code{x}, \code{i} and \code{value} for modifying a single prototype, or \code{NULL} (default).} \item{.subset}{a function with formals \code{x} and \code{i} for subsetting prototypes, or \code{NULL} (default).} \item{prototypes}{an object representing the prototypes of the partition.} \item{membership}{an object of class \code{"\link{cl_membership}"} with the membership values \eqn{u_{bj}}.} \item{cluster}{the class ids of the nearest hard partition.} \item{value}{the value of the criterion to be minimized.} \item{...}{further elements to be included in the generated pclust object.} \item{classes}{a character vector giving further classes to be given to the generated pclust object in addition to \code{"pclust"}, or \code{NULL} (default).} \item{attributes}{a list of attributes, or \code{NULL} (default).} } \value{ \code{pclust} returns the partition found as an object of class \code{"pclust"} (as obtained by calling \code{pclust_object}) which in addition to the \emph{default} components contains \code{call} (the matched call) and a \code{converged} attribute indicating convergence status (i.e., whether the maximal number of iterations was reached). \code{pclust_family} returns an object of class \code{"pclust_family"}, which is a list with components corresponding to the formals of \code{pclust_family}. 
\code{pclust_object} returns an object inheriting from class \code{"pclust"}, which is a list with components corresponding to the formals (up to and including \code{...}) of \code{pclust_object}, and additional classes and attributes specified by \code{classes} and \code{attributes}, respectively. } \details{ For \eqn{m = 1}, a generalization of the Lloyd-Forgy variant of the \eqn{k}-means algorithm is used, which iterates between reclassifying objects to their closest prototypes (according to the dissimilarities given by \code{D}), and computing new prototypes as the consensus for the classes (using \code{C}). For \eqn{m > 1}, a generalization of the fuzzy \eqn{c}-means recipe (e.g., Bezdek (1981)) is used, which alternates between computing optimal memberships for fixed prototypes, and computing new prototypes as the suitably weighted consensus clusterings for the classes. This procedure is repeated until convergence occurs, or the maximal number of iterations is reached. Currently, no local improvement heuristics are provided. It is possible to perform several runs of the procedure via control arguments \code{nruns} or \code{start} (the default is to perform a single run), in which case the first partition with the smallest value of the criterion is returned. The dissimilarity and consensus functions as well as the exponent \eqn{e} are specified via \code{family}. In principle, arbitrary representations of objects to be partitioned and prototypes (which do not necessarily have to be \dQuote{of the same kind}) can be employed. In addition to \code{D} and \code{C}, what is needed are means to obtain an initial collection of \eqn{k} prototypes (\code{init}), to modify a single prototype (\code{.modify}), and subset the prototypes (\code{.subset}). By default, list and (currently, only dense) matrix (with the usual convention that the rows correspond to the objects) are supported. Otherwise, the family has to provide the functions needed. 
Available control parameters are as follows. \describe{ \item{\code{maxiter}}{an integer giving the maximal number of iterations to be performed. Defaults to 100.} \item{\code{nruns}}{an integer giving the number of runs to be performed. Defaults to 1.} \item{\code{reltol}}{the relative convergence tolerance. Defaults to \code{sqrt(.Machine$double.eps)}.} \item{\code{start}}{a list of prototype objects to be used as starting values.} \item{\code{verbose}}{a logical indicating whether to provide some output on minimization progress. Defaults to \code{getOption("verbose")}.} \item{\code{control}}{control parameters to be used in the consensus function.} } The fixed point approach employed is a heuristic which cannot be guaranteed to find the global minimum, in particular if \code{C} is not an exact minimizer. Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the base algorithm. } \references{ J. C. Bezdek (1981). \emph{Pattern recognition with fuzzy objective function algorithms}. New York: Plenum. } \seealso{ \code{\link[stats]{kmeans}}, \code{\link[e1071]{cmeans}}. } clue/man/sumt.Rd0000644000175100001440000000670012734174226013265 0ustar hornikusers\name{sumt} \alias{sumt} \title{Sequential Unconstrained Minimization Technique} \description{ Solve constrained optimization problems via the Sequential Unconstrained Minimization Technique (\acronym{SUMT}). 
} \usage{ sumt(x0, L, P, grad_L = NULL, grad_P = NULL, method = NULL, eps = NULL, q = NULL, verbose = NULL, control = list()) } \arguments{ \item{x0}{a list of starting values, or a single starting value.} \item{L}{a function to minimize.} \item{P}{a non-negative penalty function such that \eqn{P(x)} is zero iff the constraints are satisfied.} \item{grad_L}{a function giving the gradient of \code{L}, or \code{NULL} (default).} \item{grad_P}{a function giving the gradient of \code{P}, or \code{NULL} (default).} \item{method}{a character string, or \code{NULL}. If not given, \code{"CG"} is used. If equal to \code{"nlm"}, minimization is carried out using \code{\link[stats]{nlm}}. Otherwise, \code{\link[stats]{optim}} is used with \code{method} as the given method.} \item{eps}{the absolute convergence tolerance. The algorithm stops if the (maximum) distance between successive \code{x} values is less than \code{eps}. Defaults to \code{sqrt(.Machine$double.eps)}.} \item{q}{a double greater than one controlling the growth of the \eqn{\rho_k} as described in \bold{Details}. Defaults to 10.} \item{verbose}{a logical indicating whether to provide some output on minimization progress. Defaults to \code{getOption("verbose")}.} \item{control}{a list of control parameters to be passed to the minimization routine in case \code{optim} is used.} } \details{ The Sequential Unconstrained Minimization Technique is a heuristic for constrained optimization. To minimize a function \eqn{L} subject to constraints, one employs a non-negative function \eqn{P} penalizing violations of the constraints, such that \eqn{P(x)} is zero iff \eqn{x} satisfies the constraints. One iteratively minimizes \eqn{L(x) + \rho_k P(x)}, where the \eqn{\rho} values are increased according to the rule \eqn{\rho_{k+1} = q \rho_k} for some constant \eqn{q > 1}, until convergence is obtained in the sense that the Euclidean distance between successive solutions \eqn{x_k} and \eqn{x_{k+1}} is small enough. 
Note that the \dQuote{solution} \eqn{x} obtained does not necessarily satisfy the constraints, i.e., has zero \eqn{P(x)}. Note also that there is no guarantee that global (approximately) constrained optima are found. Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the algorithm. The unconstrained minimizations are carried out by either \code{\link[stats]{optim}} or \code{\link[stats]{nlm}}, using analytic gradients if both \code{grad_L} and \code{grad_P} are given, and numeric ones otherwise. If more than one starting value is given, the solution with the minimal augmented criterion function value is returned. } \value{ A list inheriting from class \code{"sumt"}, with components \code{x}, \code{L}, \code{P}, and \code{rho} giving the solution obtained, the value of the criterion and penalty function at \code{x}, and the final \eqn{\rho} value used in the augmented criterion function. } \references{ A. V. Fiacco and G. P. McCormick (1968). \emph{Nonlinear programming: Sequential unconstrained minimization techniques}. New York: John Wiley & Sons. } \keyword{optimize} clue/man/cl_boot.Rd0000644000175100001440000000455211304023137013703 0ustar hornikusers\name{cl_boot} \alias{cl_boot} \title{Bootstrap Resampling of Clustering Algorithms} \description{ Generate bootstrap replicates of the results of applying a \dQuote{base} clustering algorithm to a given data set. 
} \usage{ cl_boot(x, B, k = NULL, algorithm = if (is.null(k)) "hclust" else "kmeans", parameters = list(), resample = FALSE) } \arguments{ \item{x}{the data set of objects to be clustered, as appropriate for the base clustering algorithm.} \item{B}{an integer giving the number of bootstrap replicates.} \item{k}{\code{NULL} (default), or an integer giving the number of classes to be used for a partitioning base algorithm.} \item{algorithm}{a character string or function specifying the base clustering algorithm.} \item{parameters}{a named list of additional arguments to be passed to the base algorithm.} \item{resample}{a logical indicating whether the data should be resampled in addition to \dQuote{sampling from the algorithm}. If resampling is used, the class memberships of the objects given in \code{x} are predicted from the results of running the base algorithm on bootstrap samples of \code{x}.} } \value{ A cluster ensemble of length \eqn{B}, with either (if resampling is not used, default) the results of running the base algorithm on the given data set, or (if resampling is used) the memberships for the given data predicted from the results of running the base algorithm on bootstrap samples of the data. } \details{ This is a rather simple-minded function with limited applicability, and mostly useful for studying the effect of (uncontrolled) random initializations of fixed-point partitioning algorithms such as \code{\link[stats]{kmeans}} or \code{\link[e1071]{cmeans}}, see the examples. To study the effect of varying control parameters or explicitly providing random starting values, the respective cluster ensemble has to be generated explicitly (most conveniently by using \code{\link{replicate}} to create a list \code{lst} of suitable instances of clusterings obtained by the base algorithm, and using \code{cl_ensemble(list = lst)} to create the ensemble). } \examples{ ## Study e.g. the effect of random kmeans() initializations. 
data("Cassini") pens <- cl_boot(Cassini$x, 15, 3) diss <- cl_dissimilarity(pens) summary(c(diss)) plot(hclust(diss)) } \keyword{cluster} clue/man/ls_fit_ultrametric.Rd0000644000175100001440000002076712734173740016201 0ustar hornikusers\name{ls_fit_ultrametric} \encoding{UTF-8} \alias{ls_fit_ultrametric} \title{Least Squares Fit of Ultrametrics to Dissimilarities} \description{ Find the ultrametric with minimal square distance (Euclidean dissimilarity) to given dissimilarity objects. } \usage{ ls_fit_ultrametric(x, method = c("SUMT", "IP", "IR"), weights = 1, control = list()) } \arguments{ \item{x}{a dissimilarity object inheriting from or coercible to class \code{"\link{dist}"}, or an ensemble of such objects.} \item{method}{a character string indicating the fitting method to be employed. Must be one of \code{"SUMT"} (default), \code{"IP"}, or \code{"IR"}, or a unique abbreviation thereof.} \item{weights}{a numeric vector or matrix with non-negative weights for obtaining a weighted least squares fit. If a matrix, its numbers of rows and columns must be the same as the number of objects in \code{x}, and the lower diagonal part is used. Otherwise, it is recycled to the number of elements in \code{x}.} \item{control}{a list of control parameters. See \bold{Details}.} } \value{ An object of class \code{"\link{cl_ultrametric}"} containing the fitted ultrametric distances. } \details{ For a single dissimilarity object \code{x}, the problem to be solved is minimizing \deqn{L(u) = \sum_{i,j} w_{ij} (x_{ij} - u_{ij})^2} over all \eqn{u} satisfying the ultrametric constraints (i.e., for all \eqn{i, j, k}, \eqn{u_{ij} \le \max(u_{ik}, u_{jk})}). This problem is known to be NP hard (Krivanek and Moravek, 1986). 
For an ensemble of dissimilarity objects, the criterion function is \deqn{L(u) = \sum_b w_b \sum_{i,j} w_{ij} (x_{ij}(b) - u_{ij})^2,} where \eqn{w_b} is the weight given to element \eqn{x_b} of the ensemble and can be specified via control parameter \code{weights} (default: all ones). This problem reduces to the above basic problem with \eqn{x} as the \eqn{w_b}-weighted mean of the \eqn{x_b}. We provide three heuristics for solving the basic problem. Method \code{"SUMT"} implements the \acronym{SUMT} (Sequential Unconstrained Minimization Technique, Fiacco and McCormick, 1968) approach of de Soete (1986) which in turn simplifies the suggestions in Carroll and Pruzansky (1980). (See \code{\link{sumt}} for more information on the \acronym{SUMT} approach.) We then use a final single linkage hierarchical clustering step to ensure that the returned object exactly satisfies the ultrametric constraints. The starting value \eqn{u_0} is obtained by \dQuote{random shaking} of the given dissimilarity object (if not given). If there are missing values in \code{x}, i.e., the given dissimilarities are \emph{incomplete}, we follow a suggestion of de Soete (1984), imputing the missing values by the weighted mean of the non-missing ones, and setting the corresponding weights to zero. Available control parameters are \code{method}, \code{control}, \code{eps}, \code{q}, and \code{verbose}, which have the same roles as for \code{\link{sumt}}, and the following. \describe{ \item{\code{nruns}}{an integer giving the number of runs to be performed. Defaults to 1.} \item{\code{start}}{a single dissimilarity, or a list of dissimilarities to be employed as starting values.} } The default optimization using conjugate gradients should work reasonably well for medium to large size problems. For \dQuote{small} ones, using \code{nlm} is usually faster. 
Note that the number of ultrametric constraints is of the order \eqn{n^3}, where \eqn{n} is the number of objects in the dissimilarity object, suggesting to use the \acronym{SUMT} approach in favor of \code{\link[stats]{constrOptim}}. If starting values for the \acronym{SUMT} are provided via \code{start}, the number of starting values gives the number of runs to be performed, and control option \code{nruns} is ignored. Otherwise, \code{nruns} starting values are obtained by random shaking of the dissimilarity to be fitted. In the case of multiple \acronym{SUMT} runs, the (first) best solution found is returned. Method \code{"IP"} implements the Iterative Projection approach of Hubert and Arabie (1995). This iteratively projects the current dissimilarities to the closed convex set given by the ultrametric constraints (3-point conditions) for a single index triple \eqn{(i, j, k)}, in fact replacing the two largest values among \eqn{d_{ij}, d_{ik}, d_{jk}} by their mean. The following control parameters can be provided via the \code{control} argument. \describe{ \item{\code{nruns}}{an integer giving the number of runs to be performed. Defaults to 1.} \item{\code{order}}{a permutation of the numbers from 1 to the number of objects in \code{x}, specifying the order in which the ultrametric constraints are considered, or a list of such permutations.} \item{\code{maxiter}}{an integer giving the maximal number of iterations to be employed.} \item{\code{tol}}{a double indicating the maximal convergence tolerance. The algorithm stops if the total absolute change in the dissimilarities in an iteration is less than \code{tol}.} \item{\code{verbose}}{a logical indicating whether to provide some output on minimization progress. Defaults to \code{getOption("verbose")}.} } If permutations are provided via \code{order}, the number of these gives the number of runs to be performed, and control option \code{nruns} is ignored. 
Otherwise, \code{nruns} randomly generated orders are tried. In the case of multiple runs, the (first) best solution found is returned. Non-identical weights and incomplete dissimilarities are currently not supported. Method \code{"IR"} implements the Iterative Reduction approach suggested by Roux (1988), see also Barthélémy and Guénoche (1991). This is similar to the Iterative Projection method, but modifies the dissimilarities between objects proportionally to the aggregated change incurred from the ultrametric projections. Available control parameters are identical to those of method \code{"IP"}. Non-identical weights and incomplete dissimilarities are currently not supported. It should be noted that all methods are heuristics which can not be guaranteed to find the global minimum. Standard practice would recommend to use the best solution found in \dQuote{sufficiently many} replications of the base algorithm. } \references{ J.-P. Barthélémy and A. Guénoche (1991). \emph{Trees and proximity representations}. Chichester: John Wiley & Sons. ISBN 0-471-92263-3. J. D. Carroll and S. Pruzansky (1980). Discrete and hybrid scaling models. In E. D. Lantermann and H. Feger (eds.), \emph{Similarity and Choice}. Bern (Switzerland): Huber. L. Hubert and P. Arabie (1995). Iterative projection strategies for the least squares fitting of tree structures to proximity data. \emph{British Journal of Mathematical and Statistical Psychology}, \bold{48}, 281--317. \doi{10.1111/j.2044-8317.1995.tb01065.x}. M. Krivanek and J. Moravek (1986). NP-hard problems in hierarchical tree clustering. \emph{Acta Informatica}, \bold{23}, 311--323. \doi{10.1007/BF00289116}. M. Roux (1988). Techniques of approximation for building two tree structures. In C. Hayashi and E. Diday and M. Jambu and N. Ohsumi (Eds.), \emph{Recent Developments in Clustering and Data Analysis}, pages 151--170. New York: Academic Press. G. de Soete (1984). Ultrametric tree representations of incomplete dissimilarity data. 
\emph{Journal of Classification}, \bold{1}, 235--242. \doi{10.1007/BF01890124}. G. de Soete (1986). A least squares algorithm for fitting an ultrametric tree to a dissimilarity matrix. \emph{Pattern Recognition Letters}, \bold{2}, 133--137. \doi{10.1016/0167-8655(84)90036-9}. } \seealso{ \code{\link{cl_consensus}} for computing least squares (Euclidean) consensus hierarchies by least squares fitting of average ultrametric distances; \code{\link{l1_fit_ultrametric}}. } \examples{ ## Least squares fit of an ultrametric to the Miller-Nicely consonant ## phoneme confusion data. data("Phonemes") ## Note that the Phonemes data set has the consonant misclassification ## probabilities, i.e., the similarities between the phonemes. d <- as.dist(1 - Phonemes) u <- ls_fit_ultrametric(d, control = list(verbose = TRUE)) ## Cophenetic correlation: cor(d, u) ## Plot: plot(u) ## ("Basically" the same as Figure 1 in de Soete (1986).) } \keyword{cluster} \keyword{optimize} clue/man/n_of_objects.Rd0000644000175100001440000000221212211412717014711 0ustar hornikusers\name{n_of_objects} \alias{n_of_objects} \title{Number of Objects in a Partition or Hierarchy} \description{Determine the number of objects from which a partition or hierarchy was obtained.} \usage{ n_of_objects(x) } \arguments{ \item{x}{an \R object representing a (hard or soft) partition or a hierarchy of objects, or dissimilarities between objects.} } \value{ An integer giving the number of objects. } \details{ This is a generic function. The methods provided in package \pkg{clue} handle the partitions and hierarchies obtained from clustering functions in the base R distribution, as well as packages \pkg{RWeka}, \pkg{ape}, \pkg{cba}, \pkg{cclust}, \pkg{cluster}, \pkg{e1071}, \pkg{flexclust}, \pkg{flexmix}, \pkg{kernlab}, \pkg{mclust}, \pkg{movMF} and \pkg{skmeans} (and of course, \pkg{clue} itself). There is also a method for object dissimilarities which inherit from class \code{"\link{dist}"}. 
} \seealso{ \code{\link{is.cl_partition}}, \code{\link{is.cl_hierarchy}} } \examples{ data("Cassini") pcl <- kmeans(Cassini$x, 3) n_of_objects(pcl) hcl <- hclust(dist(USArrests)) n_of_objects(hcl) } \keyword{cluster}