fpc/0000755000176200001440000000000014674451412011032 5ustar liggesusersfpc/tests/0000755000176200001440000000000014674374130012175 5ustar liggesusersfpc/tests/fpctests_notallin.Rout.save0000644000176200001440000015553314674370012017547 0ustar liggesusers R Under development (unstable) (2024-09-11 r87117) -- "Unsuffered Consequences" Copyright (C) 2024 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > # This tests a few things that are not run in the examples. > > library(fpc) > library(MASS) > library(diptest) > library(mclust) Package 'mclust' version 6.1.1 Type 'citation("mclust")' for citing this R package in publications. > options(digits=3) > > set.seed(4634) > face <- rFace(300,dMoNo=2,dNoEy=0,p=3) > grface <- as.integer(attr(face,"grouping")) > # discrproj(face,grface, clnum=1, method="bc")$units > discrproj(face,grface, clnum=1, method="anc")$units [,1] [,2] [,3] [1,] -1.3912 -0.3093 0.1093 [2,] 0.6211 -0.2233 0.0164 [3,] -0.0313 0.0749 -0.8074 > discrproj(face,grface, clnum=1, method="awc")$units [,1] [,2] [,3] [1,] 0.215 -0.3389 -0.51886 [2,] -0.370 0.0144 -0.00893 [3,] 0.111 0.7914 -0.23574 > > > pamk(face,krange=1:5,criterion="ch",usepam=FALSE,critout=TRUE) 1 clusters 0 2 clusters 1321 3 clusters 963 4 clusters 833 5 clusters 934 $pamobject Call: clara(x = sdata, k = k) Medoids: [,1] [,2] [,3] [1,] 0.119 3.53 1.49 [2,] 1.742 17.02 1.12 Objective function: 2.44 Clustering vector: int [1:300] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 ... Cluster sizes: 202 98 Best sample: [1] 5 11 18 21 28 40 50 61 62 65 79 82 83 86 93 94 105 119 130 [20] 160 172 180 182 194 195 202 206 208 217 223 230 231 239 248 250 256 259 261 [39] 264 268 271 274 277 299 Available components: [1] "sample" "medoids" "i.med" "clustering" "objective" [6] "clusinfo" "diss" "call" "silinfo" "data" $nc [1] 2 $crit [1] 0 1321 963 833 934 > > set.seed(20000) > face50 <- rFace(50,dMoNo=2,dNoEy=0,p=2) > pamk(dist(face50),krange=1:5,criterion="asw",critout=TRUE) 1 clusters 0 2 clusters 0.742 3 clusters 0.748 4 clusters 0.581 5 clusters 0.544 $pamobject Medoids: ID [1,] "22" "22" [2,] "34" "34" [3,] "49" "49" Clustering vector: 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 3 2 3 3 Objective function: build swap 2.14 2.09 Available components: [1] "medoids" "id.med" "clustering" "objective" "isolation" [6] "clusinfo" "silinfo" "diss" "call" $nc [1] 3 $crit [1] 0.000 0.742 0.748 0.581 0.544 > > x <- c(1,2,3,6,6,7,8,120) > ff8 <- fixmahal(x) > summary(ff8) * Mahalanobis Fixed Point Clusters * Often a clear cluster in the data leads to several similar FPCs. The summary shows the representative FPCs of groups of similar FPCs. Method fuzzy was used. Number of representative FPCs: 1 FPCs with less than 4 points were skipped. FPCs with ratio of times found to number of points less than 0.1 were skipped. 0 iteration runs led to 0 skipped clusters. 
Weight 1 for r^2<= 3.84 weight 0 for r^2> 7.88 Constant ca= 3.84 corresponding to alpha= 0.95 FPC 1 Times found (group members): 9 Ratio to size: 1.29 Mean: [1] 4.71 Covariance matrix: [,1] [1,] 6.2 Number of points (sum of weights): 7 Number of points (rounded weights) in intersection of representative FPCs [,1] [1,] 7 > # ...dataset a bit too small for the defaults... > ff9 <- fixmahal(x, mnc=3, startn=3) > summary(ff9) * Mahalanobis Fixed Point Clusters * Often a clear cluster in the data leads to several similar FPCs. The summary shows the representative FPCs of groups of similar FPCs. Method fuzzy was used. Number of representative FPCs: 3 FPCs with less than 3 points were skipped. FPCs with ratio of times found to number of points less than 0.1 were skipped. 0 iteration runs led to 0 skipped clusters. Weight 1 for r^2<= 3.84 weight 0 for r^2> 7.88 Constant ca= 3.84 corresponding to alpha= 0.95 FPC 1 Times found (group members): 4 Ratio to size: 1.33 Mean: [1] 6.33 Covariance matrix: [,1] [1,] 0.222 Number of points (sum of weights): 3 FPC 2 Times found (group members): 3 Ratio to size: 1 Mean: [1] 2 Covariance matrix: [,1] [1,] 0.667 Number of points (sum of weights): 3 FPC 3 Times found (group members): 2 Ratio to size: 0.286 Mean: [1] 4.71 Covariance matrix: [,1] [1,] 6.2 Number of points (sum of weights): 7 Number of points (rounded weights) in intersection of representative FPCs [,1] [,2] [,3] [1,] 3 0 3 [2,] 0 3 3 [3,] 3 3 7 > > set.seed(776655) > v1 <- rnorm(100) > v2 <- rnorm(100) > d1 <- sample(1:5,100,replace=TRUE) > d2 <- sample(1:4,100,replace=TRUE) > ldata <- cbind(v1,v2,d1,d2) > fr <- flexmixedruns(ldata, + continuous=2,discrete=2,simruns=1,initial.cluster=c(rep(1,5),rep(2,45), + rep(3,50)), + control=list(minprior=0.1), + n.cluster=3,allout=FALSE) k= 3 new best fit found in run 1 k= 3 BIC= 1299 > print(fr$optsummary) Call: flexmix(formula = x ~ 1, k = k, cluster = initial.cluster, model = lcmixed(continuous = continuous, discrete = discrete, ppdim = ppdim, diagonal = diagonal), control = control) prior size post>0 ratio Comp.1 0.204 23 61 0.377 Comp.2 0.284 30 71 0.423 Comp.3 0.512 47 77 0.610 'log Lik.' 
-569 (df=35) AIC: 1208 BIC: 1299 > > dface <- dist(face50) > > > hclusttreeCBI(face50,minlevel=2,method="complete",scaling=TRUE) $result Call: hclust(d = dist(sdata), method = method) Cluster method : complete Distance : euclidean Number of objects: 50 $nc [1] 48 $clusterlist $clusterlist[[1]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] TRUE TRUE $clusterlist[[2]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE TRUE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[3]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[4]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[5]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] TRUE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[6]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE TRUE FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[7]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE [37] FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[8]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] TRUE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[9]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE TRUE FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[10]] [1] FALSE FALSE TRUE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE 
FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[11]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE TRUE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[12]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] TRUE FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[13]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE [37] TRUE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[14]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE TRUE FALSE TRUE FALSE FALSE FALSE FALSE FALSE TRUE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[15]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE TRUE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[16]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE TRUE FALSE FALSE [49] FALSE FALSE $clusterlist[[17]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE [37] TRUE TRUE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[18]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[19]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE TRUE TRUE FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[20]] [1] FALSE FALSE FALSE FALSE FALSE TRUE TRUE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[21]] [1] 
FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE [13] TRUE FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[22]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE [25] FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[23]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE TRUE [37] TRUE TRUE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[24]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] TRUE TRUE FALSE FALSE FALSE TRUE TRUE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[25]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE FALSE TRUE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[26]] [1] FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[27]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE TRUE TRUE [37] TRUE TRUE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[28]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] TRUE TRUE TRUE TRUE FALSE TRUE TRUE TRUE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[29]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE TRUE TRUE TRUE FALSE FALSE FALSE TRUE FALSE TRUE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[30]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[31]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE 
FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE FALSE TRUE FALSE FALSE [49] FALSE FALSE $clusterlist[[32]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE [37] TRUE TRUE FALSE TRUE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[33]] [1] TRUE FALSE TRUE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[34]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE [13] TRUE FALSE FALSE FALSE FALSE TRUE TRUE TRUE FALSE TRUE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[35]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE [25] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[36]] [1] FALSE FALSE FALSE FALSE FALSE TRUE TRUE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE TRUE TRUE TRUE FALSE FALSE FALSE TRUE FALSE TRUE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[37]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE FALSE FALSE [49] FALSE FALSE $clusterlist[[38]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE [37] TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[39]] [1] FALSE FALSE FALSE TRUE FALSE FALSE FALSE TRUE TRUE FALSE FALSE FALSE [13] FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[40]] [1] FALSE FALSE FALSE FALSE FALSE TRUE TRUE FALSE FALSE TRUE TRUE TRUE [13] TRUE FALSE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[41]] [1] FALSE FALSE FALSE FALSE FALSE TRUE TRUE FALSE FALSE TRUE TRUE TRUE [13] TRUE FALSE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE [25] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[42]] [1] TRUE TRUE TRUE FALSE TRUE FALSE FALSE 
FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[43]] [1] FALSE FALSE FALSE TRUE FALSE FALSE FALSE TRUE TRUE FALSE FALSE FALSE [13] FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE FALSE FALSE [49] FALSE FALSE $clusterlist[[44]] [1] TRUE TRUE TRUE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE [37] TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[45]] [1] FALSE FALSE FALSE TRUE FALSE TRUE TRUE TRUE TRUE TRUE TRUE TRUE [13] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE [25] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE FALSE FALSE [49] FALSE FALSE $clusterlist[[46]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE [49] TRUE TRUE $clusterlist[[47]] [1] TRUE TRUE TRUE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE [37] TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE FALSE TRUE [49] FALSE FALSE $clusterlist[[48]] [1] FALSE FALSE FALSE TRUE FALSE TRUE TRUE TRUE TRUE TRUE TRUE TRUE [13] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE [25] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE TRUE FALSE [49] TRUE TRUE $partition [1] 1 1 1 2 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 [39] 1 1 1 2 2 2 2 2 2 1 2 2 $clustermethod [1] "hclust, full tree" > > disthclusttreeCBI(dface,minlevel=2,method="complete") $result Call: hclust(d = as.dist(dmatrix), method = method) Cluster method : complete Distance : euclidean Number of objects: 50 $nc [1] 48 $clusterlist $clusterlist[[1]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] TRUE TRUE $clusterlist[[2]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[3]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE 
FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[4]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE TRUE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[5]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE [37] FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[6]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE [37] FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[7]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE TRUE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[8]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE [37] TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[9]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE [13] TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[10]] [1] FALSE FALSE TRUE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[11]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[12]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE TRUE FALSE FALSE [49] FALSE FALSE $clusterlist[[13]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE TRUE FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[14]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE 
FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] TRUE FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[15]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] TRUE TRUE FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[16]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE TRUE FALSE TRUE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[17]] [1] FALSE FALSE FALSE FALSE FALSE TRUE TRUE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[18]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE TRUE TRUE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[19]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE TRUE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[20]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE TRUE TRUE TRUE FALSE FALSE FALSE TRUE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[21]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE [25] FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[22]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE [37] FALSE TRUE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[23]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE FALSE TRUE FALSE FALSE [49] FALSE FALSE $clusterlist[[24]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] TRUE TRUE FALSE FALSE FALSE TRUE TRUE FALSE FALSE FALSE 
FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[25]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE TRUE FALSE [37] TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[26]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE [25] FALSE FALSE TRUE FALSE TRUE FALSE FALSE TRUE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[27]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE FALSE [13] FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE FALSE TRUE TRUE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[28]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE [13] TRUE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[29]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE TRUE FALSE [37] TRUE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[30]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE TRUE [37] FALSE TRUE FALSE TRUE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[31]] [1] FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE [13] TRUE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[32]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE [25] FALSE FALSE TRUE TRUE TRUE FALSE FALSE TRUE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[33]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE FALSE FALSE [49] FALSE FALSE $clusterlist[[34]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE FALSE [13] FALSE FALSE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[35]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE 
FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE [37] TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[36]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[37]] [1] FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE [13] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[38]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE [25] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[39]] [1] TRUE FALSE TRUE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[40]] [1] FALSE TRUE FALSE FALSE FALSE TRUE TRUE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[41]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE [25] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[42]] [1] FALSE TRUE FALSE TRUE FALSE TRUE TRUE FALSE FALSE TRUE TRUE TRUE [13] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[43]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE [37] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE FALSE FALSE [49] FALSE FALSE $clusterlist[[44]] [1] TRUE FALSE TRUE FALSE TRUE FALSE FALSE TRUE TRUE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE [25] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[45]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE 
FALSE FALSE FALSE FALSE FALSE TRUE FALSE [49] TRUE TRUE $clusterlist[[46]] [1] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE [13] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE [25] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[47]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE [49] TRUE TRUE $clusterlist[[48]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE [37] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE [49] TRUE TRUE $partition [1] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 [39] 2 2 2 2 2 2 2 2 2 2 2 2 $clustermethod [1] "hclust, full tree" > > noisemclustCBI(face50,G=1:5,emModelNames="VVV",nnk=2) $result Bayesian Information Criterion (BIC): EII VII EEI VEI EVI VVI EEE VEE EVE VVE EEV VEV EVV VVV 1 -521 -521 -506 -506 -506 -506 -525 -525 -525 -525 -525 -525 -525 -525 2 -498 -501 -501 -477 -464 -466 -505 -481 -467 -470 -467 -470 -470 -473 3 -468 -467 -480 -466 -476 -461 -482 -470 -479 -465 -480 -468 -486 -469 4 -449 -448 -485 -449 -445 -449 -488 -452 -439 -452 -486 -482 -444 -455 5 -456 -452 -456 -454 -444 -458 -460 -458 -448 -462 -451 -485 -456 -469 Top 3 models based on the BIC criterion: EVE,4 EVV,4 EVI,5 -439 -444 -444 $nc [1] 5 $nccl [1] 4 $clusterlist $clusterlist[[1]] [1] FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE [13] TRUE FALSE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[2]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE [25] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[3]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE [37] TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE $clusterlist[[4]] [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE FALSE FALSE [49] FALSE FALSE $clusterlist[[5]] [1] TRUE TRUE TRUE FALSE TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE [13] FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE [49] TRUE TRUE $partition [1] 5 5 5 1 5 5 5 5 5 1 1 1 1 5 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 
2 2 3 3 3 3 3 3 [39] 3 3 3 4 4 4 4 4 5 5 5 5 $nnk [1] 2 $initnoise [1] TRUE TRUE TRUE FALSE TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE [49] TRUE TRUE $clustermethod [1] "mclustBIC" > > distnoisemclustCBI(dface,G=5,emModelNames="EEE",nnk=2, + mdsmethod="classical", + mdsdim=2) $result Bayesian Information Criterion (BIC): EII VII EEI VEI EVI VVI EEE VEE EVE VVE EEV VEV EVV VVV 5 -461 NA -496 NA NA NA -500 NA NA NA -496 NA NA NA Top 3 models based on the BIC criterion: EII,5 EEI,5 EEV,5 -461 -496 -496 $nc [1] 6 $nccl [1] 5 $clusterlist $clusterlist[[1]] 1 2 3 4 5 6 7 8 9 10 11 12 13 FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE 14 15 16 17 18 19 20 21 22 23 24 25 26 TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE 27 28 29 30 31 32 33 34 35 36 37 38 39 FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE 40 41 42 43 44 45 46 47 48 49 50 FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE $clusterlist[[2]] 1 2 3 4 5 6 7 8 9 10 11 12 13 FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE 14 15 16 17 18 19 20 21 22 23 24 25 26 FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE 27 28 29 30 31 32 33 34 35 36 37 38 39 TRUE TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE 40 41 42 43 44 45 46 47 48 49 50 FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE $clusterlist[[3]] 1 2 3 4 5 6 7 8 9 10 11 12 13 FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE 14 15 16 17 18 19 20 21 22 23 24 25 26 FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE 27 28 29 30 31 32 33 34 35 36 37 38 39 FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE TRUE TRUE 40 41 42 43 44 45 46 47 48 49 50 TRUE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE $clusterlist[[4]] 1 2 3 4 5 6 7 8 9 10 11 12 13 FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE 14 15 16 17 18 19 20 21 22 23 24 25 26 FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE 27 28 29 30 31 32 33 34 35 36 37 38 39 FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE 40 41 42 43 44 45 46 47 48 49 50 FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE $clusterlist[[5]] 1 2 3 4 5 6 7 8 9 10 11 12 13 FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE 14 15 16 17 18 19 20 21 22 23 24 25 26 FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE 27 28 29 30 31 32 33 34 35 36 37 38 39 FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE 40 41 42 43 44 45 46 47 48 49 50 FALSE FALSE TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE $clusterlist[[6]] 1 2 3 4 5 6 7 8 9 10 11 12 13 TRUE TRUE TRUE FALSE TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE 14 15 16 17 18 19 20 21 22 23 24 25 26 FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE 27 28 29 30 31 32 33 34 35 36 37 38 39 FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE 40 41 42 43 44 45 46 47 48 49 50 FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE $partition 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 6 
6 6 1 6 6 6 6 6 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 5 5 5 5 5 6 6 6 6 $nnk [1] 2 $initnoise [1] TRUE TRUE TRUE FALSE TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE [49] TRUE TRUE $clustermethod [1] "mclustBIC" > > mahalCBI(face50,clustercut=0.5) $result Mahalanobis Fixed Point Cluster object 3 representative stable fixed point clusters of totally 7 found fixed point clusters. $nc [1] 3 $clusterlist $clusterlist[[1]] [1] 0 0 0 1 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 0 0 0 1 1 0 0 0 0 0 0 0 [39] 0 0 0 0 0 0 0 0 0 0 0 0 $clusterlist[[2]] [1] 0 0 0 0 0 1 1 0 0 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 [39] 0 0 0 0 0 0 0 0 0 0 0 0 $clusterlist[[3]] [1] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 [39] 1 1 1 1 1 1 1 1 0 0 0 0 $partition [1] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 [39] 1 1 1 1 1 1 1 1 1 1 1 1 $clustermethod [1] "fixmahal" > > set.seed(20000) > face100 <- rFace(100,dMoNo=2,dNoEy=0,p=2) > cbf <- clusterboot(face100,B=2,clustermethod=speccCBI,showplots=TRUE,k=6,seed=50000) boot 1 boot 2 > cbf$nc [1] 6 > cbf$noisemethod [1] FALSE > cbf$bootmethod [1] "boot" > # suppressWarnings(if(require(tclust)) > # print(clusterboot(face100,B=2,clustermethod=tclustCBI,showplots=TRUE,k=5,seed=50000,noisemethod=TRUE))) > > > complete3 <- cutree(hclust(dface),3) > > cluster.stats(dface,complete3,G2=TRUE) $n [1] 50 $cluster.number [1] 3 $cluster.size [1] 32 14 4 $min.cluster.size [1] 4 $noisen [1] 0 $diameter [1] 8.53 4.94 9.00 $average.distance [1] 2.95 2.15 7.05 $median.distance [1] 2.94 1.44 8.32 $separation [1] 8.67 7.46 7.46 $average.toother [1] 15.5 12.9 20.6 $separation.matrix [,1] [,2] [,3] [1,] 0.00 8.67 17.24 [2,] 8.67 0.00 7.46 [3,] 17.24 7.46 0.00 $ave.between.matrix [,1] [,2] [,3] [1,] 0.0 13 24.4 [2,] 13.0 0 12.0 [3,] 24.4 12 0.0 $average.between [1] 15.2 $average.within [1] 3.05 $n.between [1] 632 $n.within [1] 593 $max.diameter [1] 9 $min.separation [1] 7.46 $within.cluster.ss [1] 319 $clus.avg.silwidths 1 2 3 0.764 0.821 0.346 $avg.silwidth [1] 0.746 $g2 [1] 1 $g3 NULL $pearsongamma [1] 0.838 $dunn [1] 0.829 $dunn2 [1] 1.71 $entropy [1] 0.844 $wb.ratio [1] 0.2 $ch [1] 229 $cwidegap [1] 2.90 2.49 8.32 $widestgap [1] 8.32 $sindex [1] 7.78 $corrected.rand NULL $vi NULL > > set.seed(55667788) > > data(crabs) > dc <- crabs[,4:8] > cmo <- mclustBIC(crabs[,4:8],G=9,modelNames="EEE") > # set.seed(12345) > cm <- mclustBIC(crabs[,4:8],G=9,modelNames="EEE", + initialization=list(noise=(1:200)[sample(200,50)])) > > > scm <- summary(cm,crabs[,4:8]) > scmo <- summary(cmo,crabs[,4:8]) > > set.seed(334455) > summary(mergenormals(crabs[,4:8],scm,method="ridge.ratio",by=0.05)) * Merging Gaussian mixture components * Method: ridge.ratio , cutoff value: 0.2 Original number of components: 9 (not including noise which is denoted by clustering=0) Number of clusters after merging: 1 Values at which clusters were merged: [,1] [,2] [1,] 8 0.828 [2,] 7 1.000 [3,] 6 1.000 [4,] 5 0.888 [5,] 4 1.000 [6,] 3 1.000 [7,] 2 0.784 [8,] 1 0.845 Components assigned to clusters: [,1] [1,] 0 [2,] 1 [3,] 1 [4,] 1 [5,] 1 [6,] 1 [7,] 1 [8,] 1 [9,] 1 [10,] 1 > 
summary(mergenormals(crabs[,4:8],scmo,method="ridge.uni",by=0.05)) * Merging Gaussian mixture components * Method: ridge.uni , cutoff value: 1 Original number of components: 9 Number of clusters after merging: 8 Values at which clusters were merged: [,1] [1,] 8 [2,] 9 Components assigned to clusters: [,1] [1,] 1 [2,] 1 [3,] 2 [4,] 3 [5,] 4 [6,] 5 [7,] 6 [8,] 7 [9,] 8 > # summary(mergenormals(crabs[,4:8],scm,method="diptantrum",by=0.05)) > # summary(mergenormals(crabs[,4:8],scmo,method="dipuni",by=0.05)) > # summary(mergenormals(crabs[,4:8],scm,method="predictive",M=2)) > > set.seed(20000) > x1 <- rnorm(50) > y <- rnorm(100) > x2 <- rnorm(40,mean=20) > x3 <- rnorm(10,mean=25,sd=100) > x0 <- cbind(c(x1,x2,x3),y) > > prediction.strength(x0,M=10,Gmax=4, + clustermethod=noisemclustCBI, + classification="qda") Prediction strength Clustering method: mclustBIC Maximum number of clusters: 4 Resampled data sets: 10 Mean pred.str. for numbers of clusters: 1 0.815 0.874 0.591 Cutoff value: 0.8 Largest number of clusters better than cutoff: 3 > > prediction.strength(dist(x0),M=10,Gmax=4, + clustermethod=claraCBI, + classification="centroids") Prediction strength Clustering method: clara/pam Maximum number of clusters: 4 Resampled data sets: 10 Mean pred.str. for numbers of clusters: 1 0 0 0 Cutoff value: 0.8 Largest number of clusters better than cutoff: 1 > > > set.seed(20000) > xdata <- c(rnorm(10,0,1),rnorm(10,8,1)) > clustermethod=c("claraCBI","dbscanCBI") > > clustermethodpars <- list() > clustermethodpars[[1]] <- clustermethodpars[[2]] <- list() > clustermethodpars[[2]]$eps <- 2 > clustermethodpars[[2]]$MinPts <- 2 > cbs <- clusterbenchstats(xdata,G=3,clustermethod=clustermethod, + distmethod=rep(TRUE,2),ncinput=c(TRUE,FALSE),scaling=FALSE, + clustermethodpars=clustermethodpars,nnruns=2,kmruns=2,fnruns=1,avenruns=1,useallg=TRUE) [1] "claraCBI" [1] "dbscanCBI" [1] "Computation of validity statistics" comsum 1 comsum 2 [1] "Simulation" 3 clusters; nn run 1 3 clusters; nn run 2 3 clusters; fn run 1 3 clusters; aven run 1 3 clusters; km run 1 3 clusters; km run 2 [1] "Simulation quantile re-standardisation" [1] "Simulation sd re-standardisation" > > print(cbs$sstat,aggregate=TRUE,weights=c(1,0,0,0,0,1,0,0,0,1,0,1,1,0,0,1),include.othernc=cbs$cm$othernc) avewithin method 2 3 1 claraCBI NA 0.67 2 dbscanCBI 0.14 NA mnnd method 2 3 1 claraCBI NA 0.37 2 dbscanCBI 0.39 NA cvnnd method 2 3 1 claraCBI NA 0.32 2 dbscanCBI 0.3 NA maxdiameter method 2 3 1 claraCBI NA 0.66 2 dbscanCBI 0.56 NA widestgap method 2 3 1 claraCBI NA 0.59 2 dbscanCBI 0.59 NA sindex method 2 3 1 claraCBI NA 0.1 2 dbscanCBI 5.4 NA minsep method 2 3 1 claraCBI NA -1.6 2 dbscanCBI 17 NA asw method 2 3 1 claraCBI NA 0.55 2 dbscanCBI 1.7 NA dindex method 2 3 1 claraCBI NA 1.4 2 dbscanCBI -1.3 NA denscut method 2 3 1 claraCBI NA -2.3 2 dbscanCBI 0.38 NA highdgap method 2 3 1 claraCBI NA 0.73 2 dbscanCBI 0.55 NA pearsongamma method 2 3 1 claraCBI NA 0.54 2 dbscanCBI 1.9 NA withinss method 2 3 1 claraCBI NA 0.63 2 dbscanCBI 0.42 NA entropy method 2 3 1 claraCBI NA 0.85 2 dbscanCBI 1.6 NA pamc method 2 3 1 claraCBI NA 0.83 2 dbscanCBI 0.052 NA dmode method 2 3 1 claraCBI NA 1.2 2 dbscanCBI -0.87 NA aggregate method 2 3 1 claraCBI NA 0.15 2 dbscanCBI 1.2 NA > print(cbs$qstat,aggregate=TRUE,weights=c(1,0,0,0,0,1,0,0,0,1,0,1,1,0,0,1),include.othernc=cbs$cm$othernc) avewithin method 2 3 1 claraCBI NA 0.88 2 dbscanCBI 0.38 NA mnnd method 2 3 1 claraCBI NA 0.62 2 dbscanCBI 0.75 NA cvnnd method 2 3 1 claraCBI NA 0.75 2 dbscanCBI 0.75 NA maxdiameter 
method 2 3 1 claraCBI NA 0.88 2 dbscanCBI 0.38 NA widestgap method 2 3 1 claraCBI NA 0.38 2 dbscanCBI 0.38 NA sindex method 2 3 1 claraCBI NA 0.38 2 dbscanCBI 1 NA minsep method 2 3 1 claraCBI NA 0.12 2 dbscanCBI 1 NA asw method 2 3 1 claraCBI NA 0.62 2 dbscanCBI 1 NA dindex method 2 3 1 claraCBI NA 0.88 2 dbscanCBI 0.12 NA denscut method 2 3 1 claraCBI NA 0.12 2 dbscanCBI 0.25 NA highdgap method 2 3 1 claraCBI NA 0.88 2 dbscanCBI 0.38 NA pearsongamma method 2 3 1 claraCBI NA 0.5 2 dbscanCBI 1 NA withinss method 2 3 1 claraCBI NA 0.88 2 dbscanCBI 0.38 NA entropy method 2 3 1 claraCBI NA 0.75 2 dbscanCBI 1 NA pamc method 2 3 1 claraCBI NA 0.88 2 dbscanCBI 0.38 NA dmode method 2 3 1 claraCBI NA 0.88 2 dbscanCBI 0.19 NA aggregate method 2 3 1 claraCBI NA 0.6 2 dbscanCBI 0.53 NA > > > > proc.time() user system elapsed 6.65 0.08 6.72 fpc/tests/fpctests_notallin.R0000644000176200001440000000716314536666602016067 0ustar liggesusers# This tests a few things that are not run in the examples. library(fpc) library(MASS) library(diptest) library(mclust) options(digits=3) set.seed(4634) face <- rFace(300,dMoNo=2,dNoEy=0,p=3) grface <- as.integer(attr(face,"grouping")) # discrproj(face,grface, clnum=1, method="bc")$units discrproj(face,grface, clnum=1, method="anc")$units discrproj(face,grface, clnum=1, method="awc")$units pamk(face,krange=1:5,criterion="ch",usepam=FALSE,critout=TRUE) set.seed(20000) face50 <- rFace(50,dMoNo=2,dNoEy=0,p=2) pamk(dist(face50),krange=1:5,criterion="asw",critout=TRUE) x <- c(1,2,3,6,6,7,8,120) ff8 <- fixmahal(x) summary(ff8) # ...dataset a bit too small for the defaults... ff9 <- fixmahal(x, mnc=3, startn=3) summary(ff9) set.seed(776655) v1 <- rnorm(100) v2 <- rnorm(100) d1 <- sample(1:5,100,replace=TRUE) d2 <- sample(1:4,100,replace=TRUE) ldata <- cbind(v1,v2,d1,d2) fr <- flexmixedruns(ldata, continuous=2,discrete=2,simruns=1,initial.cluster=c(rep(1,5),rep(2,45), rep(3,50)), control=list(minprior=0.1), n.cluster=3,allout=FALSE) print(fr$optsummary) dface <- dist(face50) hclusttreeCBI(face50,minlevel=2,method="complete",scaling=TRUE) disthclusttreeCBI(dface,minlevel=2,method="complete") noisemclustCBI(face50,G=1:5,emModelNames="VVV",nnk=2) distnoisemclustCBI(dface,G=5,emModelNames="EEE",nnk=2, mdsmethod="classical", mdsdim=2) mahalCBI(face50,clustercut=0.5) set.seed(20000) face100 <- rFace(100,dMoNo=2,dNoEy=0,p=2) cbf <- clusterboot(face100,B=2,clustermethod=speccCBI,showplots=TRUE,k=6,seed=50000) cbf$nc cbf$noisemethod cbf$bootmethod # suppressWarnings(if(require(tclust)) # print(clusterboot(face100,B=2,clustermethod=tclustCBI,showplots=TRUE,k=5,seed=50000,noisemethod=TRUE))) complete3 <- cutree(hclust(dface),3) cluster.stats(dface,complete3,G2=TRUE) set.seed(55667788) data(crabs) dc <- crabs[,4:8] cmo <- mclustBIC(crabs[,4:8],G=9,modelNames="EEE") # set.seed(12345) cm <- mclustBIC(crabs[,4:8],G=9,modelNames="EEE", initialization=list(noise=(1:200)[sample(200,50)])) scm <- summary(cm,crabs[,4:8]) scmo <- summary(cmo,crabs[,4:8]) set.seed(334455) summary(mergenormals(crabs[,4:8],scm,method="ridge.ratio",by=0.05)) summary(mergenormals(crabs[,4:8],scmo,method="ridge.uni",by=0.05)) # summary(mergenormals(crabs[,4:8],scm,method="diptantrum",by=0.05)) # summary(mergenormals(crabs[,4:8],scmo,method="dipuni",by=0.05)) # summary(mergenormals(crabs[,4:8],scm,method="predictive",M=2)) set.seed(20000) x1 <- rnorm(50) y <- rnorm(100) x2 <- rnorm(40,mean=20) x3 <- rnorm(10,mean=25,sd=100) x0 <- cbind(c(x1,x2,x3),y) prediction.strength(x0,M=10,Gmax=4, clustermethod=noisemclustCBI, 
classification="qda") prediction.strength(dist(x0),M=10,Gmax=4, clustermethod=claraCBI, classification="centroids") set.seed(20000) xdata <- c(rnorm(10,0,1),rnorm(10,8,1)) clustermethod=c("claraCBI","dbscanCBI") clustermethodpars <- list() clustermethodpars[[1]] <- clustermethodpars[[2]] <- list() clustermethodpars[[2]]$eps <- 2 clustermethodpars[[2]]$MinPts <- 2 cbs <- clusterbenchstats(xdata,G=3,clustermethod=clustermethod, distmethod=rep(TRUE,2),ncinput=c(TRUE,FALSE),scaling=FALSE, clustermethodpars=clustermethodpars,nnruns=2,kmruns=2,fnruns=1,avenruns=1,useallg=TRUE) print(cbs$sstat,aggregate=TRUE,weights=c(1,0,0,0,0,1,0,0,0,1,0,1,1,0,0,1),include.othernc=cbs$cm$othernc) print(cbs$qstat,aggregate=TRUE,weights=c(1,0,0,0,0,1,0,0,0,1,0,1,1,0,0,1),include.othernc=cbs$cm$othernc) fpc/tests/Examples/0000755000176200001440000000000014674374130013753 5ustar liggesusersfpc/tests/Examples/fpc-Ex.Rout.save0000644000176200001440000032267314674367634016735 0ustar liggesusers R Under development (unstable) (2024-09-11 r87117) -- "Unsuffered Consequences" Copyright (C) 2024 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. Natural language support but running in an English locale R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > pkgname <- "fpc" > source(file.path(R.home("share"), "R", "examples-header.R")) > options(warn = 1) > library('fpc') > > base::assign(".oldSearch", base::search(), pos = 'CheckExEnv') > base::assign(".old_wd", base::getwd(), pos = 'CheckExEnv') > cleanEx() > nameEx("adcoord") > ### * adcoord > > flush(stderr()); flush(stdout()) > > ### Name: adcoord > ### Title: Asymmetric discriminant coordinates > ### Aliases: adcoord > ### Keywords: multivariate classif > > ### ** Examples > > set.seed(4634) > face <- rFace(600,dMoNo=2,dNoEy=0) > grface <- as.integer(attr(face,"grouping")) > adcf <- adcoord(face,grface==2) > adcf2 <- adcoord(face,grface==4) > plot(adcf$proj,col=1+(grface==2)) > plot(adcf2$proj,col=1+(grface==4)) > # ...done in one step by function plotcluster. > > > > cleanEx() > nameEx("ancoord") > ### * ancoord > > flush(stderr()); flush(stdout()) > > ### Name: ancoord > ### Title: Asymmetric neighborhood based discriminant coordinates > ### Aliases: ancoord > ### Keywords: multivariate classif > > ### ** Examples > > set.seed(4634) > face <- rFace(600,dMoNo=2,dNoEy=0) > grface <- as.integer(attr(face,"grouping")) > ancf2 <- ancoord(face,grface==4) > plot(ancf2$proj,col=1+(grface==4)) > # ...done in one step by function plotcluster. > > > > cleanEx() > nameEx("awcoord") > ### * awcoord > > flush(stderr()); flush(stdout()) > > ### Name: awcoord > ### Title: Asymmetric weighted discriminant coordinates > ### Aliases: awcoord > ### Keywords: multivariate classif > > ### ** Examples > > set.seed(4634) > face <- rFace(600,dMoNo=2,dNoEy=0) > grface <- as.integer(attr(face,"grouping")) > awcf <- awcoord(face,grface==1) > # awcf2 <- ancoord(face,grface==1, method="mcd") > plot(awcf$proj,col=1+(grface==1)) > # plot(awcf2$proj,col=1+(grface==1)) > # ...done in one step by function plotcluster. 
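# A minimal sketch of the one-step route mentioned in the comment above (not run
# in this saved transcript; it assumes plotcluster() and discrproj() accept
# method="adc" for the adcoord projection, analogous to "anc"/"awc" in the tests):
#   plotcluster(face, grface, clnum=2, method="adc")
#   adp <- discrproj(face, grface, clnum=2, method="adc")  # same projection as adcf above
#   plot(adp$proj, col=1+(grface==2))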
> > > > cleanEx() > nameEx("batcoord") > ### * batcoord > > flush(stderr()); flush(stdout()) > > ### Name: batcoord > ### Title: Bhattacharyya discriminant projection > ### Aliases: batcoord batvarcoord > ### Keywords: multivariate classif > > ### ** Examples > > set.seed(4634) > face <- rFace(600,dMoNo=2,dNoEy=0) > grface <- as.integer(attr(face,"grouping")) > bcf2 <- batcoord(face,grface==2) > plot(bcf2$proj,col=1+(grface==2)) > bcfv2 <- batcoord(face,grface==2,dom="variance") > plot(bcfv2$proj,col=1+(grface==2)) > bcfvv2 <- batvarcoord(face,grface==2) > plot(bcfvv2$proj,col=1+(grface==2)) > > > > cleanEx() > nameEx("bhattacharyya.dist") > ### * bhattacharyya.dist > > flush(stderr()); flush(stdout()) > > ### Name: bhattacharyya.dist > ### Title: Bhattacharyya distance between Gaussian distributions > ### Aliases: bhattacharyya.dist > ### Keywords: multivariate > > ### ** Examples > > round(bhattacharyya.dist(c(1,1),c(2,5),diag(2),diag(2)),digits=2) modulus 2.12 > > > > cleanEx() > nameEx("bhattacharyya.matrix") > ### * bhattacharyya.matrix > > flush(stderr()); flush(stdout()) > > ### Name: bhattacharyya.matrix > ### Title: Matrix of pairwise Bhattacharyya distances > ### Aliases: bhattacharyya.matrix > ### Keywords: cluster multivariate > > ### ** Examples > > muarray <-cbind(c(0,0),c(0,0.1),c(10,10)) > sigmaarray <- array(c(diag(2),diag(2),diag(2)),dim=c(2,2,3)) > bhattacharyya.matrix(muarray,sigmaarray,ipairs=list(c(1,2),c(2,3))) [,1] [,2] [,3] [1,] NA 9.987508e-01 NA [2,] 0.9987508 NA 1.78102e-11 [3,] NA 1.781020e-11 NA > > > > > cleanEx() > nameEx("calinhara") > ### * calinhara > > flush(stderr()); flush(stdout()) > > ### Name: calinhara > ### Title: Calinski-Harabasz index > ### Aliases: calinhara > ### Keywords: cluster > > ### ** Examples > > set.seed(98765) > iriss <- iris[sample(150,20),-5] > km <- kmeans(iriss,3) > round(calinhara(iriss,km$cluster),digits=2) [1] 91.75 > > > > cleanEx() > nameEx("can") > ### * can > > flush(stderr()); flush(stdout()) > > ### Name: can > ### Title: Generation of the tuning constant for regression fixed point > ### clusters > ### Aliases: can > ### Keywords: arith > > ### ** Examples > > can(429,3) [1] 8.806634 > > > > cleanEx() > nameEx("cat2bin") > ### * cat2bin > > flush(stderr()); flush(stdout()) > > ### Name: cat2bin > ### Title: Recode nominal variables to binary variables > ### Aliases: cat2bin > ### Keywords: manip > > ### ** Examples > > set.seed(776655) > v1 <- rnorm(20) > v2 <- rnorm(20) > d1 <- sample(1:5,20,replace=TRUE) > d2 <- sample(1:4,20,replace=TRUE) > ldata <-cbind(v1,v2,d1,d2) > lc <- cat2bin(ldata,categorical=3:4) > > > > cleanEx() > nameEx("cdbw") > ### * cdbw > > flush(stderr()); flush(stdout()) > > ### Name: cdbw > ### Title: CDbw-index for cluster validation > ### Aliases: cdbw > ### Keywords: cluster > > ### ** Examples > > options(digits=3) > iriss <- as.matrix(iris[c(1:5,51:55,101:105),-5]) > irisc <- as.numeric(iris[c(1:5,51:55,101:105),5]) > cdbw(iriss,irisc) $cdbw [1] 2.35 $cohesion [1] 1.28 $compactness [1] 1.33 $sep [1] 1.38 > > > > cleanEx() > nameEx("cgrestandard") > ### * cgrestandard > > flush(stderr()); flush(stdout()) > > ### Name: cgrestandard > ### Title: Standardise cluster validation statistics by random clustering > ### results > ### Aliases: cgrestandard > ### Keywords: multivariate cluster > > ### ** Examples > > > set.seed(20000) > options(digits=3) > face <- rFace(10,dMoNo=2,dNoEy=0,p=2) > dif <- dist(face) > clusum <- list() > clusum[[2]] <- list() > cl12 <- kmeansCBI(face,2) > cl13 <- 
kmeansCBI(face,3) > cl22 <- claraCBI(face,2) > cl23 <- claraCBI(face,2) > ccl12 <- clustatsum(dif,cl12$partition) > ccl13 <- clustatsum(dif,cl13$partition) > ccl22 <- clustatsum(dif,cl22$partition) > ccl23 <- clustatsum(dif,cl23$partition) > clusum[[1]] <- list() > clusum[[1]][[2]] <- ccl12 > clusum[[1]][[3]] <- ccl13 > clusum[[2]][[2]] <- ccl22 > clusum[[2]][[3]] <- ccl23 > clusum$maxG <- 3 > clusum$minG <- 2 > clusum$method <- c("kmeansCBI","claraCBI") > clusum$name <- c("kmeansCBI","claraCBI") > clusim <- randomclustersim(dist(face),G=2:3,nnruns=1,kmruns=1, + fnruns=1,avenruns=1,monitor=FALSE) > cgr <- cgrestandard(clusum,clusim,2:3) > cgr2 <- cgrestandard(clusum,clusim,2:3,useallg=TRUE) > cgr3 <- cgrestandard(clusum,clusim,2:3,percentage=TRUE) > print(str(cgr)) List of 6 $ :List of 3 ..$ : NULL ..$ :List of 16 .. ..$ avewithin : num 0.378 .. ..$ mnnd : num 2.46 .. ..$ cvnnd : num -21.3 .. ..$ maxdiameter : num 0.879 .. ..$ widestgap : num -0.799 .. ..$ sindex : num -0.125 .. ..$ minsep : num -0.125 .. ..$ asw : num 0.369 .. ..$ dindex : num 0.5 .. ..$ denscut : num 0.5 .. ..$ highdgap : num -0.576 .. ..$ pearsongamma: num 0.574 .. ..$ withinss : num 0.572 .. ..$ entropy : num 0.294 .. ..$ pamc : num 0.798 .. ..$ dmode : num 0.231 ..$ :List of 16 .. ..$ avewithin : num 0.964 .. ..$ mnnd : num NA .. ..$ cvnnd : num NA .. ..$ maxdiameter : num 0.998 .. ..$ widestgap : num NaN .. ..$ sindex : num 0.85 .. ..$ minsep : num 0.797 .. ..$ asw : num 0.939 .. ..$ dindex : num NaN .. ..$ denscut : num 0.538 .. ..$ highdgap : num NaN .. ..$ pearsongamma: num 1.01 .. ..$ withinss : num 0.882 .. ..$ entropy : num 0.359 .. ..$ pamc : num 0.824 .. ..$ dmode : num NaN $ :List of 3 ..$ : NULL ..$ :List of 16 .. ..$ avewithin : num 0.749 .. ..$ mnnd : num 3.81 .. ..$ cvnnd : num 9.29 .. ..$ maxdiameter : num 0.826 .. ..$ widestgap : num 0.865 .. ..$ sindex : num 0.746 .. ..$ minsep : num 0.746 .. ..$ asw : num 0.743 .. ..$ dindex : num 0.5 .. ..$ denscut : num 0.5 .. ..$ highdgap : num 0.845 .. ..$ pearsongamma: num 0.732 .. ..$ withinss : num 0.744 .. ..$ entropy : num 0.294 .. ..$ pamc : num 0.707 .. ..$ dmode : num 0.586 ..$ :List of 16 .. ..$ avewithin : num -0.782 .. ..$ mnnd : num NA .. ..$ cvnnd : num NA .. ..$ maxdiameter : num -0.861 .. ..$ widestgap : num NaN .. ..$ sindex : num 1.82 .. ..$ minsep : num 1.74 .. ..$ asw : num 1.24 .. ..$ dindex : num NaN .. ..$ denscut : num 0.538 .. ..$ highdgap : num NaN .. ..$ pearsongamma: num 1.52 .. ..$ withinss : num -1.25 .. ..$ entropy : num 0.496 .. ..$ pamc : num -1.42 .. ..$ dmode : num NaN $ maxG : num 3 $ minG : num 2 $ method: chr [1:2] "kmeansCBI" "claraCBI" $ name : chr [1:2] "kmeansCBI" "claraCBI" - attr(*, "class")= chr "valstat" NULL > print(str(cgr2)) List of 6 $ :List of 3 ..$ : NULL ..$ :List of 16 .. ..$ avewithin : num -0.174 .. ..$ mnnd : num 2.46 .. ..$ cvnnd : num -23.2 .. ..$ maxdiameter : num 0.217 .. ..$ widestgap : num -1.54 .. ..$ sindex : num 0.0591 .. ..$ minsep : num 0.305 .. ..$ asw : num 0.452 .. ..$ dindex : num 0.354 .. ..$ denscut : num 0.399 .. ..$ highdgap : num -1.25 .. ..$ pearsongamma: num 0.562 .. ..$ withinss : num 0.0414 .. ..$ entropy : num 0.431 .. ..$ pamc : num -0.0345 .. ..$ dmode : num -0.0485 ..$ :List of 16 .. ..$ avewithin : num 1.03 .. ..$ mnnd : num 5.09 .. ..$ cvnnd : num 10.1 .. ..$ maxdiameter : num 1.19 .. ..$ widestgap : num 0.54 .. ..$ sindex : num 0.429 .. ..$ minsep : num 0.305 .. ..$ asw : num 0.73 .. ..$ dindex : num 0.354 .. ..$ denscut : num 0.399 .. ..$ highdgap : num 0.531 .. 
..$ pearsongamma: num 0.611 .. ..$ withinss : num 0.866 .. ..$ entropy : num 0.26 .. ..$ pamc : num 1.17 .. ..$ dmode : num 0.398 $ :List of 3 ..$ : NULL ..$ :List of 16 .. ..$ avewithin : num 0.233 .. ..$ mnnd : num 3.91 .. ..$ cvnnd : num 10.3 .. ..$ maxdiameter : num 0.165 .. ..$ widestgap : num 0.54 .. ..$ sindex : num 1.17 .. ..$ minsep : num 1.17 .. ..$ asw : num 0.953 .. ..$ dindex : num 0.354 .. ..$ denscut : num 0.399 .. ..$ highdgap : num 0.531 .. ..$ pearsongamma: num 0.787 .. ..$ withinss : num 0.235 .. ..$ entropy : num 0.431 .. ..$ pamc : num -0.111 .. ..$ dmode : num 0.398 ..$ :List of 16 .. ..$ avewithin : num 0.233 .. ..$ mnnd : num 3.91 .. ..$ cvnnd : num 10.3 .. ..$ maxdiameter : num 0.165 .. ..$ widestgap : num 0.54 .. ..$ sindex : num 1.17 .. ..$ minsep : num 1.17 .. ..$ asw : num 0.953 .. ..$ dindex : num 0.354 .. ..$ denscut : num 0.399 .. ..$ highdgap : num 0.531 .. ..$ pearsongamma: num 0.787 .. ..$ withinss : num 0.235 .. ..$ entropy : num 0.431 .. ..$ pamc : num -0.111 .. ..$ dmode : num 0.398 $ maxG : num 3 $ minG : num 2 $ method: chr [1:2] "kmeansCBI" "claraCBI" $ name : chr [1:2] "kmeansCBI" "claraCBI" - attr(*, "class")= chr "valstat" NULL > print(cgr3[[1]][[2]]) $avewithin [1] 0.6 $mnnd [1] 1 $cvnnd [1] 0.2 $maxdiameter [1] 1 $widestgap [1] 0.4 $sindex [1] 0.4 $minsep [1] 0.4 $asw [1] 0.6 $dindex [1] 0.4 $denscut [1] 0.4 $highdgap [1] 0.4 $pearsongamma [1] 0.6 $withinss [1] 0.6 $entropy [1] 0.4 $pamc [1] 1 $dmode [1] 0.4 > > > > cleanEx() > nameEx("classifdist") > ### * classifdist > > flush(stderr()); flush(stdout()) > > ### Name: classifdist > ### Title: Classification of unclustered points > ### Aliases: classifdist classifnp > ### Keywords: cluster multivariate > > ### ** Examples > > set.seed(20000) > x1 <- rnorm(50) > y <- rnorm(100) > x2 <- rnorm(40,mean=20) > x3 <- rnorm(10,mean=25,sd=100) > x <-cbind(c(x1,x2,x3),y) > truec <- c(rep(1,50),rep(2,40),rep(3,10)) > topredict <- c(1,2,51,52,91) > clumin <- truec > clumin[topredict] <- -1 > > classifnp(x,clumin, method="averagedist") [1] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 [38] 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 [75] 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 3 3 3 3 3 3 3 3 3 > classifnp(x,clumin, method="qda") [1] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 [38] 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 [75] 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 > classifdist(dist(x),clumin, centroids=c(3,53,93),method="centroid") [1] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 [38] 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 [75] 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 3 3 3 3 3 3 3 3 3 > classifdist(dist(x),clumin,method="knn") [1] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 [38] 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 [75] 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 > > > > > cleanEx() > nameEx("clucols") > ### * clucols > > flush(stderr()); flush(stdout()) > > ### Name: clucols > ### Title: Sets of colours and symbols for cluster plotting > ### Aliases: clucols clugrey clusym > ### Keywords: cluster > > ### ** Examples > > set.seed(112233) > require(MASS) Loading required package: MASS > require(flexmix) Loading required package: flexmix Loading required package: lattice > data(Cars93) > Cars934 <- Cars93[,c(3,5,8,10)] > cc <- + 
discrete.recode(Cars934,xvarsorted=FALSE,continuous=c(2,3),discrete=c(1,4)) > fcc <- flexmix(cc$data~1,k=3, + model=lcmixed(continuous=2,discrete=2,ppdim=c(6,3),diagonal=TRUE)) > plot(Cars934[,c(2,3)],col=clucols(3)[fcc@cluster],pch=clusym[fcc@cluster]) > > > > cleanEx() detaching ‘package:flexmix’, ‘package:lattice’, ‘package:MASS’ > nameEx("clujaccard") > ### * clujaccard > > flush(stderr()); flush(stdout()) > > ### Name: clujaccard > ### Title: Jaccard similarity between logical vectors > ### Aliases: clujaccard > ### Keywords: cluster > > ### ** Examples > > c1 <- rep(TRUE,10) > c2 <- c(FALSE,rep(TRUE,9)) > clujaccard(c1,c2) [1] 0.9 > > > > cleanEx() > nameEx("clusexpect") > ### * clusexpect > > flush(stderr()); flush(stdout()) > > ### Name: clusexpect > ### Title: Expected value of the number of times a fixed point cluster is > ### found > ### Aliases: clusexpect > ### Keywords: univar cluster > > ### ** Examples > > round(clusexpect(500,4,150,2000),digits=2) [1] 1.36 > > > > cleanEx() > nameEx("clustatsum") > ### * clustatsum > > flush(stderr()); flush(stdout()) > > ### Name: clustatsum > ### Title: Compute and format cluster validation statistics > ### Aliases: clustatsum > ### Keywords: cluster multivariate > > ### ** Examples > > set.seed(20000) > options(digits=3) > face <- rFace(20,dMoNo=2,dNoEy=0,p=2) > dface <- dist(face) > complete3 <- cutree(hclust(dface),3) > clustatsum(dface,complete3) $avewithin [1] 0.875 $mnnd [1] 0.63 $cvnnd [1] 0.88 $maxdiameter [1] 0.696 $widestgap [1] 0.719 $sindex [1] 0.287 $minsep [1] 0.274 $asw [1] 0.683 $dindex [1] 0.984 $denscut [1] 1 $highdgap [1] 0.904 $pearsongamma [1] 0.895 $withinss [1] 0.916 $entropy [1] 0.908 $pamc [1] 0.919 > > > > > cleanEx() > nameEx("cluster.magazine") > ### * cluster.magazine > > flush(stderr()); flush(stdout()) > > ### Name: cluster.magazine > ### Title: Run many clustering methods on many numbers of clusters > ### Aliases: cluster.magazine > ### Keywords: multivariate cluster > > ### ** Examples > > > set.seed(20000) > options(digits=3) > face <- rFace(10,dMoNo=2,dNoEy=0,p=2) > clustermethod=c("kmeansCBI","hclustCBI","hclustCBI") > # A clustering method can be used more than once, with different > # parameters > clustermethodpars <- list() > clustermethodpars[[2]] <- clustermethodpars[[3]] <- list() > clustermethodpars[[2]]$method <- "complete" > clustermethodpars[[3]]$method <- "average" > cmf <- cluster.magazine(face,G=2:3,clustermethod=clustermethod, + distmethod=rep(FALSE,3),clustermethodpars=clustermethodpars) [1] "kmeansCBI" [1] "hclustCBI" [1] "hclustCBI" > print(str(cmf)) List of 4 $ output :List of 3 ..$ :List of 3 .. ..$ : logi NA .. ..$ :List of 4 .. .. ..$ result :List of 11 .. .. .. ..$ cluster : int [1:10] 2 2 2 2 2 1 1 2 1 1 .. .. .. ..$ centers : num [1:2, 1:2] -0.711 0.474 0.852 -0.568 .. .. .. .. ..- attr(*, "dimnames")=List of 2 .. .. .. .. .. ..$ : chr [1:2] "1" "2" .. .. .. .. .. ..$ : NULL .. .. .. ..$ totss : num 18 .. .. .. ..$ withinss : num [1:2] 3.44 6.35 .. .. .. ..$ tot.withinss: num 9.79 .. .. .. ..$ betweenss : num 8.21 .. .. .. ..$ size : int [1:2] 4 6 .. .. .. ..$ iter : int 1 .. .. .. ..$ ifault : int 0 .. .. .. ..$ crit : num [1:2] 0 6.7 .. .. .. ..$ bestk : int 2 .. .. .. ..- attr(*, "class")= chr "kmeans" .. .. ..$ nc : int 2 .. .. ..$ partition : int [1:10] 2 2 2 2 2 1 1 2 1 1 .. .. ..$ clustermethod: chr "kmeans" .. ..$ :List of 4 .. .. ..$ result :List of 11 .. .. .. ..$ cluster : int [1:10] 1 1 1 1 3 2 2 3 2 2 .. .. .. 
..$ centers : num [1:3, 1:2] 0.0512 -0.7113 1.3201 -1.0511 0.8515 ... .. .. .. .. ..- attr(*, "dimnames")=List of 2 .. .. .. .. .. ..$ : chr [1:3] "1" "2" "3" .. .. .. .. .. ..$ : NULL .. .. .. ..$ totss : num 18 .. .. .. ..$ withinss : num [1:3] 0.828 3.439 0.576 .. .. .. ..$ tot.withinss: num 4.84 .. .. .. ..$ betweenss : num 13.2 .. .. .. ..$ size : int [1:3] 4 4 2 .. .. .. ..$ iter : int 2 .. .. .. ..$ ifault : int 0 .. .. .. ..$ crit : num [1:3] 0 0 9.51 .. .. .. ..$ bestk : int 3 .. .. .. ..- attr(*, "class")= chr "kmeans" .. .. ..$ nc : int 3 .. .. ..$ partition : int [1:10] 1 1 1 1 3 2 2 3 2 2 .. .. ..$ clustermethod: chr "kmeans" ..$ :List of 3 .. ..$ : logi NA .. ..$ :List of 5 .. .. ..$ result :List of 7 .. .. .. ..$ merge : int [1:9, 1:2] -9 -2 -4 -1 -6 4 -8 6 -7 -10 ... .. .. .. ..$ height : num [1:9] 0 0.158 0.226 1.033 1.26 ... .. .. .. ..$ order : int [1:10] 7 1 5 6 4 2 3 8 9 10 .. .. .. ..$ labels : NULL .. .. .. ..$ method : chr "complete" .. .. .. ..$ call : language hclust(d = dist(sdata), method = method) .. .. .. ..$ dist.method: chr "euclidean" .. .. .. ..- attr(*, "class")= chr "hclust" .. .. ..$ noise : logi FALSE .. .. ..$ nc : int 2 .. .. ..$ partition : int [1:10] 1 1 1 1 1 1 2 1 1 1 .. .. ..$ clustermethod: chr "hclust/cutree" .. ..$ :List of 5 .. .. ..$ result :List of 7 .. .. .. ..$ merge : int [1:9, 1:2] -9 -2 -4 -1 -6 4 -8 6 -7 -10 ... .. .. .. ..$ height : num [1:9] 0 0.158 0.226 1.033 1.26 ... .. .. .. ..$ order : int [1:10] 7 1 5 6 4 2 3 8 9 10 .. .. .. ..$ labels : NULL .. .. .. ..$ method : chr "complete" .. .. .. ..$ call : language hclust(d = dist(sdata), method = method) .. .. .. ..$ dist.method: chr "euclidean" .. .. .. ..- attr(*, "class")= chr "hclust" .. .. ..$ noise : logi FALSE .. .. ..$ nc : int 3 .. .. ..$ partition : int [1:10] 1 1 1 1 1 1 2 3 3 3 .. .. ..$ clustermethod: chr "hclust/cutree" ..$ :List of 3 .. ..$ : logi NA .. ..$ :List of 5 .. .. ..$ result :List of 7 .. .. .. ..$ merge : int [1:9, 1:2] -9 -2 -4 -1 -5 -6 1 6 -7 -10 ... .. .. .. ..$ height : num [1:9] 0 0.158 0.195 1.032 1.073 ... .. .. .. ..$ order : int [1:10] 7 6 1 4 2 3 9 10 5 8 .. .. .. ..$ labels : NULL .. .. .. ..$ method : chr "average" .. .. .. ..$ call : language hclust(d = dist(sdata), method = method) .. .. .. ..$ dist.method: chr "euclidean" .. .. .. ..- attr(*, "class")= chr "hclust" .. .. ..$ noise : logi FALSE .. .. ..$ nc : int 2 .. .. ..$ partition : int [1:10] 1 1 1 1 1 1 2 1 1 1 .. .. ..$ clustermethod: chr "hclust/cutree" .. ..$ :List of 5 .. .. ..$ result :List of 7 .. .. .. ..$ merge : int [1:9, 1:2] -9 -2 -4 -1 -5 -6 1 6 -7 -10 ... .. .. .. ..$ height : num [1:9] 0 0.158 0.195 1.032 1.073 ... .. .. .. ..$ order : int [1:10] 7 6 1 4 2 3 9 10 5 8 .. .. .. ..$ labels : NULL .. .. .. ..$ method : chr "average" .. .. .. ..$ call : language hclust(d = dist(sdata), method = method) .. .. .. ..$ dist.method: chr "euclidean" .. .. .. ..- attr(*, "class")= chr "hclust" .. .. ..$ noise : logi FALSE .. .. ..$ nc : int 3 .. .. ..$ partition : int [1:10] 1 1 1 1 2 1 3 2 2 2 .. .. ..$ clustermethod: chr "hclust/cutree" $ clustering:List of 3 ..$ :List of 3 .. ..$ : logi NA .. ..$ : int [1:10] 2 2 2 2 2 1 1 2 1 1 .. ..$ : int [1:10] 1 1 1 1 3 2 2 3 2 2 ..$ :List of 3 .. ..$ : logi NA .. ..$ : int [1:10] 1 1 1 1 1 1 2 1 1 1 .. ..$ : int [1:10] 1 1 1 1 1 1 2 3 3 3 ..$ :List of 3 .. ..$ : logi NA .. ..$ : int [1:10] 1 1 1 1 1 1 2 1 1 1 .. ..$ : int [1:10] 1 1 1 1 2 1 3 2 2 2 $ noise :List of 3 ..$ :List of 3 .. ..$ : logi NA .. ..$ : logi FALSE .. 
..$ : logi FALSE ..$ :List of 3 .. ..$ : logi NA .. ..$ : logi FALSE .. ..$ : logi FALSE ..$ :List of 3 .. ..$ : logi NA .. ..$ : logi FALSE .. ..$ : logi FALSE $ othernc : list() NULL > > > > > cleanEx() > nameEx("cluster.stats") > ### * cluster.stats > > flush(stderr()); flush(stdout()) > > ### Name: cluster.stats > ### Title: Cluster validation statistics > ### Aliases: cluster.stats > ### Keywords: cluster multivariate > > ### ** Examples > > set.seed(20000) > options(digits=3) > face <- rFace(200,dMoNo=2,dNoEy=0,p=2) > dface <- dist(face) > complete3 <- cutree(hclust(dface),3) > cluster.stats(dface,complete3, + alt.clustering=as.integer(attr(face,"grouping"))) $n [1] 200 $cluster.number [1] 3 $cluster.size [1] 136 60 4 $min.cluster.size [1] 4 $noisen [1] 0 $diameter [1] 10.80 5.76 9.00 $average.distance [1] 3.03 2.21 7.05 $median.distance [1] 2.84 1.48 8.32 $separation [1] 5.87 5.87 7.22 $average.toother [1] 13.8 13.0 20.8 $separation.matrix [,1] [,2] [,3] [1,] 0.00 5.87 14.98 [2,] 5.87 0.00 7.22 [3,] 14.98 7.22 0.00 $ave.between.matrix [,1] [,2] [,3] [1,] 0.0 13.1 24.5 [2,] 13.1 0.0 12.2 [3,] 24.5 12.2 0.0 $average.between [1] 13.7 $average.within [1] 2.86 $n.between [1] 8944 $n.within [1] 10956 $max.diameter [1] 10.8 $min.separation [1] 5.87 $within.cluster.ss [1] 1198 $clus.avg.silwidths 1 2 3 0.752 0.818 0.355 $avg.silwidth [1] 0.764 $g2 NULL $g3 NULL $pearsongamma [1] 0.883 $dunn [1] 0.544 $dunn2 [1] 1.73 $entropy [1] 0.702 $wb.ratio [1] 0.209 $ch [1] 699 $cwidegap [1] 1.81 1.21 8.32 $widestgap [1] 8.32 $sindex [1] 6.19 $corrected.rand [1] 0.345 $vi [1] 0.97 > > > > > cleanEx() > nameEx("cluster.varstats") > ### * cluster.varstats > > flush(stderr()); flush(stdout()) > > ### Name: cluster.varstats > ### Title: Variablewise statistics for clusters > ### Aliases: cluster.varstats print.varwisetables > ### Keywords: cluster > > ### ** Examples > > set.seed(112233) > options(digits=3) > require(MASS) Loading required package: MASS > require(flexmix) Loading required package: flexmix Loading required package: lattice > data(Cars93) > Cars934 <- Cars93[,c(3,5,8,10)] > cc <- + discrete.recode(Cars934,xvarsorted=FALSE,continuous=c(2,3),discrete=c(1,4)) > fcc <- flexmix(cc$data~1,k=2, + model=lcmixed(continuous=2,discrete=2,ppdim=c(6,3),diagonal=TRUE)) > cv <- + cluster.varstats(fcc@cluster,Cars934, contdata=Cars934[,c(2,3)], + tablevar=c(1,4),catvar=c(2,3),quantvar=c(2,3),projmethod="awc", + ask=FALSE) Cluster 1 42 out of 93 points. Cluster 1 Type Type In cluster 1 Compact Large Midsize Small Sporty Van FALSE 12 0 2 21 7 9 TRUE 4 11 20 0 7 0 Cluster 1 Price Mean= 26.5 all obs.= 19.5 Standard deviation= 9.83 all obs.= 9.66 0% 25% 50% 75% 100% 13.9 18.9 25.1 32.4 61.9 [1] "All obs.:" 0% 25% 50% 75% 100% 7.4 12.2 17.7 23.3 61.9 Cluster 1 MPG.highway Mean= 26.5 all obs.= 29.1 Standard deviation= 1.86 all obs.= 5.33 0% 25% 50% 75% 100% 22 25 26 28 30 [1] "All obs.:" 0% 25% 50% 75% 100% 20 26 28 31 50 Cluster 1 DriveTrain DriveTrain In cluster 1 4WD Front Rear FALSE 9 42 0 TRUE 1 25 16 Cluster 2 51 out of 93 points. 
Cluster 2 Type Type In cluster 2 Compact Large Midsize Small Sporty Van FALSE 4 11 20 0 7 0 TRUE 12 0 2 21 7 9 Cluster 2 Price Mean= 13.7 all obs.= 19.5 Standard deviation= 4.09 all obs.= 9.66 0% 25% 50% 75% 100% 7.4 10.2 13.3 16.6 22.7 [1] "All obs.:" 0% 25% 50% 75% 100% 7.4 12.2 17.7 23.3 61.9 Cluster 2 MPG.highway Mean= 31.2 all obs.= 29.1 Standard deviation= 6.26 all obs.= 5.33 0% 25% 50% 75% 100% 20.0 28.5 31.0 34.0 50.0 [1] "All obs.:" 0% 25% 50% 75% 100% 20 26 28 31 50 Cluster 2 DriveTrain DriveTrain In cluster 2 4WD Front Rear FALSE 1 25 16 TRUE 9 42 0 > print(cv) Type Cluster Compact Large Midsize Small Sporty Van 1 4 11 20 0 7 0 2 12 0 2 21 7 9 Sum 16 11 22 21 14 9 Categorised Price Cluster 1 2 3 4 5 6 7 8 9 10 1 0 0 0 4 3 5 3 8 9 10 2 10 9 9 5 7 4 6 1 0 0 Sum 10 9 9 9 10 9 9 9 9 10 Categorised MPG.highway Cluster 1 2 3 4 5 6 7 8 9 10 1 2 11 10 4 9 6 0 0 0 0 2 8 1 1 2 1 9 7 8 6 8 Sum 10 12 11 6 10 15 7 8 6 8 DriveTrain Cluster 4WD Front Rear 1 1 25 16 2 9 42 0 Sum 10 67 16 > > > > cleanEx() detaching ‘package:flexmix’, ‘package:lattice’, ‘package:MASS’ > nameEx("clusterbenchstats") > ### * clusterbenchstats > > flush(stderr()); flush(stdout()) > > ### Name: clusterbenchstats > ### Title: Run and validate many clusterings > ### Aliases: clusterbenchstats print.clusterbenchstats > ### Keywords: multivariate cluster > > ### ** Examples > > > set.seed(20000) > options(digits=3) > face <- rFace(10,dMoNo=2,dNoEy=0,p=2) > clustermethod=c("kmeansCBI","hclustCBI") > # A clustering method can be used more than once, with different > # parameters > clustermethodpars <- list() > clustermethodpars[[2]] <- list() > clustermethodpars[[2]]$method <- "average" > # Last element of clustermethodpars needs to have an entry! > methodname <- c("kmeans","average") > cbs <- clusterbenchstats(face,G=2:3,clustermethod=clustermethod, + methodname=methodname,distmethod=rep(FALSE,2), + clustermethodpars=clustermethodpars,nnruns=1,kmruns=1,fnruns=1,avenruns=1) [1] "kmeansCBI" [1] "hclustCBI" [1] "Computation of validity statistics" comsum 1 comsum 2 [1] "Simulation" 2 clusters; nn run 1 2 clusters; fn run 1 2 clusters; aven run 1 2 clusters; km run 1 3 clusters; nn run 1 3 clusters; fn run 1 3 clusters; aven run 1 3 clusters; km run 1 [1] "Simulation quantile re-standardisation" [1] "Simulation sd re-standardisation" > print(cbs) Output object of clusterbenchstats. Clustering methods: kmeans average Cluster validation statistics: avewithin mnnd cvnnd maxdiameter widestgap sindex minsep asw dindex denscut highdgap pearsongamma withinss entropy pamc dmode Numbers of clusters minimum: 2 maximum: 3 Output components are cm, stat, sim, qstat, sstat.stat, qstat, and sstat are valstat-objects.Use plot.valstat and print.valstat on these to get more information. 
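# Before choosing the 'weights' argument for aggregation (next call), it can
# help to check the order in which the validation statistics are stored;
# a small sketch using the cbs object from above:
# cbs$qstat$statistics   # statistic names in the order expected by 'weights'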
> print(cbs$qstat,aggregate=TRUE,weights=c(1,0,0,0,0,1,0,1,0,1,0,1,0,0,1,1)) avewithin method 2 3 1 kmeans 0.43 0.57 2 average 0.14 0.14 mnnd method 2 3 1 kmeans 0.29 0.71 2 average 0.86 0.14 cvnnd method 2 3 1 kmeans 0.14 0.29 2 average 0.43 0.14 maxdiameter method 2 3 1 kmeans 0.43 0.14 2 average 0.14 0.57 widestgap method 2 3 1 kmeans 0.14 0.29 2 average 0.43 0.14 sindex method 2 3 1 kmeans 0.29 0.57 2 average 0.57 0.29 minsep method 2 3 1 kmeans 0.29 0.43 2 average 0.57 0.43 asw method 2 3 1 kmeans 0.43 0.57 2 average 0.14 0.14 dindex method 2 3 1 kmeans 0.43 0.14 2 average 0.14 0.14 denscut method 2 3 1 kmeans 0.29 0.43 2 average 0.29 0.43 highdgap method 2 3 1 kmeans 0.43 0.29 2 average 0.14 0.14 pearsongamma method 2 3 1 kmeans 0.43 0.57 2 average 0.14 0.29 withinss method 2 3 1 kmeans 0.43 0.57 2 average 0.14 0.14 entropy method 2 3 1 kmeans 0.43 0.43 2 average 0.14 0.29 pamc method 2 3 1 kmeans 0.43 0.57 2 average 0.14 0.29 dmode method 2 3 1 kmeans 0.43 0.18 2 average 0.14 0.14 aggregate method 2 3 1 kmeans 0.39 0.49 2 average 0.22 0.24 > # The weights are weights for the validation statistics ordered as in > # cbs$qstat$statistics for computation of an aggregated index, see > # ?print.valstat. > > # Now using bootstrap stability assessment as in Akhanli and Hennig (2020): > bootclassif <- c("centroid","averagedist") > cbsboot <- clusterbenchstats(face,G=2:3,clustermethod=clustermethod, + methodname=methodname,distmethod=rep(FALSE,2), + clustermethodpars=clustermethodpars, + useboot=TRUE,bootclassif=bootclassif,bootmethod="nselectboot", + bootruns=2,nnruns=1,kmruns=1,fnruns=1,avenruns=1,useallg=TRUE) [1] "kmeansCBI" [1] "hclustCBI" [1] "Computation of validity statistics" comsum 1 comsum 2 [1] "Simulation" 2 clusters; nn run 1 2 clusters; fn run 1 2 clusters; aven run 1 2 clusters; km run 1 3 clusters; nn run 1 3 clusters; fn run 1 3 clusters; aven run 1 3 clusters; km run 1 [1] "Simulation quantile re-standardisation" [1] "Simulation sd re-standardisation" > print(cbsboot) Output object of clusterbenchstats. Clustering methods: kmeans average Cluster validation statistics: avewithin mnnd cvnnd maxdiameter widestgap sindex minsep asw dindex denscut highdgap pearsongamma withinss entropy pamc boot dmode Numbers of clusters minimum: 2 maximum: 3 Output components are cm, stat, sim, qstat, sstat.stat, qstat, and sstat are valstat-objects.Use plot.valstat and print.valstat on these to get more information. 
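# The additional "boot" statistic in cbsboot comes from bootstrap stability
# assessment via nselectboot. A hedged sketch of a direct call (argument and
# component names assumed from ?nselectboot, not verified or run here):
# nsb <- nselectboot(face, B=2, clustermethod=kmeansCBI,
#                    classification="centroid", krange=2:3)
# nsb$kopt   # number of clusters preferred by stability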
> ## Not run: > ##D # Index A1 in Akhanli and Hennig (2020) (need these weights choices): > ##D print(cbsboot$sstat,aggregate=TRUE,weights=c(1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0)) > ##D # Index A2 in Akhanli and Hennig (2020) (need these weights choices): > ##D print(cbsboot$sstat,aggregate=TRUE,weights=c(0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,1,0)) > ## End(Not run) > > # Results from nselectboot: > plot(cbsboot$stat,cbsboot$sim,statistic="boot") > > > > cleanEx() > nameEx("clusterboot") > ### * clusterboot > > flush(stderr()); flush(stdout()) > > ### Name: clusterboot > ### Title: Clusterwise cluster stability assessment by resampling > ### Aliases: clusterboot print.clboot plot.clboot > ### Keywords: cluster multivariate > > ### ** Examples > > options(digits=3) > set.seed(20000) > face <- rFace(50,dMoNo=2,dNoEy=0,p=2) > cf1 <- clusterboot(face,B=3,bootmethod= + c("boot","noise","jitter"),clustermethod=kmeansCBI, + krange=5,seed=15555) boot 1 boot 2 boot 3 noise 1 noise 2 noise 3 jitter 1 jitter 2 jitter 3 > print(cf1) * Cluster stability assessment * Cluster method: kmeans Full clustering results are given as parameter result of the clusterboot object, which also provides further statistics of the resampling results. Number of resampling runs: 3 Number of clusters found in data: 5 Clusterwise Jaccard bootstrap (omitting multiple points) mean: [1] 0.792 0.743 0.778 0.500 0.398 dissolved: [1] 1 1 1 1 2 recovered: [1] 2 2 2 0 1 Clusterwise Jaccard replacement by noise mean: [1] 0.880 0.874 0.902 0.630 0.120 dissolved: [1] 0 0 0 1 3 recovered: [1] 3 2 3 1 0 Clusterwise Jaccard jittering mean: [1] 0.926 0.929 1.000 0.833 0.407 dissolved: [1] 0 0 0 1 2 recovered: [1] 3 3 3 2 1 > plot(cf1) > cf2 <- clusterboot(dist(face),B=3,bootmethod= + "subset",clustermethod=disthclustCBI, + k=5, cut="number", method="average", showplots=TRUE, seed=15555) subset 1 subset 2 subset 3 > print(cf2) * Cluster stability assessment * Cluster method: hclust Full clustering results are given as parameter result of the clusterboot object, which also provides further statistics of the resampling results. Number of resampling runs: 3 Number of clusters found in data: 5 Clusterwise Jaccard subsetting mean: [1] 0.630 0.889 0.667 0.000 0.667 dissolved: [1] 1 0 1 3 1 recovered: [1] 1 2 2 0 2 > d1 <- c("a","b","a","c") > d2 <- c("a","a","a","b") > dx <- as.data.frame(cbind(d1,d2)) > cpx <- clusterboot(dx,k=2,B=10,clustermethod=claraCBI, + multipleboot=TRUE,usepam=TRUE,datatomatrix=FALSE) boot 1 boot 2 boot 3 boot 4 boot 5 boot 6 boot 7 boot 8 boot 9 boot 10 > print(cpx) * Cluster stability assessment * Cluster method: clara/pam Full clustering results are given as parameter result of the clusterboot object, which also provides further statistics of the resampling results. Number of resampling runs: 10 Number of clusters found in data: 2 Clusterwise Jaccard bootstrap mean: [1] 0.9 0.6 dissolved: [1] 0 4 recovered: [1] 6 6 > > > > cleanEx() > nameEx("cmahal") > ### * cmahal > > flush(stderr()); flush(stdout()) > > ### Name: cmahal > ### Title: Generation of tuning constant for Mahalanobis fixed point > ### clusters. 
> ### Aliases: cmahal > ### Keywords: cluster > > ### ** Examples > > plot(1:100,cmahal(100,3,nmin=5,cmin=qchisq(0.99,3),nc1=90), + xlab="FPC size", ylab="cmahal") > > > > cleanEx() > nameEx("concomp") > ### * concomp > > flush(stderr()); flush(stdout()) > > ### Name: con.comp > ### Title: Connectivity components of an undirected graph > ### Aliases: con.comp > ### Keywords: array cluster > > ### ** Examples > > set.seed(1000) > x <- rnorm(20) > m <- matrix(0,nrow=20,ncol=20) > for(i in 1:20) + for(j in 1:20) + m[i,j] <- abs(x[i]-x[j]) > d <- m<0.2 > cc <- con.comp(d) > max(cc) # number of connectivity components [1] 6 > plot(x,cc) > # The same should be produced by > # cutree(hclust(as.dist(m),method="single"),h=0.2). > > > > cleanEx() > nameEx("confusion") > ### * confusion > > flush(stderr()); flush(stdout()) > > ### Name: confusion > ### Title: Misclassification probabilities in mixtures > ### Aliases: confusion > ### Keywords: cluster multivariate > > ### ** Examples > > set.seed(12345) > m <- rpois(20,lambda=5) > dim(m) <- c(5,4) > pro <- apply(m,2,sum) > pro <- pro/sum(pro) > m <- m/apply(m,1,sum) > round(confusion(m,pro,1,2),digits=2) [1] 0.7 > > > > cleanEx() > nameEx("cov.wml") > ### * cov.wml > > flush(stderr()); flush(stdout()) > > ### Name: cov.wml > ### Title: Weighted Covariance Matrices (Maximum Likelihood) > ### Aliases: cov.wml > ### Keywords: multivariate > > ### ** Examples > > x <- c(1,2,3,4,5,6,7,8,9,10) > y <- c(1,2,3,8,7,6,5,8,9,10) > cov.wml(cbind(x,y),wt=c(0,0,0,1,1,1,1,1,0,0)) $cov x y x 2.0 -0.40 y -0.4 1.36 $center x y 6.0 6.8 $n.obs [1] 10 $wt [1] 0.0 0.0 0.0 0.2 0.2 0.2 0.2 0.2 0.0 0.0 > cov.wt(cbind(x,y),wt=c(0,0,0,1,1,1,1,1,0,0)) $cov x y x 2.5 -0.5 y -0.5 1.7 $center x y 6.0 6.8 $n.obs [1] 10 $wt [1] 0.0 0.0 0.0 0.2 0.2 0.2 0.2 0.2 0.0 0.0 > > > > cleanEx() > nameEx("cqcluster.stats") > ### * cqcluster.stats > > flush(stderr()); flush(stdout()) > > ### Name: cqcluster.stats > ### Title: Cluster validation statistics (version for use with > ### clusterbenchstats > ### Aliases: cqcluster.stats summary.cquality print.summary.cquality > ### Keywords: cluster multivariate > > ### ** Examples > > set.seed(20000) > options(digits=3) > face <- rFace(200,dMoNo=2,dNoEy=0,p=2) > dface <- dist(face) > complete3 <- cutree(hclust(dface),3) > cqcluster.stats(dface,complete3, + alt.clustering=as.integer(attr(face,"grouping"))) $n [1] 200 $cluster.number [1] 3 $cluster.size [1] 136 60 4 $min.cluster.size [1] 4 $noisen [1] 0 $diameter [1] 10.80 5.76 9.00 $average.distance [1] 3.03 2.21 7.05 $median.distance [1] 2.84 1.48 8.32 $separation [1] 5.87 5.87 7.22 $average.toother [1] 13.8 13.0 20.8 $separation.matrix [,1] [,2] [,3] [1,] 0.00 5.87 14.98 [2,] 5.87 0.00 7.22 [3,] 14.98 7.22 0.00 $ave.between.matrix [,1] [,2] [,3] [1,] 0.0 13.1 24.5 [2,] 13.1 0.0 12.2 [3,] 24.5 12.2 0.0 $avebetween [1] 0.428 $avewithin [1] 0.0893 $n.between [1] 8944 $n.within [1] 10956 $maxdiameter [1] 0.336 $minsep [1] 0.183 $withinss [1] 0.124 $clus.avg.silwidths 1 2 3 0.752 0.818 0.355 $asw [1] 0.764 $g2 NULL $g3 NULL $pearsongamma [1] 0.942 $dunn [1] 0.544 $dunn2 [1] 1.73 $entropy [1] 0.639 $wb.ratio [1] 0.209 $ch [1] 699 $cwidegap [1] 1.81 1.21 8.32 $widestgap [1] 0.259 $corrected.rand [1] 0.345 $vi [1] 0.907 $sindex [1] 0.232 $svec [1] 8.55 8.81 7.98 7.82 7.90 8.50 7.30 8.99 7.96 8.78 7.29 8.60 5.87 8.69 6.13 [16] 5.90 5.98 6.07 5.87 6.04 7.22 $psep [1] 11.19 13.98 8.55 13.20 9.06 16.34 16.54 8.81 10.85 10.76 16.39 13.27 [13] 16.24 9.28 16.10 10.32 16.54 9.50 15.07 16.28 7.98 7.82 14.54 15.59 
[25] 10.30 16.50 10.43 11.52 15.06 14.21 10.35 7.90 15.48 8.50 15.84 7.30 [37] 15.20 16.41 13.82 13.92 13.24 13.44 13.28 13.14 13.31 13.02 13.23 13.45 [49] 13.40 13.64 13.59 13.26 13.00 13.50 13.11 13.48 13.40 13.08 13.18 13.69 [61] 13.17 13.66 13.21 12.81 13.26 13.16 13.57 13.34 13.47 13.41 13.69 13.15 [73] 13.63 12.99 13.29 13.69 13.48 13.37 13.29 13.15 13.13 13.36 13.66 12.89 [85] 13.57 13.45 13.69 11.91 13.15 13.30 13.23 13.27 13.80 13.46 13.48 13.47 [97] 13.08 13.48 10.14 11.32 10.07 11.18 9.08 11.08 11.00 9.74 8.99 9.97 [109] 9.00 7.96 9.29 11.34 9.62 9.64 8.78 7.29 10.45 8.60 9.55 5.87 [121] 11.09 11.22 11.14 10.31 11.28 11.44 8.69 10.74 10.40 11.03 9.59 6.30 [133] 10.72 11.00 9.41 9.75 11.32 6.62 7.01 6.55 6.95 6.53 6.88 6.69 [145] 6.67 6.77 6.63 6.47 7.01 6.72 7.08 7.04 6.23 6.67 6.15 6.31 [157] 7.12 7.10 6.69 7.35 6.13 6.15 5.90 6.62 6.73 6.52 6.37 6.66 [169] 7.02 6.75 6.90 6.62 6.48 7.02 7.08 6.86 6.28 6.43 5.98 6.14 [181] 7.44 7.43 6.67 6.07 6.40 6.19 7.22 7.19 7.22 6.59 5.87 6.56 [193] 7.26 6.57 6.04 6.57 7.22 7.63 14.08 14.08 $stan [1] 32.1 $nnk [1] 2 $mnnd [1] 0.0533 $pamc [1] 0.0624 $pamcentroids [1] 58 138 200 $dindex [1] 0.0238 $denscut [1] 0 $highdgap [1] 0.0149 $npenalty [1] 0 0 0 $dpenalty [1] 0.0991 0.0141 0.0000 $withindensp [1] 1.41 1.99 1.31 1.89 1.72 4.82 5.58 1.94 1.99 1.97 5.78 1.89 [13] 3.26 1.36 2.89 3.23 4.96 1.41 3.23 3.94 3.10 3.10 1.86 2.17 [25] 3.19 5.73 1.55 1.07 3.27 1.00 3.24 3.20 1.47 2.20 1.85 1.60 [37] 3.10 5.85 1.78 9.35 23.85 28.50 26.48 9.94 12.53 5.39 23.69 28.80 [49] 27.65 25.24 25.86 14.12 14.61 16.27 4.95 27.05 27.70 18.54 12.35 21.58 [61] 7.53 21.89 21.41 4.12 25.77 17.50 27.21 26.93 28.50 26.16 19.59 15.00 [73] 24.67 4.87 21.68 23.44 28.26 26.71 16.37 10.33 20.66 25.89 23.12 5.53 [85] 26.05 16.05 17.50 1.00 20.91 26.39 19.53 26.42 12.42 24.00 23.77 17.19 [97] 14.77 28.71 3.70 8.63 5.83 7.25 5.20 7.92 9.09 7.53 4.98 6.53 [109] 4.12 1.30 6.59 8.09 7.58 6.16 4.16 1.10 5.18 3.57 7.38 1.00 [121] 9.38 9.08 7.10 4.74 7.77 6.60 4.10 3.14 4.27 8.93 7.12 1.00 [133] 6.41 8.07 5.54 7.48 7.51 14.53 13.64 11.49 14.52 13.47 14.42 15.70 [145] 14.74 14.97 15.34 10.01 4.16 15.79 12.36 12.01 6.42 3.52 3.73 7.03 [157] 9.81 11.82 7.03 5.49 4.85 4.47 2.95 12.47 13.23 13.89 4.41 14.65 [169] 8.97 3.77 12.11 5.00 12.14 5.11 12.34 5.84 2.08 4.83 4.17 1.85 [181] 1.51 3.13 3.80 4.75 4.56 4.96 3.44 2.68 1.67 3.11 3.70 3.31 [193] 3.44 4.06 2.58 3.73 1.00 1.00 2.00 2.00 $densoc [1] 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 [38] 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 [75] 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 [112] 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 [149] 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 [186] 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 $pdistto $pdistto[[1]] [1] NA 48 69 48 48 42 49 68 98 67 50 50 51 90 41 47 89 63 [19] 63 66 76 73 85 56 70 82 78 70 94 73 60 62 97 66 79 52 [37] 79 78 65 92 65 81 71 83 59 82 91 72 80 80 45 96 61 74 [55] 87 44 46 46 55 12 61 39 2 23 19 29 37 24 15 13 17 7 [73] 26 11 11 6 20 35 12 84 88 28 9 31 16 25 14 8 34 21 [91] 32 22 88 126 112 112 100 122 121 105 130 104 123 102 121 105 133 117 [109] 124 124 108 136 106 113 113 131 119 131 108 101 111 109 115 127 135 103 [127] 134 118 110 116 123 1 10 27 18 5 $pdistto[[2]] [1] NA 150 144 150 146 145 147 166 142 173 140 164 147 138 143 141 139 158 143 [20] 171 139 175 151 157 138 148 156 153 169 174 176 161 140 162 155 167 172 170 [39] 
170 159 132 180 179 191 184 186 178 185 186 194 194 192 190 186 183 187 193 [58] 187 188 182 $pdistto[[3]] [1] NA 199 199 199 $pclosetomode $pclosetomode[[1]] [1] 48 69 77 42 98 49 68 90 67 50 76 51 83 41 47 89 63 75 [19] 66 97 73 85 56 70 82 78 57 94 95 60 62 71 53 79 52 59 [37] 86 65 92 43 81 58 93 87 61 91 72 80 44 45 96 54 74 64 [55] 40 46 55 84 12 4 39 2 23 19 29 37 24 15 13 17 7 26 [73] 11 38 6 20 35 33 30 88 28 9 31 16 25 14 8 34 21 32 [91] 22 36 126 112 100 125 122 121 105 130 104 123 102 137 134 133 117 124 [109] 129 108 136 106 113 119 131 114 135 111 101 99 109 115 127 118 103 107 [127] 128 110 116 120 1 10 27 18 5 3 $pclosetomode[[2]] [1] 150 144 147 146 145 168 166 142 173 140 164 165 138 143 141 139 158 152 171 [20] 169 175 151 157 160 148 156 153 161 174 176 159 163 162 155 167 172 170 154 [39] 149 132 180 179 191 184 186 178 185 196 194 183 192 190 177 195 187 193 182 [58] 188 181 189 $pclosetomode[[3]] [1] 199 200 197 198 $distto $distto[[1]] [1] NA -0.010308 -0.008199 -0.010265 -0.002805 -0.029572 -0.024898 [8] -0.018713 -0.052255 -0.068266 -0.062747 0.021523 -0.095210 -0.088147 [15] -0.005542 -0.096747 0.017529 0.009342 -0.135760 -0.094820 0.042933 [22] 0.047641 0.034916 -0.030965 -0.009467 0.028707 0.034230 -0.075005 [29] -0.008034 -0.107555 0.010907 -0.079852 -0.005589 -0.039372 -0.078150 [36] -0.061560 -0.010983 -0.032677 0.022470 0.002038 -0.177657 -0.073619 [43] -0.248917 -0.195095 -0.167149 -0.220591 -0.157537 -0.162211 -0.013300 [50] 0.076384 0.161927 -0.032004 -0.092537 -0.025939 -0.283240 -0.158031 [57] -0.015482 0.004635 -0.106143 0.000000 -0.199966 0.007360 -0.004461 [64] 0.047700 0.001219 -0.005897 -0.032149 0.024814 0.012786 0.059112 [71] 0.021739 0.004992 0.001735 0.002452 -0.033090 -0.030655 -0.072734 [78] -0.013223 -0.030961 -0.157221 0.002537 0.031912 0.043207 -0.000288 [85] -0.001281 -0.063523 0.020193 0.009040 0.031200 0.003498 -0.003548 [92] -0.051979 0.194308 0.051962 0.018810 -0.011259 0.015496 0.010479 [99] -0.010186 -0.005647 -0.034853 -0.028384 0.005062 0.008929 -0.045612 [106] -0.093128 -0.042588 -0.015108 -0.016329 0.062131 0.032728 0.001954 [113] 0.001515 -0.006843 -0.015786 -0.033391 -0.063940 -0.018370 -0.024519 [120] -0.073963 -0.085706 0.001138 -0.001867 -0.018489 -0.011797 -0.007750 [127] -0.171206 -0.079010 -0.006788 -0.003495 -0.197571 0.019128 -0.014412 [134] -0.005023 0.010807 -0.014106 $distto[[2]] [1] NA -3.06e-03 -1.24e-02 -2.83e-02 -7.96e-03 -3.20e-03 -5.05e-02 [8] -1.46e-02 -4.60e-02 -2.26e-02 3.38e-02 2.64e-02 -2.83e-02 -3.64e-03 [15] 3.30e-03 -3.05e-02 -6.32e-02 6.65e-03 -8.03e-02 -1.09e-01 -4.52e-02 [22] 9.18e-04 -8.87e-02 -1.50e-01 -1.57e-01 -1.04e-01 -2.11e-02 -5.46e-02 [29] -1.34e-01 2.51e-02 4.14e-02 -6.58e-02 -2.44e-01 -2.56e-02 2.35e-02 [36] 2.05e-02 -4.26e-02 -8.61e-03 1.35e-02 -2.09e-01 2.94e-02 8.06e-02 [43] -1.62e-02 3.64e-02 7.30e-03 -4.62e-03 -9.46e-03 -2.86e-02 -3.15e-02 [50] -8.85e-03 -2.57e-02 -7.18e-03 -3.57e-02 -8.27e-02 -1.24e-02 -9.93e-05 [57] -1.09e-02 -2.65e-02 -4.08e-02 -5.05e-02 $distto[[3]] [1] NA 0.0000 -0.0347 -0.0347 $percwdens [1] 0.0491 0.0690 0.0455 0.0657 0.0596 0.1675 0.1939 0.0675 0.0692 0.0683 [11] 0.2006 0.0657 0.1131 0.0473 0.1003 0.1121 0.1722 0.0488 0.1123 0.1369 [21] 0.1077 0.1077 0.0646 0.0755 0.1108 0.1989 0.0539 0.0373 0.1135 0.0347 [31] 0.1124 0.1112 0.0509 0.0765 0.0642 0.0557 0.1076 0.2031 0.0617 0.3247 [41] 0.8284 0.9897 0.9195 0.3453 0.4350 0.1873 0.8229 1.0000 0.9602 0.8767 [51] 0.8982 0.4904 0.5075 0.5650 0.1718 0.9394 0.9619 0.6437 0.4288 0.7493 [61] 0.2617 0.7602 0.7436 0.1432 
0.8950 0.6079 0.9449 0.9353 0.9897 0.9085 [71] 0.6804 0.5209 0.8569 0.1691 0.7530 0.8139 0.9815 0.9277 0.5685 0.3586 [81] 0.7174 0.8990 0.8030 0.1919 0.9045 0.5575 0.6079 0.0347 0.7261 0.9166 [91] 0.6784 0.9175 0.4314 0.8334 0.8254 0.5970 0.5131 0.9972 0.1284 0.2998 [101] 0.2024 0.2518 0.1805 0.2751 0.3156 0.2616 0.1728 0.2269 0.1432 0.0450 [111] 0.2289 0.2810 0.2631 0.2139 0.1444 0.0382 0.1799 0.1240 0.2563 0.0347 [121] 0.3258 0.3153 0.2467 0.1648 0.2697 0.2290 0.1425 0.1090 0.1484 0.3099 [131] 0.2473 0.0347 0.2225 0.2802 0.1923 0.2596 0.2607 0.5045 0.4737 0.3991 [141] 0.5041 0.4677 0.5009 0.5451 0.5119 0.5199 0.5328 0.3477 0.1445 0.5482 [151] 0.4294 0.4171 0.2229 0.1224 0.1295 0.2440 0.3406 0.4104 0.2441 0.1906 [161] 0.1683 0.1551 0.1025 0.4330 0.4593 0.4823 0.1530 0.5087 0.3115 0.1310 [171] 0.4206 0.1736 0.4217 0.1776 0.4284 0.2027 0.0722 0.1677 0.1448 0.0641 [181] 0.0523 0.1086 0.1320 0.1650 0.1582 0.1723 0.1196 0.0931 0.0580 0.1079 [191] 0.1286 0.1151 0.1195 0.1408 0.0896 0.1296 0.0347 0.0347 0.0695 0.0695 $percdensoc [1] 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 [38] 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 [75] 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 [112] 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 [149] 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 [186] 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 $parsimony [1] 0.3 $cvnnd [1] 0.067 $cvnndc [1] 1.070 0.733 0.000 attr(,"class") [1] "cquality" > > > > > cleanEx() > nameEx("cvnn") > ### * cvnn > > flush(stderr()); flush(stdout()) > > ### Name: cvnn > ### Title: Cluster validation based on nearest neighbours > ### Aliases: cvnn > ### Keywords: cluster > > ### ** Examples > > options(digits=3) > iriss <- as.matrix(iris[c(1:10,51:55,101:105),-5]) > irisc <- as.numeric(iris[c(1:10,51:55,101:105),5]) > print(cvnn(dist(iriss),list(irisc,rep(1:4,5)))) $cvnnindex [1] 0.616 2.000 $sep [1] 0.36 0.92 $comp [1] 0.674 2.998 > > > > cleanEx() > nameEx("cweight") > ### * cweight > > flush(stderr()); flush(stdout()) > > ### Name: cweight > ### Title: Weight function for AWC > ### Aliases: cweight > ### Keywords: arith > > ### ** Examples > > cweight(4,1) [1] 0.25 > > > > cleanEx() > nameEx("dbscan") > ### * dbscan > > flush(stderr()); flush(stdout()) > > ### Name: dbscan > ### Title: DBSCAN density reachability and connectivity clustering > ### Aliases: dbscan print.dbscan plot.dbscan predict.dbscan > ### Keywords: multivariate cluster > > ### ** Examples > > set.seed(665544) > n <- 600 > x <- cbind(runif(10, 0, 10)+rnorm(n, sd=0.2), runif(10, 0, 10)+rnorm(n, + sd=0.2)) > par(bg="grey40") > ds <- dbscan(x, 0.2) > # run with showplot=1 to see how dbscan works. 
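# A sketch of the call mentioned in the comment above (assuming the same x and
# eps; showplot=1 draws the intermediate clustering stages, so it is slow and
# therefore not run in this transcript):
# ds1 <- dbscan(x, 0.2, showplot=1)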
> ds dbscan Pts=600 MinPts=5 eps=0.2 0 1 2 3 4 5 6 7 8 9 10 11 border 28 4 4 8 5 3 3 4 3 4 6 4 seed 0 50 53 51 52 51 54 54 54 53 51 1 total 28 54 57 59 57 54 57 58 57 57 57 5 > plot(ds, x) > > x2 <- matrix(0,nrow=4,ncol=2) > x2[1,] <- c(5,2) > x2[2,] <- c(8,3) > x2[3,] <- c(4,4) > x2[4,] <- c(9,9) > predict(ds, x, x2) [1] 4 9 0 0 > > n <- 600 > x <- cbind((1:3)+rnorm(n, sd=0.2), (1:3)+rnorm(n, sd=0.2)) > > # Not run, but results from my machine are 0.105 - 0.068 - 0.255: > # system.time(ds <- dbscan(x, 0.3, countmode=NULL, method="raw"))[3] > # system.time(dsb <- dbscan(x, 0.3, countmode=NULL, method="hybrid"))[3] > # system.time(dsc <- dbscan(dist(x), 0.3, countmode=NULL, > # method="dist"))[3] > > > > graphics::par(get("par.postscript", pos = 'CheckExEnv')) > cleanEx() > nameEx("dipp.tantrum") > ### * dipp.tantrum > > flush(stderr()); flush(stdout()) > > ### Name: dipp.tantrum > ### Title: Simulates p-value for dip test > ### Aliases: dipp.tantrum > ### Keywords: cluster > > ### ** Examples > > # not run, requires package diptest > # x <- runif(100) > # d <- dip(x) > # dt <- dipp.tantrum(x,d,M=10) > > > > cleanEx() > nameEx("diptest.multi") > ### * diptest.multi > > flush(stderr()); flush(stdout()) > > ### Name: diptest.multi > ### Title: Diptest for discriminant coordinate projection > ### Aliases: diptest.multi > ### Keywords: cluster multivariate > > ### ** Examples > > require(diptest) Loading required package: diptest > x <- cbind(runif(100),runif(100)) > partition <- 1+(x[,1]<0.5) > d1 <- diptest.multi(x,partition) > d2 <- diptest.multi(x,partition,pvalue="tantrum",M=10) > > > > cleanEx() detaching ‘package:diptest’ > nameEx("discrcoord") > ### * discrcoord > > flush(stderr()); flush(stdout()) > > ### Name: discrcoord > ### Title: Discriminant coordinates/canonical variates > ### Aliases: discrcoord > ### Keywords: multivariate classif > > ### ** Examples > > set.seed(4634) > face <- rFace(600,dMoNo=2,dNoEy=0) > grface <- as.integer(attr(face,"grouping")) > dcf <- discrcoord(face,grface) > plot(dcf$proj,col=grface) > # ...done in one step by function plotcluster. > > > > cleanEx() > nameEx("discrete.recode") > ### * discrete.recode > > flush(stderr()); flush(stdout()) > > ### Name: discrete.recode > ### Title: Recodes mixed variables dataset > ### Aliases: discrete.recode > ### Keywords: manip > > ### ** Examples > > set.seed(776655) > v1 <- rnorm(20) > v2 <- rnorm(20) > d1 <- sample(c(2,4,6,8),20,replace=TRUE) > d2 <- sample(1:4,20,replace=TRUE) > ldata <- cbind(v1,d1,v2,d2) > lc <- + discrete.recode(ldata,xvarsorted=FALSE,continuous=c(1,3),discrete=c(2,4)) > require(MASS) Loading required package: MASS > data(Cars93) > Cars934 <- Cars93[,c(3,5,8,10)] > cc <- discrete.recode(Cars934,xvarsorted=FALSE,continuous=c(2,3),discrete=c(1,4)) > > > > cleanEx() detaching ‘package:MASS’ > nameEx("discrproj") > ### * discrproj > > flush(stderr()); flush(stdout()) > > ### Name: discrproj > ### Title: Linear dimension reduction for classification > ### Aliases: discrproj > ### Keywords: multivariate classif > > ### ** Examples > > set.seed(4634) > face <- rFace(300,dMoNo=2,dNoEy=0,p=3) > grface <- as.integer(attr(face,"grouping")) > > # The abs in the following is there to unify the output, > # because eigenvectors are defined only up to their sign. > # Statistically it doesn't make sense to compute absolute values. 
> round(abs(discrproj(face,grface, method="nc")$units),digits=2) [,1] [,2] [,3] [1,] 0.84 1.15 0.02 [2,] 0.34 0.28 0.01 [3,] 0.07 0.04 1.00 > round(abs(discrproj(face,grface, method="wnc")$units),digits=2) [,1] [,2] [,3] [1,] 0.07 1.42 0.04 [2,] 0.44 0.00 0.01 [3,] 0.04 0.00 1.01 > round(abs(discrproj(face,grface, clnum=1, method="arc")$units),digits=2) [,1] [,2] [,3] [1,] 1.30 0.50 0.34 [2,] 0.65 0.05 0.08 [3,] 0.08 0.66 0.47 > > > > cleanEx() > nameEx("distancefactor") > ### * distancefactor > > flush(stderr()); flush(stdout()) > > ### Name: distancefactor > ### Title: Factor for dissimilarity of mixed type data > ### Aliases: distancefactor > ### Keywords: cluster > > ### ** Examples > > set.seed(776655) > d1 <- sample(1:5,20,replace=TRUE) > d2 <- sample(1:4,20,replace=TRUE) > ldata <- cbind(d1,d2) > lc <- cat2bin(ldata,categorical=1)$data > lc[,1:5] <- lc[,1:5]*distancefactor(5,20,type="categorical") > lc[,6] <- lc[,6]*distancefactor(4,20,type="ordinal") > > > > cleanEx() > nameEx("distcritmulti") > ### * distcritmulti > > flush(stderr()); flush(stdout()) > > ### Name: distcritmulti > ### Title: Distance based validity criteria for large data sets > ### Aliases: distcritmulti > ### Keywords: cluster > > ### ** Examples > > set.seed(20000) > options(digits=3) > face <- rFace(50,dMoNo=2,dNoEy=0,p=2) > clustering <- as.integer(attr(face,"grouping")) > distcritmulti(face,clustering,ns=3,seed=100000,criterion="pearsongamma") $crit.overall [1] 0.469 $crit.sub [1] 0.512 0.475 0.424 $crit.sd [1] 0.0443 $subsets $subsets[[1]] [1] 42 12 45 43 29 19 49 4 18 14 28 40 23 5 36 10 $subsets[[2]] [1] 21 47 31 7 6 34 46 9 1 35 22 30 17 37 16 39 $subsets[[3]] [1] 3 8 32 48 50 13 44 38 24 20 27 15 11 2 26 41 25 33 > > > > cleanEx() > nameEx("distrsimilarity") > ### * distrsimilarity > > flush(stderr()); flush(stdout()) > > ### Name: distrsimilarity > ### Title: Similarity of within-cluster distributions to normal and uniform > ### Aliases: distrsimilarity > ### Keywords: multivariate classif cluster > > ### ** Examples > > set.seed(20000) > options(digits=3) > face <- rFace(200,dMoNo=2,dNoEy=0,p=2) > km3 <- kmeans(face,3) > distrsimilarity(face,km3$cluster) $kdnorm [1] 0.194 $kdunif [1] 0.631 $kdnormc [1] 0.213 0.114 0.240 $kdunifc [1] 0.536 0.470 0.891 $xmahal [1] 3.5129 4.7799 6.3495 7.8442 5.0433 5.6147 6.0117 2.8951 3.4951 [10] 3.3331 5.4346 7.4691 4.9576 2.5102 4.4414 2.7912 5.8919 4.1839 [19] 3.7049 5.8845 4.0592 4.3852 3.6482 3.8863 2.8131 5.8701 3.3964 [28] 4.5995 3.4337 6.0519 2.8604 4.2351 5.5251 3.2639 5.5791 5.6654 [37] 3.3749 5.3881 5.1841 0.1951 0.5543 0.2182 0.3423 1.3870 1.0588 [46] 2.2913 0.5611 0.1924 0.2815 0.0719 0.1155 1.2668 1.0872 0.6318 [55] 2.4902 0.1747 0.2197 0.5771 1.5045 0.0861 2.2272 0.1307 0.7233 [64] 3.2335 0.3505 1.0175 0.1018 0.3543 0.1838 0.2504 0.1653 0.8485 [73] 0.0698 3.1368 0.6831 0.0465 0.1856 0.2635 1.0392 1.3378 0.5079 [82] 0.2897 0.0961 1.9485 0.1066 0.8219 0.2738 7.4076 0.7187 0.3910 [91] 0.5886 0.3360 0.3379 0.2821 0.2710 0.6091 1.2201 0.1610 0.2435 [100] 1.3166 0.1236 1.1852 0.6172 1.0057 0.7931 0.0836 0.6910 0.0241 [109] 0.4402 2.3749 0.2522 1.3834 0.1406 0.0374 0.7562 4.3898 0.2228 [118] 1.1830 0.1869 11.0909 0.9184 1.1382 1.1336 0.1277 1.2978 1.5902 [127] 0.9341 0.6224 0.1983 0.8693 0.0827 0.1414 0.4996 0.8275 0.3648 [136] 0.0871 1.3740 0.3438 0.3971 0.6755 0.3984 0.5457 0.3319 0.4708 [145] 0.5582 0.5483 0.4577 0.2446 1.0912 0.4667 0.5092 0.3227 0.2626 [154] 1.4643 1.0198 0.2357 0.6785 0.3872 0.1366 0.7052 0.2668 0.8800 [163] 0.4571 0.6615 0.6485 0.4795 
1.1714 0.5612 0.2161 1.3731 0.2612 [172] 1.1953 0.5939 0.1136 0.5080 0.1114 3.2163 1.4314 1.1823 0.7213 [181] 1.0050 1.9337 2.0463 1.4902 1.1999 1.6324 1.8385 1.2525 2.5465 [190] 2.8546 1.3114 2.7367 1.8662 2.0296 2.2878 1.1876 11.6174 10.4323 [199] 23.1443 23.1443 $xdknn [1] 0.7869 0.5996 0.9758 0.8746 0.5241 0.2066 0.1396 0.4856 0.5316 0.4451 [11] 0.1041 0.8472 0.4166 0.8038 0.5286 0.0330 0.2158 0.9412 0.1836 0.3729 [21] 0.1554 0.1554 0.5434 0.5559 0.0445 0.1041 0.7869 1.2058 0.1624 1.0412 [31] 0.0445 0.0792 0.9497 0.5262 0.5460 0.6086 0.1836 0.1201 0.7696 0.3148 [41] 0.0925 0.0373 0.0686 0.1787 0.1787 0.2043 0.0850 0.0368 0.0634 0.0647 [51] 0.0735 0.0877 0.1696 0.2180 0.3033 0.0847 0.0817 0.1949 0.1573 0.0905 [61] 0.2285 0.0610 0.0810 0.3732 0.0686 0.1248 0.0735 0.0634 0.0352 0.0602 [71] 0.0905 0.2093 0.1010 0.2285 0.1312 0.0931 0.0428 0.0517 0.1569 0.1692 [81] 0.1518 0.0522 0.0858 0.2576 0.0975 0.2108 0.2287 1.2165 0.0850 0.1060 [91] 0.2131 0.0365 0.2383 0.0987 0.1149 0.2074 0.1481 0.0503 0.4643 0.1035 [101] 0.3037 0.1217 0.3723 0.1217 0.1360 0.1248 0.4015 0.2825 0.3543 0.6843 [111] 0.3329 0.0968 0.1248 0.2612 0.2589 1.3065 0.2769 0.3047 0.1963 2.1923 [121] 0.1477 0.1416 0.1031 0.2388 0.1412 0.1595 0.1747 0.4555 0.2769 0.1722 [131] 0.1812 1.2096 0.3164 0.2369 0.2848 0.1386 0.2088 0.2163 0.0919 0.1223 [141] 0.1260 0.0983 0.1396 0.0609 0.1027 0.1113 0.0916 0.2354 0.4479 0.0916 [151] 0.1720 0.1345 0.1015 0.2424 0.2546 0.1630 0.2245 0.1167 0.3315 0.3251 [161] 0.1807 0.3613 0.4538 0.1241 0.1310 0.1222 0.2546 0.1113 0.2096 0.1844 [171] 0.1576 0.2424 0.1223 0.2617 0.1708 0.1843 0.4485 0.2678 0.2942 0.5822 [181] 0.8005 0.2150 0.4959 0.2568 0.2056 0.2958 0.2150 0.4788 0.6237 0.4241 [191] 0.2568 0.4485 0.1713 0.4629 0.5524 0.2678 7.5721 7.8539 8.3217 8.3217 > > > > cleanEx() > nameEx("dridgeline") > ### * dridgeline > > flush(stderr()); flush(stdout()) > > ### Name: dridgeline > ### Title: Density along the ridgeline > ### Aliases: dridgeline > ### Keywords: cluster multivariate > > ### ** Examples > > q <- dridgeline(seq(0,1,0.1),0.5,c(1,1),c(2,5),diag(2),diag(2)) > > > > cleanEx() > nameEx("dudahart2") > ### * dudahart2 > > flush(stderr()); flush(stdout()) > > ### Name: dudahart2 > ### Title: Duda-Hart test for splitting > ### Aliases: dudahart2 > ### Keywords: cluster > > ### ** Examples > > options(digits=2) > set.seed(98765) > iriss <- iris[sample(150,20),-5] > km <- kmeans(iriss,2) > dudahart2(iriss,km$cluster) $p.value [1] 2.2e-05 $dh [1] 0.26 $compare [1] 0.4 $cluster1 [1] FALSE $alpha [1] 0.001 $z [1] 3.1 > > > > cleanEx() > nameEx("extract.mixturepars") > ### * extract.mixturepars > > flush(stderr()); flush(stdout()) > > ### Name: extract.mixturepars > ### Title: Extract parameters for certain components from mclust > ### Aliases: extract.mixturepars > ### Keywords: cluster multivariate > > ### ** Examples > > set.seed(98765) > require(mclust) Loading required package: mclust Package 'mclust' version 6.1.1 Type 'citation("mclust")' for citing this R package in publications. 
> iriss <- iris[sample(150,20),-5] > irisBIC <- mclustBIC(iriss,G=5,modelNames="VEV") > siris <- summary(irisBIC,iriss) > emp <- extract.mixturepars(siris,2) > emp$pro [1] 1 > round(emp$mean,digits=1) Sepal.Length Sepal.Width Petal.Length Petal.Width 7.6 2.8 6.7 2.2 > emp$variance$modelName [1] "VEV" > round(emp$variance$scale,digits=2) [1] 0.00 0.00 0.02 0.00 0.03 > > > > > cleanEx() detaching ‘package:mclust’ > nameEx("findrep") > ### * findrep > > flush(stderr()); flush(stdout()) > > ### Name: findrep > ### Title: Finding representatives for cluster border > ### Aliases: findrep > ### Keywords: cluster > > ### ** Examples > > options(digits=3) > iriss <- as.matrix(iris[c(1:5,51:55,101:105),-5]) > irisc <- as.numeric(iris[c(1:5,51:55,101:105),5]) > findrep(iriss,colMeans(iriss),irisc,cluster=1,r=2) $repc [1] 3 5 $repx [1] 3 5 $maxr [1] 2 $wvar [1] 10.6 > > > > cleanEx() > nameEx("fixmahal") > ### * fixmahal > > flush(stderr()); flush(stdout()) > > ### Name: fixmahal > ### Title: Mahalanobis Fixed Point Clusters > ### Aliases: fixmahal summary.mfpc plot.mfpc fpclusters.mfpc > ### print.summary.mfpc print.mfpc fpmi > ### Keywords: cluster multivariate robust > > ### ** Examples > > options(digits=2) > set.seed(20000) > face <- rFace(400,dMoNo=2,dNoEy=0, p=3) > # The first example uses grouping information via init.group. > initg <- list() > grface <- as.integer(attr(face,"grouping")) > for (i in 1:5) initg[[i]] <- (grface==i) > ff0 <- fixmahal(face, pointit=FALSE, init.group=initg) > summary(ff0) * Mahalanobis Fixed Point Clusters * Often a clear cluster in the data leads to several similar FPCs. The summary shows the representative FPCs of groups of similar FPCs. Method fuzzy was used. Number of representative FPCs: 5 FPCs with less than 10 points were skipped. 0 iteration runs led to 0 skipped clusters. Weight 1 for r^2<= 7.8 weight 0 for r^2> 13 Constant ca= 7.8 corresponding to alpha= 0.95 FPC 1 Times found (group members): 1 Mean: [1] -2.1 17.1 1.2 Covariance matrix: [,1] [,2] [,3] [1,] 0.1420 -0.0053 -0.041 [2,] -0.0053 0.1518 0.063 [3,] -0.0413 0.0632 1.057 Number of points (sum of weights): 39 FPC 2 Times found (group members): 1 Mean: [1] 2.0 17.0 1.2 Covariance matrix: [,1] [,2] [,3] [1,] 0.1544 0.0038 0.043 [2,] 0.0038 0.1159 0.057 [3,] 0.0427 0.0567 1.296 Number of points (sum of weights): 76 FPC 3 Times found (group members): 1 Mean: [1] -0.0043 3.0912 0.5582 Covariance matrix: [,1] [,2] [,3] [1,] 0.1866 0.0017 0.0175 [2,] 0.0017 0.0438 0.0047 [3,] 0.0175 0.0047 0.2097 Number of points (sum of weights): 96 FPC 4 Times found (group members): 2 Mean: [1] 0.013 3.880 0.615 Covariance matrix: [,1] [,2] [,3] [1,] 0.2064 0.0067 0.017 [2,] 0.0067 4.8776 0.193 [3,] 0.0173 0.1934 0.262 Number of points (sum of weights): 197 FPC 5 Times found (group members): 1 Mean: [1] 0.11 7.58 0.63 Covariance matrix: [,1] [,2] [,3] [1,] 1.830 1.769 0.037 [2,] 1.769 36.251 0.074 [3,] 0.037 0.074 0.270 Number of points (sum of weights): 328 Number of points (rounded weights) in intersection of representative FPCs [,1] [,2] [,3] [,4] [,5] [1,] 39 0 0 0 27 [2,] 0 76 0 0 58 [3,] 0 0 96 96 96 [4,] 0 0 96 197 197 [5,] 27 58 96 197 328 > cff0 <- fpclusters(ff0) > plot(face, col=1+cff0[[1]]) > plot(face, col=1+cff0[[4]]) # Why does this come out as a cluster? > plot(ff0, face, 4) # A bit clearer... 
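# To see why FPC 4 comes out as a cluster, it can help to look at its size and
# its overlap with the known grouping; a minimal sketch using the cff0 and
# grface objects from above:
# sum(cff0[[4]])             # number of points assigned to FPC 4
# table(grface, cff0[[4]])   # cross-tabulation with the true grouping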
> # Without grouping information, examples need more time: > # ff1 <- fixmahal(face) > # summary(ff1) > # cff1 <- fpclusters(ff1) > # plot(face, col=1+cff1[[1]]) > # plot(face, col=1+cff1[[6]]) # Why does this come out as a cluster? > # plot(ff1, face, 6) # A bit clearer... > # ff2 <- fixmahal(face,method="ml") > # summary(ff2) > # ff3 <- fixmahal(face,method="ml",calpha=0.95,subset=50) > # summary(ff3) > ## ...fast, but lots of clusters. mer=0.3 may be useful here. > # set.seed(3000) > # face2 <- rFace(400,dMoNo=2,dNoEy=0) > # ff5 <- fixmahal(face2) > # summary(ff5) > ## misses right eye of face data; with p=6, > ## initial configurations are too large for 40 point clusters > # ff6 <- fixmahal(face2, startn=30) > # summary(ff6) > # cff6 <- fpclusters(ff6) > # plot(face2, col=1+cff6[[3]]) > # plot(ff6, face2, 3) > # x <- c(1,2,3,6,6,7,8,120) > # ff8 <- fixmahal(x) > # summary(ff8) > # ...dataset a bit too small for the defaults... > # ff9 <- fixmahal(x, mnc=3, startn=3) > # summary(ff9) > > > > cleanEx() > nameEx("fixreg") > ### * fixreg > > flush(stderr()); flush(stdout()) > > ### Name: fixreg > ### Title: Linear Regression Fixed Point Clusters > ### Aliases: fixreg summary.rfpc plot.rfpc fpclusters.rfpc > ### print.summary.rfpc print.rfpc rfpi > ### Keywords: cluster robust regression > > ### ** Examples > > set.seed(190000) > options(digits=3) > data(tonedata) > attach(tonedata) > tonefix <- fixreg(stretchratio,tuned,mtf=1,ir=20) > summary(tonefix) * Fixed Point Clusters * Often a clear cluster in the data leads to several similar FPCs. The summary shows the representative FPCs of groups of similar FPCs, which were found at least 1 times. Constant ca= 10.1 Number of representative FPCs: 2 FPCs with less than 50 points were skipped. 2 iterations led to skipped FPCs. FPC 1 Times found (group members): 14 Ratio to estimated expectation: 1.31 Regression parameters: Intercept X 1.9051 0.0477 Error variance: 0.00282 Number of points: 122 FPC 2 Times found (group members): 5 Ratio to estimated expectation: 2.13 Regression parameters: Intercept X 0.023 0.991 Error variance: 2e-04 Number of points: 74 Number of points in intersection of representative FPCs [,1] [,2] [1,] 122 57 [2,] 57 74 > # This is designed to have a fast example; default setting would be better. > # If you want to see more (and you have a bit more time), > # try out the following: > ## Not run: > ##D set.seed(1000) > ##D tonefix <- fixreg(stretchratio,tuned) > ##D # Default - good for these data > ##D summary(tonefix) > ##D plot(tonefix,stretchratio,tuned,1) > ##D plot(tonefix,stretchratio,tuned,2) > ##D plot(tonefix,stretchratio,tuned,3,bw=FALSE,pch=5) > ##D toneclus <- fpclusters(tonefix,stretchratio,tuned) > ##D plot(stretchratio,tuned,col=1+toneclus[[2]]) > ##D tonefix2 <- fixreg(stretchratio,tuned,distcut=1,mtf=1,countmode=50) > ##D # Every found fixed point cluster is reported, > ##D # no matter how instable it may be. > ##D summary(tonefix2) > ##D tonefix3 <- fixreg(stretchratio,tuned,ca=7) > ##D # ca defaults to 10.07 for these data. 
> ##D summary(tonefix3) > ##D subset <- c(rep(FALSE,5),rep(TRUE,24),rep(FALSE,121)) > ##D tonefix4 <- fixreg(stretchratio,tuned, > ##D mtf=1,ir=0,init.group=list(subset)) > ##D summary(tonefix4) > ## End(Not run) > > > > cleanEx() detaching ‘tonedata’ > nameEx("flexmixedruns") > ### * flexmixedruns > > flush(stderr()); flush(stdout()) > > ### Name: flexmixedruns > ### Title: Fitting mixed Gaussian/multinomial mixtures with flexmix > ### Aliases: flexmixedruns > ### Keywords: cluster > > ### ** Examples > > options(digits=3) > set.seed(776655) > v1 <- rnorm(100) > v2 <- rnorm(100) > d1 <- sample(1:5,100,replace=TRUE) > d2 <- sample(1:4,100,replace=TRUE) > ldata <- cbind(v1,v2,d1,d2) > fr <- flexmixedruns(ldata, + continuous=2,discrete=2,simruns=2,n.cluster=2:3,allout=FALSE) k= 2 new best fit found in run 1 k= 2 new best fit found in run 2 k= 2 BIC= 1258 k= 3 new best fit found in run 1 Nonoptimal or repeated fit found in run 2 k= 3 BIC= 1298 > print(fr$optimalk) [1] 2 > print(fr$optsummary) Call: flexmix(formula = x ~ 1, k = k, cluster = initial.cluster, model = lcmixed(continuous = continuous, discrete = discrete, ppdim = ppdim, diagonal = diagonal), control = control) prior size post>0 ratio Comp.1 0.511 47 77 0.61 Comp.2 0.489 53 93 0.57 'log Lik.' -576 (df=23) AIC: 1198 BIC: 1258 > print(fr$flexout@cluster) [1] 2 1 1 1 2 2 1 1 1 1 2 1 2 1 2 1 1 2 2 2 1 1 2 1 2 1 1 1 1 1 1 2 2 1 1 1 2 [38] 2 2 1 2 2 2 2 2 1 2 2 1 2 2 1 2 2 1 1 2 2 2 1 2 2 1 2 2 2 1 1 2 2 1 1 1 2 [75] 2 2 1 2 2 1 2 1 2 1 2 2 2 1 2 2 2 2 1 1 1 1 2 2 1 1 > print(fr$flexout@components) $Comp.1 $Comp.1[[1]] $center [1] 0.278 -0.171 $cov [,1] [,2] [1,] 1.37 0.00 [2,] 0.00 1.37 $pp $pp[[1]] [1] 0.136 0.260 0.216 0.121 0.268 $pp[[2]] [1] 2.31e-01 6.25e-07 2.02e-01 5.67e-01 $Comp.2 $Comp.2[[1]] $center [1] -0.1221 0.0365 $cov [,1] [,2] [1,] 0.403 0.00 [2,] 0.000 1.01 $pp $pp[[1]] [1] 0.206 0.219 0.204 0.303 0.068 $pp[[2]] [1] 0.351856 0.470507 0.177369 0.000268 > > > > cleanEx() > nameEx("itnumber") > ### * itnumber > > flush(stderr()); flush(stdout()) > > ### Name: itnumber > ### Title: Number of regression fixed point cluster iterations > ### Aliases: itnumber > ### Keywords: univar cluster > > ### ** Examples > > itnumber(500,4,150,2) [1] 6985 > > > > cleanEx() > nameEx("jittervar") > ### * jittervar > > flush(stderr()); flush(stdout()) > > ### Name: jittervar > ### Title: Jitter variables in a data matrix > ### Aliases: jittervar > ### Keywords: manip > > ### ** Examples > > set.seed(776655) > v1 <- rnorm(20) > v2 <- rnorm(20) > d1 <- sample(1:5,20,replace=TRUE) > d2 <- sample(1:4,20,replace=TRUE) > ldata <- cbind(v1,v2,d1,d2) > jv <- jittervar(ldata,jitterv=3:4) > > > > cleanEx() > nameEx("kmeansCBI") > ### * kmeansCBI > > flush(stderr()); flush(stdout()) > > ### Name: kmeansCBI > ### Title: Interface functions for clustering methods > ### Aliases: kmeansCBI hclustCBI hclusttreeCBI disthclustCBI > ### disthclusttreeCBI noisemclustCBI distnoisemclustCBI claraCBI pamkCBI > ### dbscanCBI mahalCBI mergenormCBI speccCBI tclustCBI pdfclustCBI > ### emskewCBI stupidkcentroidsCBI stupidknnCBI stupidkfnCBI > ### stupidkavenCBI > ### Keywords: cluster multivariate > > ### ** Examples > > options(digits=3) > set.seed(20000) > face <- rFace(50,dMoNo=2,dNoEy=0,p=2) > dbs <- dbscanCBI(face,eps=1.5,MinPts=4) > dhc <- disthclustCBI(dist(face),method="average",k=1.5,noisecut=2) > table(dbs$partition,dhc$partition) 1 1 16 2 9 3 9 4 5 5 11 > dm <- mergenormCBI(face,G=10,modelNames="EEE",nnk=2) > dtc <- tclustCBI(face,6,trim=0.1,restr.fact=500) 
> table(dm$partition,dtc$partition) 1 2 3 4 5 6 7 1 3 0 0 2 0 0 0 2 10 9 8 0 5 0 0 3 0 0 0 0 0 0 1 4 1 0 0 4 0 3 4 > > > > > cleanEx() > nameEx("kmeansruns") > ### * kmeansruns > > flush(stderr()); flush(stdout()) > > ### Name: kmeansruns > ### Title: k-means with estimating k and initialisations > ### Aliases: kmeansruns > ### Keywords: cluster multivariate > > ### ** Examples > > options(digits=3) > set.seed(20000) > face <- rFace(50,dMoNo=2,dNoEy=0,p=2) > pka <- kmeansruns(face,krange=1:5,critout=TRUE,runs=2,criterion="asw") 2 clusters 0.742 3 clusters 0.544 4 clusters 0.59 5 clusters 0.588 > pkc <- kmeansruns(face,krange=1:5,critout=TRUE,runs=2,criterion="ch") 2 clusters 181 3 clusters 108 4 clusters 231 5 clusters 187 > > > > cleanEx() > nameEx("lcmixed") > ### * lcmixed > > flush(stderr()); flush(stdout()) > > ### Name: lcmixed > ### Title: flexmix method for mixed Gaussian/multinomial mixtures > ### Aliases: lcmixed > ### Keywords: cluster > > ### ** Examples > > set.seed(112233) > options(digits=3) > require(MASS) Loading required package: MASS > require(flexmix) Loading required package: flexmix Loading required package: lattice > data(Cars93) > Cars934 <- Cars93[,c(3,5,8,10)] > cc <- + discrete.recode(Cars934,xvarsorted=FALSE,continuous=c(2,3),discrete=c(1,4)) > fcc <- flexmix(cc$data~1,k=2, + model=lcmixed(continuous=2,discrete=2,ppdim=c(6,3),diagonal=TRUE)) > summary(fcc) Call: flexmix(formula = cc$data ~ 1, k = 2, model = lcmixed(continuous = 2, discrete = 2, ppdim = c(6, 3), diagonal = TRUE)) prior size post>0 ratio Comp.1 0.464 42 61 0.689 Comp.2 0.536 51 66 0.773 'log Lik.' -789 (df=23) AIC: 1625 BIC: 1683 > > > > cleanEx() detaching ‘package:flexmix’, ‘package:lattice’, ‘package:MASS’ > nameEx("localshape") > ### * localshape > > flush(stderr()); flush(stdout()) > > ### Name: localshape > ### Title: Local shape matrix > ### Aliases: localshape > ### Keywords: multivariate > > ### ** Examples > > options(digits=3) > data(iris) > localshape(iris[,-5],mscatter="cov") Sepal.Length Sepal.Width Petal.Length Petal.Width Sepal.Length 631889 309217 265161 63257 Sepal.Width 309217 456220 34488 52444 Petal.Length 265161 34488 386520 116040 Petal.Width 63257 52444 116040 104828 > > > > cleanEx() > nameEx("mahalanodisc") > ### * mahalanodisc > > flush(stderr()); flush(stdout()) > > ### Name: mahalanodisc > ### Title: Mahalanobis for AWC > ### Aliases: mahalanodisc > ### Keywords: multivariate > > ### ** Examples > > options(digits=3) > x <- cbind(rnorm(50),rnorm(50)) > mahalanodisc(x,c(0,0),cov(x)) [1] 0.7135 0.4376 1.1082 4.8732 2.3944 5.0023 0.4707 1.8782 0.8586 0.1586 [11] 9.8187 0.2204 1.0252 7.1017 2.3418 0.0401 3.4794 3.7170 1.0145 5.6731 [21] 1.5074 1.3702 0.4105 6.8458 2.1580 0.0936 0.2516 3.1341 0.3337 0.6000 [31] 2.9447 0.0362 1.7431 2.4890 3.0445 0.3544 1.3904 0.1056 1.9390 0.9399 [41] 0.3619 1.6200 2.2192 1.0100 3.2679 1.0197 1.8863 1.1636 1.6318 1.3235 > mahalanodisc(x,c(0,0),matrix(0,ncol=2,nrow=2)) [1] 5.51e+09 4.08e+09 8.15e+09 3.82e+10 2.16e+10 4.60e+10 3.72e+09 1.64e+10 [9] 6.56e+09 1.12e+09 8.05e+10 1.54e+09 8.62e+09 4.91e+10 1.82e+10 3.77e+08 [17] 3.26e+10 3.04e+10 6.98e+09 5.07e+10 1.07e+10 1.12e+10 3.79e+09 4.83e+10 [25] 1.96e+10 8.81e+08 2.21e+09 2.16e+10 2.34e+09 5.22e+09 2.17e+10 2.88e+08 [33] 1.54e+10 2.32e+10 2.25e+10 2.83e+09 1.29e+10 9.60e+08 1.35e+10 6.54e+09 [41] 3.21e+09 1.52e+10 1.83e+10 8.00e+09 2.99e+10 8.12e+09 1.76e+10 9.19e+09 [49] 1.51e+10 1.00e+10 > > > > cleanEx() > nameEx("mahalanofix") > ### * mahalanofix > > flush(stderr()); flush(stdout()) > 
> ### Name: mahalanofix > ### Title: Mahalanobis distances from center of indexed points > ### Aliases: mahalanofix mahalanofuz > ### Keywords: multivariate > > ### ** Examples > > x <- c(1,2,3,4,5,6,7,8,9,10) > y <- c(1,2,3,8,7,6,5,8,9,10) > mahalanofix(cbind(x,y),gv=c(0,0,0,1,1,1,1,1,0,0)) > mahalanofix(cbind(x,y),gv=c(0,0,0,1,1,1,1,0,0,0)) > mahalanofix(cbind(x,y),gv=c(0,0,0,1,1,1,1,1,0,0),method="mcd") > mahalanofuz(cbind(x,y),gv=c(0,0,0.5,0.5,1,1,1,0.5,0.5,0)) > > > > cleanEx() > nameEx("mahalconf") > ### * mahalconf > > flush(stderr()); flush(stdout()) > > ### Name: mahalconf > ### Title: Mahalanobis fixed point clusters initial configuration > ### Aliases: mahalconf > ### Keywords: multivariate cluster > > ### ** Examples > > set.seed(4634) > face <- rFace(600,dMoNo=2,dNoEy=0,p=2) > mahalconf(face,no=200,startn=20,covall=cov(face),plot="start") [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [13] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [25] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [37] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [49] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [61] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [73] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [85] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [97] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [109] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [121] FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [133] FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE [145] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [157] FALSE TRUE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [169] TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [181] FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE [193] TRUE FALSE FALSE FALSE FALSE TRUE FALSE TRUE FALSE FALSE FALSE FALSE [205] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [217] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [229] FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [241] TRUE FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [253] FALSE FALSE FALSE FALSE FALSE TRUE FALSE TRUE FALSE FALSE FALSE FALSE [265] FALSE TRUE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [277] TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [289] TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE [301] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [313] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [325] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [337] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [349] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [361] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [373] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [385] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [397] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [409] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE 
FALSE FALSE [421] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [433] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [445] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [457] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [469] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [481] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [493] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [505] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [517] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [529] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [541] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [553] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [565] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [577] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE [589] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE > > > > cleanEx() > nameEx("mergenormals") > ### * mergenormals > > flush(stderr()); flush(stdout()) > > ### Name: mergenormals > ### Title: Clustering by merging Gaussian mixture components > ### Aliases: mergenormals summary.mergenorm print.summary.mergenorm > ### Keywords: multivariate cluster > > ### ** Examples > > require(mclust) Loading required package: mclust Package 'mclust' version 6.1.1 Type 'citation("mclust")' for citing this R package in publications. > require(MASS) Loading required package: MASS > options(digits=3) > data(crabs) > dc <- crabs[,4:8] > cm <- mclustBIC(crabs[,4:8],G=9,modelNames="EEE") > scm <- summary(cm,crabs[,4:8]) > cmnbhat <- mergenormals(crabs[,4:8],scm,method="bhat") > summary(cmnbhat) * Merging Gaussian mixture components * Method: bhat , cutoff value: 0.1 Original number of components: 9 Number of clusters after merging: 4 Values at which clusters were merged: [,1] [,2] [1,] 8 0.5746 [2,] 7 0.2503 [3,] 6 0.2307 [4,] 5 0.1443 [5,] 4 0.1258 [6,] 3 0.0999 Components assigned to clusters: [,1] [1,] 1 [2,] 1 [3,] 2 [4,] 3 [5,] 2 [6,] 1 [7,] 4 [8,] 4 [9,] 3 > cmndemp <- mergenormals(crabs[,4:8],scm,method="demp") > summary(cmndemp) * Merging Gaussian mixture components * Method: demp , cutoff value: 0.025 Original number of components: 9 Number of clusters after merging: 4 Values at which clusters were merged: [,1] [,2] [1,] 8 0.1902 [2,] 7 0.0714 [3,] 6 0.0694 [4,] 5 0.0351 [5,] 4 0.0266 [6,] 3 0.0220 Components assigned to clusters: [,1] [1,] 1 [2,] 1 [3,] 2 [4,] 3 [5,] 2 [6,] 1 [7,] 4 [8,] 4 [9,] 3 > # Other methods take a bit longer, but try them! > # The values of by and M below are still chosen for reasonably fast execution. 
> # cmnrr <- mergenormals(crabs[,4:8],scm,method="ridge.ratio",by=0.05) > # cmd <- mergenormals(crabs[,4:8],scm,method="dip.tantrum",by=0.05) > # cmp <- mergenormals(crabs[,4:8],scm,method="predictive",M=3) > > > > cleanEx() detaching ‘package:MASS’, ‘package:mclust’ > nameEx("mergeparameters") > ### * mergeparameters > > flush(stderr()); flush(stdout()) > > ### Name: mergeparameters > ### Title: New parameters from merging two Gaussian mixture components > ### Aliases: mergeparameters > ### Keywords: multivariate cluster > > ### ** Examples > > options(digits=3) > set.seed(98765) > require(mclust) Loading required package: mclust Package 'mclust' version 6.1.1 Type 'citation("mclust")' for citing this R package in publications. > iriss <- iris[sample(150,20),-5] > irisBIC <- mclustBIC(iriss) > siris <- summary(irisBIC,iriss) > probs <- siris$parameters$pro > muarray <- siris$parameters$mean > Sigmaarray <- siris$parameters$variance$sigma > z <- siris$z > mpi <- mergeparameters(iriss,1,2,probs,muarray,Sigmaarray,z) > mpi$probs [1] 0.25 0.10 0.30 0.15 0.30 > mpi$muarray [,1] [,2] [,3] [,4] [,5] Sepal.Length 6.98 7.65 5.233 7.03 5.80 Sepal.Width 2.94 2.80 3.583 3.60 2.73 Petal.Length 5.40 6.75 1.383 6.07 4.52 Petal.Width 1.72 2.20 0.217 2.33 1.60 > > > > cleanEx() detaching ‘package:mclust’ > nameEx("minsize") > ### * minsize > > flush(stderr()); flush(stdout()) > > ### Name: minsize > ### Title: Minimum size of regression fixed point cluster > ### Aliases: minsize > ### Keywords: univar cluster > > ### ** Examples > > minsize(500,4,7000,2) [1] 127 > > > > cleanEx() > nameEx("mixdens") > ### * mixdens > > flush(stderr()); flush(stdout()) > > ### Name: mixdens > ### Title: Density of multivariate Gaussian mixture, mclust > ### parameterisation > ### Aliases: mixdens > ### Keywords: cluster multivariate > > ### ** Examples > > set.seed(98765) > require(mclust) Loading required package: mclust Package 'mclust' version 6.1.1 Type 'citation("mclust")' for citing this R package in publications. > iriss <- iris[sample(150,20),-5] > irisBIC <- mclustBIC(iriss) > siris <- summary(irisBIC,iriss) > round(mixdens(siris$modelName,iriss,siris$parameters),digits=2) 59 106 37 22 52 40 149 15 119 76 1136.59 3272.04 8.35 1.02 2381.43 0.23 15.95 9.24 3272.04 258.91 95 112 118 65 50 38 122 110 143 93 0.30 1.47 20.20 0.47 1.87 1.06 1.91 41.67 4.02 0.40 > > > > cleanEx() detaching ‘package:mclust’ > nameEx("mixpredictive") > ### * mixpredictive > > flush(stderr()); flush(stdout()) > > ### Name: mixpredictive > ### Title: Prediction strength of merged Gaussian mixture > ### Aliases: mixpredictive > ### Keywords: cluster multivariate > > ### ** Examples > > set.seed(98765) > iriss <- iris[sample(150,20),-5] > mp <- mixpredictive(iriss,2,2,M=2) > > > > cleanEx() > nameEx("mvdcoord") > ### * mvdcoord > > flush(stderr()); flush(stdout()) > > ### Name: mvdcoord > ### Title: Mean/variance differences discriminant coordinates > ### Aliases: mvdcoord > ### Keywords: multivariate classif > > ### ** Examples > > set.seed(4634) > face <- rFace(300,dMoNo=2,dNoEy=0,p=3) > grface <- as.integer(attr(face,"grouping")) > mcf <- mvdcoord(face,grface) > plot(mcf$proj,col=grface) > # ...done in one step by function plotcluster. 
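> # A hedged sketch of the one-step call (not run): plotcluster() accepts a
> # method argument (cf. the plotcluster example further below); "mvdc" is
> # assumed here to be the label for mean/variance differences coordinates.
> # plotcluster(face, grface, method="mvdc")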
> > > > cleanEx() > nameEx("ncoord") > ### * ncoord > > flush(stderr()); flush(stdout()) > > ### Name: ncoord > ### Title: Neighborhood based discriminant coordinates > ### Aliases: ncoord > ### Keywords: multivariate classif > > ### ** Examples > > set.seed(4634) > face <- rFace(600,dMoNo=2,dNoEy=0) > grface <- as.integer(attr(face,"grouping")) > ncf <- ncoord(face,grface) > plot(ncf$proj,col=grface) > ncf2 <- ncoord(face,grface,weighted=TRUE) > plot(ncf2$proj,col=grface) > # ...done in one step by function plotcluster. > > > > cleanEx() > nameEx("neginc") > ### * neginc > > flush(stderr()); flush(stdout()) > > ### Name: neginc > ### Title: Neg-entropy normality index for cluster validation > ### Aliases: neginc > ### Keywords: cluster > > ### ** Examples > > options(digits=3) > iriss <- as.matrix(iris[c(1:10,51:55,101:105),-5]) > irisc <- as.numeric(iris[c(1:10,51:55,101:105),5]) > neginc(iriss,irisc) [1] -2.92 > > > > cleanEx() > nameEx("nselectboot") > ### * nselectboot > > flush(stderr()); flush(stdout()) > > ### Name: nselectboot > ### Title: Selection of the number of clusters via bootstrap > ### Aliases: nselectboot > ### Keywords: cluster multivariate > > ### ** Examples > > set.seed(20000) > face <- rFace(50,dMoNo=2,dNoEy=0,p=2) > nselectboot(dist(face),B=2,clustermethod=disthclustCBI, + method="average",krange=5:7) > nselectboot(dist(face),B=2,clustermethod=claraCBI, + classification="centroid",krange=5:7) > nselectboot(face,B=2,clustermethod=kmeansCBI, + classification="centroid",krange=5:7) > # Of course use larger B in a real application. > > > > cleanEx() > nameEx("pamk") > ### * pamk > > flush(stderr()); flush(stdout()) > > ### Name: pamk > ### Title: Partitioning around medoids with estimation of number of > ### clusters > ### Aliases: pamk > ### Keywords: cluster multivariate > > ### ** Examples > > options(digits=3) > set.seed(20000) > face <- rFace(50,dMoNo=2,dNoEy=0,p=2) > pk1 <- pamk(face,krange=1:5,criterion="asw",critout=TRUE) 1 clusters 0 2 clusters 0.742 3 clusters 0.748 4 clusters 0.581 5 clusters 0.544 > pk2 <- pamk(face,krange=1:5,criterion="multiasw",ns=2,critout=TRUE) 1 clusters 0 2 clusters 0.749 3 clusters 0.727 4 clusters 0.584 5 clusters 0.582 > # "multiasw" is better for larger data sets, use larger ns then. 
> pk3 <- pamk(face,krange=1:5,criterion="ch",critout=TRUE) 1 clusters 0 2 clusters 181 3 clusters 210 4 clusters 204 5 clusters 181 > > > > cleanEx() > nameEx("piridge") > ### * piridge > > flush(stderr()); flush(stdout()) > > ### Name: piridge > ### Title: Ridgeline Pi-function > ### Aliases: piridge > ### Keywords: cluster multivariate > > ### ** Examples > > q <- piridge(seq(0,1,0.1),c(1,1),c(2,5),diag(2),diag(2)) > > > > cleanEx() > nameEx("piridge.zeroes") > ### * piridge.zeroes > > flush(stderr()); flush(stdout()) > > ### Name: piridge.zeroes > ### Title: Extrema of two-component Gaussian mixture > ### Aliases: piridge.zeroes > ### Keywords: cluster multivariate > > ### ** Examples > > q <- piridge.zeroes(0.2,c(1,1),c(2,5),diag(2),diag(2),by=0.1) > > > > cleanEx() > nameEx("plot.valstat") > ### * plot.valstat > > flush(stderr()); flush(stdout()) > > ### Name: plot.valstat > ### Title: Simulation-standardised plot and print of cluster validation > ### statistics > ### Aliases: plot.valstat print.valstat > ### Keywords: cluster multivariate > > ### ** Examples > > set.seed(20000) > options(digits=3) > face <- rFace(10,dMoNo=2,dNoEy=0,p=2) > clustermethod=c("kmeansCBI","hclustCBI","hclustCBI") > clustermethodpars <- list() > clustermethodpars[[2]] <- clustermethodpars[[3]] <- list() > clustermethodpars[[2]]$method <- "ward.D2" > clustermethodpars[[3]]$method <- "single" > methodname <- c("kmeans","ward","single") > cbs <- clusterbenchstats(face,G=2:3,clustermethod=clustermethod, + methodname=methodname,distmethod=rep(FALSE,3), + clustermethodpars=clustermethodpars,nnruns=2,kmruns=2,fnruns=2,avenruns=2) [1] "kmeansCBI" [1] "hclustCBI" [1] "hclustCBI" [1] "Computation of validity statistics" comsum 1 comsum 2 comsum 3 [1] "Simulation" 2 clusters; nn run 1 2 clusters; nn run 2 2 clusters; fn run 1 2 clusters; fn run 2 2 clusters; aven run 1 2 clusters; aven run 2 2 clusters; km run 1 2 clusters; km run 2 3 clusters; nn run 1 3 clusters; nn run 2 3 clusters; fn run 1 3 clusters; fn run 2 3 clusters; aven run 1 3 clusters; aven run 2 3 clusters; km run 1 3 clusters; km run 2 [1] "Simulation quantile re-standardisation" [1] "Simulation sd re-standardisation" > plot(cbs$stat,cbs$sim) > plot(cbs$stat,cbs$sim,statistic="dindex") > plot(cbs$stat,cbs$sim,statistic="avewithin") > pcbs <- print(cbs$sstat,aggregate=TRUE,weights=c(1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0)) avewithin method 2 3 1 kmeans -0.56 0.38 2 ward -0.17 -0.89 3 single -2.43 -0.76 mnnd method 2 3 1 kmeans -1.94 -0.15 2 ward 0.31 -2.64 3 single 0.79 0.71 cvnnd method 2 3 1 kmeans -1.36 0.33 2 ward -0.29 -0.61 3 single -0.57 -1.21 maxdiameter method 2 3 1 kmeans -0.6 0.17 2 ward -0.6 -0.73 3 single -1.9 -1.25 widestgap method 2 3 1 kmeans -0.85 0.69 2 ward -0.70 -1.16 3 single -0.70 -1.01 sindex method 2 3 1 kmeans -1.66 -0.18 2 ward -0.13 -0.62 3 single -0.13 0.62 minsep method 2 3 1 kmeans -1.66 -0.37 2 ward -0.13 -0.71 3 single -0.13 0.88 asw method 2 3 1 kmeans -0.61 0.21 2 ward -0.11 -0.91 3 single -2.43 -0.16 dindex method 2 3 1 kmeans 0.3 NaN 2 ward 0.3 NaN 3 single -3.0 NaN denscut method 2 3 1 kmeans NaN 0.60 2 ward NaN 0.47 3 single NaN 0.60 highdgap method 2 3 1 kmeans -0.106 0.69 2 ward -0.042 -1.16 3 single -2.760 -1.01 pearsongamma method 2 3 1 kmeans -0.50 0.34 2 ward -0.12 -1.44 3 single -2.53 -0.94 withinss method 2 3 1 kmeans -0.42 0.51 2 ward -0.15 -0.57 3 single -2.57 -1.25 entropy method 2 3 1 kmeans 0.47 0.85 2 ward -0.13 1.13 3 single -2.90 -1.27 pamc method 2 3 1 kmeans -0.334 0.34 2 ward -0.093 -0.32 3 single -2.691 
-0.91 dmode method 2 3 1 kmeans 0.20 NaN 2 ward 0.22 NaN 3 single -2.95 NaN aggregate method 2 3 1 kmeans -0.91 0.18 2 ward -0.14 -0.98 3 single -1.69 -0.36 > # Some of the values are "NaN" because due to the low number of runs of > # the stupid clustering methods there is no variation. If this happens > # in a real application, nnruns etc. should be chosen higher than 2. > # Also useallg=TRUE in clusterbenchstats may help. > # > # Finding the best aggregated value: > mpcbs <- as.matrix(pcbs[[17]][,-1]) > which(mpcbs==max(mpcbs),arr.ind=TRUE) row col [1,] 1 2 > # row=1 refers to the first clustering method kmeansCBI, > # col=2 refers to the second number of clusters, which is 3 in g=2:3. > > > > cleanEx() > nameEx("plotcluster") > ### * plotcluster > > flush(stderr()); flush(stdout()) > > ### Name: plotcluster > ### Title: Discriminant projection plot. > ### Aliases: plotcluster > ### Keywords: multivariate classif > > ### ** Examples > > set.seed(4634) > face <- rFace(300,dMoNo=2,dNoEy=0) > grface <- as.integer(attr(face,"grouping")) > plotcluster(face,grface) > plotcluster(face,grface==1) > plotcluster(face,grface, clnum=1, method="vbc") [1] "Cluster indicator has more than 2 values" > > > > cleanEx() > nameEx("prediction.strength") > ### * prediction.strength > > flush(stderr()); flush(stdout()) > > ### Name: prediction.strength > ### Title: Prediction strength for estimating number of clusters > ### Aliases: prediction.strength print.predstr > ### Keywords: cluster multivariate > > ### ** Examples > > options(digits=3) > set.seed(98765) > iriss <- iris[sample(150,20),-5] > prediction.strength(iriss,2,3,M=3) Prediction strength Clustering method: kmeans Maximum number of clusters: 3 Resampled data sets: 3 Mean pred.str. for numbers of clusters: 1 1 0.889 Cutoff value: 0.8 Largest number of clusters better than cutoff: 3 > prediction.strength(iriss,2,3,M=3,clustermethod=claraCBI) Prediction strength Clustering method: clara/pam Maximum number of clusters: 3 Resampled data sets: 3 Mean pred.str. for numbers of clusters: 1 1 0.933 Cutoff value: 0.8 Largest number of clusters better than cutoff: 3 > # The examples are fast, but of course M should really be larger. 
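> # A sketch of a more realistic run (not executed here to keep the test fast):
> # the same calls as above with only M increased.
> # prediction.strength(iriss,2,3,M=50)
> # prediction.strength(iriss,2,3,M=50,clustermethod=claraCBI)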
> > > > cleanEx() > nameEx("rFace") > ### * rFace > > flush(stderr()); flush(stdout()) > > ### Name: rFace > ### Title: "Face-shaped" clustered benchmark datasets > ### Aliases: rFace > ### Keywords: data > > ### ** Examples > > set.seed(4634) > face <- rFace(600,dMoNo=2,dNoEy=0) > grface <- as.integer(attr(face,"grouping")) > plot(face, col = grface) > # pairs(face, col = grface, main ="rFace(600,dMoNo=2,dNoEy=0)") > > > > cleanEx() > nameEx("randcmatrix") > ### * randcmatrix > > flush(stderr()); flush(stdout()) > > ### Name: randcmatrix > ### Title: Random partition matrix > ### Aliases: randcmatrix > ### Keywords: cluster > > ### ** Examples > > set.seed(111) > randcmatrix(10,2,1) [,1] [,2] [1,] 0 1 [2,] 0 1 [3,] 1 0 [4,] 0 1 [5,] 1 0 [6,] 1 0 [7,] 1 0 [8,] 0 1 [9,] 1 0 [10,] 1 0 > > > > cleanEx() > nameEx("randconf") > ### * randconf > > flush(stderr()); flush(stdout()) > > ### Name: randconf > ### Title: Generate a sample indicator vector > ### Aliases: randconf > ### Keywords: distribution > > ### ** Examples > > randconf(10,3) [1] FALSE FALSE FALSE TRUE FALSE FALSE TRUE FALSE TRUE FALSE > > > > cleanEx() > nameEx("randomclustersim") > ### * randomclustersim > > flush(stderr()); flush(stdout()) > > ### Name: randomclustersim > ### Title: Simulation of validity indexes based on random clusterings > ### Aliases: randomclustersim > ### Keywords: multivariate cluster > > ### ** Examples > > set.seed(20000) > options(digits=3) > face <- rFace(10,dMoNo=2,dNoEy=0,p=2) > rmx <- randomclustersim(dist(face),datanp=face,npstats=TRUE,G=2:3, + nnruns=2,kmruns=2, fnruns=1,avenruns=1,nnk=2) 2 clusters; nn run 1 2 clusters; nn run 2 2 clusters; fn run 1 2 clusters; aven run 1 2 clusters; km run 1 2 clusters; km run 2 3 clusters; nn run 1 3 clusters; nn run 2 3 clusters; fn run 1 3 clusters; aven run 1 3 clusters; km run 1 3 clusters; km run 2 > ## Not run: > ##D rmx$km # Produces slightly different but basically identical results on ATLAS > ## End(Not run) > rmx$aven [[1]] NULL [[2]] avewithin mnnd cvnnd maxdiameter widestgap sindex minsep asw dindex 1 0.773 0.317 0.97 0.482 0.715 0.405 0.405 0.648 1 denscut highdgap pearsongamma withinss entropy pamc kdnorm kdunif 1 1 0.862 0.885 0.785 0.971 0.832 0.599 0.541 [[3]] avewithin mnnd cvnnd maxdiameter widestgap sindex minsep asw dindex 1 0.847 0.385 0.969 0.692 0.715 0.317 0.272 0.609 1 denscut highdgap pearsongamma withinss entropy pamc kdnorm kdunif 1 1 0.862 0.867 0.919 0.96 0.912 0.54 0.505 > rmx$fn [[1]] NULL [[2]] avewithin mnnd cvnnd maxdiameter widestgap sindex minsep asw dindex 1 0.773 0.317 0.97 0.482 0.715 0.405 0.405 0.648 1 denscut highdgap pearsongamma withinss entropy pamc kdnorm kdunif 1 1 0.862 0.885 0.785 0.971 0.832 0.599 0.541 [[3]] avewithin mnnd cvnnd maxdiameter widestgap sindex minsep asw dindex 1 0.718 0.358 0.719 0.493 0.595 0.0908 0 0.206 1 denscut highdgap pearsongamma withinss entropy pamc kdnorm kdunif 1 0.906 0.804 0.786 0.76 0.865 0.837 0.484 0.376 > rmx$nn [[1]] NULL [[2]] avewithin mnnd cvnnd maxdiameter widestgap sindex minsep asw dindex 1 0.500 0.332 0.82 0.000 0.595 0.285 0.285 0.000 0.847 2 0.773 0.317 0.97 0.482 0.715 0.405 0.405 0.648 1.000 denscut highdgap pearsongamma withinss entropy pamc kdnorm kdunif 1 1 0.608 0.498 0.082 0.469 0.675 0.608 0.558 2 1 0.862 0.885 0.785 0.971 0.832 0.599 0.541 [[3]] avewithin mnnd cvnnd maxdiameter widestgap sindex minsep asw dindex 1 0.764 0.279 0.980 0.482 0.715 0.190 0.0827 0.403 1 2 0.838 0.285 0.964 0.661 0.715 0.315 0.2846 0.587 1 denscut highdgap pearsongamma withinss 
entropy pamc kdnorm kdunif 1 0.995 0.862 0.801 0.790 0.817 0.843 0.475 0.448 2 1.000 0.862 0.857 0.912 0.991 0.913 0.415 0.562 > > > > > cleanEx() > nameEx("regmix") > ### * regmix > > flush(stderr()); flush(stdout()) > > ### Name: regmix > ### Title: Mixture Model ML for Clusterwise Linear Regression > ### Aliases: regmix regem > ### Keywords: cluster regression > > ### ** Examples > > ## Not run: > ##D # This apparently gives slightly different > ##D # but data-analytically fine results > ##D # on some versions of R. > ##D set.seed(12234) > ##D data(tonedata) > ##D attach(tonedata) > ##D rmt1 <- regmix(stretchratio,tuned,nclust=1:2) > ##D # nclust=1:2 makes the example fast; > ##D # a more serious application would rather use the default. > ##D rmt1$g > ##D round(rmt1$bic,digits=2) > ##D # start with initial parameter values > ##D cln <- 3 > ##D n <- 150 > ##D initcoef <- cbind(c(2,0),c(0,1),c(0,2.5)) > ##D initvar <- c(0.001,0.0001,0.5) > ##D initeps <- c(0.4,0.3,0.3) > ##D # computation of m from initial parameters > ##D m <- matrix(nrow=n, ncol=cln) > ##D stm <- numeric(0) > ##D for (i in 1:cln) > ##D for (j in 1:n){ > ##D m[j,i] <- initeps[i]*dnorm(tuned[j],mean=initcoef[1,i]+ > ##D initcoef[2,i]*stretchratio[j], sd=sqrt(initvar[i])) > ##D } > ##D for (j in 1:n){ > ##D stm[j] <- sum(m[j,]) > ##D for (i in 1:cln) > ##D m[j,i] <- m[j,i]/stm[j] > ##D } > ##D rmt2 <- regem(stretchratio, tuned, m, cln) > ## End(Not run) > > > > cleanEx() > nameEx("ridgeline") > ### * ridgeline > > flush(stderr()); flush(stdout()) > > ### Name: ridgeline > ### Title: Ridgeline computation > ### Aliases: ridgeline > ### Keywords: cluster multivariate > > ### ** Examples > > ridgeline(0.5,c(1,1),c(2,5),diag(2),diag(2)) [,1] [1,] 1.5 [2,] 3.0 > > > > cleanEx() > nameEx("ridgeline.diagnosis") > ### * ridgeline.diagnosis > > flush(stderr()); flush(stdout()) > > ### Name: ridgeline.diagnosis > ### Title: Ridgeline plots, ratios and unimodality > ### Aliases: ridgeline.diagnosis > ### Keywords: cluster multivariate > > ### ** Examples > > muarray <- cbind(c(0,0),c(0,0.1),c(10,10)) > sigmaarray <- array(c(diag(2),diag(2),diag(2)),dim=c(2,2,3)) > rd <- + ridgeline.diagnosis(c(0.5,0.3,0.2),muarray,sigmaarray,ridgelineplot="matrix",by=0.1) > # Much slower but more precise with default by=0.001. 
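> # Not run: the same call with the default grid width by=0.001 mentioned
> # above; this is considerably slower but more precise.
> # rd <- ridgeline.diagnosis(c(0.5,0.3,0.2),muarray,sigmaarray,
> #                           ridgelineplot="matrix")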
> > > > cleanEx() > nameEx("simmatrix") > ### * simmatrix > > flush(stderr()); flush(stdout()) > > ### Name: simmatrix > ### Title: Extracting intersections between clusters from fpc-object > ### Aliases: simmatrix > ### Keywords: utilities > > ### ** Examples > > set.seed(190000) > data(tonedata) > # Note: If you do not use the installed package, replace this by > # tonedata <- read.table("(path/)tonedata.txt", header=TRUE) > attach(tonedata) > tonefix <- fixreg(stretchratio,tuned,mtf=1,ir=20) > simmatrix(tonefix)[sseg(2,3)] [1] NA > > > > cleanEx() detaching ‘tonedata’ > nameEx("solvecov") > ### * solvecov > > flush(stderr()); flush(stdout()) > > ### Name: solvecov > ### Title: Inversion of (possibly singular) symmetric matrices > ### Aliases: solvecov > ### Keywords: array > > ### ** Examples > > x <- c(1,0,0,1,0,1,0,0,1) > dim(x) <- c(3,3) > solvecov(x) > > > > cleanEx() > nameEx("sseg") > ### * sseg > > flush(stderr()); flush(stdout()) > > ### Name: sseg > ### Title: Position in a similarity vector > ### Aliases: sseg > ### Keywords: utilities > > ### ** Examples > > sseg(3,4) [1] 9 > > > > cleanEx() > nameEx("stupidkaven") > ### * stupidkaven > > flush(stderr()); flush(stdout()) > > ### Name: stupidkaven > ### Title: Stupid average dissimilarity random clustering > ### Aliases: stupidkaven > ### Keywords: multivariate cluster > > ### ** Examples > > set.seed(20000) > options(digits=3) > face <- rFace(200,dMoNo=2,dNoEy=0,p=2) > stupidkaven(dist(face),3) [1] 2 3 2 2 2 3 3 2 2 2 3 2 3 2 3 2 3 2 3 3 2 2 3 3 2 3 2 2 3 2 2 2 1 2 1 2 3 [38] 3 3 1 3 1 1 2 2 2 3 1 1 1 1 3 3 2 2 1 1 1 3 1 3 1 3 3 1 3 1 3 1 1 1 2 1 3 [75] 3 1 1 1 3 2 1 1 1 2 1 3 1 2 3 3 2 1 1 1 1 2 3 1 2 2 2 2 2 2 2 2 2 2 2 2 2 [112] 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 [149] 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 [186] 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 > > > > cleanEx() > nameEx("stupidkcentroids") > ### * stupidkcentroids > > flush(stderr()); flush(stdout()) > > ### Name: stupidkcentroids > ### Title: Stupid k-centroids random clustering > ### Aliases: stupidkcentroids > ### Keywords: multivariate cluster > > ### ** Examples > > set.seed(20000) > options(digits=3) > face <- rFace(200,dMoNo=2,dNoEy=0,p=2) > stupidkcentroids(dist(face),3) $partition [1] 3 3 3 2 3 1 1 2 2 3 1 2 1 2 1 2 1 3 1 1 2 2 1 1 2 1 3 2 1 2 2 2 1 2 1 2 1 [38] 1 3 1 3 1 3 2 2 2 3 1 1 1 1 3 3 2 2 1 1 3 3 1 3 1 3 3 1 3 1 3 1 1 1 2 1 3 [75] 3 1 1 1 3 2 3 1 1 2 1 3 1 2 3 3 1 1 1 1 1 2 3 1 3 3 3 3 3 3 3 3 3 3 2 2 3 [112] 3 3 2 2 2 3 3 3 3 3 3 3 3 3 3 2 2 2 3 3 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 [149] 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 2 2 2 2 2 2 2 2 2 [186] 2 2 2 2 2 2 2 2 2 2 2 2 3 2 2 $centroids [1] 51 55 63 $distances [1] TRUE > > > > cleanEx() > nameEx("stupidkfn") > ### * stupidkfn > > flush(stderr()); flush(stdout()) > > ### Name: stupidkfn > ### Title: Stupid farthest neighbour random clustering > ### Aliases: stupidkfn > ### Keywords: multivariate cluster > > ### ** Examples > > set.seed(20000) > options(digits=3) > face <- rFace(200,dMoNo=2,dNoEy=0,p=2) > stupidkfn(dist(face),3) [1] 1 3 1 2 1 3 3 2 2 1 3 2 3 2 3 2 3 1 3 3 1 1 3 3 2 3 1 2 3 2 2 1 3 2 3 1 3 [38] 3 3 3 3 1 3 2 2 2 3 1 3 1 1 3 3 2 2 1 1 1 3 1 3 1 3 3 3 3 1 3 1 1 1 2 1 3 [75] 3 1 1 1 3 2 1 1 1 2 1 3 3 2 3 3 2 3 1 1 1 2 3 1 1 1 1 1 1 1 1 1 1 1 1 1 1 [112] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 [149] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 
1 [186] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 > > > > cleanEx() > nameEx("stupidknn") > ### * stupidknn > > flush(stderr()); flush(stdout()) > > ### Name: stupidknn > ### Title: Stupid nearest neighbour random clustering > ### Aliases: stupidknn > ### Keywords: multivariate cluster > > ### ** Examples > > set.seed(20000) > options(digits=3) > face <- rFace(200,dMoNo=2,dNoEy=0,p=2) > stupidknn(dist(face),3) [1] 2 3 2 2 2 3 3 2 2 2 3 2 3 2 3 2 3 2 3 3 2 2 3 3 2 3 2 2 3 2 2 2 3 2 3 2 3 [38] 3 3 1 3 1 1 1 1 2 3 1 1 1 1 3 3 1 2 1 1 1 3 1 3 1 3 3 1 3 1 1 1 1 1 1 1 3 [75] 3 1 1 1 3 1 1 1 1 2 1 3 1 2 3 1 1 1 1 1 1 1 3 1 2 2 2 2 2 2 2 2 2 2 2 2 2 [112] 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 [149] 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 [186] 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 > > > > cleanEx() > nameEx("tdecomp") > ### * tdecomp > > flush(stderr()); flush(stdout()) > > ### Name: tdecomp > ### Title: Root of singularity-corrected eigenvalue decomposition > ### Aliases: tdecomp > ### Keywords: array > > ### ** Examples > > x <- rnorm(10) > y <- rnorm(10) > z <- cov(cbind(x,y)) > round(tdecomp(z),digits=2) [,1] [,2] [1,] -0.48 1.03 [2,] -0.62 -0.29 > > > > cleanEx() > nameEx("unimodal.ind") > ### * unimodal.ind > > flush(stderr()); flush(stdout()) > > ### Name: unimodal.ind > ### Title: Is a fitted denisity unimodal or not? > ### Aliases: unimodal.ind > ### Keywords: univar > > ### ** Examples > > unimodal.ind(c(1,3,3,4,2,1,0,0)) [1] TRUE > > > > cleanEx() > nameEx("weightplots") > ### * weightplots > > flush(stderr()); flush(stdout()) > > ### Name: weightplots > ### Title: Ordered posterior plots > ### Aliases: weightplots > ### Keywords: multivariate cluster > > ### ** Examples > > require(mclust) Loading required package: mclust Package 'mclust' version 6.1.1 Type 'citation("mclust")' for citing this R package in publications. > require(MASS) Loading required package: MASS > data(crabs) > dc <- crabs[,4:8] > cm <- mclustBIC(crabs[,4:8],G=9,modelNames="EEE") > scm <- summary(cm,crabs[,4:8]) > weightplots(scm$z,clusternumbers=1:3,ask=FALSE) > weightplots(scm$z,clusternumbers=1:3,allcol=1:9, ask=FALSE, + legendposition=c(5,0.7)) > # Remove ask=FALSE to have time to watch the plots. 
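> # Not run: the interactive variant suggested above, i.e. the same call
> # without ask=FALSE, which presumably pauses between the plots.
> # weightplots(scm$z,clusternumbers=1:3,allcol=1:9,
> #             legendposition=c(5,0.7))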
> > > > cleanEx() detaching ‘package:MASS’, ‘package:mclust’ > nameEx("wfu") > ### * wfu > > flush(stderr()); flush(stdout()) > > ### Name: wfu > ### Title: Weight function (for Mahalabobis distances) > ### Aliases: wfu > ### Keywords: arith > > ### ** Examples > > md <- seq(0,10,by=0.1) > round(wfu(md,ca=5,ca2=8),digits=2) [1] 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 [16] 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 [31] 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 [46] 1.00 1.00 1.00 1.00 1.00 1.00 0.97 0.93 0.90 0.87 0.83 0.80 0.77 0.73 0.70 [61] 0.67 0.63 0.60 0.57 0.53 0.50 0.47 0.43 0.40 0.37 0.33 0.30 0.27 0.23 0.20 [76] 0.17 0.13 0.10 0.07 0.03 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 [91] 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 > > > > cleanEx() > nameEx("xtable") > ### * xtable > > flush(stderr()); flush(stdout()) > > ### Name: xtable > ### Title: Partition crosstable with empty clusters > ### Aliases: xtable > ### Keywords: array > > ### ** Examples > > c1 <- 1:3 > c2 <- c(1,1,2) > xtable(c1,c2,3) [,1] [,2] [,3] [1,] 1 0 0 [2,] 1 0 0 [3,] 0 1 0 > > > > cleanEx() > nameEx("zmisclassification.matrix") > ### * zmisclassification.matrix > > flush(stderr()); flush(stdout()) > > ### Name: zmisclassification.matrix > ### Title: Matrix of misclassification probabilities between mixture > ### components > ### Aliases: zmisclassification.matrix > ### Keywords: cluster multivariate > > ### ** Examples > > set.seed(12345) > m <- rpois(20,lambda=5) > dim(m) <- c(5,4) > m <- m/apply(m,1,sum) > round(zmisclassification.matrix(m,symmetric=FALSE),digits=2) [,1] [,2] [,3] [,4] [1,] 0.00 0.73 1.00 0.75 [2,] 0.10 0.00 0.56 0.73 [3,] 0.16 0.17 0.00 0.16 [4,] 0.10 0.30 0.56 0.00 > > > > ### *