R : Copyright 2005, The R Foundation for Statistical Computing Version 2.1.1 (2005-06-20), ISBN 3-900051-07-0 R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for a HTML browser interface to help. Type 'q()' to quit R. > ### *
> ### > attach(NULL, name = "CheckExEnv") > assign(".CheckExEnv", as.environment(2), pos = length(search())) # base > ## add some hooks to label plot pages for base and grid graphics > setHook("plot.new", ".newplot.hook") > setHook("persp", ".newplot.hook") > setHook("grid.newpage", ".gridplot.hook") > > assign("cleanEx", + function(env = .GlobalEnv) { + rm(list = ls(envir = env, all.names = TRUE), envir = env) + RNGkind("default", "default") + set.seed(1) + options(warn = 1) + delayedAssign("T", stop("T used instead of TRUE"), + assign.env = .CheckExEnv) + delayedAssign("F", stop("F used instead of FALSE"), + assign.env = .CheckExEnv) + sch <- search() + newitems <- sch[! sch %in% .oldSearch] + for(item in rev(newitems)) + eval(substitute(detach(item), list(item=item))) + missitems <- .oldSearch[! .oldSearch %in% sch] + if(length(missitems)) + warning("items ", paste(missitems, collapse=", "), + " have been removed from the search path") + }, + env = .CheckExEnv) > assign("..nameEx", "__{must remake R-ex/*.R}__", env = .CheckExEnv) # for now > assign("ptime", proc.time(), env = .CheckExEnv) > grDevices::postscript("clue-Examples.ps") > assign("par.postscript", graphics::par(no.readonly = TRUE), env = .CheckExEnv) > options(contrasts = c(unordered = "contr.treatment", ordered = "contr.poly")) > options(warn = 1) > library('clue') > > assign(".oldSearch", search(), env = .CheckExEnv) > assign(".oldNS", loadedNamespaces(), env = .CheckExEnv) > cleanEx(); ..nameEx <- "Cassini" > > ### * Cassini > > flush(stderr()); flush(stdout()) > > ### Name: Cassini > ### Title: Cassini Data > ### Aliases: Cassini > ### Keywords: datasets > > ### ** Examples > > data("Cassini") > op <- par(mfcol = c(1, 2)) > ## Plot the data set: > plot(Cassini$x, col = as.integer(Cassini$classes), + xlab = "", ylab = "") > ## Create a "random" k-means partition of the data: > set.seed(1234) > party <- kmeans(Cassini$x, 3) > ## And plot that. 
> plot(Cassini$x, col = cl_class_ids(party), + xlab = "", ylab = "") > ## (We can see the problem ...) > par(op) > > > > graphics::par(get("par.postscript", env = .CheckExEnv)) > cleanEx(); ..nameEx <- "GVME.Consensus" > > ### * GVME.Consensus > > flush(stderr()); flush(stdout()) > > ### Name: GVME_Consensus > ### Title: Gordon-Vichi Macroeconomic Consensus Partition Data > ### Aliases: GVME_Consensus > ### Keywords: datasets > > ### ** Examples > > ## Load the consensus partitions. > data("GVME_Consensus") > ## Fuzziness using the Partition Coefficient. > cl_fuzziness(GVME_Consensus) Fuzziness using normalized partition coefficient: [1] 0.5422432 0.4842278 0.5408150 0.5722069 > ## (Corresponds to 1 - F in the source.) > ## Dissimilarities: > cl_dissimilarity(GVME_Consensus) Dissimilarities using minimal euclidean membership distances: MF1 MF2 JMF MF2 0.6855290 JMF 0.2866566 0.9686620 S&S 0.1951666 0.7241630 0.3305662 > cl_dissimilarity(GVME_Consensus, method = "comem") Dissimilarities using euclidean comembership distances: MF1 MF2 JMF MF2 2.2088384 JMF 0.8876256 3.0564347 S&S 0.6347740 2.3419563 1.0324972 > > > > cleanEx(); ..nameEx <- "Kinship82.Consensus" > > ### * Kinship82.Consensus > > flush(stderr()); flush(stdout()) > > ### Name: Kinship82_Consensus > ### Title: Gordon-Vichi Kinship82 Consensus Partition Data > ### Aliases: Kinship82_Consensus > ### Keywords: datasets > > ### ** Examples > > ## Load the consensus partitions. > data("Kinship82_Consensus") > ## Fuzziness using the Partition Coefficient. > cl_fuzziness(Kinship82_Consensus) Fuzziness using normalized partition coefficient: [1] 0.5751 0.2546 0.3894 > ## (Corresponds to 1 - F in the source.) 
> ## Dissimilarities: > cl_dissimilarity(Kinship82_Consensus) Dissimilarities using minimal euclidean membership distances: MF1 MF2 MF2 1.1353414 JMF 0.8368990 0.5069517 > cl_dissimilarity(Kinship82_Consensus, method = "comem") Dissimilarities using euclidean comembership distances: MF1 MF2 MF2 3.135140 JMF 2.091081 1.501794 > > > > cleanEx(); ..nameEx <- "agreement" > > ### * agreement > > flush(stderr()); flush(stdout()) > > ### Name: agreement > ### Title: Agreement Between Partitions or Hierarchies > ### Aliases: cl_agreement > ### Keywords: cluster > > ### ** Examples > > ## An ensemble of partitions. > data("CKME") > pens <- CKME[1 : 20] # for saving precious time ... > summary(c(cl_agreement(pens))) Min. 1st Qu. Median Mean 3rd Qu. Max. 0.3093 0.3097 0.4229 0.5741 0.9368 1.0000 > summary(c(cl_agreement(pens, method = "Rand"))) Min. 1st Qu. Median Mean 3rd Qu. Max. 0.7191 0.7381 0.7527 0.8369 0.9949 1.0000 > summary(c(cl_agreement(pens, method = "diag"))) Min. 1st Qu. Median Mean 3rd Qu. Max. 0.5230 0.5235 0.6670 0.7319 0.9960 1.0000 > cl_agreement(pens[1:5], pens[6:7], method = "NMI") Agreements using normalized mutual information: 1 2 1 0.5992095 1.0000000 2 0.5992095 1.0000000 3 1.0000000 0.5992095 4 0.9405846 0.5998537 5 1.0000000 0.5992095 > ## Equivalently, using subscripting. > cl_agreement(pens, method = "NMI")[1:5, 6:7] Agreements using normalized mutual information: 6 7 1 0.5992095 1.0000000 2 0.5992095 1.0000000 3 1.0000000 0.5992095 4 0.9405846 0.5998537 5 1.0000000 0.5992095 > > ## An ensemble of hierarchies. > d <- dist(USArrests) > hclust_methods <- c("ward", "single", "complete", "average", + "mcquitty", "median", "centroid") > hclust_results <- lapply(hclust_methods, function(m) hclust(d, m)) > hens <- cl_ensemble(list = hclust_results) > names(hens) <- hclust_methods > summary(c(cl_agreement(hens))) Min. 1st Qu. Median Mean 3rd Qu. Max. 
1.910e-05 2.188e-05 2.790e-04 5.070e-04 5.654e-04 2.926e-03 > summary(c(cl_agreement(hens, method = "cophenetic"))) Min. 1st Qu. Median Mean 3rd Qu. Max. 0.4834 0.5880 0.9917 0.8684 0.9968 0.9986 > cl_agreement(hens[1:3], hens[4:5], method = "gamma") Agreements using rate of inversions: average mcquitty ward 0.9762118 0.9774923 single 0.8807656 0.8846339 complete 0.9903135 0.9865946 > > > > cleanEx(); ..nameEx <- "boot" > > ### * boot > > flush(stderr()); flush(stdout()) > > ### Name: boot > ### Title: Bootstrap Resampling of Clustering Algorithms > ### Aliases: cl_boot > ### Keywords: cluster > > ### ** Examples > > ## Study e.g. the effect of random kmeans() initializations. > data("Cassini") > pens <- cl_boot(Cassini$x, 15, 3) > diss <- cl_dissimilarity(pens) > summary(c(diss)) Min. 1st Qu. Median Mean 3rd Qu. Max. 0.00 0.00 25.50 18.18 30.69 30.89 > plot(hclust(diss)) > > > > cleanEx(); ..nameEx <- "clustering" > > ### * clustering > > flush(stderr()); flush(stdout()) > > ### Name: clustering > ### Title: Partitions and Hierarchies > ### Aliases: is.cl_partition is.cl_hard_partition is.cl_soft_partition > ### is.cl_hierarchy > ### Keywords: cluster > > ### ** Examples > > data("Cassini") > pcl <- kmeans(Cassini$x, 3) > is.cl_partition(pcl) [1] TRUE > is.cl_hard_partition(pcl) [1] TRUE > is.cl_soft_partition(pcl) [1] FALSE > > hcl <- hclust(dist(USArrests)) > is.cl_partition(hcl) [1] FALSE > is.cl_hierarchy(hcl) [1] TRUE > > > > cleanEx(); ..nameEx <- "consensus" > > ### * consensus > > flush(stderr()); flush(stdout()) > > ### Name: consensus > ### Title: Consensus Partitions and Hierarchies > ### Aliases: cl_consensus > ### Keywords: cluster > > ### ** Examples > > ## Consensus partition for the Rosenberg-Kim kinship terms partition > ## data based on co-membership dissimilarities. 
> data("Kinship82") > m1 <- cl_consensus(Kinship82, method = "GV3", + control = list(k = 3, verbose = TRUE)) Iteration: 1 Rho: 0.0978703 P: 51.14835 Iteration: 2 Rho: 0.978703 P: 0.2692162 Iteration: 3 Rho: 9.78703 P: 0.1837687 Iteration: 4 Rho: 97.8703 P: 0.03277258 Iteration: 5 Rho: 978.703 P: 0.0008170748 Iteration: 6 Rho: 9787.03 P: 9.28507e-06 Iteration: 7 Rho: 97870.3 P: 9.410234e-08 Iteration: 8 Rho: 978703 P: 9.4229e-10 Iteration: 9 Rho: 9787030 P: 9.424172e-12 Iteration: 10 Rho: 97870297 P: 9.4243e-14 Iteration: 11 Rho: 978702969 P: 9.424312e-16 Iteration: 12 Rho: 9787029692 P: 9.424313e-18 > ## (Note that one should really use several replicates of this.) > ## Value for criterion function to be minimized: > sum(cl_dissimilarity(Kinship82, m1, "comem") ^ 2) [1] 2411.388 > ## Compare to the consensus solution given in Gordon & Vichi (2001). > data("Kinship82_Consensus") > m2 <- Kinship82_Consensus[["JMF"]] > sum(cl_dissimilarity(Kinship82, m2, "comem") ^ 2) [1] 2422.397 > ## Seems we get a better solution ... > ## How dissimilar are these solutions? > cl_dissimilarity(m1, m2, "comem") Dissimilarities using euclidean comembership distances: 1 1 0.3708908 > ## How "fuzzy" are they? > cl_fuzziness(cl_ensemble(m1, m2)) Fuzziness using normalized partition coefficient: [1] 0.4360393 0.3894000 > ## Do the "nearest" hard partitions fully agree? > cl_dissimilarity(as.cl_hard_partition(m1), + as.cl_hard_partition(m2)) Dissimilarities using minimal euclidean membership distances: 1 1 0 > ## Hmm ... > > ## Consensus partition for the Gordon and Vichi (2001) macroeconomic > ## partition data based on Euclidean dissimilarities. > data("GVME") > set.seed(1) > m1 <- cl_consensus(GVME, method = "GV1", + control = list(k = 2, verbose = TRUE)) Iteration: 1 Old value: 8.37857 New value: 3.005484 Iteration: 2 Old value: 3.005484 New value: 3.005484 > ## (Note that one should really use several replicates of this.) 
> ## Value of criterion function to be minimized: > sum(cl_dissimilarity(GVME, m1) ^ 2) [1] 15.02742 > ## Compare to the consensus solution given in Gordon & Vichi (2001). > data("GVME_Consensus") > m2 <- GVME_Consensus[["MF1"]] > sum(cl_dissimilarity(GVME, m2) ^ 2) [1] 17.69469 > ## Seems we get a better solution ... > ## (But note that for partitions with different numbers of classes, > ## they only use the matched classes for computing dissimilarities.) > ## And in fact, it is qualitatively different: > table(as.cl_hard_partition(m1), + as.cl_hard_partition(m2)) 0 1 0 16 5 1 5 16 > ## Hmm ... > > > > cleanEx(); ..nameEx <- "dissimilarity" > > ### * dissimilarity > > flush(stderr()); flush(stdout()) > > ### Name: dissimilarity > ### Title: Dissimilarity Between Partitions or Hierarchies > ### Aliases: cl_dissimilarity > ### Keywords: cluster > > ### ** Examples > > ## An ensemble of partitions. > data("CKME") > pens <- CKME[1 : 30] > diss <- cl_dissimilarity(pens) > summary(c(diss)) Min. 1st Qu. Median Mean 3rd Qu. Max. 0.00 4.69 25.02 18.45 30.69 30.89 > cl_dissimilarity(pens[1:5], pens[6:7]) Dissimilarities using minimal euclidean membership distances: 1 2 1 30.88689 0.00000 2 30.88689 0.00000 3 0.00000 30.88689 4 5.09902 30.65942 5 0.00000 30.88689 > ## Equivalently, using subscripting. > diss[1:5, 6:7] Dissimilarities using minimal euclidean membership distances: 6 7 1 30.88689 0.00000 2 30.88689 0.00000 3 0.00000 30.88689 4 5.09902 30.65942 5 0.00000 30.88689 > ## Can use the dissimilarities for "secondary" clustering > ## (e.g. 
obtaining hierarchies of partitions): > hc <- hclust(diss) > plot(hc) > > > > cleanEx(); ..nameEx <- "ensemble" > > ### * ensemble > > flush(stderr()); flush(stdout()) > > ### Name: ensemble > ### Title: Cluster Ensembles > ### Aliases: cl_ensemble as.cl_ensemble is.cl_ensemble > ### Keywords: cluster > > ### ** Examples > > d <- dist(USArrests) > hclust_methods <- c("ward", "single", "complete", "average", + "mcquitty", "median", "centroid") > hclust_results <- lapply(hclust_methods, function(m) hclust(d, m)) > ## Now create an ensemble from the results. > hens <- cl_ensemble(list = hclust_results) > names(hens) <- hclust_methods > hens An ensemble of 7 hierarchies of 50 objects > ## Subscripting. > hens[1 : 3] An ensemble of 3 hierarchies of 50 objects > ## Replication. > rep(hens, 3) An ensemble of 21 hierarchies of 50 objects > ## And continue to analyze the ensemble, e.g. > cl_dissimilarity(hens, method = "gamma") Dissimilarities using rate of inversions: ward single complete average mcquitty single 0.128422035 complete 0.027424303 0.125826331 average 0.023788182 0.119234360 0.009686541 mcquitty 0.022507670 0.115366146 0.013405362 0.008211285 median 0.022022142 0.126672002 0.020720288 0.014138989 0.020594905 centroid 0.031231159 0.118626117 0.015432840 0.008152594 0.015547552 median single complete average mcquitty median centroid 0.010828331 > > > > cleanEx(); ..nameEx <- "fuzziness" > > ### * fuzziness > > flush(stderr()); flush(stdout()) > > ### Name: fuzziness > ### Title: Partition Fuzziness > ### Aliases: cl_fuzziness > ### Keywords: cluster > > ### ** Examples > > if(require("e1071", quiet = TRUE)) { + ## Use an on-line version of fuzzy c-means from package e1071 if + ## available. + data("Cassini") + pens <- cl_boot(Cassini$x, B = 15, k = 3, algorithm = "cmeans", + parameters = list(method = "ufcl")) + pens + summary(cl_fuzziness(pens, "PC")) + summary(cl_fuzziness(pens, "PE")) + } Loading required package: class Min. 1st Qu. Median Mean 3rd Qu. Max. 
0.5501 0.5666 0.5775 0.5847 0.5888 0.6673 > > > > cleanEx(); ..nameEx <- "ls.fit.ultrametric" > > ### * ls.fit.ultrametric > > flush(stderr()); flush(stdout()) > > ### Name: ls_fit_ultrametric > ### Title: Least Squares Fit of Ultrametrics to Dissimilarities > ### Aliases: ls_fit_ultrametric > ### Keywords: cluster optimize > > ### ** Examples > > ## Least squares fit of an ultrametric to the Miller-Nicely consonant > ## phoneme confusion data. > data("Phonemes") > ## Note that the Phonemes data set has the consonant misclassification > ## probabilities, i.e., the similarities between the phonemes. > d <- 1 - as.dist(Phonemes) > u <- ls_fit_ultrametric(d, control = list(verbose = TRUE)) Iteration: 1 Rho: 0.04900063 P: 1.241596 Iteration: 2 Rho: 0.4900063 P: 0.1532151 Iteration: 3 Rho: 4.900063 P: 0.01698076 Iteration: 4 Rho: 49.00063 P: 0.0002151106 Iteration: 5 Rho: 490.0063 P: 2.288047e-06 Iteration: 6 Rho: 4900.063 P: 2.302562e-08 Iteration: 7 Rho: 49000.63 P: 2.304022e-10 Iteration: 8 Rho: 490006.3 P: 2.304168e-12 Iteration: 9 Rho: 4900063 P: 2.304183e-14 Iteration: 10 Rho: 49000626 P: 2.304184e-16 > ## Cophenetic correlation: > cor(d, u) [1] 0.9711138 > ## Dendrogram: > plot(u) > ## ("Basically" the same as Figure 1 in de Soete (1986).) > > > > cleanEx(); ..nameEx <- "medoid" > > ### * medoid > > flush(stderr()); flush(stdout()) > > ### Name: medoid > ### Title: Medoid Partitions and Hierarchies > ### Aliases: cl_medoid > ### Keywords: cluster > > ### ** Examples > > ## An ensemble of partitions. > data("CKME") > pens <- CKME[1 : 20] > m1 <- cl_medoid(pens) > diss <- cl_dissimilarity(pens) > require("cluster") Loading required package: cluster [1] TRUE > m2 <- pens[[pam(diss, 1)$medoids]] > ## Agreement of medoid consensus partitions. 
> cl_agreement(m1, m2) Agreements using minimal euclidean membership distances: 1 1 1 > ## Or, more straightforwardly: > table(cl_class_ids(m1), cl_class_ids(m2)) 1 2 3 1 0 0 285 2 234 0 0 3 0 481 0 > > > > cleanEx(); ..nameEx <- "membership" > > ### * membership > > flush(stderr()); flush(stdout()) > > ### Name: membership > ### Title: Memberships of Partitions > ### Aliases: cl_membership as.cl_membership as.cl_hard_partition > ### Keywords: cluster > > ### ** Examples > > ## Getting the memberships of a single soft partition. > d <- dist(USArrests) > hclust_methods <- c("ward", "single", "complete", "average", + "mcquitty", "median", "centroid") > hclust_results <- lapply(hclust_methods, function(m) hclust(d, m)) > ## Now create an ensemble from the results. > hens <- cl_ensemble(list = hclust_results) > names(hens) <- hclust_methods > ## Create a dissimilarity object from this. > d1 <- cl_dissimilarity(hens) > ## And compute a soft partition. > require("cluster") Loading required package: cluster [1] TRUE > party <- fanny(d1, 2) > cl_membership(party) Memberships: [,1] [,2] ward 0.999977126 2.287369e-05 single 0.032745093 9.672549e-01 complete 0.057916650 9.420833e-01 average 0.009782488 9.902175e-01 mcquitty 0.013344608 9.866554e-01 median 0.009418508 9.905815e-01 centroid 0.011467621 9.885324e-01 > ## The "nearest" hard partition to this: > as.cl_hard_partition(party) Memberships: 1 2 ward 1 0 single 0 1 complete 0 1 average 0 1 mcquitty 0 1 median 0 1 centroid 0 1 > ## (which has the same class ids as cl_class_ids(party)). > > ## Converting all elements in an ensemble of partitions to their > ## memberships. 
> pens <- cl_boot(USArrests, 30, 3) > pens An ensemble of 30 partitions of 50 objects > pens <- cl_ensemble(list = lapply(pens, cl_membership)) > pens An ensemble of 30 partitions of 50 objects > pens[[length(pens)]] Memberships: 1 2 3 Alabama 0 1 0 Alaska 0 1 0 Arizona 0 1 0 Arkansas 0 0 1 California 0 1 0 Colorado 0 0 1 Connecticut 1 0 0 Delaware 0 1 0 Florida 0 1 0 Georgia 0 0 1 Hawaii 1 0 0 Idaho 1 0 0 Illinois 0 1 0 Indiana 1 0 0 Iowa 1 0 0 Kansas 1 0 0 Kentucky 1 0 0 Louisiana 0 1 0 Maine 1 0 0 Maryland 0 1 0 Massachusetts 0 0 1 Michigan 0 1 0 Minnesota 1 0 0 Mississippi 0 1 0 Missouri 0 0 1 Montana 1 0 0 Nebraska 1 0 0 Nevada 0 1 0 New Hampshire 1 0 0 New Jersey 0 0 1 New Mexico 0 1 0 New York 0 1 0 North Carolina 0 1 0 North Dakota 1 0 0 Ohio 1 0 0 Oklahoma 0 0 1 Oregon 0 0 1 Pennsylvania 1 0 0 Rhode Island 0 0 1 South Carolina 0 1 0 South Dakota 1 0 0 Tennessee 0 0 1 Texas 0 0 1 Utah 1 0 0 Vermont 1 0 0 Virginia 0 0 1 Washington 0 0 1 West Virginia 1 0 0 Wisconsin 1 0 0 Wyoming 0 0 1 > > > > cleanEx(); ..nameEx <- "n.of.classes" > > ### * n.of.classes > > flush(stderr()); flush(stdout()) > > ### Name: n_of_classes > ### Title: Classes in a Partition > ### Aliases: n_of_classes cl_class_ids > ### Keywords: cluster > > ### ** Examples > > data("Cassini") > party <- kmeans(Cassini$x, 3) > n_of_classes(party) [1] 3 > ## A simple confusion matrix: > table(cl_class_ids(party), Cassini$classes) 1 2 3 1 196 0 89 2 204 0 30 3 0 400 81 > ## For an "oversize" membership matrix representation: > n_of_classes(cl_membership(party, 6)) [1] 3 > > > > cleanEx(); ..nameEx <- "n.of.objects" > > ### * n.of.objects > > flush(stderr()); flush(stdout()) > > ### Name: n_of_objects > ### Title: Number of Objects in a Partition or Hierarchy > ### Aliases: n_of_objects > ### Keywords: cluster > > ### ** Examples > > data("Cassini") > pcl <- kmeans(Cassini$x, 3) > n_of_objects(pcl) [1] 1000 > hcl <- hclust(dist(USArrests)) > n_of_objects(hcl) [1] 50 > > > > cleanEx(); ..nameEx <- 
"pclust" > > ### * pclust > > flush(stderr()); flush(stdout()) > > ### Name: pclust > ### Title: Prototype-Based Partitions of Clusterings > ### Aliases: cl_pclust > ### Keywords: cluster > > ### ** Examples > > ## Use a precomputed ensemble of 50 k-means partitions of the > ## Cassini data. > data("CKME") > CKME <- CKME[1 : 30] # for saving precious time ... > diss <- cl_dissimilarity(CKME) > hc <- hclust(diss) > plot(hc) > ## This suggests using a partition with three classes, which can be > ## obtained using cutree(hc, 3). Could use cl_consensus() to compute > ## prototypes as the least squares consensus clusterings of the classes, > ## or alternatively: > x1 <- cl_pclust(CKME, 3, m = 1) > x2 <- cl_pclust(CKME, 3, m = 2) > ## Agreement of solutions. > cl_dissimilarity(x1, x2) Dissimilarities using minimal euclidean membership distances: 1 1 4.065519 > table(cl_class_ids(x1), cl_class_ids(x2)) 1 2 3 1 0 9 4 2 6 0 0 3 11 0 0 > > > > cleanEx(); ..nameEx <- "solve.LSAP" > > ### * solve.LSAP > > flush(stderr()); flush(stdout()) > > ### Encoding: UTF-8 > > ### Name: solve_LSAP > ### Title: Solve Linear Sum Assignment Problem > ### Aliases: solve_LSAP > ### Keywords: optimize > > ### ** Examples > > x <- matrix(c(5, 1, 4, 3, 5, 2, 2, 4, 4), nr = 3) > solve_LSAP(x) Optimal assignment: 1 => 3, 2 => 1, 3 => 2 > solve_LSAP(x, max = TRUE) Optimal assignment: 1 => 1, 2 => 2, 3 => 3 > > > > cleanEx(); ..nameEx <- "ultrametric" > > ### * ultrametric > > flush(stderr()); flush(stdout()) > > ### Name: ultrametric > ### Title: Ultrametrics of Hierarchies > ### Aliases: cl_ultrametric as.cl_ultrametric > ### Keywords: cluster > > ### ** Examples > > hc <- hclust(dist(USArrests)) > u <- cl_ultrametric(hc) > ## Subscripting. 
> u[1 : 5, 1 : 5] Ultrametric distances: Alabama Alaska Arizona Arkansas Alaska 48.72515 Arizona 64.99362 64.99362 Arkansas 293.62275 293.62275 293.62275 California 64.99362 64.99362 53.59338 293.62275 > u[1 : 5, 6 : 7] Ultrametric distances: Colorado Connecticut Alabama 293.62275 293.6228 Alaska 293.62275 293.6228 Arizona 293.62275 293.6228 Arkansas 36.73486 168.6114 California 293.62275 293.6228 > ## Plotting. > plot(u) > > > > ### *