R : Copyright 2005, The R Foundation for Statistical Computing
Version 2.1.1  (2005-06-20), ISBN 3-900051-07-0

R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.

R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.

Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for a HTML browser interface to help.
Type 'q()' to quit R.

> ### *
> ###
> attach(NULL, name = "CheckExEnv")
> assign(".CheckExEnv", as.environment(2), pos = length(search())) # base
> ## add some hooks to label plot pages for base and grid graphics
> setHook("plot.new", ".newplot.hook")
> setHook("persp", ".newplot.hook")
> setHook("grid.newpage", ".gridplot.hook")
> 
> assign("cleanEx",
+        function(env = .GlobalEnv) {
+            rm(list = ls(envir = env, all.names = TRUE), envir = env)
+            RNGkind("default", "default")
+            set.seed(1)
+            options(warn = 1)
+            delayedAssign("T", stop("T used instead of TRUE"),
+                          assign.env = .CheckExEnv)
+            delayedAssign("F", stop("F used instead of FALSE"),
+                          assign.env = .CheckExEnv)
+            sch <- search()
+            newitems <- sch[! sch %in% .oldSearch]
+            for(item in rev(newitems))
+                eval(substitute(detach(item), list(item=item)))
+            missitems <- .oldSearch[! .oldSearch %in% sch]
+            if(length(missitems))
+                warning("items ", paste(missitems, collapse=", "),
+                        " have been removed from the search path")
+        },
+        env = .CheckExEnv)
> assign("..nameEx", "__{must remake R-ex/*.R}__", env = .CheckExEnv) # for now
> assign("ptime", proc.time(), env = .CheckExEnv)
> grDevices::postscript("e1071-Examples.ps")
> assign("par.postscript", graphics::par(no.readonly = TRUE), env = .CheckExEnv)
> options(contrasts = c(unordered = "contr.treatment", ordered = "contr.poly"))
> options(warn = 1)
> library('e1071')
Loading required package: class
> 
> assign(".oldSearch", search(), env = .CheckExEnv)
> assign(".oldNS", loadedNamespaces(), env = .CheckExEnv)
> cleanEx(); ..nameEx <- "Discrete"
> 
> ### * Discrete
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Discrete
> ### Title: Discrete Distribution
> ### Aliases: ddiscrete pdiscrete qdiscrete rdiscrete
> ### Keywords: distribution
> 
> ### ** Examples
> 
> ## a vector of length 30 whose elements are 1 with probability 0.2
> ## and 2 with probability 0.8.
> rdiscrete (30, c(0.2, 0.8))
 [1] 2 2 2 1 2 1 1 2 2 2 2 2 2 2 2 2 2 1 2 2 1 2 2 2 2 2 2 2 1 2
> 
> ## a vector of length 100 whose elements are A, B, C, D.
> ## The probabilities of the four values have the relation 1:2:3:3
> rdiscrete (100, c(1,2,3,3), c("A","B","C","D"))
  [1] "C" "C" "C" "D" "B" "B" "B" "D" "B" "C" "B" "C" "B" "C" "C" "B" "D" "C"
 [19] "B" "B" "C" "B" "C" "D" "D" "D" "D" "C" "C" "C" "A" "D" "C" "D" "C" "D"
 [37] "C" "B" "D" "B" "C" "B" "C" "C" "C" "A" "B" "C" "B" "A" "C" "B" "C" "D"
 [55] "B" "D" "B" "D" "D" "D" "D" "D" "C" "B" "B" "B" "C" "C" "B" "C" "C" "C"
 [73] "D" "A" "C" "D" "D" "C" "A" "C" "A" "B" "C" "C" "D" "D" "B" "D" "C" "C"
 [91] "A" "C" "C" "D" "B" "C" "C" "D" "D" "C"
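## A minimal equivalent in base R (an editorial sketch, not part of the
## check transcript): rdiscrete() draws from a finite distribution, which
## sample() can do directly. The probability weights need not sum to one,
## since sample() normalizes them itself.
sample(c("A","B","C","D"), size = 100, replace = TRUE, prob = c(1,2,3,3))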
> 
> 
> 
> 
> cleanEx(); ..nameEx <- "bclust"
> 
> ### * bclust
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: bclust
> ### Title: Bagged Clustering
> ### Aliases: bclust hclust.bclust plot.bclust centers.bclust
> ###   clusters.bclust
> ### Keywords: multivariate cluster
> 
> ### ** Examples
> 
> data(iris)
> bc1 <- bclust(iris[,1:4], 3, base.centers=5)
Committee Member: 1(1) 2(1) 3(1) 4(1) 5(1) 6(1) 7(1) 8(1) 9(1) 10(1)
Computing Hierarchical Clustering
> plot(bc1)
> 
> table(clusters.bclust(bc1, 3))

 1  2  3 
32 50 68 
> centers.bclust(bc1, 3)
         [,1]     [,2]     [,3]      [,4]
[1,] 5.468214 2.581250 3.878740 1.2089126
[2,] 5.060607 3.477752 1.488814 0.2511244
[3,] 6.665909 3.013153 5.405872 1.9066071
> 
> 
> 
> cleanEx(); ..nameEx <- "bincombinations"
> 
> ### * bincombinations
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: bincombinations
> ### Title: Binary Combinations
> ### Aliases: bincombinations
> ### Keywords: utilities
> 
> ### ** Examples
> 
> bincombinations(2)
     [,1] [,2]
[1,]    0    0
[2,]    0    1
[3,]    1    0
[4,]    1    1
> bincombinations(3)
     [,1] [,2] [,3]
[1,]    0    0    0
[2,]    0    0    1
[3,]    0    1    0
[4,]    0    1    1
[5,]    1    0    0
[6,]    1    0    1
[7,]    1    1    0
[8,]    1    1    1
> 
> 
> 
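## The bootstrap.lca and lca examples below binarize a uniform random matrix
## with the one-liner t(t(x) < p) * 1. A minimal sketch of why that works
## (the names u and p here are illustrative, not from the original run):
## t() exposes the columns to R's recycling rule, so column j is compared
## against p[j]; multiplying the logical matrix by 1 converts it to 0/1.
u <- matrix(runif(12), nrow = 3)   # 3 observations, 4 items
p <- c(0.8, 0.8, 0.2, 0.2)         # per-column success probabilities
b <- t(t(u) < p) * 1               # entry [i,j] is 1 with probability p[j]
stopifnot(all(b %in% c(0, 1)))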
> cleanEx(); ..nameEx <- "bootstrap.lca"
> 
> ### * bootstrap.lca
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: bootstrap.lca
> ### Title: Bootstrap Samples of LCA Results
> ### Aliases: bootstrap.lca print.bootstrap.lca
> ### Keywords: multivariate
> 
> ### ** Examples
> 
> ## Generate a 4-dim. sample with 2 latent classes of 500 data points each.
> ## The probabilities for the 2 classes are given by type1 and type2.
> type1 <- c(0.8,0.8,0.2,0.2)
> type2 <- c(0.2,0.2,0.8,0.8)
> x <- matrix(runif(4000),nr=1000)
> x[1:500,] <- t(t(x[1:500,]) < type1) * 1
> x[501:1000,] <- t(t(x[501:1000,]) < type2) * 1
> l <- lca(x, 2, niter=5)
> bl <- bootstrap.lca(l,nsamples=3,lcaiter=5)
> bl
Bootstrap of LCA
----------------
Number of Bootstrap Samples: 3
Number of LCA Iterations/Sample: 5

Likelihood Ratio
Mean: 9.342148
SDev: 3.426301
Value in Data Set: 8.134228
Z-Statistics: -0.3525435
P(Z>X): 0.6377847
P-Val: 0.6666667

Pearson's Chisquare
Mean: 9.231176
SDev: 3.409688
Value in Data Set: 8.05263
Z-Statistics: -0.3456462
P(Z>X): 0.6351957
P-Val: 0.6666667
> 
> 
> 
> cleanEx(); ..nameEx <- "boxplot.bclust"
> 
> ### * boxplot.bclust
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: boxplot.bclust
> ### Title: Boxplot of cluster profiles
> ### Aliases: boxplot.bclust
> ### Keywords: hplot
> 
> ### ** Examples
> 
> data(iris)
> bc1 <- bclust(iris[,1:4], 3, base.centers=5)
Committee Member: 1(1) 2(1) 3(1) 4(1) 5(1) 6(1) 7(1) 8(1) 9(1) 10(1)
Computing Hierarchical Clustering
> boxplot(bc1)
Warning in if (x$datamean) { :
  the condition has length > 1 and only the first element will be used
Warning in if (x$datamean) { :
  the condition has length > 1 and only the first element will be used
Warning in if (x$datamean) { :
  the condition has length > 1 and only the first element will be used
> 
> 
> 
> cleanEx(); ..nameEx <- "classAgreement"
> 
> ### * classAgreement
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: classAgreement
> ### Title: Coefficients comparing classification agreement
> ### Aliases: classAgreement
> ### Keywords: category
> 
> ### ** Examples
> 
> ## no class correlations: both kappa and crand almost zero
> g1 <- sample(1:5, size=1000, replace=TRUE)
> g2 <- sample(1:5, size=1000, replace=TRUE)
> tab <- table(g1, g2)
> classAgreement(tab)
$diag
[1] 0.21

$kappa
[1] 0.01247161

$rand
[1] 0.6802082

$crand
[1] 0.0005188043

> 
> ## let pairs (g1=1,g2=1) and (g1=3,g2=3) agree better
> k <- sample(1:1000, size=200)
> g1[k] <- 1
> g2[k] <- 1
> 
> k <- sample(1:1000, size=200)
> g1[k] <- 3
> g2[k] <- 3
> 
> tab <- table(g1, g2)
> ## both kappa and crand should be significantly larger than before
> classAgreement(tab)
$diag
[1] 0.511

$kappa
[1] 0.3527500

$rand
[1] 0.7222923

$crand
[1] 0.2486902

> 
> 
> 
> cleanEx(); ..nameEx <- "cmeans"
> 
> ### * cmeans
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: cmeans
> ### Title: Fuzzy C-Means Clustering
> ### Aliases: cmeans print.fclust
> ### Keywords: cluster
> 
> ### ** Examples
> 
> # a 2-dimensional example
> x<-rbind(matrix(rnorm(100,sd=0.3),ncol=2),
+          matrix(rnorm(100,mean=1,sd=0.3),ncol=2))
> cl<-cmeans(x,2,20,verbose=TRUE,method="cmeans",m=2)
Iteration: 1, Error: 0.1424585192
Iteration: 2, Error: 0.1313120585
Iteration: 3, Error: 0.1305563616
Iteration: 4, Error: 0.1305085539
Iteration: 5, Error: 0.1305054562
Iteration: 6, Error: 0.1305052536
Iteration: 7, Error: 0.1305052404
Iteration: 8 converged, Error: 0.1305052395
> print(cl)
Fuzzy c-means clustering with 2 clusters

Cluster centers:
        [,1]       [,2]
1 0.95425995 1.03019239
2 0.03204283 0.01879421

Memberships:
                 1            2
 [1,] 0.026688977 0.9733110235
 [2,] 0.017861554 0.9821384457
 [3,] 0.036219111 0.9637808887
 [4,] 0.134809791 0.8651902091
 [5,] 0.137077499 0.8629225011
 [6,] 0.200237542 0.7997624576
 [7,] 0.014961794 0.9850382055
 [8,] 0.058740485 0.9412595148
 [9,] 0.030842572 0.9691574282
[10,] 0.008326108 0.9916738918
[11,] 0.659039263 0.3409607366
[12,] 0.004537272 0.9954627278
[13,] 0.040300971 0.9596990289
[14,] 0.116924279 0.8830757205
[15,] 0.072170795 0.9278292046
[16,] 0.001856341 0.9981436589 [17,] 0.085086804 0.9149131959 [18,] 0.231106890 0.7688931097 [19,] 0.030777706 0.9692222945 [20,] 0.361469236 0.6385307638 [21,] 0.056472357 0.9435276433 [22,] 0.043912154 0.9560878465 [23,] 0.016819693 0.9831803070 [24,] 0.105225371 0.8947746290 [25,] 0.065374608 0.9346253920 [26,] 0.003861382 0.9961386184 [27,] 0.012265691 0.9877343089 [28,] 0.069400355 0.9305996445 [29,] 0.013682803 0.9863171973 [30,] 0.021447511 0.9785524890 [31,] 0.092256773 0.9077432272 [32,] 0.003519107 0.9964808925 [33,] 0.093083151 0.9069168491 [34,] 0.067635028 0.9323649719 [35,] 0.079303353 0.9206966475 [36,] 0.015085813 0.9849141865 [37,] 0.063699753 0.9363002467 [38,] 0.006582651 0.9934173494 [39,] 0.073044022 0.9269559782 [40,] 0.028911643 0.9710883570 [41,] 0.016027157 0.9839728428 [42,] 0.079230092 0.9207699080 [43,] 0.120486953 0.8795130472 [44,] 0.040675348 0.9593246520 [45,] 0.138509545 0.8614904553 [46,] 0.037410303 0.9625896967 [47,] 0.058159993 0.9418400075 [48,] 0.037072109 0.9629278912 [49,] 0.049778992 0.9502210081 [50,] 0.041359444 0.9586405555 [51,] 0.983739560 0.0162604398 [52,] 0.997556759 0.0024432409 [53,] 0.949390129 0.0506098708 [54,] 0.936039856 0.0639601440 [55,] 0.779282435 0.2207175654 [56,] 0.854476981 0.1455230191 [57,] 0.955749536 0.0442504636 [58,] 0.935988136 0.0640118636 [59,] 0.869376455 0.1306235450 [60,] 0.886087007 0.1139129932 [61,] 0.983639504 0.0163604958 [62,] 0.987640714 0.0123592862 [63,] 0.921848775 0.0781512251 [64,] 0.965068168 0.0349318324 [65,] 0.968724980 0.0312750197 [66,] 0.894375237 0.1056247632 [67,] 0.991265883 0.0087341169 [68,] 0.837780672 0.1622193282 [69,] 0.980153703 0.0198462973 [70,] 0.999437343 0.0005626573 [71,] 0.885173214 0.1148267855 [72,] 0.935206134 0.0647938656 [73,] 0.994335137 0.0056648628 [74,] 0.998342576 0.0016574237 [75,] 0.989690726 0.0103092739 [76,] 0.971341164 0.0286588356 [77,] 0.982216296 0.0177837041 [78,] 0.908292916 0.0917070841 [79,] 0.956344598 0.0436554024 [80,] 0.957777163 0.0422228373 [81,] 0.891580953 0.1084190469 [82,] 0.962710774 0.0372892262 [83,] 0.982006348 0.0179936518 [84,] 0.587956236 0.4120437644 [85,] 0.985781108 0.0142188921 [86,] 0.863480478 0.1365195223 [87,] 0.942721485 0.0572785149 [88,] 0.938222045 0.0617779545 [89,] 0.965100239 0.0348997614 [90,] 0.935969437 0.0640305630 [91,] 0.780189818 0.2198101821 [92,] 0.946529683 0.0534703168 [93,] 0.748799255 0.2512007451 [94,] 0.974911612 0.0250883885 [95,] 0.767312308 0.2326876924 [96,] 0.868409689 0.1315903105 [97,] 0.880996064 0.1190039363 [98,] 0.924455238 0.0755447621 [99,] 0.926104545 0.0738954549 [100,] 0.816204656 0.1837953437 Closest hard clustering: [1] 2 2 2 2 2 2 2 2 2 2 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 [38] 2 2 2 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 [75] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 Available components: [1] "centers" "size" "cluster" "membership" "iter" [6] "withinerror" "call" > > # a 3-dimensional example > x<-rbind(matrix(rnorm(150,sd=0.3),ncol=3), + matrix(rnorm(150,mean=1,sd=0.3),ncol=3), + matrix(rnorm(150,mean=2,sd=0.3),ncol=3)) > cl<-cmeans(x,6,20,verbose=TRUE,method="cmeans") Iteration: 1, Error: 0.1321324551 Iteration: 2, Error: 0.1258269400 Iteration: 3, Error: 0.1243321641 Iteration: 4, Error: 0.1238300128 Iteration: 5, Error: 0.1236301287 Iteration: 6, Error: 0.1235366457 Iteration: 7, Error: 0.1234864440 Iteration: 8, Error: 0.1234568118 Iteration: 9, Error: 0.1234379982 Iteration: 10, Error: 0.1234252088 Iteration: 11, Error: 
0.1234159418 Iteration: 12, Error: 0.1234088589 Iteration: 13, Error: 0.1234032262 Iteration: 14, Error: 0.1233986242 Iteration: 15, Error: 0.1233947983 Iteration: 16, Error: 0.1233915822 Iteration: 17, Error: 0.1233888595 Iteration: 18, Error: 0.1233865434 Iteration: 19, Error: 0.1233845662 Iteration: 20, Error: 0.1233828735 > print(cl) Fuzzy c-means clustering with 6 clusters Cluster centers: [,1] [,2] [,3] 1 2.150369704 1.849928005 2.08219757 2 0.823300519 0.617803291 0.99222886 3 1.198316655 1.091694841 1.01554172 4 1.852750589 2.099580038 1.74140705 5 0.705454046 1.019570342 0.82180333 6 0.003345627 0.004415598 -0.03607473 Memberships: 1 2 3 4 5 6 [1,] 0.011388321 0.070804612 0.036998278 0.012549184 0.056115518 0.812144087 [2,] 0.020363255 0.139517574 0.081244117 0.024560872 0.170917663 0.563396519 [3,] 0.001515047 0.008295689 0.004798168 0.001711160 0.007600210 0.976079727 [4,] 0.021085977 0.260990066 0.106095486 0.025098054 0.268994766 0.317735650 [5,] 0.019562784 0.188218208 0.094084196 0.023272297 0.199975198 0.474887316 [6,] 0.024634184 0.094890084 0.064423373 0.028157711 0.097938053 0.689956595 [7,] 0.004541010 0.030026518 0.016880844 0.005250654 0.029065031 0.914235943 [8,] 0.019000752 0.078952960 0.053200808 0.021250737 0.074179682 0.753415061 [9,] 0.001972968 0.011116745 0.006495939 0.002268356 0.010922100 0.967223892 [10,] 0.030444677 0.121428946 0.091679007 0.035584675 0.131975197 0.588887499 [11,] 0.013273253 0.094517844 0.046255210 0.015034614 0.081590308 0.749328770 [12,] 0.003463621 0.017494339 0.010599487 0.003913263 0.016364230 0.948165060 [13,] 0.007845248 0.044326470 0.025764164 0.008777645 0.038438717 0.874847757 [14,] 0.016938007 0.114709332 0.064209050 0.019143085 0.097341438 0.687659088 [15,] 0.011325263 0.089559984 0.045685716 0.013264986 0.090083042 0.750081010 [16,] 0.006761567 0.040745399 0.021759272 0.007581303 0.035303914 0.887848545 [17,] 0.010721537 0.057987985 0.032855657 0.012149525 0.054255516 0.832029780 [18,] 0.010351300 0.048732153 0.029681719 0.011480987 0.043362216 0.856391625 [19,] 0.013088895 0.057721345 0.038463268 0.015133633 0.060501571 0.815091289 [20,] 0.011036882 0.062901559 0.038716592 0.013038039 0.069727722 0.804579206 [21,] 0.014957920 0.111639120 0.061626758 0.017789307 0.124135125 0.669851771 [22,] 0.010322825 0.050147605 0.032168827 0.012023728 0.053387156 0.841949859 [23,] 0.037566375 0.187693417 0.114296231 0.040123577 0.141026325 0.479294075 [24,] 0.007952454 0.040076645 0.025772731 0.009195822 0.040949439 0.876052910 [25,] 0.009963779 0.072310220 0.039171202 0.011457190 0.066062270 0.801035340 [26,] 0.021810943 0.094275737 0.058914811 0.024631396 0.091682489 0.708684626 [27,] 0.004050243 0.026468479 0.014716674 0.004627649 0.024195704 0.925941251 [28,] 0.012249725 0.056776015 0.034473897 0.013709907 0.052570785 0.830219672 [29,] 0.020620130 0.259742998 0.094925605 0.023584276 0.190300742 0.410826248 [30,] 0.007180504 0.051251506 0.025489746 0.008054754 0.041933329 0.866090161 [31,] 0.019703085 0.106696964 0.057657715 0.021468273 0.085266458 0.709207506 [32,] 0.014123540 0.075904338 0.046921778 0.015916106 0.068359068 0.778775171 [33,] 0.013816464 0.081343899 0.048105877 0.016315413 0.090035019 0.750383327 [34,] 0.012215746 0.077437358 0.039782114 0.013702575 0.066231590 0.790630617 [35,] 0.009064772 0.048566947 0.029093568 0.010520580 0.050313380 0.852440754 [36,] 0.022064227 0.088853414 0.063488940 0.025756181 0.097106402 0.702730837 [37,] 0.007694551 0.056961631 0.029128503 0.008717874 0.048090523 0.849406919 [38,] 
0.016366095 0.070927094 0.048030059 0.018561133 0.069136484 0.776979135 [39,] 0.010348368 0.055407541 0.033670016 0.011628077 0.049543168 0.839402830 [40,] 0.015821034 0.120563394 0.058622165 0.018256470 0.112646624 0.674090312 [41,] 0.010909179 0.066793143 0.035216732 0.012025247 0.053312114 0.821743585 [42,] 0.010499649 0.085170520 0.039906976 0.011947865 0.071990133 0.780484856 [43,] 0.012055901 0.060268134 0.035427818 0.013628652 0.056922676 0.821696819 [44,] 0.031800580 0.152186239 0.091668637 0.034004655 0.117593861 0.572746028 [45,] 0.013090969 0.060102106 0.037464990 0.014486157 0.053284809 0.821570969 [46,] 0.019644312 0.233827433 0.090659498 0.022718627 0.190997247 0.442152883 [47,] 0.006671771 0.044655839 0.024082478 0.007531290 0.038216259 0.878842363 [48,] 0.006285028 0.034283734 0.021269894 0.007326206 0.035870711 0.894964426 [49,] 0.021579862 0.156695396 0.085885446 0.025904294 0.186435149 0.523499853 [50,] 0.019515594 0.213567824 0.095184671 0.023138435 0.217350080 0.431243397 [51,] 0.042739750 0.196734029 0.532399591 0.049167139 0.149398097 0.029561393 [52,] 0.009949158 0.342719703 0.130120260 0.012340005 0.476008132 0.028862743 [53,] 0.002235149 0.932562095 0.024464052 0.002556002 0.033464037 0.004718664 [54,] 0.016733547 0.660012997 0.140938472 0.018566980 0.140883313 0.022864691 [55,] 0.042333726 0.281223414 0.355149849 0.051692807 0.242192509 0.027407696 [56,] 0.014417396 0.136780358 0.220628093 0.019691113 0.583594494 0.024888547 [57,] 0.010775784 0.743826017 0.089358277 0.012010751 0.115048950 0.028980222 [58,] 0.057611683 0.271952012 0.428835084 0.059770277 0.150948830 0.030882115 [59,] 0.008799515 0.094678533 0.129927695 0.012033156 0.739006837 0.015554264 [60,] 0.025869255 0.245736670 0.205721312 0.032723255 0.392947948 0.097001560 [61,] 0.059368419 0.130505886 0.417891435 0.099330268 0.257413266 0.035490727 [62,] 0.038849956 0.117591443 0.559668665 0.057985594 0.200340227 0.025564114 [63,] 0.011692717 0.128712049 0.212748391 0.016036336 0.617464063 0.013346445 [64,] 0.004625443 0.111296435 0.119315142 0.006039473 0.751695914 0.007027593 [65,] 0.001732392 0.013459982 0.964595028 0.002288885 0.016712743 0.001210969 [66,] 0.020059226 0.240440103 0.535235197 0.023891887 0.159250726 0.021122862 [67,] 0.016640158 0.136862369 0.512175416 0.022964398 0.297926995 0.013430663 [68,] 0.027132123 0.146277631 0.207534747 0.039575751 0.545447934 0.034031812 [69,] 0.107090754 0.116692343 0.479260810 0.149848220 0.125413900 0.021693973 [70,] 0.031108338 0.245037555 0.476975648 0.036492076 0.177869491 0.032516892 [71,] 0.004041144 0.889390864 0.036872933 0.004592109 0.056336650 0.008766299 [72,] 0.010360461 0.192715194 0.289363495 0.013434434 0.477534916 0.016591500 [73,] 0.017162388 0.190950376 0.141248911 0.022715397 0.600120726 0.027802202 [74,] 0.027864062 0.088716852 0.657486127 0.043306774 0.167581215 0.015044970 [75,] 0.043286823 0.285839131 0.331208975 0.049791119 0.220873749 0.069000204 [76,] 0.010392112 0.137140839 0.097806920 0.013774334 0.721849337 0.019036458 [77,] 0.030486499 0.274920895 0.308500150 0.038951275 0.321627811 0.025513370 [78,] 0.034330846 0.162862064 0.372705795 0.050070149 0.355979809 0.024051337 [79,] 0.008818529 0.225074647 0.186262439 0.011368372 0.556835988 0.011640024 [80,] 0.014102322 0.222538107 0.103168234 0.017872499 0.580945992 0.061372847 [81,] 0.033372304 0.417029999 0.324430452 0.036191336 0.162004180 0.026971729 [82,] 0.010247658 0.365213728 0.098000541 0.012696268 0.490992366 0.022849439 [83,] 0.009287571 0.290162772 0.330029849 
0.011707451 0.345319936 0.013492421 [84,] 0.136618658 0.115211832 0.297100632 0.266840311 0.157060308 0.027168260 [85,] 0.057495258 0.140575764 0.341056689 0.096308695 0.323824425 0.040739169 [86,] 0.009836861 0.079672424 0.827331359 0.012121226 0.064315236 0.006722895 [87,] 0.028132324 0.477910791 0.261279496 0.031247856 0.175583414 0.025846119 [88,] 0.012583318 0.741206863 0.091165310 0.013843841 0.115344813 0.025855855 [89,] 0.005029783 0.032309349 0.899021488 0.006908658 0.052883124 0.003847598 [90,] 0.003517122 0.827379059 0.052006344 0.004201754 0.105114428 0.007781293 [91,] 0.124135373 0.102144460 0.358133018 0.249439735 0.140012322 0.026135092 [92,] 0.011913107 0.593628179 0.217609735 0.013921387 0.148940037 0.013987555 [93,] 0.002478694 0.018084604 0.955162502 0.003199276 0.019321560 0.001753363 [94,] 0.150833800 0.103264144 0.207249682 0.351695811 0.153004212 0.033952352 [95,] 0.026479249 0.341548366 0.129773722 0.031650756 0.348510816 0.122037090 [96,] 0.003718381 0.054293077 0.041343761 0.004950190 0.887823396 0.007871194 [97,] 0.006825064 0.789751549 0.086269260 0.007907970 0.099181940 0.010064217 [98,] 0.029758117 0.195525680 0.299068401 0.039693330 0.379107897 0.056846575 [99,] 0.021729246 0.396483797 0.128445811 0.026101971 0.364999351 0.062239823 [100,] 0.011294976 0.064003035 0.846140165 0.014194181 0.058030699 0.006336944 [101,] 0.184678201 0.018465719 0.032576728 0.735295622 0.021588884 0.007394845 [102,] 0.457344758 0.045588350 0.067703428 0.363282852 0.048259367 0.017821246 [103,] 0.070130881 0.007875721 0.015738496 0.894139665 0.009305723 0.002809515 [104,] 0.770425450 0.015070655 0.024538317 0.168620502 0.015388070 0.005957007 [105,] 0.169171250 0.093071553 0.280319137 0.317815025 0.113704334 0.025918702 [106,] 0.263700294 0.101993981 0.181026905 0.320548890 0.106488758 0.026241171 [107,] 0.930015515 0.003761227 0.006192673 0.054760858 0.003853555 0.001416172 [108,] 0.205508895 0.030919843 0.053751847 0.659777875 0.037152971 0.012888568 [109,] 0.543733978 0.018287221 0.030246856 0.381495301 0.019566766 0.006669876 [110,] 0.328927599 0.025520427 0.043987478 0.564534475 0.028226403 0.008803618 [111,] 0.663528935 0.016600534 0.026606955 0.269438041 0.017490209 0.006335326 [112,] 0.557105845 0.015926012 0.029832308 0.374632442 0.016869726 0.005633667 [113,] 0.361892432 0.047256163 0.103861054 0.424899379 0.049045533 0.013045438 [114,] 0.223765019 0.052106331 0.088752062 0.547514788 0.064546859 0.023314942 [115,] 0.788068909 0.010871000 0.018975673 0.166868895 0.011203712 0.004011810 [116,] 0.164327755 0.014556462 0.030771353 0.769196119 0.016412094 0.004736217 [117,] 0.643208851 0.038098752 0.062099519 0.208588896 0.035962027 0.012041955 [118,] 0.442317165 0.014604619 0.026262072 0.496222546 0.015705239 0.004888359 [119,] 0.648851524 0.034578217 0.050095264 0.218080811 0.034256546 0.014137639 [120,] 0.125663633 0.016877178 0.036380615 0.796197671 0.019662431 0.005218472 [121,] 0.693736288 0.035139366 0.053281066 0.172447430 0.032822399 0.012573451 [122,] 0.347422924 0.029048106 0.048298620 0.530086914 0.032680888 0.012462548 [123,] 0.182398848 0.029429498 0.053047486 0.687366085 0.035743574 0.012014509 [124,] 0.662967323 0.016260634 0.026241365 0.270751663 0.017222372 0.006556643 [125,] 0.465522496 0.040760265 0.079672041 0.358293306 0.041884806 0.013867086 [126,] 0.268824845 0.016210013 0.029587321 0.660936474 0.018178338 0.006263009 [127,] 0.575537379 0.031931695 0.053918903 0.292952036 0.032872383 0.012787604 [128,] 0.297519617 0.051946609 0.090676456 0.485565336 
0.057939756 0.016352226 [129,] 0.146267442 0.023571563 0.043002822 0.749353477 0.028806941 0.008997755 [130,] 0.463212641 0.057937479 0.091037577 0.311993992 0.057539055 0.018279254 [131,] 0.324758671 0.019405787 0.039539772 0.588800845 0.021080696 0.006414229 [132,] 0.786635220 0.016647654 0.027162423 0.146618953 0.016568469 0.006367282 [133,] 0.718363078 0.018468088 0.030074461 0.206702227 0.018976897 0.007415249 [134,] 0.497627825 0.043211665 0.086146157 0.316694717 0.042940912 0.013378725 [135,] 0.783506217 0.011321184 0.020294846 0.169285309 0.011589296 0.004003148 [136,] 0.098392730 0.005444220 0.009955564 0.878002674 0.006180859 0.002023953 [137,] 0.704985793 0.015535109 0.024693126 0.232366351 0.016292416 0.006127205 [138,] 0.736971419 0.024860605 0.037606797 0.167175356 0.024210095 0.009175728 [139,] 0.033505089 0.003540286 0.006913404 0.950616349 0.004181492 0.001243380 [140,] 0.517194663 0.068337041 0.104428578 0.227693292 0.061294622 0.021051804 [141,] 0.660155986 0.008412513 0.015228558 0.304424491 0.008916937 0.002861515 [142,] 0.330703854 0.041216439 0.074763199 0.490245489 0.046120966 0.016950053 [143,] 0.239297215 0.026914943 0.056334697 0.639581951 0.029917290 0.007953905 [144,] 0.301038436 0.055080262 0.091752352 0.472076024 0.061794855 0.018258071 [145,] 0.756810549 0.022384526 0.037865264 0.153938351 0.021461900 0.007539411 [146,] 0.163943032 0.044526293 0.093488738 0.624541522 0.057092536 0.016407879 [147,] 0.582174264 0.046134592 0.076401442 0.237721056 0.043541004 0.014027641 [148,] 0.912345811 0.005357087 0.008691625 0.066140381 0.005437073 0.002028023 [149,] 0.252742362 0.013169315 0.024946079 0.690392798 0.014467939 0.004281507 [150,] 0.056136919 0.002679293 0.005166404 0.932105584 0.002996322 0.000915478 Closest hard clustering: [1] 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 [38] 6 6 6 6 6 6 6 6 6 6 6 6 6 3 5 2 2 3 5 2 3 5 5 3 3 5 5 3 3 3 5 3 3 2 5 5 3 [75] 3 5 5 3 5 5 2 5 5 3 3 3 2 2 3 2 3 2 3 4 5 5 2 5 2 3 4 1 4 1 4 4 1 4 1 4 1 [112] 1 4 4 1 4 1 4 1 4 1 4 4 1 1 4 1 4 4 1 4 1 1 1 1 4 1 1 4 1 1 4 4 4 1 4 1 1 [149] 4 4 Available components: [1] "centers" "size" "cluster" "membership" "iter" [6] "withinerror" "call" > > > > cleanEx(); ..nameEx <- "countpattern" > > ### * countpattern > > flush(stderr()); flush(stdout()) > > ### Name: countpattern > ### Title: Count Binary Patterns > ### Aliases: countpattern > ### Keywords: multivariate > > ### ** Examples > > xx <- rbind(c(1,0,0),c(1,0,0),c(1,0,1),c(0,1,1),c(0,1,1)) > countpattern(xx) 000 001 010 011 100 101 110 111 0 0 0 2 2 1 0 0 > countpattern(xx, matching=TRUE) $pat 000 001 010 011 100 101 110 111 0 0 0 2 2 1 0 0 $matching [1] 5 5 6 4 4 > > > > cleanEx(); ..nameEx <- "cshell" > > ### * cshell > > flush(stderr()); flush(stdout()) > > ### Name: cshell > ### Title: Fuzzy C-Shell Clustering > ### Aliases: cshell > ### Keywords: cluster > > ### ** Examples > > ## a 2-dimensional example > x<-rbind(matrix(rnorm(50,sd=0.3),ncol=2), + matrix(rnorm(50,mean=1,sd=0.3),ncol=2)) > cl<-cshell(x,2,20,verbose=TRUE,method="cshell",m=2) Iteration: 1 Epsi2: 2.5303603956 Iteration: 1 Error: 0.0344906314 Iteration: 2 Epsi2: 1.4573616683 Iteration: 2 Error: 0.0287429347 Iteration: 3 converged, Error: 0.1475346726 Iteration: 3 Error: 0.0286897118 > print(cl) Fuzzy c-means clustering with 2 clusters Cluster centers: [,1] [,2] [1,] 1.0943410 1.0079852 [2,] -0.1575513 0.1404565 Memberships: [,1] [,2] [1,] 3.370827e-02 0.9662917320 [2,] 1.067684e-02 0.9893231593 [3,] 1.346060e-02 0.9865394032 [4,] 
8.922530e-02 0.9107746989
 [5,] 2.119205e-02 0.9788079542
 [6,] 1.124890e-02 0.9887510958
 [7,] 2.378482e-03 0.9976215179
 [8,] 5.112373e-04 0.9994887627
 [9,] 1.137391e-03 0.9988626094
[10,] 1.101142e-02 0.9889885782
[11,] 7.371500e-02 0.9262849971
[12,] 4.015694e-04 0.9995984306
[13,] 3.356324e-02 0.9664367592
[14,] 8.477894e-03 0.9915221064
[15,] 1.958713e-02 0.9804128683
[16,] 1.862033e-02 0.9813796733
[17,] 1.280140e-02 0.9871986027
[18,] 3.470946e-03 0.9965290543
[19,] 3.356082e-05 0.9999664392
[20,] 5.055800e-03 0.9949441998
[21,] 1.998288e-02 0.9800171192
[22,] 5.421009e-05 0.9999457899
[23,] 4.057658e-02 0.9594234243
[24,] 1.982721e-03 0.9980172792
[25,] 1.814163e-03 0.9981858374
[26,] 9.494374e-01 0.0505626414
[27,] 9.967918e-01 0.0032082231
[28,] 9.132611e-01 0.0867388709
[29,] 9.912487e-01 0.0087513119
[30,] 9.996399e-01 0.0003600980
[31,] 9.865889e-01 0.0134111141
[32,] 9.753746e-01 0.0246253573
[33,] 9.739256e-01 0.0260744244
[34,] 9.869976e-01 0.0130024289
[35,] 9.845812e-01 0.0154187805
[36,] 9.747683e-01 0.0252316531
[37,] 9.994435e-01 0.0005565123
[38,] 9.692079e-01 0.0307920534
[39,] 9.614322e-01 0.0385678245
[40,] 9.987695e-01 0.0012304740
[41,] 9.675788e-01 0.0324212071
[42,] 8.726253e-01 0.1273747157
[43,] 9.937768e-01 0.0062232054
[44,] 9.848468e-01 0.0151531652
[45,] 9.614979e-01 0.0385020620
[46,] 9.773894e-01 0.0226106460
[47,] 9.570546e-01 0.0429454461
[48,] 9.802311e-01 0.0197688580
[49,] 9.251357e-01 0.0748642633
[50,] 9.594580e-01 0.0405419944

Closest hard clustering:
 [1] 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 1 1 1
[39] 1 1 1 1 1 1 1 1 1 1 1 1

Available components:
[1] "centers"     "radius"      "size"        "cluster"     "iter"       
[6] "membership"  "withinerror" "call"       
> 
> # assign classes to some new data
> y<-rbind(matrix(rnorm(13,sd=0.3),ncol=2),
+          matrix(rnorm(13,mean=1,sd=0.3),ncol=2))
Warning: data length [13] is not a sub-multiple or multiple of the number of rows [7] in matrix
Warning: data length [13] is not a sub-multiple or multiple of the number of rows [7] in matrix
> # ycl<-predict(cl, y, type="both")
> 
> 
> 
> cleanEx(); ..nameEx <- "element"
> 
> ### * element
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: element
> ### Title: Extract Elements of an Array
> ### Aliases: element
> ### Keywords: array
> 
> ### ** Examples
> 
> x <- array(1:20, dim=c(2,5,2))
> element(x, c(1,4,2))
[1] 17
> 
> 
> 
> cleanEx(); ..nameEx <- "fclustIndex"
> 
> ### * fclustIndex
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: fclustIndex
> ### Title: Fuzzy Cluster Indexes (Validity/Performance Measures)
> ### Aliases: fclustIndex
> ### Keywords: cluster
> 
> ### ** Examples
> 
> # a 2-dimensional example
> x<-rbind(matrix(rnorm(100,sd=0.3),ncol=2),
+          matrix(rnorm(100,mean=1,sd=0.3),ncol=2))
> cl<-cmeans(x,2,20,verbose=TRUE,method="cmeans")
Iteration: 1, Error: 0.1424585192
Iteration: 2, Error: 0.1313120585
Iteration: 3, Error: 0.1305563616
Iteration: 4, Error: 0.1305085539
Iteration: 5, Error: 0.1305054562
Iteration: 6, Error: 0.1305052536
Iteration: 7, Error: 0.1305052404
Iteration: 8 converged, Error: 0.1305052395
> resultindexes <- fclustIndex(cl,x, index="all")
> resultindexes
          fhv           apd            pd            xb            fs 
 2.288748e-01  1.868756e+02  1.869574e+02  6.966184e-04 -4.073341e+01 
           pc            pe           pre            si 
 8.725031e-01  2.312633e-01  2.460912e+02  2.042184e-01 
> 
> 
> 
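## The pc ("partition coefficient") value above has a simple closed form,
## Bezdek's F(U; k) = sum(u[i,c]^2) / n. A sketch recomputing it from the
## membership matrix of the fitted object (assumes `cl` from the example
## above; small numeric differences are possible):
pc <- sum(cl$membership^2) / nrow(cl$membership)
## pc should be close to the "pc" entry reported by fclustIndex (~0.87 here)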
> cleanEx(); ..nameEx <- "hamming.distance"
> 
> ### * hamming.distance
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: hamming.distance
> ### Title: Hamming Distances of Vectors
> ### Aliases: hamming.distance
> ### Keywords: multivariate
> 
> ### ** Examples
> 
> x <- c(1, 0, 0)
> y <- c(1, 0, 1)
> hamming.distance(x, y)
[1] 1
> z <- rbind(x,y)
> rownames(z) <- c("Fred", "Tom")
> hamming.distance(z)
     Fred Tom
Fred    0   1
Tom     1   0
> 
> 
> 
> cleanEx(); ..nameEx <- "hamming.window"
> 
> ### * hamming.window
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: hamming.window
> ### Title: Computes the Coefficients of a Hamming Window.
> ### Aliases: hamming.window
> ### Keywords: ts
> 
> ### ** Examples
> hamming.window(10)
 [1] 0.0800000 0.1876196 0.4601218 0.7700000 0.9722586 0.9722586 0.7700000
 [8] 0.4601218 0.1876196 0.0800000
> 
> x<-rnorm(500)
> y<-stft(x, wtype="hamming.window")
> plot(y)
> 
> 
> 
> cleanEx(); ..nameEx <- "hanning.window"
> 
> ### * hanning.window
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: hanning.window
> ### Title: Computes the Coefficients of a Hanning Window.
> ### Aliases: hanning.window
> ### Keywords: ts
> 
> ### ** Examples
> hanning.window(10)
 [1] 0.0000000 0.1169778 0.4131759 0.7500000 0.9698463 0.9698463 0.7500000
 [8] 0.4131759 0.1169778 0.0000000
> 
> x<-rnorm(500)
> y<-stft(x, wtype="hanning.window")
> plot(y)
> 
> 
> 
> cleanEx(); ..nameEx <- "impute"
> 
> ### * impute
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: impute
> ### Title: Replace Missing Values
> ### Aliases: impute
> ### Keywords: manip
> 
> ### ** Examples
> 
> x<- matrix(1:10, ncol=2)
> x[c(1,3,7)] <- NA
> print(x)
     [,1] [,2]
[1,]   NA    6
[2,]    2   NA
[3,]   NA    8
[4,]    4    9
[5,]    5   10
> print(impute(x))
     [,1] [,2]
[1,]    4  6.0
[2,]    2  8.5
[3,]    4  8.0
[4,]    4  9.0
[5,]    5 10.0
> 
> 
> 
> cleanEx(); ..nameEx <- "interpolate"
> 
> ### * interpolate
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: interpolate
> ### Title: Interpolate Values of Array
> ### Aliases: interpolate
> ### Keywords: arith multivariate
> 
> ### ** Examples
> 
> x <- seq(0,3,0.2)
> z <- outer(x,x, function(x,y) sin(x*y))
> dimnames(z) <- list(x,x)
> sin(1.1*2.1)
[1] 0.7390053
> interpolate(c(1.1, 2.1),z)
[1] 0.7185199
> 
> 
> 
> cleanEx(); ..nameEx <- "kurtosis"
> 
> ### * kurtosis
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: kurtosis
> ### Title: Kurtosis
> ### Aliases: kurtosis
> ### Keywords: univar
> 
> ### ** Examples
> 
> x <- rnorm(100)
> kurtosis(x)
[1] -0.05219909
> 
> 
> 
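## kurtosis() estimates the empirical excess kurtosis. A minimal sketch of
## the moment-based definition it targets (conventions for scaling the
## variance estimate vary between implementations, so small finite-sample
## differences from kurtosis(x) are possible):
excess.kurtosis <- function(x) {
    m <- mean(x)
    ## 4th central moment over squared 2nd central moment, minus 3
    mean((x - m)^4) / mean((x - m)^2)^2 - 3
}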
> cleanEx(); ..nameEx <- "lca"
> 
> ### * lca
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: lca
> ### Title: Latent Class Analysis (LCA)
> ### Aliases: lca print.lca summary.lca print.summary.lca predict.lca
> ### Keywords: multivariate cluster
> 
> ### ** Examples
> 
> ## Generate a 4-dim. sample with 2 latent classes of 500 data points each.
> ## The probabilities for the 2 classes are given by type1 and type2.
> type1 <- c(0.8,0.8,0.2,0.2)
> type2 <- c(0.2,0.2,0.8,0.8)
> x <- matrix(runif(4000),nr=1000)
> x[1:500,] <- t(t(x[1:500,]) < type1) * 1
> x[501:1000,] <- t(t(x[501:1000,]) < type2) * 1
> l <- lca(x, 2, niter=5)
> print(l)
LCA-Result
----------
Datapoints: 1000
Classes: 2
Probability of classes
[1] 0.494 0.506
Itemprobabilities
     1    2    3    4
1 0.19 0.26 0.80 0.76
2 0.81 0.76 0.22 0.23
> summary(l)
LCA-Result
----------
Datapoints: 1000
Classes: 2
Goodness of fit statistics:
Number of parameters, estimated model: 9
Number of parameters, saturated model: 15
Log-Likelihood, estimated model: -2508.545
Log-Likelihood, saturated model: -2504.478
Information Criteria:
BIC, estimated model: 5079.259
BIC, saturated model: 5112.572
TestStatistics:
Likelihood ratio: 8.134228   p-val: 0.2284335
Pearson Chi^2: 8.05263   p-val: 0.2342728
Degrees of freedom: 6
> p <- predict(l, x)
> table(p, c(rep(1,500),rep(2,500)))
   
p     1   2
  1  47 440
  2 453  60
> 
> 
> 
> cleanEx(); ..nameEx <- "matchClasses"
> 
> ### * matchClasses
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: matchClasses
> ### Title: Find similar classes in two-way contingency tables
> ### Aliases: matchClasses compareMatchedClasses
> ### Keywords: category
> 
> ### ** Examples
> 
> ## a stupid example with no class correlations:
> g1 <- sample(1:5, size=1000, replace=TRUE)
> g2 <- sample(1:5, size=1000, replace=TRUE)
> tab <- table(g1, g2)
> matchClasses(tab, "exact")
Direct agreement: 0 of 5 pairs
Iterations for permutation matching: 120
Cases in matched pairs: 22.9 %
1 2 3 4 5 
4 2 5 1 3 
> 
> ## let pairs (g1=1,g2=4) and (g1=3,g2=1) agree better
> k <- sample(1:1000, size=200)
> g1[k] <- 1
> g2[k] <- 4
> 
> k <- sample(1:1000, size=200)
> g1[k] <- 3
> g2[k] <- 1
> 
> tab <- table(g1, g2)
> matchClasses(tab, "exact")
Direct agreement: 2 of 5 pairs
Iterations for permutation matching: 6
Cases in matched pairs: 50.8 %
1 2 3 4 5 
4 2 1 5 3 
> 
> ## get agreement coefficients:
> compareMatchedClasses(g1, g2, method="exact")
$diag
      [,1]
[1,] 0.508

$kappa
          [,1]
[1,] 0.3488825

$rand
          [,1]
[1,] 0.7205506

$crand
          [,1]
[1,] 0.2423375

> 
> 
> 
> cleanEx(); ..nameEx <- "matchControls"
> 
> ### * matchControls
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: matchControls
> ### Title: Find matched control group
> ### Aliases: matchControls
> ### Keywords: manip
> 
> ### ** Examples
> 
> Age.case <- 40 + 5 * rnorm(50)
> Age.cont <- 45 + 10 * rnorm(150)
> Age <- c(Age.case, Age.cont)
> 
> Sex.case <- sample(c("M", "F"), 50, prob = c(.4, .6), replace = TRUE)
> Sex.cont <- sample(c("M", "F"), 150, prob = c(.6, .4), replace = TRUE)
> Sex <- as.factor(c(Sex.case, Sex.cont))
> 
> casecont <- as.factor(c(rep("case", 50), rep("cont", 150)))
> 
> ## now look at the group properties:
> boxplot(Age ~ casecont)
> barplot(table(Sex, casecont), beside = TRUE)
> 
> m <- matchControls(casecont ~ Sex + Age)
Loading required package: cluster
> 
> ## properties of the new groups:
> boxplot(Age ~ m$factor)
> barplot(table(Sex, m$factor))
> 
> 
> 
> cleanEx(); ..nameEx <- "moment"
> 
> ### * moment
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: moment
> ### Title: Statistical Moment
> ### Aliases: moment
> ### Keywords: univar
> 
> ### ** Examples
> 
> x <- rnorm(100)
> 
> ## Compute the mean
> moment(x)
[1] 0.1088874
> ## Compute the 2nd centered moment (!= var)
> moment(x, order=2, center=TRUE)
[1] 0.7986945
> 
> ## Compute the 3rd absolute centered moment
> moment(x, order=3, center=TRUE, absolute=TRUE)
[1] 1.148700
> 
> 
> 
> cleanEx(); ..nameEx <- "naiveBayes"
> 
> ### * naiveBayes
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: naiveBayes
> 
### Title: Naive Bayes Classifier > ### Aliases: naiveBayes naiveBayes.default naiveBayes.formula > ### print.naiveBayes > ### Keywords: classif category > > ### ** Examples > > ## Categorical data only: > data(HouseVotes84) > model <- naiveBayes(Class ~ ., data = HouseVotes84) > predict(model, HouseVotes84[1:10,-1]) [1] republican republican republican democrat democrat democrat [7] republican republican republican democrat Levels: democrat republican > predict(model, HouseVotes84[1:10,-1], type = "raw") democrat republican [1,] 1.029209e-07 9.999999e-01 [2,] 5.820415e-08 9.999999e-01 [3,] 5.684937e-03 9.943151e-01 [4,] 9.985798e-01 1.420152e-03 [5,] 9.666720e-01 3.332802e-02 [6,] 8.121430e-01 1.878570e-01 [7,] 1.751512e-04 9.998248e-01 [8,] 8.300100e-06 9.999917e-01 [9,] 8.277705e-08 9.999999e-01 [10,] 1.000000e+00 5.029425e-11 > > pred <- predict(model, HouseVotes84[,-1]) > table(pred, HouseVotes84$Class) pred democrat republican democrat 238 13 republican 29 155 > > ## Example of using a contingency table: > data(Titanic) > m <- naiveBayes(Survived ~ ., data = Titanic) > m Naive Bayes Classifier for Discrete Predictors Call: naiveBayes.formula(formula = Survived ~ ., data = Titanic) A-priori probabilities: Survived No Yes 0.676965 0.323035 Conditional probabilities: Class Survived 1st 2nd 3rd Crew No 0.0818792 0.1120805 0.3543624 0.4516779 Yes 0.2855134 0.1659634 0.2503516 0.2981716 Sex Survived Male Female No 0.91543624 0.08456376 Yes 0.51617440 0.48382560 Age Survived Child Adult No 0.03489933 0.96510067 Yes 0.08016878 0.91983122 > predict(m, as.data.frame(Titanic)[,1:3]) [1] Yes No No No Yes Yes Yes Yes No No No No Yes Yes Yes Yes Yes No No [20] No Yes Yes Yes Yes No No No No Yes Yes Yes Yes Levels: No Yes > > ## Example with metric predictors: > data(iris) > m <- naiveBayes(Species ~ ., data = iris) > ## alternatively: > m <- naiveBayes(iris[,-5], iris[,5]) > m Naive Bayes Classifier for Discrete Predictors Call: naiveBayes.default(x = iris[, -5], y = iris[, 5]) A-priori probabilities: iris[, 5] setosa versicolor virginica 0.3333333 0.3333333 0.3333333 Conditional probabilities: Sepal.Length iris[, 5] [,1] [,2] setosa 5.006 0.3524897 versicolor 5.936 0.5161711 virginica 6.588 0.6358796 Sepal.Width iris[, 5] [,1] [,2] setosa 3.428 0.3790644 versicolor 2.770 0.3137983 virginica 2.974 0.3224966 Petal.Length iris[, 5] [,1] [,2] setosa 1.462 0.1736640 versicolor 4.260 0.4699110 virginica 5.552 0.5518947 Petal.Width iris[, 5] [,1] [,2] setosa 0.246 0.1053856 versicolor 1.326 0.1977527 virginica 2.026 0.2746501 > table(predict(m, iris[,-5]), iris[,5]) setosa versicolor virginica setosa 50 0 0 versicolor 0 47 3 virginica 0 3 47 > > > > cleanEx(); ..nameEx <- "permutations" > > ### * permutations > > flush(stderr()); flush(stdout()) > > ### Name: permutations > ### Title: All permutations of integers 1:n > ### Aliases: permutations > ### Keywords: datagen > > ### ** Examples > > permutations(3) [,1] [,2] [,3] [1,] 1 2 3 [2,] 2 1 3 [3,] 2 3 1 [4,] 1 3 2 [5,] 3 1 2 [6,] 3 2 1 > > > > cleanEx(); ..nameEx <- "plot.stft" > > ### * plot.stft > > flush(stderr()); flush(stdout()) > > ### Name: plot.stft > ### Title: Plot Short Time Fourier Transforms > ### Aliases: plot.stft > ### Keywords: ts > > ### ** Examples > x<-rnorm(500) > y<-stft(x) > plot(y) > > > > cleanEx(); ..nameEx <- "plot.svm" > > ### * plot.svm > > flush(stderr()); flush(stdout()) > > ### Name: plot.svm > ### Title: Plot svm objects > ### Aliases: plot.svm > ### Keywords: neural classif nonlinear > > ### ** Examples > > ## a simple 
example > library(MASS) > data(cats) > m <- svm(Sex~., data = cats) > plot(m, cats) > > ## more than two variables: fix 2 dimensions > data(iris) > m2 <- svm(Species~., data = iris) > plot(m2, iris, Petal.Width ~ Petal.Length, + slice = list(Sepal.Width = 3, Sepal.Length = 4)) > > ## plot with custom symbols and colors > plot(m, cats, svSymbol = 1, dataSymbol = 2, symbolPalette = rainbow(4), + color.palette = terrain.colors) > > > > > cleanEx(); ..nameEx <- "plot.tune" > > ### * plot.tune > > flush(stderr()); flush(stdout()) > > ### Name: plot.tune > ### Title: Plot tuning object > ### Aliases: plot.tune > ### Keywords: models > > ### ** Examples > > data(iris) > obj <- tune.svm(Species~., data = iris, sampling = "fix", + gamma = 2^c(-8,-4,0,4), cost = 2^c(-8,-4,-2,0)) > plot(obj, transform.x = log2, transform.y = log2) > plot(obj, type = "perspective", theta = 120, phi = 45) > > > > cleanEx(); ..nameEx <- "predict.naiveBayes" > > ### * predict.naiveBayes > > flush(stderr()); flush(stdout()) > > ### Name: predict.naiveBayes > ### Title: Naive Bayes Classifier > ### Aliases: predict.naiveBayes > ### Keywords: classif category > > ### ** Examples > > ## Categorical data only: > data(HouseVotes84) > model <- naiveBayes(Class ~ ., data = HouseVotes84) > predict(model, HouseVotes84[1:10,-1]) [1] republican republican republican democrat democrat democrat [7] republican republican republican democrat Levels: democrat republican > predict(model, HouseVotes84[1:10,-1], type = "raw") democrat republican [1,] 1.029209e-07 9.999999e-01 [2,] 5.820415e-08 9.999999e-01 [3,] 5.684937e-03 9.943151e-01 [4,] 9.985798e-01 1.420152e-03 [5,] 9.666720e-01 3.332802e-02 [6,] 8.121430e-01 1.878570e-01 [7,] 1.751512e-04 9.998248e-01 [8,] 8.300100e-06 9.999917e-01 [9,] 8.277705e-08 9.999999e-01 [10,] 1.000000e+00 5.029425e-11 > > pred <- predict(model, HouseVotes84[,-1]) > table(pred, HouseVotes84$Class) pred democrat republican democrat 238 13 republican 29 155 > > ## Example of using a contingency table: > data(Titanic) > m <- naiveBayes(Survived ~ ., data = Titanic) > m Naive Bayes Classifier for Discrete Predictors Call: naiveBayes.formula(formula = Survived ~ ., data = Titanic) A-priori probabilities: Survived No Yes 0.676965 0.323035 Conditional probabilities: Class Survived 1st 2nd 3rd Crew No 0.0818792 0.1120805 0.3543624 0.4516779 Yes 0.2855134 0.1659634 0.2503516 0.2981716 Sex Survived Male Female No 0.91543624 0.08456376 Yes 0.51617440 0.48382560 Age Survived Child Adult No 0.03489933 0.96510067 Yes 0.08016878 0.91983122 > predict(m, as.data.frame(Titanic)[,1:3]) [1] Yes No No No Yes Yes Yes Yes No No No No Yes Yes Yes Yes Yes No No [20] No Yes Yes Yes Yes No No No No Yes Yes Yes Yes Levels: No Yes > > ## Example with metric predictors: > data(iris) > m <- naiveBayes(Species ~ ., data = iris) > ## alternatively: > m <- naiveBayes(iris[,-5], iris[,5]) > m Naive Bayes Classifier for Discrete Predictors Call: naiveBayes.default(x = iris[, -5], y = iris[, 5]) A-priori probabilities: iris[, 5] setosa versicolor virginica 0.3333333 0.3333333 0.3333333 Conditional probabilities: Sepal.Length iris[, 5] [,1] [,2] setosa 5.006 0.3524897 versicolor 5.936 0.5161711 virginica 6.588 0.6358796 Sepal.Width iris[, 5] [,1] [,2] setosa 3.428 0.3790644 versicolor 2.770 0.3137983 virginica 2.974 0.3224966 Petal.Length iris[, 5] [,1] [,2] setosa 1.462 0.1736640 versicolor 4.260 0.4699110 virginica 5.552 0.5518947 Petal.Width iris[, 5] [,1] [,2] setosa 0.246 0.1053856 versicolor 1.326 0.1977527 virginica 2.026 0.2746501 > 
table(predict(m, iris[,-5]), iris[,5])
            
             setosa versicolor virginica
  setosa         50          0         0
  versicolor      0         47         3
  virginica       0          3        47
> 
> 
> 
> cleanEx(); ..nameEx <- "predict.svm"
> 
> ### * predict.svm
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: predict.svm
> ### Title: Predict method for Support Vector Machines
> ### Aliases: predict.svm
> ### Keywords: neural nonlinear classif
> 
> ### ** Examples
> 
> data(iris)
> attach(iris)
> 
> ## classification mode
> # default with factor response:
> model <- svm(Species ~ ., data = iris)
> 
> # alternatively the traditional interface:
> x <- subset(iris, select = -Species)
> y <- Species
> model <- svm(x, y, probability = TRUE)
> 
> print(model)

Call:
svm.default(x = x, y = y, probability = TRUE)


Parameters:
   SVM-Type:  C-classification 
 SVM-Kernel:  radial 
       cost:  1 
      gamma:  0.25 

Number of Support Vectors:  51

> summary(model)

Call:
svm.default(x = x, y = y, probability = TRUE)


Parameters:
   SVM-Type:  C-classification 
 SVM-Kernel:  radial 
       cost:  1 
      gamma:  0.25 

Number of Support Vectors:  51

 ( 8 22 21 )


Number of Classes:  3 

Levels: 
 setosa versicolor virginica

> 
> # test with train data
> pred <- predict(model, x)
> # (same as:)
> pred <- fitted(model)
> 
> # compute decision values and probabilities
> pred <- predict(model, x, decision.values = TRUE, probability = TRUE)
> attr(pred, "decision.values")[1:4,]
     setosa/versicolor setosa/virginica versicolor/virginica
[1,]          1.196132         1.091460            0.6708631
[2,]          1.064989         1.056332            0.8484557
[3,]          1.181309         1.074534            0.6440710
[4,]          1.111313         1.053143            0.6783256
> attr(pred, "probabilities")[1:4,]
        setosa versicolor   virginica
[1,] 0.9807643 0.01077289 0.008462770
[2,] 0.9736472 0.01725992 0.009092833
[3,] 0.9795191 0.01135487 0.009125984
[4,] 0.9756159 0.01458703 0.009797115
> 
> ## try regression mode on two dimensions
> 
> # create data
> x <- seq(0.1, 5, by = 0.05)
> y <- log(x) + rnorm(x, sd = 0.2)
> 
> # estimate model and predict input values
> m <- svm(x, y)
> new <- predict(m, x)
> 
> # visualize
> plot (x, y)
> points (x, log(x), col = 2)
> points (x, new, col = 4)
> 
> ## density-estimation
> 
> # create 2-dim.
normal with rho=0: > X <- data.frame(a = rnorm(1000), b = rnorm(1000)) > attach(X) > > # traditional way: > m <- svm(X, gamma = 0.1) > > # formula interface: > m <- svm(~., data = X, gamma = 0.1) > # or: > m <- svm(~ a + b, gamma = 0.1) > > # test: > newdata <- data.frame(a = c(0, 4), b = c(0, 4)) > predict (m, newdata) [1] TRUE FALSE > > # visualize: > plot(X, col = 1:1000 %in% m$index + 1, xlim = c(-5,5), ylim=c(-5,5)) > points(newdata, pch = "+", col = 2, cex = 5) > > > > cleanEx(); ..nameEx <- "rbridge" > > ### * rbridge > > flush(stderr()); flush(stdout()) > > ### Name: rbridge > ### Title: Simulation of Brownian Bridge > ### Aliases: rbridge > ### Keywords: distribution > > ### ** Examples > > # simulate a Brownian bridge on [0,1] and plot it > > x <- rbridge() > plot(x,type="l") > > > > cleanEx(); ..nameEx <- "read.matrix.csr" > > ### * read.matrix.csr > > flush(stderr()); flush(stdout()) > > ### Name: read.matrix.csr > ### Title: read/write sparse data > ### Aliases: read.matrix.csr write.matrix.csr > ### Keywords: IO > > ### ** Examples > > ## Not run: > ##D library(methods) > ##D if (require(SparseM)) { > ##D data(iris) > ##D x <- as.matrix(iris[,1:4]) > ##D y <- iris[,5] > ##D xs <- as.matrix.csr(x) > ##D write.matrix.csr(xs, y = y, file="iris.dat") > ##D xs2 <- read.matrix.csr("iris.dat")$x > ##D if (!all(as.matrix(xs) == as.matrix(xs2))) > ##D stop("Error: objects are not equal!") > ##D } > ## End(Not run) > > > > cleanEx(); ..nameEx <- "rectangle.window" > > ### * rectangle.window > > flush(stderr()); flush(stdout()) > > ### Name: rectangle.window > ### Title: Computes the Coefficients of a Rectangle Window. > ### Aliases: rectangle.window > ### Keywords: ts > > ### ** Examples > x<-rnorm(500) > y<-stft(x, wtype="rectangle.window") > plot(y) > > > > cleanEx(); ..nameEx <- "rwiener" > > ### * rwiener > > flush(stderr()); flush(stdout()) > > ### Name: rwiener > ### Title: Simulation of Wiener Process > ### Aliases: rwiener > ### Keywords: distribution > > ### ** Examples > > # simulate a Wiener process on [0,1] and plot it > > x <- rwiener() > plot(x,type="l") > > > > cleanEx(); ..nameEx <- "shortestPaths" > > ### * shortestPaths > > flush(stderr()); flush(stdout()) > > ### Name: allShortestPaths > ### Title: Find Shortest Paths Between All Nodes in a Directed Graph > ### Aliases: allShortestPaths extractPath > ### Keywords: optimize > > ### ** Examples > > ## build a graph with 5 nodes > x <- matrix(NA, 5, 5) > diag(x) <- 0 > x[1,2] <- 30; x[1,3] <- 10 > x[2,4] <- 70; x[2,5] <- 40 > x[3,4] <- 50; x[3,5] <- 20 > x[4,5] <- 60 > x[5,4] <- 10 > print(x) [,1] [,2] [,3] [,4] [,5] [1,] 0 30 10 NA NA [2,] NA 0 NA 70 40 [3,] NA NA 0 50 20 [4,] NA NA NA 0 60 [5,] NA NA NA 10 0 > > ## compute all path lengths > z <- allShortestPaths(x) > print(z) $length [,1] [,2] [,3] [,4] [,5] [1,] 0 30 10 40 30 [2,] NA 0 NA 50 40 [3,] NA NA 0 30 20 [4,] NA NA NA 0 60 [5,] NA NA NA 10 0 $middlePoints [,1] [,2] [,3] [,4] [,5] [1,] 0 0 0 5 3 [2,] 0 0 0 5 0 [3,] 0 0 0 5 0 [4,] 0 0 0 0 0 [5,] 0 0 0 0 0 > > ## the following should give 1 -> 3 -> 5 -> 4 > extractPath(z, 1, 4) [1] 1 3 5 4 > > > > cleanEx(); ..nameEx <- "sigmoid" > > ### * sigmoid > > flush(stderr()); flush(stdout()) > > ### Name: sigmoid > ### Title: The logistic function and derivatives > ### Aliases: sigmoid dsigmoid d2sigmoid > ### Keywords: math > > ### ** Examples > > plot(sigmoid, -5, 5, ylim = c(-.2, 1)) > plot(dsigmoid, -5, 5, add = TRUE, col = 2) > plot(d2sigmoid, -5, 5, add = TRUE, col = 3) > > > > cleanEx(); ..nameEx <- "skewness" 
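## dsigmoid() is the derivative of the logistic function, which satisfies
## the identity s'(x) = s(x) * (1 - s(x)); a quick numeric check of that
## identity (an editorial sketch, not part of the check transcript):
s <- sigmoid(0.5)
all.equal(dsigmoid(0.5), s * (1 - s))   # TRUE up to floating point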
> > ### * skewness > > flush(stderr()); flush(stdout()) > > ### Name: skewness > ### Title: Skewness > ### Aliases: skewness > ### Keywords: univar > > ### ** Examples > > x <- rnorm(100) > skewness(x) [1] -0.07115113 > > > > cleanEx(); ..nameEx <- "stft" > > ### * stft > > flush(stderr()); flush(stdout()) > > ### Name: stft > ### Title: Computes the Short Time Fourier Transform of a Vector > ### Aliases: stft > ### Keywords: ts > > ### ** Examples > x<-rnorm(500) > y<-stft(x) > plot(y) > > > > cleanEx(); ..nameEx <- "svm" > > ### * svm > > flush(stderr()); flush(stdout()) > > ### Name: svm > ### Title: Support Vector Machines > ### Aliases: svm svm.default svm.formula summary.svm print.summary.svm > ### print.svm > ### Keywords: neural nonlinear classif > > ### ** Examples > > data(iris) > attach(iris) > > ## classification mode > # default with factor response: > model <- svm(Species ~ ., data = iris) > > # alternatively the traditional interface: > x <- subset(iris, select = -Species) > y <- Species > model <- svm(x, y) > > print(model) Call: svm.default(x = x, y = y) Parameters: SVM-Type: C-classification SVM-Kernel: radial cost: 1 gamma: 0.25 Number of Support Vectors: 51 > summary(model) Call: svm.default(x = x, y = y) Parameters: SVM-Type: C-classification SVM-Kernel: radial cost: 1 gamma: 0.25 Number of Support Vectors: 51 ( 8 22 21 ) Number of Classes: 3 Levels: setosa versicolor virginica > > # test with train data > pred <- predict(model, x) > # (same as:) > pred <- fitted(model) > > # Check accuracy: > table(pred, y) y pred setosa versicolor virginica setosa 50 0 0 versicolor 0 48 2 virginica 0 2 48 > > # compute decision values and probabilities: > pred <- predict(model, x, decision.values = TRUE) > attr(pred, "decision.values")[1:4,] setosa/versicolor setosa/virginica versicolor/virginica [1,] 1.196132 1.091460 0.6708631 [2,] 1.064989 1.056332 0.8484557 [3,] 1.181309 1.074534 0.6440710 [4,] 1.111313 1.053143 0.6783256 > > # visualize (classes by color, SV by crosses): > plot(cmdscale(dist(iris[,-5])), + col = as.integer(iris[,5]), + pch = c("o","+")[1:150 %in% model$index + 1]) > > ## try regression mode on two dimensions > > # create data > x <- seq(0.1, 5, by = 0.05) > y <- log(x) + rnorm(x, sd = 0.2) > > # estimate model and predict input values > m <- svm(x, y) > new <- predict(m, x) > > # visualize > plot(x, y) > points(x, log(x), col = 2) > points(x, new, col = 4) > > ## density-estimation > > # create 2-dim. 
normal with rho=0:
> X <- data.frame(a = rnorm(1000), b = rnorm(1000))
> attach(X)
> 
> # traditional way:
> m <- svm(X, gamma = 0.1)
> 
> # formula interface:
> m <- svm(~., data = X, gamma = 0.1)
> # or:
> m <- svm(~ a + b, gamma = 0.1)
> 
> # test:
> newdata <- data.frame(a = c(0, 4), b = c(0, 4))
> predict (m, newdata)
[1]  TRUE FALSE
> 
> # visualize:
> plot(X, col = 1:1000 %in% m$index + 1, xlim = c(-5,5), ylim=c(-5,5))
> points(newdata, pch = "+", col = 2, cex = 5)
> 
> # weights: (example not particularly sensible)
> i2 <- iris
> levels(i2$Species)[3] <- "versicolor"
> summary(i2$Species)
    setosa versicolor 
        50        100 
> wts <- 100 / table(i2$Species)
> wts
    setosa versicolor 
         2          1 
> m <- svm(Species ~ ., data = i2, class.weights = wts)
> 
> 
> 
> cleanEx(); ..nameEx <- "tune"
> 
> ### * tune
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: tune
> ### Title: Parameter tuning of functions using grid search
> ### Aliases: tune best.tune print.tune summary.tune print.summary.tune
> ### Keywords: models
> 
> ### ** Examples
> 
> data(iris)
> ## tune `svm' for classification with RBF-kernel (default in svm),
> ## using one split for training/validation set
> 
> obj <- tune(svm, Species~., data = iris,
+             ranges = list(gamma = 2^(-1:1), cost = 2^(2:4)),
+             tunecontrol = tune.control(sampling = "fix")
+            )
> 
> ## alternatively:
> ## obj <- tune.svm(Species~., data = iris, gamma = 2^(-1:1), cost = 2^(2:4))
> 
> summary(obj)

Parameter tuning of `svm':

- sampling method: fixed training/validation set 

- best parameters:
 gamma cost
   0.5    4

- best performance: 0.04 

- Detailed performance results:
  gamma cost error
1   0.5    4  0.04
2   1.0    4  0.04
3   2.0    4  0.04
4   0.5    8  0.04
5   1.0    8  0.06
6   2.0    8  0.06
7   0.5   16  0.06
8   1.0   16  0.06
9   2.0   16  0.06

> plot(obj)
> 
> ## tune `knn' using a convenience function; this time with the
> ## conventional interface and bootstrap sampling:
> x <- iris[,-5]
> y <- iris[,5]
> obj2 <- tune.knn(x, y, k = 1:5, tunecontrol = tune.control(sampling = "boot"))
> summary(obj2)

Parameter tuning of `knn.wrapper':

- sampling method: bootstrapping 

- best parameters:
 k
 3

- best performance: 0.05333333 

- Detailed performance results:
  k      error
1 1 0.06000000
2 2 0.06000000
3 3 0.05333333
4 4 0.05333333
5 5 0.06000000

> plot(obj2)
> 
> ## tune `rpart' for regression, using 10-fold cross validation (default)
> data(mtcars)
> obj3 <- tune.rpart(mpg~., data = mtcars, minsplit = c(5,10,15))
Loading required package: rpart
> summary(obj3)

Parameter tuning of `rpart.wrapper':

- sampling method: 10-fold cross validation 

- best parameters:
 minsplit
       10

- best performance: 14.58208 

- Detailed performance results:
  minsplit    error
1        5 15.32165
2       10 14.58208
3       15 16.54531

> plot(obj3)
> 
> ## simple error estimation for lm using 10-fold cross validation
> tune(lm, mpg~., data = mtcars)

Error estimation of 'lm' using 10-fold cross validation: 12.46788 

> 
> 
> 
> cleanEx(); ..nameEx <- "write.svm"
> 
> ### * write.svm
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: write.svm
> ### Title: Write SVM object to file
> ### Aliases: write.svm
> ### Keywords: neural nonlinear classif
> 
> ### ** Examples
> 
> data(iris)
> attach(iris)
> 
> ## classification mode
> # default with factor response:
> model <- svm (Species~., data=iris)
> 
> # export SVM object to file
> write.svm(model, svm.file = "iris-classifier.svm", scale.file = "iris-classifier.scale")
> 
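## The scale file written above stores one (center, scale) pair per input
## dimension. A minimal sketch of how external code would standardize new
## data with it before evaluating the exported SVM (an editorial sketch;
## the column names V1/V2 are read.table() defaults):
sc <- read.table("iris-classifier.scale")   # col 1: center, col 2: scale
x.new <- as.matrix(iris[1:3, 1:4])          # a few unscaled rows
x.std <- scale(x.new, center = sc$V1, scale = sc$V2)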
> # read scale file
> # the n-th row corresponds to the n-th dimension; the 1st column contains
> # the center value, the 2nd column the scale value.
> 
> read.table("iris-classifier.scale")
        V1        V2
1 5.843333 0.8280661
2 3.057333 0.4358663
3 3.758000 1.7652982
4 1.199333 0.7622377
> 
> 
> 
> 
> ### *