[
  {
    "path": "00_迷你小案例.R",
    "content": "\n# A mini but complete case with only 50 lines of code\n\n# Data Import -------------------------------------------------------------\nlibrary(readr)\ncjb_url <- \"data/cjb.csv\"\ncjb <- read_csv(cjb_url, locale = locale(encoding = \"CP936\"))\nView(cjb) # View the data\n\n# Data Exploration --------------------------------------------------------\nlibrary(tidyverse)\ncjb %>%\n    dplyr::select(sx, wlfk) %>%\n    ggplot(aes(x = wlfk,\n               y = sx,\n               fill = wlfk)) +\n    geom_boxplot(width = 0.5)\n\n# Data Preparation --------------------------------------------------------\nas_five_grade_scores <- function(x) {\n    cut(\n        x,\n        breaks = c(0, seq(60, 100, by = 10)),\n        include.lowest = TRUE,\n        right = FALSE,\n        ordered_result = TRUE,\n        labels = c(\"不及格\", \"及格\", \"中\", \"良\", \"优\")\n    )\n}\ncjb <- cjb %>%\n    mutate(zcj = rowSums(.[4:12])) %>%\n    filter(zcj != 0) %>% #剔除脏数据\n    mutate_at(vars(xb, wlfk), factor) %>% #类型转换\n    mutate_at(vars(yw:sw), as_five_grade_scores)#数据分箱\nView(cjb) #查看转换后的数据\n\n# Model -------------------------------------------------------------------\nlibrary(arulesViz)\nmy_model <- cjb %>%\n    select(xb:wlfk) %>%\n    apriori(parameter = list(supp = 0.06, conf = 0.8, maxlen=11),\n            appearance = list(rhs = paste0(\"wlfk=\", c(\"文科\", \"理科\"))))\n\n# Visualization -----------------------------------------------------------\ninspectDT(my_model) #输出挖掘到的规则，查看其性能指标\n\nplot(my_model, method = \"graph\") #结果可视化\n\n# The End ^-^ -------------------------------------------------------------\n\n\n"
  },
  {
    "path": "01_Get Your Hands Dirty.R",
    "content": "\n\n# 01_Get Your Hands Dirty -------------------------------------------------\n\n\n#大部分的R语言上手教程\n#都先给大家介绍一些零零散散的简单操作\n#本课程也不能免俗，先给大家看一些简单的语句\n#经过简单的语句热身之后，再给大家讲一个完整的故事\n\n#以下代码，请逐行手敲，不要简单的拷贝粘贴\n#撸起袖子开干吧\n\n\n# Basics ------------------------------------------------------------------\n\nx <- 2 #创建一个变量x\n\n# 一些简单的数学运算\nsqrt(x) #熟悉的根号2\n? sqrt #打开帮助文档，会发现sq rt = square root\nx ^ 0.5 #与上一语句等价\nx ^ 2 #x的平方\ny <- x ^ 2 #将x的平方赋值给变量y\n\n#圆的面积\nr <- x #设圆的半径为2\narea <- pi * (r ^ 2)\n#上式中pi的含义相信大家都已经猜到了，为内置常数\npi\narea #将面积显示出来\nprint(pi, digits = 11)\n(circumference <- 2 * pi * r) #不仅给周长赋值，还显示出来\n\nlog2(x)#底为2的对数\nlog10(x)#底为10的对数\nlog(x)#底为e的对数\nexp(1)#数学常数e\nexp(x)#e^x\nlog(x, base = c(2, exp(1), 10))#取不同的底\n\n#创建一个向量\nx <- seq(from = -10, to = 10, by = 0.5)#从-10开始，步长为0.05，最大值不超过10\n#x被重新赋值了\n#注意与其他一些高级语言的区别，x并不需要事先被定义\n#同时，被重新赋值之后，x便是新的数据对象\nx\n2 * x\nmin(x)\nmax(x)\nrange(x) #最大值和最小值\ndiff(range(x)) #滞后差分项\ndiff(c(1, 3, 6), lag  = 2) #滞后两期\ndiff(c(1, 3, 6), differences  = 2) #二阶差分\n\nx <- seq(-pi, pi, by = pi / 100)\ny <- sin(x)\nplot(x, y) #绘制正弦曲线\ny <- abs(y)\nlines(x, y, col = \"blue\", type = \"o\") # 在原图上叠加\n#积分\nintegrate(sin, 0, pi)\nintegrate(function(x) {\n    abs(sin(x))\n    }, -10, 10)\n\nz <- sin(2 * x)\nlines(x, z, col = \"red\", type = \"o\")\n\n#解方程，超过800按20%扣税\n#求税后是x- (x - 800) * 20% = 10000\nf <- function(x, AT = 10000) {\n    x - (x - 800) * 0.2 - AT\n}\nuniroot(f, c(10000, 15000), 10000)$root\n\n#创建向量a\na <- c(3, 4)\n#向量a的长度|a|\nsqrt(sum(a ^ 2))\n#两个向量之和\nb <- c(2, 8)\na + b\n#向量的内积<a, b>\na %*% b\ndrop(a %*% b) #结果作为标量\nsum(a * b) #与上一语句等价\n\n\n#创建一个矩阵\n# 1  2\n# 3 -1\nA <- matrix(c(1, 2,\n              3, -1),\n            ncol = 2,\n            byrow = TRUE)\nA\n#在窗格中显示A\nView(A)\n#矩阵的转置\nt(A)\n#求矩阵的逆\nsolve(A)\n#行列式\ndet(A)\n#特征向量与特征值\neigen(A)\n\n#排列组合\nchoose(4, 2)\n\n#解线性代数方程\n#Ax = b\n#x1 + 2x2 = 2\n#3x1 - x2 = 4\nb <- c(2, 4)\nsolve(A, b)\n#求解A的逆矩阵\nsolve(A)\n#等价于\nsolve(A, 
diag(2))\n#这里diag(2)是一个单位阵\ndiag(2)#单位阵\ndiag(1:3) #对角阵\n\n#排列组合\nchoose(4, 2)\nfactorial(4)#4! = 4*3*2*1\ncombn(c('I', 'C', 'B', 'C'), 2) #从四个元素中选两个\nexpand.grid(c(1, 2, 3),\n            c('a', 'b', 'c')) #元素组合\n\n\n#常见的集合运算\nA <- 1:10\nB <- seq(5, 15, 2)\nC <- 1:5\n#求A和B的并集\nunion(A, B)\n#求A和B的交集\nintersect(A, B)\n#求A-B\nsetdiff(A, B)\n#求B-A\nsetdiff(B, A)\n#检验集合A,B是否相同\nsetequal(A, B)\n#检验元素12是否属于集合C\nis.element(12, C)\n#检验集合A是否包含C\nC %in% A\nall(C %in% A)\nC %in% B\nall(C %in% B)\n\n\n#抽样\nset.seed(2012)#设定随机数种子\n#在set.seed之后，执行后续相同的代码，结果相同\nidx <- 1:100\nsample(idx)#打乱次序\n#选取70%的数据作为训练集\ntrain_set_idx <- sample(idx, 70)\ntrain_set_idx <- sample(idx, length(idx) * 0.7)\ntrain_set_idx\n\nidx_selected <- sample(idx, 100, rep = TRUE) #有放回的抽样\nidx_not_selected <- setdiff(idx, idx_selected)\nlength(idx_not_selected) / length(idx)\nunique(idx_selected)\ntable(idx_selected)\nsort(table(idx_selected), decreasing = TRUE)\n\n\n# Storytelling: Division of Arts and Sciences -----------------------------\n\n\n#前边这些简单的脚本，显然不能满足小伙伴们关于R的各种幻想\n#我们学习R语言数据分析时，显然不应该再从hello world开始\n#在此，我们换一个思路：通过讲一个完整的故事，\n#让大家对R语言数据分析有一个直观的认识\n#故事的梗概是根据各门课的成绩，进行文理分科\n\n#类似的故事应该也发生在很多小伙伴身上\n#大约在五六年前（假如您修这门课时，刚上研究生的话）\n#那时候，大家都还只是高中生\n#高中会根据大家的各科成绩，进行文理分科\n#我们希望这个数据分析的故事不需要太多专业背景知识\n#同时，又是发生在咱们自己身上的事情\n#自己的事情，才会比较有感触，\n#也有助于我们找到数据分析的感觉\n\n#先从最基本的向量开始\n#向量，用于存储对多个对象的某一属性进行观测所得到的结果\n#比如现在有6个同学，对他们的姓名进行观测，得结果如下：\nxm <- c(\"周黎\", \"汤海明\", \"舒江辉\", \"翁柯\", \"祁强\", \"湛容\")\nxm\n#xm是变量名称。\n#本课程变量命名，要么采用拼音第一个字母，要么采用相应的英文\n#如此便生成了一个长度为6的字符向量\n#生成的方式很简单，就是调用c()这个函数\n#Combine Values into a Vector or List\n#R里边，函数的调用，就是用函数名紧接着相应的小括号\n#c()这个函数的用法，请执行下述语句\n?  
c\n\n#找一找第1位、第3为同学分别叫什么\nxm[c(1, 3)]\n#小伙伴们应该注意到了，R里边三种括号的用法\n#小括号()：表示函数调用\n#中括号[]：表示下标\n#大括号{}：表示语句块\n#下标顺序可颠倒、可重复\nxm[c(1, 3, 3, 2)]\n#好比辅导员喊1号、3号、3号、2号，然后相应的同学报出自己的名字\n\n#也可以反向选择\nxm[-c(1, 3)] #不包含第1、3名同学\n#也可以倒过来\nxm[6:1]\nrev(xm) #与上一语句效果相同\n\n#给这些同学一些学号\nxh <- c(201003001,\n        201003002,\n        201003003,\n        201003004,\n        201003005,\n        201003006)\n#因为学号是连续的，也可以通过下边的方式生成\nxh <- 201003001:201003006\n#用冒号连接的，是指生成步长为1的等差数列\n#生成等差数列更一般的方法\nxh2 <- seq(from = 201003001,\n           to = 201003006,\n           by = 2)\n#显然，此时的步长为2\n#生成这个序列时，也是在调用函数\n#调用的是seq函数，接受了三个参数\n#各参数的含义，请小伙伴自行查阅文档\n? seq\n#至此，我们已经学会了两种向量：字符型和数值型\n\nxb <- c(\"女\", \"男\", \"男\", \"女\", \"男\", \"女\")\n#性别的特殊之处在于，它的取值水平是有限的\n#这种情况下，我们应该将其转换为因子\nxb <- as.factor(xb)\n#统计一下男、女的数量\ntable(xb)\n\n#对这6个同学的语文成绩进行记录\nyw <- c(94, 87, 92, 91, 85, 92)\n#看看平均成绩\nmean(yw)\n#对他们进行排序\nsort(yw)\n#将高分排在前边\nsort(yw, decreasing = TRUE)\n#另外一个函数\norder(yw) #成绩从低到高的序号\nyw[order(yw)] #与sort(yw)等价\n\n#看看同学们的排名\nxm[order(yw)]\n#可以将他们排成一个“张三 < 李四 < 王五”的队列么\npaste(xm[order(yw)], collapse = \" < \")\n#paste是指将不同的字符粘在一起\n#比如，创建x1~x10变量名\npaste(\"x\", 1:10, sep = \"\")\n#再比如，将每个人的姓名与成绩粘在一起\npaste(xm, '语文成绩为', yw)\ncat(paste(xm, '\\t语文成绩为\\t', yw), sep = \"\\n\")\n\n\n#语文成绩加5分\nyw + 5#结果依旧是长度为6的一个向量\nyw #原向量并没有变\nyw <- yw + 5 #换一种方式\nyw #这个时候变了\n\n#找找男生的成绩\nxb == \"男\"\n#[1] FALSE  TRUE  TRUE FALSE  TRUE FALSE\nxm[xb == \"男\"]\nyw[xb == \"男\"]\n#实际上就是\nyw[c(FALSE, TRUE, TRUE, FALSE, TRUE, FALSE)]\n#无非是一个等长的逻辑向量，将其中的TRUE部分取出来而已\n#注意：千万不要以为R会去自动寻找什么“性别为男”的部分\n#再举一个例子\n#运行之前，小伙伴们猜一下结果\nxm[yw > 90]\n#不出意外的话，你已经学会了下表里边最难掌握的内容：\n#逻辑下标\n#找到逻辑值为真的位置\nwhich(yw > 90)\n#显然，下边的语句与前述结果一样\nxm[which(yw > 90)]\n\n\n#将百分之成绩换算成五分制成绩\nyw5 <- cut(yw, breaks = c(0, 60, 70, 80, 90, 100))\nyw5 #结果是变成了若干个区间\n#一个简单的cut\n#居然就完成了数据预处理中一个很重要的操作：\n#数据离散化，或者说，数据分箱\nyw5 <- cut(\n    yw,\n    breaks = c(0, seq(60, 100, by = 10)),\n    include.lowest = TRUE,\n    right = FALSE,\n    ordered_result = TRUE,\n    labels = c(\"不及格\", 
\"及格\", \"中\", \"良\", \"优\")\n)\nyw5\n#小伙伴们应该注意到了，除了打上标签之外\n#还增加了ordered这一项\n#前述的xing_bie中，因为男女平等，所以是无序因子\n#但是，对于成绩而言，显然是有序的\n#对因子进行排序\n\n#每一个同学都有多科成绩\n#语文\nyw <- c(94, 87, 92, 91, 85, 92)\n#数学\nsx <- c(82, 94, 79, 84, 92, 82)\n#外语\nwy <- c(96, 89, 86, 96, 82, 85)\nysw <- c(yw, sx, wy) #长度为3*6=18的向量\nysw <- matrix(ysw, ncol = 3)\nysw\nView(ysw)\n#小伙伴们看到了\n#矩阵，其实就是一个二维表\n#每一列，是一个变量的取值\ncolnames(ysw) <- c(\"yw\", \"sx\", \"wy\")\nrownames(ysw) <- xm\nView(ysw)\n#算一算各门课的平均分\napply(ysw, 2, mean)\n#看看每个人的总成绩\napply(ysw, 1, sum)\nzcj <- apply(ysw, 1, sum)\n\nnrow(ysw) #行数\nncol(ysw) #列数\n#增加一列总成绩\nysw <- cbind(ysw, zcj)\nView(ysw)\n\nclass(ysw)#看看ysw的类\nmode(ysw) #存储的模式为数值型\n\n#看看他的第一列\nysw[, 1]\n#这里，使用的下标，依然是方括号\n#但是，因为是二维的，所以方括号里边有两部分组成\n#逗号之前，是表示行；逗号之后，是表示列\nysw[1, ]\n#产看某些行、某些列\nysw[1:2, 1:3]\n#不同的行，当然是可以互换的\nysw[6:1, ]\nysw[, c(3, 1, 2)]\n#重复某些行、列当然也可以\nysw[c(1, 3, 3, 1, 2), ]\n\n\n#当然，我们现在可以将姓名、学号、性别、各科成绩放一起\ncjb <- data.frame(\n    xm = xm,\n    xb = xb,\n    yw = yw,\n    sx = sx,\n    wy = wy\n)\n#形式上看，和矩阵差不多\n#但是，不同的列，类别是不一样的\n#有的是字符向量、有的是因子、有的是数值向量\n#这种情况下，应该请出R里边最重要的数据对象了：\n#数据框！！！！\n#可以按照总分，对成绩表进行排序\ncjb$zcj <- apply(cjb[, 3:5], 1, sum)\ncjb <- cjb[order(cjb$zcj, decreasing = TRUE), ]\nView(cjb)\n\nstr(cjb)\n#也可以对它进行简单的统计汇总\nsummary(cjb)\n\n#至此，我们已经了解了R里边四种数据对象\n#向量——单变量观测值（各种类型的变量，包括连续型数值变量）\n#因子——单变量观测值（离散变量，取值水平有限）\n#矩阵——多变量观测值（同质）\n#数据框——多变量观测值（异质）\n\n#当然，绝大部分时候，我们所要分析的数据\n#不会在代码里边一个一个的敲入\n#按照数据挖掘的方法论\n#在需求确定之后，首先会采集数据\n#比如，我们要分析的成绩数据，置于以下URL之中\ncjb_url <- \"data/cjb.csv\"\ncjb <- read.csv(cjb_url,\n                stringsAsFactors = FALSE,\n                encoding = \"CP936\")\n\nView(cjb)\n#看看他的结构\nstr(cjb)\n#输出如下\n#>'data.frame':\t775 obs. 
of  13 variables:  ##表示数据框，有775条记录，每条记录13个变量/属性\n#>   $ xm  : chr  \"周黎\" \"汤海明\" \"舒江辉\" \"翁柯\" ...\n#> $ bj  : int  1101 1101 1101 1101 1101 1101 1101 1101 1101 1101 ...\n#> $ xb  : chr  \"女\" \"男\" \"男\" \"女\" ...\n#> $ yw  : int  94 87 92 91 85 92 88 81 88 94 ...\n#> $ sx  : int  82 94 79 84 92 82 72 89 77 81 ...\n#> $ wy  : int  96 89 86 96 82 85 86 87 95 88 ...\n#> $ zz  : int  97 95 98 93 93 91 94 97 94 91 ...\n#> $ ls  : int  97 94 95 97 87 90 87 94 84 85 ...\n#> $ dl  : int  98 94 96 94 88 92 88 96 94 98 ...\n#> $ wl  : int  95 90 89 82 95 82 89 81 87 81 ...\n#> $ hx  : int  94 90 94 90 94 98 98 88 94 88 ...\n#> $ sw  : int  88 89 87 83 93 90 94 83 82 88 ...\n#> $ wlfk: chr  \"文科\" \"文科\" \"文科\" \"文科\" ...\n\n#看完数据结构之后，可以看看具体的取值\nhead(cjb) #控制台中输出前6行\nView(head(cjb, n = 10)) #窗口中输出前10行\nView(tail(cjb, n = 10)) #窗口显示后10行\n\n#看看有多少个班级\nunique(cjb$bj)\ncjb$bj <- factor(cjb$bj) #因为班级取值水平有限，转换为因子\nnlevels(cjb$bj) #因子取值水平的个数，在这里也就是班级数\nlevels(cjb$bj) #具体的班级名\ncjb$xb <- factor(cjb$xb)\ntable(cjb$xb)#看得出来，女生比男生多一点\ncjb$wlfk <- factor(cjb$wlfk)\ntable(cjb$wlfk)#文理分科相对均衡\n\n#可以看看男女生，在文理分科方面有没有倾向性\ntable(cjb$xb, cjb$wlfk)\n#     理科 文科\n# 男  238  131\n# 女  143  263\n#可见，文理分科，性别还是很明显的\n\n#可以增加一列总成绩\ncjb$zcj <- apply(cjb[, 4:12], 1, sum)\nView(cjb)\n\n#通过五数来查看总成绩的分布\nfivenum(cjb$zcj)\n#0 767 801 832 885\n#居然有0分的，这显然是异常点，可能该生并未参加考试\n#找到这个学生\ncjb[cjb$zcj == 0, ]\n#通过箱线图，可以观察数据更为详尽的分布\n#由此来查找是否有更多的离群点\nboxplot(cjb$zcj)\nboxplot.stats(cjb$zcj)\noutliers <- boxplot.stats(cjb$zcj)$out\n#看看哪些学生离群\nView(cjb[cjb$zcj %in% outliers, ])\n#离群点的序号\noutliers_idx <- which(cjb$zcj %in% outliers)\n\n#看看不同班级，学生总成绩的分布\nboxplot(zcj ~ bj,\n        data = cjb,\n        col = rep(2:8, len = 15))\n#看看文理分科总成绩的分布\nboxplot(zcj ~ wlfk,\n        data = cjb,\n        col = 2:3)\n#从图中可以看出，理科分数相对偏高\n\n#也可以观察不同科目的数据\n#语文\nmin(cjb$yw) #最低分\nmax(cjb$yw) #最高分\nrange(cjb$yw) #极差\ndiff(range(cjb$yw)) #效果相同\n#前十名\nhead(sort(cjb$yw, decreasing = TRUE), n = 10)\n#后十名\nsort(cjb$yw)[1:10]\nmean(cjb$yw) 
#平均分\nmean(cjb$yw, trim = 0.2)#类似于娱乐节目里边的去掉一个最低分、去掉一个最高分\nmedian(cjb$yw) #中位数\nsd(cjb$yw) #标准差\n\n#看看哪些科目差距最大/最小\nsort(apply(cjb[, 4:12], 2, sd), decreasing = TRUE)\n#由此可以看出：\n#物理成绩差别最大，标准差为12.45\n#政治差别最小，标准差为5.63\n#意味着什么？总分方面，数学可能拉开差距？？\n#那咱们来看看，究竟哪些科目，与最后总分相关性最强\nnames(cjb)\ntail(cor(cjb[, c(4:12, 14)]), n = 1)\ntail(cor(cjb[-outliers_idx, c(4:12, 14)]), n = 1)\n#从线性相关性来看\n#政治不出意外的，最不相关，仅为0.46\n#数学最相关，为0.80，生物次之，为0.79\n#这其实已经是一个比较有意思的结果了\n#至少佐证了，哪些是容易拉分的科目\n\n#当然，我们能够用图形来表达其相关性，那就再好不过了\ncor_val <- cor(cjb[-outliers_idx, c(4:12, 14)])\nround(cor(cjb[-outliers_idx, c(4:12, 14)]), digits = 2)\nsymnum(cor(cjb[-outliers_idx, c(4:12, 14)]))\n\n#第一次运行的话\n#需要安装corrplot包\n#install.packages(\"corrplot\")\nlibrary(corrplot)\ncorrplot(cor_val,\n         method = \"color\",\n         diag = FALSE)\n\n#咱们来具体看看数学成绩的分布，于政治成绩分布的对比\nhist(cjb$sx[-outliers_idx],\n     freq = FALSE,\n     ylim = c(0, 0.12))\nlines(density(cjb$sx[-outliers_idx]),\n      col = \"red\",\n      lwd = 2)\nlines(density(cjb$zz[-outliers_idx]),\n      col = \"blue\",\n      lwd = 2)\n\n#当然，还可以做各种各样的数据探索\n#限于篇幅，我们直接将直接进入主题\n#寻找那些因素决定了文理分科\n#我们采用决策树\n#来对文理分科进行判定\n\n#对于那些异常点，我们可以先剔除掉\ncjb <- cjb[-outliers_idx, ]\n#先将数据分为训练集和测试集两部分\ntrain_idx <- sample(1:nrow(cjb),\n                    round(nrow(cjb) * 0.7))\ntrain_set <- cjb[train_idx, ]\n\n#第一次运行\n#需要安装rpart\n#install.packages(\"rpart\")\nlibrary(rpart)\ntree_model <- rpart(wlfk ~ ., data = cjb[, 4:13])\nplot(tree_model,\n     uniform = TRUE,\n     branch = 0.8,\n     margin = 0.1)\ntext(tree_model,\n     all = TRUE,\n     use.n = TRUE,\n     cex = 0.7)\ntree_model\n\n#可以画得好看一点\nlibrary(rpart.plot)\nrpart.plot(\n    tree_model,\n    type = 4,\n    fallen = T,\n    branch = .5,\n    round = 0,\n    leaf.round = 6,\n    #clip.right.labs = T,\n    cex = 0.75,\n    under.cex = 0.75,\n    box.palette = \"GnYlRd\",\n    branch.col = \"gray\",\n    branch.lwd = 2,\n    extra = 101,\n    under = T,\n    lt = \" < \",\n    ge = \" >= \",\n    split.cex = 
0.85\n)\n\n#输出规则\nlibrary(rattle)\nasRules(tree_model)\n\n#看看规则的准确性\ntest_set <- cjb[-train_idx, ]\npredicted <- predict(tree_model, test_set, type = \"class\")\ncon_table <- table(predicted, test_set$wlfk)\nsum(diag(con_table)) / sum(con_table)\n#> [1] 0.8209607\n#十之七八，是能预测对的\n#仅仅根据成绩，来进行文理分科，可能这个结果已经可以接受了\n\n\n#以上代码的行数并不多，但是，我们已经了解到：\n#（1）R里边四种数据对象\n#（2）如何读取数据\n#（3）进行基本的数据预处理\n#（4）开展探索性数据分析\n#（5）建立分类模型，并开展模型评估和结果可视化\n\n#当然，这只是一个非常粗浅的版本\n#实际上，整门课程，都将通过这一份数据进行分析\n#更多的兴奋点，且听下回分解\n\n#这，只是你对机器学习/数据挖掘感兴趣的开始~\n#有部分代码，并没有做过多解释。\n#但相信，这些令人兴奋的结果，已经引起你的兴趣了。\n#兴趣，是最重要的！！！\n\n# The End ^-^ -------------------------------------------------------------\n"
  },
  {
    "path": "02_基础编程.R",
    "content": "\n\n# 02_基础编程 --------------------------------------------------------------\n\n#学习R语言数据分析，或是其它数据科学编程\n#往往学习曲线都比较陡\n#因为既要学习语言本身，还需要掌握机器学习/数据挖掘的模型和算法\n\n#本课程的策略是分而治之：\n#先讲R语言，\n#再以R语言为工具，讲授机器学习/数据挖掘的模型和算法\n\n#单就R语言的学习来说，无非掌握两个方面：\n#R语言 = 基础编程 + 数据对象\n#这也体现了作为数据分析语言的特殊之处\n#在掌握一些基本的语法、逻辑控制之后\n#R语言的核心在于数据对象及其操作\n#或者说，R语言会比一般的编程语言更多关注数据对象本身\n#02_基础编程\n#03_数据对象\n\n\n#先看R的基础编程\n#考虑到大家已经Get Hands Dirty了\n#接下来，我们从R语言本身的特点讲起\n#力图用最简单语言，把R基础编程需要掌握的核心要点\n#给大家做一个交待\n\n#时至今日，编程几乎是必备技能\n#编程，并不只是程序员的事情\n#无论你身处学术界、产业界\n#无论你搞科研，还是做工程\n#在绝大部分的高技术里，都需要编程\n#不会编程，几乎是难以想像的\n\n#R编程最大的特点，就是：\n#用别人的包和函数，讲述你自己的故事！\n#当然，你有新的idea，可以写新的算法或是改进已有算法\n#即便如此，你也需要大量使用既有函数\n#作为一个数据分析人员，编写代码，早就不应该是从零开始了\n#而是站在巨人的肩膀上\n#尽管，很多小伙伴觉得从零开始更能满足自己的控制欲\n\n#所以，先应该学会安装包、查找包\n#查找包的方法，请参照讲义内容\n\n\n# Add-on Packages ---------------------------------------------------------\n\n#对于Windows用户，推荐将扩展包单独安装在D:盘\n#这样不会污染R系统本身的文件\n#此时注意设置好环境变量R_LIBS_USER，\n#会直接将我们私有的包安装在指定位置\n#并且每次重装R之后，自己用到的扩展包也无需重新安装\n#即便重装系统，也不受影响\n#（如果是R大版本升级时，比如从R 3.X.X升级到R 4.X.X，可能有不少扩展包需要重装）\n\n#绝大部分的包都可以通过以下语句来安装\n#比如安装神经网络的包nnet\ninstall.packages(\"nnet\")\n#有些包，没有放在CRAN\n#需要通过特定渠道下载\n#比如github、bioconductor等\n\n#从github上下载安装一些最新版本的包\n#install.packages(\"devtools\")\ndevtools::install_github(\"hadley/tidyverse\")\n\n\n#有些情况下，也需要将包文件download到本地，然后再安装\n#比如你有一台机子数据保密很严格，不能上网\n#此时能做的事情就是先下载到本地，然后拷贝文件安装\n#方法如下：\n# install.packages(\"maptree_1.4-7.tar.gz\",\n#                  repos = NULL,\n#                  type = \"source\")\n#这里边涉及到包之间的依存关系，感兴趣的小伙伴自行思考解决\n\n\n#安装完包之后，就是加载使用了\n#这和一次性安装完word/excel/powerpoint，\n#然后可以反反复复使用，是一个道理\nlibrary(tidyverse)\n#或者\nrequire(tidyverse)\n#以上两个语句几乎完全相同，只不过是后者有一个返回值TRUE/FALSE\n#代表是否加载成功\n#这也就不难理解有些代码通过require来判断能否加载包，\n#若不能加载，则通过install.packages()语句进行安装\n\n#若有多个包，可以通过以下方式一次性加载\nmy_libs <- c(\"igraph\", \"infotheo\")\nsapply(my_libs, require, character.only = TRUE)\n\n#也可以通过pacman扩展包进行包的管理和加载\nlibrary(pacman)\np_load(igraph, 
infotheo)\n\n#当然，可能也有小伙伴希望自己管理一个迷你CRAN\n#library(miniCRAN)\n#这样可以在离线状态下，比如物理隔离的某些工作站上，安装扩展包\n\n\n# Help --------------------------------------------------------------------\n\n# StackOverflow当然是值得关注的\n\n\n#打开R的帮助页面\nhelp.start()\n#进入该页面之后，点击其中的packages，可以查看已安装所有包的帮助文档\n\n#打开函数c()的帮助页面\n?c\n?plot\n\n#操作符也是函数\n#也可以打开相应的帮助文档\n?'+'\n?\"if\"\n?`if`\n#注意以上单引号、双引号、反单引号的用法\n#反单引号也别称为重音符，和波浪号~同一个键位\n?'plot' #对于一般的函数，通过引号引起来，当然也可以\n?'c'\n\n#模糊查找包含某些字符的函数\n#比如，查找一下R所支持的假设检验\napropos(\"test\")\n#当然，这里边所列出的，并不包含其他扩展包的内容\napropos(\"test\", where = TRUE)\n#以下这个函数列出当前搜索的范围\nsearch()\n\n\n#stackoverflow网站是强烈推荐的\n#无论你学的是R还是Python\n#\"https://stackoverflow.com/questions/tagged/r\"\n#当然，你也可以在R里边，执行下边的语句直接打开浏览器\nbrowseURL(\"https://stackoverflow.com/questions/tagged/r\")\n#如果自行打开stackoverflow搜索，注意加上[r]\n#当然，如果是搜索其他相关主题，如ggplot2，也可以直接加上[ggplot2]之类的\n\n#推荐使用sos包\n#初次使用，需要安装\n#install.packages(\"sos\")\nlibrary(sos)\n#比如，查找R语言里边深度学习的相关包和函数\nfindFn(\"deep learning\")\n\n\n# Task Views --------------------------------------------------------------\n\n#毫无疑问，TASK VIEWS是最正统的\n#机器学习相关主题\nbrowseURL(\"https://CRAN.R-project.org/view=MachineLearning\")\n#聚类分析相关主题\nbrowseURL(\"https://cran.r-project.org/web/views/Cluster.html\")\n#自然语言处理\nbrowseURL(\"https://cran.r-project.org/web/views/NaturalLanguageProcessing.html\")\n#高性能计算相关主题\nbrowseURL(\"https://cran.r-project.org/web/views/HighPerformanceComputing.html\")\n#模型部署相关主题\nbrowseURL(\"https://cran.r-project.org/web/views/ModelDeployment.html\")\n#互联网相关主题\nbrowseURL(\"https://cran.r-project.org/web/views/WebTechnologies.html\")\n\n\n# More Packages for ML and DM ---------------------------------------------\n\n#机器学习/数据挖掘相关的一些扩展包\nbrowseURL(\"https://github.com/thedataincubator/data-science-blogs/blob/master/top-r-packages.md\")\nbrowseURL(\"https://www.r-pkg.org/starred\")\n\n\n# Updates -----------------------------------------------------------------\n\n\n#R的升级及包的更新\n#R不支持升级，多个版本可以共存\n#在更新之后，RStudio一般会自动关联\n#当然，也可以手动在Tools >> Global 
Options中手动设置\n\n# R Versions --------------------------------------------------------------\n\n#有些小伙伴对于不同版本R更新周期比较感兴趣\n#为了满足大家的好奇心，以下给出具体的实现代码\n#这些代码属于插播的内容，初次阅读会有一定难度\n#R不同版本的网址\nr_htmls <- paste0(\"https://cran.r-project.org/src/base/R-\", 0:3)\n#加载爬虫工具\nlibrary(rvest)\nr_version_info <- NULL\nfor (cur_r_html in r_htmls) {\n    html_content <- read_html (cur_r_html)\n    target_name <- \"table , th:nth-child(1), th a, th:nth-child(2)\"\n    #这个target_name，是通过SelectorGadget获取的\n    r_version_info <- rbind(r_version_info,\n                            html_content %>% html_node(target_name) %>% html_table)\n}\nView(r_version_info)\nlibrary(tidyverse)\nlibrary(ggplot2)\nr_version_info %>%\n    as.data.frame() %>%\n    select(2:4) %>%\n    set_names(c(\"name\", \"last_modified\", \"size\")) %>%\n    filter(gregexpr(\"R-\", name) != -1) %>%\n    mutate(type = substring(name, 1, 3),\n           last_modified = as.POSIXct(last_modified)) %>%\n    mutate(days_ellpased = c(NA, round(\n        difftime(last_modified[2:(length(last_modified))],\n                 last_modified[1:(length(last_modified) - 1)],\n                 units = \"days\"),\n        digits = 2\n    ))) %>%\n    ggplot(aes(x = type, y = days_ellpased, fill = type)) +\n    geom_boxplot()\n#通过这个图，可以看出R.0/R.1/R.2/R.3不同版本的更新周期\n#当然，我们也可以计算一下具体的数值\nr_version_info %>%\n    select(2:4) %>%\n    set_names(c(\"name\", \"last_modified\", \"size\")) %>%\n    filter(gregexpr(\"R-\", name) != -1) %>%\n    mutate(type = substring(name, 1, 3),\n           last_modified = as.POSIXct(last_modified)) %>%\n    mutate(days_ellpased = c(NA, round(\n        difftime(last_modified[2:(length(last_modified))],\n                 last_modified[1:(length(last_modified) - 1)],\n                 units = \"days\"),\n        digits = 2\n    ))) %>%\n    group_by(type) %>%\n    summarise(\n        mean = mean(days_ellpased, na.rm = TRUE),\n        median = median(days_ellpased, na.rm = TRUE)\n    )\n#具体数值如下\n# # A tibble: 4 x 3\n# type   
mean median\n# <chr> <dbl>  <dbl>\n# 1 R-0    39.3   41.5\n# 2 R-1    55.0   57\n# 3 R-2    77.4   70.2\n# 4 R-3    77.2   70.0\n#换句话说，大概每过2个月，基本上就应该更新一下你的R\n\n\n\n# Objects -----------------------------------------------------------------\n\n#object_name <- value\n\n#变量命名\n#不要太短的名字\n#可能过了几个月，你自己都完全忘了它是什么意思\n#可以由多个单词，表达完整的意思\n#切记，变量命名，一定要有意义meaningful\n#比如，用下划线连接\nuse_snake_case <- 1:10\n#用.连接\nuse.periods <- seq(1, 10)\n#或者是驼峰命名法\nuseCamelCase <- c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)\n\n#推荐用第一种命名方法\n#另外，在R中，点号.一般情况下可以当做普通字符对待，以下两种情况是例外：\n#（1）以.开头的变量，一般是隐藏变量\n#（2）泛型函数，通过.来匹配、分发具体类型的函数\n\n#其它编码规范，请参阅Google's R Style Guide\nbrowseURL(\"https://google.github.io/styleguide/Rguide.xml\")\n\n2 + 2     #直接显示结果\na <- 2    #赋值，但不显示结果\n(a <- 2)  #既赋值，又显示结果\n\n#既可以向左赋值\nb <- 211\n#也可以向右赋值\n985 -> c\n#这种赋值情况看似比较少见，不过下边这种情况下也比较常见：\n#x %>% g() %>% f() -> new_x\n#相当于new_x <- f(g(x))\n#也即是经过一系列操作之后，形成一个新的对象\n\nd <- \"信息黄埔\"  -> e\nd\ne #注意，R中并没有常量e，不过可以用函数exp(1)来代替\n\n.x <- \"not to show\"\nls() #不包括隐藏对象\nls(all.names = TRUE) #包括隐藏对象\n\n# Control Flow ------------------------------------------------------------\n\n\n#从最基本的程序结构说起\n#顺序、分支、循环\n#是一切结构化编程的基本逻辑\n\n# Sequence Structure ------------------------------------------------------\n\n#顺序结构\n#定义3个向量\nyw <- c(94, 87, 92, 91, 85, 92) #6个同学的语文成绩\nsx <- c(82, 94, 79, 84, 92, 82) #数学成绩\nwy <- c(96, 89, 86, 96, 82, 85) #外语成绩\nysw <- yw + sx + wy #向量化操作：三科成绩相加\nysw #显示三科成绩\n#> [1] 274 272 259 273 261 261\n(yw <- yw + 2) #向量化操作：每个同学语文成绩加2分\n#> [1] 96 89 94 93 87 94\n\n\n(mean_score <- mean(yw)) #求语文平均分\n#> [1] 92.16667\nsd(yw) #求语文成绩标准差\n(sd_score <- (1 / (6 - 1) * sum((yw - mean_score) ^ 2)) ^ 0.5)\n#> [1] 3.430258\nc(sd(yw), sd(sx), sd(wy))\n#> [1] 3.430258 6.058052 5.865151\n(z_score_yw <- (yw - mean_score) / sd_score) #求标准得分\n#> [1]  1.12 -0.92  0.53  0.24 -1.51  0.53\n\nshow(yw) #显示语文成绩\n#> [1] 96 89 94 93 87 94\nshow(sx) #显示数学成绩\n#> [1] 82 94 79 84 92 82\nyw >= 90 #向量化操作：逻辑判断\n#> [1]  TRUE FALSE  TRUE  TRUE FALSE  TRUE\nyw >= 85 & sx >= 
85 #向量化操作：逻辑判断\n#> [1] FALSE  TRUE FALSE FALSE  TRUE FALSE\nyw >= 95 | sx >= 95 #向量化操作：逻辑判断\n#> [1]  TRUE FALSE FALSE FALSE FALSE FALSE\n\nround(z_score_yw, digits = 3) #小数点后三位\n#> [1]  1.118 -0.923  0.534  0.243 -1.506  0.534\nsqrt(c(1, 4, 9)) #开根号\n#> [1] 1 2 3\n\">=\"(yw, 90) #逻辑判断\n#[1]  TRUE FALSE  TRUE  TRUE FALSE  TRUE\nyw + 5 #求和\n#> [1] 101  94  99  98  92  99\n\"+\"(yw, 5) #运算符作为特殊的函数\n#> [1] 101  94  99  98  92  99\n\n\n\n\n# Decision Structures -----------------------------------------------------\n\n#分支结构\nmin_score <- min(yw)\nif (min_score >= 90) {\n    message(\"语文成绩全部为优\")\n} else if (min_score >= 80) {\n    message(\"语文成绩至少为良\")\n} else {\n    message(\"并非所有同学语文成绩均为优良\")\n}\n\n\n#all()与any()\nyw >= 90\n#> [1]  TRUE FALSE  TRUE  TRUE FALSE  TRUE\nall(yw >= 90) #逻辑向量每一个值均为TRUE时，返回TRUE；否则返回FALSE\n#> [1] FALSE\nif (all(yw >= 90)) {\n    message(\"语文成绩全部为优\")\n} else if (all(yw >= 80)) {\n    message(\"语文成绩至少为良\")\n} else {\n    message(\"并非所有同学语文成绩均为优良\")\n}\n\n\nshow(yw)\n#> [1] 96 89 94 93 87 94\nany(yw <  88)\n#> [1] TRUE\nany(c(FALSE, FALSE, FALSE, FALSE, TRUE, FALSE))\nif (any(yw < 60)) {\n    message(\"有同学语文成绩挂科\")\n} else {\n    message(\"所有同学语文考试顺利通过\")\n}\n#> 所有同学语文考试顺利通过\n\n#下边这种写法是错误的\n# if(all(yw >= 90)) {\n#   message(\"语文成绩全部为优\")\n# }\n# else if(all(yw >= 80)) { #else不能单起一行\n#   message(\"语文成绩至少为良\")\n# }\n# else { #else不能单起一行\n#   message(\"并非所有同学语文成绩均为优良\")\n# }\n\n#else不能另起一行\n\n\n#注意：if里边是标量\nyw > 90\n#[1]  TRUE FALSE  TRUE  TRUE FALSE  TRUE\nif (yw > 90) {\n    message(\"所有同学语文成绩均为优\")\n}\n#> 所有同学语文成绩均为优\n#> Warning message:\n#>   In if (yw > 90) { :\n#>       the condition has length > 1 and only the first element will be used\n#相当于只取了第一个元素\nif ((yw > 90)[1]) {\n    message(\"所有同学语文成绩均为优\")\n}\n\nyw\n#[1] 96 89 94 93 87 94\nifelse(yw >= 90, \"优\", \"非优\")\n#[1] \"优\"   \"非优\" \"优\"   \"优\"   \"非优\" \"优\"\nifelse(yw >= 90, \"优\",\n       ifelse(yw >= 88, \"较好\", \"一般\"))\n#[1] \"优\" \"良\" \"优\" \"优\" \"良\" \"优\"\nifelse(yw >= 90,\n 
      #T F T T F T\n       c(\"1\", \"2\", \"3\", \"4\", \"5\", \"6\"),\n       c(\"I\", \"II\", \"III\", \"IV\", \"V\", \"VI\"))\n#> [1] \"1\"  \"II\" \"3\"  \"4\"  \"V\"  \"6\"\n\n\n\n# Loop Structures ---------------------------------------------------------\n\n\n#for循环\n#求斐波那契数列的前16个数\nn_fib <- 16\nfib <- numeric(n_fib)\nfib[1:2] <- c(1, 1)\nfor (i in 3:n_fib) {\n    fib[i] <- fib[i - 2] + fib[i - 1]\n    show(fib[i])\n}\nfib\n#> [1]   1   1   2   3   5   8  13  21  34  55  89 144 233 377 610 987\n\n#求1000以内的斐波那契数列\n#不知道循环多少次\n#while来实现\nfib <- c(1, 1)\nwhile (sum(tail(fib, 2)) < 1000) {\n    fib <- c(fib, sum(tail(fib, 2)))\n}\nfib\n#[1]   1   1   2   3   5   8  13  21  34  55  89 144 233 377 610 987\n\n#或者通过\n#repeat来实现\nfib <- c(1, 1)\nrepeat {\n    if (sum(tail(fib, 2)) >= 1000) {\n        break\n    }\n    fib <- c(fib, sum(tail(fib, 2)))\n}\nfib\n\n#当然，上述语句的逻辑\n#与下述的while结构更加吻合\nfib <- c(1, 1)\nwhile (TRUE) {\n    if (sum(tail(fib, 2)) >= 1000) {\n        break\n    }\n    fib <- c(fib, sum(tail(fib, 2)))\n}\nfib\n\n#再尝试一个例子\n#从1~100随机抽取一个数\n#若是52，则停止；否则，继续抽取\ntime_count <- 0\nrepeat {\n    my_number <- sample(1:100, 1)\n    time_count <- time_count + 1\n    if (my_number == 52) {\n        message(\"Haha~, I finally got '52' after \",\n                time_count, \" attempts\")\n        break\n    } else {\n        message(time_count,\n                \": Not lucky enough [\", my_number, \"]\")\n    }\n}\n#> 1: Not lucky enough [42]\n#> 2: Not lucky enough [4]\n#> 3: Not lucky enough [7]\n#> 4: Not lucky enough [2]\n#> 5: Not lucky enough [98]\n#> 6: Not lucky enough [43]\n#> 7: Not lucky enough [44]\n#> 8: Not lucky enough [15]\n#> 9: Not lucky enough [70]\n#> 10: Not lucky enough [54]\n#> 11: Not lucky enough [27]\n#> 12: Not lucky enough [65]\n#> 13: Not lucky enough [62]\n#> 14: Not lucky enough [43]\n#> 15: Not lucky enough [54]\n#> 16: Not lucky enough [9]\n#> 17: Not lucky enough [71]\n#> 18: Not lucky enough [4]\n#> 19: Not lucky enough [66]\n#> 
20: Not lucky enough [5]\n#> 21: Not lucky enough [92]\n#> Haha~, I finally got '52' after 22 attempts\n#小伙伴们需要注意一点：\n#在上述实现过程中，用到了tail函数\n#指的是倒数的某些元素，具体用法请看帮助文档\n? tail\n\n\n\n#当然，在R里边\n#尽量不要使用显式循环\n#能向量化运算的，尽量向量化\nx <- 1:1e8 #一亿\ny <- 2:(1e8 + 1) #一亿\nz <- integer(1e8)\nsystem.time(z <- x + y, gcFirst = TRUE)\n#> user  system elapsed\n#> 0.36    0.09    0.45\n# The ‘user time’ is the CPU time charged for\n#       the execution of user instructions of the calling process.\n# The ‘system time’ is the CPU time charged for\n#        execution by the system on behalf of the calling process.\n\nsystem.time({\n    for (i in 1:1e8) {\n        z[i] <- x[i] + y[i]\n    }\n},  gcFirst = TRUE)\n#> user  system elapsed\n#> 11.51    0.06   11.70\n\n\n#其实，连斐波那契数列\n#也可以采用并行的方式\nn_fib <- 16\nsapply(1:n_fib, function(x) {\n    1 / sqrt(5) *\n        (((1 + sqrt(5)) / 2) ^ x -\n             ((1 - sqrt(5)) / 2) ^ x)\n})\n#> [1]   1   1   2   3   5   8  13  21  34  55  89 144 233 377 610 987\n\n\n#注意：apply函数族并非真正的并行！！\n\n#关于apply，补充一下两点：\n#(1)带进度条的apply\n#for循环的进度条\npbfor::pb_for()\nfor(i in 1:100) {\n    Sys.sleep(0.5)\n}\n#(2)并行计算\n#\nlibrary(foreach)\nlibrary(doSNOW)\n\ncl <- makeCluster(parallel::detectCores() - 1)\nregisterDoSNOW(cl)\n\nI_REPEAT_TIMES <- 20\nJ_REPEAT_TIMES <- 10\nresults <-\n    foreach(\n        i = seq(0, 1, length.out = I_REPEAT_TIMES)\n    ) %:%\n    foreach(\n        j = 1:J_REPEAT_TIMES\n    ) %dopar% {\n        cur_ij <- paste0('i = ', i,\n                           'j = ', j)\n        # to do\n        cur_ij\n    }\nstopCluster(cl)\n\n\n\n# Function ----------------------------------------------------------------\n\n\n#编代码的过程中，一定要注意避免硬代码\n#千万不要一次次Ctr C之后Ctr V\n#那样你的代码会变得很难维护\n#如果一套逻辑需要多次重复出现\n#最好的办法是编写一个函数\n\n\n#函数就是一个输入、处理、到输出的过程\n#输入的是参数\n\n#一切都是对象\n#所以，函数，也是通过赋值来创建的\n#比如：\n#摄氏度（Celsius）到华氏度（Fahrenheit）的转换\nce2fa <- function(ce) {\n    #参数ce为输入\n    fa <- 1.8 * ce + 32 #对输入进行处理\n    return(fa) #输出相应的值\n}\nce2fa(0)#0℃相当于32℉\n#> [1] 
32\nce2fa(0:10)#将0~10℃转换为相应的℉\n#> [1] 32.0 33.8 35.6 37.4 39.2 41.0 42.8 44.6 46.4 48.2 50.0\nce2fa\n#> function(ce) { #参数ce为输入\n#>   fa <- 1.8 * ce + 32 #对输入进行处理\n#>   return(fa) #输出相应的值\n#> }\n#> <bytecode: 0x00000000144b5d28>\n\n#多种温度计量\nce2all <- function(ce) {\n    if (!is.numeric(ce) || length(ce) >= 2) {\n        stop(\"Invalid arguments!\")\n    }\n    fa <- 1.8 * ce + 32 #华氏度，巴哈马等\n    re <- 0.8 * ce #列氏度，德国\n    ra <- 1.8 * ce + 32 + 459.67 #兰氏度\n    ke <- 273.15 + ce #开氏度\n    return(c(\n        C = ce,\n        F = fa,\n        Re = re,\n        Ra = ra,\n        K = ke\n    ))\n}\nce2all(0)\nce2all(\"0\")\nce2all(0:10)\n\n\n#位置参数和名义参数\nfrm <- function(name, frm = \"BUPT\") {\n    cat(name, \" is frm \", frm)\n}\nfrm()#出错\n#> Error in cat(name, \" is frm \", frm) :\n#>   argument \"name\" is missing, with no default\nfrm(\"axb\")#参数的缺省值\n#> axb  is frm  BUPT\nfrm(name = \"AXB\", frm = \"BJTU\")\n#> AXB  is frm  BJTU\nfrm(frm = \"BJTU\", name = \"AXB\")\n#> AXB  is frm  BJTU\n\n\n#看几行我们比较熟悉的代码\nxm <- c(\"周黎\", \"汤海明\", \"舒江辉\", \"翁柯\", \"祁强\", \"湛容\")\nyw <- c(94, 87, 92, 91, 85, 92)\nxb <- c(FALSE, TRUE, TRUE)\n#再看看sum函数\nsum(94, 87, 92, 91, 85, 92)\nsum(1, 3, 5, 7)\n? c\n? sum\n\nmy_func <- function(...) 
{\n    cat(\"The second arg is \", ..2)\n    dot_args <- list(...)\n    message(\"\\nThe sum is \", sum(dot_args[[1]], dot_args[[5]]))\n}\nmy_func(1, 'arg2', 3, 4, 5, 6, 7, 8)\n#> The second arg is  arg2\n#> The sum is 6\n\n#+、-、*、/binary operators\n#其实都是函数\n1 + 2\n\"+\"(1, 2)\n'+'(1, 2)\n#[1] 3\n'/'(2, 3)\n#[1] 0.6666667\n'^'(10, 2)\n#[1] 100\n#连赋值符号<-都可以变成函数的形式\n\">\"(2, 1)\n#[1] TRUE\n'<-'(new_var, 3)\nnew_var\n#> [1] 3\n#:，本质上也是一个函数\n':'(1, 10)\n#> [1]  1  2  3  4  5  6  7  8  9 10\n'['(1:10, 2)\n#> [1] 2\n#连if都是\n'if'(2 > 1, {\n    cat(\"好吧，连if都是函数\")\n})\n#> 好吧，连if都是函数\n\n\n#%in%运算符\n#可以简单的理解为：\n#左侧的集合是否在右侧的集合之中\nc(1, 3, 9) %in% 1:3\n'%in%'(c(1, 3, 9), 1:3)\n#[1]  TRUE  TRUE FALSE\n\n#自己定义二元操作符函数\n#a、b为直角边，c为斜边\n\"%ab2c%\" <- function(a, b) {\n    sqrt(sum(a ^ 2, b ^ 2))\n}\n3 %ab2c% 4\n# [1] 5\n\n#看完%ab2c%之后，对下边的符号，也就觉得不过如此了\nlibrary(purrr)\nx <- c(17, 28, 17, 12, 15, 12, 49)\nx %>%\n    unique() %>%\n    sort()\n#等价于下边的代码，不过是更加简洁优雅\nx <- c(17, 28, 17, 12, 15, 12, 49)\nx2 <- unique(x)\nx3 <- sort(x2)\nx3\n#[1] 12 15 17 28 49\n\n#来点恶作剧\n\"+\" <- function(x, y) {\n    x * y\n}\n5 + 2\n#[1] 10\nrm(\"+\")\n5 + 2\n#[1] 7\n\n#当我们看完了上边的%my_binary_operator%之后\n#对下边的符号，也就不怕了\nlibrary(tidyverse)\nx <- c(17, 28, 17, 12, 15, 12, 49)\n#%>%管道操作符\nx %>%\n    unique() %>%\n    sort()\n\n#特殊函数的帮助文档\n? round\n? \"+\" #双引号\n? '+' #单引号\n? `+` #反单引号\n? '%in%'\n? 
'round'\n\nisGeneric(\"plot\")\nplot\nplot(1:10)\n\nx <- seq(1, 100, by = 10)\ny <- 2 * x + 10\nxy <- cbind(x, y)\nclass(xy)\n#> [1] \"matrix\"\nplot(\n    xy,\n    xlim = c(1, 100),\n    ylim = c(0, 230),\n    type = \"o\",\n    col = \"red\"\n)\nx <- seq(1, 100, by = 10)\ny <- 2 * x + 10\nxy <- lm(y ~ x)\nclass(xy)\n#> [1] \"lm\"\nop <- par(mfrow = c(2, 2))\nplot(xy)\npar(op)\n\n#泛型函数\ninterface <- function(x, y) {\n    message(\"Single interface\")\n    UseMethod(\"particular\", y)\n}\nparticular.classA <- function(x, y) {\n    message(\"Different behavior: classA\")\n}\nparticular.classB <- function(x, y) {\n    message(\"Different behavior: classB\")\n}\nparticular.default <- function(x, y) {\n    message(\"Different behavior: default\")\n}\nx <- 1:10\ny <- 1:20\nclass(y) <- \"classA\"\ninterface(x, y)\n#> Single interface\n#> Different behavior: classA\nclass(y) <- \"classB\"\ninterface(x, y)\n#> Single interface\n#> Different behavior: classB\nclass(y) <- \"classC\"\ninterface(x, y)\n#> Single interface\n#> Different behavior: default\nclass(y) <- NULL\ninterface(x, y)\n#> Single interface\n#> Different behavior: default\n\n\n#其实，+也是一个泛型函数\nmethods(\"+\")\n#> [1] +.Date   +.POSIXt\n#> see '?methods' for accessing help and source code\nlibrary(ggplot2)\nmethods(\"+\")\n# [1] +.Date   +.gg*    +.POSIXt\n# see '?methods' for accessing help and source code\n\nz <- rnorm(1000)\nggplot(data = data.frame(z), aes(z)) +\n    geom_density()\n\n#你当然可以对它进行修改\n#以下操作纯属娱（e）乐（gao）\n\"+.onlyFirst\" <- function(a, b) {\n    return(a[1] + b[1])\n}\n`+.onlyFirst` <- function(a, b) {\n    return(a[1] + b[1])\n}\na <- 1:5\na + 6:10\n#> [1]  7  9 11 13 15\nclass(a) <- \"onlyFirst\" #给a贴上一个类标签onlyFirst\na + 6:10\n#> [1] 7\n\n#当你理解了上边这个泛型的+\n#后续看到ggplot2中的加号\n#就不会陌生了\nlibrary(ggplot2)\nggplot(data = iris,\n       aes(\n           x = Petal.Length,\n           y = Petal.Width,\n           colour = Species,\n           shape = Species\n       )) +\n    
geom_point()\n\n\n#究竟有多少个+的函数\nmethods(\"+\")\ndetach(package:ggplot2, force = TRUE)\nmethods(\"+\")\nrm(list = \"+.onlyFirst\")\nmethods(\"+\")\n\n\n#系统方法也可以扩展哦\na <- 1:10\nprint(a)\nprint.MyClass <- function(x, ...) {\n    cat(\"This is my print:\\n\")\n    print.default(x, ...)\n}\nattr(a, 'class') <- 'MyClass'\nprint(a)\n\n\n# Recursion ---------------------------------------------------------------\n\nold_monk_story <- function(depth = 1) {\n    message(\n        rep(\"  \", depth),\n        \"400 years ago(\",\n        2012 - 400 * depth,\n        \"), monk[\",\n        depth,\n        \"] is telling the story:\"\n    )\n    if (2012 - 400 * (depth + 1) >= 66) {\n        #据说佛教公元66年传入我国\n        old_monk_story(depth + 1)\n    }\n    message(rep(\"  \", depth),\n            \"monk [\", depth, \"] finished his story\")\n}\nold_monk_story()\n\n#400 years ago(1612), monk[1] is telling the story:\n#    400 years ago(1212), monk[2] is telling the story:\n#        400 years ago(812), monk[3] is telling the story:\n#            400 years ago(412), monk[4] is telling the story:\n#            monk [4] finished his story\n#        monk [3] finished his story\n#    monk [2] finished his story\n#monk [1] finished his story\n\n\n#斐波那契数列，也可以采用递归的方式实现\nfib <- function(n) {\n    if (n == 1) {\n        return(1)\n    } else {\n        return(c(fib(n - 1), sum(tail(\n            fib(n - 1), n = 2\n        ))))\n    }\n}\nfib(16)\n\n\n\nfib(1)\nfib(2)\nfib(3)\nfib(10)\n\nc(c(c(c(c(\n    c(1, 1), 2\n), 3), 5), 8), 13)\n\n\n# Debug -------------------------------------------------------------------\n\nfindRuns <- function(x, k) {\n    n <- length(x)\n    runs <- NULL\n    for (i in 1:(n - k)) {\n        if (all(x[i:i + k - 1] == 1))\n            runs <- c(runs, i)\n    }\n    return(runs)\n}\nx <- c(1, 0, 0, 1, 1, 1, 0, 1, 1)\n#期望的是4,5,8\nfindRuns(x = x, k = 2)\ndebugonce(findRuns)\n#debug(findRuns)\nfindRuns(x = x, k = 2)\n\n\n# Exception 
---------------------------------------------------------------\n\n#不要让个别循环出现的异常\n#影响我们的程序运行\n#尤其是运行时间长的代码，期望第二天能出结果，结果头天晚上23:00就出问题了\n#比如在for循环中，增加异常处理，若本轮循环出问题，则直接进入下一轮\nX <- list(1, 2, \"3\", 4, 5)\n#注意理解为何不能用c()替换list()\nfor (cur_x in X) {\n    reciprocal <- 1 / cur_x\n    cat(\"\\nThe reciprocal of\", cur_x, \"is\", reciprocal)\n}\n#改用下边的方式\n#也就是把可能出问题的语句，\n#全都交给tryCatch()函数\nfor (cur_x in X) {\n    tryCatch({\n        reciprocal <- 1 / cur_x\n        cat(\"\\nThe reciprocal of\", cur_x, \"is\", reciprocal)\n    }, #显然，函数的第一个参数就是表达式\n    #表达式可能有很多，建议都用{}括起来\n    error = function(e) {\n        cat(\"\\nSomething wrong while processing \", cur_x)\n    })\n}\n\n\n# The End ^-^ -------------------------------------------------------------\n"
  },
  {
    "path": "03_数据对象.R",
    "content": "\n\n# 03_数据对象 --------------------------------------------------------------\n\n#R语言的学习 = 基础编程 + 数据对象\n#本讲主要对R里边的六大类对象及其操作进行简要描述\n#1、向量/因子\n#2、矩阵/数组\n#3、列表/数据框\n\n#万法归宗！\n#任意类型的外部数据，无论是文本、传感器信号\n#还是图像、音频，或者是一般关系数据库存储的数据\n#都将转换成这六种数据之一\n\n\n# Constants ---------------------------------------------------------------\n\n#查看R内置的一些常量\n?Constants\n\n#预定义的全局变量\nLETTERS\nletters\nmonth.abb\nmonth.name\npi\nformat(pi, digits = 17)\nT #TRUE是真正的常量，而T <- FALSE\nF\n\n#数值常量\n?NumericConstants\nInf <- 0\npi <- 1\nrm(pi)#恢复pi为内置常量\n\n.12\n.12 == 0.12\n\n#R中的保留字\n?Reserved\n#不能把一个数值赋给另外一个\n6 <- 1\n#也不能把TRUE赋值给FALSE\nFALSE <- TRUE\n#混淆是非\nF <- TRUE\nif (isTRUE(F)) {\n    print(\"F is TRUE\")\n} else {\n    print(\"F is FALSE\")\n}\nif (isTRUE(F)) {\n    print(\"F is FALSE\")\n}\nrm(F)\nif (!F) {\n    print(\"F is FALSE\")\n}\n\n#锁定某些变量，不让别人修改\nfake_constant <- 1\nlockBinding(\"fake_constant\", globalenv())\nfake_constant <- 2\nrm(fake_constant) #清理掉伪常量\n\n\n# Vector ------------------------------------------------------------------\n\n\n#c()创建向量最常见的方式\n#Combine Values into a Vector\n#字符型向量\nxm <- c(\"周黎\", \"汤海明\", \"舒江辉\", \"翁柯\", \"祁强\", \"湛容\")\nxb <- c(\"女\", \"男\", \"男\", \"女\", \"男\", \"女\")\n#数值型向量\nyw <- c(94, 87, 92, 91, 85, 92)\n#逻辑型向量\nxb2 <- c(F, T, TRUE, FALSE, T, F)\n\nmy_pi <- c(3, \".\", 1, 4, 1, 5, 9, 2, 6) #不能有混合类型\nmy_pi\n#> [1] \"3\" \".\" \"1\" \"4\" \"1\" \"5\" \"9\" \"2\" \"6“\nmy_pi <- c(3, TRUE, 4, TRUE, 5, 9, 2, 6) #强制类型转换\nmy_pi\n#[1] 3 1 4 1 5 9 2 6\nc(1, 2, c(4, 3), c(1, 0)) ##不存在包含向量的向量，一律拆包\n#> [1] 1 2 4 3 1 0\nc(1, 2, 4, 3, 1, 0)\n#> [1] 1 2 4 3 1 0\n\n\n\n#假如事先知道长度和类型\n(x1 <- vector(\"numeric\", 8))\n#> [1] 0 0 0 0 0 0 0 0\n(x2 <- numeric(8))\n#> [1] 0 0 0 0 0 0 0 0\n(x3 <- character(8))\n#>  [1] \"\" \"\" \"\" \"\" \"\" \"\" \"\" \"\"\n(x4 <- vector(len = 8))\n#>  [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE\n(x5 <- logical(8))\n#>  [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE\n\n\n#规则序列的产生\n#等差数列\nseq(from = 1, to = 
20, by = 2)\n#> [1]  1  3  5  7  9 11 13 15 17 19\nseq(from = 20, to = 1, by = -2)\n#>  [1] 20 18 16 14 12 10  8  6  4  2\nseq(from = 1, to = 20, len = 10)\n#> [1]  1.0  3.1  5.2  7.3  9.4 11.6 13.7 15.8 17.9 20.0\n#by = (to - from) / (len - 1)\n\n1:10#from:to,步长为1的等差数列\n#> [1]  1  2  3  4  5  6  7  8  9 10\npi:1\n#[1] 3.14 2.14 1.14\n#注意运算符的优先级\n1:10 - 1 #长度为10\n# [1] 0 1 2 3 4 5 6 7 8 9\n1:(10 - 1) #长度为9\n#[1] 1 2 3 4 5 6 7 8 9\n#不要有记忆的负担，在R里边，不要吝啬{}和()的使用\n\n#顺便提一句，seq为泛型函数\n#以本课程2012年第一次开课课程安排为例\n#2012年9月3日至2013年1月6日（校历2-19周）\n#9月3日（周一）第一次上课，1月6日前结束\n#用下述语句可以排出一个学期的课表\nseq(from = as.Date(\"2012-9-3\"),\n    to = as.Date(\"2013-1-6\"),\n    by = \"weeks\")\n#可以采用下边的方法\n#当时为18周36学时\nseq(from = as.Date(\"2012-9-3\"),\n    by = \"weeks\",\n    length = 18)\n\n#生成重复元素的向量\nrep(\"a\", 10)\nrep(c(\"a\", \"b\"), c(2, 3))\nrep(letters, 1:26)\n\n#产生随机数\n#产生服从某种分布的随机数\nrnorm(100)#标准正态分布\nrnorm(100, 2, 1) #均值为2，标准差为1\n#当然，正态分布只是诸多分布的其中一种\n? Distributions\n#另外，请注意d/p/q/r四个前缀各自的含义\n\n#随机抽样\nsample(10)\n#> [1]  6  5  8  7  2  3  4  1 10  9\nsample(c(\"b\", \"u\", \"p\", \"t\", \"a\", \"x\", \"b\"))\n#> [1] \"u\" \"x\" \"t\" \"b\" \"a\" \"p\" \"b\"\nset.seed(2012)\nsample(10)\n#[1]  3  7 10  9  5  6  8  4  2  1\n(train_idx <- sample(1:10, 7))\n#> [1]  3  6 10  5  4  1  8\n(test_idx <- setdiff(1:10, train_idx))\n#> [1] 2 7 9\n\n#有放回的抽样\nre_sample <- sample(1:100,\n                    100,\n                    replace = TRUE)\nunique_re_sample <- unique(re_sample)\nlength(unique_re_sample)\n#> [1] 62\n\n#请小伙伴们验证一下\n#从N个对象中，有放回抽取N个对象\n#大约有多少是抽取不到的？\ntimes <- 1000\nnum <- 10000\n1 - mean(unlist(replicate(times, {\n    re_sample <- sample(num, num, replace = TRUE)\n    unique_re_sample <- unique(re_sample)\n    length(unique_re_sample)\n}))) / num\n#> [1] 
0.3677274\n#这里的replicate函数用于重复执行某些语句\n#当然也可以采用sapply来实现类似的效果，\n#只不过replicate更加便捷而已\n\n\n#一旦涉及到随机，每次结果都不一样\n#要复现结果，尤其是在开展可重复性学术研究时，\n#需要设置随机数种子，确保之后的随机动作所得结果都是一样的\nset.seed(2012)\n#这里的随机数种子没有什么特殊含义\n#可以随便设置，这里取2012，\n#只是因为该门课从2012年第一次开\nsample(20)\nsample(20)\nsample(20)\n#请注意，每次执行上述四条语句时，\n#三个sample(20)，前后结果不一样，\n#但每次的结果是一样的\n\n#向量的增删改查\n#以“查”最为重要\n(x <- letters[1:10])\nx <- c(x[1:7], x[10])\nx\n#追加某些元素\nappend(x, c(\"h\", \"i\"), after = 7)\nx\nx <- append(x, c(\"h\", \"i\"), after = 7)\n\n#访问向量的子集\n#子集的访问，应该说是数据对象里边最需要掌握的内容\n#向量的子集通过[]来指定\n#第1种方法：采用1~N的正整数来指定，N为向量的长度\nyw <- c(94, 87, 92, 91, 85, 92)\nyw[c(2, 5)]\n#[1] 87 85\nyw[c(2, 5)] - 90\n#[1] -3 -5\nyw[c(2, 5)] <- yw[c(2, 5)] + 6\nyw\n#[1] 94 93 92 91 91 92\nyw[] <- mean(yw)\nyw\n#[1] 92.17 92.17 92.17 92.17 92.17 92.17\n(yw <- mean(yw))\n##[1] 92.17\n\nxm <- c(\"周黎\", \"汤海明\", \"舒江辉\")\nxm[c(1, 3, 2, 3)]\n\n#方法二：采用负整数，反向选择——排除某些元素\nyw <- c(94, 87, 92, 91, 85, 92)\nyw[-c(2, 5)]\n#[1] 94 92 91 92\nwhich(yw < 90)\n#> [1] 2 5\nidx <- which(yw < 90)\nyw[-which(yw < 90)] #避免了硬代码\n#[1] 94 92 91 92\n\n#注意：which()并不是一个值得推荐的函数\n#有时候可能会出一些很诡异的结果\nyw[-which(yw > 100)]\n#慎用which()尤其是当可能返回结果不包含任何下标时\n\nxm <- c(\"周黎\", \"汤海明\", \"舒江辉\", \"翁柯\", \"祁强\", \"湛容\")\nxm[-which(yw < 90)]\n#[1] \"周黎\"   \"舒江辉\" \"翁柯\"   \"湛容\"\n\n(yw <- yw[-which(yw < 90)])\nxm <- xm[-idx]\n#xm <- xm[-which(yw < 90)]\nnames(yw) <- xm\nyw\n# 周黎 舒江辉   翁柯   湛容\n# 94     92     91     92\n\n#方法三：逻辑下标\nxm <- c(\"周黎\", \"汤海明\", \"舒江辉\", \"翁柯\", \"祁强\", \"湛容\")\nyw <- c(94, 87, 92, 91, 85, 92)\nyw < 90\n#> [1] FALSE  TRUE FALSE FALSE  TRUE FALSE\nyw[yw < 90]\n#> [1] 87 85\nxm[yw < 90]\n#> [1] \"汤海明\" \"祁强\"\n\n#小伙伴们思考一下\n#为什么R会很“智能”地识别出：\n#哪些同学语文成绩小于90呢？\n#注意！！！！！！！！！\n#其实没有那种识别过程\n#yw < 90无非是一个与xm等长的逻辑向量而已\n#这个逻辑向量为TRUE，对应位置的xm的元素取出来而已\n#方法四：通过元素名访问相应的子集\nxm <- c(\"周黎\", \"汤海明\", \"舒江辉\", \"翁柯\", \"祁强\", \"湛容\")\nyw <- c(94, 87, 92, 91, 85, 92)\nnames(yw) <- xm\nyw\n# 周黎 汤海明 舒江辉   翁柯   祁强   湛容\n# 94     87     92     91     85     92\nyw[c(\"汤海明\", 
\"祁强\")]\n#> 汤海明   祁强\n#> 87     85\n\n#向量排序\nfen_shu_xian2016 <- c(\n    中国科学院大学    = 671,\n    中央民族大学    = 625,\n    北京大学    = 678,\n    中国人民大学    = 670,\n    清华大学    = 680,\n    北京交通大学    = 640,\n    北京科技大学    = 635,\n    北京化工大学    = 620,\n    北京邮电大学    = 646,\n    中国农业大学    = 634,\n    北京林业大学    = 621\n)\nsort(fen_shu_xian2016)\n# 北京化工大学   北京林业大学   中央民族大学   中国农业大学\n# 620            621            625            634\n# 北京科技大学   北京交通大学   北京邮电大学   中国人民大学\n# 635            640            646            670\n# 中国科学院大学       北京大学       清华大学\n# 671            678            680\n\nprint(fen_shu_xian2016,\n      trim = TRUE,\n      width = 3,\n      justify = \"right\")\n\nsort(fen_shu_xian2016, decreasing = TRUE)\norder(fen_shu_xian2016, decreasing = TRUE)\n#> [1]  5  3  1  4  9  6  7 10  2 11  8\nfen_shu_xian2016[order(fen_shu_xian2016, decreasing = TRUE)]\n#> 清华大学       北京大学 中国科学院大学   中国人民大学\n#> 680            678            671            670\n#> 北京邮电大学   北京交通大学   北京科技大学   中国农业大学\n#> 646            640            635            634\n#> 中央民族大学   北京林业大学   北京化工大学\n#> 625            621            620\n\n\n#倒序\nyw <- c(94, 87, 92, 91, 85, 92)\nsort(yw)\n#> [1] 85 87 91 92 92 94\nrev(yw)\n#> [1] 92 85 91 92 87 94\nyw[6]\n#> [1] 92\nyw[length(yw)]\n#> [1] 92\ntail(yw, n = 1)\n#> [1] 92\nrev(tail(yw, n = 3))\n#> [1] 92 85 91\nhead(rev(yw), n = 3)\n\n#向量化运算\n#设张三、李四、王五合伙开店\n#分别投入3200、1500和900\n#现获利530，按照投入比进行分成\ncheng_ben <- c(张三    = 3200,    李四    = 1500,    王五    = 900)\nli_run <- cheng_ben / sum(cheng_ben) * 530\nnames(li_run) <- names(cheng_ben)\nli_run\n\n\n#以上均是向量作为一个存储容器的基本操作\n#接下来看一下向量的数学运算\n\n#原点\np0 <- c(x = 0, y = 0)\n#向量1\np1 <- c(x = 1, y = 2)\n#向量2\np2 <- c(x = 2, y = 1)\n\n#求和\np3 <- p1 + p2\n\n#数乘\np4 <- 1.5 * p3\n\nlibrary(ggplot2)\nmy_ggplot <- ggplot() +\n    xlim(0, 5) +\n    ylim(0, 5) +\n    coord_fixed()\nplot(my_ggplot)\nmy_ggplot <- my_ggplot +\n    geom_point(aes(x = p1[\"x\"], y = p1[\"y\"])) +\n    geom_segment(\n        aes(\n            x = 
p0[\"x\"],\n            y = p0[\"y\"],\n            xend = p1[\"x\"],\n            yend = p1[\"y\"]\n        ),\n        arrow = arrow(length = unit(0.3, \"cm\")),\n        colour = 'black'\n    ) +\n    geom_text(aes(x = p1[\"x\"], y = p1[\"y\"], label = \"p1\"),\n              size = 4,\n              vjust = -1) +\n    xlab(\"x\") +\n    ylab(\"y\")\nplot(my_ggplot)\n\nmy_ggplot <- my_ggplot +\n    geom_point(aes(x = p2[\"x\"], y = p2[\"y\"])) +\n    geom_segment(\n        aes(\n            x = p0[\"x\"],\n            y = p0[\"y\"],\n            xend = p2[\"x\"],\n            yend = p2[\"y\"]\n        ),\n        arrow = arrow(length = unit(0.3, \"cm\")),\n        colour = 'black'\n    ) +\n    geom_text(aes(x = p2[\"x\"], y = p2[\"y\"], label = \"p2\"),\n              size = 4,\n              vjust = -1)\nplot(my_ggplot)\nmy_ggplot <- my_ggplot +\n    geom_segment(aes(\n        x = p2[\"x\"],\n        y = p2[\"y\"],\n        xend = p3[\"x\"],\n        yend = p3[\"y\"]\n    ),\n    linetype = 2,\n    colour = 'grey') +\n    geom_segment(aes(\n        x = p1[\"x\"],\n        y = p1[\"y\"],\n        xend = p3[\"x\"],\n        yend = p3[\"y\"]\n    ),\n    linetype = 2,\n    colour = 'grey')\nplot(my_ggplot)\nmy_ggplot <- my_ggplot +\n    geom_point(aes(x = p3[\"x\"], y = p3[\"y\"]), colour = 'red', size = 3) +\n    geom_segment(\n        aes(\n            x = p0[\"x\"],\n            y = p0[\"y\"],\n            xend = p3[\"x\"],\n            yend = p3[\"y\"]\n        ),\n        arrow = arrow(length = unit(0.3, \"cm\")),\n        colour = 'red'\n    ) +\n    geom_text(aes(x = p3[\"x\"], y = p3[\"y\"], label = \"p3\"),\n              size = 4,\n              vjust = -1)\nplot(my_ggplot)\n\nmy_ggplot <- my_ggplot +\n    geom_point(aes(x = p4[\"x\"], y = p4[\"y\"]), size = 3, colour = 'blue') +\n    geom_segment(\n        aes(\n            x = p3[\"x\"],\n            y = p3[\"y\"],\n            xend = p4[\"x\"],\n            yend = p4[\"y\"]\n        ),\n        
linetype = 2,\n        arrow = arrow(length = unit(0.4, \"cm\")),\n        colour = 'blue'\n    ) +\n    geom_text(aes(x = p4[\"x\"], y = p4[\"y\"], label = \"p4\"),\n              size = 4,\n              vjust = -1)\nplot(my_ggplot)\n\nggsave(\"p1p2p3.png\", dpi = 600)\n#注意：\n#图片存储在\"getwd()的结果/p1p2p3.png\"\n\n#投影\n#向量1\np1 <- c(x = 1, y = 2)\n#向量2\np2 <- c(x = 2, y = 1)\n#投影\np1_on_p2 <-\n    sum(p1 * p2) /\n    sum(p2 * p2) * p2\n\n\np2 / sum(p2 * p2)\n\nsqrt(sum(p2 * p2)) * sqrt(sum(p1_on_p2 * p1_on_p2))\n\n#容易看出，求投影，完全不需要用到cos之类的\nlibrary(ggplot2)\nggplot() +\n    xlim(0, 3) +\n    ylim(0, 3) +\n    coord_fixed() + #求投影时，必须fixed，否则垂直效果容易失真\n    geom_point(aes(x = p1[\"x\"], y = p1[\"y\"])) +\n    geom_segment(\n        aes(\n            x = p0[\"x\"],\n            y = p0[\"y\"],\n            xend = p1[\"x\"],\n            yend = p1[\"y\"]\n        ),\n        arrow = arrow(length = unit(0.3, \"cm\")),\n        colour = 'black'\n    ) +\n    geom_text(aes(x = p1[\"x\"], y = p1[\"y\"], label = \"p1\"),\n              size = 4,\n              vjust = -1) +\n    geom_point(aes(x = p2[\"x\"], y = p2[\"y\"])) +\n    geom_segment(\n        aes(\n            x = p0[\"x\"],\n            y = p0[\"y\"],\n            xend = p2[\"x\"],\n            yend = p2[\"y\"]\n        ),\n        arrow = arrow(length = unit(0.3, \"cm\")),\n        colour = 'black'\n    ) +\n    geom_text(aes(x = p2[\"x\"], y = p2[\"y\"], label = \"p2\"),\n              size = 4,\n              vjust = -1) +\n    geom_point(aes(x = p1_on_p2[\"x\"], y = p1_on_p2[\"y\"]),\n               size = 2,\n               colour = \"red\") +\n    geom_segment(aes(\n        x = p0[\"x\"],\n        y = p0[\"y\"],\n        xend = p1_on_p2[\"x\"],\n        yend = p1_on_p2[\"y\"]\n    ),\n    colour = 'red') +\n    geom_segment(\n        aes(\n            x = p1[\"x\"],\n            y = p1[\"y\"],\n            xend = p1_on_p2[\"x\"],\n            yend = p1_on_p2[\"y\"]\n        ),\n        linetype = 2,\n        
colour = 'red'\n    ) +\n    geom_text(\n        aes(x = p1_on_p2[\"x\"], y = p1_on_p2[\"y\"], label = \"p1_on_p2\"),\n        size = 4,\n        vjust = 1,\n        hjust = -0.2\n    )\nggsave(\"prj.png\", dpi = 600)\n\n#向量相乘\npetal_raw <- iris[, c(\"Petal.Length\", \"Petal.Width\")]\npetal_raw$type <- \"raw\"\nw <- c(1, 2)\npetal_multiply <- t(apply(petal_raw[, 1:2], 1, function(x) {\n    w * x\n}))\npetal_multiply <- petal_multiply %>%\n    as.data.frame() %>%\n    mutate(type = \"multiply\")\npetal <- rbind(petal_raw, petal_multiply)\nggplot(petal, aes(x = Petal.Length, y = Petal.Width)) +\n    geom_point() +\n    facet_wrap( ~ type)\n#由此可见，向量相乘，只不过是在不同维度上缩放而已\n\n#向量的内积\nset.seed(2012)\nx <- rnorm(100)\ny <- rnorm(100)\nsum(x * y)\n#> [1] -11.1336\nsum(sort(x) * sort(y))\n#> [1] 128.3501\nsum(sort(x) * sort(y, decreasing = T))\n#> [1] -127.108\nx <- sort(x)\ny <- sort(y)\ninner_products <- NULL\nfor (i in 2:99) {\n    same_part_len <- rep(i, 500)\n    inner_product <- replicate(500,\n                               sum(x * y[c(sample(i), (i + 1):100)]))\n\n    inner_products <- rbind(inner_products,\n                            cbind(same_part_len, inner_product))\n}\ny <- rev(y)\nfor (i in 99:2) {\n    same_part_len <- rep(i, 500)\n    inner_product <- replicate(500,\n                               sum(x * y[c(sample(i), (i + 1):100)]))\n\n    inner_products <- rbind(inner_products,\n                            cbind(same_part_len, inner_product))\n}\n\n\n\nggplot(\n    as.data.frame(inner_products),\n    aes(\n        x = same_part_len,\n        y = inner_product,\n        group = same_part_len,\n        fill = same_part_len\n    )\n) +\n    geom_boxplot() +\n    xlab(\"顺序不相同的长度\") +\n    ylab(\"内积大小\") +\n    theme(legend.position = \"none\")\n\n\n#向量是逐个相乘之后再相加\n#相乘之后再相减\n#x1*x2 - y1*y2\np0 <- c(0, 0)\np1 <- c(4, 3)\np2 <- c(1, 3)\np3 <- p1 + p2\ndiff(rev(p1) * p2)\n#其实，行列式代表的都是面积或是体积\ndemo_points <- rbind(p0, p1, p3, p2)\npoint_label <- c(\"c(0, 0)\",\n      
           \"c(4, 3)\",\n                 \"c(4, 6)\",\n                 \"c(1, 3)\")\nggplot(as.data.frame(demo_points),\n       aes(x = demo_points[, 1], y = demo_points[, 2])) +\n    geom_point() +\n    geom_polygon(fill = \"red\",\n                 alpha = 0.25,\n                 colour = \"black\") +\n    geom_label(\n        aes(label = point_label),\n        hjust = c(0, 0, 1, 1),\n        vjust = c(1, 1, 0, 0),\n        fill = \"yellow\"\n    ) +\n    xlim(0, 5.5) +\n    ylim(0, 6.5) +\n    coord_fixed()\ndiff(rev(p1) * p2)\n#其实，行列式代表的都是面积或是体积\n\n\n# Factor ------------------------------------------------------------------\n\n#从连续变量和离散变量的角度看\n#向量主要用来存储连续取值变量\n#（向量当然可以存储任意取值的集合，包括字符、逻辑值等）\n#而离散取值的变量，则用因子来存储\n\n#比如性别：\nxb <- c(\"女\", \"男\", \"男\", \"女\", \"男\", \"女\")\nis.vector(xb)\n#[1] TRUE\ntypeof(xb)\n#[1] \"character\"\nxb <- factor(xb)\nis.vector(xb)\n#[1] FALSE\nis.factor(xb)\n#[1] TRUE\nxb\ntypeof(xb)\n#[1] \"integer\"\nas.numeric(xb)\n#> [1] 2 1 1 2 1 2\n\n#取值水平\nlevels(xb)\n#? 
[1] \"男\" \"女\"\n#结果与下属语句相同\nsort(unique(as.character(xb)))\n#> [1] \"男\" \"女\"\n#取值水平的个数\nnlevels(xb)\n#> [1] 2\ntable(xb)\n#> xb\n#> 男 女\n#> 3  3\n\nxb\n#> [1] 女 男 男 女 男 女\n#> Levels: 男 女\n\nas.integer(xb)\n#> [1] 2 1 1 2 1 2\n#以上顺序为字符顺序，可参阅?Comparison的结果\n\nas.character(xb)\n#> [1] \"女\" \"男\" \"男\" \"女\" \"男\" \"女\"\n\nxb == \"男\"\n#> [1] FALSE  TRUE  TRUE FALSE  TRUE FALSE\n\nxb == 1\n#> [1] FALSE FALSE FALSE FALSE FALSE FALSE\nas.integer(xb) == 1\n#> [1] FALSE  TRUE  TRUE FALSE  TRUE FALSE\n\nlevels(xb)[as.integer(xb)]\nlevels(xb)[xb]\n#> [1] \"女\" \"男\" \"男\" \"女\" \"男\" \"女\"\n\nxb[c(1, 4:5)]\n#[1] 女 女 男\n# Levels: 男 女\nxb[-c(2:3, 6)]\n# [1] 女 女 男\n# Levels: 男 女\nxm <- c(\"周黎\", \"汤海明\", \"舒江辉\", \"翁柯\", \"祁强\", \"湛容\")\nyw <- c(94, 87, 92, 91, 85, 92)\nxb <- c(\"女\", \"男\", \"男\", \"女\", \"男\", \"女\")\nxb <- factor(xb)\nxb[yw > 90]\n#> [1] 女 男 女 女\n#> Levels: 男 女\n\nxb[1] <- \"男\"\nxb\n# [1] 男 男 男 女 男 女\n# Levels: 男 女\nxb[1] <- \"未知\"\n#> Warning message:\n#>   In `[<-.factor`(`*tmp*`, 1, value = \"未知\") :\n#>   invalid factor level, NA generated\nxb <- c(\"女\", \"男\", \"男\", \"女\", \"男\", \"女\")\nxb <- factor(xb,\n             levels = c(\"男\", \"女\", \"未知\"))\nxb\n# [1] 女 男 男 女 男 女\n# Levels: 男 女 未知\ntable(xb)\n# xb\n# 男   女 未知\n# 3    3    0\nxb[1] <- \"未知\" #此时可以赋值了\nxb\n# [1] 未知 男   男   女   男   女\n# Levels: 男 女 未知\n\nnumber_factors <- factor(c(10, 20, 20, 20, 10))\nmean(number_factors)\n#[1] NA\nmean(as.numeric(number_factors))\n#[1] 1.6\nas.numeric(number_factors)\n#[1] 1 2 2 2 1\nmean(as.numeric(as.character(number_factors)))\n#[1] 16\nlevels(number_factors)\n#[1] \"10\" \"20\"\nmean(as.numeric(levels(number_factors)[number_factors]))\n\n#男女平等，xb为无序因子\n#因而下述逻辑运算符没有意义\nxb[1] > xb[2]\n#> [1] NA\n#> Warning message:\n#>   In Ops.factor(xb[1], xb[2]) : ‘>’ not meaningful for factors\n\nscore <- factor(c(\"优\", \"良\", \"优\", \"优\", \"良\", \"优\"),\n                ordered = TRUE)\nscore[1] > score[2]\n#> [1] TRUE\n\ndays <- factor(c(\"周一\", \"周三\", 
\"周二\", \"周二\"),\n               ordered = TRUE)\ndays[3] < days[2]\n#> [1] TRUE\ndays[1] < days[3]\n#> [1] FALSE\ndays\n#> [1] 周一 周三 周二 周二\n#> Levels: 周二 < 周三 < 周一\n\ndays <- factor(c(\"周一\", \"周三\", \"周二\", \"周二\"),\n               ordered = TRUE,\n               levels = c(\"周一\", \"周二\", \"周三\"))\ndays\n#> [1] 周一 周三 周二 周二\n#> Levels: 周一 < 周二 < 周三\n\ndays[3] < days[2]\n#> [1] TRUE\ndays[1] < days[3]\n#> [1] TRUE\n\n#百分制成绩变为五分制成绩\nyw <-  c(94, 87, 92, 91, 85, 92)\n#数据分箱\nyw5 <-  cut(yw,\n            breaks = c(0, (6:10) * 10))\nyw5\n#> [1] (90,100] (80,90]  (90,100] (90,100] (80,90]  (90,100]\n#> Levels: (0,60] (60,70] (70,80] (80,90] (90,100]\n\n#百分制成绩变为五分制成绩\nyw <-  c(94, 87, 92, 91, 85, 92)\n#数据分箱+闭区间\nyw5 <-  cut(yw,\n            breaks = c(0, (6:10) * 10),\n            include.lowest = TRUE)\nyw5\n#> [1] (90,100] (80,90]  (90,100] (90,100] (80,90]  (90,100]\n#> Levels: [0,60] (60,70] (70,80] (80,90] (90,100]\n\n#百分制成绩变为五分制成绩\nyw <-  c(94, 87, 92, 91, 85, 92)\n#数据分箱+闭区间+左开右闭\nyw5 <-  cut(\n    yw,\n    breaks = c(0, (6:10) * 10),\n    include.lowest = TRUE,\n    right = FALSE\n)\nyw5\n#> [1] [90,100] [80,90)  [90,100] [90,100] [80,90)  [90,100]\n#> Levels: [0,60) [60,70) [70,80) [80,90) [90,100]\n\n\n#百分制成绩变为五分制成绩\nyw <-  c(94, 87, 92, 91, 85, 92)\n#数据分箱+闭区间+左开右闭+有序因子\nyw5 <-  cut(\n    yw,\n    breaks = c(0, (6:10) * 10),\n    include.lowest = TRUE,\n    right = FALSE,\n    ordered_result = TRUE\n)\nyw5\n#> [1] [90,100] [80,90)  [90,100] [90,100] [80,90)  [90,100]\n#> Levels: [0,60) < [60,70) < [70,80) < [80,90) < [90,100]\n\n#百分制成绩变为五分制成绩\nyw <-  c(94, 87, 92, 91, 85, 92)\n#数据分箱+闭区间+左开右闭+有序因子+标签\nyw5 <-  cut(\n    yw,\n    breaks = c(0, (6:10) * 10),\n    include.lowest = TRUE,\n    right = FALSE,\n    ordered_result = TRUE,\n    labels = c(\"不及格\", \"及格\", \"中\", \"良\", \"优\")\n)\nyw5\n#> [1] 优 良 优 优 良 优\n#> Levels: 不及格 < 及格 < 中 < 良 < 优\n\n\n# Matrix and Array 
--------------------------------------------------------\n\n#一维数据可以用向量或因子存储\n#假如对多个观测对象的多个属性同时进行记录\n#若这些数据是同质的，宜采用矩阵进行存储\n#依然以学生成绩这份数据为例\nxm <- c(\"周黎\", \"汤海明\", \"舒江辉\", \"翁柯\", \"祁强\", \"湛容\")\nyw <- c(94, 87, 92, 91, 85, 92)\nsx <- c(82, 94, 79, 84, 92, 82)\nwy <- c(96, 89, 86, 96, 82, 85)\n\n#语文、数学、外语三科成绩最好放一起\nysw <- matrix(c(94, 87, 92, 91, 85, 92,\n                82, 94, 79, 84, 92, 82,\n                96, 89, 86, 96, 82, 85),\n              ncol = 3)\ncolnames(ysw) <- c(\"yw\", \"sx\", \"wy\")\nrow.names(ysw) <- xm\nView(ysw)\n\n\n#假如数据本身就是“站”着的\n#要注意其中byrow = 参数的设置\nysw <- matrix(\n    c(94, 82, 96,\n      87, 94, 89,\n      92, 79, 86,\n      91, 84, 96,\n      85, 92, 82,\n      92, 82, 85),\n    byrow = TRUE,\n    ncol = 3\n)\ncolnames(ysw) <- c(\"yw\", \"sx\", \"wy\")\nrow.names(ysw) <- xm\n\n\nexample_vector <- 1:18\nexample_matrix <- matrix(example_vector, ncol = 3)\nView(example_matrix)\n\nexample_vector <- 1:18\nexample_matrix <- matrix(example_vector, ncol = 3, byrow = TRUE)\nView(example_matrix)\n\n\n#矩阵的基本性质\ncolnames(ysw)\n#[1] \"yw\" \"sx\" \"wy\"\nrow.names(ysw)\n#[1] \"周黎\"   \"汤海明\" \"舒江辉\" \"翁柯\"   \"祁强\"   \"湛容\"\nnrow(ysw) #行数\n#[1] 6\nncol(ysw) #列数\n#[1] 3\ndim(ysw) #行数和列数\n#[1] 6 3\ndimnames(ysw) #行列名称\n# [[1]]\n# [1] \"周黎\"   \"汤海明\" \"舒江辉\" \"翁柯\"   \"祁强\"   \"湛容\"\n#\n# [[2]]\n# [1] \"yw\" \"sx\" \"wy\"\n\n#访问矩阵的子集\n#子集的访问依然是通过[]\n#由于矩阵是二维的，需要','来分别指定行和列\nysw[1,] #第一个同学语文、数学、外语得分\nysw[\"周黎\",] #同上\n# yw sx wy\n# 94 82 96\n\nysw[, 1] #语文成绩\nysw[, \"yw\"] #同上\n# 周黎 汤海明 舒江辉   翁柯   祁强   湛容\n# 94     87     92     91     85     92\n\nysw[1, 1] #第一个同学的第一门课得分\nysw[\"周黎\", \"yw\"] #第一个同学的第一门课得分\n#[1] 94\n\nysw[\"周黎\", 2:3]\nysw[1, c(\"sx\", \"wy\")]\n# sx wy\n# 82 96\nysw[1, -1]\n# sx wy\n# 82 96\n\n#列重新排序\nysw[, c(\"sx\", \"yw\", \"wy\")]\nysw[, c(2, 1, 3)]\n#         sx yw  wy\n#周黎     82  94  96\n#汤海明   94 87 89\n# 舒江辉 79 92 86\n# 翁柯   84 91 96\n# 祁强   92 85 82\n# 湛容   82 92 85\n#行进行排序\n#比如，按照数学成绩进行排序\n(order_sx <- order(ysw[, 
\"sx\"],\n                   decreasing = TRUE))\n#[1] 2 5 4 1 6 3\nysw[order_sx,]\nysw[order(ysw[, \"sx\"], ysw[, \"wy\"], decreasing = c(FALSE, TRUE)),]\n# yw sx wy\n# 汤海明 87 94 89\n# 祁强   85 92 82\n# 翁柯   91 84 96\n# 周黎   94 82 96\n# 湛容   92 82 85\n# 舒江辉 92 79 86\n\n\n\n#将两个矩阵摞起来，像叠罗汉一样\nysw1 <- matrix(\n    c(94, 87, 92, 91, 85, 92,\n      82, 94, 79, 84, 92, 82,\n      96, 89, 86, 96, 82, 85),\n    ncol = 3,\n    dimnames = list(c(\"周黎\", \"汤海明\", \"舒江辉\", \"翁柯\", \"祁强\", \"湛容\"),\n                    c(\"yw\", \"sx\", \"wy\"))\n)\nysw2 <- matrix(c(88, 81,\n                 72, 89,\n                 86, 87),\n               ncol = 3,\n               dimnames = list(c(\"穆伶俐\", \"韦永杰\"),\n                               c(\"yw\", \"sx\", \"wy\")))\nysw <- rbind(ysw1, ysw2)\ncjb$zz[1:8]\ncjb$ls[1:8]\nyu_shu_wai <- matrix()\n#政治zz和历史ls成绩\nzzls <- matrix(\n    c(97, 97,\n      95, 94,\n      98, 95,\n      93, 97,\n      93, 87,\n      91, 90,\n      94, 87,\n      97, 94),\n    ncol = 2,\n    byrow = TRUE,\n    dimnames = list(\n        c(\"周黎\", \"汤海明\", \"舒江辉\", \"翁柯\",\n          \"祁强\", \"湛容\", \"穆伶俐\", \"韦永杰\"),\n        c(\"zz\", \"ls\")\n    )\n)\n#将个矩阵并列合并，像书架上的书一样\n#得到成绩表cjb如下\ncjb <- cbind(ysw, zzls)\n\n\n#对矩阵进行操作\nrowSums(cjb) #每个同学的总成绩\n# 周黎 汤海明 舒江辉   翁柯   祁强   湛容 穆伶俐 韦永杰\n# 466    459    450    461    439    440    427    448\ncolMeans(cjb) #各门课的平均分\n# yw     sx     wy     zz     ls\n# 88.750 84.250 88.375 94.750 92.625\n#更一般的方法\napply(cjb, 1, sum)\n# 周黎 汤海明 舒江辉   翁柯   祁强   湛容 穆伶俐 韦永杰\n# 466    459    450    461    439    440    427    448\napply(cjb, 2, mean)\n# yw    sx    wy    zz    ls\n# 88.75 84.25 88.38 94.75 92.62\nround(apply(cjb, 2, sd), digits = 2)\n# yw   sx   wy   zz   ls\n# 4.33 7.23 5.10 2.43 4.10\n\n#可以自定义函数\ncoefficient_of_variation <- function(x) {\n    sd(x) / mean(x)\n}\napply(cjb, 2, coefficient_of_variation)\n#当然，也可以采用匿名函数\nround(apply(cjb, 2, function(x) {\n    sd(x) / mean(x)\n}), digits = 3)\n# yw    sx    wy    zz    ls\n# 
0.049 0.086 0.058 0.026 0.044\n\n#矩阵的妙用\n#设有向量\nx <- c(12, 23, 17, 48, 35, 23, 14, 39, 101)\n# （1）求三个相邻元素的乘积，不滚动\n# 即：12*23*17；48*35*23\n# （2）求三个相邻元素的乘积，滚动\n# 即：12*23*17；23*17*48\nx_matrix1 <- matrix(x,\n                    ncol = 3)\napply(x_matrix1, 2, prod)\nx_len <- length(x)\nx_matrix2 <- rbind(x[1:(x_len - 2)],\n                   x[2:(x_len - 1)],\n                   x[3:x_len])\napply(x_matrix2, 2, prod)\n\n\nA <- matrix(c(1, 2, 3,\n              2, 2, 5,\n              3, 5, 1),\n            ncol = 3,\n            byrow = TRUE)\nb <- 1:3\nsolve(A, b)\n#[1] 1 0 0\n\n#是否可以利用solve函数求逆矩阵\ndiag(3)\n# [,1] [,2] [,3]\n# [1,]    1    0    0\n# [2,]    0    1    0\n# [3,]    0    0    1\nsolve(A, diag(3))\nround(solve(A), digits = 2)\n# [,1]  [,2]  [,3]\n# [1,] -1.53  0.87  0.27\n# [2,]  0.87 -0.53  0.07\n# [3,]  0.27  0.07 -0.13\n\nsolve(A) %*% A\nsqrt(2) ^ 2 == 2\n#[1] FALSE\ndplyr::near(sqrt(2) ^ 2, 2)\n#[1] TRUE\nall(near(solve(A) %*% A, diag(3)))\n#[1] TRUE\n\n\n#数组是矩阵的扩展\n#矩阵是二维的，数组则可以是高维的\n#比如，我们读入一个JPEG文件\n#就是一个三维的数组\n#download the presidents.jpg from:\njpg_url <-\n    \"https://raw.githubusercontent.com/byaxb/RDataAnalytics/master/data/presidents.jpg\"\ndownload.file(jpg_url,\n              \"presidents.jpg\", mode = \"wb\")\nlibrary(imager)\npresidents <- load.image(\"presidents.jpg\")\nstr(presidents)\n#> 'cimg' num [1:482, 1:345, 1, 1:3] 1 0.976 0.929 0.914 0.914\n\npresidents <- load.image(\"presidents.jpg\")\nplot(presidents)\ntypeof(presidents)\n\npresidents <- load.image(\"presidents.jpg\")\npresidents[, , 2] <- 0\npresidents[, , 3] <- 0\nplot(presidents)\n\npresidents <- load.image(\"presidents.jpg\")\npresidents[, , 1] <- 0\npresidents[, , 3] <- 0\nplot(presidents)\n\npresidents <- load.image(\"presidents.jpg\")\npresidents[, , 1] <- 0\npresidents[, , 2] <- 0\nplot(presidents)\n\n#黄色#FFFF00\npresidents <- load.image(\"presidents.jpg\")\npresidents[, , 3] <- 0\nplot(presidents)\n\npresidents <- load.image(\"presidents.jpg\")\narea_coor_x <- 350:449 
#100\narea_coor_y <- 110:259 #150\ndegree <- 0.6\narray_dim <- c(length(area_coor_x),\n               length(area_coor_y),\n               3)\narray_data <- runif(prod(array_dim))\nrandom_noise <- array(dim = array_dim,\n                      data = array_data)\npresidents[area_coor_x, area_coor_y, ] <-\n    (1 - degree) * presidents[area_coor_x, area_coor_y, ] +\n    degree * random_noise\nplot(presidents)\n\n? imager\nimager::save.image(president,\n                   file = \"president2.jpg\")\n\n#Windows里边，还可以使用以下函数\nshell.exec(\"president2.jpg\") # 效果一样\n\n\n#感兴趣的小伙伴，可以再去探索一下其他的一些图像计算\n#实现对图像平移、旋转、放大和缩小、灰度差值、加噪等\n\n\n# List --------------------------------------------------------------------\n\n#矩阵已经可以存储高维数据了\n#但是，矩阵只能存储“同质”的数据\n#假如要存储非同质的数据，或者是类型、长度都不一样的数据\n#则需要用到列表list这种结构\n\n#北京邮电大学下设以下学院\nxue_yuan <- c(\n    \"信息与通信工程学院\",\n    \"电子工程学院\",\n    \"计算机学院\",\n    \"自动化学院\",\n    \"软件学院\",\n    \"数字媒体与设计艺术学院\",\n    \"现代邮政学院\",\n    \"网络空间安全学院\",\n    \"光电信息学院\",\n    \"理学院\",\n    \"经济管理学院\",\n    \"人文学院\",\n    \"马克思主义学院\",\n    \"国际学院\",\n    \"网络教育学院\",\n    \"继续教育学院\",\n    \"民族教育学院\"\n)\nlength(xue_yuan)\n#拥有以下基地\nji_di <- c(国家重点实验室    = 2,\n                  国家工程实验室    = 5,\n                  部级重点实验室    = 9)\n#校区分布\nxiao_qu <- c(\"西土城路校区\", \"沙河校区\", \"宏福校区\")\n#学生数量\nxue_sheng <- c(全日制     = 23000,     非全日制     = 55000)\n#集合在一起\nbupt <- list(\n    xue_yuan = xue_yuan,\n    xiao_qu = xiao_qu,\n    ji_di = ji_di,\n    xue_sheng = xue_sheng\n)\nlength(bupt)\n#[1] 4\nnames(bupt)\n#[1] \"xue_yuan\"  \"xiao_qu\"   \"ji_di\"     \"xue_sheng\"\ntypeof(bupt)\n#[1] \"list\"\n\nbupt$A_xueke <- c(\"信息与通信工程\", \"计算机科学与技术\", \"电子科学与技术\")\nlength(bupt)\n#[1] 5\nnames(bupt)\n#> [1] \"xue_yuan\"  \"xiao_qu\"   \"ji_di\"     \"xue_sheng\"\n#> [5] \"A_xueke\"\nbupt$A_xueke <- NULL\nnames(bupt)\n#>[1] \"xue_yuan\"  \"xiao_qu\"   \"ji_di\"     \"xue_sheng\"\n\n#访问列表\n#通过美元$符号访问\nbupt$xue_yuan[1:2]\n#[1] \"信息与通信工程学院\" \"电子工程学院\"\nbupt$xiao_qu\n#[1] \"西土城路校区\" \"沙河校区\"     
\"宏福校区\"\nsum(bupt$ji_di)\n#[1] 16\n\nbupt$xue_sheng\n#> 全日制 非全日制\n#> 23000    55000\n\nbupt$xue_sheng[\"全日制\"]\n# 全日制\n# 30000\nsum(bupt$xue_sheng)\n#[1] 75000\n\n#以下三种方式效果相同\nbupt$xue_sheng\nbupt[[4]]\nbupt[[\"xue_sheng\"]]\n# 全日制 非全日制\n# 30000    45000\n\n#也可以通过[]来访问\nbupt[4]\n#> $`xue_sheng`\n#> 全日制 非全日制\n#> 30000    45000\ntypeof(bupt[4]) #单个的[]，看到的依然是包装箱\n#> [1] \"list\"\nbupt[[4]]\n#> 全日制 非全日制\n#> 30000    45000\ntypeof(bupt[[4]])\n#> [1] \"double\"\n\nsum(bupt[4])\n#> Error in sum(bupt[4]) : invalid 'type' (list) of argument\n#这才是正确的打开方式\nsum(bupt[[4]])\n#[1] 75000\n#两个方括号，\n#相当于进入包装箱内部了\n#能看到包装箱内部了\nbupt[\"xue_sheng\"]\n# $`xue_sheng`\n# 全日制 非全日制\n# 30000    45000\nbupt[[\"xue_sheng\"]]\n# 全日制 非全日制\n# 30000    45000\n\nunlist(bupt[4])\n\nbupt$xue_sheng\n# 全日制 非全日制\n# 30000    45000\nbupt$xue_sheng <- c(全日制     = 23000,\n                       非全日制    = 55000)\nbupt$xue_sheng\n# 全日制 非全日制\n# 23000    55000\n\n#可以同时获取两个部分\n#也就是锁定两个仓库里的两个箱子\nbupt[3:4]\n#下边这种方式显然是不被允许的\nbupt[[1:3]]\n\n#对列表的每一个组成部分，执行某种操作\n(component_length <- lapply(bupt, length))\n# $`xue_yuan`\n# [1] 17\n#\n# $xiao_qu\n# [1] 3\n#\n# $ji_di\n# [1] 3\n#\n# $xue_sheng\n# [1] 2\nunlist(component_length)\n# xue_yuan   xiao_qu     ji_di xue_sheng\n# 17         3         3         2\n#可以返回一个可读性更强的结果\nsapply(bupt, length)\n# xue_yuan   xiao_qu     ji_di xue_sheng\n# 17         3         3         2\n\nsapply(bupt, typeof)\n#> xue_yuan     xiao_qu       ji_di   xue_sheng\n#> \"character\" \"character\"    \"double\"    \"double\"\n\n# Data Frame --------------------------------------------------------------\n\n#毫无疑问，数据框是数据分析领域最美妙的结构\n#数据框本质上是一个列表，所以不同的列类别可以不一样\n#但形式上，又像是矩阵，以一个二维关系表的方式呈现\n\nxm <- c(\"周黎\", \"汤海明\", \"舒江辉\", \"翁柯\", \"祁强\", \"湛容\")\nxb <- factor(c(\"女\", \"男\", \"男\", \"女\", \"男\", \"女\"))\nyw <- c(94, 87, 92, 91, 85, 92)\nsx <- c(82, 94, 79, 84, 92, 82)\nwy <- c(96, 89, 86, 96, 82, 85)\ncjb <- data.frame(\n    xm = xm,\n    xb = xb,\n    yw = yw,\n    sx = sx,\n    wy = 
wy\n)\n#注意比较与下述语句的区别\n#cheng_ji_biao <- cbind(xing_ming, xing_bie, yu_wen, shu_xue, wai_yu)\n#class(cheng_ji_biao)\n\nstr(cjb[1,])\ntypeof(cjb[1,])\nclass(cjb[1,])\nstr(cjb[, 1])\n\nView(cjb) #打开成绩表\n#由于数据框本质上是列表，可以通过以下三种方式访问其中的列\ncjb$xm\ncjb[[1]]\ncjb[[\"xm\"]]\n#[1] \"周黎\"   \"汤海明\" \"舒江辉\" \"翁柯\"   \"祁强\"   \"湛容\"\n#但一般来讲[[]]的用法较少，要么用$，要么像矩阵一样访问\ncjb[, 1]\ncjb[, 'xm']\n#[1] \"周黎\"   \"汤海明\" \"舒江辉\" \"翁柯\"   \"祁强\"   \"湛容\"\n\ncjb[1,]\n# xm xb yw sx wy\n# 1 周黎 女 94 82 96\ncjb[c(1, 3), c(\"xm\", \"sx\")]\n# xm sx\n# 1   周黎 82\n# 3 舒江辉 79\ncjb[1:3, -1]\n#   xb yw sx wy\n# 1 女 94 82 96\n# 2 男 87 94 89\n# 3 男 92 79 86\n\n#作为列表，通过美元符号增加一列政治zz\ncjb$zz <- c(97, 95, 98, 93, 93, 91)\nView(cjb)\n#像矩阵，cbind也可以\ncjb <- cbind(cjb,\n             ls = c(97, 94, 95, 97, 87, 90))\n\nstr(cjb)\n#当然，绝大部分情况下\n#数据不会在代码里逐字敲入\n#也不会通过控制台输入\n#毕竟采集数据和分析数据的过程是分开的\ncjb_url <- \"data/cjb.csv\"\ncjb <- read.csv(cjb_url,\n                head = TRUE,\n                stringsAsFactors = FALSE)\n#注意：\n#在前边的章节中，已经读取了cjb数据\n#在读取完数据之后，可以直接通过以下代码将cjb存在本地以备后用\n#存储：\n#save(cjb, file = 'cjb.rda')\n#加载：\n#load('cjb.rda')\n#当然，本课程为保证各章节代码的相对独立性，会多次从Github中加载数据\n\n\nView(cjb)\nhead(cjb)\n#>       xm   bj xb yw sx wy zz ls dl wl hx sw wlfk\n#> 1   周黎 1101 女 94 82 96 97 97 98 95 94 88 文科\n#> 2 汤海明 1101 男 87 94 89 95 94 94 90 90 89 文科\n#> 3 舒江辉 1101 男 92 79 86 98 95 96 89 94 87 文科\n#> 4   翁柯 1101 女 91 84 96 93 97 94 82 90 83 文科\n#> 5   祁强 1101 男 85 92 82 93 87 88 95 94 93 文科\n#> 6   湛容 1101 女 92 82 85 91 90 92 82 98 90 文科\ntail(cjb, n = 3)\n#>         xm   bj xb yw sx wy zz ls dl wl hx sw wlfk\n#> 773 徐宏平 1115 男 85 59 89 80 85 82 61 64 75 理科\n#> 774 昌肖峰 1115 男 81 62 76 89 76 91 49 68 74 理科\n#> 775 郑慕海 1115 男 72 59 82 92 85 82 59 58 55 理科\n\n#Compactly Display the Structure\nstr(cjb) #查看数据的结构\n#> 'data.frame':\t775 obs. 
of  13 variables:\n#>   $ xm  : chr  \"周黎\" \"汤海明\" \"舒江辉\" \"翁柯\" ...\n#> $ bj  : int  1101 1101 1101 1101 1101 1101 1101 1101 1101 1101 ...\n#> $ xb  : chr  \"女\" \"男\" \"男\" \"女\" ...\n#> $ yw  : int  94 87 92 91 85 92 88 81 88 94 ...\n#> $ sx  : int  82 94 79 84 92 82 72 89 77 81 ...\n#> $ wy  : int  96 89 86 96 82 85 86 87 95 88 ...\n#> $ zz  : int  97 95 98 93 93 91 94 97 94 91 ...\n#> $ ls  : int  97 94 95 97 87 90 87 94 84 85 ...\n#> $ dl  : int  98 94 96 94 88 92 88 96 94 98 ...\n#> $ wl  : int  95 90 89 82 95 82 89 81 87 81 ...\n#> $ hx  : int  94 90 94 90 94 98 98 88 94 88 ...\n#> $ sw  : int  88 89 87 83 93 90 94 83 82 88 ...\n#> $ wlfk: chr  \"文科\" \"文科\" \"文科\" \"文科\" ...\nsummary(cjb) #对数据进行统计描述\n# xm                  bj            xb\n# Length:775         Min.   :1101   Length:775\n# Class :character   1st Qu.:1104   Class :character\n# Mode  :character   Median :1107   Mode  :character\n#                    Mean   :1108\n#                    3rd Qu.:1111\n#                    Max.   :1115\n# yw              sx               wy\n# Min.   : 0.00   Min.   :  0.00   Min.   : 0.0\n# 1st Qu.:85.00   1st Qu.: 81.00   1st Qu.:84.0\n# Median :88.00   Median : 89.00   Median :88.0\n# Mean   :87.27   Mean   : 86.08   Mean   :87.4\n# 3rd Qu.:91.00   3rd Qu.: 95.00   3rd Qu.:92.0\n# Max.   :96.00   Max.   :100.00   Max.   :99.0\n# zz               ls               dl\n# Min.   :  0.00   Min.   :  0.00   Min.   :  0.00\n# 1st Qu.: 90.00   1st Qu.: 85.00   1st Qu.: 90.00\n# Median : 93.00   Median : 90.00   Median : 94.00\n# Mean   : 92.21   Mean   : 89.03   Mean   : 92.91\n# 3rd Qu.: 95.00   3rd Qu.: 94.50   3rd Qu.: 96.00\n# Max.   :100.00   Max.   :100.00   Max.   :100.00\n# wl              hx               sw\n# Min.   :  0.0   Min.   :  0.00   Min.   
:  0.00\n# 1st Qu.: 74.0   1st Qu.: 88.00   1st Qu.: 81.00\n# Median : 83.0   Median : 94.00   Median : 88.00\n# Mean   : 81.1   Mean   : 91.57   Mean   : 86.26\n# 3rd Qu.: 91.0   3rd Qu.: 98.00   3rd Qu.: 93.00\n# Max.   :100.0   Max.   :100.00   Max.   :100.00\n# wlfk\n# Length:775\n# Class :character\n# Mode  :character\n\nlength(cjb)\n#> [1] 13\nnames(cjb)\n#> [1] \"xm\"   \"bj\"   \"xb\"   \"yw\"   \"sx\"   \"wy\"   \"zz\"\n#> [8] \"ls\"   \"dl\"   \"wl\"   \"hx\"   \"sw\"   \"wlfk\"\ncolnames(cjb) #结果同上\nnrow(cjb)\n#> [1] 775\nncol(cjb)\n#> [1] 13\nrownames(cjb)\nrow.names(cjb)\n\nlibrary(Hmisc)\ndescribe(cjb) #对数据进行描述\n\n\n#作必要的类型转换\ncjb$bj <- factor(cjb$bj)\ncjb$xb <- factor(cjb$xb)\ncjb$wlfk <- factor(cjb$wlfk)\nstr(cjb)\n# 'data.frame':\t775 obs. of  14 variables:\n#   $ xm  : chr  \"周黎\" \"汤海明\" \"舒江辉\" \"翁柯\" ...\n# $ bj  : Factor w/ 15 levels \"1101\",\"1102\",..: 1 1 1 1 1 1 1 1 1 1 ...\n# $ xb  : Factor w/ 2 levels \"男\",\"女\": 2 1 1 2 1 2 2 1 2 2 ...\n# $ yw  : int  94 87 92 91 85 92 88 81 88 94 ...\nsummary(cjb)\n#> xm                  bj       xb\n#> Length:775         1102   : 58   男:369\n#> Class :character   1103   : 57   女:406\n#> Mode  :character   1105   : 57\n#>                    1104   : 56\n#>                    1106   : 56\n#>                    1107   : 55\n#>                    (Other):436\n\ncjb$zcj <- apply(cjb[, 4:12], 1, sum)\norder(cjb$zcj, decreasing = TRUE)[1:5]\n#> [1] 488 392 438 393 489 337\ncjb_sorted <- cjb[order(cjb$zcj, decreasing = TRUE),]\nView(cjb_sorted)\n\n(top5 <- order(cjb$zcj, decreasing = TRUE)[1:5])\n#[1] 488 392 438 393 489\ncjb$xm[top5]\n#[1] \"宁琦\"   \"焦金音\" \"鲁孟秋\" \"伊礼贤\" \"傅世鸿\"\ncjb[top5, \"xm\"]\n#[1] \"宁琦\"   \"焦金音\" \"鲁孟秋\" \"伊礼贤\" \"傅世鸿\"\ncjb$zcj[top5]\n#[1] 885 879 878 876 872\nsort(cjb$zcj, decreasing = TRUE)[1:5]\n#[1] 885 879 878 876 872\n\n#数据集分为训练集和测试集\nset.seed(2012)\nn_record <- nrow(cjb)\ntrain_idx <- sample(1:n_record, floor(n_record * 0.7))\ntrain_idx <- sample(n_record, n_record * 
0.7)\nlength(train_idx)\n#[1] 542\ntest_idx <- (1:n_record)[-train_idx]\ntest_idx <- setdiff(1:n_record, train_idx)\nall((1:n_record)[-train_idx] == setdiff(1:n_record, train_idx))\n#[1] TRUE\nlength(test_idx)\n#[1] 233\n\n#得到测试集和训练集\ntrain_set <- cjb[train_idx,]\ntest_set <- cjb[-train_idx,]\n#或者\ntest_set <- cjb[-test_idx,]\n#显然，下面这种方式是错的\ntrain_set <- cjb[sample(n_record, n_record * 0.7),]\ntest_set <- cjb[sample(n_record, n_record * 0.3),]\n\n\n#当然，像训练集、测试集划分这么常见的任务\n#一些成熟的包里边早就实现了，\n#如分类与回归的框架包caret\nlibrary(caret)\ntrain_idx <- createDataPartition(cjb$文理分科,\n                                 p = 0.7,\n                                 list = FALSE)\ntrain_set <- cjb[train_idx,]\ntest_set <- cjb[-train_idx,]\n#在工业级/商业级代码中，我们建议不要重复造轮子\n#但是在入门时，多做一些手工活还是有必要的\n\n\n#以上只是阐述了六种数据对象的基本操作\n#对于文件的读取、数据库操作、字符操作、日期操作、网络文件的解析等，\n#均未涉及，留待小伙伴自行前去探索\n\n\n# tidyverse ---------------------------------------------------------------\n\n#在结束数据对象的探索之前\n#tidyverse这个扩展包套装是强烈推荐的\n#https://www.tidyverse.org/\n#这个网站里边的dplyr/tidyr都是数据转换所必须掌握的包\n#文件读取readr和readxl，也极大增加了文件读取的便利，避免字符编码等问题\n#当然，其他的包如ggplot2等，早就改变了R的生态\n#一句话：tidyverse，让R变得更美好\n#以下简单演示一下tidyverse的使用\n\n#初次使用，当然是要安装\n#install.packages(\"tidyverse\")\n\nlibrary(tidyverse)\ncjb_url <- \"data/cjb.csv\"\ncjb <- read.csv(cjb_url,\n                stringsAsFactors = FALSE,\n                encoding = \"CP936\")\n#从https://github.com/byaxb/RDataAnalytics下载之后读取\n#cjb <- readxl::read_excel(\"data/cjb.xlsx\")\n\n#x %>% f\n#相当于f(x)\ncjb %>%\n    head\n#即：\nhead(cjb)\n\ncjb %>%\n    head(n = 4) #cjb默认为第一个参数\n#相当于\nhead(cjb, n = 4)\n\n#选择某些列\ncjb %>%\n    select(xm, yw, sx) %>% #注意xm, yw, sx无需用引号括起来\n    head(n = 3)\n\ncjb %>%\n    select(\"xm\", \"yw\", \"sx\") %>% #用引号括起来当然也可以\n    head(n = 3)\n\n#选择某些列之后，对列进行重命名\n#一般来讲，在编代码时不建议用中文做变量名或列名\n#即将显示的时候，可以改为中文\ncjb %>%\n    select(xm, yw, sx) %>%\n    set_names(c(\"姓名\", \"语文\", \"数学\")) %>%\n    head(n = 3)\n\n#通过正整数来选择\ncjb %>%\n    select(1, 4:12) %>%\n    head(n = 3)\n\n#连续选择某些列\ncjb %>%\n  
  select(xm, yw:sw) %>% #注意：这里是列名yw:sw\n    head(n = 3)\n\n#修改某些列\n#修改或是新增，都是直接赋值\ncjb %>%\n    mutate_at(vars(bj, xb, wlfk), factor) %>%\n    mutate(zcj = rowSums(.[4:12])) %>%\n    arrange(desc(zcj)) %>%\n    tail(n = 2)\n\n#上述操作只是产生临时对象，cjb本身的值未动\n#采用%<>%操作符\ncjb %<>%\n    mutate_at(vars(bj, xb, wlfk), factor) %>%\n    mutate(zcj = rowSums(.[4:12])) %>%\n    arrange(desc(zcj))\ncjb <- cjb %>% #和上述语句等价\n    mutate_at(vars(bj, xb, wlfk), factor) %>%\n    mutate(zcj = rowSums(.[4:12])) %>%\n    arrange(desc(zcj))\n\n\n#以下再来看看行操作\n#找出语文成绩不及格的同学\ncjb %>%\n    dplyr::filter(yw < 60)\n\n#找出有不及格科目的同学\ncjb %>%\n    filter_at(vars(4:12), any_vars(. < 60))\n\n\n#按照性别进行分组统计\ncjb %>%\n    dplyr::filter(zcj != 0) %>%\n    group_by(xb) %>%\n    summarise(\n        count = n(),\n        max = max(zcj),\n        mean = mean(zcj),\n        min = min(zcj)\n    )\n\n#数据的长宽变换\n#宽数据变成长数据：\n#-->多列变两列\n#-->一行变多行\ncjb %>%\n    gather(key = ke_mu, value = cheng_ji, yw:sw) %>%\n    arrange(xm)\n\n\n#按科目进行汇总统计\ncjb %>%\n    dplyr::filter(zcj != 0) %>%\n    gather(key = ke_mu, value = cheng_ji, yw:sw) %>%\n    group_by(ke_mu) %>%\n    summarise(\n        max = max(cheng_ji),\n        mean = mean(cheng_ji),\n        median = median(cheng_ji),\n        min = min(cheng_ji)\n    ) %>%\n    arrange(desc(mean))\n\n\n\n# The End ^-^ -------------------------------------------------------------\n"
  },
  {
    "path": "04_观数以形.R",
    "content": "\n\n# 04_观数以形 -----------------------------------------------------------\n\n\n#了解一个人，可能是先看其长相，了解其言谈举止\n#而后再逐渐深入\n#了解一份数据，首先也是要看数据的高矮胖瘦\n#然后再去深入了解内在的关系结构\n\n#要初步了解数据，大部分教材上用的是以下两种方法：\n#1、用少量数字描述数据\n#2、用图形直观刻画数据\n\n#机器学习之要义在于“关系结构”\n#尤其是变量之间的关系和数据空间的结构\n#认识数据：\n#同样是刻画数据空间的形态和变量之间的关系\n\n\n# Data Import -------------------------------------------------------------\n\n#为保持课程的一致性，减少小伙伴们熟悉业务背景的成本\n#本次课程同样采用前述的《学生文理分科》的数据\n#采用真实数据的目的很简单：所得结果是鲜活的\n\n#书接前文，在观测数据的外表之前，首先还是将数据读入\nlibrary(tidyverse)\ncjb_url <- \"data/cjb.csv\"\ncjb <- read.csv(cjb_url,\n                stringsAsFactors = FALSE,\n                encoding = \"CP936\")\n\ncjb %<>%\n    mutate(zcj = rowSums(.[, 4:12])) %>%\n    mutate_at(vars(xb, bj, wlfk), factor) %>%\n    dplyr::filter(zcj != 0)\n\n\n#可能有小伙伴觉得，一次次从网络读取数据很麻烦\n#好吧，那咱们就把它存到本地\n#实际上，在大部分数据分析项目中\n#我们都可以把清理好的数据存为rda格式放在本地\nView(cjb)\nsave(cjb, file = \"data/cjb.rda\")\n#文件存储在当前工作路径的data子目录之下\n#当前工作路径可通过getwd()/setwd()来查询和设置\n\nrm(list = ls())\nload(\"data/cjb.rda\",\n     verbose = TRUE)\n#也可以采用下边这种方式选取\n#load(file.choose())\n\n\n# 1D-univariate -----------------------------------------------------------\n\n\n#机器学习的核心任务\n#是揭示变量之间的关系和数据空间的结构\n#变量之间的关系，自然是一个变量变化或若干变量变化之后，\n#另外变量随之产生变化；\n#数据空间的形态，\n#则主要是数据点在数据空间的散布所呈现的结构\n\n#要考察变量之间的依存/随动关系，\n#自然首先要看单个变量的分布情况\n#若某个变量取值不变，退化为常量，则几乎是不被作为特征的\n#我们要考查的，恰恰就是数据本身的变化或者说分布情况\n#同样，要考查数据空间的形态，\n#当然也可以从单个维度的形态着手\n#因此，我们要认识数据，做的第一件事情，\n#往往就是单变量的分布情况的描述\n\n#一维数据空间，毫无疑问就是一条直线\n\n\n# Stem --------------------------------------------------------------------\n\n#一维空间的数据形态，可以通过茎叶图或是Wikinson点图\n#来直观表示\nlibrary(tidyverse)\ncjb %>%\n    dplyr::filter(bj == \"1101\") %>%\n    pull(sx) %>%\n    sort() -> sx_1101\n\ncjb %>%\n    dplyr::filter(bj == \"1110\") %>%\n    pull(sx) %>%\n    sort() -> sx_1110\n\nstem(sx_1101, scale = 0.5)\ntable(cjb$bj)\nset.seed(2012)\ntmp_x <- 10 * round(sx_1101 + rnorm(length(sx_1101)), digits = 2)\nsort(tmp_x)\n# [1] 542.2 564.2 590.9 593.7 596.6 606.0 612.6 637.7 643.7 654.1 672.8 
673.1 681.6 688.2 691.8 692.5 703.7 718.9 719.3\n# [20] 724.3 724.4 729.8 731.4 731.5 736.3 742.4 749.4 760.3 767.9 778.6 779.8 789.4 789.9 797.7 801.4 801.8 812.5 819.5\n# [39] 822.6 825.4 825.6 828.6 831.8 840.0 840.4 845.5 853.5 865.7 891.0 912.6 915.9 950.0\nstem(tmp_x, scale = 0.5)\n\n#1101班数学成绩茎叶图\ncjb %>%\n    dplyr::filter(bj == \"1101\") %>%\n    select(sx) %>%\n    as_vector() %>%\n    stem(scale = 0.5)\n#1110班数学成绩茎叶图\ncjb %>%\n    dplyr::filter(bj == \"1110\") %>%\n    select(sx) %>%\n    as_vector() %>%\n    stem(scale = 2)\n\nstem(sx_1101, scale = 0.5)\nstem(sx_1110, scale = 2)\n\n\n\n# Histogram ---------------------------------------------------------------\n\nresults <- hist(cjb$zz,\n                breaks = \"Sturges\")\n\nresults <- hist(cjb$zz,\n                breaks = \"Sturges\",\n                plot = FALSE)\nresults$breaks\n\n(max(cjb$zz) - min(cjb$zz)) /\n    (ceiling(log2(nrow(cjb))) + 1)\nnclass.Sturges(cjb$yw)\nnclass.Sturges(cjb$zz)\n\n#ggplot2的基本绘图模板template是：\n# ggplot(data = <DATA>) +\n#   <GEOM_FUNCTION>(mapping = aes(<MAPPINGS>),\n#                   stat = <STAT>,\n#                   position = <POSITION>) +\n#   <COORDINATE_FUNCTION> +\n#   <FACET_FUNCTION>\n#虽然绘图框架很直观明了，但是真正要精通各类图形绘制，\n#仍然需要日积月累\n\n#在初学ggplot2的过程中，除了图形语法之外，\n#还有一个难点，那就是数据的转换的过程，\n#比如数据的长宽变换等\n#在接下来的具体代码演示过程中，\n#这一点小伙伴们也许多加留意\n\n#看一看数据分布的形状\nsx_hist_results <- hist(cjb$sx,\n                        plot = FALSE)\n? 
hist\n#查看sx_hist_results的类型\ntypeof(sx_hist_results)\n#> [1] \"list\"\n#查看列表的组成\nnames(sx_hist_results)\n#> [1] \"breaks\"   \"counts\"   \"density\"  \"mids\"     \"xname\"    \"equidist\"\nsx_hist_results$density\nsx_hist_results$counts / length(cjb$sx) * 2 - sx_hist_results$density\n\nlibrary(tidyverse)\nggplot(data = cjb, mapping = aes(sx)) +\n    geom_histogram(breaks = sx_hist_results$breaks,\n                   color = \"darkgray\",\n                   fill = \"white\") +\n    stat_bin(breaks = sx_hist_results$breaks,\n             geom = \"text\",\n             aes(label = ..count..)) +\n    coord_flip()\n\nggplot(data = cjb, mapping = aes(sx)) +\n    geom_histogram(breaks = sx_hist_results$breaks,\n                   color = \"darkgray\",\n                   fill = \"white\") +\n    stat_bin(breaks = sx_hist_results$breaks,\n             geom = \"text\",\n             aes(label = ..count..)) +\n    coord_flip()\n\n\nggsave(\"histogram1.png\", dpi = 600)\n#dpi一般设为300，就可以达到印刷要求了\n#换言之，发表论文时，dpi设置为300也就足够了\n#当然，dpi值越高、质量越好\n\nggplot(data = cjb, mapping = aes(sx)) +\n    geom_histogram(breaks = sx_hist_results$breaks,\n                   color = \"darkgray\",\n                   fill = \"white\") +\n    stat_bin(breaks = sx_hist_results$breaks,\n             geom = \"text\",\n             aes(label = ..count..))\nggsave(\"histogram2.png\", dpi = 600)\n\n\n# Density -----------------------------------------------------------------\n\n#获取直方图相关参数\nsx_hist_results <- hist(cjb$sx,\n                        plot = FALSE)\n#绘制直方图\nggplot(data = cjb, mapping = aes(sx)) +\n    geom_histogram(\n        aes(y = ..count..),\n        breaks = sx_hist_results$breaks,\n        color = \"darkgray\",\n        fill = \"white\"\n    ) +\n    #绘制概率密度曲线\n    geom_density(colour = \"red\")\nggplot(data = cjb, mapping = aes(sx)) +\n    geom_histogram(\n        aes(y = ..density..),\n        breaks = sx_hist_results$breaks,\n        color = \"darkgray\",\n        fill = \"white\"\n    
) +\n    #绘制概率密度曲线\n    geom_density(colour = \"red\")\nggsave(\"histogram2+density.png\", dpi = 600)\n\n#概率密度图\ndata_points <- head(cjb$sx, n = 10)\nsx_density <- density(data_points)\ngaussian_kernel <- function(X, x, h) {\n    u <- (X - x) / h\n    return(1 / sqrt(2 * pi) * exp(-0.5 * (u ^ 2)))\n}\nX <- sx_density$x\nh <- sx_density$bw\nn <- length(data_points)\ncomponents <- lapply(data_points, function(y) {\n    gaussian_kernel(X, y, h)\n})\nplot(sx_density)\nfor (i in seq_along(components)) {\n    lines(X, components[[i]] / (n * h), col = \"grey\")\n}\npoints(data_points, rep(0, n), col = \"red\")\n\n\n#不同的核函数\n#方窗\nu <- c(-2.5, -2.5, 2.5, 2.5)\nkernel_2 <- c(0, 0.5, 0.5, 0)\nplot(\n    u,\n    kernel_2,\n    type = \"l\",\n    xaxt = \"n\",\n    yaxt = \"n\",\n    axes = F,\n    xlim = c(-5, 5),\n    ylim = c(0, 1)\n)\narrows(0, 0, 0, 0.7, length = 0.1)\narrows(-5, 0, 5, 0, length = 0.1)\n\n#三角窗\nu <- c(-2.5, 0, 2.5)\nkernel_2 <- c(0, 0.5, 0)\nplot(\n    u,\n    kernel_2,\n    type = \"l\",\n    xaxt = \"n\",\n    yaxt = \"n\",\n    axes = F,\n    xlim = c(-5, 5),\n    ylim = c(0, 1)\n)\narrows(0, 0, 0, 0.7, length = 0.1)\narrows(-5, 0, 5, 0, length = 0.1)\n\n#正态窗\nu <- seq(-4, 4, len = 10000)\nkernel_2 <- 1 / sqrt(2 * pi) * exp(-0.5 * (u ^ 2))\nplot(\n    u,\n    kernel_2,\n    type = \"l\",\n    xaxt = \"n\",\n    yaxt = \"n\",\n    axes = F,\n    xlim = c(-5, 5),\n    ylim = c(0, 1)\n)\narrows(0, 0, 0, 0.6, length = 0.1)\narrows(-5, 0, 5, 0, length = 0.1)\n\n#指数窗\nu <- seq(-4, 4, len = 10000)\nkernel_2 <- 1 / 2 * exp(-abs(u))\nplot(\n    u,\n    kernel_2,\n    type = \"l\",\n    xaxt = \"n\",\n    yaxt = \"n\",\n    axes = F,\n    xlim = c(-5, 5),\n    ylim = c(0, 1)\n)\narrows(0, 0, 0, 0.6, length = 0.1)\narrows(-5, 0, 5, 0, length = 0.1)\n\n\n# Violin ------------------------------------------------------------------\n\n#绘制小提琴图\nggplot(cjb, aes(x = factor(0), y = sx)) +\n    geom_violin(fill = \"orange\", alpha = 0.2) +\n    
coord_flip()\nggsave(\"violin.png\", dpi = 600)\n\n\n# Boxplot -----------------------------------------------------------------\n\nggplot(cjb, aes(x = factor(0), y = sx)) +\n    geom_violin(fill = \"orange\", alpha = 0.2) +\n    geom_boxplot(width = 0.25,\n                 fill = \"blue\",\n                 alpha = 0.2) +\n    coord_flip()\n\n#箱线图\nset.seed(2012)\nsample_data <- rnorm(100000)\nop <- par(mfrow = c(2, 1))\nboxplot(sample_data,\n        horizontal = TRUE)\nplot(density(sample_data), main = NA)\npar(op)\n\nop <- par(mfrow = c(2, 3))\nlibrary(skewt)\nset.seed(2012)\nrt1 <- rskt(5000, 12, 10)\nrt2 <- rskt(5000, 12, 1)\nrt3 <- rskt(5000, 12, 0.2)\nplot(density(rt1), col = \"red\", lwd = 2)\nplot(density(rt2), col = \"red\", lwd = 2)\nplot(density(rt3), col = \"red\", lwd = 2)\nboxplot(rt1, horizontal = TRUE)\nboxplot(rt2, horizontal = TRUE)\nboxplot(rt3, horizontal = TRUE)\npar(op)\n\ncjb %>%\n    ggplot(aes(x = factor(0), y = sx)) +\n    geom_violin(fill = \"#56B4E9\", width = 0.75) +\n    geom_boxplot(\n        width = 0.25,\n        fill = \"#E69F00\",\n        outlier.colour = \"red\",\n        outlier.shape = 1,\n        outlier.size = 2\n    ) +\n    geom_rug(position = \"jitter\",\n             size = 0.1,\n             sides = \"b\") +\n    coord_flip() +\n    theme(\n        axis.title.y = element_blank(),\n        axis.text.y = element_blank(),\n        axis.ticks.y = element_blank()\n    )\n\n\ncjb %>%\n    ggplot(aes(x = factor(0), y = sx)) +\n    geom_violin(fill = \"#56B4E9\", width = 0.75) +\n    geom_boxplot(\n        width = 0.25,\n        fill = \"#E69F00\",\n        outlier.colour = \"red\",\n        outlier.shape = 1,\n        outlier.size = 2\n    ) +\n    geom_rug(position = \"jitter\",\n             size = 0.1,\n             sides = \"b\") +\n    coord_flip()\nggsave(\"boxplot1.png\", dpi = 600)\n\n\ncjb %>%\n    ggplot(aes(x = factor(0), y = sx)) +\n    geom_boxplot(\n        width = 0.25,\n        fill = \"#E69F00\",\n        
outlier.colour = \"red\",\n        outlier.shape = 1,\n        outlier.size = 2\n    ) +\n    geom_rug(position = \"jitter\",\n             size = 0.1,\n             sides = \"b\") +\n    coord_flip()\nggsave(\"boxplot2.png\", dpi = 600)\n\n\nboxplot_results <- boxplot.stats(cjb$sx)\n# $`stats`\n# [1]  60  81  89  95 100\n#\n# $n\n# [1] 774\n#\n# $conf\n# [1] 88.20491 89.79509\n#\n# $out\n# [1] 55 59 57 59 58 51 56 55 59 26 58 46 59 59\nsort(boxplot_results$out)\n\ntypeof(boxplot_results)\nnames(boxplot_results)\nboxplot_results$stats\nfivenum(cjb$sx)\n\n\n\n# Location ----------------------------------------------------------------\n\n#前边是数据形态的直观展示\n#我们需要有一些定量指标来对数据形态进行刻画\n#数据的集中趋势\n#均值\ncjb %>%\n    group_by(wlfk) %>% #按文理分科分组统计\n    summarise(\n        count = n(),\n        #各组人数\n        sx_max = max(sx),\n        #最大值\n        sx_Q3 = quantile(sx, 0.75),\n        #第三分位数\n        sx_median = median(sx),\n        #中位数\n        sx_mean = mean(sx),\n        #均值\n        sx_Q1 = quantile(sx, 0.25),\n        #第一分位数\n        sx_iqr = IQR(sx),\n        #四分位距\n        sx_min = min(sx),\n        #最小值\n        sx_range = max(sx) - min(sx)#极差\n    )\n\n\ncjb %>%\n    group_by(wlfk) %>% #按文理分科分组统计\n    summarise(count = n(),\n              #各组人数\n              sx_median = median(sx),\n              #中位数\n              sx_mean = mean(sx))#均值\n\n\n# Scale -------------------------------------------------------------------\n\ncjb %>%\n    group_by(wlfk) %>% #按文理分科分组统计\n    summarise(\n        sx_max = max(sx),\n        #最大值\n        sx_min = min(sx),\n        #最小值\n        sx_range = max(sx) - min(sx)\n    )#极差\ncjb %>%\n    group_by(wlfk) %>% #按文理分科分组统计\n    summarise(\n        sx_Q3 = quantile(sx, 3 / 4),\n        #第三分位数\n        sx_Q1 = quantile(sx, 1 / 4),\n        #第一分位数\n        sx_iqr = IQR(sx)\n    )#四分位距\n\n#查看各科情况\nround(apply(cjb[, 4:12], 2, function(x) {\n    c(\n        mean = mean(x),\n        median = median(x),\n        range = diff(range(x)),\n        IQR = 
IQR(x)\n    )\n}))\n\n\n# More Plots --------------------------------------------------------------\n\ncjb %>%\n    dplyr::filter(bj == \"1110\") %>%\n    select(xm, sx) %>%\n    mutate(sx_z = (sx - mean(sx)) / sd(sx),\n           sx_type = ifelse(sx_z >= 0, \"above\", \"below\")) %>%\n    arrange(sx_z) %>%\n    ggplot(aes(x = fct_inorder(xm), y = sx_z, label = sx_z)) +\n    geom_bar(stat = 'identity', aes(fill = sx_type), width = .5)  +\n    scale_fill_manual(\n        name = \"Math Score\",\n        labels = c(\"Above Average\", \"Below Average\"),\n        values = c(\"above\" = \"#00ba38\", \"below\" = \"#f8766d\")\n    ) +\n    coord_flip()\n\ncjb %>%\n    dplyr::filter(bj == \"1110\" & xb == \"男\") %>%\n    select(xm, sx) %>%\n    mutate(sx_z = (sx - mean(sx)) / sd(sx),\n           sx_type = ifelse(sx_z >= 0, \"above\", \"below\")) %>%\n    arrange(sx_z) %>%\n    ggplot(aes(x = fct_inorder(xm), y = sx_z, label = sx_z)) +\n    geom_bar(stat = 'identity', aes(fill = sx_type), width = 0.5)  +\n    scale_fill_manual(\n        name = \"Math Score\",\n        labels = c(\"Above Average\", \"Below Average\"),\n        values = c(\"above\" = \"#00ba38\", \"below\" = \"#f8766d\")\n    ) +\n    coord_flip() +\n    theme_bw()\n\n\ng <- ggplot(cjb, aes(sx))\nsx_hist_results <- hist(cjb$sx,\n                        plot = FALSE)\nlibrary(tidyverse)\ng + geom_histogram(aes(fill = bj),\n                   breaks = sx_hist_results$breaks,\n                   col = \"black\",\n                   size = .1) + scale_fill_brewer(palette = \"Spectral\")\n\n\n\n\n# 2D-Two Variables --------------------------------------------------------\n\n\n\n\n# Treemap-Cat vs cat ------------------------------------------------------\n\n#离散变量vs离散变量\n#形式可以有很多种，比如马赛克图\n#本实验中推荐的是矩形树图\n\nlibrary(tidyverse)\nlibrary(treemap)\ncjb_sum <- cjb %>%\n    group_by(wlfk) %>%\n    summarise(count = n())\ncjb_sum <- cjb %>%\n    group_by(wlfk, xb) %>%\n    summarise(count = n())\ntreemap(\n    
as.data.frame(cjb_sum) ,\n    index = c(\"wlfk\", \"xb\"),\n    vSize = \"count\",\n    vColor = \"xb\",\n    type = \"categorical\",\n    fontsize.labels = 20,\n    lowerbound.cex.labels = 0.6\n)\n\n#更改其中的字体\nlibrary(showtext)\nfont_add(\"fzqt\", regular = \"D://tools/fonts/FZQTJW.TTF\")\nshowtext_begin()\ntreemap(\n    as.data.frame(cjb_sum) ,\n    index = c(\"wlfk\", \"xb\"),\n    vSize = \"count\",\n    vColor = \"xb\",\n    type = \"categorical\",\n    fontsize.labels = 20,\n    fontfamily.title = \"fzqt\",\n    fontfamily.labels = \"fzqt\",\n    fontfamily.legend = \"fzqt\",\n    lowerbound.cex.labels = 0.6\n)\nshowtext_end()\n\n\nView(cjb_sum)\nnrow(cjb)\nlibrary(treemap)\ncjb %>%\n    group_by(wlfk, bj, xb) %>%\n    summarise(count = n()) %>%\n    as.data.frame() %>%\n    treemap(\n        index = c(\"wlfk\", \"bj\", \"xb\"),\n        vSize = \"count\",\n        vColor = \"count\",\n        type = \"value\"\n    )\n\n\n\ncjb %>%\n    group_by(bj, wlfk) %>%\n    summarise(count = n())\n\n\n# Numeric vs numeric ------------------------------------------------------\n\n#连续变量vs连续变量\n#散点图是最常见、但同时也应该是最有用的图之一\n#散点图可用来观察变量之间可能存在的模式\n#同时也是二位数据空间形态的最直接的体现\nlibrary(ggplot2)\nggplot(cjb,\n       aes(\n           x = sx,\n           y = sw,\n           shape = wlfk,\n           colour = wlfk\n       )) +\n    geom_point(size = 2) +\n    labs(\n        x = \"数学\",\n        y = \"生物\",\n        colour = \"文理分科\",\n        shape = \"文理分科\"\n    )\n#散点图矩阵\nGGally::ggpairs(cjb, columns = 4:12)\nggsave(\"scatter_pairs.png\", dpi = 600)\nView(cjb)\n\n\n# cor and cov -------------------------------------------------------------\n\n#协方差以及内积的含义\n#请大家进一步思考加减乘除的物理含义\n#内积在某种程度上讲，也是在衡量相似性\nset.seed(2012)\nX <- rnorm(100)\nY <- rnorm(100)\nsum(X * Y)\n#> [1] 16.52361\n#相关系数矩阵\nsum(sort(X) * sort(Y))\n#> [1] 113.6489\nsum(sort(X) * rev(sort(Y)))\n#> [1] -112.7025\n\ncov(X, Y)\nsum((X - mean(X)) * (Y - mean(Y))) / 99\n\n\n\nlibrary(animation)\nsaveGIF(\n    expr = {\n        Xs <- 
seq(-2, 2, len = 100)\n        Ys <- seq(-2, 2, len = 100)\n        area <- 4\n        Wx <- seq(2, -2, len = 20)\n        Wy <- sqrt(area - Wx ^ 2)\n        Wx <- c(Wx, rev(Wx))\n        Wy <- c(Wy, -rev(Wy))\n        Wx <- rev(Wx)\n        Wy <- rev(Wy)\n        W <- cbind(Wx, Wy)\n        W <- t(apply(W, 1, function(x) {\n            x / sqrt(x[1] ^ 2 + x[2] ^ 2)\n        }))\n\n\n        XY <- expand.grid(Xs, Ys)\n        names(XY) <- c(\"Xs\", \"Ys\")\n        XY_bak <- XY\n        XY <- as.data.frame(XY)\n        b <- 1 / 2\n        i <- 5\n        for (i in 1:nrow(W)) {\n            w <- W[i, ]\n            XY <- XY_bak\n            XY$Inner_Product <- apply(XY, 1, function(x) {\n                sum(x * w)\n            })\n            names(XY) <- c(\"x\", \"y\", \"Inner_Product\")\n            w_label <- paste0(\"(\",\n                              round(w[1], digits = 2),\n                              \",\",\n                              round(w[2], digits = 2),\n                              \")\")\n            library(ggplot2)\n            p <- ggplot(XY, aes(\n                x = x,\n                y = y,\n                colour = Inner_Product\n            )) +\n                geom_point(size = 0.5) +\n                geom_segment(\n                    aes(\n                        x = 0,\n                        y = 0,\n                        xend = w[1],\n                        yend = w[2]\n                    ),\n                    colour = \"red\",\n                    size = 1.2,\n                    arrow = arrow(length = unit(0.03, \"npc\"))\n                ) +\n                geom_text(aes(\n                    x = w[1],\n                    y = w[2],\n                    label = w_label\n                ),\n                colour = \"blue\") +\n                #scale_colour_gradient2(low=\"#22FF00\", mid=\"white\", high=\"#FF0000\", midpoint=0) +\n                scale_colour_gradient2(\n                    low = \"red\",\n            
        mid = \"white\",\n                    high = \"blue\",\n                    midpoint = 0\n                ) +\n                coord_fixed()\n            print(p)\n        }\n    },\n    movie.name = \"animation5.gif\",\n    convert = \"gm convert\",\n    interval = 1\n)\n\n\nlibrary(animation)\nsaveGIF(\n    expr = {\n        Xs <- seq(-2, 2, len = 100)\n        Ys <- seq(-2, 2, len = 100)\n        area <- 4\n        Wx <- seq(2, -2, len = 20)\n        Wy <- sqrt(area - Wx ^ 2)\n        Wx <- c(Wx, rev(Wx))\n        Wy <- c(Wy, -rev(Wy))\n        Wx <- rev(Wx)\n        Wy <- rev(Wy)\n        W <- cbind(Wx, Wy)\n        W <- t(apply(W, 1, function(x) {\n            x / sqrt(x[1] ^ 2 + x[2] ^ 2)\n        }))\n\n\n        XY <- expand.grid(Xs, Ys)\n        names(XY) <- c(\"Xs\", \"Ys\")\n        XY_bak <- XY\n        XY <- as.data.frame(XY)\n        b <- 1\n        for (i in 1:nrow(W)) {\n            w <- W[i, ]\n            XY <- XY_bak\n            XY$Inner_Product <- apply(XY, 1, function(x) {\n                sum(x * w) > b\n            })\n            names(XY) <- c(\"x\", \"y\", \"Inner_Product\")\n            w_label <- paste0(\"(\",\n                              round(w[1], digits = 2),\n                              \",\",\n                              round(w[2], digits = 2),\n                              \")\")\n            library(ggplot2)\n            p <- ggplot(XY, aes(\n                x = x,\n                y = y,\n                colour = Inner_Product\n            )) +\n                geom_point(size = 0.5) +\n                geom_segment(\n                    aes(\n                        x = 0,\n                        y = 0,\n                        xend = w[1],\n                        yend = w[2]\n                    ),\n                    colour = \"red\",\n                    size = 1.2,\n                    arrow = arrow(length = unit(0.03, \"npc\"))\n                ) +\n                geom_text(aes(\n                    x = 
w[1],\n                    y = w[2],\n                    label = w_label\n                ),\n                colour = \"blue\") +\n                coord_fixed()\n            print(p)\n        }\n    },\n    movie.name = \"animation6.gif\",\n    convert = \"gm convert\",\n    interval = 1\n)\n\nlibrary(animation)\nsaveGIF(\n    expr = {\n        Xs <- seq(-2, 2, len = 100)\n        Ys <- seq(-2, 2, len = 100)\n        area <- 4\n        Wx <- seq(2, -2, len = 20)\n        Wy <- sqrt(area - Wx ^ 2)\n        Wx <- c(Wx, rev(Wx))\n        Wy <- c(Wy, -rev(Wy))\n        Wx <- rev(Wx)\n        Wy <- rev(Wy)\n        W <- cbind(Wx, Wy)\n        W <- t(apply(W, 1, function(x) {\n            x / sqrt(x[1] ^ 2 + x[2] ^ 2)\n        }))\n\n\n        XY <- expand.grid(Xs, Ys)\n        names(XY) <- c(\"Xs\", \"Ys\")\n        XY_bak <- XY\n        XY <- as.data.frame(XY)\n        b <- 1 / 2\n        for (i in 1:nrow(W)) {\n            w <- W[i, ]\n            XY <- XY_bak\n            XY$Inner_Product <- apply(XY, 1, function(x) {\n                sum(x * w) > b\n            })\n            names(XY) <- c(\"x\", \"y\", \"Inner_Product\")\n            w_label <- paste0(\"(\",\n                              round(w[1], digits = 2),\n                              \",\",\n                              round(w[2], digits = 2),\n                              \")\")\n            library(ggplot2)\n            p <- ggplot(XY, aes(\n                x = x,\n                y = y,\n                colour = Inner_Product\n            )) +\n                geom_point(size = 0.5) +\n                geom_segment(\n                    aes(\n                        x = 0,\n                        y = 0,\n                        xend = w[1],\n                        yend = w[2]\n                    ),\n                    colour = \"red\",\n                    size = 1.2,\n                    arrow = arrow(length = unit(0.03, \"npc\"))\n                ) +\n                geom_text(aes(\n        
            x = w[1],\n                    y = w[2],\n                    label = w_label\n                ),\n                colour = \"blue\") +\n                coord_fixed()\n            print(p)\n        }\n    },\n    movie.name = \"animation7.gif\",\n    convert = \"gm convert\",\n    interval = 1\n)\n\nset.seed(2012)\nX <- rnorm(100)\nY <- rnorm(100)\ninner_prod <- sapply(1:1000000, function(x) {\n    sum(sample(X) * sample(Y))\n})\n#上边的代码，当然也可以用replicate改写\nsum(sort(X) * sort(Y))\nrange(inner_prod)\n\nhist(inner_prod)\n\ncor_coef <- cjb %>%\n    select(yw:sw) %>%\n    cor() %>%\n    round(digits = 2)\n#>    yw   sx   wy   zz   ls   dl   wl   hx   sw\n#> yw 1.00 0.46 0.54 0.47 0.38 0.37 0.30 0.38 0.40\n#> sx 0.46 1.00 0.55 0.39 0.37 0.45 0.57 0.59 0.60\n#> wy 0.54 0.55 1.00 0.37 0.28 0.38 0.47 0.44 0.44\n#> zz 0.47 0.39 0.37 1.00 0.39 0.32 0.20 0.28 0.28\n#> ls 0.38 0.37 0.28 0.39 1.00 0.41 0.31 0.33 0.38\n#> dl 0.37 0.45 0.38 0.32 0.41 1.00 0.39 0.44 0.46\n#> wl 0.30 0.57 0.47 0.20 0.31 0.39 1.00 0.62 0.65\n#> hx 0.38 0.59 0.44 0.28 0.33 0.44 0.62 1.00 0.69\n#> sw 0.40 0.60 0.44 0.28 0.38 0.46 0.65 0.69 1.00\n\n# Tile for cor ------------------------------------------------------------\n#HOW TO INTERPRET A CORRELATION COEFFICIENT R By Deborah J. Rumsey\nlibrary(ggplot2)\nlibrary(tidyverse)\ncor_coef %>%\n    as.data.frame() %>%\n    rownames_to_column(var = \"km1\") %>%\n    gather(key = km2, value = cor_num, -km1) %>%\n    mutate(cor_level = cut(\n        cor_num,\n        breaks = c(0, 0.3, 0.5, 0.7, 1),\n        right = FALSE\n    )) %>%\n    ggplot(aes(\n        x = fct_inorder(km1),\n        y = fct_inorder(km2),\n        fill = cor_level\n    )) +\n    geom_tile(colour = \"white\", size = 1.5) +\n    geom_text(aes(label = format(cor_num, digits = 2))) +\n    scale_fill_brewer(palette = \"YlGn\", name = \"相关系数区间\")\nggsave(\"cor_coef.png\", dpi = 600)\n\n#区间划分方法并不唯一：\n#Hinkle DE, Wiersma W, Jurs SG (2003). 
Applied Statistics for the Behavioral Sciences 5th ed. Boston: Houghton Mifflin\n#0~0.3negligible correlation\n#0.3~0.5low correlation\n#0.5~0.7moderate correlation\n#0.7~0.9high correlation\n#0.9~1very high correlation\n# Catetorial vs Numeric ---------------------------------------------------\n\n#离散变量vs连续变量\n#主要是分组绘图\n#对不同的组别进行比较\n\n\n# Grouped boxplots --------------------------------------------------------\n\n#分组绘制箱线图\n#看看不同班级数学成绩的分布\nlibrary(ggplot2)\nggplot(cjb, aes(x = bj,\n                y = sx,\n                fill = bj)) +\n    geom_boxplot(\n        outlier.colour = \"red\",\n        outlier.shape = 3,\n        outlier.size = 1\n    ) +\n    labs(x = \"班级\", y = \"数学成绩\") +\n    theme(legend.position = \"none\")\nggsave(\"grouped_boxplots.png\", dpi = 600)\n\n#其余图形如直方图、概率密度图等，请自行练习\n\n\n# Grouped density plots ---------------------------------------------------\n\n#当然，如果分组太多，显然不适合全都叠加在一起\n#可以采用以下方式\nlibrary(ggridges)#绘制层峦叠嶂图\nlibrary(viridis)#采用其中的颜色\nggplot(cjb, aes(x = sx, y = bj, fill = ..x..)) +\n    geom_density_ridges_gradient(scale = 2,\n                                 rel_min_height = 0.01,\n                                 gradient_lwd = 1) +\n    scale_fill_viridis(name = \"数学成绩\",\n                       option = \"C\") +\n    labs(x = \"数学\", y = \"班级\")\nggsave(\"density_ridges.png\", dpi = 600)\n\n\n# featurePlot -------------------------------------------------------------\n\n#对于分类问题而言，在进行数据描述时\n#最关键的，当属因变量vs自变量了\nlibrary(caret)\nfeaturePlot(\n    x = cjb[, 4:12],\n    y = cjb$wlfk,\n    plot = \"density\",\n    scales = list(\n        x = list(relation = \"free\"),\n        y = list(relation = \"free\")\n    ),\n    adjust = 1.5,\n    pch = \"|\",\n    auto.key = list(columns = 2)\n)\n#从上图可以看出，数学/生物最优辨识度\n#而语文，几乎文理科生没有什么区别\n\n#变量之间的依存关系\n#可以通过信息增益来度量\n#也就是说，当我们知道某个自变量时\n#有助于因变量不确定性的减少\nlibrary(infotheo)\ncondentropy(cjb$wlfk) -\n    condentropy(cjb$wlfk, cjb$xb)\n\nlibrary(FSelectorRcpp)\ninformation_gain(x = cjb[, c(1, 3)],\n        
         y = cjb$wlfk)\n#实际上，信息增益也是特征选择常用的方法\n\n\n\n# More Variables ----------------------------------------------------------\n\n\n\n# 3D-scatter --------------------------------------------------------------\n\n#三维散点图\nlibrary(rgl)\nplot3d(\n    x = cjb$sx,\n    y = cjb$wl,\n    z = cjb$sw,\n    xlab = \"Mathematics\",\n    ylab = \"Physics\",\n    zlab = \"Biology\",\n    type = \"s\",\n    size = 0.6,\n    col = c(\"red\", \"green\")[cjb$wlfk]\n)\n\n\n# Define point shapes\nmyshapes = c(16, 17, 18)\nmyshapes <- myshapes[as.numeric(cjb$wlfk)]\n# Define point colors\nmycols <- c(\"#999999\", \"#E69F00\", \"#56B4E9\")\nmycols <- mycols[as.numeric(cjb$wlfk)]\n# Plot\nlibrary(\"scatterplot3d\")\ncjb %>%\n    select(sx, wl, sw) %>%\n    scatterplot3d(\n        pch = myshapes,\n        color = mycols,\n        grid = TRUE,\n        box = FALSE\n    )\nlibrary(plot3D)\nbty <- c(\"b\", \"b2\", \"f\", \"g\", \"bl\", \"bl2\", \"u\", \"n\")\nscatter3D(\n    x = cjb$sx,\n    y = cjb$wl,\n    z = cjb$sw,\n    pch = 16,\n    bty = \"g\",\n    colkey = FALSE,\n    xlab = \"数学\",\n    ylab = \"物理\",\n    zlab = \"生物\",\n    main = \"数学、物理、生物散点图\",\n    #col.panel =\"lightgreen\",\n    expand = 0.75,\n    phi = 0\n)\n\n\ndata(VADeaths)\nicut <- function(x) {\n    ibreaks <- c(0, seq(50, 100, len = 11))\n    cut(x, breaks = ibreaks)\n}\n\nrotated_angle <- 20\ncjb %>%\n    select(wl, sx) %>%\n    mutate_at(vars(wl, sx), icut) %>%\n    group_by(wl, sx) %>%\n    summarise(freq = n()) %>%\n    spread(key = sx, value = freq, fill = 0) %>%\n    column_to_rownames(var = \"wl\") %>%\n    as.matrix() %>%\n    hist3D(\n        z = .,\n        scale = FALSE,\n        expand = 0.02,\n        bty = \"g\",\n        phi = rotated_angle,\n        col = \"#0072B2\",\n        border = \"black\",\n        shade = 0.2,\n        ltheta = 90,\n        space = 0.3,\n        ticktype = \"detailed\"\n    )\n\n\n\nlibrary(animation)\nsaveGIF(\n    expr = {\n        for (rotated_angle in seq(20, 380, by = 
5)) {\n            cjb %>%\n                select(wl, sx) %>%\n                mutate_at(vars(wl, sx), icut) %>%\n                group_by(wl, sx) %>%\n                summarise(freq = n()) %>%\n                spread(key = sx,\n                       value = freq,\n                       fill = 0) %>%\n                column_to_rownames(var = \"wl\") %>%\n                as.matrix() %>%\n                hist3D(\n                    z = .,\n                    scale = FALSE,\n                    expand = 0.02,\n                    bty = \"g\",\n                    phi = 20,\n                    col = \"#0072B2\",\n                    border = \"black\",\n                    shade = 0.2,\n                    ltheta = 90,\n                    theta = rotated_angle,\n                    space = 0.3,\n                    ticktype = \"detailed\"\n                )\n\n        }\n\n    },\n    movie.name = \"animation5.gif\",\n    convert = \"gm convert\",\n    interval = 1\n)\ndev.off()\n\n\n# faces -------------------------------------------------------------------\n\n#除了三维之外，可以继续向多维扩展\n#比如脸谱图\nlibrary(aplpack)\nselected_cols <- c(\"wl\", \"hx\", \"sw\")\nselected_rows <-\n    c(488, 393, 490,  440,\n      287, 289,  292, 293)\nView(cjb[selected_rows, ])\nfaces(cjb[selected_rows,\n          selected_cols],\n      ncol.plot = 4,\n      nrow.plot = 2,\n      face.type = 1)\n\n#> effect of variables:\n#>   modified item       Var\n#> \"height of face   \" \"wl\"\n#> \"width of face    \" \"hx\"\n#> \"structure of face\" \"sw\"\n#> \"height of mouth  \" \"wl\"\n#> \"width of mouth   \" \"hx\"\n#> \"smiling          \" \"sw\"\n#> \"height of eyes   \" \"wl\"\n#> \"width of eyes    \" \"hx\"\n#> \"height of hair   \" \"sw\"\n#> \"width of hair   \"  \"wl\"\n#> \"style of hair   \"  \"hx\"\n#> \"height of nose  \"  \"sw\"\n#> \"width of nose   \"  \"wl\"\n#> \"width of ear    \"  \"hx\"\n#> \"height of ear   \"  \"sw\"\n\n\n# parallel coordinate plot 
------------------------------------------------\n\n#绘制平行坐标图\ncjb_top_wen <- cjb %>%\n    dplyr::filter(wlfk == \"文科\") %>%\n    arrange(zcj) %>%\n    select(4:13) %>%\n    mutate_at(vars(yw:sw), jitter) %>%\n    head(n = 50)\ncjb_top_li <- cjb %>%\n    dplyr::filter(wlfk == \"理科\") %>%\n    arrange(zcj) %>%\n    select(4:13) %>%\n    mutate_at(vars(yw:sw), jitter) %>%\n    head(n = 50)\ncjb_top <- rbind(cjb_top_wen, cjb_top_li)\nrequire(GGally)\nGGally::ggparcoord(iris, columns = 1:4, groupColumn = 5) +\n    geom_point()\nggparcoord(cjb_top,\n           columns = 1:9,\n           groupColumn = 10) +\n    geom_point()\n\nggsave(\"par2.png\", dpi = 600)\n\n\n\n# Density revisited -------------------------------------------------------\n\n#我们当然可以对这个三维数据进行直观展示，\n#但这显然是不够的，我们只有进行了量化，\n#各种各样的关系、模式才能呈现出来，\n#这也是数据科学最令人着迷的地方\n\n#数据空间的密度\n#数据的密度，当然与我们以前学过的物质的密度不一样\n#不可能是质量与体积之比\n#这里的密度，只是密集程度而已\n#一个简单的方法：单位面积/体积内数据点的多少\n#将50~100细分为N份，看每一个有多少落入其间\nicut <- function(x) {\n    ibreaks <- c(0, seq(50, 100, len = 11))\n    cut(x, breaks = ibreaks)\n}\nrange(cjb$wl)\nrange(cjb$sx)\ncjb %>%\n    select(wl, sx) %>%\n    mutate_at(vars(wl, sx), icut) %>%\n    group_by(wl, sx) %>%\n    summarise(freq = n()) %>%\n    complete(wl, sx, fill = list(freq = 0)) %>%\n    mutate(freq = ifelse(is.na(freq), 0, freq)) %>% View\ndistinct() %>% View\nggplot(aes(x = wl, y = sx, fill = freq)) +\n    geom_tile(colour = \"white\", size = 0.5) +\n    geom_text(aes(label = freq), size = 3) +\n    scale_fill_gradient(low = \"white\", high = \"red\") +\n    theme(axis.text.x = element_text(\n        angle = 90,\n        hjust = 1,\n        vjust = 0.5\n    ))\n\ncjb %>%\n    select(wl, sx) %>%\n    ggplot(aes(x = wl, y = sx)) +\n    geom_point()\nbreaks <-  c(0, seq(50, 100, len = 11))\nwl_sx_freq <- cjb %>%\n    select(wl, sx) %>%\n    mutate_at(vars(wl, sx),\n              function(x) {\n                  cut(x, breaks = breaks)\n              }) %>%\n    group_by(wl, sx) %>%\n    summarise(freq = n()) 
%>%\n    complete(wl, sx, fill = list(freq = 0))\nggplot(wl_sx_freq, aes(x = wl, y = sx, fill = freq)) +\n    geom_tile(colour = \"white\", size = 0.5) +\n    geom_text(aes(label = freq), size = 3) +\n    scale_fill_gradient(low = \"white\",\n                        high = \"red\") +\n    theme(axis.text.x =\n              element_text(\n                  angle = 90,\n                  hjust = 1,\n                  vjust = 0.5\n              )) +\n    coord_fixed()\nggsave(\"density.png\", dpi = 300)\n\n#感兴趣的小伙伴可以用stat_density2d\n#或是stat_bin2d实现类似的效果\n\n#接下来考虑另一种计算密度的方法\n#每一个点，半径为epsilon领域内点的多少\nselected_cols <- c(\"sx\", \"wl\", \"sw\")\nshu_wu_sheng <- cjb[, selected_cols]\nsws_dist <- as.matrix(dist(shu_wu_sheng,\n                           diag = TRUE,\n                           upper = TRUE))\niseq <- seq(50, 100, len = 10)\nimatrix <- expand.grid(iseq, iseq, iseq)\nnames(imatrix) <- selected_cols\ndist_imatrix <- apply(imatrix, 1, function(x) {\n    apply(shu_wu_sheng, 1, function(y) {\n        sqrt(sum((y - x) ^ 2))\n    })\n})\n#定义半径为平均距离\nepsilon <- mean(sws_dist)\n#计算半径范围之内的点数作为密度\nsws_density <- apply(dist_imatrix, 2, function(x) {\n    sum(x < epsilon)\n})\n#调颜色\nmy_color_ramp <- function(colors, values) {\n    v <- (values - min(values)) / diff(range(values))\n    x <- colorRamp(colors)(v)\n    rgb(x[, 1], x[, 2], x[, 3], maxColorValue = 255)\n}\ncols <- my_color_ramp(c(\"white\", \"red\"), sws_density)\n#绘制图形\nlibrary(rgl)\n#绘制“空”间\nplot3d(\n    x = imatrix,\n    size = 2,\n    type = \"n\",\n    xlab = \"Mathematics\",\n    ylab = \"Physics\",\n    zlab = \"Biology\",\n    col = cols\n)\ngrd <- imatrix\ngrd$col <- cols\ngrd$alpha <-\n    (sws_density - min(sws_density)) / (max(sws_density) - min(sws_density)) * 0.9\nlength <- width <- height <- (max(iseq) - min(iseq)) / length(iseq)\nfor (i in seq(nrow(grd))) {\n    #创建一个长方体\n    icube3d <- cube3d(col = grd$col[i])\n    #设定长宽高\n    icube3d <- scale3d(icube3d, length, width, height)\n    
#将长方体移动至指定位置\n    icube3d <- translate3d(icube3d, grd$sx[i], grd$wl[i], grd$sw[i])\n    #绘制长方体\n    shade3d(icube3d, alpha = grd$alpha[i])\n}\n\n\n\n# Hopkins -----------------------------------------------------------------\n\n#数据空间的均匀程度\n#可以用hopkins统计量来描述\n#均匀分布的话，趋近于0.5\n#倾斜的话，趋近于0\nclustertend::hopkins(cjb[, 4:12], n = 100)\n#> $`H`\n#> [1] 0.1549145\n\n\n\n# Last words --------------------------------------------------------------\n\n#这里展示的，只是数据可视化的一部分图形\n#还有很多图形尚未涉及，请小伙伴们自行研究：\n#比如：\n#马赛克图mosaicplot()\n#雷达图stars()\n#关系图\n#网络图plot.igraph\n#地图ggmap\n#词云wordcloud2\n#等等\n#以后在进行具体算法建模时，也会涉及到很多专用的图形\n#比如：\n#关联规则：关联网络图\n#聚类分析：层次谱系图\n#分类回归：分类回归树、神经网络图、变量重要性图\n#……\n#这些图形展示，当然已经超越了所谓的简单的数据描述了\n#而是在对模型本身进行直观展示\n#换言之，通过这些图形，不只是看数据的长相\n#而是透过现象看本质了\n\n\n# The End ^-^ -------------------------------------------------------------\n"
  },
  {
    "path": "05_相随相伴、谓之关联.R",
    "content": "\n\n# 05_相随相伴、谓之关联----------------------------------------------------\n\n#在观察完数据的长相之后，便开始深入其内在的关系结构了\n#本次实验聚焦的是关联规则\n#关联规则所表达的联系，本质上是伴随关系\n#因此，本章节名称为《相随相伴、谓之关联》\n\n#教材上的名称频繁项集、关联规则\n#关联规则可能是机器学习/数据挖掘领域最为知名的算法了\n#啤酒和尿不湿的故事，提供了“发现数据背后意想不到的模式”的范本，\n#也让关联规则成为数据挖掘最好的科（guang）普（gao）\n\n# Data Import -------------------------------------------------------------\n\n#清空内存\nrm(list = ls())\n#蛮力搜索可能的规则数\nn_item <- c(2:5, 10, 20, 50, 100)\nn_rules <- 3 ^ n_item - 2 ^ (n_item + 1) + 1\nView(data.frame(n_item, n_rules))\n\nlibrary(tidyverse)\nlibrary(readr)\ncjb_url <- \"data/cjb.csv\"\ncjb <- read_csv(cjb_url,\n                locale = locale(encoding = \"CP936\"))\n\n# Discretization ----------------------------------------------------------\n\n#数据离散化\n#arules包只能对离散数据进行关联规则挖掘\n#离散化有专用的包discretization\n#当然，对于大部分的任务而言，\n#cut()函数已经够用了\n#定义一个百分制转成五分制成绩的函数\nas_five_grade_scores <- function(x) {\n    cut(\n        x,\n        breaks = c(0, seq(60, 100, by = 10)),\n        include.lowest = TRUE,\n        right = FALSE,\n        ordered_result = TRUE,\n        labels = c(\"不及格\", \"及格\", \"中\", \"良\", \"优\")\n    )\n}\n\ncjb %<>%\n    mutate_at(vars(xb, wlfk), factor) %>% #类型转换\n    mutate_at(vars(yw:sw), as_five_grade_scores) %>% #数据分箱\n    select(-c(1:2))#姓名、班级两列不参与规则挖掘\n\n\n# Types of data -----------------------------------------------------------\n\nlibrary(arules)\n#转换为transaction\ncjb_trans <- as(cjb, \"transactions\")\n#查看数据\ncjb_trans\n#> transactions in sparse format with\n#> 775 transactions (rows) and\n#> 49 items (columns)\n\ninspect(cjb_trans[1:5])\ninspect(head(cjb_trans))\n\n\n\n\n#转换为数据框\ncjb_trans %>%\n    as(\"data.frame\") %>%\n    View()\n#转换为矩阵\ncjb_trans %>%\n    as(\"matrix\") %>%\n    View()\n#转换为列表\ncjb_trans %>%\n    as(\"list\") %>%\n    head(n = 2)\n\n\n\n#无论是列表、矩阵、数据框\n#还是最直接的事务记录transactions\n#都可以直接用来挖掘\n\n\n# Model with default args 
-------------------------------------------------\n#关于Apriori算法的原理，请参阅课程讲义\n#R中的具体实现，则简单得超乎人们的想象\n#首先是加载包\n#对于关联规则的挖掘和可视化\n#主要用arules和arulesViz两个包\n#加载后者时，前者自动加载\n\nlibrary(arulesViz)\n#调用apriori()函数进行挖掘\n#算法实现，只是一句话的事儿\nirules_args_default <- apriori(cjb_trans)\nirules_args_default <- apriori(cjb)\n? apriori\n\nirules_args_default <- apriori(cjb, parameter = list(ext = TRUE))\nquality(irules_args_default)\n\n\n#看一看挖出来的规则\nirules_args_default\n#> set of 3775 rules\n\n#查看具体的规则\ninspect(head(irules_args_default))\n\n\n\n# Rules information -------------------------------------------------------\n\n#关于规则的一些基本信息\nirules_args_default@info\n#> $`data`\n#> cjb_trans\n#>\n#> $ntransactions\n#> [1] 775\n#>\n#> $support\n#> [1] 0.1\n#>\n#> $confidence\n#> [1] 0.8\n\n\n# Parameters --------------------------------------------------------------\n\n#定制其中的参数\n#设置支持度、置信度、最小长度等\nirules <- apriori(cjb_trans,\n                  parameter = list(\n                      minlen = 2,\n                      supp = 50 / length(cjb_trans),\n                      #最小支持度，减少偶然性\n                      conf = 0.8 #最小置信度，推断能力\n                  ))\nlength(irules)\ninspectDT(irules)\n#> [1] 8651\n\n\n#也可以进一步设定前项和后项\nirules <- apriori(\n    cjb_trans,\n    parameter = list(\n        minlen = 2,\n        supp = 50 / length(cjb_trans),\n        conf = 0.8\n    ),\n    appearance = list(rhs = paste0(\"wlfk=\", c(\"文科\", \"理科\")),\n                      default = \"lhs\")\n)\ninspectDT(irules)\n#对规则进行排序\nirules_sorted <- sort(irules, by = \"lift\")\ninspectDT(irules_sorted)\n\n\n\n# Pruned Rules ------------------------------------------------------------\n\nsubset.matrix <-\n    is.subset(irules_sorted, irules_sorted, sparse = FALSE)\nsubset.matrix[lower.tri(subset.matrix, diag = TRUE)] <- NA\n\nView(subset.matrix)\nredundant <- colSums(subset.matrix, na.rm = TRUE) >= 1\nas.integer(which(redundant))\n\n\n(irules_pruned <- irules_sorted[!redundant])\n#> set of 107 
rules\ninspect(irules_pruned)\ninspectDT(irules_pruned)\n#当然，很多时候，我们只想查看其中部分规则\ninspect(head(irules_pruned))\ninspect(tail(irules_pruned))\n\n\n# Model Evaluation --------------------------------------------------------\n\n#查看评估指标\nquality(irules_pruned)\n\n\nstr(quality(irules_pruned))\n\n\n#更多评估指标\n(\n    more_measures <- interestMeasure(\n        irules_pruned,\n        measure = c(\"support\", \"confidence\", \"lift\", \"casualConfidence\"),\n        transactions = cjb_trans\n    )\n)\n\nquality(irules_pruned) <- more_measures %>%\n    mutate_at(vars(1:3),\n              funs(round(., digits = 2)))\n\n\n\n# Rules Filtering ---------------------------------------------------------\n\n#比如仅关心文科相关的规则\nirules_sub <- subset(irules_pruned,\n                     items %in% c(\"wlfk=文科\"))\ninspect(irules_sub)\ninspectDT(irules_sub)\n\nirules_sub <- subset(irules_pruned,\n                     items %pin% c(\"文科\"))\ninspectDT(irules_sub)\n#当然也可以同时满足多种搜索条件\n#比如性别和确信度\nirules_sub <- subset(irules_pruned,\n                     lhs %pin% c(\"sw\") &\n                         lift > 1.8)\ninspectDT(irules_sub)\ninspect(irules_sub)\n#> lhs                                      rhs         support confidence\n#> [1]  {xb=男,sx=优,ls=优,wl=优,hx=优,sw=优} => {wlfk=理科} 0.074   0.93\n#> [2]  {xb=男,sx=优,dl=优,wl=优,hx=优,sw=优} => {wlfk=理科} 0.090   0.93\n#> [3]  {xb=男,dl=优,wl=优,hx=优,sw=优}       => {wlfk=理科} 0.099   0.93\n#> [4]  {xb=男,sx=优,wl=优,hx=优,sw=优}       => {wlfk=理科} 0.092   0.92\n#> [5]  {xb=男,sx=优,dl=优,wl=优,sw=优}       => {wlfk=理科} 0.090   0.92\n#> [6]  {xb=男,sx=优,ls=优,wl=优,sw=优}       => {wlfk=理科} 0.074   0.92\n#> [7]  {xb=女,zz=优,sw=中}                   => {wlfk=文科} 0.070   0.95\n\n\n# Frequent Itemsets -------------------------------------------------------\n\n#从规则中提取频繁项集\nitemsets <- unique(generatingItemsets(irules_pruned))\nitemsets\n#> set of 107 itemsets\nitemsets_df <- as(itemsets, 
\"data.frame\")\nView(itemsets_df)\ninspect(itemsets)\n\n#反过来，先挖掘频繁项集\n#再导出关联规则\n#生成频繁项集，而不是规则\nitemsets <- apriori(cjb_trans,\n                    parameter = list(\n                        minlen = 2,\n                        supp = 50 / length(cjb_trans),\n                        target = \"frequent itemsets\"\n                    ))\ninspect(itemsets)\nirules_induced <- ruleInduction(itemsets,\n                                cjb_trans,\n                                confidence = 0.8)\nirules_induced\n#> set of 8651 rules\n\n#显然，只要参数是一样的\n#得到规则条数也是一样的\n\n#1-项集的频繁程度\nitemFrequency(cjb_trans, type = \"relative\")\nitemFrequencyPlot(cjb_trans)\n#当然我们更愿意统一成ggplot2的风格\nitem_freq <- itemFrequency(cjb_trans, type = \"relative\")\nlibrary(tidyverse)\nitem_freq %>%\n    as.data.frame %>%\n    rownames_to_column(var = \"item\") %>%\n    mutate(item = factor(item, levels = item)) %>%\n    ggplot(aes(x = item, y = item_freq, fill = item_freq)) +\n    geom_bar(stat = \"identity\") +\n    theme(axis.text.x  = element_text(\n        angle = 60,\n        vjust = 1,\n        hjust = 1\n    ))\n#保留现有的因子水平，也有下述方法\nitem_freq %>%\n    as.data.frame %>%\n    rownames_to_column(var = \"item\") %>%\n    mutate(item = forcats::fct_inorder(item)) %>%\n    ggplot(aes(x = item, y = item_freq, fill = item_freq)) +\n    geom_bar(stat = \"identity\") +\n    theme(axis.text.x  = element_text(\n        angle = 60,\n        vjust = 1,\n        hjust = 1\n    ))\n\n\n# Rules Viz ---------------------------------------------------------------\n\nlibrary(arulesViz)\nplot(irules_pruned[1:10],\n     method = \"graph\",\n     control = list(edgeCol = 'red', engine = 'igraph'))#最常用的一种方式\nplot(irules_pruned, method = \"grouped\")\nplot(irules_pruned, method = \"paracoord\")\n\n#交互式的规则可视化\nlibrary(tcltk2)\nplot(irules_pruned,\n     method = \"graph\",\n     interactive = TRUE)\n\n\n# Rules Export 
------------------------------------------------------------\n\n#这些规则怎么保存呢？\n#当然可以console输出之后复制、或是截图，\n#但效果并不好\n#稍微好一点的办法是直接将console的结果捕获\nout <- capture.output(inspect(irules_pruned))\nout\nwriteLines(out, con = \"Rules.txt\")\n\nsave(irules_pruned,\n     file = \"rules.rda\")\n\n#更好的办法，应该是将规则转换成数据框\n#然后另存为csv文件\nirules_pruned_in_df <-\n    as(irules_pruned, \"data.frame\")\nView(irules_pruned_in_df)\n#考虑到规则中也包含逗号,\n#在另存为csv文件时，一般需要设置参数quote=TRUE\nwrite.csv(\n    irules_pruned_in_df,\n    file = \"Rules.csv\",\n    quote = TRUE,\n    row.names = FALSE\n)\n#当然，在另存为csv之前，也可以对规则进行必要的处理\nirules_pruned_in_df %<>%\n    separate(rules,\n             sep = \"=>\",\n             into = c(\"LHS\", \"RHS\")) %>%\n    mutate_at(vars(\"LHS\", \"RHS\"),\n              funs(gsub(\"[\\\\{\\\\} ]\", \"\", .)))\nView(irules_pruned_in_df)\n\n#转换成data.frame之后\n#自然可以随意处置了\n#比如可以通过正则表达式任意抽取自己想要的规则\n#请小伙伴们自行练习\n#当然，arules包中write()函数也可以将规则直接写到本地\nwrite.csv(\n    irules_pruned_in_df,\n    file = \"Rules2.csv\",\n    quote = TRUE,\n    row.names = FALSE\n)\n\n#以上是R中关于关联规则的基本实现\n#感兴趣的同学，可以进一步阅读：\n#序列模式arulesSequences等主题\n#当然，即便是关联规则，arules当然使用最多\n#但也并非是唯一的选择，比如RKEEL等均可尝试\n\n\n# The End ^-^ -------------------------------------------------------------\n"
  },
  {
    "path": "06_既是世间法、自当有分别.R",
    "content": "\n\n# 06_既是世间法、自当有分别------------------------------------------------\n\n\n#这里的分别，指的是分门别类\n#更具体的讲，是根据特征做出判断、作出分类\n\n#分类与回归，几乎是有监督学习的代名词\n#也是机器学习/数据挖掘最核心的内容\n#旨在揭示自变量与因变量之间的映射关系\n#因变量为类别变量时，称之为分类\n#因变量为连续变量时，称之为回归\n#本实验以分类为主\n\n#在R语言里，caret包提供了分类与回归的统一框架\n#caret包也是R里边使用最广泛的包之一\n#小伙伴们可以多加留意\n\n\n# Data Exploration --------------------------------------------------------\n\n#清空内存\nrm(list = ls())\nlibrary(tidyverse)\n#加载数据\ncjb_url <- \"data/cjb.csv\"\ncjb <- read_csv(cjb_url,\n                locale = locale(encoding = \"CP936\"))\ncjb %<>%\n    mutate(zcj = rowSums(.[, 4:12])) %>%\n    mutate_at(vars(xb, bj, wlfk), factor) %>%\n    filter(zcj != 0) %>%\n    select(xb:wlfk)\n#按照一般的数据分析流程，自然应该是先对数据进行探索性分析\n#小伙伴们可以参照之前Get to Know Your Data相关代码，认识这份数据\n\n#对于分类与回归问题，除了认识数据中的其他一些数据探索外\n#通常需要观察不同自变量，相对于不同因变量取值时的数据分布\n#考察其分类的潜力\n#我们可以借助caret::featurePlot()和plotluck::plotluck()来进行观察\nlibrary(caret)\nfeaturePlot(\n    x = cjb %>%\n        select(yw:sw),\n    y = cjb[, \"wlfk\"] %>%\n        as_vector(),\n    plot = \"density\",\n    scales = list(\n        x = list(relation = \"free\"),\n        y = list(relation = \"free\")\n    ),\n    adjust = 1.5,\n    pch = \"|\"\n)\n\n# library(devtools)\n# devtools::install_github(\"stefan-schroedl/plotluck\")\nlibrary(plotluck)\nplotluck(cjb, wlfk ~ xb)\n#绘制不同所有自变量、因变量各自分布\nplotluck(cjb, . 
~ 1)\n#绘制自变量相对于因变量的分组分布\nplotluck(cjb, wlfk ~ .,\n         opts = plotluck.options(verbose = TRUE))\n#上述代码出图顺序，并非变量原有顺序，\n#而是conditional entropy从小打到排列的结果\nplotluck(cjb, wlfk ~ .,\n         opts = plotluck.options(verbose = TRUE,\n                                 multi.entropy.order = FALSE))\nplotluck(cjb, wlfk ~ yw + sx)\n\n\n\n\n# k-fold Cross Validation -------------------------------------------------\n\n#在建模之前就说模型评估，\n#仿佛为时过早\n#实际上，模型评估和模型建立是同等重要的\n#道理很简单：\n#All models are wrong, but some are useful~\n#模型之所以称之为模型，就是因为它只不过是近似、逼近而已\n#机器学习，无非是在我们已知的模型集里边，\n#找到那个最逼近的而已\n\n#有别于传统统计看p值、看统计的显著性\n#机器学习/数据挖掘领域的模型评估\n#主要是看实际效果\n#看模型在实际数据上的性能指标\n#通常的做法是：把数据分为训练集和测试集\n#在训练集上训练、或者说让机器学习出一个模型\n#然后在测试集上看其具体的性能指标：如正确率\n\n#具体而言，有三种做法：\n#1、留出法hold out:\n#将数据集一分为二，一般是训练集70%，测试集50%\n#2、交叉验证法cross validation:\n#将数据分为k折，每一次都用当前折一折作为测试集\n#其余的k-1折作为训练集，\n#最后通过k折测试集上性能指标的平均值作为模型\n#最终的性能指标\n#显然，交叉验证比留出法更加稳定\n#3、自助法out-of-bag\n#主要是应用于组合学习之中，进行有放回抽样时，\n#有36%左右的数据抽取不到，这些数据天然作为\n#测试集\n#从这三种方法的描述可以看出，交叉验证法\n#是适用范围最广的方法\n\n#留出法hold-out\n#手工版\nset.seed(2012)\ntrain_set_idx <- sample(nrow(cjb), nrow(cjb) * 0.7)\nstr(train_set_idx)\n#> int [1:541] 169 576 218 722 575 673 411 700 687 696 ...\nlength(train_set_idx) / nrow(cjb)\n#> [1] 0.6989664\ntrain_set <- cjb[train_set_idx, ]\n# test_set <- ?\n\n# #工业级\n# train_set_idx <-\n#   caret::createDataPartition(cjb$wlfk,\n#                       p = 0.7, list = FALSE)\n# str(train_set_idx)\n# length(train_set_idx) / nrow(cjb)\n# train_set <- cjb[train_set_idx, ]\n# # test_set <- ?\n\n#k折交叉检验\ncv_kfold <- function(data, k = 10, seed = 2012) {\n    n_row <- nrow(data)#计算数据的行数\n    n_foldmarkers <- rep(1:k, ceiling(n_row / k))[1:n_row]\n    set.seed(seed)\n    n_foldmarkers <- sample(n_foldmarkers)  #打乱顺序\n    kfold <- lapply(1:k, function(i) {\n        (1:n_row)[n_foldmarkers == i]\n    })\n    return(kfold)\n}\ncv_kfold(cjb)\n#> [[1]]\n#> [1]   7  14  15  25  35  48  56  59  60  61  65  91  92\n#> [14] 102 109 114 128 130 135 141 156 169 178 180 181 
185\n#> [27] 189 190 191 196 208 217 244 245 247 263 280 282 291\n#> [40] 293 301 309 319 324 327 328 329 330 332 356 361 362\n#> [53] 376 384 412 413 446 456 485 489 499 500 519 525 531\n#> [66] 534 550 559 578 585 586 598 607 619 620 675 685 719\nsapply(cv_kfold(cjb), length)\n#>  [1] 78 78 78 78 77 77 77 77 77 77\n\n\n#k的具体取值，没有统一的标准，视数据量大小，\n#可以取5、10等\n#对于少量数据，甚至可以将其推到极致，\n#取nrow(cjb)折\n#也就是我们常说的留一法\nkfolds <- cv_kfold(cjb, nrow(cjb))\n#我们这里取k=10\nkfolds <- cv_kfold(cjb)\nsapply(kfolds, length)\n\n\n#像k折交叉检验这么经典的方法，在很多扩展包中\n#都有实现，比如caret或modelr等\n# library(caret)\n# kfolds <- createFolds(cjb$wlfk, k = 10)\n# sapply(kfolds, length)\n\n\n\n# Global performance ------------------------------------------------------\n\n#对于分类模型的评估，首先需要看是否存在类不平衡问题，\n#如果存在类不平衡，评估指标的选取，\n#不能单纯用正确率、错误率来评估，\n#比如：10000人中，有10个人得SARS。现在不采用任何模型，\n#只是用众数进行预测，也就是判定所有人都不得SARS，\n#此时模型的正确率为(10000 - 10) / 10000 = 99.90%，\n#正确率达到99.9%，然而，这种预测没有任何意义\n#故此，还需要引入召回率Recall和Precision，\n#以及二者的调和平均数F1值等\n#从plotluck()的结果可以看出，我们所拿到的数据，并不存在\n#类不平衡问题\n#plotluck(cjb, .~1)\n#由于类是相对均衡的，本实验仅采用分类正确率和错误率\n#手工版如下：\nglobal_performance <- NULL\nimetrics <- function(method, type, predicted, actual) {\n    con_table <- table(predicted, actual)\n    cur_one <- data.frame(\n        method = method,\n        #算法模型的名称\n        type = type,\n        #取值为train或是test\n        accuracy = sum(diag(con_table)) / sum(con_table),\n        error_rate = 1 - sum(diag(con_table)) / sum(con_table)\n    )\n    assign(\"global_performance\",\n           rbind(get(\"global_performance\", envir = .GlobalEnv) ,\n                 cur_one),\n           envir = .GlobalEnv)\n}\n#有很多专门的包，已经实现了各种模型评估指标\n#在实际的数据分析项目中，就不用去重复造轮子了\n#拿来主义，直接用就好\n#工业级\n# imetrics <- function(method, type, predicted, actual) {\n#   cur_one <- data.frame(\n#     method = method,\n#     type = type,\n#     accuracy = MLmetrics::Accuracy(y_true = actual, y_pred = predicted),\n#     # accuracy = 1 - Metrics::ce(actual, predicted),\n#     # accuracy = 1 - 
ModelMetrics::ce(actual, predicted),\n#     error_rate = ModelMetrics::ce(actual, predicted)\n#   )\n#   global_performance <<- rbind(global_performance, cur_one)\n# }\n\n# #分类回归模型，有数十种\n# #各类改进的模型，更是数以百计\n# available_models <- modelLookup()\n# unique(available_models$model)\n# #> [1] \"ada\"                 \"AdaBag\"\n# #> [3] \"adaboost\"            \"AdaBoost.M1\"\n# #> [235] \"xgbLinear\"           \"xgbTree\"\n# #> [237] \"xyf\"\n# length(unique(available_models$model))\n# #> [1] 237\n# #想穷尽所有的算法模型，几乎是不可能的\n# #本实验仅涉及部分经典算法模型\n# #包括决策树、近邻法、朴素贝叶斯、\n# #人工神经网络、支持向量机和随机森林\n\n\n\n# kknn --------------------------------------------------------------------\n\nload('data/cjb.rda')\ncjb <- cjb %>%\n    select(4:13) %>%\n    mutate(wlfk = factor(wlfk))\ntrain_set_idx <- sample(nrow(cjb), 0.7 * nrow(cjb))\ntest_set_idx <- (1:nrow(cjb))[-train_set_idx]\nlibrary(kknn)\nset.seed(2012)\nimodel <- kknn(wlfk ~ .,\n               train = cjb[train_set_idx, ],\n               test = cjb[train_set_idx, ])\npredicted_train <- imodel$fit\n#ce: classification error\nMetrics::ce(cjb$wlfk[train_set_idx], predicted_train)\n#> [1] 0.1090573\n#作为惰性学习法，训练和测试同时进行\nimodel <- kknn(wlfk ~ .,\n               train = cjb[train_set_idx, ],\n               test = cjb[-train_set_idx, ])\npredicted_test <- imodel$fit\nMetrics::ce(cjb$wlfk[-train_set_idx], predicted_test)\n#> [1] 0.1888412\n\n#选取最优的k和核\ntrain_kk <- train.kknn(\n    wlfk  ~ .,\n    data = cjb,\n    kmax = 100,\n    kernel = c(\n        \"rectangular\",\n        \"epanechnikov\",\n        \"cos\",\n        \"inv\",\n        \"gaussian\",\n        \"optimal\"\n    )\n)\n\n#查看具体结果\ntrain_kk\n#> Call:\n#>   train.kknn(formula = wlfk ~ ., data = cjb, kmax = 100,\n#>              kernel = c(\"rectangular\",     \"epanechnikov\",\n#>                         \"cos\", \"inv\", \"gaussian\", \"optimal\"))\n#>\n#> Type of response variable: nominal\n#> Minimal misclassification: 0.2105943\n#> Best kernel: gaussian\n#> Best k: 
49\n\n#不同的k和核所对应的分类错误率\ntrain_kk$MISCLASS\n#     rectangular epanechnikov       cos       inv  gaussian   optimal\n# 1     0.2919897    0.2919897 0.2919897 0.2919897 0.2919897 0.2919897\n# 2     0.2984496    0.2919897 0.2919897 0.2919897 0.2919897 0.2919897\n# 3     0.2661499    0.2739018 0.2751938 0.2661499 0.2661499 0.2919897\n# 4     0.2713178    0.2661499 0.2648579 0.2519380 0.2532300 0.2919897\n# 5     0.2583979    0.2571059 0.2596899 0.2571059 0.2558140 0.2558140\n# 6     0.2609819    0.2558140 0.2532300 0.2441860 0.2454780 0.2532300\n\n#显然，上述矩阵中，序号就是相应的k\n\n\n#最佳的k值\nbest_k <- train_kk$best.parameters$k\nbest_k\n#> [1] 49\nbest_kernel <- train_kk$best.parameters$kernel\nbest_kernel\n#> [1] \"gaussian\"\n\n#最小的误分率\nmin_ce <- train_kk$MISCLASS[best_k,\n                            train_kk$best.parameters$kernel]\n#下边这种方法更简单\nmin(train_kk$MISCLASS)\n\n#提取不同k和核相应的分类错误率\nce_kk <- train_kk$MISCLASS\n#View(ce_kk)\n#最小错误率\nmin_ce <- min(train_kk$MISCLASS)\nstr(ce_kk)\n#通过ggplot2进行绘制\nce_kk %>%\n    as.data.frame() %>%\n    mutate(k = row_number()) %>%\n    gather(key = \"kernel\", value = \"ce\", -k) %>%\n    ggplot(aes(x = k, y = ce, colour = kernel)) +\n    geom_vline(aes(xintercept = best_k), linetype = \"dashed\") +\n    geom_hline(aes(yintercept = min_ce), linetype = \"dashed\") +\n    geom_line() +\n    geom_point(aes(shape = kernel)) +\n    theme(legend.position = c(0.9, 0.8))\n\n#进行k-折交叉检验k-fold cross validation\nlibrary(kknn)\nsp <- Sys.time() #记录开始时间\ncat(\"\\n[Start at:\", as.character(sp))\nfor (i in 1:length(kfolds)) {\n    curr_fold <- kfolds[[i]] #当前这一折\n    train_set <- cjb[-curr_fold,] #训练集\n    test_set <- cjb[curr_fold,] #测试集\n    predicted_train <- kknn(\n        wlfk ~ .,\n        train = train_set,\n        test = train_set,\n        k = best_k,\n        kernel = best_kernel\n    )$fit\n    imetrics(\"kknn\", \"Train\", predicted_train, train_set$wlfk)\n    predicted_test <- kknn(\n        wlfk ~ .,\n        train = train_set,\n        test = 
test_set,\n        k = best_k,\n        kernel = best_kernel\n    )$fit\n    imetrics(\"kknn\", \"Test\", predicted_test, test_set$wlfk)\n}\nep <- Sys.time()\ncat(\"\\tFinised at:\", as.character(ep), \"]\\n\")\ncat(\"[Time Ellapsed:\\t\",\n    difftime(ep, sp, units = \"secs\"),\n    \" seconds]\\n\")\nglobal_performance\n#>     method  type  accuracy error_rate\n#> 1    kknn Train 0.8333333  0.1666667\n#> 2    kknn  Test 0.8076923  0.1923077\n#> 3    kknn Train 0.8405172  0.1594828\n#> 4    kknn  Test 0.8076923  0.1923077\n#> 5    kknn Train 0.8333333  0.1666667\n#> 6    kknn  Test 0.8461538  0.1538462\n#> 7    kknn Train 0.8405172  0.1594828\n#> 8    kknn  Test 0.7564103  0.2435897\n#> 9    kknn Train 0.8350072  0.1649928\n#> 10   kknn  Test 0.7922078  0.2077922\n#> 11   kknn Train 0.8278336  0.1721664\n#> 12   kknn  Test 0.7922078  0.2077922\n#> 13   kknn Train 0.8378766  0.1621234\n#> 14   kknn  Test 0.8051948  0.1948052\n#> 15   kknn Train 0.8350072  0.1649928\n#> 16   kknn  Test 0.7792208  0.2207792\n#> 17   kknn Train 0.8307030  0.1692970\n#> 18   kknn  Test 0.6623377  0.3376623\n#> 19   kknn Train 0.8278336  0.1721664\n#> 20   kknn  Test 0.7792208  0.2207792\n\n\n\n#考虑到每种方法都要采用交叉检验的方法，\n#根据事不过三法则，反反复复拷贝、更改以上代码是不合适的\n#为此，将上述代码改写为相应的函数\nkfold_cross_validation <-\n    function(formula, data, kfolds, learner, ...) 
{\n        sp <- Sys.time() #记录开始时间\n        cat(\"\\n[Start at:\", as.character(sp))\n        lapply(kfolds, function(curr_fold) {\n            train_set <- data[-curr_fold,] #训练集\n            test_set <- data[curr_fold,] #测试集\n            predictions <- do.call(learner, args = c(\n                list(\n                    formula = formula,\n                    train = train_set,\n                    test = test_set\n                ),\n                list(...)\n            ))\n            imetrics(learner,\n                     \"Train\",\n                     predictions$predicted_train,\n                     train_set$wlfk)\n            imetrics(learner,\n                     \"Test\",\n                     predictions$predicted_test,\n                     test_set$wlfk)\n        })\n        ep <- Sys.time()\n        cat(\"\\tFinised at:\", as.character(ep), \"]\\n\")\n        cat(\"[Time Ellapsed:\\t\",\n            difftime(ep, sp, units = \"secs\"),\n            \" seconds]\\n\")\n    }\n\n\nlearn.kknn <- function(formula, train, test, ...) 
{\n    predicted_train <-\n        kknn(formula, train = train, test = train, ...)$fit\n    predicted_test <-\n        kknn(formula, train = train, test = test, ...)$fit\n    return(list(predicted_train = predicted_train,\n                predicted_test = predicted_test))\n}\n\nglobal_performance <- NULL\nkfold_cross_validation(\n    formula = wlfk ~ .,\n    data = cjb,\n    kfolds = kfolds,\n    learner = \"learn.kknn\",\n    k = best_k,\n    kernel = best_kernel\n)\n\n\n# CART --------------------------------------------------------------------\n\n#决策树的生长\n#rpart.plot包会自动加载rpart包\nlibrary(rpart.plot)\nimodel <- rpart(wlfk ~ .,\n                data = cjb[train_set_idx,])\nimodel\n# n= 541\n#\n# node), split, n, loss, yval, (yprob)\n# * denotes terminal node\n#\n# 1) root 541 258 文科 (0.47689464 0.52310536)\n#   2) wl>=85.5 230  70 理科 (0.69565217 0.30434783)\n#     4) sx>=87.5 185  41 理科 (0.77837838 0.22162162)\n#       8) yw< 91.5 132  19 理科 (0.85606061 0.14393939) *\n#       9) yw>=91.5 53  22 理科 (0.58490566 0.41509434)\n#         18) wl>=88.5 38  10 理科 (0.73684211 0.26315789) *\n#         19) wl< 88.5 15   3 文科 (0.20000000 0.80000000) *\n#     5) sx< 87.5 45  16 文科 (0.35555556 0.64444444)\n#       10) xb=男 23  10 理科 (0.56521739 0.43478261) *\n#       11) xb=女 22   3 文科 (0.13636364 0.86363636) *\n#   3) wl< 85.5 311  98 文科 (0.31511254 0.68488746)\n#     6) xb=男 127  61 文科 (0.48031496 0.51968504)\n#       12) yw< 88.5 98  43 理科 (0.56122449 0.43877551)\n#         24) hx>=93 21   4 理科 (0.80952381 0.19047619) *\n#         25) hx< 93 77  38 文科 (0.49350649 0.50649351)\n#           50) ls< 86.5 41  16 理科 (0.60975610 0.39024390)\n#             100) yw>=77.5 34  10 理科 (0.70588235 0.29411765) *\n#             101) yw< 77.5 7   1 文科 (0.14285714 0.85714286) *\n#           51) ls>=86.5 36  13 文科 (0.36111111 0.63888889) *\n#       13) yw>=88.5 29   6 文科 (0.20689655 0.79310345) *\n#     7) xb=女 184  37 文科 (0.20108696 0.79891304)\n#       14) sw>=81.5 110  32 文科 (0.29090909 
0.70909091)\n#         28) ls< 91.5 62  27 文科 (0.43548387 0.56451613)\n#           56) sx>=91.5 14   2 理科 (0.85714286 0.14285714) *\n#           57) sx< 91.5 48  15 文科 (0.31250000 0.68750000) *\n#         29) ls>=91.5 48   5 文科 (0.10416667 0.89583333) *\n#       15) sw< 81.5 74   5 文科 (0.06756757 0.93243243) *\n\n\npredicted_train <-\n    predict(imodel,\n            newdata = cjb[train_set_idx,],\n            type = \"class\")\nMetrics::ce(cjb$wlfk[train_set_idx],\n            predicted_train)\n#> [1] 0.1959335\n\n#当然，我们更关注的是测试误差\npredicted_test <-\n    predict(imodel,\n            newdata = cjb[-train_set_idx, ],\n            type = \"class\")\nMetrics::ce(cjb$wlfk[-train_set_idx],\n            predicted_test)\n#> [1] 0.2575107\n\n#决策树剪枝\nprintcp(imodel, digits = 2)\n#> Classification tree:\n#>   rpart(formula = wlfk ~ ., data = cjb[train_set_idx, ])\n#>\n#> Variables actually used in tree construction:\n#>   [1] hx ls sw sx wl wy xb\n#>\n#> Root node error: 266/542 = 0.49\n#>\n#> n= 542\n#>\n#>      CP nsplit rel error xerror  xstd\n#> 1 0.349      0      1.00   1.00 0.045\n#> 2 0.050      1      0.65   0.69 0.042\n#> 3 0.023      2      0.60   0.70 0.042\n#> 4 0.019      4      0.55   0.67 0.042\n#> 5 0.017      7      0.50   0.67 0.042\n#> 6 0.013      9      0.46   0.67 0.042\n#> 7 0.012     12      0.42   0.66 0.042\n#> 8 0.010     13      0.41   0.65 0.042\nplotcp(imodel)\n\nimodel$cptable\n#>            CP nsplit rel error    xerror       xstd\n#> 1 0.34883721      0 1.0000000 1.0000000 0.04502822\n#> 2 0.05038760      1 0.6511628 0.6627907 0.04191607\n#> 3 0.02325581      2 0.6007752 0.6589147 0.04184977\n#> 4 0.01937984      4 0.5542636 0.6666667 0.04198161\n#> 5 0.01744186      7 0.4961240 0.6627907 0.04191607\n#> 6 0.01291990      9 0.4612403 0.6434109 0.04157683\n#> 7 0.01162791     12 0.4224806 0.6356589 0.04143566\n#> 8 0.01000000     13 0.4108527 0.6356589 0.04143566\nimodel$cptable\n#剪枝的一般方法\nopt <- which.min(imodel$cptable[, \"xerror\"])\ncp <- 
imodel$cptable[opt, \"CP\"]\n#> [1] 0.01\nimodel_pruned <- prune(imodel, cp = cp)\nprint(imodel_pruned)\n\n#剪枝前后效果对比\npredicted_train <- predict(imodel_pruned,\n                           newdata = cjb[train_set_idx,],\n                           type = \"class\")\nMetrics::ce(cjb$wlfk[train_set_idx],\n            predicted_train)\n#> [1] 0.1959335\npredicted_test <- predict(imodel_pruned,\n                          newdata = cjb[-train_set_idx,],\n                          type = \"class\")\nMetrics::ce(cjb$wlfk[-train_set_idx],\n            predicted_test)\n#> 0.2575107\n\n\n#绘制决策树的基本方法\nplot(imodel)\ntext(imodel)\n#上边的效果小伙伴们肯定是不满意的\nrpart.plot(\n    imodel_pruned,\n    type = 4,\n    fallen = F,\n    branch = 0.5,\n    round = 0,\n    leaf.round = 2,\n    clip.right.labs = T,\n    cex = 0.85,\n    under.cex = 0.75,\n    box.palette = \"GnYlRd\",\n    branch.col = \"gray\",\n    branch.lwd = 2,\n    extra = 108,\n    #extra参数的含义需留意\n    under = T,\n    split.cex = 1\n)\n\n#除了可视化之外，我们还希望把这个树导成规则\nlibrary(rattle)\nrules <- asRules(imodel_pruned, compact = TRUE)\n#> R  7 [22%,0.90] sx< 85.5 xb=女\n#> R 11 [11%,0.85] sx>=85.5 wl< 86.5 ls>=92.5\n#> R 51 [ 4%,0.79] sx< 85.5 xb=男 hx>=83 wy>=81.5 sx< 76.5\n#> R 13 [ 5%,0.75] sx< 85.5 xb=男 hx< 83\n#> R101 [ 1%,0.75] sx< 85.5 xb=男 hx>=83 wy>=81.5 sx>=76.5 sw>=89.5\n#> R 21 [ 3%,0.75] sx>=85.5 wl< 86.5 ls< 92.5 sw< 80.5\n#> R 19 [ 3%,0.75] sx>=85.5 wl>=86.5 ls>=95.5 sw< 92.5\n#> R 24 [ 5%,0.28] sx< 85.5 xb=男 hx>=83 wy< 81.5\n#> R 20 [13%,0.24] sx>=85.5 wl< 86.5 ls< 92.5 sw>=80.5\n#> R 18 [ 7%,0.22] sx>=85.5 wl>=86.5 ls>=95.5 sw>=92.5\n#> R100 [ 3%,0.19] sx< 85.5 xb=男 hx>=83 wy>=81.5 sx>=76.5 sw< 89.5\n#> R  8 [23%,0.13] sx>=85.5 wl>=86.5 ls< 95.5\n\n#进行k-折交叉检验k-fold cross validation\nlearn.rpart <- function(formula, train, test, ...) 
{\n    imodel_kfold <- rpart(formula, train) #模型训练\n    opt <- which.min(imodel_kfold$cptable[, \"xerror\"])\n    cp <- imodel_kfold$cptable[opt, \"CP\"]\n    imodel_kfold <- prune(imodel_kfold, cp = cp)\n    predicted_train <- predict(imodel_kfold, train, type = \"class\")\n    predicted_test <- predict(imodel_kfold, test, type = \"class\")\n    return(list(predicted_train = predicted_train,\n                predicted_test = predicted_test))\n}\nkfold_cross_validation(\n    formula = wlfk ~ .,\n    data = cjb,\n    kfolds = kfolds,\n    learner = \"learn.rpart\"\n)\n\n\n\n# RandomForest ------------------------------------------------------------\n\n\nlibrary(randomForest)\nset.seed(2012)\nimodel <- randomForest(wlfk ~ .,\n                       ntree = 25,\n                       data = cjb[train_set_idx, ])\npredicted_train <- predict(imodel,\n                           newdata = cjb[train_set_idx,],\n                           type = \"response\")\nMetrics::ce(cjb$wlfk[train_set_idx],\n            predicted_train)\n#>[1] 0.001848429\npredicted_test <- predict(imodel,\n                          newdata = cjb[-train_set_idx,],\n                          type = \"response\")\nMetrics::ce(cjb$wlfk[-train_set_idx],\n            predicted_test)\n#> [1] 0.1845494\n\n\nrf_ces <- sapply(1:500, function(x) {\n    set.seed(2012)\n    imodel <- randomForest(wlfk ~ .,\n                           ntree = x,\n                           data = cjb[train_set_idx, ])\n    predicted_train <- predict(imodel,\n                               newdata = cjb[train_set_idx,],\n                               type = \"response\")\n    Metrics::ce(cjb$wlfk[train_set_idx],\n                predicted_train)\n    #>[1] 0\n    predicted_test <- predict(imodel,\n                              newdata = cjb[-train_set_idx,],\n                              type = \"response\")\n    Metrics::ce(cjb$wlfk[-train_set_idx],\n                predicted_test)\n})\nwhich.min(rf_ces)\nplot(rf_ces, type = 
\"o\")\n\n#基于OOB的误分率\nimodel$confusion\n#>       理科 文科 class.error\n#> 理科  195   71   0.2669173\n#> 文科   54  222   0.1956522\n\n#进行k-折交叉检验k-fold cross validation\nlearn.randomForest <- function(formula, train, test, ...) {\n    imodel_kfold <- randomForest(formula, train, ...)\n    predicted_train <-\n        predict(imodel_kfold, train, type = \"response\")\n    predicted_test <- predict(imodel_kfold, test, type = \"response\")\n    return(list(predicted_train = predicted_train,\n                predicted_test = predicted_test))\n}\nkfold_cross_validation(\n    formula = wlfk ~ .,\n    data = cjb,\n    kfolds = kfolds,\n    learner = \"learn.randomForest\",\n    ntree = which.min(rf_ces)\n)\n\n\n\n# NaiveBayes --------------------------------------------------------------\n\nlibrary(e1071)\nimodel <- naiveBayes(wlfk ~ .,\n                     data = cjb[train_set_idx, ])\npredicted_train <- predict(imodel,\n                           newdata = cjb[train_set_idx,],\n                           type = \"class\")\nMetrics::ce(cjb$wlfk[train_set_idx], predicted_train)\n#> [1] 0.2920518\npredicted_test <- predict(imodel,\n                          newdata = cjb[-train_set_idx,],\n                          type = \"class\")\nMetrics::ce(cjb$wlfk[-train_set_idx], predicted_test)\n#> [1] 0.27897\n\n#进行k-折交叉检验k-fold cross validation\nlearn.naiveBayes <- function(formula, train, test, ...) 
{\n    imodel_kfold <- naiveBayes(formula, train)\n    predicted_train <-  predict(imodel_kfold, train, type = \"class\")\n    predicted_test <- predict(imodel_kfold, test, type = \"class\")\n    return(list(predicted_train = predicted_train,\n                predicted_test = predicted_test))\n}\nkfold_cross_validation(\n    formula = wlfk ~ .,\n    data = cjb,\n    kfolds = kfolds,\n    learner = \"learn.naiveBayes\"\n)\n\n\n# Logistic Regression -----------------------------------------------------\n\nlibrary(ggplot2)\n\n# 以下代码仅为复现课件中的动画，感兴趣的小伙伴可以了解一下\n# library(animation)\n# saveGIF(\n#     expr = {\n#         mov_frame <- 5 * (1:30)\n#         for (i in mov_frame) {\n#             x <- seq(-i, i, len = 1000)\n#             y <- 1 / (1 + exp(-x))\n#             p <- ggplot(data.frame(x, y), aes(x = x, y = y)) +\n#                 geom_line()\n#             if (i == head(mov_frame, 1) ||\n#                 i == tail(mov_frame, 1)) {\n#                 #开始和结束时多停留一会儿\n#                 lapply(1:5, function(x)\n#                     plot(p))\n#             }\n#             plot(p)\n#         }\n#     },\n#     movie.name = \"animation.gif\",\n#     convert = \"gm convert\",\n#     interval = 0.2\n# )\n# dev.off()\n#\n\n\n\nimodel <- glm(wlfk ~ .,\n              data = cjb[train_set_idx,],\n              family = binomial(link = \"logit\"))\npredicted_logit <- predict(imodel,\n                           newdata = cjb[train_set_idx,],\n                           type = \"response\")\npredicted_train <-\n    rep(levels(cjb$wlfk)[2], length(train_set_idx))\npredicted_train[predicted_logit < 0.5] <- levels(cjb$wlfk)[1]\nMetrics::ce(cjb$wlfk[train_set_idx], predicted_train)\n#> [1] 0.2181146\npredicted_logit <- predict(imodel,\n                           newdata = cjb[-train_set_idx, ],\n                           type = \"response\")\npredicted_test <-\n    rep(levels(cjb$wlfk)[2], nrow(cjb[-train_set_idx,]))\npredicted_test[predicted_logit < 0.5] <-\n    
levels(cjb$wlfk)[1]\nMetrics::ce(cjb$wlfk[-train_set_idx], predicted_test)\n#> [1] 0.1888412\n\n#找到最好的分隔阈值\nbest_threshold <- NA\nmin_err <- Inf\ncur_threshold <- 0.1\nfor (cur_threshold in seq(0.1, 0.9, by = 0.001)) {\n    predicted_test <-\n        rep(levels(cjb$wlfk)[2], nrow(cjb[-train_set_idx,]))\n    predicted_test[predicted_logit < cur_threshold] <-\n        levels(cjb$wlfk)[1]\n    cur_err <- Metrics::ce(cjb$wlfk[-train_set_idx],\n                           predicted_test)\n    if (cur_err < min_err) {\n        best_threshold <- cur_threshold\n        min_err <- cur_err\n    }\n}\nbest_threshold\n#> [1] 0.592\n\n#当然，也可以用下边这种写法\nthreshold_range <- seq(0.1, 0.9, by = 0.001)\nce_set <- sapply(threshold_range, function(cur_threshold) {\n    predicted_test <-\n        rep(levels(cjb$wlfk)[2], nrow(cjb[-train_set_idx,]))\n    predicted_test[predicted_logit < cur_threshold] <-\n        levels(cjb$wlfk)[1]\n    cur_err <- Metrics::ce(cjb$wlfk[-train_set_idx],\n                           predicted_test)\n})\n#最佳阈值\nthreshold_range[which.min(ce_set)]\n#相应的分类错误率\nmin(ce_set)\n\n#进行k-折交叉检验k-fold cross validation\nlearn.LogisticRegression <- function(formula, train, test, ...) 
{\n    dot_args <- list(...)\n    imodel_kfold <-\n        glm(formula, train, family = binomial(link = \"logit\"))\n    predicted_logit <-\n        predict(imodel_kfold, train, type = \"response\")\n    predicted_train <- rep(levels(cjb$wlfk)[2], nrow(train))\n    predicted_train[predicted_logit < dot_args[[\"best_threshold\"]]] <-\n        levels(cjb$wlfk)[1]\n    predicted_logit <-\n        predict(imodel_kfold, test, type = \"response\")\n    predicted_test <- rep(levels(cjb$wlfk)[2], nrow(test))\n    predicted_test[predicted_logit < dot_args[[\"best_threshold\"]]] <-\n        levels(cjb$wlfk)[1]\n    return(list(predicted_train = predicted_train,\n                predicted_test = predicted_test))\n}\nkfold_cross_validation(\n    formula = wlfk ~ .,\n    data = cjb,\n    kfolds = kfolds,\n    learner = \"learn.LogisticRegression\",\n    best_threshold = threshold_range[which.min(ce_set)]\n)\n\n\n# Artificial Neural Network -----------------------------------------------\n\nlibrary(nnet)\nset.seed(2012)\nimodel <- nnet(wlfk ~ .,\n               data = cjb[train_set_idx, ],\n               size = 7)\nnames(imodel)\n#> [1] \"n\"             \"nunits\"        \"nconn\"\n#> [4] \"conn\"          \"nsunits\"       \"decay\"\n#> [7] \"entropy\"       \"softmax\"       \"censored\"\n#> [10] \"value\"         \"wts\"           \"convergence\"\n#> [13] \"fitted.values\" \"residuals\"     \"lev\"\n#> [16] \"call\"          \"terms\"         \"coefnames\"\n#> [19] \"contrasts\"     \"xlevels\"\n\nimodel$n\n#> [1] 10  7  1\nimodel$wts\n#> [1]   -0.394367962    0.341672486   -0.305656476\n#> [4]    0.609244299    0.344983392    0.524696717\n#> [7]    0.049098761    0.577261671    0.553892391\n#> [79]    0.851107738    0.275935098   -0.237562349\n#> [82]    0.109386068    0.637609693   -2.774100396\n#> [85]    0.019783268\nimodel$fitted.values\n#     [,1]\n# 1   0.8048857\n# 2   0.2047307\n# 3   0.8048857\n#\n# 540 0.8048857\n# 541 0.2047307\n\npredicted_train <- 
predict(imodel,\n                           newdata = cjb[train_set_idx,],\n                           type = \"class\")\nMetrics::ce(cjb$wlfk[train_set_idx], predicted_train)\n#> [1] 0.1996303\npredicted_test <- predict(imodel,\n                          newdata = cjb[-train_set_idx,],\n                          type = \"class\")\nMetrics::ce(cjb$wlfk[-train_set_idx], predicted_test)\n#> [1] 0.1759657\n\n#神经网络参数的设置相对比较复杂\n#一般来讲，没有绝对的套路可循\n#我们当然可以写一些循环，来进行参数的选择\n#不过，类似于e1071::tune.nnet()已经替我们作了很多工作\n#下面，采用的是caret包中的方法\n#通过caret包中的grid搜索来进行参数选择\ntune_results <- e1071::tune.nnet(\n    wlfk ~ .,\n    data = cjb,\n    decay = c(0.01, 0.03, 0.1, 0.3, 0.6, 0.9),\n    size = 1:7\n)\n\nlibrary(caret)\nset.seed(2012)\nnn_grid <- expand.grid(size = c(1, 3, 7, 9),\n                       decay = c(0.01, 0.03, 0.1, 0.3, 0.6, 0.9))\n# nn_grid <- expand.grid(.decay = c(0.5, 0.1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7),\n#                        .size = c(3, 5, 10, 20))\nimodel <- train(\n    wlfk ~ .,\n    data = cjb,\n    method = \"nnet\",\n    maxit = 10000,\n    tuneGrid = nn_grid\n)\nimodel$bestTune\n#>    size decay\n#> 9    1  0.6\n#查看训练结果\nplot(imodel)\n\npredicted_train <- predict(imodel,\n                           newdata = cjb[train_set_idx,],\n                           type = \"raw\")\nMetrics::ce(cjb$wlfk[train_set_idx],\n            predicted_train)\n#> [1] 0.1697417\npredicted_test <- predict(imodel,\n                          newdata = cjb[-train_set_idx,],\n                          type = \"raw\")\nMetrics::ce(cjb$wlfk[-train_set_idx],\n            predicted_test)\n#> [1] 0.1896552\n\n#绘制神经网络\nlibrary(NeuralNetTools)\nimodel2 <-  nnet(\n    wlfk ~ .,\n    data = train_set,\n    decay = imodel$bestTune$decay,\n    size = imodel$bestTune$size,\n    maxit = 2000\n)\nimodel2$wts\nstr(imodel2)\nlibrary(NeuralNetTools)\nplotnet(\n    imodel2,\n    rel_rsc = c(1.8, 3),\n    circle_cex = 3,\n    cex_val = 0.75,\n    bord_col = \"lightblue\",\n    max_sp = 
TRUE\n)\n\n\n#进行k-折交叉检验k-fold cross validation\nlearn.nnet <- function(formula, train, test, ...) {\n    imodel_kfold <-  nnet(formula, train, ...)\n    predicted_train <-  predict(imodel_kfold, train, type = \"class\")\n    predicted_test <- predict(imodel_kfold, test, type = \"class\")\n    return(list(predicted_train = predicted_train,\n                predicted_test = predicted_test))\n}\n\nkfold_cross_validation(\n    formula = wlfk ~ .,\n    data = cjb,\n    kfolds = kfolds,\n    learner = \"learn.nnet\",\n    decay = imodel$bestTune$decay,\n    size = imodel$bestTune$size,\n    maxit = 2000\n)\n\n\n\n# Support Vector Machine --------------------------------------------------\n\nlibrary(kernlab)\nset.seed(2012)\nimodel <- ksvm(wlfk ~ .,\n               data = cjb[train_set_idx, ])\npredicted_train <- predict(imodel,\n                           newdata = cjb[train_set_idx,],\n                           type = \"response\")\nMetrics::ce(cjb$wlfk[train_set_idx], predicted_train)\n#> [1] 0.1497227\npredicted_test <- predict(imodel,\n                          newdata = cjb[-train_set_idx,],\n                          type = \"response\")\nMetrics::ce(cjb$wlfk[-train_set_idx], predicted_test)\n#> [1] 0.1759657\nimodel\n#当然也可以通过caret来进行调参\nlibrary(caret)\nsvm_grid <- expand.grid(sigma = 2 ^ (-10:4),\n                        C = -5:20)\nset.seed(2012)\nimodel <- train(\n    wlfk ~ .,\n    data = cjb[train_set_idx, ],\n    method = \"svmRadial\",\n    preProc = c(\"center\", \"scale\"),\n    tuneGrid = svm_grid\n)\nimodel$bestTune\n#>   sigma C\n#> 2  0.25 1\n\n#同样也可以对train的结果进行绘制\nplot(imodel)\n\n#进行k-折交叉检验k-fold cross validation\nlearn.svm <- function(formula, train, test, ...) 
{\n    imodel_kfold <-  ksvm(formula, train, ...)\n    predicted_train <-\n        predict(imodel_kfold, train, type = \"response\")\n    predicted_test <- predict(imodel_kfold, test, type = \"response\")\n    return(list(predicted_train = predicted_train,\n                predicted_test = predicted_test))\n}\nkfold_cross_validation(\n    formula = wlfk ~ .,\n    data = cjb,\n    kfolds = kfolds,\n    learner = \"learn.svm\",\n    C = imodel$bestTune$C,\n    gamma = imodel$bestTune$sigma\n)\n\n\n# Variable Importance -----------------------------------------------------\n\n#完成了模型训练、模型评估，故事基本告一段落\n#再回顾一下本讲开始所讲的featurePlot\n#进行完模型训练之后，咱们再通过变量重要性印证一下\n#变量重要性，有很多评价方法\n#既有 Model Specific Metrics，也有Model Independent Metrics\n#如果是采用caret框架进行训练的话，多种指标可选\n#具体请参阅\n#http://topepo.github.io/caret/variable-importance.html\n\nlibrary(randomForest)\nimodel <- randomForest(wlfk ~ ., data = cjb)\n#变量重要性的分析\nrandomForest::importance(imodel) %>%\n    as.data.frame() %>%\n    rownames_to_column(var = \"variables\") %>%\n    arrange(desc(MeanDecreaseGini)) %>%\n    mutate(variables = factor(variables,\n                              levels = variables)) %>%\n    ggplot(aes(x = variables,\n               y = MeanDecreaseGini,\n               fill = variables)) +\n    geom_bar(stat = \"identity\", width = 0.5) +\n    geom_text(aes(\n        y = MeanDecreaseGini * 1.02,\n        label = format(MeanDecreaseGini, digits = 4)\n    ))\n\n\n\n\n\n\n# Model Comparison --------------------------------------------------------\n\n\n#模型进行评估\nglobal_performance %>%\n    group_by(method, type) %>%\n    summarise(mean_error_rate = mean(error_rate)) %>%\n    arrange(type, mean_error_rate) %>%\n    ggplot(aes(\n        x = fct_inorder(method),\n        y = mean_error_rate,\n        fill = type\n    )) +\n    geom_bar(stat = \"identity\", position = \"dodge\") +\n    geom_text(aes(label = format(mean_error_rate, digits = 3)),\n              position = position_dodge(width = 1)) +\n    
scale_fill_manual(values = c(\"orange\", \"darkgrey\")) +\n    theme(axis.text.x = element_text(angle = 60, hjust = 1))\n\n\n\n#本实验中，为了减少小伙伴们熟悉问题背景本身的成本\n#再次使用了学生成绩这份数据\n#受数据本身的限制，也让我们错过了很多的精彩：\n#比如：\n#这份数据太干净，没有缺失值，也就不要通过mice::md.pattern()\n#之类的函数来观察缺失模式，或是通过近邻法等方法来填补缺失值\n#又如：我们面对的是类相对均衡的问题，文理科学生数大体相当\n#而实际问题中，也会有很多类不平衡的问题，这个时候可能专门\n#需要对数据、算法进行处理，评估指标也不能用简单的正确率来衡量\n#再比如：我们的数据量相对较少，没有涉及到复杂数据处理\n\n#分类与回归（实际上本讲只是涉及到分类，不过二者本质一致）到此结束\n#代码中，算法原理等阐述较少，请小伙伴们参照PPT讲义，\n#或是相应的机器学习/数据挖掘教材\n#当然，几乎所有的包、函数的帮助文档中，都列举了相应的参考文献，\n#小伙伴们可自行参考\n\n#分类与回归算法，其体量应该是数以百计的，\n#caret包中列举了百余种算法\n#本讲中，只是列举了比较经典的集中。有很多算法并未考虑纳入，\n#比如：\n#MASS::lda()\n#adabag::bagging()\n#adabag::boosting()\n#caretEnsemble::caretStack\n#xgboost::xgboost\n#即便是演示过的算法，参数调优过程也显得比较粗糙\n#更多的精彩，由小伙伴们自行探索吧\n#毕竟，这份代码只是一个引导性的参考，\n#并不是可以简单套用的标准模板\n\n\n\n# The End ^-^ -------------------------------------------------------------\n"
  },
  {
    "path": "07_方以类聚、物以群分.R",
    "content": "\n\n# 07_方以类聚、物以群分 ---------------------------------------------------\n\n\n#前一实验，分类与回归，是有监督学习的代名词\n#本实验，则主要是聚焦于无监督学习\n#无监督学习涉及到特征降维、聚类分析等\n#本实验主体内容是聚类分析\n\n#日常生活中，我们都说“物以类聚、人以群分”\n#不过考虑到区分对象的普遍性\n#我们还是从《易传》中找了一个成语来概括我们的主题：\n#方以类聚、物以群分\n\n#同样，为了减少熟悉问题情境的时间和精力\n#和前述其他实验一样，\n#本实验数据依然是学生成绩\n\n# Data Import -------------------------------------------------------------\n\n#加载数据\nrm(list = ls())\nlibrary(tidyverse)\nlibrary(magrittr)\nlibrary(GGally)\ncjb_url <- \"data/cjb.csv\"\ncjb <- read_csv(cjb_url,\n                locale = locale(encoding = \"CP936\"))\ncjb %<>%\n    mutate(zcj = rowSums(.[4:12])) %>%\n    filter(zcj != 0) %>% #剔除脏数据\n    mutate_at(vars(xb, wlfk), factor) #类型转换\n\n\n#对于聚类而言，主要是观察数据空间的结构\n#这里的空间结构，主要就是距离结构\n#拿到数据之后，同样是进行数据点散布的观测\n#实际上，对于数据空间的分析，\n#并不像其他一些数学领域的XX空间那样，\n#具有相对完善的理论体系\n#整个机器学习/数据挖掘在数理基础方面还有所欠缺\n#目前对于数据空间的结构分析，典型的还是距离结构\n\n\n# Distance ----------------------------------------------------------------\n\n#聚类主要是考察数据点之间的距离关系\n#在R里边，最常用的是dist()函数\n#可以求取欧氏距离、明氏距离等\n#但不能求取混合类型数据的距离\n#先看一个简单的示例\nlibrary(cluster)\nartificial_data <- data.frame(x = as.factor(c(1, 1, 4, 1)),\n                              y = c(2, 3, 7, 2),\n                              z = c(1, 3, 1, 1))\ndist(artificial_data[, 2:3],\n     method = \"euclidean\",\n     diag = TRUE,\n     upper = TRUE)\ndist(artificial_data[, 2:3],\n     method = \"maximum\",\n     diag = TRUE,\n     upper = TRUE)\ndist(\n    artificial_data[, 2:3],\n    method = \"minkowski\",\n    diag = TRUE,\n    upper = TRUE,\n    p = 1\n)\ndist(artificial_data[, 2:3],\n     method = \"manhattan\",\n     diag = TRUE,\n     upper = TRUE)\n#求取混合类型的距离\nlibrary(cluster)\nas.matrix(daisy(artificial_data))\n\n#若只考虑各科成绩，\n#观察各数据点（一个同学一个数据点）之间的距离\ncj <- cjb[, 4:12]\ncj_matri <- as.matrix(cj)\ncj_dist <- dist(cj_matri)\n#距离可视化\nlibrary(\"factoextra\")\nfviz_dist(\n    cj_dist,\n    order = FALSE,\n    gradient = list(low = \"#00AFBB\",\n                    mid = \"white\",\n                    high = 
\"#FC4E07\"),\n    show_labels = FALSE\n)\n#也可以考虑性别\nlibrary(cluster)\ncj_dist_plus <- daisy(cjb[, 3:12])\nfviz_dist(\n    cj_dist_plus,\n    order = FALSE,\n    gradient = list(low = \"#00AFBB\",\n                    mid = \"white\",\n                    high = \"#FC4E07\"),\n    show_labels = FALSE\n)\n\n\n# MDS ---------------------------------------------------------------------\n\n#多维标度分析\n#将高维空间的距离关系，映射到二维空间\ncj_mds <- cmdscale(d = cj_dist, k = 2)\nlibrary(tidyverse)\ncj_mds %>%\n    as.data.frame() %>%\n    mutate(name = cjb$xm,\n           type = cjb$wlfk) %>%\n    setNames(c(\"x\", \"y\", \"name\", \"type\")) %>%\n    ggplot(aes(x = x, y = y)) +\n    geom_text(aes(label = name, colour = type),\n              size = 3,\n              alpha = 0.75)\n#当然也可以增加性别的因素\ncj_mds_plus <- cmdscale(d = cj_dist_plus, k = 2)\nlibrary(tidyverse)\ncj_mds_plus %>%\n    as.data.frame() %>%\n    mutate(name = cjb$xm,\n           type = cjb$wlfk,\n           sex = cjb$xb) %>%\n    setNames(c(\"x\", \"y\", \"name\", \"type\", \"sex\")) %>%\n    ggplot(aes(x = x, y = y)) +\n    geom_text(aes(label = name, colour = type),\n              size = 3,\n              alpha = 0.75)\n\n\n# Hopkins -----------------------------------------------------------------\n\n#数据能聚类么？\n#霍普金斯统计量\n#对数据进行聚类，有一个逻辑前提：\n#数据不是均匀分布的\n#而是呈现一定的模式\n#这里的模式，就是表现为数据是倾斜的，而非均匀的\nlibrary(clustertend)\nset.seed(2012)\nscores <- cjb %>%\n    select(yw:sw)\nn <- floor(nrow(cjb) * 0.05)\nhopkins_100 <- unlist(replicate(100,  hopkins(scores, n)))\nmean(hopkins_100)\n#> [1] 0.1577968\nggplot(data.frame(H = hopkins_100), aes(x = factor(0), y = H)) +\n    geom_boxplot(width = 0.5) +\n    geom_rug(position = \"jitter\",\n             sides = \"b\") +\n    coord_flip()\n#取值偏向于0.5时，是均匀的\n#偏向于0时，是倾斜的\n#由此可见，目前这份数据进行聚类还是可行的\n\n\n# k-means -----------------------------------------------------------------\n\n# #以下代码，仅为复现课件中的动画，感兴趣的小伙伴可以了解\n# #先直观展示一下kmeans的迭代过程\n# #小伙伴们也可以如法炮制\n# #写一些简单版的算法，有助于理解算法原理\n# library(animation)\n# 
library(deldir)\n# saveGIF(\n#     expr = {\n#         data(\"iris\")\n#         set.seed(2012)\n#         init_centers_idx <- sample(150, 3)\n#         init_centers <-\n#             iris[init_centers_idx, c(\"Petal.Length\", \"Petal.Width\")]\n#         voronoi <- deldir(init_centers[, 1], init_centers[, 2]) #对空间进行划分\n#         cur_centers <- init_centers\n#         old_centers <-\n#             -cur_centers #其实可以是和cur_centers长宽相同的任意data.frame,但里边的值不能相同\n#         old_centers == cur_centers\n#         repeat_first <- TRUE\n#         while (any(old_centers != cur_centers)) {\n#             #新的中心和原来的中心是否完全一样\n#             #将点分派至不同的中心\n#             Species_type <-\n#                 apply(iris[, c(\"Petal.Length\", \"Petal.Width\")], 1, function(x) {\n#                     which.min(apply(cur_centers[, c(\"Petal.Length\", \"Petal.Width\")], 1, function(y) {\n#                         sqrt(sum((x - y) ^ 2))\n#                     }))\n#                 })\n#             iris$Species <-\n#                 c(\"setosa\", \"versicolor\", \"virginica\")[Species_type]\n#             voronoi <-\n#                 deldir(cur_centers$Petal.Length, cur_centers$Petal.Width)\n#             p <- ggplot() +\n#                 geom_point(data = iris,\n#                            aes(\n#                                x = Petal.Length,\n#                                y = Petal.Width,\n#                                colour = Species\n#                            )) +\n#                 geom_point(\n#                     data = cur_centers,\n#                     aes(\n#                         x = Petal.Length,\n#                         y = Petal.Width,\n#                         colour = \"red\"\n#                     ),\n#                     size = 3\n#                 ) +\n#                 geom_segment(\n#                     data = voronoi$dirsgs,\n#                     aes(\n#                         x = x1,\n#                         y = y1,\n#                         
xend = x2,\n#                         yend = y2\n#                     ),\n#                     size = 1.2,\n#                     linetype = 1,\n#                     color = \"red\"\n#                 ) +\n#                 coord_fixed() + #固定长宽比例，否则看不出垂直的效果\n#                 theme(legend.position = \"none\")\n#\n#             plot(p)\n#             if (repeat_first) {\n#                 plot(p)\n#                 plot(p)\n#                 plot(p)\n#                 repeat_first <- FALSE\n#             }\n#\n#\n#             old_centers <- cur_centers\n#             #重新计算各组的中心\n#             cur_centers <- iris %>%\n#                 group_by(Species) %>%\n#                 summarise(\n#                     Petal.Length = mean(Petal.Length),\n#                     Petal.Width = mean(Petal.Width)\n#                 ) %>%\n#                 mutate(Species = NULL)\n#         }\n#     },\n#     movie.name = \"animation.gif\",\n#     convert = \"gm convert\",\n#     interval = 1,\n#     ani.width = diff(range(iris$Petal.Length)) * 100,\n#     ani.height = diff(range(iris$Petal.Width)) * 100\n# )\n\nscores <- cjb %>%\n    select(yw:sw)\n#stats包中的kmeans()函数\nset.seed(2012)\nimodel <- kmeans(scores,\n                 centers = 2)\nnames(imodel)\n#> [1] \"cluster\"      \"centers\"      \"totss\"\n#> [4] \"withinss\"     \"tot.withinss\" \"betweenss\"\n#> [7] \"size\"         \"iter\"         \"ifault\"\n\nimodel$cluster\n#> [1] 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1\n#> [24] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n#> [47] 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1\n#> [714] 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2\n#> [737] 2 2 2 2 2 2 2 2 2 1 2 2 2 1 1 1 1 1 1 1 1 1 1\n#> [760] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n\nimodel$centers\n#>    yw       sx       wy       zz       ls\n#> 1 85.07616 77.06623 83.24172 90.54967 84.85430\n#> 2 88.85169 92.03178 90.24153 93.46822 91.88559\n#>    dl       wl       hx       sw\n#> 1 90.08940 71.22185 85.64901 
79.58609\n#> 2 94.91949 87.58475 95.54661 90.72034\n\nimodel$totss\n#> [1] 431975.9\nimodel$withinss\n#> [1] 168392.4 105535.6\nimodel$tot.withinss\n#> [1] 273928\nimodel$betweenss\n#> [1] 158047.9\n\nimodel$size\n#> [1] 472 302\n\ncluster_idx <- imodel$cluster\n\n#手工实现一次代码\nglobal_center <- apply(scores, 2, mean)\ntotal_SS <- sum(apply(scores, 1, function(x) {\n    sum((x - global_center) ^ 2)\n}))\ntotal_SS\n#> [1] 431975.9\n\n\nlibrary(fpc)\nkmeans_results <- kmeansruns(scores,\n                             criterion = \"asw\")\nkmeans_results\n#> Available components:\n#>\n#>   [1] \"cluster\"      \"centers\"      \"totss\"\n#> [4] \"withinss\"     \"tot.withinss\" \"betweenss\"\n#> [7] \"size\"         \"iter\"         \"ifault\"\n#> [10] \"crit\"         \"bestk\"\n\nkmeans_results$crit\n#> [1] 0.0000000 0.3330389 0.2490188 0.2453507\n#> [5] 0.1889768 0.1652665 0.1603259 0.1579961\n#> [9] 0.1552047 0.1401957\n\nplot(kmeans_results$crit, type = \"o\")#> 后续有更好的展示方式，当然本质上是一样的\n\nkmeans_results$bestk\n#> [1] 2\n\nlibrary(factoextra)\nfviz_nbclust(scores,\n             kmeans,\n             method = \"silhouette\",\n             k.max = 20) +\n    geom_vline(xintercept = 2, linetype = 2)\n\n#绘制聚类效果图\nlibrary(factoextra)\nfviz_cluster(imodel,\n             data = scores,\n             ellipse.type = \"convex\") +\n    theme_minimal()\n\nrequire(cluster)\nscores_dist <- dist(scores)\nimodel2 <- kmeans(scores, 2)\ncluster_idx2 <- imodel2$cluster\n#计算轮廓系数\nkmeans_k2_silhouette <- silhouette(cluster_idx2, scores_dist)\n#绘制轮廓系数\nfviz_silhouette(kmeans_k2_silhouette)\nimodel3 <- kmeans(scores, 3)\ncluster_idx3 <- imodel3$cluster\nk3_silhouette <- silhouette(cluster_idx3, scores_dist)\nfviz_silhouette(k3_silhouette)\n\n\nfviz_cluster(\n    imodel,\n    data = scores,\n    palette = c(\"#2E9FDF\", \"#00AFBB\"),\n    ellipse.type = \"euclid\",\n    # Concentration ellipse\n    star.plot = TRUE,\n    # Add segments from centroids to items\n    #repel = TRUE, # Avoid label 
overplotting (slow)\n    ggtheme = theme_minimal()\n)\n\nmin(Metrics::ce(cjb$wlfk,\n                c(\"理科\", \"文科\")[imodel$cluster]),\n    1 - Metrics::ce(cjb$wlfk,\n                    c(\"理科\", \"文科\")[imodel$cluster]))\n#> [1] 0.3540052\n\n#是特征越多越好么？\n#未必如此\nset.seed(2012)\nimodel <- kmeans(scores[, c(\"sw\", \"wl\", \"sx\")],\n                 centers = 2)\ntable(cjb$wlfk, c(\"文科\", \"理科\")[imodel$cluster])\nmin(Metrics::ce(cjb$wlfk,\n                c(\"理科\", \"文科\")[imodel$cluster]),\n    1 - Metrics::ce(cjb$wlfk,\n                    c(\"理科\", \"文科\")[imodel$cluster]))\n#> [1] 0.3294574\n\nMetrics::ce(cjb$wlfk, c(\"理科\", \"文科\")[imodel$cluster])\nimodel$centers\n\n\n# Hierarchical Clustering -------------------------------------------------\n\n#Demo\nselected_students <- c(\"伊礼贤\", \"鲁孟秋\", \"焦金音\", \"宁琦\", \"赖旺\",\n                       \"于知平\", \"方顺\", \"谭思缘\", \"僪福星\", \"尚玉芳\")\nscores <- cjb %>%\n    filter(xm %in% selected_students) %>%\n    select(xm, yw:sw) %>%\n    column_to_rownames(var = \"xm\")\nrow.names(scores)\n#计算距离矩阵\ndemo_dist <- dist(scores)\n#利用hclust进行聚类\nimodel <- hclust(demo_dist)\nimodel\n#>\n#> Call:\n#>   hclust(d = demo_dist)\n#>\n#> Cluster method   : complete\n#> Distance         : euclidean\n#> Number of objects: 10\nnames(imodel)\n#> [1] \"merge\"       \"height\"      \"order\"       \"labels\"\n#> [5] \"method\"      \"call\"        \"dist.method\"\nimodel$merge\n#>       [,1] [,2]\n#> [1,]   -7   -8\n#> [2,]   -6   -9\n#> [3,]    1    2\n#> [4,]   -2  -10\n#> [5,]   -3    4\n#> [6,]   -5    5\n#> [7,]   -1    6\n#> [8,]   -4    7\n#> [9,]    3    8\n\nmin(dist(scores))\nimodel$height\n#> [1]   4.000000\n#> [2]   5.291503\n#> [3]   7.937254\n#> [4]  24.819347\n#> [5]  29.933259\n#> [6]  41.073106\n#> [7]  44.068129\n#> [8]  76.360985\n#> [9] 134.000000\nimodel$height\n#> [1]   4.000000   5.291503   7.937254  24.819347\n#> [5]  29.933259  41.073106  44.068129  76.360985\n#> [9] 134.000000\nsort(dist(scores))\n#> [1]   
4.000000   5.196152   5.291503   5.567764\n#> [5]   7.000000   7.937254  24.819347  26.888659\n#> [9]  27.092434  29.933259  32.572995  36.891733\n#> [13]  38.639358  41.073106  43.347434  44.068129\n#> [17]  54.194096  57.271284  63.229740  70.285134\n#> [21]  76.360985  76.674637  77.485483  77.833155\n#> [25]  79.517294  81.492331  82.042672  82.788888\n#> [29]  83.928541  90.752410  90.901045  91.010988\n#> [33]  91.021975  91.656969  92.238820  92.293012\n#> [37]  93.616238 100.682670 100.935623 101.113797\n#> [41] 102.815369 132.461315 133.540256 133.787144\n#> [45] 134.000000\nimodel$order\n#> [1]  7  8  6  9  4  1  5  3  2 10\nimodel$labels\n#> [1] \"于知平\" \"僪福星\" \"谭思缘\" \"赖旺\"   \"尚玉芳\"\n#> [6] \"焦金音\" \"伊礼贤\" \"鲁孟秋\" \"宁琦\"   \"方顺\"\nimodel$method\n#>[1] \"complete\"\nimodel$call\n#>hclust(d = demo_dist)\nimodel$dist.method\n#>[1] \"euclidean\"\n\nimodel$order <- rev(imodel$order)\nplot(imodel, hang = -1)\n\ncluster_idx <- cutree(imodel, k = 2)\n#> 于知平 僪福星 谭思缘\n#> 1      1      1\n#> 赖旺 尚玉芳 焦金音\n#> 1      1      2\n#> 伊礼贤 鲁孟秋   宁琦\n#> 2      2      2\n#> 方顺\n#> 1\nplot(imodel, hang = -1)\nrect.hclust(imodel, k = 2)\n\nlibrary(factoextra)\nres <- hcut(\n    dist(scores),\n    k = 2,\n    hc_func = \"hclust\",\n    hc_method = \"complete\",\n    hc_metric = \"euclidean\",\n    stand = FALSE,\n    graph = FALSE\n)\nfviz_dend(\n    res,\n    rect = TRUE,\n    cex = 0.75,\n    horiz = TRUE,\n    type = \"rectangle\",\n    k_colors = c(\"#CD534CFF\", \"#0073C2FF\")\n)\n\nrequire(cluster)\nscores <- cjb %>%\n    select(yw:sw)\nscores_dist <- dist(scores)\nimodel <- hclust(scores_dist, method = \"ward.D\")\ncluster_idx <- cutree(imodel, k = 2)\n#计算轮廓系数\nkmeans_k2_silhouette <- silhouette(cluster_idx, scores_dist)\n#绘制轮廓系数\nfviz_silhouette(kmeans_k2_silhouette)\ncluster_idx <- cutree(imodel, k = 3)\nk3_silhouette <- silhouette(cluster_idx, scores_dist)\nfviz_silhouette(k3_silhouette)\n\n\nfviz_nbclust(scores,\n             FUNcluster = hcut,\n             method 
= \"silhouette\",\n             k.max = 20) +\n    geom_vline(xintercept = 2, linetype = 2)\n\n\nimodel <- hclust(scores_dist, method = \"ward.D\")\ncluster_idx <- cutree(imodel, k = 2)\n(ic_metric <- min(Metrics::ce(cjb$wlfk,\n                              c(\"理科\", \"文科\")[cluster_idx]),\n                  1 - Metrics::ce(cjb$wlfk,\n                                  c(\"理科\", \"文科\")[cluster_idx])))\n#> [1] 0.2860892\n#分类准确率挺进70%大关\n\n\n# About Model Innovation --------------------------------------------------\n\n#了解完以上的基本原理之后，\n#小伙伴们也应该有算法创造者的角度，\n#对其开展研究\nlibrary(DMwR)\nout_rank <- outliers.ranking(scores_dist,\n                             clus = list(dist = \"euclidean\",\n                                         alg = \"hclust\", meth = \"ward.D\"))\ncjb %>%\n    arrange(desc(out_rank$prob.outliers)) %>%\n    View()\n#与箱线图异常值检测作比较\n(outliers <- boxplot.stats(cjb$zcj)$out)\noutliers_idx <- which(cjb$zcj %in% outliers)\nView(cjb[outliers_idx,])\n\nout_rank$prob.outliers\ncjb[order(out_rank$prob.outliers, decreasing = TRUE)[1:10],] %>%\n    View()\n(outliers <- boxplot.stats(cjb$zcj)$out)\noutliers_idx <- which(cjb$zcj %in% outliers)\nView(cjb[outliers_idx,])\n\n\n\n# The End ^-^ -------------------------------------------------------------\n"
  },
  {
    "path": "LICENSE",
    "content": "The MIT License (MIT)\n\nCopyright (c) 2012-2022 Xinbo Ai\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "\n# Data Analytics with R\n\nThis repository contains code and data for the course **Data Analytics with R**, instructed by *Xinbo Ai*, at Beijing University of Posts and Telecommunications.   \nThe corresponding MOOC is available at [xuetangx](https://www.xuetangx.com/course/buptP08541002314/19318369)\n\n\n---\n\n本库托管了北京邮电大学《R语言数据分析》课程代码及数据。   \n本课程已在[学堂在线](https://www.xuetangx.com/course/buptP08541002314/19318369)上线\n\n\n---\n\n\nR语言数据分析  \n> 上部：问道  \n>> [![](https://img.shields.io/badge/%E7%AC%AC1%E7%AB%A0-%E6%B0%94%E8%B1%A1%E4%B8%87%E5%8D%83%E3%80%81%E6%95%B0%E4%BB%A5%E7%AD%89%E8%A7%82-inactive)](https://github.com/byaxb/RDataAnalytics)  \n>> [![](https://img.shields.io/badge/%E7%AC%AC2%E7%AB%A0-%E6%89%80%E8%B0%93%E5%AD%A6%E4%B9%A0%E3%80%81%E5%BD%92%E7%B1%BB%E8%80%8C%E5%B7%B2-inactive)](https://github.com/byaxb/RDataAnalytics)  \n>> [![](https://img.shields.io/badge/%E7%AC%AC3%E7%AB%A0-%E6%A0%BC%E8%A8%80%E8%81%94%E7%92%A7%E8%AF%9D%E5%AD%A6%E4%B9%A0-inactive)](https://github.com/byaxb/RDataAnalytics)  \n>> [![](https://img.shields.io/badge/%E7%AC%AC4%E7%AB%A0-%E6%BA%90%E4%BA%8E%E6%95%B0%E5%AD%A6%E3%80%81%E5%BD%92%E4%BA%8E%E5%B7%A5%E7%A8%8B-inactive)](https://github.com/byaxb/RDataAnalytics)  \n>\n> 中部：执具  \n>> [![](https://img.shields.io/badge/%E7%AC%AC5%E7%AB%A0-%E5%B7%A5%E6%AC%B2%E5%96%84%E5%85%B6%E4%BA%8B%E3%80%81%E5%BF%85%E5%85%88%E5%88%A9%E5%85%B6%E5%99%A8-inactive)](https://github.com/byaxb/RDataAnalytics)  \n>> [![](https://img.shields.io/badge/%E7%AC%AC6%E7%AB%A0-%E5%9F%BA%E7%A1%80%E7%BC%96%E7%A8%8B-blue)](https://github.com/byaxb/RDataAnalytics/blob/master/02_%E5%9F%BA%E7%A1%80%E7%BC%96%E7%A8%8B.R)——用别人的包和函数讲述自己的故事  \n>>> 6.1编程环境  \n>>> 6.2Mini案例  \n>>> 6.3站在巨人的肩膀上  \n>>> 6.4控制流  \n>>> 6.5函数（I）  \n>>> 6.6函数（II）  \n>>  \n>> [![](https://img.shields.io/badge/%E7%AC%AC7%E7%AB%A0-%E6%95%B0%E6%8D%AE%E5%AF%B9%E8%B1%A1-green)](https://github.com/byaxb/RDataAnalytics/blob/master/03_%E6%95%B0%E6%8D%AE%E5%AF%B9%E8%B1%A1.R)——面向数据对象学习R语言  \n>>> 
7.1向量与因子（I）  \n>>> 7.2向量与因子（II）  \n>>> 7.3矩阵与数组（I）  \n>>> 7.4矩阵与数组（II）  \n>>> 7.5列表与数据框（I）  \n>>> 7.6列表与数据框（II）  \n>>\n>> [![](https://img.shields.io/badge/%E7%AC%AC8%E7%AB%A0-%E4%BA%BA%E4%BA%BA%E9%83%BD%E7%88%B1tidyverse-inactive)](https://github.com/byaxb/RDataAnalytics)  \n>> [![](https://img.shields.io/badge/%E7%AC%AC9%E7%AB%A0-%E6%9C%80%E7%BE%8E%E4%B8%8D%E8%BF%87%E6%95%B0%E6%8D%AE%E6%A1%86-inactive)](https://github.com/byaxb/RDataAnalytics)  \n>  \n> 下部：博术  \n>> [![](https://img.shields.io/badge/%E7%AC%AC10%E7%AB%A0-%E8%A7%82%E6%95%B0%E4%BB%A5%E5%BD%A2-yellow)](https://github.com/byaxb/RDataAnalytics/blob/master/04_%E8%A7%82%E6%95%B0%E4%BB%A5%E5%BD%A2.R)  \n>>> 10.1一维数据空间（I）  \n>>> 10.2一维数据空间（II）  \n>>> 10.3二维数据空间  \n>>> 10.4高维数据空间  \n>>  \n>> [![](https://img.shields.io/badge/%E7%AC%AC11%E7%AB%A0-%E7%9B%B8%E9%9A%8F%E7%9B%B8%E4%BC%B4%E3%80%81%E8%B0%93%E4%B9%8B%E5%85%B3%E8%81%94-important)](https://github.com/byaxb/RDataAnalytics/blob/master/05_%E7%9B%B8%E9%9A%8F%E7%9B%B8%E4%BC%B4%E3%80%81%E8%B0%93%E4%B9%8B%E5%85%B3%E8%81%94.R)  \n>>> 11.1关联规则（I）  \n>>> 11.2关联规则（II）  \n>>> 11.3关联规则（III）  \n>>  \n>> [![](https://img.shields.io/badge/%E7%AC%AC12%E7%AB%A0-%E6%97%A2%E6%98%AF%E4%B8%96%E9%97%B4%E6%B3%95%E3%80%81%E8%87%AA%E5%BD%93%E6%9C%89%E5%88%86%E5%88%AB-blueviolet)](https://github.com/byaxb/RDataAnalytics/blob/master/06_%E6%97%A2%E6%98%AF%E4%B8%96%E9%97%B4%E6%B3%95%E3%80%81%E8%87%AA%E5%BD%93%E6%9C%89%E5%88%86%E5%88%AB.R)  \n>>> 12.1近邻法（I）  \n>>> 12.2近邻法（II）  \n>>> 12.3决策树（I）  \n>>> 12.4决策树（II）  \n>>> 12.5随机森林  \n>>> 12.6朴素贝叶斯  \n>>> 12.7逻辑斯蒂回归  \n>>> 12.8人工神经网络（I）  \n>>> 12.9人工神经网络（II）  \n>>> 12.10支持向量机  \n>>  \n>> [![](https://img.shields.io/badge/%E7%AC%AC13%E7%AB%A0-%E6%96%B9%E4%BB%A5%E7%B1%BB%E8%81%9A%E3%80%81%E7%89%A9%E4%BB%A5%E7%BE%A4%E5%88%86-yellowgreen)](https://github.com/byaxb/RDataAnalytics/blob/master/07_%E6%96%B9%E4%BB%A5%E7%B1%BB%E8%81%9A%E3%80%81%E7%89%A9%E4%BB%A5%E7%BE%A4%E5%88%86.R)  \n>>> 13.1划分方法  \n>>> 13.2层次方法  \n>>\n>> 
[![](https://img.shields.io/badge/%E7%AC%AC14%E7%AB%A0-%E5%BA%90%E5%B1%B1%E7%83%9F%E9%9B%A8%E6%B5%99%E6%B1%9F%E6%BD%AE-inactive)](https://github.com/byaxb/RDataAnalytics)  \n\n"
  },
  {
    "path": "data/cjb.csv",
    "content": "xm,bj,xb,yw,sx,wy,zz,ls,dl,wl,hx,sw,wlfk\r\n,1101,Ů,94,82,96,97,97,98,95,94,88,Ŀ\r\n,1101,,87,94,89,95,94,94,90,90,89,Ŀ\r\n潭,1101,,92,79,86,98,95,96,89,94,87,Ŀ\r\n̿,1101,Ů,91,84,96,93,97,94,82,90,83,Ŀ\r\nǿ,1101,,85,92,82,93,87,88,95,94,93,Ŀ\r\nտ,1101,Ů,92,82,85,91,90,92,82,98,90,Ŀ\r\n,1101,Ů,88,72,86,94,87,88,89,98,94,Ŀ\r\nΤ,1101,,81,89,87,97,94,96,81,88,83,Ŀ\r\n,1101,Ů,88,77,95,94,84,94,87,94,82,Ŀ\r\n,1101,Ů,94,81,88,91,85,98,81,88,88,Ŀ\r\n,1101,Ů,87,83,92,91,86,94,84,90,87,Ŀ\r\nۢ,1101,,88,82,91,89,81,98,89,98,75,Ŀ\r\n,1101,,79,84,91,87,91,87,85,96,90,Ŀ\r\n,1101,,78,81,83,86,88,98,85,90,99,Ŀ\r\nɣ,1101,,83,85,82,91,88,88,88,86,93,Ŀ\r\n,1101,Ů,80,84,91,91,85,100,73,90,90,Ŀ\r\nӢ,1101,Ů,89,71,90,95,91,96,84,88,80,Ŀ\r\n,1101,Ů,89,84,85,97,84,86,72,96,89,Ŀ\r\nƼ,1101,Ů,91,78,89,94,90,90,89,90,71,Ŀ\r\n־,1101,,85,79,88,87,78,93,84,96,87,Ŀ\r\n뻪,1101,,90,61,82,93,90,96,76,96,92,Ŀ\r\n,1101,Ů,87,74,86,95,81,88,85,96,84,Ŀ\r\nʢ,1101,Ů,87,88,89,95,87,84,85,80,80,Ŀ\r\n,1101,Ů,90,70,88,91,84,98,89,86,78,Ŀ\r\nʫ,1101,Ů,84,55,95,90,86,100,80,96,86,Ŀ\r\n¦,1101,Ů,90,92,86,90,83,86,85,80,79,Ŀ\r\nƽʩ,1101,Ů,85,78,87,87,71,95,75,98,87,Ŀ\r\nľ,1101,Ů,85,83,86,95,87,92,69,88,78,Ŀ\r\nʱ,1101,Ů,82,78,86,91,85,92,81,88,79,Ŀ\r\n԰,1101,,84,74,83,93,98,88,64,86,90,Ŀ\r\n,1101,Ů,85,70,80,93,91,88,83,94,76,Ŀ\r\n,1101,Ů,81,70,81,80,93,92,89,86,87,Ŀ\r\nڻ۶,1101,Ů,86,73,88,92,88,86,68,86,80,Ŀ\r\nշ,1101,Ů,80,65,86,98,86,98,90,76,68,Ŀ\r\n,1101,Ů,86,74,88,85,69,90,79,90,78,Ŀ\r\n,1101,,84,72,65,93,95,98,66,82,81,Ŀ\r\nСٻ,1101,Ů,80,64,89,92,70,92,78,90,80,Ŀ\r\n,1101,,81,71,80,92,96,88,71,72,79,Ŀ\r\n,1101,Ů,82,81,85,88,78,98,64,82,72,Ŀ\r\n˺,1101,,79,74,75,95,97,92,56,76,83,Ŀ\r\n,1101,Ů,89,70,74,91,88,92,62,88,73,Ŀ\r\n,1101,Ů,92,69,85,91,84,88,57,76,82,Ŀ\r\nС,1101,Ů,77,73,81,94,84,88,63,80,79,Ŀ\r\nҦ,1101,Ů,78,60,87,91,83,70,79,82,82,Ŀ\r\n,1101,,83,65,90,94,86,86,58,76,66,Ŀ\r\n۪,1101,Ů,77,68,85,83,77,86,69,74,72,Ŀ\r\n⽭,1101,Ů,86,71,77,83,69,92,62,84,67,Ŀ\r\nС,1101,,77,60,83,81,84,88,70,78,69,Ŀ\r\n,110
1,Ů,83,59,77,90,81,78,68,70,78,Ŀ\r\n,1101,Ů,84,57,73,92,84,92,53,64,80,Ŀ\r\n,1101,,81,59,77,83,69,90,56,92,68,Ŀ\r\n֪ƽ,1101,,70,67,74,92,73,88,40,52,58,Ŀ\r\n,1102,Ů,88,91,88,95,85,96,85,100,94,Ŀ\r\n,1102,Ů,91,82,91,96,91,94,90,92,90,Ŀ\r\nŽ,1102,,88,83,84,94,91,88,96,98,92,Ŀ\r\n,1102,Ů,91,82,76,96,94,92,87,96,96,Ŀ\r\nĩ,1102,Ů,92,76,87,96,95,98,84,94,88,Ŀ\r\n,1102,,86,75,90,94,92,98,89,92,92,Ŀ\r\nȫ,1102,Ů,90,81,88,94,88,93,87,96,90,Ŀ\r\nǮӢ,1102,Ů,87,85,82,95,90,92,88,98,89,Ŀ\r\n,1102,,89,94,85,96,89,94,90,86,79,Ŀ\r\nȴ,1102,Ů,91,82,93,97,85,94,78,94,86,Ŀ\r\nƳ,1102,,85,87,82,94,94,90,83,92,92,Ŀ\r\nŰ,1102,,86,87,87,97,83,100,72,98,88,Ŀ\r\n,1102,Ů,85,84,93,90,89,94,74,98,88,Ŀ\r\nӢ,1102,Ů,81,88,89,93,85,100,81,98,80,Ŀ\r\n˳,1102,,80,96,87,96,97,92,69,92,85,Ŀ\r\n,1102,Ů,81,87,95,76,84,96,85,94,94,Ŀ\r\n۬,1102,,84,89,84,97,88,96,69,92,92,Ŀ\r\nΣ,1102,Ů,90,87,90,94,88,92,77,92,80,Ŀ\r\n,1102,Ů,86,78,89,92,91,98,77,92,86,Ŀ\r\n,1102,Ů,85,75,88,92,92,90,82,88,94,Ŀ\r\nţ־,1102,Ů,89,78,91,96,94,94,75,88,80,Ŀ\r\nh,1102,,87,80,85,92,91,97,82,92,79,Ŀ\r\n帣,1102,Ů,76,79,89,92,78,96,90,94,89,Ŀ\r\n,1102,,83,72,76,93,86,94,87,98,91,Ŀ\r\nС,1102,Ů,93,86,80,94,95,90,68,86,88,Ŀ\r\nķ,1102,,85,81,85,95,86,92,77,88,87,Ŀ\r\n,1102,Ů,83,90,82,91,78,94,75,94,85,Ŀ\r\n,1102,,90,72,88,93,84,90,75,90,88,Ŀ\r\n,1102,,88,70,93,87,87,96,80,86,79,Ŀ\r\nѩ,1102,Ů,87,73,93,95,85,94,61,90,85,Ŀ\r\n死,1102,Ů,85,83,89,90,85,90,72,88,81,Ŀ\r\nС,1102,,80,87,83,88,88,94,73,86,81,Ŀ\r\n,1102,Ů,85,70,87,84,90,94,83,82,84,Ŀ\r\nǿи,1102,,88,81,88,95,89,92,60,82,81,Ŀ\r\nԶ,1102,,89,73,86,91,80,94,65,90,86,Ŀ\r\nʢ,1102,,90,77,75,90,94,90,70,88,78,Ŀ\r\nң,1102,,88,71,82,66,88,92,86,94,84,Ŀ\r\n,1102,,85,75,87,89,76,90,78,92,77,Ŀ\r\n¼Ѻ,1102,Ů,89,58,88,87,71,90,83,90,88,Ŀ\r\nС,1102,Ů,81,79,84,97,75,88,68,90,79,Ŀ\r\n,1102,Ů,88,85,80,94,80,90,60,84,77,Ŀ\r\n¡,1102,,85,63,73,91,96,92,82,74,80,Ŀ\r\n,1102,Ů,82,75,87,87,79,96,61,84,84,Ŀ\r\n̸,1102,Ů,82,80,79,88,86,88,75,72,80,Ŀ\r\n,1102,Ů,88,83,80,93,83,94,59,76,73,Ŀ\r\n,1102,,84,68,89,90,87,78,58,
86,86,Ŀ\r\n,1102,Ů,89,51,83,98,87,84,73,80,81,Ŀ\r\nʷ,1102,,82,83,87,86,89,90,58,72,78,Ŀ\r\n,1102,,74,62,82,88,79,90,82,88,78,Ŀ\r\nӢ,1102,Ů,86,70,76,90,86,84,72,76,82,Ŀ\r\nǿ,1102,,89,74,77,91,78,84,74,78,77,Ŀ\r\nʷ,1102,,83,65,79,89,82,80,79,84,79,Ŀ\r\n,1102,Ů,88,70,78,91,89,94,62,80,68,Ŀ\r\nѩ,1102,Ů,88,73,83,94,82,92,49,82,68,Ŀ\r\nϼ,1102,Ů,82,56,90,94,83,81,65,68,81,Ŀ\r\nС,1102,Ů,83,73,78,86,74,82,62,86,69,Ŀ\r\nС,1102,,80,68,74,87,76,84,44,80,69,Ŀ\r\nh,1102,,85,63,76,72,72,80,53,82,68,Ŀ\r\nմ,1103,Ů,90,87,89,92,93,88,91,94,90,Ŀ\r\n˫,1103,Ů,89,77,90,97,92,94,80,98,91,Ŀ\r\n,1103,Ů,91,75,82,90,94,98,85,96,93,Ŀ\r\n԰,1103,Ů,91,82,94,97,80,94,83,96,86,Ŀ\r\n,1103,,80,95,87,95,93,96,80,90,84,Ŀ\r\n,1103,Ů,87,88,89,98,87,94,74,88,92,Ŀ\r\nԽ,1103,,87,91,88,84,91,92,83,90,90,Ŀ\r\n廪,1103,,85,95,77,94,98,90,80,92,84,Ŀ\r\nֺ,1103,Ů,86,85,84,93,91,90,81,94,90,Ŀ\r\n,1103,,91,84,86,96,96,96,71,88,86,Ŀ\r\n׿,1103,,70,86,86,94,87,92,92,98,88,Ŀ\r\nӨ,1103,Ů,84,84,86,90,85,92,92,82,95,Ŀ\r\n,1103,Ů,82,80,85,91,87,94,85,96,85,Ŀ\r\nǮܿ,1103,Ů,90,73,83,93,91,96,83,90,82,Ŀ\r\n,1103,Ů,86,83,84,90,96,92,81,90,79,Ŀ\r\nӡľ,1103,Ů,82,83,86,91,91,94,83,84,84,Ŀ\r\n,1103,Ů,83,88,81,95,89,90,75,94,82,Ŀ\r\n컨,1103,Ů,86,77,84,92,94,98,87,90,69,Ŀ\r\nϯ,1103,Ů,86,76,87,87,79,94,91,94,82,Ŀ\r\n,1103,Ů,87,89,82,98,90,94,71,90,74,Ŀ\r\n,1103,Ů,85,85,90,94,74,94,73,92,86,Ŀ\r\nҦ,1103,Ů,86,85,84,95,81,90,84,82,86,Ŀ\r\n޷,1103,,80,75,92,93,95,81,82,88,85,Ŀ\r\nʤ,1103,,92,79,90,96,74,95,67,94,81,Ŀ\r\n,1103,Ů,90,68,89,89,93,88,70,94,86,Ŀ\r\n,1103,Ů,94,80,88,98,82,86,69,88,82,Ŀ\r\n,1103,,84,65,87,92,87,94,74,98,85,Ŀ\r\nۺ,1103,Ů,88,65,81,86,87,92,84,90,91,Ŀ\r\nʻ,1103,Ů,87,77,85,94,84,94,78,92,73,Ŀ\r\nɿ,1103,,77,85,76,95,92,98,72,86,81,Ŀ\r\n׵,1103,Ů,86,86,84,97,80,88,84,90,66,Ŀ\r\n¶,1103,,91,65,69,87,96,90,79,96,82,Ŀ\r\n¶,1103,,82,72,71,91,91,92,81,92,81,Ŀ\r\nԶ,1103,,83,81,80,92,81,80,90,80,83,Ŀ\r\n,1103,,87,72,92,86,83,93,78,88,71,Ŀ\r\nʹ,1103,,84,76,82,96,86,82,74,88,81,Ŀ\r\n,1103,,84,84,72,95,93,94,67,80,77,Ŀ\r\n,1103,,84,72,81
,96,94,92,68,88,71,Ŀ\r\n,1103,Ů,87,68,95,92,88,94,64,84,67,Ŀ\r\nͯ,1103,Ů,82,75,82,90,91,90,59,80,84,Ŀ\r\n,1103,Ů,89,73,79,88,87,90,53,84,83,Ŀ\r\nղ,1103,Ů,80,73,78,84,78,88,90,82,73,Ŀ\r\nͨʫ,1103,Ů,86,74,77,93,83,79,80,88,66,Ŀ\r\nʫ,1103,,82,71,67,83,75,94,80,82,89,Ŀ\r\n,1103,,76,55,84,92,84,90,85,80,75,Ŀ\r\n,1103,Ů,86,59,87,89,85,92,73,74,68,Ŀ\r\nҽ,1103,,79,73,82,78,80,88,79,84,69,Ŀ\r\nӢ,1103,Ů,86,61,84,93,82,84,61,86,70,Ŀ\r\n,1103,,75,82,76,87,79,94,79,68,65,Ŀ\r\nḣ,1103,,81,68,80,92,97,78,64,76,61,Ŀ\r\n,1103,,75,72,83,83,63,92,74,80,72,Ŀ\r\n,1103,Ů,86,71,89,89,67,90,51,74,74,Ŀ\r\n,1103,Ů,78,67,79,87,75,80,63,82,73,Ŀ\r\nƼ,1103,Ů,85,63,76,88,68,80,61,82,73,Ŀ\r\nҶ,1103,,69,62,69,82,85,94,48,90,63,Ŀ\r\n̷˼Ե,1103,Ů,87,67,64,85,76,88,37,74,69,Ŀ\r\n,1103,,65,26,53,87,91,96,21,56,58,Ŀ\r\n,1104,Ů,88,94,93,99,92,100,94,98,97,Ŀ\r\nΤ,1104,Ů,92,84,96,97,98,94,91,98,94,Ŀ\r\nף,1104,Ů,88,88,95,96,84,95,98,98,88,Ŀ\r\n˹Զ,1104,,86,84,88,94,96,98,82,100,96,Ŀ\r\n˫֦,1104,Ů,91,92,96,90,91,94,91,90,87,Ŀ\r\nӢ,1104,Ů,86,78,89,91,97,100,83,98,97,Ŀ\r\n,1104,,87,93,88,91,80,92,98,98,91,Ŀ\r\nʢ,1104,Ů,94,92,91,92,86,98,85,96,80,Ŀ\r\nɳɳ,1104,Ů,86,84,83,95,91,91,94,100,89,Ŀ\r\nԷ,1104,Ů,90,95,92,95,89,90,82,94,86,Ŀ\r\n,1104,Ů,86,93,89,98,96,88,84,96,82,Ŀ\r\nͨ,1104,Ů,92,83,88,95,91,92,86,92,92,Ŀ\r\n,1104,Ů,89,89,93,97,93,98,73,92,87,Ŀ\r\n,1104,,90,94,95,91,96,94,89,86,76,Ŀ\r\nཨ,1104,,84,94,93,93,95,90,90,90,81,Ŀ\r\n,1104,,86,74,88,94,94,100,88,94,84,Ŀ\r\nӳѩ,1104,Ů,89,84,95,93,87,96,89,92,75,Ŀ\r\nش,1104,,84,95,85,90,89,94,89,84,88,Ŀ\r\n,1104,,92,95,81,95,95,94,76,88,81,Ŀ\r\n,1104,,89,78,90,92,93,92,68,98,92,Ŀ\r\n,1104,,86,77,90,73,95,95,92,94,89,Ŀ\r\n·,1104,,89,88,82,92,94,92,88,80,82,Ŀ\r\n,1104,Ů,86,84,86,93,91,92,78,88,81,Ŀ\r\nϳ,1104,,88,88,80,95,95,92,63,94,83,Ŀ\r\n,1104,,85,81,92,95,86,90,81,92,76,Ŀ\r\n,1104,,86,97,86,93,94,96,63,88,74,Ŀ\r\nޱ,1104,Ů,89,91,82,90,88,82,77,92,85,Ŀ\r\nͼΰ,1104,,92,67,90,92,98,94,61,96,85,Ŀ\r\n,1104,Ů,92,85,83,92,92,90,79,86,76,Ŀ\r\n,1104,Ů,86,89,92,90,81,88,75,90,83,Ŀ\r
\nƽ,1104,Ů,88,71,88,94,83,94,84,90,81,Ŀ\r\nС,1104,Ů,92,85,90,97,94,92,63,82,78,Ŀ\r\n,1104,Ů,87,81,92,91,93,93,70,86,79,Ŀ\r\n,1104,Ů,90,81,94,95,87,88,74,88,71,Ŀ\r\n,1104,,82,84,80,92,97,96,75,70,84,Ŀ\r\nֵ,1104,Ů,88,79,80,95,93,96,58,94,77,Ŀ\r\nĲ,1104,Ů,85,82,88,85,85,84,69,90,90,Ŀ\r\nۺҡ,1104,Ů,89,82,83,95,92,85,67,86,78,Ŀ\r\nԴ,1104,,81,83,70,86,86,94,80,94,82,Ŀ\r\n,1104,Ů,85,77,91,94,88,96,59,86,80,Ŀ\r\nƽ,1104,Ů,83,82,83,84,81,94,83,84,78,Ŀ\r\nͥ,1104,,89,84,77,94,84,94,65,84,76,Ŀ\r\nķ,1104,,82,79,78,93,78,94,71,84,87,Ŀ\r\n,1104,Ů,87,74,83,87,93,82,69,86,81,Ŀ\r\n,1104,Ů,87,81,85,100,83,86,58,80,81,Ŀ\r\n,1104,Ů,87,83,85,89,81,88,73,78,77,Ŀ\r\n,1104,Ů,89,82,87,87,83,96,64,76,77,Ŀ\r\n,1104,Ů,85,71,77,96,90,90,60,80,85,Ŀ\r\n,1104,Ů,88,81,90,96,79,72,70,82,75,Ŀ\r\nĪƼ,1104,Ů,86,80,95,90,86,90,62,76,66,Ŀ\r\n޻,1104,,87,72,88,92,87,86,49,84,80,Ŀ\r\n,1104,Ů,83,63,85,88,79,84,63,78,86,Ŀ\r\nѦ,1104,Ů,85,76,79,89,81,84,60,82,72,Ŀ\r\n,1104,Ů,90,63,92,89,78,80,57,80,68,Ŀ\r\n,1104,Ů,93,66,92,90,77,86,53,70,66,Ŀ\r\n,1104,Ů,78,70,78,98,83,86,42,86,59,Ŀ\r\n,1105,Ů,91,98,86,94,90,96,88,100,93,Ŀ\r\n,1105,Ů,89,91,92,95,90,96,94,96,93,Ŀ\r\n嵴,1105,,92,97,86,94,98,100,77,96,94,Ŀ\r\n,1105,Ů,92,93,88,98,98,100,88,88,85,Ŀ\r\nѽ,1105,Ů,90,84,95,94,97,96,93,98,83,Ŀ\r\nƻ,1105,Ů,89,95,92,95,89,94,81,98,90,Ŀ\r\nٻ,1105,Ů,90,96,91,97,98,94,82,90,85,Ŀ\r\n,1105,Ů,90,92,87,97,95,92,81,94,89,Ŀ\r\nл,1105,Ů,85,84,85,96,94,98,89,96,89,Ŀ\r\nʩ,1105,Ů,88,92,92,98,89,94,84,90,82,Ŀ\r\nĪ,1105,,91,86,83,97,94,90,81,94,92,Ŀ\r\n,1105,,89,93,88,91,98,98,79,90,79,Ŀ\r\nٻ,1105,Ů,93,87,90,96,92,92,87,90,77,Ŀ\r\nҶܷܷ,1105,Ů,84,86,92,97,82,92,92,94,84,Ŀ\r\n,1105,Ů,91,91,90,94,82,96,78,98,83,Ŀ\r\nҶ˫,1105,Ů,94,95,95,96,90,92,66,86,88,Ŀ\r\nʫ,1105,,92,94,90,92,94,98,75,92,75,Ŀ\r\n軪,1105,,88,84,85,93,94,94,77,96,90,Ŀ\r\n,1105,Ů,90,78,88,97,94,100,79,86,87,Ŀ\r\nκ,1105,Ů,91,71,87,93,98,98,84,90,86,Ŀ\r\n,1105,Ů,91,91,89,95,86,90,81,90,84,Ŀ\r\nǿƽ,1105,,90,82,84,92,80,94,79,96,99,Ŀ\r\nС,1105,Ů,88,88,93,94,87,94,72,96,83,Ŀ\r\n,1105,Ů,94,
96,86,94,91,92,70,92,80,Ŀ\r\nӦɻ,1105,Ů,87,88,89,92,93,84,81,96,84,Ŀ\r\nɶ,1105,,87,90,77,92,96,94,85,86,84,Ŀ\r\n,1105,Ů,92,90,91,96,85,90,73,92,78,Ŀ\r\n,1105,Ů,92,68,87,94,89,94,89,90,83,Ŀ\r\n÷,1105,Ů,92,90,86,96,91,94,68,86,83,Ŀ\r\n,1105,,87,96,87,95,86,88,71,92,83,Ŀ\r\n,1105,Ů,89,86,95,97,88,90,71,90,78,Ŀ\r\n,1105,Ů,81,83,91,96,86,92,86,92,72,Ŀ\r\n,1105,,90,84,78,96,98,92,64,90,86,Ŀ\r\nȫ,1105,Ů,89,82,86,91,92,98,66,90,84,Ŀ\r\nෲ,1105,,89,79,71,94,91,98,85,82,84,Ŀ\r\n,1105,,91,93,85,92,83,90,74,90,75,Ŀ\r\nҶ,1105,Ů,95,90,92,94,71,86,80,92,73,Ŀ\r\nɣ,1105,,87,92,86,94,90,88,75,80,78,Ŀ\r\nʱ,1105,Ů,89,74,83,95,93,92,72,92,79,Ŀ\r\nͽ,1105,Ů,92,90,86,92,87,92,76,86,66,Ŀ\r\n,1105,Ů,84,77,79,95,91,86,71,92,91,Ŀ\r\n,1105,Ů,82,81,82,100,91,90,64,94,82,Ŀ\r\n,1105,Ů,84,80,85,95,82,82,86,90,79,Ŀ\r\n,1105,Ů,91,79,84,99,94,88,65,76,85,Ŀ\r\n,1105,Ů,85,83,81,88,88,86,77,88,81,Ŀ\r\n,1105,Ů,82,90,91,94,83,90,78,74,75,Ŀ\r\nǿ,1105,,89,87,82,93,95,96,66,76,72,Ŀ\r\nʢ,1105,Ů,92,79,92,89,89,90,72,84,69,Ŀ\r\n,1105,Ů,89,73,82,93,90,90,60,92,85,Ŀ\r\n,1105,Ů,89,61,79,90,79,96,88,82,86,Ŀ\r\n,1105,Ů,93,81,82,96,82,96,59,80,81,Ŀ\r\n,1105,Ů,90,74,75,94,95,86,66,84,81,Ŀ\r\n,1105,Ů,89,61,89,95,87,94,82,70,76,Ŀ\r\nѩ,1105,Ů,86,82,83,92,94,81,77,74,74,Ŀ\r\n佭,1105,Ů,80,72,87,94,88,92,67,84,78,Ŀ\r\n,1105,Ů,75,67,83,89,75,86,58,92,77,Ŀ\r\n,1105,Ů,68,74,61,91,78,74,69,76,67,Ŀ\r\n,1106,Ů,93,98,92,99,100,98,97,100,93,Ŀ\r\n,1106,Ů,94,95,91,97,98,100,95,100,99,Ŀ\r\nλ,1106,Ů,93,97,98,96,97,100,91,100,96,Ŀ\r\n,1106,Ů,93,95,95,98,100,98,94,96,92,Ŀ\r\n,1106,,91,95,93,94,97,98,95,100,95,Ŀ\r\n,1106,Ů,94,92,95,98,100,98,82,98,97,Ŀ\r\nӢ,1106,Ů,92,93,96,97,96,98,97,92,91,Ŀ\r\nӱ,1106,Ů,93,95,97,91,99,98,86,100,92,Ŀ\r\n,1106,Ů,95,96,99,92,96,96,89,96,90,Ŀ\r\n,1106,Ů,94,97,93,99,94,96,80,98,97,Ŀ\r\n,1106,Ů,90,92,97,97,97,92,91,98,94,Ŀ\r\nάά,1106,Ů,93,98,96,96,97,98,77,100,93,Ŀ\r\nع,1106,Ů,91,98,94,97,94,95,92,96,91,Ŀ\r\nʤ,1106,,86,89,91,92,100,98,95,100,93,Ŀ\r\n,1106,Ů,92,92,91,98,98,98,93,96,84,Ŀ\r\nˮ,1106,Ů,93,95,94,97,97,9
8,86,90,91,Ŀ\r\n,1106,Ů,93,95,94,94,94,98,88,92,91,Ŀ\r\n¶Ӣ,1106,Ů,95,94,91,97,92,91,91,96,90,Ŀ\r\nƽ,1106,Ů,89,94,95,96,98,96,76,98,94,Ŀ\r\n÷,1106,Ů,93,94,92,92,95,94,86,98,92,Ŀ\r\n,1106,Ů,92,87,92,97,92,98,89,100,86,Ŀ\r\n,1106,,87,97,90,95,98,90,85,98,92,Ŀ\r\në,1106,Ů,93,95,93,97,95,96,78,92,92,Ŀ\r\n,1106,,87,90,96,96,97,96,96,82,91,Ŀ\r\n,1106,,91,94,86,94,100,98,84,96,88,Ŀ\r\n,1106,Ů,94,96,97,96,93,94,71,100,88,Ŀ\r\nСС,1106,Ů,87,92,93,97,94,96,81,96,91,Ŀ\r\nֽ,1106,,92,96,73,98,100,100,88,94,86,Ŀ\r\nֶ,1106,,91,94,92,92,93,100,81,94,86,Ŀ\r\n½ܧ,1106,Ů,91,97,83,96,100,100,83,88,85,Ŀ\r\nѾ,1106,Ů,93,93,91,97,91,94,86,94,84,Ŀ\r\n,1106,,90,94,79,96,97,98,85,92,91,Ŀ\r\n,1106,Ů,92,94,96,99,96,96,86,80,82,Ŀ\r\nС,1106,Ů,88,93,89,95,98,94,82,88,90,Ŀ\r\n,1106,Ů,90,95,93,95,94,98,72,92,88,Ŀ\r\n,1106,Ů,94,91,92,97,96,94,82,90,80,Ŀ\r\nʱ,1106,,88,97,90,94,97,98,70,94,87,Ŀ\r\nͼ,1106,Ů,87,96,93,95,94,98,79,94,78,Ŀ\r\n˼ޱ,1106,Ů,91,95,87,89,94,94,70,98,94,Ŀ\r\n,1106,Ů,91,93,81,97,96,98,74,94,88,Ŀ\r\n,1106,Ů,89,98,97,94,94,96,65,96,83,Ŀ\r\n,1106,Ů,94,90,79,94,89,94,86,98,87,Ŀ\r\n庬,1106,Ů,89,93,94,96,96,98,74,86,84,Ŀ\r\n,1106,Ů,90,93,92,93,98,96,65,92,85,Ŀ\r\nС,1106,Ů,88,94,83,96,96,96,73,86,91,Ŀ\r\nѩ,1106,Ů,90,93,93,97,96,94,65,86,88,Ŀ\r\n,1106,Ů,92,93,90,94,91,96,74,92,77,Ŀ\r\nԪ,1106,Ů,90,93,87,93,92,92,78,92,81,Ŀ\r\n,1106,Ů,93,90,87,93,94,98,61,94,82,Ŀ\r\n躽,1106,,89,91,94,96,91,94,60,80,92,Ŀ\r\n,1106,Ů,92,90,96,93,85,96,73,80,82,Ŀ\r\nﾲ,1106,Ů,92,85,95,98,88,84,80,78,78,Ŀ\r\n֣,1106,Ů,91,84,91,93,88,92,61,90,83,Ŀ\r\nӢ,1106,Ů,87,90,90,94,92,92,52,90,78,Ŀ\r\n,1106,Ů,93,84,89,93,81,86,56,76,70,Ŀ\r\n,1106,Ů,87,78,97,93,83,90,58,66,58,Ŀ\r\n,1107,Ů,94,100,96,98,100,98,88,100,97,Ŀ\r\n,1107,Ů,95,95,95,95,100,98,89,98,96,Ŀ\r\n߻,1107,Ů,92,95,94,94,100,100,88,98,97,Ŀ\r\nɽ,1107,,88,96,92,97,96,100,97,98,93,Ŀ\r\n,1107,,89,96,96,94,100,100,85,98,95,Ŀ\r\nʦ,1107,,89,96,95,95,100,98,80,98,93,Ŀ\r\n,1107,,87,90,90,96,99,100,90,96,95,Ŀ\r\n޷,1107,Ů,94,99,96,97,92,92,86,96,91,Ŀ\r\n,1107,,87,94,94,93,91,94,95,
98,95,Ŀ\r\nܹǿ,1107,,94,96,92,92,98,96,91,94,88,Ŀ\r\nӢ,1107,Ů,90,91,92,97,97,98,91,96,88,Ŀ\r\n,1107,,85,92,86,95,98,100,95,96,92,Ŀ\r\nҷ,1107,Ů,87,94,91,94,94,96,94,98,91,Ŀ\r\n,1107,Ů,94,97,95,97,97,100,79,94,86,Ŀ\r\n,1107,Ů,92,92,89,96,93,94,93,94,94,Ŀ\r\n,1107,Ů,91,94,94,98,94,96,83,94,93,Ŀ\r\n,1107,Ů,89,95,97,97,98,96,77,98,90,Ŀ\r\nڽ,1107,,89,94,92,95,98,96,87,94,91,Ŀ\r\nɽ˼,1107,Ů,93,91,92,93,99,94,90,94,90,Ŀ\r\n,1107,Ů,93,89,96,89,99,96,88,94,91,Ŀ\r\nũ,1107,Ů,93,91,98,95,97,98,85,92,86,Ŀ\r\n,1107,Ů,91,90,94,98,98,96,82,94,91,Ŀ\r\n,1107,,89,92,91,95,95,98,92,92,90,Ŀ\r\n,1107,,92,93,94,93,96,92,94,88,91,Ŀ\r\n³,1107,Ů,91,92,95,94,96,98,80,96,90,Ŀ\r\n,1107,,92,82,88,97,97,100,90,98,88,Ŀ\r\nʷ,1107,Ů,90,95,95,95,92,98,88,90,88,Ŀ\r\nС,1107,Ů,89,93,94,94,96,96,84,98,87,Ŀ\r\nδ,1107,Ů,90,89,92,95,94,94,91,98,86,Ŀ\r\nӺƼ,1107,Ů,89,85,85,96,97,98,90,92,96,Ŀ\r\n,1107,Ů,92,89,92,98,95,92,79,94,96,Ŀ\r\n,1107,Ů,96,91,88,94,97,96,85,84,94,Ŀ\r\nӢ,1107,Ů,91,89,92,97,99,96,82,94,83,Ŀ\r\n,1107,Ů,88,97,97,93,95,100,71,94,83,Ŀ\r\n,1107,Ů,89,87,94,95,98,98,72,92,89,Ŀ\r\nʼ,1107,,91,88,89,97,95,96,82,84,89,Ŀ\r\n,1107,Ů,95,97,95,98,96,94,62,94,79,Ŀ\r\n,1107,Ů,95,85,95,96,98,100,75,94,72,Ŀ\r\n˷ƽ,1107,,91,87,89,94,99,100,71,86,89,Ŀ\r\n,1107,Ů,89,81,92,95,95,98,82,96,78,Ŀ\r\nǿ,1107,,92,95,95,93,92,88,85,92,74,Ŀ\r\n˫,1107,Ů,89,94,88,92,95,90,81,82,93,Ŀ\r\n˫,1107,Ů,90,87,95,99,99,95,61,92,86,Ŀ\r\n,1107,Ů,91,93,95,94,92,92,81,86,79,Ŀ\r\n,1107,,89,96,82,93,97,96,74,100,73,Ŀ\r\nȫ,1107,Ů,87,76,94,93,97,100,72,96,84,Ŀ\r\n,1107,,86,90,80,97,100,94,79,92,81,Ŀ\r\n,1107,Ů,88,89,89,90,95,94,72,92,81,Ŀ\r\n˿,1107,,91,91,86,96,95,98,62,88,81,Ŀ\r\n,1107,Ů,87,89,91,97,89,96,67,94,78,Ŀ\r\nղ,1107,,86,91,90,93,92,96,68,90,81,Ŀ\r\nС,1107,Ů,90,86,87,97,89,93,73,82,88,Ŀ\r\n,1107,,91,89,75,95,100,100,59,86,85,Ŀ\r\n,1107,,89,90,82,88,93,98,66,78,81,Ŀ\r\n,1107,Ů,86,85,90,95,86,88,66,78,76,Ŀ\r\n,1108,Ů,91,98,95,99,99,100,100,100,97,\r\n,1108,,92,97,99,95,94,100,100,100,99,\r\n,1108,Ů,96,98,94,97,94,98,98,98,97,\r\n,1108,
,93,97,93,97,94,98,100,98,99,\r\n,1108,Ů,93,99,92,95,100,97,95,100,97,\r\nѧ,1108,,94,96,97,97,95,96,95,100,96,\r\n,1108,,93,93,95,96,100,98,94,100,96,\r\nѩ,1108,Ů,92,97,96,94,98,98,97,98,95,\r\n¦,1108,,88,97,96,91,95,96,100,100,99,\r\nԻ,1108,Ů,89,97,97,96,95,100,94,96,97,\r\nӡݼ,1108,Ů,96,98,97,91,95,94,95,100,95,\r\n,1108,Ů,91,95,91,94,99,94,98,100,97,\r\n־˧,1108,,92,93,95,94,99,96,95,100,95,\r\n־,1108,,86,97,92,90,99,98,100,98,97,\r\nӱ,1108,,94,94,95,95,97,96,94,98,94,\r\nݻ,1108,,90,98,97,94,96,96,91,100,94,\r\n,1108,,91,95,91,95,94,98,96,100,94,\r\n,1108,Ů,90,97,96,91,96,94,97,100,93,\r\n¡ʤ,1108,,94,93,85,99,94,100,100,98,91,\r\n,1108,,90,95,93,93,91,96,97,100,98,\r\nƽ,1108,Ů,92,93,90,94,94,94,99,98,97,\r\nƽ,1108,,91,96,94,92,91,96,92,100,98,\r\n,1108,,86,98,93,92,95,96,95,100,95,\r\nѩ,1108,Ů,92,96,96,93,93,94,94,100,92,\r\nʿ,1108,,86,97,88,97,95,94,95,100,96,\r\n,1108,Ů,93,97,94,91,93,92,93,100,95,\r\nũ,1108,Ů,86,98,95,92,91,98,92,98,97,\r\n,1108,Ů,93,94,95,94,89,100,89,100,93,\r\n,1108,,90,97,94,90,95,94,94,96,96,\r\n,1108,,96,96,88,94,99,95,89,98,91,\r\nƼ,1108,Ů,93,92,91,90,98,98,90,96,96,\r\n,1108,,90,96,92,94,97,96,86,98,95,\r\nʱ,1108,,91,96,89,98,94,96,92,98,90,\r\nʽ,1108,,89,89,94,94,91,100,94,98,93,\r\nɽ,1108,,89,94,95,90,94,96,97,96,91,\r\n,1108,Ů,88,89,98,91,99,94,91,96,95,\r\n,1108,Ů,89,93,97,89,86,90,94,98,97,\r\n,1108,Ů,92,88,91,91,91,96,89,100,95,\r\n,1108,,86,96,82,95,90,94,92,100,97,\r\n,1108,,88,88,87,92,97,96,91,96,93,\r\n,1108,Ů,93,93,95,91,79,94,91,96,92,\r\nĽԲ,1108,Ů,89,94,86,91,91,96,82,100,94,\r\n,1108,Ů,91,85,93,92,97,94,81,100,90,\r\n,1108,Ů,88,97,93,89,80,92,94,98,91,\r\n˾,1108,Ů,92,92,91,92,89,98,74,100,92,\r\n,1108,,86,89,86,94,89,96,93,94,89,\r\n³,1109,Ů,93,98,98,97,96,98,100,100,98,\r\nͯ,1109,Ů,92,98,95,95,95,100,100,98,98,\r\n,1109,,91,93,92,95,100,100,100,100,98,\r\n,1109,Ů,94,92,94,96,96,100,100,100,97,\r\nţ,1109,,90,98,94,97,95,100,94,100,98,\r\n,1109,Ů,93,98,97,94,98,97,94,100,95,\r\n߬B,1109,Ů,95,94,97,95,99,96,91,100,96,\r\nʦ,11
09,Ů,89,93,97,96,92,100,95,100,100,\r\nƽ,1109,,82,99,93,94,98,100,97,100,98,\r\n˳,1109,,90,96,95,94,93,100,94,100,97,\r\n˫,1109,,90,96,94,89,97,95,99,98,100,\r\n޺,1109,Ů,88,96,95,99,93,96,96,98,97,\r\n,1109,,91,97,90,93,98,98,97,98,95,\r\n,1109,,89,96,97,88,98,96,97,100,95,\r\n,1109,Ů,93,97,93,96,93,94,97,98,95,\r\nŻ,1109,,87,96,97,96,92,98,94,98,96,\r\nѹƽ,1109,,89,98,91,95,91,98,94,100,97,\r\nС,1109,,89,93,94,94,98,98,94,98,95,\r\nƾ,1109,,89,96,94,93,90,94,100,98,98,\r\nׯ,1109,Ů,90,94,96,89,98,96,95,100,93,\r\n,1109,Ů,93,97,96,90,86,98,94,98,97,\r\nѫ,1109,,85,99,93,97,90,96,94,98,97,\r\n,1109,,92,94,92,93,96,98,91,100,93,\r\nɳ,1109,,88,100,88,98,91,98,95,98,93,\r\n,1109,,92,97,92,95,94,94,97,96,91,\r\n÷,1109,Ů,90,97,94,94,95,96,95,94,92,\r\n,1109,Ů,90,93,95,91,96,98,89,96,98,\r\n,1109,,87,94,94,94,91,96,96,96,97,\r\nï,1109,,91,98,88,95,90,96,93,100,94,\r\n,1109,,89,97,94,94,87,98,92,100,94,\r\n,1109,Ů,93,97,89,97,90,94,94,100,91,\r\n,1109,Ů,93,93,93,95,91,98,85,100,96,\r\nˮ,1109,Ů,92,96,92,97,92,96,86,100,93,\r\nѳ֮,1109,Ů,90,98,98,86,94,96,93,96,93,\r\n,1109,,92,94,87,95,94,92,94,96,97,\r\n,1109,,86,94,90,93,94,96,92,100,95,\r\n,1109,Ů,94,95,94,91,93,96,85,100,90,\r\n,1109,,89,96,94,91,85,96,89,100,96,\r\n½,1109,,86,90,96,96,92,98,88,94,95,\r\n,1109,Ů,90,95,92,95,88,90,90,98,96,\r\n,1109,Ů,92,93,87,93,93,96,93,96,89,\r\n𸻳,1109,,90,87,88,95,94,100,87,96,94,\r\n,1109,Ů,90,96,98,96,91,82,82,100,93,\r\néϵ,1109,,89,90,95,96,94,92,79,100,92,\r\n,1109,Ů,90,94,92,90,89,94,91,96,91,\r\n갾,1109,Ů,91,92,96,97,82,90,86,96,96,\r\nʢ,1109,Ů,90,89,85,95,90,98,85,98,95,\r\n,1109,,89,95,92,94,94,94,77,96,94,\r\n־,1109,,92,97,84,98,97,90,85,90,86,\r\nֽᾴ,1109,,89,94,89,92,83,96,89,84,94,\r\n,1110,,94,97,97,97,100,100,100,100,100,\r\n,1110,,88,98,99,95,100,100,97,100,95,\r\n˫Ļ,1110,,92,97,93,98,95,96,100,100,98,\r\nʢҶٻ,1110,Ů,88,96,97,96,97,100,100,100,95,\r\n,1110,,93,99,98,99,93,98,95,100,92,\r\nκ,1110,,88,96,96,98,95,100,97,100,96,\r\n,1110,Ů,94,92,97,99,97,96,91,100,99,\r\n,1110,,88
,97,96,99,100,96,91,100,97,\r\nʤ,1110,,93,95,90,93,98,98,97,100,99,\r\nִ,1110,Ů,89,96,95,99,94,100,95,98,97,\r\n,1110,,90,93,93,96,94,98,97,100,99,\r\nǱ,1110,,91,91,92,92,98,98,100,100,96,\r\n¿,1110,Ů,90,96,97,92,93,98,94,100,97,\r\nϴ,1110,Ů,90,97,92,96,97,98,92,100,94,\r\n,1110,Ů,89,96,93,95,93,94,97,100,98,\r\nСϣ,1110,Ů,92,95,94,96,92,94,94,100,97,\r\nƻ,1110,Ů,91,96,98,95,93,96,93,96,96,\r\n,1110,,89,98,99,91,88,98,98,98,95,\r\nά,1110,,92,96,95,95,95,96,96,98,91,\r\n,1110,,89,96,93,91,97,98,93,98,98,\r\n,1110,,90,98,89,96,97,94,94,98,97,\r\nԷ,1110,Ů,87,97,97,99,95,98,86,100,93,\r\nƽ,1110,Ů,88,97,94,90,92,96,96,100,98,\r\nγ,1110,,92,93,90,99,94,96,93,98,96,\r\nտ,1110,Ů,91,96,97,92,90,92,96,98,98,\r\n,1110,Ů,86,97,94,93,96,91,94,100,97,\r\n,1110,,90,96,91,96,94,98,87,100,95,\r\n˫ѩƼ,1110,Ů,87,96,92,94,96,90,97,100,94,\r\n,1110,Ů,91,95,90,95,93,100,92,100,89,\r\nη,1110,Ů,93,97,97,92,88,94,92,96,95,\r\nɽǮ,1110,,91,90,93,93,96,96,89,100,92,\r\n̾,1110,,90,96,88,91,97,92,91,100,93,\r\nȪ,1110,,87,92,96,97,95,94,88,92,96,\r\n,1110,Ů,90,95,92,94,84,98,94,94,95,\r\n,1110,,83,96,91,92,83,94,100,100,96,\r\n,1110,,81,96,82,95,95,94,100,98,93,\r\nϯ,1110,Ů,91,95,91,95,91,94,86,100,90,\r\n׿Ҽ,1110,,87,92,89,94,91,96,92,96,95,\r\n˼ƽ,1110,Ů,93,92,95,90,80,96,94,98,94,\r\n,1110,Ů,90,93,91,94,96,94,83,92,94,\r\n,1110,Ů,86,93,95,89,88,98,83,96,96,\r\nС,1110,Ů,91,94,94,88,89,90,88,96,93,\r\n˵,1110,Ů,90,97,90,96,83,94,87,100,84,\r\nС,1110,Ů,87,95,91,89,83,96,93,98,88,\r\n,1110,Ů,88,94,86,90,87,90,92,96,84,\r\n۬С,1110,Ů,93,91,93,93,76,94,78,92,96,\r\n΢,1110,Ů,83,96,92,93,78,90,79,100,92,\r\n,1110,,84,89,78,96,85,96,79,98,93,\r\n,1111,Ů,89,97,88,98,98,98,94,96,93,\r\n,1111,,83,96,90,95,98,98,91,94,97,\r\n,1111,,85,96,94,93,91,94,100,98,91,\r\n,1111,Ů,94,94,97,93,94,96,94,94,86,\r\nС,1111,,84,95,91,92,95,92,91,100,99,\r\nԬ÷,1111,Ů,93,93,88,97,98,92,88,96,90,\r\nĵ˿,1111,,89,97,91,91,90,92,95,98,91,\r\nٲ,1111,,88,94,98,94,89,92,91,98,90,\r\n÷˼,1111,,89,99,90,95,86,98,89,98,89,\r\nӺƼ,1111,Ů,90,9
5,94,95,91,88,95,100,85,\r\nĻ,1111,,87,95,92,89,91,96,95,96,90,\r\nۭ,1111,Ů,93,94,92,100,91,98,73,100,88,\r\n,1111,,89,92,95,93,84,98,94,98,85,\r\n׿Сٻ,1111,Ů,89,93,94,90,88,94,89,94,96,\r\nƤӢ,1111,Ů,87,87,93,93,91,96,88,98,94,\r\nҦ,1111,,86,90,87,86,94,98,97,96,92,\r\n,1111,Ů,85,96,91,93,85,94,90,98,92,\r\n̽,1111,,84,94,91,87,92,94,91,94,96,\r\nݾ,1111,,92,96,89,88,89,96,92,96,85,\r\n,1111,,85,96,85,93,90,92,97,94,90,\r\nϯ,1111,,89,95,94,97,85,96,85,98,83,\r\nӢ,1111,Ů,90,96,87,96,89,96,85,98,83,\r\nС,1111,,86,92,76,92,91,96,93,100,93,\r\nʩ,1111,Ů,88,94,86,92,94,94,82,92,96,Ŀ\r\n,1111,,89,87,88,97,89,92,94,96,86,\r\n,1111,,86,94,90,96,95,86,89,92,89,\r\n︣,1111,,87,93,92,89,84,92,95,98,86,\r\n´,1111,,83,93,86,86,86,92,98,98,93,\r\nԪ,1111,,92,94,91,92,80,91,91,94,87,\r\n,1111,,84,98,97,87,89,89,87,92,88,\r\nݽ,1111,Ů,91,91,89,91,89,94,80,96,88,\r\nݷƷ,1111,Ů,78,93,84,88,89,94,97,96,89,\r\n,1111,,88,96,92,88,84,98,91,82,89,\r\n½,1111,Ů,87,97,90,95,87,90,81,94,86,\r\n,1111,Ů,91,94,90,96,78,95,73,98,91,\r\nԬ,1111,Ů,94,96,96,90,85,92,81,82,89,\r\n,1111,Ů,88,90,91,91,84,96,88,94,83,\r\nͽ,1111,Ů,89,80,90,89,87,98,79,96,91,\r\n֣,1111,,84,94,83,94,85,88,77,94,96,\r\n,1111,,77,91,86,93,90,95,77,98,87,\r\nά,1111,,87,94,86,84,85,94,81,88,90,\r\nΡ,1111,,83,93,88,87,80,84,87,96,87,\r\n˼,1111,Ů,89,96,90,91,79,90,81,82,86,\r\n,1111,,86,93,88,87,83,92,72,88,85,\r\n׿F,1111,,84,98,79,92,84,90,70,84,78,\r\n۬·ң,1111,,88,92,83,89,75,90,70,86,84,\r\n菪,1111,,83,86,77,91,78,84,81,86,86,\r\n,1111,,86,93,85,87,81,84,47,98,90,\r\nǴ,1112,Ů,92,96,86,97,94,93,91,100,99,\r\nı,1112,,91,97,94,96,86,98,94,98,94,\r\n·,1112,Ů,93,98,96,97,84,91,87,100,99,\r\n»,1112,Ů,93,97,93,99,91,96,85,100,90,\r\n,1112,Ů,90,95,90,94,90,96,82,98,100,\r\nع,1112,,88,97,93,99,78,100,89,98,92,\r\nؽ,1112,,85,97,86,94,92,96,89,98,95,\r\nպ,1112,,87,93,85,92,88,94,97,98,96,\r\n˻,1112,,80,93,95,90,89,96,92,96,94,\r\n־,1112,,91,93,84,94,95,94,86,96,92,\r\n,1112,,91,93,83,94,92,92,91,94,92,\r\nٻ,1112,Ů,86,91,95,88,92,90,89,100,91,\r\n
,1112,Ů,89,98,91,95,87,94,91,86,89,\r\n,1112,Ů,88,92,90,89,93,92,87,100,87,\r\nſǿ,1112,,81,96,85,89,91,90,97,94,94,\r\nϼϼ,1112,Ů,90,89,89,94,90,94,94,86,91,\r\nԪ,1112,,92,82,78,92,92,94,95,98,93,\r\nȺ,1112,,83,91,83,97,94,98,75,96,97,\r\nͨ,1112,,92,90,85,95,89,92,80,96,95,\r\nԺ÷,1112,Ů,90,94,94,90,90,92,75,96,93,\r\n߶,1112,Ů,89,91,94,98,87,94,81,90,90,\r\nʢO,1112,Ů,83,96,83,91,90,87,96,94,93,\r\n뾧,1112,,91,92,88,89,89,96,84,96,88,\r\nи,1112,,87,90,74,95,90,94,86,100,96,\r\n,1112,Ů,86,95,88,94,81,92,92,96,88,\r\n,1112,,90,90,86,93,87,88,85,98,91,\r\n⿡,1112,,84,98,80,90,89,88,93,96,89,\r\n,1112,,86,92,85,97,88,93,78,94,92,\r\n,1112,,84,95,87,91,96,91,86,88,87,\r\n˼,1112,Ů,90,91,89,90,85,100,75,90,94,\r\n,1112,,89,94,87,93,84,88,77,98,92,\r\nǳ,1112,,84,96,81,81,81,94,95,94,92,\r\n,1112,,90,92,89,93,92,92,69,94,86,\r\nӺ,1112,,84,92,86,89,85,92,89,92,86,\r\nⴺ,1112,,84,93,93,89,94,88,76,90,87,\r\n,1112,,86,87,90,84,85,94,86,90,88,\r\n³,1112,,82,88,70,89,92,90,94,90,89,\r\nܾ,1112,Ů,93,89,86,92,81,92,64,90,93,\r\n϶,1112,Ů,87,91,81,89,76,84,83,94,93,\r\nСӢ,1112,Ů,81,76,89,94,87,86,75,96,92,\r\nԺ,1112,,87,81,92,91,93,86,70,86,89,\r\n,1112,Ů,85,85,76,90,81,96,74,88,96,\r\n,1112,Ů,84,86,91,89,84,86,70,86,91,\r\n,1112,Ů,84,85,86,88,82,96,81,82,83,\r\nͤͤ,1112,Ů,86,89,88,89,90,80,58,96,82,\r\nɳ,1112,,84,96,91,91,0,96,89,98,91,\r\n鸣,1113,,93,97,90,92,94,96,95,100,93,\r\nԳ,1113,,89,86,94,94,96,94,89,96,95,\r\nɣ,1113,,92,90,89,95,94,98,89,92,90,\r\n,1113,,88,89,86,91,92,94,94,98,94,\r\n,1113,Ů,90,92,85,91,95,96,88,98,90,\r\nҵ,1113,,85,97,82,94,91,96,91,100,88,\r\n,1113,,87,95,92,92,89,94,83,96,92,\r\nҳ,1113,,92,95,79,90,91,96,80,98,91,\r\nۭ,1113,,92,86,88,92,85,94,80,94,94,\r\n,1113,,77,96,83,93,92,98,83,90,93,\r\n,1113,,84,86,90,91,88,98,84,90,92,\r\nۺ,1113,,91,81,90,94,96,90,73,98,90,\r\nϯ޺,1113,,88,84,91,94,89,98,80,90,88,\r\n÷,1113,Ů,87,88,90,91,86,98,77,94,87,\r\n²,1113,,88,91,83,95,95,92,73,96,79,\r\n΢΢,1113,Ů,91,71,91,94,88,88,85,94,89,\r\nĲ,1113,,76,92,82,89,84,88,95,100,85,
\r\nظ,1113,,87,81,84,84,98,94,87,92,84,\r\n,1113,,80,81,87,91,89,94,84,92,90,\r\n񼶨,1113,,86,84,79,85,95,92,92,90,84,\r\nഫ,1113,,88,77,88,89,89,91,76,94,94,\r\n,1113,Ů,85,92,84,90,85,96,81,88,81,\r\n,1113,,85,81,82,87,90,98,81,90,87,\r\n,1113,Ů,87,76,89,97,86,96,72,90,87,\r\n,1113,Ů,80,77,90,92,79,89,91,90,89,\r\nľ,1113,Ů,90,82,89,96,92,88,75,86,76,Ŀ\r\nԴ,1113,,84,82,85,88,80,86,92,90,84,\r\nʱ,1113,,83,75,81,87,84,94,81,96,89,\r\n갷Ӣ,1113,Ů,86,90,87,86,88,88,78,78,87,\r\n,1113,Ů,80,91,94,93,75,92,69,92,82,\r\nݴ,1113,,88,68,79,84,88,92,82,96,88,\r\n,1113,Ů,87,83,84,95,73,90,76,90,87,\r\n˾ǽ,1113,,86,66,82,87,89,96,69,96,89,\r\n,1113,,89,85,80,96,84,93,60,86,87,\r\nũ־,1113,,82,73,84,95,96,94,63,92,79,\r\nǮ,1113,,81,71,90,84,78,92,87,84,83,\r\nٽ,1113,,88,82,72,92,78,88,69,96,84,\r\n,1113,,89,78,84,84,83,79,89,78,84,\r\nȪ,1113,,81,81,91,85,82,86,77,86,76,\r\nӽ,1113,,91,69,81,83,88,86,65,94,80,\r\nȫ,1113,,86,71,75,78,84,94,79,90,78,\r\n޺,1113,Ů,86,77,74,91,77,88,63,88,85,\r\nͯƽ,1113,,67,85,83,73,75,98,75,94,78,\r\n,1113,,83,80,85,89,86,92,56,80,66,\r\n,1113,,82,67,75,95,74,87,68,84,79,\r\nӢ,1113,,86,60,68,82,85,90,60,94,81,\r\n,1113,,82,68,70,74,77,90,71,78,83,\r\nï,1113,,66,58,73,67,58,80,51,80,72,\r\n,1113,,33,46,30,65,82,76,56,76,59,\r\n,1114,Ů,85,97,95,86,91,96,95,100,98,\r\nС,1114,,87,92,90,98,98,98,91,92,89,\r\n姳³,1114,,87,89,88,95,94,98,95,94,93,\r\n,1114,Ů,91,82,88,94,92,100,87,100,92,\r\n,1114,,86,89,88,94,93,96,86,96,95,\r\n˽,1114,,89,97,90,86,90,96,86,94,88,\r\n³·,1114,Ů,87,90,91,93,94,92,83,96,89,\r\n,1114,,91,84,89,92,83,96,97,90,91,\r\n־,1114,,88,95,87,93,96,92,77,92,90,\r\nϼ,1114,Ů,91,97,93,96,79,96,76,98,84,\r\n֦,1114,,87,95,86,94,83,82,89,98,93,\r\n,1114,Ů,88,83,85,97,87,95,92,96,78,\r\n÷,1114,Ů,88,88,87,95,93,88,83,98,78,\r\n,1114,,91,74,76,92,98,92,86,98,90,\r\nȫ,1114,,83,86,82,95,89,98,89,88,86,\r\n¶,1114,Ů,86,95,87,97,94,84,74,96,82,\r\n,1114,Ů,86,81,85,92,91,98,74,94,93,\r\n־,1114,,93,87,88,93,86,92,77,94,84,\r\n,1114,Ů,87,82,92,91,95,100,75,86,85,\r\n
ڽ,1114,,85,80,83,92,86,94,78,98,93,\r\n,1114,Ů,88,81,86,90,86,88,81,96,90,\r\n,1114,,88,88,86,71,85,92,87,94,90,\r\n,1114,,86,75,81,89,91,90,87,84,96,\r\n,1114,,83,77,93,84,87,90,85,96,83,\r\n,1114,,86,70,84,96,81,88,88,94,90,\r\n˷,1114,Ů,89,87,80,92,82,92,76,96,82,\r\n,1114,,81,84,91,96,94,88,78,88,75,\r\n־,1114,Ů,85,92,85,95,92,88,77,78,82,\r\nԶ,1114,,86,90,93,86,80,92,86,82,79,\r\n̫ǿ,1114,,85,78,84,94,86,90,76,88,90,\r\n,1114,,86,77,85,87,96,92,76,88,84,\r\n,1114,Ů,84,88,92,93,77,82,79,90,85,\r\nν,1114,,88,86,89,83,89,85,73,94,79,\r\nӢ,1114,Ů,80,77,81,92,82,94,80,92,87,\r\n첨,1114,Ů,85,94,87,89,83,88,78,82,75,\r\nƽ,1114,,82,84,80,88,81,84,85,92,77,\r\n־,1114,,82,84,77,80,86,89,74,94,86,\r\n,1114,,85,73,73,86,77,90,80,94,93,\r\nΣ,1114,,81,92,79,93,72,92,78,86,75,\r\nʩ,1114,,84,79,67,88,83,94,77,90,85,\r\n,1114,Ů,88,80,66,86,86,86,66,92,89,\r\nţ,1114,,85,74,74,90,86,88,64,92,73,\r\n,1114,,86,84,84,83,83,91,68,66,73,\r\nھ,1114,Ů,83,83,75,86,85,94,43,88,78,\r\n,1114,,78,74,80,93,76,92,58,92,72,\r\nӳ,1114,,79,68,79,80,80,86,68,84,82,\r\n཭,1114,,78,64,82,81,77,82,79,82,66,\r\n˳,1114,,77,62,78,77,67,80,50,60,70,Ŀ\r\n,1115,Ů,88,89,91,95,93,94,91,100,99,\r\n,1115,,86,94,86,94,90,96,94,98,93,\r\n|,1115,,86,97,88,91,98,100,89,94,85,\r\n,1115,,86,96,84,89,92,92,91,100,89,\r\n,1115,,83,98,86,95,84,94,84,100,92,\r\n,1115,,90,83,92,93,92,96,85,96,88,\r\n,1115,,85,98,83,85,88,92,90,100,93,\r\n,1115,,88,96,87,97,86,93,82,94,88,\r\nʷ־,1115,,86,89,83,92,83,97,91,92,93,\r\n־,1115,,87,97,88,90,92,91,84,90,87,\r\nף,1115,,84,87,88,87,90,96,80,96,93,\r\n,1115,,84,87,84,94,91,96,83,96,86,\r\n,1115,,80,88,85,93,89,90,86,100,86,\r\n,1115,Ů,85,92,87,95,81,88,87,94,86,\r\n,1115,,79,83,93,88,88,94,84,100,84,\r\nׯ,1115,,80,87,85,88,83,90,92,98,88,\r\n˳,1115,,82,89,83,94,85,98,83,88,88,\r\n,1115,,86,90,85,82,88,88,86,92,90,\r\n׺,1115,,91,83,77,91,86,94,89,92,83,\r\n,1115,,87,78,88,89,87,94,84,98,79,\r\n㷢,1115,,90,91,89,87,86,80,78,92,88,\r\n㷼,1115,Ů,81,95,88,86,68,96,88,92,85,\r\n,1115,Ů,78,91,8
1,89,93,94,85,90,78,\r\nﻳƽ,1115,,88,80,87,92,84,88,88,90,79,\r\nС,1115,,84,82,84,90,84,92,85,90,83,\r\nС÷,1115,Ů,92,83,88,93,88,93,72,82,83,\r\nС,1115,Ů,90,74,87,93,85,90,85,92,78,\r\nͨ,1115,,85,88,89,88,83,92,89,82,74,\r\nС,1115,Ů,83,86,85,95,79,96,75,86,84,\r\nʷѧ,1115,,82,78,85,89,89,88,81,94,80,\r\nɽ,1115,,88,87,82,82,86,94,75,86,81,\r\n,1115,,87,85,79,91,86,94,75,84,80,\r\n,1115,,87,91,86,90,72,92,67,90,85,\r\nܿܿ,1115,,87,82,78,96,81,94,67,88,84,\r\nƽ,1115,,0,0,0,0,0,0,0,0,0,\r\nȴ,1115,,82,82,81,89,79,84,91,88,80,\r\n,1115,,79,74,75,89,81,86,86,94,84,\r\në,1115,,82,72,81,92,86,82,72,90,88,\r\nھ,1115,,79,89,72,84,87,86,73,88,81,\r\nٻ,1115,,81,79,79,87,92,86,72,84,79,\r\nh,1115,,78,62,79,94,93,90,83,86,74,\r\n,1115,,88,73,84,87,81,84,81,88,73,\r\n,1115,Ů,91,85,88,89,61,92,71,82,79,\r\nı,1115,,88,68,77,91,82,87,62,88,67,\r\n̸ݸ,1115,,79,78,70,83,86,86,76,78,70,\r\nӦ,1115,,87,63,55,93,100,94,43,88,82,\r\nƽ,1115,,85,59,89,80,85,82,61,64,75,\r\nФ,1115,,81,62,76,89,76,91,49,68,74,\r\n֣Ľ,1115,,72,59,82,92,85,82,59,58,55,"
  }
]