
For a while now I have been using a small function, tab, which shows the frequencies, percentages and cumulative percentages of a vector. The output looks like this

          Freq    Percent        cum
ARSON      462 0.01988893 0.01988893
BURGLARY 22767 0.98011107 1.00000000
         23229 1.00000000         NA

The excellent dplyr package motivated me to update the function, and now I am wondering how to make the updated version even faster. Here is the old function

tab = function(x,useNA =FALSE) {
  # one row per distinct non-NA value, plus a totals row (and an NA row if requested)
  k=length(unique(x[!is.na(x)]))+1
  if (useNA) k=k+1
  tab=array(NA,c(k,3))
  colnames(tab)=c("freq.","prob.","cum.")
  useNA=ifelse(useNA,"always","no")
  rownames(tab)=names(c(table(x,useNA=useNA),""))

  # frequencies and proportions for the value rows, cumulated down the last column
  tab[-nrow(tab),1]=table(x,useNA=useNA)
  tab[-nrow(tab),2]=prop.table(table(x,useNA=useNA))
  tab[,3] = cumsum(tab[,2])
  # the last row holds the column totals
  if(k>2)  tab[nrow(tab),-3]=colSums(tab[-nrow(tab),-3])
  if(k==2) tab[nrow(tab),-3]=tab[-nrow(tab),-3]

  tab
}

and the new dplyr-based one

library(dplyr)

tab2 = function(x, useNA =FALSE) {
    if(!useNA) if(any(is.na(x))) x = na.omit(x)
    n = length(x)
    out = data.frame(x,1) %.%            # the dummy second column becomes X1
        group_by(x) %.%
        dplyr::summarise(
            Freq    = length(X1),        # group size, counted via the dummy column
            Percent = Freq/n
        ) %.%
        dplyr::arrange(x)
    ids = as.character(out$x)
    ids[is.na(ids)] = '<NA>'
    out = select(out, Freq, Percent)
    out$cum = cumsum(out$Percent)
    class(out)="data.frame"
    out = rbind(out,c(n,1,NA))           # totals row
    rownames(out) = c(ids,'')
    out
}
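
Note: %.% was the chaining operator in the dplyr version current at the time and has since been deprecated in favour of %>%. Purely for reference, a rough and unbenchmarked sketch of the same idea in the newer syntax could look like this (tab3 is just an illustrative name):

tab3 = function(x, useNA = FALSE) {
    if(!useNA) x = x[!is.na(x)]
    n_total = length(x)
    out = data.frame(x = x) %>%
        group_by(x) %>%
        summarise(Freq = n()) %>%                  # n() gives the group size
        arrange(x) %>%
        mutate(Percent = Freq/n_total, cum = cumsum(Percent)) %>%
        as.data.frame()
    ids = as.character(out$x)
    ids[is.na(ids)] = '<NA>'
    out$x = NULL
    out = rbind(out, c(n_total, 1, NA))            # totals row, as in tab2()
    rownames(out) = c(ids, '')
    out
}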

Finally, some performance benchmarks:

x1 = c(rep('ARSON',462),rep('BURGLARY',22767))
x2 = c(rep('ARSON',462),rep('BURGLARY',22767),rep(NA,100))
x3 = c(c(1:10),c(1:10),1,4)
x4 = c(rep(c(1:100),500),rep(c(1:50),20),1,4)

library('rbenchmark')

benchmark(tab(x1), tab2(x1), replications=100)[,c('test','elapsed','relative')]
#       test elapsed relative
# 1  tab(x1)   1.412    2.307
# 2 tab2(x1)   0.612    1.000

benchmark(tab(x2),tab2(x2), replications=100)[,c('test','elapsed','relative')]
#       test elapsed relative
# 1  tab(x2)   1.351    1.475
# 2 tab2(x2)   0.916    1.000

benchmark(tab(x2,useNA=TRUE), tab2(x2,useNA=TRUE), replications=100)[,c('test','elapsed','relative')]
#                     test elapsed relative
# 1  tab(x2, useNA = TRUE)   1.883    2.282
# 2 tab2(x2, useNA = TRUE)   0.825    1.000

benchmark(tab(x3), tab2(x3), replications=1000)[,c('test','elapsed','relative')]
#       test elapsed relative
# 1  tab(x3)   0.997    1.000
# 2 tab2(x3)   2.194    2.201

benchmark(tab(x4), tab2(x4), table(x4), replications=100)[,c('test','elapsed','relative')]
#        test elapsed relative
# 1   tab(x4)  19.481   18.714
# 2  tab2(x4)   1.041    1.000
# 3 table(x4)   6.515    6.258

tab2 is faster except for very short vectors. The performance gain becomes apparent with larger vectors (see x4 with 51,002 obs). It is even faster than table itself, by more than I had expected.
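
As an aside: table() internally converts its input to a factor and builds names/dimnames, so a bare-bones count via base tabulate() on the factor codes avoids some of that overhead. Purely as an illustrative sketch (not benchmarked here, ignoring the useNA option; the name tabf is made up):

tabf = function(x) {
    f   = factor(x)                         # drops NAs, maps values to integer codes
    cnt = tabulate(f, nbins = nlevels(f))   # counts per level
    pct = cnt/sum(cnt)
    out = data.frame(Freq = cnt, Percent = pct, cum = cumsum(pct))
    out = rbind(out, c(sum(cnt), 1, NA))    # totals row, as in tab()
    rownames(out) = c(levels(f), '')
    out
}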

Now to my question: How can I improve the performance further? Creating tables with frequencies and percentages is a pretty standard task, and a fast implementation is very welcome when you work with large data sets.

Edit: Here is an additional test case with a vector of 2e6 elements (including the data.table solution proposed below)

x5 = sample(c(1:100),2e6, replace=TRUE)
benchmark(tab(x5), tab2(x5), table(x5), tabdt(x5), replications=100)[,c('test','elapsed','relative')]
#        test elapsed relative
# 1   tab(x5) 350.878   19.444
# 2  tab2(x5)  52.917    2.932
# 4 tabdt(x5)  18.046    1.000
# 3 table(x5)  98.429    5.454

1 Answer


Since I'm a big fan of library(data.table), I wrote a similar function:

library(data.table)

tabdt <- function(x){
    n <- length(which(!is.na(x)))                             # number of non-NA observations
    dt <- data.table(x)
    out <- dt[, list(Freq = .N, Percent = .N / n), by = x]    # .N = group size
    out[!is.na(x), CumSum := cumsum(Percent)]                 # cumulative share, added by reference
    out
}
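
An assumed usage note (not part of the original answer): the result is a data.table with columns x, Freq, Percent and CumSum; the NA row keeps CumSum = NA because of the !is.na(x) filter.

tabdt(x2)   # x2 is the vector with NAs from the question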

> benchmark(tabdt(x1), tab2(x1), replications=1000)[,c('test','elapsed','relative')]
       test elapsed relative
2  tab2(x1)    5.60    1.879
1 tabdt(x1)    2.98    1.000
> benchmark(tabdt(x2), tab2(x2), replications=1000)[,c('test','elapsed','relative')]
       test elapsed relative
2  tab2(x2)    6.34    1.686
1 tabdt(x2)    3.76    1.000
> benchmark(tabdt(x3), tab2(x3), replications=1000)[,c('test','elapsed','relative')]
       test elapsed relative
2  tab2(x3)    1.65    1.000
1 tabdt(x3)    2.34    1.418
> benchmark(tabdt(x4), tab2(x4), replications=1000)[,c('test','elapsed','relative')]
       test elapsed relative
2  tab2(x4)   14.35    1.000
1 tabdt(x4)   22.04    1.536

So the data.table approach is faster for x1 and x2, while dplyr is faster for x3 and x4. Actually, I don't see any room for improvement using these approaches.

p.s. Would you add the data.table tag to this question? I believe people would love to see a dplyr vs data.table performance comparison (see, for example, data.table vs dplyr: can one do something well the other can't or does poorly?).

Answered on 2014-01-31 at 12:53