
I'm using glmnet to fit some models and am cross-validating lambda. I use cv.glmnet by default (since it handles the internal cross-validation over lambda), but below I'll focus on the first step of that function, which is where the problem arises.

First, the data setup. I haven't managed to make a reproducible example and I can't share the original data, but dim(smat) is roughly 4.7 million rows by 50 columns, about half of which are dense. A naive attempt at reproducing the problem with completely random columns came up empty (a rough sketch of that attempt follows the setup code below).

# data setup (censored)
library(data.table)
DT = fread(...)
n_cv = 10L

# assign cross-validation group to an ID (instead of to a row)
IDs = DT[ , .(rand_id = runif(1L)), keyby = ID]
IDs[order(rand_id), cv_grp := .I %% n_cv + 1L]
DT[IDs, cv_grp := i.cv_grp, on = 'ID']

# key by cv_grp to facilitate subsetting different training sets
setkey(DT, cv_grp)
# assign row number as column to facilitate subsetting model matrix
DT[ , rowN := .I]

library(glmnet)
library(Matrix)

# y is 0/1 (actually TRUE/FALSE)
model = y ~ ...
smat = sparse.model.matrix(model, data = DT)
# this is what's done internally to 0-1 data to create
#   an n x 2 matrix with FALSE in the 1st and TRUE in the 2nd column
ymat = diag(2L)[factor(DT$y), ]
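
For reference, the failed random-column repro attempt mentioned above was roughly along these lines (a sketch, not the exact code I ran; the sizes, densities, and dense/sparse split here are placeholders, much smaller than the real data):

# sketch of the failed random-data repro attempt (sizes/densities are placeholders)
set.seed(20394)
n_fake = 1e5L
dense_cols  = Matrix(rnorm(n_fake * 25L), nrow = n_fake, ncol = 25L, sparse = TRUE)
sparse_cols = rsparsematrix(n_fake, 25L, density = 0.05)
smat_fake = cbind(dense_cols, sparse_cols)
ymat_fake = diag(2L)[factor(runif(n_fake) > 0.5), ]
cv_grp_fake = sample(n_cv, n_fake, replace = TRUE)
# feeding these into the parallel foreach loop below (with
#   train_idx = which(cv_grp_fake != i)) never produced NULL fits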

Here's a bespoke version of the first step that cv.glmnet performs before handing off to cv.lognet:

train_models = lapply(seq_len(n_cv), function(i) {
  train_idx = DT[!.(i), rowN]
  glmnet(smat[train_idx, , drop = FALSE], ymat[train_idx, ],
         alpha = 1, family = 'binomial')
})

This appears to work fine, but it's slow. If we swap it out for the parallel = TRUE equivalent:

library(doMC)
registerDoMC(detectCores())
train_models_par = foreach(i = seq_len(n_cv), .packages = c("glmnet", "data.table")) %dopar% {
  train_idx = DT[!.(i), rowN]
  glmnet(smat[train_idx, , drop = FALSE], ymat[train_idx, ],
         alpha = 1, family = 'binomial')
}

The glmnet call fails silently on some of the nodes (compare with the serial run, where any(sapply(train_models, is.null)) is FALSE):

sapply(train_models_par, is.null)
# [1] FALSE  TRUE FALSE FALSE FALSE  TRUE FALSE FALSE FALSE FALSE

Which tasks fail is inconsistent from run to run (so it's not a problem with, say, cv_grp = 2 itself). I've tried capturing the output of glmnet and checking is.null, to no avail. I've also added the .verbose = TRUE flag to foreach, and nothing suspicious showed up. Note that the data.table syntax is incidental: the default behavior of cv.glmnet, which relies on which = foldid == i to split the training and test sets, also leads to similar failures.
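
For concreteness, the capture attempt was roughly along these lines (a sketch; fit_one is just an illustrative wrapper name, and .errorhandling = 'pass' makes foreach return error objects instead of re-raising them). As noted, this hasn't surfaced any error or warning for the failing tasks:

# wrap each fit so that errors/warnings come back as condition objects
fit_one = function(i) {
  train_idx = DT[!.(i), rowN]
  tryCatch(
    glmnet(smat[train_idx, , drop = FALSE], ymat[train_idx, ],
           alpha = 1, family = 'binomial'),
    error = function(e) e, warning = function(w) w
  )
}
train_try = foreach(i = seq_len(n_cv), .packages = c("glmnet", "data.table"),
                    .errorhandling = 'pass') %dopar% fit_one(i)
# see what each task actually returned -- ideally a fitted model, otherwise a condition
sapply(train_try, function(x) class(x)[1L])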

How can I debug this? Why might a task fail when parallelized but not serially, and how can I catch a task that has failed (so that, for example, I can retry it)?

Current environment info:

sessionInfo()
# R version 3.4.3 (2017-11-30)
# Platform: x86_64-pc-linux-gnu (64-bit)
# Running under: Ubuntu 16.04.3 LTS
# 
# Matrix products: default
# BLAS: /usr/lib/libblas/libblas.so.3.6.0
# LAPACK: /usr/lib/lapack/liblapack.so.3.6.0
# 
# locale:
#  [1] LC_CTYPE=en_US.UTF-8      
#  [2] LC_NUMERIC=C              
#  [3] LC_TIME=en_US.UTF-8       
#  [4] LC_COLLATE=en_US.UTF-8    
#  [5] LC_MONETARY=en_US.UTF-8   
#  [6] LC_MESSAGES=en_US.UTF-8   
#  [7] LC_PAPER=en_US.UTF-8      
#  [8] LC_NAME=C                 
#  [9] LC_ADDRESS=C              
# [10] LC_TELEPHONE=C            
# [11] LC_MEASUREMENT=en_US.UTF-8
# [12] LC_IDENTIFICATION=C       
# 
# attached base packages:
# [1] parallel  stats     graphics  grDevices utils    
# [6] datasets  methods   base     
# 
# other attached packages:
# [1] ggplot2_2.2.1     doMC_1.3.5       
# [3] iterators_1.0.8   glmnet_2.0-13    
# [5] foreach_1.4.3     Matrix_1.2-12    
# [7] data.table_1.10.5
# 
# loaded via a namespace (and not attached):
#  [1] Rcpp_0.12.14     lattice_0.20-35 
#  [3] codetools_0.2-15 plyr_1.8.3      
#  [5] grid_3.4.3       gtable_0.1.2    
#  [7] scales_0.5.0     rlang_0.1.4     
#  [9] lazyeval_0.2.1   tools_3.4.3     
# [11] munsell_0.4.2    yaml_2.1.13     
# [13] compiler_3.4.3   colorspace_1.2-4
# [15] tibble_1.3.4   

system('free -m')
# total        used        free      shared  buff/cache   available
# Mem:          30147        1786       25087           1        3273       28059
# Swap:             0           0           0

detectCores()
# [1] 16

system('lscpu | grep "Model name"')
# Model name:            Intel(R) Xeon(R) CPU E5-2666 v3 @ 2.90GHz