I'm trying to use caret with a parallel backend to train xgboost models over a grid of hyperparameters.
Below is some code that uses the Give Me Some Credit data to demonstrate setting up a parallel backend for a hyperparameter grid search with caret.
library(plyr)
library(dplyr)
library(pROC)
library(caret)
library(xgboost)
library(readr)
library(parallel)
library(doParallel)
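# tear down any previous cluster, then start PSOCK workers on the two remote hosts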
if(exists("xgboost_cluster")) stopCluster(xgboost_cluster)
hosts = paste0("192.168.18.", 52:53)
xgboost_cluster = makePSOCKcluster(hosts, master="192.168.18.51")
# load the packages across the cluster
clusterEvalQ(xgboost_cluster, {
  deps = c("plyr", "Rcpp", "dplyr", "caret", "xgboost", "pROC", "foreach", "doParallel")
  for(d in deps) library(d, character.only = TRUE)
  rm(d, deps)
})
registerDoParallel(xgboost_cluster)
# load in the training data
df_train = read_csv("04-GiveMeSomeCredit/Data/cs-training.csv") %>%
  na.omit() %>%                                       # listwise deletion
  select(-`[EMPTY]`) %>%
  mutate(SeriousDlqin2yrs = factor(SeriousDlqin2yrs,  # factor variable for classification
                                   labels = c("Failure", "Success")))
# set up the cross-validated hyper-parameter search
xgb_grid_1 = expand.grid(
  nrounds = 1000,
  eta = c(0.01, 0.001, 0.0001),
  max_depth = c(2, 4, 6, 8, 10),
  gamma = 1,
  colsample_bytree = 1,   # held fixed; current caret versions require these
  min_child_weight = 1,   # columns in the grid for method = "xgbTree"
  subsample = 1
)
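(This grid has 3 eta values x 5 max_depth values = 15 parameter combinations, so with 5-fold CV caret fits 15 x 5 = 75 models; that is the work being distributed across the cluster.)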
# pack the training control parameters
xgb_trcontrol_1 = trainControl(
  method = "cv",
  number = 5,
  verboseIter = TRUE,
  returnData = FALSE,
  returnResamp = "all",               # save losses across all models
  classProbs = TRUE,                  # set to TRUE for AUC to be computed
  summaryFunction = twoClassSummary,
  allowParallel = TRUE
)
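With allowParallel = TRUE, caret dispatches the resampling loop through whatever foreach backend is currently registered, i.e. the doParallel cluster registered above.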
# train the model for each parameter combination in the grid,
# using CV to evaluate
xgb_train_1 = train(
  x = as.matrix(df_train %>% select(-SeriousDlqin2yrs)),
  y = df_train$SeriousDlqin2yrs,
  trControl = xgb_trcontrol_1,
  tuneGrid = xgb_grid_1,
  method = "xgbTree"
)
I've checked that all of the cores on the hosts are being used for training, but no processes are running on the master node. Is this the expected behavior? Is there any way to change this behavior and put the master node's cores to work as well?
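For reference, here is a minimal sketch of the variant I'm considering; it assumes that listing "localhost" once per core adds workers on the master alongside the remote hosts, which is the part I'm unsure about:
local_workers = rep("localhost", detectCores())  # cores on the master, 192.168.18.51
remote_workers = paste0("192.168.18.", 52:53)    # the two remote hosts
xgboost_cluster = makePSOCKcluster(c(local_workers, remote_workers),
                                   master = "192.168.18.51")
registerDoParallel(xgboost_cluster)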