
If I train two models with caret (NN and KNN) and then want to report a significance test, how do I perform a Wilcoxon test?

A sample of my data is provided below:

    structure(list(Input = c(25, 193, 70, 40), Output = c(150, 98, 27, 60),
                   Inquiry = c(75, 70, 0, 20), File = c(60, 36, 12, 12),
                   FPAdj = c(1, 1, 0.8, 1.15), RawFPcounts = c(1750, 1902, 535, 660),
                   AdjFP = c(1750, 1902, 428, 759), Effort = c(102.4, 105.2, 11.1, 21.1)),
              row.names = c(NA, 4L), class = "data.frame")

    library(caret)   # train(), createDataPartition(), postResample()
    library(farff)   # readARFF()

    d <- readARFF("albrecht.arff")
    index <- createDataPartition(d$Effort, p = .70, list = FALSE)
    tr <- d[index, ]
    ts <- d[-index, ]

    boot <- trainControl(method = "repeatedcv", number = 100)

    cart1 <- train(log10(Effort) ~ ., data = tr,
                   method = "knn",
                   metric = "MAE",
                   preProc = c("center", "scale", "nzv"),
                   trControl = boot)

    postResample(predict(cart1, ts), log10(ts$Effort))

    cart2 <- train(log10(Effort) ~ ., data = tr,
                   method = "knn",
                   metric = "MAE",
                   preProc = c("center", "scale", "nzv"),
                   trControl = boot)

    postResample(predict(cart2, ts), log10(ts$Effort))

How do I perform wilcox.test() here?

Warm regards

2 Answers


One way to approach your problem is to generate multiple performance values for knn and NN, which you can then compare with a statistical test. This can be achieved with nested resampling.

In nested resampling, you perform the train/test split multiple times and evaluate the model on each test set.

For example, let's use the BostonHousing data:

library(caret)
library(mlbench)

data(BostonHousing)

To keep the example simple, let's select only the numeric columns:

d <- BostonHousing[,sapply(BostonHousing, is.numeric)]

As far as I know, there is no way to perform nested CV in caret out of the box, so a simple wrapper is needed.

Generate the outer folds for the nested CV:

outer_folds <- createFolds(d$medv, k = 5)

Let's use bootstrap resampling as the inner resampling loop to tune the hyperparameters:

boot <- trainControl(method = "boot",
                     number = 100)

Now loop over the outer folds, performing the hyperparameter optimization on each training set and predicting on the corresponding test set:

CV_knn <- lapply(outer_folds, function(index){
  tr <- d[-index, ]
  ts <- d[index,]
  
  cart1 <- train(medv ~ ., data = tr,
                 method = "knn",
                 metric = "MAE",
                 preProc = c("center", "scale", "nzv"),
                 trControl = boot,
                 tuneLength = 10) # to keep it short, probe only 10 hyperparameter combinations
  
  postResample(predict(cart1, ts), ts$medv)
})

Extract just the MAE from the results:

CV_knn_MAE <- sapply(CV_knn, function(x) x[3])
CV_knn_MAE
#output
Fold1.MAE Fold2.MAE Fold3.MAE Fold4.MAE Fold5.MAE 
 2.503333  2.587059  2.031200  2.475644  2.607885 
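
Since postResample() returns a named vector (RMSE, Rsquared, MAE), indexing by name rather than by position is slightly more robust; this is an equivalent alternative:

CV_knn_MAE <- sapply(CV_knn, function(x) x["MAE"])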

Do the same for another learner, for example glmnet:

CV_glmnet <- lapply(outer_folds, function(index){
  tr <- d[-index, ]
  ts <- d[index,]
  
  cart1 <- train(medv ~ ., data = tr,
                 method = "glmnet",
                 metric = "MAE",
                 preProc = c("center", "scale", "nzv"),
                 trControl = boot,
                 tuneLength = 10)
  
  postResample(predict(cart1, ts), ts$medv)
})

CV_glmnet_MAE <- sapply(CV_glmnet, function(x) x[3])

CV_glmnet_MAE
#output
Fold1.MAE Fold2.MAE Fold3.MAE Fold4.MAE Fold5.MAE 
 3.400559  3.383317  2.830140  3.605266  3.525224

Now compare the two using wilcox.test(). Since the performance values of both learners were generated from the same data splits, a paired test is appropriate:

wilcox.test(CV_knn_MAE,
            CV_glmnet_MAE,
            paired = TRUE)
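
If you need the p-value programmatically (for example, to tabulate several comparisons), it can be extracted from the htest object that wilcox.test() returns:

res <- wilcox.test(CV_knn_MAE, CV_glmnet_MAE, paired = TRUE)
res$p.value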

If you are comparing more than two algorithms, friedman.test() can be used.
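
A minimal sketch of how that could look, assuming a third learner had been evaluated on the same outer folds (CV_rf_MAE is hypothetical here): friedman.test() accepts a matrix with blocks (folds) in the rows and groups (learners) in the columns.

# rows = outer folds (blocks), columns = learners (groups)
# CV_rf_MAE is hypothetical -- a third learner's MAE on the same outer folds
mae <- cbind(knn = CV_knn_MAE, glmnet = CV_glmnet_MAE, rf = CV_rf_MAE)
friedman.test(mae)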

Answered 2020-06-04T11:07:08.540

Would this work for you?

library(caret)
df <- structure(list(Input = c(25, 193, 70, 40), Output = c(150, 98, 27, 60),
                     Inquiry = c(75, 70, 0, 20), File = c(60, 36, 12, 12),
                     FPAdj = c(1, 1, 0.8, 1.15), RawFPcounts = c(1750, 1902, 535, 660),
                     AdjFP = c(1750, 1902, 428, 759), Effort = c(102.4, 105.2, 11.1, 21.1)),
                row.names = c(NA, 4L), class = "data.frame")

# not enough data points in df for ML: increase the number of df rows X10
d <- df[rep(seq_len(nrow(df)), 10), ]

index <- createDataPartition(d$Effort, p = .70, list = FALSE)
tr <- d[index, ]
ts <- d[-index, ]

boot <- trainControl(method = "repeatedcv", number = 100)

cart1 <- train(log10(Effort) ~ ., data = tr,
               method = "knn",
               metric = "MAE",
               preProc = c("center", "scale", "nzv"),
               trControl = boot)

# save the output to "model_predictions_1"
model_predictions_1 <- postResample(predict(cart1, ts), log10(ts$Effort))

cart2 <- train(log10(Effort) ~ ., data = tr,
               method = "knn",
               metric = "MAE",
               preProc = c("center", "scale", "nzv"),
               trControl = boot)

# save the output to "model_predictions_2"
model_predictions_2 <- postResample(predict(cart2, ts), log10(ts$Effort))

# test model_predictions_1 vs model_predictions_2
wilcox.test(model_predictions_1, model_predictions_2, exact = FALSE)
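
Note that cart1 and cart2 above are both knn models, mirroring the question's code. To actually compare KNN against a neural network, the second model's method can be swapped, e.g. for caret's "nnet" method; a sketch (trace = FALSE is simply passed through to nnet to silence its training log):

cart2 <- train(log10(Effort) ~ ., data = tr,
               method = "nnet",  # single-hidden-layer neural network
               metric = "MAE",
               preProc = c("center", "scale", "nzv"),
               trControl = boot,
               trace = FALSE)    # suppress nnet's iteration output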
Answered 2020-06-04T00:38:32.117