
In sklearn 0.17.1 there was grid_scores_, a list of named tuples (https://scikit-learn.org/0.17/modules/generated/sklearn.grid_search.GridSearchCV.html#sklearn.grid_search.GridSearchCV).

In sklearn 0.21.2 it has been replaced with cv_results_, a dict of numpy (masked) ndarrays (https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html).

Previously, with sklearn 0.17.1, I was able to plot all grid parameters on a single plot using grid_scores_, but now I am unable to aggregate the values obtained from cv_results_, because there is no "mean_validation_score" in the newer version.
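If I understand the docs correctly, the closest replacement seems to be the "mean_test_score" key, which lines up index-for-index with the "params" list. A minimal sketch of the correspondence as I understand it, assuming grid_search is an already-fitted GridSearchCV object:

# sklearn 0.17.1: grid_scores_ was a list of named tuples
# for score in grid_search.grid_scores_:
#     print(score.parameters, score.mean_validation_score)

# sklearn 0.21.2: cv_results_ is a dict of parallel arrays, so
# "params" and "mean_test_score" line up index-for-index
for params, mean_score in zip(grid_search.cv_results_["params"],
                              grid_search.cv_results_["mean_test_score"]):
    print(params, mean_score)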

I have existing code which plotted all the parameter scores in sklearn 0.17.1 (https://scikit-learn.org/0.17/modules/generated/sklearn.grid_search.GridSearchCV.html#sklearn.grid_search.GridSearchCV) using grid_scores_, and it plotted all the values on one plot perfectly.

In the newer version of sklearn, grid_scores_ has been replaced by cv_results_. I have tried to append all the values so that I can plot all the parameters on one plot, but currently I am unable to add the correct values to plot on the graph.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import roc_curve, auc, roc_auc_score
from sklearn.metrics import precision_recall_curve # sklearn.metrics.ranking is a private module
from sklearn.datasets import make_classification
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn import tree
from sklearn.metrics import accuracy_score
import sklearn
import itertools
from pandas.plotting import scatter_matrix # pandas.tools.plotting was removed in newer pandas
import os
import datetime as dt
from operator import itemgetter
from itertools import chain
import graphviz 

from sklearn.metrics import precision_recall_fscore_support
import scikitplot as skplt

X_train = np.random.randint(0, 2, size=[500, 5000]) # high bound is exclusive; randint(0, 1) yields all zeros
y_train = np.random.randint(0, 2, size=500)         # binary labels, otherwise y_train is single-class

print(X_train.shape, y_train.shape)
# (500, 5000) (500,)

#grid_search = GridSearchCV(clf, param_grid, cv=3) # 10 fold cross validation

### hyperparameter estimator
param_grid = {"criterion": ["gini", "entropy"], 
              "splitter": ["best", "random"],
              "max_depth": np.arange(1,9,7), 
              "min_samples_split": np.arange(2,150,90),
              "min_samples_leaf": np.arange(1,60,45), 
              "min_weight_fraction_leaf": np.arange(0.1,0.4, 0.3), 
              "max_features": [1000, 500, 5000],  
              "max_leaf_nodes": np.arange(2,60,45),
              "min_impurity_decrease": [0.0, 0.5], 
              }  


def evaluate_param(parameter, param_range, index):
    grid_search = GridSearchCV(clf, param_grid = {parameter: param_range}, cv=3) # 3 fold cross validation
    grid_search.fit(X_train, y_train) ### grid_search.fit(X_train[features], y_train)

    df = {}
    #for i, score in enumerate(grid_search.grid_scores_): # previously used method
    for i, score in enumerate(grid_search.cv_results_["params"]):
        ## How do we save the correct values here for plotting?
        ## This overwrites df[parameter] on every iteration,
        ## so only the last parameter value survives:
        df[parameter] = grid_search.cv_results_["params"][i][parameter]
        #df[parameter].update(grid_search.cv_results_["params"][i][parameter]) # fails: df[parameter] is a scalar
        #df[parameter].append(grid_search.cv_results_["params"][i][parameter]) # fails: df[parameter] is not a list

    #print("df : ", df) # the values are not appended to the keys
    df = pd.DataFrame.from_dict(df, orient='index')
    df.reset_index(level=0, inplace=True)
    df = df.sort_values(by='index')

    plt.subplot(5,2,index) # Change here according to the number of parameters
    plt.xlabel(parameter, color = "red")
    plt.ylabel("GridSearchCV Score", color= "blue")
    plot = plt.plot(df['index'], df[0])
    plt.title(parameter.capitalize(), color = "red")
    plt.savefig('DT_GridSearchCV_Score_Hyperparameter.png')
    return plot, df

clf = tree.DecisionTreeClassifier(random_state=99) # DecisionTreeClassifier itself accepts neither verbose nor n_jobs

### hyperparameter estimator
index = 1
plt.figure(figsize=(30,30))
for parameter, param_range in param_grid.items():
    evaluate_param(parameter, param_range, index)  ## 120 features
    index += 1
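
I think the collection step should look something like the sketch below (building parallel lists of parameter values and mean test scores instead of overwriting a single dict key), but I am not sure "mean_test_score" is the right replacement for "mean_validation_score" (evaluate_param_sketch is just a hypothetical rewrite, using the same clf, X_train, y_train as above):

def evaluate_param_sketch(parameter, param_range, index):
    grid_search = GridSearchCV(clf, param_grid={parameter: param_range}, cv=3)
    grid_search.fit(X_train, y_train)

    # one (value, score) pair per candidate, in matching order
    values = [p[parameter] for p in grid_search.cv_results_["params"]]
    scores = grid_search.cv_results_["mean_test_score"]

    df = pd.DataFrame({parameter: values, "score": scores}).sort_values(by=parameter)

    plt.subplot(5, 2, index)
    plt.xlabel(parameter, color="red")
    plt.ylabel("GridSearchCV Score", color="blue")
    plot = plt.plot(df[parameter], df["score"])
    plt.title(parameter.capitalize(), color="red")
    return plot, df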

The subplots in this image stay empty because there is no "mean_validation_score" left to fill each subplot with: https://ibb.co/Z6jwnMr

## .keys() lists everything that cv_results_ exposes:
grid_search.cv_results_.keys()
# output
# dict_keys(['mean_fit_time', 'std_fit_time', 'mean_score_time', 'std_score_time', 'param_criterion', 'param_max_depth', 'param_max_features', 'param_max_leaf_nodes', 'param_min_impurity_decrease', 'param_min_samples_leaf', 'param_min_samples_split', 'param_min_weight_fraction_leaf', 'param_splitter', 'params', 'split0_test_score', 'split1_test_score', 'split2_test_score', 'mean_test_score', 'std_test_score', 'rank_test_score', 'split0_train_score', 'split1_train_score', 'split2_train_score', 'mean_train_score', 'std_train_score'])
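
If it helps, the whole cv_results_ dict can also be loaded into a pandas DataFrame for inspection, since its values are parallel arrays:

results = pd.DataFrame(grid_search.cv_results_)
print(results[["params", "mean_test_score", "std_test_score", "rank_test_score"]].head())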

grid_search.best_estimator_
# output
# DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=1,
#     max_features=1000, max_leaf_nodes=2, min_impurity_decrease=0.0,
#     min_impurity_split=None, min_samples_leaf=1,
#     min_samples_split=2, min_weight_fraction_leaf=0.1,
#     presort=False, random_state=99, splitter='best')

Expected Result (should be filled): https://ibb.co/Z6jwnMr

However, each subplot should show a curve depicting the best value for its parameter. The keys no longer include a "mean_validation_score" for plotting the actual test score; it existed in sklearn 0.17.1 but is gone in sklearn 0.21.2.
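
As an aside, the single best mean cross-validated score is still available directly, so at least the overall optimum can be read off without touching cv_results_:

print(grid_search.best_score_)   # mean_test_score of best_estimator_
print(grid_search.best_params_)  # the parameter setting that achieved it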

Kindly let me know if there is still a way to plot all the test scores on the subplots of a single plot. Thanks in advance!
