
I have a question about the CohenKappa metric from tensorflow_addons. I am training an image classification model, but I treat the task as a regression problem, so I train the model with an MSE loss. However, I still need to measure classification performance, and I want to use Cohen's kappa for that. Conveniently, TensorFlow supports a CohenKappa metric through the tensorflow_addons package. I need to customize the metric, though, so I added extra logic that clips y_pred, rounds the values, and then feeds them into the CohenKappa API. Here is the code:

import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow_addons.metrics import CohenKappa
from tensorflow.keras.metrics import Metric
from tensorflow_addons.utils.types import AcceptableDTypes, FloatTensorLike

from typeguard import typechecked
from typing import Optional

from tensorflow.python.ops import math_ops
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.keras.utils import metrics_utils

class CohenKappaMetric(CohenKappa):
    def __init__(
        self,
        num_classes: FloatTensorLike,
        name: str = "cohen_kappa",
        weightage: Optional[str] = None,
        sparse_labels: bool = False,
        regression: bool = False,
        dtype: AcceptableDTypes = None,
    ):
        """Creates a `CohenKappa` instance."""
        super().__init__(num_classes=num_classes, name=name, weightage=weightage,
                         sparse_labels=sparse_labels, regression=regression, dtype=dtype)

    def update_state(self, y_true, y_pred, sample_weight=None):
        # Map the continuous regression outputs onto the five discrete classes.
        y_pred = tf.clip_by_value(y_pred, 0, 4)
        y_pred = tf.math.round(y_pred)
        y_pred = tf.cast(y_pred, dtype=tf.uint8)

        y_true = math_ops.cast(y_true, self._dtype)
        y_pred = math_ops.cast(y_pred, self._dtype)
        [y_true, y_pred], sample_weight = \
            metrics_utils.ragged_assert_compatible_and_get_flat_values([y_true, y_pred], sample_weight)
        print(f'y_true after ragged assert: {y_true}')
        print(f'y_pred after ragged assert: {y_pred}')
        y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)
        print(f'y_true after squeeze: {y_true}')
        print(f'y_pred after squeeze: {y_pred}')
             
        return super().update_state(y_true, y_pred, sample_weight)
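
For a quick sanity check outside of model.fit, here is a minimal sketch with toy data (my own addition, run eagerly; it assumes the TFA CohenKappa with sparse_labels=True accepts integer class ids for both arguments):

# Eager-mode sanity check on hypothetical toy data.
metric = CohenKappaMetric(num_classes=5, weightage='quadratic', sparse_labels=True)
y_true_toy = tf.constant([[0.], [1.], [3.], [4.]])      # integer labels, shape (4, 1)
y_pred_toy = tf.constant([[0.2], [0.9], [2.6], [4.4]])  # raw regression outputs
metric.update_state(y_true_toy, y_pred_toy)
print(metric.result().numpy())  # round(y_pred) matches y_true exactly, so this should print ~1.0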

I trained it with the tf Keras API and tf.data.Dataset objects. Here is the full script for context.

========= Full Script ==========

# Import Library

import numpy as np
import pandas as pd

import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline

import cv2
from PIL import Image

import tensorflow as tf
from tensorflow.keras import applications, layers, models
from tensorflow.keras.callbacks import Callback, ModelCheckpoint
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers.experimental.preprocessing import (
    Rescaling, RandomFlip, RandomRotation, RandomZoom)

import os, glob, pathlib

from sklearn.model_selection import train_test_split
from sklearn.metrics import cohen_kappa_score, accuracy_score, confusion_matrix

from tqdm import tqdm

SIZE = 224
DATASET_DIR = 'Dataset/APTOS-2019-RAW/'
BATCH_SIZE = 32
RESHUFFLE = 700
model_backbone = tf.keras.applications.EfficientNetB0
ARCH = 'EfficientNetB0'

train_df = pd.read_csv('Dataset/CSVs/converted_x_train_8.csv')
valid_df = pd.read_csv('Dataset/CSVs/converted_x_valid_8.csv')

# Resample to balance the classes
from sklearn.utils import resample
X=train_df
normal=X[X.diagnosis==0]
mild=X[X.diagnosis==1]
moderate=X[X.diagnosis==2]
severe=X[X.diagnosis==3]
pdr=X[X.diagnosis==4]

# Resample every class to RESHUFFLE examples:
# minority classes are up-sampled with replacement,
# majority classes are down-sampled without replacement.
mild = resample(mild,
                replace=True,          # up-sample with replacement
                n_samples=RESHUFFLE,   # target count per class
                random_state=2020)     # reproducible results
moderate = resample(moderate,
                    replace=False,     # down-sample without replacement
                    n_samples=RESHUFFLE,
                    random_state=2020)
severe = resample(severe,
                  replace=True,        # up-sample with replacement
                  n_samples=RESHUFFLE,
                  random_state=2020)
normal = resample(normal,
                  replace=False,       # down-sample without replacement
                  n_samples=RESHUFFLE,
                  random_state=2020)
pdr = resample(pdr,
               replace=True,           # up-sample with replacement
               n_samples=RESHUFFLE,
               random_state=2020)

# combine the resampled classes into a balanced training set
sampled = pd.concat([normal, mild, moderate, severe, pdr])

# checking counts
sampled.diagnosis.value_counts()

train_df = sampled
train_df = train_df.sample(frac=1).reset_index(drop=True)

train_df['id_code'] = train_df['id_code'].apply(lambda x: DATASET_DIR+x)
valid_df['id_code'] = valid_df['id_code'].apply(lambda x: DATASET_DIR+x)

list_ds = tf.data.Dataset.list_files(list(train_df['id_code']), shuffle=False)
list_ds = list_ds.shuffle(len(train_df), reshuffle_each_iteration=True)

val_list_ds = tf.data.Dataset.list_files(list(valid_df['id_code']), shuffle=False)
val_list_ds = val_list_ds.shuffle(len(valid_df), reshuffle_each_iteration=True)

class_names = np.array(sorted([item.name for item in pathlib.Path(DATASET_DIR).glob('*') if item.name != "LICENSE.txt"]))
print(class_names)

train_ds = list_ds
val_ds = val_list_ds

def get_label(file_path):
  # convert the path to a list of path components
  parts = tf.strings.split(file_path, os.path.sep)
  # The second to last is the class-directory
  one_hot = parts[-2] == class_names
  # Integer encode the label
  return tf.argmax(one_hot)

def decode_img(img):
  # convert the compressed string to a 3D uint8 tensor
  img = tf.image.decode_jpeg(img, channels=3)
  # resize the image to the desired size
  return tf.image.resize(img, [SIZE, SIZE])

def process_path(file_path):
  label = get_label(file_path)
  # load the raw data from the file as a string
  img = tf.io.read_file(file_path)
  img = decode_img(img)
  return img, label

AUTOTUNE = tf.data.AUTOTUNE

# Set `num_parallel_calls` so multiple images are loaded/processed in parallel.
train_ds = train_ds.map(process_path, num_parallel_calls=AUTOTUNE)
val_ds = val_ds.map(process_path, num_parallel_calls=AUTOTUNE)

def configure_for_performance(ds):
  ds = ds.cache()
  ds = ds.shuffle(buffer_size=1000)
  ds = ds.batch(BATCH_SIZE)
  ds = ds.prefetch(buffer_size=AUTOTUNE)
  return ds

train_ds = configure_for_performance(train_ds)
val_ds = configure_for_performance(val_ds)
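
As a quick check of what this pipeline feeds the model (a sketch added for illustration; the expected shapes assume BATCH_SIZE = 32 and SIZE = 224):

# Pull one batch and inspect its shapes and label range.
image_batch, label_batch = next(iter(train_ds))
print(image_batch.shape)  # expected: (32, 224, 224, 3)
print(label_batch[:10])   # sparse integer class ids in [0, 4]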

import tensorflow_addons as tfa
from tensorflow_addons.metrics import CohenKappa
from tensorflow.keras.metrics import Metric
from tensorflow_addons.utils.types import AcceptableDTypes, FloatTensorLike

from typeguard import typechecked
from typing import Optional

from tensorflow.python.ops import math_ops
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.keras.utils import metrics_utils

class CohenKappaMetric(CohenKappa):
    def __init__(
        self,
        num_classes: FloatTensorLike,
        name: str = "cohen_kappa",
        weightage: Optional[str] = None,
        sparse_labels: bool = False,
        regression: bool = False,
        dtype: AcceptableDTypes = None,
    ):
        """Creates a `CohenKappa` instance."""
        super().__init__(num_classes=num_classes, name=name, weightage=weightage,
                         sparse_labels=sparse_labels, regression=regression, dtype=dtype)

    def update_state(self, y_true, y_pred, sample_weight=None):
        # Map the continuous regression outputs onto the five discrete classes.
        y_pred = tf.clip_by_value(y_pred, 0, 4)
        y_pred = tf.math.round(y_pred)
        y_pred = tf.cast(y_pred, dtype=tf.uint8)

        y_true = math_ops.cast(y_true, self._dtype)
        y_pred = math_ops.cast(y_pred, self._dtype)
        [y_true, y_pred], sample_weight = \
            metrics_utils.ragged_assert_compatible_and_get_flat_values([y_true, y_pred], sample_weight)
        print(f'y_true after ragged assert: {y_true}')
        print(f'y_pred after ragged assert: {y_pred}')
        y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)
        print(f'y_true after squeeze: {y_true}')
        print(f'y_pred after squeeze: {y_pred}')
             
        return super().update_state(y_true, y_pred, sample_weight)

class QWKCallback(tf.keras.callbacks.Callback):
    def __init__(self, patience = 10):
      super().__init__()
      self.patience = patience

    def on_train_begin(self, logs=None):
        # Number of epochs waited since the monitored kappa last improved.
        self.wait = 0
        # The epoch the training stops at.
        self.stopped_epoch = 0
        # Initialize the best kappa to -1, its minimum possible value.
        self.best = -1

    def on_epoch_end(self, epoch, logs=None):
        current = logs.get("val_cohen_kappa")  # monitor the validation kappa
        if np.greater(current, self.best):
            self.best = current
            self.wait = 0
            # Record the best weights when the current result is better (greater).
            self.best_weights = self.model.get_weights()
            if current > 0.75:
                print("Validation Kappa has improved and is greater than 0.75. Worth saving, dude. Saving model.")
                self.model.save(f'Kaggle - Model Weights/{ARCH}-model.h5')
              self.model.save(f'Kaggle - Model Weights/{ARCH}-model.h5')
        else:
            self.wait += 1
            if self.wait >= self.patience:
                self.stopped_epoch = epoch
                self.model.stop_training = True
                print("Restoring model weights from the end of the best epoch.")
                self.model.set_weights(self.best_weights)

    def on_train_end(self, logs=None):
        if self.stopped_epoch > 0:
            print("Epoch %05d: early stopping" % (self.stopped_epoch + 1))

efficientnet = model_backbone(include_top=False, weights='imagenet', input_shape=(SIZE, SIZE, 3))

dummy_model = Sequential([
    Rescaling(1./255, input_shape=(SIZE, SIZE, 3)),
    RandomFlip(seed=2019),
    RandomRotation((-0.5, 0.5), fill_mode='constant', seed=2019),
    RandomZoom(0.1),
    layers.GlobalAveragePooling2D(),
    layers.Dense(10),
    layers.Dense(1)])

dummy_model.compile(
    loss='mse',
    optimizer=Adam(learning_rate=1e-4),
    metrics=[CohenKappaMetric(num_classes=5, weightage='quadratic', sparse_labels=True)]
)

dummy_model.fit(
    train_ds,
    epochs = 9,
    validation_data = val_ds,
    callbacks = [QWKCallback(patience = 10)]
)

Running this script produces the following log:

Epoch 1/9
y_true after ragged assert: Tensor("Cast_2:0", shape=(None, 1), dtype=float32)
y_pred after ragged assert: Tensor("Cast_3:0", shape=(None, 1), dtype=float32)
y_true after squeeze: Tensor("Cast_2:0", shape=(None, 1), dtype=float32)
y_pred after squeeze: Tensor("Cast_3:0", shape=(None, 1), dtype=float32)
y_true after ragged assert: Tensor("Cast_2:0", shape=(None, 1), dtype=float32)
y_pred after ragged assert: Tensor("Cast_3:0", shape=(None, 1), dtype=float32)
y_true after squeeze: Tensor("Cast_2:0", shape=(None, 1), dtype=float32)
y_pred after squeeze: Tensor("Cast_3:0", shape=(None, 1), dtype=float32)
109/110 [============================>.] - ETA: 0s - loss: 144816.2047 - cohen_kappa: 0.0000e+00y_true after ragged assert: Tensor("Cast_2:0", shape=(None, 1), dtype=float32)
y_pred after ragged assert: Tensor("Cast_3:0", shape=(None, 1), dtype=float32)
y_true after squeeze: Tensor("Cast_2:0", shape=(None, 1), dtype=float32)
y_pred after squeeze: Tensor("Cast_3:0", shape=(None, 1), dtype=float32)
110/110 [==============================] - 3s 18ms/step - loss: 144618.4215 - cohen_kappa: 0.0000e+00 - val_loss: 119745.2266 - val_cohen_kappa: 0.0000e+00
Epoch 2/9
110/110 [==============================] - 2s 16ms/step - loss: 105063.3554 - cohen_kappa: 0.0000e+00 - val_loss: 86080.0625 - val_cohen_kappa: 0.0000e+00
Epoch 3/9
110/110 [==============================] - 2s 16ms/step - loss: 75889.1368 - cohen_kappa: 0.0000e+00 - val_loss: 60222.9531 - val_cohen_kappa: 0.0000e+00
Epoch 4/9
110/110 [==============================] - 2s 16ms/step - loss: 52277.5727 - cohen_kappa: 0.0000e+00 - val_loss: 40955.3906 - val_cohen_kappa: 0.0000e+00
Epoch 5/9
110/110 [==============================] - 2s 16ms/step - loss: 35806.8430 - cohen_kappa: 0.0000e+00 - val_loss: 26828.6133 - val_cohen_kappa: 0.0000e+00
Epoch 6/9
110/110 [==============================] - 2s 16ms/step - loss: 23043.7091 - cohen_kappa: 0.0000e+00 - val_loss: 16888.7090 - val_cohen_kappa: 0.0000e+00
Epoch 7/9
110/110 [==============================] - 2s 16ms/step - loss: 14327.0133 - cohen_kappa: 0.0000e+00 - val_loss: 10193.4795 - val_cohen_kappa: 0.0000e+00
Epoch 8/9
110/110 [==============================] - 2s 16ms/step - loss: 8697.9348 - cohen_kappa: 0.0000e+00 - val_loss: 5862.7231 - val_cohen_kappa: 0.0000e+00
Epoch 9/9
110/110 [==============================] - 2s 16ms/step - loss: 4940.2150 - cohen_kappa: 0.0000e+00 - val_loss: 3193.6562 - val_cohen_kappa: 0.0000e+00
<tensorflow.python.keras.callbacks.History at 0x7f6c285d6c50>

These results leave me with two questions:

  • How do I fix this so that I get a working Cohen's kappa metric? It should improve from 0 toward 1.
  • I want to inspect y_pred and y_true inside each metric's update_state method. Is it expected that y_true and y_pred show up as symbolic Tensor objects? Thanks! (See the debugging sketch right after this list.)
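
On the second question, my guess is that the prints show symbolic tensors because update_state is traced into a graph. A sketch of how concrete values could be inspected instead, assuming eager execution is acceptable for debugging, is to recompile with run_eagerly=True:

# Debugging sketch: run training eagerly so the print() calls inside
# update_state show concrete values instead of symbolic tensors. Slow.
dummy_model.compile(
    loss='mse',
    optimizer=Adam(learning_rate=1e-4),
    metrics=[CohenKappaMetric(num_classes=5, weightage='quadratic', sparse_labels=True)],
    run_eagerly=True
)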

1 Answer


Never mind, after a few minutes I found what I had gotten wrong: tensorflow_addons does not yet support tf.data, and a quick fix for this problem is described in this GitHub issue: https://github.com/tensorflow/addons/issues/2417
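
Until that lands, one fallback (my own sketch, not the fix from the issue) is to compute quadratic-weighted kappa outside the graph with scikit-learn in a callback; it assumes val_ds yields (images, labels) batches as in the script above:

import numpy as np
import tensorflow as tf
from sklearn.metrics import cohen_kappa_score

class SklearnQWKCallback(tf.keras.callbacks.Callback):
    """Compute quadratic-weighted kappa on the validation set after each epoch."""
    def __init__(self, val_ds):
        super().__init__()
        self.val_ds = val_ds

    def on_epoch_end(self, epoch, logs=None):
        y_true, y_pred = [], []
        for images, labels in self.val_ds:
            preds = self.model.predict(images, verbose=0)
            # Same clip-and-round mapping as the custom metric in the question.
            y_pred.extend(np.clip(np.rint(preds), 0, 4).astype(int).ravel())
            y_true.extend(np.asarray(labels).ravel())
        qwk = cohen_kappa_score(y_true, y_pred, weights='quadratic')
        print(f' - val_qwk (sklearn): {qwk:.4f}')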

answered 2021-03-18T07:06:29.300