I'm trying to do binary classification on 3D grayscale MRI data. Since grayscale data has no inherent channel dimension, I'm using 2D convolutions. I added a dimension to rearrange the axes so that, essentially, the depth of each volume acts as the batch dimension. As quick background, I'm working with a subsample of the data: 20 files, each 189 x 233 x 197.
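To make that arrangement concrete, here is a minimal sketch (using a dummy NumPy array in place of a real scan) of how I'm turning one volume into a stack of single-channel 2D slices:

import numpy as np

# dummy stand-in for one loaded MRI volume, shaped (depth, height, width)
volume = np.zeros((189, 233, 197), dtype=np.float32)

# add a trailing channel axis so the 189 depth slices become
# 189 single-channel 2D images of shape (233, 197, 1)
slices = volume[..., np.newaxis]
print(slices.shape)  # (189, 233, 197, 1)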
As the code below shows, I have a CSV file containing a bunch of information, including the label for each file, which I try to extract.
import numpy as np
import glob
import os
import tensorflow as tf
import pandas as pd
import SimpleITK as sitk
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import plot_model
from keras.utils import to_categorical
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from google.colab import drive
drive.mount('/content/gdrive')
datapath = ('/content/gdrive/My Drive/DirectoryTest/All Data/')
patients = os.listdir(datapath)
labels_df = pd.read_csv('/content/Data_Index.csv', index_col = 0 )
labelset = []
for i in patients:
    label = labels_df.loc[i, 'Group']
    # map the group strings to integer class labels
    if label == 'AD':
        label = 0
    if label == 'CN':
        label = 1
    labelset.append(label)
label_encoder = LabelEncoder()
labelset = label_encoder.fit_transform(labelset)
labelset = np_utils.to_categorical(labelset, num_classes= 2)
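If I understand to_categorical correctly, labelset should now be a one-hot array with one row per file, which I can sanity-check like this (assuming all 20 files are present):

print(labelset.shape)  # expected: (20, 2) -- one one-hot row per patient file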
FullDataSet = []
for i in patients:
    # read each MRI volume and convert it to a NumPy array
    a = sitk.ReadImage(datapath + i)
    b = sitk.GetArrayFromImage(a)
    # add a trailing channel axis: (189, 233, 197) -> (189, 233, 197, 1)
    c = np.reshape(b, (189, 233, 197, 1))
    FullDataSet.append(c)
training_data, testing_data, training_labels, testing_labels = train_test_split(FullDataSet, labelset, train_size=0.70, test_size=0.30)
dataset_train = tf.data.Dataset.from_tensor_slices((training_data, training_labels))
dataset_test = tf.data.Dataset.from_tensor_slices((testing_data, testing_labels))
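As a quick inspection sketch (not part of the pipeline), each dataset element should pair one whole volume with one 2-element label:

# print the shapes of a single (volume, label) pair from the training dataset
for volume, label in dataset_train.take(1):
    print(volume.shape, label.shape)  # expected: (189, 233, 197, 1) (2,)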
CNN_model = tf.keras.Sequential([
    #tf.keras.layers.Input(shape=(189, 233, 197, 1), batch_size=2),
    #tf.keras.layers.Reshape((197, 233, 189, 1)),
    tf.keras.layers.Conv2D(kernel_size=(7, 7), data_format='channels_last', filters=64, activation='relu',
                           padding='same', strides=(3, 3), input_shape=(233, 197, 1)),
    #tf.keras.layers.BatchNormalization(center=True, scale=False),
    tf.keras.layers.MaxPool2D(pool_size=(3, 3), padding='same'),
    tf.keras.layers.Dropout(0.20),
    tf.keras.layers.Conv2D(kernel_size=(7, 7), filters=128, activation='relu', padding='same', strides=(3, 3)),
    #tf.keras.layers.BatchNormalization(center=True, scale=False),
    tf.keras.layers.MaxPool2D(pool_size=(3, 3), padding='same'),
    tf.keras.layers.Dropout(0.20),
    tf.keras.layers.Conv2D(kernel_size=(7, 7), filters=256, activation='relu', padding='same', strides=(3, 3)),
    #tf.keras.layers.BatchNormalization(center=True, scale=False),
    tf.keras.layers.MaxPool2D(pool_size=(3, 3), padding='same'),
    tf.keras.layers.Dropout(0.20),
    # last activation could be either sigmoid or softmax; need to look into this more.
    # Sigmoid for binary output, softmax for multi-class output.
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dropout(0.20),
    tf.keras.layers.Dense(2, activation='softmax')
])
# Compile the model
CNN_model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.00001), loss='binary_crossentropy', metrics=['accuracy'])
# print model layers
CNN_model.summary()
CNN_history = CNN_model.fit(dataset_train, epochs=10, validation_data=dataset_test)
When I go to fit the model, I get the following error:
Epoch 1/10
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-35-a8b210ec2e72> in <module>()
1 #running of the model
2 #CNN_history = CNN_model.fit(dataset_train, epochs=100, validation_data =dataset_test, validation_steps=1)
----> 3 CNN_history = CNN_model.fit(dataset_train, epochs=10, validation_data=dataset_test)
4
5
10 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
ValueError: in user code:
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:806 train_function *
return step_function(self, iterator)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:796 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:1211 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:2585 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:2945 _call_for_each_replica
return fn(*args, **kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:789 run_step **
outputs = model.train_step(data)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:749 train_step
y, y_pred, sample_weight, regularization_losses=self.losses)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/compile_utils.py:204 __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/losses.py:149 __call__
losses = ag_call(y_true, y_pred)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/losses.py:253 call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/dispatch.py:201 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/losses.py:1605 binary_crossentropy
K.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=-1)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/dispatch.py:201 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/backend.py:4829 binary_crossentropy
bce = target * math_ops.log(output + epsilon())
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:1141 binary_op_wrapper
raise e
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:1125 binary_op_wrapper
return func(x, y, name=name)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:1457 _mul_dispatch
return multiply(x, y, name=name)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/dispatch.py:201 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:509 multiply
return gen_math_ops.mul(x, y, name)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gen_math_ops.py:6176 mul
"Mul", x=x, y=y, name=name)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:744 _apply_op_helper
attrs=attr_protos, op_def=op_def)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/func_graph.py:593 _create_op_internal
compute_device)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py:3485 _create_op_internal
op_def=op_def)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py:1975 __init__
control_input_ops, op_def)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py:1815 _create_c_op
raise ValueError(str(e))
ValueError: Dimensions must be equal, but are 2 and 189 for '{{node binary_crossentropy/mul}} = Mul[T=DT_FLOAT](ExpandDims, binary_crossentropy/Log)' with input shapes: [2,1], [189,2].
I understand that the 2 in [189,2] is associated with the final softmax layer, but I don't know what to do with that information or where to go from here. Any help would be greatly appreciated, thanks!