I have a stacking ensemble of 5 EfficientNetB3 models and I am trying to get predictions from it. Here are the dataset and model construction:

Dataset:

import os
import tensorflow as tf


def build_decoder(with_labels=True, target_size=(300, 300), ext='jpg'):
    def decode(path):
        file_bytes = tf.io.read_file(path)
        if ext == 'png':
            img = tf.image.decode_png(file_bytes, channels=3)
        elif ext in ['jpg', 'jpeg']:
            img = tf.image.decode_jpeg(file_bytes, channels=3)
        else:
            raise ValueError("Image extension not supported")

        img = tf.cast(img, tf.float32) / 255.0
        img = tf.image.resize(img, target_size)

        return img

    def decode_with_labels(path, label):
        return decode(path), label

    return decode_with_labels if with_labels else decode


def build_dataset(paths, labels=None, bsize=32, cache=True,
                  decode_fn=None, augment_fn=None, augment=True,
                  repeat=True, shuffle=1024, cache_dir=""):
    if cache_dir != "" and cache is True:
        os.makedirs(cache_dir, exist_ok=True)

    if decode_fn is None:
        decode_fn = build_decoder(labels is not None)

    if augment_fn is None:
        augment_fn = build_augmenter(labels is not None)

    AUTO = tf.data.experimental.AUTOTUNE
    slices = paths if labels is None else (paths, labels)

    dset = tf.data.Dataset.from_tensor_slices(slices)
    dset = dset.map(decode_fn, num_parallel_calls=AUTO)
    dset = dset.cache(cache_dir) if cache else dset
    dset = dset.map(augment_fn, num_parallel_calls=AUTO) if augment else dset
    dset = dset.repeat() if repeat else dset
    dset = dset.shuffle(shuffle) if shuffle else dset
    dset = dset.batch(bsize).prefetch(AUTO)

    return dset
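
build_augmenter (referenced above) is not shown here; it follows the same pattern as build_decoder. This is a simplified stand-in, not my exact code:

def build_augmenter(with_labels=True):
    # Simplified stand-in: the real augmenter may apply more transforms.
    def augment(img):
        img = tf.image.random_flip_left_right(img)
        return img

    def augment_with_labels(img, label):
        return augment(img), label

    return augment_with_labels if with_labels else augment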


test_decoder = build_decoder(with_labels=False, target_size=(300, 300), ext='png')
dtest = build_dataset(test_paths, bsize=2, repeat=False, shuffle=False,
                      augment=False, cache=False, decode_fn=test_decoder)
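
For reference, each element of dtest is a plain image batch with no labels, which can be checked like this:

# dtest yields image-only batches shaped (bsize, 300, 300, 3)
print(dtest.element_spec)  # expected: TensorSpec(shape=(None, 300, 300, 3), dtype=tf.float32, name=None)
for batch in dtest.take(1):
    print(batch.shape)     # expected: (2, 300, 300, 3) for bsize=2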

Stacked model:

with strategy.scope():
    models = load_all_models()

    for i, model in enumerate(models):
        model.layers[1]._name = f'effnet_layer{i}'
        for layer in model.layers:
            layer._name = layer.name + f"_{i}"
            layer.trainable = False

    ensemble_visible = [model.input for model in models]
    ensemble_outputs = [model.output for model in models]
    merge = tf.keras.layers.concatenate(ensemble_outputs)
    merge = tf.keras.layers.Dense(10, activation='relu')(merge)
    output = tf.keras.layers.Dense(n_labels, activation='softmax')(merge)
    stack_model = tf.keras.models.Model(inputs=ensemble_visible, outputs=output)

    stack_model.compile(optimizer=tf.keras.optimizers.Adam(),
                        loss='categorical_crossentropy',
                        metrics=[tf.keras.metrics.AUC(multi_label=True)])
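
load_all_models is not shown above; it just reads the 5 saved EfficientNetB3 models from disk, roughly like this (the checkpoint paths are placeholders, not my real filenames):

def load_all_models(n_models=5):
    # Placeholder paths: the real code loads my 5 saved EfficientNetB3 checkpoints.
    all_models = []
    for i in range(n_models):
        model = tf.keras.models.load_model(f'model_fold{i}.h5')
        all_models.append(model)
    return all_models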

I am trying to run prediction with the following snippet:

X = tf.data.Dataset.zip((dtest, dtest, dtest, dtest, dtest))
X_pred = []
for image in X.take(-1):
    X_pred.append(image)

sub_df[label_cols] = stack_model.predict(X_pred, verbose=1)

However, I get the following error:

ValueError: Layer model expects 5 input(s), but it received 3035 input tensors. Inputs received: [<tf.Tensor 'IteratorGetNext:0' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:1' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:2' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:3' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:4' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:5' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:6' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:7' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:8' shape=(None, 300, 300,

How can I resolve this error? For reference, the test set contains 1214 images. Thanks in advance.
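
From the numbers, my suspicion is that predict is flattening the Python list: 1214 images at bsize=2 gives 607 batches, and 607 × 5 = 3035, which is exactly the count in the error. If that is right, passing the zipped dataset straight to predict, instead of materializing it into a list, should keep each element as a 5-tuple of batches. A sketch of what I plan to try, not yet verified:

# Each element of X is a 5-tuple of image batches, matching the 5 model inputs;
# passing the dataset itself (not a list of its elements) preserves that structure.
X = tf.data.Dataset.zip((dtest, dtest, dtest, dtest, dtest))
sub_df[label_cols] = stack_model.predict(X, verbose=1)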
