
Python 3.8, TensorFlow/Keras 2.5.0

Hi everyone, I'm trying to implement a U-Net architecture from custom building blocks, but I keep getting this error:

ValueError: Graph disconnected: cannot obtain value for tensor KerasTensor(type_spec=TensorSpec(shape=(None, 128, 128, 64), dtype=tf.float32, name='convnext_block_2_0_input'), name='convnext_block_2_0_input', description="created by layer 'convnext_block_2_0_input'") at layer "convnext_block_2_0". The following previous layers were accessed without issue: ['stem', 'convnext_stage_0', 'downsampling_block_0']
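If it helps, the same class of error can be reproduced with a much smaller example: tensors taken from inside a nested Sequential belong to that Sequential's internal graph, not to the outer functional graph. This is only an illustration of the error, not my actual code, and I'm not sure it is the same root cause:

import tensorflow as tf
from tensorflow.keras import layers

inputs = layers.Input((8,))
block = tf.keras.Sequential([layers.Dense(4), layers.Dense(4)], name="block")
x = block(inputs)                       # the Sequential as a whole is connected to `inputs`
works = tf.keras.Model(inputs, x)       # fine: `x` lives in the outer functional graph
inner = block.layers[-1].output         # tensor from the Sequential's internal graph
fails = tf.keras.Model(inputs, inner)   # ValueError: Graph disconnected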

After reading through similar questions I still couldn't find a solution. Where is my mistake, and how can I fix it?

Thanks in advance. The code:

import tensorflow as tf
import tensorflow.keras.layers as layers 
from modules import CBAMBlock, ConvNextBlock 

def build_encoder(input_shape,
                  depths=[3, 3, 9, 3],            # depths = [3, 3, 9, 3]
                  dims=[16, 32, 64, 128],         # dims (paper) = [96, 192, 384, 768]
                  drop_path_rate=0.0,             # drop-path rate for stochastic depth
                  layer_scale_init_value=1e-6) -> tf.keras.Model:
    inputs = layers.Input(input_shape)

    # Stem: initial convolution + attention + normalization
    stem = tf.keras.Sequential(
        [
            layers.Conv2D(dims[0], kernel_size=3, padding='same'),  # strides = 4 (paper)
            CBAMBlock(ratio=8, kernel_size=7),
            layers.LayerNormalization(epsilon=1e-6),
        ],
        name="stem",
    )

    # Downsampling blocks placed between the ConvNeXt stages
    downsample_layers = []
    downsample_layers.append(stem)
    for i in range(3):  # was 3
        downsample_layer = tf.keras.Sequential(
            [
                layers.LayerNormalization(epsilon=1e-6),
                CBAMBlock(ratio=8, kernel_size=7),
                layers.Conv2D(dims[i + 1], kernel_size=2, strides=2),
            ],
            name=f"downsampling_block_{i}",
        )
        downsample_layers.append(downsample_layer)

    # ConvNeXt stages with linearly increasing drop-path rates
    stages = []
    dp_rates = [x for x in tf.linspace(0.0, drop_path_rate, sum(depths))]

    cur = 0
    for i in range(4):  # was 4
        stage = tf.keras.Sequential(name=f"convnext_stage_{i}")
        for j in range(depths[i]):
            layer = ConvNextBlock(
                dim=dims[i],
                drop_path=dp_rates[cur + j],
                layer_scale_init_value=layer_scale_init_value,
                name=f"convnext_block_{i}_{j}")
            stage.add(layer)

        stages.append(stage)
        cur += depths[i]

    # Assemble the encoder: alternate downsampling blocks and ConvNeXt stages
    x = inputs
    for i in range(len(stages)):
        x = downsample_layers[i](x)
        x = stages[i](x)
    outputs = layers.LayerNormalization(epsilon=1e-6, name="encoder_outputs")(x)

    return tf.keras.Model(inputs, outputs, name="Encoder")

def add_decoder(encoder, dims=[64, 32, 16]) -> tf.keras.Model:

    x = encoder.output

    encoder_layer_iterator = 2

    for dim in dims:
        # Skip connection: take the output of the matching encoder stage
        convnext_sequential = encoder.get_layer(name=f"convnext_stage_{encoder_layer_iterator}")
        convnext_output = convnext_sequential.layers[-1].output

        x = layers.Conv2DTranspose(dim, 3, padding="same", strides=2, name=f"decoder_transposed_{dim}")(x)
        x = layers.LayerNormalization(epsilon=1e-6, name=f"decoder_lnorm_{dim}")(x)
        x = layers.Conv2D(filters=dim, kernel_size=3, padding='same', name=f"decoder_conv2d_{dim}")(x)

        print(convnext_sequential.name, encoder_layer_iterator)

        x = layers.Concatenate(axis=-1)([x, convnext_output])
        encoder_layer_iterator -= 1

    outputs = layers.Conv2D(filters=3, kernel_size=1, padding='same', name="decoder_output")(x)  # RGB channels

    return tf.keras.Model(encoder.input, outputs)

The code is run like this:

encoder = build_encoder(...) # successful
model = add_decoder(encoder,..) # unsuccessful
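
For completeness, the call looks roughly like the following. The exact arguments are hypothetical; I'm assuming a 512x512 RGB input here, which matches the 128x128x64 tensor in the error after two stride-2 downsamplings:

encoder = build_encoder(input_shape=(512, 512, 3), drop_path_rate=0.0)
encoder.summary()                                # builds and prints fine
model = add_decoder(encoder, dims=[64, 32, 16])  # raises the ValueError above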
