如果您不想使用自定义图层,我相信您可以执行以下操作:
# In Keras, Input(shape=...) EXCLUDES the batch dimension — it is added implicitly.
net = Input(shape=(time, y, x, 1))
# Process data w.r.t. time and spatial info; stack as many layers as you want.
net = Conv3D(filters_num_0, kernel_size_0, strides=1, padding="same", activation=activation_0)(net)  # (batch, time, y, x, filters_num_0)
net = Conv3D(filters_num_1, kernel_size_1, strides=1, padding="same", activation=activation_1)(net)  # (batch, time, y, x, filters_num_1)
# Aggregate channel info down to a single channel.
net = Conv3D(1, kernel_size_2, strides=1, padding="same", activation=activation_2)(net)  # (batch, time, y, x, 1)
# Move the time axis into the channel position before dropping rank.
# NOTE: a plain Reshape from (time, y, x, 1) to (y, x, time) would scramble
# the data; permute the axes first so "time" really becomes the channel axis.
net = Permute((2, 3, 1, 4))(net)                 # (batch, y, x, time, 1)
# target_shape also excludes the batch dimension.
net = Reshape(target_shape=(y, x, time))(net)    # (batch, y, x, time)
# Aggregate time info. The layer must be CALLED on the tensor: `(...)(net)`.
net = Conv2D(16, kernel_size_3, strides=1, padding="same", activation=activation_3)(net)  # (batch, y, x, 16)
使用自定义层实现跨时间的全局平均池化:
class GlobalAvgPoolAcrossTime(layers.Layer):
    """Global average pooling over the time axis, keeping it as size 1.

    Maps (batch, time, y, x, channels) -> (batch, 1, y, x, channels).
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def call(self, inputs):
        # Mean over axis 1 (time); keepdims=True preserves the tensor rank
        # so a later Reshape can drop the singleton axis explicitly.
        return keras.backend.mean(inputs, axis=1, keepdims=True)
# In Keras, Input(shape=...) EXCLUDES the batch dimension — it is added implicitly.
net = Input(shape=(time, y, x, 1))
# Process data w.r.t. time and spatial info; stack as many layers as you want.
net = Conv3D(filters_num_0, kernel_size_0, strides=1, padding="same", activation=activation_0)(net)  # (batch, time, y, x, filters_num_0)
net = Conv3D(filters_num_1, kernel_size_1, strides=1, padding="same", activation=activation_1)(net)  # (batch, time, y, x, filters_num_1)
# Collapse the time axis to size 1 via the custom pooling layer.
net = GlobalAvgPoolAcrossTime()(net)  # (batch, 1, y, x, filters_num_1)
# Drop the singleton time axis; target_shape excludes the batch dimension.
# (1, y, x, c) -> (y, x, c) preserves element order, so Reshape is safe here.
net = Reshape(target_shape=(y, x, filters_num_1))(net)
# Aggregate spatial info. The layer must be CALLED on the tensor: `(...)(net)`.
net = Conv2D(16, kernel_size_2, strides=1, padding="same", activation=activation_3)(net)  # (batch, y, x, 16)