
I am trying to convert some PyTorch code to TensorFlow, and the only piece still missing is computing the gradients of the loss function with respect to the model's weights and biases. The output is just a list of None values.

Since it is a network that predicts fluid flow velocity and pressure without any training data, I could not find much similar code. Link to the code.
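
For reference, the PyTorch side boils down to something like the following (a simplified toy sketch with SiLU standing in for swish, not the actual code from the linked repo), and there autograd returns the parameter gradients without any problem:

import torch

# Toy stand-in for one of the three networks (simplified, not the real code).
model = torch.nn.Sequential(
    torch.nn.Linear(3, 20),
    torch.nn.SiLU(),  # SiLU == swish
    torch.nn.Linear(20, 1),
)

x = torch.randn(8, 3)  # dummy stand-in for net_in
loss = (model(x) ** 2).mean()

# Gradients of the loss with respect to all weights and biases:
grads = torch.autograd.grad(loss, list(model.parameters()))
print([g.shape for g in grads])  # no None entries here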

The main part of my code is:

import time
from math import sqrt, pi

import tensorflow as tf
from tensorflow.keras import activations


class Net1(tf.keras.models.Model):
    def __init__(self):
        super(Net1, self).__init__()
        self.dense1 = tf.keras.layers.Dense(20, activation=activations.swish, name='d1', trainable=True)
        self.dense2 = tf.keras.layers.Dense(20, activation=activations.swish, name='d2', trainable=True)
        self.dense3 = tf.keras.layers.Dense(20, activation=activations.swish, name='d3', trainable=True)
        self.linear = tf.keras.layers.Dense(1, activation=activations.linear, name='l1', trainable=True)

    def call(self, inputs):
        x = self.dense1(inputs, training=True)
        x = self.dense2(x, training=True)
        x = self.dense3(x, training=True)
        return self.linear(x)

net1 = Net1()
opt1 = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=10 ** -15)
mse = tf.keras.losses.MeanSquaredError()
loss_metric = tf.keras.metrics.Mean()


if __name__ == "__main__":
    # Main loop
    epochs = 5
    LOSS = []
    tic = time.time()

    for epoch in range(epochs):
        for batch_idx, (x_in, y_in, scale_in) in enumerate(dataset):
            x_in = tf.convert_to_tensor(x_in, dtype=tf.float32)
            y_in = tf.convert_to_tensor(y_in, dtype=tf.float32)
            scale_in = tf.convert_to_tensor(scale_in, dtype=tf.float32)

            net_in = tf.concat((x_in, y_in, scale_in), 1)


            var_1 = net1.trainable_variables
            var_2 = net2.trainable_variables
            var_3 = net2.trainable_variables
            
            u = net1.predict(net_in)
            v = net2.predict(net_in)
            P = net3.predict(net_in)

            with tf.GradientTape(persistent=True, watch_accessed_variables=True) as g:

                g.watch(x_in)
                g.watch(y_in)
                g.watch(scale_in)
                g.watch(var_1)
                g.watch(var_2)
                g.watch(var_3)

                R = scale_in * (1.0 / sqrt(2.0 * pi * sigma ** 2.0)) * tf.math.exp(
                    -(x_in - mu) ** 2.0 / (2.0 * sigma ** 2.0))
                h = rInlet - R

                u_hard = u * (h ** 2.0 - y_in ** 2.0)
                v_hard = v * (h ** 2.0 - y_in ** 2.0)
                P_hard = (xStart - x_in) * 0 + dP * (xEnd - x_in) / L + 0 * y_in + (xStart - x_in) * (xEnd - x_in) * P

                u_x = g.gradient(u_hard, x_in)
                u_y = g.gradient(u_hard, y_in)
                v_x = g.gradient(v_hard, x_in)
                v_y = g.gradient(v_hard, y_in)
                P_x = g.gradient(P_hard, x_in)
                P_y = g.gradient(P_hard, y_in)

            u_xx = g.gradient(u_x, x_in)
            u_yy = g.gradient(u_y, y_in)
            v_xx = g.gradient(v_x, x_in)
            v_yy = g.gradient(v_y, y_in)

            loss_1 = (u_hard * u_x + v_hard * u_y - nu * (u_xx + u_yy) + 1.0 / rho * P_x)
            loss_2 = (u_hard * v_x + v_hard * v_y - nu * (v_xx + v_yy) + 1.0 / rho * P_y)
            loss_3 = u_x + v_y

            loss_sum = mse(loss_1, tf.zeros_like(loss_1)) + mse(loss_2, tf.zeros_like(loss_2)) + mse(
                            loss_3, tf.zeros_like(loss_3))

            grads = g.gradient(loss_sum, var_1, unconnected_gradients='zero')

It computes all of my losses, but, as I said, it gives None for the gradients of the loss with respect to the trainable variables. It seems like they are not connected, but then again, why does it work in the PyTorch code?
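
For comparison, the minimal GradientTape pattern below (a toy single-network sketch with dummy data, not my actual setup) does give me non-None gradients with respect to trainable_variables, so I am clearly missing something about how my script connects the forward pass to the tape:

import tensorflow as tf

# Toy single-network sketch (not my actual nets), just to check the tape pattern.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(20, activation=tf.keras.activations.swish),
    tf.keras.layers.Dense(1),
])

x = tf.random.normal((8, 3))  # dummy stand-in for net_in

with tf.GradientTape() as tape:
    # Forward pass inside the tape by calling the model directly;
    # model.predict() would return a NumPy array, which the tape cannot trace.
    out = model(x, training=True)
    loss = tf.reduce_mean(tf.square(out))

# trainable_variables are watched automatically, no explicit watch() needed.
grads = tape.gradient(loss, model.trainable_variables)
print([g.shape for g in grads])  # non-None gradients here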
