I am using the convolutional LSTM cell from the contrib package of TensorFlow 1.5 inside an Estimator's model_fn. I want to add L2 regularization to this cell. I tried the following code:
import tensorflow as tf

def myModelFn(features, labels, mode, params):
    trainingFlag = (mode == tf.estimator.ModeKeys.TRAIN)
    inferFlag = (mode == tf.estimator.ModeKeys.PREDICT)
    dataShape = tuple(params['dataShape'])
    XSize, YSize, ZSize = dataShape[0], dataShape[1], dataShape[2]
    dataTensor = features['data']
    dataTensor = tf.reshape(dataTensor, [-1, XSize, YSize, ZSize, 1])
    labelTensor = tf.cast(labels['labels'], tf.int64)
    with tf.variable_scope('myModel'):
        normalizedData = tf.layers.batch_normalization(dataTensor,
                                                       center=True,
                                                       scale=True,
                                                       training=trainingFlag,
                                                       name='bnInput')
        with tf.variable_scope('module1'):
            conv1 = tf.layers.conv3d(normalizedData,
                                     filters=3,
                                     kernel_size=(5, 5, 5),
                                     kernel_regularizer=tf.nn.l2_loss,
                                     name='conv3d_1')
            max1 = tf.layers.max_pooling3d(conv1,
                                           pool_size=(5, 5, 5),
                                           strides=(2, 2, 2),
                                           name='max_1')
This is where I create the convLSTM2D and where I want to add the L2 regularization:
        with tf.variable_scope('module2'):
            lstmInput = tf.transpose(max1, [0, 3, 1, 2, 4], 'lstmInput')
            lstmInputShape = lstmInput.shape.as_list()[2:]
            lstmInput = tf.unstack(lstmInput, axis=1)
            convLSTMNet = tf.contrib.rnn.ConvLSTMCell(conv_ndims=2,
                                                      input_shape=lstmInputShape,
                                                      output_channels=3,
                                                      kernel_shape=[3, 3],
                                                      use_bias=True,
                                                      name='lstmConv2d')
            # The ValueError quoted below is raised on the next line:
            lstmKernelVars = [var for var in tf.trainable_variables(
                convLSTMNet.scope_name) if 'kernel' in var.name]
            tf.contrib.layers.apply_regularization(tf.nn.l2_loss,
                                                   lstmKernelVars)
            lstmOutput, _ = tf.nn.static_rnn(convLSTMNet, lstmInput,
                                             dtype=tf.float32)[-1]
            module2Output = tf.layers.flatten(lstmOutput, name='module2Output')
        with tf.variable_scope('module3'):
            dense1 = tf.layers.dense(module2Output, 150, name='dense1')
            dropout1 = tf.layers.dropout(dense1, 0.6, training=trainingFlag,
                                         name='dropout1')
            dense2 = tf.layers.dense(dropout1, 50, name='dense2')
            dropout2 = tf.layers.dropout(dense2, 0.5, training=trainingFlag,
                                         name='dropout2')
            logits = tf.layers.dense(dropout2, 4, name='logits')
    outputLabel = tf.nn.softmax(logits, name='myLabel')
    predictions = {'prediction': tf.cast(tf.argmax(outputLabel, 1), tf.int64)}
    if not inferFlag:
        loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                           labels=labelTensor),
            name='myLoss')
        l2Loss = tf.reduce_sum(
            tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), name='l2Loss')
        fullLoss = tf.add(loss, l2Loss)
        tf.summary.scalar('fullLoss', fullLoss)
        if trainingFlag:
            globalStep = tf.train.get_global_step()
            optimizer = tf.train.AdamOptimizer()
            updateOps = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(updateOps):
                trainOp = optimizer.minimize(
                    fullLoss, global_step=globalStep)
        else:
            trainOp = None
    if not inferFlag:
        evalOp = tf.metrics.accuracy(labelTensor, predictions['prediction'])
    return tf.estimator.EstimatorSpec(mode, predictions, fullLoss, trainOp,
                                      evalOp)
I get the following error message:
ValueError: No name available for layer scope because the layer "lstmConv2d" has not been used yet. The scope name is determined the first time the layer instance is called. You must therefore call the layer before querying `scope_name`.
If I replace the convLSTM2D/static_rnn with any other kind of tf.layers layer, it works fine (when I use kernel_regularizer=tf.nn.l2_loss)...
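If I understand the error correctly, the cell's kernel variable (and its scope_name) only exists once the cell has been called by static_rnn, so the variable lookup and apply_regularization would have to come after the static_rnn call. Below is a minimal standalone sketch of that ordering, filtering by the enclosing variable-scope name instead of querying scope_name; the shapes and the 'module2' scope here are placeholders I made up and have not verified against my full model:

import tensorflow as tf

# Dummy sequence: 4 time steps of [batch=2, 8, 8, 3] inputs (placeholder shapes).
inputs = [tf.zeros([2, 8, 8, 3]) for _ in range(4)]
with tf.variable_scope('module2'):
    cell = tf.contrib.rnn.ConvLSTMCell(conv_ndims=2,
                                       input_shape=[8, 8, 3],
                                       output_channels=3,
                                       kernel_shape=[3, 3],
                                       use_bias=True,
                                       name='lstmConv2d')
    # Call the cell first; its variables are created during this call.
    outputs, state = tf.nn.static_rnn(cell, inputs, dtype=tf.float32)
    # Only now does the kernel exist, so collect it by the enclosing scope name
    # and add an L2 penalty; apply_regularization puts the summed penalty into
    # GraphKeys.REGULARIZATION_LOSSES, which my loss already sums over.
    kernelVars = [v for v in tf.trainable_variables()
                  if v.name.startswith('module2') and 'kernel' in v.name]
    tf.contrib.layers.apply_regularization(tf.nn.l2_loss, kernelVars)

print(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

I have not verified that this picks up the ConvLSTMCell kernel in my full model; is reordering like this the intended way, or is there something equivalent to kernel_regularizer for this cell?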