I am using TensorFlow to implement a simple matrix factorization algorithm. Every step runs correctly, but at the final step, when I try to eval() a tensor so that I can store its value, the program stops making progress and just consumes more and more memory. Is there something wrong with my code? I am a beginner with TensorFlow and I cannot tell where the problem is. My code is below.
import numpy as np
import tensorflow as tf

class model(object):
    def __init__(self, D, Q, stepsize = 6e-7, max_iter = 200, inner_maxiter = 50, dim = 200, verbose = 5):
        # D and Q are held as constants; W and C are the two factor matrices
        self.D = tf.constant(D, dtype = tf.float32)
        self.Q = tf.constant(Q, dtype = tf.float32)
        self.rank = dim
        self.stepsize = stepsize
        self.max_iter = max_iter
        self.inner_maxiter = inner_maxiter
        self.verbose = verbose
        self.W = tf.Variable((np.random.rand(self.rank, sample_num)), dtype = tf.float32, name = 'W')
        self.C = tf.Variable((np.random.rand(context_num, self.rank)), dtype = tf.float32, name = 'C')

    def _run(self, sess):
        Q = self.Q
        D = self.D
        W = self.W
        C = self.C
        for i in xrange(self.max_iter):
            if (i + 1) % 2 == 1:
                # odd epochs: update W with C fixed
                for j in xrange(self.inner_maxiter):
                    ED = tf.transpose(Q) * (1.0 / (1.0 + tf.exp(- tf.matmul(C, W))))
                    recons = D - ED
                    W_grad = tf.matmul(tf.transpose(C), recons)
                    W = W + self.stepsize * W_grad
            else:
                # even epochs: update C with W fixed
                for j in xrange(self.inner_maxiter):
                    ED = tf.transpose(Q) * (1.0 / (1.0 + tf.exp(- tf.matmul(C, W))))
                    recons = D - ED
                    C_grad = tf.matmul(recons, tf.transpose(W))
                    C = C + self.stepsize * C_grad
            print 'epoch: %d' % i
            print W.eval()
            print C.eval()

train_epoch = model(D, Q, args.step_size, \
    args.max_iter, args.inner_maxiter, args.dim, args.verbose)

with tf.Session(config = config) as sess:
    tf.initialize_all_variables().run()
    train_epoch._run(sess)
The program stalls at the last two lines of _run(), the ones containing W.eval() and C.eval(), and memory usage keeps growing. What should I do to fix this? Can anyone help?
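For reference, this is how I understood eval() is supposed to work. A minimal standalone sketch with made-up shapes (the names and sizes here are purely illustrative, not from my real data) runs fine for me:

import numpy as np
import tensorflow as tf

# tiny self-contained check of eval(): evaluate a variable in the
# default session and get its current value back as a numpy array
x = tf.Variable(np.random.rand(3, 3), dtype=tf.float32, name='x')
with tf.Session() as sess:
    tf.initialize_all_variables().run()
    print(x.eval())  # prints the 3x3 value of x

So I would expect W.eval() and C.eval() inside _run() to behave the same way, but there they hang instead.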