
I have a frozen model and 4 GPUs. I want to run inference over as much data as possible, as fast as possible. Essentially I want data parallelism, with the same model running inference on 4 batches at once: one batch per GPU.

This is roughly what I want to do:

import numpy as np
import tensorflow as tf

def return_ops():
    # load the frozen graph; model_path is defined elsewhere
    with tf.Graph().as_default() as graph:
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(model_path, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

    inputs = []
    outputs = []
    with graph.as_default() as g:
        for gpu in ['/gpu:0', '/gpu:1', '/gpu:2', '/gpu:3']:
            with tf.device(gpu):
                image_tensor = g.get_tensor_by_name('input:0')
                get_embeddings = g.get_tensor_by_name('embeddings:0')
            inputs.append(image_tensor)
            outputs.append(get_embeddings)

    return inputs, outputs, g

However, when I run:

# sample batch
x = np.ones((100, 160, 160, 3))
# get ops (return_ops returns three values)
image_tensor_list, emb_list, graph = return_ops()
# construct feed dict
feed_dict = {it: x for it in image_tensor_list}

# run the ops
with tf.Session(graph=graph, config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    inf = sess.run(emb_list, feed_dict=feed_dict)

everything runs on /gpu:0 when I check with nvidia-smi.

However, I can run

with tf.device("/gpu:1"):
    t = tf.range(1000)

with tf.Session() as sess:
    sess.run(t)

and there is activity on the second GPU...
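Device placement can also be inspected from TensorFlow itself rather than nvidia-smi, via the standard log_device_placement session option; a minimal sketch, reusing graph, emb_list, and feed_dict from above:

# prints each op's assigned device to stderr when the session runs
config = tf.ConfigProto(allow_soft_placement=True,
                        log_device_placement=True)
with tf.Session(graph=graph, config=config) as sess:
    sess.run(emb_list, feed_dict=feed_dict)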

How do I implement this data-parallel task correctly?


1 Answer


I learned that the tensors need to be placed on GPUs while the graph_def is being imported: tf.device only affects ops created inside its scope, so wrapping get_tensor_by_name in a device block does nothing, because it merely looks up nodes that already exist. The code below returns ops I can run with sess.run([output1, ..., outputk], feed_dict). It places every op on the GPU, which is not ideal, so I set allow_soft_placement to True in the session config.

class MultiGPUNet(object):

    def __init__(self, model_path, n_gpu):

        self.model_path = model_path
        self.n_gpu = n_gpu
        self.graph = tf.Graph()

        # specify device for n_gpu copies of model
        # during graphdef parsing
        for i in range(self.n_gpu):
            self._init_models(i)

    def _init_models(self, i):

        with self.graph.as_default():
            od_graph_def = tf.GraphDef()

            with tf.gfile.GFile(self.model_path, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)

                with tf.device('/device:GPU:{}'.format(i)):
                    tf.import_graph_def(od_graph_def, name='{}'.format(i))

    def get_tensors(self):

        output_tensors = []
        input_tensors = []
        train_tensors = []

        for i in range(self.n_gpu):
            input_tensors.append(
                self.graph.get_tensor_by_name('{}/<input_name>:0'.format(i)))
            output_tensors.append(
                self.graph.get_tensor_by_name('{}/<out_name>:0'.format(i)))
            train_tensors.append(
                self.graph.get_tensor_by_name('{}/<train_name>:0'.format(i)))

        def make_feed_dict(x):
            """x is a list of batches, one per GPU"""
            assert len(x) == len(input_tensors)
            # map each batch to its GPU's input tensor
            feed_dict = dict(zip(input_tensors, x))
            # disable training mode for every model copy
            feed_dict.update((t, False) for t in train_tensors)
            return feed_dict

        return output_tensors, make_feed_dict
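
For illustration, a minimal usage sketch under the assumptions above (the <input_name>/<out_name>/<train_name> placeholders filled in, a hypothetical frozen-model path, and one large batch split evenly across GPUs):

# hypothetical usage; the model path and batch shapes are assumptions
net = MultiGPUNet(model_path='frozen_model.pb', n_gpu=4)
output_tensors, make_feed_dict = net.get_tensors()

# split one large batch into n_gpu chunks, one per model copy
x = np.ones((400, 160, 160, 3))
batches = np.array_split(x, 4)

config = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(graph=net.graph, config=config) as sess:
    embeddings = sess.run(output_tensors,
                          feed_dict=make_feed_dict(batches))

sess.run returns one output array per GPU copy, so np.concatenate(embeddings) reassembles the results in the original batch order.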