I am using a pretrained model from this site: https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet . The model I downloaded is v3-large_224_1.0_float.pb, from the "Large dm=1 (float)" link. After I run it, I always get this result:
===== TENSORFLOW RESULTS =======
cello, violoncello (score = 1.00000)
hip, rose hip, rosehip (score = 0.00000)
banjo (score = 0.00000)
steel drum (score = 0.00000)
marimba, xylophone (score = 0.00000)
Then I change the input image, but the result stays the same. I don't know how to fix this problem. Thank you. My code is as follows:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile Tensorflow Models
=========================
This article is an introductory tutorial to deploy tensorflow models with TVM.
Before we begin, the tensorflow python module is required to be installed.
Please refer to https://www.tensorflow.org/install
"""
# tvm, relay
import tvm
from tvm import te
from tvm import relay
# os and numpy
import numpy as np
import os.path
# Tensorflow imports
import tensorflow as tf
try:
    tf_compat_v1 = tf.compat.v1
except ImportError:
    tf_compat_v1 = tf
# Tensorflow utility functions
import tvm.relay.testing.tf as tf_testing
# Target settings
# Use these commented settings to build for cuda.
#target = 'cuda'
#target_host = 'llvm'
#layout = "NCHW"
#ctx = tvm.gpu(0)
target = 'llvm'
target_host = 'llvm'
layout = None
ctx = tvm.cpu(0)
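# Note: target/target_host/layout/ctx are not used by the TensorFlow-only run
# below; they are the usual inputs to the TVM compile-and-run step
# (relay.frontend.from_tensorflow / relay.build), sketched after the script.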
pwd = os.path.dirname(os.path.realpath(__file__))
img_url=os.path.join(pwd, "../data")
model_path = os.path.join(pwd, 'model/v3-large_224_1.0_float.pb')
map_proto_path = os.path.join(pwd, '../data/imagenet_2012_challenge_label_map_proto.pbtxt')
label_path = os.path.join(pwd, '../data/imagenet_synset_to_human_label_map.txt')
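# The two label-map files are only used by tf_testing.NodeLookup below to turn
# class IDs from the softmax output into human-readable strings.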
# preprocess one picture
def preprocess(img_path):
    # decode the JPEG, center-crop to 87.5%, resize to 224x224,
    # then shift and scale the pixel values
    with open(img_path, 'rb') as jpg:
        image = tf.image.decode_jpeg(jpg.read(), channels=3)
    image = tf.image.central_crop(image, 0.875)
    image = tf.expand_dims(image, 0)
    image = tf.image.resize_bilinear(image, [224, 224],
                                     align_corners=False)
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image
# prepare input data
def prepare_input(num):
    x_all = []
    for i in range(num):
        # load jpg (cycle through the first 67 validation images)
        index = i % 67
        img_path = os.path.join(
            img_url, 'imagenet_images/val/ILSVRC2012_val_000000%02d.JPEG' % (index + 1))
        x = preprocess(img_path)
        x_all.append(x)
    tx = tf.concat(x_all, 0)
    # evaluate the preprocessing graph to get a numpy batch
    with tf_compat_v1.Session() as session:
        x = tx.eval()
    return x
# set number pictures to predict
BATCH = 1
print("Test mobilenet-v3 model with batch " + str(BATCH))
# prepare input data
x = prepare_input(BATCH)
######################################################################
# Inference on tensorflow
# -----------------------
# Run the corresponding model on tensorflow
def create_graph():
    """Creates a graph from saved GraphDef file and returns a saver."""
    # Creates graph from saved graph_def.pb.
    with tf_compat_v1.gfile.GFile(model_path, 'rb') as f:
        graph_def = tf_compat_v1.GraphDef()
        graph_def.ParseFromString(f.read())
        graph = tf.import_graph_def(graph_def, name='')
        # Call the utility to import the graph definition into default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
def run_inference_on_image(image):
    """Runs inference on a preprocessed image batch.

    Parameters
    ----------
    image : numpy.ndarray
        Preprocessed image batch of shape (BATCH, 224, 224, 3).

    Returns
    -------
    Nothing
    """
    # Creates graph from saved GraphDef.
    create_graph()
    with tf_compat_v1.Session() as sess:
        softmax_tensor = sess.graph.get_tensor_by_name('MobilenetV3/Predictions/Softmax:0')
        predictions = sess.run(softmax_tensor,
                               {'input:0': image})
        predictions = np.squeeze(predictions)
        # Creates node ID --> English string lookup.
        node_lookup = tf_testing.NodeLookup(label_lookup_path=map_proto_path,
                                            uid_lookup_path=label_path)
        # Print top 5 predictions from tensorflow.
        top_k = predictions.argsort()[-5:][::-1]
        print("===== TENSORFLOW RESULTS =======")
        for node_id in top_k:
            human_string = node_lookup.id_to_string(node_id)
            score = predictions[node_id]
            print('%s (score = %.5f)' % (human_string, score))
run_inference_on_image(x)
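Once the plain TensorFlow run above gives sensible results, the plan is to compile the same graph with TVM using the target/layout/ctx settings defined earlier. Here is a rough sketch of that step (the helper name is mine, and it assumes a TVM release around 0.7 where relay.build returns a factory module; the graph-runtime API differs slightly between releases):

from tvm.contrib import graph_runtime

def compile_and_run_with_tvm(graph_def, x):
    # graph_def is the same parsed GraphDef as in create_graph();
    # 'input' is the placeholder name of the slim MobileNet V3 export.
    mod, params = relay.frontend.from_tensorflow(
        graph_def, layout=layout, shape={'input': x.shape})
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target=target, target_host=target_host, params=params)
    # Run the compiled module on the chosen context and fetch the softmax output.
    m = graph_runtime.GraphModule(lib['default'](ctx))
    m.set_input('input', tvm.nd.array(x.astype('float32')))
    m.run()
    return m.get_output(0).asnumpy()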