我正在尝试使用 TF Serving 为我的 TF 模型提供服务。这是我的模型输入:
raw_feature_spec = {
'x': tf.io.VarLenFeature(tf.string),
'y': tf.io.VarLenFeature(tf.string),
'z': tf.io.FixedLenFeature([], tf.string)
}
然后我导出我的模型:
import tensorflow as tf
import tensorflow_transform as tft
tf_transform_output = tft.TFTransformOutput('saved_transform_graph_folder')
estimator = tf.keras.estimator.model_to_estimator(keras_model_path='model_folder')
estimator.export_saved_model('OUTPUT_MODEL_NAME', make_serving_input_fn(tf_transform_output))
def make_serving_input_fn(tf_transform_output):
raw_feature_spec = {
'x': tf.io.VarLenFeature(tf.string),
'y': tf.io.VarLenFeature(tf.string),
'z': tf.io.FixedLenFeature([], tf.string)
}
def serving_input_fn():
raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(raw_feature_spec)
raw_reatures = raw_input_fn().features
transformed_features = tf_transform_output.transform_raw_features(raw_reatures)
return tf.estimator.export.ServingInputReceiver(transformed_features, raw_reatures)
return serving_input_fn
因此,当我使用 Docker Hub 中的 TFS 映像通过此代码提供导出模型时,将 HTTP GET 请求发送到 /model/metadata 我得到:
{
"model_spec": {
"name": "newModel",
"signature_name": "",
"version": "1585581470"
},
"metadata": {
"signature_def": {
"signature_def": {
"serving_default": {
"inputs": {
"x": {
"coo_sparse": {
"values_tensor_name": "ParseExample/ParseExampleV2:3",
"indices_tensor_name": "ParseExample/ParseExampleV2:1",
"dense_shape_tensor_name": "ParseExample/ParseExampleV2:5"
},
"dtype": "DT_STRING",
"tensor_shape": {
"dim": [
{
"size": "-1",
"name": ""
},
{
"size": "-1",
"name": ""
}
],
"unknown_rank": false
}
},
"y": {
"coo_sparse": {
"values_tensor_name": "ParseExample/ParseExampleV2:2",
"indices_tensor_name": "ParseExample/ParseExampleV2:0",
"dense_shape_tensor_name": "ParseExample/ParseExampleV2:4"
},
"dtype": "DT_STRING",
"tensor_shape": {
"dim": [
{
"size": "-1",
"name": ""
},
{
"size": "-1",
"name": ""
}
],
"unknown_rank": false
},
"z": {
"dtype": "DT_STRING",
"tensor_shape": {
"dim": [
{
"size": "-1",
"name": ""
}
],
"unknown_rank": false
},
"name": "ParseExample/ParseExampleV2:6"
}
},
"outputs": {
"main_output": {
"dtype": "DT_FLOAT",
"tensor_shape": {
"dim": [
{
"size": "-1",
"name": ""
},
{
"size": "20",
"name": ""
}
],
"unknown_rank": false
},
"name": "main_output/Softmax:0"
}
},
"method_name": "tensorflow/serving/predict"
}
}
}
}
但是当我向 /model:predict 发送 HTTP POST 请求时:
{
"instances":
[
{
"x": ["text","text","text","text","text","text"],
"y": ["test","test","test","test","test","test"],
"z": "str"
}
]
}
我收到一个错误
{
"error": "Tensor :0, specified in either feed_devices or fetch_devices was not found in the Graph"
}
那么我应该如何重新组织 JSON 以从 API 获得正确的响应?TF 版本:2.1,TF Serving 最新和 TF Transform - 0.21.0。