0

我是计算机视觉的新手

我从这里获取了 Raspberry Pi 图像分类程序(官方 TF-lite 示例):https ://github.com/tensorflow/examples/blob/master/lite/examples/image_classification/raspberry_pi/README.md

我从这里获取了用于图像分类的 Keras 模型:https ://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html

在我训练并保存 Keras 模型并在执行程序后转换为 TF-lite 模型后,它返回:

  File "/home/pi/tf_tests/tesi/src/classify.py", line 61, in run<br>
    results = classify_image(interpreter, image)<br>
  File "/home/pi/tf_tests/tesi/src/classify.py", line 40, in classify_image<br>
    ordered = np.argpartition(-output, top_k)<br>
  File "<__array_function__ internals>", line 6, in argpartition<br>
  File "/home/pi/venv/lib/python3.7/site-packages/numpy/core/fromnumeric.py", line 830, in argpartition<br>
    return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)<br>
  File "/home/pi/venv/lib/python3.7/site-packages/numpy/core/fromnumeric.py", line 58, in _wrapfunc<br>
    return _wrapit(obj, method, *args, **kwds)<br>
  File "/home/pi/venv/lib/python3.7/site-packages/numpy/core/fromnumeric.py", line 47, in _wrapit<br>
    result = getattr(asarray(obj), method)(*args, **kwds)<br>
ValueError: kth(=1) out of bounds (1)<br>

这是分类代码:

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import io
import time
import numpy as np
import picamera
import tensorflow as tf

from PIL import Image
from tflite_runtime.interpreter import Interpreter


def load_labels(path):
  with open(path, 'r') as f:
    return {i: line.strip() for i, line in enumerate(f.readlines())}


def set_input_tensor(interpreter, image):
  tensor_index = interpreter.get_input_details()[0]['index']
  input_tensor = interpreter.tensor(tensor_index)()[0]
  input_tensor[:, :] = image


def classify_image(interpreter, image, top_k=1):
  """Returns a sorted array of classification results."""
  set_input_tensor(interpreter, image)
  interpreter.invoke()
  output_details = interpreter.get_output_details()[0]
  output = np.squeeze(interpreter.get_tensor(output_details['index']))

  # If the model is quantized (uint8 data), then dequantize the results
  if output_details['dtype'] == np.uint8:
    scale, zero_point = output_details['quantization']
    output = scale * (output - zero_point)

  print(-output)
  ordered = np.argpartition(-output, top_k)
  #ordered = tf.nn.top_k(-output, top_k)
  return [(i, output[i]) for i in ordered[:top_k]]


def run():
  labels = load_labels('labels/simple_model_keras.txt')
  interpreter = Interpreter('model/generated/another_model_keras.tflite')
  #labels = load_labels('labels/default.txt')
  #interpreter = Interpreter('model/generated/default.tflite')
  interpreter.allocate_tensors()
  _, height, width, _ = interpreter.get_input_details()[0]['shape']

  with picamera.PiCamera(resolution=(640, 480), framerate=30) as camera:
    camera.start_preview()
    try:
      stream = io.BytesIO()
      for _ in camera.capture_continuous(stream, format='jpeg', use_video_port=True):
        stream.seek(0)
        image = Image.open(stream).convert('RGB').resize((width, height), Image.ANTIALIAS)
        start_time = time.time()
        results = classify_image(interpreter, image)
        elapsed_ms = (time.time() - start_time) * 1000
        label_id, prob = results[0]
        stream.seek(0)
        stream.truncate()
        camera.annotate_text = '%s %.2f\n%.1fms' % (labels[label_id], prob, elapsed_ms)
    finally:
      camera.stop_preview()

这是模型的代码:

def another_keras_model():
    from keras.preprocessing.image import ImageDataGenerator
    from keras.models import Sequential
    from keras.layers import Conv2D, MaxPooling2D
    from keras.layers import Activation, Dropout, Flatten, Dense
    from keras import backend as K

    img_width, img_height = 150, 150

    train_data_dir = '../img/train'
    validation_data_dir = '../img/test'
    nb_train_samples = 480*2
    nb_validation_samples = 206*2
    epochs = 50
    batch_size = 16

    if K.image_data_format() == 'channels_first':
        input_shape = (3, img_width, img_height)
    else:
        input_shape = (img_width, img_height, 3)

    model = Sequential()
    model.add(Conv2D(32, (3, 3), input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

    train_datagen = ImageDataGenerator(rescale=1. / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    train_generator = train_datagen.flow_from_directory(train_data_dir, target_size=(img_width, img_height), batch_size=batch_size, class_mode='binary')
    validation_generator = test_datagen.flow_from_directory(validation_data_dir, target_size=(img_width, img_height), batch_size=batch_size, class_mode='binary')

    model.fit_generator(train_generator, steps_per_epoch=nb_train_samples // batch_size, epochs=epochs, validation_data=validation_generator, validation_steps=nb_validation_samples // batch_size)
    model.save('model/generated/another_model_keras.h5')

    return

def keras_to_tfl(model_name):
    import tensorflow as tf
    model = tf.keras.models.load_model('model/generated/'+model_name+'.h5')
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    tflite_model = converter.convert()
    open('model/generated/'+model_name+'.tflite', "wb").write(tflite_model)
    return

有人可以帮我理解我错在哪里吗?
谢谢
安德鲁

4

0 回答 0