
I have trained an ML model on the Pascal dataset at image size 224, but when running inference on new images (some are high resolution, others only slightly higher resolution than the Pascal images) I get an error at pil2tensor().

@app.route('/analyze', methods=['POST'])
async def analyze(request):
    data = await request.form()
    img_bytes = await (data['file'].read())
    img = open_image(BytesIO(img_bytes))

    # Convert the upload to an RGB PIL image, then to a float tensor scaled to [0, 1]
    t_img = PIL.Image.open(BytesIO(img_bytes)).convert('RGB')
    t_img = pil2tensor(t_img, np.float32)
    t_img = t_img.div_(255)
    with torch.no_grad():
        # test_output = learn.model.eval()(t_img.unsqueeze_(0).cuda())
        test_output = learn.model.eval()(t_img.unsqueeze_(0))

For small images (for example, some low-resolution images from Google) the model produces a correct prediction within a few seconds, but for slightly higher-resolution images inference takes roughly 20-40 minutes!
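
The slowdown is consistent with the route above feeding the full-resolution tensor straight into the model, so a large upload costs far more compute than a 224 px Pascal image. Below is a minimal sketch of downscaling the upload so its longer side does not exceed the 224 px training size before calling pil2tensor; the preprocess_upload helper, the fastai.vision import path, and the thumbnail() call are assumptions on my part, not code from the question or the answer.

from io import BytesIO

import numpy as np
import PIL.Image
from fastai.vision import pil2tensor

def preprocess_upload(img_bytes, target_size=224):
    # Decode the upload and force RGB, exactly as the question does
    pil_img = PIL.Image.open(BytesIO(img_bytes)).convert('RGB')
    # Shrink in place so the longest side is at most target_size;
    # thumbnail() keeps the aspect ratio and never upscales
    pil_img.thumbnail((target_size, target_size))
    # Same tensor conversion and [0, 1] scaling as in the question
    t_img = pil2tensor(pil_img, np.float32).div_(255)
    # Add the batch dimension expected by learn.model
    return t_img.unsqueeze_(0)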


1 Answer


I solved the problem; here is the working code:

@app.route('/analyze', methods=['POST'])
async def analyze(request):
    data = await request.form()
    img_bytes = await (data['file'].read())
    img = open_image(BytesIO(img_bytes))
    localtime = _utc_to_local(datetime.utcnow())

    current_dir = os.path.dirname(__file__)
    media_path = os.path.join(current_dir, "media")
    media_path_original = os.path.join(media_path, "original")
    media_path_processed = os.path.join(media_path, "processed")

    # Save the original upload under a timestamped file name
    img_path = os.path.join(media_path_original, localtime + ".jpg")
    img.save(img_path)
    verify_image(Path(img_path), idx=0, delete=False, max_size=600, dest=Path(media_path_processed))
    processed_img_path = os.path.join(media_path_processed, localtime + ".jpg")
    # Fall back to the original if verify_image did not write a processed copy
    processed_img = open_image(processed_img_path) if os.path.exists(processed_img_path) else img
    processed_img.refresh()
    with torch.no_grad():
        test_output = learn.model.eval()(processed_img.data.unsqueeze(0))
        predictions = show_preds(processed_img, test_output, 0, detect_thresh=0.4, classes=t_classes)
    img_height, img_width = processed_img.size
    return JSONResponse({'predictions': predictions, 'img_height': img_height, 'img_width': img_width})
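
The change that removes the 20-40 minute inference times is the verify_image(..., max_size=600, dest=Path(media_path_processed)) call: it writes a downscaled copy of the upload (no dimension larger than about 600 px) into the processed directory, and the model then runs on that copy, falling back to the original only when no processed file was written, instead of on the full-resolution image.
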
Answered 2019-07-29T18:25:50.870