0

这会从 Github 下载 20 张图片,运行大约需要 57 秒。当我在另一台具有不同 Internet 连接的 PC 上运行它时,我得到了相同的结果,有什么办法可以加快速度吗?

图像约为 1 MB

def loadImage(url, Finger):
    """Download an image from *url* and return it as a resized grayscale array.

    Args:
        url: HTTP(S) URL of the image to fetch.
        Finger: If truthy, resize to the full-fingerprint size (320x550);
            otherwise resize to the solution-tile size (127x127).

    Returns:
        A 2-D numpy uint8 array (grayscale image).

    Raises:
        requests.HTTPError: if the server responds with an error status.
    """
    # Fail fast on 4xx/5xx instead of handing an HTML error page to
    # cv2.imdecode (which would silently return None), and bound the wait
    # so a stalled connection cannot hang the loader forever.
    response = requests.get(url, timeout=30)
    response.raise_for_status()

    buffer = np.asarray(bytearray(response.content), dtype="uint8")
    image = cv2.imdecode(buffer, cv2.IMREAD_GRAYSCALE)

    # Fingerprints and their solution tiles use different fixed sizes.
    size = (320, 550) if Finger else (127, 127)
    return cv2.resize(image, size)

class Finerprint:
    """Container for one fingerprint image and its four solution tiles.

    NOTE(review): the class name looks like a typo for "Fingerprint";
    it is kept as-is because callers reference it by this spelling.
    """

    def __init__(self):
        # Full fingerprint image (populated later by the loader).
        self.fingerImg = []
        # Four solution-tile images, indexed 0..3.
        self.fingerSol = [None for _ in range(4)]


fingerprints = [None] * 4


def initFingerprints():
    """Populate the module-level ``fingerprints`` list.

    For each of the four puzzles, downloads the main fingerprint image and
    its four solution tiles from GitHub (serially, one request per image)
    and stores them on a fresh Finerprint instance.
    """
    base = "https://raw.githubusercontent.com/HazemMohamed98/GTA-Online-Fingerprint/Images"
    for i in range(4):
        # NOTE: the original code also assigned fingerprints[i].fingersol = []
        # (lowercase "s") here; that only created an unused attribute -- the
        # class already initializes fingerSol in __init__ -- so it is removed.
        fingerprints[i] = Finerprint()
        print(f"Loading F{i}")
        fingerprints[i].fingerImg = loadImage(f"{base}/F{i + 1}.jpg", True)
        for j in range(4):
            print(f"Loading F{i}S{j}")
            fingerprints[i].fingerSol[j] = loadImage(f"{base}/F{i + 1}S{j + 1}.jpg", False)

initFingerprints()
4

1 回答 1

0

受 @Brendan Abel 的建议启发,我尝试了 multiprocessing。我想出了下面这个方案:

#!/usr/bin/python3
import cv2
from multiprocessing.dummy import Pool as ThreadPool
import numpy as np
import requests
from time import time

# Number of fingerprint puzzles, and of solution tiles per puzzle.
f_count = 4
s_count = 4

def loadImage(response, Finger):
    """Decode an already-fetched HTTP response into a resized grayscale image.

    Args:
        response: a requests.Response whose body is an encoded image.
        Finger: truthy -> fingerprint size (320x550); falsy -> tile
            size (127x127).

    Returns:
        The decoded, resized grayscale image as a numpy array.
    """
    raw = np.asarray(bytearray(response.content), dtype="uint8")
    decoded = cv2.imdecode(raw, cv2.IMREAD_GRAYSCALE)

    # Two fixed output sizes: main fingerprint vs. solution tile.
    target = (320, 550) if Finger else (127, 127)
    return cv2.resize(decoded, target)

def build_URL(f=0, s=None):
    """Build the raw-GitHub URL for fingerprint *f* (optionally tile *s*).

    Args:
        f: 1-based fingerprint number.
        s: optional 1-based solution-tile number; omit for the main image.

    Returns:
        The full URL of the corresponding .jpg file.
    """
    base = "https://raw.githubusercontent.com/HazemMohamed98/GTA-Online-Fingerprint/Images/"
    name = f'F{f}' if s is None else f'F{f}S{s}'
    return f'{base}{name}.jpg'

def fetch_all(rf, rs):
    """Download all fingerprint and solution-tile images concurrently.

    Args:
        rf: number of fingerprints.
        rs: number of solution tiles per fingerprint.

    Returns:
        List of responses: the rf main images first, then the rf*rs tiles
        in (fingerprint, tile) order.
    """
    # Main images first, then every (fingerprint, tile) combination --
    # the consumer relies on this ordering.
    urls = [build_URL(f + 1) for f in range(rf)]
    urls += [build_URL(f + 1, s + 1) for f in range(rf) for s in range(rs)]

    started = time()
    # 20 worker threads: one per URL, so every request is in flight at once.
    with ThreadPool(20) as pool:
        fetched = list(pool.map(requests.get, urls))
    elapsed = time() - started
    print(f'Parallel fetch time: {elapsed} sec')
    return fetched

class Fingerprint:
    """Holds one fingerprint image plus its solution-tile images."""

    def __init__(self):
        # Main fingerprint image; assigned after fetching.
        self.fingerImg = []
        # One slot per solution tile (s_count of them).
        self.fingerSol = [None for _ in range(s_count)]


fingerprints = [None] * f_count


def initFingerprints():
    """Fetch every image in parallel and fill the ``fingerprints`` registry.

    Returns:
        The populated module-level ``fingerprints`` list.
    """
    # All responses, in the order built by fetch_all(): the f_count main
    # images first, followed by the f_count * s_count solution tiles.
    responses = fetch_all(f_count, s_count)

    # Create the container objects up front.
    for slot in range(f_count):
        fingerprints[slot] = Fingerprint()

    # Tile responses start right after the main images.
    ndx = f_count
    for i in range(f_count):
        main = responses[i]
        print(f"{i} Loading image from {main.url}")
        fingerprints[i].fingerImg = loadImage(main, True)
        for j in range(s_count):
            tile = responses[ndx]
            print(f"{i} {j} {ndx} Loading image from {tile.url}")
            fingerprints[i].fingerSol[j] = loadImage(tile, False)
            ndx += 1
    return fingerprints

请注意,我保留了打印循环索引的部分,以及在获取操作之后每个 url 被分配到了哪里。我没有实际的方法来检查 cv2 的结果,我只是想看看使用 multiprocessing 的效果如何。

在我的测试中,并行获取所有 20 个 URL 的实际时间在 0.3 到 0.6 秒之间。它比串行约 6 秒的结果有了很大的改进。

好吧,这对我来说很有教育意义,我希望它对 OP 有用!

于 2020-09-20T02:47:36.813 回答