1

我正在尝试制作钓鱼小游戏的脚本,网上有一个关于albion钓鱼脚本的帖子,问题是在这个游戏中指针很细,并且有很多不同的纹理和颜色,对于简单的灰度匹配工作在滑块的一个部分上很好(例如在水纹理上),有时在其他部分(树木纹理)上工作,在第三个部分(例如天空)上不起作用。如果我更改为较低的阈值,它通常会在不匹配的情况下激活。

屏幕

import numpy as np
import cv2
from mss.windows import MSS as mss
from PIL import Image
import time
import pyautogui as pg
import cv2
import mss
import numpy

# --- Template-matching approach -------------------------------------------
# Locate the "perfect" zone on the fishing slider by matching a grayscale
# template against a screen capture; draw a rectangle and report a hit
# whenever the normalized match score reaches the threshold.

# Template of the green "perfect" field, loaded once as grayscale.
template = cv2.imread("perfect3.png", cv2.IMREAD_GRAYSCALE)
if template is None:
    # imread returns None (no exception) when the file is missing/unreadable.
    raise FileNotFoundError("perfect3.png not found or unreadable")
w, h = template.shape[::-1]

with mss.mss() as sct:
    # Screen region containing the fishing slider (absolute desktop
    # coordinates; the large "left" offset targets a secondary monitor).
    monitor = {"top": 344, "left": 4419, "width": 150, "height": 666}

    while True:
        last_time = time.time()
        # mss returns a BGRA buffer; BGRA2GRAY handles the alpha channel
        # explicitly instead of relying on BGR2GRAY's implicit 4-channel path.
        img = np.array(sct.grab(monitor))
        gray_frame = cv2.cvtColor(img, cv2.COLOR_BGRA2GRAY)
        res = cv2.matchTemplate(gray_frame, template, cv2.TM_CCOEFF_NORMED)
        # All locations whose normalized correlation is >= 0.85 count as hits.
        loc = np.where(res >= 0.85)
        for pt in zip(*loc[::-1]):
            cv2.rectangle(img, pt, (pt[0] + w, pt[1] + h), (0, 255, 0), 3)
            print('click')
        cv2.imshow("OpenCV/Numpy normal", img)

        # A single waitKey both refreshes the window and polls the keyboard;
        # the original called it twice per frame, doubling the per-frame wait
        # and hurting FPS.
        if cv2.waitKey(25) & 0xFF == ord("q"):
            cv2.destroyAllWindows()
            break

也尝试了边缘检测 cv2.Canny,但没有成功。

关键是当鱼在最小的绿色区域时单击按钮。该字段出现在滑块的随机部分中。

有任何想法吗?

=================

更新

正如 Furas 所说,尝试了颜色匹配(按颜色检测目标区域)

# import the necessary packages
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time
from mss.linux import MSS as mss
from PIL import Image
import mss
import numpy
import pyautogui
# --- Color-tracking approach ----------------------------------------------
# Track the green "perfect" field and the blue fish by HSV color masks and
# click when the fish's centroid is vertically inside the field.

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
    help="max buffer size")
args = vars(ap.parse_args())

# HSV boundaries (lower, upper) of the green "perfect" field and the blue
# fish marker.
greenLower = (42, 84, 211)
greenUpper = (69, 130, 255)
blueLower = (88, 76, 255)
blueUpper = (151, 76, 255)
# Ring buffer of recent green-field centroids.
pts = deque(maxlen=args["buffer"])

# grab video from screen (monitor area)
with mss.mss() as sct:
    monitor = {"top": 325, "left": 4423, "width": 136, "height": 662}
    while True:
        # grab the current frame (mss returns a BGRA buffer)
        frame = np.array(sct.grab(monitor))
        # blur to suppress texture noise, then convert to HSV
        blurred = cv2.GaussianBlur(frame, (11, 11), 0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
        # masks for the green field and the blue fish; erode+dilate removes
        # small speckle blobs left by thresholding
        mask = cv2.inRange(hsv, greenLower, greenUpper)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        mask2 = cv2.inRange(hsv, blueLower, blueUpper)
        mask2 = cv2.erode(mask2, None, iterations=2)
        mask2 = cv2.dilate(mask2, None, iterations=2)
        # external contours of each mask
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        cnts2 = cv2.findContours(mask2.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts2 = imutils.grab_contours(cnts2)
        center = None
        center2 = None
        # BUGFIX: require BOTH detections before calling max() on either
        # contour list. The original only checked cnts, so an empty cnts2
        # (fish not visible between rounds) raised
        # "ValueError: max() arg is an empty sequence".
        if len(cnts) > 0 and len(cnts2) > 0:
            # largest contour in each mask -> bounding box + centroid
            c = max(cnts, key=cv2.contourArea)
            (x, y, w, h) = cv2.boundingRect(c)
            M = cv2.moments(c)
            c2 = max(cnts2, key=cv2.contourArea)
            (x2, y2, w2, h2) = cv2.boundingRect(c2)
            M2 = cv2.moments(c2)
            # skip degenerate zero-area contours to avoid ZeroDivisionError
            if M["m00"] != 0 and M2["m00"] != 0:
                center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                center2 = (int(M2["m10"] / M2["m00"]), int(M2["m01"] / M2["m00"]))
                # draw the detections on the debug window
                cv2.rectangle(frame, (int(x), int(y)), (int(x + w), int(y + h)), (0, 255, 255), 2)
                cv2.circle(frame, center, 5, (0, 0, 255), -1)
                cv2.rectangle(frame, (int(x2), int(y2)), (int(x2 + w2), int(y2 + h2)), (0, 255, 255), 2)
                cv2.circle(frame, center2, 5, (0, 0, 255), -1)
                # update the points queue
                pts.appendleft(center)
                # click when the fish is vertically inside the green field
                if y - 15 < y2 < y + 15:
                    pyautogui.click(4908, 984)
                    time.sleep(2)
                    y2 = 0
        cv2.imshow("frame", frame)
        # one waitKey per frame: refreshes the window and polls for 'q'
        # (the original called it twice, halving the effective FPS)
        if cv2.waitKey(25) & 0xFF == ord("q"):
            cv2.destroyAllWindows()
            break

但在回合之前或回合之间我得到错误

Traceback (most recent call last):
  File "C:\Users\Game\Desktop\Py\Fish.py", line 74, in <module>
    c2 = max(cnts2, key=cv2.contourArea)
ValueError: max() arg is an empty sequence

如何解决?

按照 Furas 的建议,将两个掩码合二为一

mask = cv2.bitwise_or(mask1, mask2)

但是我可以用什么来找到绿地和鱼的最大值?到目前为止,它是 2 个掩码、2 个 cnts 和 2 个最大值。

使用 mss sct.grab 我没有那么好的 FPS(平均 25fps)还有其他更好的捕捉方法吗?

非常感谢!

4

1 回答 1

0

好的,经过一些变通处理后,我让它跑起来并做了测试:通过颜色检测,它可以在第一张(白天的)地图上工作。并不总是得到"完美"的命中,但至少是"好",猜想这是因为 FPS 偏低,也许如果我找到提高 FPS 的方法,效果会更好。

    # import the necessary packages
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time
from mss.linux import MSS as mss
from PIL import Image
import mss
import pyautogui
from win32 import win32gui
from pythonwin import win32ui
from win32.lib import win32con
from win32 import win32api

# --- Final working bot ------------------------------------------------------
# State machine driven by HSV color detection on three screen regions:
# slider+fish, the gray "caught" status field, and the purple "miss" marker.

# HSV thresholds (lower, upper) for each UI element.
# Green "perfect" field
greenLower = (42, 79, 211)
greenUpper = (69, 130, 255)
# Blue fish
blueLower = (88, 76, 255)
blueUpper = (151, 76, 255)
# Purple field (round-start indicator on the slider)
blue2Lower = (114, 139, 218)
blue2Upper = (123, 165, 255)
# Gray field "Status" (after fish is caught)
grayLower = (0, 0, 114)
grayUpper = (0, 0, 132)
# Purple miss marker
purpleLower = (123, 148, 239)
purpleUpper = (125, 165, 243)

# Counters and timers.
y2 = 0
a = 0                       # fishes caught
b = 0                       # misses
startTime = time.time()     # session start, used for the summary line
startTime2 = time.time()    # watchdog: last time any game state was seen


def _clean_mask(hsv_img, lower, upper):
    """Threshold hsv_img to [lower, upper] and erode/dilate away small blobs."""
    m = cv2.inRange(hsv_img, lower, upper)
    m = cv2.erode(m, None, iterations=2)
    return cv2.dilate(m, None, iterations=2)


def _contours(m):
    """External contours of mask m, version-independent via imutils."""
    found = cv2.findContours(m.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return imutils.grab_contours(found)


def _centroid(contours):
    """Centroid (x, y) of the largest contour, or None if it has zero area."""
    c = max(contours, key=cv2.contourArea)
    M = cv2.moments(c)
    if M["m00"] == 0:
        # degenerate contour -> avoid ZeroDivisionError
        return None
    return (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))


# grab video from screen (monitor areas)
with mss.mss() as sct:
    # picture of slider and fish
    monitor = {"top": 846, "left": 4726, "width": 162, "height": 398}
    # picture of gray field "Status" when the fish is caught
    monitor2 = {"top": 1017, "left": 4366, "width": 11, "height": 23}
    # picture of purple pixels if fish is lost
    monitor3 = {"top": 1013, "left": 4484, "width": 5, "height": 6}
    while True:
        frame = np.array(sct.grab(monitor))
        frame2 = np.array(sct.grab(monitor2))
        frame3 = np.array(sct.grab(monitor3))
        # Blur to suppress texture noise, then work in HSV for color masks.
        hsv = cv2.cvtColor(cv2.GaussianBlur(frame, (11, 11), 0), cv2.COLOR_BGR2HSV)
        hsv2 = cv2.cvtColor(cv2.GaussianBlur(frame2, (11, 11), 0), cv2.COLOR_BGR2HSV)
        hsv3 = cv2.cvtColor(cv2.GaussianBlur(frame3, (11, 11), 0), cv2.COLOR_BGR2HSV)

        cnts = _contours(_clean_mask(hsv, greenLower, greenUpper))      # green field
        cnts2 = _contours(_clean_mask(hsv, blueLower, blueUpper))       # fish
        cnts3 = _contours(_clean_mask(hsv, blue2Lower, blue2Upper))     # start slider
        cnts4 = _contours(_clean_mask(hsv2, grayLower, grayUpper))      # caught status
        cnts5 = _contours(_clean_mask(hsv3, purpleLower, purpleUpper))  # miss marker

        if len(cnts3) > 0:
            # Purple start indicator visible: begin the round.
            print("start")
            startTime2 = time.time()
            time.sleep(0.1)
            pyautogui.click(4978, 1239)
            time.sleep(1)
            startTime2 = time.time()
        elif len(cnts) > 0 and len(cnts2) > 0:
            startTime2 = time.time()
            center = _centroid(cnts)
            center2 = _centroid(cnts2)
            if center is not None and center2 is not None:
                (x, y) = center
                (x2, y2) = center2
                # Click when the fish's vertical position is inside the field.
                if y - 65 < y2 < y + 65:
                    print("catch")
                    pyautogui.click(4978, 1239)
                    time.sleep(1)
                    y2 = 0
                    startTime2 = time.time()
        elif len(cnts4) > 0:
            # Gray status field: fish caught; print stats and recast.
            time.sleep(1)
            a += 1
            elapsed = time.time() - startTime
            hour, rem = divmod(elapsed, 3600)
            minutes, seconds = divmod(rem, 60)
            print(a, "fishes and", b, "misses in %d:%d:%d" % (hour, minutes, seconds))
            print("start over")
            time.sleep(1)
            pyautogui.click(4741, 913)
            pyautogui.click(4741, 913)
            time.sleep(1)
            pyautogui.click(4978, 1239)
            pyautogui.click(4978, 1239)
            time.sleep(1)
            startTime2 = time.time()
        elif len(cnts5) > 0:
            # Purple miss marker: fish lost.
            b += 1
            print("Miss")
            time.sleep(2)
            pyautogui.click(4978, 1239)
            startTime2 = time.time()
        else:
            # Watchdog: nothing recognized for 40 seconds -> recast.
            if time.time() - startTime2 > 40:
                print("Longer than 40 seconds")
                startTime2 = time.time()
                print("restart")
                pyautogui.click(4741, 913)
                time.sleep(1)
                pyautogui.click(4978, 1239)
                time.sleep(1)
        # One waitKey per frame (the original called it twice, which adds an
        # extra 25 ms wait every iteration and lowers FPS).
        if cv2.waitKey(25) & 0xFF == ord("q"):
            cv2.destroyAllWindows()
            break
于 2020-03-12T11:05:41.637 回答