
I am trying to capture timed screenshots during my PsychoPy task. I have a fixation cross, then 2 faces on the left and right sides of the screen, then a dot probe. I only want a screenshot of the 1-second period when the two faces appear on screen. There are 10 different face pairs in the routine, and the routine loops 3 times. Ideally, this code would save 30 images to my computer. My code so far is:

from __future__ import division  # so that 1/3=0.333 instead of 1/3=0
from psychopy import visual, core, data, event, logging, sound, gui
from psychopy.constants import *  # things like STARTED, FINISHED
import numpy as np  # whole numpy lib is available, prepend 'np.'
from numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray
from numpy.random import random, randint, normal, shuffle
import os  # handy system and path functions

import socket
import time

# Store info about the experiment session
expName = 'DotProbe_EyeTracker_BSchool' 
expInfo = {u'session': u'001', u'participant': u''}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False: core.quit()  # user pressed cancel
expInfo['date'] = data.getDateStr()  # add a simple timestamp
expInfo['expName'] = expName

# Setup files for saving
if not os.path.isdir('data'):
    os.makedirs('data')  # if this fails (e.g. permissions) we will get error
filename = 'data' + os.path.sep + '%s_%s' %(expInfo['participant'], expInfo['date'])
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING)  # this outputs to the screen, not a file

# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
    extraInfo=expInfo, runtimeInfo=None,
    originPath=None,
    savePickle=True, saveWideText=True,
    dataFileName=filename)

# Start Code - component code to be run before the window creation

# Setup the Window
win = visual.Window(size=(1366, 768), fullscr=True, screen=0, allowGUI=False,      
    allowStencil=False, monitor='testMonitor', color=[-1,-1,-1], colorSpace='rgb')
myClock = core.Clock()

# store frame rate of monitor if we can measure it successfully
expInfo['frameRate']=win.getActualFrameRate()
if expInfo['frameRate']!=None:
    frameDur = 1.0/round(expInfo['frameRate'])
else:
    frameDur = 1.0/60.0 # couldn't get a reliable measure so guess

# Initialize components for Routine "instructions"
instructionsClock = core.Clock()
text = visual.TextStim(win=win, ori=0, name='text',
    text='Respond to the probe once it appears. Either click "2" when the probe replaces the left face or click "3" when the probe replaces the right face.',    font='Arial',
    pos=[0, 0], height=0.1, wrapWidth=None,
    color='white', colorSpace='rgb', opacity=1,
    depth=0.0)

# Initialize components for Routine "block1"
block1Clock = core.Clock()
fixation = visual.TextStim(win=win, ori=0, name='fixation',
    text='+',    font='Arial',
    pos=[0, 0], height=0.1, wrapWidth=None,
    color='white', colorSpace='rgb', opacity=1,
    depth=0.0)

leftimage = visual.ImageStim(win=win, name='leftimage',
    image='sin', mask=None,
    ori=0, pos=[0,0], size=[1, 1.34],
    color=[1,1,1], colorSpace='rgb', opacity=1,
    texRes=128, interpolate=False, depth=-1.0)

rightimage = visual.ImageStim(win=win, name='rightimage',
    image='sin', mask=None,
    ori=0, pos=[0,0], size=[1, 1.34],
    color=[1,1,1], colorSpace='rgb', opacity=1,
    texRes=128, interpolate=False, depth=-2.0)

probe = visual.ImageStim(win=win, name='probe',
    image='sin', mask=None,
    ori=0, pos=[0,0], size=[0.5, 0.5],
    color=[1,1,1], colorSpace='rgb', opacity=1,
    texRes=128, interpolate=False, depth=-3.0)

# Get and save a screenshot of everything in stimlist:
stimlist = [leftimage, rightimage]
t0 = myClock.getTime()
rect=(-1,1,1,-1)
screenshot = visual.BufferImageStim(win, stim=stimlist, rect=rect)
    # rect is the screen rectangle to grab, (-1,1,1,-1) is whole screen
    # as a list of the edges: Left Top Right Bottom, in norm units.

# Create some handy timers
globalClock = core.Clock()  # to track the time since experiment started
routineTimer = core.CountdownTimer()  # to track time remaining of each (non-slip) routine

3 Answers


Use win.getMovieFrame and win.saveMovieFrames as the others suggest. You don't need visual.BufferImageStim. Once you finish the script you will probably have a loop over conditions anyway, so I would take the screenshots during the actual experiment run rather than "simulating" it beforehand. That way the screenshots accurately document what actually happened during the experiment, even if you made a mistake and drew something incorrectly :-) Of course, if the screenshots are purely for documentation, remove or comment out those lines when running the real experiment to improve performance.

# Loop through trials. You may organize them using ``data.TrialHandler`` or generate them yourself.
for trial in myTrialList:
    # Draw whatever you need, probably dependent on the condition. E.g.:
    if trial['condition'] == 'right':
        rightimage.draw()
    else:
        leftimage.draw() 
    fixation.draw()

    # Show your stimulus
    win.flip()

    # Save screenshot. Consider commenting out these lines for production runs.
    win.getMovieFrame()   # Defaults to the front buffer, i.e. what is on screen now.
    win.saveMovieFrames('screenshot_' + trial['condition'] + '.png')  # save with a descriptive and unique filename.
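
To hit the question's target of 30 saved images (10 face pairs x 3 repeats), the filename has to change on every trial. Below is a minimal sketch of that idea, assuming a TrialHandler built from a hypothetical conditions file with 'leftFace' and 'rightFace' image columns, and reusing the win, leftimage and rightimage objects defined in the question:

from psychopy import core, data

# Hypothetical conditions file, one row per face pair; the column names are assumptions.
trials = data.TrialHandler(trialList=data.importConditions('facePairs.xlsx'),
                           nReps=3, method='random')

for thisTrial in trials:
    leftimage.image = thisTrial['leftFace']    # assumed column name
    rightimage.image = thisTrial['rightFace']  # assumed column name
    leftimage.draw()
    rightimage.draw()
    win.flip()                                 # faces are now on the front buffer

    win.getMovieFrame()                        # grab what is currently on screen
    # trials.thisN runs 0..29 across the 3 repeats, so 30 unique files are written
    win.saveMovieFrames('data/faces_%03d.png' % trials.thisN)

    core.wait(1.0)                             # keep the face pair up for 1 second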
answered 2014-08-11 07:15:06

I can't test this since I don't have PsychoPy set up on my current machine, but using Window.getMovieFrame() and Window.saveMovieFrames() should get you where you need to go, e.g.:

screenshot = visual.BufferImageStim(win, stim=stimlist, rect=rect)
    # rect is the screen rectangle to grab, (-1,1,1,-1) is whole screen
    # as a list of the edges: Left Top Right Bottom, in norm units.
# screenshot is currently on the 'back' buffer as we haven't flipped yet
win.getMovieFrame(buffer='back')
win.saveMovieFrames('stimuli.png')
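
As a follow-up sketch (reusing the names from the question's code): because the grab above happens on the back buffer before any flip, the resulting BufferImageStim can later be drawn much faster than redrawing the individual faces, for example to keep the pair on screen for roughly one second:

n_frames = 60                  # assumes a ~60 Hz refresh rate, i.e. about 1 second
for frameN in range(n_frames):
    screenshot.draw()          # redraws the pre-rendered face pair in a single call
    win.flip()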
answered 2014-08-11 00:07:18

When using the PsychoPy Builder, I could not use any of the provided answers, all of which require win.flip(). Here is the solution that worked for me, using PIL:

from PIL import ImageGrab
import datetime
import os

os.makedirs("./data/" + expInfo['participant'], exist_ok=True)
# Build a filesystem-safe, timestamped filename
timestamp = str(datetime.datetime.now()).replace("-", "_").replace(" ", "_").replace(".", "_").replace(":", "_")
output_image_name = "./data/" + expInfo['participant'] + "/" + timestamp + ".png"
im = ImageGrab.grab()  # grabs the whole desktop as a PIL image
im.save(output_image_name, 'png')
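
Note that ImageGrab.grab() captures the entire desktop, not just the PsychoPy window. If only the window is wanted, Pillow's grab also accepts a bounding box; a hedged sketch with placeholder coordinates (replace them with the real window position and size):

from PIL import ImageGrab

# Placeholder geometry: left, top, right, bottom of the window in desktop pixels.
bbox = (0, 0, 1366, 768)
im = ImageGrab.grab(bbox=bbox)
im.save('window_screenshot.png', 'png')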
answered 2021-10-07 20:46:06