I am trying to capture timed screenshots during my PsychoPy task. I have a fixation cross, then 2 faces on the left and right sides of the screen, then a dot probe. I only want screenshots of the 1-second period when the two faces are on screen. There are 10 different face pairs in the routine, and the routine loops 3 times, so ideally this code would save 30 images to my computer. My code so far is as follows:
from __future__ import division # so that 1/3=0.333 instead of 1/3=0
from psychopy import visual, core, data, event, logging, sound, gui
from psychopy.constants import * # things like STARTED, FINISHED
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
import socket
import time
# Store info about the experiment session
expName = 'DotProbe_EyeTracker_BSchool'
expInfo = {u'session': u'001', u'participant': u''}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False: core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
# Setup files for saving
if not os.path.isdir('data'):
    os.makedirs('data') # if this fails (e.g. permissions) we will get error
filename = 'data' + os.path.sep + '%s_%s' %(expInfo['participant'], expInfo['date'])
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
extraInfo=expInfo, runtimeInfo=None,
originPath=None,
savePickle=True, saveWideText=True,
dataFileName=filename)
# Start Code - component code to be run before the window creation
# Setup the Window
win = visual.Window(size=(1366, 768), fullscr=True, screen=0, allowGUI=False,
allowStencil=False, monitor='testMonitor', color=[-1,-1,-1], colorSpace='rgb')
myClock = core.Clock()
# store frame rate of monitor if we can measure it successfully
expInfo['frameRate']=win.getActualFrameRate()
if expInfo['frameRate']!=None:
    frameDur = 1.0/round(expInfo['frameRate'])
else:
    frameDur = 1.0/60.0 # couldn't get a reliable measure so guess
# Initialize components for Routine "instructions"
instructionsClock = core.Clock()
text = visual.TextStim(win=win, ori=0, name='text',
text='Respond to the probe once it appears. Either click "2" when probe replaces left face or click "3" when probe replaces right face.', font='Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color='white', colorSpace='rgb', opacity=1,
depth=0.0)
# Initialize components for Routine "block1"
block1Clock = core.Clock()
fixation = visual.TextStim(win=win, ori=0, name='fixation',
text='+', font='Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color='white', colorSpace='rgb', opacity=1,
depth=0.0)
leftimage = visual.ImageStim(win=win, name='leftimage',
image='sin', mask=None,
ori=0, pos=[0,0], size=[1, 1.34],
color=[1,1,1], colorSpace='rgb', opacity=1,
texRes=128, interpolate=False, depth=-1.0)
rightimage = visual.ImageStim(win=win, name='rightimage',
image='sin', mask=None,
ori=0, pos=[0,0], size=[1, 1.34],
color=[1,1,1], colorSpace='rgb', opacity=1,
texRes=128, interpolate=False, depth=-2.0)
probe = visual.ImageStim(win=win, name='probe',
image='sin', mask=None,
ori=0, pos=[0,0], size=[0.5, 0.5],
color=[1,1,1], colorSpace='rgb', opacity=1,
texRes=128, interpolate=False, depth=-3.0)
# Get and save a screenshot of everything in stimlist:
stimlist = [leftimage, rightimage]
t0 = myClock.getTime()
rect=(-1,1,1,-1)
screenshot = visual.BufferImageStim(win, stim=stimlist, rect=rect)
# rect is the screen rectangle to grab, (-1,1,1,-1) is whole screen
# as a list of the edges: Left Top Right Bottom, in norm units.
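# --- Sketch (assumption, not part of the original script): actually writing the
# capture to disk. BufferImageStim only keeps the composited image in memory for
# fast re-drawing; to get image files, one option is the window's own
# getMovieFrame()/saveMovieFrames() methods. The helper name and the filename
# pattern below are hypothetical.
def saveFaceScreenshot(trialN):
    """Grab the back buffer (call after drawing both faces, before win.flip())
    and write it to data/faces_trialNN.png."""
    win.getMovieFrame(buffer='back')  # capture what the next flip will show
    win.saveMovieFrames('data' + os.path.sep + 'faces_trial%02d.png' % trialN)
# Calling this once per face-pair presentation inside the (not shown) trial loop,
# e.g. saveFaceScreenshot(trials.thisN), would yield the 30 images
# (10 face pairs x 3 repeats) described above.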
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine