How to use the psychopy.visual.Window function in psychopy

To help you get started, we've selected a few psychopy examples based on popular ways visual.Window is used in public projects.

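Before the project examples, here is a minimal sketch of typical visual.Window usage. The window size, units, color and message text below are illustrative choices, not taken from any of the projects:

from psychopy import visual, core, event

# open a small, windowed (non-fullscreen) grey window, working in pixel units
win = visual.Window(size=[800, 600], units='pix', color='grey', fullscr=False)

# draw a simple text stimulus to the back buffer, then flip it to the screen
msg = visual.TextStim(win, text='Hello, PsychoPy')
msg.draw()
win.flip()

event.waitKeys()  # wait for any key press before cleaning up
win.close()
core.quit()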

github lupyanlab / lab-computer / psychopy-tests / test_psychopy_sound_blocking.py
from psychopy import prefs, sound, core, visual, event


if prefs.general['audioLib'][0] == 'pyo':
    print('initializing pyo')
    # if pyo is the first lib in the list of preferred libs then we can use a small buffer
    # pygame sound is very bad with a small buffer though
    sound.init(48000, buffer=128)
print('Using %s (with %s) for sounds' % (sound.audioLib, sound.audioDriver))

dogSound = sound.Sound('sounds/dog-label.wav')
highA = sound.Sound('A',octave=3, sampleRate=44100, secs=1.0, bits=24)
tick = sound.Sound('300',secs=0.01,sampleRate=44100, bits=24)
tock = sound.Sound('3500',secs=0.01, sampleRate=44100, bits=24)

win = visual.Window([200, 200], pos=[0, 0], color="blue", allowGUI=False,
                    monitor='testingRoom', units='pix', winType='pyglet')
print('d for dog, i for tick, o for tock, q for quit')
while True:
    key = event.getKeys()
    try:
        if key[0] == 'd':
            dogSound.play()
        elif key[0] == 'i':
            tick.play()
        elif key[0] == 'o':
            tock.play()
        elif key[0] == 'q':
            break
    except IndexError:  # no key pressed this iteration
        pass
github fsxfreak / esys-pbi / src / pushData.py
# imports needed by this excerpt: pylsl for the LSL stream, psychopy for display
from pylsl import StreamInfo, StreamOutlet
from psychopy import visual, core

import esys_cfg  # project-local config helper used below

NUM_CHANNELS = 8
SAMP_RATE = 100

info = StreamInfo('OpenBCI', 'EEG', NUM_CHANNELS, SAMP_RATE, 'float32', 'myuid34234')
outlet = StreamOutlet(info)

# function call to start displaying images
#def displayStimuli
# for file in os.listdir('directory'):
# for i in range(0,len(images)):

# def display(files, .....):
#   ex: file_name = ['/dir/dir2/img.png']

window = visual.Window([512, 512])

cfg = esys_cfg.create_config('../stimulus-config/test.yml')
print(cfg.trial_order)
#trial_order = ['one', 'two', 'one']

for element in cfg.trial_order:  # loop through all elements in trial_order
    for imageIndex in range(len(cfg.trials[element].files)):
        # build the path to this trial's image and show it for 2 seconds
        stimulus = cfg.trials[element].stimuli_folder + '/' + cfg.trials[element].files[imageIndex]
        showStim = visual.ImageStim(window, stimulus)
        # equivalently: visual.ImageStim(window, image=stimulus)
        showStim.draw()
        window.flip()
        core.wait(2.0)
        # stimuli_running = True
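The commented-out notes near the top of this excerpt sketch a display helper that was never written. A minimal version might look like the following; the function name, signature and 2-second duration are illustrative assumptions, not part of the original project:

def display(window, stimuli_folder, files, duration=2.0):
    # show each image in `files` (relative to `stimuli_folder`) for `duration` seconds
    for file_name in files:
        stim = visual.ImageStim(window, stimuli_folder + '/' + file_name)
        stim.draw()
        window.flip()
        core.wait(duration)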
github jadref / buffer_bci / python / imaginedMovement_psychoPy / simple_imagined_movement_lastrun.py
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
    extraInfo=expInfo, runtimeInfo=None,
    originPath='C:\\Users\\srw-install\\Desktop\\buffer_bci\\python\\imaginedMovement_psychoPy\\simple_imagined_movement.psyexp',
    savePickle=True, saveWideText=True,
    dataFileName=filename)
# save a log file for detailed verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING)  # this outputs to the screen, not a file

endExpNow = False  # flag for 'escape' or other condition => quit the exp

# Start Code - component code to be run before the window creation

# Setup the Window
win = visual.Window(size=[1024, 600], fullscr=False, screen=0, allowGUI=True, allowStencil=False,
    monitor='testMonitor', color=[0,0,0], colorSpace='rgb',
    blendMode='avg', useFBO=True,
    )
# store frame rate of monitor if we can measure it successfully
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] is not None:
    frameDur = 1.0 / round(expInfo['frameRate'])
else:
    frameDur = 1.0 / 60.0  # couldn't get a reliable measure, so guess

# Initialize components for Routine "Instructions"
InstructionsClock = core.Clock()
text_3 = visual.TextStim(win=win, ori=0, name='text_3',
    text='Simple Imagined Movement Experiment\n\nDuring the trials imagine yourself vigorously shaking the indicated body part (left hand, right hand, left foot or right foot).\n\nPress  to continue.', font='Arial',
    pos=[0, 0], height=0.1, wrapWidth=80,
    color='white', colorSpace='rgb', opacity=1,
github psychopy / psychopy / psychopy / demos / coder / hardware / labjack_u3.py
has u3 (and others below an umbrella called labjack) so the import
line is slightly different to the documentation on LabJack's website
"""

from __future__ import absolute_import, division, print_function

from builtins import range
from psychopy import visual, core, event, sound
try:
    from labjack import u3
except ImportError:
    import u3

# sound.setAudioAPI('pyaudio')

win = visual.Window([800, 800])
stim = visual.GratingStim(win, color=-1, sf=0)
snd = sound.Sound(880)
print(snd)
# setup labjack U3
ports = u3.U3()
FIO4 = 6004  # the address of line FIO4

while True:
    # do this repeatedly for timing tests
    ports.writeRegister(FIO4, 0)  # start low

    # draw black square
    stim.draw()
    win.flip()

    # wait for a key press
github psychopy / psychopy / psychopy / demos / coder / stimuli / screensAndWindows.py
"""
Using multiple screens and windows with PsychoPy
"""

from __future__ import division

from psychopy import visual, event
from numpy import sin, pi  # numeric python

if True:  # use two positions on one screen
    winL = visual.Window(size=[400, 300], pos=[100, 200], screen=0,
                         allowGUI=False)  # , fullscr=True)
    winR = visual.Window(size=[400, 300], pos=[400, 200], screen=0,
                         allowGUI=False)  # , fullscr=True)  # same screen diff place
else:
    winL = visual.Window(size=[400, 300], pos=[100, 200], screen=0,
                         allowGUI=False, fullscr=False)
    winR = visual.Window(size=[400, 300], pos=[100, 200], screen=1,
                         allowGUI=False, fullscr=False)  # same place diff screen

# create some stimuli
# NB. if the windows have the same characteristics then

# left screen
contextPatchL = visual.GratingStim(winL, tex='sin', mask='circle',
    size=1.0, sf=3.0, texRes=512)
targetStimL = visual.GratingStim(winL, ori=20, tex='sin', mask='circle',
    size=0.4, sf=3.0, texRes=512, autoLog=False)

# right screen
contextPatchR = visual.GratingStim(winR, tex='sin', mask='circle',
    size=1.0, sf=3.0, texRes=512)
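The excerpt stops before anything is drawn. Each Window owns its own back buffer, so stimuli are drawn to, and flipped on, each window separately. A minimal sketch of the update loop, assuming only the stimuli defined above (the 200-frame count is an arbitrary choice, not from the demo):

for frameN in range(200):
    # left window
    contextPatchL.draw()
    targetStimL.draw()
    winL.flip()

    # right window
    contextPatchR.draw()
    winR.flip()

    if event.getKeys():  # stop on any key press
        break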
github psychopy / psychopy / psychopy / demos / demo_captureFrames.py
#! /usr/local/bin/python2.5
from psychopy import visual

#copy pixels from the frame buffer
myWin = visual.Window([200,200])
myStim = visual.PatchStim(myWin, pos=[-0.5,-0.5],size=1, sf=5,rgb=[0,1,1],ori=30, mask='gauss')
n=10
for frameN in range(n): #for n frames
  myStim.setPhase(0.1, '+')
  myStim.draw()
  #you can either read from the back buffer BEFORE win.flip() or 
  #from the front buffer just AFTER the flip. The former has the
  #advantage that it won't be affected by other windows whereas
  #latter can be.
  myWin.getMovieFrame(buffer='back')
  myWin.flip()
  
#save the movie in the format of your choice
#myWin.saveMovieFrames('frame.jpg')
#myWin.saveMovieFrames('myMovie.gif')
myWin.saveMovieFrames('myMovie.mpg')
github psychopy / psychopy / psychopy / demos / coder / input / customMouse.py
#!/usr/bin/env python2
# -*- coding: utf-8 -*-

"""
Demo of CustomMouse(), showing movement limits, click detected upon release,
and ability to change the pointer.
"""

from __future__ import division

# author Jeremy Gray

from psychopy import visual, event

win = visual.Window()

# a virtual mouse, vm:
vm = visual.CustomMouse(win,
    leftLimit=-0.2, topLimit=0, rightLimit=0.2, bottomLimit=-0.4,
    showLimitBox=True, clickOnUp=True)

instr = visual.TextStim(win, text="move the mouse around.\n"
    "click to give the mouse more room to move.", pos=(0, .3))
new_pointer = visual.TextStim(win, text='o')
print("[getPos] [getWheelRel] click time")
while not event.getKeys():
    instr.draw()
    vm.draw()
    win.flip()
    if vm.getClicks():
        vm.resetClicks()
github BciPy / BciPy / display / Demo_rsvp_disp_images.py
# Dummy Bar Graph Params
dummy_bar_schedule_t = [['A', 'B', 'C', 'D', '<', '-', 'G'],
                        ['A', 'B', 'C', 'D', '<', 'H', 'G'],
                        ['A', 'B', 'C', 'R', 'M', 'K', 'G'],
                        ['A', 'B', 'C', 'R', '<', 'Z', 'G']]
dummy_bar_schedule_p = [[1, 1, 1, 2, 3, 2, 3], [1, 1, 1, 2, 7, 2, 1],
                        [1, 1, 1, 2, 3, 2, 3], [1, 1, 2, 12, 1, 2, 1]]

task_text = ['1/100', '2/100', '3/100', '4/100']
task_color = [['white'], ['white'], ['white'], ['white']]

# Initialize decision
ele_list_dec = [['[<]'], ['[R]']]

# Initialize Window
win = visual.Window(size=[500, 500], fullscr=False, screen=0, allowGUI=False,
                    allowStencil=False, monitor='testMonitor', color='black',
                    colorSpace='rgb', blendMode='avg',
                    waitBlanking=True)
win.recordFrameIntervals = True
frameRate = win.getActualFrameRate()
visual.useFBO = False
print(frameRate)

# Initialize Clock
clock = core.StaticPeriod(screenHz=frameRate)
experiment_clock = core.MonotonicClock(start_time=None)

rsvp = CalibrationTask(window=win, clock=clock,
                       experiment_clock=experiment_clock,
                       text_information=text_text,
                       color_information=color_text, pos_information=pos_text,
github psychopy / psychopy / psychopy / demos / coder / variousVisualStims.py
from psychopy import visual, event, core
import numpy

win = visual.Window([600,600], rgb=-1)
gabor = visual.PatchStim(win, mask='gauss', pos=[-0.5,-0.5], dkl=[0,0,1],sf=5, ori=30)
movie = visual.MovieStim(win, 'testmovie.mpg', units='pix',pos=[100,100],size=[160,120])
text = visual.TextStim(win, pos=[0.5,-0.5],text=u"unicode (eg \u03A8 \u040A \u03A3)", font=['Times New Roman'])
faceRGB = visual.PatchStim(win,tex='face.jpg',pos=[-0.5,0.5])
fixSpot = visual.PatchStim(win,tex="none", mask="gauss", size=(0.05,0.05),rgb=1)
myMouse=event.Mouse(win=win)

t=0.0
while True:
    #get mouse events
    mouse_dX,mouse_dY = myMouse.getRel()
    mouse1, mouse2, mouse3 = myMouse.getPressed()
    if (mouse1):
        gabor.setOri(mouse_dY/10.0, '-')
        text.setOri(mouse_dY/10.0, '+')
        faceRGB.setOri(mouse_dY/10.0, '+')
github psychopy / psychopy / psychopy / demos / coder / experiment control / autoDraw_autoLog.py
# -*- coding: utf-8 -*-

"""
One easy way to handle stimuli that are drawn repeatedly is to
setAutoDraw(True) for that stimulus. It will continue to be drawn until
stim.setAutoDraw(False) is called. By default, a logging message of
level EXP will be created when setAutoDraw is called.

This can be turned off for each call with stim.setAutoDraw(True, autoLog=False)
"""

from __future__ import division

from psychopy import visual, core

win = visual.Window([800, 800])

# a stim's name is used in log entries
stim1 = visual.GratingStim(win, pos=[-0.5, -0.5], name='stim1')
stim2 = visual.TextStim(win, pos=[0.5, 0.5], text='stim2', name='textStim')

# no need to log the fixation point info, use autoLog=False
fixation = visual.GratingStim(win, mask='gauss', tex=None, size=0.02,
    name='fixation', autoLog=False)

fixation.setAutoDraw(True)
stim1.setAutoDraw(True)
stim2.setAutoDraw(True)
# both on
for frameN in range(20):  # run 20 frames like this
    win.flip()
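The excerpt ends with both stimuli still auto-drawing; as the docstring notes, they keep appearing on every flip until autoDraw is switched off again. A minimal continuation might look like this (the frame counts are illustrative):

stim1.setAutoDraw(False)  # stim2 and fixation keep drawing automatically
for frameN in range(20):
    win.flip()

# switch everything off and finish
stim2.setAutoDraw(False)
fixation.setAutoDraw(False)
win.flip()
win.close()
core.quit()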