How to use the psychopy.visual.ImageStim class in psychopy

To help you get started, we’ve selected a few psychopy.visual.ImageStim examples, based on popular ways it is used in public projects.

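Before the project examples, here is a minimal sketch of the core pattern; the window size and the image path 'face.jpg' are placeholders, not taken from any of the projects below. You create the stimulus from a window and an image path, draw it to the back buffer, then flip the window to make it visible.

from psychopy import core, visual

# Open a small non-fullscreen window for this sketch
win = visual.Window((800, 600), units='pix')

# Create the image stimulus; 'face.jpg' is a placeholder path
img = visual.ImageStim(win, image='face.jpg', pos=(0, 0))

# draw() renders to the back buffer; flip() makes it visible on screen
img.draw()
win.flip()
core.wait(2.0)

win.close()
core.quit()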

github jadref / buffer_bci / tutorial / project_assignment / project_assignment.py View on Github external
    triggerevents=["sos"]
    stopevent=("sos","end")
    trlen_samp = 50
    state = []
    endSOS = False
    current_idx = 0
    print("Waiting for triggers: %s and endtrigger: %s.%s"%(triggerevents[0],stopevent[0],stopevent[1]))
    while endSOS is False:
        # grab data after every t:'stimulus' event until we get a {t:'stimulus.training' v:'end'} event 
        #data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,stopevent, state, milliseconds=False)
        data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,[], state, milliseconds=False)
        for ei in np.arange(len(events)-1,-1,-1):
            ev = events[ei]
            if ev.type == "sos":
                if ev.value == "on":
                    current_image = visual.ImageStim(mywin, "png/alarm_done.png") # set image
                    endSOS = True
                else:
                    current_image = visual.ImageStim(mywin, "png/alarm_error.png") # set image
                    errors = errors + 1
                current_image.draw()
                mywin.flip()
                core.wait(1)
                current_image = visual.ImageStim(mywin, image=sos[r]) # set image
                current_image.draw()
                mywin.flip()
    current_image = visual.ImageStim(mywin, "png/sos_choice.png") # set image
    current_image.draw()
    mywin.flip()
    endSOS = False
    while endSOS is False:
        # grab data after every t:'stimulus' event until we get a {t:'stimulus.training' v:'end'} event 
github psychopy / psychopy / psychopy / demos / coder / iohub / eyetracking / gcCursor / run.py View on Github external
        res=display.getPixelResolution() # Current pixel resolution of the Display to be used
        coord_type=display.getCoordinateType()
        window=visual.Window(res,monitor=display.getPsychopyMonitorName(), # name of the PsychoPy Monitor Config file if used.
                                    units=coord_type, # coordinate space to use.
                                    fullscr=True, # We need full screen mode.
                                    allowGUI=False, # We want it to be borderless
                                    screen= display.getIndex() # The display index to use, assuming a multi display setup.
                                    )

        # Create a dict of image stim for trials and a gaze blob to show the
        # reported gaze position with.
        #
        image_cache=dict()
        image_names=['canal.jpg','fall.jpg','party.jpg','swimming.jpg','lake.jpg']
        for iname in image_names:
            image_cache[iname]=visual.ImageStim(window, image=os.path.join('./images/',iname),
                        name=iname,units=coord_type)

        # Create a circle to use for the Gaze Cursor. Current units assume pix.
        #
        gaze_dot =visual.GratingStim(window,tex=None, mask="gauss",
                                     pos=(0,0 ),size=(66,66),color='green',
                                                        units=coord_type)

        # Create a Text Stim for use on /instruction/ type screens.
        # Current units assume pix.
        instructions_text_stim = visual.TextStim(window, text='', pos=[0,0],
                                    height=24,
                                    color=[-1,-1,-1], colorSpace='rgb',
                                    wrapWidth=window.size[0]*.9)
github isolver / ioHub / examples / tobiiTrackerTest / run.py View on Github external
        screen_resolution = display.getStimulusScreenResolution()
        # get the index of the screen to create the PsychoPy window in.
        screen_index=display.getStimulusScreenIndex()
        # Read the coordinate space the script author specified in the config file (right now only pix are supported)
        coord_type=display.getDisplayCoordinateType()

        # Create a psychopy window: full screen resolution, full screen mode, pix units, with no border, using the monitor
        # profile name 'testMonitor', which is created on the fly right now by the script
        self.window = visual.Window(screen_resolution, monitor="testMonitor", units=coord_type, fullscr=True, allowGUI=False, screen=screen_index)
        # Hide the 'system mouse cursor' so we can display a cool gaussian mask for a mouse cursor.
        mouse.setSystemCursorVisibility(False)

        # Create an ordered dictionary of psychopy stimuli. An ordered dictionary is one that returns keys in the order
        # they are added, so you can use it to reference stim by name or by 'zorder'
        image_name='./images/party.png'
        imageStim = visual.ImageStim(self.window, image=image_name, name='image_stim')
        gaze_dot =visual.GratingStim(self.window,tex=None, mask="gauss", pos=(-2000,-2000),size=(100,100),color='green')

        # create screen states

        # screen state that can be used to just clear the screen to blank.
        self.clearScreen=ClearScreen(self)
        self.clearScreen.setScreenColor((128,128,128))

        self.clearScreen.flip(text='EXPERIMENT_INIT')

        self.clearScreen.sendMessage("IO_HUB EXPERIMENT_INFO START")
        self.clearScreen.sendMessage("ioHub Experiment started {0}".format(getCurrentDateTimeString()))
        self.clearScreen.sendMessage("Experiment ID: {0}, Session ID: {1}".format(self.hub.experimentID,self.hub.experimentSessionID))
        self.clearScreen.sendMessage("Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".format(display.getStimulusScreenIndex(),display.getStimulusScreenResolution(),display.getDisplayCoordinateType()))
        self.clearScreen.sendMessage("Calculated Pixels Per Degree: {0} x, {1} y".format(*display.getPPD()))        
        self.clearScreen.sendMessage("IO_HUB EXPERIMENT_INFO END")
github marsja / psypy / Serial_recall_pt_exp1 / serial_recall_PT_exp1.py View on Github external
def imStim(win, image, pos, name):
    return visual.ImageStim(win=win, name=name,
        image=image, mask=None,
        ori=0, pos=pos,
        colorSpace=u'rgb', opacity=1,
        flipHoriz=False, flipVert=False,
        texRes=128, interpolate=True, depth=-5.0)
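For orientation, a hypothetical call to this helper might look like the following; the window, image path, and stimulus name here are illustrative assumptions, not taken from the project.

from psychopy import visual

# Hypothetical usage of the imStim helper; window and path are assumed
win = visual.Window([800, 600], units='pix')
probe = imStim(win, image='stimuli/item01.png', pos=(0, 0), name='probe')
probe.draw()
win.flip()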
github BciPy / BciPy / bcipy / helpers / triggers.py View on Github external
    trigger_callback = TriggerCallback()

    # If sound trigger is selected, output calibration tones
    if trigger_type == SOUND_TYPE:
        play_sound(
            sound_file_path='bcipy/static/sounds/1k_800mV_20ms_stereo.wav',
            dtype='float32',
            track_timing=True,
            sound_callback=trigger_callback,
            sound_load_buffer_time=0.5,
            experiment_clock=experiment_clock,
            trigger_name='calibration_trigger')

    elif trigger_type == IMAGE_TYPE:
        if display:
            calibration_box = visual.ImageStim(
                win=display,
                image='bcipy/static/images/testing_images/white.png',
                pos=(-.5, -.5),
                mask=None,
                ori=0.0)
            calibration_box.size = resize_image(
                'bcipy/static/images/testing_images/white.png',
                display.size, 0.75)

            display.callOnFlip(
                trigger_callback.callback,
                experiment_clock,
                trigger_name)
            if on_trigger is not None:
                display.callOnFlip(on_trigger, trigger_name)
github psychopy / psychopy / psychopy / demos / coder / stimuli / colorPalette.py View on Github external
    # Create array
    hsv = np.zeros([20,size,3], dtype=float)
    # Set value
    hsv[:,:,2] = np.linspace(0,1, size, endpoint=False)
    # Convert to RGB
    rgb = misc.hsv2rgb(hsv)

    # Make in range 0:1 for image stim
    rgb = (rgb + 1) / 2
    return rgb
    

# Setup the Window
win = visual.Window(size=[1920, 1080], fullscr=False, units='height')

colorPalette = visual.ImageStim(win=win,name='colorPalette', units='pix', 
                                image=None, mask=None,
                                texRes=64, depth=0.0)
    
valuePalette = visual.ImageStim(win=win, name='valuePalette', units='pix', 
                                pos=(0, -250), depth=-1.0)
    
hueSlider = visual.Slider(win=win, name='hueSlider',
                          size=(.37, .02), pos=(0, 0.2),
                          labels=None, ticks=(0, 360), style=['rating'])
    
satSlider = visual.Slider(win=win, name='satSlider',
                          size=(.02, .37), pos=(0.2, 0),
                          labels=None, ticks=(0, 1), style=['rating'])
    
valSlider = visual.Slider(win=win, name='valSlider',
                          size=(.37, .02), pos=(0, -0.25),
                          labels=None, ticks=(0, 1), style=['rating'])
github psychopy / psychopy / psychopy / demos / coder / stimuli / imagesAndPatches.py View on Github external
# -*- coding: utf-8 -*-

"""
Demo of ImageStim and GratingStim with image contents.
"""

from __future__ import division
from __future__ import print_function

from psychopy import core, visual, event

# Create a window to draw in
win = visual.Window((800, 800), monitor='testMonitor', allowGUI=False, color='black')

# Initialize some stimuli
beach = visual.ImageStim(win, image='beach.jpg', flipHoriz=True, pos=(0, 4.50), units='deg')
faceRGB = visual.ImageStim(win, image='face.jpg', mask=None,
    pos=(50, -50), size=None,  # will be the size of the original image in pixels
    units='pix', interpolate=True, autoLog=False)
print("original image size:", faceRGB.size)
faceALPHA = visual.GratingStim(win, pos=(-0.7, -0.2),
    tex="sin", mask="face.jpg", color=[1.0, 1.0, -1.0],
    size=(0.5, 0.5), units="norm", autoLog=False)
message = visual.TextStim(win, pos=(-0.95, -0.95),
    text='[Esc] to quit', color='white',
    anchorVert='bottom', anchorHoriz='left')

trialClock = core.Clock()
t = lastFPSupdate = 0
win.recordFrameIntervals = True
while not event.getKeys():
    t = trialClock.getTime()
github fsxfreak / esys-pbi / src / display_stimuli.py View on Github external
    self.cfg = esys_cfg.create_config(cfg_filename)

    # psychopy setup
    self.window = visual.Window(self.cfg.resolution)

    # preload all of stimuli, in sorted order
    self.loaded_stims = {}
    for trial_name, trial in self.cfg.trials.iteritems():
      stimuli_type = trial.stimuli_type
      path_prefix = '%s/' % trial.stimuli_folder

      if stimuli_type == 'images':
        self.loaded_stims[trial_name] = [ visual.ImageStim(self.window, 
                                            '%s%s' % (path_prefix, stim_file)) 
                                          for stim_file in trial.files[:-1]]
        self.fix_file = visual.ImageStim(self.window,'%s%s' %(path_prefix, trial.files[-1]))

      elif stimuli_type == 'sounds':
        self.loaded_stims[trial_name] = [ sound.Sound(
                                            '%s%s' % (path_prefix, stim_file))
                                          for stim_file in trial.files[:-1]]
        self.fix_file = sound.Sound('%s%s' % (path_prefix, trial.files[-1]))
      else:
        print('Unsupported stimuli_type: %s' % stimuli_type)
        raise ValueError 

    # setup LSL 
    # TODO generalize uid
    info = StreamInfo(self.LSL_STREAM_NAME, self.LSL_STREAM_TYPE,
        self.LSL_NUM_CHANNELS, self.LSL_SAMPLE_RATE, 'string', 'uid1')
    self.outlet = StreamOutlet(info)
github jadref / buffer_bci / tutorial / project_assignment / project_assignment.py View on Github external
                current_image.draw()
                mywin.flip()
    current_image = visual.ImageStim(mywin, "png/sos_choice.png") # set image
    current_image.draw()
    mywin.flip()
    endSOS = False
    while endSOS is False:
        # grab data after every t:'stimulus' event until we get a {t:'stimulus.training' v:'end'} event 
        #data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,stopevent, state, milliseconds=False)
        data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,[], state, milliseconds=False)
        for ei in np.arange(len(events)-1,-1,-1):
            ev = events[ei]
            if ev.type == "sos":
                if ev.value == "food": 
                    if r == 0:
                        current_image = visual.ImageStim(mywin, "png/food.png") # set image
                        endSOS = True
                    else:
                        current_image = visual.ImageStim(mywin, "png/food_error.png") # set image
                        errors = errors + 1
                elif ev.value == "pain":
                    if r == 1:
                        current_image = visual.ImageStim(mywin, "png/pain.png") # set image
                        endSOS = True
                    else:
                        current_image = visual.ImageStim(mywin, "png/pain_error.png") # set image
                        errors = errors + 1
                elif ev.value == "toilet":
                    if r == 2:
                        current_image = visual.ImageStim(mywin, "png/toilet.png") # set image
                        endSOS = True
                    else:
                        current_image = visual.ImageStim(mywin, "png/toilet_error.png") # set image
                        errors = errors + 1