How to use the psychopy.visual.TextStim class in psychopy

To help you get started, we’ve selected a few psychopy examples based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github psychopy / psychopy / sandbox / testBitsFast.py View on Github external
# Sandbox demo: open a Bits++ 'fast' window and spin a bare render loop
# for 20 seconds, quitting early on Escape/'q'.
import psychopy
from math import sin, pi

#create a window to draw in
# NOTE(review): bitsMode='fast' targets CRS Bits++ hardware and requires the
# pygame backend used here; rgb=-0 is just 0 (mid-grey in PsychoPy's rgb space).
myWin = psychopy.visual.Window((800,600),fullscr=0, winType="pygame",
                                        bitsMode='fast', rgb=-0, gamma=1.0)

# Circular sine grating; note it is never drawn below (its draw() call is
# commented out), so only the blank window is shown.
grating1 = psychopy.visual.PatchStim(myWin,tex="sin",mask="circle",texRes=128,
			rgb=[1.0,1.0,1.0],opacity=1.0,
			size=(1.0,1.0), sf=(4.0,2.0),
			ori = 45, contrast=0.99)

# Placeholder for a frames-per-second readout; its text is never updated
# (and it is never drawn) in this loop.
fpsDisplay = psychopy.visual.TextStim(myWin,pos=(-0.95,-0.95),text='fps...')

trialClock = psychopy.Clock()
timer = psychopy.Clock()  # NOTE(review): created but never used below
t = lastFPSupdate = 0
while t<20:#quits after 20 secs
	t=trialClock.getTime()
	#grating1.draw()  #redraw it
	myWin.update()   #update the screen
	
	# Poll the keyboard so the user can bail out early.
	for keys in psychopy.event.getKeys():
		if keys in ['escape','q']:
			psychopy.core.quit()
	
psychopy.core.quit()
github psychopy / psychopy / psychopy / demos / coder / experiment control / fMRI_launchScan.py View on Github external
'skip': 0, # number of volumes lacking a sync pulse at start of scan (for T1 stabilization)
    'sound': True # in test mode only, play a tone as a reminder of scanner noise
    }
# Let the operator confirm/edit the scan parameters; bail out on Cancel.
infoDlg = gui.DlgFromDict(MR_settings, title='fMRI parameters', order=['TR','volumes'])
if not infoDlg.OK: core.quit()

win = visual.Window(fullscr=True)
globalClock = core.Clock()

# summary of run timing, for each key press:
output = 'vol    onset key\n'
# Log one placeholder row per skipped pre-scan volume (negative indices).
for i in range(-1 * MR_settings['skip'], 0):
    output += '%d prescan skip (no sync)\n' % i

key_code = MR_settings['sync']
# On-screen volume counter; color is slightly lighter than the window background.
counter = visual.TextStim(win, height=.05, pos=(0,0), color=win.rgb+0.5)
output += "  0    0.000 %s start of scanning run, vol 0\n" % key_code
# Only worth pausing between volumes when the TR is long enough.
pause_during_delay = (MR_settings['TR'] > .4)
sync_now = False

# can simulate user responses, here 3 key presses in order 'a', 'b', 'c' (they get sorted by time):
simResponses = [(0.123, 'a'), (4.789, 'c'), (2.456, 'b')]

# launch: operator selects Scan or Test (emulate); see API documentation
# launchScan blocks until the first sync pulse, then returns the volume count (1).
vol = launchScan(win, MR_settings, globalClock=globalClock, simResponses=simResponses)

infer_missed_sync = False # best if your script timing works without this, but this might be useful sometimes
max_slippage = 0.02 # how long to allow before treating a "slow" sync as missed
    # any slippage is almost certainly due to timing issues with your script or PC, and not MR scanner

# Total expected run length in seconds.
duration = MR_settings['volumes'] * MR_settings['TR']
# note: globalClock has been reset to 0.0 by launchScan()
github jadref / buffer_bci / python / imaginedMovement_psychoPy / simple_imagined_movement.py View on Github external
sendEvent("experiment.block",current_block)
        core.wait(2)
        for trial in range(1,len(stimulus_conditions)):
            instruction = visual.TextStim(mywin, text=stimulus_instructions[stimulus_conditions[trial-1]-1],color=(1,1,1),height = 50) 
            instruction.draw()
            mywin.flip()
            sendEvent("experiment.trial",stimulus_instructions[stimulus_conditions[trial-1]-1])
            core.wait(4)
            if feedback is True:
                feedbackEvt = waitnewevents("feedback",1000)
                if feedbackEvt is None:
                    feedbackTxt='None'
                else:
                    feedbackTxt=str(feedbackEvt.value)
                getFeedbackCounter = getFeedbackCounter + 1
                visual.TextStim(mywin,text="Feedback = " + feedbackTxt,color=(1,1,1),height=50).draw()
                mywin.flip()
                sendEvent("experiment.feedback",feedbackTxt)
                core.wait(2)
                
            mywin.flip()
            core.wait(1)
        if block < nr_blocks:
            # break
            do_break()
        current_block += 1
        sendEvent("experiment.block",0)
github psychopy / psychopy / psychopy / demos / coder / iohub_extended / _manual / quickStart / ioXInputPsychoPy / run.py View on Github external
gamepad.updateCapabilitiesInformation()
		caps=gamepad.getLastReadCapabilitiesInfo()
		print "Capabilities: ",caps


		unit_type = display.getCoordinateType()
		
		fixSpot = visual.PatchStim(myWin,tex="none", mask="gauss",pos=(0,0), 
							size=(30,30),color='black',units=unit_type)
		
		grating = visual.PatchStim(myWin,pos=(0,0), tex="sin",mask="gauss",
							color='white',size=(200,200), sf=(0.01,0),units=unit_type)

		screen_resolution= display.getPixelResolution()
		msgText='Left Stick = Spot Pos; Right Stick = Grating Pos;\nLeft Trig = SF; Right Trig = Ori;\n"r" key = Rumble; "q" = Quit\n'
		message = visual.TextStim(myWin,pos=(0,-200),
							text=msgText,units=unit_type,
							alignHoriz='center',alignVert='center',height=24,
							wrapWidth=screen_resolution[0]*.9)
	
		END_DEMO=False
		
		while not END_DEMO:
			

			#update stim from joystick
			x,y,mag=gamepad.getThumbSticks()['RightStick'] # sticks are 3 item lists (x,y,magnitude)
			xx=self.normalizedValue2Pixel(x*mag,screen_resolution[0], -1)
			yy=self.normalizedValue2Pixel(y*mag,screen_resolution[1], -1)
			grating.setPos((xx, yy))
			
			x,y,mag=gamepad.getThumbSticks()['LeftStick'] # sticks are 3 item lists (x,y,magnitude)
github oguayasa / tobii_pro_wrapper / ToPy.py View on Github external
    # line object for showing left eye gaze position during calibration
    leftEyeLine = visual.Line(curWin, 
                              units ='pix',
                              lineColor ='yellow',
                              lineWidth = 30,
                              start = (0.0, 0.0),
                              end = (0.0, 0.0))
    # number for identifying point in dictionary
    pointText = visual.TextStim(curWin, 
                                text = " ", 
                                color = [0.8, 0.8, 0.8],  # lighter than bkg
                                units = 'pix',
                                pos = [0.0, 0.0],
                                height = 120)
        # Make a dummy message
    # Operator prompt (norm units, near the bottom of the screen); the text
    # string is runtime-visible, so it is left exactly as authored.
    checkMsg = visual.TextStim(curWin,
                               text = ("      Press 'q' to abort, or" + \
                                       "\n'c' to continue with calibration."),
                               color = [1.0, 1.0, 1.0],
                               units = 'norm',
                               pos = [0.0, -0.65],
                               height = 0.07)

    # turn on keyboard reporting
    keyboard.reporting = True
    
    # make empty dictionary for holding points to be recalibrated
    # NOTE(review): despite the names, both are lists, not dictionaries.
    holdRedoDict = []
    holdColorPoints = []
   
    # draw and update screen
    while True:
github oguayasa / tobii_pro_wrapper / tobii_pro_wrapper / tobii_pro_wrapper.py View on Github external
        # line object for showing right eye gaze position during calibration 
        rightEyeLine = visual.Line(calibWin, 
                                   units ='pix',
                                   lineColor ='red',
                                   lineWidth = 20,
                                   start = (0.0, 0.0),
                                   end = (0.0, 0.0))                              
        # line object for showing left eye gaze position during calibration                          
        leftEyeLine = visual.Line(calibWin, 
                                  units ='pix',
                                  lineColor ='yellow',
                                  lineWidth = 20,
                                  start = (0.0, 0.0),
                                  end = (0.0, 0.0))
        # number for identifying point in dictionary
        pointText = visual.TextStim(calibWin, 
                                    text = " ", 
                                    color = [0.8, 0.8, 0.8],  # lighter than bkg
                                    units = 'pix',
                                    pos = [0.0, 0.0],
                                    height = 60)
            # Make a dummy message
        # Participant-facing wait message (norm units, lower half of screen).
        checkMsg = visual.TextStim(calibWin,
                                   text = 'Wait for the experimenter.',
                                   color = [1.0, 1.0, 1.0],
                                   units = 'norm',
                                   pos = [0.0, -0.5],
                                   height = 0.07)

        # make empty dictionary for holding points to be recalibrated
        # NOTE(review): despite the names, both are lists, not dictionaries.
        holdRedoDict = []
        holdColorPoints = []
github psychopy / psychopy / psychopy / demos / coder / iohub / xinputgamepad.py View on Github external
color='white', size=(200, 200), sf=(0.01, 0),
                                 units=unit_type)

    # Template for the live button-state readout; re-formatted each frame
    # with the currently pressed buttons (starts empty).
    button_state_str = 'Pressed Buttons: {}'
    button_state_txt = visual.TextStim(win, pos=(0, -200),
                                       text=button_state_str.format([]),
                                       units=unit_type,
                                       alignHoriz='center',
                                       alignVert='center',
                                       height=24,
                                       wrapWidth=display_resolution[0] * .9)

    # Static usage instructions, wrapped to 90% of the display width.
    msgText = ('Left Stick: Spot Pos; Right Stick: Grating Pos; '
               'Left Trig: SF; Right Trig: Ori; "A" Button: Rumble; '
               '"q" key: Quit')
    message = visual.TextStim(win, pos=(0, -350), text=msgText, color='Red',
                              units=unit_type, alignHoriz='center',
                              alignVert='center', height=32,
                              wrapWidth=display_resolution[0] * .9)

    # Accumulators for the event loop that follows.
    key_presses = []
    last_buttons = []
    while not 'q' in keyboard.getPresses():
        # Update stim from gamepad.
        # Sticks are 3 item lists (x, y, magnitude).
        #
        x, y, mag = gamepad.getThumbSticks()['right_stick']
        xx, yy = normalizedValue2Coord((x, y), mag, coord_size)
        grating.setPos((xx, yy))

        x, y, mag = gamepad.getThumbSticks()['left_stick']
        xx, yy = normalizedValue2Coord((x, y), mag, coord_size)
github psychopy / psychopy / psychopy / demos / coder / iohub / keyboard.py View on Github external
pos=[VALUE_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 4],
    color='black', alignText='left', anchorHoriz='left',
    height=TEXT_STIM_HEIGHT, wrapWidth=LABEL_WRAP_LENGTH)
# Value-column text stims, one per keyboard-event field; each sits one text
# row below the previous (row index is the multiplier on TEXT_ROW_HEIGHT).
keypress_duration_stim = visual.TextStim(win, units=unit_type, text=u'',
    pos=[VALUE_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 5],
    color='black', alignText='left', anchorHoriz='left',
    height=TEXT_STIM_HEIGHT, wrapWidth=LABEL_WRAP_LENGTH)
all_pressed_stim = visual.TextStim(win, units=unit_type, text=u'',
    pos=[VALUE_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 6],
    color='black', alignText='left', anchorHoriz='left',
    height=TEXT_STIM_HEIGHT, wrapWidth=LABEL_WRAP_LENGTH)
event_type_stim = visual.TextStim(win, units=unit_type, text=u'',
    pos=[VALUE_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 7],
    color='black', alignText='left', anchorHoriz='left',
    height=TEXT_STIM_HEIGHT, wrapWidth=LABEL_WRAP_LENGTH)
# Last row uses a wider wrap width (dw * 2) for the long PsychoPy key names.
psychopy_key_stim = visual.TextStim(win, units=unit_type, text=u'',
    pos=[VALUE_COLUMN_X, TEXT_ROWS_START_Y - TEXT_ROW_HEIGHT * 8],
    color='black', alignText='left', anchorHoriz='left',
    height=TEXT_STIM_HEIGHT,  wrapWidth=dw * 2)

# Having all the stim to update / draw in a list makes drawing code
# more compact and reusable
STIM_LIST = [title_label, title2_label, key_text_label, char_label,
    modifiers_label, keypress_duration_label, all_pressed__label,
    event_type_label, psychopy_key_label,
    key_text_stim, char_stim, modifiers_stim, keypress_duration_stim,
    all_pressed_stim, event_type_stim, psychopy_key_stim]

# Clear all events from the global and device level ioHub Event Buffers.

io.clearEvents('all')
github BciPy / BciPy / bcipy / display / rsvp / display.py View on Github external
        # Create multiple text objects based on input
        # (info_color/info_height/info_font/info_pos are presumably parallel
        # lists, one entry per info_text item -- TODO confirm against __init__).
        self.text = []
        for idx in range(len(self.info_text)):
            self.text.append(visual.TextStim(
                win=self.window,
                color=info_color[idx],
                height=info_height[idx],
                text=self.info_text[idx],
                font=info_font[idx],
                pos=info_pos[idx],
                wrapWidth=None, colorSpace='rgb',
                opacity=1, depth=-6.0))

        # Create Stimuli Object
        # Text mode starts as a '+' fixation; image mode starts with no image
        # loaded (set later when a stimulus is presented).
        if self.is_txt_stim:
            self.sti = visual.TextStim(
                win=self.window,
                color='white',
                height=self.stimuli_height,
                text='+',
                font=self.stimuli_font,
                pos=self.stimuli_pos,
                wrapWidth=None, colorSpace='rgb',
                opacity=1, depth=-6.0)
        else:
            self.sti = visual.ImageStim(
                win=self.window,
                image=None,
                mask=None,
                pos=self.stimuli_pos,
                ori=0.0)
github oguayasa / tobii_pro_wrapper / ToPy.py View on Github external
    # randomize points as ordered dictionary
    # (shuffle the (key, value) pairs, then freeze their order)
    random.shuffle(pointList)
    calibDict = collections.OrderedDict(pointList)

    # create window for calibration
    # GetSystemMetrics(0)/(1) are the primary display's width/height in pixels
    # (Windows-only API).
    calibWin = visual.Window(size = [GetSystemMetrics(0), GetSystemMetrics(1)],
                             pos = [0, 0],
                             units = 'pix',
                             fullscr = True,
                             allowGUI = True,
                             monitor = myMon,
                             winType = 'pyglet',
                             color = [0.4, 0.4, 0.4])  
    # stimuli for holding text
    # (reusable message stim; its .text is assigned before each display)
    calibMessage = visual.TextStim(calibWin, 
                                   color = [1.0, 1.0, 1.0],  # text
                                   units = 'norm', 
                                   height = 0.08, 
                                   pos = (0.0, 0.1))
    # stimuli for fixation cross
    fixCross = visual.TextStim(calibWin,
                               color = [1.0, 1.0, 1.0],
                               units = 'norm', 
                               height = 0.1, 
                               pos = (0.0, 0.0),
                               text = "+")
   
    # track box to position participant
    # subject instructions for track box
    calibMessage.text = ("Please position yourself so that the\n" + \
                         "eye-tracker can locate your eyes." + \