How to use the psychopy.core module in psychopy

To help you get started, we’ve selected a few psychopy.core examples, based on popular ways the module is used in public projects.

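Before the project snippets below, here is a minimal, self-contained sketch of the psychopy.core calls they lean on most: Clock for stopwatch timing, wait for pausing, and quit for a clean exit. The durations are arbitrary placeholder values.

from psychopy import core

clock = core.Clock()                 # stopwatch, starts at zero
core.wait(0.5)                       # pause for 0.5 s
print('elapsed: %.3f s' % clock.getTime())

clock.reset()                        # back to zero, e.g. at the start of a trial
core.wait(0.25)
print('after reset: %.3f s' % clock.getTime())

core.quit()                          # shut PsychoPy down and exit the script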

github jnaulty / SSVEP_OpenBCI / SSVEP.py
                # another way to change color with 1 pattern
                #self.pattern1.color *= -1    
                self.pattern1.setAutoDraw(False)
                self.pattern2.setAutoDraw(True)
                
                
                for frameN in range(self.frame_off):
                    self.mywin.flip()
                self.pattern2.setAutoDraw(False)
                
            #self.epoch(0)
            #clean black screen off
            self.mywin.flip()
            #wait certain time for next trial
            core.wait(self.waitdur)
            #reset clock for next trial
            self.Trialclock.reset()    
            #count number of trials
            self.count+=1
     
            """
            ###Tagging the Data at end of stimulus###
            
    """          
        #self.collector.disconnect()
        self.stop()
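The SSVEP snippet above flickers patterns frame by frame with setAutoDraw/flip, then uses core.wait and a clock reset to pace trials. A stripped-down sketch of that trial loop is shown below; the window size, frame count, and durations are placeholder assumptions rather than values from the project.

from psychopy import core, visual

win = visual.Window(size=(800, 600), color='black')
pattern = visual.GratingStim(win, sf=4, size=4)   # stand-in for the flicker stimulus

trial_clock = core.Clock()
n_trials, frames_on, wait_dur = 3, 60, 1.0        # placeholder values

for trial in range(n_trials):
    trial_clock.reset()                 # time each trial from its onset
    for frameN in range(frames_on):     # draw for a fixed number of frames
        pattern.draw()
        win.flip()
    win.flip()                          # blank screen between trials
    core.wait(wait_dur)                 # inter-trial interval

win.close()
core.quit()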
github psychopy / psychopy / psychopy / demos / coder / experiment control / JND_staircase_exp.py
    # blank screen
    fixation.draw()
    win.flip()

    #get response
    thisResp=None
    while thisResp==None:
        allKeys=event.waitKeys()
        for thisKey in allKeys:
            if (thisKey=='left' and targetSide==-1) or (thisKey=='right' and targetSide==1):
                thisResp = 1#correct
            elif (thisKey=='right' and targetSide==-1) or (thisKey=='left' and targetSide==1):
                thisResp = 0#incorrect
            elif thisKey in ['q', 'escape']:
                core.quit()#abort experiment
        event.clearEvents('mouse')#only really needed for pygame windows

    #add the data to the staircase so it can calculate the next level
    staircase.addData(thisResp)
    dataFile.write('%i\t%.3f\t%i\n' % (targetSide, thisIncrement, thisResp))

#staircase has ended
dataFile.close()
staircase.saveAsPickle(fileName)#special python binary file to save all the info

#give some output to user
print('reversals:')
print(staircase.reversalIntensities)
print('mean of final 6 reversals = %.3f' % numpy.average(staircase.reversalIntensities[-6:]))

core.quit()
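The response loop in this demo is the standard PsychoPy pattern: block on event.waitKeys, score the key against the target side, and abort with core.quit on escape. Below is a minimal version of just that loop, outside the staircase; the side coding and key names are illustrative.

from psychopy import core, event, visual

win = visual.Window(size=(800, 600))     # keys are collected via the window
target_side = -1                         # -1 = left, 1 = right (illustrative)

this_resp = None
while this_resp is None:
    for key in event.waitKeys():         # blocks until at least one key arrives
        if key in ('left', 'right'):
            pressed = -1 if key == 'left' else 1
            this_resp = int(pressed == target_side)   # 1 = correct, 0 = incorrect
        elif key in ('q', 'escape'):
            core.quit()                  # abort the experiment cleanly

print('response scored as %i' % this_resp)
win.close()
core.quit()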
github psychopy / psychopy / psychopy / demos / coder / input / latencyFromTone.py
    circle.draw()
    text.draw()
    win.flip()
    while mic.recorder.running:
        core.wait(.01, 0)

    # When in the file did the onset tone start and stop?
    onset, offset = mic.getMarkerOnset(chunk=64, secs=0.2)  # increase secs if miss the markers
    onsets.append(onset)

    # display options:
    text.draw()
    win.flip()
    print("%.3f %.3f" % (onset, offset))
    if len(event.getKeys(['escape'])):
        core.quit()
    if len(event.getKeys()):
        msg2.draw()
        win.flip()
        data, sampleRate = microphone.readWavFile(filename)
        plotYX(data, range(len(data)), "time domain @ %iHz" % sampleRate)
        mag, freqV = microphone.getDft(data, sampleRate)
        plotYX(mag, freqV, "frequency domain (marker at %i Hz)" % mic.getMarkerInfo()[0])

    # no need to keep the recorded file:
    os.unlink(filename)

print("\nmarker onset = %.3fs %.3f (mean SD), relative to start of file" % (np.mean(onsets), np.std(onsets)))

win.close()
core.quit()
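Note the two-argument call core.wait(.01, 0) in the polling loop above: the second argument is hogCPUperiod, the tail end of the wait during which PsychoPy busy-waits for timing precision. Passing 0 keeps the whole wait CPU-friendly, which is appropriate while polling a recorder. A small sketch contrasting the two modes (durations are arbitrary):

from psychopy import core

# Default behaviour: the final 0.2 s of the wait busy-waits for maximum precision.
core.wait(0.5)

# hogCPUperiod=0: sleep for the whole duration -- slightly less precise, but it
# leaves the CPU free, which is fine when you are just polling in a loop.
core.wait(0.01, hogCPUperiod=0)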
github psychopy / psychopy / psychopy / demos / coder / stimuli / colors.py
# stims.append( visual.GratingStim(win, mask='gauss', color=(0, 0, 0.5), colorSpace='lms', pos=[0.5, -0.5], sf=2))

# HSV. This is a device-dependent space
# (i.e. it will differ on each monitor but needs no calibration)
stims.append( visual.GratingStim(win, mask='gauss', color=(0, 1, 1), colorSpace='hsv', pos=[0.5, 0.5], sf=2))
stims.append( visual.GratingStim(win, mask='gauss', color=(45, 1, 1), colorSpace='hsv', pos=[0.5, 0], sf=2))
stims.append( visual.GratingStim(win, mask='gauss', color=(90, 1, 1), colorSpace='hsv', pos=[0.5, -0.5], sf=2))

labels = visual.TextStim(win, text='RGB          DKL          HSV', pos=(0,.85), wrapWidth=2)

for thisStim in stims:
    thisStim.draw()
labels.draw()
win.flip()

clock = core.Clock()
while not event.getKeys() and clock.getTime() < 10:
    core.wait(.2)

win.close()
core.quit()
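The closing loop above, waiting for a keypress or a 10 s timeout with core.Clock, is a common idiom; core.CountdownTimer is an equivalent alternative that counts down to zero instead. A minimal sketch (not part of the original demo):

from psychopy import core, event, visual

win = visual.Window()                       # key events arrive via the window
timer = core.CountdownTimer(10)             # counts down from 10 s
while not event.getKeys() and timer.getTime() > 0:
    core.wait(0.2)                          # poll roughly five times a second

win.close()
core.quit()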
github psychopy / psychopy / psychopy / demos / coder / stimuli / elementArrays.py
    new = makeCoherentOris(newXYs[deadElements, :], coherence, 45)
    newOris[deadElements] = new

    # update the oris and xys of the new elements
    globForm.xys = newXYs
    globForm.oris = newOris

    globForm.draw()

    win.flip()
    lives = lives + 1

    event.clearEvents('mouse')  # only really needed for pygame windows

win.close()
core.quit()
github jnaulty / SSVEP_OpenBCI / motorimagery.py
            self.epoch(0)

            # self.Trialclock.reset()    

            p = self.pattern_order[self.trial_num]
            
            self.patterns[p].draw()
            self.mywin.flip()
            self.epoch(p)
            core.wait(self.trialdur)
                
            #clean black screen off
            self.mywin.flip()
            self.epoch(0)
            #wait certain time for next trial
            core.wait(self.waitdur)

            #count number of trials
            self.trial_num += 1
     
        self.collector.stop()
        self.collector.disconnect()
        self.stop()
github psychopy / psychopy / psychopy / sound.py
            pyoSndServer = Server(sr=rate, nchnls=maxChnls,
                                  buffersize=buffer, audio=audioDriver)
        else:
            # with others we just use portaudio and then set the OutputDevice
            # below
            pyoSndServer = Server(sr=rate, nchnls=maxChnls, buffersize=buffer)

        pyoSndServer.setVerbosity(1)
        if platform == 'win32':
            pyoSndServer.setOutputDevice(outputID)
            if inputID is not None:
                pyoSndServer.setInputDevice(inputID)
        # do other config here as needed (setDuplex? setOutputDevice?)
        pyoSndServer.setDuplex(duplex)
        pyoSndServer.boot()
    core.wait(0.5)  # wait for server to boot before starting the sound stream
    pyoSndServer.start()
    try:
        Sound()  # test creation, no play
    except pyo.PyoServerStateException:
        msg = "Failed to start pyo sound Server"
        if platform == 'darwin' and audioDriver != 'portaudio':
            msg += "; maybe try prefs.general.audioDriver 'portaudio'?"
        logging.error(msg)
        core.quit()
    logging.debug('pyo sound server started')
    logging.flush()
github psychopy / psychopy / psychopy / demos / coder / iohub_extended / mcu / iosyncTime.py
# -*- coding: utf-8 -*-
"""
Script can be used to test the offset and drift correction used to convert
iohub times -> ioSync times and vice versa.
"""

repetitions=100

import numpy as np    
import time
from pprint import pprint

from psychopy import core
from psychopy.iohub import launchHubServer,Computer,OrderedDict
getTime=core.getTime

psychopy_mon_name='testMonitor'
exp_code='events'
sess_code='S_{0}'.format(int(time.mktime(time.localtime())))

iohub_config={
"psychopy_monitor_name":psychopy_mon_name,
"mcu.iosync.MCU":dict(serial_port='COM8',monitor_event_types=[]),#['DigitalInputEvent']),
"experiment_code":exp_code, 
"session_code":sess_code
}

results=np.zeros((repetitions,5),dtype=np.float64)
io=launchHubServer(**iohub_config)

#Computer.enableHighPriority()
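The script aliases core.getTime, PsychoPy's global high-resolution timestamp function, which shares its timebase with ioHub. A minimal sketch of using it to timestamp events on that common timebase (the loop count and wait are arbitrary):

from psychopy import core

getTime = core.getTime           # global, monotonic, high-resolution clock

stamps = []
for _ in range(5):
    core.wait(0.1)
    stamps.append(getTime())     # all timestamps share the same timebase

print(['%.4f' % t for t in stamps])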
github psychopy / psychopy / psychopy / speech.py
    def _removeThread(self, gsqthread):
        del core.runningThreads[core.runningThreads.index(gsqthread)]
    def getThread(self):
github BciPy / BciPy / bcipy / display / rsvp / demo / demo_copyphrase_rsvp.py
ele_list_dec = [['[<]'], ['[R]']]

# Initialize Window
win = visual.Window(size=[500, 500], screen=0, allowGUI=False,
                    allowStencil=False, monitor='testMonitor', color='black',
                    colorSpace='rgb', blendMode='avg',
                    waitBlanking=True,
                    winType='pyglet')
win.recordFrameIntervals = True
frameRate = win.getActualFrameRate()

print(frameRate)

# Initialize Clock
clock = core.StaticPeriod(screenHz=frameRate)
experiment_clock = core.MonotonicClock(start_time=None)

rsvp = CopyPhraseDisplay(
    win,
    clock,
    experiment_clock,
    marker_writer=NullMarkerWriter(),
    static_task_text=text_task,
    static_task_color=color_task,
    info_text=text_text,
    info_color=color_text,
    info_pos=pos_text,
    info_height=txt_height,
    info_font=font_text,
    task_color=['white'],
    task_font=font_task, task_text='COPY_PH',
    task_height=height_task,
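Two less common core classes appear in this BciPy demo: core.StaticPeriod, which absorbs a fixed-duration gap (for example stimulus loading) while keeping overall timing honest, and core.MonotonicClock, which starts at zero and cannot be reset. A minimal sketch of both, independent of the display code above; the 60 Hz and 0.5 s values are placeholders.

from psychopy import core

exp_clock = core.MonotonicClock()       # starts at 0 and can never be reset

isi = core.StaticPeriod(screenHz=60)    # allows for one frame of slack at 60 Hz
isi.start(0.5)                          # promise to be ready within 0.5 s
# ... load or prepare the next stimulus here ...
isi.complete()                          # sleeps for whatever time is left over

print('elapsed since start: %.3f s' % exp_clock.getTime())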