How to use the psychopy.core.wait function in PsychoPy

To help you get started, we’ve selected a few PsychoPy examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github jadref / buffer_bci / tutorial / project_assignment / project_assignment.py View on Github external
while endSOS is False:
        # grab data after every t:'stimulus' event until we get a {t:'stimulus.training' v:'end'} event 
        #data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,stopevent, state, milliseconds=False)
        data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,[], state, milliseconds=False)
        for ei in np.arange(len(events)-1,-1,-1):
            ev = events[ei]
            if ev.type == "sos":
                if ev.value == "on":
                    current_image = visual.ImageStim(mywin, "png/alarm_done.png") # set image
                    endSOS = True
                else:
                    current_image = visual.ImageStim(mywin, "png/alarm_error.png") # set image
                    errors = errors + 1
                current_image.draw()
                mywin.flip()
                core.wait(1)
                current_image = visual.ImageStim(mywin, image=sos[r]) # set image
                current_image.draw()
                mywin.flip()
    current_image = visual.ImageStim(mywin, "png/sos_choice.png") # set image
    current_image.draw()
    mywin.flip()
    endSOS = False
    while endSOS is False:
        # grab data after every t:'stimulus' event until we get a {t:'stimulus.training' v:'end'} event 
        #data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,stopevent, state, milliseconds=False)
        data, events, stopevents, state = bufhelp.gatherdata(triggerevents,trlen_samp,[], state, milliseconds=False)
        for ei in np.arange(len(events)-1,-1,-1):
            ev = events[ei]
            if ev.type == "sos":
                if ev.value == "food": 
                    if r == 0:
github BciPy / BciPy / bcipy / tasks / rsvp / calibration.py View on Github external
_write_triggers_from_sequence_calibration(
                    last_sequence_timing, self.trigger_file)

                # Wait for a time
                core.wait(self.buffer_val)

            # Set run to False to stop looping
            run = False

        # Say Goodbye!
        self.rsvp.text = trial_complete_message(self.window, self.parameters)
        self.rsvp.draw_static()
        self.window.flip()

        # Give the system time to process
        core.wait(self.buffer_val)

        if self.daq.is_calibrated:
            _write_triggers_from_sequence_calibration(
                ['offset', self.daq.offset], self.trigger_file, offset=True)

        # Close this sessions trigger file and return some data
        self.trigger_file.close()

        # Wait some time before exiting so there is trailing eeg data saved
        core.wait(self.eeg_buffer)

        return self.file_save
github BciPy / BciPy / bcipy / tasks / rsvp / inter_sequence_feedback_calibration.py View on Github external
# Write triggers for the sequence
                _write_triggers_from_sequence_calibration(
                    last_sequence_timing, self._task.trigger_file)

                message, color, height = self._get_feedback_decision()
                # self.visual_feedback.height_stim = height # Changes height of shape
                timing = self.visual_feedback.administer(
                    message,
                    line_color=self.feedback_line_color,
                    fill_color=color,
                    stimuli_type=FeedbackType.SHAPE)

                # TODO write the visual feedback timing

                # Wait for a time
                core.wait(self._task.buffer_val)

            # Set run to False to stop looping
            run = False

        # Say Goodbye!
        self.rsvp.text = trial_complete_message(self.window, self.parameters)
        self.rsvp.draw_static()
        self.window.flip()

        # Give the system time to process
        core.wait(self._task.buffer_val)

        if self.daq.is_calibrated:
            _write_triggers_from_sequence_calibration(
                ['offset', self.daq.offset], self._task.trigger_file, offset=True)
github BciPy / BciPy / bcipy / tasks / rsvp / copy_phrase.py View on Github external
self.rsvp.draw_static()
        self.window.flip()

        # Give the system time to process
        core.wait(self.buffer_val)

        if self.daq.is_calibrated:
            _write_triggers_from_sequence_copy_phrase(
                ['offset', self.daq.offset], self.trigger_file,
                self.copy_phrase, text_task, offset=True)

        # Close the trigger file for this session
        self.trigger_file.close()

        # Wait some time before exiting so there is trailing eeg data saved
        core.wait(self.eeg_buffer)

        return self.file_save
github BciPy / BciPy / bcipy / feedback / visual / visual_feedback.py View on Github external
fill_color,
                stimuli_type)

            assert_stim.draw()
        else:
            stim = self._construct_stimulus(
                stimulus,
                self.pos_stim,
                line_color,
                fill_color,
                stimuli_type)

        self._show_stimuli(stim)
        time = ['visual_feedback', self.clock.getTime()]

        core.wait(self.feedback_length)
        timing.append(time)

        return timing
github BciPy / BciPy / bcipy / tasks / rsvp / calibration / inter_sequence_feedback_calibration.py View on Github external
# Do the sequence
                last_sequence_timing = self.rsvp.do_sequence()

                # Write triggers for the sequence
                _write_triggers_from_sequence_calibration(
                    last_sequence_timing, self._task.trigger_file)

                self.logger.info('[Feedback] Getting Decision')

                position = self._get_feedback_decision(last_sequence_timing)
                self.logger.info(
                    f'[Feedback] Administering feedback position {position}')
                self.visual_feedback.administer(position=position)

                # Wait for a time
                core.wait(self._task.buffer_val)

            # Set run to False to stop looping
            run = False

        # Say Goodbye!
        self.rsvp.text = trial_complete_message(self.window, self.parameters)
        self.rsvp.draw_static()
        self.window.flip()

        # Give the system time to process
        core.wait(self._task.buffer_val)

        if self.daq.is_calibrated:
            _write_triggers_from_sequence_calibration(
                ['offset', self.daq.offset], self._task.trigger_file, offset=True)
github psychopy / psychopy / psychopy / hardware / emulator.py View on Github external
def run(self):
    """Replay the scheduled responses as emulated key presses.

    Iterates over ``self.responses`` — an iterable of ``(onset, key)``
    pairs — waiting until each onset time (relative to the previous
    onset) before emitting the key as an emulated pyglet keyboard
    event.  Non-string, non-int keys are rejected with an error log.
    Stops early when ``self.stopflag`` becomes truthy.

    Returns:
        self, after all responses have been replayed (or the stop flag
        was raised).
    """
    self.running = True
    self.clock.reset()
    last_onset = 0.000
    # wait until next event requested, and simulate a key press
    for onset, key in self.responses:
        core.wait(float(onset) - last_onset)
        if isinstance(key, int):
            # coerce an int to its first digit as a character to
            # avoid a cryptic error from the event system
            key = str(key)[0]
        if isinstance(key, str):
            event._onPygletKey(symbol=key, modifiers=0, emulated=True)
        else:
            logging.error('ResponseEmulator: only keyboard events '
                          'are supported')
        last_onset = onset
        if self.stopflag:
            break
    self.running = False
    return self
github oguayasa / tobii_pro_wrapper / tobii_pro_wrapper / tobii_pro_wrapper.py View on Github external
calibWin.flip()    
            # first wait to let the eyes settle 
            pcore.wait(0.5)  
            
            # conduct calibration of point
            print ("Collecting data at {0}." .format(i + 1))
            while self.calibration.collect_data(pointList[i][0], 
                                                pointList[i][1]) != tobii.CALIBRATION_STATUS_SUCCESS:
                self.calibration.collect_data(pointList[i][0], 
                                              pointList[i][1])   
                
            # feedback from calibration
            print ("{0} for data at point {1}." 
                   .format(self.calibration.collect_data(pointList[i][0],
                   pointList[i][1]), i + 1))
            pcore.wait(0.3)  # wait before continuing
          
            # Return point to original size
            for frame in range(moveFrames):
                pointLargeRadius += radiusStep
                calibPoint.radius = pointLargeRadius
                calibPoint.draw()
                calibWin.flip()      
            # let the eyes settle and move to the next point 
            pcore.wait(0.2)      
              
            # check to quit  
            # depending on response, either abort script or continue to calibration
            if event.getKeys(keyList=['q']):
                calibWin.close()
                self.calibration.leave_calibration_mode()
                raise KeyboardInterrupt("You aborted the script manually.")
github BciPy / BciPy / display / Demo_rsvp_disp_images.py View on Github external
rsvp.ele_list_sti = ele_sti[counter]
    if is_txt_sti:
        rsvp.color_list_sti = color_sti[counter]
      
    rsvp.time_list_sti = timing_sti[counter]

    core.wait(.4)
    sequence_timing = rsvp.do_sequence()

    _write_triggers_from_sequence_calibration(sequence_timing, file)

    # Get parameters from Bar Graph and schedule
    rsvp.bg.schedule_to(letters=dummy_bar_schedule_t[counter],
                        weight=dummy_bar_schedule_p[counter])

    core.wait(.5)

    if show_bg:
        rsvp.show_bar_graph()

    counter += 1

# close the window and file
win.close()
file.close()

# Print intervals
intervalsMS = np.array(win.frameIntervals) * 1000
print(intervalsMS)
github jadref / buffer_bci / python / imaginedMovement_psychoPy / simple_imagined_movement.py View on Github external
instruction = visual.TextStim(mywin, text=stimulus_instructions[stimulus_conditions[trial-1]-1],color=(1,1,1),height = 50) 
            instruction.draw()
            mywin.flip()
            sendEvent("experiment.trial",stimulus_instructions[stimulus_conditions[trial-1]-1])
            core.wait(4)
            if feedback is True:
                feedbackEvt = waitnewevents("feedback",1000)
                if feedbackEvt is None:
                    feedbackTxt='None'
                else:
                    feedbackTxt=str(feedbackEvt.value)
                getFeedbackCounter = getFeedbackCounter + 1
                visual.TextStim(mywin,text="Feedback = " + feedbackTxt,color=(1,1,1),height=50).draw()
                mywin.flip()
                sendEvent("experiment.feedback",feedbackTxt)
                core.wait(2)
                
            mywin.flip()
            core.wait(1)
        if block < nr_blocks:
            # break
            do_break()
        current_block += 1
        sendEvent("experiment.block",0)