How to use the apscheduler.events.EVENT_JOB_ERROR constant in APScheduler

To help you get started, we’ve selected a few APScheduler examples based on popular ways EVENT_JOB_ERROR is used in public projects.

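Before looking at the project snippets, here is a minimal, self-contained sketch of the basic pattern: schedule a job, register a listener with the EVENT_JOB_ERROR mask, and inspect the JobExecutionEvent when the job raises. The job and listener names below are illustrative and not taken from any of the projects that follow.

from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.events import EVENT_JOB_ERROR


def failing_job():
    # Any uncaught exception raised by a job produces an EVENT_JOB_ERROR event.
    raise RuntimeError('test failure')


def on_job_error(event):
    # event is a JobExecutionEvent carrying the exception and a formatted traceback.
    print('Job %s failed: %s' % (event.job_id, event.exception))
    print(event.traceback)


scheduler = BlockingScheduler()
scheduler.add_job(failing_job, 'interval', seconds=10, id='demo')
scheduler.add_listener(on_job_error, EVENT_JOB_ERROR)
scheduler.start()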

github agronholm / apscheduler / tests / test_executors.py
    job = create_job(func=func, id='foo')
    job._jobstore_alias = 'test_jobstore'
    run_time = (timezone.localize(datetime(1970, 1, 1)) if event_code == EVENT_JOB_MISSED else
                freeze_time.current)
    executor.submit_job(job, [run_time])
    executor.shutdown()

    assert mock_scheduler._dispatch_event.call_count == 1
    event = mock_scheduler._dispatch_event.call_args[0][0]
    assert event.code == event_code
    assert event.job_id == 'foo'
    assert event.jobstore == 'test_jobstore'

    if event_code == EVENT_JOB_EXECUTED:
        assert event.retval == 5
    elif event_code == EVENT_JOB_ERROR:
        assert str(event.exception) == 'test failure'
        assert isinstance(event.traceback, str)
github marwinxxii / apschedulerweb / apschedulerweb.py
'''
    if conf_file is not None:
        with open(conf_file, 'r') as f:
            conf = json.load(f)
        bottle_conf = conf.get('bottle', bottle_conf)
        web_conf = conf.get('web', web_conf)
    bottle_conf = fill_defaults(bottle_conf, bottle_config)
    global webapp
    webapp = fill_defaults(web_conf, web_config)
    webapp['sched'] = sched
    for job, jobstore in sched._pending_jobs:
        job.fails = 0
        job.stopped = False
    webapp['jobs'] = list(sched._pending_jobs)
    webapp['logs'] = {}
    sched.add_listener(error_listener, mask=EVENT_JOB_ERROR)
    sched.start()
    if webapp['users'] is not None:
        bottle.install(BasicAuthPlugin(webapp['users'],
                       max_auth_tries=webapp['max_auth_tries']))
    if 'user' in web_conf:
        gid = grp.getgrnam(web_conf['user']).gr_gid
        os.setreuid(gid, gid)
    if os.path.exists(webapp['pid_file']):
        print('Warning! PID file already exists')
    with open(webapp['pid_file'], 'w') as f:
        f.write(str(os.getpid()))
    signal.signal(signal.SIGTERM, kill_handler)
    bottle.run(**bottle_conf)
    on_exit()
github Netflix / security_monkey / security_monkey / scheduler.py
"misfire_grace_time": app.config.get('MISFIRE_GRACE_TIME', 30)
    },
    executors={
        "default": {
            "type": "threadpool",
            "max_workers": app.config.get("MAX_THREADS", 30)
        }
    }
)


def exception_listener(event):
    store_exception("scheduler-change-reporter-uncaught", None, event.exception)


scheduler.add_listener(exception_listener, EVENT_JOB_ERROR)


def setup_scheduler():
    """Sets up the APScheduler"""
    log = logging.getLogger('apscheduler')

    try:
        accounts = Account.query.filter(Account.third_party == False).filter(Account.active == True).all()  # noqa
        accounts = [account.name for account in accounts]
        for account in accounts:
            app.logger.debug("Scheduler adding account {}".format(account))
            rep = Reporter(account=account)
            delay = app.config.get('REPORTER_START_DELAY', 10)

            for period in rep.get_intervals(account):
                scheduler.add_job(
github kaiyuanshe / open-hackathon / open-hackathon / src / hackathon / scheduler.py
def scheduler_listener(event):
    if event.code == EVENT_JOB_ERROR:
        print('The job crashed :(')
        log.warn("The schedule job crashed because of %s" % repr(event.exception))
    else:
        print('The job executed :)')
        log.debug("The schedule job %s executed and return value is '%s'" % (event.job_id, event.retval))
github agronholm / apscheduler / apscheduler / executors / base_py3.py
        if job.misfire_grace_time is not None:
            difference = datetime.now(utc) - run_time
            grace_time = timedelta(seconds=job.misfire_grace_time)
            if difference > grace_time:
                events.append(JobExecutionEvent(EVENT_JOB_MISSED, job.id, jobstore_alias,
                                                run_time))
                logger.warning('Run time of job "%s" was missed by %s', job, difference)
                continue

        logger.info('Running job "%s" (scheduled at %s)', job, run_time)
        try:
            retval = await job.func(*job.args, **job.kwargs)
        except BaseException:
            exc, tb = sys.exc_info()[1:]
            formatted_tb = ''.join(format_tb(tb))
            events.append(JobExecutionEvent(EVENT_JOB_ERROR, job.id, jobstore_alias, run_time,
                                            exception=exc, traceback=formatted_tb))
            logger.exception('Job "%s" raised an exception', job)
        else:
            events.append(JobExecutionEvent(EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time,
                                            retval=retval))
            logger.info('Job "%s" executed successfully', job)

    return events
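On the consuming side, a listener registered for EVENT_JOB_ERROR receives the JobExecutionEvent built in the except branch above, so the exception, the formatted traceback, the job id, the jobstore alias and the scheduled run time are all available as attributes. A short sketch follows; the listener name and the pre-existing scheduler instance are assumptions, not part of the snippet.

from apscheduler.events import EVENT_JOB_ERROR


def log_job_failure(event):
    # These attributes correspond to the arguments passed to JobExecutionEvent above.
    print('Job %s (jobstore %s) scheduled at %s failed: %r'
          % (event.job_id, event.jobstore, event.scheduled_run_time, event.exception))
    print(event.traceback)


scheduler.add_listener(log_job_failure, EVENT_JOB_ERROR)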
github switchdoclabs / SDL_Pi_SmartPlantPi / SmartPlantPi.py
        # Pickle dictionary using protocol 0.
        state.Moisture_Threshold = pickle.load(input)
        state.EnglishMetric = pickle.load(input)
        state.Alarm_Temperature = pickle.load(input)
        state.Alarm_Moisture = pickle.load(input)
        state.Alarm_Water = pickle.load(input)
        state.Alarm_Air_Quality = pickle.load(input)
        state.Alarm_Active = pickle.load(input)

        input.close()


    scheduler.add_listener(ap_my_listener, apscheduler.events.EVENT_JOB_ERROR)


    # prints out the date and time to console
    scheduler.add_job(tick, 'interval', seconds=60)

    # blink optional life light
    scheduler.add_job(blinkLED, 'interval', seconds=5, args=[1,0.250])


    # update device state
    scheduler.add_job(updateState, 'interval', seconds=10)

    # check for alarms
    scheduler.add_job(checkForAlarms, 'interval', seconds=15)
    #scheduler.add_job(checkForAlarms, 'interval', seconds=300)
github funkyfuture / deck-chores / deck_chores / jobs.py
def start_scheduler():
    job_executors = {"default": ThreadPoolExecutor(cfg.job_executor_pool_size)}
    logger = log if cfg.debug else None
    scheduler.configure(executors=job_executors, logger=logger, timezone=cfg.timezone)
    scheduler.add_listener(on_error, events.EVENT_JOB_ERROR)
    scheduler.add_listener(on_executed, events.EVENT_JOB_EXECUTED)
    scheduler.add_listener(on_max_instances, events.EVENT_JOB_MAX_INSTANCES)
    scheduler.add_listener(on_missed, events.EVENT_JOB_MISSED)
    scheduler.start()
github geopython / GeoHealthCheck / GeoHealthCheck / scheduler.py
def error_listener(event):
    event_code = event.code
    event_code_str = ''
    # Use & to test the flag: OR-ing two non-zero event codes is always truthy.
    if event_code & EVENT_JOB_MISSED:
        event_code_str = 'EVENT_JOB_MISSED'
    elif event_code & EVENT_JOB_ERROR:
        event_code_str = 'EVENT_JOB_ERROR'

    LOGGER.error('error_listener: %s - %s' % (event_code_str, str(event)))
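The registration for this listener falls outside the excerpt; since each event carries a single code, attaching it with a combined mask ensures that only missed and errored jobs reach it (a sketch, assuming a scheduler instance named scheduler):

from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_MISSED

# Only EVENT_JOB_MISSED and EVENT_JOB_ERROR events are delivered to error_listener.
scheduler.add_listener(error_listener, EVENT_JOB_ERROR | EVENT_JOB_MISSED)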