How to use the apscheduler.events.EVENT_JOB_EXECUTED constant in APScheduler

To help you get started, we’ve selected a few APScheduler examples based on popular ways the library is used in public projects.

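Each snippet below follows the same basic pattern: register a listener callback on a scheduler for EVENT_JOB_EXECUTED, usually combined with other event masks via bitwise OR. As a quick orientation, here is a minimal sketch of that pattern; the tick job, the five-second interval, and the listener name are illustrative assumptions rather than code from any of the projects below.

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR

def tick():
    # Placeholder job used only for this sketch
    return 'tick'

def job_listener(event):
    # A JobExecutionEvent carries job_id, plus retval on success or exception on failure
    if event.exception:
        print('Job %s raised %r' % (event.job_id, event.exception))
    else:
        print('Job %s executed, returned %r' % (event.job_id, event.retval))

scheduler = BackgroundScheduler()
scheduler.add_job(tick, 'interval', seconds=5)
# Bitwise OR combines event masks so one listener receives both outcomes
scheduler.add_listener(job_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
scheduler.start()

Inside the listener, the JobExecutionEvent exposes job_id plus retval for EVENT_JOB_EXECUTED and exception/traceback for EVENT_JOB_ERROR, which is what most of the examples below branch on.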

github agronholm / apscheduler / tests / testintegration.py
    def test_max_instances(self):
        vals = [0]
        events = []
        self.scheduler.add_listener(events.append, EVENT_JOB_EXECUTED | EVENT_JOB_MISSED)
        self.scheduler.add_job(increment, 'interval', {'seconds': 0.3}, max_instances=2, max_runs=4, args=[vals, 1],
                               jobstore='persistent')
        sleep(2.4)
        eq_(vals, [2])
        eq_(len(events), 4)
        eq_(events[0].code, EVENT_JOB_MISSED)
        eq_(events[1].code, EVENT_JOB_MISSED)
        eq_(events[2].code, EVENT_JOB_EXECUTED)
        eq_(events[3].code, EVENT_JOB_EXECUTED)
github ToolsForHumans / padre / padre / bot.py
        jobstores = {}  # assumed initialization; this excerpt starts mid-method
        try:
            jobstores['sqlalchemy'] = SQLAlchemyJobStore(
                url=self.config.scheduler.db_uri)
        except AttributeError:
            pass
        executors = {}
        try:
            executors['default'] = ThreadPoolExecutor(
                max_workers=self.config.scheduler.max_workers)
        except AttributeError:
            executors['default'] = ThreadPoolExecutor(
                max_workers=default_max_workers)
        sched = BackgroundScheduler(jobstores=jobstores,
                                    executors=executors,
                                    tz=pytz.timezone(self.config.tz))
        sched.add_listener(functools.partial(_done_listener, sched),
                           events.EVENT_JOB_EXECUTED | events.EVENT_JOB_ERROR)
        sched.add_listener(functools.partial(_submitted_listener, sched),
                           events.EVENT_JOB_SUBMITTED)
        sched.add_listener(functools.partial(_modified_listener, sched),
                           events.EVENT_JOB_MODIFIED)
        return sched
github kaiyuanshe / open-hackathon / open-hackathon / src / hackathon / scheduler.py
def scheduler_listener(event):
    if event.exception:  # reconstructed guard; this excerpt starts mid-function
        log.warn("The schedule job crashed because of %s" % repr(event.exception))
    else:
        print('The job executed :)')
        log.debug("The schedule job %s executed and return value is '%s'" % (event.job_id, event.retval))


if not app.debug or os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
    scheduler = BackgroundScheduler(timezone=utc)

    # job store
    if safe_get_config("scheduler.job_store", "memory") == "mysql":
        scheduler.add_jobstore('sqlalchemy', url=get_config("scheduler.job_store_url"))

    # listener
    # do we need listen EVENT_JOB_MISSED?
    scheduler.add_listener(scheduler_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

    scheduler.start()
github cadithealth / pyramid_scheduler / pyramid_scheduler / scheduler.py
      #apscheduler.events.EVENT_JOBSTORE_JOB_ADDED,
      #apscheduler.events.EVENT_JOBSTORE_JOB_REMOVED,
      #apscheduler.events.EVENT_JOB_EXECUTED,
      #apscheduler.events.EVENT_JOB_ERROR,
      #apscheduler.events.EVENT_JOB_MISSED,
      ):
      # these can be ignored
      return
    if isinstance(event, apscheduler.events.JobStoreEvent) \
        and event.alias == self.ramstore:
      # this is an "internal" event... squelch it.
      return
    if event.code == apscheduler.events.EVENT_JOBSTORE_JOB_ADDED \
        and hasattr(event, 'job'):
      return self._notify(api.Event(api.Event.JOB_CREATED, job=event.job))
    if event.code == apscheduler.events.EVENT_JOB_EXECUTED:
      return self._notify(api.Event(api.Event.JOB_EXECUTED, job=event.job))
    if event.code == apscheduler.events.EVENT_JOBSTORE_JOB_REMOVED \
        and hasattr(event, 'job'):
      return self._notify(api.Event(api.Event.JOB_REMOVED, job=event.job))
    # todo: any other messages that i should pass through?
github Tautulli / Tautulli / lib / apscheduler / executors / base.py
        try:
            retval = job.func(*job.args, **job.kwargs)
        except BaseException:
            exc, tb = sys.exc_info()[1:]
            formatted_tb = ''.join(format_tb(tb))
            events.append(JobExecutionEvent(EVENT_JOB_ERROR, job.id, jobstore_alias, run_time,
                                            exception=exc, traceback=formatted_tb))
            logger.exception('Job "%s" raised an exception', job)

            # This is to prevent cyclic references that would lead to memory leaks
            if six.PY2:
                sys.exc_clear()
                del tb
            else:
                import traceback
                traceback.clear_frames(tb)
                del tb
        else:
            events.append(JobExecutionEvent(EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time,
                                            retval=retval))
            logger.info('Job "%s" executed successfully', job)

    return events
github mazvv / travelcrm / travelcrm / lib / scheduler / companies.py
    job_id = gen_id(limit=12)
    schema_name = generate_company_schema()
    scheduler.add_job(
        _company_creation,
        trigger='date',
        id=job_id,
        run_date=datetime.now(pytz.utc),
        args=[company_name, schema_name, email, timezone, locale, tarif],
    )

    
    callback = partial(
        _notification_callback, job_id=job_id, 
        request=request, email=email, subdomain=schema_name
    )
    scheduler.add_listener(callback, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
github dongweiming / slack-alert / slack_alert.py
        return 1

    jobs = find_jobs(plugins_path)
    if not jobs:
        print('Not yet jobs!')
        return 1
    apscheduler = __import__('apscheduler.schedulers.{}'.format(
        scheduler_module))
    scheduler_cls = reduce(lambda x, y: getattr(x, y),
                           [apscheduler.schedulers, scheduler_module,
                            scheduler_name])
    scheduler = scheduler_cls()

    listener = partial(slack_listener, args)
    scheduler.add_listener(listener,
                           EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
    for job, kw in jobs:
        scheduler.add_job(job, 'interval', **kw)
    update_scheduler_status = partial(
        _update_scheduler_status, scheduler, args)
    scheduler.add_job(update_scheduler_status, 'interval', seconds=5)
    g = scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    try:
        if scheduler_name == 'AsyncIOScheduler':
            asyncio.get_event_loop().run_forever()
        elif scheduler_name == 'GeventScheduler':
            g.join()
        elif scheduler_name == 'TornadoScheduler':
            IOLoop.instance().start()
        else:
github edouardpoitras / eva / eva / scheduler.py
https://apscheduler.readthedocs.io/en/latest/modules/events.html#event-codes

    :note: This function most likely needs to be revisited as it may not be
        thread-safe. Eva and plugins can modify the config singleon
        simultaneously inside and outside of jobs.

    :return: The scheduler object used by plugins to schedule long-running jobs.
    :rtype: apscheduler.schedulers.background.BackgroundScheduler
    """
    client = get_mongo_client()
    db_name = conf['mongodb']['database']
    scheduler = BackgroundScheduler(jobstore=MongoDBJobStore(database=db_name,
                                                             collection='scheduler',
                                                             client=client))
    scheduler.add_listener(job_succeeded, EVENT_JOB_EXECUTED)
    scheduler.add_listener(job_failed, EVENT_JOB_ERROR)
    scheduler.start()
    return scheduler
github jxltom / scrapymon / flask_template / kernel / scheduler / scheduler.py
    def __init__(self):
        """Init scheduler and store results."""
        self._scheduler = BackgroundScheduler(timezone='Asia/Hong_Kong')
        self._result_store, self._status_store = {}, {}
        self._scheduler.add_listener(self._job_execution_event,
                                     EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        self._scheduler.add_listener(self._job_add_event, EVENT_JOB_ADDED)
        self._scheduler.add_listener(self._job_remove_event, EVENT_JOB_REMOVED)