# Author: Pablo Iranzo Gomez (Pablo.Iranzo@gmail.com)
import datetime
import logging
import dateutil.parser
import feedparser
from apscheduler.schedulers.background import BackgroundScheduler
from lxml import html
import stampy.stampy
import stampy.plugin.stats
import stampy.plugin.config
from stampy.i18n import translate
_ = translate.ugettext
sched = BackgroundScheduler()
sched.start()
def init():
"""
Initializes module
:return: List of triggers for plugin
"""
botname = stampy.stampy.getme()['username']
if botname == 'redken_bot':
sched.add_job(obichero, 'cron', id='obichero', hour='15', replace_existing=True, misfire_grace_time=120)
triggers = ["^/obichero"]
return triggers
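# The obichero() callable registered above is defined elsewhere in this
# plugin; a hypothetical stand-in, for illustration only, that the cron
# trigger would call daily at 15:00 (misfire_grace_time=120 still runs a
# firing that is up to two minutes late instead of discarding it):
def obichero():
    logging.getLogger(__name__).debug(_("Running scheduled obichero job"))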
# Listen to the MAVLink messages that will be used as a trigger to set EKF home automatically
vehicle.add_message_listener('STATUSTEXT', statustext_callback)
if compass_enabled == 1:
    # Listen to the attitude data in aeronautical frame
    vehicle.add_message_listener('ATTITUDE', att_msg_callback)
data = None
current_confidence = None
H_aeroRef_aeroBody = None
H_camera_tag = None
is_landing_tag_detected = False  # True only while the tag with the landing id is currently detected
heading_north_yaw = None
# Send MAVLink messages in the background
sched = BackgroundScheduler()
sched.add_job(send_vision_position_message, 'interval', seconds=1 / vision_msg_hz)
sched.add_job(send_confidence_level_dummy_message, 'interval', seconds=1 / confidence_msg_hz)
sched.add_job(send_land_target_message, 'interval', seconds=1 / landing_target_msg_hz_default)
# For scale calibration, we will use a thread to monitor user input
if scale_calib_enable:
    scale_update_thread = threading.Thread(target=scale_update)
    scale_update_thread.daemon = True
    scale_update_thread.start()
sched.start()
if compass_enabled == 1:
    # Wait a short while for yaw to be correctly initiated
    time.sleep(1)
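# Hypothetical sketch of the scale_update worker handed to the thread above
# (in the real script it is defined earlier and reads a new scale factor
# from stdin in a loop); names and prompt text are illustrative only.
def scale_update():
    global scale_factor
    while True:
        scale_factor = float(input("Type in a new scale factor as a float: "))
        print("New scale factor:", scale_factor)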
self.freezerc_executable = spawn.find_executable(
    'freezer-agent', path=':'.join(sys.path))
LOG.debug('Freezer-agent found at {0}'
          .format(self.freezerc_executable))
self.job_path = job_path
self._client = None
self.lock = threading.Lock()
job_defaults = {
    'coalesce': True,
    'max_instances': 1
}
executors = {
    'default': {'type': 'threadpool', 'max_workers': 1},
    'threadpool': {'type': 'threadpool',
                   'max_workers': concurrent_jobs}
}
self.scheduler = background.BackgroundScheduler(
    job_defaults=job_defaults,
    executors=executors)
if self.client:
    self.scheduler.add_job(self.poll, 'interval',
                           seconds=interval, id='api_poll',
                           executor='default')
self.add_job = self.scheduler.add_job
self.remove_job = self.scheduler.remove_job
self.jobs = {}
jobstores = {
    'default': SQLAlchemyJobStore(url=APSCHEDULER_DATABASE_URI),
    'memory': MemoryJobStore()
}
executors = {
    'default': ThreadPoolExecutor(20),
    # 'processpool': ProcessPoolExecutor(5)
}
job_defaults = {
    'coalesce': True,
    'max_instances': 1
}
# https://apscheduler.readthedocs.io/en/latest/userguide.html
# scheduler = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc)
scheduler = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults)
# https://apscheduler.readthedocs.io/en/latest/userguide.html#scheduler-events
# EVENT_JOB_EXECUTED: 'code': 4096, 'exception': None
# EVENT_JOB_ERROR: 'code': 8192, 'exception': xxx
# apscheduler/executors/base.py
# events.append(JobExecutionEvent(EVENT_JOB_MISSED, job.id, jobstore_alias,
#                                 run_time))
# logger.warning('Run time of job "%s" was missed by %s', job, difference)
# WARNING in apscheduler.executors.default: Run time of job "task_1" was missed by 0:00:26.030600
# apscheduler/schedulers/base.py
# self._logger = maybe_ref(config.pop('logger', None)) or getLogger('apscheduler.scheduler')
# self._logger.warning(
#     'Execution of job "%s" skipped: maximum number of running '
#     'instances reached (%d)', job, job.max_instances)
# event = JobSubmissionEvent(EVENT_JOB_MAX_INSTANCES, job.id,
#                            jobstore_alias, run_times)
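# A minimal sketch (not part of the original file) of wiring a listener to
# the events documented above; 4096 and 8192 are the numeric values of
# events.EVENT_JOB_EXECUTED and events.EVENT_JOB_ERROR.
import logging

from apscheduler import events


def job_outcome_listener(event):
    # JobExecutionEvent carries job_id, scheduled_run_time and, when the
    # job raised, the exception itself.
    if event.exception:
        logging.getLogger(__name__).error('job %s failed: %s',
                                          event.job_id, event.exception)
    else:
        logging.getLogger(__name__).info('job %s ran at %s',
                                         event.job_id, event.scheduled_run_time)


scheduler.add_listener(job_outcome_listener,
                       events.EVENT_JOB_EXECUTED | events.EVENT_JOB_ERROR)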
jobstores = {
    'memory': MemoryJobStore(),
}
jobstores['default'] = jobstores['memory']
try:
    jobstores['sqlalchemy'] = SQLAlchemyJobStore(
        url=self.config.scheduler.db_uri)
except AttributeError:
    pass
executors = {}
try:
    executors['default'] = ThreadPoolExecutor(
        max_workers=self.config.scheduler.max_workers)
except AttributeError:
    executors['default'] = ThreadPoolExecutor(
        max_workers=default_max_workers)
sched = BackgroundScheduler(jobstores=jobstores,
                            executors=executors,
                            timezone=pytz.timezone(self.config.tz))
sched.add_listener(functools.partial(_done_listener, sched),
                   events.EVENT_JOB_EXECUTED | events.EVENT_JOB_ERROR)
sched.add_listener(functools.partial(_submitted_listener, sched),
                   events.EVENT_JOB_SUBMITTED)
sched.add_listener(functools.partial(_modified_listener, sched),
                   events.EVENT_JOB_MODIFIED)
return sched
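# functools.partial binds the scheduler as the first argument, so each
# listener can act on the scheduler that emitted the event. The real
# _done_listener/_submitted_listener/_modified_listener live elsewhere in
# the original module; a hypothetical _done_listener, for illustration only:
def _done_listener(scheduler, event):
    # Pause a job once it raises, so a persistently failing job does not
    # keep firing; event is a JobExecutionEvent.
    if event.exception:
        scheduler.pause_job(event.job_id)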
from db.manager import db_manager
class Scheduler(object):
    mongo = MongoClient(host=MONGO_HOST, port=MONGO_PORT, connect=False)
    task_col = 'apscheduler_jobs'

    # scheduler jobstore
    jobstores = {
        'mongo': MongoDBJobStore(database=MONGO_DB,
                                 collection=task_col,
                                 client=mongo)
    }

    # scheduler instance
    scheduler = BackgroundScheduler(jobstores=jobstores)

    def execute_spider(self, id: str, params: str = None):
        print(f'executing spider {id}')
        print(f'params: {params}')
        self.scheduler.print_jobs(jobstore='mongo')
        query = {}
        if params is not None:
            query['params'] = params
        r = requests.get('http://%s:%s/api/spiders/%s/on_crawl' % (
            FLASK_HOST,
            FLASK_PORT,
            id
        ), query)

    def update(self):
        print('updating...')
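# A hypothetical sketch (not part of the original class) of registering a
# spider run against the Mongo-backed jobstore; the id and cron fields are
# illustrative. Jobs placed in a persistent jobstore must be picklable,
# which is why the callable and its args are kept simple.
s = Scheduler()
s.scheduler.add_job(s.execute_spider,
                    trigger='cron', hour='3', minute='0',
                    args=['spider-id-123'],
                    id='nightly-crawl', jobstore='mongo',
                    replace_existing=True)
s.scheduler.start()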
#coding: utf-8
from __future__ import unicode_literals, absolute_import
import atexit
import logging
from apscheduler.schedulers.background import BackgroundScheduler
from django.conf import settings
logger = logging.getLogger(__name__)
scheduler = BackgroundScheduler()
# import sys, socket
#
# try:
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# sock.bind(("127.0.0.1", 47200))
# except socket.error:
# print "!!!scheduler already started, DO NOTHING"
# else:
# from apscheduler.schedulers.background import BackgroundScheduler
# scheduler = BackgroundScheduler()
# scheduler.start()
# print "scheduler started"
# http://stackoverflow.com/questions/16053364
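# A Python 3 sketch of the single-instance guard quoted above: binding a
# localhost port succeeds in only one process, so under Django's autoreloader
# (or multiple workers) only that process starts the scheduler. The port
# number is arbitrary; atexit (imported above) handles a clean shutdown.
import socket

try:
    _lock_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    _lock_socket.bind(("127.0.0.1", 47200))
except socket.error:
    logger.info("scheduler already started in another process, doing nothing")
else:
    scheduler.start()
    atexit.register(lambda: scheduler.shutdown(wait=False))
    logger.info("scheduler started")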
def scrape_websites(self):
    scheduler = BackgroundScheduler()
    scheduler.add_job(self.scheduled_method, 'cron',
                      day_of_week=self.job_trigger_settings['day_of_job'],
                      hour=self.job_trigger_settings['hour'],
                      minute=self.job_trigger_settings['minute'],
                      second=self.job_trigger_settings['second'])
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
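# Hypothetical shape of the job_trigger_settings dict assumed by
# scrape_websites (illustrative only): fire every Monday at 08:30:00.
job_trigger_settings = {
    'day_of_job': 'mon',
    'hour': 8,
    'minute': 30,
    'second': 0,
}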
def schedule_update_price(interval):
    """Schedule the price update procedure to run every interval.

    Args:
        interval (int): chosen interval (minutes)
    """
    sched = BackgroundScheduler()
    sched.add_job(push_updated_price, 'interval', id='push_price',
                  minutes=interval, replace_existing=True)
    sched.start()
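# Hypothetical stub and caller for push_updated_price (not part of the
# original module), just to show schedule_update_price in use:
def push_updated_price():
    print('pushing latest price...')


schedule_update_price(15)  # push every 15 minutes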
"""Create scheduler object & add functions to it."""
jobstores = {
    'default': SQLAlchemyJobStore(url='postgresql:///nextbook')
}
executors = {
    'default': ThreadPoolExecutor(20),
    'processpool': ProcessPoolExecutor(5)
}
job_defaults = {
    'coalesce': False,
    'max_instances': 3
}
scheduler = BackgroundScheduler(
    jobstores=jobstores,
    executors=executors,
    job_defaults=job_defaults,
    timezone=utc
)
scheduler.add_job(
    generate_recommendation_delivery_dates,
    trigger='cron',
    hour='8',
    minute='1')
scheduler.add_job(send_recommendation_email, trigger='cron', hour='20')
return scheduler
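# Minimal usage sketch for the factory whose body is shown above;
# create_scheduler is a hypothetical name for that function.
import atexit

scheduler = create_scheduler()
scheduler.start()
atexit.register(lambda: scheduler.shutdown(wait=False))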