def update_sent_state(sender=None, body=None, **kwargs):
    # Change task.status to 'SENT' for all tasks which are sent in.
    # This allows one to distinguish between PENDING tasks which have been
    # sent in and tasks which do not exist. State will change to
    # SUCCESS, FAILURE, etc. once the process terminates.
    task = current_app.tasks.get(sender)
    backend = task.backend if task else current_app.backend
    backend.store_result(body['id'], None, "SENT")
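A handler like this is typically connected to Celery's task_sent signal, the pre-4.0 API that delivers the message body (Celery 4+ replaced it with before_task_publish, which passes headers instead). A minimal sketch of the wiring, assuming the handler above is importable:

# Sketch: register the handler so it runs whenever a task is published.
# task_sent is the Celery < 4.0 signal matching the body= argument above.
from celery.signals import task_sent

task_sent.connect(update_sent_state)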
def fire(self, delay=None):
    # Use send_task to avoid a cyclic import; delay is in minutes,
    # countdown in seconds.
    celery.current_app.send_task(
        'apps.jobs.tasks.async_job_fire',
        task_id=self.id,
        countdown=delay * 60 if delay else None)
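send_task dispatches by registered name, so this module never imports the task module. The receiving side is not shown; a hypothetical sketch of what apps.jobs.tasks might define (the bind=True / self.request usage is an assumption, not the original code):

# Hypothetical receiving task: the name must match the string passed to
# send_task, and the job id can be recovered from the task id that
# fire() published it under.
import celery

@celery.current_app.task(bind=True, name='apps.jobs.tasks.async_job_fire')
def async_job_fire(self):
    job_id = self.request.id  # fire() passed the job's id as task_id
    ...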
def redirect_stdouts_to_logger(loglevel='INFO', logfile=None,
                               redirect_level='WARNING',
                               stdout=False, stderr=True):
    """See :meth:`celery.log.Log.redirect_stdouts_to_logger`."""
    # XXX Currently unused.
    log = celery.log
    handled = setup_logging(loglevel, logfile)
    if not handled:
        return log.redirect_stdouts_to_logger(
            log.get_default_logger(name='cyme'),
            redirect_level, stdout=stdout, stderr=stderr)
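With the defaults above (stderr=True, stdout=False), and assuming setup_logging does not report logging as already handled, anything written to sys.stderr after the call is routed into the 'cyme' logger rather than the terminal. Hypothetical usage (the log path is illustrative):

import sys

redirect_stdouts_to_logger(loglevel='INFO', logfile='/var/log/cyme.log')
sys.stderr.write('captured by the cyme logger, not printed\n')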
while True:
    try:
        inspect().stats()
    except IOError as e:
        current_try += 1
        logging.info("Broker down, try: {0}, exception: {1}".format(current_try, e))
        if current_try >= total_tries:
            logging.info("Broker unreachable for {0} seconds.".format(total_wait_time))
            return False, e, traceback.format_exc()
        time.sleep(sleep_time)
        continue
    logging.info("Broker {0} up!".format(celery.current_app.conf['BROKER_URL']))
    break
return True, None, None
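The loop refers to counters and limits defined outside the excerpt. A self-contained sketch under assumed names (wait_for_broker, total_tries, and sleep_time are not from the original), using the app.control.inspect() spelling of the same probe:

# Self-contained sketch: poll worker stats until the broker answers,
# giving up after total_tries attempts.
import logging
import time
import traceback

import celery


def wait_for_broker(total_tries=30, sleep_time=2):
    current_try = 0
    while True:
        try:
            celery.current_app.control.inspect().stats()
        except IOError as e:
            current_try += 1
            logging.info("Broker down, try: %s, exception: %s", current_try, e)
            if current_try >= total_tries:
                logging.info("Broker unreachable for %s seconds.",
                             total_tries * sleep_time)
                return False, e, traceback.format_exc()
            time.sleep(sleep_time)
            continue
        logging.info("Broker %s up!", celery.current_app.conf['BROKER_URL'])
        return True, None, None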
if self.es_filter:
    qs = model.objects.filter(**self.es_filter).values_list('id', flat=True)
else:
    most_recent_result = pendulum.parse(self.get_most_recently_modified())
    logger.info('Looking for %ss that have been modified after %s', model, most_recent_result)
    q = Q(date_modified__gt=most_recent_result)
    if hasattr(model, 'subjects') and hasattr(model, 'subject_relations'):
        q = q | Q(subjects__date_modified__gt=most_recent_result) | \
            Q(subject_relations__date_modified__gt=most_recent_result)
    qs = model.objects.filter(q).values_list('id', flat=True)
for batch in chunk(qs.iterator(), chunk_size):
    if batch:
        if not self.to_daemon:
            tasks.index_model.apply_async(
                (model.__name__, batch,),
                {'es_url': self.es_url, 'es_index': self.es_index})
        else:
            try:
                SearchIndexer(celery.current_app).index(
                    model.__name__, *batch,
                    index=self.es_index if self.es_index != settings.ELASTICSEARCH['INDEX'] else None)
            except ValueError:
                logger.warning('Not sending model type %r to the SearchIndexer', model)
if self.es_models and 'source' in self.es_models:
    logger.info('Starting task to index sources')
    tasks.index_sources.apply_async((), {'es_url': self.es_url, 'es_index': self.es_index})
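The chunk helper used in the loop above isn't shown; a minimal batching generator that matches the call site (the implementation is an assumption):

import itertools


def chunk(iterable, size):
    # Yield successive lists of at most `size` items from any iterator,
    # so the queryset is never materialized all at once.
    iterator = iter(iterable)
    while True:
        batch = list(itertools.islice(iterator, size))
        if not batch:
            return
        yield batch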
Two things to know about the queue size:

1. It's not 100% accurate, but the size is generally near that number.
2. I can't think of a second thing, but that first thing is pretty important.
"""
# FIXME: 2015-04-23: This is busted.
from celery import current_app
# FIXME: This uses a private method, but I'm not sure how else to
# figure this out, either.
app = current_app._get_current_object()
conn = app.connection()
chan = conn.default_channel
# FIXME: This hard-codes the queue name, but I'm not sure how else
# to figure it out.
queue = chan.queue_declare('celery', passive=True)
return queue.message_count
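As the FIXMEs note, the connection above is never released. A variant sketch that scopes the connection with kombu's context manager (the function name queue_length is hypothetical; 'celery' is the default queue hard-coded above):

from celery import current_app


def queue_length(queue_name='celery'):
    app = current_app._get_current_object()
    with app.connection() as conn:
        # passive=True asks the broker for the queue's stats without
        # creating it, and raises if the queue does not exist.
        return conn.default_channel.queue_declare(
            queue=queue_name, passive=True).message_count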
def cancel(self):
    """
    Cancel the associated task if it hasn't already finished running.
    """
    if self.is_container:
        children = UserTaskStatus.objects.filter(parent=self)
        for child in children:
            child.cancel()
    elif self.state in (UserTaskStatus.PENDING, UserTaskStatus.RETRYING):
        current_app.control.revoke(self.task_id)
    user_task_stopped.send_robust(UserTaskStatus, status=self)
    with transaction.atomic():
        status = UserTaskStatus.objects.select_for_update().get(pk=self.id)
        if status.state in (UserTaskStatus.CANCELED, UserTaskStatus.FAILED, UserTaskStatus.SUCCEEDED):
            return
        status.state = UserTaskStatus.CANCELED
        status.save(update_fields={'state', 'modified'})
    self.state = status.state
    self.modified = status.modified
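Hypothetical usage from calling code (some_task_id is a placeholder); note that revoke() without terminate=True only discards tasks a worker hasn't started yet, so an already-running task finishes and the status ends up FAILED or SUCCEEDED instead:

status = UserTaskStatus.objects.get(task_id=some_task_id)
status.cancel()
# After the call, the in-memory object mirrors the database row.
assert status.state in (UserTaskStatus.CANCELED, UserTaskStatus.FAILED,
                        UserTaskStatus.SUCCEEDED)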
def __init__(self, task):
    self._task = task
    self.app = current_app._get_current_object()
    self.name = self._task['name']
    self.task = self._task['name']
    # Fill out schedule
    if self._task['schedule_type'] == 'crontab':
        self.schedule = celery.schedules.crontab(
            minute=self._task['crontab']['minute'],
            hour=self._task['crontab']['hour'],
            day_of_week=self._task['crontab']['day_of_week'],
            day_of_month=self._task['crontab']['day_of_month'],
            month_of_year=self._task['crontab']['month_of_year']
        )
    elif self._task['schedule_type'] == 'interval':
        self.schedule = celery.schedules.schedule(
            datetime.timedelta(**{
                self._task['interval']['period']: self._task['interval']['every'],
            }))
    self.args = self._task['args']
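The _task mapping this constructor consumes would look roughly like the following; the field names come from the attribute accesses above, while the concrete values are illustrative assumptions:

# Illustrative payloads matching the keys read in __init__ above.
crontab_task = {
    'name': 'nightly-report',
    'schedule_type': 'crontab',
    'crontab': {
        'minute': '0',
        'hour': '2',
        'day_of_week': '*',
        'day_of_month': '*',
        'month_of_year': '*',
    },
    'args': [],
}
interval_task = {
    'name': 'poll-feed',
    'schedule_type': 'interval',
    # Expands to datetime.timedelta(minutes=15) in __init__.
    'interval': {'period': 'minutes', 'every': 15},
    'args': [],
}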
    ...     rate_limit='1/m',
    ...     name='one_per_minute',
    ... )
    >>> class SomeSprinkler(SprinklerBase):
    ...     _async_subtask = one_per_minute
    ...     def subtask(self, obj):
    ...         print(obj)
    ...     def get_queryset(self):
    ...         return User.objects.all()[0:5]
    ...
    >>> SomeSprinkler().start()
    """
    return registry[sprinkler_name](**kwargs)._run_subtask(obj_pk)
_async_subtask = current_app.task(async_subtask)
@current_app.task()
def _async_shard_start(shard_id, from_pk, to_pk, sprinkler_name, kwargs):
    sprinkler = registry[sprinkler_name](**kwargs)
    return sprinkler.shard_start(shard_id, from_pk, to_pk)


@current_app.task()
def _sprinkler_shard_finished_wrap(results, shard_id, sprinkler_name, kwargs):
    sprinkler = registry[sprinkler_name](**kwargs)
    sprinkler.log(f"shard finished: {shard_id}")
    sprinkler.shard_finished(shard_id, results)
setup_task_logger = Proxy(lambda: current_app.log.setup_task_logger)
get_task_logger = Proxy(lambda: current_app.log.get_task_logger)
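Both assignments use celery.local.Proxy to defer the current_app lookup: the lambda is evaluated on first use, not at import time, so importing the module never binds a specific app. A minimal sketch of the same pattern:

from celery import current_app
from celery.local import Proxy

# The lambda runs when the proxy is first used, resolving against
# whichever app is current at that moment.
get_task_logger = Proxy(lambda: current_app.log.get_task_logger)
logger = get_task_logger(__name__)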