Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def run(self):
    """Start the background monitor thread, then run the Celery worker.

    Lifecycle signal handlers are connected first so they fire during
    worker boot; the call then blocks in the worker's main loop.
    """
    signals.worker_init.connect(self.on_worker_init)
    signals.worker_ready.connect(self.on_worker_ready)
    # Propagate the daemon flag so the monitor thread exits with the process.
    self.monitor.daemon = self.daemon
    self.monitor.start()
    worker = self.app.Worker()
    # Celery renamed the worker entry point across versions; support both.
    if hasattr(worker, 'start'):
        worker.start()
    elif hasattr(worker, 'run'):
        worker.run()
    else:
        # RuntimeError instead of bare Exception: more precise, and still
        # caught by any existing `except Exception` in callers.
        raise RuntimeError("Don't know how to start worker. Incompatible Celery?")
@signals.setup_logging.connect
def on_logging_setup(**kwargs):
    """Record that Celery's ``setup_logging`` signal fired.

    NOTE: connecting any handler to ``setup_logging`` also stops Celery
    from reconfiguring the root logger itself.
    """
    # One-element list (defined in the enclosing scope) used as a mutable flag.
    logging_setup[0] = True
@celery.signals.worker_init.connect
def on_worker_init(sender, **kwargs):
    """Wire the Solr post-processing handler once the worker initializes.

    Connecting at worker_init time is required so the handler survives
    prefork: https://github.com/celery/celery/issues/1873#issuecomment-35288899
    (needed to attach update_solr_index handling to task_postrun).
    """
    import_task = sender.app.tasks[FileImportTask.name]
    celery.signals.task_postrun.connect(post_process_file_import,
                                        sender=import_task)
def handle_retry(self, task, req, store_errors=True, **kwargs):
    """Handle retry exception.

    Records the retry in the result backend (when ``store_errors``),
    invokes the task's ``on_retry`` hook, emits the ``task_retry``
    signal, logs the retry, and returns the built ``ExceptionInfo``.
    """
    # The exception raised is the Retry semi-predicate, and its `exc`
    # attribute is the original exception raised (if any).
    type_, _, tb = sys.exc_info()
    try:
        reason = self.retval
        einfo = ExceptionInfo((type_, reason, tb))
        if store_errors:
            # Persist the retry state so result consumers can observe it.
            task.backend.mark_as_retry(
                req.id, reason.exc, einfo.traceback, request=req,
            )
        task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo)
        signals.task_retry.send(sender=task, request=req,
                                reason=reason, einfo=einfo)
        info(LOG_RETRY, {
            'id': req.id,
            'name': get_task_name(req, task.name),
            'exc': text_t(reason),
        })
        return einfo
    finally:
        # Drop the local traceback reference to break the frame/traceback
        # reference cycle created by holding `tb`.
        del tb
def autodiscover_tasks(self, packages, related_name='tasks', force=False):
    """Discover task modules in *packages*.

    With ``force=True`` discovery runs immediately and its result is
    returned; otherwise discovery is deferred until the
    ``import_modules`` signal fires for this app (returns None).
    """
    if force:
        return self._autodiscover_tasks(packages, related_name)
    deferred = promise(self._autodiscover_tasks, (packages, related_name))
    # weak=False keeps the promise alive until the signal fires.
    signals.import_modules.connect(deferred, weak=False, sender=self)
"""Create a Celery application."""
celery = current_celery_app
if CELERY_4_OR_GREATER:
v3tov4config(flask_app.config, V3TOV4MAPPING)
celery.config_from_object(flask_app.config, namespace='CELERY') # pragma: no cover
else:
celery.config_from_object(flask_app.config) # pragma: no cover
celery.Task = AppContextTask
# Set Flask application object on the Celery application.
if not hasattr(celery, 'flask_app'):
celery.flask_app = flask_app
signals.after_setup_task_logger.connect(setup_task_logger)
return celery
import os

from celery import Celery, signals
from django.utils.log import configure_logging

# Set the default Django settings module for the 'celery' program before the
# Celery app is created, so config_from_object below can locate the settings.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'example.settings')
def configure_logger(conf=None, **kwargs):
    """(Re)apply Django's logging configuration in a Celery process.

    Connected to worker_process_init and beat_init so forked workers and
    beat use the project's LOGGING settings.
    """
    # Imported lazily: settings are only safe to touch after Django setup.
    from django.conf import settings as django_settings
    configure_logging(django_settings.LOGGING_CONFIG, django_settings.LOGGING)
# Reapply the logging configuration whenever a worker child process or
# the beat scheduler starts.
signals.worker_process_init.connect(configure_logger)
signals.beat_init.connect(configure_logger)

app = Celery('example')

# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
#   should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')

# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
@app.task(bind=True)
def _async_call(self, func, args, kwargs):
    """Generic task: apply *func* to the given positional and keyword args."""
    result = func(*args, **kwargs)
    return result
def execute(self):
    """Run the traced task body, framed by prerun/postrun signals.

    Returns whatever ``self._trace()`` produces.
    """
    # Fields shared by both signal payloads.
    common = dict(task_id=self.task_id, task=self.task,
                  args=self.args, kwargs=self.kwargs)
    signals.task_prerun.send(sender=self.task, **common)
    result = self._trace()
    signals.task_postrun.send(sender=self.task, retval=result, **common)
    return result
def init_app(self, app):
    """Bind this Celery instance to a Flask application.

    Stores the Flask app, merges its config into the Celery config, and
    connects the pre/post-run handlers defined on this class (presumably
    to push/pop a Flask app context around each task — confirm against
    on_task_prerun/on_task_postrun).
    """
    self.__flask_context = []
    self.__flask_app = app
    self.conf.update(app.config)
    for signal, handler in (
        (signals.task_prerun, self.on_task_prerun),
        (signals.task_postrun, self.on_task_postrun),
    ):
        signal.connect(handler)
@signals.task_prerun.connect
def handle_task_prerun(sender=None, task_id=None, **kwargs):
    """Mark the tracked task's DB row as STARTED when the worker picks it up."""
    if not task_id:
        return
    ModelTaskMeta.objects.filter(task_id=task_id).update(
        state=ModelTaskMetaState.STARTED)