from __future__ import division

from time import sleep
from datetime import datetime, timedelta

from celery import Celery

from ..celery_dashboard import init
from ..celery_dashboard.utils import set_progress

celery_app = Celery('test_app', broker='redis://localhost', backend='redis://localhost')
celery_app.conf.update(accept_content=['json', 'pickle'],
                       CELERY_ACCEPT_CONTENT=['json', 'pickle'],  # celery 3
                       worker_prefetch_multiplier=1)

init(celery_app, "postgresql://docker:docker@localhost:5432/docker", db_echo="debug")


@celery_app.task(name="retry_with_countdown", bind=True)
def retry_with_countdown(self, countdown):
    self.retry(countdown=countdown)


@celery_app.task(name="retry_with_eta", bind=True)
def retry_with_eta(self, countdown):
    # timedelta(countdown) would be read as days; the retry delay is in seconds
    self.retry(eta=datetime.utcnow() + timedelta(seconds=countdown))
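Both tasks exercise the two retry styles: a countdown is a relative delay in seconds, while an eta is an absolute UTC datetime. A minimal invocation, assuming a broker and a worker are running:

# Retry 5 seconds after each failure, via either style.
retry_with_countdown.apply_async(args=[5])
retry_with_eta.apply_async(args=[5])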
def test_config3():
    """Test passing in config."""
    # Celery and Flask imports are assumed at module level; create_celery_app
    # and eager_conf come from the package under test.
    c = Celery('mycurrent')
    c.set_current()
    app = Flask("myapp")
    app.config.from_object(eager_conf)
    celery = create_celery_app(app)
    assert celery
    assert celery.flask_app == app
    assert celery.conf.CELERY_ALWAYS_EAGER
    assert celery.conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS
    assert celery.conf.CELERY_RESULT_BACKEND == 'cache'
    assert celery.conf.CELERY_CACHE_BACKEND == 'memory'
def test_without_request(self, monkeypatch):
    # Uses the `pretend` stubbing library; `tasks` is the module under test.
    async_result = pretend.stub()
    apply_async = pretend.call_recorder(lambda *a, **kw: async_result)
    get_current_request = pretend.call_recorder(lambda: None)
    monkeypatch.setattr(tasks, "get_current_request", get_current_request)

    task = tasks.WarehouseTask()
    task.app = Celery()
    monkeypatch.setattr(Task, "apply_async", apply_async)

    assert task.apply_async() is async_result
    assert apply_async.calls == [pretend.call(task)]
    assert get_current_request.calls == [pretend.call()]
def __init__(self, *, auto_queue: bool = False) -> None:
    # WORKER_QUEUE_PREFIX, random_string and CELERY_SETTINGS are assumed to be
    # defined elsewhere in the module.
    if auto_queue:
        self._queue = f'{WORKER_QUEUE_PREFIX}{random_string(12)}'
    else:
        self._queue = None
    self._app = Celery()
    self._app.config_from_object(CELERY_SETTINGS)
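The random name gives each instance a private queue, so work can be routed to exactly this worker. A sketch of how it might be used (some_task is hypothetical; 'celery' is the default queue name):

# Hypothetical routing: send a task to this instance's private queue,
# falling back to the default queue when auto_queue was False.
some_task.apply_async(queue=self._queue or 'celery')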
from config import config
from celery import Celery
import time

##
# This file defines our celery tasks: functions that we can call from our
# route handlers. They run in the background, handling work transparently
# to the user behind the application.
#
# TODO REMOVE THIS FILE

# Initialize Celery
celery = Celery(config['NAME'], broker=config['CELERY_BROKER_URL'])
celery.conf.update(config)


# With bind=True the task receives a `self` argument, which lets us report
# the progress of the celery process via update_state()
@celery.task(bind=True)
def print_words(self):
    # some long task that does work
    for i in range(23):
        print('words' + str(i))
        time.sleep(1)
        # update metadata variables
        self.update_state(state='PROCESSING',
                          meta={'current': i,
                                'status': 'counting ...'})
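The custom PROCESSING state is only useful if the caller reads it back. A minimal polling sketch, assuming a result backend is configured for this app:

result = print_words.delay()
while not result.ready():
    if result.state == 'PROCESSING':
        # The meta dict passed to update_state() comes back as result.info.
        print(result.info.get('current'), result.info.get('status'))
    time.sleep(1)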
"""Central location for celery-specific stuff."""
from celery import Celery
APP = Celery('cosmic-ray-celery4-executor')
APP.conf.CELERY_ACCEPT_CONTENT = ['pickle', 'json']
APP.conf.CELERY_TASK_SERIALIZER = 'pickle'
APP.conf.CELERY_RESULT_SERIALIZER = 'pickle'
APP.conf.CELERY_RESULT_BACKEND = 'amqp://'
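These uppercase names are the old Celery 3 style settings. On Celery 4+ the same configuration would use the lowercase equivalents, e.g.:

APP.conf.update(
    accept_content=['pickle', 'json'],
    task_serializer='pickle',
    result_serializer='pickle',
    result_backend='rpc://',  # the amqp:// result backend was removed in Celery 5
)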
# TODO(dcramer): this is getting heavy, we should find a better way to structure
# this
buffer = get_instance(settings.SENTRY_BUFFER, settings.SENTRY_BUFFER_OPTIONS)
cache = get_instance(settings.SENTRY_CACHE, settings.SENTRY_CACHE_OPTIONS)
quotas = get_instance(settings.SENTRY_QUOTAS, settings.SENTRY_QUOTA_OPTIONS)
nodestore = get_instance(
settings.SENTRY_NODESTORE, settings.SENTRY_NODESTORE_OPTIONS)
ratelimiter = get_instance(
settings.SENTRY_RATELIMITER, settings.SENTRY_RATELIMITER_OPTIONS)
search = get_instance(settings.SENTRY_SEARCH, settings.SENTRY_SEARCH_OPTIONS)
tsdb = get_instance(settings.SENTRY_TSDB, settings.SENTRY_TSDB_OPTIONS)
# Configuration for Celery happens in runner
celery = Celery()
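get_instance itself is not shown in this excerpt. A plausible implementation (an assumption, not necessarily the project's actual helper) resolves a dotted path and instantiates the class with the given options:

from importlib import import_module

def get_instance(path, options):
    # Split 'pkg.module.Class' into a module path and class name, then build it.
    module_path, class_name = path.rsplit('.', 1)
    cls = getattr(import_module(module_path), class_name)
    return cls(**options)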
# coding=UTF8
from __future__ import absolute_import

import os

from celery import Celery
from django.conf import settings

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mydjangoapp.settings")

app = Celery('mydjangoapp')
# Note: assigning CELERY_TIMEZONE here has no effect; the setting belongs in
# the Django settings module that config_from_object() reads below.
CELERY_TIMEZONE = 'UTC'
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
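A quick way to check that the app and task autodiscovery are wired up is the bound debug task from the Celery/Django documentation:

@app.task(bind=True)
def debug_task(self):
    # Printing the request confirms the worker received and executed the task.
    print('Request: {0!r}'.format(self.request))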
def new_celery(worker_name: str):
    # Broker/backend URLs fall back to a local Redis when the env vars are unset.
    return Celery(worker_name,
                  backend=os.getenv('CELERY_BACKEND_URL', 'redis://localhost:6379/0'),
                  broker=os.getenv('CELERY_BROKER_URL', 'redis://localhost:6379/0'))
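Each call returns an independently configured app, so a project can create several named workers; for example (the worker name is illustrative):

emails_app = new_celery('emails')

@emails_app.task
def send_email(recipient):
    # Placeholder body; real delivery logic would go here.
    print('sending to', recipient)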
import logging

import celery
from celery.signals import after_setup_task_logger
from celery.utils.log import get_task_logger  # required by get_task_logger() below
from django.conf import settings
from django.db.models import Q

from yabi.constants import STATUS_ERROR, STATUS_READY, STATUS_COMPLETE, STATUS_EXEC, STATUS_STAGEOUT, STATUS_STAGEIN, STATUS_CLEANING, STATUS_ABORTED
from yabi.constants import MAX_CELERY_TASK_RETRIES
from yabi.yabi.models import DecryptedCredentialNotAvailable
from yabi.yabiengine.models import Job, Task, Syslog
from yabi.yabiengine.enginemodels import EngineWorkflow, EngineJob, EngineTask
from yabi.yabiengine.engine_logging import create_workflow_logger, create_job_logger, create_task_logger, create_logger, YabiDBHandler, YabiContextFilter
from yabi.backend import provisioning

logger = get_task_logger(__name__)

app = celery.Celery('yabi.backend.celerytasks')
app.config_from_object('django.conf:settings')

DYNBE_READY_POLL_INTERVAL = getattr(settings, 'DYNBE_READY_POLL_INTERVAL', 60)
TASK_LIMIT_REACHED_RETRY_INTERVAL = getattr(settings, 'TASK_LIMIT_REACHED_RETRY_INTERVAL', 10)


# Celery uses its own logging setup. All our custom logging setup has to be
# done in this callback.
def setup_logging(*args, **kwargs):
    handler = YabiDBHandler()
    log_filter = YabiContextFilter()
    handler.addFilter(log_filter)
    level = getattr(settings, 'YABIDBHANDLER_LOG_LEVEL', 'DEBUG')
    handler.setLevel(logging.getLevelName(level))
    logger.addHandler(handler)
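The handler is only installed once the callback is attached to the signal imported above; presumably the module wires it up along these lines (the connect call is an assumption based on that import):

# Assumed wiring: run setup_logging whenever Celery configures a task logger.
after_setup_task_logger.connect(setup_logging)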