Secure your code as it's written. Use Snyk Code to scan source code in minutes — no build needed — and fix issues immediately.
def test_store_result(self):
    """Storing a SUCCESS result must persist the full request metadata."""
    backend = AMQPBackend(self.app)
    task_id = uuid()
    ctx = Context(
        args=(1, 2, 3),
        kwargs={'foo': 'bar'},
        task_name='mytask',
        retries=2,
        hostname='celery@worker_1',
        delivery_info={'routing_key': 'celery'},
    )
    backend.store_result(
        task_id, {'fizz': 'buzz'}, states.SUCCESS, request=ctx)
    # The stored meta must round-trip every field taken from the request.
    expected = {
        'args': [1, 2, 3],
        'children': [],
        'kwargs': {'foo': 'bar'},
        'name': 'mytask',
        'queue': 'celery',
        'result': {'fizz': 'buzz'},
        'retries': 2,
        'status': 'SUCCESS',
        'task_id': task_id,
        'traceback': None,
        'worker': 'celery@worker_1',
    }
    assert backend.get_task_meta(task_id) == expected
def test_mark_as_done(self):
    """A task marked done must report SUCCESS status and its result value.

    Fixes: the final ``assertTrue(tb.get_result(tid), 42)`` was a vacuous
    assertion — ``assertTrue`` takes a message as its second argument, so
    42 was never compared; it also merely duplicated the preceding
    ``assertEquals``.  The deprecated ``assertEquals`` alias is replaced
    by ``assertEqual``.
    """
    tb = get_tyrant_or_None()
    if not tb:
        # Backend server not available; nothing to test against.
        return
    tid = gen_unique_id()
    # Before completion: pending, no result.
    self.assertFalse(tb.is_successful(tid))
    self.assertEqual(tb.get_status(tid), states.PENDING)
    self.assertIsNone(tb.get_result(tid))
    tb.mark_as_done(tid, 42)
    # After completion: successful, SUCCESS status, stored result.
    self.assertTrue(tb.is_successful(tid))
    self.assertEqual(tb.get_status(tid), states.SUCCESS)
    self.assertEqual(tb.get_result(tid), 42)
def test_task_states(self):
    """Drive the event stream and verify each task state transition."""
    stream = ev_task_states(State())

    def check(expected_state, stamp):
        # Shared assertions for one transition: state label, the
        # per-state timestamp attribute, and the event clock.
        assert task.state == expected_state
        assert stamp
        assert task.timestamp == stamp

    # RECEIVED
    next(stream)
    assert stream.tid in stream.state.tasks
    task = stream.state.tasks[stream.tid]
    check(states.RECEIVED, task.received)
    assert task.worker.hostname == 'utest1'

    # STARTED
    next(stream)
    assert stream.state.workers['utest1'].alive
    check(states.STARTED, task.started)
    assert task.worker.hostname == 'utest1'

    # REVOKED
    next(stream)
    check(states.REVOKED, task.revoked)
    assert task.worker.hostname == 'utest1'

    # RETRY
    next(stream)
    check(states.RETRY, task.retried)
# Fan the queued task tuples out to Celery over a process pool, then
# record the resulting AsyncResults in the executor's bookkeeping.
# NOTE(review): this is the interior of a larger function/test method —
# its `def` is outside this view.
send_pool = Pool(processes=num_processes)
key_and_async_results = send_pool.map(
celery_executor.send_task_to_executor,
task_tuples_to_send,
chunksize=chunksize)
# All sends submitted; wait for the pool workers to finish.
send_pool.close()
send_pool.join()
for task_instance_key, _, result in key_and_async_results:
# Only pops when enqueued successfully, otherwise keep it
# and expect scheduler loop to deal with it.
result.backend = cached_celery_backend
executor.running.add(task_instance_key)
executor.tasks[task_instance_key] = result
executor.last_state[task_instance_key] = celery_states.PENDING
# Seed two synthetic running keys, then end() must drain them into the
# event buffer with their terminal states and drop all bookkeeping.
executor.running.add(('success', 'fake_simple_ti', execute_date, 0))
executor.running.add(('fail', 'fake_simple_ti', execute_date, 0))
executor.end(synchronous=True)
self.assertEqual(executor.event_buffer[('success', 'fake_simple_ti', execute_date, 0)], State.SUCCESS)
self.assertEqual(executor.event_buffer[('fail', 'fake_simple_ti', execute_date, 0)], State.FAILED)
self.assertNotIn('success', executor.tasks)
self.assertNotIn('fail', executor.tasks)
self.assertNotIn('success', executor.last_state)
self.assertNotIn('fail', executor.last_state)
def __init__(self, task_name, task_id, args, kwargs, task=None):
    """Wrap a single task invocation.

    :param task_name: registered name of the task.
    :param task_id: unique id of this invocation.
    :param args: positional arguments for the task.
    :param kwargs: keyword arguments for the task.
    :param task: optional task object; looked up in the ``tasks``
        registry by name when falsy.
    """
    self.task_name = task_name
    self.task_id = task_id
    self.args = args
    self.kwargs = kwargs
    # Falsy `task` falls back to the registry lookup (original uses `or`).
    self.task = task or tasks[self.task_name]
    self.status = states.PENDING
    self.strtb = None
    # Dispatch table mapping a terminal state to its handler method.
    self._trace_handlers = {
        states.FAILURE: self.handle_failure,
        states.RETRY: self.handle_retry,
        states.SUCCESS: self.handle_success,
    }
def __init__(self,
             user_id=None,
             status=celery.states.PENDING,
             name=None,
             queue_result_key=None,
             children=None,
             public=False,
             # NOTE(review): mutable default argument — this dict is shared
             # across all calls that omit `parameters`; should use
             # `parameters=None` with an in-body fallback like `children`.
             parameters={},
             recurrent=False,
             recurrent_parent_id=None,
             created=None,
             store=False,
             persistent_id=None):
    # Avoid the shared-mutable-default trap for the children list.
    if children is None:
        children = []
    self.user_id = user_id
    # NOTE(review): body continues beyond this view — the `try` block
    # below is truncated here.
    if not self.user_id:
        try:
def _is_to_show_result(self, state, show_success, show_error):
    """Decide whether a result in ``state`` should be shown.

    Errors are shown only when ``show_error`` is set; successes only
    when ``show_success`` is set.  Short-circuit semantics of the
    original expression are preserved exactly.
    """
    failed_and_shown = state == states.FAILURE and show_error
    succeeded_and_shown = state == states.SUCCESS and show_success
    return failed_and_shown or succeeded_and_shown
def update_task_state(self, key: TaskInstanceKeyType, state: str) -> None:
    """Update the tracked state of a single task.

    On a state change, SUCCESS marks the task successful while FAILURE
    and REVOKED both mark it failed; in all three terminal cases the
    task is dropped from the bookkeeping dicts.  Any other new state is
    logged and recorded for the next sync.

    Fixes: the FAILURE and REVOKED branches were byte-identical
    duplicates; they are merged into one membership test.

    :param key: identifier of the task instance.
    :param state: new Celery state string.
    """
    # noinspection PyBroadException
    try:
        if self.last_state[key] != state:
            if state == celery_states.SUCCESS:
                self.success(key)
                del self.tasks[key]
                del self.last_state[key]
            elif state in (celery_states.FAILURE, celery_states.REVOKED):
                # Both terminal failure states are handled identically.
                self.fail(key)
                del self.tasks[key]
                del self.last_state[key]
            else:
                self.log.info("Unexpected state: %s", state)
                self.last_state[key] = state
    except Exception:  # pylint: disable=broad-except
        # Deliberate best-effort: a sync error must not kill the executor.
        self.log.exception("Error syncing the Celery executor, ignoring it.")
def create_base(self):
    """Set up providers, storage and task/dag references for this executor.

    Loads the task row by ``self.id`` (with its DAG eagerly joined) and
    raises if it does not exist.  Side effects: may revoke the currently
    running Celery task, and binds several providers to ``self``.
    """
    self.info('create_base')
    if app.current_task:
        # NOTE(review): marks the current Celery task SUCCESS and then
        # revokes/terminates it — presumably to detach this work from the
        # worker slot; confirm this ordering is intentional.
        app.current_task.update_state(state=states.SUCCESS)
        app.control.revoke(app.current_task.request.id, terminate=True)
    self.provider = TaskProvider(self.session)
    self.library_provider = DagLibraryProvider(self.session)
    self.storage = Storage(self.session)
    # Eager inner join so the DAG relation is loaded with the task row.
    self.task = self.provider.by_id(
        self.id, joinedload(Task.dag_rel, innerjoin=True)
    )
    if not self.task:
        raise Exception(f'task with id = {self.id} is not found')
    self.dag = self.task.dag_rel
    self.executor = None
    self.hostname = socket.gethostname()
def _unpack_chord_result(self, tup, decode,
                         EXCEPTION_STATES=states.EXCEPTION_STATES,
                         PROPAGATE_STATES=states.PROPAGATE_STATES):
    """Decode one chord dependency result tuple.

    Exceptional results are converted back to Python exception objects;
    states that propagate raise :class:`ChordError` naming the failed
    dependency, otherwise the plain return value is handed back.
    """
    _, task_id, state, result = decode(tup)
    if state in EXCEPTION_STATES:
        result = self.exception_to_python(result)
    if state in PROPAGATE_STATES:
        raise ChordError(
            'Dependency {0} raised {1!r}'.format(task_id, result))
    return result