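# Tests for arq's serialize_result helper (importable from arq.jobs in recent
# releases; adjust the import if your version differs).
from arq.jobs import serialize_result

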
async def test_custom_serializer():
    class Foobar:
        def __getstate__(self):
            raise TypeError("this doesn't pickle")

    def custom_serializer(x):
        return b'0123456789'

    r1 = serialize_result('foobar', (1,), {}, 1, 123, True, Foobar(), 123, 123, 'testing', serializer=custom_serializer)
    assert r1 == b'0123456789'
    r2 = serialize_result(
        'foobar', (Foobar(),), {}, 1, 123, True, Foobar(), 123, 123, 'testing', serializer=custom_serializer
    )
    assert r2 == b'0123456789'


async def test_cant_unpickle_at_all():
    class Foobar:
        def __getstate__(self):
            raise TypeError("this doesn't pickle")

    r1 = serialize_result('foobar', (1,), {}, 1, 123, True, Foobar(), 123, 123, 'testing')
    assert isinstance(r1, bytes)
    r2 = serialize_result('foobar', (Foobar(),), {}, 1, 123, True, Foobar(), 123, 123, 'testing')
    assert r2 is None
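

# The lines below appear to be excerpted from the body of arq's Worker.run_job method,
# with indentation restored. Names such as function, job_id, args, kwargs, enqueue_time_ms,
# start_ms, incr_score, result_str, no_result, retry_key_prefix, JobExecutionFailed and
# timestamp_ms are defined earlier in the method or elsewhere in the arq.worker module
# and are not part of this excerpt.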
if hasattr(function, 'next_run'):
    # cron_job
    ref = function_name
else:
    ref = f'{job_id}:{function_name}'

if enqueue_job_try and enqueue_job_try > job_try:
    job_try = enqueue_job_try
    await self.pool.setex(retry_key_prefix + job_id, 88400, str(job_try))

max_tries = self.max_tries if function.max_tries is None else function.max_tries
if job_try > max_tries:
    t = (timestamp_ms() - enqueue_time_ms) / 1000
    logger.warning('%6.2fs ! %s max retries %d exceeded', t, ref, max_tries)
    self.jobs_failed += 1
    result_data = serialize_result(
        function_name,
        args,
        kwargs,
        job_try,
        enqueue_time_ms,
        False,
        JobExecutionFailed(f'max {max_tries} retries exceeded'),
        start_ms,
        timestamp_ms(),
        ref,
        serializer=self.job_serializer,
    )
    return await asyncio.shield(self.abort_job(job_id, result_data))

result = no_result
exc_extra = None
success = False
try:
    ...  # running the job coroutine, plus retry/cancellation handling, is elided in this excerpt
except Exception as e:
    finished_ms = timestamp_ms()
    t = (finished_ms - start_ms) / 1000
    logger.exception(
        '%6.2fs ! %s failed, %s: %s', t, ref, e.__class__.__name__, e, extra={'extra': exc_extra}
    )
    result = e
    finish = True
    self.jobs_failed += 1
else:
    success = True
    finished_ms = timestamp_ms()
    logger.info('%6.2fs ← %s ● %s', (finished_ms - start_ms) / 1000, ref, result_str)
    finish = True
    self.jobs_complete += 1

result_timeout_s = self.keep_result_s if function.keep_result_s is None else function.keep_result_s
result_data = None
if result is not no_result and result_timeout_s > 0:
    result_data = serialize_result(
        function_name,
        args,
        kwargs,
        job_try,
        enqueue_time_ms,
        success,
        result,
        start_ms,
        finished_ms,
        ref,
        serializer=self.job_serializer,
    )
await asyncio.shield(self.finish_job(job_id, finish, result_data, result_timeout_s, incr_score))
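

# job_failed appears to be a local helper defined in the same method; based on arq's
# worker it is used when the raw job data has expired or cannot be deserialized, but
# its call sites are not part of this excerpt.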
async def job_failed(exc: Exception):
    self.jobs_failed += 1
    result_data_ = serialize_result(
        function=function_name,
        args=args,
        kwargs=kwargs,
        job_try=job_try,
        enqueue_time_ms=enqueue_time_ms,
        success=False,
        result=exc,
        start_ms=start_ms,
        finished_ms=timestamp_ms(),
        ref=f'{job_id}:{function_name}',
        serializer=self.job_serializer,
    )
    await asyncio.shield(self.abort_job(job_id, result_data_))
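

# How the serializer argument above is supplied in practice: a minimal sketch using
# arq's documented job_serializer / job_deserializer settings (the worker passes its
# job_serializer through to serialize_result as serializer=self.job_serializer).
# Everything except the arq imports - the pickle-based serializer functions, the task
# and the settings values - is illustrative, not taken from the excerpt above.
import pickle

from arq.connections import RedisSettings


def serialize(obj):
    # any callable returning bytes works here, e.g. msgpack.packb or a custom encoder
    return pickle.dumps(obj)


def deserialize(data):
    return pickle.loads(data)


async def download_content(ctx, url):
    # placeholder task so the worker has something to run
    return f'downloaded {url}'


class WorkerSettings:
    functions = [download_content]
    redis_settings = RedisSettings()
    job_serializer = serialize
    job_deserializer = deserialize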