await asyncio.sleep(0.01, loop=self.loop)
self.assertFalse(task.done())
self.assertEqual(coord_mock.call_count, 1)
self.assertEqual(prepare_mock.call_count, 0)
self.assertEqual(rejoin_mock.call_count, 0)
self.assertEqual(autocommit_mock.call_count, 1)
# CASE: with user assignment routine should not react to request_rejoin
coordinator.request_rejoin()
await asyncio.sleep(0.01, loop=self.loop)
self.assertFalse(task.done())
self.assertEqual(coord_mock.call_count, 1)
self.assertEqual(prepare_mock.call_count, 0)
self.assertEqual(rejoin_mock.call_count, 0)
self.assertEqual(autocommit_mock.call_count, 1)
coordinator._rejoin_needed_fut = create_future(loop=self.loop)
# CASE: Changing subscription should propagate a rebalance
subscription.unsubscribe()
subscription.subscribe(set(["topic1"]))
await asyncio.sleep(0.01, loop=self.loop)
self.assertFalse(task.done())
self.assertEqual(coord_mock.call_count, 2)
self.assertEqual(prepare_mock.call_count, 1)
self.assertEqual(rejoin_mock.call_count, 1)
self.assertEqual(autocommit_mock.call_count, 2)
# CASE: If rejoin fails, we do it again without autocommit
rejoin_ok = False
coordinator.request_rejoin()
await asyncio.sleep(0.01, loop=self.loop)
self.assertFalse(task.done())
async def do_rejoin(subsc):
    if rejoin_ok:
        subscription.assign_from_subscribed({tp})
        coordinator._rejoin_needed_fut = create_future(loop=self.loop)
        return True
    else:
        await asyncio.sleep(0.1, loop=self.loop)
        return False
rejoin_mock.side_effect = do_rejoin
coordinator._maybe_do_autocommit = autocommit_mock = mock.Mock()
autocommit_mock.side_effect = asyncio.coroutine(lambda assign: None)
coordinator._start_heartbeat_task = mock.Mock()
client.force_metadata_update = metadata_mock = mock.Mock()
# Metadata updates should resolve immediately: hand back an
# already-completed future from the mock
done_fut = create_future(loop=self.loop)
done_fut.set_result(None)
metadata_mock.side_effect = lambda: done_fut
# CASE: coordination should stop and wait if subscription is not
# present
task = start_coordination()
await asyncio.sleep(0.01, loop=self.loop)
self.assertFalse(task.done())
self.assertEqual(coord_mock.call_count, 0)
# CASE: user assignment should skip rebalance calls
subscription.assign_from_user({tp})
await asyncio.sleep(0.01, loop=self.loop)
self.assertFalse(task.done())
self.assertEqual(coord_mock.call_count, 1)
self.assertEqual(prepare_mock.call_count, 0)
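# Hedged sketch of the signal the tests above drive: `request_rejoin()` is
# assumed to resolve the one-shot `_rejoin_needed_fut`, which the
# coordination routine awaits and then re-arms with create_future() after
# the rebalance completes (a sketch, not the library's exact code):
def request_rejoin(self):
    if not self._rejoin_needed_fut.done():
        self._rejoin_needed_fut.set_result(None)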
def fatal_error(self, exc):
    self._transition_to(TransactionState.FATAL_ERROR)
    self._txn_partitions.clear()
    self._txn_consumer_group = None
    self._pending_txn_partitions.clear()
    for _, _, fut in self._pending_txn_offsets:
        fut.set_exception(exc)
    self._pending_txn_offsets.clear()
    # An abortable error may already be set; just override it
    if self._transaction_waiter.done():
        self._transaction_waiter = create_future(loop=self._loop)
    self._transaction_waiter.set_exception(exc)
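# Why fatal_error() swaps in a fresh future before setting the exception:
# an asyncio.Future accepts set_result()/set_exception() exactly once and
# raises InvalidStateError on a second call. A minimal standalone
# illustration (stdlib only, not aiokafka code):
import asyncio

loop = asyncio.new_event_loop()
fut = loop.create_future()
fut.set_exception(ValueError("abortable error"))
try:
    fut.set_exception(RuntimeError("fatal error"))  # second set is illegal
except asyncio.InvalidStateError:
    fut.exception()                # consume the old exception
    fut = loop.create_future()     # replace the future, as fatal_error() does
    fut.set_exception(RuntimeError("fatal error"))
fut.exception()  # consume so GC does not log "exception was never retrieved"
loop.close()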
def __init__(self, transactional_id, transaction_timeout_ms, *, loop):
    self.transactional_id = transactional_id
    self.transaction_timeout_ms = transaction_timeout_ms
    self.state = TransactionState.UNINITIALIZED
    self._pid_and_epoch = PidAndEpoch(NO_PRODUCER_ID, NO_PRODUCER_EPOCH)
    self._pid_waiter = create_future(loop)
    self._sequence_numbers = defaultdict(lambda: 0)
    self._transaction_waiter = None
    self._task_waiter = None
    self._txn_partitions = set()
    self._pending_txn_partitions = set()
    self._txn_consumer_group = None
    self._pending_txn_offsets = deque()
    self._loop = loop
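# Hedged sketch of how `_pid_waiter` is consumed; `set_pid_and_epoch` and
# `wait_for_pid` are assumed method names, not shown in the snippet above:
def set_pid_and_epoch(self, pid, epoch):
    self._pid_and_epoch = PidAndEpoch(pid, epoch)
    if not self._pid_waiter.done():
        self._pid_waiter.set_result(None)

async def wait_for_pid(self):
    # Resolves once an InitProducerId response delivers a producer id/epoch
    await asyncio.shield(self._pid_waiter)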
def __init__(self, tp, builder, ttl, loop):
    self._builder = builder
    self._tp = tp
    self._loop = loop
    self._ttl = ttl
    self._ctime = loop.time()
    # Waiters
    # Set when messages are delivered to Kafka based on ACK setting
    self.future = create_future(loop)
    self._msg_futures = []
    # Set when sender takes this batch
    self._drain_waiter = create_future(loop=loop)
    self._retry_count = 0
def wait_for_assignment(self):
    """ Wait for the next assignment. Be careful: this always waits for the
    next assignment, even if the current one is still active.
    """
    fut = create_future(loop=self._loop)
    self._assignment_waiters.append(fut)
    return fut
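# Usage sketch (the surrounding consumer code is assumed, not part of the
# snippet): the returned future resolves only on the *next* rebalance, so
# grab it before triggering whatever should cause the reassignment.
async def await_next_assignment(subscription_state):
    fut = subscription_state.wait_for_assignment()
    await fut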
def reset_drain(self):
    """Reset the drain waiter before another retry attempt"""
    assert self._drain_waiter.done()
    self._drain_waiter = create_future(self._loop)
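# Hedged sketch of the handshake `_drain_waiter` implements: the sender
# resolves it when it takes the batch, and reset_drain() re-arms it before a
# retry. Method names here are assumed, not shown above:
def drain_ready(self):
    if not self._drain_waiter.done():
        self._drain_waiter.set_result(None)

async def wait_drain(self, timeout=None):
    await asyncio.wait(
        [self._drain_waiter], timeout=timeout, loop=self._loop)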
self._connections_max_idle_ms = connections_max_idle_ms
self._sasl_mechanism = sasl_mechanism
self._sasl_plain_username = sasl_plain_username
self._sasl_plain_password = sasl_plain_password
self._sasl_kerberos_service_name = sasl_kerberos_service_name
self._sasl_kerberos_domain_name = sasl_kerberos_domain_name
self.cluster = ClusterMetadata(metadata_max_age_ms=metadata_max_age_ms)
self._topics = set() # empty set will fetch all topic metadata
self._conns = {}
self._loop = loop
self._sync_task = None
self._md_update_fut = None
self._md_update_waiter = create_future(loop=self._loop)
self._get_conn_lock = asyncio.Lock(loop=loop)
await asyncio.wait(
    [self._md_update_waiter],
    timeout=self._metadata_max_age_ms / 1000,
    loop=self._loop)
topics = self._topics
if self._md_update_fut is None:
    self._md_update_fut = create_future(loop=self._loop)
ret = await self._metadata_update(self.cluster, topics)
# If the list of topics changed during the metadata update we must
# update it again right away.
if topics != self._topics:
    continue
# Earlier this waiter was set before sending metadata_request,
# but that was to avoid topic list changes being unnoticed, which
# is handled explicitly now.
self._md_update_waiter = create_future(loop=self._loop)
self._md_update_fut.set_result(ret)
self._md_update_fut = None
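# Hedged sketch of the caller side of this loop: forcing an update just
# resolves `_md_update_waiter`, and every caller in the same round shares
# `_md_update_fut` (shield keeps one caller's cancellation from killing the
# shared round). A sketch, not necessarily the library's exact code:
def force_metadata_update(self):
    if self._md_update_fut is None:
        if not self._md_update_waiter.done():
            self._md_update_waiter.set_result(None)
        self._md_update_fut = create_future(loop=self._loop)
    return asyncio.shield(self._md_update_fut, loop=self._loop)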
def add_offsets_to_txn(self, offsets, group_id):
    assert self.is_in_transaction()
    assert self.transactional_id
    fut = create_future(loop=self._loop)
    self._pending_txn_offsets.append(
        (group_id, offsets, fut)
    )
    self.notify_task_waiter()
    return fut
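# Usage sketch: the caller awaits the returned future, which the background
# task resolves (or fails via fatal_error() above) once the offset-commit
# request completes; `txn_manager` and `offsets` are illustrative names:
async def send_offsets_to_transaction(txn_manager, offsets, group_id):
    fut = txn_manager.add_offsets_to_txn(offsets, group_id)
    await fut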