async def test_coordinator__maybe_do_autocommit(self):
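# Exercise _maybe_do_autocommit in isolation: the background coordination
# task is cancelled below and _do_commit_offsets is replaced with a mock.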
client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
subscription = SubscriptionState(loop=self.loop)
tp = TopicPartition("topic1", 0)
coordinator = GroupCoordinator(
client, subscription, loop=self.loop,
heartbeat_interval_ms=20000, auto_commit_interval_ms=1000,
retry_backoff_ms=50)
coordinator._coordination_task.cancel() # disable for test
try:
await coordinator._coordination_task
except asyncio.CancelledError:
pass
coordinator._coordination_task = self.loop.create_task(
asyncio.sleep(0.1, loop=self.loop)
)
self.add_cleanup(coordinator.close)
coordinator._do_commit_offsets = mocked = mock.Mock()
loop = self.loop
async def test_coordinator__maybe_refresh_commit_offsets(self):
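# Same setup as the autocommit test, but _do_fetch_commit_offsets is mocked
# so the offset refresh path can be driven with a prepared result.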
client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
subscription = SubscriptionState(loop=self.loop)
tp = TopicPartition("topic1", 0)
coordinator = GroupCoordinator(
client, subscription, loop=self.loop,
heartbeat_interval_ms=20000)
coordinator._coordination_task.cancel() # disable for test
try:
await coordinator._coordination_task
except asyncio.CancelledError:
pass
coordinator._coordination_task = self.loop.create_task(
asyncio.sleep(0.1, loop=self.loop)
)
self.add_cleanup(coordinator.close)
coordinator._do_fetch_commit_offsets = mocked = mock.Mock()
fetched_offsets = {tp: OffsetAndMetadata(12, "")}
test_self = self
async def test_coordinator_subscription_replace_on_rebalance(self):
# See issue #88
client = AIOKafkaClient(
metadata_max_age_ms=2000, loop=self.loop,
bootstrap_servers=self.hosts)
await client.bootstrap()
await self.wait_topic(client, 'topic1')
await self.wait_topic(client, 'topic2')
subscription = SubscriptionState(loop=self.loop)
subscription.subscribe(topics=set(['topic1']))
client.set_topics(('topic1', ))
coordinator = GroupCoordinator(
client, subscription, loop=self.loop,
group_id='race-rebalance-subscribe-replace',
heartbeat_interval_ms=1000)
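# Keep a reference to the real assignment callback; the patched version
# below delegates to it and then swaps the subscription mid-rebalance.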
_perform_assignment = coordinator._perform_assignment
with mock.patch.object(coordinator, '_perform_assignment') as mocked:
def _new(*args, **kw):
# Change the subscription to a different topic before we finish the
# rebalance
res = _perform_assignment(*args, **kw)
if subscription.subscription.topics == set(["topic1"]):
subscription.subscribe(topics=set(['topic2']))
client.set_topics(('topic2', ))
return res
mocked.side_effect = _new
async def test_coordinator__heartbeat_routine(self):
client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
subscription = SubscriptionState(loop=self.loop)
subscription.subscribe(topics=set(['topic1']))
coordinator = GroupCoordinator(
client, subscription, loop=self.loop,
heartbeat_interval_ms=100,
session_timeout_ms=300,
retry_backoff_ms=50)
coordinator._coordination_task.cancel() # disable for test
try:
await coordinator._coordination_task
except asyncio.CancelledError:
pass
coordinator._coordination_task = self.loop.create_task(
asyncio.sleep(0.1, loop=self.loop)
)
self.add_cleanup(coordinator.close)
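# Stub the heartbeat request itself and pretend a coordinator is already
# known, so only the routine's scheduling behaviour is exercised.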
coordinator._do_heartbeat = mocked = mock.Mock()
coordinator.coordinator_id = 15
async def test_coordinator__send_req(self):
client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
await client.bootstrap()
self.add_cleanup(client.close)
subscription = SubscriptionState(loop=self.loop)
subscription.subscribe(topics=set(['topic1']))
coordinator = GroupCoordinator(
client, subscription, loop=self.loop,
group_id='test-my-group', session_timeout_ms=6000,
heartbeat_interval_ms=1000)
self.add_cleanup(coordinator.close)
request = OffsetCommitRequest[2](topics=[])
# We have not called ensure_coordinator_known yet
with self.assertRaises(Errors.GroupCoordinatorNotAvailableError):
await coordinator._send_req(request)
await coordinator.ensure_coordinator_known()
self.assertIsNotNone(coordinator.coordinator_id)
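# Patch the client's send() so the response seen by _send_req can be
# controlled.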
with mock.patch.object(client, "send") as mocked:
async def mock_send(*args, **kw):
async def test_subscribe_pattern(self):
client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
await client.bootstrap()
test_listener = RebalanceListenerForTest()
subscription = SubscriptionState(loop=self.loop)
coordinator = GroupCoordinator(
client, subscription, loop=self.loop,
group_id='subs-pattern-group')
await self.wait_topic(client, 'st-topic1')
await self.wait_topic(client, 'st-topic2')
subscription.subscribe_pattern(
re.compile('st-topic*'), listener=test_listener)
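# An empty topic set presumably makes the client fetch metadata for all
# topics, so the pattern can be matched against them.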
client.set_topics([])
await subscription.wait_for_assignment()
self.assertIsNotNone(coordinator.coordinator_id)
self.assertFalse(coordinator.need_rejoin(subscription.subscription))
tp_list = subscription.assigned_partitions()
assigned = set([('st-topic1', 0), ('st-topic1', 1),
async def test_coordinator_ensure_active_group_on_expired_membership(self):
# Do not fail the group join if group membership has expired (i.e. the
# autocommit on join prepare fails)
client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
await client.bootstrap()
await self.wait_topic(client, 'topic1')
subscription = SubscriptionState(loop=self.loop)
subscription.subscribe(topics=set(['topic1']))
coordinator = GroupCoordinator(
client, subscription, loop=self.loop,
group_id='test-offsets-group', session_timeout_ms=6000,
heartbeat_interval_ms=1000)
await subscription.wait_for_assignment()
assignment = subscription.subscription.assignment
# Make sure we have something to commit before rejoining
tp = TopicPartition('topic1', 0)
subscription.seek(tp, 0)
offsets = assignment.all_consumed_offsets()
self.assertTrue(offsets) # Not empty
# During OffsetCommit an UnknownMemberIdError is raised
_orig_send_req = coordinator._send_req
resp_topics = [("topic1", [(0, Errors.UnknownMemberIdError.errno)])]
with mock.patch.object(coordinator, "_send_req") as mocked:
async def test_commit_failed_scenarios(self):
client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
await client.bootstrap()
await self.wait_topic(client, 'topic1')
subscription = SubscriptionState(loop=self.loop)
subscription.subscribe(topics=set(['topic1']))
coordinator = GroupCoordinator(
client, subscription, loop=self.loop,
group_id='test-offsets-group')
await subscription.wait_for_assignment()
assignment = subscription.subscription.assignment
offsets = {TopicPartition('topic1', 0): OffsetAndMetadata(1, '')}
await coordinator.commit_offsets(assignment, offsets)
_orig_send_req = coordinator._send_req
with mock.patch.object(coordinator, "_send_req") as mocked:
commit_error = None
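# commit_error is set per scenario; the patched _send_req below injects
# it only into OffsetCommit responses.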
async def mock_send_req(request):
if request.API_KEY == OffsetCommitRequest[0].API_KEY:
if isinstance(commit_error, list):
async def test_coordinator_metadata_change_by_broker(self):
# Issue #108. A misleading metadata change can trigger an additional
# rebalance
client = AIOKafkaClient(
loop=self.loop, bootstrap_servers=self.hosts)
await client.bootstrap()
await self.wait_topic(client, 'topic1')
await self.wait_topic(client, 'topic2')
client.set_topics(['other_topic'])
await client.force_metadata_update()
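# Prime the client metadata with an unrelated topic before subscribing.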
subscription = SubscriptionState(loop=self.loop)
coordinator = GroupCoordinator(
client, subscription, loop=self.loop,
group_id='race-rebalance-subscribe-append',
heartbeat_interval_ms=2000000)
subscription.subscribe(topics=set(['topic1']))
await client.set_topics(('topic1', ))
await subscription.wait_for_assignment()
_perform_assignment = coordinator._perform_assignment
with mock.patch.object(coordinator, '_perform_assignment') as mocked:
mocked.side_effect = _perform_assignment
subscription.subscribe(topics=set(['topic2']))
await client.set_topics(('topic2', ))
# Should trigger only 1 rebalance, but with the bug it triggers 2:
# the metadata snapshot will change
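# The fragment below appears to come from the consumer's start(): trailing
# fetcher constructor arguments, then the GroupCoordinator that is built
# when a group_id is configured.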
self._client, self._subscription, loop=self._loop,
key_deserializer=self._key_deserializer,
value_deserializer=self._value_deserializer,
fetch_min_bytes=self._fetch_min_bytes,
fetch_max_bytes=self._fetch_max_bytes,
fetch_max_wait_ms=self._fetch_max_wait_ms,
max_partition_fetch_bytes=self._max_partition_fetch_bytes,
check_crcs=self._check_crcs,
fetcher_timeout=self._consumer_timeout,
retry_backoff_ms=self._retry_backoff_ms,
auto_offset_reset=self._auto_offset_reset,
isolation_level=self._isolation_level)
if self._group_id is not None:
# Using the group coordinator for automatic partition assignment
self._coordinator = GroupCoordinator(
self._client, self._subscription, loop=self._loop,
group_id=self._group_id,
heartbeat_interval_ms=self._heartbeat_interval_ms,
session_timeout_ms=self._session_timeout_ms,
retry_backoff_ms=self._retry_backoff_ms,
enable_auto_commit=self._enable_auto_commit,
auto_commit_interval_ms=self._auto_commit_interval_ms,
assignors=self._partition_assignment_strategy,
exclude_internal_topics=self._exclude_internal_topics,
rebalance_timeout_ms=self._rebalance_timeout_ms,
max_poll_interval_ms=self._max_poll_interval_ms
)
if self._subscription.subscription is not None:
if self._subscription.partitions_auto_assigned():
# Either `topics` was passed to the constructor or `subscribe`
# was called before `start`