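# Imports assumed for these test excerpts (module paths follow the
# aiokafka source tree of this era; helpers such as wait_topic,
# add_cleanup and RebalanceListenerForTest come from the test harness):
import asyncio
import re
from unittest import mock

from kafka.protocol.commit import OffsetCommitRequest
from kafka.protocol.fetch import FetchRequest_v0 as FetchRequest

import aiokafka.errors as Errors
from aiokafka.client import AIOKafkaClient
from aiokafka.consumer.fetcher import Fetcher
from aiokafka.consumer.group_coordinator import GroupCoordinator
from aiokafka.consumer.subscription_state import SubscriptionState
from aiokafka.structs import (
    ConsumerRecord, OffsetAndTimestamp, TopicPartition)
from aiokafka.util import ensure_future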
async def test_coordinator_metadata_update_during_rebalance(self):
# Race condition where client.set_topics starts a MetadataUpdate but
# it fails to arrive before the leader performs the assignment.
# First make sure the topics exist.
client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
await client.bootstrap()
await self.wait_topic(client, 'topic1')
await self.wait_topic(client, 'topic2')
await client.close()
client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
await client.bootstrap()
self.add_cleanup(client.close)
subscription = SubscriptionState(loop=self.loop)
client.set_topics(("topic1", ))
subscription.subscribe(topics=set(['topic1']))
coordinator = GroupCoordinator(
client, subscription, loop=self.loop,
group_id='race-rebalance-metadata-update',
heartbeat_interval_ms=20000000)
self.add_cleanup(coordinator.close)
await subscription.wait_for_assignment()
# Check that topic's partitions are properly assigned
self.assertEqual(
subscription.assigned_partitions(),
{TopicPartition("topic1", 0), TopicPartition("topic1", 1)})
_metadata_update = client._metadata_update
with mock.patch.object(client, '_metadata_update') as mocked:
async def _new(*args, **kw):
    # Body assumed (the excerpt is truncated here): delay the metadata
    # update slightly to widen the race window, then delegate to the
    # saved original implementation.
    await asyncio.sleep(0.2, loop=self.loop)
    return await _metadata_update(*args, **kw)
mocked.side_effect = _new
async def test_coordinator__send_req(self):
client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
await client.bootstrap()
self.add_cleanup(client.close)
subscription = SubscriptionState(loop=self.loop)
subscription.subscribe(topics=set(['topic1']))
coordinator = GroupCoordinator(
client, subscription, loop=self.loop,
group_id='test-my-group', session_timeout_ms=6000,
heartbeat_interval_ms=1000)
self.add_cleanup(coordinator.close)
request = OffsetCommitRequest[2](topics=[])
# ensure_coordinator_known() has not been called yet, so sending
# must fail with GroupCoordinatorNotAvailableError.
with self.assertRaises(Errors.GroupCoordinatorNotAvailableError):
await coordinator._send_req(request)
await coordinator.ensure_coordinator_known()
self.assertIsNotNone(coordinator.coordinator_id)
async def test_fetcher__update_fetch_positions(self):
client = AIOKafkaClient(
loop=self.loop,
bootstrap_servers=[])
subscriptions = SubscriptionState(loop=self.loop)
fetcher = Fetcher(client, subscriptions, loop=self.loop)
self.add_cleanup(fetcher.close)
# Disable the background fetch task.
fetcher._fetch_task.cancel()
try:
await fetcher._fetch_task
except asyncio.CancelledError:
pass
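# Park a dummy long-running task in its place so fetcher.close()
# still has something to cancel.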
fetcher._fetch_task = ensure_future(
asyncio.sleep(1000000, loop=self.loop), loop=self.loop)
partition = TopicPartition('test', 0)
offsets = {partition: OffsetAndTimestamp(12, -1)}
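# Stub the broker offset lookup: always return the canned offsets.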
async def _proc_offset_request(node_id, topic_data):
return offsets
def _setup_error_after_data(self):
subscriptions = SubscriptionState(loop=self.loop)
client = AIOKafkaClient(
loop=self.loop,
bootstrap_servers=[])
fetcher = Fetcher(client, subscriptions, loop=self.loop)
tp1 = TopicPartition('some_topic', 0)
tp2 = TopicPartition('some_topic', 1)
subscriptions.subscribe(set(["some_topic"]))
subscriptions.assign_from_subscribed({tp1, tp2})
assignment = subscriptions.subscription.assignment
subscriptions.seek(tp1, 0)
subscriptions.seek(tp2, 0)
# Add some data
messages = [ConsumerRecord(
    topic="some_topic", partition=1, offset=0, timestamp=0,
    # Remaining fields assumed; the excerpt is truncated here.
    timestamp_type=0, key=None, value=b"some", checksum=None,
    serialized_key_size=0, serialized_value_size=4)]
await coordinator.ensure_coordinator_known()
self.assertIsNotNone(coordinator.coordinator_id)
if subscription.subscription.assignment is None:
    await subscription.wait_for_assignment()
self.assertIsNotNone(coordinator.coordinator_id)
self.assertFalse(coordinator.need_rejoin(subscription.subscription))
tp_list = subscription.assigned_partitions()
self.assertEqual(tp_list, set([('topic1', 0), ('topic1', 1),
('topic2', 0), ('topic2', 1)]))
# Check that adding another group member triggers a correct rebalance
client2 = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
await client2.bootstrap()
subscription2 = SubscriptionState(loop=self.loop)
subscription2.subscribe(topics=set(['topic1', 'topic2']))
coordinator2 = GroupCoordinator(
client2, subscription2, loop=self.loop,
session_timeout_ms=10000,
heartbeat_interval_ms=500,
retry_backoff_ms=100)
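# Close the second member on teardown as well (not in the excerpt).
self.add_cleanup(coordinator2.close)
self.add_cleanup(client2.close)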
await asyncio.gather(
subscription.wait_for_assignment(),
subscription2.wait_for_assignment()
)
tp_list = subscription.assigned_partitions()
self.assertEqual(len(tp_list), 2)
tp_list2 = subscription2.assigned_partitions()
self.assertEqual(len(tp_list2), 2)
tp_list |= tp_list2
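# Together the two members should cover all four partitions
# (assertion assumed; the excerpt is truncated here).
self.assertEqual(tp_list, set([('topic1', 0), ('topic1', 1),
                               ('topic2', 0), ('topic2', 1)]))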
async def test_subscribe_pattern(self):
client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
await client.bootstrap()
test_listener = RebalanceListenerForTest()
subscription = SubscriptionState(loop=self.loop)
coordinator = GroupCoordinator(
client, subscription, loop=self.loop,
group_id='subs-pattern-group')
await self.wait_topic(client, 'st-topic1')
await self.wait_topic(client, 'st-topic2')
subscription.subscribe_pattern(
re.compile('st-topic*'), listener=test_listener)
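# An empty topic set makes the client fetch metadata for all topics,
# which the pattern is then matched against.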
client.set_topics([])
await subscription.wait_for_assignment()
self.assertIsNotNone(coordinator.coordinator_id)
self.assertFalse(coordinator.need_rejoin(subscription.subscription))
tp_list = subscription.assigned_partitions()
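# Assertion assumed (the excerpt is truncated here): both pattern
# matched topics should be fully assigned.
self.assertEqual(tp_list, set([('st-topic1', 0), ('st-topic1', 1),
                               ('st-topic2', 0), ('st-topic2', 1)]))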
async def test_coordinator_metadata_change_by_broker(self):
# Issue #108: a misleading metadata change can trigger an extra
# rebalance.
client = AIOKafkaClient(
loop=self.loop, bootstrap_servers=self.hosts)
await client.bootstrap()
await self.wait_topic(client, 'topic1')
await self.wait_topic(client, 'topic2')
client.set_topics(['other_topic'])
await client.force_metadata_update()
subscription = SubscriptionState(loop=self.loop)
coordinator = GroupCoordinator(
client, subscription, loop=self.loop,
group_id='race-rebalance-subscribe-append',
heartbeat_interval_ms=2000000)
subscription.subscribe(topics=set(['topic1']))
await client.set_topics(('topic1', ))
await subscription.wait_for_assignment()
_perform_assignment = coordinator._perform_assignment
with mock.patch.object(coordinator, '_perform_assignment') as mocked:
mocked.side_effect = _perform_assignment
subscription.subscribe(topics=set(['topic2']))
await client.set_topics(('topic2', ))
# Should trigger only one rebalance; with the bug it triggered two:
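# (Continuation assumed; the excerpt is truncated here.)
await subscription.wait_for_assignment()
self.assertEqual(mocked.call_count, 1)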
async def test_failed_group_join(self):
client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
await client.bootstrap()
await self.wait_topic(client, 'topic1')
self.add_cleanup(client.close)
subscription = SubscriptionState(loop=self.loop)
subscription.subscribe(topics=set(['topic1']))
coordinator = GroupCoordinator(
client, subscription, loop=self.loop,
retry_backoff_ms=10)
coordinator._coordination_task.cancel() # disable for test
try:
await coordinator._coordination_task
except asyncio.CancelledError:
pass
coordinator._coordination_task = self.loop.create_task(
asyncio.sleep(0.1, loop=self.loop)
)
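# Pretend coordinator discovery already succeeded (fake node id).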
coordinator.coordinator_id = 15
self.add_cleanup(coordinator.close)
_on_join_leader_mock = mock.Mock()
# Continuation assumed (the excerpt is truncated here): make the
# leader-join hook succeed with a dummy assignment payload.
_on_join_leader_mock.side_effect = asyncio.coroutine(
    lambda resp: b"123")
async def test_proc_fetch_request(self):
client = AIOKafkaClient(
loop=self.loop,
bootstrap_servers=[])
subscriptions = SubscriptionState(loop=self.loop)
fetcher = Fetcher(
client, subscriptions, auto_offset_reset="latest", loop=self.loop)
tp = TopicPartition('test', 0)
tp_info = (tp.topic, [(tp.partition, 4, 100000)])  # partition, offset, max_bytes
req = FetchRequest(
    -1,  # replica_id
    100,  # max_wait_ms
    100,  # min_bytes
    [tp_info])
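# Mock out the network layer so the test needs no real broker.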
client.ready = mock.MagicMock()
client.ready.side_effect = asyncio.coroutine(lambda a: True)
client.force_metadata_update = mock.MagicMock()
client.force_metadata_update.side_effect = asyncio.coroutine(
lambda: False)
client.send = mock.MagicMock()
def subscription_state(loop):
    # Simple factory (likely a pytest fixture in the full source).
    return SubscriptionState(loop=loop)