How to use the aiokafka.structs.TopicPartition class in aiokafka

To help you get started, we’ve selected a few aiokafka code examples based on popular ways TopicPartition is used in public projects.
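
Before working through the project code below, here is a minimal, self-contained sketch of the basic pattern: build a TopicPartition, assign it to a consumer, and seek to an explicit offset. The broker address, topic name, and offset are placeholder assumptions, and the sketch targets a recent aiokafka release where the loop argument is no longer required.

import asyncio

from aiokafka import AIOKafkaConsumer
from aiokafka.structs import TopicPartition


async def read_from_offset():
    # Assumed broker address and topic name -- adjust for your cluster.
    consumer = AIOKafkaConsumer(bootstrap_servers="localhost:9092")
    await consumer.start()
    try:
        # TopicPartition is a (topic, partition) named tuple.
        tp = TopicPartition("my-topic", 0)
        # Manual assignment: no consumer group rebalance is involved.
        consumer.assign([tp])
        # Read from offset 42 instead of the committed position.
        consumer.seek(tp, 42)
        msg = await consumer.getone()
        print(msg.topic, msg.partition, msg.offset, msg.value)
    finally:
        await consumer.stop()


asyncio.run(read_from_offset())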


github aio-libs / aiokafka / tests / test_coordinator.py
async def test_coordinator__maybe_do_autocommit(self):
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        subscription = SubscriptionState(loop=self.loop)
        tp = TopicPartition("topic1", 0)
        coordinator = GroupCoordinator(
            client, subscription, loop=self.loop,
            heartbeat_interval_ms=20000, auto_commit_interval_ms=1000,
            retry_backoff_ms=50)
        coordinator._coordination_task.cancel()  # disable for test
        try:
            await coordinator._coordination_task
        except asyncio.CancelledError:
            pass
        coordinator._coordination_task = self.loop.create_task(
            asyncio.sleep(0.1, loop=self.loop)
        )
        self.add_cleanup(coordinator.close)

        coordinator._do_commit_offsets = mocked = mock.Mock()
        loop = self.loop
github aio-libs / aiokafka / tests / test_fetcher.py
client = AIOKafkaClient(
            loop=self.loop,
            bootstrap_servers=[])
        subscriptions = SubscriptionState(loop=self.loop)
        fetcher = Fetcher(client, subscriptions, loop=self.loop)
        self.add_cleanup(fetcher.close)
        # Disable background task
        fetcher._fetch_task.cancel()
        try:
            await fetcher._fetch_task
        except asyncio.CancelledError:
            pass
        fetcher._fetch_task = ensure_future(
            asyncio.sleep(1000000, loop=self.loop), loop=self.loop)

        partition = TopicPartition('test', 0)
        offsets = {partition: OffsetAndTimestamp(12, -1)}

        async def _proc_offset_request(node_id, topic_data):
            return offsets

        fetcher._proc_offset_request = mock.Mock()
        fetcher._proc_offset_request.side_effect = _proc_offset_request

        def reset_assignment():
            subscriptions.assign_from_user({partition})
            assignment = subscriptions.subscription.assignment
            tp_state = assignment.state_value(partition)
            return assignment, tp_state
        assignment, tp_state = reset_assignment()

        self.assertIsNone(tp_state._position)
github aio-libs / aiokafka / tests / test_consumer.py
        producer = AIOKafkaProducer(
            loop=self.loop, bootstrap_servers=self.hosts,
            compression_type="gzip")
        await producer.start()
        self.add_cleanup(producer.stop)
        await self.wait_topic(producer.client, self.topic)

        # We must be sure that we will end up with 1 and only 1 batch
        batch = producer.create_batch()
        for i in range(10):
            batch.append(key=b"123", value=str(i).encode(), timestamp=None)
        fut = await producer.send_batch(
            batch, topic=self.topic, partition=0)
        batch_meta = await fut

        consumer = await self.consumer_factory()
        consumer.seek(TopicPartition(self.topic, 0), batch_meta.offset + 5)

        orig_send = consumer._client.send
        with mock.patch.object(consumer._client, "send") as m:
            recv_records = []

            async def mock_send(node_id, req, group=None, test_case=self):
                res = await orig_send(node_id, req, group=group)
                if res.API_KEY == FetchRequest[0].API_KEY:
                    for topic, partitions in res.topics:
                        for partition_data in partitions:
                            data = partition_data[-1]
                            # Manually do unpack using internal tools so that
                            # we can count how many were actually passed from
                            # broker
                            records = MemoryRecords(data)
                            while records.has_next():
github aio-libs / aiokafka / tests / test_consumer.py
async def test_simple_consumer(self):
        with self.assertRaises(ValueError):
            # check unsupported version
            consumer = await self.consumer_factory(api_version="0.8")

        now = time.time()
        await self.send_messages(0, list(range(0, 100)))
        await self.send_messages(1, list(range(100, 200)))
        # Start a consumer_factory
        consumer = await self.consumer_factory()

        p0 = TopicPartition(self.topic, 0)
        p1 = TopicPartition(self.topic, 1)
        assignment = consumer.assignment()
        self.assertEqual(sorted(list(assignment)), [p0, p1])

        topics = await consumer.topics()
        self.assertTrue(self.topic in topics)

        parts = consumer.partitions_for_topic(self.topic)
        self.assertEqual(sorted(list(parts)), [0, 1])

        offset = await consumer.committed(
            TopicPartition("uknown-topic", 2))
        self.assertEqual(offset, None)

        offset = await consumer.committed(p0)
        if offset is None:
            offset = 0
github aio-libs / aiokafka / tests / test_consumer.py
async def test_kafka_consumer_offsets_old_brokers(self):
        consumer = await self.consumer_factory()
        tp = TopicPartition(self.topic, 0)

        with self.assertRaises(UnsupportedVersionError):
            await consumer.offsets_for_times({tp: int(time.time())})
        with self.assertRaises(UnsupportedVersionError):
            await consumer.beginning_offsets(tp)
        with self.assertRaises(UnsupportedVersionError):
            await consumer.end_offsets(tp)
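
On brokers that do support these APIs, the same methods accept TopicPartition keys and return plain offsets or OffsetAndTimestamp values. The sketch below is an assumed usage against a placeholder localhost:9092 cluster and my-topic topic; it is not part of the test suite above.

import asyncio

from aiokafka import AIOKafkaConsumer
from aiokafka.structs import TopicPartition


async def inspect_offsets():
    consumer = AIOKafkaConsumer(bootstrap_servers="localhost:9092")
    await consumer.start()
    try:
        tp = TopicPartition("my-topic", 0)
        # beginning_offsets/end_offsets take an iterable of TopicPartition
        # and return dicts keyed by TopicPartition.
        earliest = await consumer.beginning_offsets([tp])
        latest = await consumer.end_offsets([tp])
        print(earliest[tp], latest[tp])
        # offsets_for_times maps TopicPartition -> timestamp in ms and
        # returns TopicPartition -> OffsetAndTimestamp (or None).
        found = await consumer.offsets_for_times({tp: 0})
        print(found[tp])
    finally:
        await consumer.stop()


asyncio.run(inspect_offsets())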
github aio-libs / aiokafka / tests / test_consumer.py
async def test_consumer_pause_resume(self):
        await self.send_messages(0, range(5))
        await self.send_messages(1, range(5))

        consumer = await self.consumer_factory()
        tp0 = TopicPartition(self.topic, 0)

        self.assertEqual(consumer.paused(), set())
        seen_partitions = set()
        for _ in range(10):
            msg = await consumer.getone()
            seen_partitions.add(msg.partition)
        self.assertEqual(seen_partitions, {0, 1})

        await consumer.seek_to_beginning()
        consumer.pause(tp0)
        self.assertEqual(consumer.paused(), {tp0})
        seen_partitions = set()
        for _ in range(5):
            msg = await consumer.getone()
            seen_partitions.add(msg.partition)
        self.assertEqual(seen_partitions, {1})
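
Outside the test harness, pause() and resume() take the same TopicPartition arguments. A minimal sketch, assuming a placeholder broker and a my-topic topic with at least two partitions:

import asyncio

from aiokafka import AIOKafkaConsumer
from aiokafka.structs import TopicPartition


async def throttle_partition():
    consumer = AIOKafkaConsumer(bootstrap_servers="localhost:9092")
    await consumer.start()
    try:
        tp0 = TopicPartition("my-topic", 0)
        tp1 = TopicPartition("my-topic", 1)
        consumer.assign([tp0, tp1])
        # Stop fetching from partition 0 while keeping its assignment.
        consumer.pause(tp0)
        assert consumer.paused() == {tp0}
        # Only records from partition 1 are returned while tp0 is paused.
        msg = await consumer.getone()
        print(msg.partition, msg.offset)
        consumer.resume(tp0)
    finally:
        await consumer.stop()


asyncio.run(throttle_partition())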
github aio-libs / aiokafka / tests / test_consumer.py
async def test_consumer_position(self):
        await self.send_messages(0, [1, 2, 3])

        consumer = await self.consumer_factory(enable_auto_commit=False)
        self.add_cleanup(consumer.stop)
        tp = TopicPartition(self.topic, 0)
        offset = await consumer.position(tp)
        self.assertEqual(offset, 0)
        await consumer.getone()
        offset = await consumer.position(tp)
        self.assertEqual(offset, 1)

        with self.assertRaises(IllegalStateError):
            await consumer.position(TopicPartition(self.topic, 1000))

        # If we lose assignment when waiting for position we should retry
        # with new assignment
        another_topic = self.topic + "-1"
        consumer.subscribe((self.topic, another_topic))
        await consumer._subscription.wait_for_assignment()
        assert tp in consumer.assignment()
        # At this moment the assignment is done, but position should be
github aio-libs / aiokafka / aiokafka / producer / producer.py
delivered.
        """
        # first make sure the metadata for the topic is available
        await self.client._wait_on_metadata(topic)
        # We only validate we have the partition in the metadata here
        partition = self._partition(topic, partition, None, None, None, None)

        # Ensure transaction is started and not committing
        if self._txn_manager is not None:
            txn_manager = self._txn_manager
            if txn_manager.transactional_id is not None and \
                    not self._txn_manager.is_in_transaction():
                raise IllegalOperation(
                    "Can't send messages while not in transaction")

        tp = TopicPartition(topic, partition)
        log.debug("Sending batch to %s", tp)
        future = await self._message_accumulator.add_batch(
            batch, tp, self._request_timeout_ms / 1000)
        return future
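
As a user-facing counterpart to the producer internals above, a batch is typically built with create_batch() and routed to an explicit partition via send_batch(); awaiting the returned future yields a RecordMetadata with the topic, partition, and base offset. The broker address and topic name below are placeholders.

import asyncio

from aiokafka import AIOKafkaProducer


async def produce_batch():
    producer = AIOKafkaProducer(bootstrap_servers="localhost:9092")
    await producer.start()
    try:
        batch = producer.create_batch()
        for i in range(10):
            # append() returns None once the batch has no more room.
            batch.append(key=b"key", value=str(i).encode(), timestamp=None)
        # send_batch() requires an explicit partition, so the whole batch
        # lands in partition 0 of "my-topic".
        fut = await producer.send_batch(batch, "my-topic", partition=0)
        record = await fut
        print(record.topic, record.partition, record.offset)
    finally:
        await producer.stop()


asyncio.run(produce_batch())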
github aio-libs / aiokafka / aiokafka / consumer / consumer.py
async def seek_to_end(self, *partitions):
        """Seek to the most recent available offset for partitions.

        Arguments:
            *partitions: Optionally provide specific TopicPartitions, otherwise
                default to all assigned partitions.

        Raises:
            IllegalStateError: If any partition is not currently assigned
            TypeError: If partitions are not instances of TopicPartition

        .. versionadded:: 0.3.0

        """
        if not all([isinstance(p, TopicPartition) for p in partitions]):
            raise TypeError('partitions must be TopicPartition instances')

        if not partitions:
            partitions = self._subscription.assigned_partitions()
            assert partitions, 'No partitions are currently assigned'
        else:
            not_assigned = (
                set(partitions) - self._subscription.assigned_partitions()
            )
            if not_assigned:
                raise IllegalStateError(
                    "Partitions {} are not assigned".format(not_assigned))

        for tp in partitions:
            log.debug("Seeking to end of partition %s", tp)
        fut = self._fetcher.request_offset_reset(
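
A hedged usage sketch of the API documented above: after manual assignment, seek_to_end() (like its counterpart seek_to_beginning()) accepts the same TopicPartition instances, or no arguments to seek every assigned partition. Broker and topic names are assumptions.

import asyncio

from aiokafka import AIOKafkaConsumer
from aiokafka.structs import TopicPartition


async def skip_to_latest():
    consumer = AIOKafkaConsumer(bootstrap_servers="localhost:9092")
    await consumer.start()
    try:
        tp = TopicPartition("my-topic", 0)
        consumer.assign([tp])
        # Jump to the latest available offset for this partition only;
        # with no arguments it would seek all assigned partitions.
        await consumer.seek_to_end(tp)
        print(await consumer.position(tp))
    finally:
        await consumer.stop()


asyncio.run(skip_to_latest())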
github aio-libs / aiokafka / aiokafka / consumer / group_coordinator.py
def assign_all_partitions(self, check_unknown=False):
        """ Assign all partitions from subscribed topics to this consumer.
            If `check_unknown` we will raise UnknownTopicOrPartitionError if
            subscribed topic is not found in metadata response.
        """
        partitions = []
        for topic in self._subscription.subscription.topics:
            p_ids = self._cluster.partitions_for_topic(topic)
            if not p_ids and check_unknown:
                raise Errors.UnknownTopicOrPartitionError()
            for p_id in p_ids:
                partitions.append(TopicPartition(topic, p_id))

        # If assignment did not change no need to reset it
        assignment = self._subscription.subscription.assignment
        if assignment is None or set(partitions) != assignment.tps:
            self._subscription.assign_from_subscribed(partitions)
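
The same idea is available from the public consumer API: partitions_for_topic() returns the partition ids known from cluster metadata, which can be wrapped in TopicPartition instances for manual assignment. A sketch under the usual placeholder broker/topic assumptions:

import asyncio

from aiokafka import AIOKafkaConsumer
from aiokafka.structs import TopicPartition


async def assign_all_partitions():
    consumer = AIOKafkaConsumer(bootstrap_servers="localhost:9092")
    await consumer.start()
    try:
        p_ids = consumer.partitions_for_topic("my-topic")
        # partitions_for_topic() returns None if the topic is not yet in
        # the consumer's metadata.
        if p_ids:
            partitions = [TopicPartition("my-topic", p_id) for p_id in p_ids]
            consumer.assign(partitions)
            print("assigned:", sorted(consumer.assignment()))
    finally:
        await consumer.stop()


asyncio.run(assign_all_partitions())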