How to use the aiokafka.errors.TopicAuthorizationFailedError exception class in aiokafka

To help you get started, we’ve selected a few aiokafka examples, based on popular ways it is used in public projects.

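Before the project snippets, here is a minimal sketch of the basic pattern: catch TopicAuthorizationFailedError when producing to a topic the authenticated principal cannot write to. The broker address and topic name below are placeholders, not taken from the projects.

import asyncio

from aiokafka import AIOKafkaProducer
from aiokafka.errors import TopicAuthorizationFailedError


async def main():
    # Placeholder broker and topic; SASL/SSL settings omitted for brevity
    producer = AIOKafkaProducer(bootstrap_servers="localhost:9092")
    await producer.start()
    try:
        # Raises TopicAuthorizationFailedError when the authenticated
        # principal is denied WRITE on the topic
        await producer.send_and_wait("some-topic", b"payload")
    except TopicAuthorizationFailedError:
        print("Not authorized to write to some-topic")
    finally:
        await producer.stop()


asyncio.run(main())

The project snippets below show the same exception surfacing in tests, the consumer fetch path, metadata handling, the producer sender, and offset commits.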

github aio-libs / aiokafka / tests / test_sasl.py
async def test_sasl_deny_topic_write(self):

        self.acl_manager.add_acl(
            allow_principal="test", operation="All", topic=self.topic)
        self.acl_manager.add_acl(
            deny_principal="test", operation="WRITE", topic=self.topic)

        producer = await self.producer_factory()
        with self.assertRaises(TopicAuthorizationFailedError):
            await producer.send_and_wait(
                topic=self.topic, value=b"Super sasl msg")
github aio-libs / aiokafka / tests / test_sasl.py
async def test_sasl_deny_topic_describe(self):
        # Before 1.0.0 Kafka if topic does not exist it will not report
        # Topic authorization errors, so we need to create topic beforehand
        # See https://kafka.apache.org/documentation/#upgrade_100_notable
        tmp_producer = await self.producer_factory()
        await tmp_producer.send_and_wait(self.topic, value=b"Autocreate topic")
        error_class = TopicAuthorizationFailedError
        if tmp_producer.client.api_version < (1, 0):
            error_class = UnknownTopicOrPartitionError
        del tmp_producer

        self.acl_manager.add_acl(
            allow_principal="test", operation="All", topic=self.topic)
        self.acl_manager.add_acl(
            deny_principal="test", operation="DESCRIBE", topic=self.topic)

        producer = await self.producer_factory(request_timeout_ms=10000)

        with self.assertRaises(error_class):
            await producer.send_and_wait(self.topic, value=b"Super sasl msg")

        # This will check for authorization on start()
        with self.assertRaises(error_class):
github aio-libs / aiokafka / tests / test_fetcher.py
lambda n, r: FetchResponse(
                [('test', [(0, 3, 9, raw_batch)])]))
        cc = client.force_metadata_update.call_count
        needs_wake_up = await fetcher._proc_fetch_request(
            assignment, 0, req)
        self.assertEqual(needs_wake_up, False)
        self.assertEqual(client.force_metadata_update.call_count, cc + 1)

        # error -> topic auth failed (TopicAuthorizationFailedError)
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 29, 9, raw_batch)])]))
        needs_wake_up = await fetcher._proc_fetch_request(
            assignment, 0, req)
        self.assertEqual(needs_wake_up, True)
        with self.assertRaises(TopicAuthorizationFailedError):
            await fetcher.next_record([])

        # error -> unknown
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, -1, 9, raw_batch)])]))
        needs_wake_up = await fetcher._proc_fetch_request(
            assignment, 0, req)
        self.assertEqual(needs_wake_up, False)

        # error -> offset out of range with offset strategy
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 1, 9, raw_batch)])]))
        needs_wake_up = await fetcher._proc_fetch_request(
            assignment, 0, req)
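In the FetchResponse tuples above, each partition entry is (partition, error_code, highwater, record batch); error code 29 is the Kafka protocol's TOPIC_AUTHORIZATION_FAILED. A quick sketch of that mapping, using the same errors module the library imports as Errors (this is not part of the test above):

from aiokafka import errors as Errors

# for_code() maps a Kafka protocol error code to its exception class;
# 29 is TOPIC_AUTHORIZATION_FAILED, which is why the (0, 29, 9, raw_batch)
# partition entry above produces a TopicAuthorizationFailedError
assert Errors.for_code(29) is Errors.TopicAuthorizationFailedError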
github aio-libs / aiokafka / aiokafka / consumer / fetcher.py
self._client.force_metadata_update()
                elif error_type is Errors.OffsetOutOfRangeError:
                    if self._default_reset_strategy != \
                            OffsetResetStrategy.NONE:
                        tp_state.await_reset(self._default_reset_strategy)
                    else:
                        err = Errors.OffsetOutOfRangeError({tp: fetch_offset})
                        self._set_error(tp, err)
                        needs_wakeup = True
                    log.info(
                        "Fetch offset %s is out of range for partition %s,"
                        " resetting offset", fetch_offset, tp)
                elif error_type is Errors.TopicAuthorizationFailedError:
                    log.warning(
                        "Not authorized to read from topic %s.", tp.topic)
                    err = Errors.TopicAuthorizationFailedError(tp.topic)
                    self._set_error(tp, err)
                    needs_wakeup = True
                else:
                    log.warning('Unexpected error while fetching data: %s',
                                error_type.__name__)
        return needs_wakeup
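The _set_error() call above parks the exception on the partition state; it is re-raised the next time the application asks the consumer for records. A sketch of handling that on the public API, with placeholder broker, topic, and group id (not from the project source):

import asyncio

from aiokafka import AIOKafkaConsumer
from aiokafka.errors import TopicAuthorizationFailedError


async def consume():
    # Placeholder broker, topic and group id
    consumer = AIOKafkaConsumer(
        "some-topic",
        bootstrap_servers="localhost:9092",
        group_id="demo-group",
    )
    await consumer.start()
    try:
        while True:
            try:
                msg = await consumer.getone()
                print(msg.partition, msg.offset, msg.value)
            except TopicAuthorizationFailedError:
                # The fetcher recorded the error for an assigned partition;
                # stop (or alert) instead of retrying in a tight loop
                print("Not authorized to read from some-topic")
                break
    finally:
        await consumer.stop()


asyncio.run(consume())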
github aio-libs / aiokafka / aiokafka / cluster.py
if error_type is Errors.NoError:
                _new_partitions[topic] = {}
                for p_error, partition, leader, replicas, isr in partitions:
                    _new_partitions[topic][partition] = PartitionMetadata(
                        topic=topic, partition=partition, leader=leader,
                        replicas=replicas, isr=isr, error=p_error)
                    if leader != -1:
                        _new_broker_partitions[leader].add(
                            TopicPartition(topic, partition))

            elif error_type is Errors.LeaderNotAvailableError:
                log.warning("Topic %s is not available during auto-create"
                            " initialization", topic)
            elif error_type is Errors.UnknownTopicOrPartitionError:
                log.error("Topic %s not found in cluster metadata", topic)
            elif error_type is Errors.TopicAuthorizationFailedError:
                log.error("Topic %s is not authorized for this client", topic)
                _new_unauthorized_topics.add(topic)
            elif error_type is Errors.InvalidTopicError:
                log.error("'%s' is not a valid topic name", topic)
            else:
                log.error("Error fetching metadata for topic %s: %s",
                          topic, error_type)

        with self._lock:
            self._brokers = _new_brokers
            self.controller = _new_controller
            self._partitions = _new_partitions
            self._broker_partitions = _new_broker_partitions
            self.unauthorized_topics = _new_unauthorized_topics
            self.internal_topics = _new_internal_topics
github aio-libs / aiokafka / aiokafka / producer / sender.py
batch.done(offset, timestamp)
                elif error is DuplicateSequenceNumber:
                    # If we have received a duplicate sequence error,
                    # it means that the sequence number has advanced
                    # beyond the sequence of the current batch, and we
                    # haven't retained batch metadata on the broker to
                    # return the correct offset and timestamp.
                    #
                    # The only thing we can do is to return success to
                    # the user and not return a valid offset and
                    # timestamp.
                    batch.done(offset, timestamp)
                elif not self._can_retry(error(), batch):
                    if error is InvalidProducerEpoch:
                        exc = ProducerFenced()
                    elif error is TopicAuthorizationFailedError:
                        exc = error(topic)
                    else:
                        exc = error()
                    batch.failure(exception=exc)
                else:
                    log.warning(
                        "Got error produce response on topic-partition"
                        " %s, retrying. Error: %s", tp, error)
                    # Ok, we can retry this batch
                    if getattr(error, "invalid_metadata", False):
                        self._client.force_metadata_update()
                    self._to_reenqueue.append(batch)
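batch.failure(exception=exc) above is what makes the delivery future returned by send() resolve with TopicAuthorizationFailedError instead of RecordMetadata. A minimal sketch, assuming an already-started producer and a placeholder topic:

from aiokafka import AIOKafkaProducer
from aiokafka.errors import TopicAuthorizationFailedError


async def produce_one(producer: AIOKafkaProducer, topic: str) -> None:
    # send() returns a delivery future; awaiting it yields RecordMetadata
    # or raises whatever exception the sender attached to the batch
    fut = await producer.send(topic, b"payload")
    try:
        metadata = await fut
        print("delivered to partition", metadata.partition,
              "at offset", metadata.offset)
    except TopicAuthorizationFailedError as exc:
        print("write denied:", exc)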
github aio-libs / aiokafka / aiokafka / client.py
"""
        if topic in self.cluster.topics():
            return self.cluster.partitions_for_topic(topic)

        # add topic to metadata topic list if it is not there already.
        self.add_topic(topic)

        t0 = self._loop.time()
        while True:
            await self.force_metadata_update()
            if topic in self.cluster.topics():
                break
            if (self._loop.time() - t0) > (self._request_timeout_ms / 1000):
                raise UnknownTopicOrPartitionError()
            if topic in self.cluster.unauthorized_topics:
                raise Errors.TopicAuthorizationFailedError(topic)
            await asyncio.sleep(self._retry_backoff, loop=self._loop)

        return self.cluster.partitions_for_topic(topic)
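This metadata wait loop raises once the cluster marks the topic as unauthorized. A sketch of one public entry point into it, assuming the producer's partitions_for() coroutine (which awaits this metadata lookup) and a placeholder topic name:

from aiokafka.errors import TopicAuthorizationFailedError


async def show_partitions(producer, topic: str) -> None:
    try:
        # Triggers a metadata fetch for the topic; raises
        # TopicAuthorizationFailedError once the cluster reports it
        # as unauthorized for this client
        partitions = await producer.partitions_for(topic)
        print(topic, "partitions:", sorted(partitions))
    except TopicAuthorizationFailedError:
        print("metadata for", topic, "is not authorized for this principal")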
github aio-libs / aiokafka / aiokafka / consumer / group_coordinator.py
errored[tp] = error

                else:
                    log.error(
                        "OffsetCommit failed for group %s on partition %s"
                        " with offset %s: %s", self.group_id, tp, offset,
                        error_type.__name__)
                    errored[tp] = error_type()

        if errored:
            first_error = list(errored.values())[0]
            raise first_error
        if unauthorized_topics:
            log.error("OffsetCommit failed for unauthorized topics %s",
                      unauthorized_topics)
            raise Errors.TopicAuthorizationFailedError(unauthorized_topics)
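That raise propagates out of the consumer's commit() call, with the set of rejected topics attached to the exception. A sketch of catching it during a manual commit, assuming a started consumer created with a group_id and enable_auto_commit=False:

from aiokafka import AIOKafkaConsumer
from aiokafka.errors import TopicAuthorizationFailedError


async def consume_and_commit(consumer: AIOKafkaConsumer) -> None:
    # Assumes the consumer was started with a group_id and
    # enable_auto_commit=False
    batch = await consumer.getmany(timeout_ms=1000)
    if not batch:
        return
    try:
        await consumer.commit()
    except TopicAuthorizationFailedError as exc:
        # Carries the set of topics the commit was rejected for
        print("offset commit rejected:", exc)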
github aio-libs / aiokafka / aiokafka / consumer / consumer.py
async def __anext__(self):
        """Asyncio iterator interface for consumer

        Note:
            TopicAuthorizationFailedError and OffsetOutOfRangeError
            exceptions can be raised in iterator.
            All other KafkaError exceptions will be logged and not raised
        """
        while True:
            try:
                return (await self.getone())
            except ConsumerStoppedError:
                raise StopAsyncIteration  # noqa: F821
            except (TopicAuthorizationFailedError,
                    OffsetOutOfRangeError,
                    NoOffsetForPartitionError) as err:
                raise err
            except RecordTooLargeError:
                log.exception("error in consumer iterator")
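Because __anext__ re-raises TopicAuthorizationFailedError (and OffsetOutOfRangeError) rather than swallowing it, the exception escapes a plain async for loop, so wrap the loop itself rather than the body. A minimal sketch with placeholder broker and topic:

import asyncio

from aiokafka import AIOKafkaConsumer
from aiokafka.errors import TopicAuthorizationFailedError


async def iterate():
    # Placeholder broker and topic
    consumer = AIOKafkaConsumer(
        "some-topic", bootstrap_servers="localhost:9092")
    await consumer.start()
    try:
        async for msg in consumer:
            print(msg.topic, msg.partition, msg.offset)
    except TopicAuthorizationFailedError:
        # Unlike most KafkaError subclasses, which the iterator only
        # logs, this one is re-raised and ends the loop
        print("not authorized to read the subscribed topic")
    finally:
        await consumer.stop()


asyncio.run(iterate())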
github aio-libs / aiokafka / aiokafka / consumer / group_coordinator.py
errored = collections.OrderedDict()
        unauthorized_topics = set()
        for topic, partitions in response.topics:
            for partition, error_code in partitions:
                tp = TopicPartition(topic, partition)
                error_type = Errors.for_code(error_code)
                offset = offsets[tp]
                if error_type is Errors.NoError:
                    log.debug(
                        "Committed offset %s for partition %s", offset, tp)
                elif error_type is Errors.GroupAuthorizationFailedError:
                    log.error("OffsetCommit failed for group %s - %s",
                              self.group_id, error_type.__name__)
                    errored[tp] = error_type(self.group_id)
                elif error_type is Errors.TopicAuthorizationFailedError:
                    unauthorized_topics.add(topic)
                elif error_type in (Errors.OffsetMetadataTooLargeError,
                                    Errors.InvalidCommitOffsetSizeError):
                    # raise the error to the user
                    log.info(
                        "OffsetCommit failed for group %s on partition %s"
                        " due to %s, will retry", self.group_id, tp,
                        error_type.__name__)
                    errored[tp] = error_type()
                elif error_type is Errors.GroupLoadInProgressError:
                    # just retry
                    log.info(
                        "OffsetCommit failed for group %s because group is"
                        " initializing (%s), will retry", self.group_id,
                        error_type.__name__)
                    errored[tp] = error_type()