How to use the aiokafka.errors module in aiokafka

To help you get started, we've selected a few aiokafka.errors examples based on popular ways the module is used in public projects.
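
The pattern shared by the snippets below is simple: import the module under an alias, map a wire-protocol error code to an exception class with for_code(), then branch on the class or raise an instance of it. A minimal sketch of that pattern (the error code 3 is just an illustrative value):

import aiokafka.errors as Errors

# Map a broker error code to its exception class.
error_type = Errors.for_code(3)  # 3 -> UnknownTopicOrPartitionError

if error_type is not Errors.NoError:
    # Branch on the class, or instantiate and raise it.
    raise error_type()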


github aio-libs / aiokafka / aiokafka / consumer / fetcher.py (View on GitHub)
                        # we did not read a single message from a non-empty
                        # buffer because that message's size is larger than
                        # fetch size, in this case record this exception
                        err = RecordTooLargeError(
                            "There are some messages at [Partition=Offset]: "
                            "%s=%s whose size is larger than the fetch size %s"
                            " and hence cannot be ever returned. "
                            "Increase the fetch size, or decrease the maximum "
                            "message size the broker will allow.",
                            tp, fetch_offset, self._max_partition_fetch_bytes)
                        self._set_error(tp, err)
                        tp_state.consumed_to(tp_state.position + 1)
                        needs_wakeup = True

                elif error_type in (Errors.NotLeaderForPartitionError,
                                    Errors.UnknownTopicOrPartitionError):
                    self._client.force_metadata_update()
                elif error_type is Errors.OffsetOutOfRangeError:
                    if self._default_reset_strategy != \
                            OffsetResetStrategy.NONE:
                        tp_state.await_reset(self._default_reset_strategy)
                    else:
                        err = Errors.OffsetOutOfRangeError({tp: fetch_offset})
                        self._set_error(tp, err)
                        needs_wakeup = True
                    log.info(
                        "Fetch offset %s is out of range for partition %s,"
                        " resetting offset", fetch_offset, tp)
                elif error_type is Errors.TopicAuthorizationFailedError:
                    log.warning(
                        "Not authorized to read from topic %s.", tp.topic)
                    err = Errors.TopicAuthorizationFailedError(tp.topic)
                    self._set_error(tp, err)
                    needs_wakeup = True
                else:
                    log.warning('Unexpected error while fetching data: %s',
                                error_type.__name__)
        return needs_wakeup
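
Errors recorded through _set_error() above do not raise inside the fetch coroutine; they surface on the application's next poll. A hedged consumer-side sketch (the broker address and topic name are placeholders):

import asyncio
from aiokafka import AIOKafkaConsumer
from aiokafka.errors import RecordTooLargeError, TopicAuthorizationFailedError

async def consume():
    consumer = AIOKafkaConsumer(
        "my-topic", bootstrap_servers="localhost:9092")
    await consumer.start()
    try:
        async for msg in consumer:
            print(msg.offset, msg.value)
    except RecordTooLargeError:
        # A single message exceeded max_partition_fetch_bytes; raise the
        # fetch limit or lower the broker's maximum message size.
        raise
    except TopicAuthorizationFailedError:
        # ACLs deny reads on this topic for our principal.
        raise
    finally:
        await consumer.stop()

asyncio.run(consume())
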
github aio-libs / aiokafka / aiokafka / consumer / group_coordinator.py (View on GitHub)
        elif error_type is Errors.UnknownMemberIdError:
            # reset the member id and retry immediately
            self._coordinator.reset_generation()
            log.debug(
                "Attempt to join group %s failed due to unknown member id",
                self.group_id)
        elif error_type in (Errors.GroupCoordinatorNotAvailableError,
                            Errors.NotCoordinatorForGroupError):
            # Coordinator changed; we should be able to find it immediately
            err = error_type()
            self._coordinator.coordinator_dead()
            log.debug("Attempt to join group %s failed due to obsolete "
                      "coordinator information: %s", self.group_id,
                      err)
        elif error_type in (Errors.InconsistentGroupProtocolError,
                            Errors.InvalidSessionTimeoutError,
                            Errors.InvalidGroupIdError):
            err = error_type()
            log.error(
                "Attempt to join group failed due to fatal error: %s", err)
            raise err
        elif error_type is Errors.GroupAuthorizationFailedError:
            raise error_type(self.group_id)
        else:
            err = error_type()
            log.error(
                "Unexpected error in join group '%s' response: %s",
                self.group_id, err)
            raise Errors.KafkaError(repr(err))
        return None
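
Only the fatal branches above (InconsistentGroupProtocolError and friends, GroupAuthorizationFailedError, and the unexpected-error fallback) escape the coordinator; the others are retried internally. At the application level they typically appear while the consumer joins its group. A hedged sketch:

from aiokafka import AIOKafkaConsumer
from aiokafka.errors import GroupAuthorizationFailedError, KafkaError

async def start_consumer():
    consumer = AIOKafkaConsumer(
        "my-topic", group_id="my-group",
        bootstrap_servers="localhost:9092")
    try:
        await consumer.start()
    except GroupAuthorizationFailedError:
        # The principal lacks permission on group "my-group".
        await consumer.stop()
        raise
    except KafkaError:
        # Fatal join errors re-raised by the coordinator, as above.
        await consumer.stop()
        raise
    return consumer
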
github aio-libs / aiokafka / aiokafka / consumer / fetcher.py (View on GitHub)
            try:
                committed = await tp_state.fetch_committed()
            except asyncio.CancelledError:
                return needs_wakeup
            assert committed is not None

            # There could have been a seek() call of some sort while
            # waiting for the committed point
            if tp_state.has_valid_position or tp_state.awaiting_reset:
                continue

            if committed.offset == UNKNOWN_OFFSET:
                # No offset stored in Kafka, need to reset
                if self._default_reset_strategy != OffsetResetStrategy.NONE:
                    tp_state.await_reset(self._default_reset_strategy)
                else:
                    err = Errors.NoOffsetForPartitionError(tp)
                    self._set_error(tp, err)
                    needs_wakeup = True
                log.debug(
                    "No committed offset found for %s", tp)
            else:
                log.debug("Resetting offset for partition %s to the "
                          "committed offset %s", tp, committed)
                tp_state.reset_to(committed.offset)

        topic_data = collections.defaultdict(list)
        needs_reset = []
        for tp in tps:
            tp_state = assignment.state_value(tp)
            if not tp_state.awaiting_reset:
                continue
            needs_reset.append(tp)
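
Which branch runs, the automatic reset or NoOffsetForPartitionError, is decided by the consumer's auto_offset_reset setting, which maps onto OffsetResetStrategy. A hedged configuration sketch:

from aiokafka import AIOKafkaConsumer

# "earliest" and "latest" let the fetcher reset on its own; "none"
# corresponds to OffsetResetStrategy.NONE, so a group with no committed
# offsets raises NoOffsetForPartitionError on its first fetch.
consumer = AIOKafkaConsumer(
    "my-topic", group_id="fresh-group",
    bootstrap_servers="localhost:9092",
    auto_offset_reset="none")
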
github aio-libs / aiokafka / aiokafka / consumer / group_coordinator.py (View on GitHub)
        for topic, partitions in response.topics:
            for partition, error_code in partitions:
                tp = TopicPartition(topic, partition)
                error_type = Errors.for_code(error_code)
                offset = offsets[tp]
                if error_type is Errors.NoError:
                    log.debug(
                        "Committed offset %s for partition %s", offset, tp)
                elif error_type is Errors.GroupAuthorizationFailedError:
                    log.error("OffsetCommit failed for group %s - %s",
                              self.group_id, error_type.__name__)
                    errored[tp] = error_type(self.group_id)
                elif error_type is Errors.TopicAuthorizationFailedError:
                    unauthorized_topics.add(topic)
                elif error_type in (Errors.OffsetMetadataTooLargeError,
                                    Errors.InvalidCommitOffsetSizeError):
                    # raise the error to the user
                    log.info(
                        "OffsetCommit failed for group %s on partition %s"
                        " due to %s, will retry", self.group_id, tp,
                        error_type.__name__)
                    errored[tp] = error_type()
                elif error_type is Errors.GroupLoadInProgressError:
                    # just retry
                    log.info(
                        "OffsetCommit failed for group %s because group is"
                        " initializing (%s), will retry", self.group_id,
                        error_type.__name__)
                    errored[tp] = error_type()
                elif error_type in (Errors.GroupCoordinatorNotAvailableError,
                                    Errors.NotCoordinatorForGroupError):
                    # NOTE: message text reconstructed; the snippet is
                    # truncated at this point in the source page
                    log.info(
                        "OffsetCommit failed for group %s due to a"
                        " coordinator error (%s), will retry",
                        self.group_id, error_type.__name__)
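
On the caller's side, the per-partition errors collected in errored are raised out of the commit call. A hedged sketch of handling a manual commit (the split between the except clauses is illustrative):

from aiokafka.errors import (
    KafkaError, OffsetMetadataTooLargeError, TopicAuthorizationFailedError)

async def commit_or_log(consumer, log):
    try:
        await consumer.commit()
    except OffsetMetadataTooLargeError:
        # The metadata string attached to an offset exceeds the broker limit.
        raise
    except TopicAuthorizationFailedError:
        # The commit touched a topic this principal may not access.
        raise
    except KafkaError as err:
        log.error("Commit failed: %r", err)
        raise
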
github aio-libs / aiokafka / aiokafka / consumer / fetcher.py (View on GitHub)
    def _unpack_records(self):
        # NOTE: if the batch is not compressed it's equal to 1 record in
        #       v0 and v1.
        tp = self._tp
        records = self._records
        while records.has_next():
            next_batch = records.next_batch()
            if self._check_crcs and not next_batch.validate_crc():
                # This iterator will be closed after the exception, so we don't
                # try to drain other batches here. They will be refetched.
                raise Errors.CorruptRecordException(
                    "Invalid CRC - {tp}".format(tp=tp))

            if self._isolation_level == READ_COMMITTED and \
                    next_batch.producer_id is not None:
                self._consume_aborted_up_to(next_batch.base_offset)

                if next_batch.is_control_batch:
                    if self._contains_abort_marker(next_batch):
                        self._aborted_producers.remove(next_batch.producer_id)

                if next_batch.is_transactional and \
                        next_batch.producer_id in self._aborted_producers:
                    log.debug(
                        "Skipping aborted record batch from partition %s with"
                        " producer_id %s and offsets %s to %s",
                        tp, next_batch.producer_id,
                        # offset arguments assumed; the snippet is cut off here
                        next_batch.base_offset, next_batch.next_offset)
                    # Skip the whole aborted batch.
                    continue
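
The aborted-transaction filtering in _unpack_records() only applies under the read-committed isolation level, which is opt-in on the consumer. A hedged sketch:

from aiokafka import AIOKafkaConsumer

# With "read_committed" the fetcher skips batches from aborted
# transactions exactly as above; the default "read_uncommitted"
# delivers them.
consumer = AIOKafkaConsumer(
    "my-topic",
    bootstrap_servers="localhost:9092",
    isolation_level="read_committed")
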
github aio-libs / aiokafka / aiokafka / conn.py (View on GitHub)
    async def _do_sasl_handshake(self):
        # NOTE: We will only fallback to v0.9 gssapi scheme if user explicitly
        #       stated, that api_version is "0.9"
        if self._version_hint and self._version_hint < (0, 10):
            handshake_klass = None
            assert self._sasl_mechanism == 'GSSAPI', (
                "Only GSSAPI supported for v0.9"
            )
        else:
            handshake_klass = self._version_info.pick_best(
                SaslHandShakeRequest)

            sasl_handshake = handshake_klass(self._sasl_mechanism)
            response = await self.send(sasl_handshake)
            error_type = Errors.for_code(response.error_code)
            if error_type is not Errors.NoError:
                error = error_type(self)
                self.close(reason=CloseReason.AUTH_FAILURE, exc=error)
                raise error

            if self._sasl_mechanism not in response.enabled_mechanisms:
                exc = Errors.UnsupportedSaslMechanismError(
                    'Kafka broker does not support %s sasl mechanism. '
                    'Enabled mechanisms are: %s'
                    % (self._sasl_mechanism, response.enabled_mechanisms))
                self.close(reason=CloseReason.AUTH_FAILURE, exc=exc)
                raise exc

        assert self._sasl_mechanism in ('PLAIN', 'GSSAPI')
        if self._security_protocol == 'SASL_PLAINTEXT' and \
           self._sasl_mechanism == 'PLAIN':
            # Body assumed (the snippet is truncated here): PLAIN over an
            # unencrypted channel exposes the credentials.
            log.warning("Sending username and password in the clear")
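
The handshake above runs when the client is configured for SASL, and UnsupportedSaslMechanismError comes back if the broker does not enable the requested mechanism. A hedged client-side configuration sketch (host and credentials are placeholders):

from aiokafka import AIOKafkaProducer
from aiokafka.errors import UnsupportedSaslMechanismError

async def make_producer():
    producer = AIOKafkaProducer(
        bootstrap_servers="broker:9092",
        security_protocol="SASL_PLAINTEXT",
        sasl_mechanism="PLAIN",
        sasl_plain_username="user",
        sasl_plain_password="secret")
    try:
        await producer.start()
    except UnsupportedSaslMechanismError:
        # The broker's enabled_mechanisms list does not include PLAIN.
        raise
    return producer
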
github aio-libs / aiokafka / aiokafka / consumer / group_coordinator.py (View on GitHub)
    def assign_all_partitions(self, check_unknown=False):
        """ Assign all partitions from subscribed topics to this consumer.
            If `check_unknown` is set, raise UnknownTopicOrPartitionError if a
            subscribed topic is not found in the metadata response.
        """
        partitions = []
        for topic in self._subscription.subscription.topics:
            p_ids = self._cluster.partitions_for_topic(topic)
            if not p_ids and check_unknown:
                raise Errors.UnknownTopicOrPartitionError()
            for p_id in p_ids:
                partitions.append(TopicPartition(topic, p_id))

        # If assignment did not change no need to reset it
        assignment = self._subscription.subscription.assignment
        if assignment is None or set(partitions) != assignment.tps:
            self._subscription.assign_from_subscribed(partitions)
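
A hedged caller-side sketch of the check_unknown contract; the wrapper name is illustrative and relies only on the raise shown above:

from aiokafka.errors import UnknownTopicOrPartitionError

def safe_assign(coordinator):
    try:
        coordinator.assign_all_partitions(check_unknown=True)
    except UnknownTopicOrPartitionError:
        # A subscribed topic has no partitions in current metadata: it may
        # not exist yet, or the cached metadata may be stale.
        raise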