How to use the aiokafka.errors.KafkaError exception in aiokafka

To help you get started, we've selected a few aiokafka examples that show how aiokafka.errors.KafkaError is used in public projects. KafkaError is the base class for all Kafka-related exceptions raised by aiokafka, so catching it also covers its more specific subclasses.
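
The examples below come from the aiokafka test suite and internals. For application code, the essential pattern is to wrap awaited producer or consumer calls in an except KafkaError block. Here is a minimal sketch; the broker address localhost:9092, the topic name, and the produce_one helper are placeholders rather than part of aiokafka.

import asyncio

from aiokafka import AIOKafkaProducer
from aiokafka.errors import KafkaError


async def produce_one(topic: str, payload: bytes) -> None:
    # Placeholder broker address; adjust for your cluster.
    producer = AIOKafkaProducer(bootstrap_servers="localhost:9092")
    await producer.start()
    try:
        # send_and_wait() raises a KafkaError subclass if delivery fails.
        await producer.send_and_wait(topic, payload)
    except KafkaError as exc:
        print("Delivery failed:", exc)
    finally:
        await producer.stop()


asyncio.run(produce_one("my-topic", b"hello"))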

github aio-libs / aiokafka / tests / test_producer.py
        producer = AIOKafkaProducer(
            loop=self.loop, bootstrap_servers=self.hosts, linger_ms=1000)
        await producer.start()
        self.add_cleanup(producer.stop)

        with mock.patch.object(producer._sender, '_send_produce_req') as m:
            m.side_effect = KeyError

            with self.assertRaisesRegex(
                    KafkaError, "Unexpected error during batch delivery"):
                await producer.send_and_wait(
                    self.topic, b'hello, Kafka!')

        with self.assertRaisesRegex(
                KafkaError, "Unexpected error during batch delivery"):
            await producer.send_and_wait(
                self.topic, b'hello, Kafka!')
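
The test above patches an internal sender method, but the same check works against the public API: patching send_and_wait to raise KafkaError lets you verify that calling code reacts to delivery failures. A rough sketch, assuming pytest with pytest-asyncio; the test name, topic, and broker address are placeholders.

from unittest import mock

import pytest
from aiokafka import AIOKafkaProducer
from aiokafka.errors import KafkaError


@pytest.mark.asyncio
async def test_send_failure_is_surfaced():
    producer = AIOKafkaProducer(bootstrap_servers="localhost:9092")
    # Patch the public coroutine so no broker is needed; patch.object
    # detects the async function and substitutes an AsyncMock.
    with mock.patch.object(
            producer, "send_and_wait", side_effect=KafkaError("boom")):
        with pytest.raises(KafkaError):
            await producer.send_and_wait("my-topic", b"payload")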
github aio-libs / aiokafka / tests / test_consumer.py
        consumer = AIOKafkaConsumer(
            enable_auto_commit=False,
            auto_offset_reset="earliest",
            group_id="group-" + self.id(),
            bootstrap_servers=self.hosts)
        await consumer.start()
        self.add_cleanup(consumer.stop)

        with mock.patch.object(consumer._coordinator, "_send_req") as m:
            async def mock_send_req(request):
                res = mock.Mock()
                res.error_code = UnknownError.errno
                return res
            m.side_effect = mock_send_req

            consumer.subscribe([self.topic])  # Force join error
            with self.assertRaises(KafkaError):
                await consumer.getone()

            # This time we won't kill the fetch waiter, we will check errors
            # before waiting
            with self.assertRaises(KafkaError):
                await consumer.getone()

            # Error in aiokafka code case, should be raised to user too
            m.side_effect = ValueError
            with self.assertRaises(KafkaError):
                await consumer.getone()

        # Even after error should be stopped we already have a broken
        # coordination routine
        with self.assertRaises(KafkaError):
            await consumer.getone()
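
On the consumer side the pattern is symmetric: getone() and getmany() re-raise coordination and fetch failures as KafkaError. A minimal consumption sketch; the broker address, group id, and consume_one helper are placeholders.

import asyncio

from aiokafka import AIOKafkaConsumer
from aiokafka.errors import KafkaError


async def consume_one(topic: str) -> None:
    consumer = AIOKafkaConsumer(
        topic,
        bootstrap_servers="localhost:9092",
        group_id="example-group",
        auto_offset_reset="earliest",
    )
    await consumer.start()
    try:
        # getone() raises KafkaError if group coordination or fetching fails.
        msg = await consumer.getone()
        print(msg.topic, msg.partition, msg.offset, msg.value)
    except KafkaError as exc:
        print("Consume failed:", exc)
    finally:
        await consumer.stop()


asyncio.run(consume_one("my-topic"))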
github aio-libs / aiokafka / aiokafka / client.py
            try:
                if not conn.connected():
                    await conn.connect()
                assert conn, 'no connection to node with id {}'.format(node_id)
                # request can be ignored by Kafka broker,
                # so we send metadata request and wait response
                task = self._loop.create_task(conn.send(request))
                await asyncio.wait([task], timeout=0.1, loop=self._loop)
                try:
                    await conn.send(MetadataRequest_v0([]))
                except KafkaError:
                    # metadata request can be cancelled in case
                    # of invalid correlationIds order
                    pass
                response = await task
            except KafkaError:
                continue
            else:
                # To avoid having a connection in undefined state
                if node_id != "bootstrap" and conn.connected():
                    conn.close()
                if isinstance(request, ApiVersionRequest_v0):
                    # Starting from 0.10 kafka broker we determine version
                    # by looking at ApiVersionResponse
                    version = self._check_api_version_response(response)
                return version

        raise UnrecognizedBrokerVersion()
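
The client code above treats KafkaError as a signal to skip a broker and try the next one. The same retry-and-continue idea can be wrapped in a small helper in application code; this is a generic sketch (call_with_retries is not part of aiokafka, and the attempt count and backoff are arbitrary).

import asyncio

from aiokafka.errors import KafkaError


async def call_with_retries(coro_factory, attempts=3, backoff=1.0):
    # Retry a coroutine factory on KafkaError, re-raising after the final
    # attempt, mirroring the "except KafkaError: continue" loop above.
    for attempt in range(attempts):
        try:
            return await coro_factory()
        except KafkaError:
            if attempt == attempts - 1:
                raise
            await asyncio.sleep(backoff)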
github aio-libs / aiokafka / aiokafka / consumer / group_coordinator.py
    async def _coordination_routine(self):
        try:
            await self.__coordination_routine()
        except asyncio.CancelledError:  # pragma: no cover
            raise
        except Exception as exc:
            log.error(
                "Unexpected error in coordinator routine", exc_info=True)
            kafka_exc = Errors.KafkaError(
                "Unexpected error during coordination {!r}".format(exc))
            self._subscription.abort_waiters(kafka_exc)
            raise kafka_exc
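
The coordinator wraps any unexpected exception in Errors.KafkaError before aborting waiters, so callers only ever see Kafka-typed errors. The same wrap-and-re-raise pattern can be reused in application code; a small sketch (wrap_unexpected is a hypothetical helper, not an aiokafka API).

from aiokafka import errors as Errors


def wrap_unexpected(exc: BaseException) -> Errors.KafkaError:
    # Wrap an arbitrary exception so callers can keep a single
    # `except KafkaError` handler for every failure path.
    return Errors.KafkaError(
        "Unexpected error during coordination {!r}".format(exc))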
github aio-libs / aiokafka / aiokafka / consumer / fetcher.py
    def _contains_abort_marker(self, next_batch):
        # Control Marker is used to specify when we can stop
        # aborting batches
        try:
            control_record = next(next_batch)
        except StopIteration:  # pragma: no cover
            raise Errors.KafkaError(
                "Control batch did not contain any records")
        return ControlRecord.parse(control_record.key) == ABORT_MARKER
github aio-libs / aiokafka / aiokafka / producer / sender.py
        while True:
            try:
                coordinator_id = await self.client.coordinator_lookup(
                    coordinator_type, coordinator_key)
            except Errors.TransactionalIdAuthorizationFailed:
                err = Errors.TransactionalIdAuthorizationFailed(
                    self._txn_manager.transactional_id)
                raise err
            except Errors.GroupAuthorizationFailedError:
                err = Errors.GroupAuthorizationFailedError(coordinator_key)
                raise err
            except Errors.CoordinatorNotAvailableError:
                await self.client.force_metadata_update()
                await asyncio.sleep(self._retry_backoff, loop=self._loop)
                continue
            except Errors.KafkaError as err:
                log.error("FindCoordinator Request failed: %s", err)
                raise KafkaError(repr(err))

            # Try to connect to confirm that the connection can be
            # established.
            ready = await self.client.ready(
                coordinator_id, group=ConnectionGroup.COORDINATION)
            if not ready:
                await asyncio.sleep(self._retry_backoff, loop=self._loop)
                continue

            self._coordinators[coordinator_type] = coordinator_id

            if coordinator_type == CoordinationType.GROUP:
                log.info(
                    "Discovered coordinator %s for group id %s",
                    coordinator_id,
                    coordinator_key)
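
Because specific errors such as GroupAuthorizationFailedError derive from KafkaError, the order of except clauses matters: the sender handles the specific subclasses first and keeps the broad except Errors.KafkaError as the fallback. A condensed sketch of that ordering; handle_lookup and the lookup argument are placeholders.

from aiokafka import errors as Errors


async def handle_lookup(lookup):
    # `lookup` is any coroutine factory that may raise Kafka errors.
    try:
        return await lookup()
    except Errors.GroupAuthorizationFailedError:
        # Most specific handlers come first...
        raise
    except Errors.KafkaError as err:
        # ...and the base class catches everything else Kafka-related.
        raise Errors.KafkaError(repr(err))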
github aio-libs / aiokafka / aiokafka / conn.py
    def pick_best(self, request_versions):
        api_key = request_versions[0].API_KEY
        supported_versions = self._versions.get(api_key)
        if supported_versions is None:
            return request_versions[0]
        else:
            for req_klass in reversed(request_versions):
                if supported_versions[0] <= req_klass.API_VERSION and \
                        req_klass.API_VERSION <= supported_versions[1]:
                    return req_klass
        raise Errors.KafkaError(
            "Could not pick a version for API_KEY={} from {}. ".format(
                api_key, supported_versions)
        )
github aio-libs / aiokafka / aiokafka / client.py
    async def fetch_all_metadata(self):
        cluster_md = ClusterMetadata(
            metadata_max_age_ms=self._metadata_max_age_ms)
        updated = await self._metadata_update(cluster_md, [])
        if not updated:
            raise KafkaError(
                'Unable to get cluster metadata over all known brokers')
        return cluster_md
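
fetch_all_metadata() raises a plain KafkaError when no broker can serve metadata, and the same class of failure surfaces from producer.start() or consumer.start() when the cluster is unreachable (typically as a connection error that subclasses KafkaError). A quick reachability check can therefore catch KafkaError around start(); the helper name and broker address below are placeholders.

import asyncio

from aiokafka import AIOKafkaProducer
from aiokafka.errors import KafkaError


async def cluster_reachable(bootstrap: str) -> bool:
    # start() performs the initial metadata bootstrap, so an unreachable
    # cluster surfaces here as a KafkaError subclass.
    producer = AIOKafkaProducer(bootstrap_servers=bootstrap)
    try:
        await producer.start()
    except KafkaError:
        return False
    await producer.stop()
    return True


print(asyncio.run(cluster_reachable("localhost:9092")))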
github aio-libs / aiokafka / aiokafka / consumer / group_coordinator.py
            # can rejoin.
            return True
        elif error_type is Errors.IllegalGenerationError:
            log.warning(
                "Heartbeat failed for group %s: generation id is not "
                " current.", self.group_id)
            self.reset_generation()
        elif error_type is Errors.UnknownMemberIdError:
            log.warning(
                "Heartbeat failed: local member_id was not recognized;"
                " resetting and re-joining group")
            self.reset_generation()
        elif error_type is Errors.GroupAuthorizationFailedError:
            raise error_type(self.group_id)
        else:
            err = Errors.KafkaError(
                "Unexpected exception in heartbeat task: {!r}".format(
                    error_type()))
            log.error("Heartbeat failed: %r", err)
            raise err
        return False