log.debug("Fetching committed offsets for partitions: %s", partitions)
# construct the request
topic_partitions = collections.defaultdict(list)
for tp in partitions:
    topic_partitions[tp.topic].append(tp.partition)

request = OffsetFetchRequest(
    self.group_id,
    list(topic_partitions.items())
)
response = await self._send_req(request)
offsets = {}
for topic, partitions in response.topics:
    for partition, offset, metadata, error_code in partitions:
        tp = TopicPartition(topic, partition)
        error_type = Errors.for_code(error_code)
        if error_type is not Errors.NoError:
            error = error_type()
            log.debug("Error fetching offset for %s: %s", tp, error)
            if error_type is Errors.GroupLoadInProgressError:
                # just retry
                raise error
            elif error_type is Errors.NotCoordinatorForGroupError:
                # re-discover the coordinator and retry
                self.coordinator_dead()
                raise error
            elif error_type is Errors.UnknownTopicOrPartitionError:
                log.warning(
                    "OffsetFetchRequest -- unknown topic %s", topic)
                continue
            elif error_type is Errors.GroupAuthorizationFailedError:
                raise error_type(self.group_id)
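
# The request construction above hinges on grouping the requested
# TopicPartition objects by topic so each topic appears once in the
# OffsetFetchRequest payload. Below is a minimal, standalone sketch of that
# grouping step, using a namedtuple stand-in for TopicPartition and made-up
# topic/partition values.
import collections
from collections import namedtuple

# Stand-in for aiokafka's TopicPartition (a (topic, partition) namedtuple).
TopicPartition = namedtuple("TopicPartition", ["topic", "partition"])

partitions = [
    TopicPartition("orders", 0),
    TopicPartition("orders", 1),
    TopicPartition("payments", 0),
]

# Group partition ids under their topic, mirroring the defaultdict(list) step.
topic_partitions = collections.defaultdict(list)
for tp in partitions:
    topic_partitions[tp.topic].append(tp.partition)

# list(topic_partitions.items()) is the shape the request body expects:
# [('orders', [0, 1]), ('payments', [0])]
print(list(topic_partitions.items()))
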
if not assignment.active:
    log.debug(
        "Discarding fetch response since the assignment changed during"
        " fetch")
    return False

fetch_offsets = {}
for topic, partitions in request.topics:
    for partition, offset, _ in partitions:
        fetch_offsets[TopicPartition(topic, partition)] = offset

now_ms = int(1000 * time.time())
for topic, partitions in response.topics:
    for partition, error_code, highwater, *part_data in partitions:
        tp = TopicPartition(topic, partition)
        error_type = Errors.for_code(error_code)
        fetch_offset = fetch_offsets[tp]
        tp_state = assignment.state_value(tp)
        if not tp_state.has_valid_position or \
                tp_state.position != fetch_offset:
            log.debug(
                "Discarding fetch response for partition %s "
                "since its offset %s does not match the current "
                "position", tp, fetch_offset)
            continue
        if error_type is Errors.NoError:
            if request.API_VERSION >= 4:
                aborted_transactions = part_data[-2]
                lso = part_data[-3]
            else:
                aborted_transactions = None
                lso = None
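
# The `partition, error_code, highwater, *part_data` unpacking above is what
# lets one loop handle fetch responses whose per-partition tuples gained extra
# fields in newer API versions; negative indexes then pick fields relative to
# the end of the tuple. A small standalone illustration with made-up tuples
# (the exact field layout here is only for the example):
# Legacy format: (partition, error_code, highwater, record_set)
old_style = (0, 0, 100, b"...records...")
# Extended format adds fields before the record set, e.g.
# (partition, error_code, highwater, last_stable_offset,
#  aborted_transactions, record_set)
new_style = (0, 0, 100, 98, [], b"...records...")

for part_info in (old_style, new_style):
    partition, error_code, highwater, *part_data = part_info
    if len(part_data) > 1:   # extended format
        aborted_transactions = part_data[-2]
        lso = part_data[-3]
    else:                    # legacy format
        aborted_transactions = None
        lso = None
    print(partition, highwater, lso, aborted_transactions)
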
async def _do_heartbeat(self):
    version = 0 if self._client.api_version < (0, 11, 0) else 1
    request = HeartbeatRequest[version](
        self.group_id, self.generation, self.member_id)
    log.debug("Heartbeat: %s[%s] %s",
              self.group_id, self.generation, self.member_id)

    # _send_req may fail with an error like `RequestTimedOutError`;
    # catch it here so the coordinator routine won't fail.
    try:
        resp = await self._send_req(request)
    except Errors.KafkaError as err:
        log.error("Heartbeat send request failed: %s. Will retry.", err)
        return False

    error_type = Errors.for_code(resp.error_code)
    if error_type is Errors.NoError:
        log.debug(
            "Received successful heartbeat response for group %s",
            self.group_id)
        return True
    if error_type in (Errors.GroupCoordinatorNotAvailableError,
                      Errors.NotCoordinatorForGroupError):
        log.warning(
            "Heartbeat failed for group %s: coordinator (node %s)"
            " is either not started or not valid",
            self.group_id, self.coordinator_id)
        self.coordinator_dead()
    elif error_type is Errors.RebalanceInProgressError:
        log.warning(
            "Heartbeat failed for group %s because it is rebalancing",
            self.group_id)
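
# In context, _do_heartbeat is driven by a background coroutine that
# heartbeats on an interval and backs off when the call reports failure.
# A rough standalone sketch of such a driver loop, assuming a hypothetical
# heartbeat_once() and made-up interval/backoff values (not aiokafka's
# actual routine):
import asyncio
import logging
import random

log = logging.getLogger(__name__)


async def heartbeat_once() -> bool:
    # Placeholder for a _do_heartbeat-style call; it just simulates an
    # occasional failure here.
    return random.random() > 0.2


async def heartbeat_routine(interval=3.0, retry_backoff=0.5):
    # Send heartbeats forever; on failure wait a shorter backoff before
    # trying again instead of a full interval.
    while True:
        ok = await heartbeat_once()
        if ok:
            await asyncio.sleep(interval)
        else:
            log.warning("Heartbeat failed, retrying in %.1fs", retry_backoff)
            await asyncio.sleep(retry_backoff)

# asyncio.run(heartbeat_routine())  # runs until cancelled
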
    self.member_id,
    OffsetCommitRequest.DEFAULT_RETENTION_TIME,
    [(topic, tp_offsets) for topic, tp_offsets in offset_data.items()]
)
log.debug("Sending offset-commit request with %s for group %s to %s",
          offsets, self.group_id, self.coordinator_id)
response = await self._send_req(request)

errored = collections.OrderedDict()
unauthorized_topics = set()
for topic, partitions in response.topics:
    for partition, error_code in partitions:
        tp = TopicPartition(topic, partition)
        error_type = Errors.for_code(error_code)
        offset = offsets[tp]
        if error_type is Errors.NoError:
            log.debug(
                "Committed offset %s for partition %s", offset, tp)
        elif error_type is Errors.GroupAuthorizationFailedError:
            log.error("OffsetCommit failed for group %s - %s",
                      self.group_id, error_type.__name__)
            errored[tp] = error_type(self.group_id)
        elif error_type is Errors.TopicAuthorizationFailedError:
            unauthorized_topics.add(topic)
        elif error_type in (Errors.OffsetMetadataTooLargeError,
                            Errors.InvalidCommitOffsetSizeError):
            # raise the error to the user
            log.info(
                "OffsetCommit failed for group %s on partition %s"
                " due to %s, will retry", self.group_id, tp,
                error_type.__name__)
def _check_api_version_response(self, response):
    # The logic here is to check the list of supported request versions
    # in descending order. As soon as we find one that works, return it.
    test_cases = [
        # format: (<broker version>, <api key>, <required max version>)
        ((2, 1, 0), MetadataRequest[0].API_KEY, 7),
        ((1, 1, 0), FetchRequest[0].API_KEY, 7),
        ((1, 0, 0), MetadataRequest[0].API_KEY, 5),
        ((0, 11, 0), MetadataRequest[0].API_KEY, 4),
        ((0, 10, 2), OffsetFetchRequest[0].API_KEY, 2),
        ((0, 10, 1), MetadataRequest[0].API_KEY, 2),
    ]

    error_type = Errors.for_code(response.error_code)
    assert error_type is Errors.NoError, "API version check failed"
    max_versions = dict([
        (api_key, max_version)
        for api_key, _, max_version in response.api_versions
    ])
    # Get the best match of test cases
    for broker_version, api_key, version in test_cases:
        if max_versions.get(api_key, -1) >= version:
            return broker_version

    # We know that ApiVersionResponse is only supported in 0.10+
    # so if all else fails, choose that
    return (0, 10, 0)
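
# The probing above only needs the api_key -> max supported version map from
# an ApiVersions response; the first test case whose requirement is met wins.
# A self-contained sketch of the same idea with made-up api_versions data
# (in the Kafka protocol, API key 3 is Metadata and API key 1 is Fetch):
api_versions = [
    (3, 0, 5),   # Metadata supported up to v5 (values made up)
    (1, 0, 6),   # Fetch supported up to v6
]
max_versions = {api_key: max_ver for api_key, _, max_ver in api_versions}

# (broker version, api key, required max version), newest first.
test_cases = [
    ((2, 1, 0), 3, 7),
    ((1, 1, 0), 1, 7),
    ((1, 0, 0), 3, 5),
    ((0, 11, 0), 3, 4),
]


def infer_broker_version():
    for broker_version, api_key, version in test_cases:
        if max_versions.get(api_key, -1) >= version:
            return broker_version
    # ApiVersions itself implies at least a 0.10 broker.
    return (0, 10, 0)


print(infer_broker_version())  # -> (1, 0, 0) with the sample data
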
    if res is None:
        break
    payload, expect_response = res

    # Before Kafka 1.0.0, authentication bytes for SASL were sent
    # without a Kafka header, only with a length prefix. This made
    # error handling hard, so SaslAuthenticateRequest was added to
    # pass proper error messages back to the client.
    if auth_klass is None:
        auth_bytes = await self._send_sasl_token(
            payload, expect_response
        )
    else:
        req = auth_klass(payload)
        resp = await self.send(req)
        error_type = Errors.for_code(resp.error_code)
        if error_type is not Errors.NoError:
            exc = error_type(resp.error_message)
            self.close(reason=CloseReason.AUTH_FAILURE, exc=exc)
            raise exc
        auth_bytes = resp.sasl_auth_bytes

if self._sasl_mechanism == 'GSSAPI':
    self.log.info(
        'Authenticated as %s via GSSAPI',
        self.sasl_principal)
else:
    self.log.info('Authenticated as %s via PLAIN',
                  self._sasl_plain_username)
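
# For the PLAIN mechanism, the payload exchanged above is the standard
# SASL/PLAIN token: authorization id, username and password separated by NUL
# bytes (RFC 4616). A minimal sketch of building that token; the credentials
# are obviously placeholders.
def build_plain_token(username: str, password: str) -> bytes:
    # [authzid] NUL authcid NUL passwd, with an empty authorization id.
    return b"\0".join(
        [b"", username.encode("utf-8"), password.encode("utf-8")]
    )


token = build_plain_token("alice", "secret")
# -> b'\x00alice\x00secret'
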
_new_controller = _new_brokers.get(metadata.controller_id)
_new_partitions = {}
_new_broker_partitions = collections.defaultdict(set)
_new_unauthorized_topics = set()
_new_internal_topics = set()

for topic_data in metadata.topics:
    if metadata.API_VERSION == 0:
        error_code, topic, partitions = topic_data
        is_internal = False
    else:
        error_code, topic, is_internal, partitions = topic_data
    if is_internal:
        _new_internal_topics.add(topic)
    error_type = Errors.for_code(error_code)
    if error_type is Errors.NoError:
        _new_partitions[topic] = {}
        for p_error, partition, leader, replicas, isr in partitions:
            _new_partitions[topic][partition] = PartitionMetadata(
                topic=topic, partition=partition, leader=leader,
                replicas=replicas, isr=isr, error=p_error)
            if leader != -1:
                _new_broker_partitions[leader].add(
                    TopicPartition(topic, partition))
    elif error_type is Errors.LeaderNotAvailableError:
        log.warning("Topic %s is not available during auto-create"
                    " initialization", topic)
    elif error_type is Errors.UnknownTopicOrPartitionError:
        log.error("Topic %s not found in cluster metadata", topic)
    elif error_type is Errors.TopicAuthorizationFailedError:
        _new_unauthorized_topics.add(topic)
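
# A useful by-product of the loop above is the reverse index from leader
# broker id to the partitions it leads (_new_broker_partitions). A small
# standalone sketch of building that index from made-up (topic, partition,
# leader) metadata, with a namedtuple stand-in for TopicPartition:
import collections
from collections import namedtuple

TopicPartition = namedtuple("TopicPartition", ["topic", "partition"])

# (topic, partition, leader broker id); leader == -1 means no leader yet.
partition_meta = [
    ("orders", 0, 1),
    ("orders", 1, 2),
    ("payments", 0, 1),
    ("payments", 1, -1),
]

broker_partitions = collections.defaultdict(set)
for topic, partition, leader in partition_meta:
    if leader != -1:
        broker_partitions[leader].add(TopicPartition(topic, partition))

# Broker 1 leads orders-0 and payments-0, broker 2 leads orders-1.
print(dict(broker_partitions))
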
def handle_response(self, response):
    for topic, partitions in response.topics:
        for partition_info in partitions:
            if response.API_VERSION < 2:
                partition, error_code, offset = partition_info
                # Mimic CREATE_TIME to take user provided timestamp
                timestamp = -1
            else:
                partition, error_code, offset, timestamp = partition_info
            tp = TopicPartition(topic, partition)
            error = Errors.for_code(error_code)
            batch = self._batches.get(tp)
            if batch is None:
                continue
            if error is Errors.NoError:
                batch.done(offset, timestamp)
            elif error is DuplicateSequenceNumber:
                # If we have received a duplicate sequence error,
                # it means that the sequence number has advanced
                # beyond the sequence of the current batch, and we
                # haven't retained batch metadata on the broker to
                # return the correct offset and timestamp.
                #
                # The only thing we can do is to return success to
                # the user and not return a valid offset and
                # timestamp.
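
# The comment above concludes that the only option is to treat the duplicate
# as a success without a usable offset/timestamp. A toy sketch of that
# dispatch decision, using stand-in error classes and a toy batch object
# rather than aiokafka's internals:
class NoError:
    """Stand-in for Errors.NoError."""


class DuplicateSequenceNumber:
    """Stand-in for the duplicate-sequence error class."""


class ToyBatch:
    """Minimal stand-in for a producer batch future."""

    def __init__(self):
        self.delivered = False
        self.offset = None
        self.timestamp = None

    def done(self, offset, timestamp):
        self.delivered = True
        self.offset = offset
        self.timestamp = timestamp


def handle_partition_result(batch, error, offset, timestamp):
    if error is NoError:
        batch.done(offset, timestamp)
    elif error is DuplicateSequenceNumber:
        # Duplicate of a batch the broker already committed: report success,
        # but no valid offset or timestamp is available.
        batch.done(offset=-1, timestamp=-1)


batch = ToyBatch()
handle_partition_result(batch, DuplicateSequenceNumber, offset=123, timestamp=0)
print(batch.delivered, batch.offset)  # True -1
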