repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
Yelp/kafka-utils | kafka_utils/util/__init__.py | to_h | def to_h(num, suffix='B'):
"""Converts a byte value in human readable form."""
if num is None: # Show None when data is missing
return "None"
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix) | python | def to_h(num, suffix='B'):
"""Converts a byte value in human readable form."""
if num is None: # Show None when data is missing
return "None"
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix) | [
"def",
"to_h",
"(",
"num",
",",
"suffix",
"=",
"'B'",
")",
":",
"if",
"num",
"is",
"None",
":",
"# Show None when data is missing",
"return",
"\"None\"",
"for",
"unit",
"in",
"[",
"''",
",",
"'Ki'",
",",
"'Mi'",
",",
"'Gi'",
",",
"'Ti'",
",",
"'Pi'",
... | Converts a byte value in human readable form. | [
"Converts",
"a",
"byte",
"value",
"in",
"human",
"readable",
"form",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/__init__.py#L110-L118 | train | 202,600 |
Yelp/kafka-utils | kafka_utils/util/__init__.py | format_to_json | def format_to_json(data):
"""Converts `data` into json
If stdout is a tty it performs a pretty print.
"""
if sys.stdout.isatty():
return json.dumps(data, indent=4, separators=(',', ': '))
else:
return json.dumps(data) | python | def format_to_json(data):
"""Converts `data` into json
If stdout is a tty it performs a pretty print.
"""
if sys.stdout.isatty():
return json.dumps(data, indent=4, separators=(',', ': '))
else:
return json.dumps(data) | [
"def",
"format_to_json",
"(",
"data",
")",
":",
"if",
"sys",
".",
"stdout",
".",
"isatty",
"(",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"data",
",",
"indent",
"=",
"4",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
"else",
":"... | Converts `data` into json
If stdout is a tty it performs a pretty print. | [
"Converts",
"data",
"into",
"json",
"If",
"stdout",
"is",
"a",
"tty",
"it",
"performs",
"a",
"pretty",
"print",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/__init__.py#L141-L148 | train | 202,601 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/cluster_topology.py | ClusterTopology._build_brokers | def _build_brokers(self, brokers):
"""Build broker objects using broker-ids."""
for broker_id, metadata in six.iteritems(brokers):
self.brokers[broker_id] = self._create_broker(broker_id, metadata) | python | def _build_brokers(self, brokers):
"""Build broker objects using broker-ids."""
for broker_id, metadata in six.iteritems(brokers):
self.brokers[broker_id] = self._create_broker(broker_id, metadata) | [
"def",
"_build_brokers",
"(",
"self",
",",
"brokers",
")",
":",
"for",
"broker_id",
",",
"metadata",
"in",
"six",
".",
"iteritems",
"(",
"brokers",
")",
":",
"self",
".",
"brokers",
"[",
"broker_id",
"]",
"=",
"self",
".",
"_create_broker",
"(",
"broker_... | Build broker objects using broker-ids. | [
"Build",
"broker",
"objects",
"using",
"broker",
"-",
"ids",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/cluster_topology.py#L79-L82 | train | 202,602 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/cluster_topology.py | ClusterTopology._create_broker | def _create_broker(self, broker_id, metadata=None):
"""Create a broker object and assign to a replication group.
A broker object with no metadata is considered inactive.
An inactive broker may or may not belong to a group.
"""
broker = Broker(broker_id, metadata)
if not metadata:
broker.mark_inactive()
rg_id = self.extract_group(broker)
group = self.rgs.setdefault(rg_id, ReplicationGroup(rg_id))
group.add_broker(broker)
broker.replication_group = group
return broker | python | def _create_broker(self, broker_id, metadata=None):
"""Create a broker object and assign to a replication group.
A broker object with no metadata is considered inactive.
An inactive broker may or may not belong to a group.
"""
broker = Broker(broker_id, metadata)
if not metadata:
broker.mark_inactive()
rg_id = self.extract_group(broker)
group = self.rgs.setdefault(rg_id, ReplicationGroup(rg_id))
group.add_broker(broker)
broker.replication_group = group
return broker | [
"def",
"_create_broker",
"(",
"self",
",",
"broker_id",
",",
"metadata",
"=",
"None",
")",
":",
"broker",
"=",
"Broker",
"(",
"broker_id",
",",
"metadata",
")",
"if",
"not",
"metadata",
":",
"broker",
".",
"mark_inactive",
"(",
")",
"rg_id",
"=",
"self",... | Create a broker object and assign to a replication group.
A broker object with no metadata is considered inactive.
An inactive broker may or may not belong to a group. | [
"Create",
"a",
"broker",
"object",
"and",
"assign",
"to",
"a",
"replication",
"group",
".",
"A",
"broker",
"object",
"with",
"no",
"metadata",
"is",
"considered",
"inactive",
".",
"An",
"inactive",
"broker",
"may",
"or",
"may",
"not",
"belong",
"to",
"a",
... | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/cluster_topology.py#L84-L96 | train | 202,603 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/cluster_topology.py | ClusterTopology._build_partitions | def _build_partitions(self, assignment):
"""Builds all partition objects and update corresponding broker and
topic objects.
"""
self.partitions = {}
for partition_name, replica_ids in six.iteritems(assignment):
# Get topic
topic_id = partition_name[0]
partition_id = partition_name[1]
topic = self.topics.setdefault(
topic_id,
Topic(topic_id, replication_factor=len(replica_ids))
)
# Creating partition object
partition = Partition(
topic,
partition_id,
weight=self.partition_measurer.get_weight(partition_name),
size=self.partition_measurer.get_size(partition_name),
)
self.partitions[partition_name] = partition
topic.add_partition(partition)
# Updating corresponding broker objects
for broker_id in replica_ids:
# Check if broker-id is present in current active brokers
if broker_id not in list(self.brokers.keys()):
self.log.warning(
"Broker %s containing partition %s is not in "
"active brokers.",
broker_id,
partition,
)
self.brokers[broker_id] = self._create_broker(broker_id)
self.brokers[broker_id].add_partition(partition) | python | def _build_partitions(self, assignment):
"""Builds all partition objects and update corresponding broker and
topic objects.
"""
self.partitions = {}
for partition_name, replica_ids in six.iteritems(assignment):
# Get topic
topic_id = partition_name[0]
partition_id = partition_name[1]
topic = self.topics.setdefault(
topic_id,
Topic(topic_id, replication_factor=len(replica_ids))
)
# Creating partition object
partition = Partition(
topic,
partition_id,
weight=self.partition_measurer.get_weight(partition_name),
size=self.partition_measurer.get_size(partition_name),
)
self.partitions[partition_name] = partition
topic.add_partition(partition)
# Updating corresponding broker objects
for broker_id in replica_ids:
# Check if broker-id is present in current active brokers
if broker_id not in list(self.brokers.keys()):
self.log.warning(
"Broker %s containing partition %s is not in "
"active brokers.",
broker_id,
partition,
)
self.brokers[broker_id] = self._create_broker(broker_id)
self.brokers[broker_id].add_partition(partition) | [
"def",
"_build_partitions",
"(",
"self",
",",
"assignment",
")",
":",
"self",
".",
"partitions",
"=",
"{",
"}",
"for",
"partition_name",
",",
"replica_ids",
"in",
"six",
".",
"iteritems",
"(",
"assignment",
")",
":",
"# Get topic",
"topic_id",
"=",
"partitio... | Builds all partition objects and update corresponding broker and
topic objects. | [
"Builds",
"all",
"partition",
"objects",
"and",
"update",
"corresponding",
"broker",
"and",
"topic",
"objects",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/cluster_topology.py#L98-L134 | train | 202,604 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/cluster_topology.py | ClusterTopology.active_brokers | def active_brokers(self):
"""Set of brokers that are not inactive or decommissioned."""
return {
broker for broker in six.itervalues(self.brokers)
if not broker.inactive and not broker.decommissioned
} | python | def active_brokers(self):
"""Set of brokers that are not inactive or decommissioned."""
return {
broker for broker in six.itervalues(self.brokers)
if not broker.inactive and not broker.decommissioned
} | [
"def",
"active_brokers",
"(",
"self",
")",
":",
"return",
"{",
"broker",
"for",
"broker",
"in",
"six",
".",
"itervalues",
"(",
"self",
".",
"brokers",
")",
"if",
"not",
"broker",
".",
"inactive",
"and",
"not",
"broker",
".",
"decommissioned",
"}"
] | Set of brokers that are not inactive or decommissioned. | [
"Set",
"of",
"brokers",
"that",
"are",
"not",
"inactive",
"or",
"decommissioned",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/cluster_topology.py#L137-L142 | train | 202,605 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/cluster_topology.py | ClusterTopology.replace_broker | def replace_broker(self, source_id, dest_id):
"""Move all partitions in source broker to destination broker.
:param source_id: source broker-id
:param dest_id: destination broker-id
:raises: InvalidBrokerIdError, when either of given broker-ids is invalid.
"""
try:
source = self.brokers[source_id]
dest = self.brokers[dest_id]
# Move all partitions from source to destination broker
for partition in source.partitions.copy(): # Partitions set changes
# We cannot move partition directly since that re-orders the
# replicas for the partition
source.partitions.remove(partition)
dest.partitions.add(partition)
# Replace broker in replica
partition.replace(source, dest)
except KeyError as e:
self.log.error("Invalid broker id %s.", e.args[0])
raise InvalidBrokerIdError(
"Broker id {} does not exist in cluster".format(e.args[0])
) | python | def replace_broker(self, source_id, dest_id):
"""Move all partitions in source broker to destination broker.
:param source_id: source broker-id
:param dest_id: destination broker-id
:raises: InvalidBrokerIdError, when either of given broker-ids is invalid.
"""
try:
source = self.brokers[source_id]
dest = self.brokers[dest_id]
# Move all partitions from source to destination broker
for partition in source.partitions.copy(): # Partitions set changes
# We cannot move partition directly since that re-orders the
# replicas for the partition
source.partitions.remove(partition)
dest.partitions.add(partition)
# Replace broker in replica
partition.replace(source, dest)
except KeyError as e:
self.log.error("Invalid broker id %s.", e.args[0])
raise InvalidBrokerIdError(
"Broker id {} does not exist in cluster".format(e.args[0])
) | [
"def",
"replace_broker",
"(",
"self",
",",
"source_id",
",",
"dest_id",
")",
":",
"try",
":",
"source",
"=",
"self",
".",
"brokers",
"[",
"source_id",
"]",
"dest",
"=",
"self",
".",
"brokers",
"[",
"dest_id",
"]",
"# Move all partitions from source to destinat... | Move all partitions in source broker to destination broker.
:param source_id: source broker-id
:param dest_id: destination broker-id
:raises: InvalidBrokerIdError, when either of given broker-ids is invalid. | [
"Move",
"all",
"partitions",
"in",
"source",
"broker",
"to",
"destination",
"broker",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/cluster_topology.py#L154-L176 | train | 202,606 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/cluster_topology.py | ClusterTopology.update_cluster_topology | def update_cluster_topology(self, assignment):
"""Modify the cluster-topology with given assignment.
Change the replica set of partitions as in given assignment.
:param assignment: dict representing actions to be used to update the current
cluster-topology
:raises: InvalidBrokerIdError when broker-id is invalid
:raises: InvalidPartitionError when partition-name is invalid
"""
try:
for partition_name, replica_ids in six.iteritems(assignment):
try:
new_replicas = [self.brokers[b_id] for b_id in replica_ids]
except KeyError:
self.log.error(
"Invalid replicas %s for topic-partition %s-%s.",
', '.join([str(id) for id in replica_ids]),
partition_name[0],
partition_name[1],
)
raise InvalidBrokerIdError(
"Invalid replicas {0}.".format(
', '.join([str(id) for id in replica_ids])
),
)
try:
partition = self.partitions[partition_name]
old_replicas = [broker for broker in partition.replicas]
# No change needed. Save ourself some CPU time.
# Replica order matters as the first one is the leader.
if new_replicas == old_replicas:
continue
# Remove old partitions from broker
# This also updates partition replicas
for broker in old_replicas:
broker.remove_partition(partition)
# Add new partition to brokers
for broker in new_replicas:
broker.add_partition(partition)
except KeyError:
self.log.error(
"Invalid topic-partition %s-%s.",
partition_name[0],
partition_name[1],
)
raise InvalidPartitionError(
"Invalid topic-partition {0}-{1}."
.format(partition_name[0], partition_name[1]),
)
except KeyError:
self.log.error("Could not parse given assignment {0}".format(assignment))
raise | python | def update_cluster_topology(self, assignment):
"""Modify the cluster-topology with given assignment.
Change the replica set of partitions as in given assignment.
:param assignment: dict representing actions to be used to update the current
cluster-topology
:raises: InvalidBrokerIdError when broker-id is invalid
:raises: InvalidPartitionError when partition-name is invalid
"""
try:
for partition_name, replica_ids in six.iteritems(assignment):
try:
new_replicas = [self.brokers[b_id] for b_id in replica_ids]
except KeyError:
self.log.error(
"Invalid replicas %s for topic-partition %s-%s.",
', '.join([str(id) for id in replica_ids]),
partition_name[0],
partition_name[1],
)
raise InvalidBrokerIdError(
"Invalid replicas {0}.".format(
', '.join([str(id) for id in replica_ids])
),
)
try:
partition = self.partitions[partition_name]
old_replicas = [broker for broker in partition.replicas]
# No change needed. Save ourself some CPU time.
# Replica order matters as the first one is the leader.
if new_replicas == old_replicas:
continue
# Remove old partitions from broker
# This also updates partition replicas
for broker in old_replicas:
broker.remove_partition(partition)
# Add new partition to brokers
for broker in new_replicas:
broker.add_partition(partition)
except KeyError:
self.log.error(
"Invalid topic-partition %s-%s.",
partition_name[0],
partition_name[1],
)
raise InvalidPartitionError(
"Invalid topic-partition {0}-{1}."
.format(partition_name[0], partition_name[1]),
)
except KeyError:
self.log.error("Could not parse given assignment {0}".format(assignment))
raise | [
"def",
"update_cluster_topology",
"(",
"self",
",",
"assignment",
")",
":",
"try",
":",
"for",
"partition_name",
",",
"replica_ids",
"in",
"six",
".",
"iteritems",
"(",
"assignment",
")",
":",
"try",
":",
"new_replicas",
"=",
"[",
"self",
".",
"brokers",
"... | Modify the cluster-topology with given assignment.
Change the replica set of partitions as in given assignment.
:param assignment: dict representing actions to be used to update the current
cluster-topology
:raises: InvalidBrokerIdError when broker-id is invalid
:raises: InvalidPartitionError when partition-name is invalid | [
"Modify",
"the",
"cluster",
"-",
"topology",
"with",
"given",
"assignment",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/cluster_topology.py#L178-L233 | train | 202,607 |
Yelp/kafka-utils | kafka_utils/util/validation.py | plan_to_assignment | def plan_to_assignment(plan):
"""Convert the plan to the format used by cluster-topology."""
assignment = {}
for elem in plan['partitions']:
assignment[
(elem['topic'], elem['partition'])
] = elem['replicas']
return assignment | python | def plan_to_assignment(plan):
"""Convert the plan to the format used by cluster-topology."""
assignment = {}
for elem in plan['partitions']:
assignment[
(elem['topic'], elem['partition'])
] = elem['replicas']
return assignment | [
"def",
"plan_to_assignment",
"(",
"plan",
")",
":",
"assignment",
"=",
"{",
"}",
"for",
"elem",
"in",
"plan",
"[",
"'partitions'",
"]",
":",
"assignment",
"[",
"(",
"elem",
"[",
"'topic'",
"]",
",",
"elem",
"[",
"'partition'",
"]",
")",
"]",
"=",
"el... | Convert the plan to the format used by cluster-topology. | [
"Convert",
"the",
"plan",
"to",
"the",
"format",
"used",
"by",
"cluster",
"-",
"topology",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/validation.py#L26-L33 | train | 202,608 |
Yelp/kafka-utils | kafka_utils/util/validation.py | assignment_to_plan | def assignment_to_plan(assignment):
"""Convert an assignment to the format used by Kafka to
describe a reassignment plan.
"""
return {
'version': 1,
'partitions':
[{'topic': t_p[0],
'partition': t_p[1],
'replicas': replica
} for t_p, replica in six.iteritems(assignment)]
} | python | def assignment_to_plan(assignment):
"""Convert an assignment to the format used by Kafka to
describe a reassignment plan.
"""
return {
'version': 1,
'partitions':
[{'topic': t_p[0],
'partition': t_p[1],
'replicas': replica
} for t_p, replica in six.iteritems(assignment)]
} | [
"def",
"assignment_to_plan",
"(",
"assignment",
")",
":",
"return",
"{",
"'version'",
":",
"1",
",",
"'partitions'",
":",
"[",
"{",
"'topic'",
":",
"t_p",
"[",
"0",
"]",
",",
"'partition'",
":",
"t_p",
"[",
"1",
"]",
",",
"'replicas'",
":",
"replica",
... | Convert an assignment to the format used by Kafka to
describe a reassignment plan. | [
"Convert",
"an",
"assignment",
"to",
"the",
"format",
"used",
"by",
"Kafka",
"to",
"describe",
"a",
"reassignment",
"plan",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/validation.py#L36-L47 | train | 202,609 |
Yelp/kafka-utils | kafka_utils/util/validation.py | validate_plan | def validate_plan(
new_plan,
base_plan=None,
is_partition_subset=True,
allow_rf_change=False,
):
"""Verify that the new plan is valid for execution.
Given kafka-reassignment plan should affirm with following rules:
- Plan should have at least one partition for re-assignment
- Partition-name list should be subset of base-plan partition-list
- Replication-factor for each partition of same topic is same
- Replication-factor for each partition remains unchanged
- No duplicate broker-ids in each replicas
"""
if not _validate_plan(new_plan):
_log.error('Invalid proposed-plan.')
return False
# Validate given plan in reference to base-plan
if base_plan:
if not _validate_plan(base_plan):
_log.error('Invalid assignment from cluster.')
return False
if not _validate_plan_base(
new_plan,
base_plan,
is_partition_subset,
allow_rf_change
):
return False
# Plan validation successful
return True | python | def validate_plan(
new_plan,
base_plan=None,
is_partition_subset=True,
allow_rf_change=False,
):
"""Verify that the new plan is valid for execution.
Given kafka-reassignment plan should affirm with following rules:
- Plan should have at least one partition for re-assignment
- Partition-name list should be subset of base-plan partition-list
- Replication-factor for each partition of same topic is same
- Replication-factor for each partition remains unchanged
- No duplicate broker-ids in each replicas
"""
if not _validate_plan(new_plan):
_log.error('Invalid proposed-plan.')
return False
# Validate given plan in reference to base-plan
if base_plan:
if not _validate_plan(base_plan):
_log.error('Invalid assignment from cluster.')
return False
if not _validate_plan_base(
new_plan,
base_plan,
is_partition_subset,
allow_rf_change
):
return False
# Plan validation successful
return True | [
"def",
"validate_plan",
"(",
"new_plan",
",",
"base_plan",
"=",
"None",
",",
"is_partition_subset",
"=",
"True",
",",
"allow_rf_change",
"=",
"False",
",",
")",
":",
"if",
"not",
"_validate_plan",
"(",
"new_plan",
")",
":",
"_log",
".",
"error",
"(",
"'Inv... | Verify that the new plan is valid for execution.
Given kafka-reassignment plan should affirm with following rules:
- Plan should have at least one partition for re-assignment
- Partition-name list should be subset of base-plan partition-list
- Replication-factor for each partition of same topic is same
- Replication-factor for each partition remains unchanged
- No duplicate broker-ids in each replicas | [
"Verify",
"that",
"the",
"new",
"plan",
"is",
"valid",
"for",
"execution",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/validation.py#L50-L82 | train | 202,610 |
Yelp/kafka-utils | kafka_utils/util/validation.py | _validate_plan_base | def _validate_plan_base(
new_plan,
base_plan,
is_partition_subset=True,
allow_rf_change=False,
):
"""Validate if given plan is valid comparing with given base-plan.
Validate following assertions:
- Partition-check: New partition-set should be subset of base-partition set
- Replica-count check: Replication-factor for each partition remains same
- Broker-check: New broker-set should be subset of base broker-set
"""
# Verify that partitions in plan are subset of base plan.
new_partitions = set([
(p_data['topic'], p_data['partition'])
for p_data in new_plan['partitions']
])
base_partitions = set([
(p_data['topic'], p_data['partition'])
for p_data in base_plan['partitions']
])
if is_partition_subset:
invalid_partitions = list(new_partitions - base_partitions)
else:
# partition set should be equal
invalid_partitions = list(
new_partitions.union(base_partitions) -
new_partitions.intersection(base_partitions),
)
if invalid_partitions:
_log.error(
'Invalid partition(s) found: {p_list}'.format(
p_list=invalid_partitions,
)
)
return False
# Verify replication-factor remains consistent
base_partition_replicas = {
(p_data['topic'], p_data['partition']): p_data['replicas']
for p_data in base_plan['partitions']
}
new_partition_replicas = {
(p_data['topic'], p_data['partition']): p_data['replicas']
for p_data in new_plan['partitions']
}
if not allow_rf_change:
invalid_replication_factor = False
for new_partition, replicas in six.iteritems(new_partition_replicas):
base_replica_cnt = len(base_partition_replicas[new_partition])
if len(replicas) != base_replica_cnt:
invalid_replication_factor = True
_log.error(
'Replication-factor Mismatch: Partition: {partition}: '
'Base-replicas: {expected}, Proposed-replicas: {actual}'
.format(
partition=new_partition,
expected=base_partition_replicas[new_partition],
actual=replicas,
),
)
if invalid_replication_factor:
return False
# Validation successful
return True | python | def _validate_plan_base(
new_plan,
base_plan,
is_partition_subset=True,
allow_rf_change=False,
):
"""Validate if given plan is valid comparing with given base-plan.
Validate following assertions:
- Partition-check: New partition-set should be subset of base-partition set
- Replica-count check: Replication-factor for each partition remains same
- Broker-check: New broker-set should be subset of base broker-set
"""
# Verify that partitions in plan are subset of base plan.
new_partitions = set([
(p_data['topic'], p_data['partition'])
for p_data in new_plan['partitions']
])
base_partitions = set([
(p_data['topic'], p_data['partition'])
for p_data in base_plan['partitions']
])
if is_partition_subset:
invalid_partitions = list(new_partitions - base_partitions)
else:
# partition set should be equal
invalid_partitions = list(
new_partitions.union(base_partitions) -
new_partitions.intersection(base_partitions),
)
if invalid_partitions:
_log.error(
'Invalid partition(s) found: {p_list}'.format(
p_list=invalid_partitions,
)
)
return False
# Verify replication-factor remains consistent
base_partition_replicas = {
(p_data['topic'], p_data['partition']): p_data['replicas']
for p_data in base_plan['partitions']
}
new_partition_replicas = {
(p_data['topic'], p_data['partition']): p_data['replicas']
for p_data in new_plan['partitions']
}
if not allow_rf_change:
invalid_replication_factor = False
for new_partition, replicas in six.iteritems(new_partition_replicas):
base_replica_cnt = len(base_partition_replicas[new_partition])
if len(replicas) != base_replica_cnt:
invalid_replication_factor = True
_log.error(
'Replication-factor Mismatch: Partition: {partition}: '
'Base-replicas: {expected}, Proposed-replicas: {actual}'
.format(
partition=new_partition,
expected=base_partition_replicas[new_partition],
actual=replicas,
),
)
if invalid_replication_factor:
return False
# Validation successful
return True | [
"def",
"_validate_plan_base",
"(",
"new_plan",
",",
"base_plan",
",",
"is_partition_subset",
"=",
"True",
",",
"allow_rf_change",
"=",
"False",
",",
")",
":",
"# Verify that partitions in plan are subset of base plan.",
"new_partitions",
"=",
"set",
"(",
"[",
"(",
"p_... | Validate if given plan is valid comparing with given base-plan.
Validate following assertions:
- Partition-check: New partition-set should be subset of base-partition set
- Replica-count check: Replication-factor for each partition remains same
- Broker-check: New broker-set should be subset of base broker-set | [
"Validate",
"if",
"given",
"plan",
"is",
"valid",
"comparing",
"with",
"given",
"base",
"-",
"plan",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/validation.py#L85-L152 | train | 202,611 |
Yelp/kafka-utils | kafka_utils/util/validation.py | _validate_format | def _validate_format(plan):
"""Validate if the format of the plan as expected.
Validate format of plan on following rules:
a) Verify if it ONLY and MUST have keys and value, 'version' and 'partitions'
b) Verify if each value of 'partitions' ONLY and MUST have keys 'replicas',
'partition', 'topic'
c) Verify desired type of each value
d) Verify non-empty partitions and replicas
Sample-plan format:
{
"version": 1,
"partitions": [
{"partition":0, "topic":'t1', "replicas":[0,1,2]},
{"partition":0, "topic":'t2', "replicas":[1,2]},
...
]}
"""
# Verify presence of required keys
if set(plan.keys()) != set(['version', 'partitions']):
_log.error(
'Invalid or incomplete keys in given plan. Expected: "version", '
'"partitions". Found:{keys}'
.format(keys=', '.join(list(plan.keys()))),
)
return False
# Invalid version
if plan['version'] != 1:
_log.error(
'Invalid version of plan {version}'
.format(version=plan['version']),
)
return False
# Empty partitions
if not plan['partitions']:
_log.error(
'"partitions" list found empty"'
.format(version=plan['partitions']),
)
return False
# Invalid partitions type
if not isinstance(plan['partitions'], list):
_log.error('"partitions" of type list expected.')
return False
# Invalid partition-data
for p_data in plan['partitions']:
if set(p_data.keys()) != set(['topic', 'partition', 'replicas']):
_log.error(
'Invalid keys in partition-data {keys}'
.format(keys=', '.join(list(p_data.keys()))),
)
return False
# Check types
if not isinstance(p_data['topic'], six.text_type):
_log.error(
'"topic" of type unicode expected {p_data}, found {t_type}'
.format(p_data=p_data, t_type=type(p_data['topic'])),
)
return False
if not isinstance(p_data['partition'], int):
_log.error(
'"partition" of type int expected {p_data}, found {p_type}'
.format(p_data=p_data, p_type=type(p_data['partition'])),
)
return False
if not isinstance(p_data['replicas'], list):
_log.error(
'"replicas" of type list expected {p_data}, found {r_type}'
.format(p_data=p_data, r_type=type(p_data['replicas'])),
)
return False
if not p_data['replicas']:
_log.error(
'Non-empty "replicas" expected: {p_data}'
.format(p_data=p_data),
)
return False
# Invalid broker-type
for broker in p_data['replicas']:
if not isinstance(broker, int):
_log.error(
'"replicas" of type integer list expected {p_data}'
.format(p_data=p_data),
)
return False
return True | python | def _validate_format(plan):
"""Validate if the format of the plan as expected.
Validate format of plan on following rules:
a) Verify if it ONLY and MUST have keys and value, 'version' and 'partitions'
b) Verify if each value of 'partitions' ONLY and MUST have keys 'replicas',
'partition', 'topic'
c) Verify desired type of each value
d) Verify non-empty partitions and replicas
Sample-plan format:
{
"version": 1,
"partitions": [
{"partition":0, "topic":'t1', "replicas":[0,1,2]},
{"partition":0, "topic":'t2', "replicas":[1,2]},
...
]}
"""
# Verify presence of required keys
if set(plan.keys()) != set(['version', 'partitions']):
_log.error(
'Invalid or incomplete keys in given plan. Expected: "version", '
'"partitions". Found:{keys}'
.format(keys=', '.join(list(plan.keys()))),
)
return False
# Invalid version
if plan['version'] != 1:
_log.error(
'Invalid version of plan {version}'
.format(version=plan['version']),
)
return False
# Empty partitions
if not plan['partitions']:
_log.error(
'"partitions" list found empty"'
.format(version=plan['partitions']),
)
return False
# Invalid partitions type
if not isinstance(plan['partitions'], list):
_log.error('"partitions" of type list expected.')
return False
# Invalid partition-data
for p_data in plan['partitions']:
if set(p_data.keys()) != set(['topic', 'partition', 'replicas']):
_log.error(
'Invalid keys in partition-data {keys}'
.format(keys=', '.join(list(p_data.keys()))),
)
return False
# Check types
if not isinstance(p_data['topic'], six.text_type):
_log.error(
'"topic" of type unicode expected {p_data}, found {t_type}'
.format(p_data=p_data, t_type=type(p_data['topic'])),
)
return False
if not isinstance(p_data['partition'], int):
_log.error(
'"partition" of type int expected {p_data}, found {p_type}'
.format(p_data=p_data, p_type=type(p_data['partition'])),
)
return False
if not isinstance(p_data['replicas'], list):
_log.error(
'"replicas" of type list expected {p_data}, found {r_type}'
.format(p_data=p_data, r_type=type(p_data['replicas'])),
)
return False
if not p_data['replicas']:
_log.error(
'Non-empty "replicas" expected: {p_data}'
.format(p_data=p_data),
)
return False
# Invalid broker-type
for broker in p_data['replicas']:
if not isinstance(broker, int):
_log.error(
'"replicas" of type integer list expected {p_data}'
.format(p_data=p_data),
)
return False
return True | [
"def",
"_validate_format",
"(",
"plan",
")",
":",
"# Verify presence of required keys",
"if",
"set",
"(",
"plan",
".",
"keys",
"(",
")",
")",
"!=",
"set",
"(",
"[",
"'version'",
",",
"'partitions'",
"]",
")",
":",
"_log",
".",
"error",
"(",
"'Invalid or in... | Validate if the format of the plan as expected.
Validate format of plan on following rules:
a) Verify if it ONLY and MUST have keys and value, 'version' and 'partitions'
b) Verify if each value of 'partitions' ONLY and MUST have keys 'replicas',
'partition', 'topic'
c) Verify desired type of each value
d) Verify non-empty partitions and replicas
Sample-plan format:
{
"version": 1,
"partitions": [
{"partition":0, "topic":'t1', "replicas":[0,1,2]},
{"partition":0, "topic":'t2', "replicas":[1,2]},
...
]} | [
"Validate",
"if",
"the",
"format",
"of",
"the",
"plan",
"as",
"expected",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/validation.py#L155-L244 | train | 202,612 |
Yelp/kafka-utils | kafka_utils/util/validation.py | _validate_plan | def _validate_plan(plan):
"""Validate if given plan is valid based on kafka-cluster-assignment protocols.
Validate following parameters:
- Correct format of plan
- Partition-list should be unique
- Every partition of a topic should have same replication-factor
- Replicas of a partition should have unique broker-set
"""
# Validate format of plan
if not _validate_format(plan):
return False
# Verify no duplicate partitions
partition_names = [
(p_data['topic'], p_data['partition'])
for p_data in plan['partitions']
]
duplicate_partitions = [
partition for partition, count in six.iteritems(Counter(partition_names))
if count > 1
]
if duplicate_partitions:
_log.error(
'Duplicate partitions in plan {p_list}'
.format(p_list=duplicate_partitions),
)
return False
# Verify no duplicate brokers in partition-replicas
dup_replica_brokers = []
for p_data in plan['partitions']:
dup_replica_brokers = [
broker
for broker, count in Counter(p_data['replicas']).items()
if count > 1
]
if dup_replica_brokers:
_log.error(
'Duplicate brokers: ({topic}, {p_id}) in replicas {replicas}'
.format(
topic=p_data['topic'],
p_id=p_data['partition'],
replicas=p_data['replicas'],
)
)
return False
# Verify same replication-factor for every topic
topic_replication_factor = {}
for partition_info in plan['partitions']:
topic = partition_info['topic']
replication_factor = len(partition_info['replicas'])
if topic in list(topic_replication_factor.keys()):
if topic_replication_factor[topic] != replication_factor:
_log.error(
'Mismatch in replication-factor of partitions for topic '
'{topic}'.format(topic=topic),
)
return False
else:
topic_replication_factor[topic] = replication_factor
return True | python | def _validate_plan(plan):
"""Validate if given plan is valid based on kafka-cluster-assignment protocols.
Validate following parameters:
- Correct format of plan
- Partition-list should be unique
- Every partition of a topic should have same replication-factor
- Replicas of a partition should have unique broker-set
"""
# Validate format of plan
if not _validate_format(plan):
return False
# Verify no duplicate partitions
partition_names = [
(p_data['topic'], p_data['partition'])
for p_data in plan['partitions']
]
duplicate_partitions = [
partition for partition, count in six.iteritems(Counter(partition_names))
if count > 1
]
if duplicate_partitions:
_log.error(
'Duplicate partitions in plan {p_list}'
.format(p_list=duplicate_partitions),
)
return False
# Verify no duplicate brokers in partition-replicas
dup_replica_brokers = []
for p_data in plan['partitions']:
dup_replica_brokers = [
broker
for broker, count in Counter(p_data['replicas']).items()
if count > 1
]
if dup_replica_brokers:
_log.error(
'Duplicate brokers: ({topic}, {p_id}) in replicas {replicas}'
.format(
topic=p_data['topic'],
p_id=p_data['partition'],
replicas=p_data['replicas'],
)
)
return False
# Verify same replication-factor for every topic
topic_replication_factor = {}
for partition_info in plan['partitions']:
topic = partition_info['topic']
replication_factor = len(partition_info['replicas'])
if topic in list(topic_replication_factor.keys()):
if topic_replication_factor[topic] != replication_factor:
_log.error(
'Mismatch in replication-factor of partitions for topic '
'{topic}'.format(topic=topic),
)
return False
else:
topic_replication_factor[topic] = replication_factor
return True | [
"def",
"_validate_plan",
"(",
"plan",
")",
":",
"# Validate format of plan",
"if",
"not",
"_validate_format",
"(",
"plan",
")",
":",
"return",
"False",
"# Verify no duplicate partitions",
"partition_names",
"=",
"[",
"(",
"p_data",
"[",
"'topic'",
"]",
",",
"p_dat... | Validate if given plan is valid based on kafka-cluster-assignment protocols.
Validate following parameters:
- Correct format of plan
- Partition-list should be unique
- Every partition of a topic should have same replication-factor
- Replicas of a partition should have unique broker-set | [
"Validate",
"if",
"given",
"plan",
"is",
"valid",
"based",
"on",
"kafka",
"-",
"cluster",
"-",
"assignment",
"protocols",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/validation.py#L247-L309 | train | 202,613 |
Yelp/kafka-utils | kafka_utils/kafka_consumer_manager/commands/offset_get.py | OffsetGet.percentage_distance | def percentage_distance(cls, highmark, current):
"""Percentage of distance the current offset is behind the highmark."""
highmark = int(highmark)
current = int(current)
if highmark > 0:
return round(
(highmark - current) * 100.0 / highmark,
2,
)
else:
return 0.0 | python | def percentage_distance(cls, highmark, current):
"""Percentage of distance the current offset is behind the highmark."""
highmark = int(highmark)
current = int(current)
if highmark > 0:
return round(
(highmark - current) * 100.0 / highmark,
2,
)
else:
return 0.0 | [
"def",
"percentage_distance",
"(",
"cls",
",",
"highmark",
",",
"current",
")",
":",
"highmark",
"=",
"int",
"(",
"highmark",
")",
"current",
"=",
"int",
"(",
"current",
")",
"if",
"highmark",
">",
"0",
":",
"return",
"round",
"(",
"(",
"highmark",
"-"... | Percentage of distance the current offset is behind the highmark. | [
"Percentage",
"of",
"distance",
"the",
"current",
"offset",
"is",
"behind",
"the",
"highmark",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_consumer_manager/commands/offset_get.py#L219-L229 | train | 202,614 |
Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK.get_children | def get_children(self, path, watch=None):
"""Returns the children of the specified node."""
_log.debug(
"ZK: Getting children of {path}".format(path=path),
)
return self.zk.get_children(path, watch) | python | def get_children(self, path, watch=None):
"""Returns the children of the specified node."""
_log.debug(
"ZK: Getting children of {path}".format(path=path),
)
return self.zk.get_children(path, watch) | [
"def",
"get_children",
"(",
"self",
",",
"path",
",",
"watch",
"=",
"None",
")",
":",
"_log",
".",
"debug",
"(",
"\"ZK: Getting children of {path}\"",
".",
"format",
"(",
"path",
"=",
"path",
")",
",",
")",
"return",
"self",
".",
"zk",
".",
"get_children... | Returns the children of the specified node. | [
"Returns",
"the",
"children",
"of",
"the",
"specified",
"node",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L61-L66 | train | 202,615 |
Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK.get | def get(self, path, watch=None):
"""Returns the data of the specified node."""
_log.debug(
"ZK: Getting {path}".format(path=path),
)
return self.zk.get(path, watch) | python | def get(self, path, watch=None):
"""Returns the data of the specified node."""
_log.debug(
"ZK: Getting {path}".format(path=path),
)
return self.zk.get(path, watch) | [
"def",
"get",
"(",
"self",
",",
"path",
",",
"watch",
"=",
"None",
")",
":",
"_log",
".",
"debug",
"(",
"\"ZK: Getting {path}\"",
".",
"format",
"(",
"path",
"=",
"path",
")",
",",
")",
"return",
"self",
".",
"zk",
".",
"get",
"(",
"path",
",",
"... | Returns the data of the specified node. | [
"Returns",
"the",
"data",
"of",
"the",
"specified",
"node",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L68-L73 | train | 202,616 |
Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK.set | def set(self, path, value):
"""Sets and returns new data for the specified node."""
_log.debug(
"ZK: Setting {path} to {value}".format(path=path, value=value)
)
return self.zk.set(path, value) | python | def set(self, path, value):
"""Sets and returns new data for the specified node."""
_log.debug(
"ZK: Setting {path} to {value}".format(path=path, value=value)
)
return self.zk.set(path, value) | [
"def",
"set",
"(",
"self",
",",
"path",
",",
"value",
")",
":",
"_log",
".",
"debug",
"(",
"\"ZK: Setting {path} to {value}\"",
".",
"format",
"(",
"path",
"=",
"path",
",",
"value",
"=",
"value",
")",
")",
"return",
"self",
".",
"zk",
".",
"set",
"(... | Sets and returns new data for the specified node. | [
"Sets",
"and",
"returns",
"new",
"data",
"for",
"the",
"specified",
"node",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L75-L80 | train | 202,617 |
Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK.get_json | def get_json(self, path, watch=None):
"""Reads the data of the specified node and converts it to json."""
data, _ = self.get(path, watch)
return load_json(data) if data else None | python | def get_json(self, path, watch=None):
"""Reads the data of the specified node and converts it to json."""
data, _ = self.get(path, watch)
return load_json(data) if data else None | [
"def",
"get_json",
"(",
"self",
",",
"path",
",",
"watch",
"=",
"None",
")",
":",
"data",
",",
"_",
"=",
"self",
".",
"get",
"(",
"path",
",",
"watch",
")",
"return",
"load_json",
"(",
"data",
")",
"if",
"data",
"else",
"None"
] | Reads the data of the specified node and converts it to json. | [
"Reads",
"the",
"data",
"of",
"the",
"specified",
"node",
"and",
"converts",
"it",
"to",
"json",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L82-L85 | train | 202,618 |
Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK.get_brokers | def get_brokers(self, names_only=False):
"""Get information on all the available brokers.
:rtype : dict of brokers
"""
try:
broker_ids = self.get_children("/brokers/ids")
except NoNodeError:
_log.info(
"cluster is empty."
)
return {}
# Return broker-ids only
if names_only:
return {int(b_id): None for b_id in broker_ids}
return {int(b_id): self.get_broker_metadata(b_id) for b_id in broker_ids} | python | def get_brokers(self, names_only=False):
"""Get information on all the available brokers.
:rtype : dict of brokers
"""
try:
broker_ids = self.get_children("/brokers/ids")
except NoNodeError:
_log.info(
"cluster is empty."
)
return {}
# Return broker-ids only
if names_only:
return {int(b_id): None for b_id in broker_ids}
return {int(b_id): self.get_broker_metadata(b_id) for b_id in broker_ids} | [
"def",
"get_brokers",
"(",
"self",
",",
"names_only",
"=",
"False",
")",
":",
"try",
":",
"broker_ids",
"=",
"self",
".",
"get_children",
"(",
"\"/brokers/ids\"",
")",
"except",
"NoNodeError",
":",
"_log",
".",
"info",
"(",
"\"cluster is empty.\"",
")",
"ret... | Get information on all the available brokers.
:rtype : dict of brokers | [
"Get",
"information",
"on",
"all",
"the",
"available",
"brokers",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L99-L114 | train | 202,619 |
Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK.get_topic_config | def get_topic_config(self, topic):
"""Get configuration information for specified topic.
:rtype : dict of configuration
"""
try:
config_data = load_json(
self.get(
"/config/topics/{topic}".format(topic=topic)
)[0]
)
except NoNodeError as e:
# Kafka version before 0.8.1 does not have "/config/topics/<topic_name>" path in ZK and
# if the topic exists, return default dict instead of raising an Exception.
# Ref: https://cwiki.apache.org/confluence/display/KAFKA/Kafka+data+structures+in+Zookeeper.
topics = self.get_topics(topic_name=topic, fetch_partition_state=False)
if len(topics) > 0:
_log.info("Configuration not available for topic {topic}.".format(topic=topic))
config_data = {"config": {}}
else:
_log.error(
"topic {topic} not found.".format(topic=topic)
)
raise e
return config_data | python | def get_topic_config(self, topic):
"""Get configuration information for specified topic.
:rtype : dict of configuration
"""
try:
config_data = load_json(
self.get(
"/config/topics/{topic}".format(topic=topic)
)[0]
)
except NoNodeError as e:
# Kafka version before 0.8.1 does not have "/config/topics/<topic_name>" path in ZK and
# if the topic exists, return default dict instead of raising an Exception.
# Ref: https://cwiki.apache.org/confluence/display/KAFKA/Kafka+data+structures+in+Zookeeper.
topics = self.get_topics(topic_name=topic, fetch_partition_state=False)
if len(topics) > 0:
_log.info("Configuration not available for topic {topic}.".format(topic=topic))
config_data = {"config": {}}
else:
_log.error(
"topic {topic} not found.".format(topic=topic)
)
raise e
return config_data | [
"def",
"get_topic_config",
"(",
"self",
",",
"topic",
")",
":",
"try",
":",
"config_data",
"=",
"load_json",
"(",
"self",
".",
"get",
"(",
"\"/config/topics/{topic}\"",
".",
"format",
"(",
"topic",
"=",
"topic",
")",
")",
"[",
"0",
"]",
")",
"except",
... | Get configuration information for specified topic.
:rtype : dict of configuration | [
"Get",
"configuration",
"information",
"for",
"specified",
"topic",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L116-L142 | train | 202,620 |
Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK.set_topic_config | def set_topic_config(self, topic, value, kafka_version=(0, 10, )):
"""Set configuration information for specified topic.
:topic : topic whose configuration needs to be changed
:value : config value with which the topic needs to be
updated with. This would be of the form key=value.
Example 'cleanup.policy=compact'
:kafka_version :tuple kafka version the brokers are running on.
Defaults to (0, 10, x). Kafka version 9 and kafka 10
support this feature.
"""
config_data = dump_json(value)
try:
# Change value
return_value = self.set(
"/config/topics/{topic}".format(topic=topic),
config_data
)
# Create change
version = kafka_version[1]
# this feature is supported in kafka 9 and kafka 10
assert version in (9, 10), "Feature supported with kafka 9 and kafka 10"
if version == 9:
# https://github.com/apache/kafka/blob/0.9.0.1/
# core/src/main/scala/kafka/admin/AdminUtils.scala#L334
change_node = dump_json({
"version": 1,
"entity_type": "topics",
"entity_name": topic
})
else: # kafka 10
# https://github.com/apache/kafka/blob/0.10.2.1/
# core/src/main/scala/kafka/admin/AdminUtils.scala#L574
change_node = dump_json({
"version": 2,
"entity_path": "topics/" + topic,
})
self.create(
'/config/changes/config_change_',
change_node,
sequence=True
)
except NoNodeError as e:
_log.error(
"topic {topic} not found.".format(topic=topic)
)
raise e
return return_value | python | def set_topic_config(self, topic, value, kafka_version=(0, 10, )):
"""Set configuration information for specified topic.
:topic : topic whose configuration needs to be changed
:value : config value with which the topic needs to be
updated with. This would be of the form key=value.
Example 'cleanup.policy=compact'
:kafka_version :tuple kafka version the brokers are running on.
Defaults to (0, 10, x). Kafka version 9 and kafka 10
support this feature.
"""
config_data = dump_json(value)
try:
# Change value
return_value = self.set(
"/config/topics/{topic}".format(topic=topic),
config_data
)
# Create change
version = kafka_version[1]
# this feature is supported in kafka 9 and kafka 10
assert version in (9, 10), "Feature supported with kafka 9 and kafka 10"
if version == 9:
# https://github.com/apache/kafka/blob/0.9.0.1/
# core/src/main/scala/kafka/admin/AdminUtils.scala#L334
change_node = dump_json({
"version": 1,
"entity_type": "topics",
"entity_name": topic
})
else: # kafka 10
# https://github.com/apache/kafka/blob/0.10.2.1/
# core/src/main/scala/kafka/admin/AdminUtils.scala#L574
change_node = dump_json({
"version": 2,
"entity_path": "topics/" + topic,
})
self.create(
'/config/changes/config_change_',
change_node,
sequence=True
)
except NoNodeError as e:
_log.error(
"topic {topic} not found.".format(topic=topic)
)
raise e
return return_value | [
"def",
"set_topic_config",
"(",
"self",
",",
"topic",
",",
"value",
",",
"kafka_version",
"=",
"(",
"0",
",",
"10",
",",
")",
")",
":",
"config_data",
"=",
"dump_json",
"(",
"value",
")",
"try",
":",
"# Change value",
"return_value",
"=",
"self",
".",
... | Set configuration information for specified topic.
:topic : topic whose configuration needs to be changed
:value : config value with which the topic needs to be
updated with. This would be of the form key=value.
Example 'cleanup.policy=compact'
:kafka_version :tuple kafka version the brokers are running on.
Defaults to (0, 10, x). Kafka version 9 and kafka 10
support this feature. | [
"Set",
"configuration",
"information",
"for",
"specified",
"topic",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L144-L195 | train | 202,621 |
Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK.get_topics | def get_topics(
self,
topic_name=None,
names_only=False,
fetch_partition_state=True,
):
"""Get information on all the available topics.
Topic-data format with fetch_partition_state as False :-
topic_data = {
'version': 1,
'partitions': {
<p_id>: {
replicas: <broker-ids>
}
}
}
Topic-data format with fetch_partition_state as True:-
topic_data = {
'version': 1,
'ctime': <timestamp>,
'partitions': {
<p_id>:{
replicas: [<broker_id>, <broker_id>, ...],
isr: [<broker_id>, <broker_id>, ...],
controller_epoch: <val>,
leader_epoch: <val>,
version: 1,
leader: <broker-id>,
ctime: <timestamp>,
}
}
}
Note: By default we also fetch partition-state which results in
accessing the zookeeper twice. If just partition-replica information is
required fetch_partition_state should be set to False.
"""
try:
topic_ids = [topic_name] if topic_name else self.get_children(
"/brokers/topics",
)
except NoNodeError:
_log.error(
"Cluster is empty."
)
return {}
if names_only:
return topic_ids
topics_data = {}
for topic_id in topic_ids:
try:
topic_info = self.get("/brokers/topics/{id}".format(id=topic_id))
topic_data = load_json(topic_info[0])
topic_ctime = topic_info[1].ctime / 1000.0
topic_data['ctime'] = topic_ctime
except NoNodeError:
_log.info(
"topic '{topic}' not found.".format(topic=topic_id),
)
return {}
# Prepare data for each partition
partitions_data = {}
for p_id, replicas in six.iteritems(topic_data['partitions']):
partitions_data[p_id] = {}
if fetch_partition_state:
# Fetch partition-state from zookeeper
partition_state = self._fetch_partition_state(topic_id, p_id)
partitions_data[p_id] = load_json(partition_state[0])
partitions_data[p_id]['ctime'] = partition_state[1].ctime / 1000.0
else:
# Fetch partition-info from zookeeper
partition_info = self._fetch_partition_info(topic_id, p_id)
partitions_data[p_id]['ctime'] = partition_info.ctime / 1000.0
partitions_data[p_id]['replicas'] = replicas
topic_data['partitions'] = partitions_data
topics_data[topic_id] = topic_data
return topics_data | python | def get_topics(
self,
topic_name=None,
names_only=False,
fetch_partition_state=True,
):
"""Get information on all the available topics.
Topic-data format with fetch_partition_state as False :-
topic_data = {
'version': 1,
'partitions': {
<p_id>: {
replicas: <broker-ids>
}
}
}
Topic-data format with fetch_partition_state as True:-
topic_data = {
'version': 1,
'ctime': <timestamp>,
'partitions': {
<p_id>:{
replicas: [<broker_id>, <broker_id>, ...],
isr: [<broker_id>, <broker_id>, ...],
controller_epoch: <val>,
leader_epoch: <val>,
version: 1,
leader: <broker-id>,
ctime: <timestamp>,
}
}
}
Note: By default we also fetch partition-state which results in
accessing the zookeeper twice. If just partition-replica information is
required fetch_partition_state should be set to False.
"""
try:
topic_ids = [topic_name] if topic_name else self.get_children(
"/brokers/topics",
)
except NoNodeError:
_log.error(
"Cluster is empty."
)
return {}
if names_only:
return topic_ids
topics_data = {}
for topic_id in topic_ids:
try:
topic_info = self.get("/brokers/topics/{id}".format(id=topic_id))
topic_data = load_json(topic_info[0])
topic_ctime = topic_info[1].ctime / 1000.0
topic_data['ctime'] = topic_ctime
except NoNodeError:
_log.info(
"topic '{topic}' not found.".format(topic=topic_id),
)
return {}
# Prepare data for each partition
partitions_data = {}
for p_id, replicas in six.iteritems(topic_data['partitions']):
partitions_data[p_id] = {}
if fetch_partition_state:
# Fetch partition-state from zookeeper
partition_state = self._fetch_partition_state(topic_id, p_id)
partitions_data[p_id] = load_json(partition_state[0])
partitions_data[p_id]['ctime'] = partition_state[1].ctime / 1000.0
else:
# Fetch partition-info from zookeeper
partition_info = self._fetch_partition_info(topic_id, p_id)
partitions_data[p_id]['ctime'] = partition_info.ctime / 1000.0
partitions_data[p_id]['replicas'] = replicas
topic_data['partitions'] = partitions_data
topics_data[topic_id] = topic_data
return topics_data | [
"def",
"get_topics",
"(",
"self",
",",
"topic_name",
"=",
"None",
",",
"names_only",
"=",
"False",
",",
"fetch_partition_state",
"=",
"True",
",",
")",
":",
"try",
":",
"topic_ids",
"=",
"[",
"topic_name",
"]",
"if",
"topic_name",
"else",
"self",
".",
"g... | Get information on all the available topics.
Topic-data format with fetch_partition_state as False :-
topic_data = {
'version': 1,
'partitions': {
<p_id>: {
replicas: <broker-ids>
}
}
}
Topic-data format with fetch_partition_state as True:-
topic_data = {
'version': 1,
'ctime': <timestamp>,
'partitions': {
<p_id>:{
replicas: [<broker_id>, <broker_id>, ...],
isr: [<broker_id>, <broker_id>, ...],
controller_epoch: <val>,
leader_epoch: <val>,
version: 1,
leader: <broker-id>,
ctime: <timestamp>,
}
}
}
Note: By default we also fetch partition-state which results in
accessing the zookeeper twice. If just partition-replica information is
required fetch_partition_state should be set to False. | [
"Get",
"information",
"on",
"all",
"the",
"available",
"topics",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L197-L275 | train | 202,622 |
Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK.get_consumer_groups | def get_consumer_groups(self, consumer_group_id=None, names_only=False):
"""Get information on all the available consumer-groups.
If names_only is False, only list of consumer-group ids are sent.
If names_only is True, Consumer group offset details are returned
for all consumer-groups or given consumer-group if given in dict
format as:-
{
'group-id':
{
'topic':
{
'partition': offset-value,
...
...
}
}
}
:rtype: dict of consumer-group offset details
"""
if consumer_group_id is None:
group_ids = self.get_children("/consumers")
else:
group_ids = [consumer_group_id]
# Return consumer-group-ids only
if names_only:
return {g_id: None for g_id in group_ids}
consumer_offsets = {}
for g_id in group_ids:
consumer_offsets[g_id] = self.get_group_offsets(g_id)
return consumer_offsets | python | def get_consumer_groups(self, consumer_group_id=None, names_only=False):
"""Get information on all the available consumer-groups.
If names_only is False, only list of consumer-group ids are sent.
If names_only is True, Consumer group offset details are returned
for all consumer-groups or given consumer-group if given in dict
format as:-
{
'group-id':
{
'topic':
{
'partition': offset-value,
...
...
}
}
}
:rtype: dict of consumer-group offset details
"""
if consumer_group_id is None:
group_ids = self.get_children("/consumers")
else:
group_ids = [consumer_group_id]
# Return consumer-group-ids only
if names_only:
return {g_id: None for g_id in group_ids}
consumer_offsets = {}
for g_id in group_ids:
consumer_offsets[g_id] = self.get_group_offsets(g_id)
return consumer_offsets | [
"def",
"get_consumer_groups",
"(",
"self",
",",
"consumer_group_id",
"=",
"None",
",",
"names_only",
"=",
"False",
")",
":",
"if",
"consumer_group_id",
"is",
"None",
":",
"group_ids",
"=",
"self",
".",
"get_children",
"(",
"\"/consumers\"",
")",
"else",
":",
... | Get information on all the available consumer-groups.
If names_only is False, only list of consumer-group ids are sent.
If names_only is True, Consumer group offset details are returned
for all consumer-groups or given consumer-group if given in dict
format as:-
{
'group-id':
{
'topic':
{
'partition': offset-value,
...
...
}
}
}
:rtype: dict of consumer-group offset details | [
"Get",
"information",
"on",
"all",
"the",
"available",
"consumer",
"-",
"groups",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L277-L311 | train | 202,623 |
Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK.get_group_offsets | def get_group_offsets(self, group, topic=None):
"""Fetch group offsets for given topic and partition otherwise all topics
and partitions otherwise.
{
'topic':
{
'partition': offset-value,
...
...
}
}
"""
group_offsets = {}
try:
all_topics = self.get_my_subscribed_topics(group)
except NoNodeError:
# No offset information of given consumer-group
_log.warning(
"No topics subscribed to consumer-group {group}.".format(
group=group,
),
)
return group_offsets
if topic:
if topic in all_topics:
topics = [topic]
else:
_log.error(
"Topic {topic} not found in topic list {topics} for consumer"
"-group {consumer_group}.".format(
topic=topic,
topics=', '.join(topic for topic in all_topics),
consumer_group=group,
),
)
return group_offsets
else:
topics = all_topics
for topic in topics:
group_offsets[topic] = {}
try:
partitions = self.get_my_subscribed_partitions(group, topic)
except NoNodeError:
_log.warning(
"No partition offsets found for topic {topic}. "
"Continuing to next one...".format(topic=topic),
)
continue
# Fetch offsets for each partition
for partition in partitions:
path = "/consumers/{group_id}/offsets/{topic}/{partition}".format(
group_id=group,
topic=topic,
partition=partition,
)
try:
# Get current offset
offset_json, _ = self.get(path)
group_offsets[topic][partition] = load_json(offset_json)
except NoNodeError:
_log.error("Path {path} not found".format(path=path))
raise
return group_offsets | python | def get_group_offsets(self, group, topic=None):
"""Fetch group offsets for given topic and partition otherwise all topics
and partitions otherwise.
{
'topic':
{
'partition': offset-value,
...
...
}
}
"""
group_offsets = {}
try:
all_topics = self.get_my_subscribed_topics(group)
except NoNodeError:
# No offset information of given consumer-group
_log.warning(
"No topics subscribed to consumer-group {group}.".format(
group=group,
),
)
return group_offsets
if topic:
if topic in all_topics:
topics = [topic]
else:
_log.error(
"Topic {topic} not found in topic list {topics} for consumer"
"-group {consumer_group}.".format(
topic=topic,
topics=', '.join(topic for topic in all_topics),
consumer_group=group,
),
)
return group_offsets
else:
topics = all_topics
for topic in topics:
group_offsets[topic] = {}
try:
partitions = self.get_my_subscribed_partitions(group, topic)
except NoNodeError:
_log.warning(
"No partition offsets found for topic {topic}. "
"Continuing to next one...".format(topic=topic),
)
continue
# Fetch offsets for each partition
for partition in partitions:
path = "/consumers/{group_id}/offsets/{topic}/{partition}".format(
group_id=group,
topic=topic,
partition=partition,
)
try:
# Get current offset
offset_json, _ = self.get(path)
group_offsets[topic][partition] = load_json(offset_json)
except NoNodeError:
_log.error("Path {path} not found".format(path=path))
raise
return group_offsets | [
"def",
"get_group_offsets",
"(",
"self",
",",
"group",
",",
"topic",
"=",
"None",
")",
":",
"group_offsets",
"=",
"{",
"}",
"try",
":",
"all_topics",
"=",
"self",
".",
"get_my_subscribed_topics",
"(",
"group",
")",
"except",
"NoNodeError",
":",
"# No offset ... | Fetch group offsets for given topic and partition otherwise all topics
and partitions otherwise.
{
'topic':
{
'partition': offset-value,
...
...
}
} | [
"Fetch",
"group",
"offsets",
"for",
"given",
"topic",
"and",
"partition",
"otherwise",
"all",
"topics",
"and",
"partitions",
"otherwise",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L313-L377 | train | 202,624 |
Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK._fetch_partition_state | def _fetch_partition_state(self, topic_id, partition_id):
"""Fetch partition-state for given topic-partition."""
state_path = "/brokers/topics/{topic_id}/partitions/{p_id}/state"
try:
partition_state = self.get(
state_path.format(topic_id=topic_id, p_id=partition_id),
)
return partition_state
except NoNodeError:
return {} | python | def _fetch_partition_state(self, topic_id, partition_id):
"""Fetch partition-state for given topic-partition."""
state_path = "/brokers/topics/{topic_id}/partitions/{p_id}/state"
try:
partition_state = self.get(
state_path.format(topic_id=topic_id, p_id=partition_id),
)
return partition_state
except NoNodeError:
return {} | [
"def",
"_fetch_partition_state",
"(",
"self",
",",
"topic_id",
",",
"partition_id",
")",
":",
"state_path",
"=",
"\"/brokers/topics/{topic_id}/partitions/{p_id}/state\"",
"try",
":",
"partition_state",
"=",
"self",
".",
"get",
"(",
"state_path",
".",
"format",
"(",
... | Fetch partition-state for given topic-partition. | [
"Fetch",
"partition",
"-",
"state",
"for",
"given",
"topic",
"-",
"partition",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L379-L388 | train | 202,625 |
Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK._fetch_partition_info | def _fetch_partition_info(self, topic_id, partition_id):
"""Fetch partition info for given topic-partition."""
info_path = "/brokers/topics/{topic_id}/partitions/{p_id}"
try:
_, partition_info = self.get(
info_path.format(topic_id=topic_id, p_id=partition_id),
)
return partition_info
except NoNodeError:
return {} | python | def _fetch_partition_info(self, topic_id, partition_id):
"""Fetch partition info for given topic-partition."""
info_path = "/brokers/topics/{topic_id}/partitions/{p_id}"
try:
_, partition_info = self.get(
info_path.format(topic_id=topic_id, p_id=partition_id),
)
return partition_info
except NoNodeError:
return {} | [
"def",
"_fetch_partition_info",
"(",
"self",
",",
"topic_id",
",",
"partition_id",
")",
":",
"info_path",
"=",
"\"/brokers/topics/{topic_id}/partitions/{p_id}\"",
"try",
":",
"_",
",",
"partition_info",
"=",
"self",
".",
"get",
"(",
"info_path",
".",
"format",
"("... | Fetch partition info for given topic-partition. | [
"Fetch",
"partition",
"info",
"for",
"given",
"topic",
"-",
"partition",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L390-L399 | train | 202,626 |
Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK.get_my_subscribed_topics | def get_my_subscribed_topics(self, groupid):
"""Get the list of topics that a consumer is subscribed to
:param: groupid: The consumer group ID for the consumer
:returns list of kafka topics
:rtype: list
"""
path = "/consumers/{group_id}/offsets".format(group_id=groupid)
return self.get_children(path) | python | def get_my_subscribed_topics(self, groupid):
"""Get the list of topics that a consumer is subscribed to
:param: groupid: The consumer group ID for the consumer
:returns list of kafka topics
:rtype: list
"""
path = "/consumers/{group_id}/offsets".format(group_id=groupid)
return self.get_children(path) | [
"def",
"get_my_subscribed_topics",
"(",
"self",
",",
"groupid",
")",
":",
"path",
"=",
"\"/consumers/{group_id}/offsets\"",
".",
"format",
"(",
"group_id",
"=",
"groupid",
")",
"return",
"self",
".",
"get_children",
"(",
"path",
")"
] | Get the list of topics that a consumer is subscribed to
:param: groupid: The consumer group ID for the consumer
:returns list of kafka topics
:rtype: list | [
"Get",
"the",
"list",
"of",
"topics",
"that",
"a",
"consumer",
"is",
"subscribed",
"to"
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L401-L409 | train | 202,627 |
Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK.get_my_subscribed_partitions | def get_my_subscribed_partitions(self, groupid, topic):
"""Get the list of partitions of a topic
that a consumer is subscribed to
:param: groupid: The consumer group ID for the consumer
:param: topic: The topic name
:returns list of partitions
:rtype: list
"""
path = "/consumers/{group_id}/offsets/{topic}".format(
group_id=groupid,
topic=topic,
)
return self.get_children(path) | python | def get_my_subscribed_partitions(self, groupid, topic):
"""Get the list of partitions of a topic
that a consumer is subscribed to
:param: groupid: The consumer group ID for the consumer
:param: topic: The topic name
:returns list of partitions
:rtype: list
"""
path = "/consumers/{group_id}/offsets/{topic}".format(
group_id=groupid,
topic=topic,
)
return self.get_children(path) | [
"def",
"get_my_subscribed_partitions",
"(",
"self",
",",
"groupid",
",",
"topic",
")",
":",
"path",
"=",
"\"/consumers/{group_id}/offsets/{topic}\"",
".",
"format",
"(",
"group_id",
"=",
"groupid",
",",
"topic",
"=",
"topic",
",",
")",
"return",
"self",
".",
"... | Get the list of partitions of a topic
that a consumer is subscribed to
:param: groupid: The consumer group ID for the consumer
:param: topic: The topic name
:returns list of partitions
:rtype: list | [
"Get",
"the",
"list",
"of",
"partitions",
"of",
"a",
"topic",
"that",
"a",
"consumer",
"is",
"subscribed",
"to"
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L411-L424 | train | 202,628 |
Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK.get_cluster_assignment | def get_cluster_assignment(self):
"""Fetch the cluster layout in form of assignment from zookeeper"""
plan = self.get_cluster_plan()
assignment = {}
for elem in plan['partitions']:
assignment[
(elem['topic'], elem['partition'])
] = elem['replicas']
return assignment | python | def get_cluster_assignment(self):
"""Fetch the cluster layout in form of assignment from zookeeper"""
plan = self.get_cluster_plan()
assignment = {}
for elem in plan['partitions']:
assignment[
(elem['topic'], elem['partition'])
] = elem['replicas']
return assignment | [
"def",
"get_cluster_assignment",
"(",
"self",
")",
":",
"plan",
"=",
"self",
".",
"get_cluster_plan",
"(",
")",
"assignment",
"=",
"{",
"}",
"for",
"elem",
"in",
"plan",
"[",
"'partitions'",
"]",
":",
"assignment",
"[",
"(",
"elem",
"[",
"'topic'",
"]",
... | Fetch the cluster layout in form of assignment from zookeeper | [
"Fetch",
"the",
"cluster",
"layout",
"in",
"form",
"of",
"assignment",
"from",
"zookeeper"
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L426-L435 | train | 202,629 |
Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK.create | def create(
self,
path,
value='',
acl=None,
ephemeral=False,
sequence=False,
makepath=False
):
"""Creates a Zookeeper node.
:param: path: The zookeeper node path
:param: value: Zookeeper node value
:param: acl: ACL list
:param: ephemeral: Boolean indicating where this node is tied to
this session.
:param: sequence: Boolean indicating whether path is suffixed
with a unique index.
:param: makepath: Whether the path should be created if it doesn't
exist.
"""
_log.debug("ZK: Creating node " + path)
return self.zk.create(path, value, acl, ephemeral, sequence, makepath) | python | def create(
self,
path,
value='',
acl=None,
ephemeral=False,
sequence=False,
makepath=False
):
"""Creates a Zookeeper node.
:param: path: The zookeeper node path
:param: value: Zookeeper node value
:param: acl: ACL list
:param: ephemeral: Boolean indicating where this node is tied to
this session.
:param: sequence: Boolean indicating whether path is suffixed
with a unique index.
:param: makepath: Whether the path should be created if it doesn't
exist.
"""
_log.debug("ZK: Creating node " + path)
return self.zk.create(path, value, acl, ephemeral, sequence, makepath) | [
"def",
"create",
"(",
"self",
",",
"path",
",",
"value",
"=",
"''",
",",
"acl",
"=",
"None",
",",
"ephemeral",
"=",
"False",
",",
"sequence",
"=",
"False",
",",
"makepath",
"=",
"False",
")",
":",
"_log",
".",
"debug",
"(",
"\"ZK: Creating node \"",
... | Creates a Zookeeper node.
:param: path: The zookeeper node path
:param: value: Zookeeper node value
:param: acl: ACL list
:param: ephemeral: Boolean indicating where this node is tied to
this session.
:param: sequence: Boolean indicating whether path is suffixed
with a unique index.
:param: makepath: Whether the path should be created if it doesn't
exist. | [
"Creates",
"a",
"Zookeeper",
"node",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L437-L459 | train | 202,630 |
Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK.delete | def delete(self, path, recursive=False):
"""Deletes a Zookeeper node.
:param: path: The zookeeper node path
:param: recursive: Recursively delete node and all its children.
"""
_log.debug("ZK: Deleting node " + path)
return self.zk.delete(path, recursive=recursive) | python | def delete(self, path, recursive=False):
"""Deletes a Zookeeper node.
:param: path: The zookeeper node path
:param: recursive: Recursively delete node and all its children.
"""
_log.debug("ZK: Deleting node " + path)
return self.zk.delete(path, recursive=recursive) | [
"def",
"delete",
"(",
"self",
",",
"path",
",",
"recursive",
"=",
"False",
")",
":",
"_log",
".",
"debug",
"(",
"\"ZK: Deleting node \"",
"+",
"path",
")",
"return",
"self",
".",
"zk",
".",
"delete",
"(",
"path",
",",
"recursive",
"=",
"recursive",
")"... | Deletes a Zookeeper node.
:param: path: The zookeeper node path
:param: recursive: Recursively delete node and all its children. | [
"Deletes",
"a",
"Zookeeper",
"node",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L461-L468 | train | 202,631 |
Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK.execute_plan | def execute_plan(self, plan, allow_rf_change=False):
"""Submit reassignment plan for execution."""
reassignment_path = '{admin}/{reassignment_node}'\
.format(admin=ADMIN_PATH, reassignment_node=REASSIGNMENT_NODE)
plan_json = dump_json(plan)
base_plan = self.get_cluster_plan()
if not validate_plan(plan, base_plan, allow_rf_change=allow_rf_change):
_log.error('Given plan is invalid. Aborting new reassignment plan ... {plan}'.format(plan=plan))
return False
# Send proposed-plan to zookeeper
try:
_log.info('Sending plan to Zookeeper...')
self.create(reassignment_path, plan_json, makepath=True)
_log.info(
'Re-assign partitions node in Zookeeper updated successfully '
'with {plan}'.format(plan=plan),
)
return True
except NodeExistsError:
_log.warning('Previous plan in progress. Exiting..')
_log.warning('Aborting new reassignment plan... {plan}'.format(plan=plan))
in_progress_plan = load_json(self.get(reassignment_path)[0])
in_progress_partitions = [
'{topic}-{p_id}'.format(
topic=p_data['topic'],
p_id=str(p_data['partition']),
)
for p_data in in_progress_plan['partitions']
]
_log.warning(
'{count} partition(s) reassignment currently in progress:-'
.format(count=len(in_progress_partitions)),
)
_log.warning(
'{partitions}. In Progress reassignment plan...'.format(
partitions=', '.join(in_progress_partitions),
),
)
return False
except Exception as e:
_log.error(
'Could not re-assign partitions {plan}. Error: {e}'
.format(plan=plan, e=e),
)
return False | python | def execute_plan(self, plan, allow_rf_change=False):
"""Submit reassignment plan for execution."""
reassignment_path = '{admin}/{reassignment_node}'\
.format(admin=ADMIN_PATH, reassignment_node=REASSIGNMENT_NODE)
plan_json = dump_json(plan)
base_plan = self.get_cluster_plan()
if not validate_plan(plan, base_plan, allow_rf_change=allow_rf_change):
_log.error('Given plan is invalid. Aborting new reassignment plan ... {plan}'.format(plan=plan))
return False
# Send proposed-plan to zookeeper
try:
_log.info('Sending plan to Zookeeper...')
self.create(reassignment_path, plan_json, makepath=True)
_log.info(
'Re-assign partitions node in Zookeeper updated successfully '
'with {plan}'.format(plan=plan),
)
return True
except NodeExistsError:
_log.warning('Previous plan in progress. Exiting..')
_log.warning('Aborting new reassignment plan... {plan}'.format(plan=plan))
in_progress_plan = load_json(self.get(reassignment_path)[0])
in_progress_partitions = [
'{topic}-{p_id}'.format(
topic=p_data['topic'],
p_id=str(p_data['partition']),
)
for p_data in in_progress_plan['partitions']
]
_log.warning(
'{count} partition(s) reassignment currently in progress:-'
.format(count=len(in_progress_partitions)),
)
_log.warning(
'{partitions}. In Progress reassignment plan...'.format(
partitions=', '.join(in_progress_partitions),
),
)
return False
except Exception as e:
_log.error(
'Could not re-assign partitions {plan}. Error: {e}'
.format(plan=plan, e=e),
)
return False | [
"def",
"execute_plan",
"(",
"self",
",",
"plan",
",",
"allow_rf_change",
"=",
"False",
")",
":",
"reassignment_path",
"=",
"'{admin}/{reassignment_node}'",
".",
"format",
"(",
"admin",
"=",
"ADMIN_PATH",
",",
"reassignment_node",
"=",
"REASSIGNMENT_NODE",
")",
"pl... | Submit reassignment plan for execution. | [
"Submit",
"reassignment",
"plan",
"for",
"execution",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L483-L527 | train | 202,632 |
Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK.get_cluster_plan | def get_cluster_plan(self):
"""Fetch cluster plan from zookeeper."""
_log.info('Fetching current cluster-topology from Zookeeper...')
cluster_layout = self.get_topics(fetch_partition_state=False)
# Re-format cluster-layout
partitions = [
{
'topic': topic_id,
'partition': int(p_id),
'replicas': partitions_data['replicas']
}
for topic_id, topic_info in six.iteritems(cluster_layout)
for p_id, partitions_data in six.iteritems(topic_info['partitions'])
]
return {
'version': 1,
'partitions': partitions
} | python | def get_cluster_plan(self):
"""Fetch cluster plan from zookeeper."""
_log.info('Fetching current cluster-topology from Zookeeper...')
cluster_layout = self.get_topics(fetch_partition_state=False)
# Re-format cluster-layout
partitions = [
{
'topic': topic_id,
'partition': int(p_id),
'replicas': partitions_data['replicas']
}
for topic_id, topic_info in six.iteritems(cluster_layout)
for p_id, partitions_data in six.iteritems(topic_info['partitions'])
]
return {
'version': 1,
'partitions': partitions
} | [
"def",
"get_cluster_plan",
"(",
"self",
")",
":",
"_log",
".",
"info",
"(",
"'Fetching current cluster-topology from Zookeeper...'",
")",
"cluster_layout",
"=",
"self",
".",
"get_topics",
"(",
"fetch_partition_state",
"=",
"False",
")",
"# Re-format cluster-layout",
"pa... | Fetch cluster plan from zookeeper. | [
"Fetch",
"cluster",
"plan",
"from",
"zookeeper",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L529-L547 | train | 202,633 |
Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK.get_pending_plan | def get_pending_plan(self):
"""Read the currently running plan on reassign_partitions node."""
reassignment_path = '{admin}/{reassignment_node}'\
.format(admin=ADMIN_PATH, reassignment_node=REASSIGNMENT_NODE)
try:
result = self.get(reassignment_path)
return load_json(result[0])
except NoNodeError:
return {} | python | def get_pending_plan(self):
"""Read the currently running plan on reassign_partitions node."""
reassignment_path = '{admin}/{reassignment_node}'\
.format(admin=ADMIN_PATH, reassignment_node=REASSIGNMENT_NODE)
try:
result = self.get(reassignment_path)
return load_json(result[0])
except NoNodeError:
return {} | [
"def",
"get_pending_plan",
"(",
"self",
")",
":",
"reassignment_path",
"=",
"'{admin}/{reassignment_node}'",
".",
"format",
"(",
"admin",
"=",
"ADMIN_PATH",
",",
"reassignment_node",
"=",
"REASSIGNMENT_NODE",
")",
"try",
":",
"result",
"=",
"self",
".",
"get",
"... | Read the currently running plan on reassign_partitions node. | [
"Read",
"the",
"currently",
"running",
"plan",
"on",
"reassign_partitions",
"node",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L549-L557 | train | 202,634 |
Yelp/kafka-utils | kafka_utils/kafka_check/commands/offline.py | OfflineCmd.run_command | def run_command(self):
"""Checks the number of offline partitions"""
offline = get_topic_partition_with_error(
self.cluster_config,
LEADER_NOT_AVAILABLE_ERROR,
)
errcode = status_code.OK if not offline else status_code.CRITICAL
out = _prepare_output(offline, self.args.verbose)
return errcode, out | python | def run_command(self):
"""Checks the number of offline partitions"""
offline = get_topic_partition_with_error(
self.cluster_config,
LEADER_NOT_AVAILABLE_ERROR,
)
errcode = status_code.OK if not offline else status_code.CRITICAL
out = _prepare_output(offline, self.args.verbose)
return errcode, out | [
"def",
"run_command",
"(",
"self",
")",
":",
"offline",
"=",
"get_topic_partition_with_error",
"(",
"self",
".",
"cluster_config",
",",
"LEADER_NOT_AVAILABLE_ERROR",
",",
")",
"errcode",
"=",
"status_code",
".",
"OK",
"if",
"not",
"offline",
"else",
"status_code",... | Checks the number of offline partitions | [
"Checks",
"the",
"number",
"of",
"offline",
"partitions"
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_check/commands/offline.py#L37-L46 | train | 202,635 |
Yelp/kafka-utils | kafka_utils/kafka_consumer_manager/commands/list_groups.py | ListGroups.get_kafka_groups | def get_kafka_groups(cls, cluster_config):
'''Get the group_id of groups committed into Kafka.'''
kafka_group_reader = KafkaGroupReader(cluster_config)
return list(kafka_group_reader.read_groups().keys()) | python | def get_kafka_groups(cls, cluster_config):
'''Get the group_id of groups committed into Kafka.'''
kafka_group_reader = KafkaGroupReader(cluster_config)
return list(kafka_group_reader.read_groups().keys()) | [
"def",
"get_kafka_groups",
"(",
"cls",
",",
"cluster_config",
")",
":",
"kafka_group_reader",
"=",
"KafkaGroupReader",
"(",
"cluster_config",
")",
"return",
"list",
"(",
"kafka_group_reader",
".",
"read_groups",
"(",
")",
".",
"keys",
"(",
")",
")"
] | Get the group_id of groups committed into Kafka. | [
"Get",
"the",
"group_id",
"of",
"groups",
"committed",
"into",
"Kafka",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_consumer_manager/commands/list_groups.py#L34-L37 | train | 202,636 |
Yelp/kafka-utils | kafka_utils/util/ssh.py | report_stdout | def report_stdout(host, stdout):
"""Take a stdout and print it's lines to output if lines are present.
:param host: the host where the process is running
:type host: str
:param stdout: the std out of that process
:type stdout: paramiko.channel.Channel
"""
lines = stdout.readlines()
if lines:
print("STDOUT from {host}:".format(host=host))
for line in lines:
print(line.rstrip(), file=sys.stdout) | python | def report_stdout(host, stdout):
"""Take a stdout and print it's lines to output if lines are present.
:param host: the host where the process is running
:type host: str
:param stdout: the std out of that process
:type stdout: paramiko.channel.Channel
"""
lines = stdout.readlines()
if lines:
print("STDOUT from {host}:".format(host=host))
for line in lines:
print(line.rstrip(), file=sys.stdout) | [
"def",
"report_stdout",
"(",
"host",
",",
"stdout",
")",
":",
"lines",
"=",
"stdout",
".",
"readlines",
"(",
")",
"if",
"lines",
":",
"print",
"(",
"\"STDOUT from {host}:\"",
".",
"format",
"(",
"host",
"=",
"host",
")",
")",
"for",
"line",
"in",
"line... | Take a stdout and print it's lines to output if lines are present.
:param host: the host where the process is running
:type host: str
:param stdout: the std out of that process
:type stdout: paramiko.channel.Channel | [
"Take",
"a",
"stdout",
"and",
"print",
"it",
"s",
"lines",
"to",
"output",
"if",
"lines",
"are",
"present",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/ssh.py#L166-L178 | train | 202,637 |
Yelp/kafka-utils | kafka_utils/util/ssh.py | report_stderr | def report_stderr(host, stderr):
"""Take a stderr and print it's lines to output if lines are present.
:param host: the host where the process is running
:type host: str
:param stderr: the std error of that process
:type stderr: paramiko.channel.Channel
"""
lines = stderr.readlines()
if lines:
print("STDERR from {host}:".format(host=host))
for line in lines:
print(line.rstrip(), file=sys.stderr) | python | def report_stderr(host, stderr):
"""Take a stderr and print it's lines to output if lines are present.
:param host: the host where the process is running
:type host: str
:param stderr: the std error of that process
:type stderr: paramiko.channel.Channel
"""
lines = stderr.readlines()
if lines:
print("STDERR from {host}:".format(host=host))
for line in lines:
print(line.rstrip(), file=sys.stderr) | [
"def",
"report_stderr",
"(",
"host",
",",
"stderr",
")",
":",
"lines",
"=",
"stderr",
".",
"readlines",
"(",
")",
"if",
"lines",
":",
"print",
"(",
"\"STDERR from {host}:\"",
".",
"format",
"(",
"host",
"=",
"host",
")",
")",
"for",
"line",
"in",
"line... | Take a stderr and print it's lines to output if lines are present.
:param host: the host where the process is running
:type host: str
:param stderr: the std error of that process
:type stderr: paramiko.channel.Channel | [
"Take",
"a",
"stderr",
"and",
"print",
"it",
"s",
"lines",
"to",
"output",
"if",
"lines",
"are",
"present",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/ssh.py#L181-L193 | train | 202,638 |
Yelp/kafka-utils | kafka_utils/kafka_consumer_manager/commands/offset_save.py | OffsetSave.save_offsets | def save_offsets(
cls,
consumer_offsets_metadata,
topics_dict,
json_file,
groupid,
):
"""Built offsets for given topic-partitions in required format from current
offsets metadata and write to given json-file.
:param consumer_offsets_metadata: Fetched consumer offsets from kafka.
:param topics_dict: Dictionary of topic-partitions.
:param json_file: Filename to store consumer-offsets.
:param groupid: Current consumer-group.
"""
# Build consumer-offset data in desired format
current_consumer_offsets = defaultdict(dict)
for topic, topic_offsets in six.iteritems(consumer_offsets_metadata):
for partition_offset in topic_offsets:
current_consumer_offsets[topic][partition_offset.partition] = \
partition_offset.current
consumer_offsets_data = {'groupid': groupid, 'offsets': current_consumer_offsets}
cls.write_offsets_to_file(json_file, consumer_offsets_data) | python | def save_offsets(
cls,
consumer_offsets_metadata,
topics_dict,
json_file,
groupid,
):
"""Built offsets for given topic-partitions in required format from current
offsets metadata and write to given json-file.
:param consumer_offsets_metadata: Fetched consumer offsets from kafka.
:param topics_dict: Dictionary of topic-partitions.
:param json_file: Filename to store consumer-offsets.
:param groupid: Current consumer-group.
"""
# Build consumer-offset data in desired format
current_consumer_offsets = defaultdict(dict)
for topic, topic_offsets in six.iteritems(consumer_offsets_metadata):
for partition_offset in topic_offsets:
current_consumer_offsets[topic][partition_offset.partition] = \
partition_offset.current
consumer_offsets_data = {'groupid': groupid, 'offsets': current_consumer_offsets}
cls.write_offsets_to_file(json_file, consumer_offsets_data) | [
"def",
"save_offsets",
"(",
"cls",
",",
"consumer_offsets_metadata",
",",
"topics_dict",
",",
"json_file",
",",
"groupid",
",",
")",
":",
"# Build consumer-offset data in desired format",
"current_consumer_offsets",
"=",
"defaultdict",
"(",
"dict",
")",
"for",
"topic",
... | Built offsets for given topic-partitions in required format from current
offsets metadata and write to given json-file.
:param consumer_offsets_metadata: Fetched consumer offsets from kafka.
:param topics_dict: Dictionary of topic-partitions.
:param json_file: Filename to store consumer-offsets.
:param groupid: Current consumer-group. | [
"Built",
"offsets",
"for",
"given",
"topic",
"-",
"partitions",
"in",
"required",
"format",
"from",
"current",
"offsets",
"metadata",
"and",
"write",
"to",
"given",
"json",
"-",
"file",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_consumer_manager/commands/offset_save.py#L116-L139 | train | 202,639 |
Yelp/kafka-utils | kafka_utils/kafka_consumer_manager/commands/offset_save.py | OffsetSave.write_offsets_to_file | def write_offsets_to_file(cls, json_file_name, consumer_offsets_data):
"""Save built consumer-offsets data to given json file."""
# Save consumer-offsets to file
with open(json_file_name, "w") as json_file:
try:
json.dump(consumer_offsets_data, json_file)
except ValueError:
print("Error: Invalid json data {data}".format(data=consumer_offsets_data))
raise
print("Consumer offset data saved in json-file {file}".format(file=json_file_name)) | python | def write_offsets_to_file(cls, json_file_name, consumer_offsets_data):
"""Save built consumer-offsets data to given json file."""
# Save consumer-offsets to file
with open(json_file_name, "w") as json_file:
try:
json.dump(consumer_offsets_data, json_file)
except ValueError:
print("Error: Invalid json data {data}".format(data=consumer_offsets_data))
raise
print("Consumer offset data saved in json-file {file}".format(file=json_file_name)) | [
"def",
"write_offsets_to_file",
"(",
"cls",
",",
"json_file_name",
",",
"consumer_offsets_data",
")",
":",
"# Save consumer-offsets to file",
"with",
"open",
"(",
"json_file_name",
",",
"\"w\"",
")",
"as",
"json_file",
":",
"try",
":",
"json",
".",
"dump",
"(",
... | Save built consumer-offsets data to given json file. | [
"Save",
"built",
"consumer",
"-",
"offsets",
"data",
"to",
"given",
"json",
"file",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_consumer_manager/commands/offset_save.py#L142-L151 | train | 202,640 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py | PartitionCountBalancer.decommission_brokers | def decommission_brokers(self, broker_ids):
"""Decommission a list of brokers trying to keep the replication group
the brokers belong to balanced.
:param broker_ids: list of string representing valid broker ids in the cluster
:raises: InvalidBrokerIdError when the id is invalid.
"""
groups = set()
for b_id in broker_ids:
try:
broker = self.cluster_topology.brokers[b_id]
except KeyError:
self.log.error("Invalid broker id %s.", b_id)
# Raise an error for now. As alternative we may ignore the
# invalid id and continue with the others.
raise InvalidBrokerIdError(
"Broker id {} does not exist in cluster".format(b_id),
)
broker.mark_decommissioned()
groups.add(broker.replication_group)
for group in groups:
self._decommission_brokers_in_group(group) | python | def decommission_brokers(self, broker_ids):
"""Decommission a list of brokers trying to keep the replication group
the brokers belong to balanced.
:param broker_ids: list of string representing valid broker ids in the cluster
:raises: InvalidBrokerIdError when the id is invalid.
"""
groups = set()
for b_id in broker_ids:
try:
broker = self.cluster_topology.brokers[b_id]
except KeyError:
self.log.error("Invalid broker id %s.", b_id)
# Raise an error for now. As alternative we may ignore the
# invalid id and continue with the others.
raise InvalidBrokerIdError(
"Broker id {} does not exist in cluster".format(b_id),
)
broker.mark_decommissioned()
groups.add(broker.replication_group)
for group in groups:
self._decommission_brokers_in_group(group) | [
"def",
"decommission_brokers",
"(",
"self",
",",
"broker_ids",
")",
":",
"groups",
"=",
"set",
"(",
")",
"for",
"b_id",
"in",
"broker_ids",
":",
"try",
":",
"broker",
"=",
"self",
".",
"cluster_topology",
".",
"brokers",
"[",
"b_id",
"]",
"except",
"KeyE... | Decommission a list of brokers trying to keep the replication group
the brokers belong to balanced.
:param broker_ids: list of string representing valid broker ids in the cluster
:raises: InvalidBrokerIdError when the id is invalid. | [
"Decommission",
"a",
"list",
"of",
"brokers",
"trying",
"to",
"keep",
"the",
"replication",
"group",
"the",
"brokers",
"belong",
"to",
"balanced",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py#L68-L90 | train | 202,641 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py | PartitionCountBalancer._decommission_brokers_in_group | def _decommission_brokers_in_group(self, group):
"""Decommission the marked brokers of a group."""
try:
group.rebalance_brokers()
except EmptyReplicationGroupError:
self.log.warning("No active brokers left in replication group %s", group)
for broker in group.brokers:
if broker.decommissioned and not broker.empty():
# In this case we need to reassign the remaining partitions
# to other replication groups
self.log.info(
"Broker %s can't be decommissioned within the same "
"replication group %s. Moving partitions to other "
"replication groups.",
broker,
broker.replication_group,
)
self._force_broker_decommission(broker)
# Broker should be empty now
if not broker.empty():
# Decommission may be impossible if there are not enough
# brokers to redistributed the replicas.
self.log.error(
"Could not decommission broker %s. "
"Partitions %s cannot be reassigned.",
broker,
broker.partitions,
)
raise BrokerDecommissionError("Broker decommission failed.") | python | def _decommission_brokers_in_group(self, group):
"""Decommission the marked brokers of a group."""
try:
group.rebalance_brokers()
except EmptyReplicationGroupError:
self.log.warning("No active brokers left in replication group %s", group)
for broker in group.brokers:
if broker.decommissioned and not broker.empty():
# In this case we need to reassign the remaining partitions
# to other replication groups
self.log.info(
"Broker %s can't be decommissioned within the same "
"replication group %s. Moving partitions to other "
"replication groups.",
broker,
broker.replication_group,
)
self._force_broker_decommission(broker)
# Broker should be empty now
if not broker.empty():
# Decommission may be impossible if there are not enough
# brokers to redistributed the replicas.
self.log.error(
"Could not decommission broker %s. "
"Partitions %s cannot be reassigned.",
broker,
broker.partitions,
)
raise BrokerDecommissionError("Broker decommission failed.") | [
"def",
"_decommission_brokers_in_group",
"(",
"self",
",",
"group",
")",
":",
"try",
":",
"group",
".",
"rebalance_brokers",
"(",
")",
"except",
"EmptyReplicationGroupError",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"No active brokers left in replication group ... | Decommission the marked brokers of a group. | [
"Decommission",
"the",
"marked",
"brokers",
"of",
"a",
"group",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py#L92-L120 | train | 202,642 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py | PartitionCountBalancer.rebalance_replication_groups | def rebalance_replication_groups(self):
"""Rebalance partitions over replication groups.
First step involves rebalancing replica-count for each partition across
replication-groups.
Second step involves rebalancing partition-count across replication-groups
of the cluster.
"""
# Balance replicas over replication-groups for each partition
if any(b.inactive for b in six.itervalues(self.cluster_topology.brokers)):
self.log.error(
"Impossible to rebalance replication groups because of inactive "
"brokers."
)
raise RebalanceError(
"Impossible to rebalance replication groups because of inactive "
"brokers"
)
# Balance replica-count over replication-groups
self.rebalance_replicas()
# Balance partition-count over replication-groups
self._rebalance_groups_partition_cnt() | python | def rebalance_replication_groups(self):
"""Rebalance partitions over replication groups.
First step involves rebalancing replica-count for each partition across
replication-groups.
Second step involves rebalancing partition-count across replication-groups
of the cluster.
"""
# Balance replicas over replication-groups for each partition
if any(b.inactive for b in six.itervalues(self.cluster_topology.brokers)):
self.log.error(
"Impossible to rebalance replication groups because of inactive "
"brokers."
)
raise RebalanceError(
"Impossible to rebalance replication groups because of inactive "
"brokers"
)
# Balance replica-count over replication-groups
self.rebalance_replicas()
# Balance partition-count over replication-groups
self._rebalance_groups_partition_cnt() | [
"def",
"rebalance_replication_groups",
"(",
"self",
")",
":",
"# Balance replicas over replication-groups for each partition",
"if",
"any",
"(",
"b",
".",
"inactive",
"for",
"b",
"in",
"six",
".",
"itervalues",
"(",
"self",
".",
"cluster_topology",
".",
"brokers",
"... | Rebalance partitions over replication groups.
First step involves rebalancing replica-count for each partition across
replication-groups.
Second step involves rebalancing partition-count across replication-groups
of the cluster. | [
"Rebalance",
"partitions",
"over",
"replication",
"groups",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py#L178-L201 | train | 202,643 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py | PartitionCountBalancer.rebalance_brokers | def rebalance_brokers(self):
"""Rebalance partition-count across brokers within each replication-group."""
for rg in six.itervalues(self.cluster_topology.rgs):
rg.rebalance_brokers() | python | def rebalance_brokers(self):
"""Rebalance partition-count across brokers within each replication-group."""
for rg in six.itervalues(self.cluster_topology.rgs):
rg.rebalance_brokers() | [
"def",
"rebalance_brokers",
"(",
"self",
")",
":",
"for",
"rg",
"in",
"six",
".",
"itervalues",
"(",
"self",
".",
"cluster_topology",
".",
"rgs",
")",
":",
"rg",
".",
"rebalance_brokers",
"(",
")"
] | Rebalance partition-count across brokers within each replication-group. | [
"Rebalance",
"partition",
"-",
"count",
"across",
"brokers",
"within",
"each",
"replication",
"-",
"group",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py#L204-L207 | train | 202,644 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py | PartitionCountBalancer.revoke_leadership | def revoke_leadership(self, broker_ids):
"""Revoke leadership for given brokers.
:param broker_ids: List of broker-ids whose leadership needs to be revoked.
"""
for b_id in broker_ids:
try:
broker = self.cluster_topology.brokers[b_id]
except KeyError:
self.log.error("Invalid broker id %s.", b_id)
raise InvalidBrokerIdError(
"Broker id {} does not exist in cluster".format(b_id),
)
broker.mark_revoked_leadership()
assert(len(self.cluster_topology.brokers) - len(broker_ids) > 0), "Not " \
"all brokers can be revoked for leadership"
opt_leader_cnt = len(self.cluster_topology.partitions) // (
len(self.cluster_topology.brokers) - len(broker_ids)
)
# Balanced brokers transfer leadership to their under-balanced followers
self.rebalancing_non_followers(opt_leader_cnt)
# If the broker-ids to be revoked from leadership are still leaders for any
# partitions, try to forcefully move their leadership to followers if possible
pending_brokers = [
b for b in six.itervalues(self.cluster_topology.brokers)
if b.revoked_leadership and b.count_preferred_replica() > 0
]
for b in pending_brokers:
self._force_revoke_leadership(b) | python | def revoke_leadership(self, broker_ids):
"""Revoke leadership for given brokers.
:param broker_ids: List of broker-ids whose leadership needs to be revoked.
"""
for b_id in broker_ids:
try:
broker = self.cluster_topology.brokers[b_id]
except KeyError:
self.log.error("Invalid broker id %s.", b_id)
raise InvalidBrokerIdError(
"Broker id {} does not exist in cluster".format(b_id),
)
broker.mark_revoked_leadership()
assert(len(self.cluster_topology.brokers) - len(broker_ids) > 0), "Not " \
"all brokers can be revoked for leadership"
opt_leader_cnt = len(self.cluster_topology.partitions) // (
len(self.cluster_topology.brokers) - len(broker_ids)
)
# Balanced brokers transfer leadership to their under-balanced followers
self.rebalancing_non_followers(opt_leader_cnt)
# If the broker-ids to be revoked from leadership are still leaders for any
# partitions, try to forcefully move their leadership to followers if possible
pending_brokers = [
b for b in six.itervalues(self.cluster_topology.brokers)
if b.revoked_leadership and b.count_preferred_replica() > 0
]
for b in pending_brokers:
self._force_revoke_leadership(b) | [
"def",
"revoke_leadership",
"(",
"self",
",",
"broker_ids",
")",
":",
"for",
"b_id",
"in",
"broker_ids",
":",
"try",
":",
"broker",
"=",
"self",
".",
"cluster_topology",
".",
"brokers",
"[",
"b_id",
"]",
"except",
"KeyError",
":",
"self",
".",
"log",
"."... | Revoke leadership for given brokers.
:param broker_ids: List of broker-ids whose leadership needs to be revoked. | [
"Revoke",
"leadership",
"for",
"given",
"brokers",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py#L209-L239 | train | 202,645 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py | PartitionCountBalancer._force_revoke_leadership | def _force_revoke_leadership(self, broker):
"""Revoke the leadership of given broker for any remaining partitions.
Algorithm:
1. Find the partitions (owned_partitions) with given broker as leader.
2. For each partition find the eligible followers.
Brokers which are not to be revoked from leadership are eligible followers.
3. Select the follower who is leader for minimum partitions.
4. Assign the selected follower as leader.
5. Notify for any pending owned_partitions whose leader cannot be changed.
This could be due to replica size 1 or eligible followers are None.
"""
owned_partitions = list(filter(
lambda p: broker is p.leader,
broker.partitions,
))
for partition in owned_partitions:
if len(partition.replicas) == 1:
self.log.error(
"Cannot be revoked leadership for broker {b} for partition {p}. Replica count: 1"
.format(p=partition, b=broker),
)
continue
eligible_followers = [
follower for follower in partition.followers
if not follower.revoked_leadership
]
if eligible_followers:
# Pick follower with least leader-count
best_fit_follower = min(
eligible_followers,
key=lambda follower: follower.count_preferred_replica(),
)
partition.swap_leader(best_fit_follower)
else:
self.log.error(
"All replicas for partition {p} on broker {b} are to be revoked for leadership.".format(
p=partition,
b=broker,
)
) | python | def _force_revoke_leadership(self, broker):
"""Revoke the leadership of given broker for any remaining partitions.
Algorithm:
1. Find the partitions (owned_partitions) with given broker as leader.
2. For each partition find the eligible followers.
Brokers which are not to be revoked from leadership are eligible followers.
3. Select the follower who is leader for minimum partitions.
4. Assign the selected follower as leader.
5. Notify for any pending owned_partitions whose leader cannot be changed.
This could be due to replica size 1 or eligible followers are None.
"""
owned_partitions = list(filter(
lambda p: broker is p.leader,
broker.partitions,
))
for partition in owned_partitions:
if len(partition.replicas) == 1:
self.log.error(
"Cannot be revoked leadership for broker {b} for partition {p}. Replica count: 1"
.format(p=partition, b=broker),
)
continue
eligible_followers = [
follower for follower in partition.followers
if not follower.revoked_leadership
]
if eligible_followers:
# Pick follower with least leader-count
best_fit_follower = min(
eligible_followers,
key=lambda follower: follower.count_preferred_replica(),
)
partition.swap_leader(best_fit_follower)
else:
self.log.error(
"All replicas for partition {p} on broker {b} are to be revoked for leadership.".format(
p=partition,
b=broker,
)
) | [
"def",
"_force_revoke_leadership",
"(",
"self",
",",
"broker",
")",
":",
"owned_partitions",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"p",
":",
"broker",
"is",
"p",
".",
"leader",
",",
"broker",
".",
"partitions",
",",
")",
")",
"for",
"partition",
"in... | Revoke the leadership of given broker for any remaining partitions.
Algorithm:
1. Find the partitions (owned_partitions) with given broker as leader.
2. For each partition find the eligible followers.
Brokers which are not to be revoked from leadership are eligible followers.
3. Select the follower who is leader for minimum partitions.
4. Assign the selected follower as leader.
5. Notify for any pending owned_partitions whose leader cannot be changed.
This could be due to replica size 1 or eligible followers are None. | [
"Revoke",
"the",
"leadership",
"of",
"given",
"broker",
"for",
"any",
"remaining",
"partitions",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py#L241-L281 | train | 202,646 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py | PartitionCountBalancer.rebalance_leaders | def rebalance_leaders(self):
"""Re-order brokers in replicas such that, every broker is assigned as
preferred leader evenly.
"""
opt_leader_cnt = len(self.cluster_topology.partitions) // len(self.cluster_topology.brokers)
# Balanced brokers transfer leadership to their under-balanced followers
self.rebalancing_non_followers(opt_leader_cnt) | python | def rebalance_leaders(self):
"""Re-order brokers in replicas such that, every broker is assigned as
preferred leader evenly.
"""
opt_leader_cnt = len(self.cluster_topology.partitions) // len(self.cluster_topology.brokers)
# Balanced brokers transfer leadership to their under-balanced followers
self.rebalancing_non_followers(opt_leader_cnt) | [
"def",
"rebalance_leaders",
"(",
"self",
")",
":",
"opt_leader_cnt",
"=",
"len",
"(",
"self",
".",
"cluster_topology",
".",
"partitions",
")",
"//",
"len",
"(",
"self",
".",
"cluster_topology",
".",
"brokers",
")",
"# Balanced brokers transfer leadership to their un... | Re-order brokers in replicas such that, every broker is assigned as
preferred leader evenly. | [
"Re",
"-",
"order",
"brokers",
"in",
"replicas",
"such",
"that",
"every",
"broker",
"is",
"assigned",
"as",
"preferred",
"leader",
"evenly",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py#L284-L290 | train | 202,647 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py | PartitionCountBalancer._rebalance_groups_partition_cnt | def _rebalance_groups_partition_cnt(self):
"""Re-balance partition-count across replication-groups.
Algorithm:
The key constraint is not to create any replica-count imbalance while
moving partitions across replication-groups.
1) Divide replication-groups into over and under loaded groups in terms
of partition-count.
2) For each over-loaded replication-group, select eligible partitions
which can be moved to under-replicated groups. Partitions with greater
than optimum replica-count for the group have the ability to donate one
of their replicas without creating replica-count imbalance.
3) Destination replication-group is selected based on minimum partition-count
and ability to accept one of the eligible partition-replicas.
4) Source and destination brokers are selected based on :-
* their ability to donate and accept extra partition-replica respectively.
* maximum and minimum partition-counts respectively.
5) Move partition-replica from source to destination-broker.
6) Repeat steps 1) to 5) until groups are balanced or cannot be balanced further.
"""
# Segregate replication-groups based on partition-count
total_elements = sum(len(rg.partitions) for rg in six.itervalues(self.cluster_topology.rgs))
over_loaded_rgs, under_loaded_rgs = separate_groups(
list(self.cluster_topology.rgs.values()),
lambda rg: len(rg.partitions),
total_elements,
)
if over_loaded_rgs and under_loaded_rgs:
self.cluster_topology.log.info(
'Over-loaded replication-groups {over_loaded}, under-loaded '
'replication-groups {under_loaded} based on partition-count'
.format(
over_loaded=[rg.id for rg in over_loaded_rgs],
under_loaded=[rg.id for rg in under_loaded_rgs],
)
)
else:
self.cluster_topology.log.info('Replication-groups are balanced based on partition-count.')
return
# Get optimal partition-count per replication-group
opt_partition_cnt, _ = compute_optimum(
len(self.cluster_topology.rgs),
total_elements,
)
# Balance replication-groups
for over_loaded_rg in over_loaded_rgs:
for under_loaded_rg in under_loaded_rgs:
# Filter unique partition with replica-count > opt-replica-count
# in over-loaded-rgs and <= opt-replica-count in under-loaded-rgs
eligible_partitions = set(filter(
lambda partition:
over_loaded_rg.count_replica(partition) >
len(partition.replicas) // len(self.cluster_topology.rgs) and
under_loaded_rg.count_replica(partition) <=
len(partition.replicas) // len(self.cluster_topology.rgs),
over_loaded_rg.partitions,
))
# Move all possible partitions
for eligible_partition in eligible_partitions:
# The difference of partition-count b/w the over-loaded and under-loaded
# replication-groups should be greater than 1 for convergence
if len(over_loaded_rg.partitions) - len(under_loaded_rg.partitions) > 1:
over_loaded_rg.move_partition_replica(
under_loaded_rg,
eligible_partition,
)
else:
break
# Move to next replication-group if either of the groups got
# balanced, otherwise try with next eligible partition
if (len(under_loaded_rg.partitions) == opt_partition_cnt or
len(over_loaded_rg.partitions) == opt_partition_cnt):
break
if len(over_loaded_rg.partitions) == opt_partition_cnt:
# Move to next over-loaded replication-group if balanced
break | python | def _rebalance_groups_partition_cnt(self):
"""Re-balance partition-count across replication-groups.
Algorithm:
The key constraint is not to create any replica-count imbalance while
moving partitions across replication-groups.
1) Divide replication-groups into over and under loaded groups in terms
of partition-count.
2) For each over-loaded replication-group, select eligible partitions
which can be moved to under-replicated groups. Partitions with greater
than optimum replica-count for the group have the ability to donate one
of their replicas without creating replica-count imbalance.
3) Destination replication-group is selected based on minimum partition-count
and ability to accept one of the eligible partition-replicas.
4) Source and destination brokers are selected based on :-
* their ability to donate and accept extra partition-replica respectively.
* maximum and minimum partition-counts respectively.
5) Move partition-replica from source to destination-broker.
6) Repeat steps 1) to 5) until groups are balanced or cannot be balanced further.
"""
# Segregate replication-groups based on partition-count
total_elements = sum(len(rg.partitions) for rg in six.itervalues(self.cluster_topology.rgs))
over_loaded_rgs, under_loaded_rgs = separate_groups(
list(self.cluster_topology.rgs.values()),
lambda rg: len(rg.partitions),
total_elements,
)
if over_loaded_rgs and under_loaded_rgs:
self.cluster_topology.log.info(
'Over-loaded replication-groups {over_loaded}, under-loaded '
'replication-groups {under_loaded} based on partition-count'
.format(
over_loaded=[rg.id for rg in over_loaded_rgs],
under_loaded=[rg.id for rg in under_loaded_rgs],
)
)
else:
self.cluster_topology.log.info('Replication-groups are balanced based on partition-count.')
return
# Get optimal partition-count per replication-group
opt_partition_cnt, _ = compute_optimum(
len(self.cluster_topology.rgs),
total_elements,
)
# Balance replication-groups
for over_loaded_rg in over_loaded_rgs:
for under_loaded_rg in under_loaded_rgs:
# Filter unique partition with replica-count > opt-replica-count
# in over-loaded-rgs and <= opt-replica-count in under-loaded-rgs
eligible_partitions = set(filter(
lambda partition:
over_loaded_rg.count_replica(partition) >
len(partition.replicas) // len(self.cluster_topology.rgs) and
under_loaded_rg.count_replica(partition) <=
len(partition.replicas) // len(self.cluster_topology.rgs),
over_loaded_rg.partitions,
))
# Move all possible partitions
for eligible_partition in eligible_partitions:
# The difference of partition-count b/w the over-loaded and under-loaded
# replication-groups should be greater than 1 for convergence
if len(over_loaded_rg.partitions) - len(under_loaded_rg.partitions) > 1:
over_loaded_rg.move_partition_replica(
under_loaded_rg,
eligible_partition,
)
else:
break
# Move to next replication-group if either of the groups got
# balanced, otherwise try with next eligible partition
if (len(under_loaded_rg.partitions) == opt_partition_cnt or
len(over_loaded_rg.partitions) == opt_partition_cnt):
break
if len(over_loaded_rg.partitions) == opt_partition_cnt:
# Move to next over-loaded replication-group if balanced
break | [
"def",
"_rebalance_groups_partition_cnt",
"(",
"self",
")",
":",
"# Segregate replication-groups based on partition-count",
"total_elements",
"=",
"sum",
"(",
"len",
"(",
"rg",
".",
"partitions",
")",
"for",
"rg",
"in",
"six",
".",
"itervalues",
"(",
"self",
".",
... | Re-balance partition-count across replication-groups.
Algorithm:
The key constraint is not to create any replica-count imbalance while
moving partitions across replication-groups.
1) Divide replication-groups into over and under loaded groups in terms
of partition-count.
2) For each over-loaded replication-group, select eligible partitions
which can be moved to under-replicated groups. Partitions with greater
than optimum replica-count for the group have the ability to donate one
of their replicas without creating replica-count imbalance.
3) Destination replication-group is selected based on minimum partition-count
and ability to accept one of the eligible partition-replicas.
4) Source and destination brokers are selected based on :-
* their ability to donate and accept extra partition-replica respectively.
* maximum and minimum partition-counts respectively.
5) Move partition-replica from source to destination-broker.
6) Repeat steps 1) to 5) until groups are balanced or cannot be balanced further. | [
"Re",
"-",
"balance",
"partition",
"-",
"count",
"across",
"replication",
"-",
"groups",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py#L336-L412 | train | 202,648 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py | PartitionCountBalancer.add_replica | def add_replica(self, partition_name, count=1):
"""Increase the replication-factor for a partition.
The replication-group to add to is determined as follows:
1. Find all replication-groups that have brokers not already
replicating the partition.
2. Of these, find replication-groups that have fewer than the
average number of replicas for this partition.
3. Choose the replication-group with the fewest overall partitions.
:param partition_name: (topic_id, partition_id) of the partition to add
replicas of.
:param count: The number of replicas to add.
:raises InvalidReplicationFactorError when the resulting replication
factor is greater than the number of brokers in the cluster.
"""
try:
partition = self.cluster_topology.partitions[partition_name]
except KeyError:
raise InvalidPartitionError(
"Partition name {name} not found".format(name=partition_name),
)
if partition.replication_factor + count > len(self.cluster_topology.brokers):
raise InvalidReplicationFactorError(
"Cannot increase replication factor to {0}. There are only "
"{1} brokers."
.format(
partition.replication_factor + count,
len(self.cluster_topology.brokers),
)
)
non_full_rgs = [
rg
for rg in self.cluster_topology.rgs.values()
if rg.count_replica(partition) < len(rg.brokers)
]
for _ in range(count):
total_replicas = sum(
rg.count_replica(partition)
for rg in non_full_rgs
)
opt_replicas, _ = compute_optimum(
len(non_full_rgs),
total_replicas,
)
under_replicated_rgs = [
rg
for rg in non_full_rgs
if rg.count_replica(partition) < opt_replicas
]
candidate_rgs = under_replicated_rgs or non_full_rgs
rg = min(candidate_rgs, key=lambda rg: len(rg.partitions))
rg.add_replica(partition)
if rg.count_replica(partition) >= len(rg.brokers):
non_full_rgs.remove(rg) | python | def add_replica(self, partition_name, count=1):
"""Increase the replication-factor for a partition.
The replication-group to add to is determined as follows:
1. Find all replication-groups that have brokers not already
replicating the partition.
2. Of these, find replication-groups that have fewer than the
average number of replicas for this partition.
3. Choose the replication-group with the fewest overall partitions.
:param partition_name: (topic_id, partition_id) of the partition to add
replicas of.
:param count: The number of replicas to add.
:raises InvalidReplicationFactorError when the resulting replication
factor is greater than the number of brokers in the cluster.
"""
try:
partition = self.cluster_topology.partitions[partition_name]
except KeyError:
raise InvalidPartitionError(
"Partition name {name} not found".format(name=partition_name),
)
if partition.replication_factor + count > len(self.cluster_topology.brokers):
raise InvalidReplicationFactorError(
"Cannot increase replication factor to {0}. There are only "
"{1} brokers."
.format(
partition.replication_factor + count,
len(self.cluster_topology.brokers),
)
)
non_full_rgs = [
rg
for rg in self.cluster_topology.rgs.values()
if rg.count_replica(partition) < len(rg.brokers)
]
for _ in range(count):
total_replicas = sum(
rg.count_replica(partition)
for rg in non_full_rgs
)
opt_replicas, _ = compute_optimum(
len(non_full_rgs),
total_replicas,
)
under_replicated_rgs = [
rg
for rg in non_full_rgs
if rg.count_replica(partition) < opt_replicas
]
candidate_rgs = under_replicated_rgs or non_full_rgs
rg = min(candidate_rgs, key=lambda rg: len(rg.partitions))
rg.add_replica(partition)
if rg.count_replica(partition) >= len(rg.brokers):
non_full_rgs.remove(rg) | [
"def",
"add_replica",
"(",
"self",
",",
"partition_name",
",",
"count",
"=",
"1",
")",
":",
"try",
":",
"partition",
"=",
"self",
".",
"cluster_topology",
".",
"partitions",
"[",
"partition_name",
"]",
"except",
"KeyError",
":",
"raise",
"InvalidPartitionError... | Increase the replication-factor for a partition.
The replication-group to add to is determined as follows:
1. Find all replication-groups that have brokers not already
replicating the partition.
2. Of these, find replication-groups that have fewer than the
average number of replicas for this partition.
3. Choose the replication-group with the fewest overall partitions.
:param partition_name: (topic_id, partition_id) of the partition to add
replicas of.
:param count: The number of replicas to add.
:raises InvalidReplicationFactorError when the resulting replication
factor is greater than the number of brokers in the cluster. | [
"Increase",
"the",
"replication",
"-",
"factor",
"for",
"a",
"partition",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py#L414-L471 | train | 202,649 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py | PartitionCountBalancer.remove_replica | def remove_replica(self, partition_name, osr_broker_ids, count=1):
"""Remove one replica of a partition from the cluster.
The replication-group to remove from is determined as follows:
1. Find all replication-groups that contain at least one
out-of-sync replica for this partition.
2. Of these, find replication-groups with more than the average
number of replicas of this partition.
3. Choose the replication-group with the most overall partitions.
4. Repeat steps 1-3 with in-sync replicas
After this operation, the preferred leader for this partition will
be set to the broker that leads the fewest other partitions, even if
the current preferred leader is not removed.
This is done to keep the number of preferred replicas balanced across
brokers in the cluster.
:param partition_name: (topic_id, partition_id) of the partition to
remove replicas of.
:param osr_broker_ids: A list of the partition's out-of-sync broker ids.
:param count: The number of replicas to remove.
:raises: InvalidReplicationFactorError when count is greater than the
replication factor of the partition.
"""
try:
partition = self.cluster_topology.partitions[partition_name]
except KeyError:
raise InvalidPartitionError(
"Partition name {name} not found".format(name=partition_name),
)
if partition.replication_factor <= count:
raise InvalidReplicationFactorError(
"Cannot remove {0} replicas. Replication factor is only {1}."
.format(count, partition.replication_factor)
)
osr = []
for broker_id in osr_broker_ids:
try:
osr.append(self.cluster_topology.brokers[broker_id])
except KeyError:
raise InvalidBrokerIdError(
"No broker found with id {bid}".format(bid=broker_id),
)
non_empty_rgs = [
rg
for rg in self.cluster_topology.rgs.values()
if rg.count_replica(partition) > 0
]
rgs_with_osr = [
rg
for rg in non_empty_rgs
if any(b in osr for b in rg.brokers)
]
for _ in range(count):
candidate_rgs = rgs_with_osr or non_empty_rgs
total_replicas = sum(
rg.count_replica(partition)
for rg in candidate_rgs
)
opt_replica_cnt, _ = compute_optimum(
len(candidate_rgs),
total_replicas,
)
over_replicated_rgs = [
rg
for rg in candidate_rgs
if rg.count_replica(partition) > opt_replica_cnt
]
candidate_rgs = over_replicated_rgs or candidate_rgs
rg = max(candidate_rgs, key=lambda rg: len(rg.partitions))
osr_in_rg = [b for b in rg.brokers if b in osr]
rg.remove_replica(partition, osr_in_rg)
osr = [b for b in osr if b in partition.replicas]
if rg in rgs_with_osr and len(osr_in_rg) == 1:
rgs_with_osr.remove(rg)
if rg.count_replica(partition) == 0:
non_empty_rgs.remove(rg)
new_leader = min(
partition.replicas,
key=lambda broker: broker.count_preferred_replica(),
)
partition.swap_leader(new_leader) | python | def remove_replica(self, partition_name, osr_broker_ids, count=1):
"""Remove one replica of a partition from the cluster.
The replication-group to remove from is determined as follows:
1. Find all replication-groups that contain at least one
out-of-sync replica for this partition.
2. Of these, find replication-groups with more than the average
number of replicas of this partition.
3. Choose the replication-group with the most overall partitions.
4. Repeat steps 1-3 with in-sync replicas
After this operation, the preferred leader for this partition will
be set to the broker that leads the fewest other partitions, even if
the current preferred leader is not removed.
This is done to keep the number of preferred replicas balanced across
brokers in the cluster.
:param partition_name: (topic_id, partition_id) of the partition to
remove replicas of.
:param osr_broker_ids: A list of the partition's out-of-sync broker ids.
:param count: The number of replicas to remove.
:raises: InvalidReplicationFactorError when count is greater than the
replication factor of the partition.
"""
try:
partition = self.cluster_topology.partitions[partition_name]
except KeyError:
raise InvalidPartitionError(
"Partition name {name} not found".format(name=partition_name),
)
if partition.replication_factor <= count:
raise InvalidReplicationFactorError(
"Cannot remove {0} replicas. Replication factor is only {1}."
.format(count, partition.replication_factor)
)
osr = []
for broker_id in osr_broker_ids:
try:
osr.append(self.cluster_topology.brokers[broker_id])
except KeyError:
raise InvalidBrokerIdError(
"No broker found with id {bid}".format(bid=broker_id),
)
non_empty_rgs = [
rg
for rg in self.cluster_topology.rgs.values()
if rg.count_replica(partition) > 0
]
rgs_with_osr = [
rg
for rg in non_empty_rgs
if any(b in osr for b in rg.brokers)
]
for _ in range(count):
candidate_rgs = rgs_with_osr or non_empty_rgs
total_replicas = sum(
rg.count_replica(partition)
for rg in candidate_rgs
)
opt_replica_cnt, _ = compute_optimum(
len(candidate_rgs),
total_replicas,
)
over_replicated_rgs = [
rg
for rg in candidate_rgs
if rg.count_replica(partition) > opt_replica_cnt
]
candidate_rgs = over_replicated_rgs or candidate_rgs
rg = max(candidate_rgs, key=lambda rg: len(rg.partitions))
osr_in_rg = [b for b in rg.brokers if b in osr]
rg.remove_replica(partition, osr_in_rg)
osr = [b for b in osr if b in partition.replicas]
if rg in rgs_with_osr and len(osr_in_rg) == 1:
rgs_with_osr.remove(rg)
if rg.count_replica(partition) == 0:
non_empty_rgs.remove(rg)
new_leader = min(
partition.replicas,
key=lambda broker: broker.count_preferred_replica(),
)
partition.swap_leader(new_leader) | [
"def",
"remove_replica",
"(",
"self",
",",
"partition_name",
",",
"osr_broker_ids",
",",
"count",
"=",
"1",
")",
":",
"try",
":",
"partition",
"=",
"self",
".",
"cluster_topology",
".",
"partitions",
"[",
"partition_name",
"]",
"except",
"KeyError",
":",
"ra... | Remove one replica of a partition from the cluster.
The replication-group to remove from is determined as follows:
1. Find all replication-groups that contain at least one
out-of-sync replica for this partition.
2. Of these, find replication-groups with more than the average
number of replicas of this partition.
3. Choose the replication-group with the most overall partitions.
4. Repeat steps 1-3 with in-sync replicas
After this operation, the preferred leader for this partition will
be set to the broker that leads the fewest other partitions, even if
the current preferred leader is not removed.
This is done to keep the number of preferred replicas balanced across
brokers in the cluster.
:param partition_name: (topic_id, partition_id) of the partition to
remove replicas of.
:param osr_broker_ids: A list of the partition's out-of-sync broker ids.
:param count: The number of replicas to remove.
:raises: InvalidReplicationFactorError when count is greater than the
replication factor of the partition. | [
"Remove",
"one",
"replica",
"of",
"a",
"partition",
"from",
"the",
"cluster",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py#L473-L560 | train | 202,650 |
Yelp/kafka-utils | kafka_utils/kafka_consumer_manager/util.py | preprocess_topics | def preprocess_topics(source_groupid, source_topics, dest_groupid, topics_dest_group):
"""Pre-process the topics in source and destination group for duplicates."""
# Is the new consumer already subscribed to any of these topics?
common_topics = [topic for topic in topics_dest_group if topic in source_topics]
if common_topics:
print(
"Error: Consumer Group ID: {groupid} is already "
"subscribed to following topics: {topic}.\nPlease delete this "
"topics from new group before re-running the "
"command.".format(
groupid=dest_groupid,
topic=', '.join(common_topics),
),
file=sys.stderr,
)
sys.exit(1)
# Let's confirm what the user intends to do.
if topics_dest_group:
in_str = (
"New Consumer Group: {dest_groupid} already "
"exists.\nTopics subscribed to by the consumer groups are listed "
"below:\n{source_groupid}: {source_group_topics}\n"
"{dest_groupid}: {dest_group_topics}\nDo you intend to copy into"
"existing consumer destination-group? (y/n)".format(
source_groupid=source_groupid,
source_group_topics=source_topics,
dest_groupid=dest_groupid,
dest_group_topics=topics_dest_group,
)
)
def preprocess_topics(source_groupid, source_topics, dest_groupid, topics_dest_group):
    """Pre-process the topics in source and destination group for duplicates.

    :param source_groupid: consumer group id of the source group
    :param source_topics: topics subscribed to by the source group
    :param dest_groupid: consumer group id of the destination group
    :param topics_dest_group: topics already subscribed to by the destination group

    Exits the process with status 1 if the destination group is already
    subscribed to any of the source topics.  If the destination group exists
    with other (non-overlapping) topics, asks the user to confirm before
    proceeding.
    """
    # Is the new consumer already subscribed to any of these topics?
    common_topics = [topic for topic in topics_dest_group if topic in source_topics]
    if common_topics:
        # Fixed message grammar: "this topics" -> "these topics".
        print(
            "Error: Consumer Group ID: {groupid} is already "
            "subscribed to following topics: {topic}.\nPlease delete these "
            "topics from new group before re-running the "
            "command.".format(
                groupid=dest_groupid,
                topic=', '.join(common_topics),
            ),
            file=sys.stderr,
        )
        sys.exit(1)
    # Let's confirm what the user intends to do.
    if topics_dest_group:
        # Fixed broken concatenation: the two adjacent literals previously
        # produced "copy intoexisting" (missing space).
        in_str = (
            "New Consumer Group: {dest_groupid} already "
            "exists.\nTopics subscribed to by the consumer groups are listed "
            "below:\n{source_groupid}: {source_group_topics}\n"
            "{dest_groupid}: {dest_group_topics}\nDo you intend to copy into "
            "existing consumer destination-group? (y/n)".format(
                source_groupid=source_groupid,
                source_group_topics=source_topics,
                dest_groupid=dest_groupid,
                dest_group_topics=topics_dest_group,
            )
        )
        prompt_user_input(in_str)
"def",
"preprocess_topics",
"(",
"source_groupid",
",",
"source_topics",
",",
"dest_groupid",
",",
"topics_dest_group",
")",
":",
"# Is the new consumer already subscribed to any of these topics?",
"common_topics",
"=",
"[",
"topic",
"for",
"topic",
"in",
"topics_dest_group",... | Pre-process the topics in source and destination group for duplicates. | [
"Pre",
"-",
"process",
"the",
"topics",
"in",
"source",
"and",
"destination",
"group",
"for",
"duplicates",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_consumer_manager/util.py#L41-L71 | train | 202,651 |
def create_offsets(zk, consumer_group, offsets):
    """Write an offset znode for every topic-partition of the given group.

    :param zk: Zookeeper client
    :param consumer_group: Consumer group id for given offsets
    :type consumer_group: int
    :param offsets: Offsets of all topic-partitions
    :type offsets: dict(topic, dict(partition, offset))
    """
    path_template = "/consumers/{groupid}/offsets/{topic}/{partition}"
    for topic_name, partition_offsets in offsets.items():
        for partition_id, offset_value in partition_offsets.items():
            offset_path = path_template.format(
                groupid=consumer_group,
                topic=topic_name,
                partition=partition_id,
            )
            try:
                # makepath=True creates any missing parent znodes.
                zk.create(offset_path, value=offset_value, makepath=True)
            except NodeExistsError:
                print(
                    "Error: Path {path} already exists. Please re-run the "
                    "command.".format(path=offset_path),
                    file=sys.stderr,
                )
                raise
"def",
"create_offsets",
"(",
"zk",
",",
"consumer_group",
",",
"offsets",
")",
":",
"# Create new offsets",
"for",
"topic",
",",
"partition_offsets",
"in",
"six",
".",
"iteritems",
"(",
"offsets",
")",
":",
"for",
"partition",
",",
"offset",
"in",
"six",
".... | Create path with offset value for each topic-partition of given consumer
group.
:param zk: Zookeeper client
:param consumer_group: Consumer group id for given offsets
:type consumer_group: int
:param offsets: Offsets of all topic-partitions
:type offsets: dict(topic, dict(partition, offset)) | [
"Create",
"path",
"with",
"offset",
"value",
"for",
"each",
"topic",
"-",
"partition",
"of",
"given",
"consumer",
"group",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_consumer_manager/util.py#L74-L100 | train | 202,652 |
def fetch_offsets(zk, consumer_group, topics):
    """Fetch offsets for given topics of given consumer group.

    :param zk: Zookeeper client
    :param consumer_group: Consumer group id for given offsets
    :type consumer_group: int
    :rtype: dict(topic, dict(partition, offset))
    """
    fetched = defaultdict(dict)
    for topic_name, partitions in topics.items():
        for partition_id in partitions:
            # zk.get returns a (value, stat) pair; only the value is needed.
            node_value, _stat = zk.get(
                "/consumers/{groupid}/offsets/{topic}/{partition}".format(
                    groupid=consumer_group,
                    topic=topic_name,
                    partition=partition_id,
                )
            )
            fetched[topic_name][partition_id] = node_value
    return fetched
"def",
"fetch_offsets",
"(",
"zk",
",",
"consumer_group",
",",
"topics",
")",
":",
"source_offsets",
"=",
"defaultdict",
"(",
"dict",
")",
"for",
"topic",
",",
"partitions",
"in",
"six",
".",
"iteritems",
"(",
"topics",
")",
":",
"for",
"partition",
"in",
... | Fetch offsets for given topics of given consumer group.
:param zk: Zookeeper client
:param consumer_group: Consumer group id for given offsets
:type consumer_group: int
:rtype: dict(topic, dict(partition, offset)) | [
"Fetch",
"offsets",
"for",
"given",
"topics",
"of",
"given",
"consumer",
"group",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_consumer_manager/util.py#L103-L122 | train | 202,653 |
def get_offset_topic_partition_count(kafka_config):
    """Return the number of partitions in the consumer offset topic.

    :raises UnknownTopic: if the offset topic is absent from the cluster
        metadata.
    """
    cluster_metadata = get_topic_partition_metadata(kafka_config.broker_list)
    if CONSUMER_OFFSET_TOPIC in cluster_metadata:
        return len(cluster_metadata[CONSUMER_OFFSET_TOPIC])
    raise UnknownTopic("Consumer offset topic is missing.")
"def",
"get_offset_topic_partition_count",
"(",
"kafka_config",
")",
":",
"metadata",
"=",
"get_topic_partition_metadata",
"(",
"kafka_config",
".",
"broker_list",
")",
"if",
"CONSUMER_OFFSET_TOPIC",
"not",
"in",
"metadata",
":",
"raise",
"UnknownTopic",
"(",
"\"Consume... | Given a kafka cluster configuration, return the number of partitions
in the offset topic. It will raise an UnknownTopic exception if the topic
cannot be found. | [
"Given",
"a",
"kafka",
"cluster",
"configuration",
"return",
"the",
"number",
"of",
"partitions",
"in",
"the",
"offset",
"topic",
".",
"It",
"will",
"raise",
"an",
"UnknownTopic",
"exception",
"if",
"the",
"topic",
"cannot",
"be",
"found",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_consumer_manager/util.py#L134-L141 | train | 202,654 |
def get_group_partition(group, partition_count):
    """Return the offsets-topic partition that stores data for *group*.

    Reproduces Java's ``String.hashCode`` with 32-bit wrap-around and maps
    its absolute value onto the available partitions, matching how the
    broker assigns groups to __consumer_offsets partitions.
    """
    hashcode = 0
    for char in group:
        hashcode = (hashcode * 31 + ord(char)) & 0xFFFFFFFF
    # Reinterpret the unsigned 32-bit value as a signed Java int.
    signed = ((hashcode + 0x80000000) & 0xFFFFFFFF) - 0x80000000
    return abs(signed) % partition_count
"def",
"get_group_partition",
"(",
"group",
",",
"partition_count",
")",
":",
"def",
"java_string_hashcode",
"(",
"s",
")",
":",
"h",
"=",
"0",
"for",
"c",
"in",
"s",
":",
"h",
"=",
"(",
"31",
"*",
"h",
"+",
"ord",
"(",
"c",
")",
")",
"&",
"0xFFF... | Given a group name, return the partition number of the consumer offset
topic containing the data associated to that group. | [
"Given",
"a",
"group",
"name",
"return",
"the",
"partition",
"number",
"of",
"the",
"consumer",
"offset",
"topic",
"containing",
"the",
"data",
"associated",
"to",
"that",
"group",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_consumer_manager/util.py#L153-L161 | train | 202,655 |
def topic_offsets_for_timestamp(consumer, timestamp, topics):
    """Look up, per partition, the first offset at or after *timestamp*.

    Arguments:
        consumer (KafkaConsumer): an initialized kafka-python consumer
        timestamp (int): Unix epoch milliseconds
        topics (list): topics whose offsets are to be fetched

    :returns:
        ``{TopicPartition: OffsetAndTimestamp}`` mapping each partition to
        the timestamp/offset of the first message at or after the target
        timestamp; the value is ``None`` for partitions where timestamps
        are unsupported, no later offset exists, or there is no data.
    :raises:
        ValueError: if the target timestamp is negative
        UnsupportedVersionError: if the broker cannot look up offsets by
            timestamp
        KafkaTimeoutError: if the fetch did not complete in
            request_timeout_ms
    """
    # Every partition of every requested topic is queried at the same time.
    partition_timestamps = {
        tp: timestamp
        for topic in topics
        for tp in consumer_partitions_for_topic(consumer, topic)
    }
    return consumer.offsets_for_times(partition_timestamps)
"def",
"topic_offsets_for_timestamp",
"(",
"consumer",
",",
"timestamp",
",",
"topics",
")",
":",
"tp_timestamps",
"=",
"{",
"}",
"for",
"topic",
"in",
"topics",
":",
"topic_partitions",
"=",
"consumer_partitions_for_topic",
"(",
"consumer",
",",
"topic",
")",
"... | Given an initialized KafkaConsumer, timestamp, and list of topics,
looks up the offsets for the given topics by timestamp. The returned
offset for each partition is the earliest offset whose timestamp is greater than or
equal to the given timestamp in the corresponding partition.
Arguments:
consumer (KafkaConsumer): an initialized kafka-python consumer
timestamp (int): Unix epoch milliseconds. Unit should be milliseconds
since beginning of the epoch (midnight Jan 1, 1970 (UTC))
topics (list): List of topics whose offsets are to be fetched.
:returns:
``{TopicPartition: OffsetAndTimestamp}``: mapping from partition
to the timestamp and offset of the first message with timestamp
greater than or equal to the target timestamp.
Returns ``{TopicPartition: None}`` for specific topic-partiitons if:
1. Timestamps are not supported in messages
2. No offsets in the partition after the given timestamp
3. No data in the topic-partition
:raises:
ValueError: If the target timestamp is negative
UnsupportedVersionError: If the broker does not support looking
up the offsets by timestamp.
KafkaTimeoutError: If fetch failed in request_timeout_ms | [
"Given",
"an",
"initialized",
"KafkaConsumer",
"timestamp",
"and",
"list",
"of",
"topics",
"looks",
"up",
"the",
"offsets",
"for",
"the",
"given",
"topics",
"by",
"timestamp",
".",
"The",
"returned",
"offset",
"for",
"each",
"partition",
"is",
"the",
"earliest... | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_consumer_manager/util.py#L164-L194 | train | 202,656 |
def consumer_partitions_for_topic(consumer, topic):
    """Return every TopicPartition belonging to *topic*.

    Arguments:
        consumer: an initialized KafkaConsumer
        topic: the topic name to fetch TopicPartitions for

    :returns:
        list(TopicPartition): empty (with an error logged) when the
        consumer reports no partitions, e.g. the topic does not exist.
    """
    partition_ids = consumer.partitions_for_topic(topic)
    if partition_ids is None:
        logging.error(
            "No partitions found for topic {}. Maybe it doesn't exist?".format(topic),
        )
        return []
    return [TopicPartition(topic, partition_id) for partition_id in partition_ids]
"def",
"consumer_partitions_for_topic",
"(",
"consumer",
",",
"topic",
")",
":",
"topic_partitions",
"=",
"[",
"]",
"partitions",
"=",
"consumer",
".",
"partitions_for_topic",
"(",
"topic",
")",
"if",
"partitions",
"is",
"not",
"None",
":",
"for",
"partition",
... | Returns a list of all TopicPartitions for a given topic.
Arguments:
consumer: an initialized KafkaConsumer
topic: a topic name to fetch TopicPartitions for
:returns:
list(TopicPartition): A list of TopicPartitions that belong to the given topic | [
"Returns",
"a",
"list",
"of",
"all",
"TopicPartitions",
"for",
"a",
"given",
"topic",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_consumer_manager/util.py#L197-L216 | train | 202,657 |
def consumer_commit_for_times(consumer, partition_to_offset, atomic=False):
    """Commit the offsets in *partition_to_offset* through *consumer*.

    Arguments:
        consumer (KafkaConsumer): an initialized kafka-python consumer.
        partition_to_offset (dict): TopicPartition -> OffsetAndTimestamp,
            as returned by ``offsets_for_times``.
        atomic (bool): when True, abort the whole commit if any
            TopicPartition is missing an offset.
    """
    missing = set()
    for tp, offset in partition_to_offset.items():
        if offset is None:
            logging.error(
                "No offsets found for topic-partition {tp}. Either timestamps not supported"
                " for the topic {tp}, or no offsets found after timestamp specified, or there is no"
                " data in the topic-partition.".format(tp=tp),
            )
            missing.add(tp)

    if atomic and len(missing) > 0:
        logging.error(
            "Commit aborted; offsets were not found for timestamps in"
            " topics {}".format(",".join([str(tp) for tp in missing])),
        )
        return

    # Partitions without a resolved offset are silently skipped when the
    # commit is not atomic.
    offsets_metadata = {
        tp: OffsetAndMetadata(partition_to_offset[tp].offset, metadata=None)
        for tp in partition_to_offset.keys() if tp not in missing
    }
    if offsets_metadata:
        consumer.commit(offsets_metadata)
"def",
"consumer_commit_for_times",
"(",
"consumer",
",",
"partition_to_offset",
",",
"atomic",
"=",
"False",
")",
":",
"no_offsets",
"=",
"set",
"(",
")",
"for",
"tp",
",",
"offset",
"in",
"six",
".",
"iteritems",
"(",
"partition_to_offset",
")",
":",
"if",... | Commits offsets to Kafka using the given KafkaConsumer and offsets, a mapping
of TopicPartition to Unix Epoch milliseconds timestamps.
Arguments:
consumer (KafkaConsumer): an initialized kafka-python consumer.
partitions_to_offset (dict TopicPartition: OffsetAndTimestamp): Map of TopicPartition to OffsetAndTimestamp. Return value of offsets_for_times.
atomic (bool): Flag to specify whether the commit should fail if offsets are not found for some
TopicPartition: timestamp pairs. | [
"Commits",
"offsets",
"to",
"Kafka",
"using",
"the",
"given",
"KafkaConsumer",
"and",
"offsets",
"a",
"mapping",
"of",
"TopicPartition",
"to",
"Unix",
"Epoch",
"milliseconds",
"timestamps",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_consumer_manager/util.py#L219-L251 | train | 202,658 |
def get_cluster_config(
    cluster_type,
    cluster_name=None,
    kafka_topology_base_path=None,
):
    """Return the cluster configuration.

    Use the local cluster if cluster_name is not specified.

    :param cluster_type: the type of the cluster
    :type cluster_type: string
    :param cluster_name: the name of the cluster
    :type cluster_name: string
    :param kafka_topology_base_path: base path to look for <cluster_type>.yaml
    :type kafka_topology_base_path: string
    :returns: the cluster
    :rtype: ClusterConfig
    """
    if kafka_topology_base_path:
        config_dirs = [kafka_topology_base_path]
    else:
        config_dirs = get_conf_dirs()

    # Later directories overwrite earlier ones; missing files are skipped.
    topology = None
    for config_dir in config_dirs:
        try:
            topology = TopologyConfiguration(cluster_type, config_dir)
        except MissingConfigurationError:
            pass

    if not topology:
        raise MissingConfigurationError(
            "No available configuration for type {0}".format(cluster_type),
        )

    if cluster_name:
        return topology.get_cluster_by_name(cluster_name)
    return topology.get_local_cluster()
"def",
"get_cluster_config",
"(",
"cluster_type",
",",
"cluster_name",
"=",
"None",
",",
"kafka_topology_base_path",
"=",
"None",
",",
")",
":",
"if",
"not",
"kafka_topology_base_path",
":",
"config_dirs",
"=",
"get_conf_dirs",
"(",
")",
"else",
":",
"config_dirs"... | Return the cluster configuration.
Use the local cluster if cluster_name is not specified.
:param cluster_type: the type of the cluster
:type cluster_type: string
:param cluster_name: the name of the cluster
:type cluster_name: string
:param kafka_topology_base_path: base path to look for <cluster_type>.yaml
:type cluster_name: string
:returns: the cluster
:rtype: ClusterConfig | [
"Return",
"the",
"cluster",
"configuration",
".",
"Use",
"the",
"local",
"cluster",
"if",
"cluster_name",
"is",
"not",
"specified",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/config.py#L213-L252 | train | 202,659 |
def iter_configurations(kafka_topology_base_path=None):
    """Cluster topology iterator.

    Yield a TopologyConfiguration for every cluster type discovered in the
    config directories; each type is yielded at most once, with earlier
    directories shadowing later ones.
    """
    if kafka_topology_base_path:
        config_dirs = [kafka_topology_base_path]
    else:
        config_dirs = get_conf_dirs()

    seen_types = set()
    for config_dir in config_dirs:
        # Cluster type is the yaml filename minus its ".yaml" suffix.
        discovered = [
            os.path.basename(path)[:-5]
            for path in glob.glob('{0}/*.yaml'.format(config_dir))
        ]
        for cluster_type in discovered:
            if cluster_type in seen_types:
                continue
            try:
                topology = TopologyConfiguration(cluster_type, config_dir)
            except ConfigurationError:
                # Invalid files don't mark the type as seen, so a later
                # directory may still provide it.
                continue
            seen_types.add(cluster_type)
            yield topology
"def",
"iter_configurations",
"(",
"kafka_topology_base_path",
"=",
"None",
")",
":",
"if",
"not",
"kafka_topology_base_path",
":",
"config_dirs",
"=",
"get_conf_dirs",
"(",
")",
"else",
":",
"config_dirs",
"=",
"[",
"kafka_topology_base_path",
"]",
"types",
"=",
... | Cluster topology iterator.
Iterate over all the topologies available in config. | [
"Cluster",
"topology",
"iterator",
".",
"Iterate",
"over",
"all",
"the",
"topologies",
"available",
"in",
"config",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/config.py#L255-L279 | train | 202,660 |
def load_topology_config(self):
    """Load the topology configuration for this cluster type.

    Reads ``<kafka_topology_path>/<cluster_type>.yaml`` and populates
    ``self.clusters`` and, when present, ``self.local_config``.

    :raises MissingConfigurationError: if the yaml file does not exist
    :raises InvalidConfigurationError: if the file lacks a 'clusters' key
    """
    config_path = os.path.join(
        self.kafka_topology_path,
        '{id}.yaml'.format(id=self.cluster_type),
    )
    self.log.debug("Loading configuration from %s", config_path)
    if not os.path.isfile(config_path):
        raise MissingConfigurationError(
            "Topology configuration {0} for cluster {1} "
            "does not exist".format(
                config_path,
                self.cluster_type,
            )
        )
    topology_config = load_yaml_config(config_path)
    self.log.debug("Topology configuration %s", topology_config)
    try:
        self.clusters = topology_config['clusters']
    except KeyError:
        self.log.exception("Invalid topology file")
        raise InvalidConfigurationError("Invalid topology file {0}".format(
            config_path))
    if 'local_config' in topology_config:
        self.local_config = topology_config['local_config']
"def",
"load_topology_config",
"(",
"self",
")",
":",
"config_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"kafka_topology_path",
",",
"'{id}.yaml'",
".",
"format",
"(",
"id",
"=",
"self",
".",
"cluster_type",
")",
",",
")",
"self",
".",... | Load the topology configuration | [
"Load",
"the",
"topology",
"configuration"
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/config.py#L125-L150 | train | 202,661 |
def convert_to_broker_id(string):
    """Parse *string* as a kafka broker_id: a positive integer, or -1.

    :raises argparse.ArgumentTypeError: for non-integers, zero, and
        negative values other than -1.
    """
    error_msg = 'Positive integer or -1 required, {string} given.'.format(string=string)
    try:
        broker_id = int(string)
    except ValueError:
        raise argparse.ArgumentTypeError(error_msg)
    if broker_id == -1 or broker_id > 0:
        return broker_id
    raise argparse.ArgumentTypeError(error_msg)
"def",
"convert_to_broker_id",
"(",
"string",
")",
":",
"error_msg",
"=",
"'Positive integer or -1 required, {string} given.'",
".",
"format",
"(",
"string",
"=",
"string",
")",
"try",
":",
"value",
"=",
"int",
"(",
"string",
")",
"except",
"ValueError",
":",
"r... | Convert string to kafka broker_id. | [
"Convert",
"string",
"to",
"kafka",
"broker_id",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_check/main.py#L37-L46 | train | 202,662 |
def run():
    """Verify command-line arguments and run commands"""
    # Entry point for the kafka-check CLI: validates flag combinations,
    # resolves the broker id when auto-detection (-1) is requested, loads
    # the cluster config and dispatches to the selected subcommand.
    args = parse_args()
    logging.basicConfig(level=logging.WARN)
    # to prevent flooding for sensu-check.
    logging.getLogger('kafka').setLevel(logging.CRITICAL)
    # --controller-only and --first-broker-only are mutually exclusive.
    # NOTE(review): the flow below assumes terminate() exits the process and
    # never returns — otherwise `code`/`msg` at the bottom could be unbound
    # after a ConfigurationError. TODO confirm against terminate().
    if args.controller_only and args.first_broker_only:
        terminate(
            status_code.WARNING,
            prepare_terminate_message(
                "Only one of controller_only and first_broker_only should be used",
            ),
            args.json,
        )
    if args.controller_only or args.first_broker_only:
        if args.broker_id is None:
            terminate(
                status_code.WARNING,
                prepare_terminate_message("broker_id is not specified"),
                args.json,
            )
        elif args.broker_id == -1:
            # -1 is the sentinel for "detect the local broker id from the
            # data directory".
            try:
                args.broker_id = get_broker_id(args.data_path)
            except Exception as e:
                terminate(
                    status_code.WARNING,
                    prepare_terminate_message("{}".format(e)),
                    args.json,
                )
    try:
        cluster_config = config.get_cluster_config(
            args.cluster_type,
            args.cluster_name,
            args.discovery_base_path,
        )
        # Each subcommand returns a (status_code, message) pair.
        code, msg = args.command(cluster_config, args)
    except ConfigurationError as e:
        terminate(
            status_code.CRITICAL,
            prepare_terminate_message("ConfigurationError {0}".format(e)),
            args.json,
        )
    terminate(code, msg, args.json)
"def",
"run",
"(",
")",
":",
"args",
"=",
"parse_args",
"(",
")",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"WARN",
")",
"# to prevent flooding for sensu-check.",
"logging",
".",
"getLogger",
"(",
"'kafka'",
")",
".",
"setLevel",
"(",... | Verify command-line arguments and run commands | [
"Verify",
"command",
"-",
"line",
"arguments",
"and",
"run",
"commands"
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_check/main.py#L123-L171 | train | 202,663 |
def exception_logger(exc_type, exc_value, exc_traceback):
    """Log unhandled exceptions, then delegate to the default excepthook."""
    # Ctrl-C is an intentional interrupt, so it is not logged.
    is_interrupt = issubclass(exc_type, KeyboardInterrupt)
    if not is_interrupt:
        _log.critical(
            "Uncaught exception:",
            exc_info=(exc_type, exc_value, exc_traceback)
        )
    sys.__excepthook__(exc_type, exc_value, exc_traceback)
"def",
"exception_logger",
"(",
"exc_type",
",",
"exc_value",
",",
"exc_traceback",
")",
":",
"if",
"not",
"issubclass",
"(",
"exc_type",
",",
"KeyboardInterrupt",
")",
":",
"# do not log Ctrl-C",
"_log",
".",
"critical",
"(",
"\"Uncaught exception:\"",
",",
"exc_... | Log unhandled exceptions | [
"Log",
"unhandled",
"exceptions"
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/main.py#L177-L184 | train | 202,664 |
Yelp/kafka-utils | kafka_utils/kafka_check/commands/replication_factor.py | _find_topics_with_wrong_rp | def _find_topics_with_wrong_rp(topics, zk, default_min_isr):
"""Returns topics with wrong replication factor."""
topics_with_wrong_rf = []
for topic_name, partitions in topics.items():
min_isr = get_min_isr(zk, topic_name) or default_min_isr
replication_factor = len(partitions[0].replicas)
if replication_factor >= min_isr + 1:
continue
topics_with_wrong_rf.append({
'replication_factor': replication_factor,
'min_isr': min_isr,
'topic': topic_name,
})
return topics_with_wrong_rf | python | def _find_topics_with_wrong_rp(topics, zk, default_min_isr):
"""Returns topics with wrong replication factor."""
topics_with_wrong_rf = []
for topic_name, partitions in topics.items():
min_isr = get_min_isr(zk, topic_name) or default_min_isr
replication_factor = len(partitions[0].replicas)
if replication_factor >= min_isr + 1:
continue
topics_with_wrong_rf.append({
'replication_factor': replication_factor,
'min_isr': min_isr,
'topic': topic_name,
})
return topics_with_wrong_rf | [
"def",
"_find_topics_with_wrong_rp",
"(",
"topics",
",",
"zk",
",",
"default_min_isr",
")",
":",
"topics_with_wrong_rf",
"=",
"[",
"]",
"for",
"topic_name",
",",
"partitions",
"in",
"topics",
".",
"items",
"(",
")",
":",
"min_isr",
"=",
"get_min_isr",
"(",
"... | Returns topics with wrong replication factor. | [
"Returns",
"topics",
"with",
"wrong",
"replication",
"factor",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_check/commands/replication_factor.py#L58-L75 | train | 202,665 |
Yelp/kafka-utils | kafka_utils/kafka_check/commands/replication_factor.py | ReplicationFactorCmd.run_command | def run_command(self):
"""Replication factor command, checks replication factor settings and compare it with
min.isr in the cluster."""
topics = get_topic_partition_metadata(self.cluster_config.broker_list)
topics_with_wrong_rf = _find_topics_with_wrong_rp(
topics,
self.zk,
self.args.default_min_isr,
)
errcode = status_code.OK if not topics_with_wrong_rf else status_code.CRITICAL
out = _prepare_output(topics_with_wrong_rf, self.args.verbose)
return errcode, out | python | def run_command(self):
"""Replication factor command, checks replication factor settings and compare it with
min.isr in the cluster."""
topics = get_topic_partition_metadata(self.cluster_config.broker_list)
topics_with_wrong_rf = _find_topics_with_wrong_rp(
topics,
self.zk,
self.args.default_min_isr,
)
errcode = status_code.OK if not topics_with_wrong_rf else status_code.CRITICAL
out = _prepare_output(topics_with_wrong_rf, self.args.verbose)
return errcode, out | [
"def",
"run_command",
"(",
"self",
")",
":",
"topics",
"=",
"get_topic_partition_metadata",
"(",
"self",
".",
"cluster_config",
".",
"broker_list",
")",
"topics_with_wrong_rf",
"=",
"_find_topics_with_wrong_rp",
"(",
"topics",
",",
"self",
".",
"zk",
",",
"self",
... | Replication factor command, checks replication factor settings and compare it with
min.isr in the cluster. | [
"Replication",
"factor",
"command",
"checks",
"replication",
"factor",
"settings",
"and",
"compare",
"it",
"with",
"min",
".",
"isr",
"in",
"the",
"cluster",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_check/commands/replication_factor.py#L42-L55 | train | 202,666 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py | GeneticBalancer.decommission_brokers | def decommission_brokers(self, broker_ids):
"""Decommissioning brokers is done by removing all partitions from
the decommissioned brokers and adding them, one-by-one, back to the
cluster.
:param broker_ids: List of broker ids that should be decommissioned.
"""
decommission_brokers = []
for broker_id in broker_ids:
try:
broker = self.cluster_topology.brokers[broker_id]
broker.mark_decommissioned()
decommission_brokers.append(broker)
except KeyError:
raise InvalidBrokerIdError(
"No broker found with id {broker_id}".format(broker_id=broker_id)
)
partitions = defaultdict(int)
# Remove all partitions from decommissioned brokers.
for broker in decommission_brokers:
broker_partitions = list(broker.partitions)
for partition in broker_partitions:
broker.remove_partition(partition)
partitions[partition.name] += 1
active_brokers = self.cluster_topology.active_brokers
# Create state from the initial cluster topology.
self.state = _State(self.cluster_topology, brokers=active_brokers)
# Add partition replicas to active brokers one-by-one.
for partition_name in sorted(six.iterkeys(partitions)): # repeatability
partition = self.cluster_topology.partitions[partition_name]
replica_count = partitions[partition_name]
try:
self.add_replica(partition_name, replica_count)
except InvalidReplicationFactorError:
raise BrokerDecommissionError(
"Not enough active brokers in the cluster. "
"Partition {partition} has replication-factor {rf}, "
"but only {brokers} active brokers remain."
.format(
partition=partition_name,
rf=partition.replication_factor + replica_count,
brokers=len(active_brokers)
)
) | python | def decommission_brokers(self, broker_ids):
"""Decommissioning brokers is done by removing all partitions from
the decommissioned brokers and adding them, one-by-one, back to the
cluster.
:param broker_ids: List of broker ids that should be decommissioned.
"""
decommission_brokers = []
for broker_id in broker_ids:
try:
broker = self.cluster_topology.brokers[broker_id]
broker.mark_decommissioned()
decommission_brokers.append(broker)
except KeyError:
raise InvalidBrokerIdError(
"No broker found with id {broker_id}".format(broker_id=broker_id)
)
partitions = defaultdict(int)
# Remove all partitions from decommissioned brokers.
for broker in decommission_brokers:
broker_partitions = list(broker.partitions)
for partition in broker_partitions:
broker.remove_partition(partition)
partitions[partition.name] += 1
active_brokers = self.cluster_topology.active_brokers
# Create state from the initial cluster topology.
self.state = _State(self.cluster_topology, brokers=active_brokers)
# Add partition replicas to active brokers one-by-one.
for partition_name in sorted(six.iterkeys(partitions)): # repeatability
partition = self.cluster_topology.partitions[partition_name]
replica_count = partitions[partition_name]
try:
self.add_replica(partition_name, replica_count)
except InvalidReplicationFactorError:
raise BrokerDecommissionError(
"Not enough active brokers in the cluster. "
"Partition {partition} has replication-factor {rf}, "
"but only {brokers} active brokers remain."
.format(
partition=partition_name,
rf=partition.replication_factor + replica_count,
brokers=len(active_brokers)
)
) | [
"def",
"decommission_brokers",
"(",
"self",
",",
"broker_ids",
")",
":",
"decommission_brokers",
"=",
"[",
"]",
"for",
"broker_id",
"in",
"broker_ids",
":",
"try",
":",
"broker",
"=",
"self",
".",
"cluster_topology",
".",
"brokers",
"[",
"broker_id",
"]",
"b... | Decommissioning brokers is done by removing all partitions from
the decommissioned brokers and adding them, one-by-one, back to the
cluster.
:param broker_ids: List of broker ids that should be decommissioned. | [
"Decommissioning",
"brokers",
"is",
"done",
"by",
"removing",
"all",
"partitions",
"from",
"the",
"decommissioned",
"brokers",
"and",
"adding",
"them",
"one",
"-",
"by",
"-",
"one",
"back",
"to",
"the",
"cluster",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py#L271-L319 | train | 202,667 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py | GeneticBalancer.add_replica | def add_replica(self, partition_name, count=1):
"""Adding a replica is done by trying to add the replica to every
broker in the cluster and choosing the resulting state with the
highest fitness score.
:param partition_name: (topic_id, partition_id) of the partition to add replicas of.
:param count: The number of replicas to add.
"""
try:
partition = self.cluster_topology.partitions[partition_name]
except KeyError:
raise InvalidPartitionError(
"Partition name {name} not found.".format(name=partition_name),
)
active_brokers = self.cluster_topology.active_brokers
if partition.replication_factor + count > len(active_brokers):
raise InvalidReplicationFactorError(
"Cannot increase replication factor from {rf} to {new_rf}."
" There are only {brokers} active brokers."
.format(
rf=partition.replication_factor,
new_rf=partition.replication_factor + count,
brokers=len(active_brokers),
)
)
partition_index = self.state.partition_indices[partition]
for _ in range(count):
# Find eligible replication-groups.
non_full_rgs = [
rg for rg in six.itervalues(self.cluster_topology.rgs)
if rg.count_replica(partition) < len(rg.active_brokers)
]
# Since replicas can only be added to non-full rgs, only consider
# replicas on those rgs when determining which rgs are
# under-replicated.
replica_count = sum(
rg.count_replica(partition)
for rg in non_full_rgs
)
opt_replicas, _ = compute_optimum(
len(non_full_rgs),
replica_count,
)
under_replicated_rgs = [
rg for rg in non_full_rgs
if rg.count_replica(partition) < opt_replicas
] or non_full_rgs
# Add the replica to every eligible broker, as follower and leader
new_states = []
for rg in under_replicated_rgs:
for broker in rg.active_brokers:
if broker not in partition.replicas:
broker_index = self.state.brokers.index(broker)
new_state = self.state.add_replica(
partition_index,
broker_index,
)
new_state_leader = new_state.move_leadership(
partition_index,
broker_index,
)
new_states.extend([new_state, new_state_leader])
# Update cluster topology with highest scoring state.
self.state = sorted(new_states, key=self._score, reverse=True)[0]
self.cluster_topology.update_cluster_topology(self.state.pending_assignment)
# Update the internal state to match.
self.state.clear_pending_assignment() | python | def add_replica(self, partition_name, count=1):
"""Adding a replica is done by trying to add the replica to every
broker in the cluster and choosing the resulting state with the
highest fitness score.
:param partition_name: (topic_id, partition_id) of the partition to add replicas of.
:param count: The number of replicas to add.
"""
try:
partition = self.cluster_topology.partitions[partition_name]
except KeyError:
raise InvalidPartitionError(
"Partition name {name} not found.".format(name=partition_name),
)
active_brokers = self.cluster_topology.active_brokers
if partition.replication_factor + count > len(active_brokers):
raise InvalidReplicationFactorError(
"Cannot increase replication factor from {rf} to {new_rf}."
" There are only {brokers} active brokers."
.format(
rf=partition.replication_factor,
new_rf=partition.replication_factor + count,
brokers=len(active_brokers),
)
)
partition_index = self.state.partition_indices[partition]
for _ in range(count):
# Find eligible replication-groups.
non_full_rgs = [
rg for rg in six.itervalues(self.cluster_topology.rgs)
if rg.count_replica(partition) < len(rg.active_brokers)
]
# Since replicas can only be added to non-full rgs, only consider
# replicas on those rgs when determining which rgs are
# under-replicated.
replica_count = sum(
rg.count_replica(partition)
for rg in non_full_rgs
)
opt_replicas, _ = compute_optimum(
len(non_full_rgs),
replica_count,
)
under_replicated_rgs = [
rg for rg in non_full_rgs
if rg.count_replica(partition) < opt_replicas
] or non_full_rgs
# Add the replica to every eligible broker, as follower and leader
new_states = []
for rg in under_replicated_rgs:
for broker in rg.active_brokers:
if broker not in partition.replicas:
broker_index = self.state.brokers.index(broker)
new_state = self.state.add_replica(
partition_index,
broker_index,
)
new_state_leader = new_state.move_leadership(
partition_index,
broker_index,
)
new_states.extend([new_state, new_state_leader])
# Update cluster topology with highest scoring state.
self.state = sorted(new_states, key=self._score, reverse=True)[0]
self.cluster_topology.update_cluster_topology(self.state.pending_assignment)
# Update the internal state to match.
self.state.clear_pending_assignment() | [
"def",
"add_replica",
"(",
"self",
",",
"partition_name",
",",
"count",
"=",
"1",
")",
":",
"try",
":",
"partition",
"=",
"self",
".",
"cluster_topology",
".",
"partitions",
"[",
"partition_name",
"]",
"except",
"KeyError",
":",
"raise",
"InvalidPartitionError... | Adding a replica is done by trying to add the replica to every
broker in the cluster and choosing the resulting state with the
highest fitness score.
:param partition_name: (topic_id, partition_id) of the partition to add replicas of.
:param count: The number of replicas to add. | [
"Adding",
"a",
"replica",
"is",
"done",
"by",
"trying",
"to",
"add",
"the",
"replica",
"to",
"every",
"broker",
"in",
"the",
"cluster",
"and",
"choosing",
"the",
"resulting",
"state",
"with",
"the",
"highest",
"fitness",
"score",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py#L321-L394 | train | 202,668 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py | GeneticBalancer.remove_replica | def remove_replica(self, partition_name, osr_broker_ids, count=1):
"""Removing a replica is done by trying to remove a replica from every
broker and choosing the resulting state with the highest fitness score.
Out-of-sync replicas will always be removed before in-sync replicas.
:param partition_name: (topic_id, partition_id) of the partition to remove replicas of.
:param osr_broker_ids: A list of the partition's out-of-sync broker ids.
:param count: The number of replicas to remove.
"""
try:
partition = self.cluster_topology.partitions[partition_name]
except KeyError:
raise InvalidPartitionError(
"Partition name {name} not found.".format(name=partition_name),
)
if partition.replication_factor - count < 1:
raise InvalidReplicationFactorError(
"Cannot decrease replication factor from {rf} to {new_rf}."
"Replication factor must be at least 1."
.format(
rf=partition.replication_factor,
new_rf=partition.replication_factor - count,
)
)
osr = {
broker for broker in partition.replicas
if broker.id in osr_broker_ids
}
# Create state from current cluster topology.
state = _State(self.cluster_topology)
partition_index = state.partitions.index(partition)
for _ in range(count):
# Find eligible replication groups.
non_empty_rgs = [
rg for rg in six.itervalues(self.cluster_topology.rgs)
if rg.count_replica(partition) > 0
]
rgs_with_osr = [
rg for rg in non_empty_rgs
if any(b in osr for b in rg.brokers)
]
candidate_rgs = rgs_with_osr or non_empty_rgs
# Since replicas will only be removed from the candidate rgs, only
# count replicas on those rgs when determining which rgs are
# over-replicated.
replica_count = sum(
rg.count_replica(partition)
for rg in candidate_rgs
)
opt_replicas, _ = compute_optimum(
len(candidate_rgs),
replica_count,
)
over_replicated_rgs = [
rg for rg in candidate_rgs
if rg.count_replica(partition) > opt_replicas
] or candidate_rgs
candidate_rgs = over_replicated_rgs or candidate_rgs
# Remove the replica from every eligible broker.
new_states = []
for rg in candidate_rgs:
osr_brokers = {
broker for broker in rg.brokers
if broker in osr
}
candidate_brokers = osr_brokers or rg.brokers
for broker in candidate_brokers:
if broker in partition.replicas:
broker_index = state.brokers.index(broker)
new_states.append(
state.remove_replica(partition_index, broker_index)
)
# Update cluster topology with highest scoring state.
state = sorted(new_states, key=self._score, reverse=True)[0]
self.cluster_topology.update_cluster_topology(state.assignment)
osr = {b for b in osr if b in partition.replicas} | python | def remove_replica(self, partition_name, osr_broker_ids, count=1):
"""Removing a replica is done by trying to remove a replica from every
broker and choosing the resulting state with the highest fitness score.
Out-of-sync replicas will always be removed before in-sync replicas.
:param partition_name: (topic_id, partition_id) of the partition to remove replicas of.
:param osr_broker_ids: A list of the partition's out-of-sync broker ids.
:param count: The number of replicas to remove.
"""
try:
partition = self.cluster_topology.partitions[partition_name]
except KeyError:
raise InvalidPartitionError(
"Partition name {name} not found.".format(name=partition_name),
)
if partition.replication_factor - count < 1:
raise InvalidReplicationFactorError(
"Cannot decrease replication factor from {rf} to {new_rf}."
"Replication factor must be at least 1."
.format(
rf=partition.replication_factor,
new_rf=partition.replication_factor - count,
)
)
osr = {
broker for broker in partition.replicas
if broker.id in osr_broker_ids
}
# Create state from current cluster topology.
state = _State(self.cluster_topology)
partition_index = state.partitions.index(partition)
for _ in range(count):
# Find eligible replication groups.
non_empty_rgs = [
rg for rg in six.itervalues(self.cluster_topology.rgs)
if rg.count_replica(partition) > 0
]
rgs_with_osr = [
rg for rg in non_empty_rgs
if any(b in osr for b in rg.brokers)
]
candidate_rgs = rgs_with_osr or non_empty_rgs
# Since replicas will only be removed from the candidate rgs, only
# count replicas on those rgs when determining which rgs are
# over-replicated.
replica_count = sum(
rg.count_replica(partition)
for rg in candidate_rgs
)
opt_replicas, _ = compute_optimum(
len(candidate_rgs),
replica_count,
)
over_replicated_rgs = [
rg for rg in candidate_rgs
if rg.count_replica(partition) > opt_replicas
] or candidate_rgs
candidate_rgs = over_replicated_rgs or candidate_rgs
# Remove the replica from every eligible broker.
new_states = []
for rg in candidate_rgs:
osr_brokers = {
broker for broker in rg.brokers
if broker in osr
}
candidate_brokers = osr_brokers or rg.brokers
for broker in candidate_brokers:
if broker in partition.replicas:
broker_index = state.brokers.index(broker)
new_states.append(
state.remove_replica(partition_index, broker_index)
)
# Update cluster topology with highest scoring state.
state = sorted(new_states, key=self._score, reverse=True)[0]
self.cluster_topology.update_cluster_topology(state.assignment)
osr = {b for b in osr if b in partition.replicas} | [
"def",
"remove_replica",
"(",
"self",
",",
"partition_name",
",",
"osr_broker_ids",
",",
"count",
"=",
"1",
")",
":",
"try",
":",
"partition",
"=",
"self",
".",
"cluster_topology",
".",
"partitions",
"[",
"partition_name",
"]",
"except",
"KeyError",
":",
"ra... | Removing a replica is done by trying to remove a replica from every
broker and choosing the resulting state with the highest fitness score.
Out-of-sync replicas will always be removed before in-sync replicas.
:param partition_name: (topic_id, partition_id) of the partition to remove replicas of.
:param osr_broker_ids: A list of the partition's out-of-sync broker ids.
:param count: The number of replicas to remove. | [
"Removing",
"a",
"replica",
"is",
"done",
"by",
"trying",
"to",
"remove",
"a",
"replica",
"from",
"every",
"broker",
"and",
"choosing",
"the",
"resulting",
"state",
"with",
"the",
"highest",
"fitness",
"score",
".",
"Out",
"-",
"of",
"-",
"sync",
"replicas... | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py#L396-L477 | train | 202,669 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py | GeneticBalancer._prune | def _prune(self, pop_candidates):
"""Choose a subset of the candidate states to continue on to the next
generation.
:param pop_candidates: The set of candidate states.
"""
return set(
sorted(pop_candidates, key=self._score, reverse=True)
[:self.args.max_pop]
) | python | def _prune(self, pop_candidates):
"""Choose a subset of the candidate states to continue on to the next
generation.
:param pop_candidates: The set of candidate states.
"""
return set(
sorted(pop_candidates, key=self._score, reverse=True)
[:self.args.max_pop]
) | [
"def",
"_prune",
"(",
"self",
",",
"pop_candidates",
")",
":",
"return",
"set",
"(",
"sorted",
"(",
"pop_candidates",
",",
"key",
"=",
"self",
".",
"_score",
",",
"reverse",
"=",
"True",
")",
"[",
":",
"self",
".",
"args",
".",
"max_pop",
"]",
")"
] | Choose a subset of the candidate states to continue on to the next
generation.
:param pop_candidates: The set of candidate states. | [
"Choose",
"a",
"subset",
"of",
"the",
"candidate",
"states",
"to",
"continue",
"on",
"to",
"the",
"next",
"generation",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py#L566-L575 | train | 202,670 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py | GeneticBalancer._score | def _score(self, state, score_movement=True):
"""Score a state based on how balanced it is. A higher score represents
a more balanced state.
:param state: The state to score.
"""
score = 0
max_score = 0
if state.total_weight:
# Coefficient of variance is a value between 0 and the sqrt(n)
# where n is the length of the series (the number of brokers)
# so those parameters are scaled by (1 / sqrt(# or brokers)) to
# get a value between 0 and 1.
#
# Since smaller imbalance values are preferred use 1 - x so that
# higher scores correspond to more balanced states.
score += self.args.partition_weight_cv_score_weight * \
(1 - state.broker_weight_cv / sqrt(len(state.brokers)))
score += self.args.leader_weight_cv_score_weight * \
(1 - state.broker_leader_weight_cv / sqrt(len(state.brokers)))
score += self.args.topic_broker_imbalance_score_weight * \
(1 - state.weighted_topic_broker_imbalance)
score += self.args.broker_partition_count_score_weight * \
(1 - state.broker_partition_count_cv / sqrt(len(state.brokers)))
score += self.args.broker_leader_count_score_weight * \
(1 - state.broker_leader_count_cv / sqrt(len(state.brokers)))
max_score += self.args.partition_weight_cv_score_weight
max_score += self.args.leader_weight_cv_score_weight
max_score += self.args.topic_broker_imbalance_score_weight
max_score += self.args.broker_partition_count_score_weight
max_score += self.args.broker_leader_count_score_weight
if self.args.max_movement_size is not None and score_movement:
# Avoid potential divide-by-zero error
max_movement = max(self.args.max_movement_size, 1)
score += self.args.movement_size_score_weight * \
(1 - state.movement_size / max_movement)
max_score += self.args.movement_size_score_weight
if self.args.max_leader_changes is not None and score_movement:
# Avoid potential divide-by-zero error
max_leader = max(self.args.max_leader_changes, 1)
score += self.args.leader_change_score_weight * \
(1 - state.leader_movement_count / max_leader)
max_score += self.args.leader_change_score_weight
return score / max_score | python | def _score(self, state, score_movement=True):
"""Score a state based on how balanced it is. A higher score represents
a more balanced state.
:param state: The state to score.
"""
score = 0
max_score = 0
if state.total_weight:
# Coefficient of variance is a value between 0 and the sqrt(n)
# where n is the length of the series (the number of brokers)
# so those parameters are scaled by (1 / sqrt(# or brokers)) to
# get a value between 0 and 1.
#
# Since smaller imbalance values are preferred use 1 - x so that
# higher scores correspond to more balanced states.
score += self.args.partition_weight_cv_score_weight * \
(1 - state.broker_weight_cv / sqrt(len(state.brokers)))
score += self.args.leader_weight_cv_score_weight * \
(1 - state.broker_leader_weight_cv / sqrt(len(state.brokers)))
score += self.args.topic_broker_imbalance_score_weight * \
(1 - state.weighted_topic_broker_imbalance)
score += self.args.broker_partition_count_score_weight * \
(1 - state.broker_partition_count_cv / sqrt(len(state.brokers)))
score += self.args.broker_leader_count_score_weight * \
(1 - state.broker_leader_count_cv / sqrt(len(state.brokers)))
max_score += self.args.partition_weight_cv_score_weight
max_score += self.args.leader_weight_cv_score_weight
max_score += self.args.topic_broker_imbalance_score_weight
max_score += self.args.broker_partition_count_score_weight
max_score += self.args.broker_leader_count_score_weight
if self.args.max_movement_size is not None and score_movement:
# Avoid potential divide-by-zero error
max_movement = max(self.args.max_movement_size, 1)
score += self.args.movement_size_score_weight * \
(1 - state.movement_size / max_movement)
max_score += self.args.movement_size_score_weight
if self.args.max_leader_changes is not None and score_movement:
# Avoid potential divide-by-zero error
max_leader = max(self.args.max_leader_changes, 1)
score += self.args.leader_change_score_weight * \
(1 - state.leader_movement_count / max_leader)
max_score += self.args.leader_change_score_weight
return score / max_score | [
"def",
"_score",
"(",
"self",
",",
"state",
",",
"score_movement",
"=",
"True",
")",
":",
"score",
"=",
"0",
"max_score",
"=",
"0",
"if",
"state",
".",
"total_weight",
":",
"# Coefficient of variance is a value between 0 and the sqrt(n)",
"# where n is the length of t... | Score a state based on how balanced it is. A higher score represents
a more balanced state.
:param state: The state to score. | [
"Score",
"a",
"state",
"based",
"on",
"how",
"balanced",
"it",
"is",
".",
"A",
"higher",
"score",
"represents",
"a",
"more",
"balanced",
"state",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py#L577-L623 | train | 202,671 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py | _State.move | def move(self, partition, source, dest):
"""Return a new state that is the result of moving a single partition.
:param partition: The partition index of the partition to move.
:param source: The broker index of the broker to move the partition
from.
:param dest: The broker index of the broker to move the partition to.
"""
new_state = copy(self)
# Update the partition replica tuple
source_index = self.replicas[partition].index(source)
new_state.replicas = tuple_alter(
self.replicas,
(partition, lambda replicas: tuple_replace(
replicas,
(source_index, dest),
)),
)
new_state.pending_partitions = self.pending_partitions + (partition, )
# Update the broker weights
partition_weight = self.partition_weights[partition]
new_state.broker_weights = tuple_alter(
self.broker_weights,
(source, lambda broker_weight: broker_weight - partition_weight),
(dest, lambda broker_weight: broker_weight + partition_weight),
)
# Update the broker partition count
new_state.broker_partition_counts = tuple_alter(
self.broker_partition_counts,
(source, lambda partition_count: partition_count - 1),
(dest, lambda partition_count: partition_count + 1),
)
# Update the broker leader weights
if source_index == 0:
new_state.broker_leader_weights = tuple_alter(
self.broker_leader_weights,
(source, lambda lw: lw - partition_weight),
(dest, lambda lw: lw + partition_weight),
)
new_state.broker_leader_counts = tuple_alter(
self.broker_leader_counts,
(source, lambda leader_count: leader_count - 1),
(dest, lambda leader_count: leader_count + 1),
)
new_state.leader_movement_count += 1
# Update the topic broker counts
topic = self.partition_topic[partition]
new_state.topic_broker_count = tuple_alter(
self.topic_broker_count,
(topic, lambda broker_count: tuple_alter(
broker_count,
(source, lambda count: count - 1),
(dest, lambda count: count + 1),
)),
)
# Update the topic broker imbalance
new_state.topic_broker_imbalance = tuple_replace(
self.topic_broker_imbalance,
(topic, new_state._calculate_topic_imbalance(topic)),
)
new_state._weighted_topic_broker_imbalance = (
self._weighted_topic_broker_imbalance +
self.topic_weights[topic] * (
new_state.topic_broker_imbalance[topic] -
self.topic_broker_imbalance[topic]
)
)
# Update the replication group replica counts
source_rg = self.broker_rg[source]
dest_rg = self.broker_rg[dest]
if source_rg != dest_rg:
new_state.rg_replicas = tuple_alter(
self.rg_replicas,
(source_rg, lambda replica_counts: tuple_alter(
replica_counts,
(partition, lambda replica_count: replica_count - 1),
)),
(dest_rg, lambda replica_counts: tuple_alter(
replica_counts,
(partition, lambda replica_count: replica_count + 1),
)),
)
# Update the movement sizes
new_state.movement_size += self.partition_sizes[partition]
new_state.movement_count += 1
return new_state | python | def move(self, partition, source, dest):
"""Return a new state that is the result of moving a single partition.
:param partition: The partition index of the partition to move.
:param source: The broker index of the broker to move the partition
from.
:param dest: The broker index of the broker to move the partition to.
"""
new_state = copy(self)
# Update the partition replica tuple
source_index = self.replicas[partition].index(source)
new_state.replicas = tuple_alter(
self.replicas,
(partition, lambda replicas: tuple_replace(
replicas,
(source_index, dest),
)),
)
new_state.pending_partitions = self.pending_partitions + (partition, )
# Update the broker weights
partition_weight = self.partition_weights[partition]
new_state.broker_weights = tuple_alter(
self.broker_weights,
(source, lambda broker_weight: broker_weight - partition_weight),
(dest, lambda broker_weight: broker_weight + partition_weight),
)
# Update the broker partition count
new_state.broker_partition_counts = tuple_alter(
self.broker_partition_counts,
(source, lambda partition_count: partition_count - 1),
(dest, lambda partition_count: partition_count + 1),
)
# Update the broker leader weights
if source_index == 0:
new_state.broker_leader_weights = tuple_alter(
self.broker_leader_weights,
(source, lambda lw: lw - partition_weight),
(dest, lambda lw: lw + partition_weight),
)
new_state.broker_leader_counts = tuple_alter(
self.broker_leader_counts,
(source, lambda leader_count: leader_count - 1),
(dest, lambda leader_count: leader_count + 1),
)
new_state.leader_movement_count += 1
# Update the topic broker counts
topic = self.partition_topic[partition]
new_state.topic_broker_count = tuple_alter(
self.topic_broker_count,
(topic, lambda broker_count: tuple_alter(
broker_count,
(source, lambda count: count - 1),
(dest, lambda count: count + 1),
)),
)
# Update the topic broker imbalance
new_state.topic_broker_imbalance = tuple_replace(
self.topic_broker_imbalance,
(topic, new_state._calculate_topic_imbalance(topic)),
)
new_state._weighted_topic_broker_imbalance = (
self._weighted_topic_broker_imbalance +
self.topic_weights[topic] * (
new_state.topic_broker_imbalance[topic] -
self.topic_broker_imbalance[topic]
)
)
# Update the replication group replica counts
source_rg = self.broker_rg[source]
dest_rg = self.broker_rg[dest]
if source_rg != dest_rg:
new_state.rg_replicas = tuple_alter(
self.rg_replicas,
(source_rg, lambda replica_counts: tuple_alter(
replica_counts,
(partition, lambda replica_count: replica_count - 1),
)),
(dest_rg, lambda replica_counts: tuple_alter(
replica_counts,
(partition, lambda replica_count: replica_count + 1),
)),
)
# Update the movement sizes
new_state.movement_size += self.partition_sizes[partition]
new_state.movement_count += 1
return new_state | [
"def",
"move",
"(",
"self",
",",
"partition",
",",
"source",
",",
"dest",
")",
":",
"new_state",
"=",
"copy",
"(",
"self",
")",
"# Update the partition replica tuple",
"source_index",
"=",
"self",
".",
"replicas",
"[",
"partition",
"]",
".",
"index",
"(",
... | Return a new state that is the result of moving a single partition.
:param partition: The partition index of the partition to move.
:param source: The broker index of the broker to move the partition
from.
:param dest: The broker index of the broker to move the partition to. | [
"Return",
"a",
"new",
"state",
"that",
"is",
"the",
"result",
"of",
"moving",
"a",
"single",
"partition",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py#L791-L889 | train | 202,672 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py | _State.move_leadership | def move_leadership(self, partition, new_leader):
"""Return a new state that is the result of changing the leadership of
a single partition.
:param partition: The partition index of the partition to change the
leadership of.
:param new_leader: The broker index of the new leader replica.
"""
new_state = copy(self)
# Update the partition replica tuple
source = new_state.replicas[partition][0]
new_leader_index = self.replicas[partition].index(new_leader)
new_state.replicas = tuple_alter(
self.replicas,
(partition, lambda replicas: tuple_replace(
replicas,
(0, replicas[new_leader_index]),
(new_leader_index, replicas[0]),
)),
)
new_state.pending_partitions = self.pending_partitions + (partition, )
# Update the leader count
new_state.broker_leader_counts = tuple_alter(
self.broker_leader_counts,
(source, lambda leader_count: leader_count - 1),
(new_leader, lambda leader_count: leader_count + 1),
)
# Update the broker leader weights
partition_weight = self.partition_weights[partition]
new_state.broker_leader_weights = tuple_alter(
self.broker_leader_weights,
(source, lambda leader_weight: leader_weight - partition_weight),
(new_leader, lambda leader_weight: leader_weight + partition_weight),
)
# Update the total leader movement size
new_state.leader_movement_count += 1
return new_state | python | def move_leadership(self, partition, new_leader):
"""Return a new state that is the result of changing the leadership of
a single partition.
:param partition: The partition index of the partition to change the
leadership of.
:param new_leader: The broker index of the new leader replica.
"""
new_state = copy(self)
# Update the partition replica tuple
source = new_state.replicas[partition][0]
new_leader_index = self.replicas[partition].index(new_leader)
new_state.replicas = tuple_alter(
self.replicas,
(partition, lambda replicas: tuple_replace(
replicas,
(0, replicas[new_leader_index]),
(new_leader_index, replicas[0]),
)),
)
new_state.pending_partitions = self.pending_partitions + (partition, )
# Update the leader count
new_state.broker_leader_counts = tuple_alter(
self.broker_leader_counts,
(source, lambda leader_count: leader_count - 1),
(new_leader, lambda leader_count: leader_count + 1),
)
# Update the broker leader weights
partition_weight = self.partition_weights[partition]
new_state.broker_leader_weights = tuple_alter(
self.broker_leader_weights,
(source, lambda leader_weight: leader_weight - partition_weight),
(new_leader, lambda leader_weight: leader_weight + partition_weight),
)
# Update the total leader movement size
new_state.leader_movement_count += 1
return new_state | [
"def",
"move_leadership",
"(",
"self",
",",
"partition",
",",
"new_leader",
")",
":",
"new_state",
"=",
"copy",
"(",
"self",
")",
"# Update the partition replica tuple",
"source",
"=",
"new_state",
".",
"replicas",
"[",
"partition",
"]",
"[",
"0",
"]",
"new_le... | Return a new state that is the result of changing the leadership of
a single partition.
:param partition: The partition index of the partition to change the
leadership of.
:param new_leader: The broker index of the new leader replica. | [
"Return",
"a",
"new",
"state",
"that",
"is",
"the",
"result",
"of",
"changing",
"the",
"leadership",
"of",
"a",
"single",
"partition",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py#L891-L933 | train | 202,673 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py | _State.assignment | def assignment(self):
"""Return the partition assignment that this state represents."""
return {
partition.name: [
self.brokers[bid].id for bid in self.replicas[pid]
]
for pid, partition in enumerate(self.partitions)
} | python | def assignment(self):
"""Return the partition assignment that this state represents."""
return {
partition.name: [
self.brokers[bid].id for bid in self.replicas[pid]
]
for pid, partition in enumerate(self.partitions)
} | [
"def",
"assignment",
"(",
"self",
")",
":",
"return",
"{",
"partition",
".",
"name",
":",
"[",
"self",
".",
"brokers",
"[",
"bid",
"]",
".",
"id",
"for",
"bid",
"in",
"self",
".",
"replicas",
"[",
"pid",
"]",
"]",
"for",
"pid",
",",
"partition",
... | Return the partition assignment that this state represents. | [
"Return",
"the",
"partition",
"assignment",
"that",
"this",
"state",
"represents",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py#L1087-L1094 | train | 202,674 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py | _State.pending_assignment | def pending_assignment(self):
"""Return the pending partition assignment that this state represents."""
return {
self.partitions[pid].name: [
self.brokers[bid].id for bid in self.replicas[pid]
]
for pid in set(self.pending_partitions)
} | python | def pending_assignment(self):
"""Return the pending partition assignment that this state represents."""
return {
self.partitions[pid].name: [
self.brokers[bid].id for bid in self.replicas[pid]
]
for pid in set(self.pending_partitions)
} | [
"def",
"pending_assignment",
"(",
"self",
")",
":",
"return",
"{",
"self",
".",
"partitions",
"[",
"pid",
"]",
".",
"name",
":",
"[",
"self",
".",
"brokers",
"[",
"bid",
"]",
".",
"id",
"for",
"bid",
"in",
"self",
".",
"replicas",
"[",
"pid",
"]",
... | Return the pending partition assignment that this state represents. | [
"Return",
"the",
"pending",
"partition",
"assignment",
"that",
"this",
"state",
"represents",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py#L1097-L1104 | train | 202,675 |
Yelp/kafka-utils | kafka_utils/kafka_check/commands/replica_unavailability.py | ReplicaUnavailabilityCmd.run_command | def run_command(self):
"""replica_unavailability command, checks number of replicas not available
for communication over all brokers in the Kafka cluster."""
fetch_unavailable_brokers = True
result = get_topic_partition_with_error(
self.cluster_config,
REPLICA_NOT_AVAILABLE_ERROR,
fetch_unavailable_brokers=fetch_unavailable_brokers,
)
if fetch_unavailable_brokers:
replica_unavailability, unavailable_brokers = result
else:
replica_unavailability = result
errcode = status_code.OK if not replica_unavailability else status_code.CRITICAL
out = _prepare_output(replica_unavailability, unavailable_brokers, self.args.verbose)
return errcode, out | python | def run_command(self):
"""replica_unavailability command, checks number of replicas not available
for communication over all brokers in the Kafka cluster."""
fetch_unavailable_brokers = True
result = get_topic_partition_with_error(
self.cluster_config,
REPLICA_NOT_AVAILABLE_ERROR,
fetch_unavailable_brokers=fetch_unavailable_brokers,
)
if fetch_unavailable_brokers:
replica_unavailability, unavailable_brokers = result
else:
replica_unavailability = result
errcode = status_code.OK if not replica_unavailability else status_code.CRITICAL
out = _prepare_output(replica_unavailability, unavailable_brokers, self.args.verbose)
return errcode, out | [
"def",
"run_command",
"(",
"self",
")",
":",
"fetch_unavailable_brokers",
"=",
"True",
"result",
"=",
"get_topic_partition_with_error",
"(",
"self",
".",
"cluster_config",
",",
"REPLICA_NOT_AVAILABLE_ERROR",
",",
"fetch_unavailable_brokers",
"=",
"fetch_unavailable_brokers"... | replica_unavailability command, checks number of replicas not available
for communication over all brokers in the Kafka cluster. | [
"replica_unavailability",
"command",
"checks",
"number",
"of",
"replicas",
"not",
"available",
"for",
"communication",
"over",
"all",
"brokers",
"in",
"the",
"Kafka",
"cluster",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_check/commands/replica_unavailability.py#L34-L50 | train | 202,676 |
Yelp/kafka-utils | kafka_utils/kafka_check/commands/min_isr.py | get_min_isr | def get_min_isr(zk, topic):
"""Return the min-isr for topic, or None if not specified"""
ISR_CONF_NAME = 'min.insync.replicas'
try:
config = zk.get_topic_config(topic)
except NoNodeError:
return None
if ISR_CONF_NAME in config['config']:
return int(config['config'][ISR_CONF_NAME])
else:
return None | python | def get_min_isr(zk, topic):
"""Return the min-isr for topic, or None if not specified"""
ISR_CONF_NAME = 'min.insync.replicas'
try:
config = zk.get_topic_config(topic)
except NoNodeError:
return None
if ISR_CONF_NAME in config['config']:
return int(config['config'][ISR_CONF_NAME])
else:
return None | [
"def",
"get_min_isr",
"(",
"zk",
",",
"topic",
")",
":",
"ISR_CONF_NAME",
"=",
"'min.insync.replicas'",
"try",
":",
"config",
"=",
"zk",
".",
"get_topic_config",
"(",
"topic",
")",
"except",
"NoNodeError",
":",
"return",
"None",
"if",
"ISR_CONF_NAME",
"in",
... | Return the min-isr for topic, or None if not specified | [
"Return",
"the",
"min",
"-",
"isr",
"for",
"topic",
"or",
"None",
"if",
"not",
"specified"
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_check/commands/min_isr.py#L59-L69 | train | 202,677 |
Yelp/kafka-utils | kafka_utils/kafka_check/commands/min_isr.py | _process_metadata_response | def _process_metadata_response(topics, zk, default_min_isr):
"""Returns not in sync partitions."""
not_in_sync_partitions = []
for topic_name, partitions in topics.items():
min_isr = get_min_isr(zk, topic_name) or default_min_isr
if min_isr is None:
continue
for metadata in partitions.values():
cur_isr = len(metadata.isr)
if cur_isr < min_isr:
not_in_sync_partitions.append({
'isr': cur_isr,
'min_isr': min_isr,
'topic': metadata.topic,
'partition': metadata.partition,
})
return not_in_sync_partitions | python | def _process_metadata_response(topics, zk, default_min_isr):
"""Returns not in sync partitions."""
not_in_sync_partitions = []
for topic_name, partitions in topics.items():
min_isr = get_min_isr(zk, topic_name) or default_min_isr
if min_isr is None:
continue
for metadata in partitions.values():
cur_isr = len(metadata.isr)
if cur_isr < min_isr:
not_in_sync_partitions.append({
'isr': cur_isr,
'min_isr': min_isr,
'topic': metadata.topic,
'partition': metadata.partition,
})
return not_in_sync_partitions | [
"def",
"_process_metadata_response",
"(",
"topics",
",",
"zk",
",",
"default_min_isr",
")",
":",
"not_in_sync_partitions",
"=",
"[",
"]",
"for",
"topic_name",
",",
"partitions",
"in",
"topics",
".",
"items",
"(",
")",
":",
"min_isr",
"=",
"get_min_isr",
"(",
... | Returns not in sync partitions. | [
"Returns",
"not",
"in",
"sync",
"partitions",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_check/commands/min_isr.py#L72-L89 | train | 202,678 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/broker.py | Broker.remove_partition | def remove_partition(self, partition):
"""Remove partition from partition list."""
if partition in self._partitions:
# Remove partition from set
self._partitions.remove(partition)
# Remove broker from replica list of partition
partition.replicas.remove(self)
else:
raise ValueError(
'Partition: {topic_id}:{partition_id} not found in broker '
'{broker_id}'.format(
topic_id=partition.topic.id,
partition_id=partition.partition_id,
broker_id=self._id,
)
) | python | def remove_partition(self, partition):
"""Remove partition from partition list."""
if partition in self._partitions:
# Remove partition from set
self._partitions.remove(partition)
# Remove broker from replica list of partition
partition.replicas.remove(self)
else:
raise ValueError(
'Partition: {topic_id}:{partition_id} not found in broker '
'{broker_id}'.format(
topic_id=partition.topic.id,
partition_id=partition.partition_id,
broker_id=self._id,
)
) | [
"def",
"remove_partition",
"(",
"self",
",",
"partition",
")",
":",
"if",
"partition",
"in",
"self",
".",
"_partitions",
":",
"# Remove partition from set",
"self",
".",
"_partitions",
".",
"remove",
"(",
"partition",
")",
"# Remove broker from replica list of partiti... | Remove partition from partition list. | [
"Remove",
"partition",
"from",
"partition",
"list",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/broker.py#L112-L127 | train | 202,679 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/broker.py | Broker.add_partition | def add_partition(self, partition):
"""Add partition to partition list."""
assert(partition not in self._partitions)
# Add partition to existing set
self._partitions.add(partition)
# Add broker to replica list
partition.add_replica(self) | python | def add_partition(self, partition):
"""Add partition to partition list."""
assert(partition not in self._partitions)
# Add partition to existing set
self._partitions.add(partition)
# Add broker to replica list
partition.add_replica(self) | [
"def",
"add_partition",
"(",
"self",
",",
"partition",
")",
":",
"assert",
"(",
"partition",
"not",
"in",
"self",
".",
"_partitions",
")",
"# Add partition to existing set",
"self",
".",
"_partitions",
".",
"add",
"(",
"partition",
")",
"# Add broker to replica li... | Add partition to partition list. | [
"Add",
"partition",
"to",
"partition",
"list",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/broker.py#L129-L135 | train | 202,680 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/broker.py | Broker.move_partition | def move_partition(self, partition, broker_destination):
"""Move partition to destination broker and adjust replicas."""
self.remove_partition(partition)
broker_destination.add_partition(partition) | python | def move_partition(self, partition, broker_destination):
"""Move partition to destination broker and adjust replicas."""
self.remove_partition(partition)
broker_destination.add_partition(partition) | [
"def",
"move_partition",
"(",
"self",
",",
"partition",
",",
"broker_destination",
")",
":",
"self",
".",
"remove_partition",
"(",
"partition",
")",
"broker_destination",
".",
"add_partition",
"(",
"partition",
")"
] | Move partition to destination broker and adjust replicas. | [
"Move",
"partition",
"to",
"destination",
"broker",
"and",
"adjust",
"replicas",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/broker.py#L137-L140 | train | 202,681 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/broker.py | Broker.count_partitions | def count_partitions(self, topic):
"""Return count of partitions for given topic."""
return sum(1 for p in topic.partitions if p in self.partitions) | python | def count_partitions(self, topic):
"""Return count of partitions for given topic."""
return sum(1 for p in topic.partitions if p in self.partitions) | [
"def",
"count_partitions",
"(",
"self",
",",
"topic",
")",
":",
"return",
"sum",
"(",
"1",
"for",
"p",
"in",
"topic",
".",
"partitions",
"if",
"p",
"in",
"self",
".",
"partitions",
")"
] | Return count of partitions for given topic. | [
"Return",
"count",
"of",
"partitions",
"for",
"given",
"topic",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/broker.py#L142-L144 | train | 202,682 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/broker.py | Broker.request_leadership | def request_leadership(self, opt_count, skip_brokers, skip_partitions):
"""Under-balanced broker requests leadership from current leader, on the
pretext that it recursively can maintain its leadership count as optimal.
:key_terms:
leader-balanced: Count of brokers as leader is at least opt-count
Algorithm:
=========
Step-1: Broker will request leadership from current-leader of partitions
it belongs to.
Step-2: Current-leaders will grant their leadership if one of these happens:-
a) Either they remain leader-balanced.
b) Or they will recursively request leadership from other partitions
until they are become leader-balanced.
If both of these conditions fail, they will revoke their leadership-grant
Step-3: If current-broker becomes leader-balanced it will return
otherwise it moves ahead with next partition.
"""
# Possible partitions which can grant leadership to broker
owned_partitions = list(filter(
lambda p: self is not p.leader and len(p.replicas) > 1,
self.partitions,
))
for partition in owned_partitions:
# Partition not available to grant leadership when:
# 1. Broker is already under leadership change or
# 2. Partition has already granted leadership before
if partition.leader in skip_brokers or partition in skip_partitions:
continue
# Current broker is granted leadership temporarily
prev_leader = partition.swap_leader(self)
# Partition shouldn't be used again
skip_partitions.append(partition)
# Continue if prev-leader remains balanced
# If leadership of prev_leader is to be revoked, it is considered balanced
if prev_leader.count_preferred_replica() >= opt_count or \
prev_leader.revoked_leadership:
# If current broker is leader-balanced return else
# request next-partition
if self.count_preferred_replica() >= opt_count:
return
else:
continue
else: # prev-leader (broker) became unbalanced
# Append skip-brokers list so that it is not unbalanced further
skip_brokers.append(prev_leader)
# Try recursively arrange leadership for prev-leader
prev_leader.request_leadership(opt_count, skip_brokers, skip_partitions)
# If prev-leader couldn't be leader-balanced
# revert its previous grant to current-broker
if prev_leader.count_preferred_replica() < opt_count:
# Partition can be used again for rebalancing
skip_partitions.remove(partition)
partition.swap_leader(prev_leader)
# Try requesting leadership from next partition
continue
else:
# If prev-leader successfully balanced
skip_partitions.append(partition)
# Removing from skip-broker list, since it can now again be
# used for granting leadership for some other partition
skip_brokers.remove(prev_leader)
if self.count_preferred_replica() >= opt_count:
# Return if current-broker is leader-balanced
return
else:
continue | python | def request_leadership(self, opt_count, skip_brokers, skip_partitions):
"""Under-balanced broker requests leadership from current leader, on the
pretext that it recursively can maintain its leadership count as optimal.
:key_terms:
leader-balanced: Count of brokers as leader is at least opt-count
Algorithm:
=========
Step-1: Broker will request leadership from current-leader of partitions
it belongs to.
Step-2: Current-leaders will grant their leadership if one of these happens:-
a) Either they remain leader-balanced.
b) Or they will recursively request leadership from other partitions
until they are become leader-balanced.
If both of these conditions fail, they will revoke their leadership-grant
Step-3: If current-broker becomes leader-balanced it will return
otherwise it moves ahead with next partition.
"""
# Possible partitions which can grant leadership to broker
owned_partitions = list(filter(
lambda p: self is not p.leader and len(p.replicas) > 1,
self.partitions,
))
for partition in owned_partitions:
# Partition not available to grant leadership when:
# 1. Broker is already under leadership change or
# 2. Partition has already granted leadership before
if partition.leader in skip_brokers or partition in skip_partitions:
continue
# Current broker is granted leadership temporarily
prev_leader = partition.swap_leader(self)
# Partition shouldn't be used again
skip_partitions.append(partition)
# Continue if prev-leader remains balanced
# If leadership of prev_leader is to be revoked, it is considered balanced
if prev_leader.count_preferred_replica() >= opt_count or \
prev_leader.revoked_leadership:
# If current broker is leader-balanced return else
# request next-partition
if self.count_preferred_replica() >= opt_count:
return
else:
continue
else: # prev-leader (broker) became unbalanced
# Append skip-brokers list so that it is not unbalanced further
skip_brokers.append(prev_leader)
# Try recursively arrange leadership for prev-leader
prev_leader.request_leadership(opt_count, skip_brokers, skip_partitions)
# If prev-leader couldn't be leader-balanced
# revert its previous grant to current-broker
if prev_leader.count_preferred_replica() < opt_count:
# Partition can be used again for rebalancing
skip_partitions.remove(partition)
partition.swap_leader(prev_leader)
# Try requesting leadership from next partition
continue
else:
# If prev-leader successfully balanced
skip_partitions.append(partition)
# Removing from skip-broker list, since it can now again be
# used for granting leadership for some other partition
skip_brokers.remove(prev_leader)
if self.count_preferred_replica() >= opt_count:
# Return if current-broker is leader-balanced
return
else:
continue | [
"def",
"request_leadership",
"(",
"self",
",",
"opt_count",
",",
"skip_brokers",
",",
"skip_partitions",
")",
":",
"# Possible partitions which can grant leadership to broker",
"owned_partitions",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"p",
":",
"self",
"is",
"not... | Under-balanced broker requests leadership from current leader, on the
pretext that it recursively can maintain its leadership count as optimal.
:key_terms:
leader-balanced: Count of brokers as leader is at least opt-count
Algorithm:
=========
Step-1: Broker will request leadership from current-leader of partitions
it belongs to.
Step-2: Current-leaders will grant their leadership if one of these happens:-
a) Either they remain leader-balanced.
b) Or they will recursively request leadership from other partitions
until they are become leader-balanced.
If both of these conditions fail, they will revoke their leadership-grant
Step-3: If current-broker becomes leader-balanced it will return
otherwise it moves ahead with next partition. | [
"Under",
"-",
"balanced",
"broker",
"requests",
"leadership",
"from",
"current",
"leader",
"on",
"the",
"pretext",
"that",
"it",
"recursively",
"can",
"maintain",
"its",
"leadership",
"count",
"as",
"optimal",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/broker.py#L176-L243 | train | 202,683 |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cluster_info/broker.py | Broker.donate_leadership | def donate_leadership(self, opt_count, skip_brokers, used_edges):
"""Over-loaded brokers tries to donate their leadership to one of their
followers recursively until they become balanced.
:key_terms:
used_edges: Represent list of tuple/edges (partition, prev-leader, new-leader),
which have already been used for donating leadership from
prev-leader to new-leader in same partition before.
skip_brokers: This is to avoid using same broker recursively for balancing
to prevent loops.
:Algorithm:
* Over-loaded leader tries to donate its leadership to one of its followers
* Follower will try to be balanced recursively if it becomes over-balanced
* If it is successful, over-loaded leader moves to next partition if required,
return otherwise.
* If it is unsuccessful, it tries for next-follower or next-partition whatever
or returns if none available.
"""
owned_partitions = list(filter(
lambda p: self is p.leader and len(p.replicas) > 1,
self.partitions,
))
for partition in owned_partitions:
# Skip using same partition with broker if already used before
potential_new_leaders = list(filter(
lambda f: f not in skip_brokers,
partition.followers,
))
for follower in potential_new_leaders:
# Don't swap the broker-pair if already swapped before
# in same partition
if (partition, self, follower) in used_edges:
continue
partition.swap_leader(follower)
used_edges.append((partition, follower, self))
# new-leader didn't unbalance
if follower.count_preferred_replica() <= opt_count + 1:
# over-broker balanced
# If over-broker is the one which needs to be revoked from leadership
# it's considered balanced only if its preferred replica count is 0
if (self.count_preferred_replica() <= opt_count + 1 and not self.revoked_leadership) or \
(self.count_preferred_replica() == 0 and self.revoked_leadership):
return
else:
# Try next-partition, not another follower
break
else: # new-leader (broker) became over-balanced
skip_brokers.append(follower)
follower.donate_leadership(opt_count, skip_brokers, used_edges)
# new-leader couldn't be balanced, revert
if follower.count_preferred_replica() > opt_count + 1:
used_edges.append((partition, follower, self))
partition.swap_leader(self)
# Try next leader or partition
continue
else:
# New-leader was successfully balanced
used_edges.append((partition, follower, self))
# New-leader can be reused
skip_brokers.remove(follower)
# If broker is the one which needs to be revoked from leadership
# it's considered balanced only if its preferred replica count is 0
if (self.count_preferred_replica() <= opt_count + 1 and not self.revoked_leadership) or \
(self.count_preferred_replica() == 0 and self.revoked_leadership):
# Now broker is balanced
return
else:
# Try next-partition, not another follower
break | python | def donate_leadership(self, opt_count, skip_brokers, used_edges):
"""Over-loaded brokers tries to donate their leadership to one of their
followers recursively until they become balanced.
:key_terms:
used_edges: Represent list of tuple/edges (partition, prev-leader, new-leader),
which have already been used for donating leadership from
prev-leader to new-leader in same partition before.
skip_brokers: This is to avoid using same broker recursively for balancing
to prevent loops.
:Algorithm:
* Over-loaded leader tries to donate its leadership to one of its followers
* Follower will try to be balanced recursively if it becomes over-balanced
* If it is successful, over-loaded leader moves to next partition if required,
return otherwise.
* If it is unsuccessful, it tries for next-follower or next-partition whatever
or returns if none available.
"""
owned_partitions = list(filter(
lambda p: self is p.leader and len(p.replicas) > 1,
self.partitions,
))
for partition in owned_partitions:
# Skip using same partition with broker if already used before
potential_new_leaders = list(filter(
lambda f: f not in skip_brokers,
partition.followers,
))
for follower in potential_new_leaders:
# Don't swap the broker-pair if already swapped before
# in same partition
if (partition, self, follower) in used_edges:
continue
partition.swap_leader(follower)
used_edges.append((partition, follower, self))
# new-leader didn't unbalance
if follower.count_preferred_replica() <= opt_count + 1:
# over-broker balanced
# If over-broker is the one which needs to be revoked from leadership
# it's considered balanced only if its preferred replica count is 0
if (self.count_preferred_replica() <= opt_count + 1 and not self.revoked_leadership) or \
(self.count_preferred_replica() == 0 and self.revoked_leadership):
return
else:
# Try next-partition, not another follower
break
else: # new-leader (broker) became over-balanced
skip_brokers.append(follower)
follower.donate_leadership(opt_count, skip_brokers, used_edges)
# new-leader couldn't be balanced, revert
if follower.count_preferred_replica() > opt_count + 1:
used_edges.append((partition, follower, self))
partition.swap_leader(self)
# Try next leader or partition
continue
else:
# New-leader was successfully balanced
used_edges.append((partition, follower, self))
# New-leader can be reused
skip_brokers.remove(follower)
# If broker is the one which needs to be revoked from leadership
# it's considered balanced only if its preferred replica count is 0
if (self.count_preferred_replica() <= opt_count + 1 and not self.revoked_leadership) or \
(self.count_preferred_replica() == 0 and self.revoked_leadership):
# Now broker is balanced
return
else:
# Try next-partition, not another follower
break | [
"def",
"donate_leadership",
"(",
"self",
",",
"opt_count",
",",
"skip_brokers",
",",
"used_edges",
")",
":",
"owned_partitions",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"p",
":",
"self",
"is",
"p",
".",
"leader",
"and",
"len",
"(",
"p",
".",
"replicas... | Over-loaded brokers tries to donate their leadership to one of their
followers recursively until they become balanced.
:key_terms:
used_edges: Represent list of tuple/edges (partition, prev-leader, new-leader),
which have already been used for donating leadership from
prev-leader to new-leader in same partition before.
skip_brokers: This is to avoid using same broker recursively for balancing
to prevent loops.
:Algorithm:
* Over-loaded leader tries to donate its leadership to one of its followers
* Follower will try to be balanced recursively if it becomes over-balanced
* If it is successful, over-loaded leader moves to next partition if required,
return otherwise.
* If it is unsuccessful, it tries for next-follower or next-partition whatever
or returns if none available. | [
"Over",
"-",
"loaded",
"brokers",
"tries",
"to",
"donate",
"their",
"leadership",
"to",
"one",
"of",
"their",
"followers",
"recursively",
"until",
"they",
"become",
"balanced",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/broker.py#L245-L314 | train | 202,684 |
Yelp/kafka-utils | kafka_utils/kafka_corruption_check/main.py | ssh_client | def ssh_client(host):
"""Start an ssh client.
:param host: the host
:type host: str
:returns: ssh client
:rtype: Paramiko client
"""
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host)
return ssh | python | def ssh_client(host):
"""Start an ssh client.
:param host: the host
:type host: str
:returns: ssh client
:rtype: Paramiko client
"""
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host)
return ssh | [
"def",
"ssh_client",
"(",
"host",
")",
":",
"ssh",
"=",
"paramiko",
".",
"SSHClient",
"(",
")",
"ssh",
".",
"set_missing_host_key_policy",
"(",
"paramiko",
".",
"AutoAddPolicy",
"(",
")",
")",
"ssh",
".",
"connect",
"(",
"host",
")",
"return",
"ssh"
] | Start an ssh client.
:param host: the host
:type host: str
:returns: ssh client
:rtype: Paramiko client | [
"Start",
"an",
"ssh",
"client",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_corruption_check/main.py#L58-L69 | train | 202,685 |
def find_files_cmd(data_path, minutes, start_time, end_time):
    """Build the ``find`` command selecting log files by modification time.

    Exactly one of ``minutes`` or ``start_time`` is expected; ``end_time``
    only applies together with ``start_time``.

    :param data_path: the path to the Kafka data directory
    :type data_path: str
    :param minutes: check the files modified in the last N minutes
    :type minutes: int
    :param start_time: check the files modified after start_time
    :type start_time: str
    :param end_time: check the files modified before end_time
    :type end_time: str
    :returns: the find command
    :rtype: str
    """
    if minutes:
        return FIND_MINUTES_COMMAND.format(
            data_path=data_path,
            minutes=minutes,
        )
    if start_time and end_time:
        return FIND_RANGE_COMMAND.format(
            data_path=data_path,
            start_time=start_time,
            end_time=end_time,
        )
    if start_time:
        return FIND_START_COMMAND.format(
            data_path=data_path,
            start_time=start_time,
        )
"""Find the log files depending on their modification time.
:param data_path: the path to the Kafka data directory
:type data_path: str
:param minutes: check the files modified in the last N minutes
:type minutes: int
:param start_time: check the files modified after start_time
:type start_time: str
:param end_time: check the files modified before end_time
:type end_time: str
:returns: the find command
:rtype: str
"""
if minutes:
return FIND_MINUTES_COMMAND.format(
data_path=data_path,
minutes=minutes,
)
if start_time:
if end_time:
return FIND_RANGE_COMMAND.format(
data_path=data_path,
start_time=start_time,
end_time=end_time,
)
else:
return FIND_START_COMMAND.format(
data_path=data_path,
start_time=start_time,
) | [
"def",
"find_files_cmd",
"(",
"data_path",
",",
"minutes",
",",
"start_time",
",",
"end_time",
")",
":",
"if",
"minutes",
":",
"return",
"FIND_MINUTES_COMMAND",
".",
"format",
"(",
"data_path",
"=",
"data_path",
",",
"minutes",
"=",
"minutes",
",",
")",
"if"... | Find the log files depending on their modification time.
:param data_path: the path to the Kafka data directory
:type data_path: str
:param minutes: check the files modified in the last N minutes
:type minutes: int
:param start_time: check the files modified after start_time
:type start_time: str
:param end_time: check the files modified before end_time
:type end_time: str
:returns: the find command
:rtype: str | [
"Find",
"the",
"log",
"files",
"depending",
"on",
"their",
"modification",
"time",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_corruption_check/main.py#L173-L203 | train | 202,686 |
def check_corrupted_files_cmd(java_home, files):
    """Build the remote command that scans the given log files for corruption.

    :param java_home: the JAVA_HOME
    :type java_home: string
    :param files: list of files to be checked
    :type files: list of string
    """
    check_command = CHECK_COMMAND.format(
        ionice=IONICE,
        java_home=java_home,
        files=",".join(files),
    )
    # The dump tool prints one line per message, which can add up to several
    # MB/s; pre-filter on the remote side to shrink the transferred output.
    return "{check_command} | {reduce_output}".format(
        check_command=check_command,
        reduce_output=REDUCE_OUTPUT,
    )
return command | python | def check_corrupted_files_cmd(java_home, files):
"""Check the file corruption of the specified files.
:param java_home: the JAVA_HOME
:type java_home: string
:param files: list of files to be checked
:type files: list of string
"""
files_str = ",".join(files)
check_command = CHECK_COMMAND.format(
ionice=IONICE,
java_home=java_home,
files=files_str,
)
# One line per message can generate several MB/s of data
# Use pre-filtering on the server side to reduce it
command = "{check_command} | {reduce_output}".format(
check_command=check_command,
reduce_output=REDUCE_OUTPUT,
)
return command | [
"def",
"check_corrupted_files_cmd",
"(",
"java_home",
",",
"files",
")",
":",
"files_str",
"=",
"\",\"",
".",
"join",
"(",
"files",
")",
"check_command",
"=",
"CHECK_COMMAND",
".",
"format",
"(",
"ionice",
"=",
"IONICE",
",",
"java_home",
"=",
"java_home",
"... | Check the file corruption of the specified files.
:param java_home: the JAVA_HOME
:type java_home: string
:param files: list of files to be checked
:type files: list of string | [
"Check",
"the",
"file",
"corruption",
"of",
"the",
"specified",
"files",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_corruption_check/main.py#L206-L226 | train | 202,687 |
def get_output_lines_from_command(host, command):
    """Run ``command`` over SSH on ``host`` and return its stdout lines.

    :param host: the host name
    :type host: str
    :param command: the command
    :type command: str
    """
    with closing(ssh_client(host)) as ssh:
        _, stdout, stderr = ssh.exec_command(command)
        output_lines = stdout.read().splitlines()
        report_stderr(host, stderr)
    return output_lines
"""Execute a command on the specified host, returning a list of
output lines.
:param host: the host name
:type host: str
:param command: the command
:type commmand: str
"""
with closing(ssh_client(host)) as ssh:
_, stdout, stderr = ssh.exec_command(command)
lines = stdout.read().splitlines()
report_stderr(host, stderr)
return lines | [
"def",
"get_output_lines_from_command",
"(",
"host",
",",
"command",
")",
":",
"with",
"closing",
"(",
"ssh_client",
"(",
"host",
")",
")",
"as",
"ssh",
":",
"_",
",",
"stdout",
",",
"stderr",
"=",
"ssh",
".",
"exec_command",
"(",
"command",
")",
"lines"... | Execute a command on the specified host, returning a list of
output lines.
:param host: the host name
:type host: str
:param command: the command
:type command: str | [
"Execute",
"a",
"command",
"on",
"the",
"specified",
"host",
"returning",
"a",
"list",
"of",
"output",
"lines",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_corruption_check/main.py#L229-L242 | train | 202,688 |
def find_files(data_path, brokers, minutes, start_time, end_time):
    """Find all the Kafka log files on the brokers that have been modified
    in the specified time range.

    start_time and end_time should be in the format specified
    by TIME_FORMAT_REGEX.

    :param data_path: the path to the log files on the broker
    :type data_path: str
    :param brokers: the brokers
    :type brokers: list of (broker_id, host) pairs
    :param minutes: check the files modified in the last N minutes
    :type minutes: int
    :param start_time: check the files modified after start_time
    :type start_time: str
    :param end_time: check the files modified before end_time
    :type end_time: str
    :returns: the files
    :rtype: list of (broker, host, file_path) tuples
    """
    # BUG FIX: Pool(0) raises ValueError, so bail out early when there are
    # no brokers to query.
    if not brokers:
        return []
    command = find_files_cmd(data_path, minutes, start_time, end_time)
    # One worker per broker so all hosts are queried in parallel.
    pool = Pool(len(brokers))
    try:
        result = pool.map(
            partial(get_output_lines_from_command, command=command),
            [host for _, host in brokers],
        )
    finally:
        # BUG FIX: the pool was never closed, leaking worker processes.
        pool.close()
        pool.join()
    return [
        (broker, host, files)
        for (broker, host), files in zip(brokers, result)
    ]
"""Find all the Kafka log files on the broker that have been modified
in the speficied time range.
start_time and end_time should be in the format specified
by TIME_FORMAT_REGEX.
:param data_path: the path to the lof files on the broker
:type data_path: str
:param brokers: the brokers
:type brokers: list of (broker_id, host) pairs
:param minutes: check the files modified in the last N minutes
:type minutes: int
:param start_time: check the files modified after start_time
:type start_time: str
:param end_time: check the files modified before end_time
:type end_time: str
:returns: the files
:rtype: list of (broker, host, file_path) tuples
"""
command = find_files_cmd(data_path, minutes, start_time, end_time)
pool = Pool(len(brokers))
result = pool.map(
partial(get_output_lines_from_command, command=command),
[host for broker, host in brokers])
return [(broker, host, files)
for (broker, host), files
in zip(brokers, result)] | [
"def",
"find_files",
"(",
"data_path",
",",
"brokers",
",",
"minutes",
",",
"start_time",
",",
"end_time",
")",
":",
"command",
"=",
"find_files_cmd",
"(",
"data_path",
",",
"minutes",
",",
"start_time",
",",
"end_time",
")",
"pool",
"=",
"Pool",
"(",
"len... | Find all the Kafka log files on the broker that have been modified
in the specified time range.
start_time and end_time should be in the format specified
by TIME_FORMAT_REGEX.
:param data_path: the path to the log files on the broker
:type data_path: str
:param brokers: the brokers
:type brokers: list of (broker_id, host) pairs
:param minutes: check the files modified in the last N minutes
:type minutes: int
:param start_time: check the files modified after start_time
:type start_time: str
:param end_time: check the files modified before end_time
:type end_time: str
:returns: the files
:rtype: list of (broker, host, file_path) tuples | [
"Find",
"all",
"the",
"Kafka",
"log",
"files",
"on",
"the",
"broker",
"that",
"have",
"been",
"modified",
"in",
"the",
"speficied",
"time",
"range",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_corruption_check/main.py#L245-L272 | train | 202,689 |
def parse_output(host, output):
    """Parse the output of the dump tool and print warnings or error messages
    accordingly.

    :param host: the source
    :type host: str
    :param output: the output of the script on host
    :type output: file-like object yielding str lines
    """
    current_file = None
    # Iterate the stream directly instead of readlines(): the dump tool can
    # emit one line per message, so buffering everything may use a lot of RAM.
    for line in output:
        file_name_search = FILE_PATH_REGEX.search(line)
        if file_name_search:
            # A new "Dumping <file>" header: remember which file the
            # following lines belong to.
            current_file = file_name_search.group(1)
            continue
        if INVALID_MESSAGE_REGEX.match(line) or INVALID_BYTES_REGEX.match(line):
            print_line(host, current_file, line, "ERROR")
        elif (VALID_MESSAGE_REGEX.match(line) or
                line.startswith('Starting offset:')):
            # Healthy output: nothing to report.
            continue
        else:
            print_line(host, current_file, line, "UNEXPECTED OUTPUT")
"""Parse the output of the dump tool and print warnings or error messages
accordingly.
:param host: the source
:type host: str
:param output: the output of the script on host
:type output: list of str
"""
current_file = None
for line in output.readlines():
file_name_search = FILE_PATH_REGEX.search(line)
if file_name_search:
current_file = file_name_search.group(1)
continue
if INVALID_MESSAGE_REGEX.match(line) or INVALID_BYTES_REGEX.match(line):
print_line(host, current_file, line, "ERROR")
elif VALID_MESSAGE_REGEX.match(line) or \
line.startswith('Starting offset:'):
continue
else:
print_line(host, current_file, line, "UNEXPECTED OUTPUT") | [
"def",
"parse_output",
"(",
"host",
",",
"output",
")",
":",
"current_file",
"=",
"None",
"for",
"line",
"in",
"output",
".",
"readlines",
"(",
")",
":",
"file_name_search",
"=",
"FILE_PATH_REGEX",
".",
"search",
"(",
"line",
")",
"if",
"file_name_search",
... | Parse the output of the dump tool and print warnings or error messages
accordingly.
:param host: the source
:type host: str
:param output: the output of the script on host
:type output: list of str | [
"Parse",
"the",
"output",
"of",
"the",
"dump",
"tool",
"and",
"print",
"warnings",
"or",
"error",
"messages",
"accordingly",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_corruption_check/main.py#L275-L296 | train | 202,690 |
def print_line(host, path, line, line_type):
    """Write one annotated dump-tool line to stdout.

    :param host: the source host
    :type host: str
    :param path: the path to the file that is being analyzed
    :type path: str
    :param line: the line to be printed
    :type line: str
    :param line_type: a header for the line
    :type line_type: str
    """
    header = "{ltype} Host: {host}, File: {path}".format(
        ltype=line_type,
        host=host,
        path=path,
    )
    print(header)
    print("{ltype} Output: {line}".format(ltype=line_type, line=line))
"""Print a dump tool line to stdout.
:param host: the source host
:type host: str
:param path: the path to the file that is being analyzed
:type path: str
:param line: the line to be printed
:type line: str
:param line_type: a header for the line
:type line_type: str
"""
print(
"{ltype} Host: {host}, File: {path}".format(
ltype=line_type,
host=host,
path=path,
)
)
print("{ltype} Output: {line}".format(ltype=line_type, line=line)) | [
"def",
"print_line",
"(",
"host",
",",
"path",
",",
"line",
",",
"line_type",
")",
":",
"print",
"(",
"\"{ltype} Host: {host}, File: {path}\"",
".",
"format",
"(",
"ltype",
"=",
"line_type",
",",
"host",
"=",
"host",
",",
"path",
"=",
"path",
",",
")",
"... | Print a dump tool line to stdout.
:param host: the source host
:type host: str
:param path: the path to the file that is being analyzed
:type path: str
:param line: the line to be printed
:type line: str
:param line_type: a header for the line
:type line_type: str | [
"Print",
"a",
"dump",
"tool",
"line",
"to",
"stdout",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_corruption_check/main.py#L299-L318 | train | 202,691 |
def check_files_on_host(java_home, host, files, batch_size):
    """Check the files on the host. Files are grouped together in groups
    of batch_size files. The dump class will be executed on each batch,
    sequentially.

    :param java_home: the JAVA_HOME of the broker
    :type java_home: str
    :param host: the host where the tool will be executed
    :type host: str
    :param files: the list of files to be analyzed
    :type files: list of str
    :param batch_size: the size of each batch
    :type batch_size: int
    """
    with closing(ssh_client(host)) as ssh:
        for i, batch in enumerate(chunks(files, batch_size)):
            command = check_corrupted_files_cmd(java_home, batch)
            _, stdout, stderr = ssh.exec_command(command)
            report_stderr(host, stderr)
            # BUG FIX: progress previously used the module constant
            # DEFAULT_BATCH_SIZE, reporting wrong counts whenever the caller
            # passed a different batch_size.
            print(
                "  {host}: file {n_file} of {total}".format(
                    host=host,
                    n_file=(i * batch_size),
                    total=len(files),
                )
            )
            parse_output(host, stdout)
"""Check the files on the host. Files are grouped together in groups
of batch_size files. The dump class will be executed on each batch,
sequentially.
:param java_home: the JAVA_HOME of the broker
:type java_home: str
:param host: the host where the tool will be executed
:type host: str
:param files: the list of files to be analyzed
:type files: list of str
:param batch_size: the size of each batch
:type batch_size: int
"""
with closing(ssh_client(host)) as ssh:
for i, batch in enumerate(chunks(files, batch_size)):
command = check_corrupted_files_cmd(java_home, batch)
_, stdout, stderr = ssh.exec_command(command)
report_stderr(host, stderr)
print(
" {host}: file {n_file} of {total}".format(
host=host,
n_file=(i * DEFAULT_BATCH_SIZE),
total=len(files),
)
)
parse_output(host, stdout) | [
"def",
"check_files_on_host",
"(",
"java_home",
",",
"host",
",",
"files",
",",
"batch_size",
")",
":",
"with",
"closing",
"(",
"ssh_client",
"(",
"host",
")",
")",
"as",
"ssh",
":",
"for",
"i",
",",
"batch",
"in",
"enumerate",
"(",
"chunks",
"(",
"fil... | Check the files on the host. Files are grouped together in groups
of batch_size files. The dump class will be executed on each batch,
sequentially.
:param java_home: the JAVA_HOME of the broker
:type java_home: str
:param host: the host where the tool will be executed
:type host: str
:param files: the list of files to be analyzed
:type files: list of str
:param batch_size: the size of each batch
:type batch_size: int | [
"Check",
"the",
"files",
"on",
"the",
"host",
".",
"Files",
"are",
"grouped",
"together",
"in",
"groups",
"of",
"batch_size",
"files",
".",
"The",
"dump",
"class",
"will",
"be",
"executed",
"on",
"each",
"batch",
"sequentially",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_corruption_check/main.py#L321-L347 | train | 202,692 |
def get_partition_leaders(cluster_config):
    """Return the current leader broker id of every partition.

    Each partition is keyed by its "topic-partition" string.

    :param cluster_config: the cluster
    :type cluster_config: kafka_utils.utils.config.ClusterConfig
    :returns: leaders for partitions
    :rtype: map of ("topic-partition", broker_id) pairs
    """
    client = KafkaClient(cluster_config.broker_list)
    return {
        topic + "-" + str(partition): p_data.leader
        for topic, topic_data in six.iteritems(client.topic_partitions)
        for partition, p_data in six.iteritems(topic_data)
    }
"""Return the current leaders of all partitions. Partitions are
returned as a "topic-partition" string.
:param cluster_config: the cluster
:type cluster_config: kafka_utils.utils.config.ClusterConfig
:returns: leaders for partitions
:rtype: map of ("topic-partition", broker_id) pairs
"""
client = KafkaClient(cluster_config.broker_list)
result = {}
for topic, topic_data in six.iteritems(client.topic_partitions):
for partition, p_data in six.iteritems(topic_data):
topic_partition = topic + "-" + str(partition)
result[topic_partition] = p_data.leader
return result | [
"def",
"get_partition_leaders",
"(",
"cluster_config",
")",
":",
"client",
"=",
"KafkaClient",
"(",
"cluster_config",
".",
"broker_list",
")",
"result",
"=",
"{",
"}",
"for",
"topic",
",",
"topic_data",
"in",
"six",
".",
"iteritems",
"(",
"client",
".",
"top... | Return the current leaders of all partitions. Partitions are
returned as a "topic-partition" string.
:param cluster_config: the cluster
:type cluster_config: kafka_utils.utils.config.ClusterConfig
:returns: leaders for partitions
:rtype: map of ("topic-partition", broker_id) pairs | [
"Return",
"the",
"current",
"leaders",
"of",
"all",
"partitions",
".",
"Partitions",
"are",
"returned",
"as",
"a",
"topic",
"-",
"partition",
"string",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_corruption_check/main.py#L350-L365 | train | 202,693 |
def get_tp_from_file(file_path):
    """Extract the topic-partition name from the path to a log file.

    :param file_path: the path to the log file
    :type file_path: str
    :returns: the name of the topic-partition, ex. "topic_name-0"
    :rtype: str
    """
    match = TP_FROM_FILE_REGEX.match(file_path)
    if match:
        return match.group(1)
    # An unparsable path means the on-disk layout is unexpected: abort.
    print("File path is not valid: " + file_path)
    sys.exit(1)
"""Return the name of the topic-partition given the path to the file.
:param file_path: the path to the log file
:type file_path: str
:returns: the name of the topic-partition, ex. "topic_name-0"
:rtype: str
"""
match = TP_FROM_FILE_REGEX.match(file_path)
if not match:
print("File path is not valid: " + file_path)
sys.exit(1)
return match.group(1) | [
"def",
"get_tp_from_file",
"(",
"file_path",
")",
":",
"match",
"=",
"TP_FROM_FILE_REGEX",
".",
"match",
"(",
"file_path",
")",
"if",
"not",
"match",
":",
"print",
"(",
"\"File path is not valid: \"",
"+",
"file_path",
")",
"sys",
".",
"exit",
"(",
"1",
")",... | Return the name of the topic-partition given the path to the file.
:param file_path: the path to the log file
:type file_path: str
:returns: the name of the topic-partition, ex. "topic_name-0"
:rtype: str | [
"Return",
"the",
"name",
"of",
"the",
"topic",
"-",
"partition",
"given",
"the",
"path",
"to",
"the",
"file",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_corruption_check/main.py#L368-L380 | train | 202,694 |
def filter_leader_files(cluster_config, broker_files):
    """Keep, for each broker, only the files of partitions it leads.

    Files of partitions led by some other broker (replicas) are dropped.

    :param cluster_config: the cluster
    :type cluster_config: kafka_utils.utils.config.ClusterConfig
    :param broker_files: the broker files
    :type broker_files: list of (b_id, host, [file_path, file_path ...]) tuples
    :returns: the filtered list
    :rtype: list of (broker_id, host, [file_path, file_path ...]) tuples
    """
    print("Filtering leaders")
    leader_of = get_partition_leaders(cluster_config)
    result = []
    for broker, host, files in broker_files:
        # Unknown partitions default to this broker, so they are kept
        # (same behavior as the original "tp not in leader_of" check).
        filtered = [
            file_path
            for file_path in files
            if leader_of.get(get_tp_from_file(file_path), broker) == broker
        ]
        result.append((broker, host, filtered))
        print(
            "Broker: {broker}, leader of {l_count} over {f_count} files".format(
                broker=broker,
                l_count=len(filtered),
                f_count=len(files),
            )
        )
    return result
"""Given a list of broker files, filters out all the files that
are in the replicas.
:param cluster_config: the cluster
:type cluster_config: kafka_utils.utils.config.ClusterConfig
:param broker_files: the broker files
:type broker_files: list of (b_id, host, [file_path, file_path ...]) tuples
:returns: the filtered list
:rtype: list of (broker_id, host, [file_path, file_path ...]) tuples
"""
print("Filtering leaders")
leader_of = get_partition_leaders(cluster_config)
result = []
for broker, host, files in broker_files:
filtered = []
for file_path in files:
tp = get_tp_from_file(file_path)
if tp not in leader_of or leader_of[tp] == broker:
filtered.append(file_path)
result.append((broker, host, filtered))
print(
"Broker: {broker}, leader of {l_count} over {f_count} files".format(
broker=broker,
l_count=len(filtered),
f_count=len(files),
)
)
return result | [
"def",
"filter_leader_files",
"(",
"cluster_config",
",",
"broker_files",
")",
":",
"print",
"(",
"\"Filtering leaders\"",
")",
"leader_of",
"=",
"get_partition_leaders",
"(",
"cluster_config",
")",
"result",
"=",
"[",
"]",
"for",
"broker",
",",
"host",
",",
"fi... | Given a list of broker files, filters out all the files that
are in the replicas.
:param cluster_config: the cluster
:type cluster_config: kafka_utils.utils.config.ClusterConfig
:param broker_files: the broker files
:type broker_files: list of (b_id, host, [file_path, file_path ...]) tuples
:returns: the filtered list
:rtype: list of (broker_id, host, [file_path, file_path ...]) tuples | [
"Given",
"a",
"list",
"of",
"broker",
"files",
"filters",
"out",
"all",
"the",
"files",
"that",
"are",
"in",
"the",
"replicas",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_corruption_check/main.py#L383-L411 | train | 202,695 |
def check_cluster(
    cluster_config,
    data_path,
    java_home,
    check_replicas,
    batch_size,
    minutes,
    start_time,
    end_time,
):
    """Check the integrity of the Kafka log files in a cluster.

    One worker process is spawned per broker; each scans its broker's files
    in batches. start_time and end_time should be in the format specified
    by TIME_FORMAT_REGEX.

    :param data_path: the path to the log folder on the broker
    :type data_path: str
    :param java_home: the JAVA_HOME of the broker
    :type java_home: str
    :param check_replicas: also checks the replica files
    :type check_replicas: bool
    :param batch_size: the size of the batch
    :type batch_size: int
    :param minutes: check the files modified in the last N minutes
    :type minutes: int
    :param start_time: check the files modified after start_time
    :type start_time: str
    :param end_time: check the files modified before end_time
    :type end_time: str
    """
    brokers = get_broker_list(cluster_config)
    broker_files = find_files(data_path, brokers, minutes, start_time, end_time)
    if not check_replicas:
        # Keep only the files whose partition leader is the local broker.
        broker_files = filter_leader_files(cluster_config, broker_files)

    workers = []
    print("Starting {n} parallel processes".format(n=len(broker_files)))
    try:
        for broker, host, files in broker_files:
            print(
                "  Broker: {host}, {n} files to check".format(
                    host=host,
                    n=len(files),
                )
            )
            worker = Process(
                name="dump_process_" + host,
                target=check_files_on_host,
                args=(java_home, host, files, batch_size),
            )
            worker.start()
            workers.append(worker)
        print("Processes running:")
        for worker in workers:
            worker.join()
    except KeyboardInterrupt:
        # Ctrl-C: tear every worker down before exiting with an error code.
        print("Terminating all processes")
        for worker in workers:
            worker.terminate()
            worker.join()
        print("All processes terminated")
        sys.exit(1)
cluster_config,
data_path,
java_home,
check_replicas,
batch_size,
minutes,
start_time,
end_time,
):
"""Check the integrity of the Kafka log files in a cluster.
start_time and end_time should be in the format specified
by TIME_FORMAT_REGEX.
:param data_path: the path to the log folder on the broker
:type data_path: str
:param java_home: the JAVA_HOME of the broker
:type java_home: str
:param check_replicas: also checks the replica files
:type check_replicas: bool
:param batch_size: the size of the batch
:type batch_size: int
:param minutes: check the files modified in the last N minutes
:type minutes: int
:param start_time: check the files modified after start_time
:type start_time: str
:param end_time: check the files modified before end_time
:type end_time: str
"""
brokers = get_broker_list(cluster_config)
broker_files = find_files(data_path, brokers, minutes, start_time, end_time)
if not check_replicas: # remove replicas
broker_files = filter_leader_files(cluster_config, broker_files)
processes = []
print("Starting {n} parallel processes".format(n=len(broker_files)))
try:
for broker, host, files in broker_files:
print(
" Broker: {host}, {n} files to check".format(
host=host,
n=len(files)),
)
p = Process(
name="dump_process_" + host,
target=check_files_on_host,
args=(java_home, host, files, batch_size),
)
p.start()
processes.append(p)
print("Processes running:")
for process in processes:
process.join()
except KeyboardInterrupt:
print("Terminating all processes")
for process in processes:
process.terminate()
process.join()
print("All processes terminated")
sys.exit(1) | [
"def",
"check_cluster",
"(",
"cluster_config",
",",
"data_path",
",",
"java_home",
",",
"check_replicas",
",",
"batch_size",
",",
"minutes",
",",
"start_time",
",",
"end_time",
",",
")",
":",
"brokers",
"=",
"get_broker_list",
"(",
"cluster_config",
")",
"broker... | Check the integrity of the Kafka log files in a cluster.
start_time and end_time should be in the format specified
by TIME_FORMAT_REGEX.
:param data_path: the path to the log folder on the broker
:type data_path: str
:param java_home: the JAVA_HOME of the broker
:type java_home: str
:param check_replicas: also checks the replica files
:type check_replicas: bool
:param batch_size: the size of the batch
:type batch_size: int
:param minutes: check the files modified in the last N minutes
:type minutes: int
:param start_time: check the files modified after start_time
:type start_time: str
:param end_time: check the files modified before end_time
:type end_time: str | [
"Check",
"the",
"integrity",
"of",
"the",
"Kafka",
"log",
"files",
"in",
"a",
"cluster",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_corruption_check/main.py#L414-L473 | train | 202,696 |
def validate_args(args):
    """Basic option validation. Returns False if the options are not valid,
    True otherwise.

    :param args: the command line options
    :type args: map
    """
    # (predicate, messages) pairs, checked in order; predicates are lazy so
    # the regex checks never run on a missing start/end time.
    checks = (
        (lambda: not args.minutes and not args.start_time,
         ("Error: missing --minutes or --start-time",)),
        (lambda: args.minutes and args.start_time,
         ("Error: --minutes shouldn't be specified if --start-time is used",)),
        (lambda: args.end_time and not args.start_time,
         ("Error: --end-time can't be used without --start-time",)),
        (lambda: args.minutes and args.minutes <= 0,
         ("Error: --minutes must be > 0",)),
        (lambda: args.start_time and not TIME_FORMAT_REGEX.match(args.start_time),
         ("Error: --start-time format is not valid",
          "Example format: '2015-11-26 11:00:00'")),
        (lambda: args.end_time and not TIME_FORMAT_REGEX.match(args.end_time),
         ("Error: --end-time format is not valid",
          "Example format: '2015-11-26 11:00:00'")),
        (lambda: args.batch_size <= 0,
         ("Error: --batch-size must be > 0",)),
    )
    for failed, messages in checks:
        if failed():
            for message in messages:
                print(message)
            return False
    return True
"""Basic option validation. Returns False if the options are not valid,
True otherwise.
:param args: the command line options
:type args: map
:param brokers_num: the number of brokers
"""
if not args.minutes and not args.start_time:
print("Error: missing --minutes or --start-time")
return False
if args.minutes and args.start_time:
print("Error: --minutes shouldn't be specified if --start-time is used")
return False
if args.end_time and not args.start_time:
print("Error: --end-time can't be used without --start-time")
return False
if args.minutes and args.minutes <= 0:
print("Error: --minutes must be > 0")
return False
if args.start_time and not TIME_FORMAT_REGEX.match(args.start_time):
print("Error: --start-time format is not valid")
print("Example format: '2015-11-26 11:00:00'")
return False
if args.end_time and not TIME_FORMAT_REGEX.match(args.end_time):
print("Error: --end-time format is not valid")
print("Example format: '2015-11-26 11:00:00'")
return False
if args.batch_size <= 0:
print("Error: --batch-size must be > 0")
return False
return True | [
"def",
"validate_args",
"(",
"args",
")",
":",
"if",
"not",
"args",
".",
"minutes",
"and",
"not",
"args",
".",
"start_time",
":",
"print",
"(",
"\"Error: missing --minutes or --start-time\"",
")",
"return",
"False",
"if",
"args",
".",
"minutes",
"and",
"args",... | Basic option validation. Returns False if the options are not valid,
True otherwise.
:param args: the command line options
:type args: map
:param brokers_num: the number of brokers | [
"Basic",
"option",
"validation",
".",
"Returns",
"False",
"if",
"the",
"options",
"are",
"not",
"valid",
"True",
"otherwise",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_corruption_check/main.py#L476-L507 | train | 202,697 |
def separate_groups(groups, key, total):
    """Split ``groups`` into over-loaded and under-loaded lists.

    Widening the over-loaded side with optimal groups that hold one extra
    element increases the choice space when later selecting the most
    suitable group.

    For example:
    Given the groups (a:4, b:4, c:3, d:2) where the number represents the
    number of elements for each group, smart_separate_groups sets 'a' and
    'c' as optimal, 'b' as over-loaded and 'd' as under-loaded.
    separate-groups combines 'a' with 'b' as over-loaded, allowing to select
    between these two groups to transfer the element to 'd'.

    :param groups: list of groups
    :param key: function to retrieve element count from group
    :param total: total number of elements to distribute
    :returns: sorted lists of over loaded (descending) and under
        loaded (ascending) group
    """
    optimum, extra = compute_optimum(len(groups), total)
    over_loaded, under_loaded, optimal = _smart_separate_groups(groups, key, total)
    if not extra:
        # Elements divide evenly: the smart separation is already final.
        return over_loaded, under_loaded
    # Optimal groups at exactly `optimum` could still absorb one of the
    # `extra` elements; those above `optimum` could donate one.
    extra_under = [group for group in optimal if key(group) == optimum]
    extra_over = [group for group in optimal if key(group) > optimum]
    return (
        sorted(over_loaded + extra_over, key=key, reverse=True),
        sorted(under_loaded + extra_under, key=key),
    )
"""Separate the group into overloaded and under-loaded groups.
The revised over-loaded groups increases the choice space for future
selection of most suitable group based on search criteria.
For example:
Given the groups (a:4, b:4, c:3, d:2) where the number represents the number
of elements for each group.
smart_separate_groups sets 'a' and 'c' as optimal, 'b' as over-loaded
and 'd' as under-loaded.
separate-groups combines 'a' with 'b' as over-loaded, allowing to select
between these two groups to transfer the element to 'd'.
:param groups: list of groups
:param key: function to retrieve element count from group
:param total: total number of elements to distribute
:returns: sorted lists of over loaded (descending) and under
loaded (ascending) group
"""
optimum, extra = compute_optimum(len(groups), total)
over_loaded, under_loaded, optimal = _smart_separate_groups(groups, key, total)
# If every group is optimal return
if not extra:
return over_loaded, under_loaded
# Some groups in optimal may have a number of elements that is optimum + 1.
# In this case they should be considered over_loaded.
potential_under_loaded = [
group for group in optimal
if key(group) == optimum
]
potential_over_loaded = [
group for group in optimal
if key(group) > optimum
]
revised_under_loaded = under_loaded + potential_under_loaded
revised_over_loaded = over_loaded + potential_over_loaded
return (
sorted(revised_over_loaded, key=key, reverse=True),
sorted(revised_under_loaded, key=key),
) | [
"def",
"separate_groups",
"(",
"groups",
",",
"key",
",",
"total",
")",
":",
"optimum",
",",
"extra",
"=",
"compute_optimum",
"(",
"len",
"(",
"groups",
")",
",",
"total",
")",
"over_loaded",
",",
"under_loaded",
",",
"optimal",
"=",
"_smart_separate_groups"... | Separate the group into overloaded and under-loaded groups.
The revised over-loaded groups increases the choice space for future
selection of most suitable group based on search criteria.
For example:
Given the groups (a:4, b:4, c:3, d:2) where the number represents the number
of elements for each group.
smart_separate_groups sets 'a' and 'c' as optimal, 'b' as over-loaded
and 'd' as under-loaded.
separate-groups combines 'a' with 'b' as over-loaded, allowing to select
between these two groups to transfer the element to 'd'.
:param groups: list of groups
:param key: function to retrieve element count from group
:param total: total number of elements to distribute
:returns: sorted lists of over loaded (descending) and under
loaded (ascending) group | [
"Separate",
"the",
"group",
"into",
"overloaded",
"and",
"under",
"-",
"loaded",
"groups",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/util.py#L56-L97 | train | 202,698 |
def active_brokers(self):
    """Return the set of brokers that are neither inactive nor decommissioned."""
    usable = set()
    for candidate in self._brokers:
        # Skip any broker that has been taken out of service either way.
        if candidate.inactive or candidate.decommissioned:
            continue
        usable.add(candidate)
    return usable
"""Return set of brokers that are not inactive or decommissioned."""
return {
broker
for broker in self._brokers
if not broker.inactive and not broker.decommissioned
} | [
"def",
"active_brokers",
"(",
"self",
")",
":",
"return",
"{",
"broker",
"for",
"broker",
"in",
"self",
".",
"_brokers",
"if",
"not",
"broker",
".",
"inactive",
"and",
"not",
"broker",
".",
"decommissioned",
"}"
] | Return set of brokers that are not inactive or decommissioned. | [
"Return",
"set",
"of",
"brokers",
"that",
"are",
"not",
"inactive",
"or",
"decommissioned",
"."
] | cdb4d64308f3079ee0873250bf7b34d0d94eca50 | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/rg.py#L56-L62 | train | 202,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.