text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def project_point(cb, msg, attributes=('x', 'y')): """ Projects a single point supplied by a callback """
if skip(cb, msg, attributes): return msg plot = get_cb_plot(cb) x, y = msg.get('x', 0), msg.get('y', 0) crs = plot.current_frame.crs coordinates = crs.transform_points(plot.projection, np.array([x]), np.array([y])) msg['x'], msg['y'] = coordinates[0, :2] return {k: v for k, v in msg.items() if k in attributes}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def project_drawn(cb, msg): """ Projects a drawn element to the declared coordinate system """
stream = cb.streams[0] old_data = stream.data stream.update(data=msg['data']) element = stream.element stream.update(data=old_data) proj = cb.plot.projection if not isinstance(element, _Element) or element.crs == proj: return None crs = element.crs element.crs = proj return project(element, projection=crs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clean_weight_files(cls): """ Cleans existing weight files. """
deleted = [] for f in cls._files: try: os.remove(f) deleted.append(f) except FileNotFoundError: pass print('Deleted %d weight files' % len(deleted)) cls._files = []
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_projection(el): """ Get coordinate reference system from non-auxiliary elements. Return value is a tuple of a precedence integer and the projection, to allow non-auxiliary components to take precedence. """
result = None if hasattr(el, 'crs'): result = (int(el._auxiliary_component), el.crs) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_extents(self, element, ranges, range_type='combined'): """ Subclasses the get_extents method using the GeoAxes set_extent method to project the extents to the Elements coordinate reference system. """
proj = self.projection if self.global_extent and range_type in ('combined', 'data'): (x0, x1), (y0, y1) = proj.x_limits, proj.y_limits return (x0, y0, x1, y1) extents = super(ProjectionPlot, self).get_extents(element, ranges, range_type) if not getattr(element, 'crs', None) or not self.geographic: return extents elif any(e is None or not np.isfinite(e) for e in extents): extents = None else: extents = project_extents(extents, element.crs, proj) return (np.NaN,)*4 if not extents else extents
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wrap_lons(lons, base, period): """ Wrap longitude values into the range between base and base+period. """
lons = lons.astype(np.float64) return ((lons - base + period * 2) % period) + base
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def geom_dict_to_array_dict(geom_dict, coord_names=['Longitude', 'Latitude']): """ Converts a dictionary containing a geometry key to a dictionary of x- and y-coordinate arrays and if present a list-of-lists of hole array. """
x, y = coord_names geom = geom_dict['geometry'] new_dict = {k: v for k, v in geom_dict.items() if k != 'geometry'} array = geom_to_array(geom) new_dict[x] = array[:, 0] new_dict[y] = array[:, 1] if geom.geom_type == 'Polygon': holes = [] for interior in geom.interiors: holes.append(geom_to_array(interior)) if holes: new_dict['holes'] = [holes] elif geom.geom_type == 'MultiPolygon': outer_holes = [] for g in geom: holes = [] for interior in g.interiors: holes.append(geom_to_array(interior)) outer_holes.append(holes) if any(hs for hs in outer_holes): new_dict['holes'] = outer_holes return new_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def polygons_to_geom_dicts(polygons, skip_invalid=True): """ Converts a Polygons element into a list of geometry dictionaries, preserving all value dimensions. For array conversion the following conventions are applied: * Any nan separated array are converted into a MultiPolygon * Any array without nans is converted to a Polygon * If there are holes associated with a nan separated array the holes are assigned to the polygons by testing for an intersection * If any single array does not have at least three coordinates it is skipped by default * If skip_invalid=False and an array has less than three coordinates it will be converted to a LineString """
interface = polygons.interface.datatype if interface == 'geodataframe': return [row.to_dict() for _, row in polygons.data.iterrows()] elif interface == 'geom_dictionary': return polygons.data polys = [] xdim, ydim = polygons.kdims has_holes = polygons.has_holes holes = polygons.holes() if has_holes else None for i, polygon in enumerate(polygons.split(datatype='columns')): array = np.column_stack([polygon.pop(xdim.name), polygon.pop(ydim.name)]) splits = np.where(np.isnan(array[:, :2].astype('float')).sum(axis=1))[0] arrays = np.split(array, splits+1) if len(splits) else [array] invalid = False subpolys = [] subholes = None if has_holes: subholes = [[LinearRing(h) for h in hs] for hs in holes[i]] for j, arr in enumerate(arrays): if j != (len(arrays)-1): arr = arr[:-1] # Drop nan if len(arr) == 0: continue elif len(arr) == 1: if skip_invalid: continue poly = Point(arr[0]) invalid = True elif len(arr) == 2: if skip_invalid: continue poly = LineString(arr) invalid = True elif not len(splits): poly = Polygon(arr, (subholes[j] if has_holes else [])) else: poly = Polygon(arr) hs = [h for h in subholes[j]] if has_holes else [] poly = Polygon(poly.exterior, holes=hs) subpolys.append(poly) if invalid: polys += [dict(polygon, geometry=sp) for sp in subpolys] continue elif len(subpolys) == 1: geom = subpolys[0] elif subpolys: geom = MultiPolygon(subpolys) else: continue polygon['geometry'] = geom polys.append(polygon) return polys
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def path_to_geom_dicts(path, skip_invalid=True): """ Converts a Path element into a list of geometry dictionaries, preserving all value dimensions. """
interface = path.interface.datatype if interface == 'geodataframe': return [row.to_dict() for _, row in path.data.iterrows()] elif interface == 'geom_dictionary': return path.data geoms = [] invalid = False xdim, ydim = path.kdims for i, path in enumerate(path.split(datatype='columns')): array = np.column_stack([path.pop(xdim.name), path.pop(ydim.name)]) splits = np.where(np.isnan(array[:, :2].astype('float')).sum(axis=1))[0] arrays = np.split(array, splits+1) if len(splits) else [array] subpaths = [] for j, arr in enumerate(arrays): if j != (len(arrays)-1): arr = arr[:-1] # Drop nan if len(arr) == 0: continue elif len(arr) == 1: if skip_invalid: continue g = Point(arr[0]) invalid = True else: g = LineString(arr) subpaths.append(g) if invalid: geoms += [dict(path, geometry=sp) for sp in subpaths] continue elif len(subpaths) == 1: geom = subpaths[0] elif subpaths: geom = MultiLineString(subpaths) path['geometry'] = geom geoms.append(path) return geoms
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_ccw(geom): """ Reorients polygon to be wound counter-clockwise. """
if isinstance(geom, sgeom.Polygon) and not geom.exterior.is_ccw: geom = sgeom.polygon.orient(geom) return geom
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def geom_length(geom): """ Calculates the length of coordinates in a shapely geometry. """
if geom.geom_type == 'Point': return 1 if hasattr(geom, 'exterior'): geom = geom.exterior if not geom.geom_type.startswith('Multi') and hasattr(geom, 'array_interface_base'): return len(geom.array_interface_base['data'])//2 else: length = 0 for g in geom: length += geom_length(g) return length
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def geo_mesh(element): """ Get mesh data from a 2D Element ensuring that if the data is on a cylindrical coordinate system and wraps globally that data actually wraps around. """
if len(element.vdims) > 1: xs, ys = (element.dimension_values(i, False, False) for i in range(2)) zs = np.dstack([element.dimension_values(i, False, False) for i in range(2, 2+len(element.vdims))]) else: xs, ys, zs = (element.dimension_values(i, False, False) for i in range(3)) lon0, lon1 = element.range(0) if isinstance(element.crs, ccrs._CylindricalProjection) and (lon1 - lon0) == 360: xs = np.append(xs, xs[0:1] + 360, axis=0) zs = np.ma.concatenate([zs, zs[:, 0:1]], axis=1) return xs, ys, zs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_crs(crs): """ Checks if the crs represents a valid grid, projection or EPSG string. (Code copied from https://github.com/fmaussion/salem) Examples -------- '+units=m +init=epsg:26915 ' True Returns ------- A valid crs if possible, otherwise None """
import pyproj if isinstance(crs, pyproj.Proj): out = crs elif isinstance(crs, dict) or isinstance(crs, basestring): try: out = pyproj.Proj(crs) except RuntimeError: try: out = pyproj.Proj(init=crs) except RuntimeError: out = None else: out = None return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def proj_to_cartopy(proj): """ Converts a pyproj.Proj to a cartopy.crs.Projection (Code copied from https://github.com/fmaussion/salem) Parameters proj: pyproj.Proj the projection to convert Returns ------- a cartopy.crs.Projection object """
import cartopy.crs as ccrs try: from osgeo import osr has_gdal = True except ImportError: has_gdal = False proj = check_crs(proj) if proj.is_latlong(): return ccrs.PlateCarree() srs = proj.srs if has_gdal: # this is more robust, as srs could be anything (espg, etc.) s1 = osr.SpatialReference() s1.ImportFromProj4(proj.srs) srs = s1.ExportToProj4() km_proj = {'lon_0': 'central_longitude', 'lat_0': 'central_latitude', 'x_0': 'false_easting', 'y_0': 'false_northing', 'k': 'scale_factor', 'zone': 'zone', } km_globe = {'a': 'semimajor_axis', 'b': 'semiminor_axis', } km_std = {'lat_1': 'lat_1', 'lat_2': 'lat_2', } kw_proj = dict() kw_globe = dict() kw_std = dict() for s in srs.split('+'): s = s.split('=') if len(s) != 2: continue k = s[0].strip() v = s[1].strip() try: v = float(v) except: pass if k == 'proj': if v == 'tmerc': cl = ccrs.TransverseMercator if v == 'lcc': cl = ccrs.LambertConformal if v == 'merc': cl = ccrs.Mercator if v == 'utm': cl = ccrs.UTM if k in km_proj: kw_proj[km_proj[k]] = v if k in km_globe: kw_globe[km_globe[k]] = v if k in km_std: kw_std[km_std[k]] = v globe = None if kw_globe: globe = ccrs.Globe(**kw_globe) if kw_std: kw_proj['standard_parallels'] = (kw_std['lat_1'], kw_std['lat_2']) # mercatoooor if cl.__name__ == 'Mercator': kw_proj.pop('false_easting', None) kw_proj.pop('false_northing', None) return cl(globe=globe, **kw_proj)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_tiff(filename, crs=None, apply_transform=False, nan_nodata=False, **kwargs): """ Returns an RGB or Image element loaded from a geotiff file. The data is loaded using xarray and rasterio. If a crs attribute is present on the loaded data it will attempt to decode it into a cartopy projection otherwise it will default to a non-geographic HoloViews element. Parameters filename: string Filename pointing to geotiff file to load crs: Cartopy CRS or EPSG string (optional) Overrides CRS inferred from the data apply_transform: boolean Whether to apply affine transform if defined on the data nan_nodata: boolean If data contains nodata values convert them to NaNs **kwargs: Keyword arguments passed to the HoloViews/GeoViews element Returns ------- element: Image/RGB/QuadMesh element """
try: import xarray as xr except: raise ImportError('Loading tiffs requires xarray to be installed') with warnings.catch_warnings(): warnings.filterwarnings('ignore') da = xr.open_rasterio(filename) return from_xarray(da, crs, apply_transform, nan_nodata, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def teardown_handles(self): """ If no custom update_handles method is supplied this method is called to tear down any previous handles before replacing them. """
if not isinstance(self.handles.get('artist'), GoogleTiles): self.handles['artist'].remove()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_geom(geom, geoms): """ Returns the index of a geometry in a list of geometries avoiding expensive equality checks of `in` operator. """
for i, g in enumerate(geoms): if g is geom: return i
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_zoom_level(bounds, domain, levels): """ Computes a zoom level given a bounds polygon, a polygon of the overall domain and the number of zoom levels to divide the data into. Parameters bounds: shapely.geometry.Polygon Polygon representing the area of the current viewport domain: shapely.geometry.Polygon Polygon representing the overall bounding region of the data levels: int Number of zoom levels to divide the domain into Returns ------- zoom_level: int Integer zoom level """
area_fraction = min(bounds.area/domain.area, 1) return int(min(round(np.log2(1/area_fraction)), levels))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bounds_to_poly(bounds): """ Constructs a shapely Polygon from the provided bounds tuple. Parameters bounds: tuple Tuple representing the (left, bottom, right, top) coordinates Returns ------- polygon: shapely.geometry.Polygon Shapely Polygon geometry of the bounds """
x0, y0, x1, y1 = bounds return Polygon([(x0, y0), (x1, y0), (x1, y1), (x0, y1)])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_assignment(self): """Parse the given json plan in dict format."""
try: plan = json.loads(open(self.args.plan_file_path).read()) return plan_to_assignment(plan) except IOError: self.log.exception( 'Given json file {file} not found.' .format(file=self.args.plan_file_path), ) raise except ValueError: self.log.exception( 'Given json file {file} could not be decoded.' .format(file=self.args.plan_file_path), ) raise except KeyError: self.log.exception( 'Given json file {file} could not be parsed in desired format.' .format(file=self.args.plan_file_path), ) raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_requests(hosts, jolokia_port, jolokia_prefix): """Return a generator of requests to fetch the under replicated partition number from the specified hosts. :param hosts: list of brokers ip addresses :type hosts: list of strings :param jolokia_port: HTTP port for Jolokia :type jolokia_port: integer :param jolokia_prefix: HTTP prefix on the server for the Jolokia queries :type jolokia_prefix: string :returns: generator of requests """
session = FuturesSession() for host in hosts: url = "http://{host}:{port}/{prefix}/read/{key}".format( host=host, port=jolokia_port, prefix=jolokia_prefix, key=UNDER_REPL_KEY, ) yield host, session.get(url)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_cluster_status(hosts, jolokia_port, jolokia_prefix): """Read and return the number of under replicated partitions and missing brokers from the specified hosts. :param hosts: list of brokers ip addresses :type hosts: list of strings :param jolokia_port: HTTP port for Jolokia :type jolokia_port: integer :param jolokia_prefix: HTTP prefix on the server for the Jolokia queries :type jolokia_prefix: string :returns: tuple of integers """
under_replicated = 0 missing_brokers = 0 for host, request in generate_requests(hosts, jolokia_port, jolokia_prefix): try: response = request.result() if 400 <= response.status_code <= 599: print("Got status code {0}. Exiting.".format(response.status_code)) sys.exit(1) json = response.json() under_replicated += json['value'] except RequestException as e: print("Broker {0} is down: {1}." "This maybe because it is starting up".format(host, e), file=sys.stderr) missing_brokers += 1 except KeyError: print("Cannot find the key, Kafka is probably still starting up", file=sys.stderr) missing_brokers += 1 return under_replicated, missing_brokers
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def print_brokers(cluster_config, brokers): """Print the list of brokers that will be restarted. :param cluster_config: the cluster configuration :type cluster_config: map :param brokers: the brokers that will be restarted :type brokers: map of broker ids and host names """
print("Will restart the following brokers in {0}:".format(cluster_config.name)) for id, host in brokers: print(" {0}: {1}".format(id, host))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ask_confirmation(): """Ask for confirmation to the user. Return true if the user confirmed the execution, false otherwise. :returns: bool """
while True: print("Do you want to restart these brokers? ", end="") choice = input().lower() if choice in ['yes', 'y']: return True elif choice in ['no', 'n']: return False else: print("Please respond with 'yes' or 'no'")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start_broker(host, connection, start_command, verbose): """Execute the start"""
_, stdout, stderr = connection.sudo_command(start_command) if verbose: report_stdout(host, stdout) report_stderr(host, stderr)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stop_broker(host, connection, stop_command, verbose): """Execute the stop"""
_, stdout, stderr = connection.sudo_command(stop_command) if verbose: report_stdout(host, stdout) report_stderr(host, stderr)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wait_for_stable_cluster( hosts, jolokia_port, jolokia_prefix, check_interval, check_count, unhealthy_time_limit, ): """ Block the caller until the cluster can be considered stable. :param hosts: list of brokers ip addresses :type hosts: list of strings :param jolokia_port: HTTP port for Jolokia :type jolokia_port: integer :param jolokia_prefix: HTTP prefix on the server for the Jolokia queries :type jolokia_prefix: string :param check_interval: the number of seconds it will wait between each check :type check_interval: integer :param check_count: the number of times the check should be positive before restarting the next broker :type check_count: integer :param unhealthy_time_limit: the maximum number of seconds it will wait for the cluster to become stable before exiting with error :type unhealthy_time_limit: integer """
stable_counter = 0 max_checks = int(math.ceil(unhealthy_time_limit / check_interval)) for i in itertools.count(): partitions, brokers = read_cluster_status( hosts, jolokia_port, jolokia_prefix, ) if partitions or brokers: stable_counter = 0 else: stable_counter += 1 print( "Under replicated partitions: {p_count}, missing brokers: {b_count} ({stable}/{limit})".format( p_count=partitions, b_count=brokers, stable=stable_counter, limit=check_count, )) if stable_counter >= check_count: print("The cluster is stable") return if i >= max_checks: raise WaitTimeoutException() time.sleep(check_interval)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def execute_rolling_restart( brokers, jolokia_port, jolokia_prefix, check_interval, check_count, unhealthy_time_limit, skip, verbose, pre_stop_task, post_stop_task, start_command, stop_command, ssh_password=None ): """Execute the rolling restart on the specified brokers. It checks the number of under replicated partitions on each broker, using Jolokia. The check is performed at constant intervals, and a broker will be restarted when all the brokers are answering and are reporting zero under replicated partitions. :param brokers: the brokers that will be restarted :type brokers: map of broker ids and host names :param jolokia_port: HTTP port for Jolokia :type jolokia_port: integer :param jolokia_prefix: HTTP prefix on the server for the Jolokia queries :type jolokia_prefix: string :param check_interval: the number of seconds it will wait between each check :type check_interval: integer :param check_count: the number of times the check should be positive before restarting the next broker :type check_count: integer :param unhealthy_time_limit: the maximum number of seconds it will wait for the cluster to become stable before exiting with error :type unhealthy_time_limit: integer :param skip: the number of brokers to skip :type skip: integer :param verbose: print commend execution information :type verbose: bool :param pre_stop_task: a list of tasks to execute before running stop :type pre_stop_task: list :param post_stop_task: a list of task to execute after running stop :type post_stop_task: list :param start_command: the start command for kafka :type start_command: string :param stop_command: the stop command for kafka :type stop_command: string :param ssh_password: The ssh password to use if needed :type ssh_password: string """
all_hosts = [b[1] for b in brokers] for n, host in enumerate(all_hosts[skip:]): with ssh(host=host, forward_agent=True, sudoable=True, max_attempts=3, max_timeout=2, ssh_password=ssh_password) as connection: execute_task(pre_stop_task, host) wait_for_stable_cluster( all_hosts, jolokia_port, jolokia_prefix, check_interval, 1 if n == 0 else check_count, unhealthy_time_limit, ) print("Stopping {0} ({1}/{2})".format(host, n + 1, len(all_hosts) - skip)) stop_broker(host, connection, stop_command, verbose) execute_task(post_stop_task, host) # we open a new SSH connection in case the hostname has a new IP with ssh(host=host, forward_agent=True, sudoable=True, max_attempts=3, max_timeout=2, ssh_password=ssh_password) as connection: print("Starting {0} ({1}/{2})".format(host, n + 1, len(all_hosts) - skip)) start_broker(host, connection, start_command, verbose) # Wait before terminating the script wait_for_stable_cluster( all_hosts, jolokia_port, jolokia_prefix, check_interval, check_count, unhealthy_time_limit, )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_opts(opts, brokers_num): """Basic option validation. Returns True if the options are not valid, False otherwise. :param opts: the command line options :type opts: map :param brokers_num: the number of brokers :type brokers_num: integer :returns: bool """
if opts.skip < 0 or opts.skip >= brokers_num: print("Error: --skip must be >= 0 and < #brokers") return True if opts.check_count < 0: print("Error: --check-count must be >= 0") return True if opts.unhealthy_time_limit < 0: print("Error: --unhealthy-time-limit must be >= 0") return True if opts.check_count == 0: print("Warning: no check will be performed") if opts.check_interval < 0: print("Error: --check-interval must be >= 0") return True return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_broker_ids_subset(broker_ids, subset_ids): """Validate that user specified broker ids to restart exist in the broker ids retrieved from cluster config. :param broker_ids: all broker IDs in a cluster :type broker_ids: list of integers :param subset_ids: broker IDs specified by user :type subset_ids: list of integers :returns: bool """
all_ids = set(broker_ids) valid = True for subset_id in subset_ids: valid = valid and subset_id in all_ids if subset_id not in all_ids: print("Error: user specified broker id {0} does not exist in cluster.".format(subset_id)) return valid
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run( self, cluster_config, rg_parser, partition_measurer, cluster_balancer, args, ): """Initialize cluster_config, args, and zk then call run_command."""
self.cluster_config = cluster_config self.args = args with ZK(self.cluster_config) as self.zk: self.log.debug( 'Starting %s for cluster: %s and zookeeper: %s', self.__class__.__name__, self.cluster_config.name, self.cluster_config.zookeeper, ) brokers = self.zk.get_brokers() assignment = self.zk.get_cluster_assignment() pm = partition_measurer( self.cluster_config, brokers, assignment, args, ) ct = ClusterTopology( assignment, brokers, pm, rg_parser.get_replication_group, ) if len(ct.partitions) == 0: self.log.info("The cluster is empty. No actions to perform.") return # Exit if there is an on-going reassignment if self.is_reassignment_pending(): self.log.error('Previous reassignment pending.') sys.exit(1) self.run_command(ct, cluster_balancer(ct, args))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def execute_plan(self, plan, allow_rf_change=False): """Save proposed-plan and execute the same if requested."""
if self.should_execute(): result = self.zk.execute_plan(plan, allow_rf_change=allow_rf_change) if not result: self.log.error('Plan execution unsuccessful.') sys.exit(1) else: self.log.info( 'Plan sent to zookeeper for reassignment successfully.', ) else: self.log.info('Proposed plan won\'t be executed (--apply and confirmation needed).')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def should_execute(self): """Confirm if proposed-plan should be executed."""
return self.args.apply and (self.args.no_confirm or self.confirm_execution())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_reassignment_pending(self): """Return True if there are reassignment tasks pending."""
in_progress_plan = self.zk.get_pending_plan() if in_progress_plan: in_progress_partitions = in_progress_plan['partitions'] self.log.info( 'Previous re-assignment in progress for {count} partitions.' ' Current partitions in re-assignment queue: {partitions}' .format( count=len(in_progress_partitions), partitions=in_progress_partitions, ) ) return True else: return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_reduced_assignment( self, original_assignment, cluster_topology, max_partition_movements, max_leader_only_changes, max_movement_size=DEFAULT_MAX_MOVEMENT_SIZE, force_progress=False, ): """Reduce the assignment based on the total actions. Actions represent actual partition movements and/or changes in preferred leader. Get the difference of original and proposed assignment and take the subset of this plan for given limit. Argument(s): original_assignment: Current assignment of cluster in zookeeper cluster_topology: Cluster topology containing the new proposed-assignment of cluster max_partition_movements:Maximum number of partition-movements in final set of actions max_leader_only_changes:Maximum number of actions with leader only changes max_movement_size: Maximum size, in bytes, to move in final set of actions force_progress: Whether to force progress if max_movement_size is too small :return: :reduced_assignment: Final reduced assignment """
new_assignment = cluster_topology.assignment if (not original_assignment or not new_assignment or max_partition_movements < 0 or max_leader_only_changes < 0 or max_movement_size < 0): return {} # The replica set stays the same for leaders only changes leaders_changes = [ (t_p, new_assignment[t_p]) for t_p, replica in six.iteritems(original_assignment) if replica != new_assignment[t_p] and set(replica) == set(new_assignment[t_p]) ] # The replica set is different for partitions changes # Here we create a list of tuple ((topic, partion), # replica movements) partition_change_count = [ ( t_p, len(set(replica) - set(new_assignment[t_p])), ) for t_p, replica in six.iteritems(original_assignment) if set(replica) != set(new_assignment[t_p]) ] self.log.info( "Total number of actions before reduction: %s.", len(partition_change_count) + len(leaders_changes), ) # Extract reduced plan maximizing uniqueness of topics and ensuring we do not # go over the max_movement_size reduced_actions = self._extract_actions_unique_topics( partition_change_count, max_partition_movements, cluster_topology, max_movement_size, ) # Ensure progress is made if force_progress=True if len(reduced_actions) == 0 and force_progress: smallest_size = min([cluster_topology.partitions[t_p[0]].size for t_p in partition_change_count]) self.log.warning( '--max-movement-size={max_movement_size} is too small, using smallest size' ' in set of partitions to move, {smallest_size} instead to force progress'.format( max_movement_size=max_movement_size, smallest_size=smallest_size, ) ) max_movement_size = smallest_size reduced_actions = self._extract_actions_unique_topics( partition_change_count, max_partition_movements, cluster_topology, max_movement_size, ) reduced_partition_changes = [ (t_p, new_assignment[t_p]) for t_p in reduced_actions ] self.log.info( "Number of partition changes: %s." 
" Number of leader-only changes: %s", len(reduced_partition_changes), min(max_leader_only_changes, len(leaders_changes)), ) # Merge leaders and partition changes and generate the assignment reduced_assignment = { t_p: replicas for t_p, replicas in ( reduced_partition_changes + leaders_changes[:max_leader_only_changes] ) } return reduced_assignment
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _extract_actions_unique_topics(self, movement_counts, max_movements, cluster_topology, max_movement_size): """Extract actions limiting to given max value such that the resultant has the minimum possible number of duplicate topics. Algorithm: 1. Group actions by by topic-name: {topic: action-list} 2. Iterate through the dictionary in circular fashion and keep extracting actions with until max_partition_movements are reached. :param movement_counts: list of tuple ((topic, partition), movement count) :param max_movements: max number of movements to extract :param cluster_topology: cluster topology containing the new proposed assignment for the cluster :param max_movement_size: maximum size of data to move at a time in extracted actions :return: list of tuple (topic, partitions) to include in the reduced plan """
# Group actions by topic topic_actions = defaultdict(list) for t_p, replica_change_cnt in movement_counts: topic_actions[t_p[0]].append((t_p, replica_change_cnt)) # Create reduced assignment minimizing duplication of topics extracted_actions = [] curr_movements = 0 curr_size = 0 action_available = True while curr_movements < max_movements and curr_size <= max_movement_size and action_available: action_available = False for topic, actions in six.iteritems(topic_actions): for action in actions: action_size = cluster_topology.partitions[action[0]].size if curr_movements + action[1] > max_movements or curr_size + action_size > max_movement_size: # Remove action since it won't be possible to use it actions.remove(action) else: # Append (topic, partition) to the list of movements action_available = True extracted_actions.append(action[0]) curr_movements += action[1] curr_size += action_size actions.remove(action) break return extracted_actions
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def confirm_execution(self):
    """Confirm with the user whether the proposed plan should be executed."""
permit = '' while permit.lower() not in ('yes', 'no'): permit = input('Execute Proposed Plan? [yes/no] ') if permit.lower() == 'yes': return True else: return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_json_plan(self, proposed_layout, proposed_plan_file): """Dump proposed json plan to given output file for future usage."""
# Persist the proposed layout as JSON so the plan can be replayed later.
with open(proposed_plan_file, 'w') as plan_fd:
    json.dump(proposed_layout, plan_fd)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def swap_leader(self, new_leader): """Change the preferred leader with one of given replicas. Note: Leaders for all the replicas of current partition needs to be changed. """
# Replica set cannot be changed assert(new_leader in self._replicas) curr_leader = self.leader idx = self._replicas.index(new_leader) self._replicas[0], self._replicas[idx] = \ self._replicas[idx], self._replicas[0] return curr_leader
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def replace(self, source, dest): """Replace source broker with destination broker in replica set if found."""
for i, broker in enumerate(self.replicas): if broker == source: self.replicas[i] = dest return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def count_siblings(self, partitions): """Count siblings of partition in given partition-list. :key-term: sibling: partitions with same topic """
count = sum( int(self.topic == partition.topic) for partition in partitions ) return count
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_topic_partition_metadata(hosts): """Returns topic-partition metadata from Kafka broker. kafka-python 1.3+ doesn't include partition metadata information in topic_partitions so we extract it from metadata ourselves. """
kafka_client = KafkaToolClient(hosts, timeout=10) kafka_client.load_metadata_for_topics() topic_partitions = kafka_client.topic_partitions resp = kafka_client.send_metadata_request() for _, topic, partitions in resp.topics: for partition_error, partition, leader, replicas, isr in partitions: if topic_partitions.get(topic, {}).get(partition) is not None: topic_partitions[topic][partition] = PartitionMetadata(topic, partition, leader, replicas, isr, partition_error) return topic_partitions
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_unavailable_brokers(zk, partition_metadata): """Returns the set of unavailable brokers from the difference of replica set of given partition to the set of available replicas. """
topic_data = zk.get_topics(partition_metadata.topic) topic = partition_metadata.topic partition = partition_metadata.partition expected_replicas = set(topic_data[topic]['partitions'][str(partition)]['replicas']) available_replicas = set(partition_metadata.replicas) return expected_replicas - available_replicas
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_current_consumer_offsets( kafka_client, group, topics, raise_on_error=True, ): """ Get current consumer offsets. NOTE: This method does not refresh client metadata. It is up to the caller to avoid using stale metadata. If any partition leader is not available, the request fails for all the other topics. This is the tradeoff of sending all topic requests in batch and save both in performance and Kafka load. :param kafka_client: a connected KafkaToolClient :param group: kafka group_id :param topics: topic list or dict {<topic>: [partitions]} :param raise_on_error: if False the method ignores missing topics and missing partitions. It still may fail on the request send. :returns: a dict topic: partition: offset :raises: :py:class:`kafka_utils.util.error.UnknownTopic`: upon missing topics and raise_on_error=True :py:class:`kafka_utils.util.error.UnknownPartition`: upon missing partitions and raise_on_error=True FailedPayloadsError: upon send request error. """
topics = _verify_topics_and_partitions(kafka_client, topics, raise_on_error) group_offset_reqs = [ OffsetFetchRequestPayload(topic, partition) for topic, partitions in six.iteritems(topics) for partition in partitions ] group_offsets = {} send_api = kafka_client.send_offset_fetch_request_kafka if group_offset_reqs: # fail_on_error = False does not prevent network errors group_resps = send_api( group=group, payloads=group_offset_reqs, fail_on_error=False, callback=pluck_topic_offset_or_zero_on_unknown, ) for resp in group_resps: group_offsets.setdefault( resp.topic, {}, )[resp.partition] = resp.offset return group_offsets
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_topics_watermarks(kafka_client, topics, raise_on_error=True): """ Get current topic watermarks. NOTE: This method does not refresh client metadata. It is up to the caller to use avoid using stale metadata. If any partition leader is not available, the request fails for all the other topics. This is the tradeoff of sending all topic requests in batch and save both in performance and Kafka load. :param kafka_client: a connected KafkaToolClient :param topics: topic list or dict {<topic>: [partitions]} :param raise_on_error: if False the method ignores missing topics and missing partitions. It still may fail on the request send. :returns: a dict topic: partition: Part :raises: :py:class:`~kafka_utils.util.error.UnknownTopic`: upon missing topics and raise_on_error=True :py:class:`~kafka_utils.util.error.UnknownPartition`: upon missing partitions and raise_on_error=True FailedPayloadsError: upon send request error. """
topics = _verify_topics_and_partitions(
    kafka_client,
    topics,
    raise_on_error,
)
# Build one high-watermark (-1 = latest) and one low-watermark
# (-2 = earliest) request per partition, batched across all topics.
highmark_offset_reqs = []
lowmark_offset_reqs = []
for topic, partitions in six.iteritems(topics):
    # Batch watermark requests
    for partition in partitions:
        # Request the latest offset
        highmark_offset_reqs.append(
            OffsetRequestPayload(
                topic, partition, -1, max_offsets=1
            )
        )
        # Request the earliest offset
        lowmark_offset_reqs.append(
            OffsetRequestPayload(
                topic, partition, -2, max_offsets=1
            )
        )
watermark_offsets = {}
# Nothing to ask for: return an empty result rather than sending an
# empty batch request.
if not (len(highmark_offset_reqs) + len(lowmark_offset_reqs)):
    return watermark_offsets
# fail_on_error = False does not prevent network errors
highmark_resps = kafka_client.send_offset_request(
    highmark_offset_reqs,
    fail_on_error=False,
    callback=_check_fetch_response_error,
)
lowmark_resps = kafka_client.send_offset_request(
    lowmark_offset_reqs,
    fail_on_error=False,
    callback=_check_fetch_response_error,
)
# At this point highmark and lowmark should ideally have the same length.
assert len(highmark_resps) == len(lowmark_resps)
# Pair the two response sets by (topic, partition) before building the
# final PartitionOffsets records.
aggregated_offsets = defaultdict(lambda: defaultdict(dict))
for resp in highmark_resps:
    aggregated_offsets[resp.topic][resp.partition]['highmark'] = \
        resp.offsets[0]
for resp in lowmark_resps:
    aggregated_offsets[resp.topic][resp.partition]['lowmark'] = \
        resp.offsets[0]
for topic, partition_watermarks in six.iteritems(aggregated_offsets):
    for partition, watermarks in six.iteritems(partition_watermarks):
        watermark_offsets.setdefault(
            topic,
            {},
        )[partition] = PartitionOffsets(
            topic,
            partition,
            watermarks['highmark'],
            watermarks['lowmark'],
        )
return watermark_offsets
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_consumer_offsets( kafka_client, group, new_offsets, raise_on_error=True, ): """Set consumer offsets to the specified offsets. This method does not validate the specified offsets, it is up to the caller to specify valid offsets within a topic partition. If any partition leader is not available, the request fails for all the other topics. This is the tradeoff of sending all topic requests in batch and save both in performance and Kafka load. :param kafka_client: a connected KafkaToolClient :param group: kafka group_id :param topics: dict {<topic>: {<partition>: <offset>}} :param raise_on_error: if False the method does not raise exceptions on errors encountered. It may still fail on the request send. :returns: a list of errors for each partition offset update that failed. :rtype: list [OffsetCommitError] :raises: :py:class:`kafka_utils.util.error.UnknownTopic`: upon missing topics and raise_on_error=True :py:class:`kafka_utils.util.error.UnknownPartition`: upon missing partitions and raise_on_error=True :py:class:`exceptions.TypeError`: upon badly formatted input new_offsets FailedPayloadsError: upon send request error. """
valid_new_offsets = _verify_commit_offsets_requests( kafka_client, new_offsets, raise_on_error ) group_offset_reqs = [ OffsetCommitRequestPayload( topic, partition, offset, metadata='', ) for topic, new_partition_offsets in six.iteritems(valid_new_offsets) for partition, offset in six.iteritems(new_partition_offsets) ] send_api = kafka_client.send_offset_commit_request_kafka status = [] if group_offset_reqs: status = send_api( group, group_offset_reqs, raise_on_error, callback=_check_commit_response_error ) return [_f for _f in status if _f and _f.error != 0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def nullify_offsets(offsets): """Modify offsets metadata so that the partition offsets have null payloads. :param offsets: dict {<topic>: {<partition>: <offset>}} :returns: a dict topic: partition: offset """
result = {} for topic, partition_offsets in six.iteritems(offsets): result[topic] = _nullify_partition_offsets(partition_offsets) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def display_table(headers, table): """Print a formatted table. :param headers: A list of header objects that are displayed in the first row of the table. :param table: A list of lists where each sublist is a row of the table. The number of elements in each row should be equal to the number of headers. """
# Every row must have exactly one cell per header.
assert all(len(row) == len(headers) for row in table)
str_headers = [str(header) for header in headers]
str_table = [[str(cell) for cell in row] for row in table]
# Column width: the widest of the header and every cell beneath it.
# max() is fed a single list so an empty table degrades gracefully to
# header widths; the original starred an empty generator into max(),
# which raised TypeError when there were no rows.
column_lengths = [
    max([len(header)] + [len(row[i]) for row in str_table])
    for i, header in enumerate(str_headers)
]
print(
    " | ".join(
        header.ljust(length)
        for header, length in zip(str_headers, column_lengths)
    )
)
# Separator line between the header and the body.
print("-+-".join("-" * length for length in column_lengths))
for row in str_table:
    print(
        " | ".join(
            cell.ljust(length)
            for cell, length in zip(row, column_lengths)
        )
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def display_replica_imbalance(cluster_topologies): """Display replica replication-group distribution imbalance statistics. :param cluster_topologies: A dictionary mapping a string name to a ClusterTopology object. """
assert cluster_topologies
# Every topology must expose the same replication-group ids so the
# per-group columns line up across clusters.
rg_ids = list(next(six.itervalues(cluster_topologies)).rgs.keys())
assert all(
    set(rg_ids) == set(cluster_topology.rgs.keys())
    for cluster_topology in six.itervalues(cluster_topologies)
)
# One (net-imbalance, extra-replica-count-per-rg) tuple per cluster.
rg_imbalances = [
    stats.get_replication_group_imbalance_stats(
        list(cluster_topology.rgs.values()),
        list(cluster_topology.partitions.values()),
    )
    for cluster_topology in six.itervalues(cluster_topologies)
]
_display_table_title_multicolumn(
    'Extra Replica Count',
    'Replication Group',
    rg_ids,
    list(cluster_topologies.keys()),
    [
        [erc[rg_id] for rg_id in rg_ids]
        for _, erc in rg_imbalances
    ],
)
# Per-cluster totals; the cluster name is omitted when displaying a
# single topology.
for name, imbalance in zip(
    six.iterkeys(cluster_topologies),
    (imbalance for imbalance, _ in rg_imbalances)
):
    print(
        '\n'
        '{name}'
        'Total extra replica count: {imbalance}'
        .format(
            name='' if len(cluster_topologies) == 1 else name + '\n',
            imbalance=imbalance,
        )
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def display_partition_imbalance(cluster_topologies): """Display partition count and weight imbalance statistics. :param cluster_topologies: A dictionary mapping a string name to a ClusterTopology object. """
# Every topology must expose the same broker ids so per-broker columns
# line up across clusters.
broker_ids = list(next(six.itervalues(cluster_topologies)).brokers.keys())
assert all(
    set(broker_ids) == set(cluster_topology.brokers.keys())
    for cluster_topology in six.itervalues(cluster_topologies)
)
# Per-cluster lists of partition counts / weights in broker_ids order.
broker_partition_counts = [
    stats.get_broker_partition_counts(
        cluster_topology.brokers[broker_id]
        for broker_id in broker_ids
    )
    for cluster_topology in six.itervalues(cluster_topologies)
]
broker_weights = [
    stats.get_broker_weights(
        cluster_topology.brokers[broker_id]
        for broker_id in broker_ids
    )
    for cluster_topology in six.itervalues(cluster_topologies)
]
_display_table_title_multicolumn(
    'Partition Count',
    'Broker',
    broker_ids,
    list(cluster_topologies.keys()),
    broker_partition_counts,
)
print('')
_display_table_title_multicolumn(
    'Partition Weight',
    'Broker',
    broker_ids,
    list(cluster_topologies.keys()),
    broker_weights,
)
# Summary statistics per cluster; name omitted for a single topology.
for name, bpc, bw in zip(
    list(cluster_topologies.keys()),
    broker_partition_counts,
    broker_weights
):
    print(
        '\n'
        '{name}'
        'Partition count imbalance: {net_imbalance}\n'
        'Broker weight mean: {weight_mean}\n'
        'Broker weight stdev: {weight_stdev}\n'
        'Broker weight cv: {weight_cv}'
        .format(
            name='' if len(cluster_topologies) == 1 else name + '\n',
            net_imbalance=stats.get_net_imbalance(bpc),
            weight_mean=stats.mean(bw),
            weight_stdev=stats.stdevp(bw),
            weight_cv=stats.coefficient_of_variation(bw),
        )
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def display_leader_imbalance(cluster_topologies): """Display leader count and weight imbalance statistics. :param cluster_topologies: A dictionary mapping a string name to a ClusterTopology object. """
# Every topology must expose the same broker ids so per-broker columns
# line up across clusters.
broker_ids = list(next(six.itervalues(cluster_topologies)).brokers.keys())
assert all(
    set(broker_ids) == set(cluster_topology.brokers.keys())
    for cluster_topology in six.itervalues(cluster_topologies)
)
# Per-cluster lists of leader counts / leader weights in broker_ids order.
broker_leader_counts = [
    stats.get_broker_leader_counts(
        cluster_topology.brokers[broker_id]
        for broker_id in broker_ids
    )
    for cluster_topology in six.itervalues(cluster_topologies)
]
broker_leader_weights = [
    stats.get_broker_leader_weights(
        cluster_topology.brokers[broker_id]
        for broker_id in broker_ids
    )
    for cluster_topology in six.itervalues(cluster_topologies)
]
_display_table_title_multicolumn(
    'Leader Count',
    'Brokers',
    broker_ids,
    list(cluster_topologies.keys()),
    broker_leader_counts,
)
print('')
_display_table_title_multicolumn(
    'Leader weight',
    'Brokers',
    broker_ids,
    list(cluster_topologies.keys()),
    broker_leader_weights,
)
# Summary statistics per cluster; name omitted for a single topology.
for name, blc, blw in zip(
    list(cluster_topologies.keys()),
    broker_leader_counts,
    broker_leader_weights
):
    print(
        '\n'
        '{name}'
        'Leader count imbalance: {net_imbalance}\n'
        'Broker leader weight mean: {weight_mean}\n'
        'Broker leader weight stdev: {weight_stdev}\n'
        'Broker leader weight cv: {weight_cv}'
        .format(
            name='' if len(cluster_topologies) == 1 else name + '\n',
            net_imbalance=stats.get_net_imbalance(blc),
            weight_mean=stats.mean(blw),
            weight_stdev=stats.stdevp(blw),
            weight_cv=stats.coefficient_of_variation(blw),
        )
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def display_topic_broker_imbalance(cluster_topologies): """Display topic broker imbalance statistics. :param cluster_topologies: A dictionary mapping a string name to a ClusterTopology object. """
# Every topology must expose the same broker ids and the same topic
# names so the tables line up across clusters.
broker_ids = list(next(six.itervalues(cluster_topologies)).brokers.keys())
assert all(
    set(broker_ids) == set(cluster_topology.brokers.keys())
    for cluster_topology in six.itervalues(cluster_topologies)
)
topic_names = list(next(six.itervalues(cluster_topologies)).topics.keys())
assert all(
    set(topic_names) == set(cluster_topology.topics.keys())
    for cluster_topology in six.itervalues(cluster_topologies)
)
# Per-cluster (net, per-broker) tuples for plain and weighted imbalance.
imbalances = [
    stats.get_topic_imbalance_stats(
        [cluster_topology.brokers[broker_id] for broker_id in broker_ids],
        [cluster_topology.topics[tname] for tname in topic_names],
    )
    for cluster_topology in six.itervalues(cluster_topologies)
]
weighted_imbalances = [
    stats.get_weighted_topic_imbalance_stats(
        [cluster_topology.brokers[broker_id] for broker_id in broker_ids],
        [cluster_topology.topics[tname] for tname in topic_names],
    )
    for cluster_topology in six.itervalues(cluster_topologies)
]
_display_table_title_multicolumn(
    'Extra-Topic-Partition Count',
    'Brokers',
    broker_ids,
    list(cluster_topologies.keys()),
    [
        [i[1][broker_id] for broker_id in broker_ids]
        for i in imbalances
    ]
)
print('')
_display_table_title_multicolumn(
    'Weighted Topic Imbalance',
    'Brokers',
    broker_ids,
    list(cluster_topologies.keys()),
    [
        [wi[1][broker_id] for broker_id in broker_ids]
        for wi in weighted_imbalances
    ]
)
# Net totals per cluster; name omitted for a single topology.
for name, topic_imbalance, weighted_topic_imbalance in zip(
    six.iterkeys(cluster_topologies),
    (i[0] for i in imbalances),
    (wi[0] for wi in weighted_imbalances),
):
    print(
        '\n'
        '{name}'
        'Topic partition imbalance count: {topic_imbalance}\n'
        'Weighted topic partition imbalance: {weighted_topic_imbalance}'
        .format(
            name='' if len(cluster_topologies) == 1 else name + '\n',
            topic_imbalance=topic_imbalance,
            weighted_topic_imbalance=weighted_topic_imbalance,
        )
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def display_movements_stats(ct, base_assignment): """Display how the amount of movement between two assignments. :param ct: The cluster's ClusterTopology. :param base_assignment: The cluster assignment to compare against. """
# Summarize how much the topology differs from the base assignment.
moved_count, moved_size, leader_changes = stats.get_partition_movement_stats(
    ct, base_assignment
)
summary = (
    'Total partition movements: {movement_count}\n'
    'Total partition movement size: {movement_size}\n'
    'Total leader changes: {leader_changes}'
).format(
    movement_count=moved_count,
    movement_size=moved_size,
    leader_changes=leader_changes,
)
print(summary)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def display_assignment_changes(plan_details, to_log=True): """Display current and proposed changes in topic-partition to replica layout over brokers. """
curr_plan_list, new_plan_list, total_changes = plan_details
action_cnt = '\n[INFO] Total actions required {0}'.format(total_changes)
_log_or_display(to_log, action_cnt)
action_cnt = (
    '[INFO] Total actions that will be executed {0}'
    .format(len(new_plan_list))
)
_log_or_display(to_log, action_cnt)
changes = ('[INFO] Proposed Changes in current cluster-layout:\n')
_log_or_display(to_log, changes)
tp_str = 'Topic - Partition'
curr_repl_str = 'Previous-Assignment'
new_rep_str = 'Proposed-Assignment'
tp_list = [tp_repl[0] for tp_repl in curr_plan_list]
# Index each plan once by topic-partition; the original re-scanned both
# plan lists for every displayed row (O(n^2) for large plans).
curr_repl_by_tp = dict(curr_plan_list)
new_repl_by_tp = dict(new_plan_list)
# Display heading
msg = '=' * 80
_log_or_display(to_log, msg)
row = (
    '{tp:^30s}: {curr_rep_str:^20s} ==> {new_rep_str:^20s}'
    .format(
        tp=tp_str,
        curr_rep_str=curr_repl_str,
        new_rep_str=new_rep_str,
    )
)
_log_or_display(to_log, row)
msg = '=' * 80
_log_or_display(to_log, msg)
# Display each topic-partition with its previous and proposed replicas,
# sorted by (topic, partition) for stable output.
tp_list_sorted = sorted(tp_list, key=lambda tp: (tp[0], tp[1]))
for tp in tp_list_sorted:
    curr_repl = curr_repl_by_tp[tp]
    proposed_repl = new_repl_by_tp[tp]
    tp_str = '{topic} - {partition:<2d}'.format(topic=tp[0], partition=tp[1])
    row = (
        '{tp:<30s}: {curr_repl:<20s} ==> {proposed_repl:<20s}'.format(
            tp=tp_str,
            curr_repl=curr_repl,
            proposed_repl=proposed_repl,
        )
    )
    _log_or_display(to_log, row)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_net_imbalance(count_per_broker): """Calculate and return net imbalance based on given count of partitions or leaders per broker. Net-imbalance in case of partitions implies total number of extra partitions from optimal count over all brokers. This is also implies, the minimum number of partition movements required for overall balancing. For leaders, net imbalance implies total number of extra brokers as leaders from optimal count. """
net_imbalance = 0 opt_count, extra_allowed = \ compute_optimum(len(count_per_broker), sum(count_per_broker)) for count in count_per_broker: extra_cnt, extra_allowed = \ get_extra_element_count(count, opt_count, extra_allowed) net_imbalance += extra_cnt return net_imbalance
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_extra_element_count(curr_count, opt_count, extra_allowed_cnt): """Evaluate and return extra same element count based on given values. :key-term: group: In here group can be any base where elements are place i.e. replication-group while placing replicas (elements) or brokers while placing partitions (elements). element: Generic term for units which are optimally placed over group. :params: curr_count: Given count opt_count: Optimal count for each group. extra_allowed_cnt: Count of groups which can have 1 extra element _ on each group. """
if curr_count > opt_count: # We still can allow 1 extra count if extra_allowed_cnt > 0: extra_allowed_cnt -= 1 extra_cnt = curr_count - opt_count - 1 else: extra_cnt = curr_count - opt_count else: extra_cnt = 0 return extra_cnt, extra_allowed_cnt
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_replication_group_imbalance_stats(rgs, partitions): """Calculate extra replica count replica count over each replication-group and net extra-same-replica count. """
tot_rgs = len(rgs)
# Per-replication-group running total of extra replicas.
extra_replica_cnt_per_rg = defaultdict(int)
for partition in partitions:
    # Get optimal replica-count for each partition
    opt_replica_cnt, extra_replicas_allowed = \
        compute_optimum(tot_rgs, partition.replication_factor)
    # Extra replica count for each rg; the "extra allowed" budget is
    # threaded through so at most that many groups get a free extra.
    for rg in rgs:
        replica_cnt_rg = rg.count_replica(partition)
        extra_replica_cnt, extra_replicas_allowed = \
            get_extra_element_count(
                replica_cnt_rg,
                opt_replica_cnt,
                extra_replicas_allowed,
            )
        extra_replica_cnt_per_rg[rg.id] += extra_replica_cnt
# Evaluate net imbalance across all replication-groups
net_imbalance = sum(extra_replica_cnt_per_rg.values())
return net_imbalance, extra_replica_cnt_per_rg
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_topic_imbalance_stats(brokers, topics): """Return count of topics and partitions on each broker having multiple partitions of same topic. :rtype dict(broker_id: same-topic-partition count) Example: Total-brokers (b1, b2): 2 Total-partitions of topic t1: 5 (b1 has 4 partitions), (b2 has 1 partition) opt-count: 5/2 = 2 extra-count: 5%2 = 1 i.e. 1 broker can have 2 + 1 = 3 partitions and rest of brokers can have 2 partitions for given topic Extra-partition or imbalance: b1: current-partitions - optimal-count = 4 - 2 - 1(extra allowed) = 1 Net-imbalance = 1 """
# Per-broker running total of same-topic extra partitions.
extra_partition_cnt_per_broker = defaultdict(int)
tot_brokers = len(brokers)
# Sort the brokers so that the iteration order is deterministic.
sorted_brokers = sorted(brokers, key=lambda b: b.id)
for topic in topics:
    # Optimal partition-count per topic per broker
    total_partition_replicas = \
        len(topic.partitions) * topic.replication_factor
    opt_partition_cnt, extra_partitions_allowed = \
        compute_optimum(tot_brokers, total_partition_replicas)
    # Get extra-partition count per broker for each topic; the "extra
    # allowed" budget is threaded through the brokers in sorted order.
    for broker in sorted_brokers:
        partition_cnt_broker = broker.count_partitions(topic)
        extra_partitions, extra_partitions_allowed = \
            get_extra_element_count(
                partition_cnt_broker,
                opt_partition_cnt,
                extra_partitions_allowed,
            )
        extra_partition_cnt_per_broker[broker.id] += extra_partitions
# Net extra partitions over all brokers
net_imbalance = sum(six.itervalues(extra_partition_cnt_per_broker))
return net_imbalance, extra_partition_cnt_per_broker
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _read_generated_broker_id(meta_properties_path): """reads broker_id from meta.properties file. :param string meta_properties_path: path for meta.properties file :returns int: broker_id from meta_properties_path """
try: with open(meta_properties_path, 'r') as f: broker_id = _parse_meta_properties_file(f) except IOError: raise IOError( "Cannot open meta.properties file: {path}" .format(path=meta_properties_path), ) except ValueError: raise ValueError("Broker id not valid") if broker_id is None: raise ValueError("Autogenerated broker id missing from data directory") return broker_id
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_broker_id(data_path): """This function will look into the data folder to get the automatically created broker_id. :param string data_path: the path to the kafka data folder :returns int: the real broker_id """
# Path to the meta.properties file. This is used to read the automatic broker id # if the given broker id is -1 META_FILE_PATH = "{data_path}/meta.properties" if not data_path: raise ValueError("You need to specify the data_path if broker_id == -1") meta_properties_path = META_FILE_PATH.format(data_path=data_path) return _read_generated_broker_id(meta_properties_path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge_offsets_metadata(topics, *offsets_responses): """Merge the offset metadata dictionaries from multiple responses. :param topics: list of topics :param offsets_responses: list of dict topic: partition: offset :returns: dict topic: partition: offset """
result = dict() for topic in topics: partition_offsets = [ response[topic] for response in offsets_responses if topic in response ] result[topic] = merge_partition_offsets(*partition_offsets) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge_partition_offsets(*partition_offsets): """Merge the partition offsets of a single topic from multiple responses. :param partition_offsets: list of dict partition: offset :returns: dict partition: offset """
output = dict() for partition_offset in partition_offsets: for partition, offset in six.iteritems(partition_offset): prev_offset = output.get(partition, 0) output[partition] = max(prev_offset, offset) return output
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rebalance_replicas( self, max_movement_count=None, max_movement_size=None, ): """Balance replicas across replication-groups. :param max_movement_count: The maximum number of partitions to move. :param max_movement_size: The maximum total size of the partitions to move. :returns: A 2-tuple whose first element is the number of partitions moved and whose second element is the total size of the partitions moved. """
# Running totals over all partitions processed so far.
movement_count = 0
movement_size = 0
for partition in six.itervalues(self.cluster_topology.partitions):
    # Pass the *remaining* budget to each partition; None means no limit.
    # NOTE(review): a falsy limit (0) is also mapped to None (unlimited)
    # here — confirm callers never pass 0 to mean "move nothing".
    count, size = self._rebalance_partition_replicas(
        partition,
        None if not max_movement_count
        else max_movement_count - movement_count,
        None if not max_movement_size
        else max_movement_size - movement_size,
    )
    movement_count += count
    movement_size += size
return movement_count, movement_size
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _rebalance_partition_replicas( self, partition, max_movement_count=None, max_movement_size=None, ): """Rebalance replication groups for given partition."""
# Separate replication-groups into under and over replicated total = partition.replication_factor over_replicated_rgs, under_replicated_rgs = separate_groups( list(self.cluster_topology.rgs.values()), lambda g: g.count_replica(partition), total, ) # Move replicas from over-replicated to under-replicated groups movement_count = 0 movement_size = 0 while ( under_replicated_rgs and over_replicated_rgs ) and ( max_movement_size is None or movement_size + partition.size <= max_movement_size ) and ( max_movement_count is None or movement_count < max_movement_count ): # Decide source and destination group rg_source = self._elect_source_replication_group( over_replicated_rgs, partition, ) rg_destination = self._elect_dest_replication_group( rg_source.count_replica(partition), under_replicated_rgs, partition, ) if rg_source and rg_destination: # Actual movement of partition self.log.debug( 'Moving partition {p_name} from replication-group ' '{rg_source} to replication-group {rg_dest}'.format( p_name=partition.name, rg_source=rg_source.id, rg_dest=rg_destination.id, ), ) rg_source.move_partition(rg_destination, partition) movement_count += 1 movement_size += partition.size else: # Groups balanced or cannot be balanced further break # Re-compute under and over-replicated replication-groups over_replicated_rgs, under_replicated_rgs = separate_groups( list(self.cluster_topology.rgs.values()), lambda g: g.count_replica(partition), total, ) return movement_count, movement_size
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _elect_source_replication_group( self, over_replicated_rgs, partition, ): """Decide source replication-group based as group with highest replica count. """
return max( over_replicated_rgs, key=lambda rg: rg.count_replica(partition), )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _elect_dest_replication_group( self, replica_count_source, under_replicated_rgs, partition, ): """Decide destination replication-group based on replica-count."""
min_replicated_rg = min( under_replicated_rgs, key=lambda rg: rg.count_replica(partition), ) # Locate under-replicated replication-group with lesser # replica count than source replication-group if min_replicated_rg.count_replica(partition) < replica_count_source - 1: return min_replicated_rg return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_consumer_offsets(cls, json_file): """Parse current offsets from json-file."""
with open(json_file, 'r') as consumer_offsets_json:
    try:
        parsed_offsets = {}
        parsed_offsets_data = json.load(consumer_offsets_json)
        # Create new dict with partition-keys as integers
        parsed_offsets['groupid'] = parsed_offsets_data['groupid']
        parsed_offsets['offsets'] = {}
        for topic, topic_data in six.iteritems(parsed_offsets_data['offsets']):
            parsed_offsets['offsets'][topic] = {}
            for partition, offset in six.iteritems(topic_data):
                # JSON object keys are always strings; the internal
                # representation keys offsets by integer partition-id.
                parsed_offsets['offsets'][topic][int(partition)] = offset
        return parsed_offsets
    except ValueError:
        # json.load raises ValueError on malformed JSON.
        print(
            "Error: Given consumer-data json data-file {file} could not be "
            "parsed".format(file=json_file),
            file=sys.stderr,
        )
        raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_new_offsets(cls, client, topics_offset_data, topic_partitions, current_offsets): """Build complete consumer offsets from parsed current consumer-offsets and lowmarks and highmarks from current-offsets for. """
# topic -> {partition: offset} mapping to be committed.
new_offsets = defaultdict(dict)
try:
    for topic, partitions in six.iteritems(topic_partitions):
        # Validate current offsets in range of low and highmarks
        # Currently we only validate for positive offsets and warn
        # if out of range of low and highmarks
        valid_partitions = set()
        for topic_partition_offsets in current_offsets[topic]:
            partition = topic_partition_offsets.partition
            valid_partitions.add(partition)
            # Skip the partition not present in list
            if partition not in topic_partitions[topic]:
                continue
            lowmark = topic_partition_offsets.lowmark
            highmark = topic_partition_offsets.highmark
            new_offset = topics_offset_data[topic][partition]
            # Negative offsets are never valid: abort.
            if new_offset < 0:
                print(
                    "Error: Given offset: {offset} is negative"
                    .format(offset=new_offset),
                    file=sys.stderr,
                )
                sys.exit(1)
            # Out-of-range offsets are committed anyway, but warned
            # about (broker will clamp / consumer may reset).
            if new_offset < lowmark or new_offset > highmark:
                print(
                    "Warning: Given offset {offset} for topic-partition "
                    "{topic}:{partition} is outside the range of lowmark "
                    "{lowmark} and highmark {highmark}".format(
                        offset=new_offset,
                        topic=topic,
                        partition=partition,
                        lowmark=lowmark,
                        highmark=highmark,
                    )
                )
            new_offsets[topic][partition] = new_offset
        # Every requested partition must exist in the cluster metadata.
        if not set(partitions).issubset(valid_partitions):
            print(
                "Error: Some invalid partitions {partitions} for topic "
                "{topic} found. Valid partition-list {valid_partitions}. "
                "Exiting...".format(
                    partitions=', '.join([str(p) for p in partitions]),
                    valid_partitions=', '.join([str(p) for p in valid_partitions]),
                    topic=topic,
                ),
                file=sys.stderr,
            )
            sys.exit(1)
except KeyError as ex:
    # A topic missing from current_offsets/topics_offset_data lands here.
    print(
        "Error: Possible invalid topic or partition. Error msg: {ex}. "
        "Exiting...".format(ex=ex),
    )
    sys.exit(1)
return new_offsets
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def restore_offsets(cls, client, parsed_consumer_offsets): """Fetch current offsets from kafka, validate them against given consumer-offsets data and commit the new offsets. :param client: Kafka-client :param parsed_consumer_offsets: Parsed consumer offset data from json file :type parsed_consumer_offsets: dict(group: dict(topic: partition-offsets)) """
# Extract the group-id and the topic -> {partition: offset} mapping
# from the parsed json data.
try:
    consumer_group = parsed_consumer_offsets['groupid']
    topics_offset_data = parsed_consumer_offsets['offsets']
    topic_partitions = dict(
        (topic, [partition for partition in offset_data.keys()])
        for topic, offset_data in six.iteritems(topics_offset_data)
    )
except (KeyError, IndexError):
    # Fix: missing 'groupid'/'offsets' keys raise KeyError, which the
    # original `except IndexError` could never catch; catch both so the
    # friendly error message is printed before re-raising.
    print(
        "Error: Given parsed consumer-offset data {consumer_offsets} "
        "could not be parsed".format(consumer_offsets=parsed_consumer_offsets),
        file=sys.stderr,
    )
    raise
# Fetch current offsets (low/high watermarks) from kafka.
current_offsets = get_consumer_offsets_metadata(
    client,
    consumer_group,
    topic_partitions,
)
# Build new offsets validated against the current watermarks.
new_offsets = cls.build_new_offsets(
    client,
    topics_offset_data,
    topic_partitions,
    current_offsets,
)
# Commit the validated offsets for the consumer group.
# (Removed a redundant re-assignment of consumer_group here.)
set_consumer_offsets(client, consumer_group, new_offsets)
print("Restored to new offsets {offsets}".format(offsets=dict(new_offsets)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tuple_replace(tup, *pairs): """Return a copy of a tuple with some elements replaced. :param tup: The tuple to be copied. :param pairs: Any number of (index, value) tuples where index is the index of the item to replace and value is the new value of the item. """
tuple_list = list(tup) for index, value in pairs: tuple_list[index] = value return tuple(tuple_list)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tuple_alter(tup, *pairs): """Return a copy of a tuple with some elements altered. :param tup: The tuple to be copied. :param pairs: Any number of (index, func) tuples where index is the index of the item to alter and the new value is func(tup[index]). """
# timeit says that this is faster than a similar tuple_list = list(tup) for i, f in pairs: tuple_list[i] = f(tuple_list[i]) return tuple(tuple_list)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tuple_remove(tup, *items): """Return a copy of a tuple with some items removed. :param tup: The tuple to be copied. :param items: Any number of items. The first instance of each item will be removed from the tuple. """
tuple_list = list(tup) for item in items: tuple_list.remove(item) return tuple(tuple_list)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def positive_int(string): """Convert string to positive integer."""
error_msg = 'Positive integer required, {string} given.'.format(string=string) try: value = int(string) except ValueError: raise ArgumentTypeError(error_msg) if value < 0: raise ArgumentTypeError(error_msg) return value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def positive_nonzero_int(string): """Convert string to positive integer greater than zero."""
error_msg = 'Positive non-zero integer required, {string} given.'.format(string=string) try: value = int(string) except ValueError: raise ArgumentTypeError(error_msg) if value <= 0: raise ArgumentTypeError(error_msg) return value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def positive_float(string): """Convert string to positive float."""
error_msg = 'Positive float required, {string} given.'.format(string=string) try: value = float(string) except ValueError: raise ArgumentTypeError(error_msg) if value < 0: raise ArgumentTypeError(error_msg) return value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dict_merge(set1, set2): """Joins two dictionaries."""
return dict(list(set1.items()) + list(set2.items()))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_h(num, suffix='B'): """Converts a byte value in human readable form."""
if num is None: # Show None when data is missing return "None" for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: if abs(num) < 1024.0: return "%3.1f%s%s" % (num, unit, suffix) num /= 1024.0 return "%.1f%s%s" % (num, 'Yi', suffix)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def format_to_json(data): """Converts `data` into json If stdout is a tty it performs a pretty print. """
if sys.stdout.isatty(): return json.dumps(data, indent=4, separators=(',', ': ')) else: return json.dumps(data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _build_brokers(self, brokers): """Build broker objects using broker-ids."""
# Instantiate a Broker per id and register it in the topology map.
for b_id, meta in six.iteritems(brokers):
    self.brokers[b_id] = self._create_broker(b_id, meta)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _create_broker(self, broker_id, metadata=None): """Create a broker object and assign to a replication group. A broker object with no metadata is considered inactive. An inactive broker may or may not belong to a group. """
broker = Broker(broker_id, metadata) if not metadata: broker.mark_inactive() rg_id = self.extract_group(broker) group = self.rgs.setdefault(rg_id, ReplicationGroup(rg_id)) group.add_broker(broker) broker.replication_group = group return broker
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _build_partitions(self, assignment): """Builds all partition objects and update corresponding broker and topic objects. """
self.partitions = {}
for partition_name, replica_ids in six.iteritems(assignment):
    # Get topic; partition_name is a (topic_id, partition_id) tuple.
    topic_id = partition_name[0]
    partition_id = partition_name[1]
    topic = self.topics.setdefault(
        topic_id,
        Topic(topic_id, replication_factor=len(replica_ids))
    )
    # Creating partition object; weight/size come from the pluggable
    # partition measurer.
    partition = Partition(
        topic,
        partition_id,
        weight=self.partition_measurer.get_weight(partition_name),
        size=self.partition_measurer.get_size(partition_name),
    )
    self.partitions[partition_name] = partition
    topic.add_partition(partition)
    # Updating corresponding broker objects
    for broker_id in replica_ids:
        # Check if broker-id is present in current active brokers;
        # if not, create a metadata-less (inactive) broker so the
        # assignment can still be represented.
        if broker_id not in list(self.brokers.keys()):
            self.log.warning(
                "Broker %s containing partition %s is not in "
                "active brokers.",
                broker_id,
                partition,
            )
            self.brokers[broker_id] = self._create_broker(broker_id)
        self.brokers[broker_id].add_partition(partition)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def active_brokers(self): """Set of brokers that are not inactive or decommissioned."""
return { broker for broker in six.itervalues(self.brokers) if not broker.inactive and not broker.decommissioned }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def replace_broker(self, source_id, dest_id): """Move all partitions in source broker to destination broker. :param source_id: source broker-id :param dest_id: destination broker-id :raises: InvalidBrokerIdError, when either of given broker-ids is invalid. """
try:
    source = self.brokers[source_id]
    dest = self.brokers[dest_id]
    # Move all partitions from source to destination broker
    for partition in source.partitions.copy():  # Partitions set changes
        # We cannot move partition directly since that re-orders the
        # replicas for the partition
        source.partitions.remove(partition)
        dest.partitions.add(partition)
        # Replace broker in replica, preserving replica order (the
        # first replica is the leader).
        partition.replace(source, dest)
except KeyError as e:
    # Either source_id or dest_id is unknown to the topology.
    self.log.error("Invalid broker id %s.", e.args[0])
    raise InvalidBrokerIdError(
        "Broker id {} does not exist in cluster".format(e.args[0])
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_cluster_topology(self, assignment): """Modify the cluster-topology with given assignment. Change the replica set of partitions as in given assignment. :param assignment: dict representing actions to be used to update the current cluster-topology :raises: InvalidBrokerIdError when broker-id is invalid :raises: InvalidPartitionError when partition-name is invalid """
try:
    for partition_name, replica_ids in six.iteritems(assignment):
        # Resolve broker ids to Broker objects; unknown ids abort.
        try:
            new_replicas = [self.brokers[b_id] for b_id in replica_ids]
        except KeyError:
            self.log.error(
                "Invalid replicas %s for topic-partition %s-%s.",
                ', '.join([str(id) for id in replica_ids]),
                partition_name[0],
                partition_name[1],
            )
            raise InvalidBrokerIdError(
                "Invalid replicas {0}.".format(
                    ', '.join([str(id) for id in replica_ids])
                ),
            )
        try:
            partition = self.partitions[partition_name]
            old_replicas = [broker for broker in partition.replicas]
            # No change needed. Save ourself some CPU time.
            # Replica order matters as the first one is the leader.
            if new_replicas == old_replicas:
                continue
            # Remove old partitions from broker
            # This also updates partition replicas
            for broker in old_replicas:
                broker.remove_partition(partition)
            # Add new partition to brokers
            for broker in new_replicas:
                broker.add_partition(partition)
        except KeyError:
            # partition_name not present in the topology.
            self.log.error(
                "Invalid topic-partition %s-%s.",
                partition_name[0],
                partition_name[1],
            )
            raise InvalidPartitionError(
                "Invalid topic-partition {0}-{1}."
                .format(partition_name[0], partition_name[1]),
            )
except KeyError:
    # Defensive: malformed assignment structure.
    self.log.error("Could not parse given assignment {0}".format(assignment))
    raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plan_to_assignment(plan): """Convert the plan to the format used by cluster-topology."""
assignment = {} for elem in plan['partitions']: assignment[ (elem['topic'], elem['partition']) ] = elem['replicas'] return assignment
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def assignment_to_plan(assignment): """Convert an assignment to the format used by Kafka to describe a reassignment plan. """
return { 'version': 1, 'partitions': [{'topic': t_p[0], 'partition': t_p[1], 'replicas': replica } for t_p, replica in six.iteritems(assignment)] }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_plan( new_plan, base_plan=None, is_partition_subset=True, allow_rf_change=False, ): """Verify that the new plan is valid for execution. Given kafka-reassignment plan should affirm with following rules: - Plan should have at least one partition for re-assignment - Partition-name list should be subset of base-plan partition-list - Replication-factor for each partition of same topic is same - Replication-factor for each partition remains unchanged - No duplicate broker-ids in each replicas """
if not _validate_plan(new_plan): _log.error('Invalid proposed-plan.') return False # Validate given plan in reference to base-plan if base_plan: if not _validate_plan(base_plan): _log.error('Invalid assignment from cluster.') return False if not _validate_plan_base( new_plan, base_plan, is_partition_subset, allow_rf_change ): return False # Plan validation successful return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _validate_plan_base( new_plan, base_plan, is_partition_subset=True, allow_rf_change=False, ): """Validate if given plan is valid comparing with given base-plan. Validate following assertions: - Partition-check: New partition-set should be subset of base-partition set - Replica-count check: Replication-factor for each partition remains same - Broker-check: New broker-set should be subset of base broker-set """
# Verify that partitions in plan are subset of base plan. new_partitions = set([ (p_data['topic'], p_data['partition']) for p_data in new_plan['partitions'] ]) base_partitions = set([ (p_data['topic'], p_data['partition']) for p_data in base_plan['partitions'] ]) if is_partition_subset: invalid_partitions = list(new_partitions - base_partitions) else: # partition set should be equal invalid_partitions = list( new_partitions.union(base_partitions) - new_partitions.intersection(base_partitions), ) if invalid_partitions: _log.error( 'Invalid partition(s) found: {p_list}'.format( p_list=invalid_partitions, ) ) return False # Verify replication-factor remains consistent base_partition_replicas = { (p_data['topic'], p_data['partition']): p_data['replicas'] for p_data in base_plan['partitions'] } new_partition_replicas = { (p_data['topic'], p_data['partition']): p_data['replicas'] for p_data in new_plan['partitions'] } if not allow_rf_change: invalid_replication_factor = False for new_partition, replicas in six.iteritems(new_partition_replicas): base_replica_cnt = len(base_partition_replicas[new_partition]) if len(replicas) != base_replica_cnt: invalid_replication_factor = True _log.error( 'Replication-factor Mismatch: Partition: {partition}: ' 'Base-replicas: {expected}, Proposed-replicas: {actual}' .format( partition=new_partition, expected=base_partition_replicas[new_partition], actual=replicas, ), ) if invalid_replication_factor: return False # Validation successful return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _validate_format(plan): """Validate if the format of the plan as expected. Validate format of plan on following rules: a) Verify if it ONLY and MUST have keys and value, 'version' and 'partitions' b) Verify if each value of 'partitions' ONLY and MUST have keys 'replicas', 'partition', 'topic' c) Verify desired type of each value d) Verify non-empty partitions and replicas Sample-plan format: { "version": 1, "partitions": [ {"partition":0, "topic":'t1', "replicas":[0,1,2]}, {"partition":0, "topic":'t2', "replicas":[1,2]}, ]} """
# Verify presence of required keys if set(plan.keys()) != set(['version', 'partitions']): _log.error( 'Invalid or incomplete keys in given plan. Expected: "version", ' '"partitions". Found:{keys}' .format(keys=', '.join(list(plan.keys()))), ) return False # Invalid version if plan['version'] != 1: _log.error( 'Invalid version of plan {version}' .format(version=plan['version']), ) return False # Empty partitions if not plan['partitions']: _log.error( '"partitions" list found empty"' .format(version=plan['partitions']), ) return False # Invalid partitions type if not isinstance(plan['partitions'], list): _log.error('"partitions" of type list expected.') return False # Invalid partition-data for p_data in plan['partitions']: if set(p_data.keys()) != set(['topic', 'partition', 'replicas']): _log.error( 'Invalid keys in partition-data {keys}' .format(keys=', '.join(list(p_data.keys()))), ) return False # Check types if not isinstance(p_data['topic'], six.text_type): _log.error( '"topic" of type unicode expected {p_data}, found {t_type}' .format(p_data=p_data, t_type=type(p_data['topic'])), ) return False if not isinstance(p_data['partition'], int): _log.error( '"partition" of type int expected {p_data}, found {p_type}' .format(p_data=p_data, p_type=type(p_data['partition'])), ) return False if not isinstance(p_data['replicas'], list): _log.error( '"replicas" of type list expected {p_data}, found {r_type}' .format(p_data=p_data, r_type=type(p_data['replicas'])), ) return False if not p_data['replicas']: _log.error( 'Non-empty "replicas" expected: {p_data}' .format(p_data=p_data), ) return False # Invalid broker-type for broker in p_data['replicas']: if not isinstance(broker, int): _log.error( '"replicas" of type integer list expected {p_data}' .format(p_data=p_data), ) return False return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _validate_plan(plan): """Validate if given plan is valid based on kafka-cluster-assignment protocols. Validate following parameters: - Correct format of plan - Partition-list should be unique - Every partition of a topic should have same replication-factor - Replicas of a partition should have unique broker-set """
# Validate format of plan if not _validate_format(plan): return False # Verify no duplicate partitions partition_names = [ (p_data['topic'], p_data['partition']) for p_data in plan['partitions'] ] duplicate_partitions = [ partition for partition, count in six.iteritems(Counter(partition_names)) if count > 1 ] if duplicate_partitions: _log.error( 'Duplicate partitions in plan {p_list}' .format(p_list=duplicate_partitions), ) return False # Verify no duplicate brokers in partition-replicas dup_replica_brokers = [] for p_data in plan['partitions']: dup_replica_brokers = [ broker for broker, count in Counter(p_data['replicas']).items() if count > 1 ] if dup_replica_brokers: _log.error( 'Duplicate brokers: ({topic}, {p_id}) in replicas {replicas}' .format( topic=p_data['topic'], p_id=p_data['partition'], replicas=p_data['replicas'], ) ) return False # Verify same replication-factor for every topic topic_replication_factor = {} for partition_info in plan['partitions']: topic = partition_info['topic'] replication_factor = len(partition_info['replicas']) if topic in list(topic_replication_factor.keys()): if topic_replication_factor[topic] != replication_factor: _log.error( 'Mismatch in replication-factor of partitions for topic ' '{topic}'.format(topic=topic), ) return False else: topic_replication_factor[topic] = replication_factor return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def percentage_distance(cls, highmark, current): """Percentage of distance the current offset is behind the highmark."""
highmark = int(highmark) current = int(current) if highmark > 0: return round( (highmark - current) * 100.0 / highmark, 2, ) else: return 0.0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_children(self, path, watch=None): """Returns the children of the specified node."""
_log.debug( "ZK: Getting children of {path}".format(path=path), ) return self.zk.get_children(path, watch)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, path, watch=None): """Returns the data of the specified node."""
_log.debug( "ZK: Getting {path}".format(path=path), ) return self.zk.get(path, watch)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set(self, path, value): """Sets and returns new data for the specified node."""
_log.debug( "ZK: Setting {path} to {value}".format(path=path, value=value) ) return self.zk.set(path, value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_json(self, path, watch=None): """Reads the data of the specified node and converts it to json."""
data, _ = self.get(path, watch) return load_json(data) if data else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_brokers(self, names_only=False): """Get information on all the available brokers. :rtype : dict of brokers """
try: broker_ids = self.get_children("/brokers/ids") except NoNodeError: _log.info( "cluster is empty." ) return {} # Return broker-ids only if names_only: return {int(b_id): None for b_id in broker_ids} return {int(b_id): self.get_broker_metadata(b_id) for b_id in broker_ids}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_topic_config(self, topic): """Get configuration information for specified topic. :rtype : dict of configuration """
try:
    config_data = load_json(
        self.get(
            "/config/topics/{topic}".format(topic=topic)
        )[0]
    )
except NoNodeError as e:
    # Kafka version before 0.8.1 does not have "/config/topics/<topic_name>" path in ZK and
    # if the topic exists, return default dict instead of raising an Exception.
    # Ref: https://cwiki.apache.org/confluence/display/KAFKA/Kafka+data+structures+in+Zookeeper.
    topics = self.get_topics(topic_name=topic, fetch_partition_state=False)
    if len(topics) > 0:
        # Topic exists but carries no per-topic config node.
        _log.info("Configuration not available for topic {topic}.".format(topic=topic))
        config_data = {"config": {}}
    else:
        # Topic itself is missing: propagate the original NoNodeError.
        _log.error(
            "topic {topic} not found.".format(topic=topic)
        )
        raise e
return config_data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_topic_config(self, topic, value, kafka_version=(0, 10, )): """Set configuration information for specified topic. :topic : topic whose configuration needs to be changed :value : config value with which the topic needs to be updated with. This would be of the form key=value. Example 'cleanup.policy=compact' :kafka_version :tuple kafka version the brokers are running on. Defaults to (0, 10, x). Kafka version 9 and kafka 10 support this feature. """
config_data = dump_json(value)
try:
    # Change value: write the new config to the topic's config node.
    return_value = self.set(
        "/config/topics/{topic}".format(topic=topic),
        config_data
    )
    # Create change notification so brokers pick up the new config.
    version = kafka_version[1]
    # this feature is supported in kafka 9 and kafka 10
    # NOTE(review): `assert` is stripped under `python -O`; a raise
    # would be safer but would change the exception type callers see.
    assert version in (9, 10), "Feature supported with kafka 9 and kafka 10"
    if version == 9:
        # https://github.com/apache/kafka/blob/0.9.0.1/
        # core/src/main/scala/kafka/admin/AdminUtils.scala#L334
        change_node = dump_json({
            "version": 1,
            "entity_type": "topics",
            "entity_name": topic
        })
    else:  # kafka 10
        # https://github.com/apache/kafka/blob/0.10.2.1/
        # core/src/main/scala/kafka/admin/AdminUtils.scala#L574
        change_node = dump_json({
            "version": 2,
            "entity_path": "topics/" + topic,
        })
    # Sequential node so each config change gets a unique ordering.
    self.create(
        '/config/changes/config_change_',
        change_node,
        sequence=True
    )
except NoNodeError as e:
    _log.error(
        "topic {topic} not found.".format(topic=topic)
    )
    raise e
return return_value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_topics( self, topic_name=None, names_only=False, fetch_partition_state=True, ): """Get information on all the available topics. Topic-data format with fetch_partition_state as False :- topic_data = { 'version': 1, 'partitions': { <p_id>: { replicas: <broker-ids> } } } Topic-data format with fetch_partition_state as True:- topic_data = { 'version': 1, 'ctime': <timestamp>, 'partitions': { <p_id>:{ controller_epoch: <val>, leader_epoch: <val>, version: 1, leader: <broker-id>, ctime: <timestamp>, } } } Note: By default we also fetch partition-state which results in accessing the zookeeper twice. If just partition-replica information is required fetch_partition_state should be set to False. """
try:
    # Either the single requested topic, or every topic in the cluster.
    topic_ids = [topic_name] if topic_name else self.get_children(
        "/brokers/topics",
    )
except NoNodeError:
    _log.error(
        "Cluster is empty."
    )
    return {}
if names_only:
    return topic_ids
topics_data = {}
for topic_id in topic_ids:
    try:
        topic_info = self.get("/brokers/topics/{id}".format(id=topic_id))
        topic_data = load_json(topic_info[0])
        # ZK ctime is in milliseconds; expose seconds.
        topic_ctime = topic_info[1].ctime / 1000.0
        topic_data['ctime'] = topic_ctime
    except NoNodeError:
        _log.info(
            "topic '{topic}' not found.".format(topic=topic_id),
        )
        # NOTE(review): this returns {} for the WHOLE call when any one
        # topic node is missing mid-iteration; `continue` may have been
        # intended — confirm before changing.
        return {}
    # Prepare data for each partition
    partitions_data = {}
    for p_id, replicas in six.iteritems(topic_data['partitions']):
        partitions_data[p_id] = {}
        if fetch_partition_state:
            # Fetch partition-state from zookeeper (extra ZK round-trip)
            partition_state = self._fetch_partition_state(topic_id, p_id)
            partitions_data[p_id] = load_json(partition_state[0])
            partitions_data[p_id]['ctime'] = partition_state[1].ctime / 1000.0
        else:
            # Fetch partition-info from zookeeper
            partition_info = self._fetch_partition_info(topic_id, p_id)
            partitions_data[p_id]['ctime'] = partition_info.ctime / 1000.0
        partitions_data[p_id]['replicas'] = replicas
    topic_data['partitions'] = partitions_data
    topics_data[topic_id] = topic_data
return topics_data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_consumer_groups(self, consumer_group_id=None, names_only=False): """Get information on all the available consumer-groups. If names_only is False, only list of consumer-group ids are sent. If names_only is True, Consumer group offset details are returned for all consumer-groups or given consumer-group if given in dict format as:- { 'group-id': { 'topic': { 'partition': offset-value, } } } :rtype: dict of consumer-group offset details """
if consumer_group_id is None: group_ids = self.get_children("/consumers") else: group_ids = [consumer_group_id] # Return consumer-group-ids only if names_only: return {g_id: None for g_id in group_ids} consumer_offsets = {} for g_id in group_ids: consumer_offsets[g_id] = self.get_group_offsets(g_id) return consumer_offsets