text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def find(self, path, match, flags):
    """Yield every child path under *path* whose path (relative to *path*)
    matches the regexp *match* compiled with *flags*."""
    try:
        pattern = re.compile(match, flags)
    except sre_constants.error as ex:
        print("Bad regexp: %s" % (ex))
        return
    prefix_len = len(path)
    for candidate in Tree(self, path).get():
        # match only against the portion below the starting path
        if pattern.search(candidate[prefix_len:]):
            yield candidate
def grep(self, path, content, flags):
    """Yield (path, matching_lines) for every child znode under *path*
    whose value matches the regexp *content*."""
    try:
        pattern = re.compile(content, flags)
    except sre_constants.error as ex:
        print("Bad regexp: %s" % (ex))
        return
    # the recursive worker does the traversal
    yield from self.do_grep(path, pattern)
def do_grep(self, path, match):
    """Recursive worker for grep(): walk the subtree below *path* and yield
    (full_path, matching_lines) for every znode value with a match."""
    try:
        child_names = self.get_children(path)
    except (NoNodeError, NoAuthError):
        child_names = []
    for name in child_names:
        child_path = os.path.join(path, name)
        try:
            value, _ = self.get(child_path)
        except (NoNodeError, NoAuthError):
            value = ""
        if value is not None:
            hits = [line for line in value.split("\n") if match.search(line)]
            if hits:
                yield (child_path, hits)
        # depth-first into the child
        yield from self.do_grep(child_path, match)
def tree(self, path, max_depth, full_path=False, include_stat=False):
    """DFS generator which starts from a given path and goes up to a max depth.

    :param path: path from which the DFS will start
    :param max_depth: max depth of DFS (0 means no limit)
    :param full_path: should the full path of the child node be returned
    :param include_stat: return the child Znode's stat along with the name & level
    """
    # delegate to the recursive worker, starting at level 0
    yield from self.do_tree(path, max_depth, 0, full_path, include_stat)
def do_tree(self, path, max_depth, level, full_path, include_stat):
    """Recursive worker for tree(): yield (name, level[, stat]) tuples."""
    try:
        child_names = self.get_children(path)
    except (NoNodeError, NoAuthError):
        child_names = []
    for name in child_names:
        abs_path = os.path.join(path, name)
        shown = abs_path if full_path else name
        if include_stat:
            yield shown, level, self.stat(abs_path)
        else:
            yield shown, level
        # recurse while under the depth limit (0 means unlimited)
        if max_depth == 0 or level + 1 < max_depth:
            yield from self.do_tree(abs_path, max_depth, level + 1,
                                    full_path, include_stat)
def equal(self, path_a, path_b):
    """Return True when both znodes hold exactly the same bytes."""
    bytes_a, _ = self.get_bytes(path_a)
    bytes_b, _ = self.get_bytes(path_b)
    return bytes_a == bytes_b
def stat(self, path):
    """Safely get the Znode's Stat; None when missing or unauthorized."""
    try:
        return self.exists(str(path))
    except (NoNodeError, NoAuthError):
        return None
def reconnect(self):
    """ forces a reconnect by shutting down the connected socket
    return True if the reconnect happened, False otherwise """
    # Event flipped by the listener below once the session is suspended.
    state_change_event = self.handler.event_object()

    def listener(state):
        # only the SUSPENDED transition signals that the shutdown took effect
        if state is KazooState.SUSPENDED:
            state_change_event.set()

    self.add_listener(listener)
    # Forcibly close the transport; the client should notice and reconnect.
    # NOTE(review): reaches into private members (_connection._socket) —
    # fragile across kazoo versions. The listener is never removed; presumably
    # harmless here, but verify against the caller's lifecycle.
    self._connection._socket.shutdown(socket.SHUT_RDWR)
    # give the suspension up to 1 second to be observed
    state_change_event.wait(1)
    if not state_change_event.is_set():
        return False
    # wait until we are back
    while not self.connected:
        time.sleep(0.1)
    return True
def dump_by_server(self, hosts):
    """Returns the output of dump for each server.

    :param hosts: comma separated lists of members of the ZK ensemble.
    :returns: A dictionary of ((server_ip, port), ClientInfo).
    """
    dump_by_endpoint = {}
    for endpoint in self._to_endpoints(hosts):
        try:
            out = self.cmd([endpoint], "dump")
        except self.CmdFailed:
            # best-effort: a server that fails the command contributes empty
            # output (the unused exception binding was dropped)
            out = ""
        dump_by_endpoint[endpoint] = out
    return dump_by_endpoint
def ephemerals_info(self, hosts):
    """Returns ClientInfo per path.

    :param hosts: comma separated lists of members of the ZK ensemble.
    :returns: A dictionary of (path, ClientInfo).
    """
    info_by_path, info_by_id = {}, {}
    for server_endpoint, dump in self.dump_by_server(hosts).items():
        server_ip, server_port = server_endpoint
        # `sid` carries the most recently seen session id down through the
        # following PATH lines of the dump output.
        sid = None
        for line in dump.split("\n"):
            mat = self.SESSION_REGEX.match(line)
            if mat:
                sid = mat.group(1)
                continue
            mat = self.PATH_REGEX.match(line)
            if mat:
                # attach this path to the ClientInfo of the current session,
                # creating the ClientInfo lazily
                info = info_by_id.get(sid, None)
                if info is None:
                    info = info_by_id[sid] = ClientInfo(sid)
                info_by_path[mat.group(1)] = info
                continue
            mat = self.IP_PORT_REGEX.match(line)
            if mat:
                ip, port, sid = mat.groups()
                # only sessions that own ephemerals are of interest
                if sid not in info_by_id:
                    continue
                # NOTE(review): ClientInfo instances appear to be callable to
                # fill in endpoint details — confirm against ClientInfo.__call__
                info_by_id[sid](ip, int(port), server_ip, server_port)
    return info_by_path
def sessions_info(self, hosts):
    """Returns ClientInfo per session.

    :param hosts: comma separated lists of members of the ZK ensemble.
    :returns: A dictionary of (session_id, ClientInfo).
    """
    info_by_id = {}
    for endpoint, dump in self.dump_by_server(hosts).items():
        server_ip, server_port = endpoint
        for line in dump.split("\n"):
            found = self.IP_PORT_REGEX.match(line)
            if found is None:
                continue
            ip, port, sid = found.groups()
            info_by_id[sid] = ClientInfo(sid, ip, port, server_ip, server_port)
    return info_by_id
def update(self, path, verbose=False):
    """Toggle watching: start watching *path* if it isn't watched yet,
    stop watching it otherwise."""
    if path not in self._by_path:
        self.add(path, verbose)
    else:
        self.remove(path)
def from_string(cls, string, exists=False, asynchronous=False, verbose=False):
    """Build the proxy matching the URL scheme of *string*.

    if exists is bool, then check it either exists or it doesn't.
    if exists is None, we don't care.
    """
    result = cls.parse(string)
    scheme = result.scheme
    if scheme not in cls.TYPES:
        raise CopyError("Invalid scheme: %s" % (scheme))
    return cls.TYPES[scheme](result, exists, asynchronous, verbose)
def zk_walk(self, root_path, branch_path):
    """Yield relative child paths below root_path/branch_path, depth first.

    skip ephemeral znodes since there's no point in copying those
    """
    full_path = os.path.join(root_path, branch_path) if branch_path else root_path
    try:
        child_names = self.client.get_children(full_path)
    except NoNodeError:
        child_names = set()
    except NoAuthError:
        raise AuthError("read children", full_path)
    for name in child_names:
        child_path = os.path.join(branch_path, name) if branch_path else name
        try:
            stat = self.client.exists(os.path.join(root_path, child_path))
        except NoAuthError:
            raise AuthError("read", name)
        # skip vanished znodes and ephemerals
        if stat is None or stat.ephemeralOwner != 0:
            continue
        yield child_path
        yield from self.zk_walk(root_path, child_path)
def write_path(self, path_value):
    """Write path_value.value to self.path, creating parent directories
    as needed. this will overwrite dst path - be careful."""
    parent_dir = os.path.dirname(self.path)
    try:
        # exist_ok makes the intent explicit and removes the race between
        # checking and creating; the except now only hides genuine failures,
        # which will surface at open() below anyway.
        os.makedirs(parent_dir, exist_ok=True)
    except OSError:
        pass
    with open(self.path, "w") as fph:
        fph.write(path_value.value)
def get(self, exclude_recurse=None):
    """Generator of all descendant paths, breadth-ish via async requests.

    Paths matching exclude_recurse will not be recursed.
    """
    reqs = Queue()
    # `pending` counts outstanding get_children requests; the loop below runs
    # until every dispatched request has been consumed.
    pending = 1
    path = self.path
    zk = self.zk

    def child_of(path):
        # fire the async children fetch; the result object is consumed later
        return zk.get_children_async(path)

    def dispatch(path):
        return Request(path, child_of(path))

    stat = zk.exists(path)
    # nothing to do for a missing node or a leaf
    if stat is None or stat.numChildren == 0:
        return
    reqs.put(dispatch(path))
    while pending:
        req = reqs.get()
        try:
            # blocks until the async children result is available
            children = req.value
            for child in children:
                cpath = os.path.join(req.path, child)
                if exclude_recurse is None or exclude_recurse not in child:
                    # schedule the child before yielding so traversal continues
                    pending += 1
                    reqs.put(dispatch(cpath))
                yield cpath
        except (NoNodeError, NoAuthError):
            # node disappeared or is unreadable: just drop this branch
            pass
        pending -= 1
def to_type(value, ptype):
    """Convert the string *value* to the Python type named by *ptype*.

    :param value: text to convert
    :param ptype: one of 'str', 'int', 'float', 'bool', 'json'
    :raises ValueError: for bad bool text or an unknown *ptype*
    """
    if ptype == 'str':
        return str(value)
    if ptype == 'int':
        return int(value)
    if ptype == 'float':
        return float(value)
    if ptype == 'bool':
        if value.lower() == 'true':
            return True
        if value.lower() == 'false':
            return False
        raise ValueError('Bad bool value: %s' % value)
    if ptype == 'json':
        return json.loads(value)
    # bug fix: the original *returned* the ValueError instead of raising it,
    # so callers silently received an exception object as the "converted" value
    raise ValueError('Unknown type')
def validate_one(cls, keystr):
    """Validate a single (non-template) key string; raise cls.Bad on error."""
    anchored = r'%s$' % cls.ALLOWED_KEY
    if re.match(anchored, keystr) is None:
        raise cls.Bad("Bad key syntax for: %s. Should be: key1.key2..." % (keystr))
    return True
def validate(cls, keystr):
    """Raise cls.Bad if *keystr* (plain keys or a template) is malformed."""
    if "#{" not in keystr:
        # plain keys str
        cls.validate_one(keystr)
        return
    # it's a template with keys vars: check each embedded key
    for key in cls.from_template(keystr):
        cls.validate_one(cls.extract(key))
def fetch(cls, obj, keys):
    """Fetch the value at the dotted *keys* path inside *obj*.

    :param obj: nested dicts/lists to walk
    :param keys: dot separated path, e.g. ``"a.b.0"``
    :raises cls.Missing: when a path component does not resolve
    """
    current = obj
    for key in keys.split("."):
        if type(current) == list:
            try:
                key = int(key)
            except ValueError:
                # bug fix: int() raises ValueError (not TypeError) on bad
                # text, so a non-numeric key for a list previously escaped
                # this handler and crashed the caller
                raise cls.Missing(key)
        try:
            current = current[key]
        except (IndexError, KeyError, TypeError):
            raise cls.Missing(key)
    return current
def value(cls, obj, keystr):
    """Get the value for *keystr* from *obj*. If *keystr* is a template
    string, extrapolate every embedded key into it."""
    if "#{" not in keystr:
        # plain keys str
        return cls.fetch(obj, keystr)
    # template: substitute each key var with its fetched value
    result = keystr
    for k in cls.from_template(keystr):
        fetched = cls.fetch(obj, cls.extract(k))
        result = result.replace(k, str(fetched))
    return result
def set(cls, obj, keys, value, fill_list_value=None):
    """ sets the value for the given keys on obj. if any of the given
    keys does not exist, create the intermediate containers. """
    current = obj
    keys_list = keys.split(".")
    # enumerate from 1 so `idx == len(keys_list)` marks the last key
    for idx, key in enumerate(keys_list, 1):
        if type(current) == list:
            # Validate this key works with a list.
            try:
                key = int(key)
            except ValueError:
                raise cls.Missing(key)
        try:
            # This is the last key, so set the value.
            if idx == len(keys_list):
                if type(current) == list:
                    # grow the list as needed, padding with copies of
                    # fill_list_value
                    safe_list_set(
                        current,
                        key,
                        lambda: copy.copy(fill_list_value),
                        value
                    )
                else:
                    current[key] = value
                # done.
                return
            # More keys left, ensure we have a container for this key.
            if type(key) == int:
                try:
                    current[key]
                except IndexError:
                    # Create a container for this key; its type (list vs
                    # dict) is derived from the *next* key in the path.
                    cnext = container_for_key(keys_list[idx])
                    if type(cnext) == list:
                        def fill_with():
                            return []
                    else:
                        def fill_with():
                            return {}
                    safe_list_set(
                        current,
                        key,
                        fill_with,
                        [] if type(cnext) == list else {}
                    )
            else:
                if key not in current:
                    # Create a container for this key.
                    current[key] = container_for_key(keys_list[idx])
            # Move on to the next key.
            current = current[key]
        except (IndexError, KeyError, TypeError):
            raise cls.Missing(key)
def pretty_bytes(num):
    """Render *num* bytes as a human readable string (B/KB/MB/GB/TB)."""
    for unit in ['', 'KB', 'MB', 'GB']:
        if num < 1024.0:
            # plain byte counts are shown without a decimal part
            return "%d" % (num) if unit == '' else "%3.1f%s" % (num, unit)
        num /= 1024.0
    return "%3.1f%s" % (num, 'TB')
def valid_ipv4(ip):
    """Return True when *ip* is a valid dotted-quad IPv4 address."""
    found = _valid_ipv4.match(ip)
    if found is None:
        return False
    octets = found.groups()
    if len(octets) != 4:
        return False
    # the first octet must be a usable network value
    first = int(octets[0])
    if not 1 <= first <= 254:
        return False
    return all(0 <= int(octet) <= 255 for octet in octets[1:])
def valid_host(host):
    """Return True when every dot-separated label of *host* is valid."""
    return all(_valid_host_part.match(part) for part in host.split("."))
def valid_host_with_port(hostport):
    """Return True for a hostname or an IP, optionally suffixed with :port."""
    if ":" in hostport:
        host, port = hostport.rsplit(":", 1)
    else:
        host, port = hostport, None
    # first, validate host or IP
    if not (valid_ipv4(host) or valid_host(host)):
        return False
    # now, validate port (when present)
    return port is None or valid_port(port)
def split(path):
    """Split *path* into (parent, child); the root has no child."""
    if path == '/':
        return ('/', None)
    parent, child = path.rsplit('/', 1)
    # a top-level child leaves an empty parent, which means the root
    return (parent or '/', child)
def find_outliers(group, delta):
    """Find the indexes of the values that are apart from the rest by
    more than `delta`.

    examples:
      find_outliers([100, 6, 7, 8, 9, 10, 150], 5) -> [0, 6]
      find_outliers([5, 6, 5, 4, 5], 3) -> []
    """
    indexed = sorted(enumerate(group), key=lambda pair: pair[1])
    start = end = -1
    for pos in range(0, len(indexed) - 1):
        lower = indexed[pos][1]
        upper = indexed[pos + 1][1]
        if upper - lower > delta:
            # whichever side of the gap is smaller holds the outliers
            if pos < (len(indexed) - pos):
                start, end = 0, pos + 1
            else:
                start, end = pos + 1, len(indexed)
            break
    if start == -1:
        return []
    return [indexed[i][0] for i in range(start, end)]
def get_matching(content, match):
    """Filter out the lines of *content* that don't include *match*."""
    if match == "":
        return content
    return "\n".join(line for line in content.split("\n") if match in line)
def load_remote_db(self):
    """ Load remote S3 DB """
    signature_version = self.settings_dict.get("SIGNATURE_VERSION", "s3v4")
    s3 = boto3.resource(
        's3',
        config=botocore.client.Config(signature_version=signature_version),
    )
    # Only fetch when NAME has not already been rewritten to the /tmp copy.
    if '/tmp/' not in self.settings_dict['NAME']:
        try:
            etag = ''
            if os.path.isfile('/tmp/' + self.settings_dict['NAME']):
                # hash the local copy so we can do a conditional GET
                m = hashlib.md5()
                with open('/tmp/' + self.settings_dict['NAME'], 'rb') as f:
                    m.update(f.read())
                # In general the ETag is the md5 of the file, in some cases it's not,
                # and in that case we will just need to reload the file, I don't see any other way
                etag = m.hexdigest()
            obj = s3.Object(self.settings_dict['BUCKET'], self.settings_dict['NAME'])
            obj_bytes = obj.get(IfNoneMatch=etag)["Body"]  # Will throw E on 304 or 404
            with open('/tmp/' + self.settings_dict['NAME'], 'wb') as f:
                f.write(obj_bytes.read())
            # remember the hash of what we downloaded so close() can detect changes
            m = hashlib.md5()
            with open('/tmp/' + self.settings_dict['NAME'], 'rb') as f:
                m.update(f.read())
            self.db_hash = m.hexdigest()
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "304":
                logging.debug("ETag matches md5 of local copy, using local copy of DB!")
                self.db_hash = etag
            else:
                logging.debug("Couldn't load remote DB object.")
        except Exception as e:
            # Weird one
            logging.debug(e)
    # SQLite DatabaseWrapper will treat our tmp as normal now
    # Check because Django likes to call this function a lot more than it should
    if '/tmp/' not in self.settings_dict['NAME']:
        self.settings_dict['REMOTE_NAME'] = self.settings_dict['NAME']
        self.settings_dict['NAME'] = '/tmp/' + self.settings_dict['NAME']
    # Make sure it exists if it doesn't yet
    if not os.path.isfile(self.settings_dict['NAME']):
        open(self.settings_dict['NAME'], 'a').close()
    logging.debug("Loaded remote DB!")
def close(self, *args, **kwargs):
    """ Engine closed, copy file to DB if it has changed """
    super(DatabaseWrapper, self).close(*args, **kwargs)
    signature_version = self.settings_dict.get("SIGNATURE_VERSION", "s3v4")
    s3 = boto3.resource(
        's3',
        config=botocore.client.Config(signature_version=signature_version),
    )
    try:
        with open(self.settings_dict['NAME'], 'rb') as f:
            fb = f.read()
        m = hashlib.md5()
        m.update(fb)
        if self.db_hash == m.hexdigest():
            logging.debug("Database unchanged, not saving to remote DB!")
            return
        bytesIO = BytesIO()
        bytesIO.write(fb)
        bytesIO.seek(0)
        s3_object = s3.Object(self.settings_dict['BUCKET'], self.settings_dict['REMOTE_NAME'])
        # bug fix: boto3's Object.put() accepts keyword arguments only; the
        # stray positional 'rb' raised a TypeError that the broad except
        # below swallowed, so the database was never actually uploaded.
        s3_object.put(Body=bytesIO)
    except Exception as e:
        logging.debug(e)
    logging.debug("Saved to remote DB!")
def id(self):  # pylint: disable=invalid-name,too-many-branches,too-many-return-statements
    """Return a unique id for the detected chip, if any."""
    # There are some times we want to trick the platform detection
    # say if a raspberry pi doesn't have the right ID, or for testing
    try:
        return os.environ['BLINKA_FORCECHIP']
    except KeyError:  # no forced chip, continue with testing!
        pass
    # Special case, if we have an environment var set, we could use FT232H
    try:
        if os.environ['BLINKA_FT232H']:
            # we can't have ftdi1 as a dependency cause its wierd
            # to install, sigh.
            import ftdi1 as ftdi  # pylint: disable=import-error
            try:
                ctx = None
                ctx = ftdi.new()  # Create a libftdi context.
                # Enumerate FTDI devices.
                count, _ = ftdi.usb_find_all(ctx, 0, 0)
                if count < 0:
                    # bug fix: the format args must be a tuple (the original
                    # applied '%' to `count` alone, which itself raised), and
                    # the error string comes from the local ctx, not the
                    # nonexistent self._ctx.
                    raise RuntimeError('ftdi_usb_find_all returned error %d : %s' %
                                       (count, ftdi.get_error_string(ctx)))
                if count == 0:
                    raise RuntimeError('BLINKA_FT232H environment variable' + \
                                       'set, but no FT232H device found')
            finally:
                # Make sure to clean up list and context when done.
                if ctx is not None:
                    ftdi.free(ctx)
            return FT232H
    except KeyError:  # no FT232H environment var
        pass
    platform = sys.platform
    if platform == "linux" or platform == "linux2":
        return self._linux_id()
    if platform == "esp8266":
        return ESP8266
    if platform == "samd21":
        return SAMD21
    if platform == "pyboard":
        return STM32
    # nothing found!
    return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _linux_id(self): """Attempt to detect the CPU on a computer running the Linux kernel."""
linux_id = None hardware = self.detector.get_cpuinfo_field("Hardware") if hardware is None: vendor_id = self.detector.get_cpuinfo_field("vendor_id") if vendor_id in ("GenuineIntel", "AuthenticAMD"): linux_id = GENERIC_X86 compatible = self.detector.get_device_compatible() if compatible and 'tegra' in compatible: if 'cv' in compatible or 'nano' in compatible: linux_id = T210 elif 'quill' in compatible: linux_id = T186 elif 'xavier' in compatible: linux_id = T194 elif hardware in ("BCM2708", "BCM2709", "BCM2835"): linux_id = BCM2XXX elif "AM33XX" in hardware: linux_id = AM33XX elif "sun8i" in hardware: linux_id = SUN8I elif "ODROIDC" in hardware: linux_id = S805 elif "ODROID-C2" in hardware: linux_id = S905 elif "SAMA5" in hardware: linux_id = SAMA5 return linux_id
def id(self):
    """Return a unique id for the detected board, if any."""
    # There are some times we want to trick the platform detection
    # say if a raspberry pi doesn't have the right ID, or for testing
    try:
        return os.environ['BLINKA_FORCEBOARD']
    except KeyError:
        pass  # no forced board, continue with testing!
    chip_id = self.detector.chip.id
    # chips that need extra probing to name the exact board
    if chip_id == ap_chip.BCM2XXX:
        return self._pi_id()
    if chip_id == ap_chip.AM33XX:
        return self._beaglebone_id()
    if chip_id == ap_chip.SUN8I:
        return self._armbian_id()
    if chip_id == ap_chip.SAMA5:
        return self._sama5_id()
    if chip_id in (ap_chip.T210, ap_chip.T186, ap_chip.T194):
        return self._tegra_id()
    # chips that map one-to-one to a board
    fixed = {
        ap_chip.GENERIC_X86: GENERIC_LINUX_PC,
        ap_chip.ESP8266: FEATHER_HUZZAH,
        ap_chip.SAMD21: FEATHER_M0_EXPRESS,
        ap_chip.STM32: PYBOARD,
        ap_chip.S805: ODROID_C1,
        ap_chip.S905: ODROID_C2,
        ap_chip.FT232H: FTDI_FT232H,
    }
    return fixed.get(chip_id, None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _pi_id(self): """Try to detect id of a Raspberry Pi."""
# Check for Pi boards: pi_rev_code = self._pi_rev_code() if pi_rev_code: for model, codes in _PI_REV_CODES.items(): if pi_rev_code in codes: return model return None
def _pi_rev_code(self):
    """Attempt to find a Raspberry Pi revision code for this board."""
    # 2708 is Pi 1, 2709 is Pi 2, 2835 is Pi 3 (or greater) on 4.9.x kernel;
    # anything else is not a Pi.
    if self.detector.chip.id != ap_chip.BCM2XXX:
        return None  # Something else, not a Pi.
    return self.detector.get_cpuinfo_field('Revision')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _beaglebone_id(self): """Try to detect id of a Beaglebone."""
try: with open("/sys/bus/nvmem/devices/0-00500/nvmem", "rb") as eeprom: eeprom_bytes = eeprom.read(16) except FileNotFoundError: return None if eeprom_bytes[:4] != b'\xaaU3\xee': return None id_string = eeprom_bytes[4:].decode("ascii") for model, bb_ids in _BEAGLEBONE_BOARD_IDS.items(): for bb_id in bb_ids: if id_string == bb_id[1]: return model return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _tegra_id(self): """Try to detect the id of aarch64 board."""
board_value = self.detector.get_device_model() if 'tx1' in board_value: return JETSON_TX1 elif 'quill' in board_value: return JETSON_TX2 elif 'xavier' in board_value: return JETSON_XAVIER elif 'nano' in board_value: return JETSON_NANO return None
def any_embedded_linux(self):
    """Check whether the current board is any embedded Linux device."""
    return any((
        self.any_raspberry_pi,
        self.any_beaglebone,
        self.any_orange_pi,
        self.any_giant_board,
        self.any_jetson_board,
    ))
async def middleware(request, handler):
    """ Main middleware function, deals with all the X-Ray segment logic """
    # Create X-Ray headers
    xray_header = construct_xray_header(request.headers)
    # Get name of service or generate a dynamic one from host
    name = calculate_segment_name(request.headers['host'].split(':', 1)[0], xray_recorder)
    sampling_req = {
        'host': request.headers['host'],
        'method': request.method,
        'path': request.path,
        'service': name,
    }
    sampling_decision = calculate_sampling_decision(
        trace_header=xray_header,
        recorder=xray_recorder,
        sampling_req=sampling_req,
    )
    # Start a segment
    segment = xray_recorder.begin_segment(
        name=name,
        traceid=xray_header.root,
        parent_id=xray_header.parent,
        sampling=sampling_decision,
    )
    segment.save_origin_trace_header(xray_header)
    # Store request metadata in the current segment
    segment.put_http_meta(http.URL, str(request.url))
    segment.put_http_meta(http.METHOD, request.method)
    if 'User-Agent' in request.headers:
        segment.put_http_meta(http.USER_AGENT, request.headers['User-Agent'])
    # prefer the forwarded client ip when behind a proxy
    if 'X-Forwarded-For' in request.headers:
        segment.put_http_meta(http.CLIENT_IP, request.headers['X-Forwarded-For'])
        segment.put_http_meta(http.X_FORWARDED_FOR, True)
    elif 'remote_addr' in request.headers:
        segment.put_http_meta(http.CLIENT_IP, request.headers['remote_addr'])
    else:
        segment.put_http_meta(http.CLIENT_IP, request.remote)
    try:
        # Call next middleware or request handler
        response = await handler(request)
    except HTTPException as exc:
        # Non 2XX responses are raised as HTTPExceptions
        response = exc
        raise
    except Exception as err:
        # Store exception information including the stacktrace to the segment
        response = None
        segment.put_http_meta(http.STATUS, 500)
        stack = stacktrace.get_stacktrace(limit=xray_recorder.max_trace_back)
        segment.add_exception(err, stack)
        raise
    finally:
        # record response metadata and close the segment no matter what
        if response is not None:
            segment.put_http_meta(http.STATUS, response.status)
            if 'Content-Length' in response.headers:
                length = int(response.headers['Content-Length'])
                segment.put_http_meta(http.CONTENT_LENGTH, length)
            header_str = prepare_response_header(xray_header, segment)
            response.headers[http.XRAY_HEADER] = header_str
        xray_recorder.end_segment()
    return response
def to_id(self):
    """ Convert TraceId object to a string. """
    parts = (TraceId.VERSION, format(self.start_time, 'x'), self.__number)
    return TraceId.DELIMITER.join(str(part) for part in parts)
def unpatch():
    """
    Unpatch any previously patched modules.
    This operation is idempotent.
    """
    # discard() (unlike remove()) is a no-op when not present -> idempotent
    _PATCHED_MODULES.discard('httplib')
    setattr(httplib, PATCH_FLAG, False)
    # _send_request encapsulates putrequest, putheader[s], and endheaders
    unwrap(httplib.HTTPConnection, '_send_request')
    unwrap(httplib.HTTPConnection, 'getresponse')
    unwrap(httplib.HTTPResponse, 'read')
def task_factory(loop, coro):
    """Task factory that propagates the X-Ray ``context`` attribute.

    Closely mirrors the logic inside asyncio.BaseEventLoop.create_task; in
    addition, if the currently running task carries a ``context`` attribute,
    share it with the newly created task.

    :param loop: the event loop the task is bound to
    :param coro: the coroutine to wrap in a Task
    """
    task = asyncio.Task(coro, loop=loop)
    if task._source_traceback:  # flake8: noqa
        del task._source_traceback[-1]  # flake8: noqa
    # bug fix: asyncio.Task.current_task() was removed in Python 3.9; the
    # module-level asyncio.current_task() is the supported replacement.
    current_task = asyncio.current_task(loop=loop)
    if current_task is not None and hasattr(current_task, 'context'):
        setattr(task, 'context', current_task.context)
    return task
def match(self, sampling_req):
    """
    Determines whether or not this sampling rule applies to the incoming
    request based on some of the request's parameters.
    Any ``None`` parameter provided will be considered an implicit match.
    """
    if sampling_req is None:
        return False

    def field_ok(rule_value, req_value):
        # a missing request field matches implicitly
        return not req_value or wildcard_match(rule_value, req_value)

    return (field_ok(self._host, sampling_req.get('host', None))
            and field_ok(self._method, sampling_req.get('method', None))
            and field_ok(self._path, sampling_req.get('path', None))
            and field_ok(self._service, sampling_req.get('service', None))
            and field_ok(self._service_type, sampling_req.get('service_type', None)))
def merge(self, rule):
    """Migrate all stateful attributes from the old rule into this one."""
    with self._lock:
        for name in ('request_count', 'borrow_count',
                     'sampled_count', 'reservoir'):
            setattr(self, '_' + name, getattr(rule, name))
        # the old rule must no longer own the reservoir
        rule.reservoir = None
def borrow_or_take(self, now, can_borrow):
    """
    Decide whether to borrow or take one quota from
    the reservoir. Return ``False`` if it can neither
    borrow nor take. This method is thread-safe.
    """
    with self._lock:
        decision = self._borrow_or_take(now, can_borrow)
    return decision
def close(self, end_time=None):
    """
    Close the trace entity by setting `end_time`
    and flip the in progress flag to False.

    :param int end_time: Epoch in seconds. If not specified
        current time will be used.
    """
    self._check_ended()
    self.end_time = end_time if end_time else time.time()
    self.in_progress = False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_subsegment(self, subsegment): """ Add input subsegment as a child subsegment. """
def add_subsegment(self, subsegment):
    """Attach ``subsegment`` as a child of this entity."""
    self._check_ended()
    # Link the child back to this entity before recording it.
    subsegment.parent_id = self.id
    self.subsegments.append(subsegment)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def put_http_meta(self, key, value): """ Add http related metadata. :param str key: Currently supported keys are: * url * method * user_agent * client_ip * status * content_length :param value: status and content_length are int and for other supported keys string should be used. """
def put_http_meta(self, key, value):
    """Record http-related metadata under ``request`` or ``response``.

    Supported keys: url, method, user_agent, client_ip (request side);
    status, content_length (response side). ``status`` may arrive as a
    string and is coerced to int before the status flags are applied.
    """
    self._check_ended()
    if value is None:
        return
    if key == http.STATUS:
        if isinstance(value, string_types):
            value = int(value)
        self.apply_status_code(value)
    if key in http.request_keys:
        self.http.setdefault('request', {})[key] = value
    elif key in http.response_keys:
        self.http.setdefault('response', {})[key] = value
    else:
        log.warning("ignoring unsupported key %s in http meta.", key)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def put_annotation(self, key, value): """ Annotate segment or subsegment with a key-value pair. Annotations will be indexed for later search query. :param str key: annotation key :param object value: annotation value. Any type other than string/number/bool will be dropped """
def put_annotation(self, key, value):
    """Annotate the entity with a key-value pair (indexed for search).

    :param str key: annotation key; only characters in the valid set
        are accepted.
    :param object value: annotation value; any type other than
        string/number/bool is dropped with a warning.
    """
    self._check_ended()
    if not isinstance(key, string_types):
        log.warning("ignoring non string type annotation key with type %s.", type(key))
        return
    if not isinstance(value, annotation_value_types):
        log.warning("ignoring unsupported annotation value type %s.", type(value))
        return
    if any(character not in _valid_annotation_key_characters for character in key):
        # Fixed typo in the original log message ("annnotation").
        log.warning("ignoring annotation with unsupported characters in key: '%s'.", key)
        return
    self.annotations[key] = value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def put_metadata(self, key, value, namespace='default'): """ Add metadata to segment or subsegment. Metadata is not indexed but can be later retrieved by BatchGetTraces API. :param str namespace: optional. Default namespace is `default`. It must be a string and prefix `AWS.` is reserved. :param str key: metadata key under specified namespace :param object value: any object that can be serialized into JSON string """
def put_metadata(self, key, value, namespace='default'):
    """Add metadata under ``namespace`` (not indexed; retrievable via
    the BatchGetTraces API).

    The namespace must be a string and must not start with the reserved
    prefix ``AWS.``.
    """
    self._check_ended()
    if not isinstance(namespace, string_types):
        log.warning("ignoring non string type metadata namespace")
        return
    if namespace.startswith('AWS.'):
        log.warning("Prefix 'AWS.' is reserved, drop metadata with namespace %s", namespace)
        return
    existing = self.metadata.get(namespace, None)
    if existing:
        existing[key] = value
    else:
        self.metadata[namespace] = {key: value}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_exception(self, exception, stack, remote=False): """ Add an exception to trace entities. :param Exception exception: the caught exception. :param list stack: the output from python built-in `traceback.extract_stack()`. :param bool remote: If False it means it's a client error instead of a downstream service. """
def add_exception(self, exception, stack, remote=False):
    """Record an exception on this trace entity and set the fault flag.

    :param Exception exception: the caught exception.
    :param list stack: output of ``traceback.extract_stack()``.
    :param bool remote: True when the error came from a downstream
        service rather than the client.
    """
    self._check_ended()
    self.add_fault_flag()
    # Already-recorded exceptions are only referenced by their cause id.
    if hasattr(exception, '_recorded'):
        setattr(self, 'cause', getattr(exception, '_cause_id'))
        return
    self.cause['exceptions'] = [Throwable(exception, stack, remote)]
    self.cause['working_directory'] = os.getcwd()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def serialize(self): """ Serialize to JSON document that can be accepted by the X-Ray backend service. It uses jsonpickle to perform serialization. """
def serialize(self):
    """Serialize this entity to a JSON document for the X-Ray backend.

    Uses jsonpickle; returns ``None`` (after logging) on failure.
    """
    try:
        encoded = jsonpickle.encode(self, unpicklable=False)
    except Exception:
        log.exception("got an exception during serialization")
        return None
    return encoded
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _delete_empty_properties(self, properties): """ Delete empty properties before serialization to avoid extra keys with empty values in the output json. """
def _delete_empty_properties(self, properties):
    """Drop empty keys before serialization so the output JSON stays
    free of empty values."""
    # Same falsy check applied uniformly to each optional attribute.
    for attr in ('parent_id', 'subsegments', 'aws', 'http',
                 'cause', 'annotations', 'metadata'):
        if not getattr(self, attr):
            del properties[attr]
    properties.pop(ORIGIN_TRACE_HEADER_ATTR_KEY, None)
    del properties['sampled']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reload_settings(*args, **kwargs): """ Reload X-Ray user settings upon Django server hot restart """
def reload_settings(*args, **kwargs):
    """Reload X-Ray user settings upon Django server hot restart."""
    global settings
    changed_setting = kwargs['setting']
    new_value = kwargs['value']
    if changed_setting == XRAY_NAMESPACE:
        settings = XRaySettings(new_value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_subsegment(self, subsegment): """ Add input subsegment as a child subsegment and increment reference counter and total subsegments counter. """
def add_subsegment(self, subsegment):
    """Attach a child subsegment and bump the segment's subsegment
    counters (reference counter + total count)."""
    super(Segment, self).add_subsegment(subsegment)
    self.increment()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_subsegment(self, subsegment): """ Remove the reference of input subsegment. """
def remove_subsegment(self, subsegment):
    """Drop the reference to ``subsegment`` and shrink the size counter."""
    super(Segment, self).remove_subsegment(subsegment)
    self.decrement_subsegments_size()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_user(self, user): """ set user of a segment. One segment can only have one user. User is indexed and can be later queried. """
def set_user(self, user):
    """Set the user of this segment (one per segment; indexed for
    later queries)."""
    super(Segment, self)._check_ended()
    self.user = user
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_rule_name(self, rule_name): """ Add the matched centralized sampling rule name if a segment is sampled because of that rule. This method should be only used by the recorder. """
def set_rule_name(self, rule_name):
    """Record the matched centralized sampling rule name.

    Recorder-internal: only called when the segment was sampled because
    of that rule.
    """
    # Replace a missing/falsy 'xray' entry with a fresh dict.
    if not self.aws.get('xray'):
        self.aws['xray'] = {}
    self.aws['xray']['sampling_rule_name'] = rule_name
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def inject_trace_header(headers, entity): """ Extract trace id, entity id and sampling decision from the input entity and inject these information to headers. :param dict headers: http headers to inject :param Entity entity: trace entity that the trace header value generated from. """
def inject_trace_header(headers, entity):
    """Inject trace id, entity id and sampling decision into ``headers``.

    :param dict headers: http headers to mutate.
    :param Entity entity: entity the trace header value is derived from.
    """
    if not entity:
        return
    # Subsegments carry the origin header on their parent segment.
    if hasattr(entity, 'type') and entity.type == 'subsegment':
        origin_header = entity.parent_segment.get_origin_trace_header()
    else:
        origin_header = entity.get_origin_trace_header()
    outgoing = TraceHeader(
        root=entity.trace_id,
        parent=entity.id,
        sampled=entity.sampled,
        data=origin_header.data if origin_header else None,
    )
    headers[http.XRAY_HEADER] = outgoing.to_header_str()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calculate_sampling_decision(trace_header, recorder, sampling_req): """ Return 1 or the matched rule name if should sample and 0 if should not. The sampling decision coming from ``trace_header`` always has the highest precedence. If the ``trace_header`` doesn't contain sampling decision then it checks if sampling is enabled or not in the recorder. If not enbaled it returns 1. Otherwise it uses user defined sampling rules to decide. """
def calculate_sampling_decision(trace_header, recorder, sampling_req):
    """Return 1 (or the matched rule name) to sample, 0 otherwise.

    The incoming ``trace_header`` decision has highest precedence; when
    absent, a recorder with sampling disabled always samples; otherwise
    the recorder's sampler decides.
    """
    if trace_header.sampled is not None and trace_header.sampled != '?':
        return trace_header.sampled
    if not recorder.sampling:
        return 1
    decision = recorder.sampler.should_trace(sampling_req)
    # Normalize a falsy decision to 0.
    return decision or 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def construct_xray_header(headers): """ Construct a ``TraceHeader`` object from dictionary headers of the incoming request. This method should always return a ``TraceHeader`` object regardless of tracing header's presence in the incoming request. """
def construct_xray_header(headers):
    """Build a ``TraceHeader`` from incoming request headers.

    Always returns a ``TraceHeader``, even when no tracing header is
    present in the request.
    """
    header_str = headers.get(http.XRAY_HEADER) or headers.get(http.ALT_XRAY_HEADER)
    return TraceHeader.from_header_str(header_str) if header_str else TraceHeader()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calculate_segment_name(host_name, recorder): """ Returns the segment name based on recorder configuration and input host name. This is a helper generally used in web framework middleware where a host name is available from incoming request's headers. """
def calculate_segment_name(host_name, recorder):
    """Return the segment name from recorder config and the host name.

    Middleware helper: uses dynamic naming when configured, otherwise
    the recorder's service name.
    """
    naming = recorder.dynamic_naming
    return naming.get_name(host_name) if naming else recorder.service
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prepare_response_header(origin_header, segment): """ Prepare a trace header to be inserted into response based on original header and the request segment. """
def prepare_response_header(origin_header, segment):
    """Build the trace header string for the response.

    The sampling decision is echoed back only when the caller asked for
    it with ``Sampled=?``.
    """
    if origin_header and origin_header.sampled == '?':
        outgoing = TraceHeader(root=segment.trace_id, sampled=segment.sampled)
    else:
        outgoing = TraceHeader(root=segment.trace_id)
    return outgoing.to_header_str()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_snake_case(name): """ Convert the input string to snake-cased string. """
def to_snake_case(name):
    """Convert ``name`` to a snake-cased string."""
    # Two passes: split before capitalized words, then break up
    # acronym runs, and lowercase the result.
    with_breaks = first_cap_re.sub(r'\1_\2', name)
    return all_cap_re.sub(r'\1_\2', with_breaks).lower()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def patch(): """ Patch botocore client so it generates subsegments when calling AWS services. """
def patch():
    """Patch botocore so AWS service calls produce subsegments."""
    # Idempotent: bail out if botocore has already been patched.
    if hasattr(botocore.client, '_xray_enabled'):
        return
    setattr(botocore.client, '_xray_enabled', True)

    # Trace every API call ...
    wrapt.wrap_function_wrapper(
        'botocore.client',
        'BaseClient._make_api_call',
        _xray_traced_botocore,
    )
    # ... and propagate the trace header on outgoing requests.
    wrapt.wrap_function_wrapper(
        'botocore.endpoint',
        'Endpoint.prepare_request',
        inject_header,
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def configure(self, sampling=None, plugins=None, context_missing=None, sampling_rules=None, daemon_address=None, service=None, context=None, emitter=None, streaming=None, dynamic_naming=None, streaming_threshold=None, max_trace_back=None, sampler=None, stream_sql=True): """Configure global X-Ray recorder. Configure needs to run before patching thrid party libraries to avoid creating dangling subsegment. :param bool sampling: If sampling is enabled, every time the recorder creates a segment it decides whether to send this segment to the X-Ray daemon. This setting is not used if the recorder is running in AWS Lambda. The recorder always respect the incoming sampling decisions regardless of this setting. :param sampling_rules: Pass a set of local custom sampling rules. Can be an absolute path of the sampling rule config json file or a dictionary that defines those rules. This will also be the fallback rules in case of centralized sampling opted-in while the cetralized sampling rules are not available. :param sampler: The sampler used to make sampling decisions. The SDK provides two built-in samplers. One is centralized rules based and the other is local rules based. The former is the default. :param tuple plugins: plugins that add extra metadata to each segment. Currently available plugins are EC2Plugin, ECS plugin and ElasticBeanstalkPlugin. If you want to disable all previously enabled plugins, pass an empty tuple ``()``. :param str context_missing: recorder behavior when it tries to mutate a segment or add a subsegment but there is no active segment. RUNTIME_ERROR means the recorder will raise an exception. LOG_ERROR means the recorder will only log the error and do nothing. :param str daemon_address: The X-Ray daemon address where the recorder sends data to. 
:param str service: default segment name if creating a segment without providing a name. :param context: You can pass your own implementation of context storage for active segment/subsegment by overriding the default ``Context`` class. :param emitter: The emitter that sends a segment/subsegment to the X-Ray daemon. You can override ``UDPEmitter`` class. :param dynamic_naming: a string that defines a pattern that host names should match. Alternatively you can pass a module which overrides ``DefaultDynamicNaming`` module. :param streaming: The streaming module to stream out trace documents when they grow too large. You can override ``DefaultStreaming`` class to have your own implementation of the streaming process. :param streaming_threshold: If breaks within a single segment it will start streaming out children subsegments. By default it is the maximum number of subsegments within a segment. :param int max_trace_back: The maxinum number of stack traces recorded by auto-capture. Lower this if a single document becomes too large. :param bool stream_sql: Whether SQL query texts should be streamed. Environment variables AWS_XRAY_DAEMON_ADDRESS, AWS_XRAY_CONTEXT_MISSING and AWS_XRAY_TRACING_NAME respectively overrides arguments daemon_address, context_missing and service. """
def configure(self, sampling=None, plugins=None,
              context_missing=None, sampling_rules=None,
              daemon_address=None, service=None,
              context=None, emitter=None, streaming=None,
              dynamic_naming=None, streaming_threshold=None,
              max_trace_back=None, sampler=None,
              stream_sql=True):
    """Configure the global X-Ray recorder.

    Must run before patching third-party libraries to avoid creating
    dangling subsegments.

    :param bool sampling: whether locally created segments are sampled
        (ignored in AWS Lambda; incoming decisions are always honored).
    :param sampling_rules: path to, or dict of, local sampling rules;
        also the fallback when centralized rules are unavailable.
    :param sampler: sampling-decision strategy; centralized rule based
        by default.
    :param tuple plugins: metadata plugins (EC2/ECS/ElasticBeanstalk);
        pass an empty tuple ``()`` to disable all enabled plugins.
    :param str context_missing: 'RUNTIME_ERROR' to raise or 'LOG_ERROR'
        to only log when there is no active segment.
    :param str daemon_address: X-Ray daemon address to send data to.
    :param str service: default segment name.
    :param context: custom active-entity storage (``Context`` override).
    :param emitter: custom daemon emitter (``UDPEmitter`` override).
    :param dynamic_naming: host-name pattern, or a module overriding
        ``DefaultDynamicNaming``.
    :param streaming: custom streaming module for oversized documents.
    :param streaming_threshold: subsegment count that triggers
        streaming of children.
    :param int max_trace_back: max stack frames recorded by
        auto-capture; must be a non-negative int.
    :param bool stream_sql: whether SQL query texts are streamed.

    Environment variables AWS_XRAY_DAEMON_ADDRESS,
    AWS_XRAY_CONTEXT_MISSING and AWS_XRAY_TRACING_NAME respectively
    override daemon_address, context_missing and service.
    """
    if sampling is not None:
        self.sampling = sampling
    if sampler:
        self.sampler = sampler
    if service:
        self.service = os.getenv(TRACING_NAME_KEY, service)
    if sampling_rules:
        self._load_sampling_rules(sampling_rules)
    if emitter:
        self.emitter = emitter
    if daemon_address:
        self.emitter.set_daemon_address(os.getenv(DAEMON_ADDR_KEY, daemon_address))
    if context:
        self.context = context
    if context_missing:
        self.context.context_missing = os.getenv(CONTEXT_MISSING_KEY, context_missing)
    if dynamic_naming:
        self.dynamic_naming = dynamic_naming
    if streaming:
        self.streaming = streaming
    if streaming_threshold:
        self.streaming_threshold = streaming_threshold
    # isinstance() instead of type() ==; bools are excluded explicitly
    # because the original type() check rejected them (bool subclasses
    # int, so behavior is preserved).
    if isinstance(max_trace_back, int) and not isinstance(max_trace_back, bool) \
            and max_trace_back >= 0:
        self.max_trace_back = max_trace_back
    if stream_sql is not None:
        self.stream_sql = stream_sql

    if plugins:
        plugin_modules = get_plugin_modules(plugins)
        for plugin in plugin_modules:
            plugin.initialize()
            if plugin.runtime_context:
                self._aws_metadata[plugin.SERVICE_NAME] = plugin.runtime_context
                self._origin = plugin.ORIGIN
    # handling explicitly using empty list to clean up plugins.
    elif plugins is not None:
        self._aws_metadata = copy.deepcopy(XRAY_META)
        self._origin = None

    if type(self.sampler).__name__ == 'DefaultSampler':
        self.sampler.load_settings(DaemonConfig(daemon_address),
                                   self.context, self._origin)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def begin_segment(self, name=None, traceid=None, parent_id=None, sampling=None): """ Begin a segment on the current thread and return it. The recorder only keeps one segment at a time. Create the second one without closing existing one will overwrite it. :param str name: the name of the segment :param str traceid: trace id of the segment :param int sampling: 0 means not sampled, 1 means sampled """
def begin_segment(self, name=None, traceid=None, parent_id=None, sampling=None):
    """Begin a segment on the current thread and return it.

    Only one segment is kept at a time; beginning a second one without
    closing the first overwrites it.

    :param str name: segment name; falls back to the service name.
    :param str traceid: trace id of the segment.
    :param int sampling: 0 means not sampled, 1 means sampled.
    """
    seg_name = name or self.service
    if not seg_name:
        raise SegmentNameMissingException("Segment name is required.")

    # With the SDK disabled every segment must become a dummy, so the
    # decision is forced to "not sampled" and the sampler (with its
    # poller threads) is never consulted.
    if not global_sdk_config.sdk_enabled():
        sampling = 0

    # An explicit incoming sampling decision always beats the recorder
    # configuration. A sampled decision may be 1 or a rule name string,
    # depending on whether local or centralized sampling took effect.
    if sampling == 0:
        decision = False
    elif sampling:
        decision = sampling
    elif self.sampling:
        decision = self._sampler.should_trace()
    else:
        decision = True

    if decision:
        segment = Segment(name=seg_name, traceid=traceid, parent_id=parent_id)
        self._populate_runtime_context(segment, decision)
    else:
        segment = DummySegment(seg_name)

    self.context.put_segment(segment)
    return segment
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def end_segment(self, end_time=None): """ End the current segment and send it to X-Ray daemon if it is ready to send. Ready means segment and all its subsegments are closed. :param float end_time: segment compeletion in unix epoch in seconds. """
def end_segment(self, end_time=None):
    """End the current segment; send it once every subsegment is closed.

    :param float end_time: segment completion in unix epoch seconds.
    """
    self.context.end_segment(end_time)
    segment = self.current_segment()
    # Ready means the segment and all its subsegments are closed.
    if segment is not None and segment.ready_to_send():
        self._send_segment()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def current_segment(self): """ Return the currently active segment. In a multithreading environment, this will make sure the segment returned is the one created by the same thread. """
def current_segment(self):
    """Return the active segment for the calling thread.

    When the active entity is a subsegment, its parent segment is
    returned instead.
    """
    entity = self.get_trace_entity()
    return entity.parent_segment if self._is_subsegment(entity) else entity
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def begin_subsegment(self, name, namespace='local'): """ Begin a new subsegment. If there is open subsegment, the newly created subsegment will be the child of latest opened subsegment. If not, it will be the child of the current open segment. :param str name: the name of the subsegment. :param str namespace: currently can only be 'local', 'remote', 'aws'. """
def begin_subsegment(self, name, namespace='local'):
    """Begin a new subsegment under the deepest open entity.

    :param str name: subsegment name.
    :param str namespace: one of 'local', 'remote', 'aws'.
    :return: the new subsegment, or ``None`` when no segment is open.
    """
    segment = self.current_segment()
    if not segment:
        # Lazy %-style logging args instead of eager interpolation.
        log.warning("No segment found, cannot begin subsegment %s.", name)
        return None

    # Unsampled segments only get dummy children.
    if segment.sampled:
        subsegment = Subsegment(name, namespace, segment)
    else:
        subsegment = DummySubsegment(segment, name)

    self.context.put_subsegment(subsegment)
    return subsegment
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def end_subsegment(self, end_time=None): """ End the current active subsegment. If this is the last one open under its parent segment, the entire segment will be sent. :param float end_time: subsegment compeletion in unix epoch in seconds. """
def end_subsegment(self, end_time=None):
    """End the active subsegment; send or stream the segment as needed.

    :param float end_time: subsegment completion in unix epoch seconds.
    """
    if not self.context.end_subsegment(end_time):
        return
    # If the whole segment is now closed, ship it; otherwise consider
    # streaming out finished subsegments to keep the document small.
    if self.current_segment().ready_to_send():
        self._send_segment()
    else:
        self.stream_subsegments()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def put_annotation(self, key, value): """ Annotate current active trace entity with a key-value pair. Annotations will be indexed for later search query. :param str key: annotation key :param object value: annotation value. Any type other than string/number/bool will be dropped """
def put_annotation(self, key, value):
    """Annotate the active entity with an indexed key-value pair.

    Silently dropped when there is no sampled active entity.
    """
    entity = self.get_trace_entity()
    if not entity or not entity.sampled:
        return
    entity.put_annotation(key, value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def put_metadata(self, key, value, namespace='default'): """ Add metadata to the current active trace entity. Metadata is not indexed but can be later retrieved by BatchGetTraces API. :param str namespace: optional. Default namespace is `default`. It must be a string and prefix `AWS.` is reserved. :param str key: metadata key under specified namespace :param object value: any object that can be serialized into JSON string """
def put_metadata(self, key, value, namespace='default'):
    """Add metadata to the active entity under ``namespace``.

    Silently dropped when there is no sampled active entity.
    """
    entity = self.get_trace_entity()
    if not entity or not entity.sampled:
        return
    entity.put_metadata(key, value, namespace)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stream_subsegments(self): """ Stream all closed subsegments to the daemon and remove reference to the parent segment. No-op for a not sampled segment. """
def stream_subsegments(self):
    """Stream closed subsegments to the daemon when the segment is
    eligible; no-op for unsampled segments."""
    segment = self.current_segment()
    if self.streaming.is_eligible(segment):
        self.streaming.stream(segment, self._stream_subsegment_out)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _send_segment(self): """ Send the current segment to X-Ray daemon if it is present and sampled, then clean up context storage. The emitter will handle failures. """
def _send_segment(self):
    """Emit the current segment when sampled, then clear context
    storage; emitter failures are handled by the emitter itself."""
    segment = self.current_segment()
    if not segment:
        return
    if segment.sampled:
        self.emitter.send_entity(segment)
    self.clear_trace_entities()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def applies(self, host, method, path): """ Determines whether or not this sampling rule applies to the incoming request based on some of the request's parameters. Any None parameters provided will be considered an implicit match. """
def applies(self, host, method, path):
    """Report whether this sampling rule applies to the request.

    A ``None`` parameter is an implicit match.
    """
    checks = (
        (self.host, host),
        (self.method, method),
        (self.path, path),
    )
    return all(not actual or wildcard_match(pattern, actual)
               for pattern, actual in checks)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ready(self): """ Configure global XRay recorder based on django settings under XRAY_RECORDER namespace. This method could be called twice during server startup because of base command and reload command. So this function must be idempotent """
def ready(self):
    """Configure the global X-Ray recorder from Django settings.

    May be called twice during server startup (base command + reload
    command), so everything here must stay idempotent.
    """
    if not settings.AWS_XRAY_TRACING_NAME:
        raise SegmentNameMissingException('Segment name is required.')

    xray_recorder.configure(
        daemon_address=settings.AWS_XRAY_DAEMON_ADDRESS,
        sampling=settings.SAMPLING,
        sampling_rules=settings.SAMPLING_RULES,
        context_missing=settings.AWS_XRAY_CONTEXT_MISSING,
        plugins=settings.PLUGINS,
        service=settings.AWS_XRAY_TRACING_NAME,
        dynamic_naming=settings.DYNAMIC_NAMING,
        streaming_threshold=settings.STREAMING_THRESHOLD,
        max_trace_back=settings.MAX_TRACE_BACK,
        stream_sql=settings.STREAM_SQL,
    )

    if settings.PATCH_MODULES:
        parent_name = settings.AUTO_PATCH_PARENT_SEGMENT_NAME
        if parent_name is None:
            patch(settings.PATCH_MODULES,
                  ignore_module_patterns=settings.IGNORE_MODULE_PATTERNS)
        else:
            with xray_recorder.in_segment(parent_name):
                patch(settings.PATCH_MODULES,
                      ignore_module_patterns=settings.IGNORE_MODULE_PATTERNS)

    # Optionally instrument Django's built-in database access and
    # template rendering; failures are best-effort and only logged.
    if settings.AUTO_INSTRUMENT:
        try:
            patch_db()
        except Exception:
            log.debug('failed to patch Django built-in database')
        try:
            patch_template()
        except Exception:
            log.debug('failed to patch Django built-in template engine')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(self): """ Start rule poller and target poller once X-Ray daemon address and context manager is in place. """
def start(self):
    """Start rule and target pollers exactly once, guarded by the lock.

    No-op while the SDK is disabled.
    """
    if not global_sdk_config.sdk_enabled():
        return
    with self._lock:
        if self._started:
            return
        self._rule_poller.start()
        self._target_poller.start()
        self._started = True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def should_trace(self, sampling_req=None): """ Return the matched sampling rule name if the sampler finds one and decide to sample. If no sampling rule matched, it falls back to the local sampler's ``should_trace`` implementation. All optional arguments are extracted from incoming requests by X-Ray middleware to perform path based sampling. """
def should_trace(self, sampling_req=None):
    """Return the matched rule name when the sampler decides to sample.

    Falls back to the local sampler's ``should_trace`` when no
    centralized rule matches. ``sampling_req`` fields are extracted by
    middleware for path-based sampling.
    """
    if not global_sdk_config.sdk_enabled():
        return False
    if not self._started:
        # Pollers spawn lazily, only in processes that actually sample.
        self.start()

    now = int(time.time())
    if sampling_req and not sampling_req.get('service_type', None):
        sampling_req['service_type'] = self._origin
    elif sampling_req is None:
        sampling_req = {'service_type': self._origin}

    matched_rule = self._cache.get_matched_rule(sampling_req, now)
    if matched_rule:
        log.debug('Rule %s is selected to make a sampling decision.', matched_rule.name)
        return self._process_matched_rule(matched_rule, now)

    log.info('No effective centralized sampling rule match. Fallback to local rules.')
    return self._local_sampler.should_trace(sampling_req)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def patch(): """Patch PynamoDB so it generates subsegments when calling DynamoDB."""
def patch():
    """Patch PynamoDB so DynamoDB calls generate subsegments."""
    import pynamodb  # ensure PynamoDB is importable before patching

    # Idempotent: bail out if the vendored session was already patched.
    if hasattr(botocore.vendored.requests.sessions, '_xray_enabled'):
        return
    setattr(botocore.vendored.requests.sessions, '_xray_enabled', True)

    wrapt.wrap_function_wrapper(
        'botocore.vendored.requests.sessions',
        'Session.send',
        _xray_traced_pynamodb,
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def end_segment(self, end_time=None): """ End the current active segment. :param int end_time: epoch in seconds. If not specified the current system time will be used. """
def end_segment(self, end_time=None):
    """Close the active segment (via a subsegment's parent if needed).

    :param int end_time: epoch seconds; defaults to the current time.
    """
    entity = self.get_trace_entity()
    if not entity:
        log.warning("No segment to end")
        return
    target = entity.parent_segment if self._is_subsegment(entity) else entity
    target.close(end_time)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def put_subsegment(self, subsegment): """ Store the subsegment created by ``xray_recorder`` to the context. If you put a new subsegment while there is already an open subsegment, the new subsegment becomes the child of the existing subsegment. """
def put_subsegment(self, subsegment):
    """Store a recorder-created subsegment in the context.

    A subsegment opened while another one is active becomes a child of
    the already-open subsegment.
    """
    entity = self.get_trace_entity()
    if not entity:
        # Lazy %-style logging args instead of eager interpolation.
        log.warning("Active segment or subsegment not found. Discarded %s.", subsegment.name)
        return
    entity.add_subsegment(subsegment)
    self._local.entities.append(subsegment)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def end_subsegment(self, end_time=None): """ End the current active segment. Return False if there is no subsegment to end. :param int end_time: epoch in seconds. If not specified the current system time will be used. """
def end_subsegment(self, end_time=None):
    """Close the active subsegment; return False when none is open.

    :param int end_time: epoch seconds; defaults to the current time.
    """
    subsegment = self.get_trace_entity()
    if not self._is_subsegment(subsegment):
        log.warning("No subsegment to end.")
        return False
    subsegment.close(end_time)
    self._local.entities.pop()
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def handle_context_missing(self): """ Called whenever there is no trace entity to access or mutate. """
def handle_context_missing(self):
    """React to a missing trace entity per the configured strategy.

    Always logs; additionally raises under 'RUNTIME_ERROR'.
    """
    log.error(MISSING_SEGMENT_MSG)
    if self.context_missing == 'RUNTIME_ERROR':
        raise SegmentNotFoundException(MISSING_SEGMENT_MSG)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_in_lambda(): """ Return None if SDK is not loaded in AWS Lambda worker. Otherwise drop a touch file and return a lambda context. """
def check_in_lambda():
    """Return a LambdaContext when running inside an AWS Lambda worker,
    otherwise ``None``.

    Also drops a touch file to signal SDK initialization.
    """
    if not os.getenv(LAMBDA_TASK_ROOT_KEY):
        return None

    try:
        os.mkdir(TOUCH_FILE_DIR)
    except OSError:
        log.debug('directory %s already exists', TOUCH_FILE_DIR)

    try:
        # Context manager guarantees the handle is closed even when
        # the follow-up utime call raises.
        with open(TOUCH_FILE_PATH, 'w+'):
            pass
        # utime forces the second parameter on python2.7.
        os.utime(TOUCH_FILE_PATH, None)
    except (IOError, OSError):
        # Lazy %-style logging args instead of eager interpolation.
        log.warning("Unable to write to %s. Failed to signal SDK initialization.", TOUCH_FILE_PATH)

    return LambdaContext()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _refresh_context(self): """ Get current facade segment. To prevent resource leaking in Lambda worker, every time there is segment present, we compare its trace id to current environment variables. If it is different we create a new facade segment and clean up subsegments stored. """
def _refresh_context(self):
    """Replace the facade segment when the trace id changed.

    Prevents subsegment leaks across Lambda invocations by comparing
    the stored segment's trace id against the current environment
    variable header.
    """
    header_str = os.getenv(LAMBDA_TRACE_HEADER_KEY)
    trace_header = TraceHeader.from_header_str(header_str)
    if not global_sdk_config.sdk_enabled():
        trace_header._sampled = False

    segment = getattr(self._local, 'segment', None)
    # Reuse the existing facade only when it belongs to this trace.
    if segment and (not trace_header.root or trace_header.root == segment.trace_id):
        return
    self._initialize_context(trace_header)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _initialize_context(self, trace_header): """ Create a facade segment based on environment variables set by AWS Lambda and initialize storage for subsegments. """
def _initialize_context(self, trace_header):
    """Create a facade segment from Lambda environment data and reset
    the subsegment storage."""
    if not global_sdk_config.sdk_enabled():
        # Disabled SDK: force children to become DummySegments.
        sampled = False
    elif trace_header.sampled == 0:
        sampled = False
    elif trace_header.sampled == 1:
        sampled = True
    else:
        sampled = None

    segment = FacadeSegment(
        name='facade',
        traceid=trace_header.root,
        entityid=trace_header.parent,
        sampled=sampled,
    )
    setattr(self._local, 'segment', segment)
    setattr(self._local, 'entities', [])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_sdk_enabled(cls, value): """ Modifies the enabled flag if the "AWS_XRAY_SDK_ENABLED" environment variable is not set, otherwise, set the enabled flag to be equal to the environment variable. If the env variable is an invalid string boolean, it will default to true. :param bool value: Flag to set whether the SDK is enabled or disabled. Environment variables AWS_XRAY_SDK_ENABLED overrides argument value. """
def set_sdk_enabled(cls, value):
    """Set the SDK enabled flag.

    The AWS_XRAY_SDK_ENABLED environment variable always takes
    precedence over the argument; an invalid (non-bool) argument
    defaults to True with a warning.
    """
    if cls.XRAY_ENABLED_KEY in os.environ:
        env_value = str(os.getenv(cls.XRAY_ENABLED_KEY, 'true'))
        cls.__SDK_ENABLED = env_value.lower() != 'false'
    elif isinstance(value, bool):
        # Equivalent to the type() check: bool cannot be subclassed.
        cls.__SDK_ENABLED = value
    else:
        cls.__SDK_ENABLED = True
        log.warning("Invalid parameter type passed into set_sdk_enabled(). Defaulting to True...")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _xray_register_type_fix(wrapped, instance, args, kwargs): """Send the actual connection or curser to register type."""
def _xray_register_type_fix(wrapped, instance, args, kwargs):
    """Send the actual connection or cursor to register type.

    Unwraps the X-Ray proxy so the underlying driver receives the real
    object.
    """
    # list() already copies; the original copy.copy() on the tuple was
    # redundant (shallow-copying a tuple returns the same tuple).
    our_args = list(args)
    if len(our_args) == 2 and isinstance(our_args[1], (XRayTracedConn, XRayTracedCursor)):
        our_args[1] = our_args[1].__wrapped__
    return wrapped(*our_args, **kwargs)
def from_header_str(cls, header):
    """
    Create a TraceHeader object from a tracing header string
    extracted from a http request's headers.

    A missing or malformed header yields a default TraceHeader; a
    malformed header is logged and never raises to the caller.
    """
    if not header:
        return cls()

    try:
        params = header.strip().split(HEADER_DELIMITER)
        header_dict = {}
        data = {}

        for param in params:
            # Split on the first '=' only so values that themselves
            # contain '=' are preserved intact.
            entry = param.split('=', 1)
            key = entry[0]
            if key in (ROOT, PARENT, SAMPLE):
                header_dict[key] = entry[1]
            # Ignore any "Self=" trace ids injected from ALB.
            elif key != SELF:
                data[key] = entry[1]

        return cls(
            root=header_dict.get(ROOT, None),
            parent=header_dict.get(PARENT, None),
            sampled=header_dict.get(SAMPLE, None),
            data=data,
        )
    except Exception:
        # A broken tracing header must never break request handling.
        log.warning("malformed tracing header %s, ignore.", header)
        return cls()
def to_header_str(self):
    """
    Convert this trace header to the string form that can be injected
    into outgoing http request headers.
    """
    parts = []

    # Emit the well-known fields first, in canonical order.
    if self.root:
        parts.append('{0}={1}'.format(ROOT, self.root))
    if self.parent:
        parts.append('{0}={1}'.format(PARENT, self.parent))
    if self.sampled is not None:
        parts.append('{0}={1}'.format(SAMPLE, self.sampled))

    # Then any extra key/value pairs, in their stored order.
    if self.data:
        for key, value in self.data.items():
            parts.append('{0}={1}'.format(key, value))

    return HEADER_DELIMITER.join(parts)
def get_name(self, host_name):
    """
    Return the segment name based on the input host name.

    The host name itself is used when it matches the configured
    pattern; otherwise the configured fallback name is returned.
    """
    matched = wildcard_match(self._pattern, host_name)
    return host_name if matched else self._fallback
def set_daemon_address(self, address):
    """
    Set up UDP ip and port from the raw daemon address
    string using ``DaemonConfig`` class utilities.
    """
    # A falsy address leaves the current ip/port untouched.
    if not address:
        return
    daemon_config = DaemonConfig(address)
    self._ip = daemon_config.udp_ip
    self._port = daemon_config.udp_port
def patch():
    """
    Patch aiobotocore client so it generates subsegments
    when calling AWS services.
    """
    # Idempotence guard: only patch the module once per process.
    if hasattr(aiobotocore.client, '_xray_enabled'):
        return
    setattr(aiobotocore.client, '_xray_enabled', True)

    targets = (
        ('aiobotocore.client', 'AioBaseClient._make_api_call', _xray_traced_aiobotocore),
        ('aiobotocore.endpoint', 'AioEndpoint.prepare_request', inject_header),
    )
    for module, attribute, wrapper in targets:
        wrapt.wrap_function_wrapper(module, attribute, wrapper)
def is_eligible(self, segment):
    """
    A segment is eligible to have its children subsegments streamed
    if it is sampled and it breaches the streaming threshold.
    """
    # Positive-condition form: only a truthy, sampled segment can qualify.
    if segment and segment.sampled:
        return segment.get_total_subsegments_size() > self.streaming_threshold
    return False
def stream(self, entity, callback):
    """
    Stream out all eligible children of the input entity.

    :param entity: The target entity to be streamed.
    :param callback: The function that takes the node and actually send it out.
    """
    # Hold the instance lock for the whole operation so only one
    # streaming pass runs at a time; _stream does the actual traversal.
    with self._lock:
        self._stream(entity, callback)
def parse_bind(bind):
    """Parses a connection string and creates SQL trace metadata.

    :param bind: a SQLAlchemy ``Connection`` or ``Engine``.
    :return: dict with ``database_type``, ``url`` (password stripped) and,
        when available, ``user``; ``None`` when the engine repr is not in
        the expected ``Engine(...)`` form.
    """
    if isinstance(bind, Connection):
        engine = bind.engine
    else:
        engine = bind
    m = re.match(r"Engine\((.*?)\)", str(engine))
    if m is None:
        # Unrecognized engine repr; no metadata can be extracted.
        return None

    u = urlparse(m.group(1))
    # Add scheme to uses_netloc or '//' will be missing from the url.
    # Guard against appending duplicates: the module-level list would
    # otherwise grow on every call.
    if u.scheme not in uses_netloc:
        uses_netloc.append(u.scheme)

    if u.password is None:
        safe_url = u.geturl()
    else:
        # Strip password from URL
        host_info = u.netloc.rpartition('@')[-1]
        parts = u._replace(netloc='{}@{}'.format(u.username, host_info))
        safe_url = parts.geturl()

    sql = {
        'database_type': u.scheme,
        'url': safe_url,
    }
    if u.username is not None:
        sql['user'] = "{}".format(u.username)
    return sql
def add_subsegment(self, subsegment):
    """
    Add input subsegment as a child subsegment and increment
    reference counter and total subsegments counter of the
    parent segment.
    """
    super(Subsegment, self).add_subsegment(subsegment)
    # Keep the parent segment's bookkeeping in sync with the new child.
    self.parent_segment.increment()
def remove_subsegment(self, subsegment):
    """
    Remove input subsegment from child subsegments and decrement
    parent segment total subsegments count.

    :param Subsegment subsegment: subsegment to remove.
    """
    super(Subsegment, self).remove_subsegment(subsegment)
    # NOTE(review): asymmetric with add_subsegment, which calls
    # increment(); presumably the reference counter is decremented
    # elsewhere — confirm against the parent segment implementation.
    self.parent_segment.decrement_subsegments_size()