text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def convert_to_int(value): """Attempts to convert a specified value to an integer :param value: Content to be converted into an integer :type value: string or int """
if not value: return None # Apart from numbers also accept values that end with px if isinstance(value, str): value = value.strip(' px') try: return int(value) except (TypeError, ValueError): return None
def parse_oembed_data(oembed_data, data):
    """Merge an OEmbed response into lassie's response dictionary.

    :param oembed_data: OEmbed response data.
    :type oembed_data: dict
    :param data: Reference to the data variable being updated.
    :type data: dict
    :returns: the same ``data`` dict, updated in place
    """
    data.update({
        'oembed': oembed_data,
    })

    _type = oembed_data.get('type')
    provider_name = oembed_data.get('provider_name')
    if not _type:
        return data

    title = oembed_data.get('title')
    if title:
        data.update({
            'title': title,
        })

    if _type == 'video':
        try:
            video = {
                'width': convert_to_int(oembed_data.get('width')),
                'height': convert_to_int(oembed_data.get('height')),
            }
            if provider_name in ['YouTube', ]:
                video['src'] = HYPERLINK_PATTERN.search(oembed_data.get('html')).group(0)
            data['videos'].append(video)
        except Exception:
            # best effort: a malformed video payload must not break parsing
            pass

    thumbnail_url = oembed_data.get('thumbnail_url')
    if thumbnail_url:
        data['images'].append({
            'width': convert_to_int(oembed_data.get('thumbnail_width')),
            'height': convert_to_int(oembed_data.get('thumbnail_height')),
            'src': thumbnail_url,
        })

    return data
def _filter_meta_data(self, source, soup, data, url=None):
    """This method filters the web page content for meta tags that match patterns given in the ``FILTER_MAPS``

    :param source: The key of the meta dictionary in ``FILTER_MAPS['meta']``
    :type source: string
    :param soup: BeautifulSoup instance to find meta tags
    :type soup: instance
    :param data: The response dictionary to manipulate
    :type data: (dict)
    """
    meta = FILTER_MAPS['meta'][source]
    meta_map = meta['map']

    # all <meta> tags whose key attribute matches the configured pattern
    html = soup.find_all('meta', {meta['key']: meta['pattern']})

    # accumulate image/video properties across tags, appended once at the end
    image = {}
    video = {}

    for line in html:
        prop = line.get(meta['key'])
        value = line.get('content')
        _prop = meta_map.get(prop)

        if prop in meta_map and _prop and not data.get(_prop):
            # this could be bad in cases where any values that the property
            # is mapped up to (i.e. "src", "type", etc) are found in ``data``
            # TODO: Figure out a smoother way to prevent conflicts ^^^^^^^^
            image_prop = meta['image_key']
            video_prop = meta['video_key']

            if prop.startswith((image_prop, video_prop)) and \
                    prop.endswith(('width', 'height')):
                # NOTE(review): this inner check repeats the outer
                # ``endswith`` and is therefore always true here — one of
                # the two looks redundant; confirm intended nesting.
                if prop.endswith(('width', 'height')):
                    value = convert_to_int(value)

            if meta_map[prop] == 'locale':
                locale = normalize_locale(value)
                if locale:
                    data['locale'] = locale

            if prop == 'keywords':
                # keywords are exposed as a list, split on commas
                if isinstance(value, str):
                    value = [v.strip() for v in value.split(',')]
                else:
                    value = []

            if image_prop and prop.startswith(image_prop) and value:
                # og:image URLs can be relative
                if prop == 'og:image' and url:
                    value = urljoin(url, value)
                image[meta_map[prop]] = value
            elif video_prop and prop.startswith(video_prop) and value:
                video[meta_map[prop]] = value
            else:
                data[meta_map[prop]] = value

    if image:
        # ``image_prop`` is bound inside the loop; guarded by ``if image``
        image['type'] = image_prop
        data['images'].append(image)
    if video:
        data['videos'].append(video)
def _filter_link_tag_data(self, source, soup, data, url):
    """Collect <link> tag data matching the ``FILTER_MAPS['link']`` patterns.

    :param source: The key of the meta dictionary in ``FILTER_MAPS['link']``
    :type source: string
    :param soup: BeautifulSoup instance to find meta tags
    :type soup: instance
    :param data: The response dictionary to manipulate
    :type data: (dict)
    :param url: URL used for making an absolute url
    :type url: string
    """
    link = FILTER_MAPS['link'][source]
    matches = soup.find_all('link', {link['key']: link['pattern']})

    if link['type'] == 'url':
        # canonical-style links overwrite the page URL; last match wins
        for tag in matches:
            data['url'] = tag.get('href')
        return

    for tag in matches:
        item = {
            'src': urljoin(url, tag.get('href')),
            'type': link['type'],
        }
        data['images'].append(item)
def _find_all_images(self, soup, data, url):
    """Append a normalized entry to ``data['images']`` for every <img> tag.

    :param soup: BeautifulSoup instance to find meta tags
    :type soup: instance
    :param data: The response dictionary to manipulate
    :type data: (dict)
    """
    for image_tag in soup.find_all('img'):
        data['images'].append(normalize_image_data(image_tag, url))
def decode_mail_header(value, default_charset='us-ascii'):
    """ Decode a header value into a unicode string. """
    try:
        headers = decode_header(value)
    except email.errors.HeaderParseError:
        # unparsable header: round-trip through the default charset,
        # replacing anything that cannot be represented
        return str_decode(str_encode(value, default_charset, 'replace'), default_charset)
    else:
        for index, (text, charset) in enumerate(headers):
            # NOTE(review): the debug line decodes with 'utf-8' fallback while
            # the actual decode below falls back to ``default_charset`` —
            # confirm this asymmetry is intentional.
            logger.debug("Mail header no. {index}: {data} encoding {charset}".format(
                index=index,
                data=str_decode(text, charset or 'utf-8', 'replace'),
                charset=charset))
            try:
                headers[index] = str_decode(text, charset or default_charset, 'replace')
            except LookupError:
                # if the charset is unknown, force default
                headers[index] = str_decode(text, default_charset, 'replace')
        return ''.join(headers)
def get_mail_addresses(message, header_name):
    """Return every decoded email address found in one message header.

    Each returned entry is a dict with ``name`` and ``email`` keys.
    """
    raw_headers = [h for h in message.get_all(header_name, [])]
    addresses = email.utils.getaddresses(raw_headers)

    for index, (address_name, address_email) in enumerate(addresses):
        addresses[index] = {
            'name': decode_mail_header(address_name),
            'email': address_email,
        }
        logger.debug("{} Mail address in message: <{}> {}".format(
            header_name.upper(), address_name, address_email))
    return addresses
def generate(self, state):
    """Return a random variable if any, otherwise create a new default variable."""
    # With probability growing as ``count`` rises, reuse an existing variable
    # instead of minting a new one.
    if self.count >= random.randint(DharmaConst.VARIABLE_MIN, DharmaConst.VARIABLE_MAX):
        return "%s%d" % (self.var, random.randint(1, self.count))
    # Otherwise create a fresh variable: pick a (prefix, suffix) template,
    # evaluate both halves, and record the declaration in ``self.default``.
    var = random.choice(self)
    prefix = self.eval(var[0], state)
    suffix = self.eval(var[1], state)
    self.count += 1
    element_name = "%s%d" % (self.var, self.count)
    self.default += "%s%s%s\n" % (prefix, element_name, suffix)
    return element_name
def process_settings(self, settings):
    """A lazy way of feeding Dharma with configuration settings."""
    logging.debug("Using configuration from: %s", settings.name)
    # SECURITY: this executes arbitrary Python read from the settings file —
    # only ever feed it trusted configuration.
    # NOTE(review): passing ``locals()`` as the exec namespace inside a
    # function means local assignments made by the settings code are
    # discarded after the call returns — confirm the settings are expected
    # to act via ``globals()`` / attribute mutation only.
    exec(compile(settings.read(), settings.name, 'exec'), globals(), locals())
def parse_xrefs(self, token):
    """Search token for +value+ and !variable! style references. Be careful to not xref a new variable.

    Returns a list of parsed nodes (String plus the various Meta*/XRef
    classes), covering the whole token in order.
    """
    out, end = [], 0
    token = token.replace("\\n", "\n")
    for m in re.finditer(self.xref_registry, token, re.VERBOSE | re.DOTALL):
        # plain text between the previous match and this one
        if m.start(0) > end:
            out.append(String(token[end:m.start(0)], self.current_obj))
        end = m.end(0)
        if m.group("type"):
            # +value+ / !variable! / @element@ style cross references
            xref_type = {"+": ValueXRef, "!": VariableXRef, "@": ElementXRef}[m.group("type")]
            out.append(xref_type(m.group("xref"), self.current_obj))
        elif m.group("uri") is not None:
            path = m.group("uri")
            out.append(MetaURI(path, self.current_obj))
        elif m.group("repeat") is not None:
            # repeat meta: recursively parse the repeated body
            repeat, separator, nodups = m.group("repeat", "separator", "nodups")
            if separator is None:
                separator = ""
            if nodups is None:
                nodups = ""
            out.append(MetaRepeat(self.parse_xrefs(repeat), separator, nodups, self.current_obj))
        elif m.group("block") is not None:
            path = m.group("block")
            out.append(MetaBlock(path, self.current_obj))
        elif m.group("choices") is not None:
            choices = m.group("choices")
            out.append(MetaChoice(choices, self.current_obj))
        else:
            # numeric range meta is the fall-through case
            startval, endval = m.group("start", "end")
            out.append(MetaRange(startval, endval, self.current_obj))
    # trailing text after the final match
    if end < len(token):
        out.append(String(token[end:], self.current_obj))
    return out
def calculate_leaf_paths(self):
    """Build a reverse cross-reference map, then walk backwards from every
    leaf marking the path to it.
    """
    reverse_xref = {}
    leaves = set()
    for symbol in self.value.values():
        if symbol.leaf:
            leaves.add(symbol)
        for ref in symbol.value_xref:
            reverse_xref.setdefault(ref, []).append(symbol.ident)
    for leaf in leaves:
        self.calculate_leaf_path(leaf, reverse_xref)
def generate_content(self):
    """Generates a test case as a string."""
    # Setup pre-conditions.
    if not self.variance:
        logging.error("%s: No variance information %s", self.id(), self.variance)
        sys.exit(-1)
    # reset per-testcase variable state before generating
    for var in self.variable.values():
        var.clear()
    # Handle variances
    variances = []
    for _ in range(random.randint(DharmaConst.VARIANCE_MIN, DharmaConst.VARIANCE_MAX)):
        var = random.choice(list(self.variance.values()))
        variances.append(DharmaConst.VARIANCE_TEMPLATE % var.generate(GenState()))
        variances.append("\n")
    # Handle variables
    variables = []
    for var in self.variable.values():
        if var.default:
            variables.append(DharmaConst.VARIANCE_TEMPLATE % var.default)
            variables.append("\n")
    # Build content: prefix, then variable declarations, then variances
    content = "".join(chain([self.prefix], variables, variances, [self.suffix]))
    if self.template:
        # wrap generated content into the user-supplied template
        return Template(self.template).safe_substitute(testcase_content=content)
    return content
def process_grammars(self, grammars):
    """Process provided grammars by parsing them into Python objects.

    :param grammars: list of open file objects containing grammar
        definitions; the bundled default grammars are prepended first.
    """
    # Open the bundled default grammars, remembering them so we can close
    # them ourselves; caller-provided file objects remain the caller's to
    # close.
    opened_here = []
    for path in self.default_grammars:
        fo = open(os.path.relpath(os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            os.path.normcase(path))))
        opened_here.append(fo)
        grammars.insert(0, fo)
    try:
        for fo in grammars:
            logging.debug("Processing grammar content of %s", fo.name)
            # each grammar file gets its own namespace, named after the file
            self.set_namespace(os.path.splitext(os.path.basename(fo.name))[0])
            for line in fo:
                self.parse_line(line)
            self.handle_empty_line()
        self.resolve_xref()
        self.calculate_leaf_paths()
    finally:
        # Fix: previously the default grammar files were opened and never
        # closed, leaking file descriptors for the life of the process.
        for fo in opened_here:
            fo.close()
def set_preferences(request, dashboard_id):
    """
    This view serves and validates a preferences form.
    """
    try:
        preferences = DashboardPreferences.objects.get(
            user=request.user,
            dashboard_id=dashboard_id
        )
    except DashboardPreferences.DoesNotExist:
        preferences = None

    form_kwargs = {
        'user': request.user,
        'dashboard_id': dashboard_id,
        'instance': preferences,
    }

    if request.method == "POST":
        form = DashboardPreferencesForm(data=request.POST, **form_kwargs)
        if form.is_valid():
            preferences = form.save()
            if request.is_ajax():
                return HttpResponse('true')
            messages.success(request, 'Preferences saved')
        elif request.is_ajax():
            return HttpResponse('false')
    else:
        form = DashboardPreferencesForm(**form_kwargs)

    return render_to_response(
        'admin_tools/dashboard/preferences_form.html',
        {'form': form}
    )
def admin_tools_render_menu(context, menu=None):
    """
    Template tag that renders the menu, it takes an optional ``Menu`` instance
    as unique argument, if not given, the menu will be retrieved with the
    ``get_admin_menu`` function.
    """
    if menu is None:
        menu = get_admin_menu(context)
    menu.init_with_context(context)

    # Only look for a stored bookmark when the menu has a Bookmarks item.
    has_bookmark_item = any(isinstance(c, items.Bookmarks) for c in menu.children)
    bookmark = None
    if has_bookmark_item:
        url = context['request'].get_full_path()
        try:
            bookmark = Bookmark.objects.filter(
                user=context['request'].user, url=url
            )[0]
        except IndexError:
            # Fix: was a bare ``except: pass`` that also hid database and
            # programming errors; only "no bookmark for this URL" is expected.
            bookmark = None

    context.update({
        'template': menu.template,
        'menu': menu,
        'has_bookmark_item': has_bookmark_item,
        'bookmark': bookmark,
        'admin_url': reverse('%s:index' % get_admin_site_name(context)),
    })
    return context
def admin_tools_render_menu_item(context, item, index=None):
    """
    Template tag that renders a given menu item, it takes a ``MenuItem``
    instance as unique parameter.
    """
    item.init_with_context(context)
    admin_index_url = reverse('%s:index' % get_admin_site_name(context))
    context.update(dict(
        template=item.template,
        item=item,
        index=index,
        selected=item.is_selected(context['request']),
        admin_url=admin_index_url,
    ))
    return context
def admin_tools_render_menu_css(context, menu=None):
    """
    Template tag that renders the menu css files. It takes an optional
    ``Menu`` instance as unique argument; if not given, the menu is
    retrieved with the ``get_admin_menu`` function.
    """
    menu = menu if menu is not None else get_admin_menu(context)
    context.update(dict(
        template='admin_tools/menu/css.html',
        css_files=menu.Media.css,
    ))
    return context
def render_theming_css():
    """
    Template tag that renders the needed css files for the theming app.
    """
    css = getattr(settings, 'ADMIN_TOOLS_THEMING_CSS', False)
    if not css:
        # fall back to the stylesheet bundled with admin_tools
        css = '/'.join(['admin_tools', 'css', 'theming.css'])
    tag = (
        '<link rel="stylesheet" type="text/css" media="screen" href="%s" />'
        % staticfiles_storage.url(css)
    )
    return mark_safe(tag)
def add_bookmark(request):
    """
    This view serves and validates a bookmark form.
    If requested via ajax it also returns the drop bookmark form to replace
    the add bookmark form.
    """
    if request.method == "POST":
        form = BookmarkForm(user=request.user, data=request.POST)
        if form.is_valid():
            bookmark = form.save()
            if not request.is_ajax():
                messages.success(request, 'Bookmark added')
                if request.POST.get('next'):
                    return HttpResponseRedirect(request.POST.get('next'))
                return HttpResponse('Added')
            # ajax request: hand back the "remove bookmark" form so the
            # client can swap it in place of the add form
            return render_to_response(
                'admin_tools/menu/remove_bookmark_form.html',
                {'bookmark': bookmark, 'url': bookmark.url}
            )
    else:
        form = BookmarkForm(user=request.user)
    # GET request, or POST with an invalid form: (re)render the form
    return render_to_response(
        'admin_tools/menu/form.html',
        {'form': form, 'title': 'Add Bookmark'}
    )
def remove_bookmark(request, id):
    """
    This view deletes a bookmark.
    If requested via ajax it also returns the add bookmark form to replace
    the drop bookmark form.
    """
    bookmark = get_object_or_404(Bookmark, id=id, user=request.user)
    if request.method == "POST":
        bookmark.delete()
        if not request.is_ajax():
            messages.success(request, 'Bookmark removed')
            if request.POST.get('next'):
                return HttpResponseRedirect(request.POST.get('next'))
            return HttpResponse('Deleted')
        # ajax request: hand back the "add bookmark" form so the client can
        # swap it in place of the remove form
        return render_to_response(
            'admin_tools/menu/add_bookmark_form.html',
            {'url': request.POST.get('next'),
             'title': '**title**'  # replaced on the javascript side
             }
        )
    # GET request: ask for confirmation before deleting
    return render_to_response(
        'admin_tools/menu/delete_confirm.html',
        {'bookmark': bookmark, 'title': 'Delete Bookmark'}
    )
def autodiscover(blacklist=None):
    """
    Automagically discover custom dashboards and menus for installed apps.

    :param blacklist: optional list of app names that should not provide
        their own app index dashboard; the caller's list is never modified.
    """
    import imp
    from django.conf import settings
    try:
        from importlib import import_module
    except ImportError:
        # Django < 1.9 and Python < 2.7
        from django.utils.importlib import import_module

    # Fix: the old signature used a mutable default (``blacklist=[]``) and
    # appended to it, so the admin_tools entries accumulated across calls
    # and a caller-supplied list was mutated in place. Copy instead.
    blacklist = list(blacklist) if blacklist else []
    blacklist += ['admin_tools.dashboard', 'admin_tools.menu',
                  'admin_tools.theming']

    for app in settings.INSTALLED_APPS:
        # skip blacklisted apps
        if app in blacklist:
            continue
        # try to import the app
        try:
            app_path = import_module(app).__path__
        except AttributeError:
            continue
        # try to find a app.dashboard module
        try:
            imp.find_module('dashboard', app_path)
        except ImportError:
            continue
        # looks like we found it so import it !
        import_module('%s.dashboard' % app)
def render_css_classes(self):
    """
    Return a string containing the css classes for the module, e.g.::

        'dashboard-module disabled draggable collapsible deletable'
        'dashboard-module draggable collapsible deletable foo'
    """
    flags = (
        ('disabled', not self.enabled),
        ('draggable', self.draggable),
        ('collapsible', self.collapsible),
        ('deletable', self.deletable),
    )
    classes = ['dashboard-module']
    classes.extend(name for name, active in flags if active)
    classes.extend(self.css_classes)
    return ' '.join(classes)
def is_empty(self):
    """
    A group of modules is considered empty if it has no children or if
    all its children are empty.
    """
    if super(Group, self).is_empty():
        return True
    return all(child.is_empty() for child in self.children)
def get_app_index_dashboard(context):
    """
    Returns the admin dashboard defined by the user or the default one.
    """
    # this is a mess, needs cleanup !
    app = context['app_list'][0]
    model_list = []
    app_label = None
    app_title = app['name']
    admin_site = get_admin_site(context=context)

    # collect the dotted paths of the models belonging to this app, and
    # derive the app's full label from the model module path
    for model, model_admin in admin_site._registry.items():
        if app['app_label'] == model._meta.app_label:
            split = model.__module__.find(model._meta.app_label)
            app_label = model.__module__[0:split] + model._meta.app_label
            for m in app['models']:
                if m['name'] == capfirst(model._meta.verbose_name_plural):
                    mod = '%s.%s' % (model.__module__, model.__name__)
                    model_list.append(mod)

    # if an app has registered its own dashboard, use it
    if app_label is not None and app_label in Registry.registry:
        return Registry.registry[app_label](app_title, model_list)

    # try to discover a general app_index dashboard (with fallback to the
    # default dashboard)
    return _get_dashboard_cls(getattr(
        settings,
        'ADMIN_TOOLS_APP_INDEX_DASHBOARD',
        'admin_tools.dashboard.dashboards.DefaultAppIndexDashboard'
    ), context)(app_title, model_list)
def get_app_model_classes(self):
    """
    Helper method that resolves ``self.models`` (dotted paths) into the
    actual model classes for the current app.
    """
    classes = []
    for dotted_path in self.models:
        module_path, class_name = dotted_path.rsplit('.', 1)
        module = import_module(module_path)
        classes.append(getattr(module, class_name))
    return classes
def get_app_content_types(self):
    """
    Return a list of all content_types for this app.
    """
    # Import this here to silence RemovedInDjango19Warning. See #15
    from django.contrib.contenttypes.models import ContentType
    return [
        ContentType.objects.get_for_model(model_class)
        for model_class in self.get_app_model_classes()
    ]
def admin_tools_render_dashboard_module(context, module):
    """
    Template tag that renders a given dashboard module, it takes a
    ``DashboardModule`` instance as first parameter.
    """
    module.init_with_context(context)
    admin_index_url = reverse('%s:index' % get_admin_site_name(context))
    context.update(dict(
        template=module.template,
        module=module,
        admin_url=admin_index_url,
    ))
    return context
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_selected(self, request): """ Helper method that returns ``True`` if the menu item is active. A menu item is considered as active if it's URL or one of its descendants URL is equals to the current URL. """
current_url = request.get_full_path() return self.url == current_url or \ len([c for c in self.children if c.is_selected(request)]) > 0
def uniquify(value, seen_values):
    """
    Return ``value``, suffixed with a counter if needed to make it unique
    with respect to ``seen_values``, and record the result in the set.
    """
    candidate = value
    suffix = 1
    while candidate in seen_values:
        candidate = "%s%s" % (value, suffix)
        suffix += 1
    seen_values.add(candidate)
    return candidate
def default_create_thread(callback):
    """
    Default thread creation - used when the client doesn't provide its own
    thread factory.

    :param function callback: the callback function provided to threading.Thread
    """
    worker = threading.Thread(None, callback)
    # daemonize so the thread never blocks interpreter shutdown
    worker.daemon = True
    worker.start()
    return worker
def parse_headers(lines, offset=0):
    """
    Parse the headers in a STOMP response.

    The first occurrence of a repeated header key wins.

    :param list(str) lines: the lines received in the message response
    :param int offset: the starting line number
    :rtype: dict(str,str)
    """
    headers = {}
    for header_line in lines[offset:]:
        match = HEADER_LINE_RE.match(header_line)
        if not match:
            continue
        key = re.sub(r'\\.', _unescape_header, match.group('key'))
        if key in headers:
            continue
        headers[key] = re.sub(r'\\.', _unescape_header, match.group('value'))
    return headers
def parse_frame(frame):
    """
    Parse a STOMP frame into a Frame object.

    :param bytes frame: the frame received from the server (as a byte string)
    :rtype: Frame
    """
    f = Frame()
    # a single LF is a server heartbeat, not a real frame
    if frame == b'\x0a':
        f.cmd = 'heartbeat'
        return f

    # split the preamble (command + headers) from the body
    mat = PREAMBLE_END_RE.search(frame)
    if mat:
        preamble_end = mat.start()
        body_start = mat.end()
    else:
        # no body separator: the whole frame is preamble
        preamble_end = len(frame)
        body_start = preamble_end
    preamble = decode(frame[0:preamble_end])
    preamble_lines = LINE_END_RE.split(preamble)
    preamble_len = len(preamble_lines)
    f.body = frame[body_start:]

    # Skip any leading newlines
    first_line = 0
    while first_line < preamble_len and len(preamble_lines[first_line]) == 0:
        first_line += 1
    if first_line >= preamble_len:
        # nothing but blank lines: not a parsable frame
        return None

    # Extract frame type/command
    f.cmd = preamble_lines[first_line]

    # Put headers into a key/value map
    f.headers = parse_headers(preamble_lines, first_line + 1)
    return f
def merge_headers(header_map_list):
    """
    Combine multiple header maps into one; later maps win on key conflicts,
    and ``None``/empty entries are skipped.

    :param list(dict) header_map_list: list of maps
    :rtype: dict
    """
    merged = {}
    for mapping in header_map_list:
        if not mapping:
            continue
        merged.update(mapping)
    return merged
def calculate_heartbeats(shb, chb):
    """
    Given a heartbeat string from the server, and a heartbeat tuple from
    the client, negotiate the actual heartbeat settings: each direction is
    enabled only when both sides want it, using the larger interval.

    :param (str,str) shb: server heartbeat numbers
    :param (int,int) chb: client heartbeat numbers
    :rtype: (int,int)
    """
    server_x, server_y = shb
    client_x, client_y = chb
    x = max(client_x, int(server_y)) if client_x != 0 and server_y != '0' else 0
    y = max(client_y, int(server_x)) if client_y != 0 and server_x != '0' else 0
    return x, y
def convert_frame(frame, body_encoding=None):
    """
    Convert a frame to a list of lines separated by newlines.

    :param Frame frame: the Frame object to convert
    :rtype: list(str)
    """
    lines = []

    body = None
    if frame.body:
        if body_encoding:
            body = encode(frame.body, body_encoding)
        else:
            body = encode(frame.body)

        if HDR_CONTENT_LENGTH in frame.headers:
            # content-length must describe the encoded body, so recompute it
            # (note: this mutates the caller's frame headers)
            frame.headers[HDR_CONTENT_LENGTH] = len(body)

    if frame.cmd:
        lines.append(encode(frame.cmd))
        lines.append(ENC_NEWLINE)
    # headers are emitted in sorted key order for deterministic output
    for key, vals in sorted(frame.headers.items()):
        if vals is None:
            continue
        if type(vals) != tuple:
            vals = (vals,)
        for val in vals:
            lines.append(encode("%s:%s\n" % (key, val)))
    # blank line terminates the header section
    lines.append(ENC_NEWLINE)
    if body:
        lines.append(body)

    if frame.cmd:
        # NUL byte terminates a STOMP frame
        lines.append(ENC_NULL)
    return lines
def on_send(self, frame):
    """
    Add the heartbeat header to the frame when connecting, and bump the
    next outbound heartbeat timestamp.

    :param Frame frame: the Frame object
    """
    if frame.cmd in (CMD_CONNECT, CMD_STOMP) and self.heartbeats != (0, 0):
        frame.headers[HDR_HEARTBEAT] = '%s,%s' % self.heartbeats
    if self.next_outbound_heartbeat is not None:
        self.next_outbound_heartbeat = monotonic() + self.send_sleep
def on_receipt(self, headers, body):
    """
    Notify the waiting thread when the expected receipt id arrives.

    :param dict headers: headers in the message
    :param body: the message content
    """
    if 'receipt-id' not in headers:
        return
    if headers['receipt-id'] != self.receipt:
        return
    with self.receipt_condition:
        self.received = True
        self.receipt_condition.notify()
def wait_on_receipt(self):
    """
    Wait until we receive a message receipt.
    """
    with self.receipt_condition:
        # loop guards against spurious wakeups; ``received`` is set by
        # on_receipt under the same condition lock
        while not self.received:
            self.receipt_condition.wait()
        # reset the flag so the next wait starts fresh
        self.received = False
def send_frame(self, cmd, headers=None, body=''):
    """
    Encode and send a stomp frame through the underlying transport.

    :param str cmd: the protocol command
    :param dict headers: a map of headers to include in the frame
    :param body: the content of the message
    """
    # frame construction is delegated to utils; encoding happens in transmit
    frame = utils.Frame(cmd, headers, body)
    self.transport.transmit(frame)
def abort(self, transaction, headers=None, **keyword_headers):
    """
    Abort a transaction.

    :param str transaction: the identifier of the transaction
    :param dict headers: a map of any additional headers the broker requires
    :param keyword_headers: any additional headers the broker requires
    """
    assert transaction is not None, "'transaction' is required"
    merged = utils.merge_headers([headers, keyword_headers])
    merged[HDR_TRANSACTION] = transaction
    self.send_frame(CMD_ABORT, merged)
def ack(self, id, transaction=None, receipt=None):
    """
    Acknowledge 'consumption' of a message by id.

    :param str id: identifier of the message
    :param str transaction: include the acknowledgement in the specified transaction
    :param str receipt: optional receipt id to request from the broker
    """
    assert id is not None, "'id' is required"
    ack_headers = {HDR_MESSAGE_ID: id}
    if transaction:
        ack_headers[HDR_TRANSACTION] = transaction
    if receipt:
        ack_headers[HDR_RECEIPT] = receipt
    self.send_frame(CMD_ACK, ack_headers)
def commit(self, transaction=None, headers=None, **keyword_headers):
    """
    Commit a transaction.

    :param str transaction: the identifier for the transaction
    :param dict headers: a map of any additional headers the broker requires
    :param keyword_headers: any additional headers the broker requires
    """
    assert transaction is not None, "'transaction' is required"
    merged = utils.merge_headers([headers, keyword_headers])
    merged[HDR_TRANSACTION] = transaction
    self.send_frame(CMD_COMMIT, merged)
def send(self, destination, body, content_type=None, headers=None, **keyword_headers):
    """
    Send a message to a destination.

    :param str destination: the destination of the message (e.g. queue or topic name)
    :param body: the content of the message
    :param str content_type: the content type of the message
    :param dict headers: a map of any additional headers the broker requires
    :param keyword_headers: any additional headers the broker requires
    """
    assert destination is not None, "'destination' is required"
    assert body is not None, "'body' is required"
    send_headers = utils.merge_headers([headers, keyword_headers])
    send_headers[HDR_DESTINATION] = destination
    if content_type:
        send_headers[HDR_CONTENT_TYPE] = content_type
    # add a content-length only when enabled and not already supplied
    needs_length = (self.auto_content_length and body
                    and HDR_CONTENT_LENGTH not in send_headers)
    if needs_length:
        send_headers[HDR_CONTENT_LENGTH] = len(body)
    self.send_frame(CMD_SEND, send_headers, body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def subscribe(self, destination, id=None, ack='auto', headers=None, **keyword_headers): """ Subscribe to a destination. :param str destination: the topic or queue to subscribe to :param str id: a unique id to represent the subscription :param str ack: acknowledgement mode, either auto, client, or client-individual (see http://stomp.github.io/stomp-specification-1.2.html#SUBSCRIBE_ack_Header) for more information :param dict headers: a map of any additional headers the broker requires :param keyword_headers: any additional headers the broker requires """
assert destination is not None, "'destination' is required"
sub_headers = utils.merge_headers([headers, keyword_headers])
sub_headers[HDR_DESTINATION] = destination
# The subscription id header is optional at this protocol level.
if id:
    sub_headers[HDR_ID] = id
sub_headers[HDR_ACK] = ack
self.send_frame(CMD_SUBSCRIBE, sub_headers)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def unsubscribe(self, destination=None, id=None, headers=None, **keyword_headers): """ Unsubscribe from a destination by either id or the destination name. :param str destination: the name of the topic or queue to unsubscribe from :param str id: the unique identifier of the topic or queue to unsubscribe from :param dict headers: a map of any additional headers the broker requires :param keyword_headers: any additional headers the broker requires """
# At least one way of identifying the subscription must be given.
assert id is not None or destination is not None, "'id' or 'destination' is required"
frame_headers = utils.merge_headers([headers, keyword_headers])
if id:
    frame_headers[HDR_ID] = id
if destination:
    frame_headers[HDR_DESTINATION] = destination
self.send_frame(CMD_UNSUBSCRIBE, frame_headers)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connect(self, username=None, passcode=None, wait=False, headers=None, **keyword_headers): """ Start a connection. :param str username: the username to connect with :param str passcode: the password used to authenticate with :param bool wait: if True, wait for the connection to be established/acknowledged :param dict headers: a map of any additional headers the broker requires :param keyword_headers: any additional headers the broker requires """
conn_headers = utils.merge_headers([headers, keyword_headers])
conn_headers[HDR_ACCEPT_VERSION] = self.version
if self.transport.vhost:
    conn_headers[HDR_HOST] = self.transport.vhost
if username is not None:
    conn_headers[HDR_LOGIN] = username
if passcode is not None:
    conn_headers[HDR_PASSCODE] = passcode
self.send_frame(CMD_STOMP, conn_headers)
# Optionally block until the broker acknowledges the connection.
if wait:
    self.transport.wait_for_connection()
    if self.transport.connection_error:
        raise ConnectFailedException()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_socket(host, port, timeout=None): """ Return a socket. :param str host: the hostname to connect to :param int port: the port number to connect to :param timeout: if specified, set the socket timeout """
for res in getaddrinfo(host, port, 0, SOCK_STREAM): af, socktype, proto, canonname, sa = res sock = None try: sock = socket(af, socktype, proto) if timeout is not None: sock.settimeout(timeout) sock.connect(sa) return sock except error: if sock is not None: sock.close() raise error
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(self): """ Start the connection. This should be called after all listeners have been registered. If this method is not called, no frames will be received by the connection. """
# Mark the transport as active before connecting so that reconnect logic
# and the receiver loop observe a running transport.
self.running = True
self.attempt_connection()
# NOTE(review): create_thread_fc presumably creates *and starts* the
# receiver thread -- confirm, since the thread is never started
# explicitly here.
receiver_thread = self.create_thread_fc(self.__receiver_loop)
# Give the thread a recognizable name for debugging/thread dumps.
receiver_thread.name = "StompReceiver%s" % getattr(receiver_thread, "name", "Thread")
self.notify('connecting')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stop(self): """ Stop the connection. Performs a clean shutdown by waiting for the receiver thread to exit. """
# Block until the receiver thread signals that it has exited; if the
# connection is already down there is nothing left to wait for.
with self.__receiver_thread_exit_condition:
    while not self.__receiver_thread_exited and self.is_connected():
        self.__receiver_thread_exit_condition.wait()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def notify(self, frame_type, headers=None, body=None): """ Utility function for notifying listeners of incoming and outgoing messages :param str frame_type: the type of message :param dict headers: the map of headers associated with the message :param body: the content of the message """
if frame_type == 'receipt':
    # logic for wait-on-receipt notification
    receipt = headers['receipt-id']
    receipt_value = self.__receipts.get(receipt)
    with self.__send_wait_condition:
        self.set_receipt(receipt, None)
        self.__send_wait_condition.notify()
    # A receipt for our own DISCONNECT means the session is over.
    if receipt_value == CMD_DISCONNECT:
        self.set_connected(False)
        # received a stomp 1.1+ disconnect receipt
        if receipt == self.__disconnect_receipt:
            self.disconnect_socket()
        self.__disconnect_receipt = None
elif frame_type == 'connected':
    self.set_connected(True)
elif frame_type == 'disconnected':
    self.set_connected(False)
# Snapshot the listener list under the lock so listeners may be
# added/removed while we iterate.
with self.__listeners_change_condition:
    listeners = sorted(self.listeners.items())
for (_, listener) in listeners:
    if not listener:
        continue
    # Dispatch to the listener's on_<frame_type> method, if it has one.
    notify_func = getattr(listener, 'on_%s' % frame_type, None)
    if not notify_func:
        log.debug("listener %s has no method on_%s", listener, frame_type)
        continue
    # Some notifications take no arguments, some take the host/port pair.
    if frame_type in ('heartbeat', 'disconnected'):
        notify_func()
        continue
    if frame_type == 'connecting':
        notify_func(self.current_host_and_port)
        continue
    # An error before we ever connected counts as a connection failure;
    # wake up anyone blocked in wait_for_connection().
    if frame_type == 'error' and not self.connected:
        with self.__connect_wait_condition:
            self.connection_error = True
            self.__connect_wait_condition.notify()
    # Listeners may rewrite the frame by returning a (headers, body) pair.
    rtn = notify_func(headers, body)
    if rtn:
        (headers, body) = rtn
return (headers, body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transmit(self, frame): """ Convert a frame object to a frame string and transmit to the server. :param Frame frame: the Frame object to transmit """
# Give every listener a chance to observe (or veto-free inspect) the
# outgoing frame; listeners without on_send are simply skipped.
with self.__listeners_change_condition:
    listeners = sorted(self.listeners.items())
for (_, listener) in listeners:
    if not listener:
        continue
    try:
        listener.on_send(frame)
    except AttributeError:
        continue
# Remember the receipt id of an outgoing DISCONNECT so notify() can
# close the socket when the matching receipt arrives.
if frame.cmd == CMD_DISCONNECT and HDR_RECEIPT in frame.headers:
    self.__disconnect_receipt = frame.headers[HDR_RECEIPT]
lines = utils.convert_frame(frame)
packed_frame = pack(lines)
# Log the full frame only at DEBUG; at INFO just log the command name.
if log.isEnabledFor(logging.DEBUG):
    log.debug("Sending frame: %s", lines)
else:
    log.info("Sending frame: %r", frame.cmd or "heartbeat")
self.send(packed_frame)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wait_for_connection(self, timeout=None): """ Wait until we've established a connection with the server. :param float timeout: how long to wait, in seconds """
from time import monotonic

# Bug fix: the original loop waited in timeout/10 slices but never
# tracked elapsed time, so a caller-supplied `timeout` was never actually
# enforced and the call could block forever. Track a monotonic deadline.
if timeout is not None:
    wait_time = timeout / 10.0
    end_time = monotonic() + timeout
else:
    wait_time = None
    end_time = None
with self.__connect_wait_condition:
    while self.running and not self.is_connected() and not self.connection_error:
        if end_time is not None and monotonic() > end_time:
            break
        self.__connect_wait_condition.wait(wait_time)
if not self.running or not self.is_connected():
    raise exception.ConnectFailedException()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __receiver_loop(self): """ Main loop listening for incoming data. """
log.info("Starting receiver loop")
# Whether to emit a 'disconnected' notification when the loop ends.
notify_disconnected = True
try:
    while self.running:
        try:
            # Read and dispatch frames until the connection drops or the
            # transport is stopped.
            while self.running:
                frames = self.__read()
                for frame in frames:
                    f = utils.parse_frame(frame)
                    # None means the data was not a complete/valid frame.
                    if f is None:
                        continue
                    if self.__auto_decode:
                        f.body = decode(f.body)
                    self.process_frame(f, frame)
        except exception.ConnectionClosedException:
            if self.running:
                #
                # Clear out any half-received messages after losing connection
                #
                self.__recvbuf = b''
                self.running = False
                notify_disconnected = True
            break
        finally:
            self.cleanup()
finally:
    # Signal stop() that the receiver thread has finished.
    with self.__receiver_thread_exit_condition:
        self.__receiver_thread_exited = True
        self.__receiver_thread_exit_condition.notifyAll()
    log.info("Receiver loop ended")
    self.notify('receiver_loop_completed')
    if notify_disconnected:
        self.notify('disconnected')
    # Wake anyone blocked in wait_for_connection() so they can fail fast.
    with self.__connect_wait_condition:
        self.__connect_wait_condition.notifyAll()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_connected(self): """ Return true if the socket managed by this connection is connected :rtype: bool """
try: return self.socket is not None and self.socket.getsockname()[1] != 0 and BaseTransport.is_connected(self) except socket.error: return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def disconnect_socket(self): """ Disconnect the underlying socket connection """
self.running = False
if self.socket is not None:
    if self.__need_ssl():
        #
        # Even though we don't want to use the socket, unwrap is the only API method which does a proper SSL
        # shutdown
        #
        try:
            self.socket = self.socket.unwrap()
        except Exception:
            #
            # unwrap seems flaky on Win with the back-ported ssl mod, so catch any exception and log it
            #
            _, e, _ = sys.exc_info()
            log.warning(e)
    elif hasattr(socket, 'SHUT_RDWR'):
        try:
            self.socket.shutdown(socket.SHUT_RDWR)
        except socket.error:
            _, e, _ = sys.exc_info()
            # ignore when socket already closed
            if get_errno(e) != errno.ENOTCONN:
                log.warning("Unable to issue SHUT_RDWR on socket because of error '%s'", e)
#
# split this into a separate check, because sometimes the socket is nulled between shutdown and this call
#
if self.socket is not None:
    try:
        self.socket.close()
    except socket.error:
        _, e, _ = sys.exc_info()
        log.warning("Unable to close socket because of error '%s'", e)
# Reset connection state and tell listeners we are now disconnected.
self.current_host_and_port = None
self.socket = None
self.notify('disconnected')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_ssl(self, for_hosts=[], key_file=None, cert_file=None, ca_certs=None, cert_validator=None, ssl_version=DEFAULT_SSL_VERSION, password=None): """ Sets up SSL configuration for the given hosts. This ensures socket is wrapped in a SSL connection, raising an exception if the SSL module can't be found. :param for_hosts: a list of tuples describing hosts this SSL configuration should be applied to :param cert_file: the path to a X509 certificate :param key_file: the path to a X509 key file :param ca_certs: the path to the a file containing CA certificates to validate the server against. If this is not set, server side certificate validation is not done. :param cert_validator: function which performs extra validation on the client certificate, for example checking the returned certificate has a commonName attribute equal to the hostname (to avoid man in the middle attacks). The signature is: (OK, err_msg) = validation_function(cert, hostname) where OK is a boolean, and cert is a certificate structure as returned by ssl.SSLSocket.getpeercert() :param ssl_version: SSL protocol to use for the connection. This should be one of the PROTOCOL_x constants provided by the ssl module. The default is ssl.PROTOCOL_TLSv1 """
if not ssl:
    raise Exception("SSL connection requested, but SSL library not found")
# Record one independent parameter dict per host/port pair.
params = {
    'key_file': key_file,
    'cert_file': cert_file,
    'ca_certs': ca_certs,
    'cert_validator': cert_validator,
    'ssl_version': ssl_version,
    'password': password,
}
for host_port in for_hosts:
    self.__ssl_params[host_port] = dict(params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __need_ssl(self, host_and_port=None): """ Whether current host needs SSL or not. :param (str,int) host_and_port: the host/port pair to check, default current_host_and_port """
if not host_and_port: host_and_port = self.current_host_and_port return host_and_port in self.__ssl_params
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_ssl(self, host_and_port=None): """ Get SSL params for the given host. :param (str,int) host_and_port: the host/port pair we want SSL params for, default current_host_and_port """
if not host_and_port: host_and_port = self.current_host_and_port return self.__ssl_params.get(host_and_port)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __print_async(self, frame_type, headers, body): """ Utility function to print a message and setup the command prompt for the next input """
# Nothing to print once the REPL is shutting down.
if self.__quit:
    return
if self.verbose:
    # Verbose mode: dump the frame type and every header.
    self.__sysout(frame_type)
    for k, v in headers.items():
        self.__sysout('%s: %s' % (k, v))
else:
    # Terse mode: only the most useful identifying headers.
    if 'message-id' in headers:
        self.__sysout('message-id: %s' % headers['message-id'])
    if 'subscription' in headers:
        self.__sysout('subscription: %s' % headers['subscription'])
if self.prompt != '':
    self.__sysout('')
self.__sysout(body)
# Re-draw the prompt, except the very first time through.
if not self.__start:
    self.__sysout(self.prompt, end='')
else:
    self.__start = False
self.stdout.flush()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def simple_tokenize(text, include_punctuation=False): """ Tokenize the given text using a straightforward, Unicode-aware token expression. The expression mostly implements the rules of Unicode Annex #29 that are contained in the `regex` module's word boundary matching, including the refinement that splits words between apostrophes and vowels in order to separate tokens such as the French article «l'». It makes sure not to split in the middle of a grapheme, so that zero-width joiners and marks on Devanagari words work correctly. Our customizations to the expression are: - It leaves sequences of Chinese or Japanese characters (specifically, Han ideograms and hiragana) relatively untokenized, instead of splitting each character into its own token. - If `include_punctuation` is False (the default), it outputs only the tokens that start with a word-like character, or miscellaneous symbols such as emoji. If `include_punctuation` is True, it outputs all non-space tokens. - It keeps Southeast Asian scripts, such as Thai, glued together. This yields tokens that are much too long, but the alternative is that every grapheme would end up in its own token, which is worse. """
text = unicodedata.normalize('NFC', text) if include_punctuation: return [ token.casefold() for token in TOKEN_RE_WITH_PUNCTUATION.findall(text) ] else: return [ token.strip("'").casefold() for token in TOKEN_RE.findall(text) ]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tokenize(text, lang, include_punctuation=False, external_wordlist=False): """ Tokenize this text in a way that's relatively simple but appropriate for the language. Strings that are looked up in wordfreq will be run through this function first, so that they can be expected to match the data. The text will be run through a number of pre-processing steps that vary by language; see the docstring of `wordfreq.preprocess.preprocess_text`. If `include_punctuation` is True, punctuation will be included as separate tokens. Otherwise, punctuation will be omitted in the output. CJK scripts In the CJK languages, word boundaries can't usually be identified by a regular expression. Instead, there needs to be some language-specific handling. In Chinese, we use the Jieba tokenizer, with a custom word list to match the words whose frequencies we can look up. In Japanese and Korean, we use the MeCab tokenizer. The `external_wordlist` option only affects Chinese tokenization. If it's True, then wordfreq will not use its own Chinese wordlist for tokenization. Instead, it will use the large wordlist packaged with the Jieba tokenizer, and it will leave Traditional Chinese characters as is. This will probably give more accurate tokenization, but the resulting tokens won't necessarily have word frequencies that can be looked up. If you end up seeing tokens that are entire phrases or sentences glued together, that probably means you passed in CJK text with the wrong language code. """
# Use globals to load CJK tokenizers on demand, so that we can still run # in environments that lack the CJK dependencies global _mecab_tokenize, _jieba_tokenize language = langcodes.get(lang) info = get_language_info(language) text = preprocess_text(text, language) if info['tokenizer'] == 'mecab': from wordfreq.mecab import mecab_tokenize as _mecab_tokenize # Get just the language code out of the Language object, so we can # use it to select a MeCab dictionary tokens = _mecab_tokenize(text, language.language) if not include_punctuation: tokens = [token for token in tokens if not PUNCT_RE.match(token)] elif info['tokenizer'] == 'jieba': from wordfreq.chinese import jieba_tokenize as _jieba_tokenize tokens = _jieba_tokenize(text, external_wordlist=external_wordlist) if not include_punctuation: tokens = [token for token in tokens if not PUNCT_RE.match(token)] else: # This is the default case where we use the regex tokenizer. First # let's complain a bit if we ended up here because we don't have an # appropriate tokenizer. if info['tokenizer'] != 'regex' and lang not in _WARNED_LANGUAGES: logger.warning( "The language '{}' is in the '{}' script, which we don't " "have a tokenizer for. The results will be bad." .format(lang, info['script']) ) _WARNED_LANGUAGES.add(lang) tokens = simple_tokenize(text, include_punctuation=include_punctuation) return tokens
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lossy_tokenize(text, lang, include_punctuation=False, external_wordlist=False): """ Get a list of tokens for this text, with largely the same results and options as `tokenize`, but aggressively normalize some text in a lossy way that's good for counting word frequencies. In particular: - Any sequence of 2 or more adjacent digits, possibly with intervening punctuation such as a decimal point, will replace each digit with '0' so that frequencies for numbers don't have to be counted separately. This is similar to but not quite identical to the word2vec Google News data, which replaces digits with '#' in tokens with more than one digit. - In Chinese, unless Traditional Chinese is specifically requested using 'zh-Hant', all characters will be converted to Simplified Chinese. """
global _simplify_chinese info = get_language_info(lang) tokens = tokenize(text, lang, include_punctuation, external_wordlist) if info['lookup_transliteration'] == 'zh-Hans': from wordfreq.chinese import simplify_chinese as _simplify_chinese tokens = [_simplify_chinese(token) for token in tokens] return [smash_numbers(token) for token in tokens]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_cBpack(filename): """ Read a file from an idiosyncratic format that we use for storing approximate word frequencies, called "cBpack". The cBpack format is as follows: - The file on disk is a gzipped file in msgpack format, which decodes to a list whose first element is a header, and whose remaining elements are lists of words. - The header is a dictionary with 'format' and 'version' keys that make sure that we're reading the right thing. - Each inner list of words corresponds to a particular word frequency, rounded to the nearest centibel -- that is, one tenth of a decibel, or a factor of 10 ** .01. 0 cB represents a word that occurs with probability 1, so it is the only word in the data (this of course doesn't happen). -200 cB represents a word that occurs once per 100 tokens, -300 cB represents a word that occurs once per 1000 tokens, and so on. - The index of each list within the overall list (without the header) is the negative of its frequency in centibels. - Each inner list is sorted in alphabetical order. As an example, consider a corpus consisting only of the words "red fish blue fish". The word "fish" occurs as 50% of tokens (-30 cB), while "red" and "blue" occur as 25% of tokens (-60 cB). The cBpack file of their word frequencies would decode to this: [ {'format': 'cB', 'version': 1}, ['fish'], ['blue', 'red'] ] """
with gzip.open(filename, 'rb') as infile: data = msgpack.load(infile, raw=False) header = data[0] if ( not isinstance(header, dict) or header.get('format') != 'cB' or header.get('version') != 1 ): raise ValueError("Unexpected header: %r" % header) return data[1:]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def available_languages(wordlist='best'): """ Given a wordlist name, return a dictionary of language codes to filenames, representing all the languages in which that wordlist is available. """
if wordlist == 'best': available = available_languages('small') available.update(available_languages('large')) return available elif wordlist == 'combined': logger.warning( "The 'combined' wordlists have been renamed to 'small'." ) wordlist = 'small' available = {} for path in DATA_PATH.glob('*.msgpack.gz'): if not path.name.startswith('_'): list_name = path.name.split('.')[0] name, lang = list_name.split('_') if name == wordlist: available[lang] = str(path) return available
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_frequency_dict(lang, wordlist='best', match_cutoff=30): """ Get a word frequency list as a dictionary, mapping tokens to frequencies as floating-point probabilities. """
freqs = {} pack = get_frequency_list(lang, wordlist, match_cutoff) for index, bucket in enumerate(pack): freq = cB_to_freq(-index) for word in bucket: freqs[word] = freq return freqs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def word_frequency(word, lang, wordlist='best', minimum=0.): """ Get the frequency of `word` in the language with code `lang`, from the specified `wordlist`. These wordlists can be specified: - 'large': a wordlist built from at least 5 sources, containing word frequencies of 10^-8 and higher - 'small': a wordlist built from at least 3 sources, containing word frquencies of 10^-6 and higher - 'best': uses 'large' if available, and 'small' otherwise The value returned will always be at least as large as `minimum`. You could set this value to 10^-8, for example, to return 10^-8 for unknown words in the 'large' list instead of 0, avoiding a discontinuity. """
args = (word, lang, wordlist, minimum) try: return _wf_cache[args] except KeyError: if len(_wf_cache) >= CACHE_SIZE: _wf_cache.clear() _wf_cache[args] = _word_frequency(*args) return _wf_cache[args]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def zipf_frequency(word, lang, wordlist='best', minimum=0.): """ Get the frequency of `word`, in the language with code `lang`, on the Zipf scale. The Zipf scale is a logarithmic frequency scale proposed by Marc Brysbaert, who compiled the SUBTLEX data. The goal of the Zipf scale is to map reasonable word frequencies to understandable, small positive numbers. A word rates as x on the Zipf scale when it occurs 10**x times per billion words. For example, a word that occurs once per million words is at 3.0 on the Zipf scale. Zipf values for reasonable words are between 0 and 8. The value this function returns will always be at last as large as `minimum`, even for a word that never appears. The default minimum is 0, representing words that appear once per billion words or less. wordfreq internally quantizes its frequencies to centibels, which are 1/100 of a Zipf unit. The output of `zipf_frequency` will be rounded to the nearest hundredth to match this quantization. """
freq_min = zipf_to_freq(minimum) freq = word_frequency(word, lang, wordlist, freq_min) return round(freq_to_zipf(freq), 2)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def top_n_list(lang, n, wordlist='best', ascii_only=False): """ Return a frequency list of length `n` in descending order of frequency. This list contains words from `wordlist`, of the given language. If `ascii_only`, then only ascii words are considered. """
results = [] for word in iter_wordlist(lang, wordlist): if (not ascii_only) or max(word) <= '~': results.append(word) if len(results) >= n: break return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def random_words(lang='en', wordlist='best', nwords=5, bits_per_word=12, ascii_only=False): """ Returns a string of random, space separated words. These words are of the given language and from the given wordlist. There will be `nwords` words in the string. `bits_per_word` determines the amount of entropy provided by each word; when it's higher, this function will choose from a larger list of words, some of which are more rare. You can restrict the selection of words to those written in ASCII characters by setting `ascii_only` to True. """
n_choices = 2 ** bits_per_word choices = top_n_list(lang, n_choices, wordlist, ascii_only=ascii_only) if len(choices) < n_choices: raise ValueError( "There aren't enough words in the wordlist to provide %d bits of " "entropy per word." % bits_per_word ) return ' '.join([random.choice(choices) for i in range(nwords)])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def random_ascii_words(lang='en', wordlist='best', nwords=5, bits_per_word=12): """ Returns a string of random, space separated, ASCII words. These words are of the given language and from the given wordlist. There will be `nwords` words in the string. `bits_per_word` determines the amount of entropy provided by each word; when it's higher, this function will choose from a larger list of words, some of which are more rare. """
return random_words(lang, wordlist, nwords, bits_per_word, ascii_only=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def jieba_tokenize(text, external_wordlist=False): """ Tokenize the given text into tokens whose word frequencies can probably be looked up. This uses Jieba, a word-frequency-based tokenizer. If `external_wordlist` is False, we tell Jieba to default to using wordfreq's own Chinese wordlist, and not to infer unknown words using a hidden Markov model. This ensures that the multi-character tokens that it outputs will be ones whose word frequencies we can look up. If `external_wordlist` is True, this will use the largest version of Jieba's original dictionary, with HMM enabled, so its results will be independent of the data in wordfreq. These results will be better optimized for purposes that aren't looking up word frequencies, such as general- purpose tokenization, or collecting word frequencies in the first place. """
# Lazily construct the two tokenizer singletons on first use.
global jieba_tokenizer, jieba_orig_tokenizer
if external_wordlist:
    if jieba_orig_tokenizer is None:
        jieba_orig_tokenizer = jieba.Tokenizer(dictionary=ORIG_DICT_FILENAME)
    return jieba_orig_tokenizer.lcut(text)
else:
    if jieba_tokenizer is None:
        jieba_tokenizer = jieba.Tokenizer(dictionary=DICT_FILENAME)
    # Tokenize the Simplified Chinese version of the text, but return
    # those spans from the original text, even if it's in Traditional
    # Chinese
    tokens = []
    # HMM=False keeps Jieba from inventing words outside our wordlist.
    for _token, start, end in jieba_tokenizer.tokenize(simplify_chinese(text), HMM=False):
        tokens.append(text[start:end])
    return tokens
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def preprocess_text(text, language): """ This function applies pre-processing steps that convert forms of words considered equivalent into one standardized form. As one straightforward step, it case-folds the text. For the purposes of wordfreq and related tools, a capitalized word shouldn't have a different frequency from its lowercase version. The steps that are applied in order, only some of which apply to each language, are: - NFC or NFKC normalization, as needed for the language - Transliteration of multi-script languages - Abjad mark removal - Case folding - Fixing of diacritics We'll describe these steps out of order, to start with the more obvious steps. Case folding The most common effect of this function is that it case-folds alphabetic text to lowercase: 'word' This is proper Unicode-aware case-folding, so it eliminates distinctions in lowercase letters that would not appear in uppercase. This accounts for the German ß and the Greek final sigma: 'gross' 'λέξισ' In Turkish (and Azerbaijani), case-folding is different, because the uppercase and lowercase I come in two variants, one with a dot and one without. They are matched in a way that preserves the number of dots, which the usual pair of "I" and "i" do not. 'hakkında istanbul' Fixing of diacritics While we're talking about Turkish: the Turkish alphabet contains letters with cedillas attached to the bottom. In the case of "ş" and "ţ", these letters are very similar to two Romanian letters, "ș" and "ț", which have separate _commas_ below them. (Did you know that a cedilla is not the same as a comma under a letter? I didn't until I started dealing with text normalization. My keyboard layout even inputs a letter with a cedilla when you hit Compose+comma.) 
Because these letters look so similar, and because some fonts only include one pair of letters and not the other, there are many cases where the letters are confused with each other. Our preprocessing normalizes these Turkish and Romanian letters to the letters each language prefers. 'kişinin' 'același' Unicode normalization Unicode text is NFC normalized in most languages, removing trivial distinctions between strings that should be considered equivalent in all cases: 'natürlich' True NFC normalization is sufficient (and NFKC normalization is a bit too strong) for many languages that are written in cased, alphabetic scripts. Languages in other scripts tend to need stronger normalization to properly compare text. So we use NFC normalization when the language's script is Latin, Greek, or Cyrillic, and we use NFKC normalization for all other languages. Here's an example in Japanese, where preprocessing changes the width (and the case) of a Latin letter that's used as part of a word: 'uターン' In Korean, NFKC normalization is important because it aligns two different ways of encoding text -- as individual letters that are grouped together into square characters, or as the entire syllables that those characters represent: '낱말' 6 '낱말' 2 Abjad mark removal There are many abjad languages, such as Arabic, Hebrew, Persian, and Urdu, where words can be marked with vowel points but rarely are. In languages that use abjad scripts, we remove all modifiers that are classified by Unicode as "marks". We also remove an Arabic character called the tatweel, which is used to visually lengthen a word. 'كلمة' 'الحمد' Transliteration of multi-script languages Some languages are written in multiple scripts, and require special care. These languages include Chinese, Serbian, and Azerbaijani. In Serbian, there is a well-established mapping from Cyrillic letters to Latin letters. We apply this mapping so that Serbian is always represented in Latin letters. 
'shvataš' The transliteration is more complete than it needs to be to cover just Serbian, so that -- for example -- borrowings from Russian can be transliterated, instead of coming out in a mixed script. "kul'tury" Azerbaijani (Azeri) has a similar transliteration step to Serbian, and then the Latin-alphabet text is handled similarly to Turkish. 'bağırtı' We don't transliterate Traditional to Simplified Chinese in this step. There are some steps where we unify them internally: see chinese.py for more information. """
# NFC or NFKC normalization, as needed for the language
info = get_language_info(language)
text = unicodedata.normalize(info['normal_form'], text)

# Transliteration of multi-script languages
if info['transliteration'] is not None:
    text = transliterate(info['transliteration'], text)

# Abjad mark removal
if info['remove_marks']:
    text = remove_marks(text)

# Case folding (Turkic languages need dotted/dotless-i-aware folding)
if info['dotless_i']:
    text = casefold_with_i_dots(text)
else:
    text = text.casefold()

# Fixing of diacritics: normalize cedilla/comma-below confusables to the
# form this language prefers
if info['diacritics_under'] == 'commas':
    text = cedillas_to_commas(text)
elif info['diacritics_under'] == 'cedillas':
    text = commas_to_cedillas(text)

return text
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _language_in_list(language, targets, min_score=80): """ A helper function to determine whether this language matches one of the target languages, with a match score above a certain threshold. The languages can be given as strings (language tags) or as Language objects. `targets` can be any iterable of such languages. """
matched = best_match(language, targets, min_score=min_score) return matched[1] > 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mecab_tokenize(text, lang): """ Use the mecab-python3 package to tokenize the given text. The `lang` must be 'ja' for Japanese or 'ko' for Korean. The simplest output from mecab-python3 is the single-string form, which contains the same table that the command-line version of MeCab would output. We find the tokens in the first column of this table. """
if lang not in MECAB_DICTIONARY_NAMES: raise ValueError("Can't run MeCab on language %r" % lang) if lang not in MECAB_ANALYZERS: MECAB_ANALYZERS[lang] = make_mecab_analyzer(MECAB_DICTIONARY_NAMES[lang]) analyzer = MECAB_ANALYZERS[lang] text = unicodedata.normalize('NFKC', text.strip()) analyzed = analyzer.parse(text) if not analyzed: return [] return [line.split('\t')[0] for line in analyzed.split('\n') if line != '' and line != 'EOS']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transliterate(table, text): """ Transliterate text according to one of the tables above. `table` chooses the table. It looks like a language code but comes from a very restricted set: - 'sr-Latn' means to convert Serbian, which may be in Cyrillic, into the Latin alphabet. - 'az-Latn' means the same for Azerbaijani Cyrillic to Latn. """
if table == 'sr-Latn': return text.translate(SR_LATN_TABLE) elif table == 'az-Latn': return text.translate(AZ_LATN_TABLE) else: raise ValueError("Unknown transliteration table: {!r}".format(table))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _update_exit_code_from_stats(cls, statistics: Statistics, app: Application):
    '''Set the current exit code based on the Statistics.

    Each recorded error type is looked up in the application's
    ``ERROR_CODE_MAP``; mapped (truthy) codes are forwarded to
    ``app.update_exit_code``.
    '''
    for error_type in statistics.errors:
        mapped_code = app.ERROR_CODE_MAP.get(error_type)
        if not mapped_code:
            # Unmapped or zero-valued error types do not affect the exit code.
            continue
        app.update_exit_code(mapped_code)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def is_response(cls, response):
    '''Return whether the document is likely to be a Sitemap.

    Returns True when the response has a body that passes ``cls.is_file``;
    otherwise falls through and returns None (matching the original).
    '''
    body = response.body
    if not body:
        return None
    if cls.is_file(body):
        return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def is_file(cls, file):
    '''Return whether the file is likely a Sitemap.

    Peeks at the beginning of the file, transparently decompressing
    gzip data, and looks for an XML declaration plus a sitemap root
    element. Returns True on a match, otherwise None.
    '''
    data = wpull.util.peek_file(file)
    if is_gzip(data):
        try:
            data = wpull.decompression.gzip_uncompress(data, truncated=True)
        except zlib.error:
            # Corrupt/short gzip stream: fall back to the raw bytes.
            pass
    data = wpull.string.printable_bytes(data)
    has_sitemap_root = b'<sitemapindex' in data or b'<urlset' in data
    if b'<?xml' in data and has_sitemap_root:
        return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def normalize_hostname(hostname):
    '''Normalizes a hostname so that it is ASCII and valid domain name.

    Raises:
        UnicodeError: if the hostname cannot be IDNA-encoded or does not
        round-trip.
    '''
    try:
        ascii_hostname = hostname.encode('idna').decode('ascii').lower()
    except UnicodeError as error:
        raise UnicodeError('Hostname {} rejected: {}'.format(hostname, error)) from error
    if ascii_hostname != hostname:
        # The name changed during normalization: verify it still
        # round-trips through the IDNA codec. May raise UnicodeError.
        ascii_hostname.encode('idna')
    return ascii_hostname
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def normalize_path(path, encoding='utf-8'):
    '''Normalize a path string.

    Flattens a path by removing dot parts, percent-encodes unacceptable
    characters and ensures percent-encoding is uppercase.
    '''
    # Guarantee a leading slash before flattening.
    if not path.startswith('/'):
        path = '/' + path
    flattened = flatten_path(path, flatten_slashes=True)
    encoded = percent_encode(flattened, encoding=encoding)
    return uppercase_percent_encoding(encoded)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def normalize_query(text, encoding='utf-8'):
    '''Normalize a query string.

    Percent-encodes unacceptable characters and ensures percent-encoding
    is uppercase.
    '''
    encoded = percent_encode_plus(text, encoding=encoding)
    return uppercase_percent_encoding(encoded)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def normalize_fragment(text, encoding='utf-8'):
    '''Normalize a fragment.

    Percent-encodes unacceptable characters and ensures percent-encoding
    is uppercase.
    '''
    encoded = percent_encode(text, encoding=encoding,
                             encode_set=FRAGMENT_ENCODE_SET)
    return uppercase_percent_encoding(encoded)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def normalize_username(text, encoding='utf-8'):
    '''Normalize a username.

    Percent-encodes unacceptable characters and ensures percent-encoding
    is uppercase.
    '''
    encoded = percent_encode(text, encoding=encoding,
                             encode_set=USERNAME_ENCODE_SET)
    return uppercase_percent_encoding(encoded)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def normalize_password(text, encoding='utf-8'):
    '''Normalize a password.

    Percent-encodes unacceptable characters and ensures percent-encoding
    is uppercase.
    '''
    encoded = percent_encode(text, encoding=encoding,
                             encode_set=PASSWORD_ENCODE_SET)
    return uppercase_percent_encoding(encoded)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def percent_encode(text, encode_set=DEFAULT_ENCODE_SET, encoding='utf-8'):
    '''Percent encode text.

    Unlike Python's ``quote``, this function accepts a blacklist instead
    of a whitelist of safe characters.
    '''
    # The cache maps each encode set to the bound ``__getitem__`` of a
    # PercentEncoderMap, so lookup per byte is a single call.
    if encode_set not in _percent_encoder_map_cache:
        _percent_encoder_map_cache[encode_set] = \
            PercentEncoderMap(encode_set).__getitem__
    quoter = _percent_encoder_map_cache[encode_set]
    return ''.join(map(quoter, text.encode(encoding)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def percent_encode_plus(text, encode_set=QUERY_ENCODE_SET, encoding='utf-8'):
    '''Percent encode text for query strings.

    Unlike Python's ``quote_plus``, this function accepts a blacklist
    instead of a whitelist of safe characters.
    '''
    encoded = percent_encode(text, encode_set, encoding)
    # Spaces become '+' in form-encoded query strings. When the input has
    # no spaces the replace would be a no-op, matching the original's
    # fast path.
    if ' ' in text:
        encoded = encoded.replace(' ', '+')
    return encoded
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def schemes_similar(scheme1, scheme2):
    '''Return whether URL schemes are similar.

    This function considers the following schemes to be similar:

    * HTTP and HTTPS
    '''
    if scheme1 == scheme2:
        return True
    # HTTP and HTTPS count as interchangeable.
    return scheme1 in ('http', 'https') and scheme2 in ('http', 'https')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def is_subdir(base_path, test_path, trailing_slash=False, wildcards=False): '''Return whether the a path is a subpath of another. Args: base_path: The base path test_path: The path which we are testing trailing_slash: If True, the trailing slash is treated with importance. For example, ``/images/`` is a directory while ``/images`` is a file. wildcards: If True, globbing wildcards are matched against paths ''' if trailing_slash: base_path = base_path.rsplit('/', 1)[0] + '/' test_path = test_path.rsplit('/', 1)[0] + '/' else: if not base_path.endswith('/'): base_path += '/' if not test_path.endswith('/'): test_path += '/' if wildcards: return fnmatch.fnmatchcase(test_path, base_path) else: return test_path.startswith(base_path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def uppercase_percent_encoding(text):
    '''Uppercases percent-encoded sequences.'''
    def upper_match(match):
        return match.group(0).upper()

    if '%' in text:
        # Only lowercase hex digit pairs need rewriting.
        return re.sub(r'%[a-f0-9][a-f0-9]', upper_match, text)
    # Fast path: nothing is percent-encoded.
    return text
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def split_query(qs, keep_blank_values=False):
    '''Split the query string.

    Note for empty values: If an equal sign (``=``) is present, the
    value will be an empty string (``''``). Otherwise, the value will
    be ``None``::

        >>> list(split_query('a=&b', keep_blank_values=True))
        [('a', ''), ('b', None)]

    No processing is done on the actual values.
    '''
    results = []
    for piece in qs.split('&'):
        key, equals, value = piece.partition('=')
        if not equals:
            # No '=' at all: distinguish from an explicit empty value.
            value = None if keep_blank_values else ''
        if value or keep_blank_values:
            results.append((key, value))
    return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def query_to_map(text):
    '''Return a key-values mapping from a query string.

    Plus symbols are replaced with spaces.

    Returns:
        dict: Maps each key to a list of values; a key repeated in the
        query string accumulates multiple values.
    '''
    dict_obj = {}
    for key, value in split_query(text, True):
        # First occurrence of a key creates its value list.
        if key not in dict_obj:
            dict_obj[key] = []
        if value:
            # '+' is the form-encoded representation of a space.
            dict_obj[key].append(value.replace('+', ' '))
        else:
            # Blank or missing values are recorded as empty strings.
            dict_obj[key].append('')
    # BUG FIX: the original ended with ``return query_to_map(text)``,
    # which recursed unconditionally and raised RecursionError on every
    # call. Return the accumulated mapping instead.
    return dict_obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def urljoin(base_url, url, allow_fragments=True):
    '''Join URLs like ``urllib.parse.urljoin`` but allow scheme-relative URL.'''
    # A scheme-relative URL ('//host/path') inherits the base's scheme.
    is_scheme_relative = url.startswith('//') and len(url) > 2
    if is_scheme_relative:
        scheme = base_url.partition(':')[0]
        if scheme:
            url = '{0}:{1}'.format(scheme, url)
    return urllib.parse.urljoin(
        base_url, url, allow_fragments=allow_fragments)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def flatten_path(path, flatten_slashes=False):
    '''Flatten an absolute URL path by removing the dot segments.

    :func:`urllib.parse.urljoin` has some support for removing dot
    segments, but it is conservative and only removes them as needed.

    Arguments:
        path (str): The URL path.
        flatten_slashes (bool): If True, consecutive slashes are removed.

    The path returned will always have a leading slash.
    '''
    # Fast path for the root and empty paths.
    if not path or path == '/':
        return '/'

    # Work without the leading slash; it is restored at the end.
    if path[0] == '/':
        path = path[1:]

    segments = []
    for segment in path.split('/'):
        if segment == '.' or (flatten_slashes and not segment):
            # Drop current-directory markers and, optionally, the empty
            # segments produced by consecutive slashes.
            continue
        if segment == '..':
            # Parent-directory marker removes the previous segment, if any.
            if segments:
                segments.pop()
            continue
        segments.append(segment)

    # Preserve a trailing slash when flattening slashes, and ensure at
    # least one (empty) segment so the result is never ''.
    if flatten_slashes and path.endswith('/') or not segments:
        segments.append('')

    return '/' + '/'.join(segments)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse(cls, url, default_scheme='http', encoding='utf-8'):
    '''Parse a URL and return a URLInfo.

    Args:
        url (str): The URL text; ``None`` is passed through as ``None``.
        default_scheme (str): Scheme assumed when the URL has none
            (e.g. ``example.com/mystuff``).
        encoding (str): Encoding used for percent-encoding the parts.

    Returns:
        URLInfo: populated with normalized components, or ``None`` when
        ``url`` is ``None``.

    Raises:
        ValueError: on control codes, missing scheme/colon, empty
        hostname, or invalid host/port.
    '''
    if url is None:
        return None

    url = url.strip()

    # Reject URLs containing C0 control characters outright.
    if frozenset(url) & C0_CONTROL_SET:
        raise ValueError('URL contains control codes: {}'.format(ascii(url)))

    scheme, sep, remaining = url.partition(':')

    if not scheme:
        raise ValueError('URL missing scheme: {}'.format(ascii(url)))

    scheme = scheme.lower()

    if not sep and default_scheme:
        # Likely something like example.com/mystuff
        remaining = url
        scheme = default_scheme
    elif not sep:
        raise ValueError('URI missing colon: {}'.format(ascii(url)))

    # NOTE(review): precedence is (default_scheme and '.' in scheme) or
    # scheme == 'localhost' -- the localhost case applies even without a
    # default_scheme; confirm this is intended.
    if default_scheme and '.' in scheme or scheme == 'localhost':
        # Maybe something like example.com:8080/mystuff or
        # maybe localhost:8080/mystuff
        remaining = '{}:{}'.format(scheme, remaining)
        scheme = default_scheme

    info = URLInfo()
    info.encoding = encoding

    if scheme not in RELATIVE_SCHEME_DEFAULT_PORTS:
        # Non-hierarchical scheme (e.g. mailto:): store the rest verbatim
        # as the path and skip authority parsing.
        info.raw = url
        info.scheme = scheme
        info.path = remaining

        return info

    if remaining.startswith('//'):
        remaining = remaining[2:]

    # Locate the first occurrence of each component delimiter.
    path_index = remaining.find('/')
    query_index = remaining.find('?')
    fragment_index = remaining.find('#')

    try:
        # The authority ends at the earliest delimiter present.
        index_tuple = (path_index, query_index, fragment_index)
        authority_index = min(num for num in index_tuple if num >= 0)
    except ValueError:
        # No delimiters at all: the whole remainder is the authority.
        authority_index = len(remaining)

    authority = remaining[:authority_index]
    resource = remaining[authority_index:]

    try:
        # The path ends where the query or fragment begins.
        index_tuple = (query_index, fragment_index)
        path_index = min(num for num in index_tuple if num >= 0)
    except ValueError:
        path_index = len(remaining)

    # Empty path defaults to '/'.
    path = remaining[authority_index + 1:path_index] or '/'

    if fragment_index >= 0:
        query_index = fragment_index
    else:
        query_index = len(remaining)

    query = remaining[path_index + 1:query_index]

    fragment = remaining[query_index + 1:]

    userinfo, host = cls.parse_authority(authority)
    hostname, port = cls.parse_host(host)
    username, password = cls.parse_userinfo(userinfo)

    if not hostname:
        raise ValueError('Hostname is empty: {}'.format(ascii(url)))

    info.raw = url
    info.scheme = scheme
    info.authority = authority
    # Components are stored percent-normalized; credentials are decoded.
    info.path = normalize_path(path, encoding=encoding)
    info.query = normalize_query(query, encoding=encoding)
    info.fragment = normalize_fragment(fragment, encoding=encoding)
    info.userinfo = userinfo
    info.username = percent_decode(username, encoding=encoding)
    info.password = percent_decode(password, encoding=encoding)
    info.host = host
    info.hostname = hostname
    # Fall back to the scheme's well-known port when none was given.
    info.port = port or RELATIVE_SCHEME_DEFAULT_PORTS[scheme]
    info.resource = resource

    return info
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_authority(cls, authority):
    '''Parse the authority part and return userinfo and host.'''
    head, at_sign, tail = authority.partition('@')
    if at_sign:
        return head, tail
    # No '@': the whole authority is the host and userinfo is empty.
    return '', head
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_userinfo(cls, userinfo):
    '''Parse the userinfo and return username and password.'''
    if ':' in userinfo:
        # Only the first colon separates username from password.
        username, password = userinfo.split(':', 1)
    else:
        username, password = userinfo, ''
    return username, password
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_host(cls, host):
    '''Parse the host and return hostname and port.'''
    if host.endswith(']'):
        # Bracketed IPv6 literal with no port component.
        return cls.parse_hostname(host), None

    hostname, colon, port_text = host.rpartition(':')

    if not colon:
        # No port given; rpartition put the whole host into port_text.
        return cls.parse_hostname(port_text), None

    port = int(port_text)
    if not 0 <= port <= 65535:
        raise ValueError('Port number invalid')
    return cls.parse_hostname(hostname), port
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_hostname(cls, hostname):
    '''Parse the hostname and normalize.'''
    if hostname.startswith('['):
        return cls.parse_ipv6_hostname(hostname)

    # Prefer the canonical IPv4 form; fall back to the raw name when the
    # host is not an IPv4 address.
    try:
        candidate = normalize_ipv4_address(hostname)
    except ValueError:
        candidate = hostname

    candidate = normalize_hostname(candidate)

    for char in FORBIDDEN_HOSTNAME_CHARS:
        if char in candidate:
            raise ValueError('Invalid hostname: {}'
                             .format(ascii(hostname)))

    return candidate
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_ipv6_hostname(cls, hostname):
    '''Parse and normalize a IPv6 address.'''
    # The address must be bracketed, e.g. '[::1]'.
    if not (hostname.startswith('[') and hostname.endswith(']')):
        raise ValueError('Invalid IPv6 address: {}'
                         .format(ascii(hostname)))

    # Return the RFC 5952 compressed form of the bracketed address.
    return ipaddress.IPv6Address(hostname[1:-1]).compressed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def to_dict(self):
    '''Return a dict of the attributes.'''
    field_names = (
        'raw', 'scheme', 'authority', 'path', 'query', 'fragment',
        'userinfo', 'username', 'password', 'host', 'hostname',
        'port', 'resource', 'url', 'encoding',
    )
    attributes = {}
    for name in field_names:
        attributes[name] = getattr(self, name)
        if name == 'authority':
            # 'netloc' duplicates the authority value, matching the
            # original mapping's key set and insertion order.
            attributes['netloc'] = self.authority
    return attributes