text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def path_exists_or_creatable_portable(pathname: str) -> bool: """OS-portable check for whether current path exists or is creatable. This function is guaranteed to _never_ raise exceptions. Returns ------ `True` if the passed pathname is a valid pathname on the current OS _and_ either currently exists or is hypothetically creatable in a cross-platform manner optimized for POSIX-unfriendly filesystems; `False` otherwise. """
def path_exists_or_creatable_portable(pathname: str) -> bool:
    """OS-portable check for whether `pathname` exists or is creatable.

    Guaranteed to never raise: the pathname is validated before any
    ``os`` call so invalid paths cannot trigger exceptions, and
    non-fatal filesystem complaints are reported as ``False``.
    """
    try:
        # Validate first so the os.path calls below stay safe.
        if not is_pathname_valid(pathname):
            return False
        return os.path.exists(pathname) or is_path_sibling_creatable(pathname)
    except OSError:
        # Connection timeouts, permission issues, etc. imply the path
        # is inaccessible; anything non-OSError is a genuine bug.
        return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def Q(name): """Gets a variable from the current sketch. Processing has a number of methods and variables with the same name, 'mousePressed' for example. This allows us to disambiguate. Also casts numeric values as floats to make it easier to translate code from pde to python. """
def Q(name):
    """Fetch the sketch variable `name`, coercing integral values to float.

    Disambiguates Processing members that share a name (e.g.
    ``mousePressed``) and eases pde-to-python translation by always
    returning numeric values as floats.
    """
    value = PApplet.getDeclaredField(name).get(Sketch.get_instance())
    # `long` exists because this runs under Jython (Python 2).
    return float(value) if isinstance(value, (long, int)) else value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_source(link): """ Return source of the `link` whether it is filename or url. Args: link (str): Filename or URL. Returns: str: Content. Raises: UserWarning: When the `link` couldn't be resolved. """
def _get_source(link):
    """Return the content of `link`, whether it is a filename or a URL.

    Args:
        link (str): Filename or URL.

    Returns:
        str: Content.

    Raises:
        UserWarning: When the `link` couldn't be resolved.
    """
    # URLs are downloaded; everything else is treated as a local file.
    if link.startswith(("http://", "https://")):
        return httpkie.Downloader().download(link)
    if os.path.exists(link):
        with open(link) as source:
            return source.read()
    raise UserWarning("html: '%s' is neither URL or data!" % link)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _process_config_item(item, dirname): """ Process one item from the configuration file, which contains multiple items saved as dictionary. This function reads additional data from the config and do some replacements - for example, if you specify url, it will download data from this url and so on. Args: item (dict): Item, which will be processed. Note: Returned data format:: { "link": "link to html page/file", "html": "html code from file/url", "vars": { "varname": { "data": "matching data..", } } } Returns: dict: Dictionary in format showed above. """
def _process_config_item(item, dirname):
    """Expand one configuration item into its final dictionary form.

    Resolves the ``html`` key to either a URL or a path relative to
    `dirname`, fetches its content, and substitutes ``$name`` in every
    ``notfoundmsg`` with the variable's own key.

    Args:
        item (dict): Item, which will be processed.
        dirname (str): Base directory for relative HTML paths.

    Returns:
        dict: ``{"link": ..., "html": ..., "vars": ...}``.
    """
    item = copy.deepcopy(item)
    html = item.get("html", None)
    if not html:
        raise UserWarning("Can't find HTML source for item:\n%s" % str(item))
    # A scheme marker means URL; otherwise a file path relative to dirname.
    link = html if "://" in html else os.path.join(dirname, html)
    del item["html"]
    # Replace the $name placeholder with the actual field name.
    for name, spec in item.items():
        if "notfoundmsg" in spec:
            spec["notfoundmsg"] = spec["notfoundmsg"].replace("$name", name)
    return {
        "html": _get_source(link),
        "link": link,
        "vars": item,
    }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_config(file_name): """ Read YAML file with configuration and pointers to example data. Args: file_name (str): Name of the file, where the configuration is stored. Returns: dict: Parsed and processed data (see :func:`_process_config_item`). Example YAML file:: html: simple_xml.xml first: data: i wan't this required: true notfoundmsg: Can't find variable $name. second: data: and this --- html: simple_xml2.xml first: data: something wanted required: true notfoundmsg: Can't find variable $name. second: data: another wanted thing """
def read_config(file_name):
    """Read a YAML file with configuration and pointers to example data.

    Args:
        file_name (str): Name of the file where the configuration is stored.

    Returns:
        list: Parsed and processed items (see :func:`_process_config_item`).
    """
    dirname = os.path.relpath(os.path.dirname(os.path.abspath(file_name)))

    # Force utf-8 byte strings instead of unicode objects when parsing.
    def custom_str_constructor(loader, node):
        return loader.construct_scalar(node).encode('utf-8')
    yaml.add_constructor(u'tag:yaml.org,2002:str', custom_str_constructor)

    with open(file_name) as f:
        return [
            _process_config_item(document, dirname)
            for document in yaml.load_all(f.read())
        ]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def hash(value, arg): """ Returns a hex-digest of the passed in value for the hash algorithm given. """
def hash(value, arg):
    """Return a hex-digest of `value` for the hash algorithm named by `arg`.

    Args:
        value: The string to hash (encoded to utf-8 on Python 3).
        arg: Name of a hashlib algorithm (case-insensitive).

    Raises:
        TemplateSyntaxError: if the algorithm is not available.
        ValueError: if the algorithm cannot produce a hex digest.
    """
    arg = str(arg).lower()
    if sys.version_info >= (3, 0):
        # hashlib requires bytes on Python 3.
        value = value.encode("utf-8")
    if arg not in get_available_hashes():
        # BUG FIX: the original format string ended with a bare '%'
        # ("... are: %"), which itself raised ValueError instead of
        # producing the intended error message.
        raise TemplateSyntaxError(
            "The %s hash algorithm does not exist. "
            "Supported algorithms are: %s" % (arg, get_available_hashes()))
    try:
        f = getattr(hashlib, arg)
        hashed = f(value).hexdigest()
    except Exception:
        raise ValueError(
            "The %s hash algorithm cannot produce a hex digest. "
            "Ensure that OpenSSL is properly installed." % arg)
    return hashed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def paginator(context, adjacent_pages=2): """ To be used in conjunction with the object_list generic view. Adds pagination context variables for use in displaying first, adjacent and last page links in addition to those created by the object_list generic view. """
def paginator(context, adjacent_pages=2):
    """Add pagination context variables for first/adjacent/last page links.

    To be used in conjunction with the object_list generic view; returns
    None when no paginator is present in the context.
    """
    current_page = context.get('page')
    paginator = context.get('paginator')
    if not paginator:
        return
    pages = paginator.num_pages
    # Window of page numbers around the current page, clipped to range.
    window = range(current_page - adjacent_pages,
                   current_page + adjacent_pages + 1)
    page_numbers = [n for n in window if 0 < n <= pages]
    # Pick the base URL depending on what kind of listing this is.
    if 'topic_slug' in context:
        slugtype = 'topic'
        page_url = context["topic"].get_short_url()
    elif 'forum_slug' in context:
        slugtype = 'forum'
        page_url = '/forum/%s/' % context["forum_slug"]
    else:
        slugtype = ''
        page_url = context['request'].get_full_path()
    return {
        "is_paginated": context["is_paginated"],
        "page": current_page,
        "pages": pages,
        "page_obj": context['page_obj'],
        "page_numbers": page_numbers,
        "has_next": context["page_obj"].has_next(),
        "has_previous": context["page_obj"].has_previous(),
        "page_url": page_url,
        'slugtype': slugtype,
    }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_headers_link(headers): """Returns the parsed header links of the response, if any."""
def parse_headers_link(headers):
    """Returns the parsed header links of the response, if any.

    Links are keyed by their ``rel`` attribute, falling back to ``url``.
    """
    parsed = {}
    header = CaseInsensitiveDict(headers).get('link')
    if header:
        for link in parse_link(header):
            parsed[link.get('rel') or link.get('url')] = link
    return parsed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self, cmd, timeout=None, key=None): """ Run a command on the phablet device using ssh :param cmd: a list of strings to execute as a command :param timeout: a timeout (in seconds) for device discovery :param key: a path to a public ssh key to use for connection :returns: the exit code of the command This method will not allow you to capture stdout/stderr from the target process. If you wish to do that please consider switching to one of subprocess functions along with. :meth:`cmdline()`. """
def run(self, cmd, timeout=None, key=None):
    """Run a command on the phablet device using ssh.

    :param cmd: a list of strings to execute as a command
    :param timeout: a timeout (in seconds) for device discovery
    :param key: a path to a public ssh key to use for connection
    :returns: the exit code of the command

    stdout/stderr of the target process are not captured; use one of
    the subprocess functions with :meth:`cmdline()` for that.
    """
    if not isinstance(cmd, list):
        raise TypeError("cmd needs to be a list")
    for part in cmd:
        if not isinstance(part, str):
            raise TypeError("cmd needs to be a list of strings")
    self.connect(timeout, key)
    return self._run_ssh(cmd)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connect(self, timeout=None, key=None): """ Perform one-time setup procedure. :param timeout: a timeout (in seconds) for device discovery :param key: a path to a public ssh key to use for connection This method will allow you to execute :meth:`cmdline()` repeatedly without incurring the extra overhead of the setup procedure. Note that this procedure needs to be repeated whenever: - the target device reboots - the local adb server is restarted - your ssh keys change .. versionadded:: 0.2 """
def connect(self, timeout=None, key=None):
    """Perform the one-time ssh setup procedure.

    :param timeout: a timeout (in seconds) for device discovery
    :param key: a path to a public ssh key to use for connection

    Needs to be repeated whenever the target device reboots, the local
    adb server restarts, or the ssh keys change.

    .. versionadded:: 0.2
    """
    # A non-None port means setup already ran; nothing to do.
    if self.port is not None:
        return
    self._wait_for_device(timeout)
    self._setup_port_forwarding()
    self._purge_known_hosts_entry()
    self._copy_ssh_key(key)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def FromType(name, otype): """ ValueOption subclasses factory, creates a convenient option to store data from a given Type. attribute precedence : * ``|attrs| > 0`` (``multi`` and ``uniq`` are implicit) => NotImplementedError * ``uniq`` (``multi`` is implicit) => NotImplementedError * ``multi`` and ``not uniq`` => NotImplementedError * ``not multi`` => ValueOption :param name: Name of the option :type name: str :param otype: the desired type of field :type otype: subclass of :class:`.GenericType` """
def FromType(name, otype):
    """ValueOption subclasses factory for a given field type.

    Attribute precedence:
    * ``|attrs| > 0`` => NotImplementedError
    * ``uniq`` => SetOption
    * ``multi`` (and not uniq) => ListOption
    * otherwise => ValueOption

    :param name: Name of the option
    :type name: str
    :param otype: the desired type of field
    :type otype: subclass of :class:`.GenericType`
    """
    if otype.attrs is not None and len(otype.attrs):
        raise NotImplementedError("for otype, options can't have attributs")
    if otype.uniq:
        return SetOption(name, otype)
    if otype.multi:
        return ListOption(name, otype)
    return ValueOption(name, otype)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def upsert_object_property(self, identifier, properties, ignore_constraints=False): """Manipulate an object's property set. Inserts or updates properties in given dictionary. If a property key does not exist in the object's property set it is created. If the value is None an existing property is deleted. Existing object properties that are not present in the given property set remain unaffacted. Deleting mandatory properties or updating immutable properties results in a ValueError. These constraints can be disabled using the ignore_constraints parameter. Parameters identifier : string Unique object identifier properties : Dictionary() Dictionary of property names and their new values. ignore_constraints : Boolean Flag indicating whether to ignore immutable and mandatory property constraints (True) or nore (False, Default). Returns ------- ObjectHandle Handle to updated object or None if object does not exist """
def upsert_object_property(self, identifier, properties, ignore_constraints=False):
    """Insert, update, or delete entries of an object's property set.

    A value of None deletes the property; any other value upserts it.
    Updating an immutable property or deleting a mandatory one raises
    ValueError unless `ignore_constraints` is True. Properties not
    present in `properties` remain unaffected.

    Returns the updated object handle, or None if the object is unknown.
    """
    obj = self.get_object(identifier)
    if obj is None:
        return obj
    for key, value in properties.items():
        # Immutable properties may never be touched (upsert or delete).
        if not ignore_constraints and key in self.immutable_properties:
            raise ValueError('update to immutable property: ' + key)
        if value is not None:
            # UPSERT
            obj.properties[key] = value
        else:
            # DELETE: mandatory properties may not be removed.
            if not ignore_constraints and key in self.mandatory_properties:
                raise ValueError('delete mandatory property: ' + key)
            if key in obj.properties:
                del obj.properties[key]
    # Persist the modified handle.
    self.replace_object(obj)
    return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete_object(self, identifier, erase=False): """Delete the entry with given identifier in the database. Returns the handle for the deleted object or None if object identifier is unknown. If the read-only property of the object is set to true a ValueError is raised. Parameters identifier : string Unique object identifier erase : Boolean, optinal If true, the record will be deleted from the database. Otherwise, the active flag will be set to False to support provenance tracking. Returns ------- (Sub-class of)ObjectHandle """
def delete_object(self, identifier, erase=False):
    """Delete the database entry with the given identifier.

    By default the object is soft-deleted (active flag set to False) so
    provenance is kept; with erase=True the record is removed entirely.
    Deleting a read-only object raises ValueError.

    Returns the handle of the deleted object, or None if unknown.
    """
    db_object = self.get_object(identifier)
    if db_object is None:
        return None
    # Read-only objects must never be deleted.
    if db_object.properties.get(PROPERTY_READONLY, False):
        raise ValueError('cannot delete read-only resource')
    if erase:
        # Hard delete: remove the record from the collection.
        self.collection.delete_many({"_id": identifier})
    else:
        # Soft delete: flip the active flag for provenance tracking.
        self.collection.update_one(
            {"_id": identifier}, {'$set': {'active': False}})
    return db_object
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_object(self, identifier, include_inactive=False): """Retrieve object with given identifier from the database. Parameters identifier : string Unique object identifier include_inactive : Boolean Flag indicating whether inactive (i.e., deleted) object should be included in the search (i.e., return an object with given identifier if it has been deleted or return None) Returns ------- (Sub-class of)ObjectHandle The database object with given identifier or None if no object with identifier exists. """
def get_object(self, identifier, include_inactive=False):
    """Retrieve the object with the given identifier, or None.

    Inactive (soft-deleted) objects are only returned when
    `include_inactive` is True.
    """
    # Unique identifier: expect zero or one match.
    query = {'_id': identifier}
    if not include_inactive:
        query['active'] = True
    cursor = self.collection.find(query)
    if cursor.count() == 0:
        return None
    return self.from_dict(cursor.next())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def insert_object(self, db_object): """Create new entry in the database. Parameters db_object : (Sub-class of)ObjectHandle """
def insert_object(self, db_object):
    """Create a new entry in the database for the given handle.

    Parameters
    ----------
    db_object : (Sub-class of)ObjectHandle
    """
    document = self.to_dict(db_object)
    # New objects always start out active.
    document['active'] = True
    self.collection.insert_one(document)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_objects(self, query=None, limit=-1, offset=-1): """List of all objects in the database. Optinal parameter limit and offset for pagination. A dictionary of key,value-pairs can be given as addictional query condition for document properties. Parameters query : Dictionary Filter objects by property-value pairs defined by dictionary. limit : int Limit number of items in the result set offset : int Set offset in list (order as defined by object store) Returns ------- ObjectListing """
def list_objects(self, query=None, limit=-1, offset=-1):
    """List active objects, optionally filtered and paginated.

    Parameters
    ----------
    query : dict, optional
        Property-value pairs added to the document filter.
    limit : int
        Maximum number of items in the result (-1 for no limit).
    offset : int
        Number of items to skip (-1 for no offset).

    Returns
    -------
    ObjectListing
    """
    # Only active documents, plus any caller-supplied conditions.
    doc = {'active': True}
    if query is not None:
        doc.update(query)
    coll = self.collection.find(doc).sort(
        [('timestamp', pymongo.DESCENDING)])
    result = []
    position = 0
    for document in coll:
        # Stop once the page is full (test first in case limit is zero).
        if limit >= 0 and len(result) == limit:
            break
        if offset < 0 or position >= offset:
            result.append(self.from_dict(document))
        position += 1
    return ObjectListing(result, offset, limit, coll.count())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_dict(self, db_obj): """Create a Json-like dictionary for objects managed by this object store. Parameters db_obj : (Sub-class of)ObjectHandle Returns ------- (JSON) Json-like object, i.e., dictionary. """
def to_dict(self, db_obj):
    """Create a JSON-like dictionary for a database object handle.

    Returns a dict with the identifier, an ISO-formatted timestamp
    string, and the object's property dictionary.
    """
    # Base JSON serialization shared by all database objects.
    return {
        '_id': db_obj.identifier,
        'timestamp': str(db_obj.timestamp.isoformat()),
        'properties': db_obj.properties,
    }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compile_msg_payload(self, invite): """ Determine recipient, message content, return it as a dict that can be Posted to the message sender """
def compile_msg_payload(self, invite):
    """Determine recipient/content/metadata and return the message payload.

    Fields missing from ``invite.invite`` are filled from defaults
    (lazily evaluated) and the payload is written back to the invite so
    later sends reuse it.
    """
    self.l.info("Compiling the outbound message payload")
    update_invite = False
    # (field, lazy fallback) pairs; fallbacks only run when the field
    # is absent from the stored invite.
    fallbacks = (
        ("to_addr", lambda: get_identity_address(invite.identity)),
        ("content", lambda: settings.INVITE_TEXT),
        ("metadata", dict),
    )
    msg_payload = {}
    for field, fallback in fallbacks:
        if field in invite.invite:
            msg_payload[field] = invite.invite[field]
        else:
            update_invite = True
            msg_payload[field] = fallback()
    if update_invite is True:
        self.l.info("Updating the invite.invite field")
        invite.invite = msg_payload
        invite.save()
    self.l.info("Compiled the outbound message payload")
    return msg_payload
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def send_message(self, payload): """ Create a post request to the message sender """
def send_message(self, payload):
    """Create a post request to the message sender and return its result."""
    self.l.info("Creating outbound message request")
    response = ms_client.create_outbound(payload)
    self.l.info("Created outbound message request")
    return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self, invite_id, **kwargs): """ Sends a message about service rating to invitee """
def run(self, invite_id, **kwargs):
    """Sends a message about service rating to the invitee.

    Queues the outbound message, then schedules a follow-up task to
    update the invite record after the send.
    """
    self.l = self.get_logger(**kwargs)
    self.l.info("Looking up the invite")
    invite = Invite.objects.get(id=invite_id)
    result = self.send_message(self.compile_msg_payload(invite))
    self.l.info("Creating task to update invite after send")
    post_send_update_invite.apply_async(args=[invite_id])
    self.l.info("Created task to update invite after send")
    return "Message queued for send. ID: <%s>" % str(result["id"])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stop(self, key): """ Stop a concurrent operation. This gets the concurrency limiter for the given key (creating it if necessary) and stops a concurrent operation on it. If the concurrency limiter is empty, it is deleted. """
def stop(self, key):
    """Stop a concurrent operation for `key`.

    Fetches (creating if necessary) the concurrency limiter for the
    key, stops one operation on it, and deletes the limiter if it is
    now empty.
    """
    limiter = self._get_limiter(key)
    limiter.stop()
    self._cleanup_limiter(key)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fields_for(context, form, template="includes/form_fields.html"): """ Renders fields for a form with an optional template choice. """
def fields_for(context, form, template="includes/form_fields.html"):
    """Renders fields for a form with an optional template choice."""
    tmpl = get_template(template)
    # Expose the form to the included template under a known name.
    context["form_for_fields"] = form
    return tmpl.render(context)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sort_by(items, attr): """ General sort filter - sorts by either attribute or key. """
def sort_by(items, attr):
    """General sort filter - sorts by either attribute or key."""
    def lookup(entry):
        # Prefer attribute access, then mapping access; if neither
        # applies, retry attribute access so the AttributeError
        # propagates to the caller.
        try:
            return getattr(entry, attr)
        except AttributeError:
            try:
                return entry[attr]
            except TypeError:
                getattr(entry, attr)
    return sorted(items, key=lookup)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gravatar_url(email, size=32): """ Return the full URL for a Gravatar given an email hash. """
def gravatar_url(email, size=32):
    """Return the full URL for a Gravatar given an email hash.

    The email is lowercased before hashing, per the Gravatar spec.
    """
    digest = md5(email.lower().encode("utf-8")).hexdigest()
    return "//www.gravatar.com/avatar/%s?s=%s&d=identicon&r=PG" % (digest, size)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def metablock(parsed): """ Remove HTML tags, entities and superfluous characters from meta blocks. """
def metablock(parsed):
    """Remove HTML tags, entities and superfluous characters from meta blocks."""
    # Collapse whitespace/newlines and tidy spaces before commas.
    collapsed = " ".join(parsed.replace("\n", "").split()).replace(" ,", ",")
    return escape(strip_tags(decode_entities(collapsed)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pagination_for(context, current_page, page_var="page", exclude_vars=""): """ Include the pagination template and data for persisting querystring in pagination links. Can also contain a comma separated string of var names in the current querystring to exclude from the pagination links, via the ``exclude_vars`` arg. """
def pagination_for(context, current_page, page_var="page", exclude_vars=""):
    """Include the pagination template and data for persisting querystring
    in pagination links.

    `exclude_vars` may be a comma separated string of querystring var
    names to exclude from the pagination links; the page var itself is
    always excluded.
    """
    params = context["request"].GET.copy()
    skip = [v for v in exclude_vars.split(",") if v]
    skip.append(page_var)
    for name in skip:
        if name in params:
            del params[name]
    return {
        "current_page": current_page,
        "querystring": params.urlencode(),
        "page_var": page_var,
    }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search_form(context, search_model_names=None): """ Includes the search form with a list of models to use as choices for filtering the search by. Models should be a string with models in the format ``app_label.model_name`` separated by spaces. The string ``all`` can also be used, in which case the models defined by the ``SEARCH_MODEL_CHOICES`` setting will be used. """
def search_form(context, search_model_names=None):
    """Includes the search form with a list of models to use as choices
    for filtering the search by.

    `search_model_names` is a space separated string of
    ``app_label.model_name`` names, or ``"all"`` to use the models from
    the ``SEARCH_MODEL_CHOICES`` setting.
    """
    template_vars = {"request": context["request"]}
    # Resolve which model names to offer as choices.
    if not search_model_names or not settings.SEARCH_MODEL_CHOICES:
        names = []
    elif search_model_names == "all":
        names = list(settings.SEARCH_MODEL_CHOICES)
    else:
        names = search_model_names.split(" ")
    choices = []
    for model_name in names:
        try:
            model = apps.get_model(*model_name.split(".", 1))
        except LookupError:
            # Unknown model names are silently skipped.
            continue
        verbose_name = model._meta.verbose_name_plural.capitalize()
        choices.append((verbose_name, model_name))
    template_vars["search_model_choices"] = sorted(choices)
    return template_vars
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def richtext_filters(content): """ Takes a value edited via the WYSIWYG editor, and passes it through each of the functions specified by the RICHTEXT_FILTERS setting. """
def richtext_filters(content):
    """Takes a value edited via the WYSIWYG editor, and passes it through
    each of the functions specified by the RICHTEXT_FILTERS setting."""
    for dotted_path in settings.RICHTEXT_FILTERS:
        content = import_dotted_path(dotted_path)(content)
    return content
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def editable(parsed, context, token): """ Add the required HTML to the parsed content for in-line editing, such as the icon and edit form if the object is deemed to be editable - either it has an ``editable`` method which returns ``True``, or the logged in user has change permissions for the model. """
def editable(parsed, context, token):
    """
    Add the required HTML to the parsed content for in-line editing,
    such as the icon and edit form, if the object is deemed editable -
    either it has an ``editable`` method returning ``True``, or the
    logged in user has change permissions for the model.
    """
    def parse_field(field):
        # Resolve "obj.attr" (or "obj.rel.attr") from the template
        # context into (resolved object, final attribute name).
        field = field.split(".")
        obj = context.get(field.pop(0), None)
        attr = field.pop()
        while field:
            obj = getattr(obj, field.pop(0))
            if callable(obj):
                # Allows {% editable page.get_content_model.content %}
                obj = obj()
        return obj, attr
    fields = [parse_field(f) for f in token.split_contents()[1:]]
    if fields:
        # Keep only well-formed pairs referring to the same object as
        # the first field; mixed objects cannot share one edit form.
        fields = [f for f in fields if len(f) == 2 and f[0] is fields[0][0]]
    if not parsed.strip():
        # Nothing between the tags: render the raw field values instead.
        try:
            parsed = "".join([str(getattr(*field)) for field in fields])
        except AttributeError:
            pass
    if settings.INLINE_EDITING_ENABLED and fields and "request" in context:
        obj = fields[0][0]
        if isinstance(obj, Model) and is_editable(obj, context["request"]):
            field_names = ",".join([f[1] for f in fields])
            context["editable_form"] = get_edit_form(obj, field_names)
            context["original"] = parsed
            t = get_template("includes/editable_form.html")
            return t.render(context)
    return parsed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def try_url(url_name): """ Mimics Django's ``url`` template tag but fails silently. Used for url names in admin templates as these won't resolve when admin tests are running. """
def try_url(url_name):
    """
    Mimics Django's ``url`` template tag but fails silently. Used for
    url names in admin templates as these won't resolve when admin
    tests are running.
    """
    from warnings import warn
    warn("try_url is deprecated, use the url tag with the 'as' arg instead.")
    try:
        return reverse(url_name)
    except NoReverseMatch:
        # Fail silently: an empty string renders as no link.
        return ""
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def admin_dropdown_menu(context): """ Renders the app list for the admin dropdown menu navigation. """
def admin_dropdown_menu(context):
    """Renders the app list for the admin dropdown menu navigation."""
    template_vars = context.flatten()
    request = context["request"]
    user = request.user
    if user.is_staff:
        template_vars["dropdown_menu_app_list"] = admin_app_list(request)
        # Superusers see every site; staff only the sites they may manage.
        if user.is_superuser:
            sites = Site.objects.all()
        else:
            try:
                sites = user.sitepermissions.sites.all()
            except ObjectDoesNotExist:
                sites = Site.objects.none()
        template_vars["dropdown_menu_sites"] = list(sites)
        template_vars["dropdown_menu_selected_site_id"] = current_site_id()
        template_vars["settings"] = context["settings"]
        template_vars["request"] = request
    return template_vars
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dashboard_column(context, token): """ Takes an index for retrieving the sequence of template tags from ``yacms.conf.DASHBOARD_TAGS`` to render into the admin dashboard. """
def dashboard_column(context, token):
    """
    Takes an index for retrieving the sequence of template tags from
    ``yacms.conf.DASHBOARD_TAGS`` to render into the admin dashboard.
    """
    column_index = int(token.split_contents()[1])
    rendered = []
    for tag in settings.DASHBOARD_TAGS[column_index]:
        # Each entry is "library.tag_name": load the library, run the tag.
        template = Template("{%% load %s %%}{%% %s %%}" % tuple(tag.split(".")))
        rendered.append(template.render(context))
    return "".join(rendered)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def raise_error(self, message='', errors=None, field_name=None): """Raises a ValidationError. """
def raise_error(self, message='', errors=None, field_name=None):
    """Raises a ValidationError, defaulting to this field's name."""
    raise ValidationError(
        message,
        errors=errors,
        field_name=field_name if field_name else self.field_name,
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate(self, value): """Make sure that value is of the right type """
def validate(self, value):
    """Make sure that value is of the right type"""
    if not isinstance(value, self.nested_klass):
        message = 'NestedClass is of the wrong type: {0} vs expected {1}'.format(
            value.__class__.__name__, self.nested_klass.__name__)
        self.raise_error(message)
    super(NestedDocumentField, self).validate(value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate(self, value): """Make sure that the inspected value is of type `list` or `tuple` """
def validate(self, value):
    """Make sure that the inspected value is of type `list` or `tuple`"""
    # Strings are sequences too, so they must be rejected explicitly.
    is_sequence = (isinstance(value, (list, tuple))
                   and not isinstance(value, str_types))
    if not is_sequence:
        self.raise_error(
            'Only lists and tuples may be used in the ListField vs provided {0}'
            .format(type(value).__name__))
    super(ListField, self).validate(value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate(self, value): """Make sure that the inspected value is of type `dict` """
def validate(self, value):
    """Make sure that the inspected value is of type `dict`"""
    if isinstance(value, dict):
        super(DictField, self).validate(value)
        return
    self.raise_error(
        'Only Python dict may be used in the DictField vs provided {0}'
        .format(type(value).__name__))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def install_cache(expire_after=12 * 3600, cache_post=False): """ Patches the requests library with requests_cache. """
def install_cache(expire_after=12 * 3600, cache_post=False):
    """Patches the requests library with requests_cache."""
    methods = ['GET', 'POST'] if cache_post else ['GET']
    requests_cache.install_cache(
        expire_after=expire_after, allowable_methods=methods)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_url(url, back_off=True, **kwargs): """ Get the content of a URL and return a file-like object. back_off=True provides retry """
def download_url(url, back_off=True, **kwargs):
    """
    Get the content of a URL and return a file-like object.
    back_off=True provides retry
    """
    fetch = _download_with_backoff if back_off else _download_without_backoff
    return fetch(url, as_file=True, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _download_without_backoff(url, as_file=True, method='GET', **kwargs): """ Get the content of a URL and return a file-like object. """
# Make requests consistently hashable for caching. # 'headers' is handled by requests itself. # 'cookies' and 'proxies' contributes to headers. # 'files' and 'json' contribute to data. for k in ['data', 'params']: if k in kwargs and isinstance(kwargs[k], dict): kwargs[k] = OrderedDict(sorted(kwargs[k].items())) kwargs_copy = dict(kwargs) if not _is_url_in_cache(method, url, **kwargs): now = datetime.datetime.now() _rate_limit_for_url(url, now) _rate_limit_touch_url(url, now) L.info("Download {}".format(url)) if 'timeout' not in kwargs_copy: kwargs_copy['timeout'] = _TIMEOUT if 'headers' in kwargs_copy: head_dict = CaseInsensitiveDict(kwargs_copy['headers']) if 'user-agent' not in head_dict: head_dict['user-agent'] = _USER_AGENT kwargs_copy['headers'] = head_dict else: kwargs_copy['headers'] = CaseInsensitiveDict({'user-agent': _USER_AGENT}) response = requests.request(method, url, **kwargs_copy) if logging.getLogger().isEnabledFor(logging.DEBUG): # This can be slow on large responses, due to chardet. L.debug('"{}"'.format(response.text)) response.raise_for_status() if as_file: return BytesIO(response.content) else: return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _is_url_in_cache(*args, **kwargs): """ Return True if request has been cached or False otherwise. """
def _is_url_in_cache(*args, **kwargs):
    """ Return True if request has been cached or False otherwise. """
    # Only include allowed arguments for a PreparedRequest.
    # FIX: inspect.getargspec was removed in Python 3.11;
    # getfullargspec returns the same .args list for this purpose.
    allowed_args = inspect.getfullargspec(
        requests.models.PreparedRequest.prepare).args
    # self is in there as .prepare() is a method.
    allowed_args.remove('self')
    kwargs_cleaned = {
        key: value for key, value in kwargs.items() if key in allowed_args
    }
    prepared_request = _prepare(*args, **kwargs_cleaned)
    request_hash = _get_hash(prepared_request)
    try:
        return requests_cache.get_cache().has_key(request_hash)
    except AttributeError as e:
        # requests_cache not enabled
        if str(e) == "'Session' object has no attribute 'cache'":
            return False
        raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def block_idxmat_sets(idxmat, b): """Reshapes idxmat into the idx vectors for the training set and validation set Parameters: idxmat : ndarray Matrix with N shuffled row indicies assigned to K blocks/columns from the oxyba.block_idxmat_shuffle function b : int Returns: -------- idx_train : ndarray Vector with row indicies of the current training blocks, The vector contains int(N/K)*(K-1) elements. idx_valid : ndarray Vector with row indicies of the current validation block "b". The vector contains int(N/K) elements. Example: -------- K = idxmat.shape[1] for b in range(K): idx_train, idx_valid = block_idxmat_sets(idxmat, b) """
def block_idxmat_sets(idxmat, b):
    """Reshape idxmat into the index vectors for the training set and
    the validation set.

    Parameters:
    -----------
    idxmat : ndarray
        Matrix with N shuffled row indices assigned to K blocks/columns
        from the oxyba.block_idxmat_shuffle function

    b : int
        Column index of the current validation block.

    Returns:
    --------
    idx_train : ndarray
        Vector with row indices of the current training blocks, i.e.
        all columns of idxmat except column b; contains
        int(N/K)*(K-1) elements.

    idx_valid : ndarray
        Vector with row indices of the current validation block "b";
        contains int(N/K) elements.

    Example:
    --------
        K = idxmat.shape[1]
        for b in range(K):
            idx_train, idx_valid = block_idxmat_sets(idxmat, b)
    """
    import numpy as np
    # Bug fix: compare column indices by value ('!='), not identity
    # ('is not').  'is' on ints relies on CPython's small-integer cache
    # and silently breaks for column indices outside that cache.
    idx_train = idxmat[:, [c for c in range(idxmat.shape[1]) if c != b]]
    # Flatten the remaining (N x K-1) sub-matrix into one index vector.
    idx_train = idx_train.reshape((np.prod(idx_train.shape),))
    return idx_train, idxmat[:, b]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_logging(parser, log_format=LOG_FORMAT, log_level=LOG_LEVEL, color=True): """Configures the `argparse.ArgumentParser` with arguments to configure logging. This adds arguments: * ``-v`` to increase the log level * ``-q`` to decrease the log level * ``--color`` to enable color logging when available * ``--no-color`` to disable color logging The root logger is configured with the given format and log level. ANSI color codes are supported in the logging format string. If color is enabled and stderr is a tty, the codes will be passed through. Otherwise the logging formatter will strip them out. The logging format supports these additional format variables for coloration: %(levelcolor)s If stderr is a terminal, an ANSI color code appropriate for the level of the logged record. %(resetcolor)s If stderr is a terminal, an ANSI color reset code. """
# -v / -q stack: each occurrence bumps the effective level up or down.
parser.set_defaults(log_level=log_level)
parser.add_argument('-v', dest='log_level', action=_LogLevelAddAction,
                    const=1, help='use more verbose logging (stackable)')
parser.add_argument('-q', dest='log_level', action=_LogLevelAddAction,
                    const=-1, help='use less verbose logging (stackable)')

root_logger = logging.getLogger()
root_logger.setLevel(log_level)
handler = logging.StreamHandler()  # using sys.stderr

if hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
    # stderr is a terminal: --color/--no-color swap the formatter on
    # the shared handler the moment the option is parsed.
    class ColorAction(argparse.Action):
        def __call__(self, parser, namespace, values, option_string=None):
            setattr(namespace, self.dest, True)
            handler.setFormatter(_ColorLogFormatter(log_format))

    class NoColorAction(argparse.Action):
        def __call__(self, parser, namespace, values, option_string=None):
            setattr(namespace, self.dest, False)
            handler.setFormatter(_NoColorLogFormatter(log_format))

    parser.add_argument('--color', dest='color', action=ColorAction,
                        nargs=0, help='use color in log (when available)')
    parser.add_argument('--no-color', dest='color', action=NoColorAction,
                        nargs=0, help='use no color in log')

    # Initial formatter before any option is seen follows the `color`
    # default passed to this function.
    if color:
        formatter_class = _ColorLogFormatter
    else:
        formatter_class = _NoColorLogFormatter
else:
    # Make the options available, but they don't do anything.
    parser.add_argument('--color', dest='color', action='store_true',
                        help='use color in log (when available)')
    parser.add_argument('--no-color', dest='color', action='store_false',
                        help='use no color in log')
    # Never emit ANSI codes to a non-tty.
    formatter_class = _NoColorLogFormatter

handler.setFormatter(formatter_class(log_format))
root_logger.addHandler(handler)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def assertHeader(self, name, value=None, *args, **kwargs): """ Returns `True` if ``name`` was in the headers and, if ``value`` is True, whether or not the values match, or `False` otherwise. """
return name in self.raw_headers and ( True if value is None else self.raw_headers[name] == value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ConfigureLazyWorkers(self):
""" Lazy workers are instances that are running and reachable but failed to register with the cldb to join the mapr cluster. This tries to find these missing workers and add them to the cluster. """
# Instances that should be workers but never registered with the cldb.
lazy_worker_instances = self.__GetMissingWorkers()
if not lazy_worker_instances:
    return
# Only configure the ones we can actually reach over the network.
reachable_states = self.__AreInstancesReachable(lazy_worker_instances)
reachable_instances = [t[0] for t in
                       zip(lazy_worker_instances, reachable_states) if t[1]]
print 'reachable_instances: %s' % reachable_instances
self.__ConfigureWorkers(reachable_instances)
return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __StartMaster(self): """ Starts a master node, configures it, and starts services. """
num_masters = len(self.cluster.get_instances_in_role("master", "running")) assert(num_masters < 1) logging.info( "waiting for masters to start") if self.config.master_on_spot_instances: self.__LaunchSpotMasterInstances() else: self.__LaunchOnDemandMasterInstances() time.sleep(1) self.__ConfigureMaster() return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __AddWorkers(self, num_to_add): """ Adds workers evenly across all enabled zones."""
# Check preconditions assert(self.__IsWebUiReady()) zone_to_ips = self.__GetZoneToWorkerIpsTable() zone_old_new = [] for zone, ips in zone_to_ips.iteritems(): num_nodes_in_zone = len(ips) num_nodes_to_add = 0 zone_old_new.append((zone, num_nodes_in_zone, num_nodes_to_add)) print 'num_to_add %s' % num_to_add for _ in range(num_to_add): zone_old_new.sort(key= lambda z : z[1]+z[2]) zt = zone_old_new[0] zone_old_new[0] = (zt[0], zt[1], zt[2]+1) #print zone_old_new zone_plan = [(zt[2], zt[0]) for zt in zone_old_new] print 'resize plan' if self.config.workers_on_spot_instances: new_worker_instances = self.__LaunchSpotWorkerInstances(zone_plan) else: new_worker_instances = self.__LaunchOnDemandWorkerInstances(zone_plan) self.__WaitForInstancesReachable(new_worker_instances) self.__ConfigureWorkers(new_worker_instances) return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __IpsToServerIds(self): """ Get list of mapping of ip address into a server id"""
master_instance = self.__GetMasterInstance() assert(master_instance) retval, response = self.__RunMaprCli('node list -columns id') ip_to_id = {} for line_num, line in enumerate(response.split('\n')): tokens = line.split() if len(tokens) == 3 and tokens[0] != 'id': instance_id = tokens[0] ip = tokens[2] ip_to_id[ip] = instance_id return ip_to_id
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def contents(self): """ This method downloads the contents of the file represented by a `GettFile` object's metadata. Input: * None Output: * A byte stream **NOTE**: You are responsible for handling any encoding/decoding which may be necessary. Example:: file = client.get_file("4ddfds", 0) print file.contents() """
response = GettRequest().get("/files/%s/%s/blob" % (self.sharename, self.fileid)) return response.response
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def thumbnail(self):
""" This method returns a thumbnail representation of the file if the data is a supported graphics format. Input: * None Output: * A byte stream representing a thumbnail of a supported graphics file Example:: file = client.get_file("4ddfds", 0) open("thumbnail.jpg", "wb").write(file.thumbnail()) """
response = GettRequest().get("/files/%s/%s/blob/thumb" % (self.sharename, self.fileid)) return response.response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def send_data(self, **kwargs): """ This method transmits data to the Gett service. Input: * ``put_url`` A PUT url to use when transmitting the data (required) * ``data`` A byte stream (required) Output: * ``True`` Example:: if file.send_data(put_url=file.upload_url, data=open("example.txt", "rb").read()): print "Your file has been uploaded." """
def send_data(self, **kwargs):
    """
    Transmit data to the Gett service.

    Input:
        * ``put_url`` A PUT url to use when transmitting the data
          (defaults to this object's ``put_upload_url``)
        * ``data`` A byte stream (required)

    Output:
        * ``True`` on HTTP 200, ``False`` otherwise.  (Bug fix: the
          original fell off the end and returned an implicit ``None``
          on failure despite documenting a boolean; ``False`` keeps
          truthiness identical for existing callers.)

    Raises:
        * ``AttributeError`` if ``data`` is missing or no usable
          ``put_url`` is available
        * ``TypeError`` if ``data`` is not of type ``str``
    """
    # Prefer an explicit put_url; only touch self.put_upload_url when
    # the caller did not supply one.
    if 'put_url' in kwargs:
        put_url = kwargs['put_url']
    else:
        put_url = self.put_upload_url

    if 'data' not in kwargs:
        raise AttributeError("'data' parameter is required")

    if not put_url:
        raise AttributeError("'put_url' cannot be None")

    if not isinstance(kwargs['data'], str):
        raise TypeError("'data' parameter must be of type 'str'")

    response = GettRequest().put(put_url, kwargs['data'])
    return response.http_status == 200
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def __signal(self, sig, verbose=None):
    '''
    Helper preventing code duplication when sending signals.

    :param sig: Signal to use (e.g. "HUP", "ALRM")
    :param verbose: Overwrite :func:`photon.Photon.m`'s `verbose`
    :returns: |kill_return| with specified `pid`

    .. |kill_return| replace::
        :func:`photon.Photon.m`'s result of killing `pid`

    .. |kill_verbose| replace:: with visible shell warning
    '''
    # Delegate to self.m so the kill is logged and optionally verbose;
    # self.__sudo presumably holds the sudo prefix (or '') -- confirm.
    kill_cmd = '%s kill -%s %d' % (self.__sudo, sig, self.__pid)
    return self.m(
        'killing process %s with "%s"' % (self.__pid, sig),
        cmdd=dict(cmd=kill_cmd),
        verbose=verbose
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def linreg_mle(y, X, algorithm='Nelder-Mead', debug=False): """MLE for Linear Regression Model Parameters: y : ndarray target variable with N observations X : ndarray The <N x C> design matrix with C independent variables, features, factors, etc. algorithm : str Optional. Default 'Nelder-Mead' (Simplex). The algorithm used in scipy.optimize.minimize debug : bool Optional. Returns: -------- beta : ndarray Estimated regression coefficients. results : scipy.optimize.optimize.OptimizeResult Optional. If debug=True then only scipy's optimization result variable is returned. """
def linreg_mle(y, X, algorithm='Nelder-Mead', debug=False):
    """MLE for the Linear Regression Model.

    Parameters:
    -----------
    y : ndarray
        target variable with N observations

    X : ndarray
        The <N x C> design matrix with C independent variables,
        features, factors, etc.

    algorithm : str
        Optional. Default 'Nelder-Mead' (Simplex). One of
        'Nelder-Mead', 'CG' or 'BFGS' as accepted by
        scipy.optimize.minimize.

    debug : bool
        Optional. If True, return scipy's full OptimizeResult instead
        of the coefficient vector.

    Returns:
    --------
    beta : ndarray
        Estimated regression coefficients (the trailing sigma
        parameter of the likelihood is stripped).

    results : scipy.optimize.optimize.OptimizeResult
        Optional. Only if debug=True.

    Raises:
    -------
    ValueError
        If `algorithm` is not supported.  (Bug fix: was a bare
        ``Exception``; ``ValueError`` is the idiomatic type and, being
        an ``Exception`` subclass, stays backward compatible.)
    """
    import numpy as np
    import scipy.stats as sstat
    import scipy.optimize as sopt

    # Validate eagerly, before doing any work.
    if algorithm not in ('Nelder-Mead', 'CG', 'BFGS'):
        raise ValueError('Optimization Algorithm not supported.')

    def objective_nll_linreg(theta, y, X):
        # theta[:-1] are the regression coefficients, theta[-1] is the
        # error standard deviation; minimize the negative log-likelihood.
        yhat = np.dot(X, theta[:-1])  # =X*beta
        return -1.0 * sstat.norm.logpdf(y, loc=yhat, scale=theta[-1]).sum()

    # Start values: all ones (C coefficients plus sigma).
    theta0 = np.ones((X.shape[1] + 1, ))

    results = sopt.minimize(
        objective_nll_linreg, theta0,
        args=(y, X),
        method=algorithm,
        options={'disp': False})

    if debug:
        return results

    # Strip sigma; return only the coefficient estimates.
    return results.x[:-1]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fill(self, paths): """ Initialise the tree. paths is a list of strings where each string is the relative path to some file. """
for path in paths:
    tree = self.tree
    parts = tuple(path.split('/'))
    # Everything but the last component is a directory; the last is
    # the file name itself.
    dir_parts = parts[:-1]
    built = ()
    for part in dir_parts:
        # Cache the *parent* node under the prefix built so far.
        self.cache[built] = tree
        built += (part, )
        parent = tree
        tree = parent.folders.get(part, empty)
        if tree is empty:
            # First time this folder is seen: create its node (named by
            # its full prefix tuple) and link it into the parent.
            tree = parent.folders[part] = TreeItem(name=built, folders={}, files=set(), parent=parent)
    # Map the full directory prefix to its node and record the file.
    self.cache[dir_parts] = tree
    tree.files.add(parts[-1])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove(self, prefix, name): """ Remove a path from the tree prefix is a tuple of the parts in the dirpath name is a string representing the name of the file itself. Any empty folders from the point of the file backwards to the root of the tree is removed. """
tree = self.cache.get(prefix, empty) if tree is empty: return False if name not in tree.files: return False tree.files.remove(name) self.remove_folder(tree, list(prefix)) return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_folder(self, tree, prefix): """ Used to remove any empty folders If this folder is empty then it is removed. If the parent is empty as a result, then the parent is also removed, and so on. """
while True:
    # Move one level up the tree each iteration.
    child = tree
    tree = tree.parent
    if not child.folders and not child.files:
        # Child is now empty: drop its cache entry (keyed by its full
        # prefix) and unlink it from its parent -- prefix.pop() yields
        # the child's own folder name.
        del self.cache[tuple(prefix)]
        if tree:
            del tree.folders[prefix.pop()]
    # Stop at the root or at the first non-empty ancestor.
    if not tree or tree.folders or tree.files:
        break
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def can_manage(user, semester=None, pool=None, any_pool=False): """ Whether a user is allowed to manage a workshift semester. This includes the current workshift managers, that semester's workshift managers, and site superusers. """
# Managers explicitly attached to this semester.
if semester and user in semester.workshift_managers.all():
    return True
# 'Manager' may be falsy (e.g. unavailable managers app) -- guard
# before querying.  TODO confirm the import-time fallback elsewhere.
if Manager and Manager.objects.filter(
        incumbent__user=user,
        workshift_manager=True,
        ).count() > 0:
    return True
# Managers of the specific pool in question.
if pool and pool.managers.filter(incumbent__user=user).count() > 0:
    return True
# Managers of *any* pool at all (queryset truthiness == non-empty).
if any_pool and WorkshiftPool.objects.filter(
        managers__incumbent__user=user,
        ):
    return True
# Site admins as the final fallback.
return user.is_superuser or user.is_staff
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_year_season(day=None): """ Returns a guess of the year and season of the current semester. """
if day is None: day = date.today() year = day.year if day.month > 3 and day.month <= 7: season = Semester.SUMMER elif day.month > 7 and day.month <= 10: season = Semester.FALL else: season = Semester.SPRING if day.month > 10: year += 1 return year, season
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_semester_start_end(year, season): """ Returns a guess of the start and end dates for given semester. """
if season == Semester.SPRING: start_month, start_day = 1, 20 end_month, end_day = 5, 17 elif season == Semester.SUMMER: start_month, start_day = 5, 25 end_month, end_day = 8, 16 else: start_month, start_day = 8, 24 end_month, end_day = 12, 20 return date(year, start_month, start_day), date(year, end_month, end_day)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def randomly_assign_instances(semester, pool, profiles=None, instances=None): """ Randomly assigns workshift instances to profiles. Returns ------- list of workshift.WorkshiftProfile list of workshift.WorkshiftInstance """
if profiles is None:
    profiles = WorkshiftProfile.objects.filter(semester=semester)
if instances is None:
    # Unassigned, still-open instances in this pool, excluding shift
    # types that are never auto-assigned.
    instances = WorkshiftInstance.objects.filter(
        Q(info__pool=pool) | Q(weekly_workshift__pool=pool),
        workshifter__isnull=True,
        closed=False,
        ).exclude(
        weekly_workshift__workshift_type__assignment=WorkshiftType.NO_ASSIGN,
        )
instances = list(instances)
profiles = list(profiles)

# List of hours assigned to each profile
hours_mapping = defaultdict(float)
total_hours_owed = defaultdict(float)
semester_weeks = (semester.end_date - semester.start_date).days / 7

# Initialize with already-assigned instances
for profile in profiles:
    for shift in profile.instance_workshifter.filter(
            Q(info__pool=pool) | Q(weekly_workshift__pool=pool)
            ):
        hours_mapping[profile] += float(shift.hours)

    pool_hours = profile.pool_hours.get(pool=pool)
    if pool.weeks_per_period == 0:
        # weeks_per_period == 0 means hours are owed once per semester.
        total_hours_owed[profile] = pool_hours.hours
    else:
        periods = semester_weeks / pool.weeks_per_period
        total_hours_owed[profile] = periods * float(pool_hours.hours)

# Round-robin over profiles, giving each a random open instance per
# pass, until everyone owes no more hours or instances run out.
while profiles and instances:
    for profile in profiles[:]:
        instance = random.choice(instances)
        instance.workshifter = profile
        instance.save(update_fields=["workshifter"])

        instance.logs.add(
            ShiftLogEntry.objects.create(
                person=instance.workshifter,
                entry_type=ShiftLogEntry.ASSIGNED,
                note="Randomly assigned.",
                )
            )

        instances.remove(instance)
        hours_mapping[profile] += float(instance.hours)

        # Profile has reached its owed hours: stop assigning to it.
        if hours_mapping[profile] >= total_hours_owed[profile]:
            profiles.remove(profile)

        if not instances:
            break

# Leftovers: profiles still owing hours and/or unassigned instances.
return profiles, instances
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clear_all_assignments(semester=None, pool=None, shifts=None): """ Clears all regular workshift assignments. Parameters semester : workshift.models.Semester, optional pool : workshift.models.WorkshiftPool, optional If set, grab workshifts from a specific pool. Otherwise, the primary workshift pool will be used. shifts : list of workshift.models.RegularWorkshift, optional """
if semester is None:
    try:
        semester = Semester.objects.get(current=True)
    except (Semester.DoesNotExist, Semester.MultipleObjectsReturned):
        # NOTE(review): returns [] here but implicit None on the normal
        # path -- callers should not rely on the return value.
        return []
if pool is None:
    # Default to the semester's primary workshift pool.
    pool = WorkshiftPool.objects.get(
        semester=semester,
        is_primary=True,
        )
if shifts is None:
    # Only auto-assigned, non-manager shifts are cleared by default.
    shifts = RegularWorkshift.objects.filter(
        pool=pool,
        is_manager_shift=False,
        workshift_type__assignment=WorkshiftType.AUTO_ASSIGN,
        )
for shift in shifts:
    shift.current_assignees.clear()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_standings(semester=None, pool_hours=None, moment=None): """ This function acts to update a list of PoolHours objects to adjust their current standing based on the time in the semester. Parameters semester : workshift.models.Semester, optional pool_hours : list of workshift.models.PoolHours, optional If None, runs on all pool hours for semester. moment : datetime, optional """
if semester is None:
    try:
        semester = Semester.objects.get(current=True)
    except (Semester.DoesNotExist, Semester.MultipleObjectsReturned):
        return []
if moment is None:
    moment = localtime(now())
if pool_hours is None:
    pool_hours = PoolHours.objects.filter(pool__semester=semester)
for hours in pool_hours:
    # Don't update hours after the semester ends
    if hours.last_updated and \
       hours.last_updated.date() > semester.end_date:
        continue

    # Calculate the number of periods (n weeks / once per semester) that
    # have passed since last update
    periods = hours.periods_since_last_update(moment=moment)

    # Update the actual standings
    if periods > 0:
        # Each elapsed period deducts one period's worth of owed hours.
        hours.standing -= hours.hours * periods
        hours.last_updated = moment
        hours.save(update_fields=["standing", "last_updated"])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reset_standings(semester=None, pool_hours=None): """ Utility function to recalculate workshift standings. This function is meant to only be called from the manager shell, it is not referenced anywhere else in the workshift module. Parameters semester : workshift.models.Semester, optional pool_hours : list of workshift.models.PoolHours, optional """
if semester is None:
    try:
        semester = Semester.objects.get(current=True)
    except (Semester.DoesNotExist, Semester.MultipleObjectsReturned):
        return
if pool_hours is None:
    pool_hours = PoolHours.objects.filter(pool__semester=semester)
for hours in pool_hours:
    # Start from scratch: forget the last update and reset the standing
    # to the manual adjustment baseline.
    hours.last_updated = None
    hours.standing = hours.hour_adjustment
    profile = WorkshiftProfile.objects.get(pool_hours=hours)
    # Replay every closed instance this profile was responsible for,
    # either as the original workshifter or as the liable party.
    instances = WorkshiftInstance.objects.filter(
        Q(weekly_workshift__pool=hours.pool) | Q(info__pool=hours.pool),
        Q(workshifter=profile) | Q(liable=profile),
        closed=True,
        )
    for instance in instances:
        # Blown shifts cost hours; completed ones earn them.
        if instance.blown:
            hours.standing -= instance.hours
        else:
            hours.standing += instance.hours
    hours.save(update_fields=["standing", "last_updated"])
# Finally, fold in the portion of the semester elapsed so far.
update_standings(
    semester=semester,
    pool_hours=pool_hours,
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calculate_assigned_hours(semester=None, profiles=None): """ Utility function to recalculate the assigned workshift hours. This function is meant to only be called from the manager shell, it is not referenced anywhere else in the workshift module. Parameters semester : workshift.models.Semester, optional profiles : list of workshift.models.WorkshiftProfile, optional """
if semester is None: try: semester = Semester.objects.get(current=True) except (Semester.DoesNotExist, Semester.MultipleObjectsReturned): return if profiles is None: profiles = WorkshiftProfile.objects.filter(semester=semester) for profile in profiles: for pool_hours in profile.pool_hours.all(): shifts = RegularWorkshift.objects.filter( current_assignees=profile, pool=pool_hours.pool, active=True, ) pool_hours.assigned_hours = sum(shift.hours for shift in shifts) pool_hours.save(update_fields=["assigned_hours"])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reset_instance_assignments(semester=None, shifts=None): """ Utility function to reset instance assignments. This function is meant to only be called from the manager shell, it is not referenced anywhere else in the workshift module. Parameters semester : workshift.models.Semester, optional shifts : list of workshift.models.RegularWorkshift, optional """
if semester is None:
    try:
        semester = Semester.objects.get(current=True)
    except (Semester.DoesNotExist, Semester.MultipleObjectsReturned):
        return
if shifts is None:
    shifts = RegularWorkshift.objects.filter(
        pool__semester=semester,
        )
for shift in shifts:
    instances = WorkshiftInstance.objects.filter(
        closed=False,
        weekly_workshift=shift,
        ).order_by("date")
    # Pad the assignee list with None up to the shift's slot count so
    # unfilled slots leave their instances unassigned.
    assignees = list(shift.current_assignees.all())
    assignees += [None] * (shift.count - len(assignees))
    # Track dates per assignee so one person never gets two instances
    # of the same shift on the same day.
    dates = defaultdict(set)
    # cycle() round-robins the (padded) assignees across the
    # date-ordered instances.
    for assignee, instance in zip(cycle(assignees), instances):
        if assignee is not None:
            if instance.date in dates[assignee.pk]:
                continue
            dates[assignee.pk].add(instance.date)
        instance.workshifter = assignee
        instance.liable = None
        instance.save(update_fields=["workshifter", "liable"])
        instance.logs.add(
            ShiftLogEntry.objects.create(
                person=instance.workshifter,
                entry_type=ShiftLogEntry.ASSIGNED,
                note="Manager reset assignment.",
                )
            )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pid_exists(pid):
""" Determines if a system process identifier exists in process table. """
def pid_exists(pid):
    """ Determines if a system process identifier exists in process table.

        `pid`
            Process identifier to check.

        Returns boolean.
        """
    # Bug fix: non-positive values address process *groups* (0 is the
    # caller's own group, negative is group |pid|), so os.kill(pid, 0)
    # would succeed and the original wrongly reported e.g. -1 as a
    # live pid.  No ordinary process-table entry has pid <= 0.
    if pid <= 0:
        return False
    try:
        # Signal 0 performs existence/permission checks without
        # actually delivering a signal.
        os.kill(pid, 0)
    except OSError as exc:
        # EPERM: the process exists but we may not signal it.
        # Anything else (ESRCH) means it does not exist.
        return exc.errno == errno.EPERM
    else:
        return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def daemonize(pid_file, working_dir, func): """ Turns the current process into a daemon. `pid_file` File path to use as pid lock file for daemon. `working_dir` Working directory to switch to when daemon starts. `func` Callable to run after daemon is forked. """
def _fork():
    """ Fork a child process.

        Returns ``False`` if fork failed; otherwise, we are inside
        the new child process.
        """
    try:
        pid = os.fork()
        if pid > 0:
            os._exit(0)  # exit parent
        return True
    except OSError:
        return False

def _register_pidfile(filename):
    """ Registers a pid file for the current process which will
        cleaned up when the process terminates.

        `filename`
            Filename to save pid to.
        """
    if common.writefile(filename, str(os.getpid()) + os.linesep):
        os.chmod(filename, 0644)  # rw-r--r--

    def _cleanup_pid():
        """ Removes pidfile.
            """
        common.safe_remove_file(filename)
    atexit.register(_cleanup_pid)

# All three arguments are required for a meaningful daemon.
if not pid_file or not working_dir or not func:
    return

if not os.path.isfile(pid_file):  # enforce pid lock file
    # attempt first fork
    if not _fork():
        return

    # classic double-fork + setsid: fully detach from the controlling
    # terminal and the parent session
    try:
        # detach from current environment
        os.chdir(working_dir)
        os.setsid()
        os.umask(0)
    except OSError:
        return

    # attempt second fork
    if not _fork():
        return

    # we'll ignore closing file descriptors..
    # redirecting the streams should be sufficient

    # redirect streams to /dev/null
    _fd = os.open(os.devnull, os.O_RDWR)
    os.dup2(_fd, sys.stdin.fileno())
    os.dup2(_fd, sys.stdout.fileno())
    os.dup2(_fd, sys.stderr.fileno())

    # setup pidfile
    _register_pidfile(pid_file)

    # execute provided callable
    func()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def shell_focusd(data_dir): """ Shells a new instance of a focusd daemon process. `data_dir` Home directory for focusd data. Returns boolean. * Raises ``ValueError`` if sudo used and all passwords tries failed. """
command = 'focusd {0}'.format(data_dir) # see what event hook plugins are registered plugins = registration.get_registered(event_hooks=True) if not plugins: # none registered, bail raise errors.NoPluginsRegistered # do any of the plugins need root access? # if so, wrap command with sudo to escalate privs, if not already root needs_root = any(p for p in plugins if p.needs_root) if needs_root and os.getuid() != 0: # only if not already root command = 'sudo ' + command else: needs_root = False # shell the daemon process _, code = common.shell_process(command, exitcode=True) if code == 1 and needs_root: # passwords failed? raise ValueError return code == 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def focusd(task): """ Forks the current process as a daemon to run a task. `task` ``Task`` instance for the task to run. """
# A root command server is only worthwhile when root-level event
# plugins exist *and* we actually are root.
if registration.get_registered(event_hooks=True, root_access=True):
    start_cmd_srv = (os.getuid() == 0)
else:
    start_cmd_srv = False

def _run():
    Focusd(task).run(start_cmd_srv)

# detach and run the task inside its own daemon process
daemonize(get_daemon_pidfile(task), task.task_dir, _run)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _reg_sighandlers(self): """ Registers signal handlers to this class. """
# Any child process exiting (SIGCHLD) or an explicit termination
# request (SIGTERM) triggers a full daemon shutdown.
def _handler(signum, frame):
    self.shutdown()

signal.signal(signal.SIGCHLD, _handler)
signal.signal(signal.SIGTERM, _handler)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _drop_privs(self): """ Reduces effective privileges for this process to that of the task owner. The umask and environment variables are also modified to recreate the environment of the user. """
uid = self._task.owner # get pwd database info for task owner try: pwd_info = pwd.getpwuid(uid) except OSError: pwd_info = None # set secondary group ids for user, must come first if pwd_info: try: gids = [g.gr_gid for g in grp.getgrall() if pwd_info.pw_name in g.gr_mem] gids.append(pwd_info.pw_gid) os.setgroups(gids) except OSError: pass # set group id, must come before uid try: os.setgid(pwd_info.pw_gid) except OSError: pass # set user id try: os.setuid(uid) # update user env variables if pwd_info: for k in ('USER', 'USERNAME', 'SHELL', 'HOME'): if k in os.environ: if k in ('USER', 'USERNAME'): val = pwd_info.pw_name elif k == 'SHELL': val = pwd_info.pw_shell elif k == 'HOME': val = pwd_info.pw_dir # update value os.environ[k] = val # remove unneeded env variables keys = [] for k, _ in os.environ.iteritems(): if k.startswith('SUDO_') or k == 'LOGNAME': keys.append(k) for k in keys: del os.environ[k] except OSError: pass # set default umask try: os.umask(022) except OSError: pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def shutdown(self): """ Shuts down the daemon process. """
if not self._exited: self._exited = True # signal task runner to terminate via SIGTERM if self._task_runner.is_alive(): self._task_runner.terminate() # if command server is running, then block until # task runner completes so it has time to use # the command server to clean up root plugins if self._command_server.is_alive(): if self._task_runner.is_alive(): self._task_runner.join() _shutdown_pipe(self._pipe) self._task.stop()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self, start_command_srv): """ Setup daemon process, start child forks, and sleep until events are signalled. `start_command_srv` Set to ``True`` if command server should be started. """
if start_command_srv:
    # The command server must be forked *before* the task runner so
    # the runner can communicate with it over the established pipe.
    self._command_server.start()

    # From here on everything except the command server runs without
    # root; the server alone keeps root for the known, logged commands
    # that root plugins need.
    self._drop_privs()

# fork the task runner and wire up our signal handlers
self._task_runner.start()
self._reg_sighandlers()

# idle until a signal or a state change ends the daemon
while self.running:
    time.sleep(self._sleep_period)

self.shutdown()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def running(self): """ Determines if daemon is active. Returns boolean. """
# check if task is active and pid file exists return (not self._exited and os.path.isfile(self._pidfile) and self._task.active)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _register_sigterm(self): """ Registers SIGTERM signal handler. """
# ensure a graceful shutdown when we are asked to terminate
def _on_sigterm(signum, frame):
    self.shutdown()

signal.signal(signal.SIGTERM, _on_sigterm)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self): """ Main process loop. """
self._prepare()

while self.running:
    # a _run() returning exactly False requests an early stop
    status = self._run()
    if status is False:
        break
    time.sleep(self._sleep_period)

self.shutdown()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _setup_root_plugins(self): """ Injects a `run_root` method into the registered root event plugins. """
# Build a ``run_root`` closure bound to this task runner and graft it onto
# every registered root-access event plugin.
def run_root(_self, command):
    """ Executes a shell command as root.

        `command`
          Shell command string.

        Returns boolean.
        """

    # ``_self`` is the plugin instance the method is injected into;
    # ``self`` (closed over) is the task runner owning the pipe/lock.
    try:
        # get lock, so this plugin has exclusive access to command pipe
        self._rlock.acquire()

        # TODO: log root command for this plugin

        # wire protocol: opcode and argument joined by the \x80 byte
        self._cmd_pipe.send_bytes('\x80'.join(['SHL', command]))
        res = self._cmd_pipe.recv_bytes()

        if res != 'TRM':  # sentinel value, shutdown
            return res == 'OK'

    except (EOFError, IOError):
        pass

    finally:
        self._rlock.release()

    # reaching here means the server sent 'TRM' or the pipe broke:
    # shut down the task runner without running end hooks.
    self.shutdown(skip_hooks=True)
    return False

# inject method into each event plugin
for plugin in registration.get_registered(event_hooks=True,
                                          root_access=True):
    plugin.run_root = types.MethodType(run_root, plugin)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _run_events(self, shutdown=False): """ Runs event hooks for registered event plugins. `shutdown` Set to ``True`` to run task_end events; otherwise, run task_run events. """
# Fire registered event hooks for the current task lifecycle stage.
#
# `shutdown`
#   Set to ``True`` to run task_end events; otherwise, run task_run events.

# run task_start events exactly once, before the first task_run/task_end
if not self._ran_taskstart:
    self._ran_taskstart = True
    registration.run_event_hooks('task_start', self._task)

# run events
event = 'task_end' if shutdown else 'task_run'
registration.run_event_hooks(event, self._task)

# reclaim any subprocesses plugins may have forked.
# BUGFIX: waitpid() takes os.WNOHANG, not os.P_NOWAIT (a spawn() mode
# flag). The two constants happen to share the value 1 on Linux, which
# masked the error; WNOHANG is the documented, portable option.
try:
    os.waitpid(-1, os.WNOHANG)
except OSError:
    pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _process_commands(self): """ Processes commands received and executes them accordingly. Returns ``True`` if successful, ``False`` if connection closed or server terminated. """
try:
    # poll for data, so we don't block forever
    if self._cmd_pipe.poll(1):  # 1 sec timeout
        payload = self._cmd_pipe.recv_bytes()

        if payload:
            # segment payload: opcode [, argument] separated by \x80
            parts = payload.split('\x80', 2)
            _op = parts[0]

            # terminate operation -- reuse the EOFError path below so
            # termination and a broken pipe are handled identically
            if _op == 'TRM':
                raise EOFError

            # shell command operation
            elif _op == 'SHL' and len(parts) == 2:
                command = parts[1]

                if command:
                    # run command and return success or fail
                    res = common.shell_process(command)

                    if res is not None:
                        self._cmd_pipe.send_bytes('OK')
                        return True

            # everything else, should reply with "FAIL"
            self._cmd_pipe.send_bytes('FAIL')

except (EOFError, IOError):
    return False

else:
    # no exception raised: connection is still healthy
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_settings(cls, settings): """Read Mongodb Source configuration from the provided settings"""
if not 'mongodb' in settings or not 'collection' in settings or \ settings['mongodb'] == '' or settings['collection'] == '': raise Exception( "Erroneous mongodb settings, " "needs a collection and mongodb setting", settings) cx_uri = urlparse.urlsplit(settings["mongodb"]) db_name = cx_uri.path if '?' in db_name: db_name, query = db_name.split('?', 1) db_name = db_name[1:] if db_name == "": raise Exception( "Erroneous mongodb settings, " "missing db_name", settings) cx_uri = urlparse.urlunsplit( (cx_uri.scheme, cx_uri.netloc, "/", cx_uri.query, cx_uri.fragment)) options = copy.deepcopy(settings) del options['mongodb'] del options['collection'] return Mongodb( cls.connection_for_uri(cx_uri), db_name, settings['collection'], options)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_key(cli, familly, discriminant=None): """This function is used to compute a unique key from all connection parameters."""
hash_key = hashlib.sha256() hash_key.update(familly) hash_key.update(cli.host) hash_key.update(cli.user) hash_key.update(cli.password) if discriminant: if isinstance(discriminant, list): for i in discriminant: if i is not None and i is not False: hash_key.update(str(i)) elif isinstance(discriminant, tuple): for i in discriminant: if i is not None and i is not False: hash_key.update(str(i)) else: hash_key.update(discriminant) hash_key = hash_key.hexdigest() cli.log.debug("hash_key: " + hash_key) return hash_key
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def override_familly(self, args): """Look in the current wrapped object to find a cache configuration to override the current default configuration."""
# Let the wrapped resource's ``cache`` config override the decorator's
# default familly settings; a familly must end up set either way.
resourceapi = args[0]
cache_cfg = resourceapi.cache
# dict.has_key() was removed in Python 3; the 'in' operator is the
# direct equivalent and works on both Python 2 and 3.
if 'familly' in cache_cfg:
    self.familly = cache_cfg['familly']
if 'whole_familly' in cache_cfg:
    self.whole_familly = cache_cfg['whole_familly']
if self.familly is None:
    raise Exception("Invalid familly value for Cache decorator.")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def friendly_type_name(raw_type: typing.Type) -> str: """ Returns a user-friendly type name :return: user friendly type as string """
try: return _TRANSLATE_TYPE[raw_type] except KeyError: LOGGER.error('unmanaged value type: %s', raw_type) return str(raw_type)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def loads(content): """Loads variable definitions from a string."""
# Parse KEY=VALUE definitions, keeping each entry's original line index
# so duplicate-variable errors can report both occurrences.
lines = _group_lines(line for line in content.split('\n'))
lines = [
    (i, _parse_envfile_line(line))
    for i, line in lines
    if line.strip()
]
errors = []

# Reject files with duplicate variables (no sane default).
duplicates = _find_duplicates(((i, line[0]) for i, line in lines))
for i, variable, j in duplicates:
    # +1 converts zero-based indices to human-readable line numbers
    errors.append(''.join([
        'Line %d: duplicate environment variable "%s": ',
        'already appears on line %d.',
    ]) % (i + 1, variable, j + 1)
    )

# Done!
if errors:
    raise ValueError(errors)
return {k: v for _, (k, v) in lines}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self): """ Loads the configuration and returns it as a dictionary """
with open(self.filename, 'r') as f: self.config = ujson.load(f) return self.config
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set(self, option, value): """ Sets an option to a value. """
# Lazily create the config mapping on first use, then record the option.
config = self.config
if config is None:
    config = {}
    self.config = config
config[option] = value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(self, option): """ Deletes an option if exists """
# Remove the option only when a config is loaded and actually holds it;
# a missing option is silently ignored.
if self.config is not None and option in self.config:
    del self.config[option]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self): """ Saves the configuration """
# Persist the in-memory configuration to disk as pretty-printed JSON.
with open(self.filename, 'w') as out_file:
    ujson.dump(self.config, out_file, indent=4)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def timestamp(stamp, tolerance=150): """Validate timestamp specified by request. See `validate.request` for additional info. Args: stamp: str. Time request was made as ISO 8601 timestamp. tolerance: int. Number of seconds request remains valid from timestamp. Returns bool: True if valid, False otherwise. """
try:
    # the request stays valid for ``tolerance`` seconds past its stamp
    tolerance = datetime.timedelta(0, tolerance)
    timestamp_low = dateutil.parser.parse(stamp)
    timestamp_high = timestamp_low + tolerance
    # evaluate "now" in the stamp's own timezone so aware and naive
    # datetimes are never mixed in the comparison below
    now = datetime.datetime.now(timestamp_low.tzinfo)
except ValueError:
    # unparseable timestamp -> invalid request rather than an exception
    return False

return now >= timestamp_low and now <= timestamp_high
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def signature_cert_chain_url(url): """Validate URL specified by SignatureCertChainUrl. See `validate.request` for additional info. Args: url: str. SignatureCertChainUrl header value sent by request. Returns: bool: True if valid, False otherwise. """
r = urlparse(url)

if not r.scheme.lower() == 'https':
    warnings.warn('Certificate URL scheme is invalid.')
    return False

if not r.hostname.lower() == 's3.amazonaws.com':
    warnings.warn('Certificate URL hostname is invalid.')
    return False

# normpath collapses any '..' segments before the prefix test.
# NOTE(review): on Windows normpath rewrites '/' to '\\', which would make
# this check always fail -- confirm the service only runs on POSIX.
if not os.path.normpath(r.path).startswith('/echo.api/'):
    warnings.warn('Certificate URL path is invalid.')
    return False

# an omitted port is acceptable; only an explicit non-443 port fails
if r.port and not r.port == 443:
    warnings.warn('Certificate URL port is invalid.')
    return False

return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def retrieve(url): """Retrieve and parse PEM-encoded X.509 certificate chain. See `validate.request` for additional info. Args: url: str. SignatureCertChainUrl header value sent by request. Returns: list or bool: If url is valid, returns the certificate chain as a list of cryptography.hazmat.backends.openssl.x509._Certificate certificates where certs[0] is the first certificate in the file; if url is invalid, returns False. """
try:
    pem_data = urlopen(url).read()
except (ValueError, HTTPError):
    # malformed URL or HTTP failure -> chain unavailable, not an error
    warnings.warn('Certificate URL is invalid.')
    return False

# On Python 3 urlopen returns bytes, which must be decoded before the
# text-based PEM parsing below.
# NOTE(review): the string comparison works for the '2'/'3' major digits,
# but sys.version_info >= (3,) would be the robust spelling.
if sys.version >= '3':
    try:
        pem_data = pem_data.decode()
    except(UnicodeDecodeError):
        warnings.warn('Certificate encoding is not utf-8.')
        return False

return _parse_pem_data(pem_data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parse_pem_data(pem_data): """Parse PEM-encoded X.509 certificate chain. Args: pem_data: str. PEM file retrieved from SignatureCertChainUrl. Returns: list or bool: If url is valid, returns the certificate chain as a list of cryptography.hazmat.backends.openssl.x509._Certificate certificates where certs[0] is the first certificate in the file; if url is invalid, returns False. """
sep = '-----BEGIN CERTIFICATE-----' cert_chain = [six.b(sep + s) for s in pem_data.split(sep)[1:]] certs = [] load_cert = x509.load_pem_x509_certificate for cert in cert_chain: try: certs.append(load_cert(cert, default_backend())) except ValueError: warnings.warn('Certificate is invalid.') return False return certs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cert_chain(certs): """Validate PEM-encoded X.509 certificate chain. See `validate.request` for additional info. Args: certs: list. The certificate chain as a list of cryptography.hazmat.backends.openssl.x509._Certificate certificates. See `validate.retrieve` to create certs obj. Returns: bool: True if valid, False otherwise. """
# NOTE(review): the guard tests ``< 2`` but the warning says "< 3
# certificates" -- confirm which minimum chain length is intended.
if len(certs) < 2:
    warnings.warn('Certificate chain contains < 3 certificates.')
    return False

cert = certs[0]
today = datetime.datetime.today()

# the leaf certificate must currently be inside its validity window
if not today > cert.not_valid_before:
    warnings.warn('Certificate Not Before date is invalid.')
    return False

if not today < cert.not_valid_after:
    warnings.warn('Certificate Not After date is invalid.')
    return False

# the leaf must name Amazon's Echo API among its Subject Alternative Names
oid_san = x509.oid.ExtensionOID.SUBJECT_ALTERNATIVE_NAME
ext = cert.extensions.get_extension_for_oid(oid_san)
sans = ext.value.get_values_for_type(x509.DNSName)

if not 'echo-api.amazon.com' in sans:
    return False

# every certificate must be issued by the next one in the chain
for i in range(len(certs) - 1):
    if not certs[i].issuer == certs[i + 1].subject:
        return False

return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def signature(cert, sig, body): """Validate data request signature. See `validate.request` for additional info. Args: cert: cryptography.hazmat.backends.openssl.x509._Certificate. The Amazon signing certificate. sig: str. Signature header value sent by request. body: str. HTTPS request body. Returns: bool: True if valid, False otherwise. """
body = six.b(body)
# NOTE(review): base64.decodestring is deprecated (removed in Python 3.9);
# decodebytes is the modern replacement but does not exist on Python 2.
sig = base64.decodestring(sig)
padder = padding.PKCS1v15()
public_key = cert.public_key()

try:
    # verify() raises InvalidSignature on mismatch rather than returning
    public_key.verify(sig, body, padder, hashes.SHA1())
    return True
except InvalidSignature:
    warnings.warn('Signature verification failed.')
    return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def application_id(self, app_id): """Validate request application id matches true application id. Verifying the Application ID matches: https://goo.gl/qAdqe4. Args: app_id: str. Request application_id. Returns: bool: True if valid, False otherwise. """
if self.app_id != app_id: warnings.warn('Application ID is invalid.') return False return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sender(self, body, stamp, url, sig): """Validate request is from Alexa. Verifying that the Request was Sent by Alexa: https://goo.gl/AcrzB5. Checking the Signature of the Request: https://goo.gl/FDkjBN. Checking the Timestamp of the Request: https://goo.gl/Z5JhqZ Args: body: str. HTTPS request body. stamp: str. Value of timestamp within request object of HTTPS request body. url: str. SignatureCertChainUrl header value sent by request. sig: str. Signature header value sent by request. Returns: bool: True if valid, False otherwise. """
# a stale or malformed timestamp invalidates the request outright
if not timestamp(stamp):
    return False

# re-download and re-validate the certificate chain only when the
# SignatureCertChainUrl differs from the cached one
if self.url != url:
    if not signature_cert_chain_url(url):
        return False

    certs = retrieve(url)
    if not certs:
        return False

    if not cert_chain(certs):
        return False

    # cache the validated url and leaf certificate for later requests
    self.url = url
    self.cert = certs[0]

# finally, the request body must verify against the leaf certificate
if not signature(self.cert, sig, body):
    return False

return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def request(self, app_id=None, body=None, stamp=None, url=None, sig=None): """Validate application ID and request is from Alexa."""
# the application ID check runs only when one was configured
if self.app_id:
    if not self.application_id(app_id):
        return False

# providing a url or sig implies sender validation, which needs all four
# pieces; a partial set is a caller error, not a validation failure
if (url or sig):
    if not (body and stamp and url and sig):
        raise ValueError('Unable to validate sender, check arguments.')
    else:
        if not self.sender(body, stamp, url, sig):
            return False

return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_tables_from_files(db_connection): """ Looks in the current working directory for all required tables. """
# Execute every SQL file found in the adjacent 'sde' directory against
# the given database connection, logging each table as it loads.
_log.info('Loading tables from disk to DB.')

table_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'sde')

for file_name in os.listdir(table_dir):
    _log.info('Loading the following table: {}'.format(file_name))

    file_path = os.path.join(table_dir, file_name)
    with open(file_path, 'r') as table_file:
        execute_sql(table_file.read(), db_connection)

_log.info('Finished loading all requested tables.')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_connection(connection_details=None): """ Creates a connection to the MySQL DB. """
if connection_details is None: connection_details = get_default_connection_details() return MySQLdb.connect( connection_details['host'], connection_details['user'], connection_details['password'], connection_details['database'] )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_default_connection_details(): """ Gets the connection details based on environment vars or Thanatos default settings. :return: Returns a dictionary of connection details. :rtype: dict """
return { 'host': os.environ.get('MYSQL_HOST', '127.0.0.1'), 'user': os.environ.get('MYSQL_USER', 'vagrant'), 'password': os.environ.get('MYSQL_PASSWORD', 'vagrant'), 'database': os.environ.get('MYSQL_DB', 'thanatos'), }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def unwrap_or(self, default: U) -> Union[T, U]: """ Returns the contained value or ``default``. Args: default: The default value. Returns: The contained value if the :py:class:`Option` is ``Some``, otherwise ``default``. Notes: If you wish to use a result of a function call as the default, it is recommended to use :py:meth:`unwrap_or_else` instead. Examples: 0 0 """
return self.unwrap_or_else(lambda: default)