text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def trending(params): """gets trending content values """
def trending(params):
    """Get trending content values.

    Looks up per-site trending content from InfluxDB, with memcached
    acting as a read-through cache in front of the query.

    Args:
        params: mapping of query-string params to lists of values
            (as produced by ``urlparse.parse_qs``). Keys used:
            "site", "offset", "limit".

    Returns:
        (body, status) tuple: a JSON string and an HTTP status string.
    """
    # get params
    try:
        series = params.get("site", [DEFAULT_SERIES])[0]
        offset = params.get("offset", [DEFAULT_GROUP_BY])[0]
        limit = params.get("limit", [20])[0]
    except Exception as e:
        LOGGER.exception(e)
        # BUG FIX: BaseException.message was removed in Python 3;
        # str(e) works on both Python 2 and 3.
        return json.dumps({"error": str(e)}), "500 Internal Error"

    # check the cache
    cache_key = "{}:{}:{}:{}:{}".format(memcached_prefix, "trending.json",
                                        series, offset, limit)
    try:
        data = MEMCACHED_CLIENT.get(cache_key)
        if data:
            return data, "200 OK"
    except Exception as e:
        # Cache errors are non-fatal; fall through to the database.
        LOGGER.exception(e)

    # update series name
    series = update_trending_series(series)

    # parse the limit
    try:
        limit = int(limit)
    except ValueError:
        LOGGER.error("limit param must be an integer")
        return json.dumps({"error": "limit param must be an integer"}), "400 Bad Request"

    # build the query
    query = "SELECT content_id, sum(value) as value " \
            "FROM {series} " \
            "WHERE time > now() - {offset} " \
            "GROUP BY content_id;"
    args = {"series": series, "offset": offset}

    # send the request
    try:
        res = INFLUXDB_CLIENT.query(query.format(**args))
    # capture errors and send them back along with the query
    # (for inspection/debugging)
    except Exception as e:
        LOGGER.exception(e)
        # BUG FIX: same e.message -> str(e) fix as above.
        return json.dumps({"error": str(e), "query": query.format(**args)}), "500 Internal Error"

    # build the response object
    response = flatten_response(res)

    # limit the number of content per site
    for site, points in response.items():
        sorted_content = sorted(points, key=lambda p: p["value"], reverse=True)[:limit]
        response[site] = sorted_content

    # strip the environment suffix from series names (e.g. "site-prod" -> "site")
    clean_response = {}
    for site, values in response.items():
        clean_name = site.split("-")[0]
        clean_response[clean_name] = values

    res = json.dumps(clean_response)

    # cache the response (best-effort; failures are only logged)
    try:
        MEMCACHED_CLIENT.set(cache_key, res, time=MEMCACHED_EXPIRATION)
    except Exception as e:
        LOGGER.exception(e)

    return res, "200 OK"
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def maha_dist(df): """Compute the squared Mahalanobis Distance for each row in the dataframe Given a list of rows `x`, each with `p` elements, a vector :math:\mu of the row means of length `p`, and the :math:pxp covarence matrix of the columns :math:\Sigma, The returned value for each row is: .. math:: D^{2} = (x - \mu)^{T} \Sigma^{-1} (x - \mu) Args: df: The input DataFrame Returns: Series: The squared Mahalanobis Distance for each row Notes: This implimentation is based on the `R function`_ for the same mahalanobis calculation .. _R function: https://stat.ethz.ch/R-manual/R-devel/library/stats/html/mahalanobis.html """
def maha_dist(df):
    """Compute the squared Mahalanobis distance of every row in *df*.

    For each row x, with column means mu and inverse covariance S^-1,
    returns (x - mu)^T S^-1 (x - mu), mirroring R's ``mahalanobis()``.

    Args:
        df: input DataFrame.

    Returns:
        Series: squared Mahalanobis distance per row, indexed like df.
    """
    center = df.mean()
    inv_cov = np.linalg.inv(df.cov())

    def _row_dist(row):
        # (x - mu)^T S^-1 (x - mu) for a single row.
        delta = row - center
        return np.dot(np.dot(delta.T, inv_cov), delta)

    return df.apply(_row_dist, axis=1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def within_n_sds(n, series): """Return true if all values in sequence are within n SDs"""
def within_n_sds(n, series):
    """Return True if every value of *series* lies within n standard deviations of the mean."""
    z = (series - series.mean()).div(series.std())
    return (z.abs() <= n).all()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def within_n_mads(n, series): """Return true if all values in sequence are within n MADs"""
def within_n_mads(n, series):
    """Return True if all values are within n MADs of the mean.

    MAD here is the mean absolute deviation about the mean (as the old
    ``Series.mad()`` computed it).

    Args:
        n: allowed number of MADs.
        series: pandas Series of numeric values.

    Returns:
        True when every ``|x - mean| / MAD <= n``.
    """
    centered = series - series.mean()
    # BUG FIX: Series.mad() was removed in pandas 2.0; compute the mean
    # absolute deviation explicitly (numerically identical).
    mad = centered.abs().mean()
    mad_score = centered / mad
    return (mad_score.abs() <= n).all()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def open(filename, flag='c', protocol=None, writeback=False, maxsize=DEFAULT_MAXSIZE, timeout=DEFAULT_TIMEOUT): """Open a database file as a persistent dictionary. The persistent dictionary file is opened using :func:`dbm.open`, so performance will depend on which :mod:`dbm` modules are installed. :func:`open` chooses to open a :class:`Shelf <shelve.Shelf>`, :class:`LRUShelf`, :class:`TimeoutShelf`, or :class:`LRUTimeoutShelf` depending on the values of keyword arguments *maxsize* and *timeout*. A :data:`None` value for *maxsize* and *timeout* will disable the LRU cache management and automatic data timeout features respectively. :param filename: The base filename for the underlying database that is passed to :func:`dbm.open`. :param flag: The flag to pass to :func:`dbm.open`. :param protocol: The pickle protocol to pass to :func:`pickle.dump`. :param writeback: Whether or not to write back all accessed entries on :meth:`Shelf.sync <shelve.Shelf.sync>` and :meth:`Shelf.close <shelve.Shelf.close>` :type writeback: bool :param maxsize: The maximum size the container is allowed to grow to. ``0`` means that no size limit is enforced. :data:`None` means that LRU cache management is disabled. :type maxsize: integer or :data:`None` :param timeout: The default timeout value for data (in seconds). ``0`` means that the data never expires. :data:`None` means that automatic timeout features will be disabled. :type timeout: integer or :data:`None` :return: A shelf :rtype: :class:`~shelve.Shelf`, :class:`LRUShelf`, :class:`TimeoutShelf`, or :class:`LRUTimeoutShelf` """
def open(filename, flag='c', protocol=None, writeback=False,
         maxsize=DEFAULT_MAXSIZE, timeout=DEFAULT_TIMEOUT):
    """Open a database file as a persistent dictionary.

    The backing store is opened via :func:`dbm.open`. The shelf flavour
    is chosen from *maxsize* and *timeout*: a :data:`None` value disables
    LRU cache management / automatic expiry respectively.

    :param filename: base filename passed to :func:`dbm.open`.
    :param flag: flag passed to :func:`dbm.open`.
    :param protocol: pickle protocol for stored values.
    :param writeback: write back accessed entries on sync/close.
    :param maxsize: max entries (0 = unlimited, None = no LRU).
    :param timeout: default expiry in seconds (0 = never, None = no expiry).
    :return: a Shelf, LRUShelf, TimeoutShelf or LRUTimeoutShelf.
    """
    import dbm
    backing = dbm.open(filename, flag)
    lru_enabled = maxsize is not None
    expiry_enabled = timeout is not None
    if lru_enabled and expiry_enabled:
        return LRUTimeoutShelf(backing, protocol, writeback,
                               timeout=timeout, maxsize=maxsize)
    if lru_enabled:
        return LRUShelf(backing, protocol, writeback, maxsize=maxsize)
    if expiry_enabled:
        return TimeoutShelf(backing, protocol, writeback, timeout=timeout)
    return Shelf(backing, protocol, writeback)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _remove_add_key(self, key): """Move a key to the end of the linked list and discard old entries."""
def _remove_add_key(self, key):
    """Promote *key* to most-recently-used and evict over-limit entries.

    Moves the key to the tail of the LRU queue; when maxsize is non-zero,
    deletes least-recently-used entries until the queue fits.
    """
    if not hasattr(self, '_queue'):
        return  # queue is created lazily; nothing to track before init
    queue = self._queue
    if key in queue:
        queue.remove(key)
    queue.append(key)
    if self.maxsize != 0:
        # Evict from the head (least recently used) until within limit.
        # __delitem__ is expected to also drop the key from the queue.
        while len(queue) > self.maxsize:
            del self[queue[0]]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _is_expired(self, key): """Check if a key is expired. If so, delete the key."""
if not hasattr(self, '_index'): return False # haven't initalized yet, so don't bother try: timeout = self._index[key] except KeyError: if self.timeout: self._index[key] = int(time() + self.timeout) else: self._index[key] = None return False if timeout is None or timeout >= time(): return False del self[key] # key expired, so delete it from container return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set(self, key, func, *args, **kwargs): """Return key's value if it exists, otherwise call given function. :param key: The key to lookup/set. :param func: A function to use if the key doesn't exist. All other arguments and keyword arguments are passed to *func*. """
def set(self, key, func, *args, **kwargs):
    """Return the value stored under *key*, computing and caching it on a miss.

    :param key: The key to look up or set.
    :param func: Called as ``func(*args, **kwargs)`` to produce the value
        when the key is absent.
    """
    if key in self:
        return self[key]
    value = func(*args, **kwargs)
    self[key] = value
    return value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sync(self): """Sync the timeout index entry with the shelf."""
def sync(self):
    """Sync the timeout index entry with the shelf.

    Writes the in-memory timeout index back under the reserved
    ``_INDEX`` key, then flushes the underlying dbm if it supports it.
    """
    if self.writeback and self.cache:
        # Drop the stale index entry before the base sync writes back
        # the cache, so a cached copy cannot clobber the fresh index.
        super(_TimeoutMixin, self).__delitem__(self._INDEX)
        super(_TimeoutMixin, self).sync()
    # Temporarily disable writeback so the index entry is written
    # straight through rather than parked in the writeback cache.
    self.writeback = False
    super(_TimeoutMixin, self).__setitem__(self._INDEX, self._index)
    self.writeback = True
    if hasattr(self.dict, 'sync'):
        self.dict.sync()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_model(self, request, obj, form, change): """ Save model for every language so that field auto-population is done for every each of it. """
def save_model(self, request, obj, form, change):
    """
    Save the model for every language so that field auto-population
    is done for each of them.
    """
    super(DisplayableAdmin, self).save_model(request, obj, form, change)
    if settings.USE_MODELTRANSLATION:
        lang = get_language()
        for code in OrderedDict(settings.LANGUAGES):
            if code != lang:  # Already saved for the current language
                # BUG FIX: the bare ``except:`` also swallowed
                # SystemExit/KeyboardInterrupt; narrow to Exception.
                try:
                    activate(code)
                except Exception:
                    # A language may fail to activate; skip its
                    # auto-population rather than abort the save.
                    pass
                else:
                    obj.save()
        activate(lang)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_fields(self, request, obj=None): """ For subclasses of ``Orderable``, the ``_order`` field must always be present and be the last field. """
def get_fields(self, request, obj=None):
    """
    For subclasses of ``Orderable``, ensure the ``_order`` field is
    always present and always the last field.
    """
    fields = super(BaseDynamicInlineAdmin, self).get_fields(request, obj)
    if not issubclass(self.model, Orderable):
        return fields
    reordered = [f for f in fields if f != "_order"]
    reordered.append("_order")
    return reordered
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_fieldsets(self, request, obj=None): """ Same as above, but for fieldsets. """
def get_fieldsets(self, request, obj=None):
    """
    Same as ``get_fields``, but for fieldsets: strip translated shadow
    fields and force ``_order`` to be the last field of the last fieldset.
    """
    fieldsets = super(BaseDynamicInlineAdmin, self).get_fieldsets(
        request, obj)
    if issubclass(self.model, Orderable):
        for fieldset in fieldsets:
            # Drop modeltranslation's per-language shadow fields and any
            # existing "_order" so it can be re-appended at the end.
            fields = [f for f in list(fieldset[1]["fields"])
                      if not hasattr(f, "translated_field")]
            try:
                fields.remove("_order")
            except ValueError:
                pass
            fieldset[1]["fields"] = fields
        # "_order" must be the very last field shown.
        fieldsets[-1][1]["fields"].append("_order")
    return fieldsets
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_form(self, request, form, change): """ Set the object's owner as the logged in user. """
def save_form(self, request, form, change):
    """
    Set the object's owner as the logged-in user (only when no owner
    has been assigned yet).
    """
    instance = form.save(commit=False)
    if instance.user_id is None:
        instance.user = request.user
    return super(OwnableAdmin, self).save_form(request, form, change)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def base_concrete_modeladmin(self): """ The class inheriting directly from ContentModelAdmin. """
def base_concrete_modeladmin(self):
    """
    The class inheriting directly from ``ContentTypedAdmin``.

    Walks the MRO bases depth-first until a class listing
    ``ContentTypedAdmin`` among its direct bases is found.
    """
    stack = [self.__class__]
    while stack:
        klass = stack.pop()
        if ContentTypedAdmin in klass.__bases__:
            return klass
        stack.extend(klass.__bases__)
    raise Exception("Can't find base concrete ModelAdmin class.")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def changelist_view(self, request, extra_context=None): """ Redirect to the changelist view for subclasses. """
def changelist_view(self, request, extra_context=None):
    """
    Redirect to the concrete model's changelist when viewing a subclass;
    otherwise render the changelist with the registered content models.
    """
    if self.model is not self.concrete_model:
        url = admin_url(self.concrete_model, "changelist")
        return HttpResponseRedirect(url)
    context = extra_context or {}
    context["content_models"] = self.get_content_models()
    return super(ContentTypedAdmin, self).changelist_view(request, context)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_content_models(self): """ Return all subclasses that are admin registered. """
def get_content_models(self):
    """
    Return all subclasses that are admin registered.
    """
    models = []
    for model in self.concrete_model.get_content_models():
        try:
            # Probing the "add" URL tells us whether the subclass is
            # registered with the admin site; unregistered ones raise.
            admin_url(model, "add")
        except NoReverseMatch:
            continue
        else:
            # Stash display data on the model class for templates.
            setattr(model, "meta_verbose_name", model._meta.verbose_name)
            setattr(model, "add_url", admin_url(model, "add"))
            models.append(model)
    return models
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_model(self, request, obj, form, change): """ Provides a warning if the user is an active admin with no admin access. """
def save_model(self, request, obj, form, change):
    """
    Provides a warning if the user is an active admin with no admin access.
    """
    super(SitePermissionUserAdmin, self).save_model(
        request, obj, form, change)
    # Re-fetch so the permission checks run against what was persisted
    # (permission caches on ``obj`` may be stale).
    user = self.model.objects.get(id=obj.id)
    has_perms = len(user.get_all_permissions()) > 0
    has_sites = SitePermission.objects.filter(user=user).count() > 0
    # Superusers always have access; other staff need both model
    # permissions and at least one site permission to use the admin.
    if user.is_active and user.is_staff and not user.is_superuser and not (
            has_perms and has_sites):
        error(request, "The user is active but won't be able to access "
                       "the admin, due to no edit/site permissions being "
                       "selected")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bind(self, args, kwargs): """ Bind arguments and keyword arguments to the encapsulated function. Returns a dictionary of parameters (named according to function parameters) with the values that were bound to each name. """
def bind(self, args, kwargs):
    """
    Bind call arguments to the encapsulated function's parameters.

    Returns a dict mapping each parameter name to the value bound to it,
    including varargs, varkw and keyword-only parameters when present.
    """
    spec = self._spec
    resolution = self.resolve(args, kwargs)
    bound = dict(zip(spec.args, resolution.slots))
    if spec.varargs:
        bound[spec.varargs] = resolution.varargs
    if spec.varkw:
        bound[spec.varkw] = resolution.varkw
    if spec.kwonlyargs:
        bound.update(resolution.kwonlyargs)
    return bound
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def apply(self, args, kwargs): """ Replicate a call to the encapsulated function. Unlike func(*args, **kwargs) the call is deterministic in the order kwargs are being checked by python. In other words, it behaves exactly the same as if typed into the repl prompt. This is usually only a problem when a function is given two invalid keyword arguments. In such cases func(*args, **kwargs) syntax will result in random error on either of those invalid keyword arguments. This is most likely caused by a temporary dictionary created by the runtime. For testing a OderedDictionary instance may be passed as kwargs. In such case the call, and the error message, is fully deterministic. This function is implemented with eval() """
# Construct helper locals that only contain the function to call as # 'func', all positional arguments as 'argX' and all keyword arguments # as 'kwX' _locals = {'func': self._func} if args is not None: _locals.update({ "arg{}".format(index): args[index] for index, value in enumerate(args)}) if kwargs is not None: # Explicitly build a list of keyword arguments so that we never # traverse kwargs more than once kw_list = list(kwargs.keys()) _locals.update({ "kw{}".format(index): kwargs[key] for index, key in enumerate(kw_list)}) # Construct the call expression string by carefully # placing each positional and keyword arguments in right # order that _exactly_ matches how apply() was called. params = [] if args is not None: params.extend([ "arg{}".format(index) for index in range(len(args))]) if kwargs is not None: params.extend([ "{}=kw{}".format(key, index) for index, key in enumerate(kw_list)]) expr = "func({})".format(", ".join(params)) return eval(expr, globals(), _locals)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update(self, **kwargs): """ Add, remove or modify a share's title. Input: * ``title`` The share title, if any (optional) **NOTE**: Passing ``None`` or calling this method with an empty argument list will remove the share's title. Output: * None Example:: share = client.get_share("4ddfds") share.update(title="Example") # Set title to Example share.update() # Remove title """
def update(self, **kwargs):
    """
    Add, remove or modify a share's title.

    Calling with no arguments (or ``title=None``) removes the title.
    On HTTP 200 the instance is re-initialized in place with the fresh
    metadata returned by the API.

    :param title: the new share title (optional)
    """
    if 'title' in kwargs:
        params = {"title": kwargs['title']}
    else:
        # No title given: the API treats a null title as removal.
        params = {"title": None}
    response = GettRequest().post(
        "/shares/%s/update?accesstoken=%s"
        % (self.sharename, self.user.access_token()), params)
    if response.http_status == 200:
        # Re-run __init__ so all attributes reflect the server state.
        self.__init__(self.user, **response.response)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def destroy(self): """ This method removes this share and all of its associated files. There is no way to recover a share or its contents once this method has been called. Input: * None Output: * ``True`` Example:: client.get_share("4ddfds").destroy() """
def destroy(self):
    """
    Permanently remove this share and all of its associated files.

    There is no way to recover a share once destroyed.

    :return: ``True`` on success (HTTP 200).
    """
    url = "/shares/%s/destroy?accesstoken=%s" % (self.sharename,
                                                 self.user.access_token())
    response = GettRequest().post(url, None)
    if response.http_status == 200:
        return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def refresh(self): """ This method refreshes the object with current metadata from the Gett service. Input: * None Output: * None Example:: share = client.get_share("4ddfds") print share.files[0].filename # prints 'foobar' if share.files[0].destroy(): share.refresh() print share.files[0].filename # now prints 'barbaz' """
def refresh(self):
    """
    Refresh the object with current metadata from the Gett service.

    On HTTP 200, re-initializes this instance in place so existing
    references (e.g. ``share.files``) reflect the server's state.
    """
    response = GettRequest().get("/shares/%s" % self.sharename)
    if response.http_status == 200:
        # Re-run __init__ with the fresh attributes from the API.
        self.__init__(self.user, **response.response)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def code_timer(reset=False):
    '''Return the global CodeTimer shared across multiple files.

    Args:
        reset: when True, discard any existing timer and start fresh.

    Returns:
        The module-level CodeTimer instance.
    '''
    global CODE_TIMER
    if reset or CODE_TIMER is None:
        # BUG FIX: the original created a new CodeTimer without storing
        # it in the global (and returned None on reset), so the timer
        # was never actually shared between callers. Persist it here.
        CODE_TIMER = CodeTimer()
    return CODE_TIMER
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def log(self, timer_name, node):
    '''Record a timing event under *timer_name*.

    Appends ``{"node": node, "time": now}`` to the named timer list,
    creating the list on first use.
    '''
    entry = {"node": node, "time": time.time()}
    if hasattr(self, timer_name):
        getattr(self, timer_name).append(entry)
    else:
        setattr(self, timer_name, [entry])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def print_timer(self, timer_name, **kwargs):
    '''Print the named timer's event log to the terminal.

    Each row shows milliseconds since the first event, milliseconds
    since the previous event, and the node name.

    keyword args:
        delete -> True/False - deletes the timer after printing
    '''
    if hasattr(self, timer_name):
        _delete_timer = kwargs.get("delete", False)
        print("|-------- {} [Time Log Calculation]-----------------|".format(\
                timer_name))
        print("StartDiff\tLastNodeDiff\tNodeName")
        time_log = getattr(self, timer_name)
        start_time = time_log[0]['time']
        previous_time = start_time
        for entry in time_log:
            # Millisecond deltas from the previous entry / from start.
            time_diff = (entry['time'] - previous_time) * 1000
            time_from_start = (entry['time'] - start_time) * 1000
            previous_time = entry['time']
            print("{:.1f}\t\t{:.1f}\t\t{}".format(time_from_start,
                                                  time_diff,
                                                  entry['node']))
        print("|--------------------------------------------------------|")
        if _delete_timer:
            self.delete_timer(timer_name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_cluster_exists(self, name): """Check if cluster exists. If it does not, raise exception."""
def check_cluster_exists(self, name):
    """Return True when *name* matches a cluster in the kubeconfig,
    False otherwise."""
    self.kubeconf.open()
    known = [entry['name'] for entry in self.kubeconf.get_clusters()]
    return name in known
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, name=None, provider='AwsEKS', print_output=True): """List all cluster. """
# Create cluster object Cluster = getattr(providers, provider) cluster = Cluster(name) self.kubeconf.open() if name is None: clusters = self.kubeconf.get_clusters() print("Running Clusters:") for cluster in clusters: print(f" - {cluster['name']}") else: # Check that cluster exists. if self.check_cluster_exists(name) is False: raise JhubctlError("Cluster name not found in availabe clusters.") cluster = self.kubeconf.get_cluster(name=cluster.cluster_name) pprint.pprint(cluster, depth=4)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create(self, name, provider='AwsEKS'): """Create a Kubernetes cluster on a given provider. """
# ----- Create K8s cluster on provider ------- # Create cluster object Cluster = getattr(providers, provider) cluster = Cluster(name=name, ssh_key_name='zsailer') cluster.create() # -------- Add cluster to kubeconf ----------- # Add cluster to kubeconf self.kubeconf.open() self.kubeconf.add_cluster( cluster.cluster_name, server=cluster.endpoint_url, certificate_authority_data=cluster.ca_cert ) # Add a user to kubeconf self.kubeconf.add_user(name) # Add a user exec call for this provider. self.kubeconf.add_to_user( name, **cluster.kube_user_data ) # Add context mapping user to cluster. self.kubeconf.add_context( name, cluster_name=cluster.cluster_name, user_name=cluster.name ) # Switch contexts. self.kubeconf.set_current_context(name) # Commit changes to file. self.kubeconf.close() # ------ Setup autorization ------- kubectl('apply', input=cluster.get_auth_config()) # -------- Setup Storage ---------- kubectl('delete', 'storageclass', 'gp2') kubectl('apply', input=cluster.get_storage_config()) # ------- setup helm locally ------ kubectl( '--namespace', 'kube-system', 'create', 'serviceaccount', 'tiller' ) kubectl( 'create', 'clusterrolebinding', 'tiller', '--clusterrole=cluster-admin', '--serviceaccount=kube-system:tiller' ) # -------- Initialize Helm ----------- helm( 'init', '--service-account', 'tiller' ) # --------- Secure Helm -------------- kubectl( 'patch', 'deployment', 'tiller-deploy', namespace='kube-system', type='json', patch='[{"op": "add", "path": "/spec/template/spec/containers/0/command", "value": ["/tiller", "--listen=localhost:44134"]}]' )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(self, name, provider='AwsEKS'): """Delete a Kubernetes cluster. """
def delete(self, name, provider='AwsEKS'):
    """Delete a Kubernetes cluster on *provider* and remove its
    context, user and cluster entries from the kubeconfig."""
    # Tear down the cluster on the provider first.
    provider_cls = getattr(providers, provider)
    cluster = provider_cls(name)
    cluster.delete()
    # Purge every kubeconfig record that referenced the cluster.
    self.kubeconf.open()
    self.kubeconf.remove_context(name)
    self.kubeconf.remove_user(name)
    self.kubeconf.remove_cluster(cluster.cluster_name)
    self.kubeconf.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def random_string(length=8, charset=None):
    '''Generate a string of random characters.

    If no charset is specified, only ASCII letters and digits are used.

    Args:
        length (int): length of the returned string (must be > 0)
        charset (str): characters to choose from

    Returns:
        (str) with random characters from charset

    Raises:
        ValueError: if length < 1
    '''
    if length < 1:
        raise ValueError('Length must be > 0')
    if not charset:
        # BUG FIX: string.letters and xrange are Python 2 only; use
        # string.ascii_letters and range for Python 3 compatibility.
        charset = string.ascii_letters + string.digits
    return ''.join(random.choice(charset) for _ in range(length))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def str2dict(str_in):
    '''Extract a dict from a string.

    Args:
        str_in (string) that contains a python dict

    Returns:
        (dict) or None if no valid dict was found

    Raises:
        -
    '''
    candidate = safe_eval(str_in)
    return candidate if isinstance(candidate, dict) else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def str2tuple(str_in):
    '''Extract a tuple from a string.

    Args:
        str_in (string) that contains a python tuple

    Returns:
        (tuple) or None if no valid tuple was found

    Raises:
        -
    '''
    candidate = safe_eval(str_in)
    return candidate if isinstance(candidate, tuple) else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def str2dict_keys(str_in):
    '''Extract the keys of a dict embedded in a string, sorted ascending.

    Args:
        str_in (string) that contains a python dict

    Returns:
        (list) with keys or None if no valid dict was found

    Raises:
        -
    '''
    parsed = str2dict(str_in)
    return None if parsed is None else sorted(parsed)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def str2dict_values(str_in):
    '''Extract the values of a dict embedded in a string, ordered by key.

    Args:
        str_in (string) that contains a python dict

    Returns:
        (list) with values or None if no valid dict was found

    Raises:
        -
    '''
    parsed = str2dict(str_in)
    if parsed is None:
        return None
    return [parsed[key] for key in sorted(parsed)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def expr_to_str(n, l=None): """ construct SQL string from expression node """
def expr_to_str(n, l=None):
    """Construct a SQL string from an expression node.

    Nodes are tuples ``(op, *operands)``; op names like ``_avg_`` are
    normalized by stripping the surrounding underscores.

    Args:
        n: expression node tuple.
        l: unused; kept for interface compatibility.

    Returns:
        str: the rendered SQL fragment.
    """
    op = n[0]
    if op.startswith('_') and op.endswith('_'):
        op = op.strip('_')
    if op == 'var':
        return n[1]
    elif op == 'literal':
        # BUG FIX: ``basestring`` is Python 2 only; use str.
        if isinstance(n[1], str):
            return "'%s'" % n[1]
        return str(n[1])
    elif op == 'cast':
        return "(%s)::%s" % (expr_to_str(n[1]), n[2])
    elif op in '+-*/':
        return "(%s) %s (%s)" % (expr_to_str(n[1]), op, expr_to_str(n[2]))
    elif op == "extract":
        return "extract( %s from %s )" % (n[1], expr_to_str(n[2]))
    else:
        # Generic function call: render each operand as an argument.
        arg = ','.join(map(expr_to_str, n[1:]))
        return "%s(%s)" % (op, arg)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def construct_func_expr(n): """ construct the function expression """
def construct_func_expr(n):
    """Construct the function expression for node *n*.

    Args:
        n: expression node tuple ``(op, *operands)``.

    Returns:
        A Var, Constant, ArithErrFunc, or registered aggregate object.

    Raises:
        NotImplementedError: for string literals and cast expressions.
        ValueError: when the operator has no registered class.
    """
    op = n[0]
    if op.startswith('_') and op.endswith('_'):
        op = op.strip('_')
    if op == 'var':
        return Var(str(n[1]))
    elif op == 'literal':
        # BUG FIX: ``basestring`` is Python 2 only, and ``raise "..."``
        # is a TypeError on Python 3 -- raise real exception objects.
        if isinstance(n[1], str):
            raise NotImplementedError("string literals are not implemented")
        return Constant(n[1])
    elif op == 'cast':
        raise NotImplementedError("cast expressions are not implemented")
    elif op in '+-/*':
        return ArithErrFunc(op, *map(construct_func_expr, n[1:]))
    else:
        klass = __agg2f__.get(op, None)
        if klass:
            return klass(map(construct_func_expr, n[1:]))
        raise ValueError("no class registered for operator %r" % op)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fetch_api_by_name(api_name): """ Fetch an api record by its name """
def fetch_api_by_name(api_name):
    """Fetch an API record by its name.

    Args:
        api_name: the REST API's display name.

    Returns:
        The first matching API record dict, or None when nothing matches.
    """
    api_records = console.get_rest_apis()['items']
    # BUG FIX: on Python 3, ``filter`` returns a lazy iterator which is
    # always truthy and not subscriptable, so the original never returned
    # None and crashed on ``matches[0]``. Materialize the matches.
    matches = [record for record in api_records if record['name'] == api_name]
    if not matches:
        return None
    return matches[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fetch_method(api_id, resource_id, verb): """ Fetch extra metadata for this particular method """
def fetch_method(api_id, resource_id, verb):
    """Fetch extra metadata for one method of an API resource."""
    lookup = {
        'restApiId': api_id,
        'resourceId': resource_id,
        'httpMethod': verb,
    }
    return console.get_method(**lookup)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def battery_voltage(self): """ Returns voltage in mV """
def battery_voltage(self):
    """Return the battery voltage in mV."""
    high = self.bus.read_byte_data(AXP209_ADDRESS, BATTERY_VOLTAGE_MSB_REG)
    low = self.bus.read_byte_data(AXP209_ADDRESS, BATTERY_VOLTAGE_LSB_REG)
    # 12-bit reading: 8 MSBs followed by the low nibble of the LSB register.
    raw = (high << 4) | (low & 0x0f)
    return raw * 1.1  # 1.1 mV per bit
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def internal_temperature(self): """ Returns temperature in celsius C """
def internal_temperature(self):
    """Return the chip's internal temperature in degrees Celsius."""
    high = self.bus.read_byte_data(AXP209_ADDRESS, INTERNAL_TEMPERATURE_MSB_REG)
    low = self.bus.read_byte_data(AXP209_ADDRESS, INTERNAL_TEMPERATURE_LSB_REG)
    # MSB is 8 bits, LSB contributes its lower 4 bits.
    raw = (high << 4) | (low & 0x0f)
    # Scale: 0x000 -> -144.7 C, 0.1 C per bit (0xFFF -> 264.8 C).
    return raw * 0.1 - 144.7
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copen(fileobj, mode='rb', **kwargs): """Detects and opens compressed file for reading and writing. Args: fileobj (File): any File-like object supported by an underlying compression algorithm mode (unicode): mode to open fileobj with **kwargs: keyword-arguments to pass to the compression algorithm Returns: File: TextWrapper if no compression, else returns appropriate wrapper for the compression type Example: .. code-block:: Python b'bzip2' """
def copen(fileobj, mode='rb', **kwargs):
    """Detect and open a compressed file for reading or writing.

    In write mode the compression is chosen from the filename extension;
    in read mode it is detected from the file's magic bytes. Unavailable
    compression libraries degrade to plain open with a warning.

    Args:
        fileobj (File): path/file supported by the compression algorithm
        mode (unicode): mode to open fileobj with
        **kwargs: keyword arguments passed to the compression algorithm

    Returns:
        File: TextWrapper if no compression, else the appropriate
        wrapper for the detected compression type
    """
    algo = io.open  # Only used as io.open in write mode
    mode = mode.lower().strip()
    modules = {}  # Later populated by compression algorithms
    write_mode = False if mode.lstrip('U')[0] == 'r' else True
    kwargs['mode'] = mode

    # Currently supported compression algorithms
    modules_to_import = {
        'bz2': 'BZ2File',
        'gzip': 'GzipFile',
        'lzma': 'LZMAFile'
    }

    # Dynamically import compression libraries and warn about failures
    for mod, _class in modules_to_import.items():
        try:
            modules[_class] = getattr(import_module(mod), _class)
        except (ImportError, AttributeError) as e:
            # Fall back to plain open so the caller still gets a handle.
            modules[_class] = open
            warn('Cannot process {0} files due to following error:'
                 '{1}{2}{1}You will need to install the {0} library to '
                 'properly use these files. Currently, such files will '
                 'open in "text" mode.'.format(mod, linesep, e))

    # Write mode
    if write_mode is True:
        # Map file extensions to decompression classes
        algo_map = {
            'bz2': modules['BZ2File'],
            'gz': modules['GzipFile'],
            'xz': modules['LZMAFile']
        }
        # Determine the compression algorithm via the file extension
        ext = fileobj.split('.')[-1]
        try:
            algo = algo_map[ext]
        except KeyError:
            pass  # unknown extension: write uncompressed via io.open
    # Read mode
    else:
        algo = io.TextIOWrapper  # Default to plaintext buffer
        # Magic headers of compression formats
        file_sigs = {
            b'\x42\x5a\x68': modules['BZ2File'],
            b'\x1f\x8b\x08': modules['GzipFile'],
            b'\xfd7zXZ\x00': modules['LZMAFile']
        }
        # Open the file, buffer it, and identify the compression algorithm
        # (peek does not consume bytes, so the wrapper reads from byte 0).
        fileobj = io.BufferedReader(io.open(fileobj, 'rb'))
        max_len = max(len(x) for x in file_sigs.keys())
        start = fileobj.peek(max_len)
        for sig in file_sigs.keys():
            if start.startswith(sig):
                algo = file_sigs[sig]
                break  # Stop iterating once a good signature is found

    # Filter all **kwargs by the args accepted by the compression algorithm
    algo_args = set(getfullargspec(algo).args)
    good_args = set(kwargs.keys()).intersection(algo_args)
    _kwargs = {arg: kwargs[arg] for arg in good_args}

    # Open the file using parameters defined above and store in namespace
    if write_mode is True:
        handle = algo(fileobj, **_kwargs)
    else:
        try:
            # For algorithms that need to be explicitly given a fileobj
            handle = algo(fileobj=fileobj, **_kwargs)
        except TypeError:
            # For algorithms that detect file objects
            handle = algo(fileobj, **_kwargs)
    return handle
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_sg_name_dict(self, data, page_size, no_nameconv): """Get names of security groups referred in the retrieved rules. :return: a dict from secgroup ID to secgroup name """
def _get_sg_name_dict(self, data, page_size, no_nameconv):
    """Get names of security groups referred to in the retrieved rules.

    :param data: list of security group rule dicts.
    :param page_size: pagination limit for the list call (may be falsy).
    :param no_nameconv: when True, skip name conversion entirely.
    :return: a dict from secgroup ID to secgroup name
    """
    if no_nameconv:
        return {}
    neutron_client = self.get_client()
    search_opts = {'fields': ['id', 'name']}
    if self.pagination_support:
        if page_size:
            search_opts.update({'limit': page_size})
    # Collect the distinct secgroup IDs referenced by the rules.
    sec_group_ids = set()
    for rule in data:
        for key in self.replace_rules:
            if rule.get(key):
                sec_group_ids.add(rule[key])
    sec_group_ids = list(sec_group_ids)

    def _get_sec_group_list(sec_group_ids):
        # Mutates the shared search_opts with the id filter per call.
        search_opts['id'] = sec_group_ids
        return neutron_client.list_security_groups(
            **search_opts).get('security_groups', [])

    try:
        secgroups = _get_sec_group_list(sec_group_ids)
    except exceptions.RequestURITooLong as uri_len_exc:
        # Length of a query filter on security group rule id
        # id=<uuid>& (with len(uuid)=36)
        sec_group_id_filter_len = 40
        # The URI is too long because of too many sec_group_id filters
        # Use the excess attribute of the exception to know how many
        # sec_group_id filters can be inserted into a single request
        sec_group_count = len(sec_group_ids)
        max_size = ((sec_group_id_filter_len * sec_group_count) -
                    uri_len_exc.excess)
        chunk_size = max_size // sec_group_id_filter_len
        secgroups = []
        # Re-issue the listing in chunks that fit within the URI limit.
        for i in range(0, sec_group_count, chunk_size):
            secgroups.extend(
                _get_sec_group_list(sec_group_ids[i: i + chunk_size]))
    # Groups without a name are omitted from the mapping.
    return dict([(sg['id'], sg['name']) for sg in secgroups
                 if sg['name']])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self): """ Loads the user's account details and Raises parseException """
pg = self.usr.getPage("http://www.neopets.com/bank.phtml") # Verifies account exists if not "great to see you again" in pg.content: logging.getLogger("neolib.user").info("Could not load user's bank. Most likely does not have an account.", {'pg': pg}) raise noBankAcct self.__loadDetails(pg)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def collectInterest(self): """ Collects user's daily interest, returns result Returns bool - True if successful, False otherwise """
if self.collectedInterest: return False pg = self.usr.getPage("http://www.neopets.com/bank.phtml") form = pg.form(action="process_bank.phtml") form['type'] = "interest" pg = form.submit() # Success redirects to bank page if "It's great to see you again" in pg.content: self.__loadDetails(pg) return True else: logging.getLogger("neolib.user").info("Failed to collect daily interest for unknown reason.", {'pg': pg}) return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connected_components(G): """ Check if G is connected and return list of sets. Every set contains all vertices in one connected component. """
result = [] vertices = set(G.vertices) while vertices: n = vertices.pop() group = {n} queue = Queue() queue.put(n) while not queue.empty(): n = queue.get() neighbors = set(G.vertices[n]) neighbors.difference_update(group) vertices.difference_update(neighbors) group.update(neighbors) for element in neighbors: queue.put(element) result.append(group) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prim(G, start, weight='weight'): """ Algorithm for finding a minimum spanning tree for a weighted undirected graph. """
if len(connected_components(G)) != 1: raise GraphInsertError("Prim algorithm work with connected graph only") if start not in G.vertices: raise GraphInsertError("Vertex %s doesn't exist." % (start,)) pred = {} key = {} pqueue = {} lowest = 0 for edge in G.edges: if G.edges[edge][weight] > lowest: lowest = G.edges[edge][weight] for vertex in G.vertices: pred[vertex] = None key[vertex] = 2 * lowest key[start] = 0 for vertex in G.vertices: pqueue[vertex] = key[vertex] while pqueue: current = popmin(pqueue, lowest) for neighbor in G.vertices[current]: if (neighbor in pqueue and G.edges[(current, neighbor)][weight] < key[neighbor]): pred[neighbor] = current key[neighbor] = G.edges[(current, neighbor)][weight] pqueue[neighbor] = G.edges[(current, neighbor)][weight] return pred
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find(cls, api_name): """ Find or create an API model object by name """
if api_name in cls.apis_by_name: return cls.apis_by_name[api_name] api = cls(api_name) api._fetch_from_aws() if api.exists_in_aws: api._fetch_resources() cls.apis_by_name[api_name] = api return api
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _load_from_configs(self, filename): """ Return content of file which located in configuration directory """
config_filename = os.path.join(self._config_path, filename) if os.path.exists(config_filename): try: f = open(config_filename, 'r') content = ''.join(f.readlines()) f.close() return content except Exception as err: raise err else: raise IOError("File not found: {}".format(config_filename))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self): """ Load application configuration """
try: if not self.__in_memory: self._json = json.loads(self._load_from_configs(self._main_config)) # ToDo: make this via extension for root logger # self._log = aLogger.getLogger(__name__, cfg=self) # reload logger using loaded configuration self._load_modules() else: self._json = {} # parse command line, currently used for re-assign settings in configuration, but can't be used as replacement self._load_from_commandline() except Exception as err: self._json = None raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, path, default=None, check_type=None, module_name=None): """ Get option property :param path: full path to the property with name :param default: default value if original is not present :param check_type: cast param to passed type, if fail, default will returned :param module_name: get property from module name :return: """
if self._json is not None: # process whole json or just concrete module node = self._json if module_name is None else self.get_module_config(module_name) path_data = path.split('.') try: while len(path_data) > 0: node = node[path_data.pop(0)] if check_type is not None: return check_type(node) else: return node except KeyError: if default is not None: return default else: raise KeyError("Key {} not present".format(path)) except ValueError: if default is not None: return default else: raise KeyError("Key {} has a wrong format".format(path)) else: return ""
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_module_config(self, name): """ Return module configuration loaded from separate file or None """
if self.exists("modules"): if name in self._json["modules"] and not isinstance(self._json["modules"][name], str): return self._json["modules"][name] return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_hook(hook_name): """Returns the specified hook. Args: hook_name (str) Returns: str - (the content of) the hook Raises: HookNotFoundError """
if not pkg_resources.resource_exists(__name__, hook_name): raise HookNotFoundError return pkg_resources.resource_string(__name__, hook_name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def end(self, s=None, post=None, noraise=False): """ Prints the end banner and raises ``ProgressOK`` exception When ``noraise`` flag is set to ``True``, then the exception is not raised, and progress is allowed to continue. If ``post`` function is supplied it is invoked with no arguments after the close banner is printed, but before exceptions are raised. The ``post`` function takes no arguments. """
s = s or self.end_msg self.printer(self.color.green(s)) if post: post() if noraise: return raise ProgressOK()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def abrt(self, s=None, post=None, noraise=False): """ Prints the abrt banner and raises ``ProgressAbrt`` exception When ``noraise`` flag is set to ``True``, then the exception is not raised, and progress is allowed to continue. If ``post`` function is supplied it is invoked with no arguments after the close banner is printed, but before exceptions are raised. The ``post`` function takes no arguments. """
s = s or self.abrt_msg self.printer(self.color.red(s)) if post: post() if noraise: return raise ProgressAbrt()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prog(self, s=None): """ Prints the progress indicator """
s = s or self.prog_msg self.printer(s, end='')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def where(self, custom_restrictions=[], **restrictions): """ Analog to SQL "WHERE". Does not perform a query until `select` is called. Returns a repo object. Options selected through keyword arguments are assumed to use == unles the value is a list, tuple, or dictionary. List or tuple values translate to an SQL `IN` over those values, and a dictionary looks up under a different table when joined. ex) SELECT foos.* FROM foos WHERE foos.id == 11 SELECT foos.* FROM foos WHERE foos.id > 12 SELECT foos.* FROM foos WHERE foos.id IN (1, 2, 3) """
# Generate the SQL pieces and the relevant values standard_names, standard_values = self._standard_items(restrictions) custom_names, custom_values = self._custom_items(custom_restrictions) in_names, in_values = self._in_items(restrictions) query_names = standard_names + custom_names + in_names # Stitch them into a clause with values if query_names: self.where_values = standard_values + custom_values + in_values self.where_clause = "where {query} ".format( query=" and ".join(query_names)) return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def order_by(self, **kwargs): """ Analog to SQL "ORDER BY". +kwargs+ should only contain one item. examples) NO: repo.order_by() NO: repo.order_by(id="desc", name="asc") YES: repo.order_by(id="asc) """
if kwargs: col, order = kwargs.popitem() self.order_clause = "order by {col} {order} ".format( col=col, order=order) return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def select(self, *attributes): """ Select the passed +attributes+ from the table, subject to the restrictions provided by the other methods in this class. ex) SELECT foos.name, foos.id FROM foos """
namespaced_attributes = [ "{table}.{attr}".format(table=self.table_name, attr=attr) for attr in attributes ] cmd = ('select {attrs} from {table} ' '{join_clause}{where_clause}{order_clause}' '{group_clause}{having_clause}{limit_clause}').format( table=self.table_name, attrs=", ".join(namespaced_attributes), where_clause=self.where_clause, join_clause=self.join_clause, order_clause=self.order_clause, group_clause=self.group_clause, having_clause=self.having_clause, limit_clause=self.limit_clause, ).rstrip() return Repo.db.execute(cmd, self.where_values + self.having_values + \ self.limit_value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def count(self): """ Count the number of records in the table, subject to the query. """
cmd = ("select COUNT(*) from {table} " "{join_clause}{where_clause}{order_clause}").format( table=self.table_name, where_clause=self.where_clause, join_clause=self.join_clause, order_clause=self.order_clause).rstrip() return Repo.db.execute(cmd, self.where_values)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update(self, **data): """ Update records in the table with +data+. Often combined with `where`, as it acts on all records in the table unless restricted. ex) UPDATE foos SET name = "bar" """
data = data.items() update_command_arg = ", ".join("{} = ?".format(entry[0]) for entry in data) cmd = "update {table} set {update_command_arg} {where_clause}".format( update_command_arg=update_command_arg, where_clause=self.where_clause, table=self.table_name).rstrip() Repo.db.execute(cmd, [entry[1] for entry in data] + self.where_values)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(self): """ Remove entries from the table. Often combined with `where`, as it acts on all records in the table unless restricted. """
cmd = "delete from {table} {where_clause}".format( table=self.table_name, where_clause=self.where_clause ).rstrip() Repo.db.execute(cmd, self.where_values)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connect_db(Repo, database=":memory:"): """ Connect Repo to a database with path +database+ so all instances can interact with the database. """
Repo.db = sqlite3.connect(database, detect_types=sqlite3.PARSE_DECLTYPES) return Repo.db
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _trace(self, frame, event, arg_unused): """ The trace function passed to sys.settrace. """
cur_time = time.time() lineno = frame.f_lineno depth = self.depth filename = inspect.getfile(frame) if self.last_exc_back: if frame == self.last_exc_back: self.data['time_spent'] += (cur_time - self.start_time) self.depth -= 1 self.data = self.data_stack.pop() self.last_exc_back = None if event == 'call': # Update our state self.depth += 1 if self.log: print >> sys.stdout, '%s >> %s:%s' % (' ' * (depth - 1), filename, frame.f_code.co_name) # origin line number (where it was called from) o_lineno = frame.f_back.f_lineno if self.pause_until is not None: if depth == self.pause_until: self.pause_until = None else: return self._trace if o_lineno not in self.data['lines']: self.pause_until = depth return self._trace # Append it to the stack self.data_stack.append(self.data) call_sig = '%s:%s' % (inspect.getfile(frame), frame.f_code.co_name) if call_sig not in self.data['children']: self.data['children'][o_lineno][call_sig] = self._get_struct(frame, event) self.data = self.data['children'][o_lineno][call_sig] self.data['num_calls'] += 1 elif event == 'line': # Record an executed line. if self.pause_until is None and lineno in self.data['lines']: self.data['lines'][lineno]['num_calls'] += 1 self.data['lines'][lineno]['time_spent'] += (cur_time - self.start_time) if self.log: print >> sys.stdout, '%s -- %s:%s executing line %d' % (' ' * (depth - 1), filename, frame.f_code.co_name, lineno) elif event == 'return': timing = (cur_time - self.start_time) # Leaving this function, pop the filename stack. 
if self.pause_until is None: self.data['time_spent'] += timing self.data = self.data_stack.pop() self.data['time_spent'] += timing # self.data['lines'][lineno]['num_calls'] += 1 # self.data['lines'][lineno]['time_spent'] += (cur_time - self.start_time) if self.log: print >> sys.stdout, '%s << %s:%s %.3fs' % (' ' * (depth - 1), filename, frame.f_code.co_name, timing) self.depth -= 1 elif event == 'exception': self.last_exc_back = frame.f_back self.last_exc_firstlineno = frame.f_code.co_firstlineno return self._trace
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(self, origin): """ Start this Tracer. Return a Python function suitable for use with sys.settrace(). """
self.start_time = time.time() self.pause_until = None self.data.update(self._get_struct(origin, 'origin')) self.data_stack.append(self.data) sys.settrace(self._trace) return self._trace
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stop(self): """ Stop this Tracer. """
if hasattr(sys, "gettrace") and self.log: if sys.gettrace() != self._trace: msg = "Trace function changed, measurement is likely wrong: %r" print >> sys.stdout, msg % sys.gettrace() sys.settrace(None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def resolver(*for_resolve, attr_package='__package_for_resolve_deco__'): """ Resolve dotted names in function arguments Usage: """
def decorator(func): spec = inspect.getargspec(func).args if set(for_resolve) - set(spec): raise ValueError('bad arguments') @wraps(func) def wrapper(*args, **kwargs): args = list(args) if args and attr_package: package = getattr(args[0], attr_package, None) else: package = None for item in for_resolve: n = spec.index(item) if n >= len(args): continue if n is not None and isinstance(args[n], str): args[n] = resolve(args[n], package) for kw, value in kwargs.copy().items(): if kw in for_resolve and isinstance(value, str): kwargs[kw] = resolve(value, package) return func(*args, **kwargs) return wrapper return decorator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def relateObjectLocs(obj, entities, selectF): """calculate the minimum distance to reach any iterable of entities with a loc"""
#if obj in entities: return 0 # is already one of the entities try: obj = obj.loc # get object's location, if it has one except AttributeError: pass # assume obj is already a MapPoint try: func = obj.direct2dDistance # assume obj is a MapPoint except AttributeError: raise ValueError("object %s (%s) does not possess and is not a %s"%(obj, type(obj), MapPoint)) try: return selectF([(func(b.loc), b) for b in entities]) except AttributeError: return selectF([(func(b) , b) for b in entities])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def convertToMapPic(byteString, mapWidth): """convert a bytestring into a 2D row x column array, representing an existing map of fog-of-war, creep, etc."""
data = [] line = "" for idx,char in enumerate(byteString): line += str(ord(char)) if ((idx+1)%mapWidth)==0: data.append(line) line = "" return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_component_definition(self, definition): """ Add a ComponentDefinition to the document """
# definition.identity = self._to_uri_from_namespace(definition.identity) if definition.identity not in self._components.keys(): self._components[definition.identity] = definition else: raise ValueError("{} has already been defined".format(definition.identity))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def assemble_component(self, into_component, using_components): """ Assemble a list of already defined components into a structual hirearchy """
if not isinstance(using_components, list) or len(using_components) == 0: raise Exception('Must supply list of ComponentDefinitions') components = [] sequence_annotations = [] seq_elements = '' for k, c in enumerate(using_components): try: self._components[c.identity] except KeyError: raise Exception('Must already have defined ComponentDefinition in document') else: identity = into_component.identity + '/' + c.identity # All components are initially public, this can be changed later component = Component(identity, c, 'public', display_id=c.identity) components.append(component) # If there is a sequence on the ComponentDefinition use the first element if len(c.sequences) > 0: # Add the sequence to the document self._add_sequence(c.sequences[0]) # Get start/end points of sequence start = len(seq_elements) + 1 # The sequence is usually 1 indexed end = start + len(c.sequences[0].elements) # Add to the component sequence element seq_elements += c.sequences[0].elements # Create a Range object to hold seq range range_identity = identity + '_sequence_annotation/range' seq_range = Range(range_identity, start, end, display_id='range') # Create a SequenceAnnotation object to hold the range annot_identity = identity + '_sequence_annotation' seq_annot = SequenceAnnotation(annot_identity, component=component, locations=[seq_range], display_id=c.identity + '_sequence_annotation') sequence_annotations.append(seq_annot) if seq_elements != '': seq_encoding = using_components[0].sequences[0].encoding seq_identity = '{}_sequence'.format(into_component.identity) seq = Sequence(seq_identity, seq_elements, encoding=seq_encoding) self._add_sequence(seq) into_component.sequences.append(seq) into_component.components = components into_component.sequence_annotations = sequence_annotations
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _add_sequence(self, sequence): """ Add a Sequence to the document """
if sequence.identity not in self._sequences.keys(): self._sequences[sequence.identity] = sequence else: raise ValueError("{} has already been defined".format(sequence.identity))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_model(self, model): """ Add a model to the document """
if model.identity not in self._models.keys(): self._models[model.identity] = model else: raise ValueError("{} has already been defined".format(model.identity))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_module_definition(self, module_definition): """ Add a ModuleDefinition to the document """
if module_definition.identity not in self._module_definitions.keys(): self._module_definitions[module_definition.identity] = module_definition else: raise ValueError("{} has already been defined".format(module_definition.identity))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_components(self, uri): """ Get components from a component definition in order """
try: component_definition = self._components[uri] except KeyError: return False sorted_sequences = sorted(component_definition.sequence_annotations, key=attrgetter('first_location')) return [c.component for c in sorted_sequences]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clear_document(self): """ Clears ALL items from document, reseting it to clean """
self._components.clear() self._sequences.clear() self._namespaces.clear() self._models.clear() self._modules.clear() self._collections.clear() self._annotations.clear() self._functional_component_store.clear() self._collection_store.clear()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_triplet_value(self, graph, identity, rdf_type): """ Get a value from an RDF triple """
value = graph.value(subject=identity, predicate=rdf_type) return value.toPython() if value is not None else value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_triplet_value_list(self, graph, identity, rdf_type): """ Get a list of values from RDF triples when more than one may be present """
values = [] for elem in graph.objects(identity, rdf_type): values.append(elem.toPython()) return values
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _read_sequences(self, graph): """ Read graph and add sequences to document """
for e in self._get_elements(graph, SBOL.Sequence): identity = e[0] c = self._get_rdf_identified(graph, identity) c['elements'] = self._get_triplet_value(graph, identity, SBOL.elements) c['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding) seq = Sequence(**c) self._sequences[identity.toPython()] = seq self._collection_store[identity.toPython()] = seq
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _read_component_definitions(self, graph): """ Read graph and add component defintions to document """
for e in self._get_elements(graph, SBOL.ComponentDefinition): identity = e[0] # Store component values in dict c = self._get_rdf_identified(graph, identity) c['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role) c['types'] = self._get_triplet_value_list(graph, identity, SBOL.type) obj = ComponentDefinition(**c) self._components[identity.toPython()] = obj self._collection_store[identity.toPython()] = obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _read_models(self, graph): """ Read graph and add models to document """
for e in self._get_elements(graph, SBOL.Model): identity = e[0] m = self._get_rdf_identified(graph, identity) m['source'] = self._get_triplet_value(graph, identity, SBOL.source) m['language'] = self._get_triplet_value(graph, identity, SBOL.language) m['framework'] = self._get_triplet_value(graph, identity, SBOL.framework) obj = Model(**m) self._models[identity.toPython()] = obj self._collection_store[identity.toPython()] = obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _read_module_definitions(self, graph): """ Read graph and add module defintions to document """
for e in self._get_elements(graph, SBOL.ModuleDefinition): identity = e[0] m = self._get_rdf_identified(graph, identity) m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role) functional_components = {} for func_comp in graph.triples((identity, SBOL.functionalComponent, None)): func_identity = func_comp[2] fc = self._get_rdf_identified(graph, func_identity) definition = self._get_triplet_value(graph, func_identity, SBOL.definition) fc['definition'] = self._components[definition] fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access) fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction) functional_components[func_identity.toPython()] = FunctionalComponent(**fc) self._functional_component_store[func_identity.toPython()] = \ functional_components[func_identity.toPython()] interactions = [] for inter in graph.triples((identity, SBOL.interaction, None)): inter_identity = inter[2] it = self._get_rdf_identified(graph, inter_identity) it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types) participations = [] for p in graph.triples((inter_identity, SBOL.participation, None)): pc = self._get_rdf_identified(graph, p[2]) roles = self._get_triplet_value_list(graph, p[2], SBOL.role) # Need to use one of the functional component created above participant_id = self._get_triplet_value(graph, p[2], SBOL.participant) participant = functional_components[participant_id] participations.append(Participation(roles=roles, participant=participant, **pc)) interactions.append(Interaction(participations=participations, **it)) obj = ModuleDefinition(functional_components=functional_components.values(), interactions=interactions, **m) self._modules[identity.toPython()] = obj self._collection_store[identity.toPython()] = obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _extend_module_definitions(self, graph): """ Using collected module definitions extend linkages """
for mod_id in self._modules: mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module) modules = [] for mod in graph.triples((mod_identity, SBOL.module, None)): md = self._get_rdf_identified(graph, mod[2]) definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition) md['definition'] = self._modules[definition_id] maps_to = [] for m in graph.triples((mod[2], SBOL.mapsTo, None)): mt = self._get_rdf_identified(graph, m[2]) mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement) local_id = self._get_triplet_value(graph, m[2], SBOL.local) remote_id = self._get_triplet_value(graph, m[2], SBOL.remote) mt['local'] = self._functional_component_store[local_id] mt['remote'] = self._functional_component_store[remote_id] maps_to.append(MapsTo(**mt)) modules.append(Module(maps_to=maps_to, **md)) self._modules[mod_id].modules = modules
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _read_annotations(self, graph): """ Find any non-defined elements at TopLevel and create annotations """
flipped_namespaces = {v: k for k, v in self._namespaces.items()} for triple in graph.triples((None, RDF.type, None)): namespace, obj = split_uri(triple[2]) prefix = flipped_namespaces[namespace] as_string = '{}:{}'.format(prefix, obj) if as_string not in VALID_ENTITIES: identity = triple[0] gt = self._get_rdf_identified(graph, identity) q_name = QName(namespace=namespace, local_name=obj, prefix=prefix) gt['rdf_type'] = q_name gt_obj = GenericTopLevel(**gt) self._annotations[identity.toPython()] = gt_obj self._collection_store[identity.toPython()] = gt_obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _read_collections(self, graph): """ Read graph and add collections to document """
for e in self._get_elements(graph, SBOL.Collection): identity = e[0] c = self._get_rdf_identified(graph, identity) members = [] # Need to handle other non-standard TopLevel objects first for m in graph.triples((identity, SBOL.member, None)): members.append(self._collection_store[m[2].toPython()]) obj = Collection(members=members, **c) self._collections[identity.toPython()] = obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read(self, f): """ Read in an SBOL file, replacing current document contents """
self.clear_document() g = Graph() g.parse(f, format='xml') for n in g.namespaces(): ns = n[1].toPython() if not ns.endswith(('#', '/', ':')): ns = ns + '/' self._namespaces[n[0]] = ns # Extend the existing namespaces available XML_NS[n[0]] = ns self._read_sequences(g) self._read_component_definitions(g) self._extend_component_definitions(g) self._read_models(g) self._read_module_definitions(g) self._extend_module_definitions(g) self._read_annotations(g) # Last as this needs all other top level objects created self._read_collections(g)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write(self, f): """ Write an SBOL file from current document contents """
rdf = ET.Element(NS('rdf', 'RDF'), nsmap=XML_NS) # TODO: TopLevel Annotations sequence_values = sorted(self._sequences.values(), key=lambda x: x.identity) self._add_to_root(rdf, sequence_values) component_values = sorted(self._components.values(), key=lambda x: x.identity) self._add_to_root(rdf, component_values) model_values = sorted(self._models.values(), key=lambda x: x.identity) self._add_to_root(rdf, model_values) module_values = sorted(self._modules.values(), key=lambda x: x.identity) self._add_to_root(rdf, module_values) collection_values = sorted(self._collections.values(), key=lambda x: x.identity) self._add_to_root(rdf, collection_values) annotation_values = sorted(self._annotations.values(), key=lambda x: x.identity) self._add_to_root(rdf, annotation_values) f.write(ET.tostring(rdf, pretty_print=True, xml_declaration=True, encoding='utf-8'))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(cls, dname): """ Get the requested domain @param dname: Domain name @type dname: str @rtype: Domain or None """
Domain = cls dname = dname.hostname if hasattr(dname, 'hostname') else dname.lower() return Session.query(Domain).filter(Domain.name == dname).first()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_or_create(cls, dname): """ Get the requested domain, or create it if it doesn't exist already @param dname: Domain name @type dname: str @rtype: Domain """
Domain = cls dname = dname.hostname if hasattr(dname, 'hostname') else dname extras = 'www.{dn}'.format(dn=dname) if dname not in ('localhost', ) and not \ re.match('^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$', dname) else None # Fetch the domain entry if it already exists logging.getLogger('ipsv.sites.domain').debug('Checking if the domain %s has already been registered', dname) domain = Session.query(Domain).filter(Domain.name == dname).first() # Otherwise create it now if not domain: logging.getLogger('ipsv.sites.domain')\ .debug('Domain name does not yet exist, creating a new database entry') domain = Domain(name=dname, extras=extras) Session.add(domain) Session.commit() return domain
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def all(cls, domain=None): """ Return all sites @param domain: The domain to filter by @type domain: Domain @rtype: list of Site """
Site = cls site = Session.query(Site) if domain: site.filter(Site.domain == domain) return site.all()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(cls, domain, name): """ Get the requested site entry @param domain: Domain name @type domain: Domain @param name: Site name @type name: str @rtype: Site """
Site = cls return Session.query(Site).filter(Site.domain == domain).filter(collate(Site.name, 'NOCASE') == name).first()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(self, drop_database=True): """ Delete the site entry @param drop_database: Drop the sites associated MySQL database @type drop_database: bool """
self.disable() Session.delete(self) if drop_database and self.db_name: mysql = create_engine('mysql://root:secret@localhost') mysql.execute('DROP DATABASE IF EXISTS `{db}`'.format(db=self.db_name)) try: mysql.execute('DROP USER `{u}`'.format(u=self.db_user)) except SQLAlchemyError: pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def version(self, value): """ Save the Site's version from a string or version tuple @type value: tuple or str """
# Version tuples are flattened through unparse_version(); strings are
# stored exactly as given.
self._version = unparse_version(value) if isinstance(value, tuple) else value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def enable(self, force=False): """ Enable this site """
log = logging.getLogger('ipsv.models.sites.site')
# Only one site per domain may be enabled at a time: flip every other
# site under this domain to disabled first.
log.debug('Disabling all other sites under the domain %s', self.domain.name)
Session.query(Site).filter(Site.id != self.id).filter(Site.domain == self.domain).update({'enabled': 0})

# Paths: the real config lives under sites-available/<domain>/<slug>.conf;
# the enabled symlink is named <domain>-<slug>.conf in sites-enabled.
sites_enabled_path = _cfg.get('Paths', 'NginxSitesEnabled')
server_config_path = os.path.join(_cfg.get('Paths', 'NginxSitesAvailable'), self.domain.name)
server_config_path = os.path.join(server_config_path, '{fn}.conf'.format(fn=self.slug))
symlink_path = os.path.join(sites_enabled_path, '{domain}-{fn}'.format(domain=self.domain.name,
                                                                       fn=os.path.basename(server_config_path)))

# Remove every existing sites-enabled entry for this domain before linking
# the new configuration in.
links = glob(os.path.join(sites_enabled_path, '{domain}-*'.format(domain=self.domain.name)))
for link in links:
    if os.path.islink(link):
        log.debug('Removing existing configuration symlink: %s', link)
        os.unlink(link)
    else:
        if not force:
            log.error('Configuration symlink path already exists, but it is not a symlink')
            raise Exception('Misconfiguration detected: symlink path already exists, but it is not a symlink '
                            'and --force was not passed. Unable to continue')
        # log.warning: log.warn is deprecated in the logging module.
        log.warning('Configuration symlink path already exists, but it is not a symlink. Removing anyways '
                    'since --force was set')
        # Bug fix: remove the offending path found by glob (`link`), not
        # `symlink_path` — the previous code deleted the wrong entry when the
        # non-symlink path differed from the new symlink's name.
        if os.path.isdir(link):
            shutil.rmtree(link)
        else:
            os.remove(link)

log.info('Enabling Nginx configuration file')
os.symlink(server_config_path, symlink_path)
self.enabled = 1
Session.commit()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def disable(self): """ Disable this site """
log = logging.getLogger('ipsv.models.sites.site')
# The enabled symlink is named <domain>-<slug>.conf under sites-enabled.
enabled_dir = _cfg.get('Paths', 'NginxSitesEnabled')
link_name = '{domain}-{fn}.conf'.format(domain=self.domain.name, fn=self.slug)
symlink_path = os.path.join(enabled_dir, link_name)
log.debug('Symlink path: %s', symlink_path)

# Only unlink if the path really is a symlink; anything else is left alone.
if os.path.islink(symlink_path):
    log.info('Removing configuration symlink: %s', symlink_path)
    os.unlink(symlink_path)

self.enabled = 0
Session.commit()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_nginx_config(self): """ Write the Nginx configuration file for this Site """
log = logging.getLogger('ipsv.models.sites.site')

# Make sure the document root exists before writing any configuration.
if not os.path.exists(self.root):
    log.debug('Creating HTTP root directory: %s', self.root)
    os.makedirs(self.root, 0o755)

# Generate our server block configuration
block = ServerBlock(self)

# Config lives at sites-available/<domain>/<slug>.conf; create the domain
# directory on first use.
config_dir = os.path.join(_cfg.get('Paths', 'NginxSitesAvailable'), self.domain.name)
if not os.path.exists(config_dir):
    log.debug('Creating new configuration path: %s', config_dir)
    os.makedirs(config_dir, 0o755)

config_file = os.path.join(config_dir, '{fn}.conf'.format(fn=self.slug))
if os.path.exists(config_file):
    log.info('Server block configuration file already exists, overwriting: %s', config_file)
    os.remove(config_file)

log.info('Writing Nginx server block configuration file')
with open(config_file, 'w') as f:
    f.write(block.template)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def extend_list(self, data, parsed_args): """Add subnet information to a network list."""
# Bulk-fetch the subnets referenced by the listed networks (only their 'id'
# and 'cidr' fields) and inline them into each network's 'subnets' entry,
# replacing bare subnet IDs with subnet dicts.
neutron_client = self.get_client()
search_opts = {'fields': ['id', 'cidr']}
if self.pagination_support:
    page_size = parsed_args.page_size
    if page_size:
        search_opts.update({'limit': page_size})
# Collect every subnet ID referenced by any listed network.
subnet_ids = []
for n in data:
    if 'subnets' in n:
        subnet_ids.extend(n['subnets'])

def _get_subnet_list(sub_ids):
    # List only the subnets whose IDs are in sub_ids (mutates the shared
    # search_opts — safe because it is fully overwritten on each call).
    search_opts['id'] = sub_ids
    return neutron_client.list_subnets(
        **search_opts).get('subnets', [])

try:
    subnets = _get_subnet_list(subnet_ids)
except exceptions.RequestURITooLong as uri_len_exc:
    # The URI is too long because of too many subnet_id filters
    # Use the excess attribute of the exception to know how many
    # subnet_id filters can be inserted into a single request
    subnet_count = len(subnet_ids)
    max_size = ((self.subnet_id_filter_len * subnet_count) -
                uri_len_exc.excess)
    chunk_size = max_size // self.subnet_id_filter_len
    subnets = []
    # Re-issue the listing in chunks that fit within the URI length limit.
    for i in range(0, subnet_count, chunk_size):
        subnets.extend(
            _get_subnet_list(subnet_ids[i: i + chunk_size]))

# Index the returned subnets by ID, then substitute each network's bare
# subnet IDs with the full dicts; IDs missing from the lookup fall back to
# a stub {"id": <id>} dict.
subnet_dict = dict([(s['id'], s) for s in subnets])
for n in data:
    if 'subnets' in n:
        n['subnets'] = [(subnet_dict.get(s) or {"id": s})
                        for s in n['subnets']]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bump(self, bump_part): """Return a new bumped version instance."""
major, minor, patch, stage, n = tuple(self) # stage bump if bump_part not in {"major", "minor", "patch"}: if bump_part not in self.stages: raise ValueError(f"Unknown {bump_part} stage") # We can not bump from final stage to final again. if self.stage == "final" and bump_part == "final": raise ValueError(f"{self} is already in final stage.") # bump in the same stage (numeric part) if bump_part == self.stage: n += 1 else: new_stage_number = tuple(self.stages).index(bump_part) # We can not bump to a previous stage if new_stage_number < self._stage_number: raise ValueError(f"{bump_part} stage is previous to {self}") stage = bump_part n = 0 else: # major, minor, or patch bump # Only version in final stage can do a major, minor or patch # bump if self.stage != "final": raise ValueError( f"{self} is a pre-release version." f" Can't do a {bump_part} version bump" ) if bump_part == "major": major += 1 minor, patch = 0, 0 elif bump_part == "minor": minor += 1 patch = 0 else: patch += 1 return Version(major=major, minor=minor, patch=patch, stage=stage, n=n)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: -> asyncio.tasks.Task: """Create task and add to our collection of pending tasks."""
if asyncio.iscoroutine(target): task = self._loop.create_task(target) elif asyncio.iscoroutinefunction(target): task = self._loop.create_task(target(*args)) else: raise ValueError("Expected coroutine as target") self._pending_tasks.append(task) return task
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cancel_pending_tasks(self): """Cancel all pending tasks."""
# Cancel every tracked task.  If the event loop is not running, drive each
# cancelled task to completion synchronously so the cancellation actually
# gets processed, swallowing the expected CancelledError.
# NOTE(review): _pending_tasks is not cleared here — confirm whether callers
# are expected to reset it afterwards.
for task in self._pending_tasks:
    task.cancel()
    if not self._loop.is_running():
        try:
            self._loop.run_until_complete(task)
        except asyncio.CancelledError:
            pass
        except Exception:  # pylint: disable=broad-except
            # Deliberate best-effort: log and keep cancelling the rest.
            _LOGGER.error("Unhandled exception from async task", exc_info=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(self, fork=True): """Starts the registry aggregator. :param fork: whether to fork a process; if ``False``, blocks and stays in the existing process """
# Forking runs the aggregator in a child process and remembers its handle
# on self.process; otherwise the aggregator runs inline and blocks here.
if fork:
    distributed_logger.info('Starting metrics aggregator, forking')
    child = Process(target=_registry_aggregator,
                    args=(self.reporter, self.socket_addr, ))
    child.start()
    distributed_logger.info('Started metrics aggregator as PID %s', child.pid)
    self.process = child
else:
    distributed_logger.info('Starting metrics aggregator, not forking')
    _registry_aggregator(self.reporter, self.socket_addr)